fantaxy committed on
Commit
7c324a3
·
verified ·
1 Parent(s): d1e5256

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -7
app.py CHANGED
@@ -260,7 +260,6 @@ def btn_preprocess_video_fn(video_path, width, height, start_time, end_time, cen
260
  def btn_image_edit_fn(video_path, instruct_prompt, ie_force_512, ie_seed, ie_neg_prompt):
261
  """
262
  Generate an image based on the video and text input.
263
- This function should be replaced with your actual image generation logic.
264
  """
265
  # Placeholder logic for image generation
266
 
@@ -311,13 +310,10 @@ def btn_infer_fn(video_path,
311
  # Create the UI
312
  #=====================================
313
  with gr.Blocks() as demo:
314
- gr.Markdown("# <img src='https://tiger-ai-lab.github.io/AnyV2V/static/images/icon.png' width='30'/> AnyV2V")
315
- gr.Markdown("Official 🤗 Gradio demo for [AnyV2V: A Plug-and-Play Framework For Any Video-to-Video Editing Tasks](https://tiger-ai-lab.github.io/AnyV2V/)")
316
 
317
  with gr.Tabs():
318
  with gr.TabItem('AnyV2V(I2VGenXL) + InstructPix2Pix'):
319
- gr.Markdown("# Preprocessing Video Stage")
320
- gr.Markdown("In this demo, AnyV2V only support video with 2 seconds duration and 8 fps. If your video is not in this format, we will preprocess it for you. Click on the Preprocess video button!")
321
  with gr.Row():
322
  with gr.Column():
323
  video_raw = gr.Video(label="Raw Video Input")
@@ -339,7 +335,6 @@ with gr.Blocks() as demo:
339
  pv_longest_to_width = gr.Checkbox(label="Resize Longest Dimension to Width")
340
 
341
  gr.Markdown("# Image Editing Stage")
342
- gr.Markdown("Edit the first frame of the video to your liking! Click on the Edit the first frame button after inputting the editing instruction prompt. This image editing stage is powered by InstructPix2Pix. You can try edit the image multiple times until you are happy with the result! You can also choose to download the first frame of the video and edit it with other software (e.g. Photoshop, GIMP, etc.) or use other image editing models to obtain the edited frame and upload it directly.")
343
  with gr.Row():
344
  with gr.Column():
345
  src_first_frame = gr.Image(label="First Frame", type="filepath", interactive=False)
@@ -356,7 +351,6 @@ with gr.Blocks() as demo:
356
  ie_force_512 = gr.Checkbox(label="Force resize to 512x512 before feeding into the image editing model")
357
 
358
  gr.Markdown("# Video Editing Stage")
359
- gr.Markdown("Enjoy the full control of the video editing process using the edited image and the preprocessed video! Click on the Run AnyV2V button after inputting the video description prompt. Try tweak with the setting if the output does not satisfy you!")
360
  with gr.Row():
361
  with gr.Column():
362
  video_prompt = gr.Textbox(label="Video description prompt")
 
260
  def btn_image_edit_fn(video_path, instruct_prompt, ie_force_512, ie_seed, ie_neg_prompt):
261
  """
262
  Generate an image based on the video and text input.
 
263
  """
264
  # Placeholder logic for image generation
265
 
 
310
  # Create the UI
311
  #=====================================
312
  with gr.Blocks() as demo:
313
+ gr.Markdown("# text + video + image")
 
314
 
315
  with gr.Tabs():
316
  with gr.TabItem('AnyV2V(I2VGenXL) + InstructPix2Pix'):
 
 
317
  with gr.Row():
318
  with gr.Column():
319
  video_raw = gr.Video(label="Raw Video Input")
 
335
  pv_longest_to_width = gr.Checkbox(label="Resize Longest Dimension to Width")
336
 
337
  gr.Markdown("# Image Editing Stage")
 
338
  with gr.Row():
339
  with gr.Column():
340
  src_first_frame = gr.Image(label="First Frame", type="filepath", interactive=False)
 
351
  ie_force_512 = gr.Checkbox(label="Force resize to 512x512 before feeding into the image editing model")
352
 
353
  gr.Markdown("# Video Editing Stage")
 
354
  with gr.Row():
355
  with gr.Column():
356
  video_prompt = gr.Textbox(label="Video description prompt")