wwen1997 committed on
Commit 87a9ce9
Parent: c0f86bb

Update app.py

Files changed (1): app.py (+266, −265)
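The change in one sentence: everything that previously executed at import time — CLI parsing via `get_args()`, `ensure_dirname(args.output_dir)`, the random `color_list` palette, and construction and launch of the Gradio UI — now runs only under an `if __name__ == "__main__":` guard, so importing `app.py` (from a test, or from a process manager that loads the module) has no side effects. A minimal sketch of the pattern, with the demo body elided:

```python
import gradio as gr

def get_args():
    ...  # argparse setup, unchanged by this commit

if __name__ == "__main__":
    # Runs only for `python app.py`, never on import.
    args = get_args()
    with gr.Blocks() as demo:
        ...  # UI construction, exactly as in the hunks below
    demo.launch()
```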
app.py CHANGED
@@ -91,16 +91,6 @@ def get_args():
     return args
 
 
-args = get_args()
-ensure_dirname(args.output_dir)
-
-
-color_list = []
-for i in range(20):
-    color = np.concatenate([np.random.random(4)*255], axis=0)
-    color_list.append(color)
-
-
 def interpolate_trajectory(points, n_points):
     x = [point[0] for point in points]
     y = [point[1] for point in points]
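One observation on the block that the second hunk reinstates: the red-polyline/arrowhead overlay loop appears verbatim in `delete_last_drag`, `delete_last_step`, and `add_tracking_points`. A sketch of the shared helper it could be factored into — the `draw_tracks` name, the zero-length guard, and the empty-track check are additions, not part of the commit; the drawing logic is copied from the handlers below:

```python
import cv2
import numpy as np

def draw_tracks(tracks, h, w):
    """Hypothetical helper: rasterize drag paths onto an RGBA overlay.

    Mirrors the loop used by the three handlers in the hunk below:
    multi-point tracks become red polylines ending in an arrowhead,
    single points become filled circles. Points are (x, y) int pairs.
    """
    layer = np.zeros((h, w, 4), dtype=np.uint8)
    for track in tracks:
        if len(track) > 1:
            for i in range(len(track) - 1):
                start, end = tuple(track[i]), tuple(track[i + 1])
                length = np.hypot(end[0] - start[0], end[1] - start[1])
                if i == len(track) - 2:
                    # Guard against coincident points: the original divides
                    # by the raw length, which can be zero.
                    cv2.arrowedLine(layer, start, end, (255, 0, 0, 255), 2,
                                    tipLength=8 / max(length, 1e-6))
                else:
                    cv2.line(layer, start, end, (255, 0, 0, 255), 2)
        elif track:
            cv2.circle(layer, tuple(track[0]), 5, (255, 0, 0, 255), -1)
    return layer
```

Each handler would then reduce to mutating the track list, calling the helper, and alpha-compositing `Image.fromarray(draw_tracks(...))` onto the two frames.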
@@ -536,264 +526,275 @@ class Drag:
         return val_save_dir
 
 
-with gr.Blocks() as demo:
-    gr.Markdown("""<h1 align="center">Framer: Interactive Frame Interpolation</h1><br>""")
-
-    gr.Markdown("""Gradio Demo for <a href='https://arxiv.org/abs/2410.18978'><b>Framer: Interactive Frame Interpolation</b></a>.<br>
-    GitHub repo can be found at https://github.com/aim-uofa/Framer<br>
-    The template is inspired by DragAnything.""")
-
-    gr.Image(label="Framer: Interactive Frame Interpolation", value="assets/demos.gif", height=432, width=768)
-
-    gr.Markdown("""## Usage: <br>
-    1. Upload images<br>
-    &ensp; 1.1. Upload the start image via the "Upload Start Image" button.<br>
-    &ensp; 1.2. Upload the end image via the "Upload End Image" button.<br>
-    2. (Optional) Draw some drags.<br>
-    &ensp; 2.1. Click "Add New Drag Trajectory" to add a motion trajectory.<br>
-    &ensp; 2.2. Click several points on either the start or end image to form a path.<br>
-    &ensp; 2.3. Click "Delete last drag" to delete the whole latest path.<br>
-    &ensp; 2.4. Click "Delete last step" to delete the latest clicked control point.<br>
-    3. Interpolate the images (according to the path) with a click on the "Run" button.<br>""")
-
-    # device, args, height, width, model_length
-    Framer = Drag("cuda", args, 320, 512, 14)
-    first_frame_path = gr.State()
-    last_frame_path = gr.State()
-    tracking_points = gr.State([])
-
-    def reset_states(first_frame_path, last_frame_path, tracking_points):
-        first_frame_path = gr.State()
-        last_frame_path = gr.State()
-        tracking_points = gr.State([])
-
-        return first_frame_path, last_frame_path, tracking_points
-
-
-    def preprocess_image(image):
-
-        image_pil = image2pil(image.name)
-
-        raw_w, raw_h = image_pil.size
-        # resize_ratio = max(512 / raw_w, 320 / raw_h)
-        # image_pil = image_pil.resize((int(raw_w * resize_ratio), int(raw_h * resize_ratio)), Image.BILINEAR)
-        # image_pil = transforms.CenterCrop((320, 512))(image_pil.convert('RGB'))
-        image_pil = image_pil.resize((512, 320), Image.BILINEAR)
-
-        first_frame_path = os.path.join(args.output_dir, f"first_frame_{str(uuid.uuid4())[:4]}.png")
-
-        image_pil.save(first_frame_path)
-
-        return first_frame_path, first_frame_path, gr.State([])
-
-
-    def preprocess_image_end(image_end):
-
-        image_end_pil = image2pil(image_end.name)
-
-        raw_w, raw_h = image_end_pil.size
-        # resize_ratio = max(512 / raw_w, 320 / raw_h)
-        # image_end_pil = image_end_pil.resize((int(raw_w * resize_ratio), int(raw_h * resize_ratio)), Image.BILINEAR)
-        # image_end_pil = transforms.CenterCrop((320, 512))(image_end_pil.convert('RGB'))
-        image_end_pil = image_end_pil.resize((512, 320), Image.BILINEAR)
-
-        last_frame_path = os.path.join(args.output_dir, f"last_frame_{str(uuid.uuid4())[:4]}.png")
-
-        image_end_pil.save(last_frame_path)
-
-        return last_frame_path, last_frame_path, gr.State([])
-
-
-    def add_drag(tracking_points):
-        tracking_points.constructor_args['value'].append([])
-        return tracking_points
-
-
-    def delete_last_drag(tracking_points, first_frame_path, last_frame_path):
-        tracking_points.constructor_args['value'].pop()
-        transparent_background = Image.open(first_frame_path).convert('RGBA')
-        transparent_background_end = Image.open(last_frame_path).convert('RGBA')
-        w, h = transparent_background.size
-        transparent_layer = np.zeros((h, w, 4))
-
-        for track in tracking_points.constructor_args['value']:
-            if len(track) > 1:
-                for i in range(len(track)-1):
-                    start_point = track[i]
-                    end_point = track[i+1]
-                    vx = end_point[0] - start_point[0]
-                    vy = end_point[1] - start_point[1]
-                    arrow_length = np.sqrt(vx**2 + vy**2)
-                    if i == len(track)-2:
-                        cv2.arrowedLine(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2, tipLength=8 / arrow_length)
-                    else:
-                        cv2.line(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2)
-            else:
-                cv2.circle(transparent_layer, tuple(track[0]), 5, (255, 0, 0, 255), -1)
-
-        transparent_layer = Image.fromarray(transparent_layer.astype(np.uint8))
-        trajectory_map = Image.alpha_composite(transparent_background, transparent_layer)
-        trajectory_map_end = Image.alpha_composite(transparent_background_end, transparent_layer)
-
-        return tracking_points, trajectory_map, trajectory_map_end
-
-
-    def delete_last_step(tracking_points, first_frame_path, last_frame_path):
-        tracking_points.constructor_args['value'][-1].pop()
-        transparent_background = Image.open(first_frame_path).convert('RGBA')
-        transparent_background_end = Image.open(last_frame_path).convert('RGBA')
-        w, h = transparent_background.size
-        transparent_layer = np.zeros((h, w, 4))
-
-        for track in tracking_points.constructor_args['value']:
-            if len(track) > 1:
-                for i in range(len(track)-1):
-                    start_point = track[i]
-                    end_point = track[i+1]
-                    vx = end_point[0] - start_point[0]
-                    vy = end_point[1] - start_point[1]
-                    arrow_length = np.sqrt(vx**2 + vy**2)
-                    if i == len(track)-2:
-                        cv2.arrowedLine(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2, tipLength=8 / arrow_length)
-                    else:
-                        cv2.line(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2)
-            else:
-                cv2.circle(transparent_layer, tuple(track[0]), 5, (255, 0, 0, 255), -1)
-
-        transparent_layer = Image.fromarray(transparent_layer.astype(np.uint8))
-        trajectory_map = Image.alpha_composite(transparent_background, transparent_layer)
-        trajectory_map_end = Image.alpha_composite(transparent_background_end, transparent_layer)
-
-        return tracking_points, trajectory_map, trajectory_map_end
-
-
-    def add_tracking_points(tracking_points, first_frame_path, last_frame_path, evt: gr.SelectData):  # SelectData is a subclass of EventData
-        print(f"You selected {evt.value} at {evt.index} from {evt.target}")
-        tracking_points.constructor_args['value'][-1].append(evt.index)
-
-        transparent_background = Image.open(first_frame_path).convert('RGBA')
-        transparent_background_end = Image.open(last_frame_path).convert('RGBA')
-
-        w, h = transparent_background.size
-        transparent_layer = 0
-        for idx, track in enumerate(tracking_points.constructor_args['value']):
-            # mask = cv2.imread(
-            #     os.path.join(args.output_dir, f"mask_{idx+1}.jpg")
-            # )
-            mask = np.zeros((320, 512, 3))
-            color = color_list[idx+1]
-            transparent_layer = mask[:, :, 0].reshape(h, w, 1) * color.reshape(1, 1, -1) + transparent_layer
-
-            if len(track) > 1:
-                for i in range(len(track)-1):
-                    start_point = track[i]
-                    end_point = track[i+1]
-                    vx = end_point[0] - start_point[0]
-                    vy = end_point[1] - start_point[1]
-                    arrow_length = np.sqrt(vx**2 + vy**2)
-                    if i == len(track)-2:
-                        cv2.arrowedLine(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2, tipLength=8 / arrow_length)
-                    else:
-                        cv2.line(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2)
-            else:
-                cv2.circle(transparent_layer, tuple(track[0]), 5, (255, 0, 0, 255), -1)
-
-        transparent_layer = Image.fromarray(transparent_layer.astype(np.uint8))
-        alpha_coef = 0.99
-        im2_data = transparent_layer.getdata()
-        new_im2_data = [(r, g, b, int(a * alpha_coef)) for r, g, b, a in im2_data]
-        transparent_layer.putdata(new_im2_data)
-
-        trajectory_map = Image.alpha_composite(transparent_background, transparent_layer)
-        trajectory_map_end = Image.alpha_composite(transparent_background_end, transparent_layer)
-
-        return tracking_points, trajectory_map, trajectory_map_end
-
-    with gr.Row():
-        with gr.Column(scale=1):
-            image_upload_button = gr.UploadButton(label="Upload Start Image", file_types=["image"])
-            image_end_upload_button = gr.UploadButton(label="Upload End Image", file_types=["image"])
-            # select_area_button = gr.Button(value="Select Area with SAM")
-            add_drag_button = gr.Button(value="Add New Drag Trajectory")
-            reset_button = gr.Button(value="Reset")
-            run_button = gr.Button(value="Run")
-            delete_last_drag_button = gr.Button(value="Delete last drag")
-            delete_last_step_button = gr.Button(value="Delete last step")
-
-        with gr.Column(scale=7):
-            with gr.Row():
-                with gr.Column(scale=6):
-                    input_image = gr.Image(
-                        label="start frame",
-                        interactive=True,
-                        height=320,
-                        width=512,
-                    )
-
-                with gr.Column(scale=6):
-                    input_image_end = gr.Image(
-                        label="end frame",
-                        interactive=True,
-                        height=320,
-                        width=512,
-                    )
-
-    with gr.Row():
-        with gr.Column(scale=1):
-
-            controlnet_cond_scale = gr.Slider(
-                label='Control Scale',
-                minimum=0.0,
-                maximum=10,
-                step=0.1,
-                value=1.0,
-            )
-
-            motion_bucket_id = gr.Slider(
-                label='Motion Bucket',
-                minimum=1,
-                maximum=180,
-                step=1,
-                value=100,
-            )
-
-        with gr.Column(scale=5):
-            output_video = gr.Image(
-                label="Output Video",
-                height=320,
-                width=1152,
-            )
-
-
-    with gr.Row():
-        gr.Markdown("""
-        ## Citation
-        ```bibtex
-        @article{wang2024framer,
-            title={Framer: Interactive Frame Interpolation},
-            author={Wang, Wen and Wang, Qiuyu and Zheng, Kecheng and Ouyang, Hao and Chen, Zhekai and Gong, Biao and Chen, Hao and Shen, Yujun and Shen, Chunhua},
-            journal={arXiv preprint arXiv:2410.18978},
-            year={2024}
-        }
-        ```
-        """)
-
-    image_upload_button.upload(preprocess_image, image_upload_button, [input_image, first_frame_path, tracking_points])
-
-    image_end_upload_button.upload(preprocess_image_end, image_end_upload_button, [input_image_end, last_frame_path, tracking_points])
-
-    add_drag_button.click(add_drag, tracking_points, [tracking_points])
-
-    delete_last_drag_button.click(delete_last_drag, [tracking_points, first_frame_path, last_frame_path], [tracking_points, input_image, input_image_end])
-
-    delete_last_step_button.click(delete_last_step, [tracking_points, first_frame_path, last_frame_path], [tracking_points, input_image, input_image_end])
-
-    reset_button.click(reset_states, [first_frame_path, last_frame_path, tracking_points], [first_frame_path, last_frame_path, tracking_points])
-
-    input_image.select(add_tracking_points, [tracking_points, first_frame_path, last_frame_path], [tracking_points, input_image, input_image_end])
-
-    input_image_end.select(add_tracking_points, [tracking_points, first_frame_path, last_frame_path], [tracking_points, input_image, input_image_end])
-
-    run_button.click(Framer.run, [first_frame_path, last_frame_path, tracking_points, controlnet_cond_scale, motion_bucket_id], output_video)
-
-demo.launch()
+if __name__ == "__main__":
+    args = get_args()
+    ensure_dirname(args.output_dir)
+
+
+    color_list = []
+    for i in range(20):
+        color = np.concatenate([np.random.random(4)*255], axis=0)
+        color_list.append(color)
+
+
+    with gr.Blocks() as demo:
+        gr.Markdown("""<h1 align="center">Framer: Interactive Frame Interpolation</h1><br>""")
+
+        gr.Markdown("""Gradio Demo for <a href='https://arxiv.org/abs/2410.18978'><b>Framer: Interactive Frame Interpolation</b></a>.<br>
+        GitHub repo can be found at https://github.com/aim-uofa/Framer<br>
+        The template is inspired by DragAnything.""")
+
+        gr.Image(label="Framer: Interactive Frame Interpolation", value="assets/demos.gif", height=432, width=768)
+
+        gr.Markdown("""## Usage: <br>
+        1. Upload images<br>
+        &ensp; 1.1. Upload the start image via the "Upload Start Image" button.<br>
+        &ensp; 1.2. Upload the end image via the "Upload End Image" button.<br>
+        2. (Optional) Draw some drags.<br>
+        &ensp; 2.1. Click "Add New Drag Trajectory" to add a motion trajectory.<br>
+        &ensp; 2.2. Click several points on either the start or end image to form a path.<br>
+        &ensp; 2.3. Click "Delete last drag" to delete the whole latest path.<br>
+        &ensp; 2.4. Click "Delete last step" to delete the latest clicked control point.<br>
+        3. Interpolate the images (according to the path) with a click on the "Run" button.<br>""")
+
+        # device, args, height, width, model_length
+        Framer = Drag("cuda", args, 320, 512, 14)
+        first_frame_path = gr.State()
+        last_frame_path = gr.State()
+        tracking_points = gr.State([])
+
+        def reset_states(first_frame_path, last_frame_path, tracking_points):
+            first_frame_path = gr.State()
+            last_frame_path = gr.State()
+            tracking_points = gr.State([])
+
+            return first_frame_path, last_frame_path, tracking_points
+
+
+        def preprocess_image(image):
+
+            image_pil = image2pil(image.name)
+
+            raw_w, raw_h = image_pil.size
+            # resize_ratio = max(512 / raw_w, 320 / raw_h)
+            # image_pil = image_pil.resize((int(raw_w * resize_ratio), int(raw_h * resize_ratio)), Image.BILINEAR)
+            # image_pil = transforms.CenterCrop((320, 512))(image_pil.convert('RGB'))
+            image_pil = image_pil.resize((512, 320), Image.BILINEAR)
+
+            first_frame_path = os.path.join(args.output_dir, f"first_frame_{str(uuid.uuid4())[:4]}.png")
+
+            image_pil.save(first_frame_path)
+
+            return first_frame_path, first_frame_path, gr.State([])
+
+
+        def preprocess_image_end(image_end):
+
+            image_end_pil = image2pil(image_end.name)
+
+            raw_w, raw_h = image_end_pil.size
+            # resize_ratio = max(512 / raw_w, 320 / raw_h)
+            # image_end_pil = image_end_pil.resize((int(raw_w * resize_ratio), int(raw_h * resize_ratio)), Image.BILINEAR)
+            # image_end_pil = transforms.CenterCrop((320, 512))(image_end_pil.convert('RGB'))
+            image_end_pil = image_end_pil.resize((512, 320), Image.BILINEAR)
+
+            last_frame_path = os.path.join(args.output_dir, f"last_frame_{str(uuid.uuid4())[:4]}.png")
+
+            image_end_pil.save(last_frame_path)
+
+            return last_frame_path, last_frame_path, gr.State([])
+
+
+        def add_drag(tracking_points):
+            tracking_points.constructor_args['value'].append([])
+            return tracking_points
+
+
+        def delete_last_drag(tracking_points, first_frame_path, last_frame_path):
+            tracking_points.constructor_args['value'].pop()
+            transparent_background = Image.open(first_frame_path).convert('RGBA')
+            transparent_background_end = Image.open(last_frame_path).convert('RGBA')
+            w, h = transparent_background.size
+            transparent_layer = np.zeros((h, w, 4))
+
+            for track in tracking_points.constructor_args['value']:
+                if len(track) > 1:
+                    for i in range(len(track)-1):
+                        start_point = track[i]
+                        end_point = track[i+1]
+                        vx = end_point[0] - start_point[0]
+                        vy = end_point[1] - start_point[1]
+                        arrow_length = np.sqrt(vx**2 + vy**2)
+                        if i == len(track)-2:
+                            cv2.arrowedLine(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2, tipLength=8 / arrow_length)
+                        else:
+                            cv2.line(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2)
+                else:
+                    cv2.circle(transparent_layer, tuple(track[0]), 5, (255, 0, 0, 255), -1)
+
+            transparent_layer = Image.fromarray(transparent_layer.astype(np.uint8))
+            trajectory_map = Image.alpha_composite(transparent_background, transparent_layer)
+            trajectory_map_end = Image.alpha_composite(transparent_background_end, transparent_layer)
+
+            return tracking_points, trajectory_map, trajectory_map_end
+
+
+        def delete_last_step(tracking_points, first_frame_path, last_frame_path):
+            tracking_points.constructor_args['value'][-1].pop()
+            transparent_background = Image.open(first_frame_path).convert('RGBA')
+            transparent_background_end = Image.open(last_frame_path).convert('RGBA')
+            w, h = transparent_background.size
+            transparent_layer = np.zeros((h, w, 4))
+
+            for track in tracking_points.constructor_args['value']:
+                if len(track) > 1:
+                    for i in range(len(track)-1):
+                        start_point = track[i]
+                        end_point = track[i+1]
+                        vx = end_point[0] - start_point[0]
+                        vy = end_point[1] - start_point[1]
+                        arrow_length = np.sqrt(vx**2 + vy**2)
+                        if i == len(track)-2:
+                            cv2.arrowedLine(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2, tipLength=8 / arrow_length)
+                        else:
+                            cv2.line(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2)
+                else:
+                    cv2.circle(transparent_layer, tuple(track[0]), 5, (255, 0, 0, 255), -1)
+
+            transparent_layer = Image.fromarray(transparent_layer.astype(np.uint8))
+            trajectory_map = Image.alpha_composite(transparent_background, transparent_layer)
+            trajectory_map_end = Image.alpha_composite(transparent_background_end, transparent_layer)
+
+            return tracking_points, trajectory_map, trajectory_map_end
+
+
+        def add_tracking_points(tracking_points, first_frame_path, last_frame_path, evt: gr.SelectData):  # SelectData is a subclass of EventData
+            print(f"You selected {evt.value} at {evt.index} from {evt.target}")
+            tracking_points.constructor_args['value'][-1].append(evt.index)
+
+            transparent_background = Image.open(first_frame_path).convert('RGBA')
+            transparent_background_end = Image.open(last_frame_path).convert('RGBA')
+
+            w, h = transparent_background.size
+            transparent_layer = 0
+            for idx, track in enumerate(tracking_points.constructor_args['value']):
+                # mask = cv2.imread(
+                #     os.path.join(args.output_dir, f"mask_{idx+1}.jpg")
+                # )
+                mask = np.zeros((320, 512, 3))
+                color = color_list[idx+1]
+                transparent_layer = mask[:, :, 0].reshape(h, w, 1) * color.reshape(1, 1, -1) + transparent_layer
+
+                if len(track) > 1:
+                    for i in range(len(track)-1):
+                        start_point = track[i]
+                        end_point = track[i+1]
+                        vx = end_point[0] - start_point[0]
+                        vy = end_point[1] - start_point[1]
+                        arrow_length = np.sqrt(vx**2 + vy**2)
+                        if i == len(track)-2:
+                            cv2.arrowedLine(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2, tipLength=8 / arrow_length)
+                        else:
+                            cv2.line(transparent_layer, tuple(start_point), tuple(end_point), (255, 0, 0, 255), 2)
+                else:
+                    cv2.circle(transparent_layer, tuple(track[0]), 5, (255, 0, 0, 255), -1)
+
+            transparent_layer = Image.fromarray(transparent_layer.astype(np.uint8))
+            alpha_coef = 0.99
+            im2_data = transparent_layer.getdata()
+            new_im2_data = [(r, g, b, int(a * alpha_coef)) for r, g, b, a in im2_data]
+            transparent_layer.putdata(new_im2_data)
+
+            trajectory_map = Image.alpha_composite(transparent_background, transparent_layer)
+            trajectory_map_end = Image.alpha_composite(transparent_background_end, transparent_layer)
+
+            return tracking_points, trajectory_map, trajectory_map_end
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                image_upload_button = gr.UploadButton(label="Upload Start Image", file_types=["image"])
+                image_end_upload_button = gr.UploadButton(label="Upload End Image", file_types=["image"])
+                # select_area_button = gr.Button(value="Select Area with SAM")
+                add_drag_button = gr.Button(value="Add New Drag Trajectory")
+                reset_button = gr.Button(value="Reset")
+                run_button = gr.Button(value="Run")
+                delete_last_drag_button = gr.Button(value="Delete last drag")
+                delete_last_step_button = gr.Button(value="Delete last step")
+
+            with gr.Column(scale=7):
+                with gr.Row():
+                    with gr.Column(scale=6):
+                        input_image = gr.Image(
+                            label="start frame",
+                            interactive=True,
+                            height=320,
+                            width=512,
+                        )
+
+                    with gr.Column(scale=6):
+                        input_image_end = gr.Image(
+                            label="end frame",
+                            interactive=True,
+                            height=320,
+                            width=512,
+                        )
+
+        with gr.Row():
+            with gr.Column(scale=1):
+
+                controlnet_cond_scale = gr.Slider(
+                    label='Control Scale',
+                    minimum=0.0,
+                    maximum=10,
+                    step=0.1,
+                    value=1.0,
+                )
+
+                motion_bucket_id = gr.Slider(
+                    label='Motion Bucket',
+                    minimum=1,
+                    maximum=180,
+                    step=1,
+                    value=100,
+                )
+
+            with gr.Column(scale=5):
+                output_video = gr.Image(
+                    label="Output Video",
+                    height=320,
+                    width=1152,
+                )
+
+
+        with gr.Row():
+            gr.Markdown("""
+            ## Citation
+            ```bibtex
+            @article{wang2024framer,
+                title={Framer: Interactive Frame Interpolation},
+                author={Wang, Wen and Wang, Qiuyu and Zheng, Kecheng and Ouyang, Hao and Chen, Zhekai and Gong, Biao and Chen, Hao and Shen, Yujun and Shen, Chunhua},
+                journal={arXiv preprint arXiv:2410.18978},
+                year={2024}
+            }
+            ```
+            """)
+
+        image_upload_button.upload(preprocess_image, image_upload_button, [input_image, first_frame_path, tracking_points])
+
+        image_end_upload_button.upload(preprocess_image_end, image_end_upload_button, [input_image_end, last_frame_path, tracking_points])
+
+        add_drag_button.click(add_drag, tracking_points, [tracking_points])
+
+        delete_last_drag_button.click(delete_last_drag, [tracking_points, first_frame_path, last_frame_path], [tracking_points, input_image, input_image_end])
+
+        delete_last_step_button.click(delete_last_step, [tracking_points, first_frame_path, last_frame_path], [tracking_points, input_image, input_image_end])
+
+        reset_button.click(reset_states, [first_frame_path, last_frame_path, tracking_points], [first_frame_path, last_frame_path, tracking_points])
+
+        input_image.select(add_tracking_points, [tracking_points, first_frame_path, last_frame_path], [tracking_points, input_image, input_image_end])
+
+        input_image_end.select(add_tracking_points, [tracking_points, first_frame_path, last_frame_path], [tracking_points, input_image, input_image_end])
+
+        run_button.click(Framer.run, [first_frame_path, last_frame_path, tracking_points, controlnet_cond_scale, motion_bucket_id], output_video)
+
+    demo.launch()
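With the guard in place, `app.py` imports without building or launching the UI, which makes a headless run possible. A sketch under stated assumptions: the positional signature of `Drag.run` is read off the `run_button.click` wiring above, the frame paths and drag coordinates are hypothetical examples, and `get_args()` still parses `sys.argv`, so default CLI arguments are assumed:

```python
from app import Drag, get_args  # import no longer parses args or starts the demo

args = get_args()                          # assumes default CLI arguments
framer = Drag("cuda", args, 320, 512, 14)  # device, args, height, width, model_length

# One drag path: (x, y) control points on the 512x320 canvas.
tracking_points = [[(100, 160), (220, 160), (300, 140)]]

# Hypothetical pre-resized 512x320 start/end frames.
result = framer.run(
    "outputs/first_frame_demo.png",
    "outputs/last_frame_demo.png",
    tracking_points,
    1.0,   # controlnet_cond_scale
    100,   # motion_bucket_id
)
```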