Update app.py
app.py
CHANGED
@@ -29,79 +29,44 @@ class App:
 
     def musepose_demo(self):
         with gr.Blocks() as demo:
-            [old lines 32-62 removed: content not captured in this diff view]
-                    btn_align_pose.click(fn=self.pose_alignment_infer.align_pose,
-                                         inputs=[vid_dance_input, img_pose_input, nb_detect_resolution, nb_image_resolution,
-                                                 nb_align_frame, nb_max_frame],
-                                         outputs=[vid_dance_output, vid_dance_output_demo])
-
-                with gr.TabItem('2: MusePose Inference'):
-                    with gr.Row():
-                        with gr.Column(scale=3):
-                            img_musepose_input = gr.Image(label="Input Image", type="filepath", scale=5)
-                            vid_pose_input = gr.Video(label="Input Aligned Pose Video", max_length=10, scale=5)
-                        with gr.Column(scale=3):
-                            vid_output = gr.Video(label="MusePose Output", scale=5)
-                            vid_output_demo = gr.Video(label="MusePose Output Demo", scale=5)
-
-                        with gr.Column(scale=3):
-                            with gr.Column():
-                                weight_dtype = gr.Dropdown(label="Compute Type", choices=["fp16", "fp32"], value="fp16")
-                                nb_width = gr.Number(label="Width", value=512, precision=0)
-                                nb_height = gr.Number(label="Height", value=512, precision=0)
-                                nb_video_frame_length = gr.Number(label="Video Frame Length", value=300, precision=0)
-                                nb_video_slice_frame_length = gr.Number(label="Video Slice Frame Number", value=48, precision=0)
-                                nb_video_slice_overlap_frame_number = gr.Number(label="Video Slice Overlap Frame Number", value=4, precision=0)
-                                nb_cfg = gr.Number(label="CFG (Classifier Free Guidance)", value=3.5, precision=0)
-                                nb_seed = gr.Number(label="Seed", value=99, precision=0)
-                                nb_steps = gr.Number(label="DDIM Sampling Steps", value=20, precision=0)
-                                nb_fps = gr.Number(label="FPS (Frames Per Second)", value=-1, precision=0, info="Set to '-1' to use same FPS with pose's")
-                                nb_skip = gr.Number(label="SKIP (Frame Sample Rate = SKIP+1)", value=1, precision=0)
-                            with gr.Row():
-                                btn_generate = gr.Button("GENERATE", variant="primary")
-
-                    btn_generate.click(fn=self.musepose_infer.infer_musepose,
-                                       inputs=[img_musepose_input, vid_pose_input, weight_dtype, nb_width, nb_height,
-                                               nb_video_frame_length, nb_video_slice_frame_length,
-                                               nb_video_slice_overlap_frame_number, nb_cfg, nb_seed, nb_steps, nb_fps,
-                                               nb_skip],
-                                       outputs=[vid_output, vid_output_demo])
-            vid_dance_output.change(fn=self.on_step1_complete,
-                                    inputs=[img_pose_input, vid_dance_output],
-                                    outputs=[img_musepose_input, vid_pose_input])
+            self.header()
+
+            # Step 1: Pose Alignment
+            img_pose_input = gr.Image(label="Input Image", type="filepath", scale=5)
+            vid_dance_input = gr.Video(label="Input Dance Video", max_length=10, scale=5)
+            vid_dance_output = gr.Video(label="Aligned Pose Output", scale=5, interactive=False)
+            vid_dance_output_demo = gr.Video(label="Aligned Pose Output Demo", scale=5)
+
+            # Step 2: MusePose Inference
+            img_musepose_input = gr.Image(label="Input Image", type="filepath", scale=5)
+            vid_pose_input = gr.Video(label="Input Aligned Pose Video", max_length=10, scale=5)
+            vid_output = gr.Video(label="MusePose Output", scale=5)
+            vid_output_demo = gr.Video(label="MusePose Output Demo", scale=5)
+
+            btn_align_pose = gr.Button("ALIGN POSE", variant="primary")
+            btn_generate = gr.Button("GENERATE", variant="primary")
+
+            btn_align_pose.click(
+                fn=self.pose_alignment_infer.align_pose,
+                inputs=[vid_dance_input, img_pose_input],
+                outputs=[vid_dance_output, vid_dance_output_demo]
+            )
 
+            btn_generate.click(
+                fn=self.musepose_infer.infer_musepose,
+                inputs=[img_musepose_input, vid_pose_input],
+                outputs=[vid_output, vid_output_demo]
+            )
+
+            vid_dance_output.change(
+                fn=self.on_step1_complete,
+                inputs=[img_pose_input, vid_dance_output],
+                outputs=[img_musepose_input, vid_pose_input]
+            )
 
         return demo
 
+
     @staticmethod
     def header():
         header = gr.HTML(
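
The reworked UI drops the tab split and chains the two steps directly: once step 1 writes its result into vid_dance_output, the change event calls self.on_step1_complete to pre-fill the step 2 inputs. That handler is defined elsewhere in app.py and is not part of this diff; the snippet below is only a minimal sketch, assuming it simply forwards the reference image and the aligned pose video (the signature and return style are assumptions, not the repository's code).

import gradio as gr

def on_step1_complete(input_img: str, aligned_pose_vid: str):
    # Hedged sketch: hand the step 1 reference image and the freshly aligned
    # pose video to the step 2 input components. The real App.on_step1_complete
    # may do more (validation, path handling) and can differ in return style.
    return [gr.update(value=input_img), gr.update(value=aligned_pose_vid)]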