"""Gradio WebUI entry point for MusePose: step 1 aligns a dance video's pose to an
input image, step 2 runs MusePose inference on the aligned pose video."""
import gradio as gr
import argparse
import os

from musepose_inference import MusePoseInference
from pose_align import PoseAlignmentInference
from downloading_weights import download_models


class App:
    def __init__(self, args):
        self.args = args
        self.pose_alignment_infer = PoseAlignmentInference(
            model_dir=args.model_dir,
            output_dir=args.output_dir
        )
        self.musepose_infer = MusePoseInference(
            model_dir=args.model_dir,
            output_dir=args.output_dir
        )
        if not args.disable_model_download_at_start:
            download_models(model_dir=args.model_dir)

    @staticmethod
    def on_step1_complete(input_img: str, input_pose_vid: str):
        # Hand the step-1 results over to step 2: reuse the original image and the
        # aligned pose video as the inputs of the MusePose Inference tab.
        return [gr.Image(label="Input Image", value=input_img, type="filepath", scale=5),
                gr.Video(label="Input Aligned Pose Video", value=input_pose_vid, scale=5)]

    def musepose_demo(self):
        with gr.Blocks() as demo:
            md_header = self.header()
            with gr.Tabs():
                with gr.TabItem('1: Pose Alignment'):
                    with gr.Row():
                        with gr.Column(scale=3):
                            img_pose_input = gr.Image(label="Input Image", type="filepath", scale=5)
                            vid_dance_input = gr.Video(label="Input Dance Video", max_length=10, scale=5)
                        with gr.Column(scale=3):
                            vid_dance_output = gr.Video(label="Aligned Pose Output", scale=5, interactive=False)
                            vid_dance_output_demo = gr.Video(label="Aligned Pose Output Demo", scale=5)
                        with gr.Column(scale=3):
                            # The column body was missing in the original (a syntax error).
                            # An alignment trigger button is assumed here; its name and label
                            # are reconstructions, and the wiring is sketched after the tabs.
                            btn_align_pose = gr.Button("ALIGN POSE", variant="primary")

                with gr.TabItem('2: MusePose Inference'):
                    with gr.Row():
                        with gr.Column(scale=3):
                            img_musepose_input = gr.Image(label="Input Image", type="filepath", scale=5)
                            vid_pose_input = gr.Video(label="Input Aligned Pose Video", max_length=10, scale=5)
                        with gr.Column(scale=3):
                            vid_output = gr.Video(label="MusePose Output", scale=5)
                            vid_output_demo = gr.Video(label="MusePose Output Demo", scale=5)
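                        with gr.Column(scale=3):
                            # Sketch of an assumed "GENERATE" button: the header text refers to
                            # it, but the original column and button code are not shown here.
                            btn_generate = gr.Button("GENERATE", variant="primary")

            # Event wiring: a hedged reconstruction, not the verified original.
            # `align_pose` and `infer_musepose` are assumed method names on
            # PoseAlignmentInference / MusePoseInference; adjust them to the actual
            # entry points exposed by those classes.
            btn_align_pose.click(
                fn=self.pose_alignment_infer.align_pose,
                inputs=[vid_dance_input, img_pose_input],
                outputs=[vid_dance_output, vid_dance_output_demo]
            ).success(
                # Carry the step-1 results into the step-2 inputs (see on_step1_complete).
                fn=self.on_step1_complete,
                inputs=[img_pose_input, vid_dance_output],
                outputs=[img_musepose_input, vid_pose_input]
            )
            btn_generate.click(
                fn=self.musepose_infer.infer_musepose,
                inputs=[img_musepose_input, vid_pose_input],
                outputs=[vid_output, vid_output_demo]
            )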

        return demo

    @staticmethod
    def header():
        header = gr.HTML(
            """
            <h1 style="font-size: 23px;">
                <a href="https://github.com/jhj0517/MusePose-WebUI" target="_blank">MusePose WebUI</a>
            </h1>
            <p style="font-size: 18px;">
                <strong>Note</strong>: This space only accepts video inputs up to <strong>10 seconds</strong>, because ZeroGPU limits the function runtime to 2 minutes.<br>
                If you need longer videos, run the app locally: click the link above and follow the README.<br><br>
                After completing <strong>1: Pose Alignment</strong>, go to <strong>2: MusePose Inference</strong> and click the "GENERATE" button.
            </p>
            """
        )
        return header

    def launch(self):
        demo = self.musepose_demo()
        demo.queue().launch(
            share=self.args.share
        )

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=str, default="pretrained_weights",
                        help='Directory holding the pretrained MusePose weights')
    parser.add_argument('--output_dir', type=str, default="outputs",
                        help='Directory where results are saved')
    # store_true avoids the argparse pitfall where type=bool turns any non-empty
    # string (including "False") into True.
    parser.add_argument('--disable_model_download_at_start', action='store_true',
                        help='Skip the automatic model download at startup')
    parser.add_argument('--share', action='store_true',
                        help='Create a public Gradio share link')
    args = parser.parse_args()

    app = App(args=args)
    app.launch()
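# Example local run (script name and paths are illustrative, not taken from the repo):
#   python app.py --model_dir pretrained_weights --output_dir outputs --share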