# Wan2.1 / simple_app.py — fffiloni's Hugging Face Space
# (revision 0019d72, commit message: "forgot to import os")
import os
import re
import select
import subprocess
import time

import gradio as gr
import torch
from huggingface_hub import snapshot_download
from tqdm import tqdm

# True when running on the shared public Space (inference disabled there);
# use .get() so a local run without SPACE_ID set does not raise KeyError.
is_shared_ui = "fffiloni/Wan2.1" in os.environ.get("SPACE_ID", "")
# True when a CUDA GPU is attached to this (duplicated) Space.
is_gpu_associated = torch.cuda.is_available()
# Download the model weights once at startup — only on a duplicated Space
# that actually has a GPU attached (the shared demo never runs inference).
if not is_shared_ui and is_gpu_associated:
    #Download model
    snapshot_download(
        repo_id = "Wan-AI/Wan2.1-T2V-1.3B",
        local_dir = "./Wan2.1-T2V-1.3B"
    )
def infer(prompt, progress=gr.Progress(track_tqdm=True)):
    """Run Wan2.1 text-to-video generation in a subprocess and return the
    path of the generated MP4 ("generated_video.mp4").

    Progress is surfaced through three nested tqdm bars, which Gradio picks
    up via gr.Progress(track_tqdm=True):
      * Level 1 (position=1): overall pipeline steps, advanced on INFO lines.
      * Level 2 (position=2): a time-based activity bar for the current step,
        ticked while the child process is silent.
      * Level 3 (position=0): the video-generation loop, parsed from the
        child's own tqdm output.

    Raises:
        Exception: if the generation subprocess exits with a non-zero code.
    """
    # Configuration:
    total_process_steps = 11          # Total INFO messages expected
    irrelevant_steps = 4              # First 4 INFO messages are ignored
    relevant_steps = total_process_steps - irrelevant_steps  # 7 overall steps

    # Create overall progress bar (Level 1)
    overall_bar = tqdm(total=relevant_steps, desc="Overall Process", position=1,
                       ncols=120, dynamic_ncols=False, leave=True)
    processed_steps = 0

    # Regex for video generation progress (Level 3), e.g. " 42%|████| 21/50"
    progress_pattern = re.compile(r"(\d+)%\|.*\| (\d+)/(\d+)")
    video_progress_bar = None

    # Variables for sub-step progress bar (Level 2).
    # 1500 ticks, one per 40 ms select() timeout, so a full bar ≈ 60 seconds.
    sub_bar = None
    sub_ticks = 0
    sub_tick_total = 1500
    video_phase = False

    command = [
        "python", "-u", "-m", "generate",  # -u: unbuffered output so progress lines arrive promptly
        "--task", "t2v-1.3B",
        "--size", "832*480",
        "--ckpt_dir", "./Wan2.1-T2V-1.3B",
        "--sample_shift", "8",
        "--sample_guide_scale", "6",
        "--prompt", prompt,
        "--save_file", "generated_video.mp4"
    ]

    # stderr is merged into stdout so the child's tqdm output (written to
    # stderr) flows through the same reader loop below.
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               text=True,
                               bufsize=1)

    while True:
        # Poll stdout with a 40ms timeout so the sub-step bar can keep
        # ticking even while the child produces no output.
        rlist, _, _ = select.select([process.stdout], [], [], 0.04)
        if rlist:
            line = process.stdout.readline()
            if not line:
                # EOF on the pipe: the child closed stdout.
                break
            stripped_line = line.strip()
            if not stripped_line:
                continue

            # Check for video generation progress (Level 3)
            progress_match = progress_pattern.search(stripped_line)
            if progress_match:
                # If a sub-step bar is active, finish it before entering video phase.
                if sub_bar is not None:
                    if sub_ticks < sub_tick_total:
                        sub_bar.update(sub_tick_total - sub_ticks)
                    sub_bar.close()
                    overall_bar.update(1)
                    overall_bar.refresh()
                    sub_bar = None
                    sub_ticks = 0
                video_phase = True
                current = int(progress_match.group(2))
                total = int(progress_match.group(3))
                if video_progress_bar is None:
                    video_progress_bar = tqdm(total=total, desc="Video Generation", position=0,
                                              ncols=120, dynamic_ncols=True, leave=True)
                # The child reports absolute progress; convert to a delta.
                video_progress_bar.update(current - video_progress_bar.n)
                video_progress_bar.refresh()
                if video_progress_bar.n >= video_progress_bar.total:
                    # Video phase done: count it as one overall step.
                    video_phase = False
                    overall_bar.update(1)
                    overall_bar.refresh()
                    video_progress_bar.close()
                    video_progress_bar = None
                continue

            # Process INFO messages (Level 2 sub-step)
            if "INFO:" in stripped_line:
                parts = stripped_line.split("INFO:", 1)
                msg = parts[1].strip() if len(parts) > 1 else ""
                print(stripped_line)  # Log the message
                # For the first 4 INFO messages, simply count them.
                if processed_steps < irrelevant_steps:
                    processed_steps += 1
                    continue
                else:
                    # A new relevant INFO message has arrived.
                    # If a sub-bar exists (whether full or not), finish it now.
                    if sub_bar is not None:
                        if sub_ticks < sub_tick_total:
                            sub_bar.update(sub_tick_total - sub_ticks)
                        sub_bar.close()
                        overall_bar.update(1)
                        overall_bar.refresh()
                        sub_bar = None
                        sub_ticks = 0
                    # Start a new sub-step bar for the current INFO message.
                    sub_bar = tqdm(total=sub_tick_total, desc=msg, position=2,
                                   ncols=120, dynamic_ncols=False, leave=True)
                    sub_ticks = 0
                    continue
            else:
                print(stripped_line)
        else:
            # No new data within 40ms: tick the waiting sub-step bar.
            if sub_bar is not None:
                if sub_ticks < sub_tick_total:
                    sub_bar.update(1)
                    sub_ticks += 1
                    sub_bar.refresh()
                # If full (~60 seconds reached), do not advance overall step—just remain waiting.
            if process.poll() is not None:
                # Child has exited and produced no new output: stop polling.
                break

    # Drain any remaining output.
    for line in process.stdout:
        print(line.strip())
    process.wait()

    # Close any bars left open (e.g. the process ended mid-step).
    if video_progress_bar is not None:
        video_progress_bar.close()
    if sub_bar is not None:
        sub_bar.close()
    overall_bar.close()

    if process.returncode == 0:
        print("Command executed successfully.")
        return "generated_video.mp4"
    else:
        print("Error executing command.")
        raise Exception("Error executing command")
css = """
div#warning-duplicate {
background-color: #ebf5ff;
padding: 0 16px 16px;
margin: 20px 0;
color: #030303!important;
}
div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
color: #0f4592!important;
}
div#warning-duplicate strong {
color: #0f4592;
}
p.actions {
display: flex;
align-items: center;
margin: 20px 0;
}
div#warning-duplicate .actions a {
display: inline-block;
margin-right: 10px;
}
div#warning-setgpu {
background-color: #fff4eb;
padding: 0 16px 16px;
margin: 20px 0;
color: #030303!important;
}
div#warning-setgpu > .gr-prose > h2, div#warning-setgpu > .gr-prose > p {
color: #92220f!important;
}
div#warning-setgpu a, div#warning-setgpu b {
color: #91230f;
}
div#warning-setgpu p.actions > a {
display: inline-block;
background: #1f1f23;
border-radius: 40px;
padding: 6px 24px;
color: antiquewhite;
text-decoration: none;
font-weight: 600;
font-size: 1.2em;
}
div#warning-ready {
background-color: #ecfdf5;
padding: 0 16px 16px;
margin: 20px 0;
color: #030303!important;
}
div#warning-ready > .gr-prose > h2, div#warning-ready > .gr-prose > p {
color: #057857!important;
}
.custom-color {
color: #030303 !important;
}
"""
# Build the Gradio UI. Exactly one of three banners is shown depending on
# where the app is running: the shared public Space, a duplicate with a GPU,
# or a duplicate still waiting for a GPU to be attached.
with gr.Blocks(css=css) as demo:
    with gr.Column():
        gr.Markdown("# Wan 2.1 1.3B")
        gr.Markdown("Enjoy this simple working UI, duplicate the space to skip the queue :)")
        gr.HTML("""
        <div style="display:flex;column-gap:4px;">
            <a href="https://huggingface.co/spaces/fffiloni/Wan2.1?duplicate=true">
                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-sm.svg" alt="Duplicate this Space">
            </a>
            <a href="https://huggingface.co/fffiloni">
                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/follow-me-on-HF-sm-dark.svg" alt="Follow me on HF">
            </a>
        </div>
        """)
        if is_shared_ui:
            # Shared demo: inference is disabled; prompt the user to duplicate.
            # .get() avoids a KeyError if SPACE_ID is missing (e.g. local run).
            top_description = gr.HTML(f'''
                <div class="gr-prose">
                    <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
                    Attention: this Space need to be duplicated to work</h2>
                    <p class="main-message custom-color">
                        To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU (L40s recommended).<br />
                        A L40s costs <strong>US$1.80/h</strong>.
                    </p>
                    <p class="actions custom-color">
                        <a href="https://huggingface.co/spaces/{os.environ.get("SPACE_ID", "")}?duplicate=true">
                            <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
                        </a>
                        to start experimenting with this demo
                    </p>
                </div>
            ''', elem_id="warning-duplicate")
        elif is_gpu_associated:
            # Duplicated Space with a GPU attached: ready to run.
            top_description = gr.HTML('''
                <div class="gr-prose">
                    <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
                    You have successfully associated a GPU to this Space 🎉</h2>
                    <p class="custom-color">
                        You will be billed by the minute from when you activated the GPU until when it is turned off.
                    </p>
                </div>
            ''', elem_id="warning-ready")
        else:
            # Duplicated Space without a GPU yet: point the user at Settings.
            # Fixed copy-paste error ("MimicMotion" -> "Wan 2.1") and a stray
            # closing </b> tag in the original markup.
            top_description = gr.HTML(f'''
                <div class="gr-prose">
                    <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
                    You have successfully duplicated the Wan 2.1 Space 🎉</h2>
                    <p class="custom-color">There's only one step left before you can properly play with this demo: <a href="https://huggingface.co/spaces/{os.environ.get("SPACE_ID", "")}/settings" style="text-decoration: underline" target="_blank">attribute a GPU to it (via the Settings tab)</a> and run the app below.
                    You will be billed by the minute from when you activate the GPU until when it is turned off.</p>
                    <p class="actions custom-color">
                        <a href="https://huggingface.co/spaces/{os.environ.get("SPACE_ID", "")}/settings">🔥 &nbsp; Set recommended GPU</a>
                    </p>
                </div>
            ''', elem_id="warning-setgpu")

        prompt = gr.Textbox(label="Prompt")
        # Submission is disabled on the shared demo Space.
        submit_btn = gr.Button("Submit", interactive=not is_shared_ui)
        video_res = gr.Video(label="Generated Video")

    submit_btn.click(
        fn = infer,
        inputs = [prompt],
        outputs = [video_res]
    )

demo.queue().launch(show_error=True, show_api=False, ssr_mode=False)