Spaces:
Runtime error
Runtime error
File size: 4,278 Bytes
d61d34c d947e9b d61d34c d947e9b d61d34c 2de857a d61d34c 2de857a d61d34c 2de857a d61d34c 2de857a d61d34c 2de857a d61d34c d947e9b d61d34c 2de857a d61d34c d947e9b d61d34c 2de857a d61d34c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 |
import gradio as gr
# from src.audio2vid import audio2video
# from src.vid2vid import video2video
from src.create_modules import Processer

# HTML snippets rendered at the top of the demo page.
title = r"""
<h1>AniPortrait</h1>
"""
description = r"""
<b>Official 🤗 Gradio demo</b> for <a href='https://github.com/Zejun-Yang/AniPortrait' target='_blank'><b>AniPortrait: Audio-Driven Synthesis of Photorealistic Portrait Animations</b></a>.<br>
"""

# Single processor instance shared by both tabs; it holds the loaded models
# and exposes the `audio2video` / `video2video` inference entry points.
main_processer = Processer()

with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(description)

    # ---------------- Audio -> Video tab ----------------
    # Drives a reference portrait image from an input audio clip, with an
    # optional head-pose reference video.
    with gr.Tab("Audio2video"):
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    a2v_input_audio = gr.Audio(sources=["upload", "microphone"], type="filepath", editable=True, label="Input audio", interactive=True)
                    a2v_ref_img = gr.Image(label="Upload reference image", sources="upload")
                    a2v_headpose_video = gr.Video(label="Option: upload head pose reference video", sources="upload")
                with gr.Row():
                    a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
                    a2v_step_slider = gr.Slider(minimum=5, maximum=50, step=1, value=20, label="Steps (--steps)")
                with gr.Row():
                    # Length 0 means "derive the video length from the audio duration".
                    a2v_length = gr.Slider(minimum=0, maximum=300, step=1, value=150, label="Length (-L) (Set 0 to automatically calculate video length.)")
                    a2v_seed = gr.Number(value=42, label="Seed (--seed)")
                a2v_button = gr.Button("Generate", variant="primary")
        a2v_output_video = gr.PlayableVideo(label="Result", interactive=False)
        # Example rows match the three example-input components below.
        gr.Examples(
            examples=[
                ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/Aragaki.png", None],
                ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/solo.png", None],
                ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/lyl.png", "configs/inference/head_pose_temp/pose_ref_video.mp4"],
            ],
            inputs=[a2v_input_audio, a2v_ref_img, a2v_headpose_video],
        )

    # ---------------- Video -> Video tab ----------------
    # Re-animates a reference portrait image using the motion of a source video.
    with gr.Tab("Video2video"):
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    v2v_ref_img = gr.Image(label="Upload reference image", sources="upload")
                    v2v_source_video = gr.Video(label="Upload source video", sources="upload")
                with gr.Row():
                    v2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
                    v2v_step_slider = gr.Slider(minimum=5, maximum=50, step=1, value=20, label="Steps (--steps)")
                with gr.Row():
                    v2v_length = gr.Slider(minimum=0, maximum=300, step=1, value=150, label="Length (-L) (Set 0 to automatically calculate video length.)")
                    v2v_seed = gr.Number(value=42, label="Seed (--seed)")
                v2v_button = gr.Button("Generate", variant="primary")
        v2v_output_video = gr.PlayableVideo(label="Result", interactive=False)
        gr.Examples(
            examples=[
                ["configs/inference/ref_images/Aragaki.png", "configs/inference/video/Aragaki_song.mp4"],
                ["configs/inference/ref_images/solo.png", "configs/inference/video/Aragaki_song.mp4"],
                ["configs/inference/ref_images/lyl.png", "configs/inference/head_pose_temp/pose_ref_video.mp4"],
            ],
            # BUGFIX: the inputs list previously included `a2v_headpose_video`
            # (a component from the Audio2video tab), giving 3 inputs for
            # 2-column example rows. Examples require len(inputs) == row width.
            inputs=[v2v_ref_img, v2v_source_video],
        )

    # Wire the Generate buttons to the processor's inference methods.
    # Each handler returns (result_video, reference_image).
    a2v_button.click(
        fn=main_processer.audio2video,
        inputs=[a2v_input_audio, a2v_ref_img, a2v_headpose_video,
                a2v_size_slider, a2v_step_slider, a2v_length, a2v_seed],
        outputs=[a2v_output_video, a2v_ref_img]
    )
    v2v_button.click(
        fn=main_processer.video2video,
        inputs=[v2v_ref_img, v2v_source_video,
                v2v_size_slider, v2v_step_slider, v2v_length, v2v_seed],
        outputs=[v2v_output_video, v2v_ref_img]
    )

demo.launch()
|