waveydaveygravy committed on
Commit
3ebd2c3
·
1 Parent(s): 393bdec

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +114 -0
app.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 ByteDance and/or its affiliates.
2
+ #
3
+ # Copyright (2023) MagicAnimate Authors
4
+ #
5
+ # ByteDance, its affiliates and licensors retain all intellectual
6
+ # property and proprietary rights in and to this material, related
7
+ # documentation and any modifications thereto. Any use, reproduction,
8
+ # disclosure or distribution of this material and related documentation
9
+ # without an express license agreement from ByteDance or
10
+ # its affiliates is strictly prohibited.
11
+ import argparse
12
+ import imageio
13
+ import numpy as np
14
+ import gradio as gr
15
+ import os
16
+ from PIL import Image
17
+ from subprocess import PIPE, run
18
+
19
+ from demo.animate import MagicAnimate
20
+
21
+ from huggingface_hub import snapshot_download
22
+
23
+ #snapshot_download(repo_id="runwayml/stable-diffusion-v1-5", local_dir="./stable-diffusion-v1-5")
24
+ #snapshot_download(repo_id="stabilityai/sd-vae-ft-mse", local_dir="./sd-vae-ft-mse")
25
+ #snapshot_download(repo_id="zcxu-eric/MagicAnimate", local_dir="./MagicAnimate")
26
+
27
# Detect whether we are running inside a Hugging Face Space, and whether it is
# the official shared demo Space (used below to decide if examples are cached).
is_spaces = "SPACE_ID" in os.environ  # `x in d` is already a bool; no ternary needed
true_for_shared_ui = False  # True only when running in the official shared UI
if is_spaces:
    true_for_shared_ui = "zcxu-eric/magicanimate" in os.environ["SPACE_ID"]
31
+
32
+
33
+ animator = MagicAnimate()
34
+
35
+ def animate(reference_image, motion_sequence_state, seed=1, steps=25, guidance_scale=7.5):
36
+ return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale)
37
+
38
+ with gr.Blocks() as demo:
39
+
40
+ gr.HTML(
41
+ """
42
+ <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
43
+ <a href="https://github.com/magic-research/magic-animate" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
44
+ </a>
45
+ <div>
46
+ <h1 >MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model</h1>
47
+ <h5 style="margin: 0;">If you like our project, please give us a star ✨ on Github for the latest update.</h5>
48
+ <div style="display: flex; justify-content: center; align-items: center; text-align: center;>
49
+ <a href="https://arxiv.org/abs/2311.16498"><img src="https://img.shields.io/badge/Arxiv-2311.16498-red"></a>
50
+ <a href='https://showlab.github.io/magicanimate'><img src='https://img.shields.io/badge/Project_Page-MagicAnimate-green' alt='Project Page'></a>
51
+ <a href='https://github.com/magic-research/magic-animate'><img src='https://img.shields.io/badge/Github-Code-blue'></a>
52
+ </div>
53
+ </div>
54
+ </div>
55
+ """)
56
+ animation = gr.Video(format="mp4", label="Animation Results", autoplay=True)
57
+
58
+ with gr.Row():
59
+ reference_image = gr.Image(label="Reference Image")
60
+ motion_sequence = gr.Video(format="mp4", label="Motion Sequence",max_length=5)
61
+
62
+ with gr.Column():
63
+ random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1")
64
+ sampling_steps = gr.Textbox(label="Sampling steps", value=25, info="default: 25")
65
+ guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5")
66
+ submit = gr.Button("Animate")
67
+
68
+ def read_video(video):
69
+ reader = imageio.get_reader(video)
70
+ fps = reader.get_meta_data()['fps']
71
+ return video
72
+
73
+ def read_image(image, size=384):
74
+ return np.array(Image.fromarray(image).resize((size, size)))
75
+
76
+ # when user uploads a new video
77
+ motion_sequence.upload(
78
+ read_video,
79
+ motion_sequence,
80
+ motion_sequence,
81
+ queue=False
82
+ )
83
+ # when `first_frame` is updated
84
+ reference_image.upload(
85
+ read_image,
86
+ reference_image,
87
+ reference_image,
88
+ queue=False
89
+ )
90
+ # when the `submit` button is clicked
91
+ submit.click(
92
+ animate,
93
+ [reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale],
94
+ animation
95
+ )
96
+
97
+ # Examples
98
+ gr.Markdown("## Examples")
99
+ gr.Examples(
100
+ fn=animate,
101
+ examples=[
102
+ ["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"],
103
+ ["inputs/applications/source_image/demo4.png", "inputs/applications/driving/densepose/demo4.mp4"],
104
+ ["inputs/applications/source_image/dalle2.jpeg", "inputs/applications/driving/densepose/running2.mp4"],
105
+ ["inputs/applications/source_image/dalle8.jpeg", "inputs/applications/driving/densepose/dancing2.mp4"],
106
+ ["inputs/applications/source_image/multi1_source.png", "inputs/applications/driving/densepose/multi_dancing.mp4"],
107
+ ],
108
+ inputs=[reference_image, motion_sequence],
109
+ outputs=animation,
110
+ cache_examples=true_for_shared_ui
111
+ )
112
+
113
+ # demo.queue(max_size=15, api_open=False)
114
+ demo.launch(share=True, show_api=False)