Commit 8024618 by jbilcke-hf
1 Parent(s): 12e4a4d

Update app.py

Files changed (1):
  app.py (+21 -98)
app.py CHANGED
@@ -3,11 +3,13 @@ import numpy as np
 from PIL import Image
 import cv2
 from moviepy.editor import VideoFileClip
-from share_btn import community_icon_html, loading_icon_html, share_js
 import torch
 from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
 from diffusers.utils import export_to_video
 
+SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
+DESCRIPTION = 'This space is an API service meant to be used by VideoChain and VideoQuest.\nWant to use this space for yourself? Please use the original code: [https://huggingface.co/spaces/fffiloni/zeroscope-XL](https://huggingface.co/spaces/fffiloni/zeroscope-XL)'
+
 pipe_xl = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/17")
 pipe_xl.vae.enable_slicing()
 pipe_xl.scheduler = DPMSolverMultistepScheduler.from_config(pipe_xl.scheduler.config)
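The gate added above reads its token from the environment, so `os` must already be imported in the unshown first lines of app.py for `os.getenv` to resolve. A minimal sketch of providing the variable; the token value is purely illustrative, and on a Hugging Face Space it would normally live in a repository secret named SECRET_TOKEN rather than in code:

```python
import os

# Hypothetical deployment value; on a Space this would be a repository
# secret rather than a hard-coded assignment.
os.environ['SECRET_TOKEN'] = 'my-deploy-token'

# Mirrors the fallback in app.py: if nothing is set, the gate silently
# accepts the well-known 'default_secret' value.
token = os.getenv('SECRET_TOKEN', 'default_secret')
assert token == 'my-deploy-token'
```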
@@ -52,119 +54,40 @@ def convert_mp4_to_frames(video_path, duration=3):
 
     return frames
 
-def infer(prompt, video_in, denoise_strength):
-
-    negative_prompt = "text, watermark, copyright, blurry, nsfw"
+def infer(prompt, video_in, denoise_strength, duration, secret_token: str = '') -> str:
+    if secret_token != SECRET_TOKEN:
+        raise gr.Error(
+            f'Invalid secret token. Please fork the original space if you want to use it for yourself.')
+
+    negative_prompt = "text, watermark, copyright, blurry, cropped, noisy, pixelated, nsfw"
 
     # we cannot go beyond 3 seconds on the large A10G
-    video = convert_mp4_to_frames(video_in, duration=3)
+    video = convert_mp4_to_frames(video_in, min(duration, 3))
     video_resized = [Image.fromarray(frame).resize((1024, 576)) for frame in video]
     video_frames = pipe_xl(prompt, negative_prompt=negative_prompt, video=video_resized, strength=denoise_strength).frames
     video_path = export_to_video(video_frames, output_video_path="xl_result.mp4")
 
-    return "xl_result.mp4", gr.Group.update(visible=True)
+    return "xl_result.mp4"
 
-css = """
-#col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
-a {text-decoration-line: underline; font-weight: 600;}
-.animate-spin {
-    animation: spin 1s linear infinite;
-}
-
-@keyframes spin {
-    from {
-        transform: rotate(0deg);
-    }
-    to {
-        transform: rotate(360deg);
-    }
-}
-
-#share-btn-container {
-    display: flex;
-    padding-left: 0.5rem !important;
-    padding-right: 0.5rem !important;
-    background-color: #000000;
-    justify-content: center;
-    align-items: center;
-    border-radius: 9999px !important;
-    max-width: 13rem;
-}
-
-#share-btn-container:hover {
-    background-color: #060606;
-}
-
-#share-btn {
-    all: initial;
-    color: #ffffff;
-    font-weight: 600;
-    cursor: pointer;
-    font-family: 'IBM Plex Sans', sans-serif;
-    margin-left: 0.5rem !important;
-    padding-top: 0.5rem !important;
-    padding-bottom: 0.5rem !important;
-    right: 0;
-}
-
-#share-btn * {
-    all: unset;
-}
-
-#share-btn-container div:nth-child(-n+2){
-    width: auto !important;
-    min-height: 0px !important;
-}
-
-#share-btn-container .wrap {
-    display: none !important;
-}
-
-#share-btn-container.hidden {
-    display: none !important;
-}
-img[src*='#center'] {
-    display: block;
-    margin: auto;
-}
-"""
-
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(
-            """
-            <h1 style="text-align: center;">Zeroscope XL</h1>
-            <p style="text-align: center;">
-            This space is specifically designed for upscaling content made from <br />
-            <a href="https://huggingface.co/spaces/fffiloni/zeroscope">the zeroscope_v2_576w space</a> using vid2vid. <br />
-            Remember to use the same prompt that was used to generate the original clip.<br />
-            For demo purpose, video length is limited to 3 seconds.
-            </p>
-
-            [![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg#center)](https://huggingface.co/spaces/fffiloni/zeroscope-XL?duplicate=true)
-
-            """
-        )
 
+with gr.Blocks() as demo:
+    gr.Markdown(DESCRIPTION)
+
+    with gr.Column():
+        secret_token = gr.Text(label='Secret Token', max_lines=1)
         video_in = gr.Video(type="numpy", source="upload")
-        prompt_in = gr.Textbox(label="Prompt", placeholder="This must be the same prompt you used for the original clip :)", elem_id="prompt-in")
+        prompt_in = gr.Textbox(label="Prompt", elem_id="prompt-in")
         denoise_strength = gr.Slider(label="Denoise strength", minimum=0.6, maximum=0.9, step=0.01, value=0.66)
-        #inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, step=1, value=40, interactive=False)
+        duration = gr.Slider(label="Duration", minimum=0.5, maximum=3, step=0.5, value=3)
+        #inference_steps = gr.Slider(label="Inference Steps", minimum=7, maximum=100, step=1, value=40, interactive=False)
         submit_btn = gr.Button("Submit")
         video_result = gr.Video(label="Video Output", elem_id="video-output")
 
-        with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
-            community_icon = gr.HTML(community_icon_html)
-            loading_icon = gr.HTML(loading_icon_html)
-            share_button = gr.Button("Share to community", elem_id="share-btn")
-
         submit_btn.click(fn=infer,
-                         inputs=[prompt_in, video_in, denoise_strength],
-                         outputs=[video_result, share_group],
+                         inputs=[prompt_in, video_in, denoise_strength, duration, secret_token],
+                         outputs=[video_result],
                          api_name="zero_xl"
                          )
 
-        share_button.click(None, [], [], _js=share_js)
-
-demo.queue(max_size=12).launch()
+demo.queue(max_size=6).launch()
 
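With the share UI gone, the Space is meant to be called programmatically. A hypothetical client-side call against the `zero_xl` endpoint, assuming a public deployment of this file under the placeholder Space id `your-name/zeroscope-XL` and a gradio_client version that accepts positional inputs plus `api_name`:

```python
from gradio_client import Client

# Placeholder Space id; point this at wherever this app.py is deployed.
client = Client("your-name/zeroscope-XL")

result = client.predict(
    "a panda surfing a wave, cinematic",  # prompt: reuse the one from the 576w clip
    "input_576w.mp4",                     # video_in: path to the low-res clip
    0.66,                                 # denoise_strength
    3,                                    # duration in seconds (clamped to 3 server-side)
    "default_secret",                     # secret_token: must match the SECRET_TOKEN env var
    api_name="/zero_xl",
)
print(result)  # local path to the upscaled xl_result.mp4
```

Since `infer` raises `gr.Error` on a token mismatch, a wrong token surfaces client-side as an exception rather than a silent failure.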