fffiloni committed on
Commit
2b599ab
β€’
1 Parent(s): f474836

add duplication notice banners

Browse files
Files changed (1) hide show
  1. gradio_app.py +129 -4
gradio_app.py CHANGED
@@ -1,16 +1,28 @@
1
  import os
 
2
 
3
  os.environ['HF_HOME'] = os.path.join(os.path.dirname(__file__), 'hf_download')
4
  result_dir = os.path.join('./', 'results')
5
  os.makedirs(result_dir, exist_ok=True)
6
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
  import functools
9
- import os
10
  import random
11
  import gradio as gr
12
  import numpy as np
13
- import torch
14
  import wd14tagger
15
  import memory_management
16
  import uuid
@@ -112,12 +124,19 @@ def resize_without_crop(image, target_width, target_height):
112
 
113
  @torch.inference_mode()
114
  def interrogator_process(x):
 
 
 
115
  return wd14tagger.default_interrogator(x)
116
 
117
 
118
  @torch.inference_mode()
119
  def process(input_fg, prompt, input_undo_steps, image_width, image_height, seed, steps, n_prompt, cfg,
120
  progress=gr.Progress()):
 
 
 
 
121
  rng = torch.Generator(device=memory_management.gpu).manual_seed(int(seed))
122
 
123
  memory_management.load_models_to_gpu(vae)
@@ -213,6 +232,9 @@ def process_video_inner(image_1, image_2, prompt, seed=123, steps=25, cfg_scale=
213
 
214
  @torch.inference_mode()
215
  def process_video(keyframes, prompt, steps, cfg, fps, seed, progress=gr.Progress()):
 
 
 
216
  result_frames = []
217
  cropped_images = []
218
 
@@ -236,9 +258,112 @@ def process_video(keyframes, prompt, steps, cfg, fps, seed, progress=gr.Progress
236
  video = [x.cpu().numpy() for x in video]
237
  return output_filename, video
238
 
239
-
240
- block = gr.Blocks().queue()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
241
  with block:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
242
  gr.Markdown('# Paints-Undo')
243
 
244
  with gr.Accordion(label='Step 1: Upload Image and Generate Prompt', open=True):
 
1
  import os
2
+ import torch
3
 
4
  os.environ['HF_HOME'] = os.path.join(os.path.dirname(__file__), 'hf_download')
5
  result_dir = os.path.join('./', 'results')
6
  os.makedirs(result_dir, exist_ok=True)
7
 
8
+ is_shared_ui = True if "fffiloni/Paints-UNDO" in os.environ['SPACE_ID'] else False
9
+
10
+ is_gpu_associated = torch.cuda.is_available()
11
+
12
+ if is_gpu_associated:
13
+ gpu_info = getoutput('nvidia-smi')
14
+ if("A10G" in gpu_info):
15
+ which_gpu = "A10G"
16
+ elif("T4" in gpu_info):
17
+ which_gpu = "T4"
18
+ else:
19
+ which_gpu = "CPU"
20
 
21
  import functools
 
22
  import random
23
  import gradio as gr
24
  import numpy as np
25
+
26
  import wd14tagger
27
  import memory_management
28
  import uuid
 
124
 
125
  @torch.inference_mode()
126
  def interrogator_process(x):
127
+ if is_shared_ui:
128
+ raise gr.Error("This Space only works in duplicated instances")
129
+
130
  return wd14tagger.default_interrogator(x)
131
 
132
 
133
  @torch.inference_mode()
134
  def process(input_fg, prompt, input_undo_steps, image_width, image_height, seed, steps, n_prompt, cfg,
135
  progress=gr.Progress()):
136
+
137
+ if is_shared_ui:
138
+ raise gr.Error("This Space only works in duplicated instances")
139
+
140
  rng = torch.Generator(device=memory_management.gpu).manual_seed(int(seed))
141
 
142
  memory_management.load_models_to_gpu(vae)
 
232
 
233
  @torch.inference_mode()
234
  def process_video(keyframes, prompt, steps, cfg, fps, seed, progress=gr.Progress()):
235
+ if is_shared_ui:
236
+ raise gr.Error("This Space only works in duplicated instances")
237
+
238
  result_frames = []
239
  cropped_images = []
240
 
 
258
  video = [x.cpu().numpy() for x in video]
259
  return output_filename, video
260
 
261
+ css = """
262
+ div#warning-ready {
263
+ background-color: #ecfdf5;
264
+ padding: 0 16px 16px;
265
+ margin: 20px 0;
266
+ color: #030303!important;
267
+ }
268
+ div#warning-ready > .gr-prose > h2, div#warning-ready > .gr-prose > p {
269
+ color: #057857!important;
270
+ }
271
+ div#warning-duplicate {
272
+ background-color: #ebf5ff;
273
+ padding: 0 16px 16px;
274
+ margin: 20px 0;
275
+ color: #030303!important;
276
+ }
277
+ div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
278
+ color: #0f4592!important;
279
+ }
280
+ div#warning-duplicate strong {
281
+ color: #0f4592;
282
+ }
283
+ p.actions {
284
+ display: flex;
285
+ align-items: center;
286
+ margin: 20px 0;
287
+ }
288
+ div#warning-duplicate .actions a {
289
+ display: inline-block;
290
+ margin-right: 10px;
291
+ }
292
+ div#warning-setgpu {
293
+ background-color: #fff4eb;
294
+ padding: 0 16px 16px;
295
+ margin: 20px 0;
296
+ color: #030303!important;
297
+ }
298
+ div#warning-setgpu > .gr-prose > h2, div#warning-setgpu > .gr-prose > p {
299
+ color: #92220f!important;
300
+ }
301
+ div#warning-setgpu a, div#warning-setgpu b {
302
+ color: #91230f;
303
+ }
304
+ div#warning-setgpu p.actions > a {
305
+ display: inline-block;
306
+ background: #1f1f23;
307
+ border-radius: 40px;
308
+ padding: 6px 24px;
309
+ color: antiquewhite;
310
+ text-decoration: none;
311
+ font-weight: 600;
312
+ font-size: 1.2em;
313
+ }
314
+ div#warning-setsleeptime {
315
+ background-color: #fff4eb;
316
+ padding: 10px 10px;
317
+ margin: 0!important;
318
+ color: #030303!important;
319
+ }
320
+ .custom-color {
321
+ color: #030303 !important;
322
+ }
323
+ """
324
+
325
+ block = gr.Blocks(css=css).queue()
326
  with block:
327
+ if is_shared_ui:
328
+ top_description = gr.HTML(f'''
329
+ <div class="gr-prose">
330
+ <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
331
+ Attention: this Space need to be duplicated to work</h2>
332
+ <p class="main-message custom-color">
333
+ To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU (T4-small or A10G-small).<br />
334
+ A T4 costs <strong>US$0.60/h</strong>.
335
+ </p>
336
+ <p class="actions custom-color">
337
+ <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
338
+ <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
339
+ </a>
340
+ to start experimenting with this demo
341
+ </p>
342
+ </div>
343
+ ''', elem_id="warning-duplicate")
344
+ else:
345
+ if(is_gpu_associated):
346
+ top_description = gr.HTML(f'''
347
+ <div class="gr-prose">
348
+ <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
349
+ You have successfully associated a {which_gpu} GPU to the Paints UNDO Space πŸŽ‰</h2>
350
+ <p class="custom-color">
351
+ You will be billed by the minute from when you activated the GPU until when it is turned off.
352
+ </p>
353
+ </div>
354
+ ''', elem_id="warning-ready")
355
+ else:
356
+ top_description = gr.HTML(f'''
357
+ <div class="gr-prose">
358
+ <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
359
+ You have successfully duplicated the Paints UNDO Space πŸŽ‰</h2>
360
+ <p class="custom-color">There's only one step left before you can properly play with this demo: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">attribute a <b>T4-small or A10G-small GPU</b> to it (via the Settings tab)</a> and run the training below.
361
+ You will be billed by the minute from when you activate the GPU until when it is turned off.</p>
362
+ <p class="actions custom-color">
363
+ <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings">πŸ”₯ &nbsp; Set recommended GPU</a>
364
+ </p>
365
+ </div>
366
+ ''', elem_id="warning-setgpu")
367
  gr.Markdown('# Paints-Undo')
368
 
369
  with gr.Accordion(label='Step 1: Upload Image and Generate Prompt', open=True):