atatakun committed
Commit 09b8c62 · Parent(s): af18be4

Upload app.py

Files changed (1): app.py (+2, -269)
app.py CHANGED
@@ -2,6 +2,8 @@ import gradio as gr
 
 from annotator.util import resize_image, HWC3
 
+DESCRIPTION = '# ControlNet v1.1 (cpu_only)'
+
 
 model_canny = None
 
@@ -15,85 +17,6 @@ def canny(img, res, l, h):
     result = model_canny(img, l, h)
     return [result]
 
-
-model_hed = None
-
-
-def hed(img, res):
-    img = resize_image(HWC3(img), res)
-    global model_hed
-    if model_hed is None:
-        from annotator.hed import HEDdetector
-        model_hed = HEDdetector()
-    result = model_hed(img)
-    return [result]
-
-
-model_pidi = None
-
-
-def pidi(img, res):
-    img = resize_image(HWC3(img), res)
-    global model_pidi
-    if model_pidi is None:
-        from annotator.pidinet import PidiNetDetector
-        model_pidi = PidiNetDetector()
-    result = model_pidi(img)
-    return [result]
-
-
-model_mlsd = None
-
-
-def mlsd(img, res, thr_v, thr_d):
-    img = resize_image(HWC3(img), res)
-    global model_mlsd
-    if model_mlsd is None:
-        from annotator.mlsd import MLSDdetector
-        model_mlsd = MLSDdetector()
-    result = model_mlsd(img, thr_v, thr_d)
-    return [result]
-
-
-model_midas = None
-
-
-def midas(img, res):
-    img = resize_image(HWC3(img), res)
-    global model_midas
-    if model_midas is None:
-        from annotator.midas import MidasDetector
-        model_midas = MidasDetector()
-    result = model_midas(img)
-    return [result]
-
-
-model_zoe = None
-
-
-def zoe(img, res):
-    img = resize_image(HWC3(img), res)
-    global model_zoe
-    if model_zoe is None:
-        from annotator.zoe import ZoeDetector
-        model_zoe = ZoeDetector()
-    result = model_zoe(img)
-    return [result]
-
-
-model_normalbae = None
-
-
-def normalbae(img, res):
-    img = resize_image(HWC3(img), res)
-    global model_normalbae
-    if model_normalbae is None:
-        from annotator.normalbae import NormalBaeDetector
-        model_normalbae = NormalBaeDetector()
-    result = model_normalbae(img)
-    return [result]
-
-
 model_openpose = None
 
 
@@ -107,71 +30,6 @@ def openpose(img, res, hand_and_face):
     return [result]
 
 
-model_uniformer = None
-
-
-def uniformer(img, res):
-    img = resize_image(HWC3(img), res)
-    global model_uniformer
-    if model_uniformer is None:
-        from annotator.uniformer import UniformerDetector
-        model_uniformer = UniformerDetector()
-    result = model_uniformer(img)
-    return [result]
-
-
-model_lineart_anime = None
-
-
-def lineart_anime(img, res):
-    img = resize_image(HWC3(img), res)
-    global model_lineart_anime
-    if model_lineart_anime is None:
-        from annotator.lineart_anime import LineartAnimeDetector
-        model_lineart_anime = LineartAnimeDetector()
-    result = model_lineart_anime(img)
-    return [result]
-
-
-model_lineart = None
-
-
-def lineart(img, res, coarse=False):
-    img = resize_image(HWC3(img), res)
-    global model_lineart
-    if model_lineart is None:
-        from annotator.lineart import LineartDetector
-        model_lineart = LineartDetector()
-    result = model_lineart(img, coarse)
-    return [result]
-
-
-model_oneformer_coco = None
-
-
-def oneformer_coco(img, res):
-    img = resize_image(HWC3(img), res)
-    global model_oneformer_coco
-    if model_oneformer_coco is None:
-        from annotator.oneformer import OneformerCOCODetector
-        model_oneformer_coco = OneformerCOCODetector()
-    result = model_oneformer_coco(img)
-    return [result]
-
-
-model_oneformer_ade20k = None
-
-
-def oneformer_ade20k(img, res):
-    img = resize_image(HWC3(img), res)
-    global model_oneformer_ade20k
-    if model_oneformer_ade20k is None:
-        from annotator.oneformer import OneformerADE20kDetector
-        model_oneformer_ade20k = OneformerADE20kDetector()
-    result = model_oneformer_ade20k(img)
-    return [result]
-
-
 model_content_shuffler = None
 
 
@@ -213,75 +71,6 @@ with block:
             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
         run_button.click(fn=canny, inputs=[input_image, resolution, low_threshold, high_threshold], outputs=[gallery])
 
-    with gr.Row():
-        gr.Markdown("## HED Edge")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=hed, inputs=[input_image, resolution], outputs=[gallery])
-
-    with gr.Row():
-        gr.Markdown("## Pidi Edge")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=pidi, inputs=[input_image, resolution], outputs=[gallery])
-
-    with gr.Row():
-        gr.Markdown("## MLSD Edge")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            value_threshold = gr.Slider(label="value_threshold", minimum=0.01, maximum=2.0, value=0.1, step=0.01)
-            distance_threshold = gr.Slider(label="distance_threshold", minimum=0.01, maximum=20.0, value=0.1, step=0.01)
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=384, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=mlsd, inputs=[input_image, resolution, value_threshold, distance_threshold], outputs=[gallery])
-
-    with gr.Row():
-        gr.Markdown("## MIDAS Depth")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=384, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=midas, inputs=[input_image, resolution], outputs=[gallery])
-
-
-    with gr.Row():
-        gr.Markdown("## Zoe Depth")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=zoe, inputs=[input_image, resolution], outputs=[gallery])
-
-    with gr.Row():
-        gr.Markdown("## Normal Bae")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=normalbae, inputs=[input_image, resolution], outputs=[gallery])
-
     with gr.Row():
         gr.Markdown("## Openpose")
     with gr.Row():
@@ -294,62 +83,6 @@ with block:
             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
         run_button.click(fn=openpose, inputs=[input_image, resolution, hand_and_face], outputs=[gallery])
 
-    with gr.Row():
-        gr.Markdown("## Lineart Anime")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=lineart_anime, inputs=[input_image, resolution], outputs=[gallery])
-
-    with gr.Row():
-        gr.Markdown("## Lineart")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            coarse = gr.Checkbox(label='Using coarse model', value=False)
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=lineart, inputs=[input_image, resolution, coarse], outputs=[gallery])
-
-    with gr.Row():
-        gr.Markdown("## Uniformer Segmentation")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=uniformer, inputs=[input_image, resolution], outputs=[gallery])
-
-    with gr.Row():
-        gr.Markdown("## Oneformer COCO Segmentation")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=oneformer_coco, inputs=[input_image, resolution], outputs=[gallery])
-
-    with gr.Row():
-        gr.Markdown("## Oneformer ADE20K Segmentation")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(source='upload', type="numpy")
-            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=640, step=64)
-            run_button = gr.Button(label="Run")
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-        run_button.click(fn=oneformer_ade20k, inputs=[input_image, resolution], outputs=[gallery])
-
     with gr.Row():
         gr.Markdown("## Content Shuffle")
     with gr.Row():
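
This commit pares app.py down to the Canny, Openpose, and Content Shuffle annotators. Every removed annotator followed the same shape as the surviving ones: a module-level model slot, a lazy import and construction on first call, and one Gradio row wired to the function. Below is a minimal, self-contained sketch of how one removed tab (HED) could be restored, assembled from the removed lines above; the `gr.Blocks()` scaffolding and the `launch()` call are assumptions, since this diff does not show them, and the sketch presumes the repository's `annotator` package is importable.

```python
import gradio as gr

from annotator.util import resize_image, HWC3

model_hed = None


def hed(img, res):
    # Normalize the input to HxWx3 and resize it to the requested resolution.
    img = resize_image(HWC3(img), res)
    global model_hed
    if model_hed is None:
        # Lazy initialization: the detector is imported and built only on first use,
        # so the app can start without loading every model up front.
        from annotator.hed import HEDdetector
        model_hed = HEDdetector()
    result = model_hed(img)
    return [result]


# Assumed scaffolding: the diff shows only the row itself, not the Blocks setup.
block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## HED Edge")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
        run_button.click(fn=hed, inputs=[input_image, resolution], outputs=[gallery])

block.launch()
```

The other removed detectors (PidiNet, MLSD, MiDaS, Zoe, NormalBae, Uniformer, Lineart, Oneformer) differ only in the module they import, the extra controls they expose, and the arguments forwarded to the detector call.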