atatakun committed on
Commit 29401bf · 1 parent: 1018574

Delete app..py

Files changed (1)
  1. app..py +0 -432
app..py DELETED
@@ -1,432 +0,0 @@
- import gradio as gr
- import cv2
- import numpy as np
-
- from annotator.util import resize_image, HWC3
-
- DESCRIPTION = '# ControlNet v1.1 Annotators (CPU only)'
- DESCRIPTION += '\n<p>This app generates control images for Mochi Diffusion&apos;s ControlNet.</p>'
- DESCRIPTION += '\n<p>HEIC images are not converted; please use a PNG or JPG image.</p>'
-
-
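- # Each annotator model is loaded lazily on its first use, so detectors that are never run are never imported or instantiated.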
- model_canny = None
-
-
- def canny(img, res, l, h):
-     img = resize_image(HWC3(img), res)
-     global model_canny
-     if model_canny is None:
-         from annotator.canny import CannyDetector
-         model_canny = CannyDetector()
-     result = model_canny(img, l, h)
-     return [result]
-
-
- model_hed = None
-
-
- def hed(img, res):
-     img = resize_image(HWC3(img), res)
-     global model_hed
-     if model_hed is None:
-         from annotator.hed import HEDdetector
-         model_hed = HEDdetector()
-     result = model_hed(img)
-     return [result]
-
-
- model_pidi = None
-
-
- def pidi(img, res):
-     img = resize_image(HWC3(img), res)
-     global model_pidi
-     if model_pidi is None:
-         from annotator.pidinet import PidiNetDetector
-         model_pidi = PidiNetDetector()
-     result = model_pidi(img)
-     return [result]
-
-
- model_mlsd = None
-
-
- def mlsd(img, res, thr_v, thr_d):
-     img = resize_image(HWC3(img), res)
-     global model_mlsd
-     if model_mlsd is None:
-         from annotator.mlsd import MLSDdetector
-         model_mlsd = MLSDdetector()
-     result = model_mlsd(img, thr_v, thr_d)
-     return [result]
-
-
- model_midas = None
-
-
- def midas(img, res):
-     img = resize_image(HWC3(img), res)
-     global model_midas
-     if model_midas is None:
-         from annotator.midas import MidasDetector
-         model_midas = MidasDetector()
-     result = model_midas(img)
-     return [result]
-
-
- model_zoe = None
-
-
- def zoe(img, res):
-     img = resize_image(HWC3(img), res)
-     global model_zoe
-     if model_zoe is None:
-         from annotator.zoe import ZoeDetector
-         model_zoe = ZoeDetector()
-     result = model_zoe(img)
-     return [result]
-
-
- model_normalbae = None
-
-
- def normalbae(img, res):
-     img = resize_image(HWC3(img), res)
-     global model_normalbae
-     if model_normalbae is None:
-         from annotator.normalbae import NormalBaeDetector
-         model_normalbae = NormalBaeDetector()
-     result = model_normalbae(img)
-     return [result]
-
-
- model_openpose = None
-
-
- def openpose(img, res, hand_and_face):
-     img = resize_image(HWC3(img), res)
-     global model_openpose
-     if model_openpose is None:
-         from annotator.openpose import OpenposeDetector
-         model_openpose = OpenposeDetector()
-     result = model_openpose(img, hand_and_face)
-     return [result]
-
-
- model_uniformer = None
-
-
- # def uniformer(img, res):
- #     img = resize_image(HWC3(img), res)
- #     global model_uniformer
- #     if model_uniformer is None:
- #         from annotator.uniformer import UniformerDetector
- #         model_uniformer = UniformerDetector()
- #     result = model_uniformer(img)
- #     return [result]
-
-
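- # The two lineart detectors below can invert their output with cv2.bitwise_not; the UI notes that inversion is needed for Mochi Diffusion.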
- model_lineart_anime = None
-
-
- def lineart_anime(img, res, invert=True):
-     img = resize_image(HWC3(img), res)
-     global model_lineart_anime
-     if model_lineart_anime is None:
-         from annotator.lineart_anime import LineartAnimeDetector
-         model_lineart_anime = LineartAnimeDetector()
-     if invert:
-         result = cv2.bitwise_not(model_lineart_anime(img))
-     else:
-         result = model_lineart_anime(img)
-     return [result]
-
-
- model_lineart = None
-
-
- def lineart(img, res, coarse=False, invert=True):
-     img = resize_image(HWC3(img), res)
-     global model_lineart
-     if model_lineart is None:
-         from annotator.lineart import LineartDetector
-         model_lineart = LineartDetector()
-     if invert:
-         result = cv2.bitwise_not(model_lineart(img, coarse))
-     else:
-         result = model_lineart(img, coarse)
-     return [result]
-
-
- model_oneformer_coco = None
-
-
- def oneformer_coco(img, res):
-     img = resize_image(HWC3(img), res)
-     global model_oneformer_coco
-     if model_oneformer_coco is None:
-         from annotator.oneformer import OneformerCOCODetector
-         model_oneformer_coco = OneformerCOCODetector()
-     result = model_oneformer_coco(img)
-     return [result]
-
-
- model_oneformer_ade20k = None
-
-
- def oneformer_ade20k(img, res):
-     img = resize_image(HWC3(img), res)
-     global model_oneformer_ade20k
-     if model_oneformer_ade20k is None:
-         from annotator.oneformer import OneformerADE20kDetector
-         model_oneformer_ade20k = OneformerADE20kDetector()
-     result = model_oneformer_ade20k(img)
-     return [result]
-
-
- model_content_shuffler = None
-
-
- def content_shuffler(img, res):
-     img = resize_image(HWC3(img), res)
-     global model_content_shuffler
-     if model_content_shuffler is None:
-         from annotator.shuffle import ContentShuffleDetector
-         model_content_shuffler = ContentShuffleDetector()
-     result = model_content_shuffler(img)
-     return [result]
-
-
- model_color_shuffler = None
-
-
- def color_shuffler(img, res):
-     img = resize_image(HWC3(img), res)
-     global model_color_shuffler
-     if model_color_shuffler is None:
-         from annotator.shuffle import ColorShuffleDetector
-         model_color_shuffler = ColorShuffleDetector()
-     result = model_color_shuffler(img)
-     return [result]
-
- model_inpaint = None
-
-
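- # inpaint() returns an RGBA image: the uploaded photo as RGB plus the sketched mask (optionally inverted) as the alpha channel.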
- def inpaint(image, invert):
-     # image = resize_image(img, res)
-     color = HWC3(image["image"])
-     if invert:
-         alpha = image["mask"][:, :, 0:1]
-     else:
-         alpha = 255 - image["mask"][:, :, 0:1]
-     result = np.concatenate([color, alpha], axis=2)
-     return [result]
-
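- # UI: one section per annotator, each wiring a Run button to the corresponding function above.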
- block = gr.Blocks().queue()
- with block:
-     gr.Markdown(DESCRIPTION)
-     with gr.Row():
-         gr.Markdown("## Canny Edge")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             low_threshold = gr.Slider(label="low_threshold", minimum=1, maximum=255, value=100, step=1)
-             high_threshold = gr.Slider(label="high_threshold", minimum=1, maximum=255, value=200, step=1)
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=canny, inputs=[input_image, resolution, low_threshold, high_threshold], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Inpaint \n<p>Prototype: this may become usable in the next version of Mochi Diffusion.")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy", tool="sketch", height=512)
-             # resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             invert = gr.Checkbox(label='Invert Mask', value=False)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     # run_button.click(fn=inpaint, inputs=[input_image, resolution], outputs=[gallery])
-     run_button.click(fn=inpaint, inputs=[input_image, invert], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## HED Edge&nbsp;&quot;SoftEdge&quot;")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=hed, inputs=[input_image, resolution], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Pidi Edge&nbsp;&quot;SoftEdge&quot;")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=pidi, inputs=[input_image, resolution], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## MLSD Edge")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             value_threshold = gr.Slider(label="value_threshold", minimum=0.01, maximum=2.0, value=0.1, step=0.01)
-             distance_threshold = gr.Slider(label="distance_threshold", minimum=0.01, maximum=20.0, value=0.1, step=0.01)
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=384, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=mlsd, inputs=[input_image, resolution, value_threshold, distance_threshold], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## MIDAS Depth")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=384, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=midas, inputs=[input_image, resolution], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Zoe Depth")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=zoe, inputs=[input_image, resolution], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Normal Bae")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=normalbae, inputs=[input_image, resolution], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Openpose")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             hand_and_face = gr.Checkbox(label='Hand and Face', value=False)
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=openpose, inputs=[input_image, resolution, hand_and_face], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Lineart Anime \n<p>Check Invert to use with Mochi Diffusion.")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             invert = gr.Checkbox(label='Invert', value=True)
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=lineart_anime, inputs=[input_image, resolution, invert], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Lineart \n<p>Check Invert to use with Mochi Diffusion. An inverted image can also be created here for use with ControlNet Scribble.")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             coarse = gr.Checkbox(label='Using coarse model', value=False)
-             invert = gr.Checkbox(label='Invert', value=True)
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=lineart, inputs=[input_image, resolution, coarse, invert], outputs=[gallery])
-
-     # with gr.Row():
-     #     gr.Markdown("## Uniformer Segmentation")
-     # with gr.Row():
-     #     with gr.Column():
-     #         input_image = gr.Image(source='upload', type="numpy")
-     #         resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-     #         run_button = gr.Button(label="Run")
-     #     with gr.Column():
-     #         gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     # run_button.click(fn=uniformer, inputs=[input_image, resolution], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Oneformer COCO Segmentation")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=oneformer_coco, inputs=[input_image, resolution], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Oneformer ADE20K Segmentation")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=640, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=oneformer_ade20k, inputs=[input_image, resolution], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Content Shuffle")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=content_shuffler, inputs=[input_image, resolution], outputs=[gallery])
-
-     gr.Markdown("<hr>")
-     with gr.Row():
-         gr.Markdown("## Color Shuffle")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(source='upload', type="numpy")
-             resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
-             run_button = gr.Button(label="Run")
-         with gr.Column():
-             gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
-     run_button.click(fn=color_shuffler, inputs=[input_image, resolution], outputs=[gallery])
-
-
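- # 0.0.0.0 listens on all interfaces, making the app reachable from outside its container (e.g. on Hugging Face Spaces).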
- block.launch(server_name='0.0.0.0')