Yash911 committed on
Commit 5a69a35
1 Parent(s): 1e57d2a

Update app.py

Files changed (1)
  1. app.py +51 -552
app.py CHANGED
@@ -1,561 +1,60 @@
  # -*- coding: utf-8 -*-
- """fast_stable_diffusion_AUTOMATIC1111.ipynb

  Automatically generated by Colaboratory.

  Original file is located at
- https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb
-
- # **Colab Pro notebook from https://github.com/TheLastBen/fast-stable-diffusion** *Alternatives : [RunPod](https://www.runpod.io/console/gpu-browse?template=runpod-stable-unified) | [Paperspace](https://console.paperspace.com/github/TheLastBen/PPS?machine=Free-GPU)*
- ##**[Support](https://ko-fi.com/thelastben)**
  """

- #@markdown # Connect Google Drive
- from google.colab import drive
- from IPython.display import clear_output
- import ipywidgets as widgets
- import os
-
- def inf(msg, style, wdth): inf = widgets.Button(description=msg, disabled=True, button_style=style, layout=widgets.Layout(min_width=wdth));display(inf)
- Shared_Drive = "" #@param {type:"string"}
- #@markdown - Leave empty if you're not using a shared drive
-
- print("Connecting...")
- drive.mount('/content/gdrive')
-
- if Shared_Drive!="" and os.path.exists("/content/gdrive/Shareddrives"):
- mainpth="Shareddrives/"+Shared_Drive
- else:
- mainpth="MyDrive"
-
- clear_output()
- inf('\u2714 Done','success', '50px')
-
- #@markdown ---

- # Commented out IPython magic to ensure Python compatibility.
- #@markdown # Install/Update AUTOMATIC1111 repo
- from IPython.utils import capture
- from IPython.display import clear_output
- from subprocess import getoutput
- import ipywidgets as widgets
- import sys
- import fileinput
- import os
- import time
- import base64
- import gdown
- from gdown.download import get_url_from_gdrive_confirmation
  import requests
- from urllib.request import urlopen, Request
- from urllib.parse import urlparse, parse_qs, unquote
- from tqdm import tqdm
- import six
-
-
- blasphemy=base64.b64decode(("d2VidWk=").encode('ascii')).decode('ascii')
-
- if not os.path.exists("/content/gdrive"):
- print('Gdrive not connected, using colab storage ...')
- time.sleep(4)
- mainpth="MyDrive"
- !mkdir -p /content/gdrive/$mainpth
- Shared_Drive=""
-
- if Shared_Drive!="" and not os.path.exists("/content/gdrive/Shareddrives"):
- print('Shared drive not detected, using default MyDrive')
- mainpth="MyDrive"
-
- with capture.capture_output() as cap:
- def inf(msg, style, wdth): inf = widgets.Button(description=msg, disabled=True, button_style=style, layout=widgets.Layout(min_width=wdth));display(inf)
- fgitclone = "git clone --depth 1"
- # %mkdir -p /content/gdrive/$mainpth/sd
- # %cd /content/gdrive/$mainpth/sd
- !git clone -q --branch master https://github.com/AUTOMATIC1111/stable-diffusion-$blasphemy
- !mkdir -p /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/cache/
- os.environ['TRANSFORMERS_CACHE']=f"/content/gdrive/{mainpth}/sd/stable-diffusion-"+blasphemy+"/cache"
- os.environ['TORCH_HOME'] = f"/content/gdrive/{mainpth}/sd/stable-diffusion-"+blasphemy+"/cache"
-
- with capture.capture_output() as cap:
- # %cd /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/
- !git reset --hard
- !git checkout master
- time.sleep(1)
- !rm webui.sh
- !git pull
- clear_output()
- inf('\u2714 Done','success', '50px')
-
- #@markdown ---
-
- # Commented out IPython magic to ensure Python compatibility.
- #@markdown # Requirements
-
- print('Installing requirements...')
-
- with capture.capture_output() as cap:
- # %cd /content/
- !wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/A1111.txt
- !dpkg -i *.deb
- if not os.path.exists('/content/gdrive/'+mainpth+'/sd/stablediffusiond'): #restore later
- !tar -C /content/gdrive/$mainpth --zstd -xf sd_mrep.tar.zst
- !tar -C / --zstd -xf gcolabdeps.tar.zst
- !rm *.deb | rm *.zst | rm *.txt
- if not os.path.exists('gdrive/'+mainpth+'/sd/libtcmalloc/libtcmalloc_minimal.so.4'):
- # %env CXXFLAGS=-std=c++14
- !wget -q https://github.com/gperftools/gperftools/releases/download/gperftools-2.5/gperftools-2.5.tar.gz && tar zxf gperftools-2.5.tar.gz && mv gperftools-2.5 gperftools
- !wget -q https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/Patch
- # %cd /content/gperftools
- !patch -p1 < /content/Patch
- !./configure --enable-minimal --enable-libunwind --enable-frame-pointers --enable-dynamic-sized-delete-support --enable-sized-delete --enable-emergency-malloc; make -j4
- !mkdir -p /content/gdrive/$mainpth/sd/libtcmalloc && cp .libs/libtcmalloc*.so* /content/gdrive/$mainpth/sd/libtcmalloc
- # %env LD_PRELOAD=/content/gdrive/$mainpth/sd/libtcmalloc/libtcmalloc_minimal.so.4
- # %cd /content
- !rm *.tar.gz Patch && rm -r /content/gperftools
- else:
- # %env LD_PRELOAD=/content/gdrive/$mainpth/sd/libtcmalloc/libtcmalloc_minimal.so.4
- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
- os.environ['PYTHONWARNINGS'] = 'ignore'
- !sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.10/warnings.py
- !pip install open-clip-torch==2.20.0 -qq --no-deps
- !pip install fastapi==0.94.0 -qq
- clear_output()
- inf('\u2714 Done','success', '50px')
-
- #@markdown ---
-
- #@markdown # Model Download/Load
-
- Use_Temp_Storage = False #@param {type:"boolean"}
- #@markdown - If not, make sure you have enough space on your gdrive
-
- #@markdown ---
-
- Model_Version = "SDXL" #@param ["SDXL", "1.5", "v1.5 Inpainting", "V2.1-768px"]
-
- #@markdown Or
- PATH_to_MODEL = "" #@param {type:"string"}
- #@markdown - Insert the full path of your custom model or to a folder containing multiple models
-
- #@markdown Or
- MODEL_LINK = "" #@param {type:"string"}
-
-
- def getsrc(url):
- parsed_url = urlparse(url)
- if parsed_url.netloc == 'civitai.com':
- src='civitai'
- elif parsed_url.netloc == 'drive.google.com':
- src='gdrive'
- elif parsed_url.netloc == 'huggingface.co':
- src='huggingface'
- else:
- src='others'
- return src
-
- src=getsrc(MODEL_LINK)
-
- def get_name(url, gdrive):
- if not gdrive:
- response = requests.get(url, allow_redirects=False)
- if "Location" in response.headers:
- redirected_url = response.headers["Location"]
- quer = parse_qs(urlparse(redirected_url).query)
- if "response-content-disposition" in quer:
- disp_val = quer["response-content-disposition"][0].split(";")
- for vals in disp_val:
- if vals.strip().startswith("filename="):
- filenm=unquote(vals.split("=", 1)[1].strip())
- return filenm.replace("\"","")
- else:
- headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
- lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
- res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
- res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
- content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
- filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
- return filenm
-
-
- def dwn(url, dst, msg):
- file_size = None
- req = Request(url, headers={"User-Agent": "torch.hub"})
- u = urlopen(req)
- meta = u.info()
- if hasattr(meta, 'getheaders'):
- content_length = meta.getheaders("Content-Length")
- else:
- content_length = meta.get_all("Content-Length")
- if content_length is not None and len(content_length) > 0:
- file_size = int(content_length[0])
-
- with tqdm(total=file_size, disable=False, mininterval=0.5,
- bar_format=msg+' |{bar:20}| {percentage:3.0f}%') as pbar:
- with open(dst, "wb") as f:
- while True:
- buffer = u.read(8192)
- if len(buffer) == 0:
- break
- f.write(buffer)
- pbar.update(len(buffer))
- f.close()
-
-
- def sdmdls(ver, Use_Temp_Storage):
-
- if ver=='1.5':
- if Use_Temp_Storage:
- os.makedirs('/content/temp_models', exist_ok=True)
- model='/content/temp_models/v1-5-pruned-emaonly.safetensors'
- else:
- model='/content/gdrive/'+mainpth+'/sd/stable-diffusion-'+blasphemy+'/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors'
- link='https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors'
- elif ver=='V2.1-768px':
- if Use_Temp_Storage:
- os.makedirs('/content/temp_models', exist_ok=True)
- model='/content/temp_models/v2-1_768-ema-pruned.safetensors'
- else:
- model='/content/gdrive/'+mainpth+'/sd/stable-diffusion-'+blasphemy+'/models/Stable-diffusion/v2-1_768-ema-pruned.safetensors'
- link='https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.safetensors'
- elif ver=='v1.5 Inpainting':
- if Use_Temp_Storage:
- os.makedirs('/content/temp_models', exist_ok=True)
- model='/content/temp_models/sd-v1-5-inpainting.ckpt'
- else:
- model='/content/gdrive/'+mainpth+'/sd/stable-diffusion-'+blasphemy+'/models/Stable-diffusion/sd-v1-5-inpainting.ckpt'
- link='https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt'
- elif ver=='SDXL':
- if Use_Temp_Storage:
- os.makedirs('/content/temp_models', exist_ok=True)
- model='/content/temp_models/sd_xl_base_1.0.safetensors'
- else:
- model='/content/gdrive/'+mainpth+'/sd/stable-diffusion-'+blasphemy+'/models/Stable-diffusion/sd_xl_base_1.0.safetensors'
- link='https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors'
-
- if not os.path.exists(model):
- !gdown --fuzzy -O $model $link
- if os.path.exists(model):
- clear_output()
- inf('\u2714 Done','success', '50px')
- else:
- inf('\u2718 Something went wrong, try again','danger', "250px")
- else:
- clear_output()
- inf('\u2714 Model already exists','primary', '300px')
-
- return model
-
-
- if (PATH_to_MODEL !=''):
- if os.path.exists(str(PATH_to_MODEL)):
- inf('\u2714 Using the trained model.','success', '200px')
-
- else:
- while not os.path.exists(str(PATH_to_MODEL)):
- inf('\u2718 Wrong path, use the colab file explorer to copy the path : ','danger', "400px")
- PATH_to_MODEL=input()
- if os.path.exists(str(PATH_to_MODEL)):
- inf('\u2714 Using the custom model.','success', '200px')
-
- model=PATH_to_MODEL
-
- elif MODEL_LINK != "":
-
- if src=='civitai':
- modelname=get_name(MODEL_LINK, False)
- if Use_Temp_Storage:
- os.makedirs('/content/temp_models', exist_ok=True)
- model=f'/content/temp_models/{modelname}'
- else:
- model=f'/content/gdrive/{mainpth}/sd/stable-diffusion-{blasphemy}/models/Stable-diffusion/{modelname}'
- if not os.path.exists(model):
- dwn(MODEL_LINK, model, 'Downloading the custom model')
- clear_output()
- else:
- inf('\u2714 Model already exists','primary', '300px')
- elif src=='gdrive':
- modelname=get_name(MODEL_LINK, True)
- if Use_Temp_Storage:
- os.makedirs('/content/temp_models', exist_ok=True)
- model=f'/content/temp_models/{modelname}'
- else:
- model=f'/content/gdrive/{mainpth}/sd/stable-diffusion-{blasphemy}/models/Stable-diffusion/{modelname}'
- if not os.path.exists(model):
- gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
- clear_output()
- else:
- inf('\u2714 Model already exists','primary', '300px')
- else:
- modelname=os.path.basename(MODEL_LINK)
- if Use_Temp_Storage:
- os.makedirs('/content/temp_models', exist_ok=True)
- model=f'/content/temp_models/{modelname}'
- else:
- model=f'/content/gdrive/{mainpth}/sd/stable-diffusion-{blasphemy}/models/Stable-diffusion/{modelname}'
- if not os.path.exists(model):
- gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
- clear_output()
- else:
- inf('\u2714 Model already exists','primary', '700px')
-
- if os.path.exists(model) and os.path.getsize(model) > 1810671599:
- inf('\u2714 Model downloaded, using the custom model.','success', '300px')
- else:
- !rm model
- inf('\u2718 Wrong link, check that the link is valid','danger', "300px")
-
- else:
- model=sdmdls(Model_Version, Use_Temp_Storage)
-
- #@markdown ---
-
- #@markdown # Download LoRA
-
- LoRA_LINK = "" #@param {type:"string"}
-
- os.makedirs('/content/gdrive/'+mainpth+'/sd/stable-diffusion-'+blasphemy+'/models/Lora', exist_ok=True)
-
- src=getsrc(LoRA_LINK)
-
- if src=='civitai':
- modelname=get_name(LoRA_LINK, False)
- loramodel=f'/content/gdrive/{mainpth}/sd/stable-diffusion-{blasphemy}/models/Lora/{modelname}'
- if not os.path.exists(loramodel):
- dwn(LoRA_LINK, loramodel, 'Downloading the LoRA model')
- clear_output()
- else:
- inf('\u2714 Model already exists','primary', '300px')
- elif src=='gdrive':
- modelname=get_name(LoRA_LINK, True)
- loramodel=f'/content/gdrive/{mainpth}/sd/stable-diffusion-{blasphemy}/models/Lora/{modelname}'
- if not os.path.exists(loramodel):
- gdown.download(url=LoRA_LINK, output=loramodel, quiet=False, fuzzy=True)
- clear_output()
- else:
- inf('\u2714 Model already exists','primary', '300px')
- else:
- modelname=os.path.basename(LoRA_LINK)
- loramodel=f'/content/gdrive/{mainpth}/sd/stable-diffusion-{blasphemy}/models/Lora/{modelname}'
- if not os.path.exists(loramodel):
- gdown.download(url=LoRA_LINK, output=loramodel, quiet=False, fuzzy=True)
- clear_output()
- else:
- inf('\u2714 Model already exists','primary', '700px')
-
- if os.path.exists(loramodel) :
- inf('\u2714 LoRA downloaded','success', '300px')
- else:
- inf('\u2718 Wrong link, check that the link is valid','danger', "300px")
-
- #@markdown ---
-
- # Commented out IPython magic to ensure Python compatibility.
- #@markdown # ControlNet
- from torch.hub import download_url_to_file
- from urllib.parse import urlparse
- import re
- from subprocess import run
-
- Model = "None" #@param [ "None", "All (21GB)", "Canny", "Depth", "Lineart", "MLSD", "Normal", "OpenPose", "Scribble", "Seg", "ip2p", "Shuffle", "Inpaint", "Softedge", "Lineart_Anime", "Tile", "T2iadapter_Models"]
-
- v2_Model = "None" #@param [ "None", "All", "Canny", "Depth", "HED", "OpenPose", "Scribble"]
-
- #@markdown - Download/update ControlNet extension and its models
-
- def download(url, model_dir):
-
- filename = os.path.basename(urlparse(url).path)
- pth = os.path.abspath(os.path.join(model_dir, filename))
- if not os.path.exists(pth):
- print('Downloading: '+os.path.basename(url))
- download_url_to_file(url, pth, hash_prefix=None, progress=True)
- else:
- print(f"The model {filename} already exists")
-
- Canny='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_canny.pth'
- Depth='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth.pth'
- Lineart='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart.pth'
- MLSD='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_mlsd.pth'
- Normal='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_normalbae.pth'
- OpenPose='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_openpose.pth'
- Scribble='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_scribble.pth'
- Seg='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_seg.pth'
- ip2p='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11e_sd15_ip2p.pth'
- Shuffle='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11e_sd15_shuffle.pth'
- Inpaint='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_inpaint.pth'
- Softedge='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge.pth'
- Lineart_Anime='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15s2_lineart_anime.pth'
- Tile='https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth'
-
- with capture.capture_output() as cap:
- # %cd /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/extensions
- if not os.path.exists("sd-webui-controlnet"):
- !git clone https://github.com/Mikubill/sd-$blasphemy-controlnet.git
- # %cd /content
- else:
- # %cd sd-webui-controlnet
- !git reset --hard
- !git pull
- # %cd /content
-
- mdldir='/content/gdrive/'+mainpth+'/sd/stable-diffusion-'+blasphemy+'/extensions/sd-webui-controlnet/models'
- for filename in os.listdir(mdldir):
- if "_sd14v1" in filename:
- renamed = re.sub("_sd14v1", "-fp16", filename)
- os.rename(os.path.join(mdldir, filename), os.path.join(mdldir, renamed))
-
- !wget -q -O CN_models.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models.txt
- !wget -q -O CN_models_v2.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models_v2.txt
-
- with open("CN_models.txt", 'r') as f:
- mdllnk = f.read().splitlines()
- with open("CN_models_v2.txt", 'r') as d:
- mdllnk_v2 = d.read().splitlines()
-
- !rm CN_models.txt CN_models_v2.txt
-
- with capture.capture_output() as cap:
- cfgnames=[os.path.basename(url).split('.')[0]+'.yaml' for url in mdllnk_v2]
- # %cd /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/extensions/sd-webui-controlnet/models
- for name in cfgnames:
- run(['cp', 'cldm_v21.yaml', name])
- # %cd /content
-
- if Model == "All (21GB)":
- for lnk in mdllnk:
- download(lnk, mdldir)
- clear_output()
-
- elif Model == "T2iadapter_Models":
- mdllnk=list(filter(lambda x: 't2i' in x, mdllnk))
- for lnk in mdllnk:
- download(lnk, mdldir)
- clear_output()
-
- elif Model == "None":
- pass
- clear_output()
-
- else:
- download(globals()[Model], mdldir)
- clear_output()
-
- Canny='https://huggingface.co/thibaud/controlnet-sd21/resolve/main/control_v11p_sd21_canny.safetensors'
- Depth='https://huggingface.co/thibaud/controlnet-sd21/resolve/main/control_v11p_sd21_depth.safetensors'
- HED='https://huggingface.co/thibaud/controlnet-sd21/resolve/main/control_v11p_sd21_hed.safetensors'
- OpenPose='https://huggingface.co/thibaud/controlnet-sd21/resolve/main/control_v11p_sd21_openposev2.safetensors'
- Scribble='https://huggingface.co/thibaud/controlnet-sd21/resolve/main/control_v11p_sd21_scribble.safetensors'
-
- if v2_Model == "All":
- for lnk_v2 in mdllnk_v2:
- download(lnk_v2, mdldir)
- clear_output()
- inf('\u2714 Done','success', '50px')
-
- elif v2_Model == "None":
- pass
- clear_output()
- inf('\u2714 Done','success', '50px')
-
- else:
- download(globals()[v2_Model], mdldir)
- clear_output()
- inf('\u2714 Done','success', '50px')
-
- #@markdown ---
-
- # Commented out IPython magic to ensure Python compatibility.
- #@markdown # Start Stable-Diffusion
- from IPython.utils import capture
- import time
- import sys
- import fileinput
- from pyngrok import ngrok, conf
- import re
-
-
- Use_Cloudflare_Tunnel = False #@param {type:"boolean"}
- #@markdown - Offers better gradio responsivity
-
- Ngrok_token = "" #@param {type:"string"}
-
- #@markdown - Input your ngrok token if you want to use ngrok server
-
- User = "" #@param {type:"string"}
- Password= "" #@param {type:"string"}
- #@markdown - Add credentials to your Gradio interface (optional)
-
- auth=f"--gradio-auth {User}:{Password}"
- if User =="" or Password=="":
- auth=""
-
-
- with capture.capture_output() as cap:
- # %cd /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/modules/
- !wget -q -O extras.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-$blasphemy/master/modules/extras.py
- !wget -q -O sd_models.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-$blasphemy/master/modules/sd_models.py
- !wget -q -O /usr/local/lib/python3.10/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py
- # %cd /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/
-
- !sed -i 's@ui.create_ui().*@ui.create_ui();shared.demo.queue(concurrency_count=999999,status_update_rate=0.1)@' /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/webui.py
-
- !sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/content/gdrive/{mainpth}/sd/stablediffusion\"]@' /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/modules/paths.py
- !sed -i 's@\.\.\/@src/@g' /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/modules/paths.py
- !sed -i 's@src/generative-models@generative-models@g' /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/modules/paths.py
-
- !sed -i 's@print(\"No module.*@@' /content/gdrive/$mainpth/sd/stablediffusion/ldm/modules/diffusionmodules/model.py
- !sed -i 's@\["sd_model_checkpoint"\]@\["sd_model_checkpoint", "sd_vae", "CLIP_stop_at_last_layers", "inpainting_mask_weight", "initial_noise_multiplier"\]@g' /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/modules/shared.py
-
- share=''
- if Ngrok_token!="":
- ngrok.kill()
- srv=ngrok.connect(7860, pyngrok_config=conf.PyngrokConfig(auth_token=Ngrok_token) , bind_tls=True).public_url
-
- for line in fileinput.input('/usr/local/lib/python3.10/dist-packages/gradio/blocks.py', inplace=True):
- if line.strip().startswith('self.server_name ='):
- line = f' self.server_name = "{srv[8:]}"\n'
- if line.strip().startswith('self.protocol = "https"'):
- line = ' self.protocol = "https"\n'
- if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
- line = ''
- if line.strip().startswith('else "http"'):
- line = ''
- sys.stdout.write(line)
-
- elif Use_Cloudflare_Tunnel:
- with capture.capture_output() as cap:
- !pkill cloudflared
- time.sleep(4)
- !nohup cloudflared tunnel --url http://localhost:7860 > /content/srv.txt 2>&1 &
- time.sleep(4)
- with open('/content/srv.txt', "r") as file: text = file.read()
- srv= re.findall(r"https?://(?:\S+?\.)?trycloudflare\.com\S*", text)[0]
-
- for line in fileinput.input('/usr/local/lib/python3.10/dist-packages/gradio/blocks.py', inplace=True):
- if line.strip().startswith('self.server_name ='):
- line = f' self.server_name = "{srv[8:]}"\n'
- if line.strip().startswith('self.protocol = "https"'):
- line = ' self.protocol = "https"\n'
- if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
- line = ''
- if line.strip().startswith('else "http"'):
- line = ''
- sys.stdout.write(line)
-
- !rm /content/srv.txt
-
- else:
- share='--share'
-
- ckptdir=''
- if os.path.exists('/content/temp_models'):
- ckptdir='--ckpt-dir /content/temp_models'
-
- try:
- model
- if os.path.isfile(model):
- !python /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/webui.py $share --api --disable-safe-unpickle --enable-insecure-extension-access --no-download-sd-model --no-half-vae --ckpt "$model" --xformers $auth --disable-console-progressbars --upcast-sampling $ckptdir
- else:
- !python /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/webui.py $share --api --disable-safe-unpickle --enable-insecure-extension-access --no-download-sd-model --no-half-vae --ckpt-dir "$model" --xformers $auth --disable-console-progressbars --upcast-sampling
- except:
- !python /content/gdrive/$mainpth/sd/stable-diffusion-$blasphemy/webui.py $share --api --disable-safe-unpickle --enable-insecure-extension-access --no-download-sd-model --no-half-vae --xformers $auth --disable-console-progressbars --upcast-sampling $ckptdir
 
  # -*- coding: utf-8 -*-
+ """mini_t2i.ipynb

  Automatically generated by Colaboratory.

  Original file is located at
+ https://colab.research.google.com/drive/1QL7cDE204_CEk2kw87aiM7ArlAVF-1Uu
  """

+ # !pip install gradio
+ # !pip install cloudinary

+ import gradio as gr
  import requests
+ import cloudinary
+ import cloudinary.uploader
+ from PIL import Image
+ import io
+
+ # Set up Cloudinary credentials
+ cloudinary.config(
+     cloud_name="dvuowbmrz",
+     api_key="177664162661619",
+     api_secret="qVMYel17N_C5QUUUuBIuatB5tq0"
+ )
+
+ API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
+ headers = {"Authorization": "Bearer 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"}
+
+ def query_model_with_image(image_description):
+     payload = {
+         "inputs": image_description
+     }
+     response = requests.post(API_URL, headers=headers, json=payload)
+     image_bytes = response.content
+
+     image = Image.open(io.BytesIO(image_bytes))
+     return image
+
+ def upload_to_cloudinary(image):
+     image_data = io.BytesIO()
+     image.save(image_data, format="JPEG")
+     image_data.seek(0)
+
+     upload_result = cloudinary.uploader.upload(image_data, folder="compvis_app")
+     return upload_result["secure_url"]
+
+ def process_and_upload(image_description):
+     processed_image = query_model_with_image(image_description)
+     uploaded_url = upload_to_cloudinary(processed_image)
+     return processed_image, uploaded_url
+
+ iface = gr.Interface(
+     fn=process_and_upload,
+     inputs=gr.inputs.Textbox(label="Image Description"),
+     outputs=["image", "text"]
+ )
+
+ if __name__ == "__main__":
+     iface.launch(share=True)