Bradarr nitrosocke committed on
Commit a08b800 · 0 Parent(s):

Duplicate from nitrosocke/Diffusion_Space

Co-authored-by: Nitrosocke <[email protected]>
Files changed (6)
  1. .gitattributes +33 -0
  2. README.md +14 -0
  3. app.py +279 -0
  4. nsfw.png +0 -0
  5. requirements.txt +10 -0
  6. utils.py +6 -0
.gitattributes ADDED
@@ -0,0 +1,33 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+---
+title: Diffusion Space
+emoji: 💽
+colorFrom: blue
+colorTo: pink
+sdk: gradio
+sdk_version: 3.6
+app_file: app.py
+pinned: true
+license: creativeml-openrail-m
+duplicated_from: nitrosocke/Diffusion_Space
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,279 @@
+import gradio as gr
+import cv2
+import torch
+import utils
+import datetime
+import time
+import psutil
+from imwatermark import WatermarkEncoder
+import numpy as np
+from PIL import Image
+from diffusers import EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
+
+start_time = time.time()
+is_colab = utils.is_google_colab()
+
+#wm = "SDV2"
+#wm_encoder = WatermarkEncoder()
+#wm_encoder.set_watermark('bytes', wm.encode('utf-8'))
+#def put_watermark(img, wm_encoder=None):
+#    if wm_encoder is not None:
+#        img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
+#        img = wm_encoder.encode(img, 'dwtDct')
+#        img = Image.fromarray(img[:, :, ::-1])
+#    return img
+
+class Model:
+    def __init__(self, name, path="", prefix=""):
+        self.name = name
+        self.path = path
+        self.prefix = prefix
+        self.pipe_t2i = None
+        self.pipe_i2i = None
+
+models = [
+    Model("Redshift Diffusion 768", "nitrosocke/redshift-diffusion-768", "redshift style")
+]
+# Model("Ghibli Diffusion", "nitrosocke/Ghibli-Diffusion", "ghibli style"),
+# Model("Redshift Diffusion", "nitrosocke/Redshift-Diffusion", "redshift style"),
+# Model("Nitro Diffusion", "nitrosocke/Nitro-Diffusion", "archer arcane modern disney"),
+
+scheduler = EulerDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-2", subfolder="scheduler", prediction_type="v_prediction")
+
+#scheduler = EulerDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-2-base", subfolder="scheduler")
+
+custom_model = None
+if is_colab:
+    models.insert(0, Model("Custom model"))
+    custom_model = models[0]
+
+last_mode = "txt2img"
+current_model = models[1] if is_colab else models[0]
+current_model_path = current_model.path
+
+if is_colab:
+    pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
+
+else:  # download all models
+    print(f"{datetime.datetime.now()} Downloading vae...")
+    pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
+    #vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
+    for model in list(models):  # iterate over a copy, since models that fail to load are removed
+        try:
+            print(f"{datetime.datetime.now()} Downloading {model.name} model...")
+            unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
+            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, torch_dtype=torch.float16, scheduler=scheduler)
+            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, torch_dtype=torch.float16, scheduler=scheduler)
+        except Exception as e:
+            print(f"{datetime.datetime.now()} Failed to load model {model.name}: {e}")
+            models.remove(model)
+    pipe = models[0].pipe_t2i
+
+if torch.cuda.is_available():
+    pipe = pipe.to("cuda")
+
+device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
+
+def error_str(error, title="Error"):
+    return f"""#### {title}
+            {error}""" if error else ""
+
+def custom_model_changed(path):
+    global current_model
+    models[0].path = path
+    current_model = models[0]
+
+def on_model_change(model_name):
+
+    prefix = "Enter prompt. \"" + next((m.prefix for m in models if m.name == model_name), None) + "\" is prefixed automatically" if model_name != models[0].name else "Don't forget to use the custom model prefix in the prompt!"
+
+    return gr.update(visible=model_name == models[0].name), gr.update(placeholder=prefix)
+
+def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
+
+    print(psutil.virtual_memory())  # log memory usage
+
+    global current_model
+    for model in models:
+        if model.name == model_name:
+            current_model = model
+            model_path = current_model.path
+
+    # seed the generator on whichever device the pipeline runs on
+    generator = torch.Generator("cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed) if seed != 0 else None
+
+    try:
+        if img is not None:
+            return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
+        else:
+            return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator), None
+    except Exception as e:
+        return None, error_str(e)
+
+def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator):
+
+    print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
+
+    global last_mode
+    global pipe
+    global current_model_path
+    if model_path != current_model_path or last_mode != "txt2img":
+        current_model_path = model_path
+
+        if is_colab or current_model == custom_model:
+            pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
+        else:
+            pipe = pipe.to("cpu")
+            pipe = current_model.pipe_t2i
+
+        if torch.cuda.is_available():
+            pipe = pipe.to("cuda")
+        last_mode = "txt2img"
+
+    prompt = f"{current_model.prefix} {prompt}"
+    results = pipe(
+        prompt,
+        negative_prompt=neg_prompt,
+        # num_images_per_prompt=n_images,
+        num_inference_steps=int(steps),
+        guidance_scale=guidance,
+        width=width,
+        height=height,
+        generator=generator)
+
+    return results.images[0]
+
+def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
+
+    print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
+
+    global last_mode
+    global pipe
+    global current_model_path
+    if model_path != current_model_path or last_mode != "img2img":
+        current_model_path = model_path
+
+        if is_colab or current_model == custom_model:
+            # img2img pipeline here (the original mistakenly loaded StableDiffusionPipeline)
+            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
+        else:
+            pipe = pipe.to("cpu")
+            pipe = current_model.pipe_i2i
+
+        if torch.cuda.is_available():
+            pipe = pipe.to("cuda")
+        last_mode = "img2img"
+
+    prompt = f"{current_model.prefix} {prompt}"
+    ratio = min(height / img.height, width / img.width)
+    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
+    results = pipe(
+        prompt,
+        negative_prompt=neg_prompt,
+        # num_images_per_prompt=n_images,
+        init_image=img,
+        num_inference_steps=int(steps),
+        strength=strength,
+        guidance_scale=guidance,
+        width=width,
+        height=height,
+        generator=generator)
+
+    return results.images[0]
+
+def replace_nsfw_images(results):
+
+    if is_colab:
+        return results.images[0]
+
+    for i in range(len(results.images)):
+        if results.nsfw_content_detected[i]:
+            results.images[i] = Image.open("nsfw.png")
+    return results.images[0]
+
+css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
+"""
+with gr.Blocks(css=css) as demo:
+    gr.HTML(
+        f"""
+            <div class="finetuned-diffusion-div">
+              <div>
+                <h1>Diffusion Space</h1>
+              </div>
+              <p>
+                Demo for Nitrosocke's fine-tuned models.
+              </p>
+              <p>You can skip the queue and load custom models in the colab: <a href="https://colab.research.google.com/drive/1Yr2QvQcqLHlApoQHDPzZmKREizVm9iZw"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
+              <p>You can also duplicate this space and upgrade to a GPU by going to settings: <a style="display:inline-block" href="https://huggingface.co/spaces/nitrosocke/Diffusion_Space?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
+            </div>
+        """
+    )
+    with gr.Row():
+
+        with gr.Column(scale=55):
+            with gr.Group():
+                model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
+                with gr.Box(visible=False) as custom_model_group:
+                    custom_model_path = gr.Textbox(label="Custom model path", placeholder="nitrosocke/Future-Diffusion", interactive=True)
+                    gr.HTML("<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>")
+
+                with gr.Row():
+                    prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder="Enter prompt. Style applied automatically").style(container=False)
+                    generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
+
+                image_out = gr.Image(height=512)
+                # gallery = gr.Gallery(
+                #     label="Generated images", show_label=False, elem_id="gallery"
+                # ).style(grid=[1], height="auto")
+            error_output = gr.Markdown()
+
+        with gr.Column(scale=45):
+            with gr.Tab("Options"):
+                with gr.Group():
+                    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
+
+                    # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)
+
+                    with gr.Row():
+                        guidance = gr.Slider(label="Guidance scale", value=7, maximum=15, step=1)
+                        steps = gr.Slider(label="Steps", value=20, minimum=2, maximum=30, step=1)
+
+                    with gr.Row():
+                        width = gr.Slider(label="Width", value=768, minimum=768, maximum=1024, step=64)
+                        height = gr.Slider(label="Height", value=768, minimum=768, maximum=1024, step=64)
+
+                    seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
+
+            with gr.Tab("Image to image"):
+                with gr.Group():
+                    image = gr.Image(label="Image", height=256, tool="editor", type="pil")
+                    strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
+
+    if is_colab:
+        model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False)
+        custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
+    # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
+
+    inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
+    outputs = [image_out, error_output]
+    prompt.submit(inference, inputs=inputs, outputs=outputs)
+    generate.click(inference, inputs=inputs, outputs=outputs)
+
+    # each example row supplies one value per component in `inputs` (seed 0 = random)
+    ex = gr.Examples([
+        [models[0].name, "redshift style portrait black female cyberpunk hacker tattoos colorful short hair wearing a crop top redshift style", "mutated body double head bad anatomy long face long neck long body text watermark signature", 7, 20, 0],
+        [models[0].name, "redshift style beautiful fjord at sunrise", "fog blurry soft", 7, 20, 0],
+    ], inputs=[model_name, prompt, neg_prompt, guidance, steps, seed], outputs=outputs, fn=inference, cache_examples=False)
+
+    gr.HTML("""
+        <div style="border-top: 1px solid #303030;">
+            <br>
+            <p>Model by Nitrosocke.</p>
+        </div>
+    """)
+
+print(f"Space built in {time.time() - start_time:.2f} seconds")
+
+if not is_colab:
+    demo.queue(concurrency_count=1)
+demo.launch(debug=is_colab, share=is_colab)
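
For reference, the app's generation path above reduces to a standard diffusers text-to-image call. A minimal standalone sketch of the same setup (an illustration only: it assumes the nitrosocke/redshift-diffusion-768 checkpoint can be downloaded and a CUDA device is available; the prompt and settings are taken from the examples above):

```python
import torch
from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline

# v-prediction scheduler, matching the app.py configuration above
scheduler = EulerDiscreteScheduler.from_pretrained(
    "stabilityai/stable-diffusion-2", subfolder="scheduler", prediction_type="v_prediction")

pipe = StableDiffusionPipeline.from_pretrained(
    "nitrosocke/redshift-diffusion-768", torch_dtype=torch.float16, scheduler=scheduler)
pipe = pipe.to("cuda")

# "redshift style" is the trigger prefix the app prepends automatically
image = pipe(
    "redshift style beautiful fjord at sunrise",
    negative_prompt="fog blurry soft",
    num_inference_steps=20,
    guidance_scale=7,
    width=768,
    height=768,
).images[0]
image.save("fjord.png")
```

The prediction_type="v_prediction" setting matters here: the 768 checkpoint is built on the v-prediction Stable Diffusion 2 model, which is why app.py pulls its scheduler config from stabilityai/stable-diffusion-2 rather than the commented-out -base variant.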
nsfw.png ADDED
requirements.txt ADDED
@@ -0,0 +1,10 @@
+--extra-index-url https://download.pytorch.org/whl/cu113
+torch==1.13.0
+torchvision
+git+https://github.com/huggingface/diffusers.git
+transformers
+accelerate
+ftfy
+python-dotenv
+invisible-watermark
+https://github.com/apolinario/xformers/releases/download/0.0.3/xformers-0.0.14.dev0-cp38-cp38-linux_x86_64.whl
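
Note that the pinned xformers wheel targets Python 3.8 on Linux x86_64, and app.py never enables it explicitly. If memory-efficient attention is wanted, a hedged sketch (maybe_enable_xformers is a hypothetical helper, not part of this commit; it assumes the installed diffusers build exposes enable_xformers_memory_efficient_attention, which the unpinned git install above may or may not include):

```python
import torch

def maybe_enable_xformers(pipe):
    # Hypothetical helper: opt in to xformers memory-efficient attention
    # when a CUDA device and the xformers wheel are both present.
    if not torch.cuda.is_available():
        return pipe
    try:
        pipe.enable_xformers_memory_efficient_attention()
    except Exception as e:  # wheel missing or unsupported GPU
        print(f"xformers unavailable, keeping default attention: {e}")
    return pipe
```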
utils.py ADDED
@@ -0,0 +1,6 @@
+def is_google_colab():
+    try:
+        import google.colab
+        return True
+    except ImportError:
+        return False