zhao committed
Commit d4f7443
1 Parent(s): df035c3

Upload app.py

Files changed (1)
  1. app.py +325 -0
app.py ADDED
@@ -0,0 +1,325 @@
#!/usr/bin/env python

from __future__ import annotations

import os
import random

import gradio as gr
import numpy as np
import PIL.Image
import torch
from diffusers import DiffusionPipeline

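# Demo banner text plus runtime options; the options are read from
# environment variables so the Space can be tuned without code changes.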
DESCRIPTION = '# SD-XL'
if not torch.cuda.is_available():
    DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'

MAX_SEED = np.iinfo(np.int32).max
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
    'CACHE_EXAMPLES') == '1'
MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
USE_TORCH_COMPILE = os.getenv('USE_TORCH_COMPILE') == '1'
ENABLE_CPU_OFFLOAD = os.getenv('ENABLE_CPU_OFFLOAD') == '1'

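# Load the SD-XL base and refiner pipelines in fp16 when a GPU is available;
# on CPU-only hosts the UI still renders, but both pipelines stay None.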
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if torch.cuda.is_available():
    pipe = DiffusionPipeline.from_pretrained(
        'stabilityai/stable-diffusion-xl-base-1.0',
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant='fp16')
    refiner = DiffusionPipeline.from_pretrained(
        'stabilityai/stable-diffusion-xl-refiner-1.0',
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant='fp16')

    if ENABLE_CPU_OFFLOAD:
        pipe.enable_model_cpu_offload()
        refiner.enable_model_cpu_offload()
    else:
        pipe.to(device)
        refiner.to(device)

    if USE_TORCH_COMPILE:
        pipe.unet = torch.compile(pipe.unet,
                                  mode='reduce-overhead',
                                  fullgraph=True)
else:
    pipe = None
    refiner = None


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed


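# Generate one image with the base pipeline and, when apply_refiner is set,
# hand the latents to the refiner for the final denoising pass.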
def generate(prompt: str,
             negative_prompt: str = '',
             prompt_2: str = '',
             negative_prompt_2: str = '',
             use_negative_prompt: bool = False,
             use_prompt_2: bool = False,
             use_negative_prompt_2: bool = False,
             seed: int = 0,
             width: int = 1024,
             height: int = 1024,
             guidance_scale_base: float = 5.0,
             guidance_scale_refiner: float = 5.0,
             num_inference_steps_base: int = 50,
             num_inference_steps_refiner: int = 50,
             apply_refiner: bool = False) -> PIL.Image.Image:
    generator = torch.Generator().manual_seed(seed)

    if not use_negative_prompt:
        negative_prompt = None  # type: ignore
    if not use_prompt_2:
        prompt_2 = None  # type: ignore
    if not use_negative_prompt_2:
        negative_prompt_2 = None  # type: ignore

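    # output_type='pil' decodes the result straight to an image; 'latent'
    # keeps the raw latents so the refiner can finish the denoising.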
    if not apply_refiner:
        return pipe(prompt=prompt,
                    negative_prompt=negative_prompt,
                    prompt_2=prompt_2,
                    negative_prompt_2=negative_prompt_2,
                    width=width,
                    height=height,
                    guidance_scale=guidance_scale_base,
                    num_inference_steps=num_inference_steps_base,
                    generator=generator,
                    output_type='pil').images[0]
    else:
        latents = pipe(prompt=prompt,
                       negative_prompt=negative_prompt,
                       prompt_2=prompt_2,
                       negative_prompt_2=negative_prompt_2,
                       width=width,
                       height=height,
                       guidance_scale=guidance_scale_base,
                       num_inference_steps=num_inference_steps_base,
                       generator=generator,
                       output_type='latent').images
        image = refiner(prompt=prompt,
                        negative_prompt=negative_prompt,
                        prompt_2=prompt_2,
                        negative_prompt_2=negative_prompt_2,
                        guidance_scale=guidance_scale_refiner,
                        num_inference_steps=num_inference_steps_refiner,
                        image=latents,
                        generator=generator).images[0]
        return image


examples = [
    'Astronaut in a jungle, cold color palette, muted colors, detailed, 8k',
    'An astronaut riding a green horse',
]

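# Build the Gradio UI: a prompt box, the result image, and an accordion of
# advanced options.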
with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value='Duplicate Space for private use',
                       elem_id='duplicate-button',
                       visible=os.getenv('SHOW_DUPLICATE_BUTTON') == '1')
    with gr.Box():
        with gr.Row():
            prompt = gr.Text(
                label='Prompt',
                show_label=False,
                max_lines=1,
                placeholder='Enter your prompt',
                container=False,
            )
            run_button = gr.Button('Run', scale=0)
        result = gr.Image(label='Result', show_label=False)
        with gr.Accordion('Advanced options', open=False):
            with gr.Row():
                use_negative_prompt = gr.Checkbox(label='Use negative prompt',
                                                  value=False)
                use_prompt_2 = gr.Checkbox(label='Use prompt 2', value=False)
                use_negative_prompt_2 = gr.Checkbox(
                    label='Use negative prompt 2', value=False)
            negative_prompt = gr.Text(
                label='Negative prompt',
                max_lines=1,
                placeholder='Enter a negative prompt',
                visible=False,
            )
            prompt_2 = gr.Text(
                label='Prompt 2',
                max_lines=1,
                placeholder='Enter your prompt',
                visible=False,
            )
            negative_prompt_2 = gr.Text(
                label='Negative prompt 2',
                max_lines=1,
                placeholder='Enter a negative prompt',
                visible=False,
            )

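            # Sampling controls; the refiner settings stay hidden until the
            # 'Apply refiner' checkbox is ticked.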
            seed = gr.Slider(label='Seed',
                             minimum=0,
                             maximum=MAX_SEED,
                             step=1,
                             value=0)
            randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
            with gr.Row():
                width = gr.Slider(
                    label='Width',
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
                height = gr.Slider(
                    label='Height',
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
            apply_refiner = gr.Checkbox(label='Apply refiner', value=False)
            with gr.Row():
                guidance_scale_base = gr.Slider(
                    label='Guidance scale for base',
                    minimum=1,
                    maximum=20,
                    step=0.1,
                    value=5.0)
                num_inference_steps_base = gr.Slider(
                    label='Number of inference steps for base',
                    minimum=10,
                    maximum=100,
                    step=1,
                    value=50)
            with gr.Row(visible=False) as refiner_params:
                guidance_scale_refiner = gr.Slider(
                    label='Guidance scale for refiner',
                    minimum=1,
                    maximum=20,
                    step=0.1,
                    value=5.0)
                num_inference_steps_refiner = gr.Slider(
                    label='Number of inference steps for refiner',
                    minimum=10,
                    maximum=100,
                    step=1,
                    value=50)

    gr.Examples(examples=examples,
                inputs=prompt,
                outputs=result,
                fn=generate,
                cache_examples=CACHE_EXAMPLES)

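    # Checkbox toggles show or hide the matching optional inputs and the
    # refiner parameter row.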
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        queue=False,
        api_name=False,
    )
    use_prompt_2.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_prompt_2,
        outputs=prompt_2,
        queue=False,
        api_name=False,
    )
    use_negative_prompt_2.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt_2,
        outputs=negative_prompt_2,
        queue=False,
        api_name=False,
    )
    apply_refiner.change(
        fn=lambda x: gr.update(visible=x),
        inputs=apply_refiner,
        outputs=refiner_params,
        queue=False,
        api_name=False,
    )

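    # Positional order here must match the signature of generate().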
    inputs = [
        prompt,
        negative_prompt,
        prompt_2,
        negative_prompt_2,
        use_negative_prompt,
        use_prompt_2,
        use_negative_prompt_2,
        seed,
        width,
        height,
        guidance_scale_base,
        guidance_scale_refiner,
        num_inference_steps_base,
        num_inference_steps_refiner,
        apply_refiner,
    ]
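    # Every trigger resolves the seed first, then generates; only the
    # prompt.submit chain is exposed through the API, as 'run'.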
    prompt.submit(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name='run',
    )
    negative_prompt.submit(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name=False,
    )
    prompt_2.submit(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name=False,
    )
    negative_prompt_2.submit(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name=False,
    )
    run_button.click(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name=False,
    )
demo.queue(max_size=20).launch()