Huage001 committed on
Commit
5fdb7f7
1 Parent(s): b4bb1b4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +144 -9
app.py CHANGED
@@ -6,7 +6,11 @@ from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
6
  import torch
7
 
8
  device = "cuda" if torch.cuda.is_available() else "cpu"
9
- model_repo_id = "Lykon/dreamshaper-8" #Replace to the model you would like to use
 
 
 
 
10
 
11
  if torch.cuda.is_available():
12
  torch_dtype = torch.float16
@@ -17,7 +21,7 @@ MAX_SEED = np.iinfo(np.int32).max
17
  MAX_IMAGE_SIZE = 1024
18
 
19
  #@spaces.GPU #[uncomment to use ZeroGPU]
20
- def infer_t2i(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
21
 
22
  if randomize_seed:
23
  seed = random.randint(0, MAX_SEED)
@@ -40,7 +44,7 @@ def infer_t2i(prompt, negative_prompt, seed, randomize_seed, width, height, guid
40
  return image, seed
41
 
42
  #@spaces.GPU #[uncomment to use ZeroGPU]
43
- def infer_i2i(prompt, image, strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
44
 
45
  if randomize_seed:
46
  seed = random.randint(0, MAX_SEED)
@@ -64,6 +68,34 @@ def infer_i2i(prompt, image, strength, negative_prompt, seed, randomize_seed, wi
64
 
65
  return image, seed
66
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  examples = [
68
  "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
69
  "An astronaut riding a green horse",
@@ -116,6 +148,8 @@ with gr.Blocks(css=css) as demo:
116
  step=1,
117
  value=0,
118
  )
 
 
119
 
120
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
121
 
@@ -126,7 +160,7 @@ with gr.Blocks(css=css) as demo:
126
  minimum=256,
127
  maximum=MAX_IMAGE_SIZE,
128
  step=32,
129
- value=1024, #Replace with defaults that work for your model
130
  )
131
 
132
  height = gr.Slider(
@@ -134,7 +168,7 @@ with gr.Blocks(css=css) as demo:
134
  minimum=256,
135
  maximum=MAX_IMAGE_SIZE,
136
  step=32,
137
- value=1024, #Replace with defaults that work for your model
138
  )
139
 
140
  with gr.Row():
@@ -162,7 +196,7 @@ with gr.Blocks(css=css) as demo:
162
 
163
  run_button.click(
164
  fn=infer_t2i,
165
- inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
166
  outputs = [result, seed]
167
  )
168
 
@@ -207,6 +241,8 @@ with gr.Blocks(css=css) as demo:
207
  )
208
 
209
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
 
210
 
211
  with gr.Row():
212
 
@@ -215,7 +251,7 @@ with gr.Blocks(css=css) as demo:
215
  minimum=256,
216
  maximum=MAX_IMAGE_SIZE,
217
  step=32,
218
- value=1024, #Replace with defaults that work for your model
219
  )
220
 
221
  height = gr.Slider(
@@ -223,7 +259,7 @@ with gr.Blocks(css=css) as demo:
223
  minimum=256,
224
  maximum=MAX_IMAGE_SIZE,
225
  step=32,
226
- value=1024, #Replace with defaults that work for your model
227
  )
228
 
229
  with gr.Row():
@@ -259,7 +295,106 @@ with gr.Blocks(css=css) as demo:
259
 
260
  run_button.click(
261
  fn=infer_i2i,
262
- inputs = [prompt, image_upload_input, editing_strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
263
  outputs = [result, seed]
264
  )
265
 
 
6
  import torch
7
 
8
  device = "cuda" if torch.cuda.is_available() else "cpu"
9
+ all_model_id = {
10
+ "DreamShaper-8": "Lykon/dreamshaper-8",
11
+ "SD-v1.4": "CompVis/stable-diffusion-v1-4",
12
+ "RealisticVision-v4.0": "SG161222/Realistic_Vision_V4.0_noVAE"
13
+ }
14
 
15
  if torch.cuda.is_available():
16
  torch_dtype = torch.float16
 
21
  MAX_IMAGE_SIZE = 1024
22
 
23
  #@spaces.GPU #[uncomment to use ZeroGPU]
24
+ def infer_t2i(model_repo_id, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
25
 
26
  if randomize_seed:
27
  seed = random.randint(0, MAX_SEED)
 
44
  return image, seed
45
 
46
  #@spaces.GPU #[uncomment to use ZeroGPU]
47
+ def infer_i2i(model_repo_id, prompt, image, strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
48
 
49
  if randomize_seed:
50
  seed = random.randint(0, MAX_SEED)
 
68
 
69
  return image, seed
70
 
71
#@spaces.GPU #[uncomment to use ZeroGPU]
def infer_ip_adapter(model_repo_id, prompt, image, scale, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
    """Generate an image from text, conditioned on a reference image via IP-Adapter.

    Args:
        model_repo_id: Hugging Face repo id of the base SD model, or a display
            name listed in ``all_model_id`` (mapped to its repo id below).
        prompt: Text prompt.
        image: PIL reference image used as the IP-Adapter condition.
        scale: IP-Adapter conditioning strength, typically in [0, 1].
        negative_prompt: Text to steer generation away from.
        seed: RNG seed; replaced by a random one when ``randomize_seed``.
        randomize_seed: If True, draw a fresh seed in [0, MAX_SEED].
        width, height: Output resolution; the reference image is resized to it.
        guidance_scale: Classifier-free guidance weight.
        num_inference_steps: Number of denoising steps.
        progress: Gradio progress tracker (tracks tqdm inside diffusers).

    Returns:
        Tuple of (generated PIL image, seed actually used).
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    # Accept either a dropdown display name or a raw repo id (backward-compatible).
    model_repo_id = all_model_id.get(model_repo_id, model_repo_id)

    pipe = StableDiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
    pipe = pipe.to(device)
    # BUG FIX: original called these on an undefined name `pipeline`;
    # the pipeline variable in this function is `pipe`.
    pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin")
    pipe.set_ip_adapter_scale(scale)

    image = pipe(
        prompt = prompt,
        negative_prompt = negative_prompt,
        guidance_scale = guidance_scale,
        num_inference_steps = num_inference_steps,
        # BUG FIX: text-to-image StableDiffusionPipeline takes no `image`/`strength`
        # kwargs (and `strength` was an undefined name here); the reference image
        # is supplied solely through `ip_adapter_image`.
        ip_adapter_image = image.resize((width, height)),
        width = width,
        height = height,
        generator = generator
    ).images[0]

    return image, seed
98
+
99
  examples = [
100
  "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
101
  "An astronaut riding a green horse",
 
148
  step=1,
149
  value=0,
150
  )
151
+
152
+ model_choice = gr.Dropdown(label="Choose Model", choices=list(all_model_id.keys()), value=list(all_model_id.keys())[0])
153
 
154
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
155
 
 
160
  minimum=256,
161
  maximum=MAX_IMAGE_SIZE,
162
  step=32,
163
+ value=512, #Replace with defaults that work for your model
164
  )
165
 
166
  height = gr.Slider(
 
168
  minimum=256,
169
  maximum=MAX_IMAGE_SIZE,
170
  step=32,
171
+ value=512, #Replace with defaults that work for your model
172
  )
173
 
174
  with gr.Row():
 
196
 
197
  run_button.click(
198
  fn=infer_t2i,
199
+ inputs = [model_choice, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
200
  outputs = [result, seed]
201
  )
202
 
 
241
  )
242
 
243
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
244
+
245
+ model_choice = gr.Dropdown(label="Choose Model", choices=list(all_model_id.keys()), value=list(all_model_id.keys())[0])
246
 
247
  with gr.Row():
248
 
 
251
  minimum=256,
252
  maximum=MAX_IMAGE_SIZE,
253
  step=32,
254
+ value=512, #Replace with defaults that work for your model
255
  )
256
 
257
  height = gr.Slider(
 
259
  minimum=256,
260
  maximum=MAX_IMAGE_SIZE,
261
  step=32,
262
+ value=512, #Replace with defaults that work for your model
263
  )
264
 
265
  with gr.Row():
 
295
 
296
  run_button.click(
297
  fn=infer_i2i,
298
+ inputs = [model_choice, prompt, image_upload_input, editing_strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
299
+ outputs = [result, seed]
300
+ )
301
+
302
+ with gr.Tab("IP-Adapter"):
303
+
304
+ with gr.Column(elem_id="col-container"):
305
+ gr.Markdown(f"""
306
+ # Text-to-Image Gradio Template
307
+ """)
308
+
309
+ with gr.Row():
310
+
311
+ prompt = gr.Text(
312
+ label="Prompt",
313
+ show_label=False,
314
+ max_lines=1,
315
+ placeholder="Enter your prompt",
316
+ container=False,
317
+ )
318
+
319
+ run_button = gr.Button("Run", scale=0)
320
+
321
+ image_upload_input = gr.Image(label="Upload an Image", type="pil")
322
+
323
+ result = gr.Image(label="Result", show_label=False)
324
+
325
+ with gr.Accordion("Advanced Settings", open=False):
326
+
327
+ negative_prompt = gr.Text(
328
+ label="Negative prompt",
329
+ max_lines=1,
330
+ placeholder="Enter a negative prompt",
331
+ visible=False,
332
+ )
333
+
334
+ seed = gr.Slider(
335
+ label="Seed",
336
+ minimum=0,
337
+ maximum=MAX_SEED,
338
+ step=1,
339
+ value=0,
340
+ )
341
+
342
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
343
+
344
+ model_choice = gr.Dropdown(label="Choose Model", choices=list(all_model_id.keys()), value=list(all_model_id.keys())[0])
345
+
346
+ with gr.Row():
347
+
348
+ width = gr.Slider(
349
+ label="Width",
350
+ minimum=256,
351
+ maximum=MAX_IMAGE_SIZE,
352
+ step=32,
353
+ value=512, #Replace with defaults that work for your model
354
+ )
355
+
356
+ height = gr.Slider(
357
+ label="Height",
358
+ minimum=256,
359
+ maximum=MAX_IMAGE_SIZE,
360
+ step=32,
361
+ value=512, #Replace with defaults that work for your model
362
+ )
363
+
364
+ with gr.Row():
365
+
366
+ guidance_scale = gr.Slider(
367
+ label="Guidance scale",
368
+ minimum=0.0,
369
+ maximum=10.0,
370
+ step=0.1,
371
+ value=7.5, #Replace with defaults that work for your model
372
+ )
373
+
374
+ num_inference_steps = gr.Slider(
375
+ label="Number of inference steps",
376
+ minimum=1,
377
+ maximum=50,
378
+ step=1,
379
+ value=25, #Replace with defaults that work for your model
380
+ )
381
+
382
+ ip_adapter_scale = gr.Slider(
383
+ label="Strength of image condition",
384
+ minimum=0,
385
+ maximum=1,
386
+ step=0.01,
387
+ value=0.4, #Replace with defaults that work for your model
388
+ )
389
+
390
+ gr.Examples(
391
+ examples = examples,
392
+ inputs = [prompt]
393
+ )
394
+
395
+ run_button.click(
396
+ fn=infer_ip_adapter,
397
+ inputs = [model_choice, prompt, image_upload_input, ip_adapter_scale, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
398
  outputs = [result, seed]
399
  )
400