linoyts (HF staff) committed
Commit 9724323 · 1 parent: d5ce88c

controlnet support

Files changed (1)
app.py +43 -14
app.py CHANGED
@@ -65,8 +65,8 @@ def generate(slider_x, slider_y, prompt, seed, iterations, steps,
              x_concept_1, x_concept_2, y_concept_1, y_concept_2,
              avg_diff_x_1, avg_diff_x_2,
              avg_diff_y_1, avg_diff_y_2,
-             img2img_type = None,
-             img = None):
+             img2img_type = None, img = None,
+             controlnet_scale=None, ip_adapter_scale=None):
 
     start_time = time.time()
     # check if avg diff for directions need to be re-calculated
@@ -93,7 +93,7 @@ def generate(slider_x, slider_y, prompt, seed, iterations, steps,
 
     if img2img_type=="controlnet canny" and img is not None:
         control_img = process_controlnet_img(img)
-        image = clip_slider.generate(prompt, image=control_img, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1))
+        image = clip_slider.generate(prompt, image=control_img, controlnet_conditioning_scale=controlnet_scale, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1))
     elif img2img_type=="ip adapter" and img is not None:
         image = clip_slider.generate(prompt, ip_adapter_image=img, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=(avg_diff_0,avg_diff_1), avg_diff_2nd=(avg_diff_2nd_0,avg_diff_2nd_1))
     else: # text to image
@@ -115,12 +115,13 @@ def generate(slider_x, slider_y, prompt, seed, iterations, steps,
 @spaces.GPU
 def update_scales(x,y,prompt,seed, steps,
                   avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2,
-                  img2img_type = None, img = None):
+                  img2img_type = None, img = None,
+                  controlnet_scale=None, ip_adapter_scale=None):
     avg_diff = (avg_diff_x_1.cuda(), avg_diff_x_2.cuda())
     avg_diff_2nd = (avg_diff_y_1.cuda(), avg_diff_y_2.cuda())
     if img2img_type=="controlnet canny" and img is not None:
         control_img = process_controlnet_img(img)
-        image = clip_slider.generate(prompt, image=control_img, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
+        image = clip_slider.generate(prompt, image=control_img, controlnet_conditioning_scale=controlnet_scale, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
     elif img2img_type=="ip adapter" and img is not None:
         image = clip_slider.generate(prompt, ip_adapter_image=img, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
     else:
@@ -191,13 +192,20 @@ with gr.Blocks(css=css) as demo:
         prompt = gr.Textbox(label="Prompt")
         submit = gr.Button("Submit")
     with gr.Group(elem_id="group"):
-        x = gr.Slider(minimum=-10, value=0, maximum=10, elem_id="x", interactive=False)
-        y = gr.Slider(minimum=-10, value=0, maximum=10, elem_id="y", interactive=False)
+        x = gr.Slider(minimum=-7, value=0, maximum=7, elem_id="x", interactive=False)
+        y = gr.Slider(minimum=-7, value=0, maximum=7, elem_id="y", interactive=False)
         output_image = gr.Image(elem_id="image_out")
 
     with gr.Accordion(label="advanced options", open=False):
-        iterations = gr.Slider(label = "num iterations", minimum=0, value=100, maximum=300)
+        iterations = gr.Slider(label = "num iterations", minimum=0, value=200, maximum=400)
         steps = gr.Slider(label = "num inference steps", minimum=1, value=8, maximum=30)
+        guidance_scale = gr.Slider(
+            label="Guidance scale",
+            minimum=0.1,
+            maximum=10.0,
+            step=0.1,
+            value=5,
+        )
         seed = gr.Slider(minimum=0, maximum=np.iinfo(np.int32).max, label="Seed", interactive=True, randomize=True)
 
 
@@ -218,18 +226,39 @@ with gr.Blocks(css=css) as demo:
     with gr.Accordion(label="advanced options", open=False):
         iterations_a = gr.Slider(label = "num iterations", minimum=0, value=200, maximum=300)
         steps_a = gr.Slider(label = "num inference steps", minimum=1, value=8, maximum=30)
+        guidance_scale_a = gr.Slider(
+            label="Guidance scale",
+            minimum=0.1,
+            maximum=10.0,
+            step=0.1,
+            value=5,
+        )
+        controlnet_conditioning_scale = gr.Slider(
+            label="controlnet conditioning scale",
+            minimum=0.5,
+            maximum=5.0,
+            step=0.1,
+            value=0.7,
+        )
+        ip_adapter_scale = gr.Slider(
+            label="ip adapter scale",
+            minimum=0.5,
+            maximum=5.0,
+            step=0.1,
+            value=0.8,
+        )
         seed_a = gr.Slider(minimum=0, maximum=np.iinfo(np.int32).max, label="Seed", interactive=True, randomize=True)
 
     submit.click(fn=generate,
-                 inputs=[slider_x, slider_y, prompt, seed, iterations, steps, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2],
+                 inputs=[slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2],
                  outputs=[x, y, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2, output_image])
-    x.change(fn=update_scales, inputs=[x,y, prompt, seed, steps, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image])
-    y.change(fn=update_scales, inputs=[x,y, prompt, seed, steps, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image])
+    x.change(fn=update_scales, inputs=[x,y, prompt, seed, steps, guidance_scale, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image])
+    y.change(fn=update_scales, inputs=[x,y, prompt, seed, steps, guidance_scale, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image])
     submit_a.click(fn=generate,
-                   inputs=[slider_x_a, slider_y_a, prompt_a, seed_a, iterations_a, steps_a, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2],
+                   inputs=[slider_x_a, slider_y_a, prompt_a, seed_a, iterations_a, steps_a, guidance_scale_a, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2, img2img_type, image, controlnet_conditioning_scale, ip_adapter_scale],
                    outputs=[x_a, y_a, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2, output_image_a])
-    x_a.change(fn=update_scales, inputs=[x_a,y_a, prompt_a, seed_a, steps_a, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image_a])
-    y_a.change(fn=update_scales, inputs=[x_a,y_a, prompt, seed_a, steps_a, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2], outputs=[output_image_a])
+    x_a.change(fn=update_scales, inputs=[x_a,y_a, prompt_a, seed_a, steps_a, guidance_scale_a, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2, img2img_type, image, controlnet_conditioning_scale, ip_adapter_scale], outputs=[output_image_a])
+    y_a.change(fn=update_scales, inputs=[x_a,y_a, prompt, seed_a, steps_a, guidance_scale_a, avg_diff_x_1, avg_diff_x_2, avg_diff_y_1, avg_diff_y_2, img2img_type, image, controlnet_conditioning_scale, ip_adapter_scale], outputs=[output_image_a])
 
 
 if __name__ == "__main__":
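
A note on the ControlNet branch wired up above: generate and update_scales call process_controlnet_img(img) to turn the uploaded image into a conditioning image before passing it to clip_slider.generate(..., image=control_img, controlnet_conditioning_scale=controlnet_scale). The body of process_controlnet_img is not part of this diff, so the helper below (make_canny_condition, the cv2/PIL usage, and the 100/200 thresholds) is only a hedged sketch of what a typical Canny preprocessor for a canny ControlNet looks like, not the Space's actual implementation.

# Hypothetical sketch only -- the Space's real process_controlnet_img lives
# elsewhere in app.py and may differ from this.
import cv2
import numpy as np
from PIL import Image

def make_canny_condition(img, low_threshold=100, high_threshold=200):
    # Convert the PIL input to grayscale, extract Canny edges, and replicate
    # the single-channel edge map to 3 channels so it can be used as a
    # ControlNet conditioning image.
    gray = np.array(img.convert("L"))
    edges = cv2.Canny(gray, low_threshold, high_threshold)
    edges = np.stack([edges] * 3, axis=-1)
    return Image.fromarray(edges)

Under that assumption, control_img = make_canny_condition(img) would fill the role that process_controlnet_img(img) plays in the branches above.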