ChenWu98 committed
Commit fc379d8 • 1 Parent(s): 3e4185b

Debug cac support

Files changed (2):
  1. README.md +1 -1
  2. app.py +12 -6
README.md CHANGED
@@ -7,7 +7,7 @@ sdk: gradio
 sdk_version: 3.9
 app_file: app.py
 pinned: false
-license: mit
+license: apache-2.0
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -228,7 +228,7 @@ def get_equalizer(text: str, word_select: Union[int, Tuple[int, ...]], values: U
 
 def inference(source_prompt, target_prompt, source_guidance_scale=1, guidance_scale=5, num_inference_steps=100,
 width=512, height=512, seed=0, img=None, strength=0.7,
-cross_attention_control=None, cross_replace_steps=0.8, self_replace_steps=0.4):
+cross_attention_control="None", cross_replace_steps=0.8, self_replace_steps=0.4):
 
 torch.manual_seed(seed)
 
@@ -236,6 +236,7 @@ def inference(source_prompt, target_prompt, source_guidance_scale=1, guidance_sc
 img = img.resize((int(img.width * ratio), int(img.height * ratio)))
 
 # create the CAC controller.
+assert cross_attention_control in ['Replace', 'Refine', "None"]
 if cross_attention_control == "replace":
 controller = AttentionReplace([source_prompt, target_prompt],
 num_inference_steps,
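The two hunks above change the CAC default from Python None to the string "None" and guard the value with an assert. As a reading aid, here is a minimal, runnable sketch of that dispatch pattern. The controller classes are simplified stand-ins for the prompt-to-prompt AttentionReplace/AttentionRefine controllers used in app.py (their real constructors take more arguments), and the sketch branches on the capitalized literals from the new assert, whereas the existing branch shown above still compares against lowercase "replace".

```python
class AttentionReplace:
    # Stand-in for the prompt-to-prompt controller; the real class takes more arguments.
    def __init__(self, prompts, num_steps, cross_replace_steps, self_replace_steps):
        self.prompts = prompts
        self.num_steps = num_steps
        self.cross_replace_steps = cross_replace_steps
        self.self_replace_steps = self_replace_steps


class AttentionRefine(AttentionReplace):
    pass


def make_controller(source_prompt, target_prompt, num_inference_steps,
                    cross_attention_control="None",
                    cross_replace_steps=0.8, self_replace_steps=0.4):
    # The Gradio radio presumably passes the string "None" rather than Python None,
    # hence the string default and the membership check on string values.
    assert cross_attention_control in ["Replace", "Refine", "None"]
    if cross_attention_control == "Replace":
        return AttentionReplace([source_prompt, target_prompt], num_inference_steps,
                                cross_replace_steps, self_replace_steps)
    if cross_attention_control == "Refine":
        return AttentionRefine([source_prompt, target_prompt], num_inference_steps,
                               cross_replace_steps, self_replace_steps)
    return None  # CAC disabled


print(type(make_controller("a cat", "a dog", 100, "Replace")).__name__)  # -> AttentionReplace
```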
@@ -282,8 +283,8 @@ with gr.Blocks(css=css) as demo:
 </div>
 <p>
 Demo for CycleDiffusion with Stable Diffusion. <br>
-CycleDiffusion (<a href="https://github.com/ChenWu98/cycle-diffusion">Github</a> | <a href="https://arxiv.org/abs/2210.05559">📄 Paper link</a> | <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/cycle_diffusion">🧨 Pipeline doc</a>) is an image-to-image translation method that supports stochastic samplers for diffusion models. <br>
-It also supports Cross Attention Control (<a href="https://github.com/google/prompt-to-prompt">Github</a> | <a href="https://arxiv.org/abs/2208.01626">📄 Paper link</a>), which is a technique to transfer the attention map from the source prompt to the target prompt. <br>
+CycleDiffusion (<a href="https://arxiv.org/abs/2210.05559">📄 Paper link</a> | <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/cycle_diffusion">🧨 Pipeline doc</a>) is an image-to-image translation method that supports stochastic samplers for diffusion models. <br>
+It also supports Cross Attention Control (<a href="https://arxiv.org/abs/2208.01626">📄 Paper link</a>), which is a technique to transfer the attention map from the source prompt to the target prompt. <br>
 </p>
 <p>You can skip the queue in the colab: <a href="https://colab.research.google.com/gist/ChenWu98/0aa4fe7be80f6b45d3d055df9f14353a/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg"></a></p>
 Running on <b>{device_print}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
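The description hunk above points readers to the 🧨 CycleDiffusion pipeline doc. For context, a hedged sketch of how that diffusers pipeline is typically driven; the checkpoint id, image path, and prompts below are placeholders, and exact argument names may differ across diffusers versions:

```python
import torch
from PIL import Image
from diffusers import CycleDiffusionPipeline, DDIMScheduler

# CycleDiffusion is used with a DDIM scheduler and eta > 0 (stochastic sampling).
model_id = "CompVis/stable-diffusion-v1-4"  # placeholder checkpoint
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler).to("cuda")

init_image = Image.open("horse.png").convert("RGB").resize((512, 512))  # placeholder input image

image = pipe(
    prompt="An astronaut riding an elephant",     # target prompt
    source_prompt="An astronaut riding a horse",  # source prompt
    image=init_image,
    num_inference_steps=100,
    strength=0.8,
    guidance_scale=2,         # target guidance scale
    source_guidance_scale=1,  # source guidance scale
    eta=0.1,
).images[0]
image.save("edited.png")
```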
@@ -314,9 +315,8 @@ with gr.Blocks(css=css) as demo:
 guidance_scale = gr.Slider(label="Target guidance scale", value=5, minimum=1, maximum=10)
 with gr.Row():
 strength = gr.Slider(label="Strength", value=0.7, minimum=0.5, maximum=1, step=0.01)
-
 with gr.Row():
-generate = gr.Button(value="Edit")
+generate1 = gr.Button(value="Edit")
 with gr.Tab("Basic options"):
 with gr.Group():
 with gr.Row():
@@ -326,6 +326,8 @@ with gr.Blocks(css=css) as demo:
 
 with gr.Row():
 seed = gr.Slider(0, 2147483647, label='Seed', value=0, step=1)
+with gr.Row():
+generate2 = gr.Button(value="Edit")
 
 with gr.Tab("CAC options"):
 with gr.Group():
@@ -335,11 +337,15 @@ with gr.Blocks(css=css) as demo:
 # If not "None", the following two parameters will be used.
 cross_replace_steps = gr.Slider(label="Cross replace steps", value=0.8, minimum=0.0, maximum=1, step=0.01)
 self_replace_steps = gr.Slider(label="Self replace steps", value=0.4, minimum=0.0, maximum=1, step=0.01)
+with gr.Row():
+generate3 = gr.Button(value="Edit")
 
 inputs = [source_prompt, target_prompt, source_guidance_scale, guidance_scale, num_inference_steps,
 width, height, seed, img, strength,
 cross_attention_control, cross_replace_steps, self_replace_steps]
-generate.click(inference, inputs=inputs, outputs=image_out)
+generate1.click(inference, inputs=inputs, outputs=image_out)
+generate2.click(inference, inputs=inputs, outputs=image_out)
+generate3.click(inference, inputs=inputs, outputs=image_out)
 
 ex = gr.Examples(
 [
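The remaining hunks replace the single Edit button with one button per tab (generate1, generate2, generate3), all wired to the same callback. A minimal sketch of that Gradio pattern follows; the callback and components here are illustrative stand-ins, not the app's real inference signature:

```python
import gradio as gr


def inference(prompt):
    # Illustrative stand-in for the app's inference() callback.
    return f"edited: {prompt}"


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Target prompt")
    output = gr.Textbox(label="Output")

    with gr.Tab("Main"):
        generate1 = gr.Button(value="Edit")
    with gr.Tab("Basic options"):
        generate2 = gr.Button(value="Edit")
    with gr.Tab("CAC options"):
        generate3 = gr.Button(value="Edit")

    # Each tab gets its own button, but all three trigger the same function with the
    # same inputs/outputs, mirroring the generate1/2/3.click() calls in this commit.
    for btn in (generate1, generate2, generate3):
        btn.click(inference, inputs=[prompt], outputs=output)

if __name__ == "__main__":
    demo.launch()
```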
 