Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -232,7 +232,7 @@ def merge_incompatible_lora(full_path_lora, lora_scale):
     del weights_sd
     del lora_model
 @spaces.GPU
-def generate_image(prompt, negative, face_emb, face_image, face_kps, image_strength, guidance_scale, face_strength, depth_control_scale,
+def generate_image(prompt, negative, face_emb, face_image, face_kps, image_strength, guidance_scale, face_strength, depth_control_scale, repo_name, loaded_state_dict, lora_scale, sdxl_loras, selected_state_index, st):
     print(loaded_state_dict)
     et = time.time()
     elapsed_time = et - st
@@ -292,8 +292,8 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
         pooled_prompt_embeds=pooled,
         negative_prompt_embeds=negative_conditioning,
         negative_pooled_prompt_embeds=negative_pooled,
-        width=
-        height=
+        width=1024,
+        height=1024,
         image_embeds=face_emb,
         image=face_image,
         strength=1-image_strength,
@@ -308,7 +308,7 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
     last_lora = repo_name
     return image

-def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale,
+def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, sdxl_loras, custom_lora, progress=gr.Progress(track_tqdm=True)):
     print("Custom LoRA: ", custom_lora)
     custom_lora_path = custom_lora[0] if custom_lora else None
     selected_state_index = selected_state.index if selected_state else -1
@@ -362,18 +362,7 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
     print('Small content processing took: ', elapsed_time, 'seconds')

     st = time.time()
-
-    if(aspect_ratio == "portrait"):
-        width = 1024
-        height = 1536
-    elif(aspect_ratio == "landscape"):
-        width = 1536
-        height = 1024
-    else:
-        width = 1024
-        height = 1024
-    print(f"Width: {width}, Height: {height}")
-    image = generate_image(prompt, negative, face_emb, face_image, face_kps, image_strength, guidance_scale, face_strength, depth_control_scale, width, height, repo_name, full_path_lora, lora_scale, sdxl_loras, selected_state_index, st)
+    image = generate_image(prompt, negative, face_emb, face_image, face_kps, image_strength, guidance_scale, face_strength, depth_control_scale, repo_name, full_path_lora, lora_scale, sdxl_loras, selected_state_index, st)
     return (face_image, image), gr.update(visible=True)

 def shuffle_gallery(sdxl_loras):
@@ -481,7 +470,7 @@ def load_custom_lora(link):
     <div class="custom_lora_card">
     <span>Loaded custom LoRA:</span>
     <div class="card_internal">
-    <img src="{image}"
+    <img src="{image}" />
     <div>
     <h3>{title}</h3>
     <small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
@@ -553,7 +542,6 @@ with gr.Blocks(css="custom.css") as demo:
     image_strength = gr.Slider(0, 1, value=0.15, step=0.01, label="Image strength", info="Higher values increase the similarity with the structure/colors of the original photo")
     guidance_scale = gr.Slider(0, 50, value=7, step=0.1, label="Guidance Scale")
     depth_control_scale = gr.Slider(0, 1, value=0.8, step=0.01, label="Zoe Depth ControlNet strenght")
-    aspect_ratio = gr.Radio(choices=["square", "portrait", "landscape"], value="square", label="Aspect Ratio")
     prompt_title = gr.Markdown(
         value="### Click on a LoRA in the gallery to select it",
         visible=True,
@@ -593,7 +581,7 @@ with gr.Blocks(css="custom.css") as demo:
         show_progress=False
     ).success(
         fn=run_lora,
-        inputs=[photo, prompt, negative, weight, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale,
+        inputs=[photo, prompt, negative, weight, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, gr_sdxl_loras, custom_loaded_lora],
         outputs=[result, share_group],
     )
     button.click(
@@ -602,7 +590,7 @@ with gr.Blocks(css="custom.css") as demo:
         show_progress=False
     ).success(
         fn=run_lora,
-        inputs=[photo, prompt, negative, weight, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale,
+        inputs=[photo, prompt, negative, weight, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, gr_sdxl_loras, custom_loaded_lora],
         outputs=[result, share_group],
     )
     share_button.click(None, [], [], js=share_js)
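In short, this commit drops the aspect-ratio selector and its width/height branching, pins the pipeline call to 1024x1024, and trims `run_lora`'s signature and both `.success()` input lists to match. As a minimal, hypothetical sketch of the deleted behavior (the helper name below is not in app.py; the numbers are copied from the removed lines), the old code mapped the radio value to a resolution like this:

```python
# Hypothetical helper, not part of app.py: it only restates the aspect-ratio
# mapping that this commit deletes from run_lora.
def resolve_resolution(aspect_ratio: str) -> tuple[int, int]:
    """Return (width, height) the way the removed code did."""
    if aspect_ratio == "portrait":
        return 1024, 1536
    if aspect_ratio == "landscape":
        return 1536, 1024
    # "square" and any unrecognized value fell through to the default
    return 1024, 1024


if __name__ == "__main__":
    for choice in ("portrait", "landscape", "square"):
        print(choice, resolve_resolution(choice))
```

After the change, `generate_image` no longer accepts width/height and the diffusion call always uses `width=1024, height=1024`. Because Gradio passes the `inputs` list to `fn` positionally, the two `inputs=[...]` lists are shortened in the same commit so their order still lines up with the new `run_lora` signature.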