Update app.py

app.py CHANGED
@@ -33,6 +33,8 @@ from stablepy.diffusers_vanilla.model import scheduler_names
 from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
 import torch
 import re
+import shutil
+
 
 preprocessor_controlnet = {
     "openpose": [
@@ -152,9 +154,9 @@ os.makedirs(directory_vaes, exist_ok=True)
 # - **Download SD 1.5 Models**
 download_model = "https://huggingface.co/frankjoshua/toonyou_beta6/resolve/main/toonyou_beta6.safetensors"
 # - **Download VAEs**
-download_vae = "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-c-1.1-b-0.5.safetensors?download=true, https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-blessed.safetensors?download=true, https://huggingface.co/digiplay/VAE/resolve/main/vividReal_v20.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/kl-f8-anime2_fp16.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/ClearVAE_V2.3_fp16.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/blessed2_fp16.safetensors?download=true
+download_vae = "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-c-1.1-b-0.5.safetensors?download=true, https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-blessed.safetensors?download=true, https://huggingface.co/digiplay/VAE/resolve/main/vividReal_v20.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/kl-f8-anime2_fp16.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/ClearVAE_V2.3_fp16.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/blessed2_fp16.safetensors?download=true"
 # - **Download LoRAs**
-download_lora = "https://civitai.com/api/download/models/
+download_lora = "https://civitai.com/api/download/models/135867, https://civitai.com/api/download/models/135931, https://civitai.com/api/download/models/177492, https://civitai.com/api/download/models/145907, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://civitai.com/api/download/models/28609"
 load_diffusers_format_model = [
     'stabilityai/stable-diffusion-xl-base-1.0',
     'misri/epicrealismXL_v7FinalDestination',
@@ -171,8 +173,8 @@ load_diffusers_format_model = [
     'digiplay/DarkSushi2.5D_v1',
 ]
 
-CIVITAI_API_KEY = ""
-hf_token = ""
+CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
+hf_token = os.environ.get("HF_TOKEN")
 
 # Download stuffs
 for url in [url.strip() for url in download_model.split(',')]:
@@ -302,12 +304,16 @@ class GuiSD:
     def __init__(self):
         self.model = None
 
+    @spaces.GPU
+    def infer_short(self, model, pipe_params):
+        images, image_list = model(**pipe_params)
+        return images
+
     @spaces.GPU(duration=120)
     def infer(self, model, pipe_params):
         images, image_list = model(**pipe_params)
         return images
 
-    # @spaces.GPU
     def generate_pipeline(
         self,
         prompt,
@@ -401,7 +407,16 @@ class GuiSD:
         mask_blur_b,
         mask_padding_b,
     ):
-
+
+        loras_list = [lora1, lora2, lora3, lora4, lora5]
+        for la in loras_list:
+            if (
+                la is not None
+                and "animetarot" in la.lower()
+                and "xl" in model_name.lower()
+            ):
+                gr.Info(f"The LoRA {la} is for SD 1.5, but you are using SDXL.")
+
         task = task_stablepy[task]
 
         # First load
@@ -438,6 +453,8 @@ class GuiSD:
 
         logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)
 
+        print(model_name, vae_model, loras_list)
+
         self.model.load_pipe(
             model_name,
             task_name=task,
@@ -549,15 +566,21 @@ class GuiSD:
 
         # print(pipe_params)
 
-
+        if (
+            (img_height > 1700 and img_width > 1700)
+            or (num_images > 1)
+            or (adetailer_active_a and adetailer_active_b)
+            or (upscaler_model and upscaler_increases_size > 1.7)
+            or (steps > 75)
+            or (image_resolution > 1048)
+        ):
+            print("Inference 2")
+            return self.infer(self.model, pipe_params)
 
+        return self.infer_short(self.model, pipe_params)
 
-sd_gen = GuiSD()
 
-
-title_tab_adetailer = "<h2 style='color: #97BC62;'>Adetailer</h2>"
-title_tab_hires = "<h2 style='color: #97BC62;'>High-resolution</h2>"
-title_tab_settings = "<h2 style='color: #97BC62;'>Settings</h2>"
+sd_gen = GuiSD()
+
 
 CSS ="""
 .contain { display: flex; flex-direction: column; }
@@ -617,10 +640,10 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
             object_fit="contain",
             # height="auto",
            interactive=False,
-            preview=
+            preview=False,
            selected_index=50,
        )
-
+
        with gr.Column(scale=1):
            steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=30, label="Steps")
            cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7.5, label="CFG")
@@ -672,17 +695,17 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
            adapter_conditioning_scale_gui = gr.Slider(minimum=0, maximum=5., step=0.1, value=1, label="Adapter Conditioning Scale")
            adapter_conditioning_factor_gui = gr.Slider(minimum=0, maximum=1., step=0.01, value=0.55, label="Adapter Conditioning Factor (%)")
 
-            with gr.Accordion("LoRA", open=False, visible=
+            with gr.Accordion("LoRA", open=False, visible=True):
                lora1_gui = gr.Dropdown(label="Lora1", choices=lora_model_list)
-                lora_scale_1_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=
+                lora_scale_1_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 1")
                lora2_gui = gr.Dropdown(label="Lora2", choices=lora_model_list)
-                lora_scale_2_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=
+                lora_scale_2_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 2")
                lora3_gui = gr.Dropdown(label="Lora3", choices=lora_model_list)
-                lora_scale_3_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=
+                lora_scale_3_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 3")
                lora4_gui = gr.Dropdown(label="Lora4", choices=lora_model_list)
-                lora_scale_4_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=
+                lora_scale_4_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 4")
                lora5_gui = gr.Dropdown(label="Lora5", choices=lora_model_list)
-                lora_scale_5_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=
+                lora_scale_5_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 5")
 
            with gr.Accordion("Styles", open=False, visible=True):
 
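
A note on the ZeroGPU pattern in GuiSD above: on ZeroGPU hardware, spaces.GPU reserves a GPU only for the duration of the decorated call, and the optional duration= argument raises that call's time budget beyond the default. Keeping a short-budget infer_short next to the 120-second infer lets generate_pipeline send only expensive requests (very large images, several outputs, many steps, heavy upscaling, or double ADetailer passes) to the long slot. Below is a minimal, self-contained sketch of that dispatch; the Worker class and the thresholds in generate() are placeholders for illustration, not the app's exact heuristics.

import spaces


class Worker:
    @spaces.GPU  # default, shorter GPU time budget for cheap requests
    def infer_short(self, model, pipe_params):
        images, image_list = model(**pipe_params)
        return images

    @spaces.GPU(duration=120)  # extended budget for expensive requests
    def infer(self, model, pipe_params):
        images, image_list = model(**pipe_params)
        return images

    def generate(self, model, pipe_params, steps, num_images):
        # Placeholder heuristic: route big jobs to the long-duration slot,
        # everything else to the short one.
        if steps > 75 or num_images > 1:
            return self.infer(model, pipe_params)
        return self.infer_short(model, pipe_params)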
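Similarly, CIVITAI_API_KEY and hf_token are read from environment variables (typically populated from the Space's secrets) rather than hard-coded in app.py. A small sketch of that pattern; the fallback warnings are illustrative additions, not the app's behavior.

import os

# Credentials come from the environment (e.g. Space secrets), never from source.
CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
hf_token = os.environ.get("HF_TOKEN")

if not CIVITAI_API_KEY:
    print("CIVITAI_API_KEY is not set; Civitai downloads that require a key may fail.")
if not hf_token:
    print("HF_TOKEN is not set; gated or private Hugging Face downloads may fail.")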