Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -185,6 +185,10 @@ download_vae = "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/m
 download_lora = "https://civitai.com/api/download/models/28907, https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://civitai.com/api/download/models/145907, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://civitai.com/api/download/models/28609, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
 load_diffusers_format_model = [
     'stabilityai/stable-diffusion-xl-base-1.0',
+    'black-forest-labs/FLUX.1-dev',
+    'John6666/blue-pencil-flux1-v021-fp8-flux',
+    'John6666/wai-ani-flux-v10forfp8-fp8-flux',
+    'John6666/xe-anime-flux-v04-fp8-flux',
     'cagliostrolab/animagine-xl-3.1',
     'John6666/epicrealism-xl-v8kiss-sdxl',
     'misri/epicrealismXL_v7FinalDestination',
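Of the entries visible in this hunk, only the four new repos have ids containing "flux", and the dtype switch introduced later in this commit keys on exactly that substring. A minimal sketch of the check, with the list trimmed to what this hunk shows:

# Sketch only: identify FLUX checkpoints by repo id, the same way the dtype switch below does.
entries = [
    'stabilityai/stable-diffusion-xl-base-1.0',
    'black-forest-labs/FLUX.1-dev',
    'John6666/blue-pencil-flux1-v021-fp8-flux',
    'John6666/wai-ani-flux-v10forfp8-fp8-flux',
    'John6666/xe-anime-flux-v04-fp8-flux',
    'cagliostrolab/animagine-xl-3.1',
]
flux_entries = [m for m in entries if "flux" in m.lower()]
print(flux_entries)  # exactly the four repos added in this hunk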
@@ -404,7 +408,7 @@ class GuiSD:
 
         print("Loading model...")
         self.model = Model_Diffusers(
-            base_model_id="
+            base_model_id="Lykon/dreamshaper-8",
             task_name="txt2img",
             vae_model=None,
             type_model_precision=torch.float16,
@@ -433,12 +437,13 @@ class GuiSD:
             model_name,
             task_name=task_stablepy[task],
             vae_model=vae_model if vae_model != "None" else None,
-            type_model_precision=torch.float16,
+            type_model_precision=torch.float16 if "flux" not in model_name.lower() else torch.bfloat16,
             retain_task_model_in_cache=False,
         )
         yield f"Model loaded: {model_name}"
 
-    @spaces.GPU
+    @spaces.GPU(duration=35)
+    @torch.inference_mode()
     def generate_pipeline(
         self,
         prompt,
@@ -597,9 +602,9 @@ class GuiSD:
             params_ip_mode.append(modeip)
             params_ip_scale.append(scaleip)
 
+        model_precision = torch.float16 if "flux" not in model_name.lower() else torch.bfloat16
+
         # First load
-        model_precision = torch.float16
-        self.model.device = torch.device("cuda:0")
         if not self.model:
             print("Loading model...")
             self.model = Model_Diffusers(
@@ -750,6 +755,11 @@ class GuiSD:
             "ip_adapter_scale": params_ip_scale,
         }
 
+        self.model.device = torch.device("cuda:0")
+        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
+            self.model.pipe.transformer.to(self.model.device)
+            print("transformer to cuda")
+
         info_state = "PROCESSING "
         for img, seed, image_path, metadata in self.model(**pipe_params):
             info_state += ">"
@@ -1638,4 +1648,4 @@ app.launch(
     show_error=True,
     debug=True,
     allowed_paths=["./images/"],
-)
+)