Upload 2 files
- dc.py +11 -11
- modutils.py +2 -2
dc.py
CHANGED
@@ -360,8 +360,8 @@ class GuiSD:
             retain_task_model_in_cache=False,
             device="cpu",
         )
-        self.model.load_beta_styles()
-
+        #self.model.load_beta_styles()
+        self.model.device = torch.device("cpu") #
 
     def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
         #progress(0, desc="Start inference...")
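The first hunk stops calling `load_beta_styles()` and instead pins the wrapper's device attribute to CPU right after construction, which fits ZeroGPU's model: keep weights off the GPU while idle, move them only inside a GPU-allocated call. A minimal sketch of that pattern with diffusers (the pipeline class and checkpoint id below are illustrative stand-ins, not the Space's actual model wrapper):

from diffusers import DiffusionPipeline

# Load once and park the weights on the CPU while the Space is idle.
pipe = DiffusionPipeline.from_pretrained("org/some-checkpoint")  # hypothetical repo id
pipe.to("cpu")

# Later, inside the GPU-allocated handler, move just before inference:
# pipe.to("cuda:0")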
@@ -504,7 +504,7 @@
         mode_ip2,
         scale_ip2,
         pag_scale,
-        progress=gr.Progress(track_tqdm=True),
+        #progress=gr.Progress(track_tqdm=True),
     ):
         #progress(0, desc="Preparing inference...")
 
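The second hunk comments out the `progress=gr.Progress(track_tqdm=True)` parameter, so Gradio no longer injects a progress tracker into this wrapper; `infer_short` above still declares its own. For context, a stripped-down example of what such a parameter does when it is present (assumed minimal wiring, not the Space's code):

import time
import gradio as gr

def slow_task(n, progress=gr.Progress(track_tqdm=True)):
    progress(0, desc="Starting...")
    for _ in progress.tqdm(range(int(n))):  # each step advances the UI bar
        time.sleep(0.1)
    return "done"

demo = gr.Interface(slow_task, gr.Number(value=10), "text")
# demo.launch()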
@@ -614,15 +614,15 @@
             "high_threshold": high_threshold,
             "value_threshold": value_threshold,
             "distance_threshold": distance_threshold,
-            "lora_A": lora1 if lora1 != "None" else None,
+            "lora_A": lora1 if lora1 != "None" and lora1 != "" else None,
             "lora_scale_A": lora_scale1,
-            "lora_B": lora2 if lora2 != "None" else None,
+            "lora_B": lora2 if lora2 != "None" and lora2 != "" else None,
             "lora_scale_B": lora_scale2,
-            "lora_C": lora3 if lora3 != "None" else None,
+            "lora_C": lora3 if lora3 != "None" and lora3 != "" else None,
             "lora_scale_C": lora_scale3,
-            "lora_D": lora4 if lora4 != "None" else None,
+            "lora_D": lora4 if lora4 != "None" and lora4 != "" else None,
             "lora_scale_D": lora_scale4,
-            "lora_E": lora5 if lora5 != "None" else None,
+            "lora_E": lora5 if lora5 != "None" and lora5 != "" else None,
             "lora_scale_E": lora_scale5,
             ## BEGIN MOD
             "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
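Each LoRA slot's guard now also treats the empty string as "no selection", since Gradio dropdowns can yield "" as well as the "None" placeholder. The five repeated conditionals could be factored into a helper; a sketch (the helper name is mine, not from the Space):

def normalize_lora(choice):
    """Map Gradio placeholder values ("None", "") to a real None."""
    return choice if choice not in ("None", "") else None

# e.g. "lora_A": normalize_lora(lora1), "lora_scale_A": lora_scale1, ...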
@@ -672,14 +672,14 @@
         }
 
         self.model.device = torch.device("cuda:0")
-        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
+        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5 and loras_list != [""] * 5:
             self.model.pipe.transformer.to(self.model.device)
             print("transformer to cuda")
 
         #progress(1, desc="Inference preparation completed. Starting inference...")
 
         info_state = "" # for yield version
-        return self.infer_short(self.model, pipe_params
+        return self.infer_short(self.model, pipe_params), info_state
         ## END MOD
 
 def dynamic_gpu_duration(func, duration, *args):
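This hunk extends the transformer-offload guard the same way (a list of five empty strings also counts as "no LoRAs selected") and completes the previously truncated return so the method hands back both the inference call's result and `info_state`. The guard as a standalone predicate, for illustration (names assumed):

def any_lora_selected(loras_list):
    """True if at least one entry is a real LoRA path rather than a placeholder."""
    return any(l not in ("None", "") for l in loras_list)

# if hasattr(self.model.pipe, "transformer") and any_lora_selected(loras_list):
#     self.model.pipe.transformer.to(self.model.device)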
@@ -814,7 +814,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     progress(1, desc="Preparation completed. Starting inference...")
 
     progress(0, desc="Loading model...")
-    sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0]
+    sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
     progress(1, desc="Model loaded.")
     progress(0, desc="Starting Inference...")
     images, info = sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
modutils.py
CHANGED
@@ -136,7 +136,7 @@ def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
     dt_now = datetime.now(timezone(timedelta(hours=9)))
     basename = dt_now.strftime('%Y%m%d_%H%M%S_')
     i = 1
-    if not images: return images
+    if not images: return images, gr.update(visible=False)
     output_images = []
     output_paths = []
     for image in images:
@@ -153,7 +153,7 @@ def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
         output_paths.append(str(newpath))
         output_images.append((str(newpath), str(filename)))
     progress(1, desc="Gallery updated.")
-    return gr.update(value=output_images), gr.update(value=output_paths
+    return gr.update(value=output_images), gr.update(value=output_paths, visible=True)
 
 
 def download_private_repo(repo_id, dir_path, is_replace):
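Both modutils changes enforce the same contract: a Gradio handler wired to two output components must return two values on every code path. The empty-input early return now supplies a `gr.update(visible=False)` for the file list, and the normal path regains its closing parenthesis and makes the list visible again. A self-contained sketch of that wiring (component layout assumed; `save_stub` stands in for `save_gallery_images`):

import gradio as gr

def save_stub(images):
    if not images:
        # keep the gallery as-is, hide the (empty) file list
        return images, gr.update(visible=False)
    # normally: save files and collect their paths here...
    return gr.update(value=images), gr.update(value=[], visible=True)

with gr.Blocks() as demo:
    gallery = gr.Gallery()
    file_list = gr.Files(visible=False)  # hidden until something is saved
    gr.Button("Save").click(save_stub, inputs=gallery, outputs=[gallery, file_list])
# demo.launch()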