Minor process and UI improvements
app.py
CHANGED
@@ -217,13 +217,17 @@ def run_inference(config, config_history, pipeline, progress=gr.Progress(track_t
     # str_config = str_config.replace("'", '"').replace('None', 'null').replace('False', 'false')
     # config = json.loads(str_config)
 
-    if str(config["model"]) != 'None' and str(config["model"]) != 'null' and str(config["scheduler"]) != 'None':
+    if str(config["model"]) != 'None' and str(config["model"]) != 'null' and str(config["model"]) != '' and str(config["scheduler"]) != 'None':
 
-        progress(
+        progress(1, desc="Initializing pipeline...")
 
         torch.backends.cuda.matmul.allow_tf32 = get_bool(config["allow_tensorfloat32"]) # Use TensorFloat-32 as of https://huggingface.co/docs/diffusers/main/en/optimization/fp16 faster, but slightly less accurate computations
-
-
+
+        # MANUAL SEED/GENERATOR
+        if config["manual_seed"] is None or config["manual_seed"] == '' or int(config["manual_seed"]) < 0:
+            generator = None
+        else:
+            generator = torch.manual_seed(int(config["manual_seed"]))
 
         # INIT PIPELINE
         pipeline = DiffusionPipeline.from_pretrained(
@@ -232,6 +236,8 @@ def run_inference(config, config_history, pipeline, progress=gr.Progress(track_t
             torch_dtype = get_data_type(config["data_type"]),
             variant = get_variant(config["variant"])).to(config["device"])
 
+        progress(2, desc="Setting pipeline params...")
+
         if str(config["cpu_offload"]).lower() != 'false':
             pipeline.enable_model_cpu_offload()
 
@@ -242,8 +248,17 @@ def run_inference(config, config_history, pipeline, progress=gr.Progress(track_t
         if str(config["enable_vae_slicing"]).lower() != 'false': pipeline.enable_vae_slicing()
         if str(config["enable_vae_tiling"]).lower() != 'false': pipeline.enable_vae_tiling()
 
+        # SAFETY CHECKER
+        if str(config["safety_checker"]).lower() == 'false': pipeline.safety_checker = None
+        pipeline.requires_safety_checker = get_bool(config["requires_safety_checker"])
+
+        # SCHEDULER/SOLVER
+        pipeline.scheduler = get_scheduler(config["scheduler"], pipeline.scheduler.config)
+
         # INIT REFINER
         if str(config['refiner']).lower() != 'none' and str(config['refiner']).lower() != 'null':
+
+            progress(3, desc="Initializing refiner...")
             refiner = DiffusionPipeline.from_pretrained(
                 config['refiner'],
                 text_encoder_2=pipeline.text_encoder_2,
@@ -258,38 +273,28 @@ def run_inference(config, config_history, pipeline, progress=gr.Progress(track_t
             if str(config["enable_vae_slicing"]).lower() != 'false': refiner.enable_vae_slicing()
             if str(config["enable_vae_tiling"]).lower() != 'false': refiner.enable_vae_tiling()
 
-        # SAFETY CHECKER
-        if str(config["safety_checker"]).lower() == 'false': pipeline.safety_checker = None
-        pipeline.requires_safety_checker = get_bool(config["requires_safety_checker"])
 
-        # SCHEDULER/SOLVER
-        pipeline.scheduler = get_scheduler(config["scheduler"], pipeline.scheduler.config)
-
-        # MANUAL SEED/GENERATOR
-        if config["manual_seed"] is None or config["manual_seed"] == '' or int(config["manual_seed"]) < 0:
-            generator = None
-        else:
-            generator = torch.manual_seed(int(config["manual_seed"]))
-
         # ADAPTERS
         # TEXTUAL INVERSION
-        if str(config["adapter_textual_inversion"]).lower() != 'none' and str(config["adapter_textual_inversion"]).lower() != 'null':
+        if str(config["adapter_textual_inversion"]).lower() != 'none' and str(config["adapter_textual_inversion"]).lower() != 'null' and str(config["adapter_textual_inversion"]).lower() != '':
+            progress(4, desc=f"Loading textual inversion adapter {config['adapter_textual_inversion']}...")
             pipeline.load_textual_inversion(config["adapter_textual_inversion"], token=config["adapter_textual_inversion_token"])
 
         # LoRA
         if len(config["adapter_lora"]) > 0 and len(config["adapter_lora"]) == len(config["adapter_lora_weight"]):
             adapter_lora_balancing = []
             for adapter_lora_index, adapter_lora in enumerate(config["adapter_lora"]):
+                progress(5, desc=f"Loading LoRA adapters {config['adapter_lora']}...")
                 if str(config["adapter_lora_weight"][adapter_lora_index]).lower() != 'none':
                     pipeline.load_lora_weights(adapter_lora, weight_name=config["adapter_lora_weight"][adapter_lora_index], adapter_name=config["adapter_lora_token"][adapter_lora_index])
                 else:
                     pipeline.load_lora_weights(adapter_lora, adapter_name=config["adapter_lora_token"][adapter_lora_index])
                 adapter_lora_balancing.append(config["adapter_lora_balancing"][adapter_lora])
 
-            adapter_weights =
-            pipeline.set_adapters(
+            adapter_weights = adapter_lora_balancing
+            pipeline.set_adapters(config["adapter_lora_token"], adapter_weights=adapter_weights)
 
-        progress(
+        progress(6, desc="Inferencing...")
 
         prompt = config["prompt"] + config["trigger_token"] + config["adapter_textual_inversion_token"] + ' '.join(config["adapter_lora_token"])
 
@@ -309,11 +314,12 @@ def run_inference(config, config_history, pipeline, progress=gr.Progress(track_t
 
         config_history.append(config.copy())
 
+        # expected output: out_image, out_config_history, config_history, pipeline
        return image[0], dict_list_to_markdown_table(config_history), config_history, pipeline
 
     else:
 
-        return "Please select a model AND a scheduler.",
+        return "Please select a model AND a scheduler.", "Please select a model AND a scheduler.", None, pipeline
 
 appConfig = load_app_config()
 models = appConfig.get("models", {})
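Taken together, the app.py changes tighten the model/scheduler guard (an empty model string no longer passes), move the seed/generator, safety-checker and scheduler setup ahead of refiner and adapter loading, report a progress step for each stage, and make the else branch return one value per wired output instead of a single string. Below is a minimal, self-contained sketch of that flow, not the Space's actual code: the config is reduced to a few keys and the four-output wiring is an assumption.

# Minimal sketch of the reworked run_inference() flow -- illustrative only,
# with a reduced config and assumed outputs (image, history markdown,
# history state, pipeline state).
import gradio as gr
import torch
from diffusers import DiffusionPipeline

def run_inference(config, config_history, pipeline, progress=gr.Progress(track_tqdm=True)):
    if str(config.get("model", "")) not in ("", "None", "null") and str(config.get("scheduler")) != "None":
        progress(0.1, desc="Initializing pipeline...")

        # Seed/generator is now resolved before the pipeline is built.
        seed = config.get("manual_seed")
        generator = None if seed in (None, "") or int(seed) < 0 else torch.manual_seed(int(seed))

        pipeline = DiffusionPipeline.from_pretrained(config["model"]).to(config["device"])

        progress(0.6, desc="Inferencing...")
        image = pipeline(prompt=config["prompt"], generator=generator).images

        config_history.append(config.copy())
        # One value per output component: out_image, out_config_history, config_history, pipeline
        return image[0], str(config_history), config_history, pipeline

    # The else branch must also return four values, one per wired output,
    # instead of the previous single string.
    return ("Please select a model AND a scheduler.",
            "Please select a model AND a scheduler.",
            None, pipeline)

The sketch calls gr.Progress with a 0-1 fraction and a desc; the commit itself passes step indices 1 through 6, each with a desc, to surface which stage a long run is currently in.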
config.py
CHANGED
@@ -212,7 +212,7 @@ def assemble_code(str_config):
     code.append(f'generator = torch.manual_seed(manual_seed)')
 
     # ADAPTER
-    if str(config["adapter_textual_inversion"]).lower() != 'none':
+    if str(config["adapter_textual_inversion"]).lower() != 'none' and str(config["adapter_textual_inversion"]).lower() != 'null' and str(config["adapter_textual_inversion"]).lower() != '':
         code.append(f'pipeline.load_textual_inversion("{config["adapter_textual_inversion"]}", token="{config["adapter_textual_inversion_token"]}")')
 
     if len(config["adapter_lora"]) > 0 and len(config["adapter_lora"]) == len(config["adapter_lora_weight"]):
@@ -227,7 +227,7 @@ def assemble_code(str_config):
         code.append(f'adapter_weights = {adapter_lora_balancing}')
         code.append(f'pipeline.set_adapters({config["adapter_lora_token"]}, adapter_weights=adapter_weights)')
 
-    code.append(f'prompt = "{config["prompt"]} {config["trigger_token"]} {config["adapter_textual_inversion_token"]} {" ".join(config["adapter_lora_token"])}"')
+    code.append(f'prompt = "{config["prompt"]} {config["trigger_token"]} {config["adapter_textual_inversion_token"]} {", ".join(config["adapter_lora_token"])}"')
     code.append(f'negative_prompt = "{config["negative_prompt"]}"')
     code.append(f'inference_steps = {config["inference_steps"]}')
     code.append(f'guidance_scale = {config["guidance_scale"]}')
|