adding torch empty cache
Browse files
app.py
CHANGED
@@ -227,7 +227,7 @@ def re_run_inference(config, config_history, pipeline, progress=gr.Progress(trac
|
|
227 |
return "Please select a model AND a scheduler.", "Please select a model AND a scheduler.", None, pipeline
|
228 |
|
229 |
if pipeline == None:
|
230 |
-
return "Please run full inference first.", "Please run full inference first.", None, pipeline
|
231 |
|
232 |
# # MANUAL SEED/GENERATOR - we probably don't need that again?
|
233 |
# if config["manual_seed"] is None or config["manual_seed"] == '' or int(config["manual_seed"]) < 0:
|
@@ -269,14 +269,15 @@ def re_run_inference(config, config_history, pipeline, progress=gr.Progress(trac
|
|
269 |
|
270 |
# expected output: out_image, out_config_history, config_history, pipeline
|
271 |
return image[0], dict_list_to_markdown_table(config_history), config_history, pipeline
|
272 |
-
|
273 |
-
|
274 |
def run_inference(config, config_history, pipeline, progress=gr.Progress(track_tqdm=True)):
|
275 |
|
276 |
if str(config["model"]) != 'None' and str(config["model"]) != 'null' and str(config["model"]) != '' and str(config["scheduler"]) != 'None':
|
277 |
|
278 |
progress(1, desc="Initializing pipeline...")
|
279 |
|
|
|
|
|
280 |
torch.backends.cuda.matmul.allow_tf32 = get_bool(config["allow_tensorfloat32"]) # Use TensorFloat-32 as of https://huggingface.co/docs/diffusers/main/en/optimization/fp16 faster, but slightly less accurate computations
|
281 |
|
282 |
# INIT PIPELINE
|
@@ -427,7 +428,7 @@ with gr.Blocks(analytics_enabled=False) as demo:
|
|
427 |
pipeline = gr.State()
|
428 |
|
429 |
gr.Markdown('''## Text-2-Image Playground
|
430 |
-
<small>by <a target="_blank" href="https://nickyreinert.de/">Nicky Reinert</a> |
|
431 |
home base: https://huggingface.co/spaces/n42/pictero
|
432 |
</small>''')
|
433 |
gr.Markdown("### Device specific settings")
|
|
|
227 |
return "Please select a model AND a scheduler.", "Please select a model AND a scheduler.", None, pipeline
|
228 |
|
229 |
if pipeline == None:
|
230 |
+
return "Please run full inference first.", "Please run full inference first.", None, pipeline
|
231 |
|
232 |
# # MANUAL SEED/GENERATOR - we probably don't need that again?
|
233 |
# if config["manual_seed"] is None or config["manual_seed"] == '' or int(config["manual_seed"]) < 0:
|
|
|
269 |
|
270 |
# expected output: out_image, out_config_history, config_history, pipeline
|
271 |
return image[0], dict_list_to_markdown_table(config_history), config_history, pipeline
|
272 |
+
|
|
|
273 |
def run_inference(config, config_history, pipeline, progress=gr.Progress(track_tqdm=True)):
|
274 |
|
275 |
if str(config["model"]) != 'None' and str(config["model"]) != 'null' and str(config["model"]) != '' and str(config["scheduler"]) != 'None':
|
276 |
|
277 |
progress(1, desc="Initializing pipeline...")
|
278 |
|
279 |
+
torch.cuda.empty_cache()
|
280 |
+
|
281 |
torch.backends.cuda.matmul.allow_tf32 = get_bool(config["allow_tensorfloat32"]) # Use TensorFloat-32 as of https://huggingface.co/docs/diffusers/main/en/optimization/fp16 faster, but slightly less accurate computations
|
282 |
|
283 |
# INIT PIPELINE
|
|
|
428 |
pipeline = gr.State()
|
429 |
|
430 |
gr.Markdown('''## Text-2-Image Playground
|
431 |
+
<small>by <a target="_blank" href="https://nickyreinert.de/">Nicky Reinert</a> |
|
432 |
home base: https://huggingface.co/spaces/n42/pictero
|
433 |
</small>''')
|
434 |
gr.Markdown("### Device specific settings")
|