fixing int-str error
Browse files
app.py
CHANGED
@@ -236,7 +236,7 @@ def run_inference(config, config_history, progress=gr.Progress(track_tqdm=True))
|
|
236 |
pipeline.scheduler = get_scheduler(config["scheduler"], pipeline.scheduler.config)
|
237 |
|
238 |
# MANUAL SEED/GENERATOR
|
239 |
-
if config["manual_seed"]
|
240 |
generator = None
|
241 |
else:
|
242 |
generator = torch.manual_seed(int(config["manual_seed"]))
|
@@ -342,15 +342,6 @@ with gr.Blocks(analytics_enabled=False) as demo:
|
|
342 |
# with gr.Row():
|
343 |
# gr.Markdown('Choose an adapter.')
|
344 |
|
345 |
-
gr.Markdown("### Inference settings")
|
346 |
-
with gr.Row():
|
347 |
-
in_prompt = gr.TextArea(label="Prompt", value=config.value["prompt"])
|
348 |
-
in_negative_prompt = gr.TextArea(label="Negative prompt", value=config.value["negative_prompt"])
|
349 |
-
with gr.Row():
|
350 |
-
in_inference_steps = gr.Number(label="Inference steps", value=config.value["inference_steps"], info="Each step improves the final result but also results in higher computation")
|
351 |
-
in_manual_seed = gr.Number(label="Manual seed", value=config.value["manual_seed"], info="Set this to -1 or leave it empty to randomly generate an image. A fixed value will result in a similar image for every run")
|
352 |
-
in_guidance_scale = gr.Slider(minimum=0, maximum=100, step=0.1, label="Guidance Scale", value=config.value["guidance_scale"], info="A low guidance scale leads to a faster inference time, with the drawback that negative prompts don’t have any effect on the denoising process.")
|
353 |
-
|
354 |
gr.Markdown("### Auto Encoder")
|
355 |
with gr.Row():
|
356 |
gr.Markdown("**VAE** stands for Variational Auto Encoder. An 'autoencoder' is an artificial neural network that is able to encode input data and decode to output data to basically recreate the input. The VAE, however, adds a couple of additional layers of complexity to create new and unique output.")
|
@@ -374,6 +365,15 @@ with gr.Blocks(analytics_enabled=False) as demo:
|
|
374 |
in_adapters_textual_inversion_token = gr.Textbox(value="None", label="Token", info="required to activate the token, will be added to your prompt")
|
375 |
out_adapters_textual_inversion_description = gr.Textbox(value="", label="Description")
|
376 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
377 |
gr.Markdown("### Output")
|
378 |
with gr.Row():
|
379 |
btn_start_pipeline = gr.Button(value="Run", variant="primary")
|
|
|
236 |
pipeline.scheduler = get_scheduler(config["scheduler"], pipeline.scheduler.config)
|
237 |
|
238 |
# MANUAL SEED/GENERATOR
|
239 |
+
if config["manual_seed"] is None or config["manual_seed"] == '' or int(config["manual_seed"]) < 0:
|
240 |
generator = None
|
241 |
else:
|
242 |
generator = torch.manual_seed(int(config["manual_seed"]))
|
|
|
342 |
# with gr.Row():
|
343 |
# gr.Markdown('Choose an adapter.')
|
344 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
345 |
gr.Markdown("### Auto Encoder")
|
346 |
with gr.Row():
|
347 |
gr.Markdown("**VAE** stands for Variational Auto Encoder. An 'autoencoder' is an artificial neural network that is able to encode input data and decode to output data to basically recreate the input. The VAE, however, adds a couple of additional layers of complexity to create new and unique output.")
|
|
|
365 |
in_adapters_textual_inversion_token = gr.Textbox(value="None", label="Token", info="required to activate the token, will be added to your prompt")
|
366 |
out_adapters_textual_inversion_description = gr.Textbox(value="", label="Description")
|
367 |
|
368 |
+
gr.Markdown("### Inference settings")
|
369 |
+
with gr.Row():
|
370 |
+
in_prompt = gr.TextArea(label="Prompt", value=config.value["prompt"])
|
371 |
+
in_negative_prompt = gr.TextArea(label="Negative prompt", value=config.value["negative_prompt"])
|
372 |
+
with gr.Row():
|
373 |
+
in_inference_steps = gr.Number(label="Inference steps", value=config.value["inference_steps"], info="Each step improves the final result but also results in higher computation")
|
374 |
+
in_manual_seed = gr.Number(label="Manual seed", value=config.value["manual_seed"], info="Set this to -1 or leave it empty to randomly generate an image. A fixed value will result in a similar image for every run")
|
375 |
+
in_guidance_scale = gr.Slider(minimum=0, maximum=100, step=0.1, label="Guidance Scale", value=config.value["guidance_scale"], info="A low guidance scale leads to a faster inference time, with the drawback that negative prompts don’t have any effect on the denoising process.")
|
376 |
+
|
377 |
gr.Markdown("### Output")
|
378 |
with gr.Row():
|
379 |
btn_start_pipeline = gr.Button(value="Run", variant="primary")
|