nickyreinert-vml committed
Commit 739e268 · 1 parent: a494f64

clean up ui, adding accordions

Files changed (1):
  1. app.py +71 -61
app.py CHANGED
@@ -431,82 +431,90 @@ with gr.Blocks(analytics_enabled=False) as demo:
  <small>by <a target="_blank" href="https://nickyreinert.de/">Nicky Reinert</a> |
  home base: https://huggingface.co/spaces/n42/pictero
  </small>''')
- gr.Markdown("### Device specific settings")
+ gr.Markdown("### Device")
+ gr.Markdown("(you may add a custom device address at any time)")
  with gr.Row():
- in_devices = gr.Dropdown(label="Device:", value=config.value["device"], choices=devices, filterable=True, multiselect=False, allow_custom_value=True, info="(you may add a custom device address at any time)")
- in_cpu_offload = gr.Radio(label="CPU Offload:", value=config.value["cpu_offload"], choices=["True", "False"], info="This may increase performance, as it offloads computations from the GPU to the CPU. But this can also lead to slower executions and lower effectiveness. Compare running time and outputs before making sure, that this setting will help you")
- with gr.Row():
- in_data_type = gr.Radio(label="Data Type:", value=config.value["data_type"], choices=["bfloat16", "float16", "float32"], info="`bfloat16` is not supported on MPS devices right now; `float16` may also not be supported on all devices, Half-precision weights, will save GPU memory, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
- in_allow_tensorfloat32 = gr.Radio(label="Allow TensorFloat32:", value=config.value["allow_tensorfloat32"], choices=["True", "False"], info="is not supported on MPS devices right now; use TensorFloat-32 is faster, but results in slightly less accurate computations, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16 ")
- in_variant = gr.Radio(label="Variant:", value=config.value["variant"], choices=["fp16", None], info="Use half-precision weights will save GPU memory, not all models support that, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16 ")
- in_attention_slicing = gr.Radio(label="Attention slicing:", value=config.value["attention_slicing"], choices=["True", "False"], info="Attention operation will be cutted into multiple steps, see https://huggingface.co/docs/diffusers/optimization/mps")
-
- gr.Markdown("### Model specific settings")
- with gr.Row():
- in_models = gr.Dropdown(choices=list(models.keys()), label="Model")
- out_model_description = gr.Textbox(value="", label="Description")
+ in_devices = gr.Dropdown(label="Device:", value=config.value["device"], choices=devices, filterable=True, multiselect=False, allow_custom_value=True, info="")
+ gr.Column("")
+ gr.Column("")
+ with gr.Accordion("Device specific settings", open=False):
+ with gr.Row():
+ in_cpu_offload = gr.Radio(label="CPU Offload:", value=config.value["cpu_offload"], choices=["True", "False"], info="This may increase performance by offloading computations from the GPU to the CPU, but it can also lead to slower execution and lower effectiveness. Compare running time and outputs to make sure this setting helps you")
+ in_data_type = gr.Radio(label="Data Type:", value=config.value["data_type"], choices=["bfloat16", "float16", "float32"], info="`bfloat16` is not supported on MPS devices right now; `float16` may also not be supported on all devices. Half-precision weights save GPU memory, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
+ in_allow_tensorfloat32 = gr.Radio(label="Allow TensorFloat32:", value=config.value["allow_tensorfloat32"], choices=["True", "False"], info="Not supported on MPS devices right now; TensorFloat-32 is faster but results in slightly less accurate computations, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
+ with gr.Row():
+ in_variant = gr.Radio(label="Variant:", value=config.value["variant"], choices=["fp16", None], info="Using half-precision weights saves GPU memory; not all models support this, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
+ in_attention_slicing = gr.Radio(label="Attention slicing:", value=config.value["attention_slicing"], choices=["True", "False"], info="The attention operation will be cut into multiple steps, see https://huggingface.co/docs/diffusers/optimization/mps")
+ gr.Column("")
+
+ gr.Markdown("### Model")
  with gr.Row():
  with gr.Column(scale=1):
+ in_models = gr.Dropdown(choices=list(models.keys()), label="Model")
+ with gr.Column(scale=2):
+ out_model_description = gr.Textbox(value="", label="Description")
+ with gr.Accordion("Model specific settings", open=False):
+ with gr.Row():
  in_trigger_token = gr.Textbox(value=config.value["trigger_token"], label="Trigger Token", info="will be added to your prompt to `activate` a fine tuned model")
  in_model_refiner = gr.Dropdown(value=config.value["refiner"], choices=['none'] + refiners, label="Refiner", allow_custom_value=True, multiselect=False)
- with gr.Column(scale=1):
+ gr.Column("")
+ with gr.Row():
  in_use_safetensors = gr.Radio(label="Use safe tensors:", choices=["True", "False"], interactive=False)
  in_safety_checker = gr.Radio(label="Enable safety checker:", value=config.value["safety_checker"], choices=["True", "False"])
  in_requires_safety_checker = gr.Radio(label="Requires safety checker:", value=config.value["requires_safety_checker"], choices=["True", "False"])
 
  gr.Markdown("### Scheduler")
+ gr.Markdown("Schedulers employ various strategies for noise control; the scheduler controls parameter adaptation between each inference step. With the right scheduler for your model, it may take only 10 or 20 steps to achieve very good results, see https://huggingface.co/docs/diffusers/using-diffusers/loading#schedulers")
  with gr.Row():
- in_schedulers = gr.Dropdown(value="", choices=list(schedulers.keys()), allow_custom_value=True, label="Scheduler/Solver", info="schedulers employ various strategies for noise control, the scheduler controls parameter adaption between each inference step, depending on the right scheduler for your model, it may only take 10 or 20 steps to achieve very good results, see https://huggingface.co/docs/diffusers/using-diffusers/loading#schedulers" )
- out_scheduler_description = gr.Textbox(value="", label="Description")
-
- # gr.Markdown("### Adapters")
- # with gr.Row():
- # gr.Markdown('Choose an adapter.')
-
- gr.Markdown("### Auto Encoder")
- with gr.Row():
- gr.Markdown("**VAE** stands for Variational Auto Encoders. An 'autoencoder' is an artificial neural network that is able to encode input data and decode to output data to bascially recreate the input. The VAE whereas adds a couple of additional layers of complexity to create new and unique output.")
- with gr.Row():
- with gr.Column():
+ with gr.Column(scale=1):
+ in_schedulers = gr.Dropdown(value="", choices=list(schedulers.keys()), allow_custom_value=True, label="Scheduler/Solver", info="")
+ with gr.Column(scale=2):
+ out_scheduler_description = gr.Textbox(value="", label="Description")
+
+ with gr.Accordion("Auto Encoder", open=False):
+ with gr.Row():
+ gr.Markdown("**VAE** stands for Variational Auto Encoder. An autoencoder is an artificial neural network that encodes input data and decodes it again to basically recreate the input. A VAE adds a couple of additional layers of complexity to create new and unique output.")
+ with gr.Row():
  in_auto_encoders = gr.Dropdown(value="None", choices=list(auto_encoders.keys()), label="Auto encoder", info="leave empty to not add an auto encoder")
  out_auto_encoder_description = gr.Textbox(value="", label="Description")
- with gr.Column():
+ gr.Column("")
+ with gr.Row():
  in_enable_vae_slicing = gr.Radio(label="Enable VAE slicing:", value=config.value["enable_vae_slicing"], choices=["True", "False"], info="decoding the batches of latents one image at a time, which may reduce memory usage, see https://huggingface.co/docs/diffusers/main/en/optimization/memory")
  in_enable_vae_tiling= gr.Radio(label="Enable VAE tiling:", value=config.value["enable_vae_tiling"], choices=["True", "False"], info="splitting the image into overlapping tiles, decoding the tiles, and then blending the outputs together to compose the final image, see https://huggingface.co/docs/diffusers/main/en/optimization/memory")
-
- gr.Markdown("### Adapters")
- with gr.Row():
- gr.Markdown('''Adapters allow you to apply finetuned weights to your base model. They come in many flavors depending on how they were trained. See see https://huggingface.co/docs/diffusers/using-diffusers/loading_adapters''')
- with gr.Row():
- gr.Markdown('#### Textual Inversion Adapters')
- with gr.Row():
- gr.Markdown('(a technique that enables a model like Stable Diffusion to learn a new concept from just a few sample images)')
- with gr.Row():
- in_adapters_textual_inversion = gr.Dropdown(value="", choices=list(adapters['textual_inversion'].keys()), label="Textual Inversion Adapter", info="leave empty to not use an adapter")
- in_adapters_textual_inversion_token = gr.Textbox(value="", label="Token", info="required to activate the token, will be added to your prompt")
- out_adapters_textual_inversion_description = gr.Textbox(value="", label="Description")
- with gr.Row():
- gr.Markdown('#### LoRA')
- with gr.Row():
- gr.Markdown('(Low-Rank-Adaption is a performant fine tuning technique)')
- with gr.Row():
- in_adapters_lora = gr.Dropdown(value="None", choices=list(adapters['lora'].keys()), multiselect=True, label="LoRA Adapter", info="leave empty to not use an adapter")
- out_adapters_lora_description = gr.Textbox(value="", label="Description")
- with gr.Row():
- in_adapters_lora_token = gr.Textbox(value="None", label="Token(s)", info="required to activate the token, will be added to your prompt")
- in_adapters_lora_weight = gr.Textbox(value="", label="Weight(s)/Checkpoint(s)")
- in_adapters_lora_balancing = gr.Textbox(value={}, label="Balancing", info="provide a list of balancing weights in the order of your LoRA adapter (according to `token`s)")
-
+ gr.Column("")
+
+ with gr.Accordion("Adapters", open=False):
+ with gr.Row():
+ gr.Markdown('''Adapters allow you to apply fine-tuned weights to your base model. They come in many flavors, depending on how they were trained. See https://huggingface.co/docs/diffusers/using-diffusers/loading_adapters''')
+ with gr.Row():
+ gr.Markdown('#### Textual Inversion Adapters')
+ with gr.Row():
+ gr.Markdown('(a technique that enables a model like Stable Diffusion to learn a new concept from just a few sample images)')
+ with gr.Row():
+ in_adapters_textual_inversion = gr.Dropdown(value="", choices=list(adapters['textual_inversion'].keys()), label="Textual Inversion Adapter", info="leave empty to not use an adapter")
+ in_adapters_textual_inversion_token = gr.Textbox(value="", label="Token", info="required to activate the adapter; will be added to your prompt")
+ out_adapters_textual_inversion_description = gr.Textbox(value="", label="Description")
+ with gr.Row():
+ gr.Markdown('#### LoRA')
+ with gr.Row():
+ gr.Markdown('(Low-Rank Adaptation is a performant fine-tuning technique)')
+ with gr.Row():
+ in_adapters_lora = gr.Dropdown(value="None", choices=list(adapters['lora'].keys()), multiselect=True, label="LoRA Adapter", info="leave empty to not use an adapter")
+ out_adapters_lora_description = gr.Textbox(value="", label="Description")
+ in_lora_scale = gr.Slider(minimum=0, maximum=1, step=0.1, label="LoRA Scale", value=config.value["lora_scale"], info="How strongly the LoRA model should influence the result, from 0 (no influence) to 1 (full influence)")
+ with gr.Row():
+ in_adapters_lora_token = gr.Textbox(value="None", label="Token(s)", info="required to activate the adapter; will be added to your prompt")
+ in_adapters_lora_weight = gr.Textbox(value="", label="Weight(s)/Checkpoint(s)")
+ in_adapters_lora_balancing = gr.Textbox(value={}, label="Balancing", info="provide a list of balancing weights in the order of your LoRA adapters (according to `token`s)")
+
  gr.Markdown("### Inference settings")
  with gr.Row():
  in_prompt = gr.TextArea(label="Prompt", value=config.value["prompt"])
  in_negative_prompt = gr.TextArea(label="Negative prompt", value=config.value["negative_prompt"])
  with gr.Row():
+ in_guidance_scale = gr.Slider(minimum=0, maximum=100, step=0.1, label="Guidance Scale", value=config.value["guidance_scale"], info="A low guidance scale leads to faster inference, with the drawback that negative prompts have no effect on the denoising process.")
  in_inference_steps = gr.Number(label="Inference steps", value=config.value["inference_steps"], info="Each step improves the final result but also results in higher computation")
  in_manual_seed = gr.Number(label="Manual seed", value=config.value["manual_seed"], info="Set this to -1 or leave it empty to randomly generate an image. A fixed value will result in a similar image for every run")
- with gr.Row():
- in_guidance_scale = gr.Slider(minimum=0, maximum=100, step=0.1, label="Guidance Scale", value=config.value["guidance_scale"], info="A low guidance scale leads to a faster inference time, with the drawback that negative prompts don’t have any effect on the denoising process.")
- in_lora_scale = gr.Slider(minimum=0, maximum=1, step=0.1, label="LoRA Scale", value=config.value["lora_scale"], info="How should the LoRA model influence the result, from 0 (no influence) to 1 (full influencer)")
 
  gr.Markdown("### Output")
  with gr.Row():
@@ -517,13 +525,15 @@ with gr.Blocks(analytics_enabled=False) as demo:
  btn_stop_pipeline = gr.Button(value="Stop", variant="stop")
  with gr.Row():
  out_image = gr.Image()
- out_code = gr.Code(assemble_code(config.value), label="Code")
- with gr.Row():
- # out_config = gr.Code(value=str(config.value), label="Current config")
- out_config = gr.JSON(value=config.value, label="Current config")
- with gr.Row():
- out_config_history = gr.Markdown(dict_list_to_markdown_table(config_history.value))
-
+
+ with gr.Accordion("Code and Configuration", open=False):
+ with gr.Row():
+ out_code = gr.Code(assemble_code(config.value), label="Code")
+ # out_config = gr.Code(value=str(config.value), label="Current config")
+ out_config = gr.JSON(value=config.value, label="Current config")
+ with gr.Row():
+ out_config_history = gr.Markdown(dict_list_to_markdown_table(config_history.value))
+
  # `SPECIAL` CHANGE LISTENERS
  in_models.change(models_change, inputs=[in_models, in_schedulers, config], outputs=[out_model_description, in_trigger_token, in_use_safetensors, in_schedulers, config, out_config, out_code], js="(model, config) => set_model_cookie(model, config)")
  in_schedulers.change(schedulers_change, inputs=[in_schedulers, config], outputs=[out_scheduler_description, config, out_config, out_code], js="(value, config) => set_cookie_2('scheduler', value, config)")
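
The central pattern in this diff is wrapping advanced controls in gr.Accordion blocks that start collapsed (open=False), so the main page stays compact while the settings remain reachable. Below is a minimal, self-contained sketch of that pattern, assuming only a plain gradio install; the component names here are illustrative and not taken from app.py.

import gradio as gr

with gr.Blocks(analytics_enabled=False) as demo:
    gr.Markdown("### Device")
    with gr.Row():
        # the primary control stays visible at the top level
        device = gr.Dropdown(label="Device:", choices=["cpu", "cuda", "mps"], allow_custom_value=True)
    # advanced options collapse into an accordion that is closed by default
    with gr.Accordion("Device specific settings", open=False):
        with gr.Row():
            cpu_offload = gr.Radio(label="CPU Offload:", choices=["True", "False"])
            data_type = gr.Radio(label="Data Type:", choices=["bfloat16", "float16", "float32"])

if __name__ == "__main__":
    demo.launch()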
 
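The unchanged listeners at the end of the diff show how the collapsed "Code and Configuration" panel stays in sync: each input's change event re-renders the config JSON and the assembled code. Here is a hypothetical, runnable sketch of that wiring, leaving out the js= cookie hooks and the helpers (models_change, set_model_cookie, assemble_code) that app.py defines elsewhere.

import gradio as gr

with gr.Blocks() as demo:
    config = gr.State({"model": ""})          # shared config dict
    in_model = gr.Dropdown(choices=["model-a", "model-b"], label="Model")
    with gr.Accordion("Current config", open=False):
        out_config = gr.JSON(label="Current config")

    def model_change(value, cfg):
        cfg = dict(cfg, model=value)          # copy and update the config
        return cfg, cfg                       # new State value, JSON display

    # mirrors the in_models.change(...) wiring above, minus the js= callback
    in_model.change(model_change, inputs=[in_model, config], outputs=[config, out_config])

if __name__ == "__main__":
    demo.launch()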