|
import gradio as gr |
|
import torch |
|
import modin.pandas as pd |
|
from PIL import Image |
|
from diffusers import DiffusionPipeline |
|
import os |
|
import random |
|
import torchsde |
|
from math import floor, copysign |
|
import time |
|
import urllib.parse |
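
# Configuration
#
# The settings below control how the app behaves. Most on/off settings use
# 1 for on/yes and 0 for off/no.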
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
main_dir = "C:/Spaghetti_AI" |
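
# The folder above is the base directory where this app stores its data:
# saved images, the Hugging Face model cache and Gradio temporary files
# (see the folder-name settings below).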
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
only_use_local_files = 0 |
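
# When only_use_local_files is 1 (or when the HF_HUB_OFFLINE environment
# variable is set to 1), models are only loaded from the local cache and
# nothing is downloaded.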
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
use_custom_hugging_face_cache_dir = 1 |
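
# When use_custom_hugging_face_cache_dir is 1, downloaded model data is cached
# under main_dir/<cache_directory_folder_name> instead of the default
# Hugging Face cache location.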
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
cache_directory_folder_name = "model_data" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_base_model = "sdxl" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
use_safety_checker = 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto_save_imagery = 1 |
|
save_canceled_images = 0 |
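
# When auto_save_imagery is 1, each generated image, along with a text file of
# its generation information, is saved under main_dir/<saved_images_folder_name>.
# save_canceled_images controls whether imagery from canceled generations is
# also saved.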
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
saved_images_folder_name = "saved_images" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
use_custom_temporary_files_folder = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
gradio_temporary_files_folder_name = "gradio_temporary_files" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto_open_browser = 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
enable_image_generation_cancellation = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_use_denoising_start_in_base_model_when_using_refiner = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_base_model_output_to_refiner_is_in_latent_space = 1 |
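
# The two settings above control the hand-off from the base model to the
# refiner: whether the base model stops early (denoising_end) so the refiner
# finishes the remaining steps, and whether its output is passed to the
# refiner as latents rather than a decoded image. Some base models do not
# support these options (see the objects defined further below).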
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
log_generation_times = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
use_image_gallery = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
show_image_creation_progress_log = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
show_messages_in_command_prompt = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
show_messages_in_modal_on_page = 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
suppress_hugging_face_hub_offline_status = 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_dark_theme = 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_add_seed_into_pipe = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_use_torch_manual_seed_but_do_not_add_to_pipe = 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_save_base_image_when_using_refiner_or_upscaler = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_save_refined_image_when_using_upscaler = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
max_queue_size_if_cpu = 3 |
|
max_queue_size_if_torch = 20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
allow_other_model_versions = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
enable_image_preview = 1 |
|
image_preview_step_interval = 10 |
|
image_preview_seconds_interval = 30 |
|
load_image_preview_frequency_in_seconds = 2 |
|
delete_preview_images_immediately = 1 |
|
|
|
default_create_preview_images = 1 |
|
default_do_not_create_refining_preview_images = 0 |
|
default_do_not_create_upscaling_preview_images = 1 |
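
# The settings above control the intermediate preview images that can be shown
# while an image is being generated, refined or upscaled, and how often they
# are created and loaded.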
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
enable_longer_prompts = 1 |
|
|
|
allow_longer_prompts_for_sd_1_5_based_models = 1 |
|
|
|
base_models_supporting_special_long_prompt_method_object = { |
|
"photoreal": 1, |
|
"sd_1_5_runwayml": 1 |
|
} |
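
# The object above flags which base models support the special long-prompt
# method (the link to that method is added to ending_html further below).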
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pytorch_cuda_alloc_conf_max_split_size_mb = 6000 |
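
# Value, in megabytes, used for the CUDA allocator's "max_split_size_mb"
# option when a GPU is available.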
|
|
|
|
|
|
|
sdxl_link = "https://huggingface.co/spaces/Manjushri/SDXL-1.0" |
|
photoreal_link = "https://huggingface.co/spaces/Manjushri/PhotoReal-V3.8.1" |
|
sdxl_turbo_link = "https://huggingface.co/spaces/diffusers/unofficial-SDXL-Turbo-i2i-t2i" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
base_model_array = [ |
|
"sdxl", |
|
"photoreal", |
|
"sdxl_turbo", |
|
"sd_1_5_runwayml" |
|
] |
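
# Internal identifiers for the supported base models. The model dropdown in
# the interface follows this order.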
|
|
|
base_model_names_object = { |
|
"sdxl": "Stable Diffusion XL", |
|
"photoreal": "PhotoReal", |
|
"sdxl_turbo": "Stable Diffusion XL Turbo", |
|
"sd_1_5_runwayml": "Stable Diffusion 1.5" |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
base_model_object_of_model_configuration_arrays = { |
|
"sdxl": [ |
|
"sdxl_default", |
|
"sdxl_1-0" |
|
], |
|
"photoreal": [ |
|
"photoreal_default", |
|
"photoreal_3-8-1", |
|
"photoreal_3-8", |
|
"photoreal_3-7-5", |
|
"photoreal_3-6" |
|
], |
|
"sdxl_turbo": [ |
|
"sdxl_turbo_default", |
|
"sdxl_turbo_initial" |
|
], |
|
"sd_1_5_runwayml": [ |
|
"sd_1_5_runwayml_default" |
|
] |
|
} |
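
# Each base model above maps to the model-configuration keys (versions) that
# can be selected for it.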
|
|
|
|
|
|
|
model_configuration_names_object = { |
|
"sdxl_default": "Default (currently 1.0)", |
|
"sdxl_1-0": "1.0", |
|
"photoreal_default": "Default (currently 3.6)", |
|
"photoreal_3-8-1": "3.8.1", |
|
"photoreal_3-8": "3.8", |
|
"photoreal_3-7-5": "3.7.5", |
|
"photoreal_3-6": "3.6", |
|
"sdxl_turbo_default": "Default (currently Initial Release)", |
|
"sdxl_turbo_initial": "Initial Release", |
|
"sd_1_5_runwayml_default": "Default" |
|
} |
|
|
|
model_configuration_links_object = { |
|
"sdxl_default": "stabilityai/stable-diffusion-xl-base-1.0", |
|
"sdxl_1-0": "stabilityai/stable-diffusion-xl-base-1.0", |
|
"photoreal_default": "circulus/canvers-realistic-v3.6", |
|
"photoreal_3-8-1": "circulus/canvers-real-v3.8.1", |
|
"photoreal_3-8": "circulus/canvers-real-v3.8", |
|
"photoreal_3-7-5": "circulus/canvers-real-v3.7.5", |
|
"photoreal_3-6": "circulus/canvers-realistic-v3.6", |
|
"sdxl_turbo_default": "stabilityai/sdxl-turbo", |
|
"sdxl_turbo_initial": "stabilityai/sdxl-turbo", |
|
"sd_1_5_runwayml_default": "runwayml/stable-diffusion-v1-5" |
|
} |
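
# The values above are the Hugging Face repository ids passed to
# DiffusionPipeline.from_pretrained() for each model configuration.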
|
|
|
|
|
|
|
base_models_not_supporting_denoising_end_for_base_model_object = { |
|
"photoreal": 1, |
|
"sd_1_5_runwayml": 1 |
|
} |
|
|
|
base_models_not_supporting_base_model_output_in_latent_space_to_refiner_object = { |
|
"photoreal": 1, |
|
"sd_1_5_runwayml": 1 |
|
} |
|
|
|
|
|
|
|
hugging_face_refiner_partial_path = "stabilityai/stable-diffusion-xl-refiner-1.0" |
|
hugging_face_upscaler_partial_path = "stabilityai/sd-x2-latent-upscaler" |
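
# Hugging Face repository ids for the SDXL refiner and the 2x latent upscaler.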
|
|
|
|
|
|
|
base_model_model_configuration_defaults_object = { |
|
"sdxl": "sdxl_default", |
|
"photoreal": "photoreal_default", |
|
"sdxl_turbo": "sdxl_turbo_default", |
|
"sd_1_5_runwayml": "sd_1_5_runwayml_default" |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_scheduler = "model_default" |
|
|
|
|
|
|
|
schedulers_array = [ |
|
"model_default", |
|
"ddim", |
|
"ddpm", |
|
"dpm_solver_multistep", |
|
"dpm_solver_multistep_karras_sigmas_true", |
|
"dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp", |
|
"dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp", |
|
"dpm_solver_singlestep", |
|
"dpm_solver_singlestep_karras_sigmas_true", |
|
"kdpm2_discrete", |
|
"kdpm2_discrete_karras_sigmas_true", |
|
"kdpm2_ancestral_discrete", |
|
"kdpm2_ancestral_discrete_karras_sigmas_true", |
|
"euler_discrete", |
|
"euler_ancestral_discrete", |
|
"heun_discrete", |
|
"lms_discrete", |
|
"lms_discrete_karras_sigmas_true", |
|
"pndm", |
|
"pndm_skip_prk_steps_true", |
|
"deis_multistep", |
|
"dpm_solver_sde", |
|
"uni_pc_multistep" |
|
] |
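
# Internal scheduler identifiers. The objects below map them to long display
# names (used in the scheduler dropdown), short display names, and, in
# reverse, from diffusers scheduler class names back to these identifiers.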
|
|
|
|
|
|
|
scheduler_long_names_object = { |
|
"model_default": "Model Default", |
|
"ddim": "DDIM", |
|
"ddpm": "DDPM", |
|
"dpm_solver_multistep": "DPM++ 2M (DPMSolverMultistep)", |
|
"dpm_solver_multistep_karras_sigmas_true": "DPM++ 2M Karras (DPMSolverMultistep with use_karras_sigmas=True)", |
|
"dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE (DPMSolverMultistep with algorithm_type=\"sde-dpmsolver++\")", |
|
"dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE Karras (DPMSolverMultistep with use_karras_sigmas=True & algorithm_type=\"sde-dpmsolver++\")", |
|
"dpm_solver_singlestep": "DPM++ SDE (DPMSolverSinglestep)", |
|
"dpm_solver_singlestep_karras_sigmas_true": "DPM++ SDE Karras (DPMSolverSinglestep with use_karras_sigmas=True)", |
|
"kdpm2_discrete": "DPM2 (KDPM2Discrete)", |
|
"kdpm2_discrete_karras_sigmas_true": "DPM2 Karras (KDPM2Discrete with use_karras_sigmas=True)", |
|
"kdpm2_ancestral_discrete": "DPM2 a (KDPM2AncestralDiscrete)", |
|
"kdpm2_ancestral_discrete_karras_sigmas_true": "DPM2 a Karras (KDPM2AncestralDiscrete with use_karras_sigmas=True)", |
|
"euler_discrete": "Euler (EulerDiscrete)", |
|
"euler_ancestral_discrete": "Euler a (EulerAncestralDiscrete)", |
|
"heun_discrete": "Heun (HeunDiscrete)", |
|
"lms_discrete": "LMS (LMSDiscrete)", |
|
"lms_discrete_karras_sigmas_true": "LMS Karras (LMSDiscrete with use_karras_sigmas=True)", |
|
"pndm": "PNDM", |
|
"pndm_skip_prk_steps_true": "PNDM (with skip_prk_steps=True) - Close to PLMS", |
|
"deis_multistep": "DEISMultistep", |
|
"dpm_solver_sde": "DPMSolverSDE", |
|
"uni_pc_multistep": "UniPCMultistep" |
|
} |
|
|
|
|
|
|
|
scheduler_short_names_object = { |
|
"ddim": "DDIM", |
|
"ddpm": "DDPM", |
|
"dpm_solver_multistep": "DPM++ 2M", |
|
"dpm_solver_multistep_karras_sigmas_true": "DPM++ 2M Karras", |
|
"dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE", |
|
"dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE Karras", |
|
"dpm_solver_singlestep": "DPM++ SDE", |
|
"dpm_solver_singlestep_karras_sigmas_true": "DPM++ SDE Karras", |
|
"kdpm2_discrete": "DPM2", |
|
"kdpm2_discrete_karras_sigmas_true": "DPM2 Karras", |
|
"kdpm2_ancestral_discrete": "DPM2 a", |
|
"kdpm2_ancestral_discrete_karras_sigmas_true": "DPM2 a Karras", |
|
"euler_discrete": "Euler", |
|
"euler_ancestral_discrete": "Euler a", |
|
"heun_discrete": "Heun", |
|
"lms_discrete": "LMS", |
|
"lms_discrete_karras_sigmas_true": "LMS Karras", |
|
"pndm": "PNDM", |
|
"pndm_skip_prk_steps_true": "PNDM (with skip_prk_steps=True) - Close to PLMS", |
|
"deis_multistep": "DEISMultistep", |
|
"dpm_solver_sde": "DPMSolverSDE", |
|
"uni_pc_multistep": "UniPCMultistep" |
|
} |
|
|
|
|
|
|
|
scheduler_name_to_identifier_in_app_object = { |
|
"DDIMScheduler": "ddim", |
|
"DDPMScheduler": "ddpm", |
|
"DPMSolverMultistepScheduler": "dpm_solver_multistep", |
|
"DPMSolverSinglestepScheduler": "dpm_solver_singlestep", |
|
"KDPM2DiscreteScheduler": "kdpm2_discrete", |
|
"KDPM2AncestralDiscreteScheduler": "kdpm2_ancestral_discrete", |
|
"EulerDiscreteScheduler": "euler_discrete", |
|
"EulerAncestralDiscreteScheduler": "euler_ancestral_discrete", |
|
"HeunDiscreteScheduler": "heun_discrete", |
|
"LMSDiscreteScheduler": "lms_discrete", |
|
"PNDMScheduler": "pndm", |
|
"DEISMultistepScheduler": "deis_multistep", |
|
"DPMSolverSDEScheduler": "dpm_solver_sde", |
|
"UniPCMultistepScheduler": "uni_pc_multistep" |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
device = "cpu" |
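
# Use the GPU when CUDA is available. When running on the CPU, the default
# model is switched to SDXL Turbo below, since it is the fastest option here.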
|
|
|
if torch.cuda.is_available(): |
|
|
|
device = "cuda" |
|
|
|
    # Assigning a Python variable named PYTORCH_CUDA_ALLOC_CONF has no effect on
    # its own; the CUDA caching allocator reads the environment variable of that
    # name (ideally set before CUDA is first initialized).
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:" + str(pytorch_cuda_alloc_conf_max_split_size_mb)
|
torch.cuda.max_memory_allocated( |
|
device = device |
|
) |
|
torch.cuda.empty_cache() |
|
|
|
if device == "cpu": |
|
|
|
default_base_model = "sdxl_turbo" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Determine whether this script is running in its Hugging Face Space. os.uname()
# does not exist on Windows, and the hostname check only matches the Space, so
# anything else leaves this set to 0.
script_being_run_on_hugging_face = 0

try:
    if (str(os.uname()).find("magicfixeseverything") >= 0):
        script_being_run_on_hugging_face = 1
except:
    script_being_run_on_hugging_face = 0
|
|
|
if script_being_run_on_hugging_face == 1: |
|
|
|
allow_other_model_versions = 0 |
|
|
|
|
|
|
|
|
|
|
|
default_prompt = "" |
|
default_negative_prompt = "" |
|
|
|
|
|
|
|
default_width = 768 |
|
default_height = 768 |
|
|
|
minimum_width = 64 |
|
minimum_height = 64 |
|
|
|
maximum_width = 2048 |
|
maximum_height = 2048 |
|
|
|
|
|
|
|
default_base_model_steps = 50 |
|
default_base_model_steps_for_sdxl_turbo = 2 |
|
|
|
maximum_base_model_steps = 150 |
|
maximum_base_model_steps_for_sdxl_turbo = 25 |
|
|
|
|
|
|
|
default_guidance_scale = 7.5 |
|
|
|
minimum_guidance_scale = 0 |
|
maximum_guidance_scale = 30 |
|
|
|
guidance_scale_input_slider_steps = 0.25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_seed_value = "random" |
|
maximum_seed = 1000000000000000000 |
|
|
|
|
|
|
|
add_generation_information_to_image = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
enable_refiner = 1 |
|
enable_upscaler = 1 |
|
|
|
|
|
|
|
default_refiner_selected = 0 |
|
default_upscaler_selected = 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
default_refiner_accordion_open = 1 |
|
default_upscaler_accordion_open = 1 |
|
|
|
|
|
|
|
default_refiner_denoise_start = 0.95 |
|
|
|
minimum_refiner_denoise_start = 0.01 |
|
maximum_refiner_denoise_start = 0.99 |
|
|
|
refiner_denoise_start_input_slider_steps = 0.01 |
|
|
|
|
|
|
|
|
|
default_refining_steps_for_online_config_field = 100 |
|
maximum_refining_steps_for_online_config_field = 100 |
|
|
|
|
|
|
|
|
|
|
|
maximum_upscaler_steps = 150 |
|
default_upscaler_steps = 15 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
use_xformers = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
use_default_attn_processor = 0 |
|
|
|
|
|
|
|
display_xformers_usage_in_prompt_info = 1 |
|
include_transformers_version_in_prompt_info = 1 |
|
display_diffusers_version_in_prompt_info = 1 |
|
display_default_attn_processor_usage_in_prompt_info = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
use_sequential_cpu_offload_for_base_model = 1 |
|
use_sequential_cpu_offload_for_refiner = 1 |
|
use_sequential_cpu_offload_for_upscaler = 1 |
|
|
|
use_model_cpu_offload_for_base_model = 0 |
|
use_model_cpu_offload_for_refiner = 0 |
|
use_model_cpu_offload_for_upscaler = 0 |
|
|
|
|
|
|
|
use_torch_compile_for_base_model = 0 |
|
use_torch_compile_for_refiner = 0 |
|
use_torch_compile_for_upscaler = 0 |
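
# Sequential CPU offload and model CPU offload are mutually exclusive for a
# given pipeline; if both are set to 1, sequential offload is turned off
# further below. All offloading and xformers are disabled when running on
# the CPU.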
|
|
|
if default_base_model == "sdxl": |
|
|
|
|
|
|
|
default_width = 1024 |
|
default_height = 1024 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
width_and_height_input_slider_steps = 8 |
|
|
|
|
|
|
|
maximum_prompt_characer_count = 1250 |
|
maximum_neg_prompt_characer_count = 1000 |
|
|
|
|
|
|
|
opening_html = "" |
|
ending_html = "" |
|
|
|
max_queue_size = max_queue_size_if_torch |
|
|
|
if device == "cpu": |
|
|
|
if script_being_run_on_hugging_face == 1: |
|
|
|
        opening_html = "<span style=\"font-weight: bold; color: #c00;\">This app is extremely slow.</span> This app is not running on a GPU. The first time it loads after the space is rebuilt, it might take 10 minutes to generate an SDXL Turbo image. After that, it may take around 3 minutes to do two steps (with no refining or upscaling). For other models, it may take an hour to create a single image. Want apps that work fast? Use the apps this one is based on: <a href=\"" + sdxl_link + "\" target=\"_blank\">Stable Diffusion XL</a>, <a href=\"" + photoreal_link + "\" target=\"_blank\">PhotoReal with SDXL 1.0 Refiner</a> and <a href=\"" + sdxl_turbo_link + "\" target=\"_blank\">SDXL Turbo Unofficial Demo</a>. This app is designed to give more options, but it is too slow to operate and test on a CPU. Some features are either unavailable or more limited in the online version of this app (such as smaller allowed image dimensions and fewer allowed steps). This app is still in development."
|
|
|
else: |
|
|
|
        opening_html = "<span style=\"font-weight: bold; color: #c00;\">This app is currently running on a CPU.</span> If you have an NVIDIA graphics card, make sure you have torch installed with CUDA support so that your GPU can be used to create imagery. Otherwise, image generation will be extremely slow."
|
|
|
max_queue_size = max_queue_size_if_cpu |
|
|
|
|
|
|
|
ending_html += """<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFUAAABkCAMAAADADYxgAAABelBMVEVzQShrNiBOLB2FSSlaNSPm4uCVZ0RhQjVCJhtcKBc3HhN2TkN8WE2DYVtwRjl8TjOHWjtHGRA4DQivgltQHxLz6OD68+onGRYvWZGlWibswHv78dxOOzKSVDLwzY+idlNrU0ytZDCaUiPZrkrYnFe6jmLhpVvVro7Cl2zWomq4bjIOX7T158vAg1HAdzfR1dzntm6sdk4fc8jWk0rHgUC80OMda7vFoHXkrWWBrtvxyITSqHbOikW2dULcsHydXzSIUDvLj1OjbEP01Zvdq3BGCAqWdnKVamWRWUXJlF/w3ruVc1pGRk2sw9rcvZzeuYydhoAyKSjLo4KKbWjMoUCubjxaT1Lnv5y3knEaX6k8PEGkfGvDlzt2HBpaEBB+WSLy2q3kzam2jDSWbimQKyUUVJhHXH/DlX2nOTCnfS4mSG+slJBniaK5UkAnfdMBZszt7e5FjdRoods2hNTk6Oxcmtl2qNuuyeXJ2emRuN+gwOFSlNfb4+vS3ur8OZTmAAAAbnRSTlPj4+Pj4/vk4+Pj4+Pk4+Pj5OPj4+P7++P14+Pj5OPj4+Tj4+Pj4+Pj4+Pj++Pj4/vj4/7j4/775OP+4+Pj4+Pj4+Pj4+Pj4+Pj4+Pj4/zj4+Tk4+Pj4+bj4/nl5OPj4+Pj4+Pj4/Xx4+Pj7+Py4w/umxAAAA4ISURBVGje3NJBbgUhCAbgXoqFlyKAioB69+LbvMxmpi9NNyUZk39IPtH4hX9R/1aVxZfMnlkUsdP8hUpXtVNHHJa81huVjWgVrDuMmp/hiHZK0cjMUvX8L4jeTltzVWi0a193s0abXIxr2yqDBGH0vgMLuTgd9WRApdILSVXSymD8cAOFFoRgbQVzcZwe0ALHzh4clV8nHm2M/N43cK/WWWKTvFQm5xbaLdVVr+qSrPlTdWzpetSlAjQ7KZcWqYEPeqtKLmVJYs5ni6dZ3dZ2rC1gbcU6lnkEogwIMOyQqmfWvUwx+yYoBjiBH19WqnrJMmada+Bdfa7yICLgT9Xnqt+00TFrg0AYxvE3Koe6GKtDe5ALoj0SoWKIUDGGoOmSPW6BQjv1+3+BPseRiIkp7dB/Lg4Ov/d4xff6g/r4H5H78EO+Lrzk/ybXJfcu24vOuRCpR98Y6vtku7eur442HY/NPPKmnnOdVh38bsfQKAtSm22SSFnmec7f2kPOn8kZzRgUhnR8N4Zuf8/Q2db1OkESlcA5ZxM0EHHZwQA1guLj54AFqU2gXVHUOGsd/JILsVFwnzWxLPxxLhkWxXH8Zdn2mXVdbU4WK7bNiizNsjRNi6apkUwkr0S1CYKAkUrDdBXexOiDcF14OEBt2zYMdoi6LoqiLnpSRWlRYxGllDmvKsGBL6beaOQBjlUntdwHwBBhojZod7tdu2oZM01zFm1fmiZZljlcybEHUXERTO9Fp1illqszVBa1KzYz2XyO0cxkGIDrZkWyTEq4HO5+L8QruxN901W/rYkDQRjAN3GZBBOTPfKinKeuLabaIxZFwWKrxT8vTgMV5ArCFVp7r+77f4F7ZrcxXuGGtNCW/jp9dnby+v7TsH8kNEtKpalJSI5IITalKlxXV1+/fl+sOw+j67s6R7BqrZCwrTQ99CaT/b7b7T49PR1F1flj2ffXD1MkvT0JhXgAR/haOPgzRM1m8+vlLdodjzpwW2h39bbdbrNTfSsKN9b9/fITMFKQQNXhkBJIPkklI4yJ4wolHFGJFdFyOBt31uuOZYGyUVapgn19fwHLIybpmCWUAJUoISPXhStwrjxBODZ6fJ49sIlRwJlts/+oKKTwYkdBTrYZDh0G9+250udQHLBw8Vktk3SDOUa2gzrYz81mtqzKKSACq2qYglXu03fhmr5RDKvhEGN2e8sNo1aIFdm+HY9HHBMOaz+ZHA5WtSm8/0YCXS1AojyXSd/MsOuFHverCCUqV8+LPl+LwRT97ia9XpomtrTWhBJfqoVr2pPO+YWu+vyTi4uLalQLIhEoZjngy03/O3K442zT0uNSKIGFWC3KN6rRfS7bKasXbp6HkccJk+bb9nwzY/YabAqx8FB8aQWWbulyguciV6PBbjX38jAIpStEhdDacgaWB3f6ccMK0BZWItiiV9tmlRlfNhr8eJ7Hu6fW8H4FQR65Dthl/2Y+n4PtYBLuP5H4Z/Fh3AKVLNgsTMxe1A7bERIOIz/MoyAPsEorNOxvbtb1+c2iU5/OqTSZRJkV+cEatNwwxgzCWq0Wgo1DV/4KPMpjyW5ztlkPBrhf9WmdDFqaiMmMvE2B20OdVJhBCNWwQkUuUewRhfhVpPvY74wwslgyBNaobBpDYGmwi0PzoRaotGgI1BSPq/IEFqNSpCHERMP7RWc0wO5KmD2pLqvL5RKblLstWsXDqoyCQkUFzAodqJgURiAn0r3dfDFmdkeWhQrBqEPU0rBFq9KiXgC1bDYS2GGIQMGLKZ10u7v5ff9hzatrwgMLtlB9kaDAcrbSObWKiqxauIHg5aIh6wS5xthel/37xWiAl80KGyAlJYRVfVZxiYdUsSq+g4dRpFqytlmwmgTugOAxcCo4sPn6uo5XTfYt2x5UbFX/Q01LtQwAKuqsWbAgVZyksZlHgZfkeN1CAi3sw6xL/6iJ7dWBKq3qQI2gcrXP2EjECSmNdzafDYb28ZI3DF40mVUtyirqpNpYJXr1CrU8MbDcLNGEkC6vL0Q7W2DDTN+y7JioM1Wzqoln1rEqn9VJDUuV2VgdNE0SBRZHl/bS2QJTUF/t9gkp4ZSqZvSHVSUadRFAqX4eL6RKacqdauS67G/GnXrrjrchxUb1jZprraHGgll7q4TZAGBLF9Vue42GK9Fp0ktiokTHmILZ5mHQutsBLVtl9W9d9v7TNBTFAfy2HW23tWu1Ao5s7hEm7cgcqGwu0zGM4kiYMI0vjP7kKz6GApKg8X/3e85pN0D8KsapfDg99/bcDhHewghUKlQjFezUxby2bZlkKqwF7ZqpsG3DklF8toEnOjwnTncrq/HI9WAoPljhMiD7QEwZjzrfGbh+Khb3QdDuho3bEVTsVUJpRxZSOqlujOKwgwpX2fOcjK6DJZQKoLNHqZzXR4lmO8SZ6RlBuNXrRRhcO4wiVhmN0jPKQfCVclheX8WqTuGjldicAoik+bEuVwqDftBum+hqsbG18QCPnSs7ptIEdZ8+dS1dOajGU5bFqsZtZUR+lWLRG2lLAX03w3BgdsPQ9IpGdSPawCAIg7agOXdzd3ezrKtsDrEQjyYvVJWCiLqu8EOLsFrKlgN83k4NwnbJDLrdmmka1cpWD/vV9HcUq1l388uXXbegICL0dXxqLKkY4fHpmMYVO05Wpaix8/OjQiGjD8InH4P73agdmOvVfH0RDzCB6nkyO5zNXaiWgkiRWlElqiV0HgGrazi4HAuqbtkjqFbWDPHcE9S63aC0Xq3W7+KZvq1CHwsix
e6SmuLwuef51E0UBpPYEW2mrPsUB0GKZi5M/KtSuPT4xVKjF9WCCql4x3TNq5lQudinm5uk6gijjvL4KCdVt0fzI9uOVadsyTGucln3NY6BFy+ghvef5aGuXF/psSosbwIVv6KJ54maTsvsLtBf0TW5wqI5qMV9/WQJ7FYURevP8Ex/e2FxpSYdkGJRRU5RFbR5HalVYzVOvKxPMVjK5ZzFTwePXuJp8slON4q66xWoVGugdak6Xp6y69JqFRBSy2V5VwYVf80mohx385EMrHKW0Ncflx4vPVnqQa1AvXH3zsJKSXV5spCa5WO+bImKJKqmWZPkWEWzyHVcVnH2kfpAVLxPMr2Q6pAFAlQ4pVoKrGJV5eTeUKKCpQCF+vIJ9sBO70EU3YvVaxiLokozn2/HqgX0rCqshYveJdahJCz2QO/B4kaiLgQ1H6qkYD2HarHKIVOaAJVuf5RK6hewOFw89FV6wOrGjaQDUZcWK1EL29tQKRereOXSAqHYTewWjEqj3x8MzODjzotedKOer+Bd3a3rC42FbjKwR3ufr75//ypWC/TBpqhggWoKd3F/gLhQNWUgcM1g6cWDjXplqoaE7n1+/2pZopiUnFI5GhyOUoZHpcsrsOFO1KhM1MWFml74/J641s2bYwSqBM2FyncBq3LoMqQhWWfKFs2gRxsAap3U6wu7n1mUb4seHHdUgiKJCpQtCf8O8xAsvWJ2tbtVn6gr7a+t5WUif3eGzRmKKkwDjVR8CMrjO80dwaKB5fJZjRoTNfra4us++MWgqJgiNtbOLuztyYZNVIRUJEWoS7XaeqziTVxe1NdiHjfF+yNqhjMa7YmqmFNQwclU4I2m0JlL85mUqI3GaiVfhVv/0Grh0jszZ6PmKSN7b297G6rG1RJOyuUr2qmkMrZlAYV6r5FaJ9W8SuhBc3butHnEKh2e24jUSmqOa9b1y2nFX8P3MbLo8TunSF1/FugKqv8K6364BlJUMeNageLmZdWAZmie4xELN2NlffQUEysLE62FSR0w9VS+6hD6c2YO5PlaUSmpZbBSK/1wiVXUVVsHlsPBZdEUyHqsVhZ9W8+XX2E7dWbn5uZmJ+pwOFyT1RIVA8yKTVU1cTAY0mX+vh+mDFBSpdRFZae40hOgCNeLNPf393/Eqk1T0cGsj2s1qvdMJwuVIvcvHphY7fNa3YO6R5UOZ/+j0skHlT7HVxBZvdHAK2GpYsRj1e/zWlUic0Tor9lYnf1HRej9k494YEW9bSLcW0FxZCdqsbJeiYL3y3T5iKCiSl+nKhUDFSJMUldLpdVVg0tNUKTfN4pIZesrLxQys/br5KQznJkVtYnIHWsTKxWdVlcbjXsGt5RJF/EFNSq7rRa2FMyf+3EO1/5ZLTs+/A1R8ZxzG596+8Y6UDYFdQg1EBPoIaFjFiXD02r8XE3REzVfryutkqcRyypcbipMzgfcpmu4fKp0/BP/e3STtfOqxDY4eah5TasaMl49Th8x4pSxUY+pp8DGM7RSTRQ9vljNkKqMKqlVVJlOC3s6Rfp5la8fauf4eDjH6/8/NYPOylrR4ORvQ0ClnDGR58ut8a85qPEe6Bz8AHqBqmtv3n1rBIa0FaxKQ03moBIxydXl8Y8ZqNhMncN9yQVqJv0J+dZYFRQt4FLPqFO4TKVyneimeIcH5zugY3hq75BPgyKriKFIvcQq31xEJvCH5ZvoKuU37dMhLdfsj3Mq3sb302+Qd0qJil+AkiqNFTVxvVctbACwsgXiQXB4Xq0Fg/RbYjWNVWxTlIokKo+xifsc9+raRD0U9YT6gJInaikIzPRbSkqDh+2qYvWKqOpMrWjAmD6PXGrrz7Vm82TM3aUtlqi+aQ7SD4E+TE9U7bQKVNRpAwiF2oEhuUnu96mKaeR7+kOKLmVVtbOqEXegyD+z2AFDRvHjODY7RydUtqi/WUXOqAapmqjnS5W2NsWkvdU57pw0+fY6OjqaQ68RvIBqWbEK7SLVmPaV9xUuESx9UCBOMvmDv2JFnMdVezSBAAAAAElFTkSuQmCC" alt="Spaghetti AI Logo" title="Spaghetti AI Logo" style="float: left; padding: 5px 15px 15px 0px;"> |
|
|
|
Tokens are not individual characters. If the prompt is too long, the display will show you which part of it was not used. Changing the image dimensions alone will change the generated image. For some models, trying to make a large image, such as 1024x1024, may add extra people and come out worse than using smaller dimensions."""
|
|
|
if enable_longer_prompts == 1: |
|
|
|
ending_html += """ |
|
|
|
Longer prompts are not available for most models. When they are available, they are handled using the method described <a href=\"https://github.com/huggingface/diffusers/issues/2136#issuecomment-1514969011\" target=\"_blank\">here</a>. Another method will likely be added for SDXL models in this app eventually."""
|
|
|
ending_html += """ |
|
|
|
The original script for this app was written by <a href=\"https://huggingface.co/Manjushri\" target=\"_blank\">Manjushri</a>.""" |
|
|
|
|
|
|
|
refiner_on_text = "Refiner is on. " |
|
refiner_off_text = "Refiner is off. " |
|
|
|
upscaler_on_text = "Upscaler is on. " |
|
upscaler_off_text = "Upscaler is off. " |
|
|
|
number_of_reserved_tokens = 2 |
|
|
|
|
|
|
|
generate_image_button_normal_text = "Generate Image" |
|
generate_image_button_in_progress_text = "Generating..." |
|
|
|
cancel_image_button_text = "Cancel" |
|
cancel_image_button_in_progress_text = "Cancelling..." |
|
|
|
|
|
|
|
gradio_image_component_height = 300 |
|
gradio_image_gallery_component_height = 350 |
|
gradio_extra_image_component_height = 250 |
|
|
|
|
|
|
|
canceled_image_in_queue_message = "Due to how the queue works in this app, you need to reload the page after canceling an image. Otherwise, you will not be able to generate another image until you reach the position in the queue you were originally in. At that point, the button to generate an image will appear again. You can create a link on this page that saves what you have entered and then visit that link to generate the image."
|
|
|
canceled_image_in_process_of_being_generated = "<div style=\"text-align: center;\">Image generation will be canceled once the current step completes.</div>" |
|
|
|
|
|
|
|
base_model_field_key_in_url = "model" |
|
download_data_key_in_url = "download_data" |
|
model_configuration_key_in_url = "model_config" |
|
prompt_field_key_in_url = "prompt" |
|
negative_prompt_field_key_in_url = "neg_prompt" |
|
allow_longer_prompts_for_sd_1_5_based_models_key_in_url = "longer_prompt_for_sd_1_5" |
|
scheduler_field_key_in_url = "scheduler" |
|
image_width_field_key_in_url = "width" |
|
image_height_field_key_in_url = "height" |
|
guidance_scale_field_key_in_url = "guidance" |
|
steps_key_in_url = "steps" |
|
seed_field_key_in_url = "seed" |
|
add_seed_key_in_url = "add_seed" |
|
use_torch_manual_seed_but_not_in_generator_key_in_url = "use_torch_manual_seed_but_not_in_generator" |
|
refiner_key_in_url = "refiner" |
|
refiner_denoise_start_key_in_url = "denoise_start" |
|
refining_steps_option_key_in_url = "refiner_steps_option" |
|
refining_steps_key_in_url = "refiner_steps" |
|
use_denoising_start_in_base_model_when_using_refiner_key_in_url = "use_denoise_end" |
|
base_model_output_to_refiner_is_in_latent_space_key_in_url = "latent_space_before_refiner" |
|
upscaler_key_in_url = "upscaler" |
|
upscaling_steps_key_in_url = "upscaler_steps" |
|
show_base_image_when_using_refiner_or_upscaler_key_in_url = "show_base_image_when_using_refiner_or_upscaler" |
|
show_refined_image_when_using_upscaler_key_in_url = "show_refined_image_when_using_upscaler" |
|
create_preview_images_key_in_url = "do_preview" |
|
do_not_create_refining_preview_images_key_in_url = "no_refining_preview" |
|
do_not_create_upscaling_preview_images_key_in_url = "do_upscaling_preview" |
|
theme_key_in_url = "theme" |
|
special_theme_key_in_url = "__theme" |
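
# The values above are the querystring parameter names used when reading
# settings from, or building, a shareable page link. For example, a link
# might end with something like (hypothetical values):
#   ?model=sdxl&prompt=a%20cat&width=1024&height=1024&steps=50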
|
|
|
|
|
|
|
prompt_textbox_label_with_length_limit = "Prompt (77 token limit):" |
|
prompt_textbox_label_with_no_length_limit = "Prompt:" |
|
|
|
negative_prompt_textbox_label_with_length_limit = "Negative Prompt (77 token limit):" |
|
negative_prompt_textbox_label_with_no_length_limit = "Negative Prompt:" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if default_add_seed_into_pipe == 1: |
|
|
|
default_use_torch_manual_seed_but_do_not_add_to_pipe = 0 |
|
|
|
|
|
|
|
if script_being_run_on_hugging_face == 1: |
|
|
|
|
|
|
|
use_custom_hugging_face_cache_dir = 0 |
|
auto_save_imagery = 0 |
|
show_messages_in_modal_on_page = 0 |
|
show_messages_in_command_prompt = 1 |
|
|
|
only_use_local_files = 0 |
|
|
|
if device == "cpu": |
|
|
|
|
|
|
|
|
|
use_torch_compile_for_base_model = 0 |
|
use_torch_compile_for_refiner = 0 |
|
use_torch_compile_for_upscaler = 0 |
|
|
|
show_image_creation_progress_log = 1 |
|
|
|
minimum_width = 256 |
|
minimum_height = 256 |
|
|
|
maximum_width = 768 |
|
maximum_height = 768 |
|
|
|
minimum_guidance_scale = 1 |
|
maximum_guidance_scale = 15 |
|
|
|
maximum_base_model_steps = 30 |
|
|
|
maximum_base_model_steps_for_sdxl_turbo = 5 |
|
|
|
minimum_refiner_denoise_start = 0.70 |
|
|
|
maximum_upscaler_steps = 15 |
|
default_upscaler_steps = 10 |
|
|
|
|
|
|
|
ending_html = """ |
|
If you would like to run this <a href="https://huggingface.co/spaces/magicfixeseverything/ai_image_creation/tree/main" target="_blank">app</a> offline on a Windows computer that has an NVIDIA graphics card, click <a href=\"https://huggingface.co/spaces/magicfixeseverything/ai_image_creation/resolve/main/spaghetti_ai.zip\">here</a> to download it.
|
|
|
""" + ending_html |
|
|
|
|
|
|
|
if default_width < minimum_width: |
|
default_width = minimum_width |
|
|
|
if default_height < minimum_height: |
|
default_height = minimum_height |
|
|
|
|
|
|
|
if default_width > maximum_width: |
|
default_width = maximum_width |
|
|
|
if default_height > maximum_height: |
|
default_height = maximum_height |
|
|
|
|
|
|
|
if default_base_model_steps > maximum_base_model_steps: |
|
default_base_model_steps = maximum_base_model_steps |
|
|
|
if default_base_model_steps_for_sdxl_turbo > maximum_base_model_steps_for_sdxl_turbo: |
|
default_base_model_steps_for_sdxl_turbo = maximum_base_model_steps_for_sdxl_turbo |
|
|
|
|
|
|
|
if default_guidance_scale < minimum_guidance_scale: |
|
default_guidance_scale = minimum_guidance_scale |
|
|
|
if default_guidance_scale > maximum_guidance_scale: |
|
default_guidance_scale = maximum_guidance_scale |
|
|
|
|
|
|
|
if default_upscaler_steps > maximum_upscaler_steps: |
|
default_upscaler_steps = maximum_upscaler_steps |
|
|
|
|
|
|
|
only_use_local_files_bool = False |
|
|
|
if only_use_local_files == 1: |
|
|
|
only_use_local_files_bool = True |
|
|
|
|
|
|
|
if allow_other_model_versions == 0: |
|
|
|
base_model_array = [ |
|
"sdxl", |
|
"photoreal", |
|
"sdxl_turbo" |
|
] |
|
|
|
base_model_object_of_model_configuration_arrays = { |
|
"sdxl": [ |
|
"sdxl_default" |
|
], |
|
"photoreal": [ |
|
"photoreal_default" |
|
], |
|
"sdxl_turbo": [ |
|
"sdxl_turbo_default" |
|
] |
|
} |
|
|
|
base_model_model_configuration_defaults_object = { |
|
"sdxl": "sdxl_default", |
|
"photoreal": "photoreal_default", |
|
"sdxl_turbo": "sdxl_turbo_default" |
|
} |
|
|
|
|
|
|
|
hugging_face_hub_is_offline = 0 |
|
|
|
if script_being_run_on_hugging_face == 0: |
|
|
|
if ( |
|
("HF_HUB_OFFLINE" in os.environ) and |
|
(int(os.environ["HF_HUB_OFFLINE"]) == 1) |
|
): |
|
|
|
hugging_face_hub_is_offline = 1 |
|
|
|
only_use_local_files = 1 |
|
|
|
# These notes are printed unless they are suppressed.
if suppress_hugging_face_hub_offline_status == 0:
|
|
|
if hugging_face_hub_is_offline == 0: |
|
|
|
        print ("Note: The Hugging Face cache directory does not automatically delete older data. Over time, it could eventually grow to use all the space on the drive it is on. You either need to manually clean out the folder occasionally or see Instructions.txt to learn how to stop automatically updating data once you have downloaded everything you need.")
|
|
|
else: |
|
|
|
print ("You are working offline. Data will not be downloaded. See \"ai_image_creation.bat\" or \"Instructions.txt\" for more info.") |
|
|
|
|
|
|
|
saved_images_dir = main_dir + "/" + saved_images_folder_name |
|
|
|
hugging_face_cache_dir = main_dir + "/" + cache_directory_folder_name |
|
|
|
if not os.path.exists(hugging_face_cache_dir): |
|
os.makedirs(hugging_face_cache_dir) |
|
|
|
|
|
|
|
if auto_save_imagery == 1: |
|
|
|
from datetime import datetime |
|
|
|
|
|
|
|
gradio_temporary_files_dir = os.environ.get("GRADIO_TEMP_DIR") |
|
|
|
if use_custom_temporary_files_folder == 1: |
|
|
|
gradio_temporary_files_dir = main_dir + "/" + gradio_temporary_files_folder_name |
|
|
|
if not os.path.exists(gradio_temporary_files_dir): |
|
os.makedirs(gradio_temporary_files_dir) |
|
|
|
os.environ["GRADIO_TEMP_DIR"] = gradio_temporary_files_dir |
|
|
|
|
|
|
|
if device == "cpu": |
|
|
|
use_sequential_cpu_offload_for_base_model = 0 |
|
use_sequential_cpu_offload_for_refiner = 0 |
|
use_sequential_cpu_offload_for_upscaler = 0 |
|
|
|
use_model_cpu_offload_for_base_model = 0 |
|
use_model_cpu_offload_for_refiner = 0 |
|
use_model_cpu_offload_for_upscaler = 0 |
|
|
|
use_xformers = 0 |
|
|
|
|
|
|
|
if ( |
|
(use_sequential_cpu_offload_for_base_model == 1) and |
|
(use_model_cpu_offload_for_base_model == 1) |
|
): |
|
|
|
use_sequential_cpu_offload_for_base_model = 0 |
|
|
|
if ( |
|
(use_sequential_cpu_offload_for_refiner == 1) and |
|
(use_model_cpu_offload_for_refiner == 1) |
|
): |
|
|
|
use_sequential_cpu_offload_for_refiner = 0 |
|
|
|
if ( |
|
(use_sequential_cpu_offload_for_upscaler == 1) and |
|
(use_model_cpu_offload_for_upscaler == 1) |
|
): |
|
|
|
use_sequential_cpu_offload_for_upscaler = 0 |
|
|
|
|
|
|
|
def error_function( |
|
text_message |
|
): |
|
|
|
print (text_message) |
|
|
|
raise Exception(text_message) |
|
|
|
|
|
|
|
additional_prompt_info_html = "" |
|
|
|
if auto_save_imagery == 1: |
|
|
|
additional_prompt_info_html = " The image, and a text file with generation information, will be saved automatically." |
|
|
|
else: |
|
|
|
canceled_image_in_queue_message += " Make sure to save any imagery before reloading the page." |
|
|
|
|
|
|
|
if use_xformers == 1: |
|
|
|
from xformers.ops import MemoryEfficientAttentionFlashAttentionOp |
|
|
|
if use_default_attn_processor == 1: |
|
|
|
from diffusers.models.attention_processor import AttnProcessor |
|
|
|
|
|
|
|
if ( |
|
default_base_model and |
|
(default_base_model in base_model_object_of_model_configuration_arrays) and |
|
(default_base_model in base_model_model_configuration_defaults_object) |
|
): |
|
|
|
default_model_configuration = base_model_model_configuration_defaults_object[default_base_model] |
|
|
|
if default_model_configuration in model_configuration_names_object: |
|
|
|
default_model_configuration_choices_array = [] |
|
|
|
for this_model_configuration in base_model_object_of_model_configuration_arrays[default_base_model]: |
|
|
|
if this_model_configuration in model_configuration_names_object: |
|
|
|
default_model_configuration_choices_array.append( |
|
model_configuration_names_object[this_model_configuration] |
|
) |
|
|
|
else: |
|
|
|
error_function("A default model version must be properly named in the code.") |
|
|
|
else: |
|
|
|
error_function("A default model version must be properly configured in the code.") |
|
|
|
else: |
|
|
|
error_function("A default base model must be properly configured in the code.") |
|
|
|
|
|
|
|
default_base_model_nicely_named_value = base_model_names_object[default_base_model] |
|
|
|
default_model_configuration_nicely_named_value = model_configuration_names_object[default_model_configuration] |
|
|
|
|
|
|
|
if not ( |
|
default_scheduler and |
|
default_scheduler in scheduler_long_names_object |
|
): |
|
|
|
error_function("A default scheduler must be properly configured in the code.") |
|
|
|
default_scheduler_nicely_named_value = scheduler_long_names_object[default_scheduler] |
|
|
|
|
|
|
|
if enable_refiner != 1: |
|
|
|
default_refiner_selected = 0 |
|
|
|
if enable_upscaler != 1: |
|
|
|
default_upscaler_selected = 0 |
|
|
|
|
|
|
|
default_refine_option = "No" |
|
|
|
if default_refiner_selected == 1: |
|
|
|
default_refine_option = "Yes" |
|
|
|
default_upscale_option = "No" |
|
|
|
if default_upscaler_selected == 1: |
|
|
|
default_upscale_option = "Yes" |
|
|
|
|
|
|
|
default_refiner_and_upscaler_status_text = "" |
|
|
|
|
|
|
|
default_use_denoising_start_in_base_model_when_using_refiner_is_selected = False |
|
|
|
if default_use_denoising_start_in_base_model_when_using_refiner == 1: |
|
|
|
default_use_denoising_start_in_base_model_when_using_refiner_is_selected = True |
|
|
|
default_base_model_output_to_refiner_is_in_latent_space_is_selected = False |
|
|
|
if default_base_model_output_to_refiner_is_in_latent_space == 1: |
|
|
|
default_base_model_output_to_refiner_is_in_latent_space_is_selected = True |
|
|
|
|
|
|
|
default_base_model_output_in_latent_space_note_field_row_visibility = False |
|
|
|
if ( |
|
(default_refiner_selected == 1) and |
|
    (default_base_model in base_models_not_supporting_base_model_output_in_latent_space_to_refiner_object) and
|
(default_base_model_output_to_refiner_is_in_latent_space == 1) |
|
): |
|
|
|
default_base_model_output_in_latent_space_note_field_row_visibility = True |
|
|
|
|
|
|
|
refiner_accordion_visible = True |
|
|
|
if enable_refiner != 1: |
|
|
|
refiner_accordion_visible = False |
|
|
|
refiner_accordion_open = False |
|
|
|
if default_refiner_accordion_open == 1: |
|
|
|
refiner_accordion_open = True |
|
|
|
|
|
|
|
refiner_group_visible = False |
|
|
|
if enable_refiner == 1: |
|
|
|
refiner_group_visible = True |
|
|
|
if default_refiner_selected == 1: |
|
|
|
default_refiner_and_upscaler_status_text += refiner_on_text |
|
|
|
else: |
|
|
|
default_refiner_and_upscaler_status_text += refiner_off_text |
|
|
|
|
|
|
|
upscaler_accordion_open = False |
|
|
|
if ( |
|
(default_upscaler_selected == 1) or |
|
(default_upscaler_accordion_open == 1) |
|
): |
|
|
|
upscaler_accordion_open = True |
|
|
|
upscaler_group_visible = False |
|
|
|
if enable_upscaler == 1: |
|
|
|
upscaler_group_visible = True |
|
|
|
if default_upscaler_selected == 1: |
|
|
|
default_refiner_and_upscaler_status_text += upscaler_on_text |
|
|
|
else: |
|
|
|
default_refiner_and_upscaler_status_text += upscaler_off_text |
|
|
|
|
|
|
|
default_negative_prompt_field_row_visibility = True |
|
default_negative_prompt_for_sdxl_turbo_field_row_visibility = False |
|
default_base_model_steps_field_row_visibility = True |
|
default_base_model_steps_field_for_sdxl_turbo_field_row_visibility = False |
|
default_guidance_scale_field_row_visibility = True |
|
default_guidance_scale_for_sdxl_turbo_field_row_visibility = False |
|
|
|
if default_base_model == "sdxl_turbo": |
|
|
|
default_negative_prompt_field_row_visibility = False |
|
default_negative_prompt_for_sdxl_turbo_field_row_visibility = True |
|
default_base_model_steps_field_row_visibility = False |
|
default_base_model_steps_field_for_sdxl_turbo_field_row_visibility = True |
|
default_guidance_scale_field_row_visibility = False |
|
default_guidance_scale_for_sdxl_turbo_field_row_visibility = True |
|
|
|
|
|
|
|
default_refining_use_denoising_start_in_base_model_when_using_refiner_field_row_visibility = True |
|
|
|
if default_base_model in base_models_not_supporting_denoising_end_for_base_model_object: |
|
|
|
default_refining_use_denoising_start_in_base_model_when_using_refiner_field_row_visibility = False |
|
|
|
|
|
|
|
default_dark_theme_is_selected = False |
|
|
|
if default_dark_theme == 1: |
|
|
|
default_dark_theme_is_selected = True |
|
|
|
|
|
|
|
default_allow_longer_prompts_row_visibility = True |
|
|
|
if enable_longer_prompts == 0: |
|
|
|
allow_longer_prompts_for_sd_1_5_based_models = 0 |
|
|
|
default_allow_longer_prompts_row_visibility = False |
|
|
|
default_allow_longer_prompts_for_sd_1_5_based_models_is_selected = False |
|
|
|
prompt_textbox_label_to_use = prompt_textbox_label_with_length_limit |
|
negative_prompt_textbox_label_to_use = negative_prompt_textbox_label_with_length_limit
|
|
|
if allow_longer_prompts_for_sd_1_5_based_models == 1: |
|
|
|
default_allow_longer_prompts_for_sd_1_5_based_models_is_selected = True |
|
|
|
prompt_textbox_label_to_use = prompt_textbox_label_with_no_length_limit |
|
negative_prompt_textbox_label_to_use = prompt_textbox_label_with_no_length_limit |
|
|
|
|
|
|
|
default_add_seed_into_pipe_is_selected = False |
|
|
|
if default_add_seed_into_pipe == 1: |
|
|
|
default_add_seed_into_pipe_is_selected = True |
|
|
|
|
|
|
|
default_use_torch_manual_seed_but_do_not_add_to_pipe_is_selected = False |
|
|
|
if default_use_torch_manual_seed_but_do_not_add_to_pipe == 1: |
|
|
|
default_use_torch_manual_seed_but_do_not_add_to_pipe_is_selected = True |
|
|
|
|
|
|
|
default_save_base_image_when_using_refiner_or_upscaler_is_selected = False |
|
|
|
if default_save_base_image_when_using_refiner_or_upscaler == 1: |
|
|
|
default_save_base_image_when_using_refiner_or_upscaler_is_selected = True |
|
|
|
|
|
|
|
default_create_preview_images_is_selected = False |
|
|
|
if default_create_preview_images == 1: |
|
|
|
default_create_preview_images_is_selected = True |
|
|
|
|
|
|
|
default_do_not_create_refining_preview_images_is_selected = False |
|
|
|
if default_do_not_create_refining_preview_images == 1: |
|
|
|
default_do_not_create_refining_preview_images_is_selected = True |
|
|
|
|
|
|
|
default_do_not_create_upscaling_preview_images_is_selected = False |
|
|
|
if default_do_not_create_upscaling_preview_images == 1: |
|
|
|
default_do_not_create_upscaling_preview_images_is_selected = True |
|
|
|
|
|
|
|
default_save_refined_image_when_using_upscaler_is_selected = False |
|
|
|
if default_save_refined_image_when_using_upscaler == 1:
|
|
|
default_save_refined_image_when_using_upscaler_is_selected = True |
|
|
|
|
|
|
|
default_base_model_choices_array = [] |
|
|
|
stored_model_configuration_names_object = {} |
|
|
|
for this_base_model in base_model_array: |
|
|
|
if this_base_model not in base_model_object_of_model_configuration_arrays: |
|
continue |
|
|
|
default_base_model_choices_array.append( |
|
base_model_names_object[this_base_model] |
|
) |
|
|
|
stored_model_configuration = base_model_model_configuration_defaults_object[this_base_model] |
|
|
|
stored_model_configuration_names_object[this_base_model] = model_configuration_names_object[stored_model_configuration] |
|
|
|
|
|
|
|
default_scheduler_choices_array = [] |
|
|
|
for this_scheduler in schedulers_array: |
|
|
|
default_scheduler_choices_array.append( |
|
scheduler_long_names_object[this_scheduler] |
|
) |
|
|
|
|
|
|
|
make_seed_selection_a_textbox = 1 |
|
|
|
if maximum_seed <= 9007199254740992: |
|
|
|
make_seed_selection_a_textbox = 0 |
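
# 9007199254740992 is 2**53. Seeds above that cannot be represented exactly as
# JavaScript numbers, so a textbox is used for seed entry when the configured
# maximum seed is larger.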
|
|
|
|
|
|
|
current_preview_image = "" |
|
previous_preview_image = None |
|
current_preview_image_user_id = 0 |
|
|
|
|
|
|
|
current_image_generation_id_in_progress = 0 |
|
|
|
|
|
|
|
cancel_image_generation_ids_object = {} |
|
cancel_image_generation_times_object = {} |
|
seconds_to_store_cancellations_in_cancel_image_generation_times_object = 86400 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def rounded_number(n, decimals=0): |
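    # Round half away from zero (the built-in round() rounds half to even).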
|
n = float(n) |
|
multiplier = 10**decimals |
|
rounded_abs = (floor(abs(n) * multiplier + 0.5) / multiplier) |
|
rounded_value = round(copysign(rounded_abs, n), decimals) |
|
return rounded_value |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def formatted_number(n, decimals=0): |
|
rounded_value = rounded_number(n, decimals) |
|
formatted_value = '{:.{prec}f}'.format(rounded_value, prec=decimals) |
|
return formatted_value |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_random_seed(): |
|
|
|
maximum_seed_for_random = maximum_seed |
|
|
|
if maximum_seed_for_random > 9007199254740992: |
|
|
|
|
|
|
|
|
|
maximum_seed_for_random = 9007199254740992 |
|
|
|
    # Pick a random seed anywhere within the allowed range.
    actual_seed = random.randint(0, maximum_seed_for_random)
|
|
|
return actual_seed |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def show_message( |
|
message_to_display |
|
): |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print (message_to_display) |
|
|
|
if show_messages_in_modal_on_page == 1: |
|
|
|
gr.Info(message_to_display) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def nice_elapsed_time( |
|
seconds |
|
): |
|
|
|
|
|
|
|
hours = seconds // 3600 |
|
minutes = (seconds % 3600) // 60 |
|
seconds = seconds % 60 |
|
|
|
if hours > 0: |
|
|
|
hours_text = "hr" |
|
|
|
if hours > 1: |
|
|
|
hours_text = "hrs" |
|
|
|
time_html = str(int(hours)) + " " + hours_text + ". " + str(int(minutes)) + " min. " + str(round(seconds, 1)) + " sec." |
|
|
|
elif minutes > 0: |
|
|
|
time_html = str(int(minutes)) + " min. " + str(round(seconds, 1)) + " sec." |
|
|
|
else: |
|
|
|
time_html = str(round(seconds, 2)) + " sec." |
|
|
|
return time_html |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def base_model_valid(base_model_name_value): |
|
try: |
|
base_model_name_value_str = str(base_model_name_value).lower() |
|
if ( |
|
(base_model_name_value_str in base_model_object_of_model_configuration_arrays) and |
|
(base_model_name_value_str in base_model_model_configuration_defaults_object) |
|
): |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def model_configuration_valid( |
|
base_model_name_value, |
|
model_configuration_name_value |
|
): |
|
try: |
|
base_model_name_value_str = str(base_model_name_value).lower() |
|
model_configuration_name_value_str = str(model_configuration_name_value).lower() |
|
for this_base_model in base_model_array: |
|
if this_base_model not in base_model_object_of_model_configuration_arrays: |
|
continue |
|
for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: |
|
if ( |
|
(base_model_name_value_str == this_base_model) and |
|
(model_configuration_name_value_str == this_model_configuration) |
|
): |
|
return True |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def prompt_valid(prompt_field): |
|
try: |
|
prompt_field_str = str(prompt_field) |
|
if len(prompt_field_str) <= maximum_prompt_characer_count: |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def negative_prompt_valid(negative_prompt_field): |
|
try: |
|
negative_prompt_field_str = str(negative_prompt_field) |
|
if len(negative_prompt_field_str) <= maximum_neg_prompt_characer_count: |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def scheduler_valid(scheduler_field): |
|
try: |
|
scheduler_str = str(scheduler_field).lower() |
|
if scheduler_str in scheduler_long_names_object: |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def width_valid(width_num_str): |
|
try: |
|
width_num = int(width_num_str) |
|
if ( |
|
(width_num >= int(minimum_width)) and |
|
(width_num <= int(maximum_width)) and |
|
(width_num % int(width_and_height_input_slider_steps)) == 0 |
|
): |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def height_valid(height_num_str): |
|
try: |
|
height_num = int(height_num_str) |
|
if ( |
|
(height_num >= int(minimum_height)) and |
|
(height_num <= int(maximum_height)) and |
|
(height_num % int(width_and_height_input_slider_steps)) == 0 |
|
): |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def guidance_scale_valid(guidance_scale_str): |
|
try: |
|
guidance_scale_num = float(guidance_scale_str) |
|
guidance_scale_num_times_100 = (guidance_scale_num * 100) |
|
guidance_scale_num_times_100_with_int = int(guidance_scale_num_times_100) |
|
guidance_scale_input_slider_steps_times_100 = (float(guidance_scale_input_slider_steps) * 100) |
|
if ( |
|
(guidance_scale_num >= float(minimum_guidance_scale)) and |
|
(guidance_scale_num <= float(maximum_guidance_scale)) and |
|
(guidance_scale_num_times_100 == guidance_scale_num_times_100_with_int) and |
|
((guidance_scale_num_times_100 % guidance_scale_input_slider_steps_times_100) == 0) |
|
): |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def steps_valid( |
|
steps_num_str, |
|
base_model_name_value |
|
): |
|
try: |
|
steps_num = int(steps_num_str) |
|
base_model_name_value_str = str(base_model_name_value).lower() |
|
if steps_num > 0: |
|
if (base_model_name_value_str == "sdxl_turbo"): |
|
if steps_num <= int(maximum_base_model_steps_for_sdxl_turbo): |
|
return True |
|
else: |
|
if steps_num <= int(maximum_base_model_steps): |
|
return True |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def seed_valid( |
|
seed_num_str |
|
): |
|
try: |
|
seed_num = int(seed_num_str) |
|
if ( |
|
(seed_num >= 0) and |
|
(seed_num <= int(maximum_seed)) |
|
): |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def refiner_denoise_start_valid( |
|
refiner_denoise_start_str |
|
): |
|
try: |
|
refiner_denoise_start_num = float(refiner_denoise_start_str) |
|
refiner_denoise_start_rounded = rounded_number(refiner_denoise_start_str, 2) |
|
|
|
if ( |
|
(refiner_denoise_start_num >= float(minimum_refiner_denoise_start)) and |
|
(refiner_denoise_start_num <= float(maximum_refiner_denoise_start)) and |
|
(int(refiner_denoise_start_rounded % float(refiner_denoise_start_input_slider_steps)) == 0) |
|
): |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def refining_steps_valid( |
|
refining_steps_num_str |
|
): |
|
try: |
|
refining_steps_num = int(refining_steps_num_str) |
|
if ( |
|
(refining_steps_num > 0) and |
|
(refining_steps_num <= int(maximum_refining_steps_for_online_config_field)) |
|
): |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def upscaling_steps_valid( |
|
upscaling_steps_num_str |
|
): |
|
try: |
|
upscaling_steps_num = int(upscaling_steps_num_str) |
|
if ( |
|
(upscaling_steps_num > 0) and |
|
(upscaling_steps_num <= int(maximum_upscaler_steps)) |
|
): |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def theme_valid( |
|
theme_field |
|
): |
|
try: |
|
theme_field_str = str(theme_field).lower() |
|
if ( |
|
(theme_field_str == "light") or |
|
(theme_field_str == "dark") |
|
): |
|
return True |
|
else: |
|
return False |
|
except ValueError: |
|
return False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def numerical_bool( |
|
original_value |
|
): |
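
    # Normalize the various "yes" style values used in the interface
    # (1, "Yes", "True", True) to 1; anything else becomes 0.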
|
|
|
new_value = 0 |
|
|
|
if ( |
|
(original_value == 1) or |
|
(original_value == "Yes") or |
|
(original_value == "True") or |
|
(original_value == True) |
|
): |
|
|
|
new_value = 1 |
|
|
|
return new_value |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def truncate_prompt ( |
|
pipe, |
|
existing_prompt_text |
|
): |
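
    # Keep whole words from the start of the prompt until adding the next word
    # would exceed the tokenizer's limit (model_max_length minus the reserved
    # special tokens); return the kept text and the unused remainder.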
|
|
|
|
|
|
|
|
|
|
|
tokenizer = pipe.tokenizer |
|
|
|
max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens |
|
|
|
prompt_text_words_array = existing_prompt_text.split(" ") |
|
|
|
prompt_text_words_array_length = len(prompt_text_words_array) |
|
|
|
prompt_text_words_index = 0 |
|
|
|
prompt_text_substring = "" |
|
prompt_text_not_used_substring = "" |
|
|
|
for prompt_text_word in prompt_text_words_array: |
|
|
|
prompt_text_words_index += 1 |
|
|
|
substring_to_test = prompt_text_substring |
|
|
|
if prompt_text_words_index > 1: |
|
|
|
substring_to_test += " " |
|
|
|
substring_to_test += prompt_text_word |
|
|
|
token_length_of_substring_to_test = len(tokenizer.tokenize(substring_to_test)) |
|
|
|
if token_length_of_substring_to_test > max_token_length_of_model: |
|
|
|
prompt_text_not_used_substring += prompt_text_word + " " |
|
|
|
else: |
|
|
|
prompt_text_substring = substring_to_test |
|
|
|
return ( |
|
prompt_text_substring, |
|
prompt_text_not_used_substring |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def construct_pipe ( |
|
base_model_name_value, |
|
model_configuration_name_value |
|
): |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
base_model_kwargs = {} |
|
|
|
if ( |
|
(base_model_name_value == "sdxl") or |
|
(base_model_name_value == "photoreal") or |
|
(base_model_name_value == "sdxl_turbo") or |
|
(base_model_name_value == "sd_1_5_runwayml") |
|
): |
|
|
|
base_model_kwargs["use_safetensors"] = True |
|
|
|
if use_safety_checker == 0: |
|
|
|
if ( |
|
(base_model_name_value == "photoreal") or |
|
(base_model_name_value == "sd_1_5_runwayml") |
|
): |
|
|
|
base_model_kwargs = { |
|
"safety_checker": None, |
|
"requires_safety_checker": False |
|
} |
|
|
|
if device == "cuda": |
|
|
|
if ( |
|
(base_model_name_value == "sdxl") or |
|
(base_model_name_value == "sdxl_turbo") or |
|
(base_model_name_value == "sd_1_5_runwayml") |
|
): |
|
|
|
base_model_kwargs["variant"] = "fp16" |
|
|
|
base_model_kwargs["torch_dtype"] = torch.float16 |
|
|
|
if use_custom_hugging_face_cache_dir == 1: |
|
|
|
base_model_kwargs["cache_dir"] = hugging_face_cache_dir |
|
|
|
pipe = DiffusionPipeline.from_pretrained( |
|
pretrained_model_name_or_path = model_configuration_links_object[model_configuration_name_value], |
|
local_files_only = only_use_local_files_bool, |
|
**base_model_kwargs |
|
) |
|
|
|
if use_model_cpu_offload_for_base_model == 1: |
|
pipe.enable_model_cpu_offload() |
|
|
|
if use_xformers == 1: |
|
pipe.enable_xformers_memory_efficient_attention() |
|
|
|
pipe = pipe.to(device) |
|
|
|
if use_sequential_cpu_offload_for_base_model == 1: |
|
pipe.enable_sequential_cpu_offload() |
|
|
|
if use_default_attn_processor == 1: |
|
pipe.unet.set_default_attn_processor() |
|
|
|
if use_torch_compile_for_base_model == 1: |
|
pipe.unet = torch.compile( |
|
pipe.unet, |
|
mode = "reduce-overhead", |
|
fullgraph = True |
|
) |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
return ( |
|
pipe |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def configure_scheduler ( |
|
pipe, |
|
scheduler_value |
|
): |
|
|
|
scheduler_config = pipe.scheduler.config |
|
|
|
scheduler = scheduler_value |
|
|
|
|
|
|
|
if scheduler_value == "model_default": |
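
        # "model_default" keeps whatever scheduler class the pipeline was loaded
        # with, mapped back to this app's own identifier when possible.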
|
|
|
scheduler_name = pipe.scheduler.config._class_name |
|
|
|
if scheduler_name in scheduler_name_to_identifier_in_app_object: |
|
|
|
scheduler = scheduler_name_to_identifier_in_app_object[scheduler_name] |
|
|
|
|
|
|
|
scheduler_used = scheduler |
|
|
|
|
|
|
|
if scheduler == "ddim": |
|
|
|
from diffusers import DDIMScheduler |
|
pipe.scheduler = DDIMScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "ddpm": |
|
|
|
from diffusers import DDPMScheduler |
|
pipe.scheduler = DDPMScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "dpm_solver_multistep": |
|
|
|
from diffusers import DPMSolverMultistepScheduler |
|
pipe.scheduler = DPMSolverMultistepScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "dpm_solver_multistep_karras_sigmas_true": |
|
|
|
new_scheduler_config = dict(pipe.scheduler.config) |
|
new_scheduler_config.update({"use_karras_sigmas": True}) |
|
|
|
from diffusers import DPMSolverMultistepScheduler |
|
pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config) |
|
|
|
elif scheduler == "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp": |
|
|
|
new_scheduler_config = dict(pipe.scheduler.config) |
|
new_scheduler_config.update({"algorithm_type": "sde-dpmsolver++"}) |
|
|
|
from diffusers import DPMSolverMultistepScheduler |
|
pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config) |
|
|
|
elif scheduler == "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp": |
|
|
|
new_scheduler_config = dict(pipe.scheduler.config) |
|
new_scheduler_config.update({"use_karras_sigmas": True}) |
|
new_scheduler_config.update({"algorithm_type": "sde-dpmsolver++"}) |
|
|
|
from diffusers import DPMSolverMultistepScheduler |
|
pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config) |
|
|
|
elif scheduler == "dpm_solver_singlestep": |
|
|
|
from diffusers import DPMSolverSinglestepScheduler |
|
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "dpm_solver_singlestep_karras_sigmas_true": |
|
|
|
new_scheduler_config = dict(pipe.scheduler.config) |
|
new_scheduler_config.update({"use_karras_sigmas": True}) |
|
|
|
from diffusers import DPMSolverSinglestepScheduler |
|
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(new_scheduler_config) |
|
|
|
elif scheduler == "kdpm2_discrete": |
|
|
|
from diffusers import KDPM2DiscreteScheduler |
|
pipe.scheduler = KDPM2DiscreteScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "kdpm2_discrete_karras_sigmas_true": |
|
|
|
new_scheduler_config = dict(pipe.scheduler.config) |
|
new_scheduler_config.update({"use_karras_sigmas": True}) |
|
|
|
from diffusers import KDPM2DiscreteScheduler |
|
pipe.scheduler = KDPM2DiscreteScheduler.from_config(new_scheduler_config) |
|
|
|
elif scheduler == "kdpm2_ancestral_discrete": |
|
|
|
from diffusers import KDPM2AncestralDiscreteScheduler |
|
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "kdpm2_ancestral_discrete_karras_sigmas_true": |
|
|
|
new_scheduler_config = dict(pipe.scheduler.config) |
|
new_scheduler_config.update({"use_karras_sigmas": True}) |
|
|
|
from diffusers import KDPM2AncestralDiscreteScheduler |
|
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(new_scheduler_config) |
|
|
|
elif scheduler == "euler_discrete": |
|
|
|
from diffusers import EulerDiscreteScheduler |
|
pipe.scheduler = EulerDiscreteScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "euler_ancestral_discrete": |
|
|
|
from diffusers import EulerAncestralDiscreteScheduler |
|
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "heun_discrete": |
|
|
|
from diffusers import HeunDiscreteScheduler |
|
pipe.scheduler = HeunDiscreteScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "lms_discrete": |
|
|
|
from diffusers import LMSDiscreteScheduler |
|
pipe.scheduler = LMSDiscreteScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "lms_discrete_karras_sigmas_true": |
|
|
|
new_scheduler_config = dict(pipe.scheduler.config) |
|
new_scheduler_config.update({"use_karras_sigmas": True}) |
|
|
|
from diffusers import LMSDiscreteScheduler |
|
pipe.scheduler = LMSDiscreteScheduler.from_config(new_scheduler_config) |
|
|
|
elif scheduler == "pndm": |
|
|
|
from diffusers import PNDMScheduler |
|
pipe.scheduler = PNDMScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "pndm_skip_prk_steps_true": |
|
|
|
new_scheduler_config = dict(pipe.scheduler.config) |
|
new_scheduler_config.update({"skip_prk_steps": True}) |
|
|
|
from diffusers import PNDMScheduler |
|
pipe.scheduler = PNDMScheduler.from_config(new_scheduler_config) |
|
|
|
elif scheduler == "deis_multistep": |
|
|
|
from diffusers import DEISMultistepScheduler |
|
pipe.scheduler = DEISMultistepScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "dpm_solver_sde": |
|
|
|
from diffusers import DPMSolverSDEScheduler |
|
pipe.scheduler = DPMSolverSDEScheduler.from_config(scheduler_config) |
|
|
|
elif scheduler == "uni_pc_multistep": |
|
|
|
from diffusers import UniPCMultistepScheduler |
|
pipe.scheduler = UniPCMultistepScheduler.from_config(scheduler_config) |
|
|
|
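# No recognized scheduler name matched above; fall back to the PNDM |
# scheduler and report "pndm" as the scheduler that was actually used. |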
else: |
|
|
|
from diffusers import PNDMScheduler |
|
pipe.scheduler = PNDMScheduler.from_config(scheduler_config) |
|
|
|
scheduler_used = "pndm" |
|
|
|
|
|
|
|
return ( |
|
scheduler_used |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
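# Construct the refiner pipeline from hugging_face_refiner_partial_path, |
# applying the same device, cache, offload, xformers and torch.compile |
# options used for the other pipelines. |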
def construct_refiner (): |
|
|
|
refiner_kwargs = { |
|
"use_safetensors": True |
|
} |
|
|
|
if device == "cuda": |
|
|
|
refiner_kwargs["variant"] = "fp16" |
|
refiner_kwargs["torch_dtype"] = torch.float16 |
|
|
|
if use_custom_hugging_face_cache_dir == 1: |
|
|
|
refiner_kwargs["cache_dir"] = hugging_face_cache_dir |
|
|
|
refiner = DiffusionPipeline.from_pretrained( |
|
pretrained_model_name_or_path = hugging_face_refiner_partial_path, |
|
local_files_only = only_use_local_files_bool, |
|
**refiner_kwargs |
|
) |
|
|
|
if use_model_cpu_offload_for_refiner == 1: |
|
|
|
refiner.enable_model_cpu_offload() |
|
|
|
if use_xformers == 1: |
|
|
|
refiner.enable_xformers_memory_efficient_attention() |
|
|
|
refiner = refiner.to(device) |
|
|
|
if use_sequential_cpu_offload_for_refiner == 1: |
|
|
|
refiner.enable_sequential_cpu_offload() |
|
|
|
if use_default_attn_processor == 1: |
|
|
|
refiner.unet.set_default_attn_processor() |
|
|
|
if use_torch_compile_for_refiner == 1: |
|
refiner.unet = torch.compile( |
|
refiner.unet, |
|
mode = "reduce-overhead", |
|
fullgraph = True |
|
) |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
return ( |
|
refiner |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
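# Construct the upscaler pipeline from hugging_face_upscaler_partial_path, |
# using fp16 weights on CUDA plus the optional offload/xformers settings. |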
def construct_upscaler (): |
|
|
|
upscaler_kwargs = { |
|
"use_safetensors": True |
|
} |
|
|
|
if device == "cuda": |
|
|
|
upscaler_kwargs["torch_dtype"] = torch.float16 |
|
|
|
if use_custom_hugging_face_cache_dir == 1: |
|
|
|
upscaler_kwargs["cache_dir"] = hugging_face_cache_dir |
|
|
|
upscaler = DiffusionPipeline.from_pretrained( |
|
pretrained_model_name_or_path = hugging_face_upscaler_partial_path, |
|
local_files_only = only_use_local_files_bool, |
|
**upscaler_kwargs |
|
) |
|
|
|
if use_model_cpu_offload_for_upscaler == 1: |
|
|
|
upscaler.enable_model_cpu_offload() |
|
|
|
if use_xformers == 1: |
|
|
|
upscaler.enable_xformers_memory_efficient_attention() |
|
|
|
upscaler = upscaler.to(device) |
|
|
|
if use_sequential_cpu_offload_for_upscaler == 1: |
|
|
|
upscaler.enable_sequential_cpu_offload() |
|
|
|
if use_default_attn_processor == 1: |
|
|
|
upscaler.unet.set_default_attn_processor() |
|
|
|
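# Note: use_torch_compile_for_refiner is reused here; it also gates |
# torch.compile for the upscaler's UNet. |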
if use_torch_compile_for_refiner == 1: |
|
upscaler.unet = torch.compile( |
|
upscaler.unet, |
|
mode = "reduce-overhead", |
|
fullgraph = True |
|
) |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
return ( |
|
upscaler |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
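# When a gallery thumbnail is selected, keep that index selected and show |
# the stored generation information text that matches it. |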
def update_prompt_info_from_gallery ( |
|
gallery_data: gr.SelectData, |
|
image_gallery_array_state_value |
|
): |
|
|
|
gallery_data_index = gallery_data.index |
|
|
|
output_image_gallery_field_update = gr.Gallery( |
|
selected_index = gallery_data_index |
|
) |
|
|
|
output_text_field_update = image_gallery_array_state_value[gallery_data_index] |
|
|
|
return { |
|
output_image_gallery_field: output_image_gallery_field_update, |
|
output_text_field: output_text_field_update |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
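# Save the final PIL image to disk, optionally embedding the generation |
# information in the PNG "parameters" metadata field. |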
def save_image_file ( |
|
saved_image_path_and_file, |
|
image_to_return, |
|
add_generation_information_to_image, |
|
info_to_save_in_image |
|
): |
|
|
|
if add_generation_information_to_image == 1: |
|
|
|
from PIL.PngImagePlugin import PngInfo |
|
|
|
saved_image_metadata = PngInfo() |
|
saved_image_metadata.add_text( |
|
"parameters", |
|
info_to_save_in_image |
|
) |
|
|
|
image_to_return.save( |
|
saved_image_path_and_file, |
|
pnginfo = saved_image_metadata |
|
) |
|
|
|
else: |
|
|
|
image_to_return.save( |
|
saved_image_path_and_file |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
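# Assemble the human-readable generation details (prompt, dimensions, seed, |
# model, scheduler, refiner/upscaler settings, timing and version info) that |
# are shown on the page and optionally saved alongside the image. |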
def create_image_generation_information ( |
|
base_model_name_value, |
|
model_configuration_name_value, |
|
scheduler_used, |
|
scheduler_value, |
|
prompt_text, |
|
prompt_text_not_used_substring, |
|
negative_prompt_text, |
|
negative_prompt_text_not_used_substring, |
|
allow_longer_prompts_for_sd_1_5_based_models_field_value, |
|
image_width, |
|
image_height, |
|
actual_seed, |
|
add_seed_into_pipe, |
|
use_torch_manual_seed_but_do_not_add_to_pipe_field_value, |
|
guidance_scale, |
|
base_model_steps, |
|
display_xformers_usage_in_prompt_info, |
|
display_default_attn_processor_usage_in_prompt_info, |
|
display_diffusers_version_in_prompt_info, |
|
use_refiner, |
|
refiner_error, |
|
refining_denoise_start_field_value, |
|
denoising_end_applicable, |
|
refining_use_denoising_start_in_base_model_when_using_refiner_field_value, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field_value, |
|
refining_steps_option_for_older_configuration_field_value, |
|
refining_steps_for_older_configuration_field_value, |
|
use_upscaler, |
|
upscaler_error, |
|
upscaling_steps, |
|
upscaled_image_width, |
|
upscaled_image_height, |
|
current_actual_total_base_model_steps, |
|
current_actual_total_refiner_steps, |
|
current_actual_total_upscaler_steps, |
|
generation_start_time, |
|
image_has_been_canceled, |
|
which_image |
|
): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
info_about_prompt_lines_array = [] |
|
|
|
if image_has_been_canceled == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Image was canceled before completion. Some details below may not be accurate." |
|
]) |
|
|
|
if refiner_error == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Refiner Error: An error occurred in the refining progress and was skipped. Some details below will not be accurate." |
|
]) |
|
|
|
if upscaler_error == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Upscaler Error: An error occurred in the upscaling progress and was skipped. Some details below will not be accurate." |
|
]) |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Prompt: " + prompt_text |
|
]) |
|
|
|
if len(negative_prompt_text) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Negative Prompt: " + negative_prompt_text |
|
]) |
|
|
|
dimensions_title = "Dimensions" |
|
|
|
if use_upscaler == 1: |
|
|
|
dimensions_title = "Original Dimensions" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
dimensions_title + ": " + str(image_width) + "x" + str(image_height) + " px" |
|
]) |
|
|
|
if use_upscaler == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Upscaled Dimensions: " + str(upscaled_image_width) + "x" + str(upscaled_image_height) + " px" |
|
]) |
|
|
|
if ( |
|
(add_seed_into_pipe == 1) or |
|
( |
|
(add_seed_into_pipe != 1) and |
|
(use_torch_manual_seed_but_do_not_add_to_pipe_field_value == 1) |
|
) |
|
): |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Seed: " + str(actual_seed) |
|
]) |
|
|
|
nice_seed_added_to_generation = "No" |
|
|
|
if add_seed_into_pipe == 1: |
|
|
|
nice_seed_added_to_generation = "Yes" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Seed added to generation? " + nice_seed_added_to_generation |
|
]) |
|
|
|
if ( |
|
(add_seed_into_pipe != 1) and |
|
(use_torch_manual_seed_but_do_not_add_to_pipe_field_value == 1) |
|
): |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Use torch.manual_seed, but don't explicitly add it as a generator during generation? Yes" |
|
]) |
|
|
|
if int(guidance_scale) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Guidance Scale: " + str(guidance_scale) |
|
]) |
|
|
|
if ( |
|
(image_has_been_canceled == 0) or |
|
(base_model_steps == current_actual_total_base_model_steps) |
|
): |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Steps: " + str(base_model_steps) |
|
]) |
|
|
|
else: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Selected Steps: " + str(base_model_steps), |
|
"Actual Base Model Steps: " + str(current_actual_total_base_model_steps) |
|
]) |
|
|
|
nice_model_name = base_model_names_object[base_model_name_value] + " (" + model_configuration_links_object[model_configuration_name_value] + ")" |
|
|
|
nice_scheduler_name = scheduler_short_names_object[scheduler_used] |
|
|
|
if scheduler_value == "model_default": |
|
|
|
nice_scheduler_name += " (model default)" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Model: " + nice_model_name |
|
]) |
|
|
|
if allow_longer_prompts_for_sd_1_5_based_models_field_value == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Longer prompt allowed using special method? Yes" |
|
]) |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Scheduler/Sampler: " + nice_scheduler_name |
|
]) |
|
|
|
if ( |
|
(use_refiner == 1) or |
|
(refiner_error == 1) |
|
): |
|
|
|
refiner_usage_text = "Yes" |
|
|
|
if refiner_error == 1: |
|
|
|
refiner_usage_text = "No (an error prevented it from refining)\nHowever, selected refining details are still included." |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Refiner Used? " + refiner_usage_text |
|
]) |
|
|
|
nice_refiner_denoise_start = str(refining_denoise_start_field_value) |
|
|
|
if denoising_end_applicable == 1: |
|
|
|
if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Set \"denoising_end\" in base model generation? Yes", |
|
"Base model denoise end %: " + nice_refiner_denoise_start |
|
]) |
|
|
|
if current_actual_total_base_model_steps > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Actual Base Model Steps: " + formatted_number(current_actual_total_base_model_steps) |
|
]) |
|
|
|
else: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Set \"denoising_end\" in base model generation? No", |
|
]) |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Refiner denoise start %: " + nice_refiner_denoise_start |
|
]) |
|
|
|
if refining_steps_option_for_older_configuration_field_value == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Number of steps in refiner that \"denoise start %\" applies to: " + str(refining_steps_for_older_configuration_field_value) |
|
]) |
|
|
|
if current_actual_total_refiner_steps > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Actual Refining Steps: " + formatted_number(current_actual_total_refiner_steps) |
|
]) |
|
|
|
if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Base model output in latent space before refining? Yes", |
|
]) |
|
|
|
else: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Base model output in latent space before refining? No", |
|
]) |
|
|
|
if ( |
|
(use_upscaler == 1) or |
|
(upscaler_error == 1) |
|
): |
|
|
|
if use_upscaler == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Upscaled (2x)? Yes", |
|
"Upscaler Steps: " + str(upscaling_steps) |
|
]) |
|
|
|
elif upscaler_error == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Upscaled (2x)? No (an error prevented it from upscaling)", |
|
"Selected Upscaler Steps: " + str(upscaling_steps) |
|
]) |
|
|
|
if log_generation_times == 1: |
|
|
|
generation_end_time = time.time() |
|
|
|
generation_time_in_seconds = (generation_end_time - generation_start_time) |
|
|
|
nice_generation_time = nice_elapsed_time(generation_time_in_seconds) |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Time: " + nice_generation_time |
|
]) |
|
|
|
if len(prompt_text_not_used_substring) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"End of Prompt Truncated: " + prompt_text_not_used_substring |
|
]) |
|
|
|
if len(negative_prompt_text_not_used_substring) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"End of Negative Prompt Truncated: " + negative_prompt_text_not_used_substring |
|
]) |
|
|
|
if display_xformers_usage_in_prompt_info == 1: |
|
|
|
nice_xformers_usage = "No" |
|
|
|
if use_xformers == 1: |
|
|
|
nice_xformers_usage = "Yes" |
|
|
|
if include_transformers_version_in_prompt_info == 1: |
|
|
|
import transformers |
|
|
|
nice_xformers_usage += " (version " + str(transformers.__version__) + ")" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"xFormers Used?: " + nice_xformers_usage |
|
]) |
|
|
|
if display_default_attn_processor_usage_in_prompt_info == 1: |
|
|
|
nice_default_attn_processor_usage = "No" |
|
|
|
if use_default_attn_processor == 1: |
|
|
|
nice_default_attn_processor_usage = "Yes" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Default AttnProcessor Used? " + nice_default_attn_processor_usage |
|
]) |
|
|
|
if display_diffusers_version_in_prompt_info == 1: |
|
|
|
try: |
|
|
|
import diffusers |
|
|
|
diffusers_version = diffusers.__version__ |
|
|
|
except Exception: |
|
|
|
diffusers_version = "" |
|
|
|
if diffusers_version: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Diffusers Version: " + str(diffusers_version) |
|
]) |
|
|
|
info_about_prompt = "\n".join(info_about_prompt_lines_array) |
|
|
|
return info_about_prompt |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
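# Poll for the latest preview image. It is only returned when it belongs to |
# the user whose generation is currently running; otherwise return None. |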
def load_image_preview ( |
|
user_id_state |
|
): |
|
|
|
user_id_state_value = user_id_state.value |
|
|
|
|
|
|
|
if ( |
|
(user_id_state_value > 0) and |
|
(user_id_state_value == current_preview_image_user_id) |
|
): |
|
|
|
|
|
|
|
return { |
|
output_image_preview_field: current_preview_image[0] |
|
} |
|
|
|
else: |
|
|
|
return None |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
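# Prepare the interface for a new generation: disable the generate button, |
# clear and hide the output fields, and issue a random id that is used to |
# track (and, if requested, cancel) this particular generation. |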
def before_create_image_function ( |
|
create_preview_images |
|
): |
|
|
|
generate_image_button_update = gr.Button( |
|
value = generate_image_button_in_progress_text, |
|
variant = "secondary", |
|
interactive = False |
|
) |
|
|
|
|
|
|
|
output_base_model_image_field_accordion_update = gr.Accordion( |
|
visible = False |
|
) |
|
|
|
output_base_model_image_field_update = gr.Image( |
|
value = None |
|
) |
|
|
|
output_refiner_image_field_accordion_update = gr.Accordion( |
|
visible = False |
|
) |
|
|
|
output_refiner_image_field_update = gr.Image( |
|
value = None |
|
) |
|
|
|
|
|
|
|
output_text_field_update = gr.Textbox( |
|
visible = False |
|
) |
|
|
|
prompt_truncated_field_group_update = gr.Group( |
|
visible = False |
|
) |
|
|
|
prompt_truncated_field_update = gr.HTML( |
|
value = "" |
|
) |
|
|
|
negative_prompt_truncated_field_group_update = gr.Group( |
|
visible = False |
|
) |
|
|
|
negative_prompt_truncated_field_update = gr.HTML( |
|
value = "" |
|
) |
|
|
|
error_text_field_accordion_update = gr.Accordion( |
|
visible = False |
|
) |
|
|
|
error_text_field_update = gr.HTML( |
|
value = "" |
|
) |
|
|
|
|
|
|
|
image_generation_id = int(random.randrange(0, 1000000000)) |
|
|
|
image_generation_id_state_update = gr.State( |
|
value = image_generation_id |
|
) |
|
|
|
|
|
|
|
before_create_image_object = { |
|
generate_image_button: generate_image_button_update, |
|
output_base_model_image_field_accordion: output_base_model_image_field_accordion_update, |
|
output_base_model_image_field: output_base_model_image_field_update, |
|
output_refiner_image_field_accordion: output_refiner_image_field_accordion_update, |
|
output_refiner_image_field: output_refiner_image_field_update, |
|
output_text_field: output_text_field_update, |
|
prompt_truncated_field_group: prompt_truncated_field_group_update, |
|
prompt_truncated_field: prompt_truncated_field_update, |
|
negative_prompt_truncated_field_group: negative_prompt_truncated_field_group_update, |
|
negative_prompt_truncated_field: negative_prompt_truncated_field_update, |
|
error_text_field_accordion: error_text_field_accordion_update, |
|
error_text_field: error_text_field_update, |
|
image_generation_id_state: image_generation_id_state_update |
|
} |
|
|
|
if create_preview_images == 1: |
|
|
|
output_image_field_update = gr.Image( |
|
height = 100 |
|
) |
|
|
|
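# Note: this gallery update is constructed here but is not added to the |
# before_create_image_object dictionary below. |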
output_image_gallery_field_update = gr.Gallery( |
|
label = "", |
|
height = 100 |
|
) |
|
|
|
output_image_preview_field_accordion_update = gr.Accordion( |
|
visible = True |
|
) |
|
|
|
output_image_preview_field_update = gr.Image( |
|
value = None |
|
) |
|
|
|
before_create_image_object.update({ |
|
output_image_field: output_image_field_update, |
|
output_image_preview_field_accordion: output_image_preview_field_accordion_update, |
|
output_image_preview_field: output_image_preview_field_update |
|
}) |
|
|
|
if enable_image_generation_cancellation == 1: |
|
|
|
cancel_image_button_row_update = gr.Row( |
|
visible = True |
|
) |
|
|
|
cancel_image_button_update = gr.Button( |
|
interactive = True |
|
) |
|
|
|
before_create_image_object.update({ |
|
cancel_image_button_row: cancel_image_button_row_update, |
|
cancel_image_button: cancel_image_button_update |
|
}) |
|
|
|
return before_create_image_object |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
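# Restore the interface after generation finishes: re-enable the generate |
# button, show the output text, hide the preview, and reset the cancel |
# controls when cancellation is enabled. |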
def after_create_image_function (): |
|
|
|
generate_image_button_update = gr.Button( |
|
value = generate_image_button_normal_text, |
|
variant = "primary", |
|
interactive = True |
|
) |
|
|
|
output_text_field_update = gr.Textbox( |
|
visible = True |
|
) |
|
|
|
after_create_image_object = { |
|
generate_image_button: generate_image_button_update, |
|
output_text_field: output_text_field_update |
|
} |
|
|
|
if enable_image_preview == 1: |
|
|
|
output_image_field_update = gr.Image( |
|
height = gradio_image_component_height |
|
) |
|
|
|
output_image_gallery_field_update = gr.Gallery( |
|
height = gradio_image_gallery_component_height |
|
) |
|
|
|
output_image_preview_field_accordion_update = gr.Accordion( |
|
visible = False |
|
) |
|
|
|
after_create_image_object.update({ |
|
output_image_field: output_image_field_update, |
|
output_image_gallery_field: output_image_gallery_field_update, |
|
output_image_preview_field_accordion: output_image_preview_field_accordion_update |
|
}) |
|
|
|
if enable_image_generation_cancellation == 1: |
|
|
|
generate_image_button_row_update = gr.Row( |
|
visible = True |
|
) |
|
|
|
cancel_image_button_row_update = gr.Row( |
|
visible = False |
|
) |
|
|
|
cancel_image_button_update = gr.Button( |
|
value = cancel_image_button_text, |
|
interactive = False |
|
) |
|
|
|
cancel_image_message_field_row_update = gr.Row( |
|
visible = False |
|
) |
|
|
|
cancel_image_message_field_update = gr.Button( |
|
value = "" |
|
) |
|
|
|
after_create_image_object.update({ |
|
generate_image_button_row: generate_image_button_row_update, |
|
cancel_image_button_row: cancel_image_button_row_update, |
|
cancel_image_button: cancel_image_button_update, |
|
cancel_image_message_field_row: cancel_image_message_field_row_update, |
|
cancel_image_message_field: cancel_image_message_field_update |
|
}) |
|
|
|
return after_create_image_object |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
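# Remove this generation's id from the per-user cancellation tracking |
# objects once the cancellation has been handled. |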
def remove_from_cancel_object ( |
|
user_id_state_value, |
|
image_generation_id_state_value |
|
): |
|
|
|
if ( |
|
(user_id_state_value in cancel_image_generation_ids_object) and |
|
(image_generation_id_state_value in cancel_image_generation_ids_object[user_id_state_value]) |
|
): |
|
|
|
cancel_image_generation_ids_object[user_id_state_value].remove(image_generation_id_state_value) |
|
|
|
if len(cancel_image_generation_ids_object[user_id_state_value]) == 0: |
|
|
|
cancel_image_generation_ids_object.pop(user_id_state_value, None) |
|
|
|
if image_generation_id_state_value in cancel_image_generation_times_object: |
|
|
|
cancel_image_generation_times_object.pop(image_generation_id_state_value, None) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
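# Return True when cancellation is enabled and this user has flagged this |
# particular image generation id to be canceled. |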
def image_processing_is_canceled ( |
|
user_id_state_value, |
|
image_generation_id_state_value |
|
): |
|
|
|
global cancel_image_generation_ids_object |
|
|
|
image_processing_was_canceled = False |
|
|
|
if ( |
|
enable_image_generation_cancellation and |
|
(user_id_state_value in cancel_image_generation_ids_object) and |
|
(image_generation_id_state_value in cancel_image_generation_ids_object[user_id_state_value]) |
|
): |
|
|
|
image_processing_was_canceled = True |
|
|
|
return image_processing_was_canceled |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
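# Decode latents into PIL images with the pipeline's VAE. SDXL-style models |
# get the optional fp16 upcast and, for the final image, the watermark; |
# SD 1.5 / PhotoReal latents are decoded and postprocessed directly. |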
def create_image_from_latents ( |
|
model_to_use, |
|
pipe, |
|
latents, |
|
generator, |
|
is_final_image |
|
): |
|
|
|
if ( |
|
(model_to_use == "sdxl") or |
|
(model_to_use == "sdxl_turbo") or |
|
(model_to_use == "refiner") |
|
): |
|
|
|
|
|
|
|
|
|
|
|
needs_upcasting = pipe.vae.dtype == torch.float16 and pipe.vae.config.force_upcast |
|
|
|
if needs_upcasting: |
|
pipe.upcast_vae() |
|
latents = latents.to(next(iter(pipe.vae.post_quant_conv.parameters())).dtype) |
|
|
|
image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=False)[0] |
|
|
|
|
|
if needs_upcasting: |
|
pipe.vae.to(dtype=torch.float16) |
|
|
|
if int(is_final_image) == 1: |
|
|
|
|
|
if pipe.watermark is not None: |
|
image = pipe.watermark.apply_watermark(image) |
|
|
|
image = pipe.image_processor.postprocess(image, output_type="pil") |
|
|
|
return image |
|
|
|
elif ( |
|
(model_to_use == "sd_1_5_runwayml") or |
|
(model_to_use == "photoreal") |
|
): |
|
|
|
|
|
|
|
image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=False, generator=generator)[0] |
|
|
|
do_denormalize = [True] * image.shape[0] |
|
|
|
image = pipe.image_processor.postprocess(image, output_type="pil", do_denormalize=do_denormalize) |
|
|
|
return image |
|
|
|
return "" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
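# Decode the current latents into a preview image and store it in module |
# globals so the polling function can display it for this user. (The |
# temporary_extra argument is currently unused.) |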
def create_preview_image ( |
|
model_to_use, |
|
user_id_state, |
|
pipe, |
|
latents, |
|
generator, |
|
temporary_extra |
|
): |
|
|
|
is_final_image = 0 |
|
|
|
pil_image = create_image_from_latents( |
|
model_to_use, |
|
pipe, |
|
latents, |
|
generator, |
|
is_final_image |
|
) |
|
|
|
if pil_image: |
|
|
|
global current_preview_image |
|
global current_preview_image_user_id |
|
|
|
current_preview_image = pil_image |
|
current_preview_image_user_id = user_id_state |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return None |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
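# Work around the CLIP token limit for SD 1.5 style models: tokenize both |
# prompts without truncation, pad the shorter one to the longer one's token |
# length, then encode in max_length-sized chunks and concatenate the |
# resulting embeddings. |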
def get_pipeline_embeds( |
|
pipe, |
|
prompt, |
|
negative_prompt, |
|
device, |
|
token_length_of_prompt_text, |
|
token_length_of_negative_prompt_text |
|
): |
|
|
|
max_length = pipe.tokenizer.model_max_length |
|
|
|
|
|
|
|
if token_length_of_prompt_text >= token_length_of_negative_prompt_text: |
|
|
|
input_ids = pipe.tokenizer( |
|
prompt, |
|
return_tensors = "pt", |
|
truncation = False |
|
).input_ids.to(device) |
|
|
|
shape_max_length = input_ids.shape[-1] |
|
|
|
negative_ids = pipe.tokenizer( |
|
negative_prompt, |
|
truncation = False, |
|
padding = "max_length", |
|
max_length = shape_max_length, |
|
return_tensors = "pt" |
|
).input_ids.to(device) |
|
|
|
else: |
|
|
|
negative_ids = pipe.tokenizer( |
|
negative_prompt, |
|
return_tensors = "pt", |
|
truncation = False |
|
).input_ids.to(device) |
|
|
|
shape_max_length = negative_ids.shape[-1] |
|
|
|
input_ids = pipe.tokenizer( |
|
prompt, |
|
return_tensors="pt", |
|
truncation = False, |
|
padding = "max_length", |
|
max_length = shape_max_length |
|
).input_ids.to(device) |
|
|
|
concat_embeds = [] |
|
|
|
neg_embeds = [] |
|
|
|
for i in range(0, shape_max_length, max_length): |
|
|
|
concat_embeds.append(pipe.text_encoder(input_ids[:, i: i + max_length])[0]) |
|
|
|
neg_embeds.append(pipe.text_encoder(negative_ids[:, i: i + max_length])[0]) |
|
|
|
return torch.cat(concat_embeds, dim=1), torch.cat(neg_embeds, dim=1) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
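# Main generation routine: validates the inputs, (re)loads the base model, |
# refiner and upscaler as needed, runs each stage with progress, preview and |
# cancellation callbacks, and builds the output updates (image, gallery and |
# generation information). |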
def create_image_function ( |
|
base_model_field_index, |
|
prompt_text, |
|
negative_prompt_text, |
|
allow_longer_prompts_for_sd_1_5_based_models_field_value, |
|
scheduler_index, |
|
image_width, |
|
image_height, |
|
guidance_scale, |
|
base_model_steps, |
|
base_model_steps_field_for_sdxl_turbo, |
|
actual_seed, |
|
add_seed_into_pipe, |
|
use_torch_manual_seed_but_do_not_add_to_pipe_field_value, |
|
refining_selection_field_value, |
|
refining_denoise_start_field_value, |
|
refining_use_denoising_start_in_base_model_when_using_refiner_field_value, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field_value, |
|
refining_steps_option_for_older_configuration_field_value, |
|
refining_steps_for_older_configuration_field_value, |
|
upscaling_selection_field_value, |
|
upscaling_steps, |
|
image_gallery_array_state_value, |
|
prompt_information_array_state_value, |
|
last_model_configuration_name_selected_state_value, |
|
last_refiner_name_selected_state_value, |
|
last_upscaler_name_selected_state_value, |
|
stored_pipe_state, |
|
stored_refiner_state, |
|
stored_upscaler_state, |
|
create_preview_images, |
|
do_not_create_refining_preview_images, |
|
do_not_create_upscaling_preview_images, |
|
save_base_image_when_using_refiner_or_upscaler_field_value, |
|
save_refined_image_when_using_upscaler_field_value, |
|
user_id_state, |
|
image_generation_id_state, |
|
*model_configuration_dropdown_fields_array, |
|
progress = gr.Progress() |
|
): |
|
|
|
image_generation_id_state_value = image_generation_id_state.value |
|
user_id_state_value = user_id_state.value |
|
|
|
global current_image_generation_id_in_progress |
|
|
|
current_image_generation_id_in_progress = image_generation_id_state_value |
|
|
|
global cancel_image_generation_ids_object |
|
global cancel_image_generation_times_object |
|
|
|
global current_preview_image |
|
global current_preview_image_user_id |
|
|
|
current_preview_image = None |
|
current_preview_image_user_id = 0 |
|
|
|
error_count = 0 |
|
|
|
error_message_array = [] |
|
|
|
|
|
|
|
if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): |
|
|
|
|
|
|
|
remove_from_cancel_object(user_id_state_value, image_generation_id_state_value) |
|
|
|
output_text_field_update = gr.Textbox() |
|
return { |
|
output_text_field: output_text_field_update |
|
} |
|
|
|
|
|
|
|
guidance_scale = float(guidance_scale) |
|
base_model_steps = int(base_model_steps) |
|
base_model_steps_field_for_sdxl_turbo = int(base_model_steps_field_for_sdxl_turbo) |
|
upscaling_steps = int(upscaling_steps) |
|
|
|
|
|
|
|
base_model_name_value = base_model_array[base_model_field_index] |
|
|
|
|
|
|
|
position_in_array = 0 |
|
|
|
model_configuration_field_object = {} |
|
|
|
for model_configuration_field_index in model_configuration_dropdown_fields_array: |
|
|
|
this_base_model = base_model_array[position_in_array] |
|
|
|
model_configuration_field_object[this_base_model] = model_configuration_field_index |
|
|
|
position_in_array += 1 |
|
|
|
model_configuration_field_index = model_configuration_field_object[base_model_name_value] |
|
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_name_value][model_configuration_field_index] |
|
|
|
|
|
|
|
if base_model_name_value == "sdxl_turbo": |
|
|
|
negative_prompt_text = "" |
|
base_model_steps = base_model_steps_field_for_sdxl_turbo |
|
guidance_scale = 0 |
|
|
|
|
|
|
|
current_estimated_total_base_model_steps = base_model_steps |
|
current_estimated_total_refiner_steps = 0 |
|
current_estimated_total_upscaler_steps = upscaling_steps |
|
|
|
global current_actual_total_base_model_steps |
|
global current_actual_total_refiner_steps |
|
global current_actual_total_upscaler_steps |
|
|
|
current_actual_total_base_model_steps = 0 |
|
current_actual_total_refiner_steps = 0 |
|
current_actual_total_upscaler_steps = 0 |
|
|
|
|
|
|
|
scheduler_value = schedulers_array[scheduler_index] |
|
|
|
|
|
|
|
if not base_model_valid(base_model_name_value): |
|
error_function("Base model is not valid.") |
|
|
|
if not model_configuration_valid(base_model_name_value, model_configuration_name_value): |
|
error_function("Model configuration is not valid.") |
|
|
|
if not prompt_valid(prompt_text): |
|
error_function("Prompt is not valid.") |
|
|
|
if not negative_prompt_valid(negative_prompt_text): |
|
error_function("Negative prompt is not valid.") |
|
|
|
if not scheduler_valid(scheduler_value): |
|
error_function("Scheduler/sampler is not valid.") |
|
|
|
if not width_valid(image_width): |
|
error_function("Image width is not valid.") |
|
|
|
if not height_valid(image_height): |
|
error_function("Image height is not valid.") |
|
|
|
if base_model_name_value != "sdxl_turbo": |
|
|
|
if not guidance_scale_valid(guidance_scale): |
|
error_function("Guidance scale is not valid.") |
|
|
|
if not steps_valid(base_model_steps, base_model_name_value): |
|
error_function("Steps option is not valid.") |
|
|
|
if actual_seed == "": |
|
|
|
actual_seed = generate_random_seed() |
|
|
|
elif not seed_valid(actual_seed): |
|
|
|
error_function("Seed is not valid.") |
|
|
|
|
|
|
|
image_width = int(image_width) |
|
image_height = int(image_height) |
|
actual_seed = int(actual_seed) |
|
refining_steps_for_older_configuration_field_value = int(refining_steps_for_older_configuration_field_value) |
|
|
|
|
|
|
|
allow_longer_prompts_for_sd_1_5_based_models_field_value = numerical_bool(allow_longer_prompts_for_sd_1_5_based_models_field_value) |
|
add_seed_into_pipe = numerical_bool(add_seed_into_pipe) |
|
use_torch_manual_seed_but_do_not_add_to_pipe_field_value = numerical_bool(use_torch_manual_seed_but_do_not_add_to_pipe_field_value) |
|
|
|
refining_selection_field_value = numerical_bool(refining_selection_field_value) |
|
|
|
refining_use_denoising_start_in_base_model_when_using_refiner_field_value = numerical_bool(refining_use_denoising_start_in_base_model_when_using_refiner_field_value) |
|
refining_base_model_output_to_refiner_is_in_latent_space_field_value = numerical_bool(refining_base_model_output_to_refiner_is_in_latent_space_field_value) |
|
|
|
refining_steps_option_for_older_configuration_field_value = numerical_bool(refining_steps_option_for_older_configuration_field_value) |
|
|
|
use_upscaler = numerical_bool(upscaling_selection_field_value) |
|
|
|
create_preview_images = numerical_bool(create_preview_images) |
|
do_not_create_refining_preview_images = numerical_bool(do_not_create_refining_preview_images) |
|
do_not_create_upscaling_preview_images = numerical_bool(do_not_create_upscaling_preview_images) |
|
save_base_image_when_using_refiner_or_upscaler_field_value = numerical_bool(save_base_image_when_using_refiner_or_upscaler_field_value) |
|
save_refined_image_when_using_upscaler_field_value = numerical_bool(save_refined_image_when_using_upscaler_field_value) |
|
|
|
|
|
|
|
if create_preview_images == 0: |
|
|
|
do_not_create_refining_preview_images = 1 |
|
do_not_create_upscaling_preview_images = 1 |
|
|
|
|
|
|
|
use_refiner = 0 |
|
|
|
num_inference_steps_in_refiner = base_model_steps |
|
|
|
base_model_output_forced_to_be_pil_image = 0 |
|
|
|
if refining_selection_field_value: |
|
|
|
use_refiner = 1 |
|
|
|
if not refiner_denoise_start_valid(refining_denoise_start_field_value): |
|
error_function("Refiner denoise start is not valid.") |
|
|
|
refining_denoise_start_field_value = rounded_number(refining_denoise_start_field_value, 2) |
|
|
|
if refining_steps_option_for_older_configuration_field_value == 1: |
|
|
|
|
|
|
|
if not refining_steps_valid(refining_steps_for_older_configuration_field_value): |
|
error_function("Number of steps in refiner that \"denoise start %\" applies to is not valid.") |
|
|
|
num_inference_steps_in_refiner = refining_steps_for_older_configuration_field_value |
|
|
|
if base_model_name_value in base_models_not_supporting_base_model_output_in_latent_space_to_refiner_object: |
|
|
|
if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1: |
|
base_model_output_forced_to_be_pil_image = 1 |
|
|
|
error_count += 1 |
|
|
|
error_message_array.extend([ |
|
"The base model output must be a PIL image when using the refiner with the model you have chosen. If it wasn't, the image would not come out properly. Despite your selection, that change was made automatically." |
|
]) |
|
|
|
refining_base_model_output_to_refiner_is_in_latent_space_field_value = 0 |
|
|
|
|
|
|
|
if ( |
|
( |
|
(allow_longer_prompts_for_sd_1_5_based_models_field_value == 1) and |
|
(base_model_name_value not in base_models_supporting_special_long_prompt_method_object) |
|
) or |
|
(use_refiner == 1) or |
|
(use_upscaler == 1) |
|
): |
|
|
|
allow_longer_prompts_for_sd_1_5_based_models_field_value = 0 |
|
|
|
error_count += 1 |
|
|
|
error_message_array.extend([ |
|
"Longer prompts using the method we currently use cannot be done for certain models. It is also not available when using the refiner or upscaler. We disabled longer prompts for your image generation." |
|
]) |
|
|
|
|
|
|
|
if use_upscaler == 1: |
|
|
|
from diffusers import StableDiffusionLatentUpscalePipeline |
|
|
|
if not upscaling_steps_valid(upscaling_steps): |
|
error_function("Upscaling steps option is not valid.") |
|
|
|
|
|
|
|
if ( |
|
(last_model_configuration_name_selected_state_value == "") or |
|
(model_configuration_name_value != last_model_configuration_name_selected_state_value) |
|
): |
|
|
|
if (last_model_configuration_name_selected_state_value != ""): |
|
|
|
if "pipe" in globals(): |
|
del pipe |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Base model is loading."); |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Base model is loading" |
|
) |
|
|
|
( |
|
pipe |
|
) = construct_pipe( |
|
base_model_name_value, |
|
model_configuration_name_value |
|
) |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
last_model_configuration_name_selected_state_value = model_configuration_name_value |
|
|
|
else: |
|
|
|
pipe = stored_pipe_state |
|
|
|
|
|
|
|
( |
|
scheduler_used |
|
) = configure_scheduler( |
|
pipe, |
|
scheduler_value |
|
) |
|
|
|
|
|
|
|
if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): |
|
use_refiner = 0 |
|
use_upscaler = 0 |
|
|
|
|
|
|
|
# Always record the start time; it is passed to |
# create_image_generation_information() later even when timing is not logged. |
generation_start_time = time.time() |
|
|
|
|
|
|
|
prompt_text_to_use_inside_pipeline = prompt_text |
|
negative_prompt_text_to_use_inside_pipeline = negative_prompt_text |
|
|
|
|
|
|
|
prompt_truncated_field_group_update = gr.Group( |
|
visible = False |
|
) |
|
|
|
prompt_truncated_field_update = gr.HTML( |
|
value = "" |
|
) |
|
|
|
negative_prompt_truncated_field_group_update = gr.Group( |
|
visible = False |
|
) |
|
|
|
negative_prompt_truncated_field_update = gr.HTML( |
|
value = "" |
|
) |
|
|
|
prompt_text_not_used_substring = "" |
|
negative_prompt_text_not_used_substring = "" |
|
|
|
|
|
|
|
tokenizer = pipe.tokenizer |
|
|
|
max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens |
|
|
|
token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text)) |
|
token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text)) |
|
|
|
both_prompts_under_length_limit = 0 |
|
|
|
if ( |
|
(token_length_of_prompt_text <= max_token_length_of_model) and |
|
(token_length_of_negative_prompt_text <= max_token_length_of_model) |
|
): |
|
|
|
|
|
|
|
both_prompts_under_length_limit = 1 |
|
|
|
allow_longer_prompts_for_sd_1_5_based_models_field_value = 0 |
|
|
|
|
|
|
|
if ( |
|
(both_prompts_under_length_limit == 0) and |
|
(allow_longer_prompts_for_sd_1_5_based_models_field_value == 1) |
|
): |
|
|
|
|
|
|
|
prompt_embeds, negative_prompt_embeds = get_pipeline_embeds( |
|
pipe, |
|
prompt_text, |
|
negative_prompt_text, |
|
device, |
|
token_length_of_prompt_text, |
|
token_length_of_negative_prompt_text |
|
) |
|
|
|
prompt_text_to_use_inside_pipeline = None |
|
negative_prompt_text_to_use_inside_pipeline = None |
|
|
|
else: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
prompt_embeds = None |
|
negative_prompt_embeds = None |
|
|
|
truncated_prompts = 0 |
|
|
|
partial_prompt_or_negative_prompt_length_too_long_message = "" |
|
|
|
if token_length_of_prompt_text > max_token_length_of_model: |
|
|
|
( |
|
prompt_text, |
|
prompt_text_not_used_substring |
|
) = truncate_prompt( |
|
pipe, |
|
prompt_text |
|
) |
|
|
|
prompt_truncated_field_group_update = gr.Group( |
|
visible = True |
|
) |
|
|
|
prompt_truncated_field_update = gr.Textbox( |
|
value = prompt_text_not_used_substring |
|
) |
|
|
|
truncated_prompts += 1 |
|
|
|
partial_prompt_or_negative_prompt_length_too_long_message += "prompt" |
|
|
|
if token_length_of_negative_prompt_text > max_token_length_of_model: |
|
|
|
( |
|
negative_prompt_text, |
|
negative_prompt_text_not_used_substring |
|
) = truncate_prompt( |
|
pipe, |
|
negative_prompt_text |
|
) |
|
|
|
negative_prompt_truncated_field_group_update = gr.Group( |
|
visible = True |
|
) |
|
|
|
negative_prompt_truncated_field_update = gr.Textbox( |
|
value = negative_prompt_text_not_used_substring |
|
) |
|
|
|
truncated_prompts += 1 |
|
|
|
if truncated_prompts == 2: |
|
partial_prompt_or_negative_prompt_length_too_long_message += " and " |
|
|
|
partial_prompt_or_negative_prompt_length_too_long_message += "negative prompt" |
|
|
|
if len(partial_prompt_or_negative_prompt_length_too_long_message) > 0: |
|
if truncated_prompts == 2: |
|
partial_prompt_or_negative_prompt_length_too_long_message += " were" |
|
else: |
|
partial_prompt_or_negative_prompt_length_too_long_message += " was" |
|
|
|
prompt_or_negative_prompt_length_too_long_message = "Note: Part of your " + partial_prompt_or_negative_prompt_length_too_long_message + " truncated automatically because it was too long." |
|
|
|
show_message(prompt_or_negative_prompt_length_too_long_message) |
|
|
|
|
|
|
|
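# Seed handling: either pass a torch generator seeded with the chosen seed |
# into the pipeline, or optionally call torch.manual_seed() only, without |
# attaching a generator. |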
if add_seed_into_pipe == 1: |
|
|
|
generator = torch.manual_seed(actual_seed) |
|
|
|
else: |
|
|
|
if use_torch_manual_seed_but_do_not_add_to_pipe_field_value == 1: |
|
|
|
torch.manual_seed(actual_seed) |
|
|
|
generator = None |
|
|
|
|
|
|
|
denoising_end_applicable = 0 |
|
|
|
if base_model_name_value not in base_models_not_supporting_denoising_end_for_base_model_object: |
|
|
|
denoising_end_applicable = 1 |
|
|
|
|
|
|
|
denoising_end_in_base_model_to_use = None |
|
|
|
output_type_in_base_model_to_use = "pil" |
|
|
|
if use_refiner == 1: |
|
|
|
if ( |
|
(refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1) and |
|
(denoising_end_applicable == 1) |
|
): |
|
|
|
denoising_end_in_base_model_to_use = refining_denoise_start_field_value |
|
|
|
current_estimated_total_base_model_steps = rounded_number(base_model_steps * refining_denoise_start_field_value) |
|
|
|
if current_estimated_total_base_model_steps < 1: |
|
|
|
current_estimated_total_base_model_steps = 1 |
|
|
|
if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1: |
|
|
|
output_type_in_base_model_to_use = "latent" |
|
|
|
current_estimated_total_refiner_steps = rounded_number(num_inference_steps_in_refiner - (num_inference_steps_in_refiner * refining_denoise_start_field_value)) |
|
|
|
if current_estimated_total_refiner_steps < 1: |
|
|
|
current_estimated_total_refiner_steps = 1 |
|
|
|
|
|
|
|
upscaled_image_width = 0 |
|
upscaled_image_height = 0 |
|
|
|
if use_upscaler == 1: |
|
|
|
upscaled_image_width = int(image_width * 2) |
|
upscaled_image_height = int(image_height * 2) |
|
|
|
|
|
|
|
current_base_model_generation_start_time = 0 |
|
|
|
global saved_final_base_model_pil_image_if_using_refiner |
|
saved_final_base_model_pil_image_if_using_refiner = None |
|
|
|
global upscaled_image_canceled |
|
upscaled_image_canceled = 0 |
|
|
|
global canceled_pil_image |
|
canceled_pil_image = None |
|
|
|
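# Per-step callback for the base model: counts the actual steps, reports |
# progress, writes preview images at the configured interval, captures the |
# final base-model image when refining/upscaling, and honors cancellation. |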
def callback_function_for_base_model_progress( |
|
callback_pipe, |
|
callback_step_index, |
|
callback_timestep, |
|
callback_kwargs |
|
): |
|
|
|
callback_step_number = (int(callback_step_index) + 1) |
|
|
|
global current_actual_total_base_model_steps |
|
|
|
current_actual_total_base_model_steps += 1 |
|
|
|
global current_base_model_generation_start_time |
|
|
|
if callback_step_number == 1: |
|
|
|
current_base_model_generation_start_time = time.time() |
|
|
|
seconds_per_step = 0 |
|
|
|
if callback_step_number >= 2: |
|
|
|
seconds_per_step = ((time.time() - current_base_model_generation_start_time) / callback_step_number) |
|
|
|
nice_time_per_step = nice_elapsed_time(seconds_per_step) |
|
|
|
base_model_progress_text = nice_time_per_step + " per step" |
|
|
|
else: |
|
|
|
base_model_progress_text = "Base model processing started" |
|
|
|
cancel_process = image_processing_is_canceled(user_id_state_value, image_generation_id_state_value) |
|
|
|
if cancel_process: |
|
|
|
pipe._interrupt = True |
|
|
|
if ( |
|
create_preview_images and |
|
( |
|
( |
|
( |
|
((int(callback_step_index) % image_preview_step_interval) == 0) or |
|
(seconds_per_step >= image_preview_seconds_interval) |
|
) and |
|
(callback_step_number < current_estimated_total_base_model_steps) |
|
) or |
|
( |
|
( |
|
(use_refiner == 1) or |
|
(use_upscaler == 1) |
|
) and |
|
(callback_step_number == current_estimated_total_base_model_steps) |
|
) |
|
) |
|
): |
|
|
|
latents = callback_kwargs["latents"] |
|
|
|
temporary_extra = str(user_id_state_value) + "_base_model_" + str(callback_step_number) |
|
|
|
model_to_use = base_model_name_value |
|
|
|
is_final_image = 0 |
|
|
|
create_preview_image( |
|
model_to_use, |
|
user_id_state_value, |
|
pipe, |
|
latents, |
|
generator, |
|
temporary_extra |
|
) |
|
|
|
if ( |
|
cancel_process or |
|
( |
|
(base_model_steps == callback_step_number) and |
|
( |
|
(use_refiner == 1) or |
|
(use_upscaler == 1) |
|
) |
|
) |
|
): |
|
|
|
global saved_final_base_model_pil_image_if_using_refiner |
|
|
|
latents = callback_kwargs["latents"] |
|
|
|
is_final_image = 1 |
|
|
|
saved_final_base_model_pil_image_if_using_refiner = create_image_from_latents( |
|
base_model_name_value, |
|
callback_pipe, |
|
latents, |
|
generator, |
|
is_final_image |
|
) |
|
|
|
if ( |
|
(show_image_creation_progress_log == 1) and |
|
(callback_step_number <= current_estimated_total_base_model_steps) |
|
): |
|
|
|
progress( |
|
progress = ( |
|
callback_step_number, |
|
current_estimated_total_base_model_steps |
|
), |
|
desc = base_model_progress_text, |
|
unit = "base model steps" |
|
) |
|
|
|
return {} |
|
|
|
callback_to_do_for_base_model_progress = callback_function_for_base_model_progress |
|
|
|
|
|
|
|
if ( |
|
(show_image_creation_progress_log == 1) or |
|
enable_image_generation_cancellation or |
|
create_preview_images |
|
): |
|
|
|
current_refiner_generation_start_time = 0 |
|
|
|
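# Per-step callback for the refiner: the same progress, preview and |
# cancellation handling as the base-model callback. |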
def callback_function_for_refiner_progress( |
|
callback_pipe, |
|
callback_step_index, |
|
callback_timestep, |
|
callback_kwargs |
|
): |
|
|
|
callback_step_number = (int(callback_step_index) + 1) |
|
|
|
global current_actual_total_refiner_steps |
|
|
|
current_actual_total_refiner_steps += 1 |
|
|
|
global current_refiner_generation_start_time |
|
|
|
if callback_step_number == 1: |
|
|
|
current_refiner_generation_start_time = time.time() |
|
|
|
seconds_per_step = 0 |
|
|
|
if callback_step_number >= 2: |
|
|
|
seconds_per_step = ((time.time() - current_refiner_generation_start_time) / callback_step_number) |
|
|
|
nice_time_per_step = nice_elapsed_time(seconds_per_step) |
|
|
|
refiner_progress_text = nice_time_per_step + " per step" |
|
|
|
else: |
|
|
|
refiner_progress_text = "Refiner processing started" |
|
|
|
if ( |
|
(do_not_create_refining_preview_images == 0) and |
|
( |
|
( |
|
( |
|
((int(callback_step_index) % image_preview_step_interval) == 0) or |
|
(seconds_per_step >= image_preview_seconds_interval) |
|
) and |
|
(callback_step_number < current_estimated_total_refiner_steps) |
|
) or |
|
( |
|
(use_upscaler == 1) and |
|
(callback_step_number == current_estimated_total_refiner_steps) |
|
) |
|
) |
|
): |
|
|
|
latents = callback_kwargs["latents"] |
|
|
|
temporary_extra = str(user_id_state_value) + "_refiner_" + str(callback_step_number) |
|
|
|
model_to_use = "refiner" |
|
|
|
create_preview_image( |
|
model_to_use, |
|
user_id_state_value, |
|
callback_pipe, |
|
latents, |
|
generator, |
|
temporary_extra |
|
) |
|
|
|
if ( |
|
(show_image_creation_progress_log == 1) and |
|
(callback_step_number <= current_estimated_total_refiner_steps) |
|
): |
|
|
|
progress( |
|
progress = ( |
|
callback_step_number, |
|
current_estimated_total_refiner_steps |
|
), |
|
desc = refiner_progress_text, |
|
unit = "est. refiner steps" |
|
) |
|
|
|
if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): |
|
|
|
refiner._interrupt = True |
|
|
|
return {} |
|
|
|
callback_to_do_for_refiner_progress = callback_function_for_refiner_progress |
|
|
|
|
|
|
|
current_upscaler_generation_start_time = 0 |
|
|
|
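# Per-step callback for the upscaler (older callback/callback_steps API). |
# On cancellation it decodes the current latents into an image and raises |
# "end_at_this_step", which is caught around the upscaler call below. |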
def callback_function_for_upscaler_progress( |
|
callback_step_index, |
|
callback_timestep, |
|
callback_latents |
|
): |
|
|
|
callback_step_number = (int(callback_step_index) + 1) |
|
|
|
global current_actual_total_upscaler_steps |
|
|
|
current_actual_total_upscaler_steps += 1 |
|
|
|
global current_upscaler_generation_start_time |
|
|
|
if callback_step_number == 1: |
|
|
|
current_upscaler_generation_start_time = time.time() |
|
|
|
seconds_per_step = 0 |
|
|
|
if callback_step_number >= 2: |
|
|
|
seconds_per_step = ((time.time() - current_upscaler_generation_start_time) / callback_step_number) |
|
|
|
nice_time_per_step = nice_elapsed_time(seconds_per_step) |
|
|
|
upscaler_progress_text = nice_time_per_step + " per step" |
|
|
|
else: |
|
|
|
upscaler_progress_text = "Upscaler processing started" |
|
|
|
if ( |
|
(do_not_create_upscaling_preview_images == 0) and |
|
( |
|
( |
|
((int(callback_step_index) % image_preview_step_interval) == 0) or |
|
(seconds_per_step >= image_preview_seconds_interval) |
|
) and |
|
(callback_step_number < current_estimated_total_upscaler_steps) |
|
) |
|
): |
|
|
|
latents = callback_latents |
|
|
|
temporary_extra = str(user_id_state_value) + "_upscale_" + str(callback_step_index) |
|
|
|
model_to_use = base_model_name_value |
|
|
|
create_preview_image( |
|
model_to_use, |
|
user_id_state_value, |
|
pipe, |
|
latents, |
|
generator, |
|
temporary_extra |
|
) |
|
|
|
if ( |
|
(show_image_creation_progress_log == 1) and |
|
(callback_step_number <= current_estimated_total_upscaler_steps) |
|
): |
|
|
|
progress( |
|
progress = ( |
|
callback_step_number, |
|
current_estimated_total_upscaler_steps |
|
), |
|
desc = upscaler_progress_text, |
|
unit = "upscaler steps" |
|
) |
|
|
|
if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): |
|
|
|
|
|
|
|
|
|
|
|
|
|
global upscaled_image_canceled |
|
global canceled_pil_image |
|
|
|
upscaled_image_canceled = 1 |
|
|
|
model_to_use = base_model_name_value |
|
|
|
latents = callback_latents |
|
|
|
is_final_image = 1 |
|
|
|
canceled_pil_image = create_image_from_latents( |
|
model_to_use, |
|
pipe, |
|
latents, |
|
generator, |
|
is_final_image |
|
) |
|
|
|
raise Exception("end_at_this_step") |
|
|
|
return {} |
|
|
|
callback_to_do_for_upscaler_progress = callback_function_for_upscaler_progress |
|
|
|
|
|
|
|
else: |
|
|
|
callback_to_do_for_base_model_progress = None |
|
callback_to_do_for_refiner_progress = None |
|
callback_to_do_for_upscaler_progress = None |
|
|
|
|
|
|
|
if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Image processing was canceled before any part of the image could be created."); |
|
|
|
remove_from_cancel_object(user_id_state_value, image_generation_id_state_value) |
|
|
|
output_text_field_update = gr.Textbox() |
|
return { |
|
output_text_field: output_text_field_update |
|
} |
|
|
|
|
|
|
|
task_info_for_progress = "Initial image creation has begun" |
|
if use_refiner == 1: |
|
if use_upscaler == 1: |
|
task_info_for_command_prompt = "Will create initial image, then refine and then upscale.\nInitial image steps..." |
|
else: |
|
task_info_for_command_prompt = "Will create initial image and then refine.\nInitial image steps..." |
|
else: |
|
if use_upscaler == 1: |
|
task_info_for_command_prompt = "Will create initial image and then upscale.\nInitial image steps..." |
|
else: |
|
task_info_for_command_prompt = "Will create image (no refining or upscaling).\nImage steps..." |
|
task_info_for_progress = "Image creation has begun" |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print (task_info_for_command_prompt); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = task_info_for_progress |
|
) |
|
|
|
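# Base model run. When the refiner is enabled, denoising_end and/or a |
# latent output type may have been set above so the refiner can continue |
# from where the base model stopped. |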
base_image = pipe( |
|
prompt = prompt_text_to_use_inside_pipeline, |
|
negative_prompt = negative_prompt_text_to_use_inside_pipeline, |
|
prompt_embeds = prompt_embeds, |
|
negative_prompt_embeds = negative_prompt_embeds, |
|
width = image_width, |
|
height = image_height, |
|
num_inference_steps = base_model_steps, |
|
guidance_scale = guidance_scale, |
|
num_images_per_prompt = 1, |
|
generator = generator, |
|
denoising_end = denoising_end_in_base_model_to_use, |
|
output_type = output_type_in_base_model_to_use, |
|
callback_on_step_end = callback_to_do_for_base_model_progress |
|
) |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
|
|
|
|
have_upscaled_image = 0 |
|
have_refined_image = 0 |
|
|
|
base_image_for_next_step = base_image.images |
|
|
|
refined_image_for_next_step = None |
|
upscaled_image_for_next_step = None |
|
|
|
|
|
|
|
if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): |
|
|
|
use_refiner = 0 |
|
use_upscaler = 0 |
|
|
|
|
|
|
|
refiner_error = 0 |
|
|
|
if ( |
|
(use_refiner == 1) and |
|
(last_refiner_name_selected_state_value == "") |
|
): |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Refiner is loading."); |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Refiner is loading" |
|
) |
|
|
|
try: |
|
|
|
refiner = construct_refiner() |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
last_refiner_name_selected_state_value = "refiner" |
|
|
|
except BaseException as error_message: |
|
|
|
use_refiner = 0 |
|
|
|
refiner_error = 1 |
|
|
|
error_count += 1 |
|
|
|
error_message_array.extend([ |
|
"An error occurred while trying to load the refiner:\n" + str(error_message) |
|
]) |
|
|
|
if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): |
|
|
|
use_refiner = 0 |
|
use_upscaler = 0 |
|
|
|
else: |
|
|
|
refiner = stored_refiner_state |
|
|
|
|
|
|
|
if ( |
|
(use_refiner == 1) or |
|
(use_upscaler == 1) |
|
): |
|
|
|
image_for_next_step = base_image_for_next_step |
|
|
|
|
|
|
|
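# Refining stage: run the refiner from the configured denoise start. If an |
# error occurs, refining is skipped and the base image is used instead. |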
if use_refiner == 1: |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Refiner steps..."); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Refining is beginning" |
|
) |
|
|
|
try: |
|
|
|
refined_image = refiner( |
|
prompt = prompt_text_to_use_inside_pipeline, |
|
negative_prompt = negative_prompt_text_to_use_inside_pipeline, |
|
prompt_embeds = prompt_embeds, |
|
negative_prompt_embeds = negative_prompt_embeds, |
|
image = base_image_for_next_step, |
|
num_inference_steps = num_inference_steps_in_refiner, |
|
denoising_start = refining_denoise_start_field_value, |
|
output_type = "pil", |
|
generator = generator, |
|
callback_on_step_end = callback_to_do_for_refiner_progress |
|
) |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
except BaseException as error_message: |
|
|
|
|
|
|
|
|
|
|
|
use_refiner = 0 |
|
use_upscaler = 0 |
|
|
|
refiner_error = 1 |
|
|
|
error_count += 1 |
|
|
|
error_message_array.extend([ |
|
"An error occurred while refining:\n" + str(error_message) |
|
]) |
|
|
|
|
|
|
|
if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): |
|
|
|
use_upscaler = 0 |
|
|
|
if use_refiner == 1: |
|
|
|
|
|
|
|
have_refined_image = 1 |
|
|
|
refined_image_for_next_step = refined_image.images |
|
|
|
image_for_next_step = refined_image_for_next_step |
|
|
|
|
|
|
|
upscaler_error = 0 |
|
|
|
|
|
|
|
if ( |
|
(use_upscaler == 1) and |
|
(last_upscaler_name_selected_state_value == "") |
|
): |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Upscaler is loading."); |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Upscaler is loading" |
|
) |
|
|
|
try: |
|
|
|
upscaler = construct_upscaler() |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
last_upscaler_name_selected_state_value = "upscaler" |
|
|
|
except BaseException as error_message: |
|
|
|
use_upscaler = 0 |
|
|
|
upscaler_error = 1 |
|
|
|
error_count += 1 |
|
|
|
error_message_array.extend([ |
|
"An error occurred while trying to load the upscaler:\n" + str(error_message) |
|
]) |
|
|
|
if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): |
|
|
|
use_upscaler = 0 |
|
|
|
else: |
|
|
|
upscaler = stored_upscaler_state |
|
|
|
|
|
|
|
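# Upscaling stage (2x latent upscaler): feed it the refined image when one |
# exists, otherwise the base model output. |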
if use_upscaler == 1: |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Upscaler steps..."); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Upscaling is beginning" |
|
) |
|
|
|
|
|
|
|
if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1: |
|
|
|
if use_refiner == 1: |
|
|
|
image_for_next_step = refined_image_for_next_step[0] |
|
|
|
elif use_refiner == 0: |
|
|
|
image_for_next_step = base_image_for_next_step |
|
|
|
else: |
|
|
|
if use_refiner == 1: |
|
|
|
image_for_next_step = refined_image_for_next_step[0] |
|
|
|
elif use_refiner == 0: |
|
|
|
image_for_next_step = base_image_for_next_step[0] |
|
|
|
|
|
|
|
try: |
|
|
|
upscaled_image = upscaler( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
image = image_for_next_step, |
|
num_inference_steps = upscaling_steps, |
|
guidance_scale = 0, |
|
generator = generator, |
|
callback = callback_to_do_for_upscaler_progress, |
|
callback_steps = 1 |
|
) |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
except BaseException as error_message: |
|
|
|
use_upscaler = 0 |
|
|
|
if str(error_message) != "end_at_this_step": |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
upscaler_error = 1 |
|
|
|
error_count += 1 |
|
|
|
error_message_array.extend([ |
|
"An error occurred while upscaling:\n" + str(error_message) |
|
]) |
|
|
|
|
|
|
|
if use_upscaler == 1: |
|
|
|
|
|
|
|
have_upscaled_image = 1 |
|
|
|
upscaled_image_for_next_step = upscaled_image.images[0] |
|
|
|
|
|
|
|
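# Decide which image to return: the snapshot captured when upscaling was |
# canceled, the upscaled image, the refined image, or the base model output |
# (decoded from latents if the base output was kept in latent space). |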
if upscaled_image_canceled == 1: |
|
|
|
for key, value in enumerate(canceled_pil_image): |
|
image_to_return = value |
|
|
|
elif have_upscaled_image == 1: |
|
|
|
image_to_return = upscaled_image_for_next_step |
|
|
|
elif have_refined_image == 1: |
|
|
|
image_to_return = refined_image_for_next_step[0] |
|
|
|
else: |
|
|
|
if output_type_in_base_model_to_use == "latent": |
|
|
|
|
|
|
|
for key, value in enumerate(saved_final_base_model_pil_image_if_using_refiner): |
|
image_to_return = value |
|
|
|
else: |
|
|
|
image_to_return = base_image_for_next_step[0] |
|
|
|
|
|
|
|
image_has_been_canceled = 0 |
|
|
|
if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): |
|
|
|
image_has_been_canceled = 1 |
|
|
|
|
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
|
|
|
|
which_image = "" |
|
|
|
|
|
|
|
image_generation_information = create_image_generation_information( |
|
base_model_name_value, |
|
model_configuration_name_value, |
|
scheduler_used, |
|
scheduler_value, |
|
prompt_text, |
|
prompt_text_not_used_substring, |
|
negative_prompt_text, |
|
negative_prompt_text_not_used_substring, |
|
allow_longer_prompts_for_sd_1_5_based_models_field_value, |
|
image_width, |
|
image_height, |
|
actual_seed, |
|
add_seed_into_pipe, |
|
use_torch_manual_seed_but_do_not_add_to_pipe_field_value, |
|
guidance_scale, |
|
base_model_steps, |
|
display_xformers_usage_in_prompt_info, |
|
display_default_attn_processor_usage_in_prompt_info, |
|
display_diffusers_version_in_prompt_info, |
|
use_refiner, |
|
refiner_error, |
|
refining_denoise_start_field_value, |
|
denoising_end_applicable, |
|
refining_use_denoising_start_in_base_model_when_using_refiner_field_value, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field_value, |
|
refining_steps_option_for_older_configuration_field_value, |
|
refining_steps_for_older_configuration_field_value, |
|
use_upscaler, |
|
upscaler_error, |
|
upscaling_steps, |
|
upscaled_image_width, |
|
upscaled_image_height, |
|
current_actual_total_base_model_steps, |
|
current_actual_total_refiner_steps, |
|
current_actual_total_upscaler_steps, |
|
generation_start_time, |
|
image_has_been_canceled, |
|
which_image |
|
) |
|
|
|
|
|
|
|
output_text_field_update = gr.Textbox( |
|
value = image_generation_information, |
|
lines = 12 |
|
) |
|
|
|
|
|
|
|
if add_generation_information_to_image == 1: |
|
|
|
|
|
|
|
info_to_save_in_image = "\n-----------\nImage generation information:\n" + image_generation_information + "\n-----------\n" |
|
|
|
image_to_return.info = {"parameters": info_to_save_in_image} |
|
|
|
|
|
|
|
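# Auto-save the image (and a matching .txt file of the generation details) into a date-stamped folder, using an incrementing four-digit counter so earlier files are never overwritten. |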
if ( |
|
(auto_save_imagery == 1) and |
|
( |
|
(image_has_been_canceled == 0) or |
|
(save_canceled_images == 1) |
|
) |
|
): |
|
|
|
|
|
|
|
if not os.path.exists(saved_images_dir): |
|
os.makedirs(saved_images_dir) |
|
|
|
yy_mm_dd_date_stamp = datetime.today().strftime('%Y-%m-%d') |
|
|
|
saved_images_date_dir = saved_images_dir + "/" + yy_mm_dd_date_stamp + "/" |
|
|
|
if not os.path.exists(saved_images_date_dir): |
|
os.makedirs(saved_images_date_dir) |
|
|
|
image_count = 1 |
|
|
|
file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count) |
|
|
|
saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png" |
|
|
|
while os.path.exists(saved_image_path_and_file): |
|
|
|
file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count) |
|
|
|
saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png" |
|
|
|
image_count += 1 |
|
|
|
final_image_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".png" |
|
|
|
save_image_file( |
|
final_image_file_path_and_file, |
|
image_to_return, |
|
add_generation_information_to_image, |
|
info_to_save_in_image |
|
) |
|
|
|
final_image_text_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".txt" |
|
|
|
prompt_info_file_handle = open(final_image_text_file_path_and_file, "w") |
|
prompt_info_file_handle.writelines(image_generation_information) |
|
prompt_info_file_handle.close() |
|
|
|
|
|
|
|
output_image_field_update = gr.Image( |
|
value = image_to_return |
|
) |
|
|
|
|
|
|
|
image_gallery_array_state_value.insert(0, image_to_return) |
|
prompt_information_array_state_value.insert(0, image_generation_information) |
|
|
|
output_image_gallery_field_update = gr.Gallery( |
|
value = image_gallery_array_state_value, |
|
selected_index = 0 |
|
) |
|
|
|
image_gallery_array_state_update = image_gallery_array_state_value |
|
|
|
prompt_information_array_state_update = prompt_information_array_state_value |
|
|
|
|
|
|
|
output_base_model_image_field_accordion_update = gr.Accordion( |
|
visible = False |
|
) |
|
|
|
output_base_model_image_field_update = gr.Image() |
|
|
|
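# When a refiner or upscaler was used, optionally show and save the intermediate base model image as well. |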
if ( |
|
( |
|
(have_refined_image == 1) or |
|
(have_upscaled_image == 1) |
|
) and |
|
(save_base_image_when_using_refiner_or_upscaler_field_value == 1) |
|
): |
|
|
|
|
|
|
|
for key, value in enumerate(saved_final_base_model_pil_image_if_using_refiner): |
|
base_model_image_to_return = value |
|
|
|
if add_generation_information_to_image == 1: |
|
|
|
|
|
|
|
info_to_save_in_base_model_image = "\n-----------\nThis is the base model image that was created during the image generation described below. Some of the details below do not apply to this image.\n\nImage generation information:\n" + image_generation_information + "\n-----------\n" |
|
|
|
base_model_image_to_return.info = {"parameters": info_to_save_in_base_model_image} |
|
|
|
if ( |
|
(auto_save_imagery == 1) and |
|
( |
|
(image_has_been_canceled == 0) or |
|
(save_canceled_images == 1) |
|
) |
|
): |
|
|
|
base_model_image_file_path_and_file = saved_images_date_dir + file_name_without_extension + "_base_model.png" |
|
|
|
save_image_file( |
|
base_model_image_file_path_and_file, |
|
base_model_image_to_return, |
|
add_generation_information_to_image, |
|
info_to_save_in_base_model_image |
|
) |
|
|
|
output_base_model_image_field_accordion_update = gr.Accordion( |
|
visible = True |
|
) |
|
|
|
output_base_model_image_field_update = gr.Image( |
|
value = base_model_image_to_return |
|
) |
|
|
|
|
|
|
|
output_refiner_image_field_accordion_update = gr.Accordion( |
|
visible = False |
|
) |
|
|
|
output_refiner_image_field_update = gr.Image() |
|
|
|
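# When both the refiner and the upscaler were used, optionally show and save the intermediate refined image as well. |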
if ( |
|
(use_refiner == 1) and |
|
(have_upscaled_image == 1) and |
|
(save_refined_image_when_using_upscaler_field_value == 1) |
|
): |
|
|
|
|
|
|
|
refined_image_to_return = refined_image_for_next_step[0] |
|
|
|
if add_generation_information_to_image == 1: |
|
|
|
|
|
|
|
info_to_save_in_refined_image = "\n-----------\nThis is the refined image that was created during the image generation described below. Some of the details below do not apply to this image.\n\nImage generation information:\n" + image_generation_information + "\n-----------\n" |
|
|
|
refined_image_to_return.info = {"parameters": info_to_save_in_refined_image} |
|
|
|
if ( |
|
(auto_save_imagery == 1) and |
|
( |
|
(image_has_been_canceled == 0) or |
|
(save_canceled_images == 1) |
|
) |
|
): |
|
|
|
refined_image_file_path_and_file = saved_images_date_dir + file_name_without_extension + "_refined.png" |
|
|
|
save_image_file( |
|
refined_image_file_path_and_file, |
|
refined_image_to_return, |
|
add_generation_information_to_image, |
|
info_to_save_in_refined_image |
|
) |
|
|
|
output_refiner_image_field_accordion_update = gr.Accordion( |
|
visible = True |
|
) |
|
|
|
output_refiner_image_field_update = gr.Image( |
|
value = refined_image_to_return |
|
) |
|
|
|
|
|
|
|
error_text_field_accordion_update = gr.Accordion( |
|
visible = False |
|
) |
|
|
|
error_text_field_update = gr.Textbox( |
|
value = "" |
|
) |
|
|
|
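# If any errors were collected along the way, list them in the error accordion. |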
if (len(error_message_array) > 0): |
|
|
|
error_information = "" |
|
|
|
error_message_array_length = len(error_message_array) |
|
|
|
error_count = 1 |
|
|
|
for error_message in error_message_array: |
|
|
|
if (error_message_array_length > 1): |
|
error_information += "Error " + str(error_count) + ":\n" |
|
|
|
error_information += error_message + "\n\n" |
|
|
|
error_count += 1 |
|
|
|
error_text_field_accordion_update = gr.Accordion( |
|
visible = True |
|
) |
|
|
|
error_text_field_update = gr.Textbox( |
|
value = error_information |
|
) |
|
|
|
|
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Image created.") |
|
|
|
|
|
|
|
last_model_configuration_name_selected_state_update = last_model_configuration_name_selected_state_value |
|
last_refiner_name_selected_state_update = last_refiner_name_selected_state_value |
|
last_upscaler_name_selected_state_update = last_upscaler_name_selected_state_value |
|
|
|
|
|
|
|
current_preview_image = None |
|
current_preview_image_user_id = 0 |
|
|
|
|
|
|
|
if image_has_been_canceled == 1: |
|
|
|
remove_from_cancel_object(user_id_state_value, image_generation_id_state_value) |
|
|
|
|
|
|
|
return ( |
|
output_image_field_update, |
|
output_image_gallery_field_update, |
|
output_base_model_image_field_accordion_update, |
|
output_base_model_image_field_update, |
|
output_refiner_image_field_accordion_update, |
|
output_refiner_image_field_update, |
|
output_text_field_update, |
|
prompt_truncated_field_group_update, |
|
prompt_truncated_field_update, |
|
negative_prompt_truncated_field_group_update, |
|
negative_prompt_truncated_field_update, |
|
last_model_configuration_name_selected_state_update, |
|
last_refiner_name_selected_state_update, |
|
last_upscaler_name_selected_state_update, |
|
pipe, |
|
refiner, |
|
upscaler, |
|
error_text_field_accordion_update, |
|
error_text_field_update |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
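# Marks an image generation as canceled and updates the page accordingly, depending on whether that generation is currently in progress or still waiting in the queue. |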
def cancel_image_function( |
|
user_id_state, |
|
image_generation_id_state |
|
): |
|
|
|
user_id_state_value = user_id_state.value |
|
image_generation_id_state_value = image_generation_id_state.value |
|
|
|
|
|
|
|
global cancel_image_generation_ids_object |
|
global cancel_image_generation_times_object |
|
|
|
if (user_id_state_value not in cancel_image_generation_ids_object): |
|
|
|
cancel_image_generation_ids_object[user_id_state_value] = [] |
|
|
|
cancel_image_generation_ids_object[user_id_state_value].append(image_generation_id_state_value) |
|
cancel_image_generation_times_object[image_generation_id_state_value] = time.time() |
|
|
|
|
|
|
|
cancel_image_button_update = gr.Button( |
|
value = cancel_image_button_in_progress_text, |
|
interactive = False |
|
) |
|
|
|
cancel_image_message_field_row_update = gr.Row( |
|
visible = True |
|
) |
|
|
|
cancel_image_object = {} |
|
|
|
if image_generation_id_state_value == current_image_generation_id_in_progress: |
|
|
|
|
|
|
|
|
|
|
|
|
|
cancel_image_message_field_html = canceled_image_in_process_of_being_generated |
|
|
|
|
|
|
|
show_message("Your image generation is being canceled. After the current step in the process is complete, the cancellation will be complete.") |
|
|
|
|
|
|
|
generate_image_button_row_update = gr.Row( |
|
visible = False |
|
) |
|
|
|
cancel_image_object.update({ |
|
generate_image_button_row: generate_image_button_row_update, |
|
cancel_image_message_field_row: cancel_image_message_field_row_update |
|
}) |
|
|
|
else: |
|
|
|
|
|
|
|
|
|
cancel_image_message_field_html = canceled_image_in_queue_message |
|
|
|
generate_image_button_update = gr.Button( |
|
value = generate_image_button_normal_text, |
|
interactive = False |
|
) |
|
|
|
output_text_field_update = gr.Textbox( |
|
visible = True |
|
) |
|
|
|
output_image_preview_field_accordion_update = gr.Accordion( |
|
visible = False |
|
) |
|
|
|
cancel_image_button_row_update = gr.Row( |
|
visible = False |
|
) |
|
|
|
cancel_image_object.update({ |
|
generate_image_button: generate_image_button_update, |
|
output_text_field: output_text_field_update, |
|
output_image_preview_field_accordion: output_image_preview_field_accordion_update, |
|
cancel_image_button_row: cancel_image_button_row_update, |
|
cancel_image_message_field_row: cancel_image_message_field_row_update |
|
}) |
|
|
|
|
|
|
|
cancel_image_message_field_update = gr.HTML( |
|
value = cancel_image_message_field_html |
|
) |
|
|
|
cancel_image_object.update({ |
|
cancel_image_button: cancel_image_button_update, |
|
cancel_image_message_field_row: cancel_image_message_field_row_update, |
|
cancel_image_message_field: cancel_image_message_field_update |
|
}) |
|
|
|
|
|
|
|
return cancel_image_object |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
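# Downloads model data from Hugging Face ahead of time. Option "1" fetches the default configuration for each base model; option "2" fetches every configuration. The refiner and upscaler data are downloaded in both cases. Requires "HF_HUB_OFFLINE" to be set to "0". |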
def download_data_from_huggingface( |
|
download_data_option |
|
): |
|
|
|
if ( |
|
(script_being_run_on_hugging_face == 0) and |
|
("HF_HUB_OFFLINE" in os.environ) and |
|
(int(os.environ["HF_HUB_OFFLINE"]) == 0) |
|
): |
|
|
|
data_to_get_partial_message = "the default model configuration defined in \"base_model_model_configuration_defaults_object\" for that model will be downloaded. It accesses data that is linked in \"model_configuration_links_object\"." |
|
|
|
if download_data_option == "2": |
|
|
|
data_to_get_partial_message = "all model data, for each model configuration, will be downloaded. This is defined in \"base_model_object_of_model_configuration_arrays\" and accesses data that is linked in \"model_configuration_links_object\"." |
|
|
|
download_data_message = "For each model in the model dropdown (\"base_model_array\"), " + data_to_get_partial_message + " That could easily be dozens of gigabytes of data or more that is about to be downloaded. If you want to stop the download, close the command prompt." |
|
|
|
print (download_data_message) |
|
|
|
data_links_downloaded_object = {} |
|
|
|
for this_base_model in base_model_array: |
|
|
|
base_model_name_value = this_base_model |
|
|
|
if download_data_option == "1": |
|
|
|
default_model_configuration_for_this_base_model = base_model_model_configuration_defaults_object[this_base_model] |
|
|
|
print ("Downloading/loading \"" + this_base_model + "\" model data for \"" + default_model_configuration_for_this_base_model + "\"...") |
|
|
|
model_configuration_name_value = default_model_configuration_for_this_base_model |
|
|
|
construct_pipe ( |
|
base_model_name_value, |
|
model_configuration_name_value |
|
) |
|
|
|
else: |
|
|
|
for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: |
|
|
|
if ( |
|
(this_model_configuration in model_configuration_names_object) and |
|
(this_model_configuration in model_configuration_links_object) |
|
): |
|
|
|
model_configuration_name_value = this_model_configuration |
|
model_configuration_link_value = model_configuration_links_object[this_model_configuration] |
|
|
|
if model_configuration_link_value not in data_links_downloaded_object: |
|
|
|
print ("Downloading/loading \"" + this_base_model + "\" model data from \"" + model_configuration_link_value + "\"...") |
|
|
|
construct_pipe ( |
|
base_model_name_value, |
|
model_configuration_name_value |
|
) |
|
|
|
data_links_downloaded_object[model_configuration_link_value] = 1 |
|
|
|
print ("Downloading/loading refiner data...") |
|
|
|
construct_refiner() |
|
|
|
print ("Downloading/loading upscaler data...") |
|
|
|
construct_upscaler() |
|
|
|
print ("The data has been downloaded.") |
|
|
|
else: |
|
|
|
error_function("In order to download model data, \"HF_HUB_OFFLINE\" must be set to \"0\" in the Windows .bat file that launched this script.") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
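# Builds a shareable URL that encodes the current form settings as query string parameters, so the page can be reloaded later with the same options selected. |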
def generate_link_function( |
|
page_url_hidden_field_value, |
|
generate_link_field_value, |
|
|
|
base_model_field_index, |
|
prompt_text, |
|
negative_prompt_text, |
|
allow_longer_prompts_for_sd_1_5_based_models_field_value, |
|
scheduler_index, |
|
image_width, |
|
image_height, |
|
guidance_scale, |
|
base_model_steps, |
|
base_model_steps_field_for_sdxl_turbo, |
|
actual_seed, |
|
add_seed_into_pipe, |
|
use_torch_manual_seed_but_do_not_add_to_pipe_field_value, |
|
refining_selection_field_value, |
|
refining_denoise_start_field_value, |
|
refining_use_denoising_start_in_base_model_when_using_refiner_field_value, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field_value, |
|
refining_steps_option_for_older_configuration_field_value, |
|
refining_steps_for_older_configuration_field_value, |
|
upscaling_selection_field_value, |
|
upscaling_steps, |
|
image_gallery_array_state_value, |
|
prompt_information_array_state_value, |
|
last_model_configuration_name_selected_state_value, |
|
last_refiner_name_selected_state_value, |
|
last_upscaler_name_selected_state_value, |
|
stored_pipe_state, |
|
stored_refiner_state, |
|
stored_upscaler_state, |
|
create_preview_images, |
|
do_not_create_refining_preview_images, |
|
do_not_create_upscaling_preview_images, |
|
save_base_image_when_using_refiner_or_upscaler_field_value, |
|
save_refined_image_when_using_upscaler_field_value, |
|
user_id_state, |
|
image_generation_id_state, |
|
*model_configuration_dropdown_fields_array |
|
): |
|
|
|
base_model_name_value = base_model_array[base_model_field_index] |
|
|
|
|
|
|
|
position_in_array = 0 |
|
|
|
model_configuration_field_object = {} |
|
|
|
for model_configuration_field_index in model_configuration_dropdown_fields_array: |
|
|
|
this_base_model = base_model_array[position_in_array] |
|
|
|
model_configuration_field_object[this_base_model] = model_configuration_field_index |
|
|
|
position_in_array += 1 |
|
|
|
model_configuration_field_index = model_configuration_field_object[base_model_name_value] |
|
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_name_value][model_configuration_field_index] |
|
|
|
|
|
|
|
scheduler_value = schedulers_array[scheduler_index] |
|
|
|
|
|
|
|
if base_model_name_value == "sdxl_turbo": |
|
base_model_steps = base_model_steps_field_for_sdxl_turbo |
|
|
|
|
|
|
|
page_vars = "" |
|
|
|
|
|
|
|
if base_model_valid(base_model_name_value): |
|
page_vars += base_model_field_key_in_url + "=" + base_model_name_value + "&" |
|
|
|
|
|
|
|
if model_configuration_valid(base_model_name_value, model_configuration_name_value): |
|
page_vars += model_configuration_key_in_url + "=" + model_configuration_name_value + "&" |
|
|
|
|
|
|
|
if scheduler_valid(scheduler_value): |
|
page_vars += scheduler_field_key_in_url + "=" + scheduler_value + "&" |
|
|
|
|
|
|
|
if width_valid(image_width): |
|
page_vars += image_width_field_key_in_url + "=" + str(image_width) + "&" |
|
|
|
|
|
|
|
if height_valid(image_height): |
|
page_vars += image_height_field_key_in_url + "=" + str(image_height) + "&" |
|
|
|
|
|
|
|
if guidance_scale_valid(guidance_scale): |
|
page_vars += guidance_scale_field_key_in_url + "=" + str(guidance_scale) + "&" |
|
|
|
|
|
|
|
if steps_valid(base_model_steps, base_model_name_value): |
|
page_vars += steps_key_in_url + "=" + str(base_model_steps) + "&" |
|
|
|
|
|
|
|
if seed_valid(actual_seed): |
|
page_vars += seed_field_key_in_url + "=" + str(actual_seed) + "&" |
|
|
|
|
|
|
|
nice_add_seed_into_pipe = "yes" |
|
if add_seed_into_pipe == False: |
|
nice_add_seed_into_pipe = "no" |
|
|
|
page_vars += add_seed_key_in_url + "=" + nice_add_seed_into_pipe + "&" |
|
|
|
|
|
|
|
if ( |
|
(add_seed_into_pipe == False) and |
|
(use_torch_manual_seed_but_do_not_add_to_pipe_field_value == True) |
|
): |
|
|
|
page_vars += use_torch_manual_seed_but_not_in_generator_key_in_url + "=yes&" |
|
|
|
|
|
|
|
nice_refining_selection_field_value = "no" |
|
if refining_selection_field_value == "Yes": |
|
nice_refining_selection_field_value = "yes" |
|
|
|
page_vars += refiner_key_in_url + "=" + nice_refining_selection_field_value + "&" |
|
|
|
|
|
|
|
if refiner_denoise_start_valid(refining_denoise_start_field_value): |
|
refining_denoise_start_field_value = rounded_number(refining_denoise_start_field_value, 2) |
|
page_vars += refiner_denoise_start_key_in_url + "=" + str(refining_denoise_start_field_value) + "&" |
|
|
|
|
|
|
|
if refining_steps_option_for_older_configuration_field_value == True: |
|
page_vars += refining_steps_option_key_in_url + "=yes&" |
|
|
|
if refining_steps_valid(refining_steps_for_older_configuration_field_value): |
|
page_vars += refining_steps_key_in_url + "=" + str(refining_steps_for_older_configuration_field_value) + "&" |
|
|
|
|
|
|
|
nice_refining_use_denoising_start_in_base_model_when_using_refiner_field_value = "yes" |
|
if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == False: |
|
nice_refining_use_denoising_start_in_base_model_when_using_refiner_field_value = "no" |
|
|
|
page_vars += use_denoising_start_in_base_model_when_using_refiner_key_in_url + "=" + nice_refining_use_denoising_start_in_base_model_when_using_refiner_field_value + "&" |
|
|
|
|
|
|
|
nice_refining_base_model_output_to_refiner_is_in_latent_space_field_value = "yes" |
|
if refining_base_model_output_to_refiner_is_in_latent_space_field_value == False: |
|
nice_refining_base_model_output_to_refiner_is_in_latent_space_field_value = "no" |
|
|
|
page_vars += base_model_output_to_refiner_is_in_latent_space_key_in_url + "=" + nice_refining_base_model_output_to_refiner_is_in_latent_space_field_value + "&" |
|
|
|
|
|
|
|
nice_upscaling_selection_field_value = "no" |
|
if upscaling_selection_field_value == "Yes": |
|
nice_upscaling_selection_field_value = "yes" |
|
|
|
page_vars += upscaler_key_in_url + "=" + nice_upscaling_selection_field_value + "&" |
|
|
|
|
|
|
|
if upscaling_steps_valid(upscaling_steps): |
|
page_vars += upscaling_steps_key_in_url + "=" + str(upscaling_steps) + "&" |
|
|
|
|
|
|
|
nice_show_base_image_when_using_refiner_or_upscaler = "yes" |
|
if save_base_image_when_using_refiner_or_upscaler_field_value == False: |
|
nice_show_base_image_when_using_refiner_or_upscaler = "no" |
|
|
|
page_vars += show_base_image_when_using_refiner_or_upscaler_key_in_url + "=" + nice_show_base_image_when_using_refiner_or_upscaler + "&" |
|
|
|
|
|
|
|
nice_show_refined_image_when_using_upscaler = "yes" |
|
if save_refined_image_when_using_upscaler_field_value == False: |
|
nice_show_refined_image_when_using_upscaler = "no" |
|
|
|
page_vars += show_refined_image_when_using_upscaler_key_in_url + "=" + nice_show_refined_image_when_using_upscaler + "&" |
|
|
|
|
|
|
|
nice_create_preview_images = "yes" |
|
if create_preview_images == False: |
|
nice_create_preview_images = "no" |
|
|
|
page_vars += create_preview_images_key_in_url + "=" + nice_create_preview_images + "&" |
|
|
|
|
|
|
|
nice_do_not_create_refining_preview_images = "yes" |
|
if do_not_create_refining_preview_images == False: |
|
nice_do_not_create_refining_preview_images = "no" |
|
|
|
page_vars += do_not_create_refining_preview_images_key_in_url + "=" + nice_do_not_create_refining_preview_images + "&" |
|
|
|
|
|
|
|
nice_do_not_create_upscaling_preview_images = "yes" |
|
if do_not_create_upscaling_preview_images == False: |
|
nice_do_not_create_upscaling_preview_images = "no" |
|
|
|
page_vars += do_not_create_upscaling_preview_images_key_in_url + "=" + nice_do_not_create_upscaling_preview_images + "&" |
|
|
|
|
|
|
|
nice_allow_longer_prompts_for_sd_1_5_based_models = "yes" |
|
if allow_longer_prompts_for_sd_1_5_based_models_field_value == False: |
|
nice_allow_longer_prompts_for_sd_1_5_based_models = "no" |
|
|
|
page_vars += allow_longer_prompts_for_sd_1_5_based_models_key_in_url + "=" + nice_allow_longer_prompts_for_sd_1_5_based_models + "&" |
|
|
|
|
|
|
|
prompt_text = prompt_text[:maximum_prompt_characer_count] |
|
if prompt_valid(prompt_text): |
|
page_vars += prompt_field_key_in_url + "=" + urllib.parse.quote(prompt_text) + "&" |
|
|
|
|
|
|
|
negative_prompt_text = negative_prompt_text[:maximum_neg_prompt_characer_count] |
|
if prompt_valid(negative_prompt_text): |
|
page_vars += negative_prompt_field_key_in_url + "=" + urllib.parse.quote(negative_prompt_text) + "&" |
|
|
|
|
|
|
|
|
|
|
|
page_vars = page_vars[:-1] |
|
|
|
page_url = page_url_hidden_field_value + "?" + page_vars |
|
|
|
return page_url |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_link_textbox_function( |
|
page_url_hidden_field_value, |
|
generate_link_field_value, |
|
|
|
base_model_field_index, |
|
prompt_text, |
|
negative_prompt_text, |
|
allow_longer_prompts_for_sd_1_5_based_models_field_value, |
|
scheduler_index, |
|
image_width, |
|
image_height, |
|
guidance_scale, |
|
base_model_steps, |
|
base_model_steps_field_for_sdxl_turbo, |
|
actual_seed, |
|
add_seed_into_pipe, |
|
use_torch_manual_seed_but_do_not_add_to_pipe_field_value, |
|
refining_selection_field_value, |
|
refining_denoise_start_field_value, |
|
refining_use_denoising_start_in_base_model_when_using_refiner_field_value, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field_value, |
|
refining_steps_option_for_older_configuration_field_value, |
|
refining_steps_for_older_configuration_field_value, |
|
upscaling_selection_field_value, |
|
upscaling_steps, |
|
image_gallery_array_state_value, |
|
prompt_information_array_state_value, |
|
last_model_configuration_name_selected_state_value, |
|
last_refiner_name_selected_state_value, |
|
last_upscaler_name_selected_state_value, |
|
stored_pipe_state, |
|
stored_refiner_state, |
|
stored_upscaler_state, |
|
create_preview_images, |
|
do_not_create_refining_preview_images, |
|
do_not_create_upscaling_preview_images, |
|
save_base_image_when_using_refiner_or_upscaler_field_value, |
|
save_refined_image_when_using_upscaler_field_value, |
|
user_id_state, |
|
image_generation_id_state, |
|
*model_configuration_dropdown_fields_array |
|
): |
|
|
|
page_url = generate_link_function( |
|
page_url_hidden_field_value, |
|
generate_link_field_value, |
|
|
|
base_model_field_index, |
|
prompt_text, |
|
negative_prompt_text, |
|
allow_longer_prompts_for_sd_1_5_based_models_field_value, |
|
scheduler_index, |
|
image_width, |
|
image_height, |
|
guidance_scale, |
|
base_model_steps, |
|
base_model_steps_field_for_sdxl_turbo, |
|
actual_seed, |
|
add_seed_into_pipe, |
|
use_torch_manual_seed_but_do_not_add_to_pipe_field_value, |
|
refining_selection_field_value, |
|
refining_denoise_start_field_value, |
|
refining_use_denoising_start_in_base_model_when_using_refiner_field_value, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field_value, |
|
refining_steps_option_for_older_configuration_field_value, |
|
refining_steps_for_older_configuration_field_value, |
|
upscaling_selection_field_value, |
|
upscaling_steps, |
|
image_gallery_array_state_value, |
|
prompt_information_array_state_value, |
|
last_model_configuration_name_selected_state_value, |
|
last_refiner_name_selected_state_value, |
|
last_upscaler_name_selected_state_value, |
|
stored_pipe_state, |
|
stored_refiner_state, |
|
stored_upscaler_state, |
|
create_preview_images, |
|
do_not_create_refining_preview_images, |
|
do_not_create_upscaling_preview_images, |
|
save_base_image_when_using_refiner_or_upscaler_field_value, |
|
save_refined_image_when_using_upscaler_field_value, |
|
user_id_state, |
|
image_generation_id_state, |
|
*model_configuration_dropdown_fields_array |
|
) |
|
|
|
generate_link_field_update = gr.Textbox( |
|
visible = True, |
|
value = page_url |
|
) |
|
|
|
return { |
|
generate_link_field: generate_link_field_update |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
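# Intended to run when the page loads: records the page URL, assigns a random user ID, then validates each query string parameter and returns component updates that prefill the form. |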
def get_query_params( |
|
request: gr.Request |
|
): |
|
|
|
raw_url_params = str(request.query_params) |
|
|
|
unprocessed_url_object = urllib.parse.parse_qs(raw_url_params) |
|
|
|
url_object = {} |
|
|
|
for url_param_key in unprocessed_url_object: |
|
|
|
url_param_value = unprocessed_url_object[url_param_key][0] |
|
|
|
if len(url_param_value) > 0: |
|
|
|
url_param_key = str(url_param_key) |
|
|
|
url_param_key_lowercase = url_param_key.lower() |
|
|
|
url_object[url_param_key_lowercase] = str(unprocessed_url_object[url_param_key][0]).lower() |
|
|
|
|
|
|
|
page_url = str(request.headers["referer"]) |
|
|
|
page_url = page_url.split("?")[0] |
|
|
|
page_url_hidden_field_update = gr.Textbox( |
|
value = page_url |
|
) |
|
|
|
field_object = { |
|
page_url_hidden_field: page_url_hidden_field_update |
|
} |
|
|
|
|
|
|
|
user_id_number = int(random.randrange(0, 1000000000)) |
|
|
|
user_id_state_update = gr.State(user_id_number) |
|
|
|
field_object.update({user_id_state: user_id_state_update}) |
|
|
|
|
|
|
|
base_model_name_value = default_base_model |
|
|
|
if base_model_field_key_in_url in url_object: |
|
|
|
base_model_field_in_url = url_object[base_model_field_key_in_url].lower() |
|
|
|
if base_model_valid(base_model_field_in_url): |
|
|
|
base_model_name_value = base_model_field_in_url |
|
|
|
base_model_nicely_named_value = base_model_names_object[base_model_name_value] |
|
|
|
field_object.update({base_model_field: base_model_nicely_named_value}) |
|
|
|
|
|
|
|
if download_data_key_in_url in url_object: |
|
|
|
download_data_option_in_url = str(url_object[download_data_key_in_url]) |
|
|
|
if ( |
|
(download_data_option_in_url == "1") or |
|
(download_data_option_in_url == "2") |
|
): |
|
|
|
download_data_from_huggingface(download_data_option_in_url) |
|
|
|
|
|
|
|
model_configuration_in_url = "" |
|
|
|
if model_configuration_key_in_url in url_object: |
|
|
|
model_configuration_in_url = url_object[model_configuration_key_in_url].lower() |
|
|
|
for this_base_model in base_model_array: |
|
|
|
if this_base_model not in base_model_object_of_model_configuration_arrays: |
|
continue |
|
|
|
if base_model_name_value == this_base_model: |
|
|
|
model_configuration_name_value = base_model_model_configuration_defaults_object[this_base_model] |
|
|
|
if len(model_configuration_in_url) > 0: |
|
|
|
for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: |
|
|
|
if this_model_configuration == model_configuration_in_url: |
|
|
|
model_configuration_name_value = this_model_configuration |
|
|
|
field_object.update({initial_model_configuration_name_selected_state: model_configuration_in_url}) |
|
|
|
|
|
|
|
if prompt_field_key_in_url in url_object: |
|
|
|
prompt_field_in_url = url_object[prompt_field_key_in_url].lower() |
|
|
|
if prompt_valid(prompt_field_in_url): |
|
|
|
field_object.update({prompt_field: prompt_field_in_url}) |
|
|
|
|
|
|
|
if negative_prompt_field_key_in_url in url_object: |
|
|
|
negative_prompt_field_in_url = url_object[negative_prompt_field_key_in_url].lower() |
|
|
|
if prompt_valid(negative_prompt_field_in_url): |
|
|
|
field_object.update({negative_prompt_field: negative_prompt_field_in_url}) |
|
|
|
|
|
|
|
if allow_longer_prompts_for_sd_1_5_based_models_key_in_url in url_object: |
|
|
|
allow_longer_prompts_for_sd_1_5_based_models_in_url = url_object[allow_longer_prompts_for_sd_1_5_based_models_key_in_url].lower() |
|
|
|
allow_longer_prompts_for_sd_1_5_based_models_in_url_formatted = True |
|
|
|
if ( |
|
(allow_longer_prompts_for_sd_1_5_based_models_in_url == "0") or |
|
(allow_longer_prompts_for_sd_1_5_based_models_in_url == "n") or |
|
(allow_longer_prompts_for_sd_1_5_based_models_in_url == "no") or |
|
(allow_longer_prompts_for_sd_1_5_based_models_in_url == "false") |
|
): |
|
|
|
allow_longer_prompts_for_sd_1_5_based_models_in_url_formatted = False |
|
|
|
field_object.update({allow_longer_prompts_for_sd_1_5_based_models_field: allow_longer_prompts_for_sd_1_5_based_models_in_url_formatted}) |
|
|
|
|
|
|
|
if scheduler_field_key_in_url in url_object: |
|
|
|
scheduler_field_in_url = url_object[scheduler_field_key_in_url].lower() |
|
|
|
if scheduler_valid(scheduler_field_in_url): |
|
|
|
scheduler_name_value = scheduler_field_in_url |
|
|
|
scheduler_nicely_named_value = scheduler_long_names_object[scheduler_name_value] |
|
|
|
field_object.update({scheduler_field: scheduler_nicely_named_value}) |
|
|
|
|
|
|
|
if image_width_field_key_in_url in url_object: |
|
|
|
image_width_field_in_url = str(url_object[image_width_field_key_in_url]) |
|
|
|
if width_valid(image_width_field_in_url): |
|
|
|
field_object.update({image_width_field: image_width_field_in_url}) |
|
|
|
|
|
|
|
if image_height_field_key_in_url in url_object: |
|
|
|
image_height_field_in_url = str(url_object[image_height_field_key_in_url]) |
|
|
|
if height_valid(image_height_field_in_url): |
|
|
|
field_object.update({image_height_field: image_height_field_in_url}) |
|
|
|
|
|
|
|
if guidance_scale_field_key_in_url in url_object: |
|
|
|
guidance_scale_field_in_url = str(url_object[guidance_scale_field_key_in_url]) |
|
|
|
if guidance_scale_valid(guidance_scale_field_in_url): |
|
|
|
field_object.update({guidance_scale_field: guidance_scale_field_in_url}) |
|
|
|
|
|
|
|
if steps_key_in_url in url_object: |
|
|
|
steps_in_url = str(url_object[steps_key_in_url]) |
|
|
|
if steps_valid(steps_in_url, base_model_name_value): |
|
|
|
if base_model_name_value == "sdxl_turbo": |
|
|
|
field_object.update({base_model_steps_field_for_sdxl_turbo_field: steps_in_url}) |
|
|
|
else: |
|
|
|
field_object.update({base_model_steps_field: steps_in_url}) |
|
|
|
|
|
|
|
if seed_field_key_in_url in url_object: |
|
|
|
seed_field_in_url = url_object[seed_field_key_in_url] |
|
|
|
if seed_valid(seed_field_in_url): |
|
|
|
field_object.update({seed_field: seed_field_in_url}) |
|
|
|
|
|
|
|
add_seed_to_generation = None |
|
|
|
if add_seed_key_in_url in url_object: |
|
|
|
add_seed_in_url = url_object[add_seed_key_in_url].lower() |
|
|
|
add_seed_to_generation = True |
|
|
|
if ( |
|
(add_seed_in_url == "0") or |
|
(add_seed_in_url == "n") or |
|
(add_seed_in_url == "no") or |
|
(add_seed_in_url == "false") |
|
): |
|
|
|
add_seed_to_generation = False |
|
|
|
field_object.update({add_seed_into_pipe_field: add_seed_to_generation}) |
|
|
|
|
|
|
|
if use_torch_manual_seed_but_not_in_generator_key_in_url in url_object: |
|
|
|
use_torch_manual_seed_but_not_in_generator_in_url = url_object[use_torch_manual_seed_but_not_in_generator_key_in_url].lower() |
|
|
|
use_torch_manual_seed_but_not_in_generator_to_generation = False |
|
|
|
if ( |
|
(use_torch_manual_seed_but_not_in_generator_in_url == "1") or |
|
(use_torch_manual_seed_but_not_in_generator_in_url == "y") or |
|
(use_torch_manual_seed_but_not_in_generator_in_url == "yes") or |
|
(use_torch_manual_seed_but_not_in_generator_in_url == "true") |
|
): |
|
|
|
use_torch_manual_seed_but_not_in_generator_to_generation = True |
|
|
|
if add_seed_to_generation == True: |
|
|
|
use_torch_manual_seed_but_not_in_generator_to_generation = False |
|
|
|
field_object.update({use_torch_manual_seed_but_do_not_add_to_pipe_field: use_torch_manual_seed_but_not_in_generator_to_generation}) |
|
|
|
|
|
|
|
if refiner_key_in_url in url_object: |
|
|
|
refiner_in_url = url_object[refiner_key_in_url].lower() |
|
|
|
refiner_in_url_formatted = "No" |
|
|
|
if ( |
|
(refiner_in_url == "1") or |
|
(refiner_in_url == "y") or |
|
(refiner_in_url == "yes") or |
|
(refiner_in_url == "true") |
|
): |
|
|
|
refiner_in_url_formatted = "Yes" |
|
|
|
field_object.update({refining_selection_field: refiner_in_url_formatted}) |
|
|
|
|
|
|
|
if refiner_denoise_start_key_in_url in url_object: |
|
|
|
refiner_denoise_start_in_url = str(url_object[refiner_denoise_start_key_in_url]) |
|
|
|
if refiner_denoise_start_valid(refiner_denoise_start_in_url): |
|
|
|
refiner_denoise_start_in_url = rounded_number(refiner_denoise_start_in_url, 2) |
|
|
|
field_object.update({refining_denoise_start_field: refiner_denoise_start_in_url}) |
|
|
|
|
|
|
|
if refining_steps_option_key_in_url in url_object: |
|
|
|
refining_steps_option_in_url = url_object[refining_steps_option_key_in_url].lower() |
|
|
|
refining_steps_option_in_url_formatted = False |
|
|
|
if ( |
|
(refining_steps_option_in_url == "1") or |
|
(refining_steps_option_in_url == "y") or |
|
(refining_steps_option_in_url == "yes") or |
|
(refining_steps_option_in_url == "true") |
|
): |
|
|
|
refining_steps_option_in_url_formatted = True |
|
|
|
field_object.update({refining_steps_option_for_older_configuration_field: refining_steps_option_in_url_formatted}) |
|
|
|
|
|
|
|
if refining_steps_key_in_url in url_object: |
|
|
|
refining_steps_in_url = str(url_object[refining_steps_key_in_url]) |
|
|
|
if refining_steps_valid(refining_steps_in_url): |
|
|
|
field_object.update({refining_steps_for_older_configuration_field: refining_steps_in_url}) |
|
|
|
|
|
|
|
if use_denoising_start_in_base_model_when_using_refiner_key_in_url in url_object: |
|
|
|
use_denoising_start_in_base_model_when_using_refiner_in_url = url_object[use_denoising_start_in_base_model_when_using_refiner_key_in_url].lower() |
|
|
|
use_denoising_start_in_base_model_when_using_refiner_bool = True |
|
|
|
if ( |
|
(use_denoising_start_in_base_model_when_using_refiner_in_url == "0") or |
|
(use_denoising_start_in_base_model_when_using_refiner_in_url == "n") or |
|
(use_denoising_start_in_base_model_when_using_refiner_in_url == "no") or |
|
(use_denoising_start_in_base_model_when_using_refiner_in_url == "false") |
|
): |
|
|
|
use_denoising_start_in_base_model_when_using_refiner_bool = False |
|
|
|
field_object.update({refining_use_denoising_start_in_base_model_when_using_refiner_field: use_denoising_start_in_base_model_when_using_refiner_bool}) |
|
|
|
|
|
|
|
if base_model_output_to_refiner_is_in_latent_space_key_in_url in url_object: |
|
|
|
base_model_output_to_refiner_is_in_latent_space_in_url = url_object[base_model_output_to_refiner_is_in_latent_space_key_in_url].lower() |
|
|
|
base_model_output_to_refiner_is_in_latent_space_bool = True |
|
|
|
if ( |
|
(base_model_output_to_refiner_is_in_latent_space_in_url == "0") or |
|
(base_model_output_to_refiner_is_in_latent_space_in_url == "n") or |
|
(base_model_output_to_refiner_is_in_latent_space_in_url == "no") or |
|
(base_model_output_to_refiner_is_in_latent_space_in_url == "false") |
|
): |
|
|
|
base_model_output_to_refiner_is_in_latent_space_bool = False |
|
|
|
field_object.update({refining_base_model_output_to_refiner_is_in_latent_space_field: base_model_output_to_refiner_is_in_latent_space_bool}) |
|
|
|
|
|
|
|
if upscaler_key_in_url in url_object: |
|
|
|
upscaler_in_url = url_object[upscaler_key_in_url].lower() |
|
|
|
upscaler_in_url_formatted = "No" |
|
|
|
if ( |
|
(upscaler_in_url == "1") or |
|
(upscaler_in_url == "y") or |
|
(upscaler_in_url == "yes") or |
|
(upscaler_in_url == "true") |
|
): |
|
|
|
upscaler_in_url_formatted = "Yes" |
|
|
|
field_object.update({upscaling_selection_field: upscaler_in_url_formatted}) |
|
|
|
|
|
|
|
if upscaling_steps_key_in_url in url_object: |
|
|
|
upscaling_steps_in_url = str(url_object[upscaling_steps_key_in_url]) |
|
|
|
if upscaling_steps_valid(upscaling_steps_in_url): |
|
|
|
field_object.update({upscaling_num_inference_steps_field: upscaling_steps_in_url}) |
|
|
|
|
|
|
|
if show_base_image_when_using_refiner_or_upscaler_key_in_url in url_object: |
|
|
|
show_base_image_when_using_refiner_or_upscaler_in_url = str(url_object[show_base_image_when_using_refiner_or_upscaler_key_in_url]) |
|
|
|
show_base_image_when_using_refiner_or_upscaler_in_url_formatted = True |
|
|
|
if ( |
|
(show_base_image_when_using_refiner_or_upscaler_in_url == "0") or |
|
(show_base_image_when_using_refiner_or_upscaler_in_url == "n") or |
|
(show_base_image_when_using_refiner_or_upscaler_in_url == "no") or |
|
(show_base_image_when_using_refiner_or_upscaler_in_url == "false") |
|
): |
|
|
|
show_base_image_when_using_refiner_or_upscaler_in_url_formatted = False |
|
|
|
field_object.update({save_base_image_when_using_refiner_or_upscaler_field: show_base_image_when_using_refiner_or_upscaler_in_url_formatted}) |
|
|
|
|
|
|
|
if show_refined_image_when_using_upscaler_key_in_url in url_object: |
|
|
|
show_refined_image_when_using_upscaler_in_url = str(url_object[show_refined_image_when_using_upscaler_key_in_url]) |
|
|
|
show_refined_image_when_using_upscaler_in_url_formatted = True |
|
|
|
if ( |
|
(show_refined_image_when_using_upscaler_in_url == "0") or |
|
(show_refined_image_when_using_upscaler_in_url == "n") or |
|
(show_refined_image_when_using_upscaler_in_url == "no") or |
|
(show_refined_image_when_using_upscaler_in_url == "false") |
|
): |
|
|
|
show_refined_image_when_using_upscaler_in_url_formatted = False |
|
|
|
field_object.update({save_refined_image_when_using_upscaler_field: show_refined_image_when_using_upscaler_in_url_formatted}) |
|
|
|
|
|
|
|
if create_preview_images_key_in_url in url_object: |
|
|
|
create_preview_images_in_url = str(url_object[create_preview_images_key_in_url]) |
|
|
|
create_preview_images_in_url_formatted = True |
|
|
|
if ( |
|
(create_preview_images_in_url == "0") or |
|
(create_preview_images_in_url == "n") or |
|
(create_preview_images_in_url == "no") or |
|
(create_preview_images_in_url == "false") |
|
): |
|
|
|
create_preview_images_in_url_formatted = False |
|
|
|
field_object.update({create_preview_images_field: create_preview_images_in_url_formatted}) |
|
|
|
|
|
|
|
if do_not_create_refining_preview_images_key_in_url in url_object: |
|
|
|
do_not_create_refining_preview_images_in_url = str(url_object[do_not_create_refining_preview_images_key_in_url]) |
|
|
|
do_not_create_refining_preview_images_in_url_formatted = True |
|
|
|
if ( |
|
(do_not_create_refining_preview_images_in_url == "0") or |
|
(do_not_create_refining_preview_images_in_url == "n") or |
|
(do_not_create_refining_preview_images_in_url == "no") or |
|
(do_not_create_refining_preview_images_in_url == "false") |
|
): |
|
|
|
do_not_create_refining_preview_images_in_url_formatted = False |
|
|
|
field_object.update({do_not_create_refining_preview_images_field: do_not_create_refining_preview_images_in_url_formatted}) |
|
|
|
|
|
|
|
if do_not_create_upscaling_preview_images_key_in_url in url_object: |
|
|
|
do_not_create_upscaling_preview_images_in_url = str(url_object[do_not_create_upscaling_preview_images_key_in_url]) |
|
|
|
do_not_create_upscaling_preview_images_in_url_formatted = True |
|
|
|
if ( |
|
(do_not_create_upscaling_preview_images_in_url == "0") or |
|
(do_not_create_upscaling_preview_images_in_url == "n") or |
|
(do_not_create_upscaling_preview_images_in_url == "no") or |
|
(do_not_create_upscaling_preview_images_in_url == "false") |
|
): |
|
|
|
do_not_create_upscaling_preview_images_in_url_formatted = False |
|
|
|
field_object.update({do_not_create_upscaling_preview_images_field: do_not_create_upscaling_preview_images_in_url_formatted}) |
|
|
|
|
|
|
|
if special_theme_key_in_url in url_object: |
|
|
|
special_theme_in_url = str(url_object[special_theme_key_in_url]) |
|
|
|
if theme_valid(special_theme_in_url): |
|
|
|
special_theme_value = False |
|
if (special_theme_in_url == "dark"): |
|
special_theme_value = True |
|
|
|
field_object.update({dark_theme_field: special_theme_value}) |
|
|
|
elif theme_key_in_url in url_object: |
|
|
|
theme_in_url = str(url_object[theme_key_in_url]) |
|
|
|
if theme_valid(theme_in_url): |
|
|
|
theme_value = False |
|
if (theme_in_url == "dark"): |
|
theme_value = True |
|
|
|
field_object.update({dark_theme_field: theme_value}) |
|
|
|
|
|
|
|
generate_image_button_update = gr.Button( |
|
interactive = True |
|
) |
|
|
|
field_object.update({generate_image_button: generate_image_button_update}) |
|
|
|
|
|
|
|
return field_object |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
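# Applies the model configuration chosen in the URL to the "Version" dropdown of the selected base model, leaving the dropdowns for the other base models at their defaults. |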
def set_base_model_and_model_configuration_from_query_params( |
|
base_model_field_index, |
|
initial_model_configuration_name_selected_state_value, |
|
*model_configuration_dropdown_fields_array |
|
): |
|
|
|
base_model_name_value = base_model_array[base_model_field_index] |
|
|
|
model_configuration_name_value_for_selected_base_model = initial_model_configuration_name_selected_state_value |
|
|
|
model_configuration_dropdown_fields_array = [] |
|
|
|
for this_base_model in base_model_array: |
|
|
|
model_configuration_name_default_value_for_this_base_model = base_model_model_configuration_defaults_object[this_base_model] |
|
|
|
if this_base_model not in base_model_object_of_model_configuration_arrays: |
|
continue |
|
|
|
for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: |
|
|
|
if ( |
|
(base_model_name_value == this_base_model) and |
|
(model_configuration_name_value_for_selected_base_model == this_model_configuration) |
|
): |
|
|
|
model_configuration_name_default_value_for_this_base_model = model_configuration_name_value_for_selected_base_model |
|
|
|
this_configuration_field_default_value = model_configuration_names_object[model_configuration_name_default_value_for_this_base_model] |
|
|
|
this_configuration_field = gr.Dropdown( |
|
value = this_configuration_field_default_value |
|
) |
|
|
|
model_configuration_dropdown_fields_array.append(this_configuration_field) |
|
|
|
base_model_and_model_configuration_return_outputs = [] |
|
|
|
for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: |
|
|
|
base_model_and_model_configuration_return_outputs.append( |
|
this_model_configuration_dropdown_field |
|
) |
|
|
|
return base_model_and_model_configuration_return_outputs |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
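# Deletes the previously saved preview image and its temporary folder, then remembers the path of the current preview so it can be removed next time. |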
def delete_preview_imagery( |
|
output_image_preview_field_value |
|
): |
|
|
|
global previous_preview_image |
|
|
|
locally_saved_image = output_image_preview_field_value |
|
|
|
try: |
|
|
|
if previous_preview_image is not None: |
|
|
|
if os.path.exists(previous_preview_image): |
|
|
|
os.remove(previous_preview_image) |
|
|
|
folder_to_delete = os.path.dirname(previous_preview_image) |
|
|
|
if os.path.exists(folder_to_delete): |
|
|
|
os.rmdir(folder_to_delete) |
|
|
|
except BaseException as error_message: |
|
|
|
print ("Either preview image or directory could not be automatically deleted.") |
|
|
|
previous_preview_image = locally_saved_image |
|
|
|
return {} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
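# Switches the prompt and negative prompt textbox labels between the length-limited and no-length-limit wording, based on the checkbox value. |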
def allow_longer_prompts_for_sd_1_5_based_models_function( |
|
allow_longer_prompts_for_sd_1_5_based_models_field_value |
|
): |
|
|
|
allow_longer_prompts_for_sd_1_5_based_models_field_value = numerical_bool(allow_longer_prompts_for_sd_1_5_based_models_field_value) |
|
|
|
prompt_textbox_label = prompt_textbox_label_with_length_limit |
|
negative_prompt_textbox_label = negative_prompt_textbox_label_with_length_limit |
|
|
|
if allow_longer_prompts_for_sd_1_5_based_models_field_value == 1: |
|
|
|
prompt_textbox_label = prompt_textbox_label_with_no_length_limit |
|
negative_prompt_textbox_label = negative_prompt_textbox_label_with_no_length_limit |
|
|
|
prompt_field_update = gr.Textbox( |
|
label = prompt_textbox_label |
|
) |
|
|
|
negative_prompt_field_update = gr.Textbox( |
|
label = negative_prompt_textbox_label |
|
) |
|
|
|
return { |
|
prompt_field: prompt_field_update, |
|
negative_prompt_field: negative_prompt_field_update |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
css_to_use = """ |
|
|
|
/* Hide border on image preview */ |
|
|
|
.generating {border: none !important;} |
|
|
|
/* Hide footer */ |
|
/* |
|
footer |
|
{ |
|
display: none !important; |
|
} |
|
*/ |
|
/* Dropdowns */ |
|
|
|
.sp_dropdown ul |
|
{ |
|
min-width: auto; |
|
max-width: fit-content !important; |
|
max-height: 250px !important; |
|
} |
|
|
|
/* Checkboxes */ |
|
|
|
.sp_checkbox label |
|
{ |
|
align-items: start; |
|
} |
|
|
|
.sp_checkbox label input |
|
{ |
|
margin-top: 3px; |
|
} |
|
/* Scrollbar on prompt information */ |
|
|
|
.textbox_vertical_scroll textarea |
|
{ |
|
resize: none; |
|
overflow-y: auto !important; |
|
} |
|
|
|
.textbox_vertical_scroll textarea::-webkit-scrollbar |
|
{ |
|
width: 15px; |
|
} |
|
|
|
.textbox_vertical_scroll textarea::-webkit-scrollbar-track |
|
{ |
|
background-color: rgb(245, 245, 245); |
|
} |
|
|
|
.textbox_vertical_scroll textarea::-webkit-scrollbar-track:hover |
|
{ |
|
background-color: rgb(242, 242, 242); |
|
} |
|
|
|
.textbox_vertical_scroll textarea::-webkit-scrollbar-thumb |
|
{ |
|
background-color: rgb(214, 214, 214); |
|
width: 100%; |
|
height: 60px; |
|
max-height: 80%; |
|
border: 1px solid rgb(209, 209, 209); |
|
} |
|
|
|
.textbox_vertical_scroll textarea::-webkit-scrollbar-thumb:hover |
|
{ |
|
background-color: rgb(184, 184, 184); |
|
border-color: rgb(179, 179, 179); |
|
} |
|
|
|
.image_scaling button.image-button img |
|
{ |
|
object-fit: scale-down; |
|
} |
|
|
|
.extra_image_class div.image-container |
|
{ |
|
height: 200px; |
|
} |
|
|
|
.extra_image_class button.image-button img |
|
{ |
|
object-fit: scale-down; |
|
} |
|
|
|
body.dark .textbox_vertical_scroll textarea::-webkit-scrollbar-track |
|
{ |
|
background-color: rgb(66, 66, 66) !important; |
|
} |
|
|
|
body.dark .textbox_vertical_scroll textarea::-webkit-scrollbar-track:hover |
|
{ |
|
background-color: rgb(63, 63, 63) !important; |
|
} |
|
|
|
body.dark .textbox_vertical_scroll textarea::-webkit-scrollbar-thumb |
|
{ |
|
background-color: rgb(104, 104, 104) !important; |
|
border-color: rgb(94, 94, 94) !important; |
|
} |
|
|
|
body.dark .textbox_vertical_scroll textarea::-webkit-scrollbar-thumb:hover |
|
{ |
|
background-color: rgb(123, 123, 123) !important; |
|
} |
|
|
|
/* Size of image for image preview */ |
|
|
|
#image_preview_id div.image-container |
|
{ |
|
width: 100%; |
|
} |
|
|
|
#image_preview_id img |
|
{ |
|
max-height: 300px; |
|
max-width: 300px; |
|
/* max-height: 100%; */ |
|
/* max-width: 100%; */ |
|
object-fit: scale-down; |
|
margin: 0 auto; |
|
} |
|
|
|
/* Refining steps in options section */ |
|
|
|
#refining_steps_for_older_configuration_field_row_id |
|
{ |
|
max-width: 800px; |
|
margin: 0 auto; |
|
} |
|
|
|
#refining_steps_for_older_configuration_field_id |
|
{ |
|
width: 300px; |
|
} |
|
|
|
#refining_steps_message_for_older_configuration_field_id |
|
{ |
|
} |
|
|
|
.html_field_style_class |
|
{ |
|
background-color: var(--block-background-fill); |
|
} |
|
|
|
""" |
|
|
|
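# Build the Gradio interface. Component state (gallery contents, previously loaded pipelines, user and generation IDs) is kept in gr.State objects so each browser session stays independent. |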
with gr.Blocks( |
|
title = "Spaghetti AI", |
|
css = css_to_use, |
|
theme = gr.themes.Default( |
|
spacing_size = "sm", |
|
radius_size = "sm" |
|
), |
|
analytics_enabled = False |
|
) as sd_interface: |
|
|
|
|
|
|
|
image_gallery_array_state = gr.State([]) |
|
|
|
prompt_information_array_state = gr.State([]) |
|
|
|
initial_model_configuration_name_selected_state = gr.State("") |
|
|
|
last_model_configuration_name_selected_state = gr.State("") |
|
last_refiner_name_selected_state = gr.State("") |
|
last_upscaler_name_selected_state = gr.State("") |
|
|
|
stored_pipe_state = gr.State({}) |
|
stored_refiner_state = gr.State({}) |
|
stored_upscaler_state = gr.State({}) |
|
|
|
user_id_state = gr.State(0) |
|
image_generation_id_state = gr.State("") |
|
|
|
page_url_hidden_field = gr.Textbox( |
|
visible = False, |
|
interactive = False |
|
) |
|
|
|
gr.Markdown(opening_html) |
|
|
|
with gr.Row(): |
|
|
|
with gr.Column( |
|
scale = 4, |
|
min_width = 200 |
|
): |
|
|
|
with gr.Row( |
|
visible = True |
|
) as generate_image_button_row: |
|
|
|
generate_image_button = gr.Button( |
|
value = generate_image_button_normal_text, |
|
variant = "primary", |
|
interactive = False |
|
) |
|
|
|
with gr.Row( |
|
visible = False |
|
) as cancel_image_button_row: |
|
|
|
cancel_image_button = gr.Button( |
|
value = cancel_image_button_text, |
|
variant = "stop", |
|
interactive = False |
|
) |
|
|
|
with gr.Row( |
|
visible = False |
|
) as cancel_image_message_field_row: |
|
|
|
cancel_image_message_field = gr.HTML( |
|
value = "" |
|
) |
|
|
|
with gr.Group(): |
|
|
|
with gr.Row(): |
|
|
|
prompt_field = gr.Textbox( |
|
label = prompt_textbox_label_to_use, |
|
value = default_prompt |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "negative_prompt_field_row_id", |
|
visible = default_negative_prompt_field_row_visibility |
|
): |
|
|
|
negative_prompt_field = gr.Textbox( |
|
label = negative_prompt_textbox_label_to_use, |
|
value = default_negative_prompt |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "negative_prompt_for_sdxl_turbo_field_row_id", |
|
visible = default_negative_prompt_for_sdxl_turbo_field_row_visibility |
|
): |
|
|
|
negative_prompt_for_sdxl_turbo_field = gr.HTML( |
|
value = "<div style=\"padding: 10px; text-align: center;\">Negative prompt is not used for SDXL Turbo.</div>", |
|
elem_classes = "html_field_style_class" |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "allow_longer_prompts_row_id", |
|
visible = default_allow_longer_prompts_row_visibility |
|
): |
|
|
|
model_types_for_longer_prompts_html = "" |
|
|
|
sd15_available = 0 |
|
photoreal_available = 0 |
|
|
|
if "sd_1_5_runwayml" in base_model_object_of_model_configuration_arrays: |
|
|
|
sd15_available = 1 |
|
|
|
model_types_for_longer_prompts_html += base_model_names_object["sd_1_5_runwayml"] |
|
|
|
if "photoreal" in base_model_object_of_model_configuration_arrays: |
|
|
|
photoreal_available = 1 |
|
|
|
if sd15_available == 1: |
|
|
|
model_types_for_longer_prompts_html += " and " |
|
|
|
model_types_for_longer_prompts_html += base_model_names_object["photoreal"] |
|
|
|
# The checkbox label below already appends " models", so it is not added here. |
|
|
|
allow_longer_prompts_for_sd_1_5_based_models_field = gr.Checkbox( |
|
label = "Allow longer prompts for " + model_types_for_longer_prompts_html + " models when not using the refiner or upscaler.", |
|
value = default_allow_longer_prompts_for_sd_1_5_based_models_is_selected, |
|
interactive = True, |
|
container = True, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
|
|
|
|
with gr.Group( |
|
visible = refiner_group_visible |
|
): |
|
|
|
refiner_label_text = "Refiner" |
|
|
|
with gr.Accordion( |
|
label = refiner_label_text, |
|
open = refiner_accordion_open, |
|
visible = refiner_accordion_visible |
|
) as refiner_accordion: |
|
|
|
with gr.Row(): |
|
|
|
refining_selection_field = gr.Radio( |
|
label = "Use refiner?", |
|
choices = ["Yes", "No"], |
|
value = default_refine_option, |
|
container = True |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_denoise_start_field = gr.Slider( |
|
label = "Refiner denoise start %", |
|
minimum = minimum_refiner_denoise_start, |
|
maximum = maximum_refiner_denoise_start, |
|
value = default_refiner_denoise_start, |
|
step = refiner_denoise_start_input_slider_steps |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "refining_use_denoising_start_in_base_model_when_using_refiner_field_row_id", |
|
visible = default_refining_use_denoising_start_in_base_model_when_using_refiner_field_row_visibility |
|
): |
|
|
|
refining_use_denoising_start_in_base_model_when_using_refiner_field = gr.Checkbox( |
|
label = "Use \"denoising_start\" value as \"denoising_end\" value in base model generation when using refiner", |
|
value = default_use_denoising_start_in_base_model_when_using_refiner_is_selected, |
|
interactive = True, |
|
container = True, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_base_model_output_to_refiner_is_in_latent_space_field = gr.Checkbox( |
|
label = "Base model output in latent space instead of PIL image when using refiner", |
|
value = default_base_model_output_to_refiner_is_in_latent_space_is_selected, |
|
interactive = True, |
|
container = True, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "base_model_output_in_latent_space_note_field_row_id", |
|
visible = default_base_model_output_in_latent_space_note_field_row_visibility |
|
): |
|
|
|
base_model_output_in_latent_space_note_field = gr.HTML( |
|
value = "<div style=\"padding: 10px;\">If you use the refiner, and the model you have chosen, the base model output that will be used in the refiner will always be a PIL image regardless of your choice. The image would be very distorted if we didn't do this.</div>", |
|
elem_classes = "html_field_style_class" |
|
) |
|
|
|
with gr.Group( |
|
visible = upscaler_group_visible |
|
): |
|
|
|
with gr.Accordion( |
|
label = "Upscaler", |
|
open = upscaler_accordion_open, |
|
visible = upscaler_group_visible |
|
): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
|
upscaling_selection_field = gr.Radio( |
|
label = "Upscale by 2x?", |
|
choices = ["Yes", "No"], |
|
value = default_upscale_option, |
|
container = True |
|
) |
|
|
|
with gr.Row(): |
|
|
|
upscaling_num_inference_steps_field = gr.Slider( |
|
label = "Upscaler number of steps", |
|
minimum = 1, |
|
maximum = maximum_upscaler_steps, |
|
value = default_upscaler_steps, |
|
step = 1 |
|
) |
|
|
|
if ( |
|
(enable_refiner == 1) or |
|
(enable_upscaler == 1) |
|
): |
|
|
|
refiner_and_upscaler_text_field = gr.HTML( |
|
value = "<div id=\"refiner_and_upscaler_info_message_div_id\" style=\"text-align: center;\">" + default_refiner_and_upscaler_status_text + "</div>" |
|
) |
|
|
|
with gr.Column( |
|
scale = 3, |
|
min_width = 200 |
|
): |
|
|
|
with gr.Group(): |
|
|
|
with gr.Row(): |
|
|
|
base_model_field = gr.Dropdown( |
|
label = "Model:", |
|
choices = default_base_model_choices_array, |
|
value = default_base_model_nicely_named_value, |
|
type = "index", |
|
filterable = False, |
|
interactive = True, |
|
elem_classes = "sp_dropdown" |
|
) |
|
|
|
model_configuration_dropdown_field_values_for_js = "" |
|
|
|
model_configuration_dropdown_fields_array = [] |
|
|
|
for this_base_model in base_model_array: |
|
|
|
this_model_configuration_choices_array = [] |
|
|
|
if this_base_model not in base_model_object_of_model_configuration_arrays: |
|
|
|
continue |
|
|
|
for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: |
|
|
|
this_model_configuration_choices_array.append( |
|
model_configuration_names_object[this_model_configuration] |
|
) |
|
|
|
this_configuration_field_row_visibility = False |
|
|
|
if ( |
|
(this_base_model == default_base_model) and |
|
(allow_other_model_versions == 1) |
|
): |
|
|
|
this_configuration_field_row_visibility = True |
|
|
|
this_configuration_field_default_value = model_configuration_names_object[base_model_model_configuration_defaults_object[this_base_model]] |
|
|
|
this_configuration_field_default_value_for_js = this_configuration_field_default_value |
|
this_configuration_field_default_value_for_js = this_configuration_field_default_value_for_js.replace("\"", "\\\"") |
|
|
|
model_configuration_dropdown_field_values_for_js += "\"" + this_base_model + "\": \"" + this_configuration_field_default_value_for_js + "\"," |
|
|
|
with gr.Row( |
|
elem_id = "model_configuration_field_" + this_base_model + "_row_id", |
|
visible = this_configuration_field_row_visibility |
|
): |
|
|
|
this_configuration_field = gr.Dropdown( |
|
label = "Version:", |
|
choices = this_model_configuration_choices_array, |
|
value = this_configuration_field_default_value, |
|
type = "index", |
|
filterable = False, |
|
interactive = True, |
|
elem_classes = "sp_dropdown" |
|
) |
|
|
|
model_configuration_dropdown_fields_array.append(this_configuration_field) |
|
|
|
with gr.Row(): |
|
|
|
scheduler_field = gr.Dropdown( |
|
label = "Scheduler / Sampler:", |
|
choices = default_scheduler_choices_array, |
|
value = default_scheduler_nicely_named_value, |
|
type = "index", |
|
filterable = False, |
|
interactive = True, |
|
elem_classes = "sp_dropdown" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
image_width_field = gr.Slider( |
|
label = "Width:", |
|
minimum = minimum_width, |
|
maximum = maximum_width, |
|
value = default_width, |
|
step = width_and_height_input_slider_steps, |
|
interactive = True |
|
) |
|
|
|
image_height_field = gr.Slider( |
|
label = "Height:", |
|
minimum = minimum_height, |
|
maximum = maximum_height, |
|
value = default_height, |
|
step = width_and_height_input_slider_steps, |
|
interactive = True |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "base_model_steps_field_row_id", |
|
visible = default_base_model_steps_field_row_visibility |
|
): |
|
|
|
base_model_steps_field = gr.Slider( |
|
label = "Steps:", |
|
minimum = 1, |
|
maximum = maximum_base_model_steps, |
|
value = default_base_model_steps, |
|
step = 1, |
|
interactive = True |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "base_model_steps_field_for_sdxl_turbo_field_row_id", |
|
visible = default_base_model_steps_field_for_sdxl_turbo_field_row_visibility |
|
): |
|
|
|
base_model_steps_field_for_sdxl_turbo_field = gr.Slider( |
|
label = "Steps:", |
|
info = "Try using only 1 or a couple of steps.", |
|
minimum = 1, |
|
maximum = maximum_base_model_steps_for_sdxl_turbo, |
|
value = default_base_model_steps_for_sdxl_turbo, |
|
step = 1, |
|
interactive = True |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "guidance_scale_field_row_id", |
|
visible = default_guidance_scale_field_row_visibility |
|
): |
|
|
|
guidance_scale_field = gr.Slider( |
|
label = "Guidance Scale:", |
|
minimum = minimum_guidance_scale, |
|
maximum = maximum_guidance_scale, |
|
value = default_guidance_scale, |
|
step = guidance_scale_input_slider_steps, |
|
interactive = True |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "guidance_scale_for_sdxl_turbo_field_row_id", |
|
visible = default_guidance_scale_for_sdxl_turbo_field_row_visibility |
|
): |
|
|
|
guidance_scale_for_sdxl_turbo_field = gr.HTML( |
|
value = "<div style=\"padding: 10px; text-align: center;\">Guidance scale is not used for SDXL Turbo.</div>", |
|
elem_classes = "html_field_style_class" |
|
) |
|
|
|
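# Seed input: a "random" default is resolved to a concrete value first, then the field is |
|
# rendered either as a textbox or as a slider capped at maximum_seed. |
|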
with gr.Row(): |
|
|
|
if default_seed_value == "random": |
|
|
|
default_seed_value = generate_random_seed() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if make_seed_selection_a_textbox == 1: |
|
|
|
seed_field = gr.Textbox( |
|
label = "Seed:", |
|
value = default_seed_value, |
|
interactive = True |
|
) |
|
|
|
else: |
|
|
|
seed_field = gr.Slider( |
|
label = "Seed:", |
|
minimum = 0, |
|
maximum = maximum_seed, |
|
value = default_seed_value, |
|
step = 1, |
|
interactive = True |
|
) |
|
|
|
with gr.Column( |
|
scale = 4, |
|
min_width = 200 |
|
): |
|
|
|
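# Output area: show either a single image component or a gallery, depending on use_image_gallery. |
|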
output_image_field_row_visibility = True |
|
output_image_gallery_field_row_visibility = False |
|
|
|
if use_image_gallery == 1: |
|
|
|
output_image_field_row_visibility = False |
|
output_image_gallery_field_row_visibility = True |
|
|
|
with gr.Row( |
|
visible = output_image_field_row_visibility |
|
) as output_image_field_accordion: |
|
|
|
output_image_field = gr.Image( |
|
label = "Generated Image", |
|
type = "pil", |
|
height = gradio_image_component_height, |
|
show_download_button = True, |
|
elem_classes = "image_scaling" |
|
) |
|
|
|
with gr.Row( |
|
visible = output_image_gallery_field_row_visibility |
|
) as output_image_gallery_field_accordion: |
|
|
|
output_image_gallery_field = gr.Gallery( |
|
label = "Generated Images", |
|
value = [], |
|
selected_index = 0, |
|
allow_preview = True, |
|
preview = True, |
|
columns = "2", |
|
rows = None, |
|
height = gradio_image_gallery_component_height, |
|
object_fit = "scale-down", |
|
show_download_button = True, |
|
elem_classes = "image_scaling" |
|
) |
|
|
|
with gr.Accordion( |
|
label = "Initial Image", |
|
visible = False |
|
) as output_base_model_image_field_accordion: |
|
|
|
output_base_model_image_field = gr.Image( |
|
type = "pil", |
|
height = gradio_extra_image_component_height, |
|
show_download_button = True, |
|
container = False, |
|
elem_classes = "extra_image_class" |
|
) |
|
|
|
with gr.Accordion( |
|
label = "Refined Image (before upscaling)", |
|
visible = False |
|
) as output_refiner_image_field_accordion: |
|
|
|
output_refiner_image_field = gr.Image( |
|
type = "pil", |
|
height = gradio_extra_image_component_height, |
|
show_download_button = True, |
|
container = False, |
|
elem_classes = "extra_image_class" |
|
) |
|
|
|
with gr.Accordion( |
|
label = "Preview", |
|
visible = False |
|
) as output_image_preview_field_accordion: |
|
|
|
output_image_preview_field = gr.Image( |
|
elem_id = "image_preview_id", |
|
type = "filepath", |
|
interactive = False, |
|
show_download_button = True, |
|
height = gradio_image_gallery_component_height, |
|
container = False, |
|
elem_classes = "image_scaling" |
|
) |
|
|
|
with gr.Accordion( |
|
label = "Error Information:", |
|
visible = False |
|
) as error_text_field_accordion: |
|
|
|
error_text_field = gr.Textbox( |
|
value = "", |
|
show_copy_button = True, |
|
lines = 5, |
|
max_lines = 8, |
|
autoscroll = False, |
|
interactive = False, |
|
container = False, |
|
elem_classes = "textbox_vertical_scroll" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
output_text_field = gr.Textbox( |
|
label = "Prompt Information:", |
|
value = "After an image is generated, its generation information will appear here. All of this information is also embedded in the image itself. If you open the image in a text program, it will appear at the top." + additional_prompt_info_html, |
|
show_copy_button = True, |
|
lines = 5, |
|
max_lines = 8, |
|
autoscroll = False, |
|
interactive = False, |
|
container = True, |
|
elem_classes = "textbox_vertical_scroll" |
|
) |
|
|
|
with gr.Group( |
|
visible = False |
|
) as prompt_truncated_field_group: |
|
|
|
with gr.Row(): |
|
|
|
prompt_truncated_field = gr.Textbox( |
|
label = "Prompt Truncated:", |
|
info = "", |
|
show_copy_button = True, |
|
lines = 3, |
|
max_lines = 5, |
|
autoscroll = False, |
|
interactive = False, |
|
container = True, |
|
elem_classes = "textbox_vertical_scroll" |
|
) |
|
|
|
with gr.Group( |
|
visible = False |
|
) as negative_prompt_truncated_field_group: |
|
|
|
with gr.Row(): |
|
|
|
negative_prompt_truncated_field = gr.Textbox( |
|
label = "Negative Prompt Truncated:", |
|
info = "Your negative prompt was been truncated because it was too long. The part below was removed.", |
|
value = "", |
|
show_copy_button = True, |
|
lines = 3, |
|
max_lines = 5, |
|
autoscroll = False, |
|
interactive = False, |
|
container = True, |
|
elem_classes = "textbox_vertical_scroll" |
|
) |
|
|
|
with gr.Accordion( |
|
label = "Other Settings", |
|
open = True |
|
): |
|
|
|
with gr.Row(): |
|
|
|
dark_theme_field = gr.Checkbox( |
|
label = "Use dark theme (?theme=dark)", |
|
value = default_dark_theme_is_selected, |
|
interactive = True, |
|
container = False, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
add_seed_into_pipe_field = gr.Checkbox( |
|
label = "Add seed to generation (to make it deterministic)", |
|
value = default_add_seed_into_pipe_is_selected, |
|
interactive = True, |
|
container = False, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
use_torch_manual_seed_but_do_not_add_to_pipe_field = gr.Checkbox( |
|
label = "Use torch.manual_seed, but don't explicitly add it as a generator during generation (ignored if \"Add seed to generation\" is checked. This is to provide compatibility with PhotoReal site)", |
|
value = default_use_torch_manual_seed_but_do_not_add_to_pipe_is_selected, |
|
interactive = True, |
|
container = False, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
save_or_display_word_text_for_save_base_image = "Display" |
|
if auto_save_imagery == 1: |
|
save_or_display_word_text_for_save_base_image = "Save" |
|
|
|
save_base_image_when_using_refiner_or_upscaler_field = gr.Checkbox( |
|
label = save_or_display_word_text_for_save_base_image + " base image as well when using refiner or upscaler", |
|
value = default_save_base_image_when_using_refiner_or_upscaler_is_selected, |
|
interactive = True, |
|
container = False, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
save_or_display_word_text_for_save_refined_image = "display" |
|
if auto_save_imagery == 1: |
|
save_or_display_word_text_for_save_refined_image = "save" |
|
|
|
save_refined_image_when_using_upscaler_field = gr.Checkbox( |
|
label = "If applicable, " + save_or_display_word_text_for_save_refined_image + " refined image as well when using upscaler", |
|
value = default_save_refined_image_when_using_upscaler_is_selected, |
|
interactive = True, |
|
container = False, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
create_preview_images_field = gr.Checkbox( |
|
label = "Create preview images during image generation", |
|
value = default_create_preview_images_is_selected, |
|
interactive = True, |
|
container = False, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
do_not_create_refining_preview_images_field = gr.Checkbox( |
|
label = "If applicable, do not create preview images during refining", |
|
value = default_do_not_create_refining_preview_images_is_selected, |
|
interactive = True, |
|
container = False, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
do_not_create_upscaling_preview_images_field = gr.Checkbox( |
|
label = "If applicable, do not create preview images during upscaling", |
|
value = default_do_not_create_upscaling_preview_images_is_selected, |
|
interactive = True, |
|
container = False, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_steps_option_for_older_configuration_field = gr.Checkbox( |
|
label = "For refiner, choose number of steps that denoise start % applies to.", |
|
value = False, |
|
interactive = True, |
|
container = False, |
|
elem_classes = "sp_checkbox" |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "refining_steps_for_older_configuration_field_row_id", |
|
visible = False |
|
) as refining_steps_for_older_configuration_field_row: |
|
|
|
refining_steps_for_older_configuration_field = gr.Slider( |
|
elem_id = "refining_steps_for_older_configuration_field_id", |
|
label = "Number of steps in refiner that \"denoise start %\" applies to:", |
|
minimum = 1, |
|
maximum = maximum_refining_steps_for_online_config_field, |
|
value = default_refining_steps_for_online_config_field, |
|
step = 1 |
|
) |
|
|
|
refining_steps_message_for_older_configuration_field = gr.HTML( |
|
elem_id = "refining_steps_message_for_older_configuration_field_id", |
|
value = "<div style=\"padding: 10px;\">This does not define the actual number of refining steps. By default in our app, the number of steps used for the base model generation applies to the refiner. This option allows you to use a different number. If you set 100 steps here, and a \"denoise start %\" of \"0.75\", 25 steps in the refiner would occur after the base model processing has ended.</div>", |
|
elem_classes = "html_field_style_class" |
|
) |
|
|
|
|
|
|
|
if len(ending_html) > 0: |
|
|
|
with gr.Accordion( |
|
label = "Information", |
|
open = True |
|
): |
|
|
|
gr.Markdown(ending_html) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
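# Client-side JavaScript template. Literal braces are doubled ("{{" / "}}") because str.format() |
|
# fills the {0}-{3} placeholders with the refiner/upscaler on/off strings; the script rewrites |
|
# the status message div in the browser whenever the relevant fields change. |
|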
update_refiner_and_upscaler_status_function_js = """ |
|
|
|
async ( |
|
refiningSelectionFieldValue, |
|
upscalingSelectionFieldValue |
|
) => {{ |
|
"use strict"; |
|
|
|
var refinerOnText = "{0}"; |
|
var refinerOffText = "{1}"; |
|
var upscalerOnText = "{2}"; |
|
var upscalerOffText = "{3}"; |
|
|
|
var refinerAndUpscalerInfoMessageHtml = ""; |
|
|
|
if (refiningSelectionFieldValue === "Yes") {{ |
|
|
|
refinerAndUpscalerInfoMessageHtml += refinerOnText; |
|
|
|
}} |
|
else {{ |
|
|
|
refinerAndUpscalerInfoMessageHtml += refinerOffText; |
|
|
|
}} |
|
|
|
if (upscalingSelectionFieldValue === "Yes") {{ |
|
|
|
refinerAndUpscalerInfoMessageHtml += upscalerOnText; |
|
|
|
}} |
|
else {{ |
|
|
|
refinerAndUpscalerInfoMessageHtml += upscalerOffText; |
|
|
|
}} |
|
|
|
document.getElementById("refiner_and_upscaler_info_message_div_id").innerHTML = refinerAndUpscalerInfoMessageHtml; |
|
|
|
}} |
|
|
|
""".format( |
|
refiner_on_text, |
|
refiner_off_text, |
|
upscaler_on_text, |
|
upscaler_off_text |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
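# JavaScript run when the model or version dropdown changes. The Python dicts and list are |
|
# interpolated via str.format() (their repr is valid JavaScript for plain string/number data), |
|
# and the script toggles the per-model version rows and the SDXL Turbo specific fields client-side. |
|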
model_change_function_js = """ |
|
|
|
async ( |
|
baseModelFieldFullNameValue, |
|
possiblyModelConfigurationFullNameValue |
|
) => {{ |
|
"use strict"; |
|
|
|
var baseModelNamesObject = {0}; |
|
var modelConfigurationNamesObject = {1}; |
|
var baseModelArray = {2}; |
|
var baseModelsNotSupportingDenoisingEndForBaseModelObject = {3}; |
|
var allowOtherModelVersions = {4}; |
|
|
|
var baseModelFullNamesToBaseModelIdConversion = {{}}; |
|
Object.keys(baseModelNamesObject).forEach(key => {{ |
|
baseModelFullNamesToBaseModelIdConversion[baseModelNamesObject[key]] = key; |
|
}}); |
|
var baseModelFieldValue = ""; |
|
if (baseModelFullNamesToBaseModelIdConversion.hasOwnProperty(baseModelFieldFullNameValue)) {{ |
|
baseModelFieldValue = baseModelFullNamesToBaseModelIdConversion[baseModelFieldFullNameValue]; |
|
}} |
|
|
|
var modelConfigurationFullNameValue = ""; |
|
|
|
var isBaseModelDropdownChange = 0; |
|
|
|
if (baseModelFieldFullNameValue === possiblyModelConfigurationFullNameValue) {{ |
|
|
|
isBaseModelDropdownChange = 1; |
|
|
|
modelConfigurationFullNameValue = window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue]; |
|
|
|
}} |
|
else {{ |
|
|
|
modelConfigurationFullNameValue = possiblyModelConfigurationFullNameValue; |
|
|
|
window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue] = modelConfigurationFullNameValue; |
|
|
|
}} |
|
|
|
var modelConfigurationFullNamesToModelConfigurationIdConversion = {{}}; |
|
Object.keys(modelConfigurationNamesObject).forEach(key => {{ |
|
modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationNamesObject[key]] = key; |
|
}}); |
|
var modelConfigurationNameValue = ""; |
|
if (modelConfigurationFullNamesToModelConfigurationIdConversion.hasOwnProperty(modelConfigurationFullNameValue)) {{ |
|
modelConfigurationNameValue = modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationFullNameValue]; |
|
}} |
|
|
|
for (var thisBaseModel of baseModelArray) {{ |
|
|
|
var thisModelConfigurationElementId = "model_configuration_field_" + thisBaseModel + "_row_id"; |
|
|
|
var thisModelConfigurationElementDisplay = "none"; |
|
|
|
if ( |
|
(thisBaseModel === baseModelFieldValue) && |
|
(allowOtherModelVersions === 1) |
|
) {{ |
|
|
|
thisModelConfigurationElementDisplay = "block"; |
|
|
|
}} |
|
|
|
document.getElementById(thisModelConfigurationElementId).style.display = thisModelConfigurationElementDisplay; |
|
|
|
}} |
|
|
|
if ( |
|
baseModelFieldValue && |
|
modelConfigurationNameValue |
|
) {{ |
|
|
|
var negativePromptFieldDisplay = "block"; |
|
var negativePromptForSdxlTurboFieldDisplay = "none"; |
|
var baseModelNumInferenceStepsFieldDisplay = "block"; |
|
var baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay = "none"; |
|
var guidanceScaleFieldDisplay = "block"; |
|
var guidanceScaleForSdxlTurboFieldDisplay = "none"; |
|
|
|
if (baseModelFieldValue === "sdxl_turbo") {{ |
|
|
|
negativePromptFieldDisplay = "none"; |
|
negativePromptForSdxlTurboFieldDisplay = "block"; |
|
baseModelNumInferenceStepsFieldDisplay = "none"; |
|
baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay = "block"; |
|
guidanceScaleFieldDisplay = "none"; |
|
guidanceScaleForSdxlTurboFieldDisplay = "block"; |
|
|
|
}} |
|
|
|
document.getElementById("negative_prompt_field_row_id").style.display = negativePromptFieldDisplay; |
|
document.getElementById("negative_prompt_for_sdxl_turbo_field_row_id").style.display = negativePromptForSdxlTurboFieldDisplay; |
|
document.getElementById("base_model_steps_field_row_id").style.display = baseModelNumInferenceStepsFieldDisplay; |
|
document.getElementById("base_model_steps_field_for_sdxl_turbo_field_row_id").style.display = baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay; |
|
document.getElementById("guidance_scale_field_row_id").style.display = guidanceScaleFieldDisplay; |
|
document.getElementById("guidance_scale_for_sdxl_turbo_field_row_id").style.display = guidanceScaleForSdxlTurboFieldDisplay; |
|
|
|
var refiningUseDenoisingStartInBaseModelWhenUsingRefinerFieldDisplay = "block"; |
|
|
|
if (Object.keys(baseModelsNotSupportingDenoisingEndForBaseModelObject).includes(baseModelFieldValue)) {{ |
|
|
|
refiningUseDenoisingStartInBaseModelWhenUsingRefinerFieldDisplay = "none"; |
|
|
|
}} |
|
|
|
document.getElementById("refining_use_denoising_start_in_base_model_when_using_refiner_field_row_id").style.display = refiningUseDenoisingStartInBaseModelWhenUsingRefinerFieldDisplay; |
|
|
|
}} |
|
|
|
}} |
|
|
|
""".format( |
|
base_model_names_object, |
|
model_configuration_names_object, |
|
base_model_array, |
|
base_models_not_supporting_denoising_end_for_base_model_object, |
|
allow_other_model_versions |
|
) |
|
|
|
|
|
|
|
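# fn = None with a js handler means Gradio runs the handler entirely in the browser; |
|
# no Python callback is invoked for these dropdown changes. |
|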
base_model_field.change( |
|
fn = None, |
|
inputs = [ |
|
base_model_field |
|
], |
|
outputs = None, |
|
js = model_change_function_js |
|
) |
|
|
|
|
|
|
|
for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: |
|
|
|
this_model_configuration_dropdown_field.change( |
|
fn = None, |
|
inputs = [ |
|
base_model_field, |
|
this_model_configuration_dropdown_field |
|
], |
|
outputs = None, |
|
js = model_change_function_js |
|
) |
|
|
|
|
|
|
|
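# Selecting a gallery image loads that image's stored prompt information into the output text field. |
|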
output_image_gallery_field.select( |
|
fn = update_prompt_info_from_gallery, |
|
inputs = [ |
|
prompt_information_array_state |
|
], |
|
outputs = [ |
|
output_image_gallery_field, |
|
output_text_field |
|
], |
|
show_progress = "hidden" |
|
) |
|
|
|
|
|
|
|
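# Re-run the client-side status update whenever the model, version, refiner or upscaler |
|
# selection changes; gr.on() attaches the same JS handler to all of the collected triggers. |
|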
if ( |
|
(enable_refiner == 1) or |
|
(enable_upscaler == 1) |
|
): |
|
|
|
update_refiner_and_upscaler_status_triggers_array = [] |
|
|
|
if enable_refiner == 1: |
|
|
|
update_refiner_and_upscaler_status_triggers_array.extend([ |
|
base_model_field.change, |
|
refining_selection_field.change |
|
]) |
|
|
|
for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: |
|
|
|
update_refiner_and_upscaler_status_triggers_array.extend([ |
|
this_model_configuration_dropdown_field.change |
|
]) |
|
|
|
if enable_upscaler == 1: |
|
|
|
update_refiner_and_upscaler_status_triggers_array.extend([ |
|
upscaling_selection_field.change |
|
]) |
|
|
|
gr.on( |
|
triggers = update_refiner_and_upscaler_status_triggers_array, |
|
fn = None, |
|
inputs = [ |
|
refining_selection_field, |
|
upscaling_selection_field |
|
], |
|
outputs = None, |
|
show_progress = "hidden", |
|
queue = False, |
|
js = update_refiner_and_upscaler_status_function_js |
|
) |
|
|
|
|
|
|
|
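# Ordered list of components passed to create_image_function. The per-model version dropdowns |
|
# are appended at the end because their number depends on how many base models are configured. |
|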
create_image_function_inputs = [ |
|
base_model_field, |
|
prompt_field, |
|
negative_prompt_field, |
|
allow_longer_prompts_for_sd_1_5_based_models_field, |
|
scheduler_field, |
|
image_width_field, |
|
image_height_field, |
|
guidance_scale_field, |
|
base_model_steps_field, |
|
base_model_steps_field_for_sdxl_turbo_field, |
|
seed_field, |
|
add_seed_into_pipe_field, |
|
use_torch_manual_seed_but_do_not_add_to_pipe_field, |
|
refining_selection_field, |
|
refining_denoise_start_field, |
|
refining_use_denoising_start_in_base_model_when_using_refiner_field, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field, |
|
refining_steps_option_for_older_configuration_field, |
|
refining_steps_for_older_configuration_field, |
|
upscaling_selection_field, |
|
upscaling_num_inference_steps_field, |
|
image_gallery_array_state, |
|
prompt_information_array_state, |
|
last_model_configuration_name_selected_state, |
|
last_refiner_name_selected_state, |
|
last_upscaler_name_selected_state, |
|
stored_pipe_state, |
|
stored_refiner_state, |
|
stored_upscaler_state, |
|
create_preview_images_field, |
|
do_not_create_refining_preview_images_field, |
|
do_not_create_upscaling_preview_images_field, |
|
save_base_image_when_using_refiner_or_upscaler_field, |
|
save_refined_image_when_using_upscaler_field, |
|
user_id_state, |
|
image_generation_id_state |
|
] |
|
|
|
for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: |
|
|
|
create_image_function_inputs.append( |
|
this_model_configuration_dropdown_field |
|
) |
|
|
|
|
|
|
|
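# Generate button: a three-step chain. before_create_image_function prepares the interface |
|
# (cancel button, preview accordion, etc.), create_image_function produces the image, and |
|
# after_create_image_function restores the buttons and hides the preview. |
|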
generate_image_button_click_event = generate_image_button.click( |
|
fn = before_create_image_function, |
|
|
|
inputs = [ |
|
create_preview_images_field |
|
], |
|
outputs = [ |
|
generate_image_button, |
|
output_image_field, |
|
output_image_gallery_field, |
|
output_base_model_image_field_accordion, |
|
output_base_model_image_field, |
|
output_refiner_image_field_accordion, |
|
output_refiner_image_field, |
|
output_text_field, |
|
output_image_preview_field_accordion, |
|
output_image_preview_field, |
|
prompt_truncated_field_group, |
|
prompt_truncated_field, |
|
negative_prompt_truncated_field_group, |
|
negative_prompt_truncated_field, |
|
cancel_image_button_row, |
|
cancel_image_button, |
|
cancel_image_message_field_row, |
|
cancel_image_message_field, |
|
error_text_field_accordion, |
|
error_text_field, |
|
image_generation_id_state |
|
], |
|
show_progress = "hidden", |
|
queue = True |
|
).then( |
|
fn = create_image_function, |
|
inputs = create_image_function_inputs, |
|
outputs = [ |
|
output_image_field, |
|
output_image_gallery_field, |
|
output_base_model_image_field_accordion, |
|
output_base_model_image_field, |
|
output_refiner_image_field_accordion, |
|
output_refiner_image_field, |
|
output_text_field, |
|
prompt_truncated_field_group, |
|
prompt_truncated_field, |
|
negative_prompt_truncated_field_group, |
|
negative_prompt_truncated_field, |
|
last_model_configuration_name_selected_state, |
|
last_refiner_name_selected_state, |
|
last_upscaler_name_selected_state, |
|
stored_pipe_state, |
|
stored_refiner_state, |
|
stored_upscaler_state, |
|
error_text_field_accordion, |
|
error_text_field |
|
], |
|
show_progress = "full", |
|
queue = True |
|
).then( |
|
fn = after_create_image_function, |
|
inputs = None, |
|
outputs = [ |
|
generate_image_button, |
|
output_image_field, |
|
output_image_gallery_field, |
|
output_text_field, |
|
output_image_preview_field_accordion, |
|
generate_image_button_row, |
|
cancel_image_button_row, |
|
cancel_image_button, |
|
cancel_image_message_field_row, |
|
cancel_image_message_field |
|
], |
|
show_progress = "hidden", |
|
queue = True |
|
) |
|
|
|
|
|
|
|
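# Client-side validation for the seed textbox: non-numeric input is blanked and values |
|
# above maximum_seed are clamped to it. |
|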
verify_seed_field_textbox_function_js = """ |
|
|
|
async ( |
|
seedFieldTextboxValue |
|
) => {{ |
|
"use strict"; |
|
|
|
var defaultSeedMaximum = parseInt({0}); |
|
|
|
seedFieldTextboxValue = parseInt(seedFieldTextboxValue); |
|
|
|
if (isNaN(seedFieldTextboxValue)) {{ |
|
|
|
seedFieldTextboxValue = ""; |
|
|
|
}} |
|
else if (seedFieldTextboxValue > defaultSeedMaximum) {{ |
|
|
|
seedFieldTextboxValue = defaultSeedMaximum; |
|
|
|
}} |
|
|
|
return [ |
|
seedFieldTextboxValue |
|
]; |
|
|
|
}} |
|
|
|
""".format( |
|
maximum_seed |
|
) |
|
|
|
|
|
|
|
if make_seed_selection_a_textbox == 1: |
|
|
|
seed_field.change( |
|
fn = None, |
|
inputs = [ |
|
seed_field |
|
], |
|
outputs = [ |
|
seed_field |
|
], |
|
show_progress = "hidden", |
|
queue = False, |
|
js = verify_seed_field_textbox_function_js |
|
) |
|
|
|
|
|
|
|
if enable_image_generation_cancellation == 1: |
|
|
|
|
|
|
|
cancel_image_click_event = cancel_image_button.click( |
|
fn = cancel_image_function, |
|
inputs = [ |
|
user_id_state, |
|
image_generation_id_state |
|
], |
|
outputs = [ |
|
generate_image_button_row, |
|
generate_image_button, |
|
output_text_field, |
|
output_image_preview_field_accordion, |
|
cancel_image_button_row, |
|
cancel_image_button, |
|
cancel_image_message_field_row, |
|
cancel_image_message_field |
|
], |
|
show_progress = "hidden", |
|
cancels = [generate_image_button_click_event], |
|
queue = True |
|
) |
|
|
|
|
|
|
|
|
|
|
|
model_configuration_dropdown_field_values_for_js = model_configuration_dropdown_field_values_for_js[:-1] |
|
|
|
|
|
|
|
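# On-load JavaScript: stores the default version-dropdown values on window, removes the |
|
# "scroll-hide" class from vertically scrollable textboxes, and applies the dark theme based |
|
# on the "__theme"/"theme" query parameter or the OS color-scheme preference. |
|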
script_on_load_js = """ |
|
|
|
async () => {{ |
|
"use strict"; |
|
|
|
window.modelConfigurationDropdownFieldValuesObject = {{{0}}}; |
|
|
|
document.querySelectorAll(".textbox_vertical_scroll textarea").forEach(e => e.classList.remove("scroll-hide")); |
|
|
|
var urlParams = new URLSearchParams(window.location.search); |
|
var theme = ""; |
|
|
|
if (typeof urlParams.get("__theme") === "string") {{ |
|
theme = (urlParams.get("__theme")).toLowerCase(); |
|
}} |
|
else if (typeof urlParams.get("theme") === "string") {{ |
|
theme = (urlParams.get("theme")).toLowerCase(); |
|
}} |
|
|
|
var use_dark = 0; |
|
if (theme === "dark") {{ |
|
use_dark = 1; |
|
|
|
}} |
|
else if (theme === "light") {{ |
|
use_dark = 0; |
|
}} |
|
else {{ |
|
|
|
if ( |
|
window.matchMedia && |
|
window.matchMedia("(prefers-color-scheme: dark)").matches |
|
) {{ |
|
use_dark = 1; |
|
}} |
|
|
|
}} |
|
|
|
if (use_dark) {{ |
|
document.getElementsByTagName("body")[0].classList.add("dark"); |
|
}} |
|
|
|
}} |
|
|
|
""".format( |
|
model_configuration_dropdown_field_values_for_js |
|
) |
|
|
|
|
|
|
|
model_base_model_and_model_configuration_inputs = [ |
|
base_model_field, |
|
initial_model_configuration_name_selected_state |
|
] |
|
|
|
model_base_model_and_model_configuration_outputs = [] |
|
|
|
for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: |
|
|
|
model_base_model_and_model_configuration_inputs.append( |
|
this_model_configuration_dropdown_field |
|
) |
|
|
|
model_base_model_and_model_configuration_outputs.append( |
|
this_model_configuration_dropdown_field |
|
) |
|
|
|
|
|
|
|
sd_interface_load_outputs = create_image_function_inputs + [ |
|
initial_model_configuration_name_selected_state, |
|
generate_image_button, |
|
dark_theme_field, |
|
page_url_hidden_field |
|
] |
|
|
|
|
|
|
|
every_value_in_seconds_for_image_preview = None |
|
|
|
if enable_image_preview == 1: |
|
|
|
every_value_in_seconds_for_image_preview = load_image_preview_frequency_in_seconds |
|
|
|
|
|
|
|
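# Page-load chain: read settings from the query string, apply the base model and version |
|
# selection, then poll load_image_preview (via the "every" argument) when previews are enabled. |
|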
sd_interface_continuous = sd_interface.load( |
|
fn = get_query_params, |
|
inputs = None, |
|
outputs = sd_interface_load_outputs, |
|
show_progress = "hidden", |
|
queue = False, |
|
js = script_on_load_js |
|
).then( |
|
fn = set_base_model_and_model_configuration_from_query_params, |
|
inputs = model_base_model_and_model_configuration_inputs, |
|
outputs = model_base_model_and_model_configuration_outputs, |
|
show_progress = "hidden", |
|
queue = False |
|
).then( |
|
fn = load_image_preview, |
|
inputs = [ |
|
user_id_state |
|
], |
|
outputs = [ |
|
output_image_preview_field |
|
], |
|
show_progress = "hidden", |
|
every = every_value_in_seconds_for_image_preview, |
|
queue = True |
|
) |
|
|
|
|
|
|
|
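# Toggle the "dark" class on <body> when the dark theme checkbox changes; handled in the browser. |
|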
dark_theme_function_js = """ |
|
|
|
async ( |
|
dark_theme_value |
|
) => {{ |
|
"use strict"; |
|
|
|
if (dark_theme_value) {{ |
|
document.getElementsByTagName("body")[0].classList.add("dark"); |
|
}} |
|
else {{ |
|
document.getElementsByTagName("body")[0].classList.remove("dark"); |
|
}} |
|
|
|
}} |
|
|
|
""".format() |
|
|
|
|
|
|
|
dark_theme_field_change_event = dark_theme_field.change( |
|
fn = None, |
|
inputs = [ |
|
dark_theme_field |
|
], |
|
outputs = None, |
|
js = dark_theme_function_js |
|
) |
|
|
|
|
|
|
|
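# "Create link" controls: a button that fills an initially hidden textbox with a URL |
|
# encoding the current selections. |
|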
with gr.Group(): |
|
|
|
with gr.Row(): |
|
|
|
generate_link_button = gr.Button( |
|
value = "Create link with your selections", |
|
variant = "secondary" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
generate_link_field = gr.Textbox( |
|
visible = False, |
|
label = "The link below contains the details entered above. If you share this link, someone would need to generate the image in order to view it. The image is not saved.", |
|
value = "", |
|
show_copy_button = True, |
|
lines = 2, |
|
max_lines = 4, |
|
autoscroll = False, |
|
interactive = False, |
|
container = True, |
|
elem_classes = "textbox_vertical_scroll" |
|
) |
|
|
|
|
|
|
|
generate_link_inputs = [ |
|
page_url_hidden_field, |
|
generate_link_field |
|
] + create_image_function_inputs |
|
|
|
generate_link_button_click_event = generate_link_button.click( |
|
fn = generate_link_textbox_function, |
|
inputs = generate_link_inputs, |
|
outputs = [ |
|
generate_link_field |
|
], |
|
show_progress = "hidden", |
|
queue = False |
|
) |
|
|
|
|
|
|
|
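# Show or hide the refiner-steps slider row when the "choose number of steps" checkbox is toggled. |
|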
choose_number_of_steps_that_denoise_start_applies_to_function_js = """ |
|
|
|
async ( |
|
chooseNumberOfStepsThatDenoiseStartAppliesToValue |
|
) => {{ |
|
"use strict"; |
|
|
|
var refiningStepsForOlderConfigurationFieldRowDisplay = "none"; |
|
if (chooseNumberOfStepsThatDenoiseStartAppliesToValue) {{ |
|
refiningStepsForOlderConfigurationFieldRowDisplay = "block"; |
|
}} |
|
document.getElementById("refining_steps_for_older_configuration_field_row_id").style.display = refiningStepsForOlderConfigurationFieldRowDisplay; |
|
|
|
}} |
|
|
|
""".format() |
|
|
|
|
|
|
|
refining_steps_option_for_older_configuration_field_change_event = refining_steps_option_for_older_configuration_field.change( |
|
fn = None, |
|
inputs = [ |
|
refining_steps_option_for_older_configuration_field |
|
], |
|
outputs = None, |
|
js = choose_number_of_steps_that_denoise_start_applies_to_function_js |
|
) |
|
|
|
|
|
|
|
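# If previews should be deleted immediately, call delete_preview_imagery for each preview |
|
# image once it has been displayed. |
|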
if ( |
|
(enable_image_preview == 1) and |
|
(delete_preview_images_immediately == 1) |
|
): |
|
|
|
output_image_preview_field_change_event = output_image_preview_field.change( |
|
fn = delete_preview_imagery, |
|
inputs = [ |
|
output_image_preview_field |
|
], |
|
outputs = None, |
|
show_progress = "hidden", |
|
queue = False |
|
) |
|
|
|
|
|
|
|
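# When the refiner is enabled, show a note if the selected base model cannot send latent-space |
|
# output to the refiner while that option is checked. |
|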
if enable_refiner == 1: |
|
|
|
base_model_output_in_latent_space_note_function_js = """ |
|
|
|
async ( |
|
refiningSelectionFieldValue, |
|
baseModelFieldFullNameValue, |
|
refiningBaseModelOutputToRefinerIsInLatentSpaceValue |
|
) => {{ |
|
"use strict"; |
|
|
|
var baseModelNamesObject = {0}; |
|
var baseModelsNotSupportingBaseModelOutputInLatentSpaceToRefinerObject = {1}; |
|
|
|
var baseModelFullNamesToBaseModelIdConversion = {{}}; |
|
Object.keys(baseModelNamesObject).forEach(key => {{ |
|
baseModelFullNamesToBaseModelIdConversion[baseModelNamesObject[key]] = key; |
|
}}); |
|
var baseModelFieldValue = ""; |
|
if (baseModelFullNamesToBaseModelIdConversion.hasOwnProperty(baseModelFieldFullNameValue)) {{ |
|
baseModelFieldValue = baseModelFullNamesToBaseModelIdConversion[baseModelFieldFullNameValue]; |
|
}} |
|
|
|
var baseModelOutputInLatentSpaceNoteFieldDisplay = "none"; |
|
if ( |
|
(refiningSelectionFieldValue === "Yes") && |
|
baseModelsNotSupportingBaseModelOutputInLatentSpaceToRefinerObject.hasOwnProperty(baseModelFieldValue) && |
|
(refiningBaseModelOutputToRefinerIsInLatentSpaceValue === true) |
|
) {{ |
|
baseModelOutputInLatentSpaceNoteFieldDisplay = "block"; |
|
}} |
|
document.getElementById("base_model_output_in_latent_space_note_field_row_id").style.display = baseModelOutputInLatentSpaceNoteFieldDisplay; |
|
|
|
}} |
|
|
|
""".format( |
|
base_model_names_object, |
|
base_models_not_supporting_base_model_output_in_latent_space_to_refiner_object |
|
) |
|
|
|
|
|
|
|
base_model_output_in_latent_space_note_triggers_array = [ |
|
refining_selection_field.change, |
|
base_model_field.change, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field.change |
|
] |
|
|
|
gr.on( |
|
triggers = base_model_output_in_latent_space_note_triggers_array, |
|
fn = None, |
|
inputs = [ |
|
refining_selection_field, |
|
base_model_field, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field |
|
], |
|
outputs = None, |
|
show_progress = "hidden", |
|
queue = False, |
|
js = base_model_output_in_latent_space_note_function_js |
|
) |
|
|
|
|
|
|
|
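# When longer prompts are enabled, toggling the SD 1.5 long-prompt checkbox updates the |
|
# prompt and negative prompt fields. |
|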
if enable_longer_prompts == 1: |
|
|
|
allow_longer_prompts_for_sd_1_5_based_models_field_change_event = allow_longer_prompts_for_sd_1_5_based_models_field.change( |
|
fn = allow_longer_prompts_for_sd_1_5_based_models_function, |
|
inputs = [ |
|
allow_longer_prompts_for_sd_1_5_based_models_field |
|
], |
|
outputs = [ |
|
prompt_field, |
|
negative_prompt_field |
|
], |
|
show_progress = "hidden", |
|
queue = False |
|
) |
|
|
|
|
|
|
|
sd_interface.queue( |
|
max_size = max_queue_size |
|
) |
|
|
|
|
|
|
|
inbrowser = False |
|
|
|
if auto_open_browser == 1: |
|
|
|
inbrowser = True |
|
|
|
|
|
|
|
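# Start the Gradio server; "inbrowser" controls whether a browser window opens automatically. |
|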
if __name__ == "__main__": |
|
|
|
|
|
sd_interface.launch( |
|
|
|
|
|
inbrowser = inbrowser, |
|
share = None, |
|
show_api = False, |
|
quiet = True, |
|
show_error = True, |
|
state_session_capacity = 10000, |
|
max_threads = 40 |
|
) |
|
|