|
import gradio as gr

import torch

import os

import modin.pandas as pd

from PIL import Image

from diffusers import DiffusionPipeline
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
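# Base folder used for the model cache and automatically saved images when
# running this script locally.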
main_dir = "C:/Diffusers" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
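# 1 = store downloaded models in a folder inside "main_dir" rather than in
# the default Hugging Face cache location. (Automatically set to 0 below
# when the script runs on Hugging Face.)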
use_custom_hugging_face_cache_dir = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
cache_directory_folder_name = "model_data" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
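# Key of the base model selected by default; must match an entry in
# "base_model_array" below. (Automatically switched to "sdxl_turbo" below
# when no GPU is available.)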
default_base_model = "sdxl" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
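# 1 = automatically save each generated image, along with a text file of its
# generation details, inside "saved_images_folder_name".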
auto_save_imagery = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
saved_images_folder_name = "saved_images" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto_open_browser = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
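# 1 = use a textbox rather than a slider for the seed. This avoids a Gradio
# bug where slider values above 9007199254740992 are not handled correctly
# (see the link in the page footer).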
make_seed_selection_a_textbox = 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
enable_close_command_prompt_button = 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
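# Default for the experimental "denoising_start" checkbox in the refiner
# options. (Marked as not working yet in the interface.)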
default_use_denoising_start_in_base_model_when_using_refiner = 0 |
|
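# "denoising_start" value used by the default-config refiner path in
# create_image_function. It is defined here because the matching slider's
# value is not passed into that function; 0.95 mirrors the slider's default.
refining_denoise_start_for_default_config = 0.95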
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
default_base_model_output_to_refiner_is_in_latent_space = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
log_generation_times = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
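# 1 = show results in a gallery that keeps every image from this session;
# 0 = only show the most recent image.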
use_image_gallery = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
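# Internal keys for the available base models. The order here is the order
# they appear in the base model dropdown.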
base_model_array = [ |
|
"sdxl", |
|
"photoreal", |
|
"sdxl_turbo", |
|
"sd_1_5_runwayml" |
|
] |
|
|
|
base_model_names_object = { |
|
"sdxl": "Stable Diffusion XL 1.0", |
|
"photoreal": "PhotoReal", |
|
"sdxl_turbo": "Stable Diffusion XL Turbo", |
|
"sd_1_5_runwayml": "Stable Diffusion 1.5" |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
base_model_object_of_model_configuration_arrays = { |
|
"sdxl": [ |
|
"sdxl_default", |
|
"sdxl_2023-11-12", |
|
"sdxl_2023-09-05" |
|
], |
|
"photoreal": [ |
|
"photoreal_default", |
|
"photoreal_2023-11-12", |
|
"photoreal_2023-09-01" |
|
], |
|
"sdxl_turbo": [ |
|
"sdxl_turbo_default" |
|
], |
|
"sd_1_5_runwayml": [ |
|
"sd_1_5_runwayml_default" |
|
] |
|
} |
|
|
|
|
|
|
|
model_configuration_names_object = { |
|
"sdxl_default": "1.0 - Default (subject to change)", |
|
"sdxl_2023-11-12": "1.0 (2023-11-12 online config)", |
|
"sdxl_2023-09-05": "1.0 (2023-09-05 online config)", |
|
"photoreal_default": "3.7.5 - Default (subject to change)", |
|
"photoreal_2023-11-12": "3.7.5 (2023-11-12 online config)", |
|
"photoreal_2023-09-01": "3.6 (2023-09-01 online config)", |
|
"sdxl_turbo_default": "Default (subject to change)", |
|
"sd_1_5_runwayml_default": "1.5 - Default (subject to change)" |
|
} |
|
|
|
model_configuration_links_object = { |
|
"sdxl_default": "stabilityai/stable-diffusion-xl-base-1.0", |
|
"sdxl_2023-11-12": "stabilityai/stable-diffusion-xl-base-1.0", |
|
"sdxl_2023-09-05": "stabilityai/stable-diffusion-xl-base-1.0", |
|
"photoreal_default": "circulus/canvers-real-v3.7.5", |
|
"photoreal_2023-11-12": "circulus/canvers-real-v3.7.5", |
|
"photoreal_2023-09-01": "circulus/canvers-realistic-v3.6", |
|
"sdxl_turbo_default": "stabilityai/sdxl-turbo", |
|
"sd_1_5_runwayml_default": "runwayml/stable-diffusion-v1-5" |
|
} |
|
|
|
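# Model configurations that always use the refiner, to match how the
# corresponding online versions behaved.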
model_configuration_force_refiner_object = { |
|
"sdxl_2023-11-12": 1, |
|
"sdxl_2023-09-05": 1 |
|
} |
|
|
|
|
|
|
|
base_model_model_configuration_defaults_object = { |
|
"sdxl": "sdxl_default", |
|
"photoreal": "photoreal_default", |
|
"sdxl_turbo": "sdxl_turbo_default", |
|
"sd_1_5_runwayml": "sd_1_5_runwayml_default" |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
device = "cpu" |
|
|
|
if torch.cuda.is_available():

    device = "cuda"

    # PYTORCH_CUDA_ALLOC_CONF must be set as an environment variable, before
    # CUDA is initialized, to have any effect; assigning it to a plain Python
    # variable does nothing.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:8000"

    torch.cuda.empty_cache()
|
|
|
if device == "cpu": |
|
|
|
default_base_model = "sdxl_turbo" |
|
|
|
|
|
|
|
default_prompt = "" |
|
default_negative_prompt = "" |
|
|
|
default_width = 768 |
|
default_height = 768 |
|
|
|
default_guidance_scale_value = 7 |
|
|
|
default_base_model_base_model_num_inference_steps = 50 |
|
default_base_model_base_model_num_inference_steps_for_sdxl_turbo = 2 |
|
|
|
default_seed_maximum = 999999999999999999 |
|
default_seed_value = 876678173805928800 |
|
|
|
|
|
|
|
|
|
enable_refiner = 1 |
|
enable_upscaler = 1 |
|
|
|
|
|
|
|
default_refiner_selected = 0 |
|
default_upscaler_selected = 0 |
|
|
|
|
|
|
|
|
|
|
|
use_xformers = 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
use_default_attn_processor = 0 |
|
|
|
display_xformers_usage_in_prompt_info = 1 |
|
include_transformers_version_in_prompt_info = 1 |
|
display_default_attn_processor_usage_in_prompt_info = 1 |
|
|
|
|
|
|
|
|
|
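# Sequential CPU offload greatly reduces memory use at a large speed cost;
# model CPU offload is a faster, lighter alternative. If both are enabled
# for the same pipeline, the sequential option is turned off further below.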
use_sequential_cpu_offload_for_base_model = 1 |
|
use_sequential_cpu_offload_for_refiner = 1 |
|
use_sequential_cpu_offload_for_upscaler = 1 |
|
|
|
use_model_cpu_offload_for_base_model = 0 |
|
use_model_cpu_offload_for_refiner = 0 |
|
use_model_cpu_offload_for_upscaler = 0 |
|
|
|
|
|
|
|
if default_base_model == "photoreal": |
|
|
|
|
|
|
|
default_seed_value = 3648905360627576 |
|
|
|
elif default_base_model == "sdxl_turbo": |
|
|
|
|
|
|
|
default_seed_value = 2725116121543 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
else: |
|
|
|
|
|
|
|
default_width = 1024 |
|
default_height = 1024 |
|
default_guidance_scale_value = 10 |
|
|
|
|
|
|
|
|
|
|
|
width_and_height_input_slider_steps = 8 |
|
|
|
|
|
|
|
show_messages_in_command_prompt = 1 |
|
show_messages_in_modal_on_page = 1 |
|
|
|
|
|
|
|
opening_html = "" |
|
|
|
if device == "cpu": |
|
|
|
opening_html = "<span style=\"font-weight: bold; color: red;\">THIS APP IS EXCEPTIONALLY SLOW! THE REFINER CODE DOESN'T WORK RIGHT YET.</span><br/>This app is not running on a GPU. The first time it loads after the space is rebuilt it might take 10 minutes to generate a SDXL Turbo image. It may take two minutes after that point. For other models, it may take hours to create a single image!" |
|
|
|
|
|
|
|
ending_html = """This app allows you to try to match images that can be generated using several tools online. (<a href=\"https://huggingface.co/spaces/Manjushri/SDXL-1.0\" target=\"_blank\">Stable Diffusion XL</a>, <a href=\"https://huggingface.co/spaces/Manjushri/PhotoReal-V3.7.5\" target=\"_blank\">PhotoReal with SDXL 1.0 Refiner</a> and <a href=\"https://huggingface.co/spaces/diffusers/unofficial-SDXL-Turbo-i2i-t2i\" target=\"_blank\">SDXL Turbo Unofficial Demo</a>) You can select the base model you want to use in the first dropdown option. The second configuration option involves choosing which version and/or configuration to choose. Certain configurations try to match the version online, taking into account changes that were made over time. Another configuration involves a default configuration I choose and is subject to change while I am still designing this app. |
|
|
|
Tokens are not individual characters. If the prompt length is too long, the display will notify you which part of the prompt wasn't used. Changing just the image dimensions alone will change the image generated. For some models, trying to make a large image, such as 1024x1024, may add extra people and come out worse than using smaller dimensions. If you have a seed greater than 9007199254740992, it may not be processed correctly. Once you create an image, make sure the seed shown in the \"Prompt Information\" section matches the seed you entered. If it doesn't, set \"make_seed_selection_a_textbox\" to 1 in the script. This bug is described <a href=\"https://github.com/gradio-app/gradio/issues/5354\" target=\"_blank\">here</a>.
|
|
|
The original script for this app was written by <a href=\"https://huggingface.co/Manjushri\" target=\"_blank\">Manjushri</a>.""" |
|
|
|
|
|
|
|
refiner_and_upscaler_status_opening_html = "<div style=\"text-align: center;\">" |
|
|
|
refiner_and_upscaler_status_closing_html = "</div>" |
|
|
|
refiner_on_text = "Refiner is on. " |
|
refiner_off_text = "Refiner is off. " |
|
|
|
upscaler_on_text = "Upscaler is on. " |
|
upscaler_off_text = "Upscaler is off. " |
|
|
|
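# The tokenizer's 77-token limit includes the special begin and end tokens,
# so two slots are reserved when measuring prompt length.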
number_of_reserved_tokens = 2 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
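# Keeping the base model's output in latent space avoids decoding it to a
# PIL image before the refiner or upscaler runs.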
which_output_type_before_refiner_and_upscaler = "latent" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
script_being_run_on_hugging_face = 1

try:

    # Hugging Face Spaces hostnames appear to contain the account name; when
    # it is absent, the script is assumed to be running locally. os.uname()
    # must be converted to a string before it can be searched.
    if (str(os.uname()).find("magicfixeseverything") < 0):

        script_being_run_on_hugging_face = 0

except AttributeError:

    # os.uname() is unavailable on Windows, so this is a local machine.
    script_being_run_on_hugging_face = 0
|
|
|
|
|
|
|
if device == "cuda": |
|
|
|
PYTORCH_CUDA_ALLOC_CONF = { |
|
"max_split_size_mb": 8000 |
|
} |
|
torch.cuda.max_memory_allocated( |
|
device = device |
|
) |
|
torch.cuda.empty_cache() |
|
|
|
|
|
|
|
if script_being_run_on_hugging_face == 1: |
|
|
|
use_custom_hugging_face_cache_dir = 0 |
|
show_messages_in_modal_on_page = 0 |
|
|
|
ending_html = "<p>If you would like to download this app to run offline on a Windows computer that has a NVIDIA graphics card, click <a href=\"https://huggingface.co/spaces/magicfixeseverything/ai_image_creation/blob/main/ai_image_creation.zip\">here</a> to download it.</p>" + ending_html |
|
|
|
|
|
|
|
saved_images_dir = main_dir + "/" + saved_images_folder_name |
|
|
|
hugging_face_cache_dir = main_dir + "/" + cache_directory_folder_name |
|
|
|
if not os.path.exists(hugging_face_cache_dir): |
|
os.makedirs(hugging_face_cache_dir) |
|
|
|
|
|
|
|
if auto_save_imagery == 1: |
|
|
|
from datetime import datetime |
|
import time |
|
|
|
|
|
|
|
if log_generation_times == 1: |
|
|
|
import time |
|
|
|
|
|
|
|
if device == "cpu": |
|
|
|
use_sequential_cpu_offload_for_base_model = 0 |
|
use_sequential_cpu_offload_for_refiner = 0 |
|
use_sequential_cpu_offload_for_upscaler = 0 |
|
|
|
use_model_cpu_offload_for_base_model = 0 |
|
use_model_cpu_offload_for_refiner = 0 |
|
use_model_cpu_offload_for_upscaler = 0 |
|
|
|
use_xformers = 0 |
|
|
|
|
|
|
|
if ( |
|
(use_sequential_cpu_offload_for_base_model == 1) and |
|
(use_model_cpu_offload_for_base_model == 1) |
|
): |
|
|
|
use_sequential_cpu_offload_for_base_model = 0 |
|
|
|
if ( |
|
(use_sequential_cpu_offload_for_refiner == 1) and |
|
(use_model_cpu_offload_for_refiner == 1) |
|
): |
|
|
|
use_sequential_cpu_offload_for_refiner = 0 |
|
|
|
if ( |
|
(use_sequential_cpu_offload_for_upscaler == 1) and |
|
(use_model_cpu_offload_for_upscaler == 1) |
|
): |
|
|
|
use_sequential_cpu_offload_for_upscaler = 0 |
|
|
|
|
|
|
|
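# Print an error message and halt the script. Used to validate the
# configuration at startup. (gr.Error is instantiated but not raised here,
# so the console message and the exit are what actually take effect.)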
def error_function( |
|
text_message |
|
): |
|
|
|
print (text_message) |
|
|
|
gr.Error(text_message) |
|
|
|
exit(1) |
|
|
|
|
|
|
|
|
|
|
|
default_model_configuration_object = { |
|
"sdxl_default": 1, |
|
"photoreal_default": 1, |
|
"sdxl_turbo_default": 1, |
|
"sd_1_5_runwayml_default": 1 |
|
} |
|
|
|
|
|
|
|
additional_prompt_info_html = "" |
|
|
|
if auto_save_imagery == 1: |
|
|
|
additional_prompt_info_html = " The image, and a text file with generation information, will be saved automatically." |
|
|
|
|
|
|
|
if use_xformers == 1: |
|
|
|
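    # This import appears to serve only as a check that xFormers is
    # installed; enable_xformers_memory_efficient_attention() is what
    # actually activates it later.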
from xformers.ops import MemoryEfficientAttentionFlashAttentionOp |
|
|
|
if use_default_attn_processor == 1: |
|
|
|
from diffusers.models.attention_processor import AttnProcessor |
|
|
|
|
|
|
|
if ( |
|
default_base_model and |
|
(default_base_model in base_model_object_of_model_configuration_arrays) and |
|
(default_base_model in base_model_model_configuration_defaults_object) |
|
): |
|
|
|
default_model_configuration = base_model_model_configuration_defaults_object[default_base_model] |
|
|
|
if default_model_configuration in model_configuration_names_object: |
|
|
|
default_model_configuration_choices_array = [] |
|
|
|
for this_model_configuration in base_model_object_of_model_configuration_arrays[default_base_model]: |
|
|
|
if model_configuration_names_object[this_model_configuration]: |
|
|
|
default_model_configuration_choices_array.append( |
|
model_configuration_names_object[this_model_configuration] |
|
) |
|
|
|
else: |
|
|
|
error_function("A default configuration must be properly named in the code.") |
|
|
|
else: |
|
|
|
error_function("A default configuration must be properly configured in the code.") |
|
|
|
else: |
|
|
|
error_function("A default base model must be properly configured in the code.") |
|
|
|
|
|
|
|
default_base_model_nicely_named_value = base_model_names_object[default_base_model] |
|
|
|
default_model_configuration_nicely_named_value = model_configuration_names_object[default_model_configuration] |
|
|
|
|
|
|
|
if enable_refiner != 1: |
|
|
|
default_refiner_selected = 0 |
|
|
|
if enable_upscaler != 1: |
|
|
|
default_upscaler_selected = 0 |
|
|
|
|
|
|
|
model_configuration_requires_refiner = 0 |
|
|
|
if default_model_configuration in model_configuration_force_refiner_object: |
|
|
|
model_configuration_requires_refiner = model_configuration_force_refiner_object[default_model_configuration] |
|
|
|
if model_configuration_requires_refiner == 1: |
|
|
|
enable_refiner = 1 |
|
default_refiner_selected = 1 |
|
|
|
default_refine_option = "No" |
|
|
|
if default_refiner_selected == 1: |
|
|
|
default_refine_option = "Yes" |
|
|
|
default_upscale_option = "No" |
|
|
|
if default_upscaler_selected == 1: |
|
|
|
default_upscale_option = "Yes" |
|
|
|
is_default_config = 0 |
|
|
|
if default_model_configuration in default_model_configuration_object: |
|
|
|
is_default_config = 1 |
|
|
|
default_refiner_and_upscaler_status_text = refiner_and_upscaler_status_opening_html |
|
|
|
|
|
|
|
refiner_default_config_accordion_visible = True |
|
|
|
if ( |
|
(enable_refiner != 1) or |
|
(is_default_config != 1) |
|
): |
|
|
|
refiner_default_config_accordion_visible = False |
|
|
|
refiner_default_config_accordion_open = False |
|
|
|
if ( |
|
(is_default_config == 1) and |
|
(default_refiner_selected == 1) |
|
): |
|
|
|
refiner_default_config_accordion_open = True |
|
|
|
|
|
|
|
refiner_online_config_accordion_visible = True |
|
|
|
if ( |
|
(enable_refiner != 1) or |
|
(is_default_config == 1) |
|
): |
|
|
|
refiner_online_config_accordion_visible = False |
|
|
|
refiner_online_config_accordion_open = False |
|
|
|
if ( |
|
(is_default_config != 1) and |
|
(default_refiner_selected == 1) |
|
): |
|
|
|
refiner_online_config_accordion_open = True |
|
|
|
refiner_group_visible = False |
|
|
|
if enable_refiner == 1: |
|
|
|
refiner_group_visible = True |
|
|
|
if default_refiner_selected == 1: |
|
|
|
default_refiner_and_upscaler_status_text += refiner_on_text |
|
|
|
else: |
|
|
|
default_refiner_and_upscaler_status_text += refiner_off_text |
|
|
|
|
|
|
|
upscaler_accordion_open = False |
|
|
|
if default_upscaler_selected == 1: |
|
|
|
upscaler_accordion_open = True |
|
|
|
upscaler_group_visible = False |
|
|
|
if enable_upscaler == 1: |
|
|
|
upscaler_group_visible = True |
|
|
|
if default_upscaler_selected == 1: |
|
|
|
default_refiner_and_upscaler_status_text += upscaler_on_text |
|
|
|
else: |
|
|
|
default_refiner_and_upscaler_status_text += upscaler_off_text |
|
|
|
|
|
|
|
default_refiner_and_upscaler_status_text += refiner_and_upscaler_status_closing_html |
|
|
|
|
|
|
|
image_gallery_array = [] |
|
prompt_information_array = [] |
|
|
|
|
|
|
|
default_negative_prompt_field_visibility = True |
|
default_negative_prompt_for_sdxl_turbo_field_visibility = False |
|
default_base_model_num_inference_steps_field_visibility = True |
|
default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility = False |
|
default_guidance_scale_field_visibility = True |
|
default_guidance_scale_for_sdxl_turbo_field_visibility = False |
|
|
|
if default_base_model == "sdxl_turbo": |
|
|
|
default_negative_prompt_field_visibility = False |
|
default_negative_prompt_for_sdxl_turbo_field_visibility = True |
|
default_base_model_num_inference_steps_field_visibility = False |
|
default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility = True |
|
default_guidance_scale_field_visibility = False |
|
default_guidance_scale_for_sdxl_turbo_field_visibility = True |
|
|
|
|
|
|
|
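# A module-level "global" statement has no effect; these pipelines are
# actually created inside create_image_function. The declarations are kept
# only to document that intent.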
global pipe |
|
global refiner |
|
global upscaler |
|
|
|
last_model_configuration_name_value = "" |
|
last_refiner_selected = "" |
|
last_upscaler_selected = "" |
|
|
|
|
|
|
|
default_base_model_choices_array = [] |
|
|
|
stored_model_configuration_names_object = {} |
|
|
|
for this_base_model in base_model_array: |
|
|
|
default_base_model_choices_array.append( |
|
base_model_names_object[this_base_model] |
|
) |
|
|
|
stored_model_configuration = base_model_model_configuration_defaults_object[this_base_model] |
|
|
|
stored_model_configuration_names_object[this_base_model] = model_configuration_names_object[stored_model_configuration] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
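# Display a status message in the command prompt and/or as a modal on the
# page, depending on the settings above.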
def show_message( |
|
message_to_display |
|
): |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print (message_to_display) |
|
|
|
if show_messages_in_modal_on_page == 1: |
|
|
|
gr.Info(message_to_display) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
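# Split a duration in seconds into (hours, minutes, seconds).
# For example, convert_seconds(3725) returns (1, 2, 5).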
def convert_seconds( |
|
seconds |
|
): |
|
|
|
|
|
|
|
hours = seconds // 3600 |
|
minutes = (seconds % 3600) // 60 |
|
seconds = seconds % 60 |
|
return hours, minutes, seconds |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
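# Return True when the given seed cannot be used: not an integer, zero or
# negative, or above "default_seed_maximum". For example,
# seed_not_valid("abc") and seed_not_valid(0) both return True.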
def seed_not_valid(seed_num_str): |
|
try: |
|
seed_num = int(seed_num_str) |
|
        if (seed_num > 0) and (seed_num <= default_seed_maximum):
|
return False |
|
else: |
|
return True |
|
except ValueError: |
|
return True |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
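# Normalize the mixed truthy values used by the interface (1, "Yes", "True"
# or True) to 1 and everything else to 0. For example, numerical_bool("Yes")
# returns 1 and numerical_bool("No") returns 0.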
def numerical_bool( |
|
original_value |
|
): |
|
|
|
new_value = 0 |
|
|
|
if ( |
|
(original_value == 1) or |
|
(original_value == "Yes") or |
|
(original_value == "True") or |
|
(original_value == True) |
|
): |
|
|
|
new_value = 1 |
|
|
|
return new_value |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
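# Keep whole words from the start of the prompt until adding the next word
# would exceed the tokenizer's limit. Returns the kept text and the text
# that didn't fit.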
def truncate_prompt ( |
|
existing_prompt_text |
|
): |
|
|
|
|
|
|
|
|
|
|
|
tokenizer = pipe.tokenizer |
|
|
|
max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens |
|
|
|
prompt_text_words_array = existing_prompt_text.split(" ") |
|
|
|
prompt_text_words_array_length = len(prompt_text_words_array) |
|
|
|
prompt_text_words_index = 0 |
|
|
|
prompt_text_substring = "" |
|
prompt_text_not_used_substring = "" |
|
|
|
for prompt_text_word in prompt_text_words_array: |
|
|
|
prompt_text_words_index += 1 |
|
|
|
substring_to_test = prompt_text_substring |
|
|
|
if prompt_text_words_index > 1: |
|
|
|
substring_to_test += " " |
|
|
|
substring_to_test += prompt_text_word |
|
|
|
token_length_of_substring_to_test = len(tokenizer.tokenize(substring_to_test)) |
|
|
|
if token_length_of_substring_to_test > max_token_length_of_model: |
|
|
|
prompt_text_not_used_substring += prompt_text_word + " " |
|
|
|
else: |
|
|
|
prompt_text_substring = substring_to_test |
|
|
|
return ( |
|
prompt_text_substring, |
|
prompt_text_not_used_substring |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
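# When a thumbnail in the gallery is selected, show that image along with
# the generation details that were stored for it.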
def update_prompt_info_from_gallery ( |
|
gallery_data: gr.SelectData |
|
): |
|
|
|
gallery_data_index = gallery_data.index |
|
|
|
output_image_field_update = gr.Gallery( |
|
selected_index = gallery_data_index |
|
) |
|
|
|
output_text_field_update = prompt_information_array[gallery_data_index] |
|
|
|
return { |
|
output_image_field: output_image_field_update, |
|
output_text_field: output_text_field_update |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
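# Main generation routine. Loads the selected pipeline (reusing it when the
# configuration hasn't changed), optionally loads the refiner and upscaler,
# runs generation with the chosen settings and returns the image, its
# generation details and any prompt truncation notice.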
def create_image_function ( |
|
base_model_field_index, |
|
model_configuration_field_index, |
|
prompt_text, |
|
negative_prompt_text, |
|
image_width, |
|
image_height, |
|
guidance_scale, |
|
base_model_num_inference_steps, |
|
base_model_num_inference_steps_field_for_sdxl_turbo, |
|
actual_seed, |
|
|
|
refining_selection_online_config_normal_field_value, |
|
refining_selection_online_config_automatically_selected_field_value, |
|
|
|
refining_use_denoising_start_in_base_model_when_using_refiner_field_value, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field_value, |
|
|
|
refining_denoise_start_for_online_config_field_value, |
|
refining_number_of_iterations_for_online_config_field_value, |
|
|
|
upscaling_selection_field_value, |
|
upscaling_num_inference_steps |
|
): |
|
|
|
refining_selection_online_config_normal_field_value = numerical_bool(refining_selection_online_config_normal_field_value) |
|
refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value) |
|
|
|
|
|
refining_use_denoising_start_in_base_model_when_using_refiner_field_value = numerical_bool(refining_use_denoising_start_in_base_model_when_using_refiner_field_value) |
|
refining_base_model_output_to_refiner_is_in_latent_space_field_value = numerical_bool(refining_base_model_output_to_refiner_is_in_latent_space_field_value) |
|
|
|
use_upscaler = numerical_bool(upscaling_selection_field_value) |
|
|
|
|
|
|
|
base_model_name_value = base_model_array[base_model_field_index] |
|
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_name_value][model_configuration_field_index] |
|
|
|
|
|
|
|
is_config_state = 0 |
|
|
|
if model_configuration_name_value in default_model_configuration_object: |
|
|
|
is_config_state = 1 |
|
|
|
use_refiner = 0 |
|
|
|
|
|
|
|
if ( |
|
( |
|
(is_config_state == 1) and |
|
refining_selection_online_config_normal_field_value |
|
) or ( |
|
(is_config_state != 1) and |
|
refining_selection_online_config_automatically_selected_field_value |
|
) |
|
): |
|
|
|
use_refiner = 1 |
|
|
|
|
|
|
|
if base_model_name_value == "sdxl_turbo": |
|
|
|
negative_prompt_text = "" |
|
base_model_num_inference_steps = base_model_num_inference_steps_field_for_sdxl_turbo |
|
guidance_scale = 0 |
|
|
|
|
|
|
|
global last_model_configuration_name_value |
|
|
|
global pipe |
|
global refiner |
|
global upscaler |
|
|
|
global image_gallery_array |
|
global prompt_information_array |
|
|
|
if ( |
|
(last_model_configuration_name_value == "") or |
|
(model_configuration_name_value != last_model_configuration_name_value) |
|
): |
|
|
|
show_message("Loading base model...") |
|
|
|
if (last_model_configuration_name_value != ""): |
|
|
|
del pipe |
|
|
|
if 'refiner' in globals(): |
|
del refiner |
|
|
|
if 'upscaler' in globals(): |
|
del upscaler |
|
|
|
import gc |
|
|
|
gc.collect() |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
if base_model_name_value == "photoreal": |
|
|
|
base_model_kwargs = { |
|
"safety_checker": None, |
|
"requires_safety_checker": False |
|
} |
|
|
|
elif base_model_name_value == "sdxl_turbo": |
|
|
|
base_model_kwargs = { |
|
"use_safetensors": True, |
|
"safety_checker": None |
|
} |
|
|
|
if device == "cuda": |
|
|
|
base_model_kwargs["variant"] = "fp16" |
|
|
|
else: |
|
|
|
base_model_kwargs = { |
|
"use_safetensors": True |
|
} |
|
|
|
if device == "cuda": |
|
|
|
base_model_kwargs["variant"] = "fp16" |
|
|
|
if device == "cuda": |
|
|
|
base_model_kwargs["torch_dtype"] = torch.float16 |
|
|
|
if use_custom_hugging_face_cache_dir == 1: |
|
|
|
base_model_kwargs["cache_dir"] = hugging_face_cache_dir |
|
|
|
pipe = DiffusionPipeline.from_pretrained( |
|
model_configuration_links_object[model_configuration_name_value], |
|
**base_model_kwargs |
|
) |
|
|
|
if use_model_cpu_offload_for_base_model == 1: |
|
pipe.enable_model_cpu_offload() |
|
|
|
if use_xformers == 1: |
|
pipe.enable_xformers_memory_efficient_attention() |
|
|
|
pipe = pipe.to(device) |
|
|
|
if use_sequential_cpu_offload_for_base_model == 1: |
|
pipe.enable_sequential_cpu_offload() |
|
|
|
if use_default_attn_processor == 1: |
|
pipe.unet.set_default_attn_processor() |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
else: |
|
pipe.unet = torch.compile( |
|
pipe.unet, |
|
mode = "reduce-overhead", |
|
fullgraph = True |
|
) |
|
|
|
last_model_configuration_name_value = model_configuration_name_value |
|
|
|
|
|
|
|
if use_refiner == 1: |
|
|
|
show_message("Loading refiner...") |
|
|
|
refiner_kwargs = { |
|
"use_safetensors": True |
|
} |
|
|
|
if device == "cuda": |
|
|
|
refiner_kwargs["variant"] = "fp16" |
|
refiner_kwargs["torch_dtype"] = torch.float16 |
|
|
|
if use_custom_hugging_face_cache_dir == 1: |
|
|
|
refiner_kwargs["cache_dir"] = hugging_face_cache_dir |
|
|
|
refiner = DiffusionPipeline.from_pretrained( |
|
"stabilityai/stable-diffusion-xl-refiner-1.0", |
|
**refiner_kwargs |
|
) |
|
|
|
if use_model_cpu_offload_for_refiner == 1: |
|
|
|
refiner.enable_model_cpu_offload() |
|
|
|
if use_xformers == 1: |
|
|
|
refiner.enable_xformers_memory_efficient_attention() |
|
|
|
refiner = refiner.to(device) |
|
|
|
if use_sequential_cpu_offload_for_refiner == 1: |
|
|
|
refiner.enable_sequential_cpu_offload() |
|
|
|
if use_default_attn_processor == 1: |
|
|
|
refiner.unet.set_default_attn_processor() |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
else: |
|
refiner.unet = torch.compile( |
|
refiner.unet, |
|
mode = "reduce-overhead", |
|
fullgraph = True |
|
) |
|
|
|
|
|
|
|
if use_upscaler == 1: |
|
|
|
show_message("Loading upscaler...") |
|
|
|
upscaler_kwargs = { |
|
"use_safetensors": True |
|
} |
|
|
|
if device == "cuda": |
|
|
|
upscaler_kwargs["variant"] = "fp16" |
|
upscaler_kwargs["torch_dtype"] = torch.float16 |
|
|
|
if use_custom_hugging_face_cache_dir == 1: |
|
|
|
upscaler_kwargs["cache_dir"] = hugging_face_cache_dir |
|
|
|
upscaler = DiffusionPipeline.from_pretrained( |
|
"stabilityai/sd-x2-latent-upscaler", |
|
**upscaler_kwargs |
|
) |
|
|
|
if use_model_cpu_offload_for_upscaler == 1: |
|
|
|
upscaler.enable_model_cpu_offload() |
|
|
|
if use_xformers == 1: |
|
|
|
upscaler.enable_xformers_memory_efficient_attention() |
|
|
|
upscaler = upscaler.to(device) |
|
|
|
if use_sequential_cpu_offload_for_upscaler == 1: |
|
|
|
upscaler.enable_sequential_cpu_offload() |
|
|
|
if use_default_attn_processor == 1: |
|
|
|
upscaler.unet.set_default_attn_processor() |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
else: |
|
upscaler.unet = torch.compile( |
|
upscaler.unet, |
|
mode = "reduce-overhead", |
|
fullgraph = True |
|
) |
|
|
|
|
|
|
|
if log_generation_times == 1: |
|
|
|
start_time = time.time() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tokenizer = pipe.tokenizer |
|
|
|
max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens |
|
|
|
token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text)) |
|
token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text)) |
|
|
|
prompt_text_not_used_substring = "" |
|
|
|
message_about_prompt_truncation = "" |
|
|
|
if token_length_of_prompt_text > max_token_length_of_model: |
|
|
|
( |
|
prompt_text, |
|
prompt_text_not_used_substring |
|
) = truncate_prompt( |
|
prompt_text |
|
) |
|
|
|
message_about_prompt_truncation += "Your prompt has been truncated because it is too long. This part has been truncated:<br/><br/><span style=\"font-style: italic;\">" + prompt_text_not_used_substring + "</span>" |
|
|
|
negative_prompt_text_not_used_substring = "" |
|
|
|
if token_length_of_negative_prompt_text > max_token_length_of_model: |
|
|
|
( |
|
negative_prompt_text, |
|
negative_prompt_text_not_used_substring |
|
) = truncate_prompt( |
|
negative_prompt_text |
|
) |
|
|
|
if len(message_about_prompt_truncation) > 0: |
|
|
|
message_about_prompt_truncation += "<br/><br/>" |
|
|
|
message_about_prompt_truncation += "Your negative prompt has been truncated because it is too long. This part has been truncated:<br/><br/><span style=\"font-style: italic;\">" + negative_prompt_text_not_used_substring + "</span>" |
|
|
|
    prompt_truncated_field_update = gr.HTML(
|
value = "", |
|
visible = False |
|
) |
|
|
|
if len(message_about_prompt_truncation) > 0: |
|
|
|
        prompt_truncated_field_update = gr.HTML(
|
value = "<div style=\"padding: 10px; background: #fff;\"><span style=\"font-weight: bold;\">Note</span>: " + message_about_prompt_truncation + "</div>", |
|
visible = True |
|
) |
|
|
|
show_message("Note: Part of your prompt has been truncated automatically because it was too long.") |
|
|
|
|
|
|
|
actual_seed = int(actual_seed) |
|
|
|
if actual_seed == 0: |
|
|
|
import random |
|
|
|
default_seed_maximum_for_random = default_seed_maximum |
|
|
|
if default_seed_maximum_for_random > 9007199254740992: |
|
|
|
|
|
|
|
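            # Cap random seeds at 2**53, since values above that cannot all
            # be represented exactly by the Gradio frontend (see the link in
            # the page footer).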
default_seed_maximum_for_random = 9007199254740992 |
|
|
|
        actual_seed = random.randint(1, default_seed_maximum_for_random)
|
|
|
if seed_not_valid(actual_seed): |
|
|
|
raise Exception("Seed is not valid.") |
|
|
|
generator = torch.manual_seed(actual_seed) |
|
|
|
|
|
|
|
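    # Configuration names without "default" replicate the code paths of the
    # original online tools; the default configurations use the newer code
    # further below.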
if model_configuration_name_value.find("default") < 0: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
prompt = prompt_text |
|
negative_prompt = negative_prompt_text |
|
width = image_width |
|
height = image_height |
|
scale = guidance_scale |
|
steps = base_model_num_inference_steps |
|
refining = use_refiner |
|
if refining == 1: |
|
refining = "Yes" |
|
upscaling = use_upscaler |
|
if upscaling == 1: |
|
upscaling = "Yes" |
|
|
|
prompt_2 = "" |
|
negative_prompt_2 = "" |
|
|
|
high_noise_frac = refining_denoise_start_for_online_config_field_value |
|
|
|
if ( |
|
model_configuration_name_value == "sdxl_2023-11-12" or |
|
model_configuration_name_value == "sdxl_2023-09-05" |
|
): |
|
|
|
n_steps = refining_number_of_iterations_for_online_config_field_value |
|
|
|
upscaling_num_inference_steps = 15 |
|
|
|
if model_configuration_name_value == "sdxl_2023-09-05": |
|
|
|
upscaling_num_inference_steps = 5 |
|
|
|
|
|
|
|
show_message("Initial image creation has begun."); |
|
int_image = pipe(prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_inference_steps=steps, height=height, width=width, guidance_scale=scale, num_images_per_prompt=1, generator=generator, output_type="latent").images |
|
if upscaling == 'Yes': |
|
show_message("Refining has begun."); |
|
image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image, num_inference_steps=n_steps, denoising_start=high_noise_frac).images[0] |
|
show_message("Upscaling has begun."); |
|
|
|
|
|
|
|
|
|
|
|
|
|
upscaled = upscaler(prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=upscaling_num_inference_steps, guidance_scale=0).images[0] |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
|
|
|
|
|
|
|
|
|
|
image_to_return = upscaled |
|
|
|
else: |
|
show_message("Refining has begun."); |
|
                image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image, num_inference_steps=n_steps, denoising_start=high_noise_frac).images[0]
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
|
|
|
|
|
|
|
|
image_to_return = image |
|
|
|
|
|
|
|
elif ( |
|
model_configuration_name_value == "photoreal_2023-11-12" or |
|
model_configuration_name_value == "photoreal_2023-09-01" |
|
): |
|
|
|
        # Note: the naming in this branch apparently follows the online
        # PhotoReal code; "upscale" here actually controls the refiner.
        Prompt = prompt

        upscale = refining
|
|
|
|
|
|
|
if upscale == "Yes": |
|
show_message("Initial image creation has begun."); |
|
int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images |
|
show_message("Refining has begun."); |
|
image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0] |
|
else: |
|
show_message("Image creation has begun."); |
|
image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0] |
|
|
|
|
|
|
|
image_to_return = image |
|
|
|
else: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if use_refiner == 1: |
|
|
|
if use_upscaler == 1: |
|
|
|
show_message("Will create initial image, then refine and then upscale"); |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Initial image steps..."); |
|
|
|
                initial_image = pipe(
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
width = image_width, |
|
height = image_height, |
|
num_inference_steps = base_model_num_inference_steps, |
|
guidance_scale = guidance_scale, |
|
num_images_per_prompt = 1, |
|
generator = generator, |
|
|
|
output_type = which_output_type_before_refiner_and_upscaler |
|
).images |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Refiner steps..."); |
|
|
|
refined_image = refiner( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
                    image = initial_image,
|
num_inference_steps = base_model_num_inference_steps, |
|
denoising_start = refining_denoise_start_for_default_config, |
|
output_type = "pil" |
|
).images |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Upscaler steps..."); |
|
|
|
upscaled_image = upscaler( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
image = refined_image, |
|
num_inference_steps = upscaling_num_inference_steps, |
|
guidance_scale = 0 |
|
).images[0] |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
image_to_return = upscaled_image |
|
|
|
else: |
|
|
|
show_message("Will create initial image and then refine"); |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Initial image steps..."); |
|
|
|
                initial_image = pipe(
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
width = image_width, |
|
height = image_height, |
|
|
|
|
|
|
|
num_inference_steps = base_model_num_inference_steps, |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
guidance_scale = guidance_scale, |
|
num_images_per_prompt = 1, |
|
generator = generator, |
|
output_type = which_output_type_before_refiner_and_upscaler |
|
).images |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Refiner steps..."); |
|
|
|
refined_image = refiner( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
                    image = initial_image,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
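                    # These values are hardcoded for this branch; the
                    # default-config denoise-start slider is not applied here.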
num_inference_steps = 60, |
|
denoising_start = 0.25 |
|
|
|
|
|
|
|
).images[0] |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
image_to_return = refined_image |
|
|
|
else: |
|
|
|
if use_upscaler == 1: |
|
|
|
show_message("Will create initial image and then upscale"); |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Initial image steps..."); |
|
|
|
                initial_image = pipe(
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
width = image_width, |
|
height = image_height, |
|
num_inference_steps = base_model_num_inference_steps, |
|
guidance_scale = guidance_scale, |
|
num_images_per_prompt = 1, |
|
generator = generator, |
|
|
|
output_type = "pil" |
|
).images |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Upscaler steps..."); |
|
|
|
upscaled_image = upscaler( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
                    image = initial_image,
|
num_inference_steps = upscaling_num_inference_steps, |
|
guidance_scale = 0 |
|
).images[0] |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
image_to_return = upscaled_image |
|
|
|
else: |
|
|
|
show_message("Will create image (no refining or upscaling)"); |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Image steps..."); |
|
|
|
image = pipe( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
width = image_width, |
|
height = image_height, |
|
num_inference_steps = base_model_num_inference_steps, |
|
guidance_scale = guidance_scale, |
|
num_images_per_prompt = 1, |
|
generator = generator |
|
).images[0] |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
image_to_return = image |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
nice_model_name = base_model_names_object[base_model_name_value] + " (" + model_configuration_links_object[model_configuration_name_value] + ")" |
|
|
|
info_about_prompt_lines_array = [ |
|
"Prompt:\n" + prompt_text |
|
] |
|
|
|
if len(negative_prompt_text) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Negative Prompt:\n" + negative_prompt_text |
|
]) |
|
|
|
dimensions_title = "Dimensions" |
|
|
|
if use_upscaler == 1: |
|
|
|
dimensions_title = "Original Dimensions" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
dimensions_title + ": " + str(image_width) + "x" + str(image_height) + " px" |
|
]) |
|
|
|
if use_upscaler == 1: |
|
|
|
upscaled_image_width = int(image_width * 2) |
|
upscaled_image_height = int(image_height * 2) |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Upscaled Dimensions: " + str(upscaled_image_width) + "x" + str(upscaled_image_height) + " px" |
|
]) |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Seed: " + str(actual_seed) |
|
]) |
|
|
|
if int(guidance_scale) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Guidance Scale: " + str(guidance_scale) |
|
]) |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Base Model Steps: " + str(base_model_num_inference_steps), |
|
"Model: " + nice_model_name |
|
]) |
|
|
|
if use_refiner == 1: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if refining_denoise_start_for_online_config_field_value != 0: |
|
|
|
            # Round to avoid floating-point artifacts such as "94.99999999999999%".
            nice_refiner_denoise_start = str(round(refining_denoise_start_for_online_config_field_value * 100)) + "%"
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Refiner?: Yes", |
|
"Refiner denoise start %: " + nice_refiner_denoise_start |
|
]) |
|
|
|
if int(refining_number_of_iterations_for_online_config_field_value) != 0: |
|
|
|
nice_refiner_number_of_iterations = str(refining_number_of_iterations_for_online_config_field_value) |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Refiner number of iterations: " + nice_refiner_number_of_iterations |
|
]) |
|
|
|
if use_upscaler == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Upscaled (2x)? Yes", |
|
"Refiner Steps: " + str(upscaling_num_inference_steps) |
|
]) |
|
|
|
if log_generation_times == 1: |
|
|
|
end_time = time.time() |
|
|
|
generation_time_in_seconds = (end_time - start_time) |
|
|
|
( |
|
generation_partial_hours, |
|
generation_partial_minutes, |
|
generation_partial_seconds |
|
) = convert_seconds(generation_time_in_seconds) |
|
|
|
if generation_partial_hours > 0: |
|
|
|
hours_text = "hr" |
|
|
|
if generation_partial_hours > 1: |
|
|
|
hours_text = "hrs" |
|
|
|
nice_generation_time = str(int(generation_partial_hours)) + " " + hours_text + ". " + str(int(generation_partial_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec." |
|
|
|
elif generation_partial_minutes > 0: |
|
|
|
nice_generation_time = str(int(generation_partial_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec." |
|
|
|
else: |
|
|
|
nice_generation_time = str(round(generation_time_in_seconds, 2)) + " sec." |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Time: " + nice_generation_time |
|
]) |
|
|
|
|
|
|
|
if len(prompt_text_not_used_substring) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"End of Prompt Truncated: " + prompt_text_not_used_substring |
|
]) |
|
|
|
if len(negative_prompt_text_not_used_substring) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"End of Negative Prompt Truncated: " + negative_prompt_text_not_used_substring |
|
]) |
|
|
|
|
|
|
|
if display_xformers_usage_in_prompt_info > 0: |
|
|
|
nice_xformers_usage = "No" |
|
|
|
if use_xformers == 1: |
|
|
|
nice_xformers_usage = "Yes" |
|
|
|
if include_transformers_version_in_prompt_info == 1: |
|
|
|
import transformers |
|
|
|
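                # Note: this appends the version of the "transformers"
                # library, not the version of xFormers itself.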
nice_xformers_usage += " (version " + str(transformers.__version__) + ")" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"xFormers Used?: " + nice_xformers_usage |
|
]) |
|
|
|
if display_default_attn_processor_usage_in_prompt_info > 0: |
|
|
|
nice_default_attn_processor_usage = "No" |
|
|
|
if use_default_attn_processor == 1: |
|
|
|
nice_default_attn_processor_usage = "Yes" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Default AttnProcessor Used?: " + nice_default_attn_processor_usage |
|
]) |
|
|
|
|
|
|
|
info_about_prompt = '\n'.join(info_about_prompt_lines_array) |
|
|
|
|
|
|
|
if auto_save_imagery == 1: |
|
|
|
|
|
|
|
if not os.path.exists(saved_images_dir): |
|
os.makedirs(saved_images_dir) |
|
|
|
yy_mm_dd_date_stamp = datetime.today().strftime('%Y-%m-%d') |
|
|
|
saved_images_date_dir = saved_images_dir + "/" + yy_mm_dd_date_stamp + "/" |
|
|
|
if not os.path.exists(saved_images_date_dir): |
|
os.makedirs(saved_images_date_dir) |
|
|
|
image_count = 1 |
|
|
|
file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count) |
|
|
|
saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png" |
|
|
|
        # Find the first unused file name for today's date.
        while os.path.exists(saved_image_path_and_file):

            image_count += 1

            file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count)

            saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png"
|
|
|
        image_to_return.save(saved_image_path_and_file)
|
|
|
saved_text_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".txt" |
|
|
|
        with open(saved_text_file_path_and_file, "w") as prompt_info_file_handle:

            prompt_info_file_handle.write(info_about_prompt)
|
|
|
|
|
|
|
if use_image_gallery == 1: |
|
|
|
image_gallery_array.insert(0, image_to_return) |
|
prompt_information_array.insert(0, info_about_prompt) |
|
|
|
output_image_field_update = gr.Gallery( |
|
value = image_gallery_array, |
|
selected_index = 0 |
|
) |
|
|
|
else: |
|
|
|
output_image_field_update = gr.Image( |
|
value = image_to_return |
|
) |
|
|
|
|
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Image created.") |
|
|
|
|
|
|
|
return { |
|
output_image_field: output_image_field_update, |
|
output_text_field: info_about_prompt, |
|
        prompt_truncated_field: prompt_truncated_field_update
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
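# Close the command prompt window on Windows, which also stops any image
# generation that is in progress.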
def cancel_image_processing(): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
gr.Warning("The command prompt window has been closed. Any image generation in progress has been stopped. To generate any other images, you will need to launch the command prompt again.") |
|
|
|
    os.system('title kill_window')

    # The taskkill command is run twice, presumably to make sure the window
    # actually closes.
    os.system('taskkill /f /fi "WINDOWTITLE eq kill_window"')

    os.system('taskkill /f /fi "WINDOWTITLE eq kill_window"')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
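# When the base model dropdown changes, repopulate the configuration
# dropdown and toggle the fields that differ for SDXL Turbo.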
def base_model_field_update_function( |
|
base_model_field_index |
|
): |
|
|
|
base_model_field_value = base_model_array[base_model_field_index] |
|
|
|
if base_model_field_value in base_model_array: |
|
|
|
if base_model_field_value in base_model_object_of_model_configuration_arrays: |
|
|
|
model_configuration_choices_array_update = [] |
|
|
|
for this_model_configuration in base_model_object_of_model_configuration_arrays[base_model_field_value]: |
|
|
|
model_configuration_choices_array_update.append( |
|
model_configuration_names_object[this_model_configuration] |
|
) |
|
|
|
if base_model_field_value in base_model_model_configuration_defaults_object: |
|
|
|
model_configuration_field_selected_value = stored_model_configuration_names_object[base_model_field_value] |
|
|
|
model_configuration_field_update = gr.Dropdown( |
|
choices = model_configuration_choices_array_update, |
|
value = model_configuration_field_selected_value |
|
) |
|
|
|
negative_prompt_field_visibility = True |
|
negative_prompt_for_sdxl_turbo_field_visibility = False |
|
base_model_num_inference_steps_field_visibility = True |
|
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = False |
|
guidance_scale_field_visibility = True |
|
guidance_scale_for_sdxl_turbo_field_visibility = False |
|
|
|
if base_model_field_value == "sdxl_turbo": |
|
|
|
negative_prompt_field_visibility = False |
|
negative_prompt_for_sdxl_turbo_field_visibility = True |
|
base_model_num_inference_steps_field_visibility = False |
|
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = True |
|
guidance_scale_field_visibility = False |
|
guidance_scale_for_sdxl_turbo_field_visibility = True |
|
|
|
negative_prompt_field_update = gr.Textbox( |
|
visible = negative_prompt_field_visibility |
|
) |
|
|
|
negative_prompt_for_sdxl_turbo_field_update = gr.HTML( |
|
visible = negative_prompt_for_sdxl_turbo_field_visibility |
|
) |
|
|
|
base_model_num_inference_steps_field_update = gr.Slider( |
|
visible = base_model_num_inference_steps_field_visibility |
|
) |
|
|
|
base_model_num_inference_steps_field_for_sdxl_turbo_update = gr.Slider( |
|
visible = base_model_num_inference_steps_field_for_sdxl_turbo_visibility |
|
) |
|
|
|
guidance_scale_field_update = gr.Slider( |
|
visible = guidance_scale_field_visibility |
|
) |
|
|
|
guidance_scale_for_sdxl_turbo_field_update = gr.HTML( |
|
visible = guidance_scale_for_sdxl_turbo_field_visibility |
|
) |
|
|
|
return { |
|
model_configuration_field: model_configuration_field_update, |
|
negative_prompt_field: negative_prompt_field_update, |
|
negative_prompt_for_sdxl_turbo_field: negative_prompt_for_sdxl_turbo_field_update, |
|
base_model_num_inference_steps_field: base_model_num_inference_steps_field_update, |
|
base_model_num_inference_steps_field_for_sdxl_turbo_field: base_model_num_inference_steps_field_for_sdxl_turbo_update, |
|
guidance_scale_field: guidance_scale_field_update, |
|
guidance_scale_for_sdxl_turbo_field: guidance_scale_for_sdxl_turbo_field_update |
|
|
|
} |
|
|
|
error_function("Error") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
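# When the configuration dropdown changes, remember the choice for this base
# model and show the refiner options that apply to it.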
def model_configuration_field_update_function( |
|
base_model_field_index, |
|
model_configuration_field_index |
|
): |
|
|
|
base_model_field_value = base_model_array[base_model_field_index] |
|
|
|
    # Make sure the selected configuration index is valid for this base model.
    if model_configuration_field_index < len(base_model_object_of_model_configuration_arrays[base_model_field_value]):
|
|
|
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_field_value][model_configuration_field_index] |
|
|
|
|
|
|
|
stored_model_configuration_names_object[base_model_field_value] = model_configuration_names_object[model_configuration_name_value] |
|
|
|
|
|
|
|
is_config_state = 0 |
|
|
|
if model_configuration_name_value in default_model_configuration_object: |
|
|
|
is_config_state = 1 |
|
|
|
negative_prompt_field_visibility = True |
|
negative_prompt_for_sdxl_turbo_field_visibility = False |
|
base_model_num_inference_steps_field_visibility = True |
|
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = False |
|
guidance_scale_field_visibility = True |
|
guidance_scale_for_sdxl_turbo_field_visibility = False |
|
|
|
if base_model_field_value == "sdxl_turbo": |
|
|
|
negative_prompt_field_visibility = False |
|
negative_prompt_for_sdxl_turbo_field_visibility = True |
|
base_model_num_inference_steps_field_visibility = False |
|
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = True |
|
guidance_scale_field_visibility = False |
|
guidance_scale_for_sdxl_turbo_field_visibility = True |
|
|
|
negative_prompt_field_update = gr.Textbox( |
|
visible = negative_prompt_field_visibility |
|
) |
|
|
|
negative_prompt_for_sdxl_turbo_field_update = gr.HTML( |
|
visible = negative_prompt_for_sdxl_turbo_field_visibility |
|
) |
|
|
|
base_model_num_inference_steps_field_update = gr.Slider( |
|
visible = base_model_num_inference_steps_field_visibility |
|
) |
|
|
|
base_model_num_inference_steps_field_for_sdxl_turbo_update = gr.Slider( |
|
visible = base_model_num_inference_steps_field_for_sdxl_turbo_visibility |
|
) |
|
|
|
guidance_scale_field_update = gr.Slider( |
|
visible = guidance_scale_field_visibility |
|
) |
|
|
|
guidance_scale_for_sdxl_turbo_field_update = gr.HTML( |
|
visible = guidance_scale_for_sdxl_turbo_field_visibility |
|
) |
|
|
|
|
|
|
|
refiner_default_config_accordion_visibility = False |
|
refiner_online_config_accordion_visibility = True |
|
|
|
if is_config_state == 1: |
|
|
|
refiner_default_config_accordion_visibility = True |
|
refiner_online_config_accordion_visibility = False |
|
|
|
|
|
|
|
refining_selection_automatically_selected_message_field_visibility = False |
|
|
|
refining_selection_online_config_normal_field_visibility = True |
|
refining_selection_online_config_automatically_selected_field_visibility = False |
|
|
|
if model_configuration_name_value in model_configuration_force_refiner_object: |
|
|
|
refining_selection_automatically_selected_message_field_visibility = True |
|
|
|
refining_selection_online_config_normal_field_visibility = False |
|
refining_selection_online_config_automatically_selected_field_visibility = True |
|
|
|
|
|
|
|
refiner_default_config_accordion_update = gr.Accordion( |
|
visible = refiner_default_config_accordion_visibility |
|
) |
|
|
|
refiner_online_config_accordion_update = gr.Accordion( |
|
visible = refiner_online_config_accordion_visibility |
|
) |
|
|
|
refining_selection_automatically_selected_message_field_update = gr.Markdown( |
|
visible = refining_selection_automatically_selected_message_field_visibility |
|
) |
|
|
|
refining_selection_online_config_normal_field_update = gr.Radio( |
|
visible = refining_selection_online_config_normal_field_visibility |
|
) |
|
|
|
refining_selection_online_config_automatically_selected_field_update = gr.Radio( |
|
visible = refining_selection_online_config_automatically_selected_field_visibility |
|
) |
|
|
|
|
|
|
|
return { |
|
negative_prompt_field: negative_prompt_field_update, |
|
negative_prompt_for_sdxl_turbo_field: negative_prompt_for_sdxl_turbo_field_update, |
|
base_model_num_inference_steps_field: base_model_num_inference_steps_field_update, |
|
base_model_num_inference_steps_field_for_sdxl_turbo_field: base_model_num_inference_steps_field_for_sdxl_turbo_update, |
|
guidance_scale_field: guidance_scale_field_update, |
|
guidance_scale_for_sdxl_turbo_field: guidance_scale_for_sdxl_turbo_field_update, |
|
|
|
refiner_default_config_accordion: refiner_default_config_accordion_update, |
|
refiner_online_config_accordion: refiner_online_config_accordion_update, |
|
refining_selection_automatically_selected_message_field: refining_selection_automatically_selected_message_field_update, |
|
refining_selection_online_config_normal_field: refining_selection_online_config_normal_field_update, |
|
refining_selection_online_config_automatically_selected_field: refining_selection_online_config_automatically_selected_field_update |
|
|
|
} |
|
|
|
error_function("Error") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
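# Recompute the "Refiner is on/off. Upscaler is on/off." status line
# whenever any of the relevant fields change.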
def update_refiner_and_upscaler_status_function( |
|
base_model_field_index, |
|
model_configuration_field_index, |
|
refining_selection_default_config_field_value, |
|
refining_selection_online_config_normal_field_value, |
|
refining_selection_online_config_automatically_selected_field_value, |
|
upscaling_selection_field_value |
|
): |
|
|
|
base_model_field_value = base_model_array[base_model_field_index] |
|
|
|
    # Make sure the selected configuration index is valid for this base model.
    if model_configuration_field_index < len(base_model_object_of_model_configuration_arrays[base_model_field_value]):
|
|
|
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_field_value][model_configuration_field_index] |
|
|
|
is_config_state = 0 |
|
|
|
if model_configuration_name_value in default_model_configuration_object: |
|
|
|
is_config_state = 1 |
|
|
|
refining_selection_default_config_field_value = numerical_bool(refining_selection_default_config_field_value) |
|
refining_selection_online_config_normal_field_value = numerical_bool(refining_selection_online_config_normal_field_value) |
|
refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value) |
|
upscaling_selection_field_value = numerical_bool(upscaling_selection_field_value) |
|
|
|
refiner_and_upscaler_status_text = refiner_and_upscaler_status_opening_html |
|
|
|
if ( |
|
( |
|
(is_config_state == 1) and |
|
refining_selection_online_config_normal_field_value |
|
) or ( |
|
(is_config_state != 1) and |
|
refining_selection_online_config_automatically_selected_field_value |
|
) |
|
): |
|
|
|
refiner_and_upscaler_status_text += refiner_on_text |
|
|
|
else: |
|
|
|
refiner_and_upscaler_status_text += refiner_off_text |
|
|
|
if upscaling_selection_field_value == 1: |
|
|
|
refiner_and_upscaler_status_text += upscaler_on_text |
|
|
|
else: |
|
|
|
refiner_and_upscaler_status_text += upscaler_off_text |
|
|
|
refiner_and_upscaler_status_text += refiner_and_upscaler_status_closing_html |
|
|
|
refiner_and_upscaler_text_field_update = gr.HTML( |
|
value = refiner_and_upscaler_status_text |
|
) |
|
|
|
return { |
|
refiner_and_upscaler_text_field: refiner_and_upscaler_text_field_update |
|
} |
|
|
|
error_function("Error") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Blocks( |
|
title = "AI Image Creation", |
|
css = "footer{display:none !important}", |
|
theme = gr.themes.Default( |
|
spacing_size = gr.themes.sizes.spacing_md, |
|
|
|
radius_size = gr.themes.sizes.radius_none |
|
) |
|
) as sd_interface: |
|
|
|
gr.Markdown(opening_html) |
|
|
|
with gr.Row(): |
|
|
|
with gr.Column(scale = 1): |
|
|
|
generate_image_btn = gr.Button( |
|
value = "Generate", |
|
variant = "primary" |
|
) |
|
|
|
with gr.Group(): |
|
|
|
with gr.Row(): |
|
|
|
prompt_field = gr.Textbox( |
|
label = "Prompt (77 token limit):", |
|
value = default_prompt |
|
) |
|
|
|
with gr.Row(): |
|
|
|
negative_prompt_field = gr.Textbox( |
|
label = "Negative Prompt (77 token limit):", |
|
value = default_negative_prompt, |
|
visible = default_negative_prompt_field_visibility |
|
) |
|
|
|
with gr.Row(): |
|
|
|
negative_prompt_for_sdxl_turbo_field = gr.HTML( |
|
value = "<div style=\"padding: 10px; text-align: center; background: #fff;\">Negative prompt is not used for SDXL Turbo.</div>", |
|
visible = default_negative_prompt_for_sdxl_turbo_field_visibility |
|
) |
|
|
|
with gr.Group( |
|
visible = refiner_group_visible |
|
): |
|
|
|
with gr.Accordion( |
|
label = "Refiner (Default Config)", |
|
elem_id = "refiner_default_config_accordion_id", |
|
open = refiner_default_config_accordion_open, |
|
visible = refiner_default_config_accordion_visible |
|
) as refiner_default_config_accordion: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
|
gr.Markdown("This can be used if the image has too much noise.") |
|
|
|
with gr.Row(): |
|
|
|
refining_selection_default_config_field = gr.Radio( |
|
choices = ["Yes", "No"], |
|
value = default_refine_option, |
|
show_label = False, |
|
container = False |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_use_denoising_start_in_base_model_when_using_refiner_field = gr.Checkbox( |
|
label = "Use \"denoising_start\" value as \"denoising_end\" value in base model generation when using refiner (doesn't work yet)", |
|
value = default_use_denoising_start_in_base_model_when_using_refiner, |
|
|
|
container = True |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_base_model_output_to_refiner_is_in_latent_space_field = gr.Checkbox( |
|
label = "Base model output in latent space instead of PIL image when using refiner (doesn't work yet)", |
|
value = default_base_model_output_to_refiner_is_in_latent_space, |
|
|
|
container = True |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_denoise_start_for_default_config_field = gr.Slider( |
|
label = "Refiner denoise start %", |
|
minimum = 0.7, |
|
maximum = 0.99, |
|
value = 0.95, |
|
step = 0.01 |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
                with gr.Accordion(
                    label = "Refiner (Online Config)",
                    elem_id = "refiner_online_config_accordion_id",
                    open = refiner_online_config_accordion_open,
                    visible = refiner_online_config_accordion_visible
                ) as refiner_online_config_accordion:

                    refining_selection_automatically_selected_message_field_visible = False

                    refining_selection_online_config_normal_field_visible = True
                    refining_selection_online_config_automatically_selected_field_visible = False

                    if model_configuration_requires_refiner == 1:

                        refining_selection_automatically_selected_message_field_visible = True

                        refining_selection_online_config_normal_field_visible = False
                        refining_selection_online_config_automatically_selected_field_visible = True

                    with gr.Row():

                        refining_selection_automatically_selected_message_field = gr.Markdown(
                            value = "The online configuration you selected automatically uses the refiner.",
                            visible = refining_selection_automatically_selected_message_field_visible
                        )

                    with gr.Row():

                        refining_selection_online_config_normal_field = gr.Radio(
                            choices = ["Yes", "No"],
                            value = default_refine_option,
                            show_label = False,
                            container = False,
                            visible = refining_selection_online_config_normal_field_visible
                        )

                    with gr.Row():

                        refining_selection_online_config_automatically_selected_field = gr.Radio(
                            choices = ["Yes"],
                            value = "Yes",
                            show_label = False,
                            container = False,
                            visible = refining_selection_online_config_automatically_selected_field_visible
                        )

                    with gr.Row():

                        refining_denoise_start_for_online_config_field = gr.Slider(
                            label = "Refiner denoise start %",
                            minimum = 0.7,
                            maximum = 0.99,
                            value = 0.95,
                            step = 0.01
                        )

                    with gr.Row():

                        refining_number_of_iterations_for_online_config_field = gr.Slider(
                            label = "Refiner number of iterations",
                            minimum = 1,
                            maximum = 100,
                            value = 100,
                            step = 1
                        )
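            # Upscaler options. When enabled, the upscaler doubles the
            # dimensions of the generated image.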
            with gr.Group(
                visible = upscaler_group_visible
            ):

                with gr.Accordion(
                    label = "Upscaler",
                    elem_id = "upscaler_accordion_id",
                    open = upscaler_accordion_open,
                    visible = upscaler_group_visible
                ):

                    with gr.Row():

                        gr.Markdown("Upscale by 2x?")

                    with gr.Row():

                        upscaling_selection_field = gr.Radio(
                            choices = ["Yes", "No"],
                            value = default_upscale_option,
                            show_label = False,
                            container = False
                        )

                    with gr.Row():

                        upscaling_num_inference_steps_field = gr.Slider(
                            label = "Upscaler number of iterations",
                            minimum = 1,
                            maximum = 100,
                            value = 100,
                            step = 1
                        )

            if (
                (enable_refiner == 1) or
                (enable_upscaler == 1)
            ):

                refiner_and_upscaler_text_field = gr.HTML(
                    value = default_refiner_and_upscaler_status_text
                )
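        # Middle column: model selection and generation settings.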
        with gr.Column(scale = 1):

            with gr.Group():

                with gr.Row():

                    base_model_field = gr.Dropdown(
                        label = "Base Model:",
                        choices = default_base_model_choices_array,
                        value = default_base_model_nicely_named_value,
                        type = "index",
                        filterable = False,
                        min_width = 240,
                        interactive = True
                    )

                    model_configuration_field = gr.Dropdown(
                        label = "Configuration Type:",
                        choices = default_model_configuration_choices_array,
                        value = default_model_configuration_nicely_named_value,
                        type = "index",
                        filterable = False,
                        min_width = 240,
                        interactive = True
                    )

                with gr.Row():

                    image_width_field = gr.Slider(
                        label = "Width:",
                        minimum = 256,
                        maximum = 1024,
                        value = default_width,
                        step = width_and_height_input_slider_steps,
                        interactive = True
                    )

                    image_height_field = gr.Slider(
                        label = "Height:",
                        minimum = 256,
                        maximum = 1024,
                        value = default_height,
                        step = width_and_height_input_slider_steps,
                        interactive = True
                    )

                with gr.Row():

                    base_model_num_inference_steps_field = gr.Slider(
                        label = "Steps:",
                        minimum = 1,
                        maximum = 100,
                        value = default_base_model_base_model_num_inference_steps,
                        step = 1,
                        visible = default_base_model_num_inference_steps_field_visibility,
                        interactive = True
                    )

                with gr.Row():

                    base_model_num_inference_steps_field_for_sdxl_turbo_field = gr.Slider(
                        label = "Steps:",
                        info = "Try using only 1 or a couple of steps.",
                        minimum = 1,
                        maximum = 25,
                        value = default_base_model_base_model_num_inference_steps_for_sdxl_turbo,
                        step = 1,
                        visible = default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility,
                        interactive = True
                    )

                with gr.Row():

                    guidance_scale_field = gr.Slider(
                        label = "Guidance Scale:",
                        minimum = 1,
                        maximum = 15,
                        value = default_guidance_scale_value,
                        step = 0.25,
                        visible = default_guidance_scale_field_visibility,
                        interactive = True
                    )

                with gr.Row():

                    guidance_scale_for_sdxl_turbo_field = gr.HTML(
                        value = "<div style=\"padding: 10px; text-align: center; background: #fff;\">Guidance scale is not used for SDXL Turbo.</div>",
                        visible = default_guidance_scale_for_sdxl_turbo_field_visibility
                    )

                with gr.Row():

                    if make_seed_selection_a_textbox == 1:

                        seed_selection_option = gr.Textbox(
                            label = "Seed (0 is random; " + str(default_seed_maximum) + " max):",
                            value = "0",
                            interactive = True
                        )

                    else:

                        seed_selection_option = gr.Slider(
                            label = "Seed (0 is random):",
                            minimum = 0,
                            maximum = default_seed_maximum,
                            value = default_seed_value,
                            step = 1,
                            interactive = True
                        )

                    seed_field = seed_selection_option
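        # Right column: generated imagery and its prompt information.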
        with gr.Column(scale = 1):

            with gr.Row():

                if use_image_gallery == 1:

                    output_image_field = gr.Gallery(
                        label = "Generated Images",
                        value = [],
                        selected_index = 0,
                        elem_id = "image_gallery",
                        allow_preview = True,
                        preview = True
                    )

                else:

                    output_image_field = gr.Image(
                        label = "Generated Image",
                        type = "pil"
                    )

            with gr.Row():

                output_text_field = gr.Text(
                    label = "Prompt Information:",
                    value = "After an image is generated, its generation information will appear here." + additional_prompt_info_html,
                    show_copy_button = True,
                    lines = 4
                )

            with gr.Row():

                prompt_truncated_field = gr.HTML(
                    value = "",
                    visible = False
                )

            global cancel_image_btn

            if enable_close_command_prompt_button == 1:

                cancel_image_btn = gr.Button(
                    value = "Close Command Prompt / Cancel",
                    variant = "stop"
                )

                gr.Markdown("Closing the command prompt will cancel any images in the process of being created. You will need to launch it again to create more images.")
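    # Optional information section displayed below the interface.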
    if len(ending_html) > 0:

        with gr.Accordion(
            label = "Information",
            elem_id = "information_section_id",
            open = True
        ):

            gr.Markdown(ending_html)
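    # Event handlers. Changing the base model updates the configuration list
    # and toggles the fields that only apply to certain models, such as the
    # SDXL Turbo notices.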
    base_model_field.change(
        fn = base_model_field_update_function,
        inputs = [
            base_model_field
        ],
        outputs = [
            model_configuration_field,
            negative_prompt_field,
            negative_prompt_for_sdxl_turbo_field,
            base_model_num_inference_steps_field,
            base_model_num_inference_steps_field_for_sdxl_turbo_field,
            guidance_scale_field,
            guidance_scale_for_sdxl_turbo_field
        ],
        queue = None,
        show_progress = "hidden"
    )
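    # Changing the configuration type can also swap which refiner accordion is
    # shown, since some online configurations force the refiner on.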
    model_configuration_field.change(
        fn = model_configuration_field_update_function,
        inputs = [
            base_model_field,
            model_configuration_field
        ],
        outputs = [
            negative_prompt_field,
            negative_prompt_for_sdxl_turbo_field,
            base_model_num_inference_steps_field,
            base_model_num_inference_steps_field_for_sdxl_turbo_field,
            guidance_scale_field,
            guidance_scale_for_sdxl_turbo_field,
            refiner_default_config_accordion,
            refiner_online_config_accordion,
            refining_selection_automatically_selected_message_field,
            refining_selection_online_config_normal_field,
            refining_selection_online_config_automatically_selected_field
        ],
        queue = None,
        show_progress = "hidden"
    )
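    # Selecting a thumbnail in the gallery shows the generation details for
    # that image.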
    if use_image_gallery == 1:

        output_image_field.select(
            fn = update_prompt_info_from_gallery,
            inputs = None,
            outputs = [
                output_image_field,
                output_text_field
            ]
        )
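    # Any change to a refiner or upscaler selection refreshes the status text
    # that summarizes what will run during generation.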
    if (
        (enable_refiner == 1) or
        (enable_upscaler == 1)
    ):

        triggers_array = []

        if enable_refiner == 1:

            triggers_array.extend([
                refining_selection_default_config_field.change,
                refining_selection_online_config_normal_field.change,
                refining_selection_online_config_automatically_selected_field.change
            ])

        if enable_upscaler == 1:

            triggers_array.extend([
                upscaling_selection_field.change
            ])

        gr.on(
            triggers = triggers_array,
            fn = update_refiner_and_upscaler_status_function,
            inputs = [
                base_model_field,
                model_configuration_field,
                refining_selection_default_config_field,
                refining_selection_online_config_normal_field,
                refining_selection_online_config_automatically_selected_field,
                upscaling_selection_field
            ],
            outputs = [
                refiner_and_upscaler_text_field
            ],
            queue = None,
            show_progress = "hidden"
        )
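    # The main generation event. The returned event object is kept so the
    # cancel button can abort an in-progress generation.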
    generate_image_btn_click_event = generate_image_btn.click(
        fn = create_image_function,
        inputs = [
            base_model_field,
            model_configuration_field,
            prompt_field,
            negative_prompt_field,
            image_width_field,
            image_height_field,
            guidance_scale_field,
            base_model_num_inference_steps_field,
            base_model_num_inference_steps_field_for_sdxl_turbo_field,
            seed_field,
            refining_selection_online_config_normal_field,
            refining_selection_online_config_automatically_selected_field,
            refining_use_denoising_start_in_base_model_when_using_refiner_field,
            refining_base_model_output_to_refiner_is_in_latent_space_field,
            refining_denoise_start_for_online_config_field,
            refining_number_of_iterations_for_online_config_field,
            upscaling_selection_field,
            upscaling_num_inference_steps_field
        ],
        outputs = [
            output_image_field,
            output_text_field,
            prompt_truncated_field
        ]
    )
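    # The cancel button aborts the generation event ("cancels") and runs
    # cancel_image_processing, which closes the command prompt.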
    if enable_close_command_prompt_button == 1:

        cancel_image_btn.click(
            fn = cancel_image_processing,
            inputs = None,
            outputs = None,
            cancels = [generate_image_btn_click_event]
        )
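# Queue requests so image generations run one at a time, then launch the app.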
sd_interface.queue(
    max_size = 20
)

inbrowser = False

if auto_open_browser == 1:

    inbrowser = True

sd_interface.launch(
    inbrowser = inbrowser,
    share = None,
    show_api = False,
    quiet = True,
    show_error = True,
    max_threads = 1
)
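# Note: launch() blocks by default, so the load() call below is only reached
# after the server stops.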
sd_interface.load(
    scroll_to_output = False,
    show_progress = "full"
)