# aai/tabs/images/events.py
import spaces
import gradio as gr
from huggingface_hub import ModelCard
from config import Config
from .models import *
from .handlers import gen_img
# Common
def update_model_options(model):
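    """Return Gradio component updates that reconfigure the UI for the selected base model (Flux or SDXL)."""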
for m in Config.IMAGES_MODELS:
if m['repo_id'] == model:
if m['loader'] == 'flux':
return (
gr.update( # negative_prompt
visible=False
),
gr.update( # lora_gallery
value=[(lora['image'], lora['title']) for lora in Config.IMAGES_LORAS_FLUX]
),
gr.update( # embeddings_accordion
visible=False
),
gr.update( # scribble_tab
visible=False
),
gr.update( # scheduler
value='fm_euler'
),
gr.update( # image_clip_skip
visible=False
),
gr.update( # image_guidance_scale
value=3.5
)
)
elif m['loader'] == 'sdxl':
return (
gr.update( # negative_prompt
visible=True
),
gr.update( # lora_gallery
value=[(lora['image'], lora['title']) for lora in Config.IMAGES_LORAS_SDXL]
),
gr.update( # embeddings_accordion
visible=True
),
gr.update( # scribble_tab
visible=True
),
gr.update( # scheduler
value='dpmpp_2m_sde_k'
),
gr.update( # image_clip_skip
visible=True
),
gr.update( # image_guidance_scale
value=7.0
)
)
def update_fast_generation(model, fast_generation):
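    """Adjust inference steps and guidance scale when fast generation is toggled for the selected model."""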
for m in Config.IMAGES_MODELS:
if m['repo_id'] == model:
if m['loader'] == 'flux':
if fast_generation:
return (
gr.update( # image_num_inference_steps
value=8
),
gr.update( # image_guidance_scale
value=3.5
)
)
else:
return (
gr.update( # image_num_inference_steps
value=20
),
gr.update( # image_guidance_scale
value=3.5
)
)
elif m['loader'] == 'sdxl':
if fast_generation:
return (
gr.update( # image_num_inference_steps
value=8
),
gr.update( # image_guidance_scale
value=1.0
)
)
else:
return (
gr.update( # image_num_inference_steps
value=20
),
gr.update( # image_guidance_scale
value=7.0
)
)
# Loras
def selected_lora_from_gallery(evt: gr.SelectData):
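    """Store the index of the LoRA clicked in the gallery."""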
return (
gr.update(
value=evt.index
)
)
def update_selected_lora(custom_lora):
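    """Load a custom LoRA model card from the Hub and render an info panel with its preview image and trigger word."""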
link = custom_lora.split("/")
if len(link) == 2:
model_card = ModelCard.load(custom_lora)
trigger_word = model_card.data.get("instance_prompt", "")
image_url = f"""https://huggingface.co/{custom_lora}/resolve/main/{model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)}"""
custom_lora_info_css = """
<style>
.custom-lora-info {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif;
background: linear-gradient(135deg, #4a90e2, #7b61ff);
color: white;
padding: 16px;
border-radius: 8px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
margin: 16px 0;
}
.custom-lora-header {
font-size: 18px;
font-weight: 600;
margin-bottom: 12px;
}
.custom-lora-content {
display: flex;
align-items: center;
background-color: rgba(255, 255, 255, 0.1);
border-radius: 6px;
padding: 12px;
}
.custom-lora-image {
width: 80px;
height: 80px;
object-fit: cover;
border-radius: 6px;
margin-right: 16px;
}
.custom-lora-text h3 {
margin: 0 0 8px 0;
font-size: 16px;
font-weight: 600;
}
.custom-lora-text small {
font-size: 14px;
opacity: 0.9;
}
.custom-trigger-word {
background-color: rgba(255, 255, 255, 0.2);
padding: 2px 6px;
border-radius: 4px;
font-weight: 600;
}
</style>
"""
custom_lora_info_html = f"""
<div class="custom-lora-info">
<div class="custom-lora-header">Custom LoRA: {custom_lora}</div>
<div class="custom-lora-content">
<img class="custom-lora-image" src="{image_url}" alt="LoRA preview">
<div class="custom-lora-text">
<h3>{link[1].replace("-", " ").replace("_", " ")}</h3>
<small>{"Using: <span class='custom-trigger-word'>"+trigger_word+"</span> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}</small>
</div>
</div>
</div>
"""
custom_lora_info_html = f"{custom_lora_info_css}{custom_lora_info_html}"
return (
gr.update( # selected_lora
value=custom_lora,
),
gr.update( # custom_lora_info
value=custom_lora_info_html,
visible=True
)
)
else:
return (
gr.update( # selected_lora
value=custom_lora,
),
gr.update( # custom_lora_info
                value="",  # link is malformed, so there is no info panel to show
visible=False
)
)
def update_lora_sliders(enabled_loras):
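    """Show a weight slider and remove button for each enabled LoRA (up to 6) and hide the unused ones."""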
sliders = []
remove_buttons = []
for lora in enabled_loras:
sliders.append(
gr.update(
label=lora.get("repo_id", ""),
info=f"Trigger Word: {lora.get('trigger_word', '')}",
visible=True,
interactive=True
)
)
remove_buttons.append(
gr.update(
visible=True,
interactive=True
)
)
if len(sliders) < 6:
for i in range(len(sliders), 6):
sliders.append(
gr.update(
visible=False
)
)
remove_buttons.append(
gr.update(
visible=False
)
)
return *sliders, *remove_buttons
def remove_from_enabled_loras(enabled_loras, index):
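    """Remove the LoRA at the given index from the enabled list."""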
enabled_loras.pop(index)
return (
gr.update(
value=enabled_loras
)
)
def add_to_enabled_loras(model, selected_lora, enabled_loras):
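    """Add a LoRA to the enabled list, either by gallery index or by a custom `user/repo` id."""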
for m in Config.IMAGES_MODELS:
if m['repo_id'] == model:
lora_data = []
if m['loader'] == 'flux':
lora_data = Config.IMAGES_LORAS_FLUX
elif m['loader'] == 'sdxl':
lora_data = Config.IMAGES_LORAS_SDXL
try:
selected_lora = int(selected_lora)
                if selected_lora >= 0:  # selected_lora is an index into the LoRA gallery
lora_info = lora_data[selected_lora]
enabled_loras.append({
"repo_id": lora_info["repo"],
"trigger_word": lora_info["trigger_word"]
})
except ValueError:
link = selected_lora.split("/")
if len(link) == 2:
model_card = ModelCard.load(selected_lora)
trigger_word = model_card.data.get("instance_prompt", "")
enabled_loras.append({
"repo_id": selected_lora,
"trigger_word": trigger_word
})
return (
gr.update( # selected_lora
value=""
),
gr.update( # custom_lora_info
value="",
visible=False
),
gr.update( # enabled_loras
value=enabled_loras
)
)
# Custom Embedding
def update_custom_embedding(custom_embedding):
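    """Load a custom embedding model card from the Hub and render an info panel with its preview image and trigger word."""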
link = custom_embedding.split("/")
if len(link) == 2:
model_card = ModelCard.load(custom_embedding)
trigger_word = model_card.data.get("instance_prompt", "")
image_url = f"""https://huggingface.co/{custom_embedding}/resolve/main/{model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)}"""
custom_embedding_info_css = """
<style>
.custom-embedding-info {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif;
background: linear-gradient(135deg, #4a90e2, #7b61ff);
color: white;
padding: 16px;
border-radius: 8px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
margin: 16px 0;
}
.custom-embedding-header {
font-size: 18px;
font-weight: 600;
margin-bottom: 12px;
}
.custom-embedding-content {
display: flex;
align-items: center;
background-color: rgba(255, 255, 255, 0.1);
border-radius: 6px;
padding: 12px;
}
.custom-embedding-image {
width: 80px;
height: 80px;
object-fit: cover;
border-radius: 6px;
margin-right: 16px;
}
.custom-embedding-text h3 {
margin: 0 0 8px 0;
font-size: 16px;
font-weight: 600;
}
.custom-embedding-text small {
font-size: 14px;
opacity: 0.9;
}
.custom-trigger-word {
background-color: rgba(255, 255, 255, 0.2);
padding: 2px 6px;
border-radius: 4px;
font-weight: 600;
}
</style>
"""
custom_embedding_info_html = f"""
<div class="custom-embedding-info">
<div class="custom-embedding-header">Custom Embedding: {custom_embedding}</div>
<div class="custom-embedding-content">
<img class="custom-embedding-image" src="{image_url}" alt="Embedding preview">
<div class="custom-embedding-text">
<h3>{link[1].replace("-", " ").replace("_", " ")}</h3>
<small>{"Using: <span class='custom-trigger-word'>"+trigger_word+"</span> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}</small>
</div>
</div>
</div>
"""
custom_embedding_info_html = f"{custom_embedding_info_css}{custom_embedding_info_html}"
return gr.update(value=custom_embedding_info_html, visible=True)
else:
return gr.update(value="", visible=False)
def add_to_embeddings(custom_embedding, enabled_embeddings):
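    """Validate a custom `user/repo` embedding id and append it to the enabled embeddings."""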
link = custom_embedding.split("/")
if len(link) == 2:
if ModelCard.load(custom_embedding):
enabled_embeddings.append(custom_embedding)
return (
gr.update( # custom_embedding
value=""
),
gr.update( # custom_embedding_info
value="",
visible=False
),
gr.update( # enabled_embeddings
value=enabled_embeddings
)
)
def update_enabled_embeddings_list(enabled_embeddings):
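    """Refresh the embeddings checklist so it offers (and selects) the currently enabled embeddings."""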
return gr.update( # enabled_embeddings_list
value=enabled_embeddings,
choices=enabled_embeddings
)
def update_enabled_embeddings(enabled_embeddings_list):
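    """Sync the enabled embeddings state with the checklist selection."""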
return gr.update( # enabled_embeddings
value=enabled_embeddings_list
)
# Generate Image
@spaces.GPU(duration=75)
def generate_image(
model, prompt, negative_prompt, fast_generation, enabled_loras, enabled_embeddings,
lora_slider_0, lora_slider_1, lora_slider_2, lora_slider_3, lora_slider_4, lora_slider_5, # type: ignore
img2img_image, inpaint_image, canny_image, pose_image, depth_image, scribble_image, # type: ignore
img2img_strength, inpaint_strength, canny_strength, pose_strength, depth_strength, scribble_strength, # type: ignore
resize_mode,
scheduler, image_height, image_width, image_num_images_per_prompt, # type: ignore
image_num_inference_steps, image_clip_skip, image_guidance_scale, image_seed, # type: ignore
refiner, vae,
progress=gr.Progress(track_tqdm=True)
):
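    """Build the request object from the UI inputs and run image generation on the GPU."""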
try:
progress(0, "Configuring arguments...")
base_args = {
"model": model,
"prompt": prompt,
# "negative_prompt": negative_prompt,
"fast_generation": fast_generation,
"loras": None,
# "embeddings": None,
"resize_mode": resize_mode,
"scheduler": scheduler,
"height": int(image_height),
"width": int(image_width),
"num_images_per_prompt": float(image_num_images_per_prompt),
"num_inference_steps": float(image_num_inference_steps),
# "clip_skip": None,
"guidance_scale": image_guidance_scale,
"seed": int(image_seed),
"refiner": refiner,
"vae": vae,
"controlnet_config": None,
}
base_args = BaseReq(**base_args)
if len(enabled_loras) > 0:
base_args.loras = []
for enabled_lora, slider in zip(enabled_loras, [lora_slider_0, lora_slider_1, lora_slider_2, lora_slider_3, lora_slider_4, lora_slider_5]):
if enabled_lora['repo_id']:
base_args.loras.append({
"repo_id": enabled_lora['repo_id'],
"weight": slider
})
        # Load SDXL related args (model is a repo_id string, so look up its config entry)
        model_config = next((m for m in Config.IMAGES_MODELS if m['repo_id'] == model), None)
        if model_config is not None:
            if model_config['loader'] == 'sdxl':
                base_args.negative_prompt = negative_prompt
                base_args.clip_skip = image_clip_skip
if len(enabled_embeddings) > 0:
base_args.embeddings = enabled_embeddings
image = None
mask_image = None
strength = None
if img2img_image:
image = img2img_image
strength = float(img2img_strength)
base_args = BaseImg2ImgReq(
**base_args.__dict__,
image=image,
strength=strength
)
elif inpaint_image:
image = inpaint_image['background'] if not all(pixel == (0, 0, 0) for pixel in list(inpaint_image['background'].getdata())) else None
mask_image = inpaint_image['layers'][0] if image else None
strength = float(inpaint_strength)
if image and mask_image:
base_args = BaseInpaintReq(
**base_args.__dict__,
image=image,
mask_image=mask_image,
strength=strength
)
elif any([canny_image, pose_image, depth_image]):
base_args.controlnet_config = ControlNetReq(
controlnets=[],
control_images=[],
controlnet_conditioning_scale=[]
)
if canny_image:
base_args.controlnet_config.controlnets.append("canny")
base_args.controlnet_config.control_images.append(canny_image)
base_args.controlnet_config.controlnet_conditioning_scale.append(float(canny_strength))
if pose_image:
base_args.controlnet_config.controlnets.append("pose")
base_args.controlnet_config.control_images.append(pose_image)
base_args.controlnet_config.controlnet_conditioning_scale.append(float(pose_strength))
if depth_image:
base_args.controlnet_config.controlnets.append("depth")
base_args.controlnet_config.control_images.append(depth_image)
base_args.controlnet_config.controlnet_conditioning_scale.append(float(depth_strength))
            if model_config is not None and model_config['loader'] == 'sdxl' and scribble_image:
base_args.controlnet_config.controlnets.append("scribble")
base_args.controlnet_config.control_images.append(scribble_image)
base_args.controlnet_config.controlnet_conditioning_scale.append(float(scribble_strength))
else:
base_args = BaseReq(**base_args.__dict__)
return gr.update(
value=gen_img(base_args, progress),
interactive=True
)
except Exception as e:
raise gr.Error(f"Error: {e}") from e