import gradio as gr
import numpy as np
# import spaces  # [uncomment to use ZeroGPU]
import torch
from diffusers import (
    StableDiffusionXLImg2ImgPipeline,
    DPMSolverMultistepScheduler,
    AutoencoderTiny,
    StableDiffusionXLControlNetPipeline,
    ControlNetModel,
)
from diffusers.utils import load_image
from diffusers.image_processor import IPAdapterMaskProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
# fp16 on GPU; the fp16 weight variants loaded below effectively require CUDA
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

processor_mask = IPAdapterMaskProcessor()

controlnets = [
    ControlNetModel.from_pretrained(
        "diffusers/controlnet-depth-sdxl-1.0", variant="fp16", use_safetensors=True, torch_dtype=torch.float16
    ),
    ControlNetModel.from_pretrained(
        "diffusers/controlnet-canny-sdxl-1.0", variant="fp16", use_safetensors=True, torch_dtype=torch.float16
    ),
]
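# NOTE: depth comes first, canny second; images_CN and the per-ControlNet
# control_guidance_* / controlnet_conditioning_scale lists in
# ourhood_inference below rely on this ordering.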

### load pipelines
pipe_CN = StableDiffusionXLControlNetPipeline.from_pretrained(
    "SG161222/RealVisXL_V5.0",
    controlnet=controlnets,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)
# tiny VAE for faster decoding
pipe_CN.vae = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", torch_dtype=torch.float16)
pipe_CN.scheduler = DPMSolverMultistepScheduler.from_pretrained(
    "SG161222/RealVisXL_V5.0", subfolder="scheduler", use_karras_sigmas=True
)
pipe_CN.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
pipe_CN.to(device)

### load LoRA weights
pipe_CN.load_lora_weights(
    "Tonioesparza/ourhood_training_dreambooth_lora_2_0",
    weight_name="pytorch_lora_weights.safetensors",
    adapter_name="ourhood",
)
### pipe_CN.set_adapters(['ourhood'], [0.98])
pipe_CN.fuse_lora()  # bake the LoRA into the base weights

# img2img refiner shares the text encoder and VAE with the base pipeline
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=pipe_CN.text_encoder_2,
    vae=pipe_CN.vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)
refiner.to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

# @spaces.GPU  # [uncomment to use ZeroGPU]
def ourhood_inference(prompt: str, num_inference_steps: int, scaffold: int, seed: int):
    # each scaffold (perspective) bundles an IP-Adapter mask plus depth and
    # canny conditioning images rendered from the same camera angle
    scaff_dic = {
        1: {
            'mask1': "https://huggingface.co/Tonioesparza/ourhood_training_dreambooth_lora_2_0/resolve/main/mask_in_square_2.png",
            'depth_image': "https://huggingface.co/Tonioesparza/ourhood_training_dreambooth_lora_2_0/resolve/main/mask_depth_noroof_square.png",
            'canny_image': "https://huggingface.co/Tonioesparza/ourhood_training_dreambooth_lora_2_0/resolve/main/mask_depth_solo_square.png",
        },
        2: {
            'mask1': "https://huggingface.co/Tonioesparza/ourhood_training_dreambooth_lora_2_0/resolve/main/mask_in_C.png",
            'depth_image': "https://huggingface.co/Tonioesparza/ourhood_training_dreambooth_lora_2_0/resolve/main/depth_C.png",
            'canny_image': "https://huggingface.co/Tonioesparza/ourhood_training_dreambooth_lora_2_0/resolve/main/canny_C_solo.png",
        },
        3: {
            'mask1': "https://huggingface.co/Tonioesparza/ourhood_training_dreambooth_lora_2_0/resolve/main/mask_in_B.png",
            'depth_image': "https://huggingface.co/Tonioesparza/ourhood_training_dreambooth_lora_2_0/resolve/main/depth_B.png",
            'canny_image': "https://huggingface.co/Tonioesparza/ourhood_training_dreambooth_lora_2_0/resolve/main/canny_B_solo.png",
        },
    }
    ### mask init
    output_height = 1024
    output_width = 1024
    mask1 = load_image(scaff_dic[scaffold]['mask1'])
    masks = processor_mask.preprocess([mask1], height=output_height, width=output_width)
    masks = [masks.reshape(1, masks.shape[0], masks.shape[2], masks.shape[3])]
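    # after preprocess + reshape, each tensor is [1, images_per_adapter, H, W];
    # the outer list holds one tensor per loaded IP-Adapter, mirroring the
    # nested ip_images list built below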
    ### ip_images init
    ip_img_1 = load_image("https://huggingface.co/Tonioesparza/ourhood_training_dreambooth_lora_2_0/resolve/main/25hours-hotel_25h_IndreBy_StephanLemke_Sauna1-1024x768.png")
    ip_images = [[ip_img_1]]
    pipe_CN.set_ip_adapter_scale([[0.5]])
    n_steps = num_inference_steps
    ### precomputed depth and canny conditioning images
    depth_image = load_image(scaff_dic[scaffold]['depth_image'])
    canny_image = load_image(scaff_dic[scaffold]['canny_image'])
    images_CN = [depth_image, canny_image]
    ### inference
    generator = torch.Generator(device=device).manual_seed(seed)
    results = pipe_CN(
        prompt=prompt,
        ip_adapter_image=ip_images,
        negative_prompt="deformed, ugly, wrong proportion, low res, worst quality, low quality,text,watermark",
        generator=generator,
        num_inference_steps=n_steps,
        num_images_per_prompt=1,
        denoising_end=0.95,
        image=images_CN,
        output_type="latent",
        control_guidance_start=[0.0, 0.35],
        control_guidance_end=[0.35, 1.0],
        controlnet_conditioning_scale=[0.5, 1.0],
        cross_attention_kwargs={"ip_adapter_masks": masks},
    ).images[0]
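
    # denoising_end=0.95 above / denoising_start=0.95 below split the schedule:
    # the base pipeline stops at 95% and hands its latents to the refiner for
    # the final steps (the SDXL base/refiner ensemble-of-experts handoff)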
    image = refiner(
        prompt=prompt,
        generator=generator,
        num_inference_steps=num_inference_steps,
        denoising_start=0.95,
        image=results,
    ).images[0]
    return image
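
# Minimal direct-call sketch (hypothetical values; the Gradio UI below is the
# real entry point):
#   image = ourhood_inference("A photograph, of an Ourhood privacy booth ...",
#                             num_inference_steps=35, scaffold=1, seed=42)
#   image.save("ourhood_test.png")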

examples = [
    "A photograph, of an Ourhood privacy booth, front view, in a warehouse eventspace environment, in the style of event photography, silken oak frame, checkered warm grey exterior fabric, checkered warm grey interior fabric, curtains, diner seating, pillows",
    "A photograph, of an Ourhood privacy booth, side view, in a warehouse eventspace environment, in the style of event photography, silken oak frame, taupe exterior fabric",
    "A photograph, of an Ourhood privacy booth, close-up, in a HolmrisB8_HQ office environment, in the style of makeshift photoshoot, silken oak frame, taupe exterior fabric, taupe interior fabric, pillows",
    "A rendering, of an Ourhood privacy booth, front view, in a Nordic atrium environment, in the style of Keyshot, silken oak frame, taupe exterior fabric, taupe interior fabric, diner seating",
]
css="""
#col-container {
margin: 0 auto;
max-width: 640px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# HB8-Ourhood inference test")
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            # maps to the scaffold argument: which precomputed mask/depth/canny set to use
            perspective = gr.Slider(
                label="perspective",
                minimum=1,
                maximum=3,
                step=1,
                value=1,
            )
            seed = gr.Slider(
                label="tracking number (seed)",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            with gr.Row():
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=35,
                    maximum=50,
                    step=1,
                    value=35,  # replace with defaults that work for your model
                )
        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=ourhood_inference,
        inputs=[prompt, num_inference_steps, perspective, seed],
        outputs=[result],
    )

demo.queue().launch()