from PIL import Image
import streamlit as st
from streamlit_drawable_canvas import st_canvas
from streamlit_lottie import st_lottie
from streamlit_option_menu import option_menu
import requests
import cv2
import einops
import numpy as np
import torch
import random
from huggingface_hub import hf_hub_download
from pytorch_lightning import seed_everything
from annotator.util import resize_image, HWC3
from annotator.hed import HEDdetector, nms
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler
st.set_page_config(
page_title="ControlNet",
page_icon="🖥️",
layout="wide",
initial_sidebar_state="expanded"
)
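# ControlNet's low-VRAM mode: when True, model.low_vram_shift() moves weights between CPU and GPU around sampling.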
save_memory = False
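# Download the scribble ControlNet checkpoint from the Hugging Face Hub and cache the loaded model across reruns.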
@st.experimental_singleton
def load_model():
model_path = hf_hub_download('lllyasviel/ControlNet', 'models/control_sd15_scribble.pth')
model = create_model('./models/cldm_v15.yaml').cpu()
if torch.cuda.is_available():
model.load_state_dict(load_state_dict(model_path, location='cuda'))
model = model.cuda()
else:
model.load_state_dict(load_state_dict(model_path, location='cpu'))
return model
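# Scribble-to-image pipeline: run HED edge detection on the input, thin and binarize the edges into a scribble map, then sample with ControlNet-guided DDIM.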
def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
with torch.no_grad():
input_image = HWC3(input_image[:, :, 0])
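# Detect soft edges with HED at the requested detection resolution.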
detected_map = apply_hed(resize_image(input_image, detect_resolution))
detected_map = HWC3(detected_map)
img = resize_image(input_image, image_resolution)
H, W, C = img.shape
detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
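# Thin the HED edges with non-maximum suppression, blur, and threshold to a binary scribble map.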
detected_map = nms(detected_map, 127, 3.0)
detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
detected_map[detected_map > 4] = 255
detected_map[detected_map < 255] = 0
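# Normalize the scribble map to [0, 1], replicate it for each sample, and rearrange to channels-first (b, c, h, w).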
if torch.cuda.is_available():
control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
else:
control = torch.from_numpy(detected_map.copy()).float() / 255.0
control = torch.stack([control for _ in range(num_samples)], dim=0)
control = einops.rearrange(control, 'b h w c -> b c h w').clone()
if seed == -1:
seed = random.randint(0, 2147483647)
seed_everything(seed)
if save_memory:
model.low_vram_shift(is_diffusing=False)
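# Conditioning: the scribble control map plus the text prompt; in guess mode the unconditional branch receives no control.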
cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
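# Latent space is 4 channels at 1/8 of the output resolution (Stable Diffusion's VAE downsampling factor).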
shape = (4, H // 8, W // 8)
if save_memory:
model.low_vram_shift(is_diffusing=True)
model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
shape, cond, verbose=False, eta=eta,
unconditional_guidance_scale=scale,
unconditional_conditioning=un_cond)
if save_memory:
model.low_vram_shift(is_diffusing=False)
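# Decode the sampled latents with the VAE and convert to uint8 HWC images.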
x_samples = model.decode_first_stage(samples)
x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
results = [x_samples[i] for i in range(num_samples)]
# return [255 - detected_map] + results
return results
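# Fetch a Lottie animation for the sidebar and memoize the response.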
@st.experimental_memo
def load_lottieurl(url: str):
r = requests.get(url)
if r.status_code != 200:
return None
return r.json()
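# Create the model (cached), DDIM sampler, and HED edge detector used by process().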
model = load_model()
ddim_sampler = DDIMSampler(model)
apply_hed = HEDdetector()
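# Streamlit UI: sidebar navigation with two workflows, drawing on a canvas or uploading a scribble.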
def main():
lottie_penguin = load_lottieurl('https://assets5.lottiefiles.com/datafiles/B8q1AyJ5t1wb5S8a2ggTqYNxS1WiKN9mjS76TBpw/articulation/articulation.json')
st.header('Draw and generate image with ControlNet')
with st.sidebar:
st_lottie(lottie_penguin, height=200)
choose = option_menu("Generate image", ["Canvas", "Upload"],
icons=['file-plus', 'cloud-upload'],
menu_icon="infinity", default_index=0,
styles={
"container": {"padding": ".0rem", "font-size": "14px"},
"nav-link-selected": {"color": "#000000", "font-size": "16px"},
}
)
st.sidebar.markdown(
"""
___
<p style='text-align: center'>
ControlNet lets a pretrained diffusion model support additional input conditions, and training it is as fast as fine-tuning the model
<br/>
<a href="https://arxiv.org/abs/2302.05543" target="_blank">Article</a>
</p>
<p style='text-align: center; font-size: 14px;'>
Space created by
<br/>
<a href="https://www.linkedin.com/in/vumichien/" target="_blank">Chien Vu</a>
<br/>
<img src='https://visitor-badge.glitch.me/badge?page_id=Canvas.ControlNet' alt='visitor badge'>
</p>
""",
unsafe_allow_html=True,
)
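# Upload workflow: generate an image from an uploaded scribble.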
if choose == 'Upload':
st.info("Upload your own scribbles, fill the prompt and enjoy")
with st.form(key='generate_form'):
upload_file = st.file_uploader("Upload image", type=["png", "jpg", "jpeg"])
prompt = st.text_input(label="Prompt", placeholder='Type your instruction')
col11, col12 = st.columns(2)
with st.expander('Advanced options', expanded=False):
col21, col22 = st.columns(2)
with col21:
image_resolution = st.slider(label="Image Resolution", min_value=256, max_value=512, value=512, step=256)
strength = st.slider(label="Control Strength", min_value=0.0, max_value=2.0, value=1.0, step=0.01)
guess_mode = st.checkbox(label='Guess Mode', value=False)
detect_resolution = st.slider(label="HED Resolution", min_value=128, max_value=1024, value=512, step=1)
ddim_steps = st.slider(label="Steps", min_value=1, max_value=100, value=20, step=1)
with col22:
scale = st.slider(label="Guidance Scale", min_value=0.1, max_value=30.0, value=9.0, step=0.1)
seed = st.number_input(label="Seed", min_value=-1, value=-1)
eta = st.number_input(label="eta (DDIM)", value=0.0)
a_prompt = st.text_input(label="Added Prompt", value='best quality, extremely detailed')
n_prompt = st.text_input(label="Negative Prompt",
value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
generate_button = st.form_submit_button(label='Generate Image')
if upload_file:
input_image = np.asarray(Image.open(upload_file).convert("RGB"))
if generate_button and upload_file:
with st.spinner(text="It may take up to 1 minute under high load. Generating images..."):
results = process(input_image, prompt, a_prompt, n_prompt, 1, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta)
H, W, C = input_image.shape
output_image = cv2.resize(results[0], (W, H), interpolation=cv2.INTER_AREA)
col11.image(input_image, channels='RGB', width=None, clamp=False, caption='Input image')
col12.image(output_image, channels='RGB', width=None, clamp=False, caption='Generated image')
elif choose == 'Canvas':
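# Canvas workflow: draw a scribble in the browser, then generate.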
st.info("Step 1a. Draw your image with canvas"
" \n Step 1b. You also can upload image directly by select Upload in side bar"
" \n Step 2. Input prompt to instruct model (You can also change some config with advanced option if need)"
" \n Step 3. Generate and enjoy")
with st.form(key='canvas_generate_form'):
# Specify canvas parameters in application
stroke_width = st.sidebar.slider("Stroke width: ", 1, 25, 3)
stroke_color = st.sidebar.color_picker("Stroke color hex: ")
bg_color = st.sidebar.color_picker("Background color hex: ", "#eee")
realtime_update = st.sidebar.checkbox("Update in realtime", True)
# Create a canvas component
col31, col32 = st.columns(2)
with col31:
canvas_result = st_canvas(
fill_color="rgba(255, 165, 0, 0.3)", # Fixed fill color with some opacity
stroke_width=stroke_width,
stroke_color=stroke_color,
background_color=bg_color,
background_image=None,
update_streamlit=realtime_update,
height=512,
width=512,
drawing_mode="freedraw",
point_display_radius=0,
key="canvas",
)
prompt = st.text_input(label="Prompt", placeholder='Type your instruction')
with st.expander('Advanced options', expanded=False):
col41, col42 = st.columns(2)
with col41:
image_resolution = st.slider(label="Image Resolution", min_value=256, max_value=512, value=512, step=256)
strength = st.slider(label="Control Strength", min_value=0.0, max_value=2.0, value=1.0, step=0.01)
guess_mode = st.checkbox(label='Guess Mode', value=False)
detect_resolution = st.slider(label="HED Resolution", min_value=128, max_value=1024, value=512, step=1)
ddim_steps = st.slider(label="Steps", min_value=1, max_value=100, value=20, step=1)
with col42:
scale = st.slider(label="Guidance Scale", min_value=0.1, max_value=30.0, value=9.0, step=0.1)
seed = st.number_input(label="Seed", min_value=-1, value=-1)
eta = st.number_input(label="eta (DDIM)", value=0.0)
a_prompt = st.text_input(label="Added Prompt", value='best quality, extremely detailed')
n_prompt = st.text_input(label="Negative Prompt",
value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
# Generate image from canvas
generate_button = st.form_submit_button(label='Generate Image')
if generate_button:
if canvas_result.image_data is not None:
input_image = canvas_result.image_data
with st.spinner(text="It may take up to 1 minute under high load. Generating images..."):
results = process(input_image, prompt, a_prompt, n_prompt, 1, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta)
H, W, C = input_image.shape
output_image = cv2.resize(results[0], (W, H), interpolation=cv2.INTER_AREA)
col32.image(output_image, channels='RGB', width=None, clamp=True, caption='Generated image')
# Image gallery
with st.expander('Image gallery', expanded=True):
col01, col02 = st.columns(2)
with col01:
st.image('demo/example_1.jpg', caption="Sports car")
st.image('demo/example_2.jpg', caption="Dog house")
st.image('demo/example_3.jpg', caption="Guitar")
with col02:
st.image('demo/example_4.jpg', caption="Sports car")
st.image('demo/example_5.jpg', caption="Dog house")
st.image('demo/example_6.jpg', caption="Guitar")
if __name__ == '__main__':
main()