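# Gradio demo for LinFusion: text-to-image, image-to-image, and IP-Adapter
# generation on Stable Diffusion v1-family backbones (DreamShaper-8,
# RealisticVision-v4.0, SD-v1.4).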
import gradio as gr
import numpy as np
import random
import spaces
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
import torch
from src.linfusion import LinFusion
device = "cuda" if torch.cuda.is_available() else "cpu"
all_model_id = {
    "DreamShaper-8": "Lykon/dreamshaper-8",
    "RealisticVision-v4.0": "SG161222/Realistic_Vision_V4.0_noVAE",
    "SD-v1.4": "CompVis/stable-diffusion-v1-4"
}
if torch.cuda.is_available():
    torch_dtype = torch.float16
else:
    torch_dtype = torch.float32
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
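
# Pre-load one text-to-image, one IP-Adapter, and one image-to-image pipeline
# per backbone, patching each of them with LinFusion.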
pipes = {}
for model_id, repo_id in all_model_id.items():
    pipes[model_id + '_t2i'] = StableDiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype)
    LinFusion.construct_for(pipes[model_id + '_t2i'])
    pipes[model_id + '_ip_adapter'] = StableDiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype)
    pipes[model_id + '_ip_adapter'].load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin")
    LinFusion.construct_for(pipes[model_id + '_ip_adapter'])
    pipes[model_id + '_i2i'] = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype)
    LinFusion.construct_for(pipes[model_id + '_i2i'])
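
# Text-to-image: run the selected backbone's LinFusion-patched t2i pipeline.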
@spaces.GPU
def infer_t2i(model, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    pipe = pipes[model + '_t2i'].to(device)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed
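
# Image-to-image: the uploaded image is resized to (width, height) and edited
# according to the given strength.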
@spaces.GPU
def infer_i2i(model, prompt, image, strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    pipe = pipes[model + '_i2i'].to(device)
    image = pipe(
        prompt=prompt,
        image=image.resize((width, height)),
        strength=strength,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed
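
# IP-Adapter: the uploaded image conditions the text-to-image pipeline through
# IP-Adapter, weighted by `scale`.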
@spaces.GPU
def infer_ip_adapter(model, prompt, image, scale, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    pipe = pipes[model + '_ip_adapter'].to(device)
    pipe.set_ip_adapter_scale(scale)
    # The reference image enters the text-to-image pipeline only through
    # `ip_adapter_image`; StableDiffusionPipeline takes no `image` argument.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        ip_adapter_image=image,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed
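
# Example prompts shared by all three tabs.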
examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]
css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""

# UI: three tabs (Text-to-Image, Image-to-Image, IP-Adapter), each with a
# prompt box, a result image, and an advanced-settings accordion.
with gr.Blocks(css=css) as demo:
    with gr.Tab("Text-to-Image"):
        with gr.Column(elem_id="col-container"):
            gr.Markdown("""
                # LinFusion Text-to-Image Gradio Demo
            """)
            with gr.Row():
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                    container=False,
                )
                run_button = gr.Button("Run", scale=0)
            result = gr.Image(label="Result", show_label=False)
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt = gr.Text(
                    label="Negative prompt",
                    max_lines=1,
                    placeholder="Enter a negative prompt",
                    visible=False,
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                )
                model_choice = gr.Dropdown(label="Choose Model", choices=list(all_model_id.keys()), value=list(all_model_id.keys())[0])
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                with gr.Row():
                    width = gr.Slider(
                        label="Width",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=512,  # Replace with defaults that work for your model
                    )
                    height = gr.Slider(
                        label="Height",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=512,  # Replace with defaults that work for your model
                    )
                with gr.Row():
                    guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.0,
                        maximum=10.0,
                        step=0.1,
                        value=7.5,  # Replace with defaults that work for your model
                    )
                    num_inference_steps = gr.Slider(
                        label="Number of inference steps",
                        minimum=1,
                        maximum=50,
                        step=1,
                        value=25,  # Replace with defaults that work for your model
                    )
            gr.Examples(
                examples=examples,
                inputs=[prompt]
            )
        run_button.click(
            fn=infer_t2i,
            inputs=[model_choice, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
            outputs=[result, seed]
        )
    with gr.Tab("Image-to-Image"):
        with gr.Column(elem_id="col-container"):
            gr.Markdown("""
                # LinFusion Image-to-Image Gradio Demo
            """)
            with gr.Row():
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                    container=False,
                )
                run_button = gr.Button("Run", scale=0)
            image_upload_input = gr.Image(label="Upload an Image", type="pil")
            result = gr.Image(label="Result", show_label=False)
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt = gr.Text(
                    label="Negative prompt",
                    max_lines=1,
                    placeholder="Enter a negative prompt",
                    visible=False,
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                )
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                model_choice = gr.Dropdown(label="Choose Model", choices=list(all_model_id.keys()), value=list(all_model_id.keys())[0])
                with gr.Row():
                    width = gr.Slider(
                        label="Width",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=512,  # Replace with defaults that work for your model
                    )
                    height = gr.Slider(
                        label="Height",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=512,  # Replace with defaults that work for your model
                    )
                with gr.Row():
                    guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.0,
                        maximum=10.0,
                        step=0.1,
                        value=7.5,  # Replace with defaults that work for your model
                    )
                    num_inference_steps = gr.Slider(
                        label="Number of inference steps",
                        minimum=1,
                        maximum=50,
                        step=1,
                        value=25,  # Replace with defaults that work for your model
                    )
                editing_strength = gr.Slider(
                    label="Strength of editing",
                    minimum=0,
                    maximum=1,
                    step=0.01,
                    value=0.5,  # Replace with defaults that work for your model
                )
            gr.Examples(
                examples=examples,
                inputs=[prompt]
            )
        run_button.click(
            fn=infer_i2i,
            inputs=[model_choice, prompt, image_upload_input, editing_strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
            outputs=[result, seed]
        )
    with gr.Tab("IP-Adapter"):
        with gr.Column(elem_id="col-container"):
            gr.Markdown("""
                # LinFusion IP-Adapter Gradio Demo
            """)
            with gr.Row():
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                    container=False,
                )
                run_button = gr.Button("Run", scale=0)
            image_upload_input = gr.Image(label="Upload an Image", type="pil")
            result = gr.Image(label="Result", show_label=False)
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt = gr.Text(
                    label="Negative prompt",
                    max_lines=1,
                    placeholder="Enter a negative prompt",
                    visible=False,
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                )
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                model_choice = gr.Dropdown(label="Choose Model", choices=list(all_model_id.keys()), value=list(all_model_id.keys())[0])
                with gr.Row():
                    width = gr.Slider(
                        label="Width",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=512,  # Replace with defaults that work for your model
                    )
                    height = gr.Slider(
                        label="Height",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=512,  # Replace with defaults that work for your model
                    )
                with gr.Row():
                    guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.0,
                        maximum=10.0,
                        step=0.1,
                        value=7.5,  # Replace with defaults that work for your model
                    )
                    num_inference_steps = gr.Slider(
                        label="Number of inference steps",
                        minimum=1,
                        maximum=50,
                        step=1,
                        value=25,  # Replace with defaults that work for your model
                    )
                ip_adapter_scale = gr.Slider(
                    label="Strength of image condition",
                    minimum=0,
                    maximum=1,
                    step=0.01,
                    value=0.4,  # Replace with defaults that work for your model
                )
            gr.Examples(
                examples=examples,
                inputs=[prompt]
            )
        run_button.click(
            fn=infer_ip_adapter,
            inputs=[model_choice, prompt, image_upload_input, ip_adapter_scale, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
            outputs=[result, seed]
        )
demo.queue().launch()