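"""Gradio demo app for a custom Stable Diffusion text-to-image pipeline.

On first run the Stable Diffusion v1.5 checkpoint is downloaded from the
Hugging Face Hub into ./data, then loaded through the project's own model
loader and sampling pipeline (src/).
"""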
import gradio as gr
import numpy as np
import random
import torch
from PIL import Image
import os
from huggingface_hub import hf_hub_download
from pathlib import Path
import sys
# Add src directory to Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src import model_loader
from src import pipeline
from src.config import Config, DeviceConfig
from transformers import CLIPTokenizer
# Create data directory if it doesn't exist
data_dir = Path("data")
data_dir.mkdir(exist_ok=True)
# Model configuration
MODEL_REPO = "stable-diffusion-v1-5/stable-diffusion-v1-5"
MODEL_FILENAME = "v1-5-pruned-emaonly.ckpt"
model_file = data_dir / MODEL_FILENAME
# Download model if it doesn't exist
if not model_file.exists():
    print(f"Downloading model from {MODEL_REPO}...")
    model_file = hf_hub_download(
        repo_id=MODEL_REPO,
        filename=MODEL_FILENAME,
        local_dir=data_dir,
        local_dir_use_symlinks=False,
    )
    print("Model downloaded successfully!")
# Device configuration
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# Initialize configuration
config = Config(
    device=DeviceConfig(device=device),
    tokenizer=CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32"),
)
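# NOTE: Stable Diffusion v1.5's text encoder is CLIP ViT-L/14, but the OpenAI
# CLIP checkpoints share the same BPE vocabulary, so the ViT-B/32 tokenizer
# used here should produce identical token IDs.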
# Load models with SE blocks enabled
config.models = model_loader.load_models(str(model_file), device, use_se=True)
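# "SE" presumably refers to squeeze-and-excitation blocks in the custom model
# (inferred from the flag name; see src/model_loader).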
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
def infer(
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Update config with user settings
    config.seed = seed
    config.diffusion.cfg_scale = guidance_scale
    config.diffusion.n_inference_steps = num_inference_steps
    config.model.width = width
    config.model.height = height

    # Generate image
    output_image = pipeline.generate(
        prompt=prompt,
        uncond_prompt=negative_prompt,
        config=config,
    )

    # Convert numpy array to PIL Image
    image = Image.fromarray(output_image)
    return image, seed
examples = [
    "An ultra-sharp photorealistic painting of a futuristic cityscape at night with neon lights and flying cars",
    "A serene mountain landscape at sunset with snow-capped peaks and a clear lake reflection",
    "A detailed portrait of a cyberpunk character with glowing neon implants and holographic tattoos",
]
css = """
#col-container {
margin: 0 auto;
max-width: 640px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Custom Diffusion Model Text-to-Image Generator")

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0, variant="primary")

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=False,
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=42,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=7.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=50,
                )

        gr.Examples(examples=examples, inputs=[prompt])
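    # Run on button click or when the user presses Enter in the prompt box;
    # infer returns the seed it actually used, so the Seed slider updates too.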
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )
if __name__ == "__main__":
    demo.launch()