# SDXL-Lightning text-to-image demo (HuggingFace Space).
# (Non-code page chrome from the scraped Space listing removed.)
import random

import gradio as gr
import numpy as np
import torch
from diffusers import DiffusionPipeline, EulerDiscreteScheduler

# Prefer GPU when available; float16 is only safe/beneficial on CUDA.
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Model and checkpoint identifiers.
model_repo_id = "ByteDance/SDXL-Lightning"
ckpt = "sdxl_lightning_4step_unet.safetensors"
# NOTE(review): `ckpt` is declared but never loaded into the pipeline below,
# and the SDXL-Lightning repo ships raw UNet checkpoints rather than a full
# diffusers pipeline — confirm this from_pretrained call works as intended.

# Load the diffusion pipeline and move it to the selected device.
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
pipe.to(device)

# Use "trailing" timestep spacing, the setting recommended for
# SDXL-Lightning's distilled few-step sampling.
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing"
)

# Upper bound for randomly drawn generation seeds.
MAX_SEED = np.iinfo(np.int32).max
def generate_and_display(prompt, num_inference_steps=10, guidance_scale=0, seed=None):
    """Generate a single image from *prompt* with the module-level `pipe`.

    Args:
        prompt: Text prompt for the diffusion model.
        num_inference_steps: Number of denoising steps.
            NOTE(review): the loaded checkpoint name suggests a 4-step
            Lightning model — confirm the default of 10 is intended.
        guidance_scale: Classifier-free guidance scale; 0 disables guidance.
        seed: Optional integer seed for reproducible output. When None
            (the default), a random seed is drawn, matching the original
            behavior.

    Returns:
        The first generated image from the pipeline output.
    """
    # Draw a fresh random seed unless the caller pinned one.
    if seed is None:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device).manual_seed(seed)
    image = pipe(
        prompt=prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=generator,
    ).images[0]
    return image
def launch_demo():
    """Build and launch the Gradio UI for prompt-to-image generation.

    BUG FIX: the original called ``gr.launch(...)``, which does not exist on
    the gradio module (``launch`` is a method of Blocks/Interface), and no
    interface was ever constructed — the app crashed at startup. Build an
    Interface wired to ``generate_and_display`` and launch that instead.
    """
    demo = gr.Interface(
        fn=generate_and_display,
        inputs=gr.Textbox(label="Prompt"),
        outputs=gr.Image(label="Generated image"),
        title="SDXL-Lightning Text-to-Image",
    )
    # Bind to all interfaces on a fixed port for local testing.
    demo.launch(server_name="0.0.0.0", server_port=8080)
# Entry point: launch the UI only when run as a script, not on import.
if __name__ == "__main__":
    launch_demo()