from diffusers import DiffusionPipeline
import torch
import os
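# intel_extension_for_pytorch is optional; it is only needed on Intel XPU
# devices, so a missing install is simply ignored.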
try:
    import intel_extension_for_pytorch as ipex
except ImportError:
    pass
from PIL import Image
import numpy as np
import gradio as gr
import psutil
import time
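
# Optional runtime flags, read from environment variables set on the Space.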
SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# check if MPS is available (macOS only, Apple M1/M2/M3 chips)
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
xpu_available = hasattr(torch, "xpu") and torch.xpu.is_available()
device = torch.device(
    "cuda" if torch.cuda.is_available() else "xpu" if xpu_available else "cpu"
)
torch_device = device
torch_dtype = torch.float16
print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
print(f"TORCH_COMPILE: {TORCH_COMPILE}")
print(f"device: {device}")
if mps_available:
device = torch.device("mps")
torch_device = "cpu"
torch_dtype = torch.float32
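
# Load SDXL Turbo; the safety checker is only kept when SAFETY_CHECKER="True".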
if SAFETY_CHECKER == "True":
pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", revision="pr/4")
else:
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/sdxl-turbo", revision="pr/4", safety_checker=None
)
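# Cast the weights to the working dtype on torch_device, then move the pipeline to
# the target device (these differ on MPS); channels_last can speed up the UNet on some GPUs.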
pipe.to(device=torch_device, dtype=torch_dtype).to(device)
pipe.unet.to(memory_format=torch.channels_last)
pipe.set_progress_bar_config(disable=True)
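

# Generate a single 512x512 image from the prompt. SDXL Turbo is distilled for
# few-step sampling, so classifier-free guidance is disabled (guidance_scale=0.0).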
def predict(prompt, steps, seed=1231231):
    generator = torch.manual_seed(seed)
    last_time = time.time()
    results = pipe(
        prompt=prompt,
        generator=generator,
        num_inference_steps=steps,
        guidance_scale=0.0,
        width=512,
        height=512,
        # original_inference_steps=params.lcm_steps,
        output_type="pil",
    )
    print(f"Pipe took {time.time() - last_time} seconds")
    nsfw_content_detected = (
        results.nsfw_content_detected[0]
        if "nsfw_content_detected" in results
        else False
    )
    # If the safety checker flagged the output, warn the user and return a blank image.
    if nsfw_content_detected:
        gr.Warning("NSFW content detected.")
        return Image.new("RGB", (512, 512))
    return results.images[0]
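

# Minimal CSS to center the layout and cap its width.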
css = """
#container{
    margin: 0 auto;
    max-width: 40rem;
}
#intro{
    max-width: 100%;
    text-align: center;
    margin: 0 auto;
}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="container"):
gr.Markdown(
"""# SDXL Turbo - Text To Image
## Unofficial Demo
SDXL Turbo model can generate high quality images in a single pass read more on [stability.ai post](https://stability.ai/news/stability-ai-sdxl-turbo).
**Model**: https://huggingface.co/stabilityai/sdxl-turbo
""",
elem_id="intro",
)
with gr.Row():
with gr.Row():
prompt = gr.Textbox(
placeholder="Insert your prompt here:", scale=5, container=False
)
generate_bt = gr.Button("Generate", scale=1)
image = gr.Image(type="filepath")
with gr.Accordion("Advanced options", open=False):
steps = gr.Slider(label="Steps", value=2, minimum=1, maximum=10, step=1)
seed = gr.Slider(
randomize=True, minimum=0, maximum=12013012031030, label="Seed", step=1
)
with gr.Accordion("Run with diffusers"):
gr.Markdown(
"""## Running SDXL Turbo with `diffusers`
```bash
pip install diffusers==0.23.1
```
```py
from diffusers import DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/sdxl-turbo", revision="refs/pr/4"
).to("cuda")
results = pipe(
prompt="A cinematic shot of a baby racoon wearing an intricate italian priest robe",
num_inference_steps=1,
guidance_scale=0.0,
)
imga = results.images[0]
imga.save("image.png")
```
"""
)
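
        # Wire up the events: regenerate the image when the button is clicked or
        # whenever the prompt, steps, or seed inputs change, so the demo updates live.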
        inputs = [prompt, steps, seed]
        generate_bt.click(fn=predict, inputs=inputs, outputs=image, show_progress=False)
        prompt.input(fn=predict, inputs=inputs, outputs=image, show_progress=False)
        steps.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
        seed.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
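
# Enable request queueing so concurrent generations are handled in order, then launch the app.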
demo.queue()
demo.launch()