from diffusers import DiffusionPipeline, LCMScheduler, AutoencoderTiny
import torch
import os
import datetime
import time

try:
    import intel_extension_for_pytorch as ipex  # optional: Intel XPU acceleration
except ImportError:
    pass

from PIL import Image
import numpy as np
import gradio as gr
import psutil
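
# Environment toggles: SAFETY_CHECKER enables the diffusers safety checker,
# TORCH_COMPILE turns on torch.compile for the UNet and VAE, and HF_TOKEN
# can be set for gated model downloads (read here but not used below).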
SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Check if MPS is available (macOS with Apple Silicon M1/M2/M3 chips)
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
xpu_available = hasattr(torch, "xpu") and torch.xpu.is_available()
device = torch.device(
    "cuda" if torch.cuda.is_available() else "xpu" if xpu_available else "cpu"
)
torch_device = device
torch_dtype = torch.float16

print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
print(f"TORCH_COMPILE: {TORCH_COMPILE}")
print(f"device: {device}")

if mps_available:
    device = torch.device("mps")
    torch_device = "cpu"  # load weights on CPU first, then move the pipeline to MPS
    torch_dtype = torch.float32  # float16 is unreliable on MPS
if SAFETY_CHECKER == "True":
    pipe = DiffusionPipeline.from_pretrained("Lykon/dreamshaper-7")
else:
    pipe = DiffusionPipeline.from_pretrained(
        "Lykon/dreamshaper-7", safety_checker=None
    )
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to(device=torch_device, dtype=torch_dtype).to(device)
pipe.unet.to(memory_format=torch.channels_last)  # channels_last speeds up convolutions
pipe.set_progress_bar_config(disable=True)
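
# LCMScheduler enables few-step sampling; the LCM-LoRA loaded below is
# distilled for low classifier-free guidance, which is why the UI defaults
# to a small guidance_scale.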
# Enable attention slicing on machines with less than 64 GB of RAM
# (trades some speed for lower peak memory)
if psutil.virtual_memory().total < 64 * 1024**3:
    pipe.enable_attention_slicing()

if TORCH_COMPILE:
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
    pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
    pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)
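# The warmup call above runs the compiled graph once so the first real
# request does not pay the torch.compile latency.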
# Load LCM LoRA
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
pipe.fuse_lora()
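# fuse_lora() merges the LoRA weights into the base UNet so inference runs
# without per-layer adapter overhead.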

def predict(prompt, guidance, steps, seed=1231231):
    generator = torch.manual_seed(seed)
    last_time = time.time()
    results = pipe(
        prompt=prompt,
        generator=generator,
        num_inference_steps=steps,
        guidance_scale=guidance,
        width=512,
        height=512,
        # original_inference_steps=params.lcm_steps,
        output_type="pil",
    )
    print(f"Pipe took {time.time() - last_time} seconds")
    nsfw_content_detected = (
        results.nsfw_content_detected[0]
        if getattr(results, "nsfw_content_detected", None)
        else False
    )
    if nsfw_content_detected:
        raise gr.Error("NSFW content detected; try a different prompt.")
    if len(results.images) == 0:
        return None
    # Build a file name from the date and a truncated, sanitized prompt
    date_str = datetime.datetime.now().strftime("%Y%m%d")
    safe_prompt = prompt.replace(" ", "_")[:50]  # truncate long prompts
    filename = f"{date_str}_{safe_prompt}.png"
    # Save the image to the working directory (adjust the path as needed)
    image_path = os.path.join("", filename)
    results.images[0].save(image_path)
    print(f"Image saved as {image_path}")
    # Gradio's Image output already offers a download control in the UI,
    # so no separate download button is needed here.
    return results.images[0]
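
# Example of calling predict() directly, outside the UI (hypothetical values):
#   image = predict("a watercolor fox in the snow", guidance=0.3, steps=4, seed=42)
#   image.save("fox.png")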
css = """
#container{
margin: 0 auto;
max-width: 40rem;
}
#intro{
max-width: 100%;
text-align: center;
margin: 0 auto;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="container"):
        gr.Markdown(
            """## 🕹️ Stable Diffusion 1.5 - Real Time 🎨 Image Generation Using 🌐 Latent Consistency LoRAs""",
            elem_id="intro",
        )
        with gr.Row():
            prompt = gr.Textbox(
                placeholder="Insert your prompt here:", scale=5, container=False
            )
            generate_bt = gr.Button("Generate", scale=1)
        image = gr.Image(type="filepath")
        with gr.Accordion("Advanced options", open=False):
            guidance = gr.Slider(
                label="Guidance", minimum=0.0, maximum=5, value=0.3, step=0.001
            )
            steps = gr.Slider(label="Steps", value=4, minimum=2, maximum=10, step=1)
            seed = gr.Slider(
                randomize=True, minimum=0, maximum=12013012031030, label="Seed", step=1
            )
with gr.Accordion("Run with diffusers"):
gr.Markdown(
"""## Running LCM-LoRAs it with `diffusers`
```bash
pip install diffusers==0.23.0
```
```py
from diffusers import DiffusionPipeline, LCMScheduler
pipe = DiffusionPipeline.from_pretrained("Lykon/dreamshaper-7").to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") #yes, it's a normal LoRA
results = pipe(
prompt="ImageEditor",
num_inference_steps=4,
guidance_scale=0.0,
)
results.images[0]
```
"""
)

        inputs = [prompt, guidance, steps, seed]
        generate_bt.click(fn=predict, inputs=inputs, outputs=image, show_progress=False)
        prompt.input(fn=predict, inputs=inputs, outputs=image, show_progress=False)
        guidance.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
        steps.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
        seed.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
demo.queue()
demo.launch()
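
# To run locally (a sketch; assumes the packages imported above are installed
# and this file is saved as app.py):
#   SAFETY_CHECKER=True python app.py
# then open the local URL that demo.launch() prints.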