import spaces
import gradio as gr
import numpy as np
import random
import torch
import os
import time
import glob
from diffusers import (
    StableDiffusionXLPipeline,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
    HeunDiscreteScheduler,
    KDPM2DiscreteScheduler,
    KDPM2AncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    UniPCMultistepScheduler,
)
# Temporary file management settings
TEMP_DIR = "temp_images"
FILE_RETENTION_PERIOD = 3600  # 1 hour
os.makedirs(TEMP_DIR, exist_ok=True)
def cleanup_old_files():
"""古い一時ファイルを削除する"""
current_time = time.time()
pattern = os.path.join(TEMP_DIR, "output_*.png")
for file_path in glob.glob(pattern):
try:
file_modified_time = os.path.getmtime(file_path)
if current_time - file_modified_time > FILE_RETENTION_PERIOD:
os.remove(file_path)
except Exception as e:
print(f"Error while cleaning up file {file_path}: {e}")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pipe = StableDiffusionXLPipeline.from_single_file(
"https://huggingface.co/bluepen5805/illustrious_pencil-XL/illustrious_pencil-XL-v2.0.0.safetensors",
use_safetensors=True,
torch_dtype=torch.float16,
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(device)
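# Limits exposed in the UI: seed slider range and maximum width/height.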
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1216
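# Inference entry point; on Hugging Face Spaces the @spaces.GPU decorator
# allocates GPU time for each call.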
@spaces.GPU
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, sampler_name):
    # Remove stale temporary files before generating a new image
cleanup_old_files()
    # Swap in the scheduler (sampler) selected in the UI; fall back to Euler Ancestral
    scheduler_classes = {
        "DDIM": DDIMScheduler,
        "DPMSolverMultistep": DPMSolverMultistepScheduler,
        "Euler": EulerDiscreteScheduler,
        "EulerAncestral": EulerAncestralDiscreteScheduler,
        "Heun": HeunDiscreteScheduler,
        "KDPM2": KDPM2DiscreteScheduler,
        "KDPM2Ancestral": KDPM2AncestralDiscreteScheduler,
        "LMS": LMSDiscreteScheduler,
        "UniPC": UniPCMultistepScheduler,
    }
    scheduler_cls = scheduler_classes.get(sampler_name, EulerAncestralDiscreteScheduler)
    pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
if randomize_seed:
seed = random.randint(0, MAX_SEED)
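    # A seeded CPU generator makes results reproducible for a given seed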
generator = torch.Generator().manual_seed(seed)
output_image = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
width=width,
height=height,
generator=generator
).images[0]
    # Convert to RGB before saving as PNG
if output_image.mode != 'RGB':
output_image = output_image.convert('RGB')
    # Save to a timestamped temporary file and return its path
timestamp = int(time.time())
temp_filename = os.path.join(TEMP_DIR, f"output_{timestamp}.png")
output_image.save(temp_filename)
return temp_filename
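# CSS that centers the main column and caps its width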
css = """
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown("""
Text-to-Image Demo
using [illustrious_pencil-XL](https://huggingface.co/bluepen5805/illustrious_pencil-XL)
""")
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
container=False,
)
run_button = gr.Button("Run", scale=0)
result = gr.Image(
label="Result",
show_label=False,
type="filepath", # filepathに変更
elem_id="output_image"
)
with gr.Accordion("Advanced Settings", open=False):
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=1,
placeholder="Enter a negative prompt",
value="nsfw, (low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn"
)
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
sampler_name = gr.Dropdown(
label="Sampler",
choices=["DDIM", "DPMSolverMultistep", "Euler", "EulerAncestral", "Heun", "KDPM2", "KDPM2Ancestral", "LMS", "UniPC"],
value="EulerAncestral",
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
width = gr.Slider(
label="Width",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=1024,
)
height = gr.Slider(
label="Height",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=1024,
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=0.0,
maximum=20.0,
step=0.1,
value=4,
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=28,
step=1,
value=28,
)
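    # Wire the Run button to the inference function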
run_button.click(
fn=infer,
inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, sampler_name],
outputs=[result]
)
# Remove any stale temporary files left over from previous runs at startup
cleanup_old_files()
demo.queue().launch()