# t2i_noob_pre / app.py
import spaces
import gradio as gr
import numpy as np
import random
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
import torch
import os
import time
import glob
# Directory for temporary output files
TEMP_DIR = "temp_images"

# How long temporary files are kept, in seconds
FILE_RETENTION_PERIOD = 3600  # 1 hour

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Create the temporary directory if it does not exist
os.makedirs(TEMP_DIR, exist_ok=True)
def cleanup_old_files():
    """Delete temporary files older than the retention period."""
    current_time = time.time()
    pattern = os.path.join(TEMP_DIR, "output_*.png")
    for file_path in glob.glob(pattern):
        try:
            # Check the file's last modification time
            file_modified_time = os.path.getmtime(file_path)
            if current_time - file_modified_time > FILE_RETENTION_PERIOD:
                os.remove(file_path)
        except Exception as e:
            print(f"Error while cleaning up file {file_path}: {e}")
# Load the NoobAI SDXL checkpoint directly from a single .safetensors file
pipe = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/Laxhar/noob_sdxl_beta/noob_hercules3/checkpoint/checkpoint-e2_s109089.safetensors/checkpoint-e2_s109089.safetensors",
    use_safetensors=True,
    torch_dtype=torch.float16,
)

# Swap in Euler Ancestral sampling in place of the checkpoint's default scheduler
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(device)
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1216
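
# MAX_SEED is the largest 32-bit signed integer (2_147_483_647); MAX_IMAGE_SIZE caps the
# width/height sliders below (SDXL checkpoints are typically trained around 1024x1024).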
@spaces.GPU
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    # Remove stale temporary files before generating a new image
    cleanup_old_files()

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    # Run the diffusion pipeline
    output_image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator
    ).images[0]

    # Ensure the image is in RGB mode before saving
    if output_image.mode != 'RGB':
        output_image = output_image.convert('RGB')

    # Save to a timestamped temporary file and return its path to Gradio
    timestamp = int(time.time())
    temp_filename = os.path.join(TEMP_DIR, f"output_{timestamp}.png")
    output_image.save(temp_filename)

    return temp_filename
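
# A minimal sketch of calling infer() directly (e.g. a local smoke test without the UI);
# the prompt and settings below are illustrative only, not values used by this Space:
#
#   path = infer(
#       prompt="a serene mountain landscape at sunset, watercolor style",
#       negative_prompt="low quality, blurry",
#       seed=0,
#       randomize_seed=True,
#       width=1024,
#       height=1024,
#       guidance_scale=7.0,
#       num_inference_steps=28,
#   )
#   print("Image saved to", path)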
css = """
#col-container {
margin: 0 auto;
max-width: 100%;
padding: 0 1rem;
}
/* プロンプト入力エリアのスタイル */
.prompt-input {
min-height: 100px !important;
font-size: 16px !important;
line-height: 1.5 !important;
padding: 12px !important;
border-radius: 8px !important;
border: 1px solid #e0e0e0 !important;
background-color: #ffffff !important;
}
.prompt-input:focus {
border-color: #2196f3 !important;
box-shadow: 0 0 0 2px rgba(33, 150, 243, 0.1) !important;
}
/* ボタンのスタイル */
.generate-button {
margin-top: 1rem !important;
padding: 12px 24px !important;
font-size: 16px !important;
font-weight: 600 !important;
border-radius: 8px !important;
background-color: #2196f3 !important;
color: white !important;
transition: all 0.3s ease !important;
}
.generate-button:hover {
background-color: #1976d2 !important;
transform: translateY(-1px) !important;
}
/* スマートフォン対応 */
@media (max-width: 768px) {
#col-container {
padding: 0 0.5rem;
}
.prompt-input {
font-size: 16px !important;
}
.advanced-settings {
margin-top: 1rem;
}
}
/* 結果画像のコンテナ */
#output_image {
margin-top: 1rem;
border-radius: 8px;
overflow: hidden;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
}
/* アコーディオンのスタイル */
.advanced-settings {
margin-top: 2rem;
border: 1px solid #e0e0e0;
border-radius: 8px;
overflow: hidden;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
        # Text-to-Image Demo
        Using [Noob SDXL beta model](https://huggingface.co/Laxhar) to generate amazing images!
        """)

        with gr.Group():
            prompt = gr.Textbox(
                label="What would you like to create?",
                elem_classes="prompt-input",
                lines=3,
                placeholder="Describe the image you want to generate. Be specific about details, style, and atmosphere.\n\nExample: 'A serene mountain landscape at sunset, with snow-capped peaks and a clear lake reflection, painted in watercolor style'",
                show_label=True,
            )

            run_button = gr.Button(
                "✨ Generate Image",
                elem_classes="generate-button",
                variant="primary",
                scale=1,
                size="lg"
            )

        result = gr.Image(
            label="Generated Image",
            show_label=True,
            type="filepath",
            elem_id="output_image"
        )

        with gr.Accordion("Advanced Settings", open=False, elem_classes="advanced-settings"):
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                lines=2,
                placeholder="Specify what you don't want in the image. Example: nsfw, low quality, blur, etc.",
                value="nsfw, (low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn"
            )

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(
                label="Randomize seed",
                value=True,
                info="Generate different results each time"
            )

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=0.0,
                    maximum=20.0,
                    step=0.1,
                    value=7,
                    info="Controls how closely the image follows the prompt"
                )
                num_inference_steps = gr.Slider(
                    label="Number of Steps",
                    minimum=1,
                    maximum=28,
                    step=1,
                    value=28,
                    info="More steps = higher quality but slower generation"
                )

    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result]
    )
# Remove stale temporary files at startup
cleanup_old_files()
demo.queue().launch()