x-mas / app.py
fantos's picture
Update app.py
1d059ee verified
# X-mas Space: FLUX.1-dev + Christmas LoRA text-to-image demo with Korean prompt support.
import spaces
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
import random
from transformers import pipeline

# Deterministic cuDNN kernels (reproducible results for a fixed seed);
# TF32 matmul enabled as a speed/precision trade-off on Ampere+ GPUs.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = True

# Initialize the Korean -> English translation model (FLUX expects English prompts).
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

# Base diffusion model, loaded in bfloat16 to halve GPU memory use.
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)

# Christmas-themed LoRA adapter; this LoRA needs no trigger word, hence the empty string.
lora_repo = "strangerzonehf/Flux-Xmas-Realpix-LoRA"
trigger_word = ""
pipe.load_lora_weights(lora_repo)
pipe.to("cuda")

# Largest value representable by a 32-bit unsigned seed.
MAX_SEED = 2**32-1
@spaces.GPU()
def translate_and_generate(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    """Translate a Korean prompt to English if needed, then generate an image.

    Parameters
    ----------
    prompt : str
        User prompt, in Korean or English. Korean text is machine-translated
        to English because FLUX.1-dev is an English-prompt model.
    cfg_scale : float
        Classifier-free guidance scale passed to the pipeline.
    steps : int
        Number of denoising steps.
    randomize_seed : bool
        When True, `seed` is ignored and a fresh random seed is drawn.
    seed : int | float
        RNG seed (Gradio sliders may deliver floats; coerced to int below).
    width, height : int
        Output image dimensions in pixels.
    lora_scale : float
        LoRA adapter strength, forwarded via joint_attention_kwargs["scale"].
    progress : gr.Progress
        Gradio progress tracker; track_tqdm=True mirrors the pipeline's own
        tqdm bar, so no manual per-step loop is needed here.

    Returns
    -------
    tuple[PIL.Image.Image, int]
        The generated image and the seed actually used.
    """

    def contains_korean(text):
        # Hangul syllables occupy the contiguous Unicode range U+AC00..U+D7A3.
        return any('\uac00' <= char <= '\ud7a3' for char in text)

    if contains_korean(prompt):
        # Translate Korean to English for the English-only base model.
        actual_prompt = translator(prompt)[0]['translation_text']
    else:
        actual_prompt = prompt

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Gradio sliders can emit floats; torch.Generator.manual_seed requires int.
    seed = int(seed)
    generator = torch.Generator(device="cuda").manual_seed(seed)

    # gr.Progress expects fractions in [0, 1]. The original pre-generation
    # "step" loop was removed: it ran instantly *before* the pipeline call
    # (so it reported nothing real) and raised ZeroDivisionError for
    # steps < 10 via `i % (steps // 10)`. Real progress comes from track_tqdm.
    progress(0, "Starting image generation...")
    image = pipe(
        prompt=f"{actual_prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]
    progress(1, "Completed!")
    return image, seed
# Bundled example that pre-populates the UI on load (see load_example below).
example_image_path = "example0.webp"
example_prompt = """Cozy winter scene with a Christmas atmosphere: a snow-covered cabin in the forest, warm light glowing from the windows, surrounded by sparkling Christmas decorations and a beautifully adorned Christmas tree. The sky is filled with stars, and soft snowflakes are gently falling, creating a serene and warm ambiance"""
example_cfg_scale = 3.2
example_steps = 32
example_width = 1152
example_height = 896
example_seed = 3981632454
example_lora_scale = 0.85
def load_example():
    """Return the bundled example settings and preview image.

    The tuple order matches the `outputs` list wired up in `app.load`:
    (prompt, cfg_scale, steps, randomize_seed, seed, width, height,
    lora_scale, image).
    """
    preview = Image.open(example_image_path)
    return (
        example_prompt,
        example_cfg_scale,
        example_steps,
        True,
        example_seed,
        example_width,
        example_height,
        example_lora_scale,
        preview,
    )
# CSS ์ •์˜
custom_css = """
/* ๊ธฐ๋ณธ ์Šคํƒ€์ผ */
body {
margin: 0;
padding: 0;
background: url('file/example0.webp') no-repeat center center fixed;
background-size: cover;
min-height: 100vh;
}
/* ์ปจํ…Œ์ด๋„ˆ */
.container {
max-width: 1200px;
margin: 0 auto;
padding: 20px;
box-sizing: border-box;
}
/* ํ—ค๋” */
.header {
text-align: center;
color: white;
text-shadow: 2px 2px 4px rgba(0,0,0,0.7);
margin-bottom: 30px;
font-size: 2.5em;
}
/* ๋ฐ•์Šค ๊ณตํ†ต ์Šคํƒ€์ผ */
.box-common {
background-color: rgba(255, 255, 255, 0.85);
backdrop-filter: blur(10px);
border-radius: 15px;
padding: 20px;
margin: 20px 0;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
/* ๊ฒฐ๊ณผ ์ด๋ฏธ์ง€ ๋ฐ•์Šค */
.result-box {
width: 90%;
max-width: 1000px;
margin: 20px auto;
}
.image-output {
width: 100%;
max-width: 800px;
margin: 0 auto;
display: block;
}
/* ํ”„๋กฌํ”„ํŠธ ์ž…๋ ฅ ๋ฐ•์Šค */
.prompt-box {
width: 90%;
max-width: 1000px;
margin: 20px auto;
}
/* ๋ฒ„ํŠผ ์Šคํƒ€์ผ */
.generate-btn {
background-color: #2ecc71 !important;
color: white !important;
padding: 12px 30px !important;
border-radius: 8px !important;
border: none !important;
font-size: 1.1em !important;
cursor: pointer !important;
transition: background-color 0.3s ease !important;
width: 200px !important;
margin: 20px auto !important;
display: block !important;
}
.generate-btn:hover {
background-color: #27ae60 !important;
}
/* ์•„์ฝ”๋””์–ธ ์Šคํƒ€์ผ */
.accordion {
width: 90%;
max-width: 1000px;
margin: 20px auto;
}
/* ์˜ค๋””์˜ค ์ปจํŠธ๋กค */
.audio-controls {
position: fixed;
bottom: 20px;
right: 20px;
z-index: 1000;
display: flex;
gap: 10px;
background-color: rgba(255, 255, 255, 0.9);
padding: 15px;
border-radius: 10px;
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
}
.audio-btn {
background-color: #3498db;
color: white;
border: none;
padding: 8px 15px;
border-radius: 5px;
cursor: pointer;
transition: background-color 0.3s ease;
}
.audio-btn:hover {
background-color: #2980b9;
}
/* ๋ˆˆ ๋‚ด๋ฆฌ๋Š” ํšจ๊ณผ */
@keyframes snowfall {
0% {
transform: translateY(-10vh) translateX(0);
opacity: 1;
}
100% {
transform: translateY(100vh) translateX(100px);
opacity: 0.3;
}
}
.snowflake {
position: fixed;
color: white;
font-size: 1.5em;
user-select: none;
z-index: 1000;
pointer-events: none;
animation: snowfall linear infinite;
}
"""
# JavaScript snippets injected through gr.HTML blocks below.
# snow_js spawns a falling-snowflake <div> every 200 ms and removes each one
# after 5 s (matching the CSS `snowfall` animation above).
snow_js = """
function createSnowflake() {
const snowflake = document.createElement('div');
snowflake.innerHTML = 'โ„';
snowflake.className = 'snowflake';
snowflake.style.left = Math.random() * 100 + 'vw';
snowflake.style.animationDuration = Math.random() * 3 + 2 + 's';
snowflake.style.opacity = Math.random();
document.body.appendChild(snowflake);
setTimeout(() => {
snowflake.remove();
}, 5000);
}
setInterval(createSnowflake, 200);
"""
# audio_js toggles between two background tracks (#bgMusic1/#bgMusic2),
# ensuring at most one plays at a time, plus a stop-all helper.
audio_js = """
let currentlyPlaying = null;
function toggleAudio(num) {
const audio = document.getElementById('bgMusic' + num);
const otherAudio = document.getElementById('bgMusic' + (num === 1 ? 2 : 1));
if (currentlyPlaying === audio) {
audio.pause();
currentlyPlaying = null;
} else {
if (currentlyPlaying) {
currentlyPlaying.pause();
}
otherAudio.pause();
audio.play();
currentlyPlaying = audio;
}
}
function stopAllAudio() {
const audios = document.querySelectorAll('audio');
audios.forEach(audio => audio.pause());
currentlyPlaying = null;
}
"""
# Gradio ์ธํ„ฐํŽ˜์ด์Šค ๊ตฌ์„ฑ
app = gr.Blocks(css=custom_css,theme="Yntec/HaleyCH_Theme_Orange")
with app:
# JavaScript ์ดˆ๊ธฐํ™”
gr.HTML(f"<script>{snow_js}</script>")
with gr.Column(elem_classes="container"):
gr.Markdown("# ๐ŸŽ„ X-MAS LoRA", elem_classes="header")
# ์ด๋ฏธ์ง€ ์ถœ๋ ฅ ์˜์—ญ
with gr.Group(elem_classes="result-box box-common"):
gr.Markdown("### ๐Ÿ–ผ๏ธ Generated Image")
result = gr.Image(label="Result", elem_classes="image-output")
# ํ”„๋กฌํ”„ํŠธ ์ž…๋ ฅ ์˜์—ญ
with gr.Group(elem_classes="prompt-box box-common"):
prompt = gr.TextArea(
label="โœ๏ธ Your Prompt (ํ•œ๊ธ€ ๋˜๋Š” ์˜์–ด)",
placeholder="์ด๋ฏธ์ง€๋ฅผ ์„ค๋ช…ํ•˜์„ธ์š”...",
lines=5
)
generate_button = gr.Button(
"๐Ÿš€ Generate Image",
elem_classes="generate-btn"
)
# ๊ณ ๊ธ‰ ์˜ต์…˜
with gr.Accordion("๐ŸŽจ Advanced Options", open=False, elem_classes="accordion box-common"):
with gr.Group(elem_classes="parameter-box"):
gr.Markdown("### ๐ŸŽ›๏ธ Generation Parameters")
with gr.Row():
with gr.Column():
cfg_scale = gr.Slider(
label="CFG Scale",
minimum=1,
maximum=20,
step=0.5,
value=example_cfg_scale
)
steps = gr.Slider(
label="Steps",
minimum=1,
maximum=100,
step=1,
value=example_steps
)
lora_scale = gr.Slider(
label="LoRA Scale",
minimum=0,
maximum=1,
step=0.01,
value=example_lora_scale
)
with gr.Group(elem_classes="parameter-box"):
gr.Markdown("### ๐Ÿ“ Image Dimensions")
with gr.Row():
width = gr.Slider(
label="Width",
minimum=256,
maximum=1536,
step=64,
value=example_width
)
height = gr.Slider(
label="Height",
minimum=256,
maximum=1536,
step=64,
value=example_height
)
with gr.Group(elem_classes="parameter-box"):
gr.Markdown("### ๐ŸŽฒ Seed Settings")
with gr.Row():
randomize_seed = gr.Checkbox(
True,
label="Randomize seed"
)
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=example_seed
)
# ์˜ค๋””์˜ค ์ปจํŠธ๋กค
gr.HTML(f"""
<div class="audio-controls">
<button class="audio-btn" onclick="toggleAudio(1)">๐ŸŽต Music 1</button>
<button class="audio-btn" onclick="toggleAudio(2)">๐ŸŽต Music 2</button>
<button class="audio-btn" onclick="stopAllAudio()">โน Stop</button>
</div>
<audio id="bgMusic1" loop>
<source src="file/1.mp3" type="audio/mp3">
</audio>
<audio id="bgMusic2" loop>
<source src="file/2.mp3" type="audio/mp3">
</audio>
<script>{audio_js}</script>
""")
# ์ด๋ฒคํŠธ ํ•ธ๋“ค๋Ÿฌ
app.load(
load_example,
inputs=[],
outputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, result]
)
generate_button.click(
translate_and_generate,
inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
outputs=[result, seed]
)
app.queue()
app.launch()