# Gradio demo: SDXL-base + "ksyint/teu_lora" LoRA text-to-image,
# accepting a prompt in either English or Korean (Korean is machine-translated).
import torch
from diffusers import LCMScheduler, AutoPipelineForText2Image,DDPMScheduler
from PIL import Image
import numpy as np
import gradio as gr
import os
from transformers import pipeline
# Lazily-built translation pipeline, created once on first use and reused
# across calls (loading the HF model on every call is very expensive).
_translator_cache = {}


def translate(text):
    """Translate Korean *text* to English with Helsinki-NLP/opus-mt-ko-en.

    Parameters
    ----------
    text : str
        Korean source text.

    Returns
    -------
    str
        The English translation (first hypothesis from the pipeline).
    """
    if "ko-en" not in _translator_cache:
        # Build the HF translation pipeline only once, then reuse it.
        _translator_cache["ko-en"] = pipeline(
            "translation", model="Helsinki-NLP/opus-mt-ko-en"
        )
    result = _translator_cache["ko-en"](text)
    return result[0]["translation_text"]
# --- one-time model setup (runs at import time) ---
model_id = "stabilityai/stable-diffusion-xl-base-1.0"
adapter_id = "ksyint/teu_lora"  # LoRA adapter providing the "2024SS" style
# fp16 weights to halve memory; low_cpu_mem_usage=False loads eagerly on CPU first.
pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16", low_cpu_mem_usage=False)
# Swap the default scheduler for DDPM, keeping the existing scheduler config.
pipe.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")  # assumes a CUDA GPU is available — TODO confirm on deploy target
pipe.load_lora_weights(adapter_id)
# Merge the LoRA weights into the base model so inference skips the adapter path.
pipe.fuse_lora()
def main(English, Korean, Negative_English, Negative_Korean):
    """Generate a "2024SS"-styled image from a prompt in exactly one language.

    Parameters
    ----------
    English, Korean : str
        Prompt text; exactly one of the two must be non-empty.
    Negative_English, Negative_Korean : str
        Negative prompt in the matching language (Korean is translated).

    Returns
    -------
    PIL.Image.Image
        The generated image.

    Raises
    ------
    Exception
        If both prompts are empty, or both are filled.
    """
    english = English
    korean = Korean
    prompt2 = "2024SS "  # style trigger word prepended to every prompt
    # Exactly one language must be provided; Gradio text inputs default to "".
    if english and not korean:
        prompt2 += english
        negative = Negative_English
    elif korean and not english:
        prompt2 += translate(korean)
        negative = translate(Negative_Korean)
    else:
        # Both empty or both filled (the original raised the same message
        # in two separate branches).
        raise Exception("only one language or at least one language")
    steps = 60
    # NOTE(review): the original passed strength=5.0, but `strength` is an
    # img2img-only parameter (valid range 0-1) and is not part of the
    # text2image call signature, so it was removed.
    image = pipe(
        prompt=prompt2,
        negative_prompt=f"worst quality,multiple people, {negative}",
        num_inference_steps=steps,
        guidance_scale=5.0,
    ).images[0]
    return image
# Gradio UI: four text inputs (English / Korean prompt plus the two negative
# prompts) mapped onto main()'s parameters, one image output.
iface = gr.Interface(
    fn=main,
    inputs=["text", "text", "text", "text"],
    outputs="image",
    title="Generate 2024SS style from your favorites",
    description="Input one Language, English or Korean. Do not input 2024SS",
)
iface.launch()  # stray trailing "|" removed — it was a syntax error