from PIL import Image
import cv2
import gradio as gr
import numpy as np
import torch, os, random
from accelerate import Accelerator
from transformers import pipeline
from diffusers.utils import load_image
from diffusers import DiffusionPipeline, DDPMScheduler
from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS
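# Run entirely on CPU; Accelerate wraps the pipelines for device placement.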
accelerator = Accelerator(cpu=True)
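# Würstchen stage C prior: maps the text prompt to image embeddings.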
warp_prior = accelerator.prepare(DiffusionPipeline.from_pretrained("warp-ai/wuerstchen-prior", torch_dtype=torch.bfloat16, use_safetensors=True, safety_checker=None))
warp_prior.scheduler = DDPMScheduler.from_config(warp_prior.scheduler.config)
warp_prior = warp_prior.to("cpu")
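# Würstchen decoder (stages B and A): reconstructs the final image from the prior's embeddings.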
warp = accelerator.prepare(DiffusionPipeline.from_pretrained("warp-ai/wuerstchen", torch_dtype=torch.bfloat16, use_safetensors=True, safety_checker=None))
warp.scheduler = DDPMScheduler.from_config(warp.scheduler.config)
warp = warp.to("cpu")
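# Single CPU generator seeded once at startup, so every request shares the same seed.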
generator = torch.Generator(device="cpu").manual_seed(random.randint(1, 4876364))
def plex(cook, one, two):
    ###goof = load_image(img).resize((512, 512))
    negative_prompt = "lowres,text,bad quality,low quality,jpeg artifacts,ugly,bad hands,bad face,blurry,bad eyes,watermark,signature"
    # Stage C prior: the sliders supply the step count and the prior guidance scale.
    warp_out = warp_prior(prompt=cook, height=512, width=512, negative_prompt=negative_prompt, guidance_scale=two, num_inference_steps=one, generator=generator)
    primpt = ""
    # Decoder: unpack the prior output (image embeddings) and decode to a PIL image.
    imas = warp(**warp_out, height=512, width=512, num_inference_steps=5, prompt=cook, negative_prompt=primpt, guidance_scale=0.0, output_type="pil", generator=generator).images[0]
    return imas
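# Gradio UI: a prompt box plus sliders for inference steps and prior guidance scale, wired to plex().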
iface = gr.Interface(
    fn=plex,
    inputs=[gr.Textbox(label="prompt"), gr.Slider(label="Inference steps", minimum=1, step=1, maximum=10, value=5), gr.Slider(label="Prior guidance scale", minimum=4.1, step=0.1, maximum=19.9, value=4.1)],
    outputs=gr.Image(),
    title="Txt2Img Wrstchn SD",
    description="Txt2Img Wrstchn SD",
)
iface.queue(max_size=1)
iface.launch(max_threads=1)