import random

import gradio as gr
import numpy as np
import torch
from diffusers import EulerDiscreteScheduler
from optimum.intel import OVStableDiffusionXLPipeline

model_id = "None1145/noobai-XL-Vpred-0.65s-openvino"
prev_height = 1216
prev_width = 832

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048

pipe = None


def reload_model(new_model_id):
    """Load (or reload) the OpenVINO SDXL pipeline and compile it for the current static shape."""
    global pipe, model_id, prev_height, prev_width
    model_id = new_model_id
    try:
        print(f"Loading {model_id}...")
        pipe = OVStableDiffusionXLPipeline.from_pretrained(model_id, compile=False)
        # v-prediction checkpoints need the scheduler switched to v_prediction
        # with zero-terminal-SNR rescaling.
        if any(tag in model_id for tag in ("Vpred", "vpred", "v-pred")):
            scheduler_args = {"prediction_type": "v_prediction", "rescale_betas_zero_snr": True}
            pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, **scheduler_args)
        # pipe.to("gpu")
        # OpenVINO runs with static shapes: reshape to the current resolution, then compile.
        pipe.reshape(batch_size=1, height=prev_height, width=prev_width, num_images_per_prompt=1)
        pipe.compile()
        print(f"Loaded {model_id}")
        return f"Model successfully loaded: {model_id}"
    except Exception as e:
        return f"Failed to load model: {str(e)}"


reload_model(model_id)


def infer(
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    global prev_width, prev_height, pipe

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    # Recompile only when the requested resolution changes, since recompilation is expensive.
    if prev_width != width or prev_height != height:
        pipe.reshape(batch_size=1, height=height, width=width, num_images_per_prompt=1)
        pipe.compile()
        prev_width = width
        prev_height = height

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    return image, seed


examples = [
    "masterpiece,best quality,newest,absurdres,highres,takarada rikka,1girl,solo,artist:koruri,artist:haoni,artist:deadnooodles,artist:mishima_kurone,artist:yd orange maru,year 2023,black hair,smile,looking at viewer,one eye closed,pov,close-up,blue eyes,medium hair,pointing at viewer,orange scrunchie,:t,white shirt,school uniform,standing,cowboy shot,classroom,bent over,head rest,head tilt,face focus,",
    "masterpiece,best quality,artist:john_kafka,artist:nixeu,artist:quasarcake, ,gritty,marcille donato,1girl, abstract background, ambrosia \(dungeon meshi\), backpack, bag, belt, belt pouch, blonde hair, blue capelet, blue shirt, book, book holster, capelet, elf, food, food in mouth, green eyes, grimoire, hair ribbon, holding, holding staff, kebab, multicolored background, pants, pointy ears, ponytail, pouch, red ribbon, ribbon, shirt, sideways glance, skewer, sleeping bag, sprout, staff, v-shaped eyebrows, white pants",
    "kazusa \(blue archive\), reisa \(blue archive\),sfw,very awa,highres,absurdres,incredibly absurdres,masterpiece,oil painting \(medium\),Yasuda Akira,(henriiku (ahemaru):1.3),armored core,artist:ciloranko,artist:sho sho lwlw,(as109:0.7),ink (medium),, (2girls:1.5), black neckerchief, blue hair, buttons, double-breasted, drooling, grey serafuku, halo, light blue hair, midriff peek, multicolored hair, neckerchief, nose bubble, pink hair, pink halo, pleated skirt, refraction, saliva, school uniform, serafuku,skirt, sleeping, socks, star halo, striped clothes, striped socks,",
    "1girl,,camellya \(wuthering waves\),,::, (by kana616:0.8), [(by hen-tie:1.3)|(by kagami_\(galgamesion\):1.2)], (by kinokohime:1.2), (by yatsuha_\(hachiyoh\):0.8), year 2024,,parted lips, looking at animal, upper body, profile, holding, centauroid, holding bouquet,,streaked hair, hair flower, multicolored hair, breasts, hair ornament, hair between eyes, jewelry, twintails, white hair,,animal ears, deer girl, flower wreath, white bird, off-shoulder shirt, shirt, sleeves past wrists, deer ears, off shoulder, head wreath, long sleeves,,blurry background, depth of field, bokeh, planted sword, red background, planted, red theme, bird, grass, flower, very awa, masterpiece, best quality, newest, highres, absurdres",
]

with gr.Blocks() as img:
    gr.Markdown("# OpenVINO Text to Image")

    with gr.Column(elem_id="col-container"):
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0, variant="primary")

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=False,
            )

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=512,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=832,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=512,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1216,
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=5.0,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=60,
                    step=1,
                    value=28,
                )

        gr.Examples(examples=examples, inputs=[prompt])

        gr.Markdown("### Model Reload")
        with gr.Row():
            new_model_id = gr.Text(label="New Model ID", placeholder="Enter model ID", value=model_id)
            reload_button = gr.Button("Reload Model", variant="primary")
        reload_status = gr.Text(label="Status", interactive=False)

        reload_button.click(
            fn=reload_model,
            inputs=new_model_id,
            outputs=reload_status,
        )

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )

if __name__ == "__main__":
    img.launch()
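
# --- Hedged standalone sketch (not part of the app above) ---------------------
# A minimal way to exercise the same OpenVINO SDXL pipeline without the Gradio
# UI, mirroring the from_pretrained / scheduler / reshape / compile flow used in
# reload_model(). The prompt, seed, and output filename below are illustrative
# assumptions, not values the app itself uses.
#
# from diffusers import EulerDiscreteScheduler
# from optimum.intel import OVStableDiffusionXLPipeline
# import torch
#
# demo_pipe = OVStableDiffusionXLPipeline.from_pretrained(
#     "None1145/noobai-XL-Vpred-0.65s-openvino", compile=False
# )
# # This checkpoint is v-prediction, so swap in a v_prediction scheduler.
# demo_pipe.scheduler = EulerDiscreteScheduler.from_config(
#     demo_pipe.scheduler.config,
#     prediction_type="v_prediction",
#     rescale_betas_zero_snr=True,
# )
# demo_pipe.reshape(batch_size=1, height=1216, width=832, num_images_per_prompt=1)
# demo_pipe.compile()
# demo_image = demo_pipe(
#     prompt="masterpiece, best quality, 1girl, solo",  # illustrative prompt
#     guidance_scale=5.0,
#     num_inference_steps=28,
#     width=832,
#     height=1216,
#     generator=torch.Generator().manual_seed(0),
# ).images[0]
# demo_image.save("demo.png")  # hypothetical output path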