import gradio as gr
from diffusers import AutoPipelineForText2Image
import dask
from dask import delayed

# Load the combined Kandinsky 2.1 text-to-image pipeline (prior + decoder) once at
# startup so the weights are reused across requests; the combined pipeline accepts
# a plain text prompt, which the bare decoder pipeline does not.
pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-1")
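
# Optional CPU tuning (a hedged sketch, not part of the original script): PyTorch
# picks its own intra-op thread count, and pinning it explicitly sometimes helps
# on CPU-only machines. The torch and os imports below are assumptions added for
# this sketch only.
import os
import torch

torch.set_num_threads(os.cpu_count() or 1)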


def generate_image(prompt, num_inference_steps=50):
    """
    Generate an image from a text prompt using the diffusion pipeline.

    Fewer inference steps make generation faster (useful on CPU) at some
    cost in image quality.
    """
    image = pipe(prompt, num_inference_steps=num_inference_steps).images[0]
    return image


@delayed
def dask_generate(prompt):
    # dask.delayed makes this call lazy; nothing runs until dask.compute is called.
    return generate_image(prompt)


def parallel_generate(prompt):
    # dask.compute returns a tuple of results, one per task, so take the first.
    image = dask.compute(dask_generate(prompt))[0]
    return image
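

# A single delayed task gains nothing from Dask; the parallelism shows up when
# several tasks are computed together. The helper below is a hedged sketch of that
# idea, not part of the original app: generate_batch and the threaded scheduler
# choice are assumptions, and since all tasks share one pipeline instance, any real
# speedup depends on the machine.
def generate_batch(prompts):
    tasks = [dask_generate(p) for p in prompts]
    # compute(*tasks) runs every task and returns one image per prompt.
    images = dask.compute(*tasks, scheduler="threads")
    return list(images)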


iface = gr.Interface(
    fn=parallel_generate,
    inputs=gr.Textbox(label="Prompt", placeholder="Enter your prompt here"),
    outputs=gr.Image(type="pil"),
    title="CPU Optimized Image Generation",
    description="Enter a prompt to generate an image efficiently using CPU optimization.",
)


if __name__ == "__main__":
    iface.launch()