import gradio as gr
from diffusers import DiffusionPipeline
from dask import delayed, compute
import os

# Cache Hugging Face downloads under a custom directory.
os.environ['HF_HOME'] = '/blabla/cache/'
|

# Load the text-to-image pipeline once at startup so every request reuses it.
pipe = DiffusionPipeline.from_pretrained("prompthero/openjourney-v4")
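# Optional CPU-side tweak (an assumption, not part of the original script):
# attention slicing reduces peak memory per denoising step, which can help on
# memory-constrained hosts at a small cost in speed. Uncomment to try it.
# pipe.enable_attention_slicing()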


def generate_image(prompt, num_inference_steps=50):
    """
    Generate an image from a text prompt using the diffusion pipeline.

    Lowering num_inference_steps speeds up generation at the cost of some
    image quality.
    """
    image = pipe(prompt, num_inference_steps=num_inference_steps).images[0]
    return image


@delayed
def dask_generate(prompt):
    # Delayed wrapper: calling this only records a task; work runs at compute().
    return generate_image(prompt)


def parallel_generate(prompt):
    # Build four lazy generation tasks for the same prompt; nothing runs yet.
    tasks = [dask_generate(prompt) for _ in range(4)]

    # Execute the task graph on Dask's threaded scheduler so the four
    # generations share the in-process pipeline across worker threads.
    images = compute(*tasks, scheduler="threads", num_workers=4)

    # Keep the original behaviour of returning only the first variant.
    return images[0]


iface = gr.Interface(
    fn=parallel_generate,
    inputs=gr.Textbox(label="Prompt", placeholder="Enter your prompt here"),
    outputs=gr.Image(type="pil"),
    title="Multithreaded CPU Optimized Image Generation",
    description="Enter a prompt to generate an image efficiently using CPU optimization and multithreading."
)


if __name__ == "__main__":
    iface.launch()
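# Deployment note (an assumption, not in the original script): to reach the app
# from other machines, Gradio can bind to all interfaces, e.g.
# iface.launch(server_name="0.0.0.0").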