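# Gradio demo: text-to-image generation with the John6666/t-ponynai3-v6-sdxl
# checkpoint (diffusers), configured to run on CPU.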
import gradio as gr
from diffusers import DiffusionPipeline
import torch
# Load the pipeline with optimizations for CPU
pipeline = DiffusionPipeline.from_pretrained(
    "John6666/t-ponynai3-v6-sdxl",
    torch_dtype=torch.float32,  # full precision: fp16 kernels are not reliably supported on CPU
    safety_checker=None,        # disable the safety checker for faster performance
).to("cpu")
# Enable attention slicing for memory management
pipeline.enable_attention_slicing()
def generate_image(prompt, negative_prompt, progress=gr.Progress(track_tqdm=True)):
    num_inference_steps = 20  # number of denoising steps
    # track_tqdm=True lets the Gradio progress bar mirror the pipeline's internal
    # tqdm loop, so progress advances with each actual inference step
    image = pipeline(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
    ).images[0]
    return image
# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Text-to-Image Generator with the John6666/t-ponynai3-v6-sdxl model")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Enter your prompt", placeholder="Describe the image you want to generate")
            negative_prompt = gr.Textbox(label="Enter negative prompt", placeholder="Describe what you want to avoid")
            generate_button = gr.Button("Generate")
        with gr.Column():
            output_image = gr.Image(label="Generated Image")
    # Wire the button to generate_image; the progress bar comes from gr.Progress inside the function
    generate_button.click(fn=generate_image, inputs=[prompt, negative_prompt], outputs=output_image)
# Launch the Gradio app
demo.launch()
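# By default Gradio serves the app locally (http://127.0.0.1:7860);
# demo.launch(share=True) would create a temporary public link instead.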