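"""Gradio text-to-image demo for the Mahdy225/JAMAL diffusion model.

Loads the pipeline with diffusers in float16 on CUDA and serves a simple
web UI; the CUDA cache is cleared before each new generation to free
memory left over from the previous request.
"""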
from accelerate.utils import write_basic_config
from diffusers import DiffusionPipeline
import torch
import gradio as gr

# Write a default Accelerate config file if one does not already exist.
write_basic_config()
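
# Load the JAMAL pipeline in half precision with safetensors weights and move it to the GPU.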
base = DiffusionPipeline.from_pretrained(
"Mahdy225/JAMAL",
torch_dtype=torch.float16,
use_safetensors=True
)
base.to("cuda")

def text2Image(prompt, steps=50, scale=7, width=1024, height=1024):
    # Free GPU memory still cached from the previous generation before starting a new one.
    torch.cuda.empty_cache()
    image = base(prompt, num_inference_steps=int(steps), guidance_scale=scale,
                 width=int(width), height=int(height),
                 cross_attention_kwargs={"scale": 1}).images[0]
    return image
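
# Hypothetical direct use outside the Gradio UI (prompt text is illustrative only):
#   img = text2Image("a desert city skyline at sunset", steps=30, scale=7.5)
#   img.save("output.png")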

ui = gr.Interface(
    fn=text2Image,
    inputs=[
        gr.Textbox(label="Enter Text Prompt"),
        gr.Slider(minimum=1, maximum=150, value=50, step=1, label="Number of Inference Steps"),
        gr.Slider(minimum=4, maximum=10, value=7, step=0.1, label="Guidance Scale"),
        gr.Number(label="Image Width", value=1024),
        gr.Number(label="Image Height", value=1024),
    ],
    outputs="image",
    title="JAMAL/جمال: Transforming Words into Reality",
)
# ui.launch(share=True)  # uncomment to also expose a public share link
if __name__ == "__main__":
ui.launch()