Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
from diffusers import LCMScheduler, AutoPipelineForText2Image,DDPMScheduler
|
3 |
+
from PIL import Image
|
4 |
+
import numpy as np
|
5 |
+
import gradio as gr
|
6 |
+
import gc
|
7 |
+
|
8 |
+
|
9 |
+
def main(prompt):
    """Generate an image from a text prompt using SDXL with a fused LoRA.

    Loads the Stable Diffusion XL base pipeline, swaps in a DDPM scheduler,
    fuses the local LoRA adapter, and runs one text-to-image generation.

    Args:
        prompt: Text description of the image to generate.

    Returns:
        A PIL.Image.Image produced by the diffusion pipeline.
    """
    model_id = "stabilityai/stable-diffusion-xl-base-1.0"
    adapter_id = "lorabase"

    # FIX: the "fp16" variant ships half-precision weights; requesting
    # torch.float32 (as before) mismatches the checkpoint and doubles
    # memory use. float16 matches the variant being downloaded.
    pipe = AutoPipelineForText2Image.from_pretrained(
        model_id, torch_dtype=torch.float16, variant="fp16"
    )
    pipe.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
    pipe.to("cuda")

    # Merge the LoRA weights into the base UNet/text encoders for faster
    # inference (no per-step adapter overhead).
    pipe.load_lora_weights(adapter_id)
    pipe.fuse_lora()

    # FIX: `strength` is an img2img-only argument (valid range [0, 1]);
    # the text2image pipeline __call__ rejects it with a TypeError, so it
    # has been removed.
    image = pipe(prompt=prompt, num_inference_steps=60, guidance_scale=7.0).images[0]

    # The pipeline is rebuilt on every request, so release GPU memory
    # explicitly to keep repeated Gradio calls from accumulating
    # allocations (the original left this cleanup commented out).
    del pipe
    gc.collect()
    torch.cuda.empty_cache()

    return image
|
25 |
+
|
26 |
+
|
27 |
+
# Minimal Gradio front end: a single text box in, a single image out,
# backed by main() above.
iface = gr.Interface(
    fn=main,
    inputs="text",
    outputs="image",
    title="Text to Image Generation",
    description="Generate images based on textual prompts.",
)

# Bind to all network interfaces on port 8433 and request a public
# share link; debug output is disabled.
iface.launch(
    debug=False,
    share=True,
    server_name="0.0.0.0",
    server_port=8433,
)
|
33 |
+
|
34 |
+
|
35 |
+
#interface.launch(server_name="0.0.0.0", server_port=7860)
|
36 |
+
|