Neo Anderson committed
Commit · 6ce788a
1 Parent(s): 85eb75c
hotdog
app.py CHANGED
@@ -1,64 +1,43 @@
 import gradio as gr
-# from diffusers import DiffusionPipeline
 from diffusers import AutoPipelineForText2Image
 import torch
 
-
-
-
-
-    torch_dtype = torch.float16
+if torch.backends.mps.is_available():
+    device = "mps"
+elif torch.cuda.is_available():
+    device = "cuda"
 else:
-
+    device = "cpu"
+
+torch_dtype = torch.float32 if device == "cpu" else torch.float16
 
-
+model_repo_id = "stabilityai/sdxl-turbo"
 pipe = AutoPipelineForText2Image.from_pretrained(model_repo_id, torch_dtype=torch_dtype, variant="fp16")
 pipe = pipe.to(device)
 
 def infer(prompt):
-    # generator = torch.Generator().manual_seed(0)
     num_inference_steps = 2
     guidance_scale = 0.0
     width = 512
     height = 512
     image = pipe(
         prompt=prompt,
-        # negative_prompt=negative_prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         width=width,
         height=height,
-        # generator=generator,
     ).images[0]
 
     return image
 
 
 examples = [
-    "A cinematic shot of a baby racoon wearing an intricate
-    "
-    "
-    "A
+    "A cinematic shot of a baby racoon wearing an intricate kungfu master robe.",
+    "A cartoonish drawing of a tiger with a rainbow mane.",
+    "A realistic painting of a futuristic cityscape with flying cars.",
+    "A watercolor painting of a whimsical fairy in a mystical forest.",
 ]
 
-# prompt = gr.Text(
-#     label="Prompt",
-#     show_label=False,
-#     max_lines=1,
-#     placeholder="Enter your prompt",
-#     container=False,
-# )
-# run_button = gr.Button("Run", scale=0, variant="primary")
-# result = gr.Image(label="Result", show_label=False)
-# gr.on(
-#     triggers=[run_button.click, prompt.submit],
-#     fn=infer,
-#     inputs=[
-#         prompt,
-#     ],
-#     outputs=[result],
-# )
-
 demo = gr.Interface(
     fn=infer,
     inputs=gr.Textbox(
@@ -70,7 +49,7 @@ demo = gr.Interface(
     title="Text-to-Image",
     description="Generate images from text prompts.",
     theme="compact",
-
+    examples=examples,
 )
 
 if __name__ == "__main__":
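
For reference, below is a minimal runnable sketch of app.py as it reads after this commit, assembled from the lines shown in the diff. The gr.Textbox arguments, the outputs component, and the final demo.launch() call are assumptions, since the diff skips the lines between inputs=gr.Textbox( and title="Text-to-Image" and ends before the body of the __main__ block.

# Minimal sketch of the post-commit app.py, assembled from the diff above.
# The gr.Textbox arguments, the outputs component, and the launch() call are
# assumptions: the diff does not show those lines.
import gradio as gr
from diffusers import AutoPipelineForText2Image
import torch

# Prefer Apple Silicon (MPS), then CUDA, then fall back to CPU.
if torch.backends.mps.is_available():
    device = "mps"
elif torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

# Use fp32 on CPU, where half precision is poorly supported.
torch_dtype = torch.float32 if device == "cpu" else torch.float16

model_repo_id = "stabilityai/sdxl-turbo"
pipe = AutoPipelineForText2Image.from_pretrained(model_repo_id, torch_dtype=torch_dtype, variant="fp16")
pipe = pipe.to(device)

def infer(prompt):
    # SDXL-Turbo is tuned for very few steps and no classifier-free guidance.
    num_inference_steps = 2
    guidance_scale = 0.0
    width = 512
    height = 512
    image = pipe(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
    ).images[0]
    return image

examples = [
    "A cinematic shot of a baby racoon wearing an intricate kungfu master robe.",
    "A cartoonish drawing of a tiger with a rainbow mane.",
    "A realistic painting of a futuristic cityscape with flying cars.",
    "A watercolor painting of a whimsical fairy in a mystical forest.",
]

demo = gr.Interface(
    fn=infer,
    inputs=gr.Textbox(label="Prompt", placeholder="Enter your prompt"),  # assumed arguments
    outputs=gr.Image(label="Result"),  # assumed; not shown in the diff
    title="Text-to-Image",
    description="Generate images from text prompts.",
    theme="compact",
    examples=examples,
)

if __name__ == "__main__":
    demo.launch()  # assumed; the diff ends before this line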