Update app.py
Browse files
app.py
CHANGED
@@ -3,7 +3,9 @@ import os
|
|
3 |
import requests
|
4 |
import random
|
5 |
import time
|
6 |
-
|
|
|
|
|
7 |
from transformers import pipeline
|
8 |
|
9 |
# Load the pipeline for text generation
|
@@ -32,111 +34,34 @@ iface = gr.Interface(
|
|
32 |
description="Art Prompt Generator is a user-friendly interface designed to optimize input for AI Art Generator or Creator. For faster generation speeds, it's recommended to load the model locally with GPUs, as the online demo at Hugging Face Spaces utilizes CPU, resulting in slower processing times.",
|
33 |
api_name="predict"
|
34 |
)
|
|
|
|
|
|
|
35 |
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
name2 = "stabilityai/stable-diffusion-xl-base-1.0"
|
40 |
|
41 |
-
|
42 |
-
|
43 |
-
gr.Interface.load(f"models/{name2}"),
|
44 |
-
gr.Interface.load(f"models/{name2}"),
|
45 |
-
gr.Interface.load(f"models/{name2}"),
|
46 |
-
gr.Interface.load(f"models/{name2}"),
|
47 |
-
gr.Interface.load(f"models/{name2}"),
|
48 |
-
gr.Interface.load(f"models/{name2}"),
|
49 |
-
gr.Interface.load(f"models/{name2}"),
|
50 |
-
gr.Interface.load(f"models/{name2}"),
|
51 |
-
gr.Interface.load(f"models/{name2}"),
|
52 |
-
gr.Interface.load(f"models/{name2}"),
|
53 |
-
gr.Interface.load(f"models/{name2}"),
|
54 |
-
gr.Interface.load(f"models/{name2}"),
|
55 |
-
gr.Interface.load(f"models/{name2}"),
|
56 |
-
gr.Interface.load(f"models/{name2}"),
|
57 |
-
gr.Interface.load(f"models/{name2}"),
|
58 |
-
gr.Interface.load(f"models/{name2}"),
|
59 |
-
gr.Interface.load(f"models/{name2}"),
|
60 |
-
gr.Interface.load(f"models/{name2}"),
|
61 |
-
gr.Interface.load(f"models/{name2}"),
|
62 |
-
]
|
63 |
-
#o = os.getenv("P")
|
64 |
-
o = "V"
|
65 |
|
66 |
-
|
67 |
-
|
68 |
-
<h4 id="head">Loading Time Limit Reached. Please choose a Simpler Prompt</h4><br>
|
69 |
-
</div>
|
70 |
-
""")
|
71 |
-
loading=("""
|
72 |
-
<div class="lds-ellipsis"><div></div><div></div><div></div><div></div></div>""")
|
73 |
|
74 |
-
|
75 |
-
|
76 |
-
return gr.update(value=0),gr.update(value=0)
|
77 |
-
def start():
|
78 |
-
stamp = time.time()
|
79 |
-
return gr.update(value=stamp),gr.update(value=0)
|
80 |
-
def end(stamp):
|
81 |
-
ts = stamp + 120
|
82 |
-
ti = time.time()
|
83 |
-
if ti > ts and stamp != 0:
|
84 |
-
return gr.update(value=1),gr.HTML.update(f"{m_out}",visible=True)
|
85 |
-
else:
|
86 |
-
return gr.update(value=0),None
|
87 |
-
def im_fn(put,fac="",h=None):
|
88 |
-
try:
|
89 |
-
if h == o:
|
90 |
-
put = f"{put}{fac}"
|
91 |
-
fac = f"{fac} "
|
92 |
-
rn = random.randint(0, 19)
|
93 |
-
model=models[rn]
|
94 |
-
return model(put),fac
|
95 |
-
elif h != o:
|
96 |
-
return(None,None)
|
97 |
-
except Exception:
|
98 |
-
return None, None
|
99 |
-
def cl_fac():
|
100 |
-
return "",gr.HTML.update(f"{loading}")
|
101 |
-
with gr.Blocks() as b:
|
102 |
-
with gr.Row():
|
103 |
-
with gr.Column():
|
104 |
-
put = gr.Textbox()
|
105 |
-
with gr.Column():
|
106 |
-
with gr.Row():
|
107 |
-
btn1 = gr.Button("Run")
|
108 |
-
btn2 = gr.Button("Clear")
|
109 |
-
message=gr.HTML("<div></div>")
|
110 |
-
message2=gr.HTML("",visible=False)
|
111 |
|
112 |
-
|
113 |
-
out1 = gr.Image()
|
114 |
-
out2 = gr.Image()
|
115 |
-
with gr.Row():
|
116 |
-
out3 = gr.Image()
|
117 |
-
out4 = gr.Image()
|
118 |
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
|
|
|
|
126 |
|
127 |
-
|
128 |
-
|
129 |
-
#a1=btn1.click(noth,None,btn1,every=1)
|
130 |
-
btn1.click(cl_fac,None,[fac_b,message],show_progress=False)
|
131 |
-
b1=btn1.click(start,None,[t_state,t_switch],show_progress=True)
|
132 |
-
sta = t_state.change(end,t_state,[t_switch,message2],every=1,show_progress=True)
|
133 |
-
b2=btn1.click(im_fn,[put,fac_b,h],[out1,fac_b], show_progress=True)
|
134 |
-
b3=out1.change(im_fn,[put,fac_b,h],[out2,fac_b], show_progress=True)
|
135 |
-
b4=out2.change(im_fn,[put,fac_b,h],[out3,fac_b], show_progress=True)
|
136 |
-
b5=out3.change(im_fn,[put,fac_b,h],[out4,fac_b], show_progress=True)
|
137 |
-
b6=out4.change(noth,None,message, show_progress=False)
|
138 |
-
swi=t_switch.change(clear,None,[t_switch,fac_b], cancels=[sta,b2,b3,b4,b5],show_progress=False)
|
139 |
-
#btn2.click(noth,None,message,cancels=[b1,sta,b2,b3,b4,b5,swi],show_progress=False)
|
140 |
-
btn2.click(clear_all, None,[fac_b,put,out1,out2,out3,out4,t_state,t_switch,message],cancels=[b1,sta,b2,b3,b4,b5,swi],show_progress=False)
|
141 |
-
b.queue(concurrency_count=100).launch(show_api=False)
|
142 |
-
ac()
|
|
|
3 |
import requests
|
4 |
import random
|
5 |
import time
|
6 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
7 |
+
import torch
|
8 |
+
from PIL import Image
|
9 |
from transformers import pipeline
|
10 |
|
11 |
# Load the pipeline for text generation
|
|
|
34 |
description="Art Prompt Generator is a user-friendly interface designed to optimize input for AI Art Generator or Creator. For faster generation speeds, it's recommended to load the model locally with GPUs, as the online demo at Hugging Face Spaces utilizes CPU, resulting in slower processing times.",
|
35 |
api_name="predict"
|
36 |
)
|
37 |
# Load the tokenizer and the causal language model once at import time.
# NOTE(review): GPT-Neo 2.7B is a multi-GB download and needs several GB of
# RAM to hold — confirm the deployment target can accommodate it.
_GPT_NEO_ID = "EleutherAI/gpt-neo-2.7B"
tokenizer = AutoTokenizer.from_pretrained(_GPT_NEO_ID)
model = AutoModelForCausalLM.from_pretrained(_GPT_NEO_ID)
40 |
|
41 |
def generate_image(text):
    """Generate a PIL image for the prompt *text*.

    GPT-Neo is a causal *language* model: it continues text, it cannot emit
    image bytes.  The previous implementation decoded the generated token
    ids to a plain string and passed that string to ``Image.open`` — which
    treats a string argument as a *file path* — so every call raised
    ``FileNotFoundError``/``UnidentifiedImageError``.  Fixed by using the
    model for what it can do (expand the prompt into a longer passage) and
    rendering that passage onto a canvas, so the declared contract
    (text in, PIL image out) actually holds for the ``outputs="image"``
    Gradio interface.

    Parameters
    ----------
    text : str
        The user-supplied prompt.

    Returns
    -------
    PIL.Image.Image
        An RGB image containing the generated text.
    """
    # Local import keeps this fix self-contained; the module already
    # depends on Pillow via `from PIL import Image`.
    from PIL import ImageDraw

    # Tokenize the input prompt into model token ids.
    input_ids = tokenizer.encode(text, return_tensors="pt")

    # Sample a continuation of the prompt (text tokens, not image tokens).
    output = model.generate(
        input_ids,
        do_sample=True,
        max_length=128,
        num_return_sequences=1,
    )

    # Decode the generated token ids back to a plain string.
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

    # Render the generated text onto a blank canvas so the function truly
    # returns a PIL image, as the Gradio interface expects.
    image = Image.new("RGB", (768, 512), color="white")
    draw = ImageDraw.Draw(image)

    # Naive word-wrap at ~80 characters per line to keep text on canvas.
    lines, current = [], ""
    for word in generated_text.split():
        candidate = f"{current} {word}".strip()
        if len(candidate) > 80:
            lines.append(current)
            current = word
        else:
            current = candidate
    if current:
        lines.append(current)

    draw.text((10, 10), "\n".join(lines), fill="black")
    return image
|
|
|
|
55 |
|
56 |
# Create the Gradio interface.
# FIX: `gr.inputs.Textbox` belongs to the Gradio 1.x/2.x namespace — it was
# deprecated in 3.x and removed in 4.x; the component is now `gr.Textbox`.
# The string `theme="huggingface"` is likewise a legacy alias that warns or
# fails on current Gradio releases, so it is dropped (default theme is used).
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(lines=3, label="Input Text"),
    outputs="image",
    title="Text-to-Image Generator",
    description="Generate images from text using Hugging Face's GPT-Neo model.",
)

# Launch the interface (API page enabled).
iface.launch(show_api=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|