djrana committed on
Commit
82f7a21
1 Parent(s): 90541a0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +137 -44
app.py CHANGED
@@ -1,46 +1,139 @@
1
- import json
2
- import requests
3
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
def generate_image(prompt, negative_prompt, width, height, samples, num_inference_steps, safety_checker, enhance_prompt, seed, guidance_scale, multi_lingual, panorama, self_attention, upscale, embeddings, lora, webhook, track_id):
    """POST a text2img request to the ModelsLab API and return the raw response body.

    All parameters are forwarded verbatim into the JSON payload. The API key is
    read from the MODELSLAB_API_KEY environment variable.

    Returns:
        str: the HTTP response body (a JSON string produced by the API).
    """
    import os  # local import so the module's import header stays unchanged

    url = "https://modelslab.com/api/v6/images/text2img"

    payload = json.dumps({
        # SECURITY FIX: a live API key used to be hard-coded here, leaking the
        # credential into version control. Read it from the environment instead
        # (the leaked key should be revoked and rotated).
        "key": os.environ.get("MODELSLAB_API_KEY", ""),
        "model_id": "juggernaut-xl-v8",
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "width": width,
        "height": height,
        "samples": samples,
        "num_inference_steps": num_inference_steps,
        "safety_checker": safety_checker,
        "enhance_prompt": enhance_prompt,
        "seed": seed,
        "guidance_scale": guidance_scale,
        "multi_lingual": multi_lingual,
        "panorama": panorama,
        "self_attention": self_attention,
        "upscale": upscale,
        "embeddings": embeddings,
        "lora": lora,
        "webhook": webhook,
        "track_id": track_id
    })

    headers = {'Content-Type': 'application/json'}

    # requests.post is the idiomatic shorthand for requests.request("POST", ...)
    response = requests.post(url, headers=headers, data=payload)

    return response.text

# Interface
# BUG FIX: the original inputs list had 19 entries for the 18 parameters of
# generate_image; it now has exactly 18 ("number" in position 10 lines up
# with guidance_scale).
iface = gr.Interface(fn=generate_image,
                     inputs=["text"] * 9 + ["number"] + ["text"] * 8,
                     outputs="text",
                     title="Text to Image Generation",
                     description="Generate an image based on text prompts.",
                     article="Enter your prompts and settings and click 'Generate Image'.")
iface.launch()
 
 
 
1
  import gradio as gr
2
+ import os
3
+ import requests
4
+ import random
5
+ import time
6
+
7
+ from transformers import pipeline
8
+
9
# Load the pipeline for text generation.
# Per the model id, this is a GPT-2 checkpoint fine-tuned as a Stable
# Diffusion prompt generator; the weights are downloaded from the Hugging
# Face Hub on first run, so module import can be slow and needs network.
pipe = pipeline(
    "text-generation",
    model="Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator",
    tokenizer="gpt2"
)

# Initialize a list to store the history of generated prompts.
# NOTE(review): module-level mutable state — grows unbounded for the process
# lifetime and is shared across all concurrent users of the app; it is also
# never read anywhere in this file. Confirm it is intentional.
history = []
18
+
19
# Function to generate text based on input prompt and record the history
def generate_text(prompt, max_length=77):
    """Expand *prompt* into a longer, Stable-Diffusion-style art prompt.

    Args:
        prompt: Seed text to expand.
        max_length: Maximum total token length of the generated text.
            Defaults to 77 (previously hard-coded; presumably chosen to match
            the CLIP text-encoder limit — confirm before changing).

    Returns:
        The generated prompt string.

    Side effects:
        Appends {"prompt": ..., "generated_text": ...} to the module-level
        ``history`` list.
    """
    generated_text = pipe(prompt, max_length=max_length)[0]["generated_text"]
    # Record the prompt and its result so the session can be inspected later.
    history.append({"prompt": prompt, "generated_text": generated_text})
    return generated_text
25
+
26
# Create a Gradio interface with history recording.
# NOTE(review): this Interface is constructed but never launched — the app
# actually serves the Blocks UI built in ac() below. Confirm this is intended
# (it may exist only to expose generate_text).
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=5, label="Prompt"),
    outputs=gr.Textbox(label="Output", show_copy_button=True),
    title="AI Art Prompt Generator",
    description="Art Prompt Generator is a user-friendly interface designed to optimize input for AI Art Generator or Creator. For faster generation speeds, it's recommended to load the model locally with GPUs, as the online demo at Hugging Face Spaces utilizes CPU, resulting in slower processing times.",
    # NOTE(review): `api_name` is documented as an event-listener argument in
    # Gradio, not a gr.Interface constructor parameter — verify it has any
    # effect on the installed Gradio version.
    api_name="predict"
)
35
+
36
# Hosted model used for the actual image generation.
name2 = "stabilityai/stable-diffusion-xl-base-1.0"

# Twenty independently-loaded handles to the same hosted model; im_fn() below
# picks one at random with randint(0, 19), so the list length must stay 20.
# A comprehension replaces twenty copy-pasted gr.Interface.load(...) lines;
# behavior is unchanged — it still performs twenty separate load calls, each
# returning its own interface object.
models = [gr.Interface.load(f"models/{name2}") for _ in range(20)]
60
#o = os.getenv("P")
# Gate token: im_fn() only generates an image when the hidden textbox `h`
# matches this value. Originally read from the "P" environment variable
# (commented out above); now fixed to "V".
o = "V"

# HTML banner shown when a run exceeds the 120-second budget enforced in ac().
m_out = ("""
<div id="restart">
<h4 id="head">Loading Time Limit Reached. Please choose a Simpler Prompt</h4><br>
</div>
""")
# Spinner markup displayed while a generation request is in flight
# (styled by the "lds-ellipsis" CSS class).
loading=("""
<div class="lds-ellipsis"><div></div><div></div><div></div><div></div></div>""")
70
+
71
# Build and launch the image-generation UI (Gradio Blocks).
# NOTE(review): indentation below is reconstructed from a whitespace-mangled
# diff view — confirm nesting against the deployed file.
def ac():
    # Reset both the timeout switch and the prompt-suffix textbox to 0.
    def clear():
        return gr.update(value=0), gr.update(value=0)

    # Stamp the current wall-clock time into t_state and zero t_switch.
    def start():
        stamp = time.time()
        return gr.update(value=stamp), gr.update(value=0)

    # Polled every second via t_state.change(..., every=1): once 120 s have
    # elapsed since `stamp`, flip the switch to 1 and show the timeout banner.
    def end(stamp):
        ts = stamp + 120  # 120-second budget per run
        ti = time.time()
        if ti > ts and stamp != 0:
            return gr.update(value=1), gr.HTML.update(f"{m_out}", visible=True)
        else:
            return gr.update(value=0), None

    # Generate one image. `put` is the user prompt, `fac` a growing run of
    # spaces appended so each of the four chained calls sends a slightly
    # different prompt (avoids identical cached results), `h` the gate token.
    def im_fn(put, fac="", h=None):
        try:
            if h == o:  # only run when the hidden token matches the gate
                put = f"{put}{fac}"
                fac = f"{fac} "  # lengthen the suffix for the next call
                rn = random.randint(0, 19)  # pick one of the 20 model handles
                model = models[rn]
                return model(put), fac
            elif h != o:
                return (None, None)
        except Exception:
            # Best-effort: any model/API failure yields empty outputs rather
            # than surfacing an error in the UI.
            return None, None

    # Clear the suffix textbox and show the loading spinner.
    def cl_fac():
        return "", gr.HTML.update(f"{loading}")

    with gr.Blocks() as b:
        with gr.Row():
            with gr.Column():
                put = gr.Textbox()  # user prompt input
            with gr.Column():
                with gr.Row():
                    btn1 = gr.Button("Run")
                    btn2 = gr.Button("Clear")
        message = gr.HTML("<div></div>")       # spinner / status area
        message2 = gr.HTML("", visible=False)  # timeout banner (hidden until triggered)

        # Four output slots filled one after another by the .change cascade below.
        with gr.Row():
            out1 = gr.Image()
            out2 = gr.Image()
        with gr.Row():
            out3 = gr.Image()
            out4 = gr.Image()

        # Hidden state components (row is invisible to the user).
        with gr.Row(visible=False):
            h = gr.Textbox(value="V")  # gate token compared against `o` in im_fn
            t_state = gr.Number()      # start timestamp set by start()
            # NOTE(review): an int in a Textbox — end()/clear compare/assign it
            # as a number; confirm Gradio's value coercion on this version.
            t_switch = gr.Textbox(value=0)

            # Reset every visible component and force the switch to 1 so the
            # cancel wiring below tears down any in-flight cascade.
            def clear_all():
                return "", None, None, None, None, None, None, 1, gr.HTML.update("<div></div>")
            fac_b = gr.Textbox(value="", visible=False)  # prompt-suffix carrier

            # Blank out the status area.
            def noth():
                return gr.HTML.update("<div></div>")
        #a1=btn1.click(noth,None,btn1,every=1)
        # Run: show spinner, start the timer, then kick off the first image.
        btn1.click(cl_fac, None, [fac_b, message], show_progress=False)
        b1 = btn1.click(start, None, [t_state, t_switch], show_progress=True)
        # Timeout watchdog, re-evaluated every second while t_state is set.
        sta = t_state.change(end, t_state, [t_switch, message2], every=1, show_progress=True)
        # Cascade: each finished image triggers generation of the next one.
        b2 = btn1.click(im_fn, [put, fac_b, h], [out1, fac_b], show_progress=True)
        b3 = out1.change(im_fn, [put, fac_b, h], [out2, fac_b], show_progress=True)
        b4 = out2.change(im_fn, [put, fac_b, h], [out3, fac_b], show_progress=True)
        b5 = out3.change(im_fn, [put, fac_b, h], [out4, fac_b], show_progress=True)
        b6 = out4.change(noth, None, message, show_progress=False)
        # When the watchdog flips the switch, cancel the watchdog and cascade.
        swi = t_switch.change(clear, None, [t_switch, fac_b], cancels=[sta, b2, b3, b4, b5], show_progress=False)
        #btn2.click(noth,None,message,cancels=[b1,sta,b2,b3,b4,b5,swi],show_progress=False)
        # Clear: reset everything and cancel all pending events.
        btn2.click(clear_all, None, [fac_b, put, out1, out2, out3, out4, t_state, t_switch, message], cancels=[b1, sta, b2, b3, b4, b5, swi], show_progress=False)
    # NOTE(review): queue(concurrency_count=...) was removed in Gradio 4 —
    # this call implies a Gradio 3.x runtime; confirm the pinned version.
    b.queue(concurrency_count=100).launch(show_api=False)
ac()