File size: 5,241 Bytes
1312050
82f7a21
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1312050
82f7a21
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import gradio as gr
import os
import requests
import random
import time

from transformers import pipeline

# Text-generation pipeline that expands a short user prompt into a detailed
# Stable Diffusion prompt.  NOTE(review): the model weights are fetched from
# the Hugging Face hub on first use — startup needs network access.
pipe = pipeline(
    "text-generation",
    model="Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator",
    tokenizer="gpt2"
)

# In-memory record of every (prompt, generated_text) pair produced during this
# process's lifetime.  Not persisted anywhere and grows without bound.
history = []

def generate_text(prompt, max_length=77):
    """Expand ``prompt`` into a detailed art prompt and record the result.

    Args:
        prompt: Seed text entered by the user.
        max_length: Maximum token length of the generated text.  Defaults to
            77, matching the CLIP text-encoder limit used by Stable Diffusion.

    Returns:
        The generated prompt string produced by the GPT-2 pipeline.
    """
    generated_text = pipe(prompt, max_length=max_length)[0]["generated_text"]
    # Keep a session-wide record so generated prompts can be inspected later.
    history.append({"prompt": prompt, "generated_text": generated_text})
    return generated_text

# Web UI for the prompt generator: one multi-line text box in, one copyable
# text box out.  Components are named locally to keep the Interface call flat.
prompt_box = gr.Textbox(lines=5, label="Prompt")
output_box = gr.Textbox(label="Output", show_copy_button=True)

iface = gr.Interface(
    fn=generate_text,
    inputs=prompt_box,
    outputs=output_box,
    title="AI Art Prompt Generator",
    description="Art Prompt Generator is a user-friendly interface designed to optimize input for AI Art Generator or Creator. For faster generation speeds, it's recommended to load the model locally with GPUs, as the online demo at Hugging Face Spaces utilizes CPU, resulting in slower processing times.",
    api_name="predict"
)

name2 = "stabilityai/stable-diffusion-xl-base-1.0"

# Pool of 20 handles to the hosted SDXL model.  im_fn() below picks an index
# with random.randint(0, 19), so the list length must stay 20.  All entries
# are currently identical, so the list is built with a comprehension instead
# of twenty copy-pasted load calls.
models = [gr.Interface.load(f"models/{name2}") for _ in range(20)]
# Access token a hidden textbox must match before any image is generated.
# NOTE(review): originally read from the environment: o = os.getenv("P")
o = "V"

# HTML banner shown when the 120-second generation window elapses.
m_out = """
<div id="restart">
<h4 id="head">Loading Time Limit Reached. Please choose a Simpler Prompt</h4><br>
</div>
"""

# Animated-ellipsis HTML shown while a generation request is in flight.
loading = """
<div class="lds-ellipsis"><div></div><div></div><div></div><div></div></div>"""

def ac():
    """Build and launch the image-generation UI.

    Wires a Gradio Blocks app in which the four image outputs form a chain:
    each finished image fires a .change() event that requests the next one.
    A timer pair (t_state = start timestamp, t_switch = timeout flag, polled
    every second) cancels the chain once 120 seconds have elapsed.
    """
    def clear():
        # Reset the timeout flag and the prompt-suffix accumulator.
        return gr.update(value=0),gr.update(value=0)
    def start():
        # Record the wall-clock start time and clear the timeout flag.
        stamp = time.time()
        return gr.update(value=stamp),gr.update(value=0)
    def end(stamp):
        # Polled every second: after 120 s past `stamp`, set the timeout
        # flag and reveal the "limit reached" banner defined in m_out.
        ts = stamp + 120
        ti = time.time()
        if ti > ts and stamp != 0:
            return gr.update(value=1),gr.HTML.update(f"{m_out}",visible=True)
        else:
            return gr.update(value=0),None
    def im_fn(put,fac="",h=None):
        # Generate one image from a randomly chosen pool entry.  `fac` gains
        # one trailing space per call so each chained .change() event sees a
        # new input value and re-fires; `h` must equal the token `o` or the
        # call is a no-op.  Any model error degrades to (None, None).
        try:
            if h == o:
                put = f"{put}{fac}"
                fac = f"{fac} "
                rn = random.randint(0, 19)
                model=models[rn]
                return model(put),fac
            elif h != o:
                return(None,None)
        except Exception:
            return None, None 
    def cl_fac():
        # Clear the suffix accumulator and show the loading animation.
        return "",gr.HTML.update(f"{loading}")
    with gr.Blocks() as b:
        with gr.Row():
            with gr.Column():
                put = gr.Textbox()     
            with gr.Column():
                with gr.Row():
                    btn1 = gr.Button("Run")
                    btn2 = gr.Button("Clear")
        # message: loading animation / status; message2: hidden timeout banner.
        message=gr.HTML("<div></div>")
        message2=gr.HTML("",visible=False)

        with gr.Row():
            out1 = gr.Image()
            out2 = gr.Image()
        with gr.Row():
            out3 = gr.Image()
            out4 = gr.Image()

        # Hidden state: access token, start timestamp, and timeout flag.
        with gr.Row(visible=False):
            h=gr.Textbox(value="V")
            t_state=gr.Number()
            t_switch=gr.Textbox(value=0)
        def clear_all():
            # Wipe prompt, all four images, timer state, and the status HTML.
            return "",None,None,None,None,None,None,1,gr.HTML.update("<div></div>")
        # Accumulator whose growth re-triggers the chained .change() events.
        fac_b = gr.Textbox(value="", visible=False)

        def noth():
            # Blank out a status HTML component.
            return gr.HTML.update("<div></div>")
          #a1=btn1.click(noth,None,btn1,every=1)
        # Run: show spinner, start the timer, and kick off image 1; each
        # out*.change below requests the next image in the chain.
        btn1.click(cl_fac,None,[fac_b,message],show_progress=False)
        b1=btn1.click(start,None,[t_state,t_switch],show_progress=True)
        sta = t_state.change(end,t_state,[t_switch,message2],every=1,show_progress=True)
        b2=btn1.click(im_fn,[put,fac_b,h],[out1,fac_b], show_progress=True)
        b3=out1.change(im_fn,[put,fac_b,h],[out2,fac_b], show_progress=True)        
        b4=out2.change(im_fn,[put,fac_b,h],[out3,fac_b], show_progress=True)        
        b5=out3.change(im_fn,[put,fac_b,h],[out4,fac_b], show_progress=True)
        b6=out4.change(noth,None,message, show_progress=False)
        # Timeout flag flipped -> cancel the poller and the whole image chain.
        swi=t_switch.change(clear,None,[t_switch,fac_b], cancels=[sta,b2,b3,b4,b5],show_progress=False)
        #btn2.click(noth,None,message,cancels=[b1,sta,b2,b3,b4,b5,swi],show_progress=False)
        btn2.click(clear_all, None,[fac_b,put,out1,out2,out3,out4,t_state,t_switch,message],cancels=[b1,sta,b2,b3,b4,b5,swi],show_progress=False)
    b.queue(concurrency_count=100).launch(show_api=False)
ac()