import gradio as gr
import os
import sys
from pathlib import Path

# Model repos offered in the dropdown; keep this list index-aligned with models2 below.
models = [
    "Yntec/OpenLexica",
    "Yntec/MapleSyrup",
]
current_model = models[0]

# Prompt-extension helper loaded from another Space; calling it with a short prompt
# returns an extended prompt string.
text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")

# Pre-loaded inference interfaces, one per entry in `models` (same order).
models2 = [
    gr.Interface.load(f"models/{models[0]}", live=True, preprocess=False),
    gr.Interface.load(f"models/{models[1]}", live=True, preprocess=False),
]
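# To add another model, append its repo id to `models` and a matching entry to
# `models2` so the two lists stay index-aligned, e.g. (hypothetical repo name):
#   models.append("Yntec/SomeOtherModel")
#   models2.append(gr.Interface.load(f"models/{models[-1]}", live=True, preprocess=False))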

def text_it1(inputs, text_gen1=text_gen1):
    # Forward the user's idea to the MagicPrompt Space and return its extended prompt.
    go_t1 = text_gen1(inputs)
    return go_t1
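# Example (hypothetical output): text_it1("a toy robot") should return an expanded
# prompt along the lines of "a toy robot, highly detailed, ..."; the exact wording
# depends on what the MagicPrompt Space produces.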

def set_model(current_model):
    # The dropdown passes the selected index (type="index"); look up the model name
    # and update the output image's label to match.
    current_model = models[current_model]
    return gr.update(label=f"{current_model}")
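# Note: set_model only relabels the output image with the chosen model's name; the
# actual model switch happens in send_it1 (below), which indexes models2 with the
# same dropdown value.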

def send_it1(inputs, model_choice):
    # Run the prompt through the selected pre-loaded model and return the image.
    proc1 = models2[model_choice]
    output1 = proc1(inputs)
    return output1
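# model_choice arrives as an integer because the dropdown below is declared with
# type="index"; the loaded interface returns the generated image in whatever form
# the underlying pipeline produces (typically a file path or image object,
# depending on the gradio version).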

css = ""

with gr.Blocks(css=css) as myface:
    # Header: page-wide styling plus the title and usage notes.
    gr.HTML("""
        <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
          <style>
            h1 {
              font-size: 6em;
              color: #ffffff;
              margin-top: 30px;
              margin-bottom: 30px;
              text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
            }
            h3 {
              color: #ffaa66 !important;
            }
            h4 {
              color: #ffffff !important;
            }
            .gradio-container {
              background-image: linear-gradient(#8150df, #000000) !important;
              color: #ffaa66 !important;
              font-family: 'IBM Plex Sans', sans-serif !important;
            }
          </style>
          <div class="center"><h1>ToyWorld XL 401</h1></div>
          <h3 style="margin-bottom: 10px; color: #ffaa66;">Top SDXL Models and 401 SD1.5 models for your enjoyment!</h3>
          <div style="margin-bottom: 10px; font-size: 98%;">
            <h4>The first time you load a model it takes 200 seconds.</h4>
            <h4>But after it loads, each image takes 20 seconds to generate!</h4>
          </div>
        </div>
    """)
    with gr.Row():
        with gr.Column(scale=100):
            # type="index" makes the dropdown return the selected position as an int.
            model_name1 = gr.Dropdown(label="Select Model", choices=models, type="index", value=current_model, interactive=True)
    with gr.Row():
        with gr.Column(scale=100):
            magic1 = gr.Textbox(label="Your Prompt", lines=4)
gr.HTML("""<style> .gr-button { |
|
color: white !important; |
|
top:50%; |
|
background-image: linear-gradient(#6d43e4, #8150df) !important; |
|
border-radius: 24px !important; |
|
border: 12px outset #95c200; |
|
padding: 6px 30px; |
|
} |
|
</style>""") |
|
run=gr.Button("Generate Image") |
|
    with gr.Row():
        # Note: the `style` kwarg below is a legacy gradio argument; recent gradio
        # versions ignore or reject it, so the column may simply use default sizing.
        with gr.Column(style="width=800px"):
            output1 = gr.Image(label=f"{current_model}")
    with gr.Row():
        with gr.Column(scale=50):
            input_text = gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2)
            use_short = gr.Button("Use Short Prompt")
            see_prompts = gr.Button("Extend Idea")

    # Pass the raw idea text straight through (used by the "Use Short Prompt" button).
    def short_prompt(inputs):
        return inputs

    # Event wiring: selecting a model relabels the output image.
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
    # Generate an image with the selected model.
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
    # Copy the short idea into the prompt box as-is.
    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
    # Extend the idea via the MagicPrompt Space and place the result in the prompt box.
    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)

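# Note: gr.Interface.load and queue(concurrency_count=...) are gradio 3.x APIs; on
# gradio 4+ they would need replacing (e.g. gr.load and the newer queue arguments),
# so this script appears to target a 3.x install.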
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)