Upload app.py
app.py CHANGED
@@ -94,7 +94,7 @@ def upscale(samples, upscale_method, scale_by):
 
 def check_inputs(prompt: str, control_image: Image.Image):
     if control_image is None:
-        raise gr.Error("Please select or upload
+        raise gr.Error("Please select or upload a photo of a person.")
     if prompt is None or prompt == "":
         raise gr.Error("Prompt is required")
 
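Note: `check_inputs` gates the event chains added below by raising `gr.Error`, which Gradio surfaces as a modal and which fails the event so any `.success()` steps attached after it are skipped. A minimal, self-contained sketch of that validation pattern (the component names here are illustrative, not taken from app.py):

```python
import gradio as gr

def check_inputs(prompt, image):
    # Raising gr.Error shows the message in the UI and fails the event,
    # so callbacks chained with .success() never run.
    if image is None:
        raise gr.Error("Please select or upload an image.")
    if not prompt:
        raise gr.Error("Prompt is required")

def generate(prompt, image):
    # Placeholder for the real inference step; only reached if validation passed.
    return image

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    image = gr.Image(type="pil", label="Input")
    output = gr.Image(label="Output")
    run = gr.Button("Run")
    run.click(check_inputs, inputs=[prompt, image], queue=False).success(
        generate, inputs=[prompt, image], outputs=output
    )

if __name__ == "__main__":
    demo.launch()
```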
@@ -186,4 +186,89 @@ def inference(
         },
     )
 
-    return out_image["images"][0], gr.update(visible=True), gr.update(visible=True), my_seed
+    return out_image["images"][0], gr.update(visible=True), gr.update(visible=True), my_seed
+
+with gr.Blocks() as app:
+    gr.Markdown(
+        '''
+        <center><h1>Core Ultra Heroes</h1>
+        <span style="font-size:16px;">Turn yourself into an AI-powered superhero!</span>
+        </center>
+
+        '''
+    )
+    state_img_input = gr.State()
+    state_img_output = gr.State()
+    with gr.Row():
+        with gr.Column():
+            control_image = gr.Image(label="Provide a photo of yourself", type="pil", elem_id="control_image")
+            # controlnet_conditioning_scale = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.8, label="Illusion strength", elem_id="illusion_strength", info="ControlNet conditioning scale")
+            prompt = gr.Textbox(label="Prompt", elem_id="prompt", info="Type what you want to generate", placeholder="Medieval village scene with busy streets and castle in the distance")
+            negative_prompt = gr.Textbox(label="Negative Prompt", info="Type what you don't want to see", value="low quality", elem_id="negative_prompt")
+            with gr.Accordion(label="Advanced Options", open=False):
+                guidance_scale = gr.Slider(minimum=0.0, maximum=50.0, step=0.25, value=7.5, label="Guidance Scale")
+                sampler = gr.Dropdown(choices=list(SAMPLER_MAP.keys()), value="Euler")
+                control_start = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0, label="Start of ControlNet")
+                control_end = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="End of ControlNet")
+                strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="Strength of the upscaler")
+                seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=-1, label="Seed", info="-1 means random seed")
+                used_seed = gr.Number(label="Last seed used", interactive=False)
+            run_btn = gr.Button("Run")
+        with gr.Column():
+            result_image = gr.Image(label="You're a hero!", interactive=False, elem_id="output")
+
+    controlnet_conditioning_scale = 0.5
+
+    prompt.submit(
+        check_inputs,
+        inputs=[prompt, control_image],
+        queue=False
+    ).success(
+        convert_to_pil,
+        inputs=[control_image],
+        outputs=[state_img_input],
+        queue=False,
+        preprocess=False,
+    ).success(
+        inference,
+        inputs=[state_img_input, prompt, negative_prompt, guidance_scale, control_start, control_end, strength, seed, sampler],
+        outputs=[state_img_output, result_image, used_seed]
+    ).success(
+        convert_to_base64,
+        inputs=[state_img_output],
+        outputs=[result_image],
+        queue=False,
+        postprocess=False
+    )
+    run_btn.click(
+        check_inputs,
+        inputs=[prompt, control_image],
+        queue=False
+    ).success(
+        convert_to_pil,
+        inputs=[control_image],
+        outputs=[state_img_input],
+        queue=False,
+        preprocess=False,
+    ).success(
+        inference,
+        inputs=[state_img_input, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
+        outputs=[state_img_output, result_image, share_group, used_seed]
+    ).success(
+        convert_to_base64,
+        inputs=[state_img_output],
+        outputs=[result_image],
+        queue=False,
+        postprocess=False
+    )
+
+with gr.Blocks(css=css) as app_with_history:
+    with gr.Tab("Demo"):
+        app.render()
+    with gr.Tab("Past generations"):
+        user_history.render()
+
+app_with_history.queue(max_size=20, api_open=False)
+
+if __name__ == "__main__":
+    app_with_history.launch(max_threads=400)
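Note: the tail of the file wraps the demo in a second `Blocks` so a history tab can sit next to it, using `Blocks.render()` to re-render the inner app inside a tab. A rough sketch of that composition pattern, with placeholder content standing in for `app` and `user_history` (both defined elsewhere in the Space):

```python
import gradio as gr

# Inner UI, defined once and re-rendered inside the outer layout.
with gr.Blocks() as app:
    gr.Markdown("Demo UI goes here")

# Stand-in for the user_history module the Space imports.
with gr.Blocks() as history:
    gr.Markdown("Past generations would be listed here")

with gr.Blocks() as app_with_history:
    with gr.Tab("Demo"):
        app.render()           # re-render the inner Blocks inside this tab
    with gr.Tab("Past generations"):
        history.render()

# Queue requests with a bounded backlog; api_open=False keeps direct API calls
# from bypassing the queue.
app_with_history.queue(max_size=20, api_open=False)

if __name__ == "__main__":
    app_with_history.launch()
```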