Update app.py
app.py CHANGED
@@ -15,12 +15,12 @@ def genie (Prompt, scale, steps, Seed):
     generator = torch.Generator(device=device).manual_seed(Seed)
     #images = pipe(prompt, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
     low_res_latents = pipe(Prompt, num_inference_steps=steps, guidance_scale=scale, generator=generator, output_type="latent").images
-    upscaled_image = upscaler(prompt='', image=low_res_latents, num_inference_steps=
+    upscaled_image = upscaler(prompt='', image=low_res_latents, num_inference_steps=10, guidance_scale=0, generator=generator).images[0]
     return upscaled_image

 gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
     gr.Slider(1, maximum=15, value=10, step=.25, label='Prompt Guidance Scale:', interactive=True),
-    gr.Slider(1, maximum=100, value=
+    gr.Slider(1, maximum=100, value=50, step=1, label='Number of Iterations: 50 is typically fine.'),
     gr.Slider(minimum=1, step=10, maximum=999999999999999999, randomize=True, interactive=True)],
     outputs=gr.Image(label='512x512 Generated Image'),
     title="OpenJourney V4 GPU with SD x2 Upscaler",
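The updated call expects `pipe` to be a base text-to-image pipeline that can emit latents and `upscaler` to be the Stable Diffusion x2 latent upscaler; that setup sits outside this hunk. Below is a minimal sketch of how those objects are presumably created, assuming the diffusers classes `DiffusionPipeline` and `StableDiffusionLatentUpscalePipeline`, a CUDA device, and the model IDs `prompthero/openjourney-v4` and `stabilityai/sd-x2-latent-upscaler` suggested by the app title; the actual app.py may differ.

```python
# Sketch of the assumed setup (not shown in this hunk): a base OpenJourney V4
# pipeline that produces latents, plus the SD x2 latent upscaler.
# Model IDs and device handling are assumptions based on the app title.
import torch
from diffusers import DiffusionPipeline, StableDiffusionLatentUpscalePipeline

device = "cuda"  # assumption: "GPU" in the title implies a CUDA device

pipe = DiffusionPipeline.from_pretrained(
    "prompthero/openjourney-v4", torch_dtype=torch.float16
).to(device)

upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to(device)
```

With that setup, the `low_res_latents` returned by `pipe(..., output_type="latent").images` feed straight into the upscaler's `image=` argument, `guidance_scale=0` lets the upscaler run unconditioned on the empty prompt, and reusing the same `generator` keeps both stages deterministic for a given Seed.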