Update app.py
app.py CHANGED
@@ -13,6 +13,9 @@ pipe.enable_xformers_memory_efficient_attention()
 anime = DiffusionPipeline.from_pretrained("circulus/canvers-anime-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-anime-v3.8.1")
 anime = anime.to(device)
 anime.enable_xformers_memory_efficient_attention()
+disney = DiffusionPipeline.from_pretrained("circulus/canvers-disney-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-disney-v3.8.1")
+disney = disney.to(device)
+disney.enable_xformers_memory_efficient_attention()
 refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
 refiner.enable_xformers_memory_efficient_attention()
 refiner = refiner.to(device)
@@ -26,17 +29,24 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
             return image
         else:
             image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
-
+    if Model == "Anime":
         if upscale == "Yes":
             int_image = anime(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
             image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
             return image
         else:
             image = anime(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
+    else:
+        if upscale == "Yes":
+            int_image = disney(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
+            image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
+            return image
+        else:
+            image = disney(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
 
     return image
 
-gr.Interface(fn=genie, inputs=[gr.Radio(['Real', 'Anime'], label='Choose Canvers Model'),
+gr.Interface(fn=genie, inputs=[gr.Radio(['Real', 'Anime', 'Disney'], value='Real', label='Choose Canvers Model'),
     gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
     gr.Slider(512, 1024, 768, step=128, label='Height'),
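
Every pipeline touched by this commit follows the same setup: load in fp16 (with the safety checker disabled for the Canvers checkpoints) when CUDA is available, fall back to a plain load otherwise, move to the device, and enable xformers attention. Below is a minimal sketch of that pattern factored into a hypothetical load_pipeline helper; the helper name and structure are not part of the committed code, and it assumes xformers is installed, as the unconditional calls in the diff already do.

import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

def load_pipeline(repo_id, **gpu_kwargs):
    # GPU path mirrors the diff: fp16 weights plus any extra kwargs (e.g. variant="fp16").
    if torch.cuda.is_available():
        pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16, **gpu_kwargs)
    else:
        # CPU fallback: default full-precision load, as in the diff's else branches.
        pipe = DiffusionPipeline.from_pretrained(repo_id)
    pipe = pipe.to(device)
    pipe.enable_xformers_memory_efficient_attention()  # assumes xformers is installed
    return pipe

anime = load_pipeline("circulus/canvers-anime-v3.8.1", safety_checker=None)
disney = load_pipeline("circulus/canvers-disney-v3.8.1", safety_checker=None)
refiner = load_pipeline("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, variant="fp16")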
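The change to genie adds a third branch that repeats the anime logic with the disney pipeline. As a design note, the same selection can be written once by keying the pipelines on the radio value. This is a hypothetical consolidation sketch, not the committed code: it assumes the 'Real' branch outside the shown hunk uses the same base-then-refiner flow, and it omits the seed handling that happens earlier in the function.

# pipe, anime, disney and refiner are the pipelines set up above.
MODELS = {"Real": pipe, "Anime": anime, "Disney": disney}

def genie(Model, Prompt, negative_prompt, height, width, scale, steps, seed, upscale, high_noise_frac):
    base = MODELS[Model]
    if upscale == "Yes":
        # Base pass produces the intermediate images; the SDXL refiner then resumes
        # denoising from high_noise_frac, exactly as the branches in the diff do.
        int_image = base(Prompt, negative_prompt=negative_prompt, height=height, width=width,
                         num_inference_steps=steps, guidance_scale=scale).images
        image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image,
                        denoising_start=high_noise_frac).images[0]
    else:
        image = base(Prompt, negative_prompt=negative_prompt, height=height, width=width,
                     num_inference_steps=steps, guidance_scale=scale).images[0]
    return image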