Update app.py
app.py (CHANGED)
@@ -5,7 +5,7 @@ import modin.pandas as pd
 from PIL import Image
 from diffusers import DiffusionPipeline, StableDiffusionLatentUpscalePipeline
 import random
-
+
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 pipe = DiffusionPipeline.from_pretrained("circulus/canvers-realistic-v3.6", torch_dtype=torch.float16, safety_checker=None)
@@ -18,7 +18,7 @@ upscaler.enable_xformers_memory_efficient_attention()
 
 def genie (Prompt, negative_prompt, height, width, scale, steps, seed, upscale):
 
-    generator = torch.manual_seed(seed)
+    generator = torch.manual_seed(0) if seed == 0 else torch.manual_seed(seed)
 
     if upscale == "Yes":
         image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
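For context, both branches of the changed line still seed torch's global RNG (torch.manual_seed is called whether or not seed == 0). Below is a minimal, hypothetical sketch of an alternative seeding pattern, assuming the standard diffusers pipeline call signature that accepts a generator argument; the generate helper, its default parameters, and the random-seed fallback are illustrative only and are not part of this commit.

import random

import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Same model id as in app.py; everything else in this sketch is illustrative.
pipe = DiffusionPipeline.from_pretrained(
    "circulus/canvers-realistic-v3.6",
    torch_dtype=torch.float16,
    safety_checker=None,
).to(device)

def generate(prompt, negative_prompt="", steps=25, scale=7.0, seed=0):
    # Treat seed == 0 as "pick a random seed"; app.py instead calls
    # torch.manual_seed(...) in both branches of its conditional.
    if seed == 0:
        seed = random.randint(1, 2**32 - 1)
    # A per-call Generator keeps each request reproducible without
    # mutating the global RNG state.
    generator = torch.Generator(device=device).manual_seed(seed)
    return pipe(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=steps,
        guidance_scale=scale,
        generator=generator,
    ).images[0]

Passing an explicit torch.Generator to the pipeline call is one way to keep results reproducible per request while leaving global RNG state untouched; whether that matters here depends on how the Space is used.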