Update app.py
app.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 import PIL.Image
 from PIL import Image
 import random
-from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL
+from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, StableDiffusionXLPipeline, AutoencoderKL
 from diffusers import DDIMScheduler, EulerAncestralDiscreteScheduler
 #from diffusers.utils import load_image
 
@@ -24,9 +24,14 @@ controlnet = ControlNetModel.from_pretrained(
 
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
-pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+#pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+#    "yodayo-ai/holodayo-xl-2.1",
+#    controlnet=controlnet,
+#    vae=vae,
+#    torch_dtype=torch.float16,
+#)
+pipe = StableDiffusionXLPipeline.from_pretrained(
     "yodayo-ai/holodayo-xl-2.1",
-    controlnet=controlnet,
     vae=vae,
     torch_dtype=torch.float16,
 )
@@ -45,12 +50,12 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     use_image = False
     #image = None
 
-    if use_image :# and image is not None :
-        width, height = image['composite'].size
-        ratio = np.sqrt(1024. * 1024. / (width * height))
-        new_width, new_height = int(width * ratio), int(height * ratio)
-        image = image['composite'].resize((new_width, new_height))
-        print(image)
+    #if use_image :# and image is not None :
+    #    width, height = image['composite'].size
+    #    ratio = np.sqrt(1024. * 1024. / (width * height))
+    #    new_width, new_height = int(width * ratio), int(height * ratio)
+    #    image = image['composite'].resize((new_width, new_height))
+    #    print(image)
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
@@ -58,17 +63,17 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     generator = torch.Generator().manual_seed(seed)
 
     if use_image:
-        output_image = pipe(
-            prompt=prompt + ", masterpiece, best quality, very aesthetic, absurdres",
-            negative_prompt=negative_prompt,
-            image=image,
-            controlnet_conditioning_scale=1.0,
-            guidance_scale=guidance_scale,
-            num_inference_steps=num_inference_steps,
-            width=new_width,
-            height=new_height,
-            generator=generator
-        ).images[0]
+        #output_image = pipe(
+        #    prompt=prompt + ", masterpiece, best quality, very aesthetic, absurdres",
+        #    negative_prompt=negative_prompt,
+        #    image=image,
+        #    controlnet_conditioning_scale=1.0,
+        #    guidance_scale=guidance_scale,
+        #    num_inference_steps=num_inference_steps,
+        #    width=new_width,
+        #    height=new_height,
+        #    generator=generator
+        #).images[0]
     else:
         # If no valid image is provided, generate an image based only on the text prompt
         output_image = pipe(
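
For reference, here is a minimal standalone sketch of the text-to-image path app.py falls back to after this change, assembled from the pieces visible in the diff (the yodayo-ai/holodayo-xl-2.1 checkpoint, the madebyollin/sdxl-vae-fp16-fix VAE, and the keyword arguments of the now commented-out call). The scheduler assignment, the .to("cuda") device placement, and the concrete prompt and parameter values are illustrative assumptions, not part of this commit.

# Sketch only: mirrors the StableDiffusionXLPipeline setup introduced by this commit.
# Prompt text, guidance/steps values, scheduler swap, and .to("cuda") are assumptions.
import torch
from diffusers import AutoencoderKL, EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "yodayo-ai/holodayo-xl-2.1",
    vae=vae,
    torch_dtype=torch.float16,
)
# app.py imports EulerAncestralDiscreteScheduler; wiring it up here is an assumption
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

generator = torch.Generator().manual_seed(42)  # same seeding pattern as infer()
output_image = pipe(
    prompt="1girl, cherry blossoms" + ", masterpiece, best quality, very aesthetic, absurdres",  # quality suffix taken from the diff
    negative_prompt="lowres, bad anatomy, bad hands",
    guidance_scale=7.0,
    num_inference_steps=28,
    width=1024,
    height=1024,
    generator=generator,
).images[0]
output_image.save("output.png")

Note that with the ControlNet branch commented out and use_image hard-coded to False, the controlnet model loaded earlier in app.py is still downloaded but never passed to the pipeline.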
|