Placed the VAE and refiner the same as I had on Osiris
Browse files
app.py
CHANGED
@@ -37,24 +37,33 @@ processor = AutoProcessor.from_pretrained("xtuner/llava-llama-3-8b-v1_1-transfor
|
|
37 |
|
38 |
llava_model.generation_config.eos_token_id = 128009
|
39 |
|
40 |
-
# Initialize Stable Diffusion pipelines
|
41 |
-
base = DiffusionPipeline.from_pretrained(
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
)
|
47 |
-
base.to('cuda')
|
48 |
-
|
49 |
-
refiner = DiffusionPipeline.from_pretrained(
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
)
|
57 |
-
refiner.to('cuda')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
58 |
|
59 |
class ModeManager:
|
60 |
def __init__(self):
|
|
|
37 |
|
38 |
llava_model.generation_config.eos_token_id = 128009
|
39 |
|
40 |
+
# # Initialize Stable Diffusion pipelines
|
41 |
+
# base = DiffusionPipeline.from_pretrained(
|
42 |
+
# "stabilityai/stable-diffusion-xl-base-1.0",
|
43 |
+
# torch_dtype=torch.float16,
|
44 |
+
# variant="fp16",
|
45 |
+
# use_safetensors=True,
|
46 |
+
# )
|
47 |
+
# base.to('cuda')
|
48 |
+
|
49 |
+
# refiner = DiffusionPipeline.from_pretrained(
|
50 |
+
# "stabilityai/stable-diffusion-xl-base-1.0",
|
51 |
+
# text_encoder_2=base.text_encoder_2,
|
52 |
+
# vae=base.vae,
|
53 |
+
# torch_dtype=torch.float16,
|
54 |
+
# use_safetensors=True,
|
55 |
+
# variant="fp16",
|
56 |
+
# )
|
57 |
+
# refiner.to('cuda')
|
58 |
+
|
59 |
+
# load both base and refiner
|
60 |
+
# Load both the SDXL base and refiner pipelines in fp16 on the GPU.
# The refiner reuses the base pipeline's second text encoder and VAE so the
# shared components are only loaded into memory once.
base = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
).to('cuda')
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=base.text_encoder_2,
    vae=base.vae,
    torch_dtype=torch.float16,
    # Fixed: was misspelled `use_safetensor=True`, which from_pretrained
    # silently ignores as an unknown kwarg, so safetensors loading was
    # never actually requested for the refiner.
    use_safetensors=True,
    variant="fp16",
).to('cuda')
|
67 |
|
68 |
class ModeManager:
|
69 |
def __init__(self):
|