update demo
- .gitattributes +4 -0
- app.py +423 -186
- demo.ipynb +59 -0
- images/.placeholder +1 -0
- images/amelia-watson.png +3 -0
- images/furina.png +3 -0
- images/pastel-style.png +3 -0
- images/ufotable-style.png +3 -0
- lora.toml +35 -0
- lora_diffusers.py +539 -0
- lpw_stable_diffusion_xl.py +1496 -0
- requirements.txt +3 -2
- style.css +35 -0
- utils.py +7 -0
.gitattributes
CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+images/amelia-watson.png filter=lfs diff=lfs merge=lfs -text
+images/furina.png filter=lfs diff=lfs merge=lfs -text
+images/pastel-style.png filter=lfs diff=lfs merge=lfs -text
+images/ufotable-style.png filter=lfs diff=lfs merge=lfs -text
app.py
CHANGED
@@ -4,46 +4,52 @@ from __future__ import annotations
 
 import os
 import random
+import toml
 import gradio as gr
 import numpy as np
 import PIL.Image
 import torch
+import utils
+import gc
+from safetensors.torch import load_file
+import lora_diffusers
+from lora_diffusers import LoRANetwork, create_network_from_weights
+from huggingface_hub import hf_hub_download
 from diffusers.models import AutoencoderKL
+from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler
 
+DESCRIPTION = "Animagine XL"
 if not torch.cuda.is_available():
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+IS_COLAB = utils.is_google_colab()
 MAX_SEED = np.iinfo(np.int32).max
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
+MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
+USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
+ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 
 MODEL = "Linaqruf/animagine-xl"
 
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 if torch.cuda.is_available():
+    pipe = DiffusionPipeline.from_pretrained(
         MODEL,
         torch_dtype=torch.float16,
+        custom_pipeline="lpw_stable_diffusion_xl.py",
         use_safetensors=True,
+        variant="fp16",
+    )
 
     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
     if ENABLE_CPU_OFFLOAD:
         pipe.enable_model_cpu_offload()
     else:
         pipe.to(device)
 
     if USE_TORCH_COMPILE:
+        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
 else:
     pipe = None
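Because the pipeline is now loaded with custom_pipeline="lpw_stable_diffusion_xl.py", prompts accept the attention-weighting syntax documented in that file. A minimal sketch of such a call, assuming the pipe built above on a CUDA machine; the prompt text and weights are illustrative, not taken from the demo:

# "(detailed eyes:1.2)" boosts that phrase, "[blurry]" de-emphasizes it (see parse_prompt_attention below).
image = pipe(
    prompt="masterpiece, best quality, 1girl, (detailed eyes:1.2), [blurry]",
    negative_prompt="lowres, bad anatomy",
    num_inference_steps=50,
    guidance_scale=12.0,
).images[0]
image.save("sample.png")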
@@ -54,197 +60,423 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     return seed
 
 
+def get_image_path(base_path):
+    extensions = [".jpg", ".jpeg", ".png", ".bmp", ".gif"]
+    for ext in extensions:
+        if os.path.exists(base_path + ext):
+            return base_path + ext
+    # If no match is found, return None or raise an error
+    return None
+
+
+def update_selection(selected_state: gr.SelectData):
+    lora_repo = sdxl_loras[selected_state.index]["repo"]
+    lora_weight = sdxl_loras[selected_state.index]["multiplier"]
+    updated_selected_info = f"{lora_repo}"
+    updated_prompt = sdxl_loras[selected_state.index]["sample_prompt"]
+    updated_negative = sdxl_loras[selected_state.index]["sample_negative"]
+
+    return (
+        updated_selected_info,
+        selected_state,
+        lora_weight,
+        updated_prompt,
+        negative_presets_dict.get(updated_negative, ""),
+        updated_negative,
+    )
+
+
+def create_network(text_encoders, unet, state_dict, multiplier, device):
+    network = create_network_from_weights(
+        text_encoders, unet, state_dict, multiplier=multiplier
+    )
+    network.load_state_dict(state_dict)
+    network.to(device, dtype=unet.dtype)
+    network.apply_to(multiplier=multiplier)
+    return network
+
+
+# def backup_sd(state_dict):
+#     for k, v in state_dict.items():
+#         state_dict[k] = v.detach().cpu()
+#     return state_dict
+
+
+def generate(
+    prompt: str,
+    negative_prompt: str = "",
+    prompt_2: str = "",
+    negative_prompt_2: str = "",
+    use_prompt_2: bool = False,
+    seed: int = 0,
+    width: int = 1024,
+    height: int = 1024,
+    target_width: int = 1024,
+    target_height: int = 1024,
+    original_width: int = 4096,
+    original_height: int = 4096,
+    guidance_scale: float = 12.0,
+    num_inference_steps: int = 50,
+    use_lora: bool = False,
+    lora_weight: float = 1.0,
+    set_target_size: bool = False,
+    set_original_size: bool = False,
+    selected_state: str = "",
+) -> PIL.Image.Image:
     generator = torch.Generator().manual_seed(seed)
 
+    network = None  # Initialize to None
+    network_state = {"current_lora": None, "multiplier": None}
+
+    # _unet = pipe.unet.state_dict()
+    # backup_sd(_unet)
+    # _text_encoder = pipe.text_encoder.state_dict()
+    # backup_sd(_text_encoder)
+    # _text_encoder_2 = pipe.text_encoder_2.state_dict()
+    # backup_sd(_text_encoder_2)
+
+    if not set_original_size:
+        original_width = 4096
+        original_height = 4096
+    if not set_target_size:
+        target_width = width
+        target_height = height
+    if negative_prompt == "":
+        negative_prompt = None
     if not use_prompt_2:
+        prompt_2 = None
+        negative_prompt_2 = None
+    if negative_prompt_2 == "":
+        negative_prompt_2 = None
+
+    if use_lora:
+        if not selected_state:
+            raise Exception("You must select a LoRA")
+
+        repo_name = sdxl_loras[selected_state.index]["repo"]
+        full_path_lora = saved_names[selected_state.index]
+        weight_name = sdxl_loras[selected_state.index]["weights"]
+
+        lora_sd = load_file(full_path_lora)
+        text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
+
+        if network_state["current_lora"] != repo_name:
+            network = create_network(
+                text_encoders, pipe.unet, lora_sd, lora_weight, device
+            )
+            network_state["current_lora"] = repo_name
+            network_state["multiplier"] = lora_weight
+
+        elif network_state["multiplier"] != lora_weight:
+            network = create_network(
+                text_encoders, pipe.unet, lora_sd, lora_weight, device
+            )
+            network_state["multiplier"] = lora_weight
+    else:
+        if network:
+            network.unapply_to()
+            network = None
+            network_state = {"current_lora": None, "multiplier": None}
+
+    try:
+        image = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            prompt_2=prompt_2,
+            negative_prompt_2=negative_prompt_2,
+            width=width,
+            height=height,
+            target_size=(target_width, target_height),
+            original_size=(original_width, original_height),
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            generator=generator,
+            output_type="pil",
+        ).images[0]
+
+        if network:
+            network.unapply_to()
+            network = None
+
+        return image
+
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        raise
+
+    finally:
+        # pipe.unet.load_state_dict(_unet)
+        # pipe.text_encoder.load_state_dict(_text_encoder)
+        # pipe.text_encoder_2.load_state_dict(_text_encoder_2)
+
+        # del _unet, _text_encoder, _text_encoder_2
+
+        if network:
+            network.unapply_to()
+            network = None
+
+        if use_lora:
+            del lora_sd, text_encoders
+            gc.collect()
|
222 |
|
223 |
examples = [
|
224 |
+
"face focus, cute, masterpiece, best quality, 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck",
|
225 |
+
"face focus, bishounen, masterpiece, best quality, 1boy, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck",
|
226 |
]
|
227 |
|
228 |
+
negative_presets_dict = {
|
229 |
+
"None": "",
|
230 |
+
"Standard": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry",
|
231 |
+
"Weighted": "(low quality, worst quality:1.2), 3d, watermark, signature, ugly, poorly drawn, bad image",
|
232 |
+
}
|
233 |
+
|
234 |
+
with open("lora.toml", "r") as file:
|
235 |
+
data = toml.load(file)
|
236 |
+
sdxl_loras = [
|
237 |
+
{
|
238 |
+
"image": get_image_path(item["image"]),
|
239 |
+
"title": item["title"],
|
240 |
+
"repo": item["repo"],
|
241 |
+
"weights": item["weights"],
|
242 |
+
"multiplier": item["multiplier"] if "multiplier" in item else "1.0",
|
243 |
+
"sample_prompt": item["sample_prompt"],
|
244 |
+
"sample_negative": item["sample_negative"],
|
245 |
+
}
|
246 |
+
for item in data["data"]
|
247 |
+
]
|
248 |
+
saved_names = [hf_hub_download(item["repo"], item["weights"]) for item in sdxl_loras]
|
249 |
+
|
250 |
+
|
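For clarity: toml.load on a file made of [[data]] tables returns a plain dict whose "data" key is a list of per-entry dicts, which is what the comprehension above iterates over. A small sketch of the expected shape, with values abbreviated from lora.toml further down this page:

import toml

with open("lora.toml") as f:
    data = toml.load(f)
# data == {"data": [{"image": "images/pastel-style",
#                    "title": "Pastel Style",
#                    "repo": "Linaqruf/pastel-anime-xl-lora",
#                    "weights": "pastel-anime-xl-latest.safetensors",
#                    "multiplier": 0.6, ...}, ...]}
first = data["data"][0]
print(first["repo"], first["multiplier"])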
+with gr.Blocks(css="style.css", theme="NoCrypt/[email protected]") as demo:
+    title = gr.HTML(
+        f"""<h1><span>{DESCRIPTION}</span></h1>""",
+        elem_id="title",
+    )
+    gr.Markdown(
+        f"""Gradio demo for [Linaqruf/animagine-xl](https://huggingface.co/spaces/Linaqruf/Animagine-XL)""",
+        elem_id="subtitle",
+    )
+    gr.DuplicateButton(
+        value="Duplicate Space for private use",
+        elem_id="duplicate-button",
+        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+    )
+    selected_state = gr.State()
     with gr.Row():
         with gr.Column(scale=1):
+            with gr.Group():
+                prompt = gr.Text(
+                    label="Prompt",
+                    max_lines=5,
+                    placeholder="Enter your prompt",
+                )
+                negative_prompt = gr.Text(
+                    label="Negative Prompt",
+                    max_lines=5,
+                    placeholder="Enter a negative prompt",
+                    value="lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry",
+                )
+                with gr.Accordion(label="Negative Presets", open=False):
+                    negative_presets = gr.Dropdown(
+                        label="Negative Presets",
+                        show_label=False,
+                        choices=list(negative_presets_dict.keys()),
+                        value="Standard",
+                    )
+
+            with gr.Row():
+                use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
+                use_lora = gr.Checkbox(label="Use LoRA", value=False)
+
+            with gr.Group(visible=False) as prompt2_group:
+                prompt_2 = gr.Text(
+                    label="Prompt 2",
+                    max_lines=5,
+                    placeholder="Enter your prompt",
+                )
+                negative_prompt_2 = gr.Text(
+                    label="Negative prompt 2",
+                    max_lines=5,
+                    placeholder="Enter a negative prompt",
+                )
 
+            with gr.Group(visible=False) as lora_group:
+                selector_info = gr.Text(
+                    label="Selected LoRA",
+                    max_lines=1,
+                    value="No LoRA selected.",
+                )
+                lora_selection = gr.Gallery(
+                    value=[(item["image"], item["title"]) for item in sdxl_loras],
+                    label="Animagine XL LoRA",
+                    show_label=False,
+                    allow_preview=False,
+                    columns=2,
+                    elem_id="gallery",
+                    show_share_button=False,
                 )
+                lora_weight = gr.Slider(
+                    label="Multiplier",
+                    minimum=0,
+                    maximum=1,
+                    step=0.05,
+                    value=1,
                 )
+
+            with gr.Group():
+                with gr.Row():
+                    width = gr.Slider(
+                        label="Width",
+                        minimum=256,
+                        maximum=MAX_IMAGE_SIZE,
+                        step=32,
+                        value=1024,
+                    )
+                    height = gr.Slider(
+                        label="Height",
+                        minimum=256,
+                        maximum=MAX_IMAGE_SIZE,
+                        step=32,
+                        value=1024,
+                    )
+
+            with gr.Accordion(label="Advanced Options", open=False):
+                seed = gr.Slider(
+                    label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0
+                )
+
+                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
                 with gr.Row():
+                    guidance_scale = gr.Slider(
+                        label="Guidance scale",
+                        minimum=1,
+                        maximum=20,
+                        step=0.1,
+                        value=12.0,
                     )
+                    num_inference_steps = gr.Slider(
+                        label="Number of inference steps",
+                        minimum=10,
+                        maximum=100,
+                        step=1,
+                        value=50,
                     )
+                with gr.Group():
+                    with gr.Row():
+                        set_target_size = gr.Checkbox(
+                            label="Target Size", value=False
+                        )
+                        set_original_size = gr.Checkbox(
+                            label="Original Size", value=False
+                        )
+                with gr.Group():
+                    with gr.Row():
+                        original_width = gr.Slider(
+                            label="Original Width",
+                            minimum=1024,
+                            maximum=4096,
+                            step=32,
+                            value=4096,
+                            visible=False,
+                        )
+                        original_height = gr.Slider(
+                            label="Original Height",
+                            minimum=1024,
+                            maximum=4096,
+                            step=32,
+                            value=4096,
+                            visible=False,
+                        )
+                    with gr.Row():
+                        target_width = gr.Slider(
+                            label="Target Width",
+                            minimum=1024,
+                            maximum=4096,
+                            step=32,
+                            value=width.value,
+                            visible=False,
+                        )
+                        target_height = gr.Slider(
+                            label="Target Height",
+                            minimum=1024,
+                            maximum=4096,
+                            step=32,
+                            value=height.value,
+                            visible=False,
                        )
         with gr.Column(scale=2):
             with gr.Blocks():
+                run_button = gr.Button("Generate", variant="primary")
+                result = gr.Image(label="Result", show_label=False)
+
+            gr.Examples(
+                examples=examples,
+                inputs=prompt,
+                outputs=result,
+                fn=generate,
+                cache_examples=CACHE_EXAMPLES,
+            )
+    lora_selection.select(
+        update_selection,
+        outputs=[
+            selector_info,
+            selected_state,
+            lora_weight,
+            prompt,
+            negative_prompt,
+            negative_presets,
+        ],
+        queue=False,
+        show_progress=False,
+    )
     use_prompt_2.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_prompt_2,
+        outputs=prompt2_group,
         queue=False,
         api_name=False,
     )
+    negative_presets.change(
+        fn=lambda x: gr.update(value=negative_presets_dict.get(x, "")),
+        inputs=negative_presets,
+        outputs=negative_prompt,
+        queue=False,
+        api_name=False,
+    )
+    use_lora.change(
         fn=lambda x: gr.update(visible=x),
+        inputs=use_lora,
+        outputs=lora_group,
+        queue=False,
+        api_name=False,
+    )
+    set_target_size.change(
+        fn=lambda x: (gr.update(visible=x), gr.update(visible=x)),
+        inputs=set_target_size,
+        outputs=[target_width, target_height],
+        queue=False,
+        api_name=False,
+    )
+    set_original_size.change(
+        fn=lambda x: (gr.update(visible=x), gr.update(visible=x)),
+        inputs=set_original_size,
+        outputs=[original_width, original_height],
+        queue=False,
+        api_name=False,
+    )
+    width.change(
+        fn=lambda x: gr.update(value=x),
+        inputs=width,
+        outputs=target_width,
+        queue=False,
+        api_name=False,
+    )
+    height.change(
+        fn=lambda x: gr.update(value=x),
+        inputs=height,
+        outputs=target_height,
         queue=False,
         api_name=False,
     )
@@ -262,8 +494,13 @@ with gr.Blocks(css='style.css', theme='NoCrypt/[email protected]') as demo:
        target_height,
        original_width,
        original_height,
+       guidance_scale,
+       num_inference_steps,
+       use_lora,
+       lora_weight,
+       set_target_size,
+       set_original_size,
+       selected_state,
     ]
     prompt.submit(
         fn=randomize_seed_fn,
@@ -275,7 +512,7 @@ with gr.Blocks(css='style.css', theme='NoCrypt/[email protected]') as demo:
         fn=generate,
         inputs=inputs,
         outputs=result,
+        api_name="run",
     )
     negative_prompt.submit(
         fn=randomize_seed_fn,
@@ -326,4 +563,4 @@ with gr.Blocks(css='style.css', theme='NoCrypt/[email protected]') as demo:
         api_name=False,
     )
 
+demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)
demo.ipynb
ADDED
@@ -0,0 +1,59 @@
{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "PeEyOhUDHhzF"
      },
      "outputs": [],
      "source": [
        "import os\n",
        "import subprocess\n",
        "\n",
        "ROOT_DIR = \"/content\"\n",
        "REPO_URL = \"https://huggingface.co/spaces/Linaqruf/Animagine-XL\"\n",
        "REPO_DIR = os.path.join(ROOT_DIR, \"Animagine-XL\")\n",
        "\n",
        "def clone(url, dir, branch=None):\n",
        "    subprocess.run([\"git\", \"clone\", url, dir], check=True)\n",
        "    if branch:\n",
        "        subprocess.run([\"git\", \"checkout\", branch], cwd=dir, check=True)\n",
        "\n",
        "def install_deps(dir):\n",
        "    subprocess.run([\"pip\", \"install\", \"-r\", \"requirements.txt\"], cwd=dir, check=True)\n",
        "\n",
        "def main():\n",
        "    if not os.path.exists(REPO_DIR):\n",
        "        print(f\"Cloning Repository to {REPO_DIR}\")\n",
        "        clone(REPO_URL, REPO_DIR)\n",
        "        print(f\"Installing required python libraries\")\n",
        "        install_deps(REPO_DIR)\n",
        "        print(\"Done!\")\n",
        "\n",
        "    os.chdir(REPO_DIR)\n",
        "    !python app.py\n",
        "\n",
        "if __name__ == \"__main__\":\n",
        "    main()\n"
      ]
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "machine_shape": "hm",
      "provenance": [],
      "gpuType": "A100"
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
images/.placeholder
ADDED
@@ -0,0 +1 @@

images/amelia-watson.png
ADDED
Git LFS Details

images/furina.png
ADDED
Git LFS Details

images/pastel-style.png
ADDED
Git LFS Details

images/ufotable-style.png
ADDED
Git LFS Details
lora.toml
ADDED
@@ -0,0 +1,35 @@
[[data]]
image = "images/pastel-style"
title = "Pastel Style"
repo = "Linaqruf/pastel-anime-xl-lora"
weights = "pastel-anime-xl-latest.safetensors"
multiplier = 0.6
sample_prompt = "face focus, cute, masterpiece, best quality, 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck"
sample_negative = "Standard"

[[data]]
image = "images/ufotable-style"
title = "Ufotable Style"
repo = "Linaqruf/ufotable-xl-lora"
weights = "ufotable_style_xl.safetensors"
multiplier = 0.4
sample_prompt = "face focus, cute, masterpiece, best quality, bokeh, breasts, 1girl, solo, looking at viewer, long hair, white ribbon, smile, school uniform, bangs, black hair ribbon, swept bangs, sailor collar, serafuku, blush, ribbon, ahoge, brown eyes, long sleeves, collarbone, parted lips, sweater"
sample_negative = "Weighted"

[[data]]
image = "images/amelia-watson"
title = "Amelia Watson"
repo = "Linaqruf/amelia-watson-xl-lora"
weights = "amelia_watson_xl.safetensors"
multiplier = 0.5
sample_prompt = "face focus, masterpiece, best quality, amelia watson, bokeh, cute, 1girl, solo, monocle hair ornament, medium hair, brown eyewear, white shirt, red necktie, upper body, looking at viewer, blue eyes, leaf, plant"
sample_negative = "Weighted"

[[data]]
image = "images/furina"
title = "Furina"
repo = "Linaqruf/furina-xl-lora"
weights = "furina_xl.safetensors"
multiplier = 0.7
sample_prompt = "face focus, masterpiece, best quality, furina, bokeh, cute, 1girl, ahoge, ascot, blue eyes, blue gemstone, blue hair, blue headwear, blue jacket, gem, hair between eyes, hat, jacket, light blue hair, looking at viewer, multicolored hair, closed mouth, solo, top hat, white hair"
sample_negative = "Weighted"
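Each [[data]] entry maps to one downloadable LoRA; app.py resolves the weights with hf_hub_download before the UI starts. A minimal sketch for the first entry, assuming only the repos and filenames above:

from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Downloads (or reuses the cached copy of) the LoRA named in the first [[data]] table.
path = hf_hub_download("Linaqruf/pastel-anime-xl-lora", "pastel-anime-xl-latest.safetensors")
state_dict = load_file(path)  # flat dict of lora_down / lora_up / alpha tensors
print(len(state_dict), "tensors")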
lora_diffusers.py
ADDED
@@ -0,0 +1,539 @@
"""
LoRA module for Diffusers
==========================

This file works independently and is designed to operate with Diffusers.

Credits
-------
- Modified from: https://github.com/vladmandic/automatic/blob/master/modules/lora_diffusers.py
- Originally from: https://github.com/kohya-ss/sd-scripts/blob/sdxl/networks/lora_diffusers.py
"""

import bisect
import math
import random
from typing import Any, Dict, List, Mapping, Optional, Union
from diffusers import UNet2DConditionModel
import numpy as np
from tqdm import tqdm
import diffusers.models.lora as diffusers_lora
from transformers import CLIPTextModel
import torch


def make_unet_conversion_map() -> Dict[str, str]:
    unet_conversion_map_layer = []

    for i in range(3):  # num_blocks is 3 in sdxl
        # loop over downblocks/upblocks
        for j in range(2):
            # loop over resnets/attentions for downblocks
            hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
            unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

            if i < 3:
                # no attention layers in down_blocks.3
                hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
                sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
                unet_conversion_map_layer.append(
                    (sd_down_atn_prefix, hf_down_atn_prefix)
                )

        for j in range(3):
            # loop over resnets/attentions for upblocks
            hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
            sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
            unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

            # if i > 0: commentout for sdxl
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

        if i < 3:
            # no downsample in down_blocks.3
            hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
            sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
            unet_conversion_map_layer.append(
                (sd_downsample_prefix, hf_downsample_prefix)
            )

            # no upsample in up_blocks.3
            hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
            sd_upsample_prefix = f"output_blocks.{3*i + 2}.{2}."  # change for sdxl
            unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

    hf_mid_atn_prefix = "mid_block.attentions.0."
    sd_mid_atn_prefix = "middle_block.1."
    unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

    for j in range(2):
        hf_mid_res_prefix = f"mid_block.resnets.{j}."
        sd_mid_res_prefix = f"middle_block.{2*j}."
        unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))

    unet_conversion_map_resnet = [
        # (stable-diffusion, HF Diffusers)
        ("in_layers.0.", "norm1."),
        ("in_layers.2.", "conv1."),
        ("out_layers.0.", "norm2."),
        ("out_layers.3.", "conv2."),
        ("emb_layers.1.", "time_emb_proj."),
        ("skip_connection.", "conv_shortcut."),
    ]

    unet_conversion_map = []
    for sd, hf in unet_conversion_map_layer:
        if "resnets" in hf:
            for sd_res, hf_res in unet_conversion_map_resnet:
                unet_conversion_map.append((sd + sd_res, hf + hf_res))
        else:
            unet_conversion_map.append((sd, hf))

    for j in range(2):
        hf_time_embed_prefix = f"time_embedding.linear_{j+1}."
        sd_time_embed_prefix = f"time_embed.{j*2}."
        unet_conversion_map.append((sd_time_embed_prefix, hf_time_embed_prefix))

    for j in range(2):
        hf_label_embed_prefix = f"add_embedding.linear_{j+1}."
        sd_label_embed_prefix = f"label_emb.0.{j*2}."
        unet_conversion_map.append((sd_label_embed_prefix, hf_label_embed_prefix))

    unet_conversion_map.append(("input_blocks.0.0.", "conv_in."))
    unet_conversion_map.append(("out.0.", "conv_norm_out."))
    unet_conversion_map.append(("out.2.", "conv_out."))

    sd_hf_conversion_map = {
        sd.replace(".", "_")[:-1]: hf.replace(".", "_")[:-1]
        for sd, hf in unet_conversion_map
    }
    return sd_hf_conversion_map


UNET_CONVERSION_MAP = make_unet_conversion_map()

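As a concrete illustration of what this mapping contains (derived by hand from the loops above, not printed from a run): for i=0, j=0 the Stability-AI prefix "input_blocks.1.1." pairs with the Diffusers prefix "down_blocks.0.attentions.0.", and after the "." to "_" replacement and trailing-character strip it is stored as shown below. This is how Stability-AI-style LoRA key prefixes get matched to Diffusers module names later in the file.

# One worked entry of UNET_CONVERSION_MAP (derived from the loop above):
assert UNET_CONVERSION_MAP["input_blocks_1_1"] == "down_blocks_0_attentions_0"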
class LoRAModule(torch.nn.Module):
    """
    replaces forward method of the original Linear, instead of replacing the original Linear module.
    """

    def __init__(
        self,
        lora_name,
        org_module: torch.nn.Module,
        multiplier=1.0,
        lora_dim=4,
        alpha=1,
    ):
        """if alpha == 0 or None, alpha is rank (no scaling)."""
        super().__init__()
        self.lora_name = lora_name

        if isinstance(
            org_module, diffusers_lora.LoRACompatibleConv
        ):  # Modified to support Diffusers>=0.19.2
            in_dim = org_module.in_channels
            out_dim = org_module.out_channels
        else:
            in_dim = org_module.in_features
            out_dim = org_module.out_features

        self.lora_dim = lora_dim

        if isinstance(
            org_module, diffusers_lora.LoRACompatibleConv
        ):  # Modified to support Diffusers>=0.19.2
            kernel_size = org_module.kernel_size
            stride = org_module.stride
            padding = org_module.padding
            self.lora_down = torch.nn.Conv2d(
                in_dim, self.lora_dim, kernel_size, stride, padding, bias=False
            )
            self.lora_up = torch.nn.Conv2d(
                self.lora_dim, out_dim, (1, 1), (1, 1), bias=False
            )
        else:
            self.lora_down = torch.nn.Linear(in_dim, self.lora_dim, bias=False)
            self.lora_up = torch.nn.Linear(self.lora_dim, out_dim, bias=False)

        if isinstance(alpha, torch.Tensor):
            alpha = alpha.detach().float().numpy()  # without casting, bf16 causes error
        alpha = self.lora_dim if alpha is None or alpha == 0 else alpha
        self.scale = alpha / self.lora_dim
        self.register_buffer(
            "alpha", torch.tensor(alpha)
        )  # 勾配計算に含めない / not included in gradient calculation

        # same as microsoft's
        torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
        torch.nn.init.zeros_(self.lora_up.weight)

        self.multiplier = multiplier
        self.org_module = [org_module]
        self.enabled = True
        self.network: LoRANetwork = None
        self.org_forward = None

    # override org_module's forward method
    def apply_to(self, multiplier=None):
        if multiplier is not None:
            self.multiplier = multiplier
        if self.org_forward is None:
            self.org_forward = self.org_module[0].forward
            self.org_module[0].forward = self.forward

    # restore org_module's forward method
    def unapply_to(self):
        if self.org_forward is not None:
            self.org_module[0].forward = self.org_forward

    # forward with lora
    def forward(self, x):
        if not self.enabled:
            return self.org_forward(x)
        return (
            self.org_forward(x)
            + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale
        )

    def set_network(self, network):
        self.network = network

    # merge lora weight to org weight
    def merge_to(self, multiplier=1.0):
        # get lora weight
        lora_weight = self.get_weight(multiplier)

        # get org weight
        org_sd = self.org_module[0].state_dict()
        org_weight = org_sd["weight"]
        weight = org_weight + lora_weight.to(org_weight.device, dtype=org_weight.dtype)

        # set weight to org_module
        org_sd["weight"] = weight
        self.org_module[0].load_state_dict(org_sd)

    # restore org weight from lora weight
    def restore_from(self, multiplier=1.0):
        # get lora weight
        lora_weight = self.get_weight(multiplier)

        # get org weight
        org_sd = self.org_module[0].state_dict()
        org_weight = org_sd["weight"]
        weight = org_weight - lora_weight.to(org_weight.device, dtype=org_weight.dtype)

        # set weight to org_module
        org_sd["weight"] = weight
        self.org_module[0].load_state_dict(org_sd)

    # return lora weight
    def get_weight(self, multiplier=None):
        if multiplier is None:
            multiplier = self.multiplier

        # get up/down weight from module
        up_weight = self.lora_up.weight.to(torch.float)
        down_weight = self.lora_down.weight.to(torch.float)

        # pre-calculated weight
        if len(down_weight.size()) == 2:
            # linear
            weight = self.multiplier * (up_weight @ down_weight) * self.scale
        elif down_weight.size()[2:4] == (1, 1):
            # conv2d 1x1
            weight = (
                self.multiplier
                * (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2))
                .unsqueeze(2)
                .unsqueeze(3)
                * self.scale
            )
        else:
            # conv2d 3x3
            conved = torch.nn.functional.conv2d(
                down_weight.permute(1, 0, 2, 3), up_weight
            ).permute(1, 0, 2, 3)
            weight = self.multiplier * conved * self.scale

        return weight


# Create network from weights for inference, weights are not loaded here
def create_network_from_weights(
    text_encoder: Union[CLIPTextModel, List[CLIPTextModel]],
    unet: UNet2DConditionModel,
    weights_sd: Dict,
    multiplier: float = 1.0,
):
    # get dim/alpha mapping
    modules_dim = {}
    modules_alpha = {}
    for key, value in weights_sd.items():
        if "." not in key:
            continue

        lora_name = key.split(".")[0]
        if "alpha" in key:
            modules_alpha[lora_name] = value
        elif "lora_down" in key:
            dim = value.size()[0]
            modules_dim[lora_name] = dim
            # print(lora_name, value.size(), dim)

    # support old LoRA without alpha
    for key in modules_dim.keys():
        if key not in modules_alpha:
            modules_alpha[key] = modules_dim[key]

    return LoRANetwork(
        text_encoder,
        unet,
        multiplier=multiplier,
        modules_dim=modules_dim,
        modules_alpha=modules_alpha,
    )


def merge_lora_weights(pipe, weights_sd: Dict, multiplier: float = 1.0):
    text_encoders = (
        [pipe.text_encoder, pipe.text_encoder_2]
        if hasattr(pipe, "text_encoder_2")
        else [pipe.text_encoder]
    )
    unet = pipe.unet

    lora_network = create_network_from_weights(
        text_encoders, unet, weights_sd, multiplier=multiplier
    )
    lora_network.load_state_dict(weights_sd)
    lora_network.merge_to(multiplier=multiplier)


# block weightや学習に対応しない簡易版 / simple version without block weight and training
class LoRANetwork(torch.nn.Module):
    UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel"]
    UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = [
        "ResnetBlock2D",
        "Downsample2D",
        "Upsample2D",
    ]
    TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
    LORA_PREFIX_UNET = "lora_unet"
    LORA_PREFIX_TEXT_ENCODER = "lora_te"

    # SDXL: must starts with LORA_PREFIX_TEXT_ENCODER
    LORA_PREFIX_TEXT_ENCODER1 = "lora_te1"
    LORA_PREFIX_TEXT_ENCODER2 = "lora_te2"

    def __init__(
        self,
        text_encoder: Union[List[CLIPTextModel], CLIPTextModel],
        unet: UNet2DConditionModel,
        multiplier: float = 1.0,
        modules_dim: Optional[Dict[str, int]] = None,
        modules_alpha: Optional[Dict[str, int]] = None,
        varbose: Optional[bool] = False,
    ) -> None:
        super().__init__()
        self.multiplier = multiplier

        print(f"create LoRA network from weights")

        # convert SDXL Stability AI's U-Net modules to Diffusers
        converted = self.convert_unet_modules(modules_dim, modules_alpha)
        if converted:
            print(
                f"converted {converted} Stability AI's U-Net LoRA modules to Diffusers (SDXL)"
            )

        # create module instances
        def create_modules(
            is_unet: bool,
            text_encoder_idx: Optional[int],  # None, 1, 2
            root_module: torch.nn.Module,
            target_replace_modules: List[torch.nn.Module],
        ) -> List[LoRAModule]:
            prefix = (
                self.LORA_PREFIX_UNET
                if is_unet
                else (
                    self.LORA_PREFIX_TEXT_ENCODER
                    if text_encoder_idx is None
                    else (
                        self.LORA_PREFIX_TEXT_ENCODER1
                        if text_encoder_idx == 1
                        else self.LORA_PREFIX_TEXT_ENCODER2
                    )
                )
            )
            loras = []
            skipped = []
            for name, module in root_module.named_modules():
                if module.__class__.__name__ in target_replace_modules:
                    for child_name, child_module in module.named_modules():
                        is_linear = isinstance(
                            child_module,
                            (torch.nn.Linear, diffusers_lora.LoRACompatibleLinear),
                        )  # Modified to support Diffusers>=0.19.2
                        is_conv2d = isinstance(
                            child_module,
                            (torch.nn.Conv2d, diffusers_lora.LoRACompatibleConv),
                        )  # Modified to support Diffusers>=0.19.2

                        if is_linear or is_conv2d:
                            lora_name = prefix + "." + name + "." + child_name
                            lora_name = lora_name.replace(".", "_")

                            if lora_name not in modules_dim:
                                # print(f"skipped {lora_name} (not found in modules_dim)")
                                skipped.append(lora_name)
                                continue

                            dim = modules_dim[lora_name]
                            alpha = modules_alpha[lora_name]
                            lora = LoRAModule(
                                lora_name,
                                child_module,
                                self.multiplier,
                                dim,
                                alpha,
                            )
                            loras.append(lora)
            return loras, skipped

        text_encoders = text_encoder if type(text_encoder) == list else [text_encoder]

        # create LoRA for text encoder
        # 毎回すべてのモジュールを作るのは無駄なので要検討 / it is wasteful to create all modules every time, need to consider
        self.text_encoder_loras: List[LoRAModule] = []
        skipped_te = []
        for i, text_encoder in enumerate(text_encoders):
            if len(text_encoders) > 1:
                index = i + 1
            else:
                index = None

            text_encoder_loras, skipped = create_modules(
                False,
                index,
                text_encoder,
                LoRANetwork.TEXT_ENCODER_TARGET_REPLACE_MODULE,
            )
            self.text_encoder_loras.extend(text_encoder_loras)
            skipped_te += skipped
        print(f"create LoRA for Text Encoder: {len(self.text_encoder_loras)} modules.")
        if len(skipped_te) > 0:
            print(f"skipped {len(skipped_te)} modules because of missing weight.")

        # extend U-Net target modules to include Conv2d 3x3
        target_modules = (
            LoRANetwork.UNET_TARGET_REPLACE_MODULE
            + LoRANetwork.UNET_TARGET_REPLACE_MODULE_CONV2D_3X3
        )

        self.unet_loras: List[LoRAModule]
        self.unet_loras, skipped_un = create_modules(True, None, unet, target_modules)
        print(f"create LoRA for U-Net: {len(self.unet_loras)} modules.")
        if len(skipped_un) > 0:
            print(f"skipped {len(skipped_un)} modules because of missing weight.")

        # assertion
        names = set()
        for lora in self.text_encoder_loras + self.unet_loras:
            names.add(lora.lora_name)
        for lora_name in modules_dim.keys():
            assert (
                lora_name in names
            ), f"{lora_name} is not found in created LoRA modules."

        # make to work load_state_dict
        for lora in self.text_encoder_loras + self.unet_loras:
            self.add_module(lora.lora_name, lora)

    # SDXL: convert SDXL Stability AI's U-Net modules to Diffusers
    def convert_unet_modules(self, modules_dim, modules_alpha):
        converted_count = 0
        not_converted_count = 0

        map_keys = list(UNET_CONVERSION_MAP.keys())
        map_keys.sort()

        for key in list(modules_dim.keys()):
            if key.startswith(LoRANetwork.LORA_PREFIX_UNET + "_"):
                search_key = key.replace(LoRANetwork.LORA_PREFIX_UNET + "_", "")
                position = bisect.bisect_right(map_keys, search_key)
                map_key = map_keys[position - 1]
                if search_key.startswith(map_key):
                    new_key = key.replace(map_key, UNET_CONVERSION_MAP[map_key])
                    modules_dim[new_key] = modules_dim[key]
                    modules_alpha[new_key] = modules_alpha[key]
                    del modules_dim[key]
                    del modules_alpha[key]
                    converted_count += 1
                else:
                    not_converted_count += 1
        assert (
            converted_count == 0 or not_converted_count == 0
        ), f"some modules are not converted: {converted_count} converted, {not_converted_count} not converted"
        return converted_count

    def set_multiplier(self, multiplier):
        self.multiplier = multiplier
        for lora in self.text_encoder_loras + self.unet_loras:
            lora.multiplier = self.multiplier

    def apply_to(self, multiplier=1.0, apply_text_encoder=True, apply_unet=True):
        if apply_text_encoder:
            print("enable LoRA for text encoder")
            for lora in self.text_encoder_loras:
                lora.apply_to(multiplier)
        if apply_unet:
            print("enable LoRA for U-Net")
            for lora in self.unet_loras:
                lora.apply_to(multiplier)

    def unapply_to(self):
        for lora in self.text_encoder_loras + self.unet_loras:
            lora.unapply_to()

    def merge_to(self, multiplier=1.0):
        print("merge LoRA weights to original weights")
        for lora in tqdm(self.text_encoder_loras + self.unet_loras):
            lora.merge_to(multiplier)
        print(f"weights are merged")

    def restore_from(self, multiplier=1.0):
        print("restore LoRA weights from original weights")
        for lora in tqdm(self.text_encoder_loras + self.unet_loras):
            lora.restore_from(multiplier)
        print(f"weights are restored")

    def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):
        # convert SDXL Stability AI's state dict to Diffusers' based state dict
        map_keys = list(UNET_CONVERSION_MAP.keys())  # prefix of U-Net modules
        map_keys.sort()
        for key in list(state_dict.keys()):
            if key.startswith(LoRANetwork.LORA_PREFIX_UNET + "_"):
                search_key = key.replace(LoRANetwork.LORA_PREFIX_UNET + "_", "")
                position = bisect.bisect_right(map_keys, search_key)
                map_key = map_keys[position - 1]
                if search_key.startswith(map_key):
                    new_key = key.replace(map_key, UNET_CONVERSION_MAP[map_key])
                    state_dict[new_key] = state_dict[key]
                    del state_dict[key]

        # in case of V2, some weights have different shape, so we need to convert them
        # because V2 LoRA is based on U-Net created by use_linear_projection=False
        my_state_dict = self.state_dict()
        for key in state_dict.keys():
            if state_dict[key].size() != my_state_dict[key].size():
                # print(f"convert {key} from {state_dict[key].size()} to {my_state_dict[key].size()}")
                state_dict[key] = state_dict[key].view(my_state_dict[key].size())

        return super().load_state_dict(state_dict, strict)
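The patched forward above is the standard low-rank update: for a layer with weight W, the module returns W x plus (lora_up · lora_down) x scaled by multiplier and alpha / rank, and merge_to folds that same delta into W once instead of computing it per call. A small sketch of the one-off merge path, assuming an already-built pipe and an illustrative file name (use apply_to/unapply_to instead when LoRAs need to be switched at runtime, as the demo does):

from safetensors.torch import load_file

weights_sd = load_file("pastel-anime-xl-latest.safetensors")
merge_lora_weights(pipe, weights_sd, multiplier=0.6)  # bakes the LoRA delta into the pipeline weights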
lpw_stable_diffusion_xl.py
ADDED
@@ -0,0 +1,1496 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
## ----------------------------------------------------------
|
2 |
+
# An SDXL pipeline that can take an unlimited-length weighted prompt
|
3 |
+
#
|
4 |
+
# Author: Andrew Zhu
|
5 |
+
# Github: https://github.com/xhinker
|
6 |
+
# Medium: https://medium.com/@xhinker
|
7 |
+
## -----------------------------------------------------------
|
8 |
+
|
9 |
+
import inspect
|
10 |
+
import os
|
11 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
12 |
+
|
13 |
+
import torch
|
14 |
+
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
15 |
+
|
16 |
+
from diffusers import DiffusionPipeline, StableDiffusionXLPipeline
|
17 |
+
from diffusers.image_processor import VaeImageProcessor
|
18 |
+
from diffusers.loaders import (
|
19 |
+
FromSingleFileMixin,
|
20 |
+
LoraLoaderMixin,
|
21 |
+
TextualInversionLoaderMixin,
|
22 |
+
)
|
23 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
24 |
+
from diffusers.models.attention_processor import (
|
25 |
+
AttnProcessor2_0,
|
26 |
+
LoRAAttnProcessor2_0,
|
27 |
+
LoRAXFormersAttnProcessor,
|
28 |
+
XFormersAttnProcessor,
|
29 |
+
)
|
30 |
+
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
|
31 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
32 |
+
from diffusers.utils import (
|
33 |
+
is_accelerate_available,
|
34 |
+
is_accelerate_version,
|
35 |
+
is_invisible_watermark_available,
|
36 |
+
logging,
|
37 |
+
randn_tensor,
|
38 |
+
replace_example_docstring,
|
39 |
+
)
|
40 |
+
|
41 |
+
|
42 |
+
if is_invisible_watermark_available():
|
43 |
+
from diffusers.pipelines.stable_diffusion_xl.watermark import (
|
44 |
+
StableDiffusionXLWatermarker,
|
45 |
+
)
|
46 |
+
|
47 |
+
|
48 |
+
def parse_prompt_attention(text):
|
49 |
+
"""
|
50 |
+
Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
|
51 |
+
Accepted tokens are:
|
52 |
+
(abc) - increases attention to abc by a multiplier of 1.1
|
53 |
+
(abc:3.12) - increases attention to abc by a multiplier of 3.12
|
54 |
+
[abc] - decreases attention to abc by a multiplier of 1.1
|
55 |
+
\( - literal character '('
|
56 |
+
\[ - literal character '['
|
57 |
+
\) - literal character ')'
|
58 |
+
\] - literal character ']'
|
59 |
+
\\ - literal character '\'
|
60 |
+
anything else - just text
|
61 |
+
|
62 |
+
>>> parse_prompt_attention('normal text')
|
63 |
+
[['normal text', 1.0]]
|
64 |
+
>>> parse_prompt_attention('an (important) word')
|
65 |
+
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
|
66 |
+
>>> parse_prompt_attention('(unbalanced')
|
67 |
+
[['unbalanced', 1.1]]
|
68 |
+
>>> parse_prompt_attention('\(literal\]')
|
69 |
+
[['(literal]', 1.0]]
|
70 |
+
>>> parse_prompt_attention('(unnecessary)(parens)')
|
71 |
+
[['unnecessaryparens', 1.1]]
|
72 |
+
>>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
|
73 |
+
[['a ', 1.0],
|
74 |
+
['house', 1.5730000000000004],
|
75 |
+
[' ', 1.1],
|
76 |
+
['on', 1.0],
|
77 |
+
[' a ', 1.1],
|
78 |
+
['hill', 0.55],
|
79 |
+
[', sun, ', 1.1],
|
80 |
+
['sky', 1.4641000000000006],
|
81 |
+
['.', 1.1]]
|
82 |
+
"""
|
83 |
+
import re
|
84 |
+
|
85 |
+
re_attention = re.compile(
|
86 |
+
r"""
|
87 |
+
\\\(|\\\)|\\\[|\\]|\\\\|\\|\(|\[|:([+-]?[.\d]+)\)|
|
88 |
+
\)|]|[^\\()\[\]:]+|:
|
89 |
+
""",
|
90 |
+
re.X,
|
91 |
+
)
|
92 |
+
|
93 |
+
re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
|
94 |
+
|
95 |
+
res = []
|
96 |
+
round_brackets = []
|
97 |
+
square_brackets = []
|
98 |
+
|
99 |
+
round_bracket_multiplier = 1.1
|
100 |
+
square_bracket_multiplier = 1 / 1.1
|
101 |
+
|
102 |
+
def multiply_range(start_position, multiplier):
|
103 |
+
for p in range(start_position, len(res)):
|
104 |
+
res[p][1] *= multiplier
|
105 |
+
|
106 |
+
for m in re_attention.finditer(text):
|
107 |
+
text = m.group(0)
|
108 |
+
weight = m.group(1)
|
109 |
+
|
110 |
+
if text.startswith("\\"):
|
111 |
+
res.append([text[1:], 1.0])
|
112 |
+
elif text == "(":
|
113 |
+
round_brackets.append(len(res))
|
114 |
+
elif text == "[":
|
115 |
+
square_brackets.append(len(res))
|
116 |
+
elif weight is not None and len(round_brackets) > 0:
|
117 |
+
multiply_range(round_brackets.pop(), float(weight))
|
118 |
+
elif text == ")" and len(round_brackets) > 0:
|
119 |
+
multiply_range(round_brackets.pop(), round_bracket_multiplier)
|
120 |
+
elif text == "]" and len(square_brackets) > 0:
|
121 |
+
multiply_range(square_brackets.pop(), square_bracket_multiplier)
|
122 |
+
else:
|
123 |
+
parts = re.split(re_break, text)
|
124 |
+
for i, part in enumerate(parts):
|
125 |
+
if i > 0:
|
126 |
+
res.append(["BREAK", -1])
|
127 |
+
res.append([part, 1.0])
|
128 |
+
|
129 |
+
for pos in round_brackets:
|
130 |
+
multiply_range(pos, round_bracket_multiplier)
|
131 |
+
|
132 |
+
for pos in square_brackets:
|
133 |
+
multiply_range(pos, square_bracket_multiplier)
|
134 |
+
|
135 |
+
if len(res) == 0:
|
136 |
+
res = [["", 1.0]]
|
137 |
+
|
138 |
+
# merge runs of identical weights
|
139 |
+
i = 0
|
140 |
+
while i + 1 < len(res):
|
141 |
+
if res[i][1] == res[i + 1][1]:
|
142 |
+
res[i][0] += res[i + 1][0]
|
143 |
+
res.pop(i + 1)
|
144 |
+
else:
|
145 |
+
i += 1
|
146 |
+
|
147 |
+
return res
|
148 |
+
|
149 |
+
|
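
The attention parser above is self-contained, so it can be sanity-checked on its own. A minimal sketch, assuming this file has been saved locally as `lpw_stable_diffusion_xl.py` so the helper can be imported:

```py
from lpw_stable_diffusion_xl import parse_prompt_attention

# each pair is (text chunk, attention multiplier)
print(parse_prompt_attention("a (red:1.5) cat"))
# -> [['a ', 1.0], ['red', 1.5], [' cat', 1.0]]
```
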
150 |
+
def get_prompts_tokens_with_weights(clip_tokenizer: CLIPTokenizer, prompt: str):
|
151 |
+
"""
|
152 |
+
Get prompt token ids and weights; this function works for both the prompt and the negative prompt
|
153 |
+
|
154 |
+
Args:
|
155 |
+
clip_tokenizer (CLIPTokenizer)
|
156 |
+
A CLIPTokenizer
|
157 |
+
prompt (str)
|
158 |
+
A prompt string with weights
|
159 |
+
|
160 |
+
Returns:
|
161 |
+
text_tokens (list)
|
162 |
+
A list containing the token ids
|
163 |
+
text_weight (list)
|
164 |
+
A list containing the corresponding weight of each token id
|
165 |
+
|
166 |
+
Example:
|
167 |
+
import torch
|
168 |
+
from transformers import CLIPTokenizer
|
169 |
+
|
170 |
+
clip_tokenizer = CLIPTokenizer.from_pretrained(
|
171 |
+
"stablediffusionapi/deliberate-v2"
|
172 |
+
, subfolder = "tokenizer"
|
173 |
+
, dtype = torch.float16
|
174 |
+
)
|
175 |
+
|
176 |
+
token_id_list, token_weight_list = get_prompts_tokens_with_weights(
|
177 |
+
clip_tokenizer = clip_tokenizer
|
178 |
+
,prompt = "a (red:1.5) cat"*70
|
179 |
+
)
|
180 |
+
"""
|
181 |
+
texts_and_weights = parse_prompt_attention(prompt)
|
182 |
+
text_tokens, text_weights = [], []
|
183 |
+
for word, weight in texts_and_weights:
|
184 |
+
# tokenize and discard the starting and the ending token
|
185 |
+
token = clip_tokenizer(word, truncation=False).input_ids[
|
186 |
+
1:-1
|
187 |
+
]  # so that we can tokenize a prompt of any length
|
188 |
+
# the returned token is a 1d list: [320, 1125, 539, 320]
|
189 |
+
|
190 |
+
# merge the new tokens to the all tokens holder: text_tokens
|
191 |
+
text_tokens = [*text_tokens, *token]
|
192 |
+
|
193 |
+
# each token chunk will come with one weight, like ['red cat', 2.0]
|
194 |
+
# need to expand weight for each token.
|
195 |
+
chunk_weights = [weight] * len(token)
|
196 |
+
|
197 |
+
# append the weight back to the weight holder: text_weights
|
198 |
+
text_weights = [*text_weights, *chunk_weights]
|
199 |
+
return text_tokens, text_weights
|
200 |
+
|
201 |
+
|
202 |
+
def group_tokens_and_weights(token_ids: list, weights: list, pad_last_block=False):
|
203 |
+
"""
|
204 |
+
Produce tokens and weights in groups and pad the missing tokens
|
205 |
+
|
206 |
+
Args:
|
207 |
+
token_ids (list)
|
208 |
+
The token ids from tokenizer
|
209 |
+
weights (list)
|
210 |
+
The weights list from function get_prompts_tokens_with_weights
|
211 |
+
pad_last_block (bool)
|
212 |
+
Controls whether to pad the last token group to 75 tokens with eos
|
213 |
+
Returns:
|
214 |
+
new_token_ids (2d list)
|
215 |
+
new_weights (2d list)
|
216 |
+
|
217 |
+
Example:
|
218 |
+
token_groups,weight_groups = group_tokens_and_weights(
|
219 |
+
token_ids = token_id_list
|
220 |
+
, weights = token_weight_list
|
221 |
+
)
|
222 |
+
"""
|
223 |
+
bos, eos = 49406, 49407
|
224 |
+
|
225 |
+
# this will be a 2d list
|
226 |
+
new_token_ids = []
|
227 |
+
new_weights = []
|
228 |
+
while len(token_ids) >= 75:
|
229 |
+
# get the first 75 tokens
|
230 |
+
head_75_tokens = [token_ids.pop(0) for _ in range(75)]
|
231 |
+
head_75_weights = [weights.pop(0) for _ in range(75)]
|
232 |
+
|
233 |
+
# extract token ids and weights
|
234 |
+
temp_77_token_ids = [bos] + head_75_tokens + [eos]
|
235 |
+
temp_77_weights = [1.0] + head_75_weights + [1.0]
|
236 |
+
|
237 |
+
# add 77 token and weights chunk to the holder list
|
238 |
+
new_token_ids.append(temp_77_token_ids)
|
239 |
+
new_weights.append(temp_77_weights)
|
240 |
+
|
241 |
+
# pad the leftover tokens
|
242 |
+
if len(token_ids) > 0:
|
243 |
+
padding_len = 75 - len(token_ids) if pad_last_block else 0
|
244 |
+
|
245 |
+
temp_77_token_ids = [bos] + token_ids + [eos] * padding_len + [eos]
|
246 |
+
new_token_ids.append(temp_77_token_ids)
|
247 |
+
|
248 |
+
temp_77_weights = [1.0] + weights + [1.0] * padding_len + [1.0]
|
249 |
+
new_weights.append(temp_77_weights)
|
250 |
+
|
251 |
+
return new_token_ids, new_weights
|
252 |
+
|
253 |
+
|
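
Together, the two helpers above turn an arbitrarily long weighted prompt into fixed 77-entry groups (bos + 75 eos-padded token ids + eos) that the CLIP text encoders can consume. A small sketch, assuming this module is importable locally and using the SDXL base tokenizer purely as an illustration:

```py
from transformers import CLIPTokenizer

from lpw_stable_diffusion_xl import (
    get_prompts_tokens_with_weights,
    group_tokens_and_weights,
)

tokenizer = CLIPTokenizer.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="tokenizer"
)
token_ids, weights = get_prompts_tokens_with_weights(
    tokenizer, "a (white:1.3) cat sitting on the grass, " * 10
)
token_groups, weight_groups = group_tokens_and_weights(
    token_ids, weights, pad_last_block=True
)
# every group is exactly 77 entries long
print(len(token_groups), [len(g) for g in token_groups])
```
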
254 |
+
def get_weighted_text_embeddings_sdxl(
|
255 |
+
pipe: StableDiffusionXLPipeline,
|
256 |
+
prompt: str = "",
|
257 |
+
prompt_2: str = None,
|
258 |
+
neg_prompt: str = "",
|
259 |
+
neg_prompt_2: str = None,
|
260 |
+
):
|
261 |
+
"""
|
262 |
+
This function can process a long weighted prompt with no length limitation
|
263 |
+
for Stable Diffusion XL
|
264 |
+
|
265 |
+
Args:
|
266 |
+
pipe (StableDiffusionXLPipeline)
|
267 |
+
prompt (str)
|
268 |
+
prompt_2 (str)
|
269 |
+
neg_prompt (str)
|
270 |
+
neg_prompt_2 (str)
|
271 |
+
Returns:
|
272 |
+
prompt_embeds (torch.Tensor)
|
273 |
+
neg_prompt_embeds (torch.Tensor)
|
274 |
+
"""
|
275 |
+
if prompt_2:
|
276 |
+
prompt = f"{prompt} {prompt_2}"
|
277 |
+
|
278 |
+
if neg_prompt_2:
|
279 |
+
neg_prompt = f"{neg_prompt} {neg_prompt_2}"
|
280 |
+
|
281 |
+
eos = pipe.tokenizer.eos_token_id
|
282 |
+
|
283 |
+
# tokenizer 1
|
284 |
+
prompt_tokens, prompt_weights = get_prompts_tokens_with_weights(
|
285 |
+
pipe.tokenizer, prompt
|
286 |
+
)
|
287 |
+
|
288 |
+
neg_prompt_tokens, neg_prompt_weights = get_prompts_tokens_with_weights(
|
289 |
+
pipe.tokenizer, neg_prompt
|
290 |
+
)
|
291 |
+
|
292 |
+
# tokenizer 2
|
293 |
+
prompt_tokens_2, prompt_weights_2 = get_prompts_tokens_with_weights(
|
294 |
+
pipe.tokenizer_2, prompt
|
295 |
+
)
|
296 |
+
|
297 |
+
neg_prompt_tokens_2, neg_prompt_weights_2 = get_prompts_tokens_with_weights(
|
298 |
+
pipe.tokenizer_2, neg_prompt
|
299 |
+
)
|
300 |
+
|
301 |
+
# padding the shorter one for prompt set 1
|
302 |
+
prompt_token_len = len(prompt_tokens)
|
303 |
+
neg_prompt_token_len = len(neg_prompt_tokens)
|
304 |
+
|
305 |
+
if prompt_token_len > neg_prompt_token_len:
|
306 |
+
# padding the neg_prompt with eos token
|
307 |
+
neg_prompt_tokens = neg_prompt_tokens + [eos] * abs(
|
308 |
+
prompt_token_len - neg_prompt_token_len
|
309 |
+
)
|
310 |
+
neg_prompt_weights = neg_prompt_weights + [1.0] * abs(
|
311 |
+
prompt_token_len - neg_prompt_token_len
|
312 |
+
)
|
313 |
+
else:
|
314 |
+
# padding the prompt
|
315 |
+
prompt_tokens = prompt_tokens + [eos] * abs(
|
316 |
+
prompt_token_len - neg_prompt_token_len
|
317 |
+
)
|
318 |
+
prompt_weights = prompt_weights + [1.0] * abs(
|
319 |
+
prompt_token_len - neg_prompt_token_len
|
320 |
+
)
|
321 |
+
|
322 |
+
# padding the shorter one for token set 2
|
323 |
+
prompt_token_len_2 = len(prompt_tokens_2)
|
324 |
+
neg_prompt_token_len_2 = len(neg_prompt_tokens_2)
|
325 |
+
|
326 |
+
if prompt_token_len_2 > neg_prompt_token_len_2:
|
327 |
+
# padding the neg_prompt with eos token
|
328 |
+
neg_prompt_tokens_2 = neg_prompt_tokens_2 + [eos] * abs(
|
329 |
+
prompt_token_len_2 - neg_prompt_token_len_2
|
330 |
+
)
|
331 |
+
neg_prompt_weights_2 = neg_prompt_weights_2 + [1.0] * abs(
|
332 |
+
prompt_token_len_2 - neg_prompt_token_len_2
|
333 |
+
)
|
334 |
+
else:
|
335 |
+
# padding the prompt
|
336 |
+
prompt_tokens_2 = prompt_tokens_2 + [eos] * abs(
|
337 |
+
prompt_token_len_2 - neg_prompt_token_len_2
|
338 |
+
)
|
339 |
+
prompt_weights_2 = prompt_weights_2 + [1.0] * abs(
|
340 |
+
prompt_token_len_2 - neg_prompt_token_len_2
|
341 |
+
)
|
342 |
+
|
343 |
+
embeds = []
|
344 |
+
neg_embeds = []
|
345 |
+
|
346 |
+
prompt_token_groups, prompt_weight_groups = group_tokens_and_weights(
|
347 |
+
prompt_tokens.copy(), prompt_weights.copy()
|
348 |
+
)
|
349 |
+
|
350 |
+
neg_prompt_token_groups, neg_prompt_weight_groups = group_tokens_and_weights(
|
351 |
+
neg_prompt_tokens.copy(), neg_prompt_weights.copy()
|
352 |
+
)
|
353 |
+
|
354 |
+
prompt_token_groups_2, prompt_weight_groups_2 = group_tokens_and_weights(
|
355 |
+
prompt_tokens_2.copy(), prompt_weights_2.copy()
|
356 |
+
)
|
357 |
+
|
358 |
+
neg_prompt_token_groups_2, neg_prompt_weight_groups_2 = group_tokens_and_weights(
|
359 |
+
neg_prompt_tokens_2.copy(), neg_prompt_weights_2.copy()
|
360 |
+
)
|
361 |
+
|
362 |
+
# process the prompt embeddings one 77-token group at a time.
|
363 |
+
for i in range(len(prompt_token_groups)):
|
364 |
+
# get positive prompt embeddings with weights
|
365 |
+
token_tensor = torch.tensor(
|
366 |
+
[prompt_token_groups[i]], dtype=torch.long, device=pipe.device
|
367 |
+
)
|
368 |
+
weight_tensor = torch.tensor(
|
369 |
+
prompt_weight_groups[i], dtype=torch.float16, device=pipe.device
|
370 |
+
)
|
371 |
+
|
372 |
+
token_tensor_2 = torch.tensor(
|
373 |
+
[prompt_token_groups_2[i]], dtype=torch.long, device=pipe.device
|
374 |
+
)
|
375 |
+
|
376 |
+
# use first text encoder
|
377 |
+
prompt_embeds_1 = pipe.text_encoder(
|
378 |
+
token_tensor.to(pipe.device), output_hidden_states=True
|
379 |
+
)
|
380 |
+
prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-2]
|
381 |
+
|
382 |
+
# use second text encoder
|
383 |
+
prompt_embeds_2 = pipe.text_encoder_2(
|
384 |
+
token_tensor_2.to(pipe.device), output_hidden_states=True
|
385 |
+
)
|
386 |
+
prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-2]
|
387 |
+
pooled_prompt_embeds = prompt_embeds_2[0]
|
388 |
+
|
389 |
+
prompt_embeds_list = [
|
390 |
+
prompt_embeds_1_hidden_states,
|
391 |
+
prompt_embeds_2_hidden_states,
|
392 |
+
]
|
393 |
+
token_embedding = torch.concat(prompt_embeds_list, dim=-1).squeeze(0)
|
394 |
+
|
395 |
+
for j in range(len(weight_tensor)):
|
396 |
+
if weight_tensor[j] != 1.0:
|
397 |
+
token_embedding[j] = (
|
398 |
+
token_embedding[-1]
|
399 |
+
+ (token_embedding[j] - token_embedding[-1]) * weight_tensor[j]
|
400 |
+
)
|
401 |
+
|
402 |
+
token_embedding = token_embedding.unsqueeze(0)
|
403 |
+
embeds.append(token_embedding)
|
404 |
+
|
405 |
+
# get negative prompt embeddings with weights
|
406 |
+
neg_token_tensor = torch.tensor(
|
407 |
+
[neg_prompt_token_groups[i]], dtype=torch.long, device=pipe.device
|
408 |
+
)
|
409 |
+
neg_token_tensor_2 = torch.tensor(
|
410 |
+
[neg_prompt_token_groups_2[i]], dtype=torch.long, device=pipe.device
|
411 |
+
)
|
412 |
+
neg_weight_tensor = torch.tensor(
|
413 |
+
neg_prompt_weight_groups[i], dtype=torch.float16, device=pipe.device
|
414 |
+
)
|
415 |
+
|
416 |
+
# use first text encoder
|
417 |
+
neg_prompt_embeds_1 = pipe.text_encoder(
|
418 |
+
neg_token_tensor.to(pipe.device), output_hidden_states=True
|
419 |
+
)
|
420 |
+
neg_prompt_embeds_1_hidden_states = neg_prompt_embeds_1.hidden_states[-2]
|
421 |
+
|
422 |
+
# use second text encoder
|
423 |
+
neg_prompt_embeds_2 = pipe.text_encoder_2(
|
424 |
+
neg_token_tensor_2.to(pipe.device), output_hidden_states=True
|
425 |
+
)
|
426 |
+
neg_prompt_embeds_2_hidden_states = neg_prompt_embeds_2.hidden_states[-2]
|
427 |
+
negative_pooled_prompt_embeds = neg_prompt_embeds_2[0]
|
428 |
+
|
429 |
+
neg_prompt_embeds_list = [
|
430 |
+
neg_prompt_embeds_1_hidden_states,
|
431 |
+
neg_prompt_embeds_2_hidden_states,
|
432 |
+
]
|
433 |
+
neg_token_embedding = torch.concat(neg_prompt_embeds_list, dim=-1).squeeze(0)
|
434 |
+
|
435 |
+
for z in range(len(neg_weight_tensor)):
|
436 |
+
if neg_weight_tensor[z] != 1.0:
|
437 |
+
neg_token_embedding[z] = (
|
438 |
+
neg_token_embedding[-1]
|
439 |
+
+ (neg_token_embedding[z] - neg_token_embedding[-1])
|
440 |
+
* neg_weight_tensor[z]
|
441 |
+
)
|
442 |
+
|
443 |
+
neg_token_embedding = neg_token_embedding.unsqueeze(0)
|
444 |
+
neg_embeds.append(neg_token_embedding)
|
445 |
+
|
446 |
+
prompt_embeds = torch.cat(embeds, dim=1)
|
447 |
+
negative_prompt_embeds = torch.cat(neg_embeds, dim=1)
|
448 |
+
|
449 |
+
return (
|
450 |
+
prompt_embeds,
|
451 |
+
negative_prompt_embeds,
|
452 |
+
pooled_prompt_embeds,
|
453 |
+
negative_pooled_prompt_embeds,
|
454 |
+
)
|
455 |
+
|
456 |
+
|
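
A hedged end-to-end sketch of the helper above: the four returned embeddings can be fed straight into a stock `StableDiffusionXLPipeline`, which accepts precomputed prompt embeddings. The checkpoint id, prompts, and CUDA device below are illustrative assumptions, not part of this file:

```py
import torch
from diffusers import StableDiffusionXLPipeline

from lpw_stable_diffusion_xl import get_weighted_text_embeddings_sdxl

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")

(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = get_weighted_text_embeddings_sdxl(
    pipe,
    prompt="a photo of a (red:1.4) fox in the snow, highly detailed, " * 20,
    neg_prompt="(blurry:1.3), low quality",
)

image = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
    num_inference_steps=30,
).images[0]
```
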
457 |
+
# -------------------------------------------------------------------------------------------------------------------------------
|
458 |
+
# reuse the backbone code from StableDiffusionXLPipeline
|
459 |
+
# -------------------------------------------------------------------------------------------------------------------------------
|
460 |
+
|
461 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
462 |
+
|
463 |
+
EXAMPLE_DOC_STRING = """
|
464 |
+
Examples:
|
465 |
+
```py
|
466 |
+
from diffusers import DiffusionPipeline
|
467 |
+
import torch
|
468 |
+
|
469 |
+
pipe = DiffusionPipeline.from_pretrained(
|
470 |
+
"stabilityai/stable-diffusion-xl-base-1.0"
|
471 |
+
, torch_dtype = torch.float16
|
472 |
+
, use_safetensors = True
|
473 |
+
, variant = "fp16"
|
474 |
+
, custom_pipeline = "lpw_stable_diffusion_xl",
|
475 |
+
)
|
476 |
+
|
477 |
+
prompt = "a white cat running on the grass"*20
|
478 |
+
prompt2 = "play a football"*20
|
479 |
+
prompt = f"{prompt},{prompt2}"
|
480 |
+
neg_prompt = "blur, low quality"
|
481 |
+
|
482 |
+
pipe.to("cuda")
|
483 |
+
images = pipe(
|
484 |
+
prompt = prompt
|
485 |
+
, negative_prompt = neg_prompt
|
486 |
+
).images[0]
|
487 |
+
|
488 |
+
pipe.to("cpu")
|
489 |
+
torch.cuda.empty_cache()
|
490 |
+
images
|
491 |
+
```
|
492 |
+
"""
|
493 |
+
|
494 |
+
|
495 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
|
496 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
497 |
+
"""
|
498 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
499 |
+
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
500 |
+
"""
|
501 |
+
std_text = noise_pred_text.std(
|
502 |
+
dim=list(range(1, noise_pred_text.ndim)), keepdim=True
|
503 |
+
)
|
504 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
505 |
+
# rescale the results from guidance (fixes overexposure)
|
506 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
507 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
508 |
+
noise_cfg = (
|
509 |
+
guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
510 |
+
)
|
511 |
+
return noise_cfg
|
512 |
+
|
513 |
+
|
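
A tiny numeric sketch of the rescaling above on dummy tensors (shapes and the scale factor are illustrative), assuming the function is imported from this module:

```py
import torch

from lpw_stable_diffusion_xl import rescale_noise_cfg

noise_pred_text = torch.randn(2, 4, 64, 64)
noise_cfg = 7.5 * noise_pred_text  # stand-in for an over-amplified CFG prediction
rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7)
# the per-sample std is pulled back toward that of the text-conditioned
# prediction, which is what counteracts overexposure
print(noise_cfg.std().item(), noise_pred_text.std().item(), rescaled.std().item())
```
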
514 |
+
class SDXLLongPromptWeightingPipeline(
|
515 |
+
DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin
|
516 |
+
):
|
517 |
+
r"""
|
518 |
+
Pipeline for text-to-image generation using Stable Diffusion XL.
|
519 |
+
|
520 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
521 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
522 |
+
|
523 |
+
In addition, the pipeline inherits the following loading methods:
|
524 |
+
- *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
|
525 |
+
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
|
526 |
+
|
527 |
+
as well as the following saving methods:
|
528 |
+
- *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
|
529 |
+
|
530 |
+
Args:
|
531 |
+
vae ([`AutoencoderKL`]):
|
532 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
533 |
+
text_encoder ([`CLIPTextModel`]):
|
534 |
+
Frozen text-encoder. Stable Diffusion XL uses the text portion of
|
535 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
536 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
537 |
+
text_encoder_2 ([`CLIPTextModelWithProjection`]):
|
538 |
+
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
|
539 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
|
540 |
+
specifically the
|
541 |
+
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
|
542 |
+
variant.
|
543 |
+
tokenizer (`CLIPTokenizer`):
|
544 |
+
Tokenizer of class
|
545 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
546 |
+
tokenizer_2 (`CLIPTokenizer`):
|
547 |
+
Second Tokenizer of class
|
548 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
549 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
550 |
+
scheduler ([`SchedulerMixin`]):
|
551 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
552 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
553 |
+
"""
|
554 |
+
|
555 |
+
def __init__(
|
556 |
+
self,
|
557 |
+
vae: AutoencoderKL,
|
558 |
+
text_encoder: CLIPTextModel,
|
559 |
+
text_encoder_2: CLIPTextModelWithProjection,
|
560 |
+
tokenizer: CLIPTokenizer,
|
561 |
+
tokenizer_2: CLIPTokenizer,
|
562 |
+
unet: UNet2DConditionModel,
|
563 |
+
scheduler: KarrasDiffusionSchedulers,
|
564 |
+
force_zeros_for_empty_prompt: bool = True,
|
565 |
+
add_watermarker: Optional[bool] = None,
|
566 |
+
):
|
567 |
+
super().__init__()
|
568 |
+
|
569 |
+
self.register_modules(
|
570 |
+
vae=vae,
|
571 |
+
text_encoder=text_encoder,
|
572 |
+
text_encoder_2=text_encoder_2,
|
573 |
+
tokenizer=tokenizer,
|
574 |
+
tokenizer_2=tokenizer_2,
|
575 |
+
unet=unet,
|
576 |
+
scheduler=scheduler,
|
577 |
+
)
|
578 |
+
self.register_to_config(
|
579 |
+
force_zeros_for_empty_prompt=force_zeros_for_empty_prompt
|
580 |
+
)
|
581 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
582 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
583 |
+
self.default_sample_size = self.unet.config.sample_size
|
584 |
+
|
585 |
+
add_watermarker = (
|
586 |
+
add_watermarker
|
587 |
+
if add_watermarker is not None
|
588 |
+
else is_invisible_watermark_available()
|
589 |
+
)
|
590 |
+
|
591 |
+
if add_watermarker:
|
592 |
+
self.watermark = StableDiffusionXLWatermarker()
|
593 |
+
else:
|
594 |
+
self.watermark = None
|
595 |
+
|
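
Illustrative arithmetic for the scale factor computed in `__init__`: the standard SDXL VAE has four entries in `block_out_channels`, so the factor works out to 8, i.e. a 1024x1024 image corresponds to 128x128 latents.

```py
vae_scale_factor = 2 ** (4 - 1)
print(vae_scale_factor, 1024 // vae_scale_factor)  # 8 128
```
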
596 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
|
597 |
+
def enable_vae_slicing(self):
|
598 |
+
r"""
|
599 |
+
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
|
600 |
+
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
|
601 |
+
"""
|
602 |
+
self.vae.enable_slicing()
|
603 |
+
|
604 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
|
605 |
+
def disable_vae_slicing(self):
|
606 |
+
r"""
|
607 |
+
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
|
608 |
+
computing decoding in one step.
|
609 |
+
"""
|
610 |
+
self.vae.disable_slicing()
|
611 |
+
|
612 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
|
613 |
+
def enable_vae_tiling(self):
|
614 |
+
r"""
|
615 |
+
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
|
616 |
+
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
|
617 |
+
processing larger images.
|
618 |
+
"""
|
619 |
+
self.vae.enable_tiling()
|
620 |
+
|
621 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
|
622 |
+
def disable_vae_tiling(self):
|
623 |
+
r"""
|
624 |
+
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
|
625 |
+
computing decoding in one step.
|
626 |
+
"""
|
627 |
+
self.vae.disable_tiling()
|
628 |
+
|
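
Usage sketch for the four toggles above: they simply forward to the VAE, so on a memory-constrained GPU they can be combined freely. The checkpoint id below is illustrative; any SDXL pipeline exposing the same VAE works.

```py
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.enable_vae_slicing()  # decode batched outputs one image at a time
pipe.enable_vae_tiling()   # decode very large images tile by tile
```
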
629 |
+
def enable_model_cpu_offload(self, gpu_id=0):
|
630 |
+
r"""
|
631 |
+
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
|
632 |
+
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
|
633 |
+
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
|
634 |
+
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
|
635 |
+
"""
|
636 |
+
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
637 |
+
from accelerate import cpu_offload_with_hook
|
638 |
+
else:
|
639 |
+
raise ImportError(
|
640 |
+
"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher."
|
641 |
+
)
|
642 |
+
|
643 |
+
device = torch.device(f"cuda:{gpu_id}")
|
644 |
+
|
645 |
+
if self.device.type != "cpu":
|
646 |
+
self.to("cpu", silence_dtype_warnings=True)
|
647 |
+
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
|
648 |
+
|
649 |
+
model_sequence = (
|
650 |
+
[self.text_encoder, self.text_encoder_2]
|
651 |
+
if self.text_encoder is not None
|
652 |
+
else [self.text_encoder_2]
|
653 |
+
)
|
654 |
+
model_sequence.extend([self.unet, self.vae])
|
655 |
+
|
656 |
+
hook = None
|
657 |
+
for cpu_offloaded_model in model_sequence:
|
658 |
+
_, hook = cpu_offload_with_hook(
|
659 |
+
cpu_offloaded_model, device, prev_module_hook=hook
|
660 |
+
)
|
661 |
+
|
662 |
+
# We'll offload the last model manually.
|
663 |
+
self.final_offload_hook = hook
|
664 |
+
|
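
Usage sketch for the offload method above, mirroring the loading route shown in the example docstring earlier in this file (the checkpoint id and prompt are illustrative):

```py
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
    custom_pipeline="lpw_stable_diffusion_xl",
)
pipe.enable_model_cpu_offload()  # keeps only the active sub-model on the GPU
image = pipe("a watercolor painting of a (glowing:1.3) jellyfish").images[0]
```
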
665 |
+
def encode_prompt(
|
666 |
+
self,
|
667 |
+
prompt: str,
|
668 |
+
prompt_2: Optional[str] = None,
|
669 |
+
device: Optional[torch.device] = None,
|
670 |
+
num_images_per_prompt: int = 1,
|
671 |
+
do_classifier_free_guidance: bool = True,
|
672 |
+
negative_prompt: Optional[str] = None,
|
673 |
+
negative_prompt_2: Optional[str] = None,
|
674 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
675 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
676 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
677 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
678 |
+
lora_scale: Optional[float] = None,
|
679 |
+
):
|
680 |
+
r"""
|
681 |
+
Encodes the prompt into text encoder hidden states.
|
682 |
+
|
683 |
+
Args:
|
684 |
+
prompt (`str` or `List[str]`, *optional*):
|
685 |
+
prompt to be encoded
|
686 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
687 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
688 |
+
used in both text-encoders
|
689 |
+
device: (`torch.device`):
|
690 |
+
torch device
|
691 |
+
num_images_per_prompt (`int`):
|
692 |
+
number of images that should be generated per prompt
|
693 |
+
do_classifier_free_guidance (`bool`):
|
694 |
+
whether to use classifier free guidance or not
|
695 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
696 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
697 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
698 |
+
less than `1`).
|
699 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
700 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
701 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
702 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
703 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
704 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
705 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
706 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
707 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
708 |
+
argument.
|
709 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
710 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
711 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
712 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
713 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
714 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
715 |
+
input argument.
|
716 |
+
lora_scale (`float`, *optional*):
|
717 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
718 |
+
"""
|
719 |
+
device = device or self._execution_device
|
720 |
+
|
721 |
+
# set lora scale so that monkey patched LoRA
|
722 |
+
# function of text encoder can correctly access it
|
723 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
724 |
+
self._lora_scale = lora_scale
|
725 |
+
|
726 |
+
if prompt is not None and isinstance(prompt, str):
|
727 |
+
batch_size = 1
|
728 |
+
elif prompt is not None and isinstance(prompt, list):
|
729 |
+
batch_size = len(prompt)
|
730 |
+
else:
|
731 |
+
batch_size = prompt_embeds.shape[0]
|
732 |
+
|
733 |
+
# Define tokenizers and text encoders
|
734 |
+
tokenizers = (
|
735 |
+
[self.tokenizer, self.tokenizer_2]
|
736 |
+
if self.tokenizer is not None
|
737 |
+
else [self.tokenizer_2]
|
738 |
+
)
|
739 |
+
text_encoders = (
|
740 |
+
[self.text_encoder, self.text_encoder_2]
|
741 |
+
if self.text_encoder is not None
|
742 |
+
else [self.text_encoder_2]
|
743 |
+
)
|
744 |
+
|
745 |
+
if prompt_embeds is None:
|
746 |
+
prompt_2 = prompt_2 or prompt
|
747 |
+
# textual inversion: process multi-vector tokens if necessary
|
748 |
+
prompt_embeds_list = []
|
749 |
+
prompts = [prompt, prompt_2]
|
750 |
+
for prompt, tokenizer, text_encoder in zip(
|
751 |
+
prompts, tokenizers, text_encoders
|
752 |
+
):
|
753 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
754 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
755 |
+
|
756 |
+
text_inputs = tokenizer(
|
757 |
+
prompt,
|
758 |
+
padding="max_length",
|
759 |
+
max_length=tokenizer.model_max_length,
|
760 |
+
truncation=True,
|
761 |
+
return_tensors="pt",
|
762 |
+
)
|
763 |
+
|
764 |
+
text_input_ids = text_inputs.input_ids
|
765 |
+
untruncated_ids = tokenizer(
|
766 |
+
prompt, padding="longest", return_tensors="pt"
|
767 |
+
).input_ids
|
768 |
+
|
769 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[
|
770 |
+
-1
|
771 |
+
] and not torch.equal(text_input_ids, untruncated_ids):
|
772 |
+
removed_text = tokenizer.batch_decode(
|
773 |
+
untruncated_ids[:, tokenizer.model_max_length - 1 : -1]
|
774 |
+
)
|
775 |
+
logger.warning(
|
776 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
777 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
778 |
+
)
|
779 |
+
|
780 |
+
prompt_embeds = text_encoder(
|
781 |
+
text_input_ids.to(device),
|
782 |
+
output_hidden_states=True,
|
783 |
+
)
|
784 |
+
|
785 |
+
# We are always interested only in the pooled output of the final text encoder
|
786 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
787 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
788 |
+
|
789 |
+
prompt_embeds_list.append(prompt_embeds)
|
790 |
+
|
791 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
792 |
+
|
793 |
+
# get unconditional embeddings for classifier free guidance
|
794 |
+
zero_out_negative_prompt = (
|
795 |
+
negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
796 |
+
)
|
797 |
+
if (
|
798 |
+
do_classifier_free_guidance
|
799 |
+
and negative_prompt_embeds is None
|
800 |
+
and zero_out_negative_prompt
|
801 |
+
):
|
802 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
803 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
804 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
805 |
+
negative_prompt = negative_prompt or ""
|
806 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
807 |
+
|
808 |
+
uncond_tokens: List[str]
|
809 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
810 |
+
raise TypeError(
|
811 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
812 |
+
f" {type(prompt)}."
|
813 |
+
)
|
814 |
+
elif isinstance(negative_prompt, str):
|
815 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
816 |
+
elif batch_size != len(negative_prompt):
|
817 |
+
raise ValueError(
|
818 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
819 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
820 |
+
" the batch size of `prompt`."
|
821 |
+
)
|
822 |
+
else:
|
823 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
824 |
+
|
825 |
+
negative_prompt_embeds_list = []
|
826 |
+
for negative_prompt, tokenizer, text_encoder in zip(
|
827 |
+
uncond_tokens, tokenizers, text_encoders
|
828 |
+
):
|
829 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
830 |
+
negative_prompt = self.maybe_convert_prompt(
|
831 |
+
negative_prompt, tokenizer
|
832 |
+
)
|
833 |
+
|
834 |
+
max_length = prompt_embeds.shape[1]
|
835 |
+
uncond_input = tokenizer(
|
836 |
+
negative_prompt,
|
837 |
+
padding="max_length",
|
838 |
+
max_length=max_length,
|
839 |
+
truncation=True,
|
840 |
+
return_tensors="pt",
|
841 |
+
)
|
842 |
+
|
843 |
+
negative_prompt_embeds = text_encoder(
|
844 |
+
uncond_input.input_ids.to(device),
|
845 |
+
output_hidden_states=True,
|
846 |
+
)
|
847 |
+
# We are always interested only in the pooled output of the final text encoder
|
848 |
+
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
849 |
+
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
850 |
+
|
851 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
852 |
+
|
853 |
+
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
854 |
+
|
855 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
856 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
857 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
858 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
859 |
+
prompt_embeds = prompt_embeds.view(
|
860 |
+
bs_embed * num_images_per_prompt, seq_len, -1
|
861 |
+
)
|
862 |
+
|
863 |
+
if do_classifier_free_guidance:
|
864 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
865 |
+
seq_len = negative_prompt_embeds.shape[1]
|
866 |
+
negative_prompt_embeds = negative_prompt_embeds.to(
|
867 |
+
dtype=self.text_encoder_2.dtype, device=device
|
868 |
+
)
|
869 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(
|
870 |
+
1, num_images_per_prompt, 1
|
871 |
+
)
|
872 |
+
negative_prompt_embeds = negative_prompt_embeds.view(
|
873 |
+
batch_size * num_images_per_prompt, seq_len, -1
|
874 |
+
)
|
875 |
+
|
876 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(
|
877 |
+
1, num_images_per_prompt
|
878 |
+
).view(bs_embed * num_images_per_prompt, -1)
|
879 |
+
if do_classifier_free_guidance:
|
880 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(
|
881 |
+
1, num_images_per_prompt
|
882 |
+
).view(bs_embed * num_images_per_prompt, -1)
|
883 |
+
|
884 |
+
return (
|
885 |
+
prompt_embeds,
|
886 |
+
negative_prompt_embeds,
|
887 |
+
pooled_prompt_embeds,
|
888 |
+
negative_pooled_prompt_embeds,
|
889 |
+
)
|
890 |
+
|
891 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
892 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
893 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
894 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
895 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
896 |
+
# and should be between [0, 1]
|
897 |
+
|
898 |
+
accepts_eta = "eta" in set(
|
899 |
+
inspect.signature(self.scheduler.step).parameters.keys()
|
900 |
+
)
|
901 |
+
extra_step_kwargs = {}
|
902 |
+
if accepts_eta:
|
903 |
+
extra_step_kwargs["eta"] = eta
|
904 |
+
|
905 |
+
# check if the scheduler accepts generator
|
906 |
+
accepts_generator = "generator" in set(
|
907 |
+
inspect.signature(self.scheduler.step).parameters.keys()
|
908 |
+
)
|
909 |
+
if accepts_generator:
|
910 |
+
extra_step_kwargs["generator"] = generator
|
911 |
+
return extra_step_kwargs
|
912 |
+
|
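
A small illustration of the signature check above: `eta` only exists for DDIM-style schedulers, so it must not be forwarded blindly. The scheduler classes below are just examples:

```py
import inspect

from diffusers import DDIMScheduler, EulerAncestralDiscreteScheduler

for sched_cls in (DDIMScheduler, EulerAncestralDiscreteScheduler):
    params = inspect.signature(sched_cls.step).parameters
    print(sched_cls.__name__, "eta" in params, "generator" in params)
# DDIMScheduler True True
# EulerAncestralDiscreteScheduler False True
```
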
913 |
+
def check_inputs(
|
914 |
+
self,
|
915 |
+
prompt,
|
916 |
+
prompt_2,
|
917 |
+
height,
|
918 |
+
width,
|
919 |
+
callback_steps,
|
920 |
+
negative_prompt=None,
|
921 |
+
negative_prompt_2=None,
|
922 |
+
prompt_embeds=None,
|
923 |
+
negative_prompt_embeds=None,
|
924 |
+
pooled_prompt_embeds=None,
|
925 |
+
negative_pooled_prompt_embeds=None,
|
926 |
+
):
|
927 |
+
if height % 8 != 0 or width % 8 != 0:
|
928 |
+
raise ValueError(
|
929 |
+
f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
|
930 |
+
)
|
931 |
+
|
932 |
+
if (callback_steps is None) or (
|
933 |
+
callback_steps is not None
|
934 |
+
and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
935 |
+
):
|
936 |
+
raise ValueError(
|
937 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
938 |
+
f" {type(callback_steps)}."
|
939 |
+
)
|
940 |
+
|
941 |
+
if prompt is not None and prompt_embeds is not None:
|
942 |
+
raise ValueError(
|
943 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
944 |
+
" only forward one of the two."
|
945 |
+
)
|
946 |
+
elif prompt_2 is not None and prompt_embeds is not None:
|
947 |
+
raise ValueError(
|
948 |
+
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
949 |
+
" only forward one of the two."
|
950 |
+
)
|
951 |
+
elif prompt is None and prompt_embeds is None:
|
952 |
+
raise ValueError(
|
953 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
954 |
+
)
|
955 |
+
elif prompt is not None and (
|
956 |
+
not isinstance(prompt, str) and not isinstance(prompt, list)
|
957 |
+
):
|
958 |
+
raise ValueError(
|
959 |
+
f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
|
960 |
+
)
|
961 |
+
elif prompt_2 is not None and (
|
962 |
+
not isinstance(prompt_2, str) and not isinstance(prompt_2, list)
|
963 |
+
):
|
964 |
+
raise ValueError(
|
965 |
+
f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}"
|
966 |
+
)
|
967 |
+
|
968 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
969 |
+
raise ValueError(
|
970 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
971 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
972 |
+
)
|
973 |
+
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
|
974 |
+
raise ValueError(
|
975 |
+
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
|
976 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
977 |
+
)
|
978 |
+
|
979 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
980 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
981 |
+
raise ValueError(
|
982 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
983 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
984 |
+
f" {negative_prompt_embeds.shape}."
|
985 |
+
)
|
986 |
+
|
987 |
+
if prompt_embeds is not None and pooled_prompt_embeds is None:
|
988 |
+
raise ValueError(
|
989 |
+
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
|
990 |
+
)
|
991 |
+
|
992 |
+
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
|
993 |
+
raise ValueError(
|
994 |
+
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
|
995 |
+
)
|
996 |
+
|
997 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
998 |
+
def prepare_latents(
|
999 |
+
self,
|
1000 |
+
batch_size,
|
1001 |
+
num_channels_latents,
|
1002 |
+
height,
|
1003 |
+
width,
|
1004 |
+
dtype,
|
1005 |
+
device,
|
1006 |
+
generator,
|
1007 |
+
latents=None,
|
1008 |
+
):
|
1009 |
+
shape = (
|
1010 |
+
batch_size,
|
1011 |
+
num_channels_latents,
|
1012 |
+
height // self.vae_scale_factor,
|
1013 |
+
width // self.vae_scale_factor,
|
1014 |
+
)
|
1015 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
1016 |
+
raise ValueError(
|
1017 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
1018 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
1019 |
+
)
|
1020 |
+
|
1021 |
+
if latents is None:
|
1022 |
+
latents = randn_tensor(
|
1023 |
+
shape, generator=generator, device=device, dtype=dtype
|
1024 |
+
)
|
1025 |
+
else:
|
1026 |
+
latents = latents.to(device)
|
1027 |
+
|
1028 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
1029 |
+
latents = latents * self.scheduler.init_noise_sigma
|
1030 |
+
return latents
|
1031 |
+
|
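
Shape check for the latents prepared above, assuming the standard SDXL UNet (4 latent channels) and the VAE scale factor of 8 computed in `__init__`:

```py
batch_size, num_channels_latents, vae_scale_factor = 2, 4, 8
height = width = 1024
shape = (
    batch_size,
    num_channels_latents,
    height // vae_scale_factor,
    width // vae_scale_factor,
)
print(shape)  # (2, 4, 128, 128)
```
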
1032 |
+
def _get_add_time_ids(
|
1033 |
+
self, original_size, crops_coords_top_left, target_size, dtype
|
1034 |
+
):
|
1035 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
1036 |
+
|
1037 |
+
passed_add_embed_dim = (
|
1038 |
+
self.unet.config.addition_time_embed_dim * len(add_time_ids)
|
1039 |
+
+ self.text_encoder_2.config.projection_dim
|
1040 |
+
)
|
1041 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
1042 |
+
|
1043 |
+
if expected_add_embed_dim != passed_add_embed_dim:
|
1044 |
+
raise ValueError(
|
1045 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
1046 |
+
)
|
1047 |
+
|
1048 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
1049 |
+
return add_time_ids
|
1050 |
+
|
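
Illustration of the additional time ids assembled above: SDXL's micro-conditioning concatenates `original_size`, `crops_coords_top_left`, and `target_size` into six numbers per sample (the values below are the usual defaults, shown only as an example):

```py
import torch

original_size, crops_coords_top_left, target_size = (1024, 1024), (0, 0), (1024, 1024)
add_time_ids = torch.tensor(
    [list(original_size + crops_coords_top_left + target_size)], dtype=torch.float32
)
print(add_time_ids)  # tensor([[1024., 1024., 0., 0., 1024., 1024.]])
```
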
1051 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
|
1052 |
+
def upcast_vae(self):
|
1053 |
+
dtype = self.vae.dtype
|
1054 |
+
self.vae.to(dtype=torch.float32)
|
1055 |
+
use_torch_2_0_or_xformers = isinstance(
|
1056 |
+
self.vae.decoder.mid_block.attentions[0].processor,
|
1057 |
+
(
|
1058 |
+
AttnProcessor2_0,
|
1059 |
+
XFormersAttnProcessor,
|
1060 |
+
LoRAXFormersAttnProcessor,
|
1061 |
+
LoRAAttnProcessor2_0,
|
1062 |
+
),
|
1063 |
+
)
|
1064 |
+
# if xformers or torch_2_0 is used attention block does not need
|
1065 |
+
# to be in float32 which can save lots of memory
|
1066 |
+
if use_torch_2_0_or_xformers:
|
1067 |
+
self.vae.post_quant_conv.to(dtype)
|
1068 |
+
self.vae.decoder.conv_in.to(dtype)
|
1069 |
+
self.vae.decoder.mid_block.to(dtype)
|
1070 |
+
|
1071 |
+
@torch.no_grad()
|
1072 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
1073 |
+
def __call__(
|
1074 |
+
self,
|
1075 |
+
prompt: str = None,
|
1076 |
+
prompt_2: Optional[str] = None,
|
1077 |
+
height: Optional[int] = None,
|
1078 |
+
width: Optional[int] = None,
|
1079 |
+
num_inference_steps: int = 50,
|
1080 |
+
denoising_end: Optional[float] = None,
|
1081 |
+
guidance_scale: float = 5.0,
|
1082 |
+
negative_prompt: Optional[str] = None,
|
1083 |
+
negative_prompt_2: Optional[str] = None,
|
1084 |
+
num_images_per_prompt: Optional[int] = 1,
|
1085 |
+
eta: float = 0.0,
|
1086 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
1087 |
+
latents: Optional[torch.FloatTensor] = None,
|
1088 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
1089 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
1090 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
1091 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
1092 |
+
output_type: Optional[str] = "pil",
|
1093 |
+
return_dict: bool = True,
|
1094 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
1095 |
+
callback_steps: int = 1,
|
1096 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
1097 |
+
guidance_rescale: float = 0.0,
|
1098 |
+
original_size: Optional[Tuple[int, int]] = None,
|
1099 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
1100 |
+
target_size: Optional[Tuple[int, int]] = None,
|
1101 |
+
):
|
1102 |
+
r"""
|
1103 |
+
Function invoked when calling the pipeline for generation.
|
1104 |
+
|
1105 |
+
Args:
|
1106 |
+
prompt (`str`):
|
1107 |
+
The prompt to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
1108 |
+
instead.
|
1109 |
+
prompt_2 (`str`):
|
1110 |
+
The prompt to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
1111 |
+
used in both text-encoders
|
1112 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
1113 |
+
The height in pixels of the generated image.
|
1114 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
1115 |
+
The width in pixels of the generated image.
|
1116 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
1117 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
1118 |
+
expense of slower inference.
|
1119 |
+
denoising_end (`float`, *optional*):
|
1120 |
+
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
|
1121 |
+
completed before it is intentionally prematurely terminated. As a result, the returned sample will
|
1122 |
+
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
|
1123 |
+
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
|
1124 |
+
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
|
1125 |
+
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
|
1126 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
1127 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
1128 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
1129 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
1130 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
1131 |
+
usually at the expense of lower image quality.
|
1132 |
+
negative_prompt (`str`):
|
1133 |
+
The prompt not to guide the image generation. If not defined, one has to pass
|
1134 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
1135 |
+
less than `1`).
|
1136 |
+
negative_prompt_2 (`str`):
|
1137 |
+
The prompt not to guide the image generation to be sent to `tokenizer_2` and
|
1138 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
1139 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
1140 |
+
The number of images to generate per prompt.
|
1141 |
+
eta (`float`, *optional*, defaults to 0.0):
|
1142 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
1143 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
1144 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
1145 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
1146 |
+
to make generation deterministic.
|
1147 |
+
latents (`torch.FloatTensor`, *optional*):
|
1148 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
1149 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
1150 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
1151 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
1152 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
1153 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
1154 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
1155 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
1156 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
1157 |
+
argument.
|
1158 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
1159 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
1160 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
1161 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
1162 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
1163 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
1164 |
+
input argument.
|
1165 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
1166 |
+
The output format of the generated image. Choose between
|
1167 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
1168 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
1169 |
+
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
|
1170 |
+
of a plain tuple.
|
1171 |
+
callback (`Callable`, *optional*):
|
1172 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
1173 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
1174 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
1175 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
1176 |
+
called at every step.
|
1177 |
+
cross_attention_kwargs (`dict`, *optional*):
|
1178 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
1179 |
+
`self.processor` in
|
1180 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
1181 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
1182 |
+
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
|
1183 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
|
1184 |
+
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
|
1185 |
+
Guidance rescale factor should fix overexposure when using zero terminal SNR.
|
1186 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
1187 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
1188 |
+
`original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
|
1189 |
+
explained in section 2.2 of
|
1190 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
1191 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
1192 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
1193 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
1194 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
1195 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
1196 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
1197 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
1198 |
+
not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
|
1199 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
1200 |
+
|
1201 |
+
Examples:
|
1202 |
+
|
1203 |
+
Returns:
|
1204 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
|
1205 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
|
1206 |
+
`tuple`. When returning a tuple, the first element is a list with the generated images.
|
1207 |
+
"""
|
1208 |
+
        # 0. Default height and width to unet
        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = target_size or (height, width)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            prompt_2,
            height,
            width,
            callback_steps,
            negative_prompt,
            negative_prompt_2,
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        (
            cross_attention_kwargs.get("scale", None)
            if cross_attention_kwargs is not None
            else None
        )

        negative_prompt = negative_prompt if negative_prompt is not None else ""

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = get_weighted_text_embeddings_sdxl(
            pipe=self, prompt=prompt, neg_prompt=negative_prompt
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)

        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Prepare added time ids & embeddings
        add_text_embeds = pooled_prompt_embeds
        add_time_ids = self._get_add_time_ids(
            original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
        )

        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            add_text_embeds = torch.cat(
                [negative_pooled_prompt_embeds, add_text_embeds], dim=0
            )
            add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)

        prompt_embeds = prompt_embeds.to(device)
        add_text_embeds = add_text_embeds.to(device)
        add_time_ids = add_time_ids.to(device).repeat(
            batch_size * num_images_per_prompt, 1
        )

        # 8. Denoising loop
        num_warmup_steps = max(
            len(timesteps) - num_inference_steps * self.scheduler.order, 0
        )

        # 7.1 Apply denoising_end
        if (
            denoising_end is not None
            and type(denoising_end) == float
            and denoising_end > 0
            and denoising_end < 1
        ):
            discrete_timestep_cutoff = int(
                round(
                    self.scheduler.config.num_train_timesteps
                    - (denoising_end * self.scheduler.config.num_train_timesteps)
                )
            )
            num_inference_steps = len(
                list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))
            )
            timesteps = timesteps[:num_inference_steps]

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = (
                    torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                )

                latent_model_input = self.scheduler.scale_model_input(
                    latent_model_input, t
                )

                # predict the noise residual
                added_cond_kwargs = {
                    "text_embeds": add_text_embeds,
                    "time_ids": add_time_ids,
                }
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (
                        noise_pred_text - noise_pred_uncond
                    )

                if do_classifier_free_guidance and guidance_rescale > 0.0:
                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
                    noise_pred = rescale_noise_cfg(
                        noise_pred, noise_pred_text, guidance_rescale=guidance_rescale
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(
                    noise_pred, t, latents, **extra_step_kwargs, return_dict=False
                )[0]

                # call the callback, if provided
                if i == len(timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # make sure the VAE is in float32 mode, as it overflows in float16
        if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
            self.upcast_vae()
            latents = latents.to(
                next(iter(self.vae.post_quant_conv.parameters())).dtype
            )

        if not output_type == "latent":
            image = self.vae.decode(
                latents / self.vae.config.scaling_factor, return_dict=False
            )[0]
        else:
            image = latents
            return StableDiffusionXLPipelineOutput(images=image)

        # apply watermark if available
        if self.watermark is not None:
            image = self.watermark.apply_watermark(image)

        image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image,)

        return StableDiffusionXLPipelineOutput(images=image)

    # Override to properly handle the loading and unloading of the additional text encoder.
    def load_lora_weights(
        self,
        pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
        **kwargs,
    ):
        # We could have accessed the unet config from `lora_state_dict()` too. We pass
        # it here explicitly to be able to tell that it's coming from an SDXL
        # pipeline.
        state_dict, network_alphas = self.lora_state_dict(
            pretrained_model_name_or_path_or_dict,
            unet_config=self.unet.config,
            **kwargs,
        )
        self.load_lora_into_unet(
            state_dict, network_alphas=network_alphas, unet=self.unet
        )

        text_encoder_state_dict = {
            k: v for k, v in state_dict.items() if "text_encoder." in k
        }
        if len(text_encoder_state_dict) > 0:
            self.load_lora_into_text_encoder(
                text_encoder_state_dict,
                network_alphas=network_alphas,
                text_encoder=self.text_encoder,
                prefix="text_encoder",
                lora_scale=self.lora_scale,
            )

        text_encoder_2_state_dict = {
            k: v for k, v in state_dict.items() if "text_encoder_2." in k
        }
        if len(text_encoder_2_state_dict) > 0:
            self.load_lora_into_text_encoder(
                text_encoder_2_state_dict,
                network_alphas=network_alphas,
                text_encoder=self.text_encoder_2,
                prefix="text_encoder_2",
                lora_scale=self.lora_scale,
            )

    @classmethod
    def save_lora_weights(
        self,
        save_directory: Union[str, os.PathLike],
        unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        text_encoder_lora_layers: Dict[
            str, Union[torch.nn.Module, torch.Tensor]
        ] = None,
        text_encoder_2_lora_layers: Dict[
            str, Union[torch.nn.Module, torch.Tensor]
        ] = None,
        is_main_process: bool = True,
        weight_name: str = None,
        save_function: Callable = None,
        safe_serialization: bool = False,
    ):
        state_dict = {}

        def pack_weights(layers, prefix):
            layers_weights = (
                layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
            )
            layers_state_dict = {
                f"{prefix}.{module_name}": param
                for module_name, param in layers_weights.items()
            }
            return layers_state_dict

        state_dict.update(pack_weights(unet_lora_layers, "unet"))

        if text_encoder_lora_layers and text_encoder_2_lora_layers:
            state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
            state_dict.update(
                pack_weights(text_encoder_2_lora_layers, "text_encoder_2")
            )

        self.write_lora_layers(
            state_dict=state_dict,
            save_directory=save_directory,
            is_main_process=is_main_process,
            weight_name=weight_name,
            save_function=save_function,
            safe_serialization=safe_serialization,
        )

    def _remove_text_encoder_monkey_patch(self):
        self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
        self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
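Not part of the diff: a minimal usage sketch of the pipeline defined above. It assumes the class in this file is named SDXLLongPromptWeightingPipeline (as in the upstream community pipeline it is based on), that a CUDA GPU is available, and that the prompts and LoRA file name are placeholders.

import torch
from lpw_stable_diffusion_xl import SDXLLongPromptWeightingPipeline  # class name assumed

pipe = SDXLLongPromptWeightingPipeline.from_pretrained(
    "Linaqruf/animagine-xl", torch_dtype=torch.float16
).to("cuda")

# The load_lora_weights override above routes UNet and both text-encoder weights, e.g.:
# pipe.load_lora_weights(".", weight_name="my-lora.safetensors")  # hypothetical file

image = pipe(
    prompt="face focus, 1girl, solo, masterpiece, best quality",   # placeholder prompt
    negative_prompt="lowres, bad anatomy, bad hands",
    num_inference_steps=50,
    guidance_scale=12.0,
    guidance_rescale=0.7,          # rescale factor φ from the docstring; 0 disables rescaling
    original_size=(4096, 4096),    # SDXL micro-conditioning: pretend a larger source image
    crops_coords_top_left=(0, 0),  # well-centered crop
    target_size=(1024, 1024),
).images[0]
image.save("sample.png")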
requirements.txt
CHANGED
@@ -1,7 +1,8 @@
 accelerate==0.21.0
-diffusers==0.
+diffusers==0.20.0
 gradio==3.40.1
 invisible-watermark==0.2.0
 Pillow==10.0.0
 torch==2.0.1
 transformers==4.31.0
+toml==0.10.2
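The new toml==0.10.2 pin presumably supports reading the lora.toml added in this commit. A small sketch of parsing such a file with this package (the keys printed are whatever the file contains; the real schema of lora.toml is not reproduced here):

import toml

config = toml.load("lora.toml")  # returns a plain dict of TOML tables and keys
for name, entry in config.items():
    # e.g. each table might describe one LoRA: a weights file, a trigger word, a default scale
    print(name, entry)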
style.css
CHANGED
@@ -3,6 +3,11 @@ h1 {
   font-size: 10vw; /* relative to the viewport width */
 }
 
+h2 {
+  text-align: center;
+  font-size: 10vw; /* relative to the viewport width */
+}
+
 #duplicate-button {
   margin: auto;
   color: #fff;
@@ -23,3 +28,33 @@
   padding-top: 1rem;
 }
 }
+
+#gallery .grid-wrap{
+  min-height: 25%;
+}
+
+#title-container {
+  display: flex;
+  justify-content: center;
+  align-items: center;
+  height: 100vh; /* Adjust this value to position the title vertically */
+}
+
+#title {
+  font-size: 3em;
+  text-align: center;
+  color: #333;
+  font-family: 'Helvetica Neue', sans-serif;
+  text-transform: uppercase;
+  background: transparent;
+}
+
+#title span {
+  background: -webkit-linear-gradient(45deg, #4EACEF, #28b485);
+  -webkit-background-clip: text;
+  -webkit-text-fill-color: transparent;
+}
+
+#subtitle {
+  text-align: center;
+}
utils.py
ADDED
@@ -0,0 +1,7 @@
def is_google_colab():
    try:
        import google.colab

        return True
    except:
        return False
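For completeness, a usage sketch of the new helper (illustrative only; how the demo notebook actually uses it is outside this diff):

from utils import is_google_colab

# The import of google.colab only succeeds inside a Colab runtime,
# so this evaluates to False locally or on Spaces.
if is_google_colab():
    print("Running in Google Colab")
else:
    print("Running outside Colab")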