duongve committed on
Commit
7ef93e7
1 Parent(s): 773f99a

Upload 24 files

Dockerfile ADDED
@@ -0,0 +1,16 @@
+ # Dockerfile Public T4
+
+ FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel
+ ENV DEBIAN_FRONTEND noninteractive
+
+ WORKDIR /content
+ RUN pip install numexpr einops transformers k_diffusion safetensors gradio diffusers xformers
+
+ ADD . .
+ RUN adduser --disabled-password --gecos '' user
+ RUN chown -R user:user /content
+ RUN chmod -R 777 /content
+ USER user
+
+ EXPOSE 7860
+ CMD python /content/app.py
README.md CHANGED
@@ -1,11 +1,13 @@
  ---
- title: Spatial Control For SD
- emoji: 👁
- colorFrom: blue
+ title: Sd Diffusers Webui
+ emoji: 🐳
+ colorFrom: purple
  colorTo: gray
  sdk: docker
+ sdk_version: 3.9
  pinned: false
- license: apache-2.0
+ license: openrail
+ app_port: 7860
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
The diff for this file is too large to render. See raw diff
 
modules/attention_modify.py ADDED
@@ -0,0 +1,1044 @@
1
+ from diffusers.utils import (
2
+ USE_PEFT_BACKEND,
3
+ _get_model_file,
4
+ delete_adapter_layers,
5
+ is_accelerate_available,
6
+ logging,
7
+ set_adapter_layers,
8
+ set_weights_and_activate_adapters,
9
+ )
10
+
11
+ import torch
12
+ import torch.nn.functional as F
13
+ from torch.autograd.function import Function
14
+ import torch.nn as nn
15
+ from torch import einsum
16
+ import os
17
+ from collections import defaultdict
18
+ from contextlib import nullcontext
19
+ from typing import Callable, Dict, List, Optional, Union
20
+ from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
21
+ from diffusers.models.embeddings import ImageProjection
22
+ from diffusers.models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
23
+ import math
24
+ from einops import rearrange
25
+ from diffusers.image_processor import IPAdapterMaskProcessor
26
+
27
+ xformers_available = False
28
+ try:
29
+ import xformers
30
+
31
+ xformers_available = True
32
+ except ImportError:
33
+ pass
34
+
35
+ EPSILON = 1e-6
36
+ exists = lambda val: val is not None
37
+ default = lambda val, d: val if exists(val) else d
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+ def get_attention_scores(attn, query, key, attention_mask=None):
40
+
41
+ if attn.upcast_attention:
42
+ query = query.float()
43
+ key = key.float()
44
+ if attention_mask is None:
45
+ baddbmm_input = torch.empty(
46
+ query.shape[0],
47
+ query.shape[1],
48
+ key.shape[1],
49
+ dtype=query.dtype,
50
+ device=query.device,
51
+ )
52
+ beta = 0
53
+ else:
54
+ baddbmm_input = attention_mask
55
+ beta = 1
56
+
57
+ attention_scores = torch.baddbmm(
58
+ baddbmm_input,
59
+ query,
60
+ key.transpose(-1, -2),
61
+ beta=beta,
62
+ alpha=attn.scale,
63
+ )
64
+
65
+ del baddbmm_input
66
+
67
+ if attn.upcast_softmax:
68
+ attention_scores = attention_scores.float()
69
+
70
+ return attention_scores.to(query.dtype)
71
+
72
+
73
+ # Get attention_score with this:
74
+ def scaled_dot_product_attention_regionstate(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None,weight_func =None, region_state = None, sigma = None) -> torch.Tensor:
75
+ # Efficient implementation equivalent to the following:
76
+ L, S = query.size(-2), key.size(-2)
77
+ scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
78
+ attn_bias = torch.zeros(L, S, dtype=query.dtype,device = query.device)
79
+ if is_causal:
80
+ assert attn_mask is None
81
+ temp_mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
82
+ attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
83
+ attn_bias.to(query.dtype)
84
+
85
+ if attn_mask is not None:
86
+ if attn_mask.dtype == torch.bool:
87
+ attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf"))
88
+ else:
89
+ attn_bias += attn_mask
90
+ attn_weight = query @ key.transpose(-2, -1) * scale_factor
91
+ attn_weight += attn_bias
92
+
93
+ batch_size, num_heads, sequence_length, embed_dim = attn_weight.shape
94
+ attn_weight = attn_weight.reshape((-1,sequence_length,embed_dim))
95
+ cross_attention_weight = weight_func(region_state, sigma, attn_weight)
96
+ repeat_time = attn_weight.shape[0]//cross_attention_weight.shape[0]
97
+ attn_weight += torch.repeat_interleave(
98
+ cross_attention_weight, repeats=repeat_time, dim=0
99
+ )
100
+ attn_weight = attn_weight.reshape((-1,num_heads,sequence_length,embed_dim))
101
+ attn_weight = torch.softmax(attn_weight, dim=-1)
102
+ attn_weight = torch.dropout(attn_weight, dropout_p, train=True)
103
+ return attn_weight @ value
104
+
105
+ class FlashAttentionFunction(Function):
106
+ @staticmethod
107
+ @torch.no_grad()
108
+ def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):
109
+ """Algorithm 2 in the paper"""
110
+
111
+ device = q.device
112
+ max_neg_value = -torch.finfo(q.dtype).max
113
+ qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
114
+
115
+ o = torch.zeros_like(q)
116
+ all_row_sums = torch.zeros((*q.shape[:-1], 1), device=device)
117
+ all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, device=device)
118
+
119
+ scale = q.shape[-1] ** -0.5
120
+
121
+ if not exists(mask):
122
+ mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)
123
+ else:
124
+ mask = rearrange(mask, "b n -> b 1 1 n")
125
+ mask = mask.split(q_bucket_size, dim=-1)
126
+
127
+ row_splits = zip(
128
+ q.split(q_bucket_size, dim=-2),
129
+ o.split(q_bucket_size, dim=-2),
130
+ mask,
131
+ all_row_sums.split(q_bucket_size, dim=-2),
132
+ all_row_maxes.split(q_bucket_size, dim=-2),
133
+ )
134
+
135
+ for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):
136
+ q_start_index = ind * q_bucket_size - qk_len_diff
137
+
138
+ col_splits = zip(
139
+ k.split(k_bucket_size, dim=-2),
140
+ v.split(k_bucket_size, dim=-2),
141
+ )
142
+
143
+ for k_ind, (kc, vc) in enumerate(col_splits):
144
+ k_start_index = k_ind * k_bucket_size
145
+
146
+ attn_weights = einsum("... i d, ... j d -> ... i j", qc, kc) * scale
147
+
148
+ if exists(row_mask):
149
+ attn_weights.masked_fill_(~row_mask, max_neg_value)
150
+
151
+ if causal and q_start_index < (k_start_index + k_bucket_size - 1):
152
+ causal_mask = torch.ones(
153
+ (qc.shape[-2], kc.shape[-2]), dtype=torch.bool, device=device
154
+ ).triu(q_start_index - k_start_index + 1)
155
+ attn_weights.masked_fill_(causal_mask, max_neg_value)
156
+
157
+ block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)
158
+ attn_weights -= block_row_maxes
159
+ exp_weights = torch.exp(attn_weights)
160
+
161
+ if exists(row_mask):
162
+ exp_weights.masked_fill_(~row_mask, 0.0)
163
+
164
+ block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(
165
+ min=EPSILON
166
+ )
167
+
168
+ new_row_maxes = torch.maximum(block_row_maxes, row_maxes)
169
+
170
+ exp_values = einsum("... i j, ... j d -> ... i d", exp_weights, vc)
171
+
172
+ exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)
173
+ exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)
174
+
175
+ new_row_sums = (
176
+ exp_row_max_diff * row_sums
177
+ + exp_block_row_max_diff * block_row_sums
178
+ )
179
+
180
+ oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_(
181
+ (exp_block_row_max_diff / new_row_sums) * exp_values
182
+ )
183
+
184
+ row_maxes.copy_(new_row_maxes)
185
+ row_sums.copy_(new_row_sums)
186
+
187
+ lse = all_row_sums.log() + all_row_maxes
188
+
189
+ ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)
190
+ ctx.save_for_backward(q, k, v, o, lse)
191
+
192
+ return o
193
+
194
+ @staticmethod
195
+ @torch.no_grad()
196
+ def backward(ctx, do):
197
+ """Algorithm 4 in the paper"""
198
+
199
+ causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args
200
+ q, k, v, o, lse = ctx.saved_tensors
201
+
202
+ device = q.device
203
+
204
+ max_neg_value = -torch.finfo(q.dtype).max
205
+ qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
206
+
207
+ dq = torch.zeros_like(q)
208
+ dk = torch.zeros_like(k)
209
+ dv = torch.zeros_like(v)
210
+
211
+ row_splits = zip(
212
+ q.split(q_bucket_size, dim=-2),
213
+ o.split(q_bucket_size, dim=-2),
214
+ do.split(q_bucket_size, dim=-2),
215
+ mask,
216
+ lse.split(q_bucket_size, dim=-2),
217
+ dq.split(q_bucket_size, dim=-2),
218
+ )
219
+
220
+ for ind, (qc, oc, doc, row_mask, lsec, dqc) in enumerate(row_splits):
221
+ q_start_index = ind * q_bucket_size - qk_len_diff
222
+
223
+ col_splits = zip(
224
+ k.split(k_bucket_size, dim=-2),
225
+ v.split(k_bucket_size, dim=-2),
226
+ dk.split(k_bucket_size, dim=-2),
227
+ dv.split(k_bucket_size, dim=-2),
228
+ )
229
+
230
+ for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):
231
+ k_start_index = k_ind * k_bucket_size
232
+
233
+ attn_weights = einsum("... i d, ... j d -> ... i j", qc, kc) * scale
234
+
235
+ if causal and q_start_index < (k_start_index + k_bucket_size - 1):
236
+ causal_mask = torch.ones(
237
+ (qc.shape[-2], kc.shape[-2]), dtype=torch.bool, device=device
238
+ ).triu(q_start_index - k_start_index + 1)
239
+ attn_weights.masked_fill_(causal_mask, max_neg_value)
240
+
241
+ p = torch.exp(attn_weights - lsec)
242
+
243
+ if exists(row_mask):
244
+ p.masked_fill_(~row_mask, 0.0)
245
+
246
+ dv_chunk = einsum("... i j, ... i d -> ... j d", p, doc)
247
+ dp = einsum("... i d, ... j d -> ... i j", doc, vc)
248
+
249
+ D = (doc * oc).sum(dim=-1, keepdims=True)
250
+ ds = p * scale * (dp - D)
251
+
252
+ dq_chunk = einsum("... i j, ... j d -> ... i d", ds, kc)
253
+ dk_chunk = einsum("... i j, ... i d -> ... j d", ds, qc)
254
+
255
+ dqc.add_(dq_chunk)
256
+ dkc.add_(dk_chunk)
257
+ dvc.add_(dv_chunk)
258
+
259
+ return dq, dk, dv, None, None, None, None
260
+
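# Illustrative sketch (not from this commit): FlashAttentionFunction is the
# chunked-attention fallback; the call sites further down keep it commented out
# in favour of xformers / baddbmm. Driven directly, with the bucket sizes those
# commented-out call sites use:
#
#   q = torch.randn(1, 8, 256, 64)   # (batch, heads, tokens, head dim)
#   k = torch.randn(1, 8, 256, 64)
#   v = torch.randn(1, 8, 256, 64)
#   out = FlashAttentionFunction.apply(q, k, v, None, False, 512, 1024)
#   out.shape  # torch.Size([1, 8, 256, 64])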
261
+ class AttnProcessor(nn.Module):
262
+ def __call__(
263
+ self,
264
+ attn,
265
+ hidden_states,
266
+ encoder_hidden_states=None,
267
+ attention_mask=None,
268
+ temb: Optional[torch.Tensor] = None,
269
+ region_prompt = None,
270
+ ip_adapter_masks = None,
271
+ *args,
272
+ **kwargs,
273
+ ):
274
+ if len(args) > 0 or kwargs.get("scale", None) is not None:
275
+ deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
276
+ deprecate("scale", "1.0.0", deprecation_message)
277
+
278
+ residual = hidden_states
279
+
280
+
281
+ #_,img_sequence_length,_ = hidden_states.shape
282
+ img_sequence_length = hidden_states.shape[1]
283
+
284
+ if attn.spatial_norm is not None:
285
+ hidden_states = attn.spatial_norm(hidden_states, temb)
286
+
287
+ input_ndim = hidden_states.ndim
288
+
289
+ if input_ndim == 4:
290
+ batch_size, channel, height, width = hidden_states.shape
291
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
292
+
293
+
294
+ is_xattn = False
295
+ if encoder_hidden_states is not None and region_prompt is not None:
296
+ is_xattn = True
297
+ region_state = region_prompt["region_state"]
298
+ weight_func = region_prompt["weight_func"]
299
+ sigma = region_prompt["sigma"]
300
+
301
+ batch_size, sequence_length, _ = (
302
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
303
+ )
304
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length,batch_size)
305
+
306
+ if attn.group_norm is not None:
307
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
308
+
309
+ query = attn.to_q(hidden_states)
310
+
311
+ if encoder_hidden_states is None:
312
+ encoder_hidden_states = hidden_states
313
+ elif attn.norm_cross:
314
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
315
+
316
+
317
+ key = attn.to_k(encoder_hidden_states)
318
+ value = attn.to_v(encoder_hidden_states)
319
+
320
+ query = attn.head_to_batch_dim(query)
321
+ key = attn.head_to_batch_dim(key)
322
+ value = attn.head_to_batch_dim(value)
323
+
324
+ if is_xattn and isinstance(region_state, dict):
325
+ # use torch.baddbmm method (slow)
326
+ attention_scores = get_attention_scores(attn, query, key, attention_mask)
327
+ cross_attention_weight = weight_func(region_state[img_sequence_length].to(query.device), sigma, attention_scores)
328
+ attention_scores += torch.repeat_interleave(
329
+ cross_attention_weight, repeats=attention_scores.shape[0] // cross_attention_weight.shape[0], dim=0
330
+ )
331
+
332
+ # calc probs
333
+ attention_probs = attention_scores.softmax(dim=-1)
334
+ attention_probs = attention_probs.to(query.dtype)
335
+ hidden_states = torch.bmm(attention_probs, value)
336
+
337
+ elif xformers_available:
338
+ hidden_states = xformers.ops.memory_efficient_attention(
339
+ query.contiguous(),
340
+ key.contiguous(),
341
+ value.contiguous(),
342
+ attn_bias=attention_mask,
343
+ )
344
+ hidden_states = hidden_states.to(query.dtype)
345
+
346
+ else:
347
+ '''q_bucket_size = 512
348
+ k_bucket_size = 1024
349
+
350
+ # use flash-attention
351
+ hidden_states = FlashAttentionFunction.apply(
352
+ query.contiguous(),
353
+ key.contiguous(),
354
+ value.contiguous(),
355
+ attention_mask,
356
+ False,
357
+ q_bucket_size,
358
+ k_bucket_size,
359
+ )'''
360
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
361
+ hidden_states = torch.bmm(attention_probs, value)
362
+ hidden_states = hidden_states.to(query.dtype)
363
+
364
+ hidden_states = attn.batch_to_head_dim(hidden_states)
365
+
366
+ # linear proj
367
+ hidden_states = attn.to_out[0](hidden_states)
368
+
369
+ # dropout
370
+ hidden_states = attn.to_out[1](hidden_states)
371
+
372
+ if input_ndim == 4:
373
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
374
+
375
+ if attn.residual_connection:
376
+ hidden_states = hidden_states + residual
377
+
378
+ hidden_states = hidden_states / attn.rescale_output_factor
379
+
380
+ return hidden_states
381
+ class IPAdapterAttnProcessor(nn.Module):
382
+ r"""
383
+ Attention processor for Multiple IP-Adapters.
384
+
385
+ Args:
386
+ hidden_size (`int`):
387
+ The hidden size of the attention layer.
388
+ cross_attention_dim (`int`):
389
+ The number of channels in the `encoder_hidden_states`.
390
+ num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`):
391
+ The context length of the image features.
392
+ scale (`float` or List[`float`], defaults to 1.0):
393
+ the weight scale of image prompt.
394
+ """
395
+
396
+ def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0):
397
+ super().__init__()
398
+
399
+ self.hidden_size = hidden_size
400
+ self.cross_attention_dim = cross_attention_dim
401
+
402
+ if not isinstance(num_tokens, (tuple, list)):
403
+ num_tokens = [num_tokens]
404
+ self.num_tokens = num_tokens
405
+
406
+ if not isinstance(scale, list):
407
+ scale = [scale] * len(num_tokens)
408
+ if len(scale) != len(num_tokens):
409
+ raise ValueError("`scale` should be a list of integers with the same length as `num_tokens`.")
410
+ self.scale = scale
411
+
412
+ self.to_k_ip = nn.ModuleList(
413
+ [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]
414
+ )
415
+ self.to_v_ip = nn.ModuleList(
416
+ [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]
417
+ )
418
+
419
+ def __call__(
420
+ self,
421
+ attn,
422
+ hidden_states,
423
+ encoder_hidden_states=None,
424
+ attention_mask=None,
425
+ temb=None,
426
+ scale=1.0,
427
+ region_prompt = None,
428
+ ip_adapter_masks = None,
429
+ ):
430
+
431
+ #_,img_sequence_length,_ = hidden_states.shape
432
+ img_sequence_length= hidden_states.shape[1]
433
+ residual = hidden_states
434
+
435
+ is_xattn = False
436
+ if encoder_hidden_states is not None and region_prompt is not None:
437
+ is_xattn = True
438
+ region_state = region_prompt["region_state"]
439
+ weight_func = region_prompt["weight_func"]
440
+ sigma = region_prompt["sigma"]
441
+
442
+ # separate ip_hidden_states from encoder_hidden_states
443
+ if encoder_hidden_states is not None:
444
+ if isinstance(encoder_hidden_states, tuple):
445
+ encoder_hidden_states, ip_hidden_states = encoder_hidden_states
446
+ else:
447
+ deprecation_message = (
448
+ "You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release."
449
+ " Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning."
450
+ )
451
+ deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False)
452
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0]
453
+ encoder_hidden_states, ip_hidden_states = (
454
+ encoder_hidden_states[:, :end_pos, :],
455
+ [encoder_hidden_states[:, end_pos:, :]],
456
+ )
457
+
458
+
459
+ if attn.spatial_norm is not None:
460
+ hidden_states = attn.spatial_norm(hidden_states, temb)
461
+
462
+ input_ndim = hidden_states.ndim
463
+
464
+ if input_ndim == 4:
465
+ batch_size, channel, height, width = hidden_states.shape
466
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
467
+
468
+
469
+
470
+ batch_size, sequence_length, _ = (
471
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
472
+ )
473
+
474
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
475
+
476
+ if attn.group_norm is not None:
477
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
478
+
479
+ query = attn.to_q(hidden_states)
480
+
481
+ if encoder_hidden_states is None:
482
+ encoder_hidden_states = hidden_states
483
+ elif attn.norm_cross:
484
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
485
+
486
+ key = attn.to_k(encoder_hidden_states)
487
+ value = attn.to_v(encoder_hidden_states)
488
+
489
+
490
+ query = attn.head_to_batch_dim(query)
491
+ key = attn.head_to_batch_dim(key)
492
+ value = attn.head_to_batch_dim(value)
493
+
494
+ if is_xattn and isinstance(region_state, dict):
495
+ # use torch.baddbmm method (slow)
496
+ attention_scores = get_attention_scores(attn, query, key, attention_mask)
497
+ cross_attention_weight = weight_func(region_state[img_sequence_length].to(query.device), sigma, attention_scores)
498
+ attention_scores += torch.repeat_interleave(
499
+ cross_attention_weight, repeats=attention_scores.shape[0] // cross_attention_weight.shape[0], dim=0
500
+ )
501
+
502
+ # calc probs
503
+ attention_probs = attention_scores.softmax(dim=-1)
504
+ attention_probs = attention_probs.to(query.dtype)
505
+ hidden_states = torch.bmm(attention_probs, value)
506
+
507
+ elif xformers_available:
508
+ hidden_states = xformers.ops.memory_efficient_attention(
509
+ query.contiguous(),
510
+ key.contiguous(),
511
+ value.contiguous(),
512
+ attn_bias=attention_mask,
513
+ )
514
+ hidden_states = hidden_states.to(query.dtype)
515
+
516
+ else:
517
+ '''q_bucket_size = 512
518
+ k_bucket_size = 1024
519
+
520
+ # use flash-attention
521
+ hidden_states = FlashAttentionFunction.apply(
522
+ query.contiguous(),
523
+ key.contiguous(),
524
+ value.contiguous(),
525
+ attention_mask,
526
+ False,
527
+ q_bucket_size,
528
+ k_bucket_size,
529
+ )'''
530
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
531
+ hidden_states = torch.bmm(attention_probs, value)
532
+ hidden_states = hidden_states.to(query.dtype)
533
+
534
+ hidden_states = attn.batch_to_head_dim(hidden_states)
535
+
536
+
537
+ '''# for ip-adapter
538
+ for current_ip_hidden_states, scale, to_k_ip, to_v_ip in zip(
539
+ ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip
540
+ ):
541
+ ip_key = to_k_ip(current_ip_hidden_states)
542
+ ip_value = to_v_ip(current_ip_hidden_states)
543
+
544
+ ip_key = attn.head_to_batch_dim(ip_key)
545
+ ip_value = attn.head_to_batch_dim(ip_value)
546
+
547
+ if xformers_available:
548
+ current_ip_hidden_states = xformers.ops.memory_efficient_attention(
549
+ query.contiguous(),
550
+ ip_key.contiguous(),
551
+ ip_value.contiguous(),
552
+ attn_bias=None,
553
+ )
554
+ current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
555
+ else:
556
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
557
+ current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
558
+ current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
559
+
560
+ current_ip_hidden_states = attn.batch_to_head_dim(current_ip_hidden_states)
561
+ hidden_states = hidden_states + scale * current_ip_hidden_states'''
562
+
563
+ #control region apply ip-adapter
564
+ if ip_adapter_masks is not None:
565
+ if not isinstance(ip_adapter_masks, List):
566
+ # for backward compatibility, we accept `ip_adapter_mask` as a tensor of shape [num_ip_adapter, 1, height, width]
567
+ ip_adapter_masks = list(ip_adapter_masks.unsqueeze(1))
568
+ if not (len(ip_adapter_masks) == len(self.scale) == len(ip_hidden_states)):
569
+ raise ValueError(
570
+ f"Length of ip_adapter_masks array ({len(ip_adapter_masks)}) must match "
571
+ f"length of self.scale array ({len(self.scale)}) and number of ip_hidden_states "
572
+ f"({len(ip_hidden_states)})"
573
+ )
574
+ else:
575
+ for index, (mask, scale, ip_state) in enumerate(zip(ip_adapter_masks, self.scale, ip_hidden_states)):
576
+ if not isinstance(mask, torch.Tensor) or mask.ndim != 4:
577
+ raise ValueError(
578
+ "Each element of the ip_adapter_masks array should be a tensor with shape "
579
+ "[1, num_images_for_ip_adapter, height, width]."
580
+ " Please use `IPAdapterMaskProcessor` to preprocess your mask"
581
+ )
582
+ if mask.shape[1] != ip_state.shape[1]:
583
+ raise ValueError(
584
+ f"Number of masks ({mask.shape[1]}) does not match "
585
+ f"number of ip images ({ip_state.shape[1]}) at index {index}"
586
+ )
587
+ if isinstance(scale, list) and not len(scale) == mask.shape[1]:
588
+ raise ValueError(
589
+ f"Number of masks ({mask.shape[1]}) does not match "
590
+ f"number of scales ({len(scale)}) at index {index}"
591
+ )
592
+ else:
593
+ ip_adapter_masks = [None] * len(self.scale)
594
+
595
+ # for ip-adapter
596
+ for current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask in zip(
597
+ ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks
598
+ ):
599
+ skip = False
600
+ if isinstance(scale, list):
601
+ if all(s == 0 for s in scale):
602
+ skip = True
603
+ elif scale == 0:
604
+ skip = True
605
+ if not skip:
606
+ if mask is not None:
607
+ if not isinstance(scale, list):
608
+ scale = [scale] * mask.shape[1]
609
+
610
+ current_num_images = mask.shape[1]
611
+ for i in range(current_num_images):
612
+ ip_key = to_k_ip(current_ip_hidden_states[:, i, :, :])
613
+ ip_value = to_v_ip(current_ip_hidden_states[:, i, :, :])
614
+
615
+ ip_key = attn.head_to_batch_dim(ip_key)
616
+ ip_value = attn.head_to_batch_dim(ip_value)
617
+
618
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
619
+ _current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
620
+ _current_ip_hidden_states = attn.batch_to_head_dim(_current_ip_hidden_states)
621
+
622
+ mask_downsample = IPAdapterMaskProcessor.downsample(
623
+ mask[:, i, :, :],
624
+ batch_size,
625
+ _current_ip_hidden_states.shape[1],
626
+ _current_ip_hidden_states.shape[2],
627
+ )
628
+
629
+ mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device)
630
+
631
+ hidden_states = hidden_states + scale[i] * (_current_ip_hidden_states * mask_downsample)
632
+ else:
633
+ ip_key = to_k_ip(current_ip_hidden_states)
634
+ ip_value = to_v_ip(current_ip_hidden_states)
635
+
636
+ ip_key = attn.head_to_batch_dim(ip_key)
637
+ ip_value = attn.head_to_batch_dim(ip_value)
638
+
639
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
640
+ current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
641
+ current_ip_hidden_states = attn.batch_to_head_dim(current_ip_hidden_states)
642
+
643
+ hidden_states = hidden_states + scale * current_ip_hidden_states
644
+
645
+ # linear proj
646
+ hidden_states = attn.to_out[0](hidden_states)
647
+
648
+ # dropout
649
+ hidden_states = attn.to_out[1](hidden_states)
650
+
651
+ if input_ndim == 4:
652
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
653
+
654
+ if attn.residual_connection:
655
+ hidden_states = hidden_states + residual
656
+
657
+ hidden_states = hidden_states / attn.rescale_output_factor
658
+
659
+ return hidden_states
660
+
661
+
662
+
663
+ class AttnProcessor2_0:
664
+ r"""
665
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
666
+ """
667
+
668
+ def __init__(self):
669
+ if not hasattr(F, "scaled_dot_product_attention"):
670
+ raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
671
+
672
+ def __call__(
673
+ self,
674
+ attn,
675
+ hidden_states: torch.Tensor,
676
+ encoder_hidden_states = None,
677
+ attention_mask: Optional[torch.Tensor] = None,
678
+ temb: Optional[torch.Tensor] = None,
679
+ region_prompt = None,
680
+ ip_adapter_masks = None,
681
+ *args,
682
+ **kwargs,
683
+ ) -> torch.Tensor:
684
+
685
+ if len(args) > 0 or kwargs.get("scale", None) is not None:
686
+
687
+ deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
688
+
689
+ deprecate("scale", "1.0.0", deprecation_message)
690
+
691
+ residual = hidden_states
692
+
693
+ #_,img_sequence_length,_ = hidden_states.shape
694
+ img_sequence_length= hidden_states.shape[1]
695
+ if attn.spatial_norm is not None:
696
+ hidden_states = attn.spatial_norm(hidden_states, temb)
697
+
698
+ input_ndim = hidden_states.ndim
699
+
700
+ if input_ndim == 4:
701
+ batch_size, channel, height, width = hidden_states.shape
702
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
703
+
704
+ is_xattn = False
705
+ if encoder_hidden_states is not None and region_prompt is not None:
706
+ is_xattn = True
707
+ region_state = region_prompt["region_state"]
708
+ weight_func = region_prompt["weight_func"]
709
+ sigma = region_prompt["sigma"]
710
+
711
+ batch_size, sequence_length, _ = (
712
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
713
+ )
714
+
715
+ if attention_mask is not None:
716
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
717
+ # scaled_dot_product_attention expects attention_mask shape to be
718
+ # (batch, heads, source_length, target_length)
719
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
720
+
721
+ if attn.group_norm is not None:
722
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
723
+
724
+ query = attn.to_q(hidden_states)
725
+
726
+ if encoder_hidden_states is None:
727
+ encoder_hidden_states = hidden_states
728
+ elif attn.norm_cross:
729
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
730
+
731
+ key = attn.to_k(encoder_hidden_states)
732
+ value = attn.to_v(encoder_hidden_states)
733
+
734
+ inner_dim = key.shape[-1]
735
+ head_dim = inner_dim // attn.heads
736
+
737
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
738
+
739
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
740
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
741
+
742
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
743
+ # TODO: add support for attn.scale when we move to Torch 2.1
744
+
745
+ if is_xattn and isinstance(region_state, dict):
746
+ #w = attn.head_to_batch_dim(w,out_dim = 4).transpose(1, 2)
747
+ hidden_states = scaled_dot_product_attention_regionstate(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False,weight_func = weight_func,region_state=region_state[img_sequence_length].to(query.device),sigma = sigma)
748
+ else:
749
+ hidden_states = F.scaled_dot_product_attention(
750
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
751
+ )
752
+
753
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
754
+ hidden_states = hidden_states.to(query.dtype)
755
+
756
+ # linear proj
757
+ hidden_states = attn.to_out[0](hidden_states)
758
+ # dropout
759
+ hidden_states = attn.to_out[1](hidden_states)
760
+
761
+ if input_ndim == 4:
762
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
763
+
764
+ if attn.residual_connection:
765
+ hidden_states = hidden_states + residual
766
+
767
+ hidden_states = hidden_states / attn.rescale_output_factor
768
+
769
+ return hidden_states
770
+
771
+
772
+ class IPAdapterAttnProcessor2_0(torch.nn.Module):
773
+ r"""
774
+ Attention processor for IP-Adapter for PyTorch 2.0.
775
+
776
+ Args:
777
+ hidden_size (`int`):
778
+ The hidden size of the attention layer.
779
+ cross_attention_dim (`int`):
780
+ The number of channels in the `encoder_hidden_states`.
781
+ num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`):
782
+ The context length of the image features.
783
+ scale (`float` or `List[float]`, defaults to 1.0):
784
+ the weight scale of image prompt.
785
+ """
786
+
787
+ def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0):
788
+ super().__init__()
789
+
790
+ if not hasattr(F, "scaled_dot_product_attention"):
791
+ raise ImportError(
792
+ f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
793
+ )
794
+
795
+ self.hidden_size = hidden_size
796
+ self.cross_attention_dim = cross_attention_dim
797
+
798
+ if not isinstance(num_tokens, (tuple, list)):
799
+ num_tokens = [num_tokens]
800
+ self.num_tokens = num_tokens
801
+
802
+ if not isinstance(scale, list):
803
+ scale = [scale] * len(num_tokens)
804
+ if len(scale) != len(num_tokens):
805
+ raise ValueError("`scale` should be a list of integers with the same length as `num_tokens`.")
806
+ self.scale = scale
807
+
808
+ self.to_k_ip = nn.ModuleList(
809
+ [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]
810
+ )
811
+ self.to_v_ip = nn.ModuleList(
812
+ [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]
813
+ )
814
+
815
+ def __call__(
816
+ self,
817
+ attn,
818
+ hidden_states,
819
+ encoder_hidden_states=None,
820
+ attention_mask=None,
821
+ temb=None,
822
+ scale=1.0,
823
+ region_prompt = None,
824
+ ip_adapter_masks = None,
825
+ ):
826
+ residual = hidden_states
827
+
828
+ #_,img_sequence_length,_ = hidden_states.shape
829
+ img_sequence_length= hidden_states.shape[1]
830
+
831
+ is_xattn = False
832
+ if encoder_hidden_states is not None and region_prompt is not None:
833
+ is_xattn = True
834
+ region_state = region_prompt["region_state"]
835
+ weight_func = region_prompt["weight_func"]
836
+ sigma = region_prompt["sigma"]
837
+
838
+ # separate ip_hidden_states from encoder_hidden_states
839
+ if encoder_hidden_states is not None:
840
+ if isinstance(encoder_hidden_states, tuple):
841
+ encoder_hidden_states, ip_hidden_states = encoder_hidden_states
842
+ else:
843
+ deprecation_message = (
844
+ "You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release."
845
+ " Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning."
846
+ )
847
+ deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False)
848
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0]
849
+ encoder_hidden_states, ip_hidden_states = (
850
+ encoder_hidden_states[:, :end_pos, :],
851
+ [encoder_hidden_states[:, end_pos:, :]],
852
+ )
853
+
854
+ if attn.spatial_norm is not None:
855
+ hidden_states = attn.spatial_norm(hidden_states, temb)
856
+
857
+ input_ndim = hidden_states.ndim
858
+
859
+ if input_ndim == 4:
860
+ batch_size, channel, height, width = hidden_states.shape
861
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
862
+
863
+
864
+
865
+
866
+ batch_size, sequence_length, _ = (
867
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
868
+ )
869
+
870
+ if attention_mask is not None:
871
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
872
+ # scaled_dot_product_attention expects attention_mask shape to be
873
+ # (batch, heads, source_length, target_length)
874
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
875
+
876
+ if attn.group_norm is not None:
877
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
878
+
879
+ query = attn.to_q(hidden_states)
880
+
881
+ if encoder_hidden_states is None:
882
+ encoder_hidden_states = hidden_states
883
+ elif attn.norm_cross:
884
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
885
+
886
+
887
+ key = attn.to_k(encoder_hidden_states)
888
+ value = attn.to_v(encoder_hidden_states)
889
+
890
+ inner_dim = key.shape[-1]
891
+ head_dim = inner_dim // attn.heads
892
+
893
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
894
+
895
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
896
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
897
+
898
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
899
+ # TODO: add support for attn.scale when we move to Torch 2.1
900
+
901
+ if is_xattn and isinstance(region_state, dict):
902
+ #w = attn.head_to_batch_dim(w,out_dim = 4).transpose(1, 2)
903
+ hidden_states = scaled_dot_product_attention_regionstate(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False,weight_func = weight_func,region_state=region_state[img_sequence_length].to(query.device),sigma = sigma)
904
+ else:
905
+ hidden_states = F.scaled_dot_product_attention(
906
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
907
+ )
908
+
909
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
910
+ hidden_states = hidden_states.to(query.dtype)
911
+
912
+ ''''# for ip-adapter
913
+ for current_ip_hidden_states, scale, to_k_ip, to_v_ip in zip(
914
+ ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip
915
+ ):
916
+ ip_key = to_k_ip(current_ip_hidden_states)
917
+ ip_value = to_v_ip(current_ip_hidden_states)
918
+
919
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
920
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
921
+
922
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
923
+ # TODO: add support for attn.scale when we move to Torch 2.1
924
+ current_ip_hidden_states = F.scaled_dot_product_attention(
925
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
926
+ )
927
+
928
+ current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape(
929
+ batch_size, -1, attn.heads * head_dim
930
+ )
931
+ current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
932
+
933
+ hidden_states = hidden_states + scale * current_ip_hidden_states'''
934
+
935
+
936
+ if ip_adapter_masks is not None:
937
+ if not isinstance(ip_adapter_masks, List):
938
+ # for backward compatibility, we accept `ip_adapter_mask` as a tensor of shape [num_ip_adapter, 1, height, width]
939
+ ip_adapter_masks = list(ip_adapter_masks.unsqueeze(1))
940
+ if not (len(ip_adapter_masks) == len(self.scale) == len(ip_hidden_states)):
941
+ raise ValueError(
942
+ f"Length of ip_adapter_masks array ({len(ip_adapter_masks)}) must match "
943
+ f"length of self.scale array ({len(self.scale)}) and number of ip_hidden_states "
944
+ f"({len(ip_hidden_states)})"
945
+ )
946
+ else:
947
+ for index, (mask, scale, ip_state) in enumerate(zip(ip_adapter_masks, self.scale, ip_hidden_states)):
948
+ if not isinstance(mask, torch.Tensor) or mask.ndim != 4:
949
+ raise ValueError(
950
+ "Each element of the ip_adapter_masks array should be a tensor with shape "
951
+ "[1, num_images_for_ip_adapter, height, width]."
952
+ " Please use `IPAdapterMaskProcessor` to preprocess your mask"
953
+ )
954
+ if mask.shape[1] != ip_state.shape[1]:
955
+ raise ValueError(
956
+ f"Number of masks ({mask.shape[1]}) does not match "
957
+ f"number of ip images ({ip_state.shape[1]}) at index {index}"
958
+ )
959
+ if isinstance(scale, list) and not len(scale) == mask.shape[1]:
960
+ raise ValueError(
961
+ f"Number of masks ({mask.shape[1]}) does not match "
962
+ f"number of scales ({len(scale)}) at index {index}"
963
+ )
964
+ else:
965
+ ip_adapter_masks = [None] * len(self.scale)
966
+
967
+ # for ip-adapter
968
+ for current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask in zip(
969
+ ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks
970
+ ):
971
+ skip = False
972
+ if isinstance(scale, list):
973
+ if all(s == 0 for s in scale):
974
+ skip = True
975
+ elif scale == 0:
976
+ skip = True
977
+ if not skip:
978
+ if mask is not None:
979
+ if not isinstance(scale, list):
980
+ scale = [scale] * mask.shape[1]
981
+
982
+ current_num_images = mask.shape[1]
983
+ for i in range(current_num_images):
984
+ ip_key = to_k_ip(current_ip_hidden_states[:, i, :, :])
985
+ ip_value = to_v_ip(current_ip_hidden_states[:, i, :, :])
986
+
987
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
988
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
989
+
990
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
991
+ # TODO: add support for attn.scale when we move to Torch 2.1
992
+ _current_ip_hidden_states = F.scaled_dot_product_attention(
993
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
994
+ )
995
+
996
+ _current_ip_hidden_states = _current_ip_hidden_states.transpose(1, 2).reshape(
997
+ batch_size, -1, attn.heads * head_dim
998
+ )
999
+ _current_ip_hidden_states = _current_ip_hidden_states.to(query.dtype)
1000
+
1001
+ mask_downsample = IPAdapterMaskProcessor.downsample(
1002
+ mask[:, i, :, :],
1003
+ batch_size,
1004
+ _current_ip_hidden_states.shape[1],
1005
+ _current_ip_hidden_states.shape[2],
1006
+ )
1007
+
1008
+ mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device)
1009
+ hidden_states = hidden_states + scale[i] * (_current_ip_hidden_states * mask_downsample)
1010
+ else:
1011
+ ip_key = to_k_ip(current_ip_hidden_states)
1012
+ ip_value = to_v_ip(current_ip_hidden_states)
1013
+
1014
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1015
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1016
+
1017
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
1018
+ # TODO: add support for attn.scale when we move to Torch 2.1
1019
+ current_ip_hidden_states = F.scaled_dot_product_attention(
1020
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
1021
+ )
1022
+
1023
+ current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape(
1024
+ batch_size, -1, attn.heads * head_dim
1025
+ )
1026
+ current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
1027
+
1028
+ hidden_states = hidden_states + scale * current_ip_hidden_states
1029
+
1030
+ # linear proj
1031
+ hidden_states = attn.to_out[0](hidden_states)
1032
+ # dropout
1033
+ hidden_states = attn.to_out[1](hidden_states)
1034
+
1035
+ if input_ndim == 4:
1036
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
1037
+
1038
+ if attn.residual_connection:
1039
+ hidden_states = hidden_states + residual
1040
+
1041
+ hidden_states = hidden_states / attn.rescale_output_factor
1042
+
1043
+ return hidden_states
1044
+
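The four processors above are adapted copies of the stock diffusers attention processors that additionally accept `region_prompt` and `ip_adapter_masks`; the actual wiring happens in app.py, whose diff is not rendered on this page. A minimal sketch of how they could be attached to a pipeline's UNet (the checkpoint id and the `cross_attention_kwargs` payload below are assumptions, not taken from this commit):

import torch
import torch.nn.functional as F
from diffusers import StableDiffusionPipeline
from modules.attention_modify import AttnProcessor, AttnProcessor2_0

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# use the SDPA-based processor on PyTorch 2.x, the bmm-based one otherwise
use_sdpa = hasattr(F, "scaled_dot_product_attention")
pipe.unet.set_attn_processor(
    {name: AttnProcessor2_0() if use_sdpa else AttnProcessor()
     for name in pipe.unet.attn_processors.keys()}
)

# region weighting is then expected per step via cross_attention_kwargs,
# e.g. {"region_prompt": {"region_state": ..., "weight_func": ..., "sigma": ...}}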
modules/controlnetxs/controlnetxs.py ADDED
@@ -0,0 +1,1017 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import math
15
+ from dataclasses import dataclass
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.utils.checkpoint
20
+ from torch import nn
21
+ from torch.nn import functional as F
22
+ from torch.nn.modules.normalization import GroupNorm
23
+
24
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
25
+ from diffusers.models.attention_processor import USE_PEFT_BACKEND, AttentionProcessor
26
+ from diffusers.models.autoencoders import AutoencoderKL
27
+ from diffusers.models.lora import LoRACompatibleConv
28
+ from diffusers.models.modeling_utils import ModelMixin
29
+ from diffusers.models.unet_2d_blocks import (
30
+ CrossAttnDownBlock2D,
31
+ CrossAttnUpBlock2D,
32
+ DownBlock2D,
33
+ Downsample2D,
34
+ ResnetBlock2D,
35
+ Transformer2DModel,
36
+ UpBlock2D,
37
+ Upsample2D,
38
+ )
39
+ from diffusers.models.unet_2d_condition import UNet2DConditionModel
40
+ from diffusers.utils import BaseOutput, logging
41
+ from modules.attention_modify import CrossAttnProcessor,IPAdapterAttnProcessor
42
+
43
+
44
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
+
46
+
47
+ @dataclass
48
+ class ControlNetXSOutput(BaseOutput):
49
+ """
50
+ The output of [`ControlNetXSModel`].
51
+
52
+ Args:
53
+ sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
54
+ The output of the `ControlNetXSModel`. Unlike `ControlNetOutput` this is NOT to be added to the base model
55
+ output, but is already the final output.
56
+ """
57
+
58
+ sample: torch.FloatTensor = None
59
+
60
+
61
+ # copied from diffusers.models.controlnet.ControlNetConditioningEmbedding
62
+ class ControlNetConditioningEmbedding(nn.Module):
63
+ """
64
+ Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
65
+ [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
66
+ training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
67
+ convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
68
+ (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
69
+ model) to encode image-space conditions ... into feature maps ..."
70
+ """
71
+
72
+ def __init__(
73
+ self,
74
+ conditioning_embedding_channels: int,
75
+ conditioning_channels: int = 3,
76
+ block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
77
+ ):
78
+ super().__init__()
79
+
80
+ self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
81
+
82
+ self.blocks = nn.ModuleList([])
83
+
84
+ for i in range(len(block_out_channels) - 1):
85
+ channel_in = block_out_channels[i]
86
+ channel_out = block_out_channels[i + 1]
87
+ self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
88
+ self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
89
+
90
+ self.conv_out = zero_module(
91
+ nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
92
+ )
93
+
94
+ def forward(self, conditioning):
95
+ embedding = self.conv_in(conditioning)
96
+ embedding = F.silu(embedding)
97
+
98
+ for block in self.blocks:
99
+ embedding = block(embedding)
100
+ embedding = F.silu(embedding)
101
+
102
+ embedding = self.conv_out(embedding)
103
+
104
+ return embedding
105
+
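# Illustrative sketch (not from this commit): with the default
# block_out_channels=(16, 32, 96, 256), the three stride-2 convolutions bring a
# 512x512 hint image down to the 64x64 latent resolution, and the
# zero-initialised conv_out makes the initial embedding all zeros (assuming the
# `zero_module` helper referenced above is defined elsewhere in this file):
#
#   embed = ControlNetConditioningEmbedding(conditioning_embedding_channels=320)
#   hint = torch.randn(1, 3, 512, 512)
#   embed(hint).shape  # torch.Size([1, 320, 64, 64])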
106
+
107
+ class ControlNetXSModel(ModelMixin, ConfigMixin):
108
+ r"""
109
+ A ControlNet-XS model
110
+
111
+ This model inherits from [`ModelMixin`] and [`ConfigMixin`]. Check the superclass documentation for its generic
112
+ methods implemented for all models (such as downloading or saving).
113
+
114
+ Most of the parameters for this model are passed into the [`UNet2DConditionModel`] it creates. Check the documentation
115
+ of [`UNet2DConditionModel`] for them.
116
+
117
+ Parameters:
118
+ conditioning_channels (`int`, defaults to 3):
119
+ Number of channels of conditioning input (e.g. an image)
120
+ controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
121
+ The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
122
+ conditioning_embedding_out_channels (`tuple[int]`, defaults to `(16, 32, 96, 256)`):
123
+ The tuple of output channel for each block in the `controlnet_cond_embedding` layer.
124
+ time_embedding_input_dim (`int`, defaults to 320):
125
+ Dimension of input into time embedding. Needs to be same as in the base model.
126
+ time_embedding_dim (`int`, defaults to 1280):
127
+ Dimension of output from time embedding. Needs to be same as in the base model.
128
+ learn_embedding (`bool`, defaults to `False`):
129
+ Whether to use time embedding of the control model. If yes, the time embedding is a linear interpolation of
130
+ the time embeddings of the control and base model with interpolation parameter `time_embedding_mix**3`.
131
+ time_embedding_mix (`float`, defaults to 1.0):
132
+ Linear interpolation parameter used if `learn_embedding` is `True`. A value of 1.0 means only the
133
+ control model's time embedding will be used. A value of 0.0 means only the base model's time embedding will be used.
134
+ base_model_channel_sizes (`Dict[str, List[Tuple[int]]]`):
135
+ Channel sizes of each subblock of base model. Use `gather_subblock_sizes` on your base model to compute it.
136
+ """
137
+
138
+ @classmethod
139
+ def init_original(cls, base_model: UNet2DConditionModel, is_sdxl=True):
140
+ """
141
+ Create a ControlNetXS model with the same parameters as in the original paper (https://github.com/vislearn/ControlNet-XS).
142
+
143
+ Parameters:
144
+ base_model (`UNet2DConditionModel`):
145
+ Base UNet model. Needs to be either StableDiffusion or StableDiffusion-XL.
146
+ is_sdxl (`bool`, defaults to `True`):
147
+ Whether passed `base_model` is a StableDiffusion-XL model.
148
+ """
149
+
150
+ def get_dim_attn_heads(base_model: UNet2DConditionModel, size_ratio: float, num_attn_heads: int):
151
+ """
152
+ Currently, diffusers can only set the dimension of attention heads (see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why).
153
+ The original ControlNet-XS model, however, defines the number of attention heads.
154
+ That's why we compute the dimensions needed to get the correct number of attention heads.
155
+ """
156
+ block_out_channels = [int(size_ratio * c) for c in base_model.config.block_out_channels]
157
+ dim_attn_heads = [math.ceil(c / num_attn_heads) for c in block_out_channels]
158
+ return dim_attn_heads
159
+
160
+ if is_sdxl:
161
+ return ControlNetXSModel.from_unet(
162
+ base_model,
163
+ time_embedding_mix=0.95,
164
+ learn_embedding=True,
165
+ size_ratio=0.1,
166
+ conditioning_embedding_out_channels=(16, 32, 96, 256),
167
+ num_attention_heads=get_dim_attn_heads(base_model, 0.1, 64),
168
+ )
169
+ else:
170
+ return ControlNetXSModel.from_unet(
171
+ base_model,
172
+ time_embedding_mix=1.0,
173
+ learn_embedding=True,
174
+ size_ratio=0.0125,
175
+ conditioning_embedding_out_channels=(16, 32, 96, 256),
176
+ num_attention_heads=get_dim_attn_heads(base_model, 0.0125, 8),
177
+ )
178
+
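# Illustrative sketch (not from this commit): building the control model around
# an existing base UNet with the classmethod above (the checkpoint id is only an
# example):
#
#   from diffusers import UNet2DConditionModel
#   base_unet = UNet2DConditionModel.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", subfolder="unet"
#   )
#   controlnet_xs = ControlNetXSModel.init_original(base_unet, is_sdxl=False)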
179
+ @classmethod
180
+ def _gather_subblock_sizes(cls, unet: UNet2DConditionModel, base_or_control: str):
181
+ """To create correctly sized connections between base and control model, we need to know
182
+ the input and output channels of each subblock.
183
+
184
+ Parameters:
185
+ unet (`UNet2DConditionModel`):
186
+ Unet of which the subblock channel sizes are to be gathered.
187
+ base_or_control (`str`):
188
+ Needs to be either "base" or "control". If "base", decoder is also considered.
189
+ """
190
+ if base_or_control not in ["base", "control"]:
191
+ raise ValueError("`base_or_control` needs to be either `base` or `control`")
192
+
193
+ channel_sizes = {"down": [], "mid": [], "up": []}
194
+
195
+ # input convolution
196
+ channel_sizes["down"].append((unet.conv_in.in_channels, unet.conv_in.out_channels))
197
+
198
+ # encoder blocks
199
+ for module in unet.down_blocks:
200
+ if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):
201
+ for r in module.resnets:
202
+ channel_sizes["down"].append((r.in_channels, r.out_channels))
203
+ if module.downsamplers:
204
+ channel_sizes["down"].append(
205
+ (module.downsamplers[0].channels, module.downsamplers[0].out_channels)
206
+ )
207
+ else:
208
+ raise ValueError(f"Encountered unknown module of type {type(module)} while creating ControlNet-XS.")
209
+
210
+ # middle block
211
+ channel_sizes["mid"].append((unet.mid_block.resnets[0].in_channels, unet.mid_block.resnets[0].out_channels))
212
+
213
+ # decoder blocks
214
+ if base_or_control == "base":
215
+ for module in unet.up_blocks:
216
+ if isinstance(module, (CrossAttnUpBlock2D, UpBlock2D)):
217
+ for r in module.resnets:
218
+ channel_sizes["up"].append((r.in_channels, r.out_channels))
219
+ else:
220
+ raise ValueError(
221
+ f"Encountered unknown module of type {type(module)} while creating ControlNet-XS."
222
+ )
223
+
224
+ return channel_sizes
225
+
226
+ @register_to_config
227
+ def __init__(
228
+ self,
229
+ conditioning_channels: int = 3,
230
+ conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
231
+ controlnet_conditioning_channel_order: str = "rgb",
232
+ time_embedding_input_dim: int = 320,
233
+ time_embedding_dim: int = 1280,
234
+ time_embedding_mix: float = 1.0,
235
+ learn_embedding: bool = False,
236
+ base_model_channel_sizes: Dict[str, List[Tuple[int]]] = {
237
+ "down": [
238
+ (4, 320),
239
+ (320, 320),
240
+ (320, 320),
241
+ (320, 320),
242
+ (320, 640),
243
+ (640, 640),
244
+ (640, 640),
245
+ (640, 1280),
246
+ (1280, 1280),
247
+ ],
248
+ "mid": [(1280, 1280)],
249
+ "up": [
250
+ (2560, 1280),
251
+ (2560, 1280),
252
+ (1920, 1280),
253
+ (1920, 640),
254
+ (1280, 640),
255
+ (960, 640),
256
+ (960, 320),
257
+ (640, 320),
258
+ (640, 320),
259
+ ],
260
+ },
261
+ sample_size: Optional[int] = None,
262
+ down_block_types: Tuple[str] = (
263
+ "CrossAttnDownBlock2D",
264
+ "CrossAttnDownBlock2D",
265
+ "CrossAttnDownBlock2D",
266
+ "DownBlock2D",
267
+ ),
268
+ up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
269
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
270
+ norm_num_groups: Optional[int] = 32,
271
+ cross_attention_dim: Union[int, Tuple[int]] = 1280,
272
+ transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
273
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = 8,
274
+ upcast_attention: bool = False,
275
+ ):
276
+ super().__init__()
277
+
278
+ # 1 - Create control unet
279
+ self.control_model = UNet2DConditionModel(
280
+ sample_size=sample_size,
281
+ down_block_types=down_block_types,
282
+ up_block_types=up_block_types,
283
+ block_out_channels=block_out_channels,
284
+ norm_num_groups=norm_num_groups,
285
+ cross_attention_dim=cross_attention_dim,
286
+ transformer_layers_per_block=transformer_layers_per_block,
287
+ attention_head_dim=num_attention_heads,
288
+ use_linear_projection=True,
289
+ upcast_attention=upcast_attention,
290
+ time_embedding_dim=time_embedding_dim,
291
+ )
292
+
293
+ # 2 - Do model surgery on control model
294
+ # 2.1 - Allow the control model to use the same time information as the base model
295
+ adjust_time_dims(self.control_model, time_embedding_input_dim, time_embedding_dim)
296
+
297
+ # 2.2 - Allow for information infusion from base model
298
+
299
+ # We concatenate the output of each base encoder subblock to the input of the next control encoder subblock
300
+ # (We ignore the 1st element, as it represents the `conv_in`.)
301
+ extra_input_channels = [input_channels for input_channels, _ in base_model_channel_sizes["down"][1:]]
302
+ it_extra_input_channels = iter(extra_input_channels)
303
+
304
+ for b, block in enumerate(self.control_model.down_blocks):
305
+ for r in range(len(block.resnets)):
306
+ increase_block_input_in_encoder_resnet(
307
+ self.control_model, block_no=b, resnet_idx=r, by=next(it_extra_input_channels)
308
+ )
309
+
310
+ if block.downsamplers:
311
+ increase_block_input_in_encoder_downsampler(
312
+ self.control_model, block_no=b, by=next(it_extra_input_channels)
313
+ )
314
+
315
+ increase_block_input_in_mid_resnet(self.control_model, by=extra_input_channels[-1])
316
+
317
+ # 2.3 - Make group norms work with modified channel sizes
318
+ adjust_group_norms(self.control_model)
319
+
320
+ # 3 - Gather Channel Sizes
321
+ self.ch_inout_ctrl = ControlNetXSModel._gather_subblock_sizes(self.control_model, base_or_control="control")
322
+ self.ch_inout_base = base_model_channel_sizes
323
+
324
+ # 4 - Build connections between base and control model
325
+ self.down_zero_convs_out = nn.ModuleList([])
326
+ self.down_zero_convs_in = nn.ModuleList([])
327
+ self.middle_block_out = nn.ModuleList([])
328
+ self.middle_block_in = nn.ModuleList([])
329
+ self.up_zero_convs_out = nn.ModuleList([])
330
+ self.up_zero_convs_in = nn.ModuleList([])
331
+
332
+ for ch_io_base in self.ch_inout_base["down"]:
333
+ self.down_zero_convs_in.append(self._make_zero_conv(in_channels=ch_io_base[1], out_channels=ch_io_base[1]))
334
+ for i in range(len(self.ch_inout_ctrl["down"])):
335
+ self.down_zero_convs_out.append(
336
+ self._make_zero_conv(self.ch_inout_ctrl["down"][i][1], self.ch_inout_base["down"][i][1])
337
+ )
338
+
339
+ self.middle_block_out = self._make_zero_conv(
340
+ self.ch_inout_ctrl["mid"][-1][1], self.ch_inout_base["mid"][-1][1]
341
+ )
342
+
343
+ self.up_zero_convs_out.append(
344
+ self._make_zero_conv(self.ch_inout_ctrl["down"][-1][1], self.ch_inout_base["mid"][-1][1])
345
+ )
346
+ for i in range(1, len(self.ch_inout_ctrl["down"])):
347
+ self.up_zero_convs_out.append(
348
+ self._make_zero_conv(self.ch_inout_ctrl["down"][-(i + 1)][1], self.ch_inout_base["up"][i - 1][1])
349
+ )
350
+
351
+ # 5 - Create conditioning hint embedding
352
+ self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
353
+ conditioning_embedding_channels=block_out_channels[0],
354
+ block_out_channels=conditioning_embedding_out_channels,
355
+ conditioning_channels=conditioning_channels,
356
+ )
357
+
358
+ # In the minimal implementation setting, we only need the control model up to the mid block
359
+ del self.control_model.up_blocks
360
+ del self.control_model.conv_norm_out
361
+ del self.control_model.conv_out
362
+
363
+ @classmethod
364
+ def from_unet(
365
+ cls,
366
+ unet: UNet2DConditionModel,
367
+ conditioning_channels: int = 3,
368
+ conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
369
+ controlnet_conditioning_channel_order: str = "rgb",
370
+ learn_embedding: bool = False,
371
+ time_embedding_mix: float = 1.0,
372
+ block_out_channels: Optional[Tuple[int]] = None,
373
+ size_ratio: Optional[float] = None,
374
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = 8,
375
+ norm_num_groups: Optional[int] = None,
376
+ ):
377
+ r"""
378
+ Instantiate a [`ControlNetXSModel`] from [`UNet2DConditionModel`].
379
+
380
+ Parameters:
381
+ unet (`UNet2DConditionModel`):
382
+ The UNet model we want to control. The dimensions of the ControlNetXSModel will be adapted to it.
383
+ conditioning_channels (`int`, defaults to 3):
384
+ Number of channels of conditioning input (e.g. an image)
385
+ conditioning_embedding_out_channels (`tuple[int]`, defaults to `(16, 32, 96, 256)`):
386
+ The tuple of output channel for each block in the `controlnet_cond_embedding` layer.
387
+ controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
388
+ The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
389
+ learn_embedding (`bool`, defaults to `False`):
390
+ Whether to use the time embedding of the control model. If so, the time embedding is a linear interpolation
391
+ of the time embeddings of the control and base model with interpolation parameter
392
+ `time_embedding_mix**0.3`.
393
+ time_embedding_mix (`float`, defaults to 1.0):
394
+ Linear interpolation parameter used if `learn_embedding` is `True`.
395
+ block_out_channels (`Tuple[int]`, *optional*):
396
+ Down blocks output channels in control model. Either this or `size_ratio` must be given.
397
+ size_ratio (float, *optional*):
398
+ When given, block_out_channels is set to a relative fraction of the base model's block_out_channels.
399
+ Either this or `block_out_channels` must be given.
400
+ num_attention_heads (`Union[int, Tuple[int]]`, *optional*):
401
+ The dimension of the attention heads. The naming is admittedly confusing; see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why.
402
+ norm_num_groups (int, *optional*, defaults to `None`):
403
+ The number of groups to use for the normalization of the control unet. If `None`,
404
+ `int(unet.config.norm_num_groups * size_ratio)` is taken.
405
+ """
406
+
407
+ # Check input
408
+ fixed_size = block_out_channels is not None
409
+ relative_size = size_ratio is not None
410
+ if not (fixed_size ^ relative_size):
411
+ raise ValueError(
412
+ "Pass exactly one of `block_out_channels` (for absolute sizing) or `control_model_ratio` (for relative sizing)."
413
+ )
414
+
415
+ # Create model
416
+ if block_out_channels is None:
417
+ block_out_channels = [int(size_ratio * c) for c in unet.config.block_out_channels]
418
+
419
+ # Check that attention heads and group norms match channel sizes
420
+ # - attention heads
421
+ def attn_heads_match_channel_sizes(attn_heads, channel_sizes):
422
+ if isinstance(attn_heads, (tuple, list)):
423
+ return all(c % a == 0 for a, c in zip(attn_heads, channel_sizes))
424
+ else:
425
+ return all(c % attn_heads == 0 for c in channel_sizes)
426
+
427
+ num_attention_heads = num_attention_heads or unet.config.attention_head_dim
428
+ if not attn_heads_match_channel_sizes(num_attention_heads, block_out_channels):
429
+ raise ValueError(
430
+ f"The dimension of attention heads ({num_attention_heads}) must divide `block_out_channels` ({block_out_channels}). If you didn't set `num_attention_heads` the default settings don't match your model. Set `num_attention_heads` manually."
431
+ )
432
+
433
+ # - group norms
434
+ def group_norms_match_channel_sizes(num_groups, channel_sizes):
435
+ return all(c % num_groups == 0 for c in channel_sizes)
436
+
437
+ if norm_num_groups is None:
438
+ if group_norms_match_channel_sizes(unet.config.norm_num_groups, block_out_channels):
439
+ norm_num_groups = unet.config.norm_num_groups
440
+ else:
441
+ norm_num_groups = min(block_out_channels)
442
+
443
+ if group_norms_match_channel_sizes(norm_num_groups, block_out_channels):
444
+ print(
445
+ f"`norm_num_groups` was set to `min(block_out_channels)` (={norm_num_groups}) so it divides all block_out_channels` ({block_out_channels}). Set it explicitly to remove this information."
446
+ )
447
+ else:
448
+ raise ValueError(
449
+ f"`block_out_channels` ({block_out_channels}) don't match the base models `norm_num_groups` ({unet.config.norm_num_groups}). Setting `norm_num_groups` to `min(block_out_channels)` ({norm_num_groups}) didn't fix this. Pass `norm_num_groups` explicitly so it divides all block_out_channels."
450
+ )
451
+
452
+ def get_time_emb_input_dim(unet: UNet2DConditionModel):
453
+ return unet.time_embedding.linear_1.in_features
454
+
455
+ def get_time_emb_dim(unet: UNet2DConditionModel):
456
+ return unet.time_embedding.linear_2.out_features
457
+
458
+ # Clone params from base unet if
459
+ # (i) it's required to build SD or SDXL, and
460
+ # (ii) it's not used for the time embedding (as time embedding of control model is never used), and
461
+ # (iii) it's not set further below anyway
462
+ to_keep = [
463
+ "cross_attention_dim",
464
+ "down_block_types",
465
+ "sample_size",
466
+ "transformer_layers_per_block",
467
+ "up_block_types",
468
+ "upcast_attention",
469
+ ]
470
+ kwargs = {k: v for k, v in dict(unet.config).items() if k in to_keep}
471
+ kwargs.update(block_out_channels=block_out_channels)
472
+ kwargs.update(num_attention_heads=num_attention_heads)
473
+ kwargs.update(norm_num_groups=norm_num_groups)
474
+
475
+ # Add controlnetxs-specific params
476
+ kwargs.update(
477
+ conditioning_channels=conditioning_channels,
478
+ controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
479
+ time_embedding_input_dim=get_time_emb_input_dim(unet),
480
+ time_embedding_dim=get_time_emb_dim(unet),
481
+ time_embedding_mix=time_embedding_mix,
482
+ learn_embedding=learn_embedding,
483
+ base_model_channel_sizes=ControlNetXSModel._gather_subblock_sizes(unet, base_or_control="base"),
484
+ conditioning_embedding_out_channels=conditioning_embedding_out_channels,
485
+ )
486
+
487
+ return cls(**kwargs)
488
+
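# Usage sketch for `from_unet`, assuming the SD-1.5 UNet from the sketch above.
# The 0.1 size ratio, `learn_embedding=True` and `time_embedding_mix=0.95` are
# illustrative choices, not requirements.
control_model = ControlNetXSModel.from_unet(
    base_unet,
    size_ratio=0.1,
    learn_embedding=True,
    time_embedding_mix=0.95,
)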
489
+ @property
490
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
491
+ r"""
492
+ Returns:
493
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
494
+ indexed by their weight names.
495
+ """
496
+ return self.control_model.attn_processors
497
+
498
+ def set_attn_processor(
499
+ self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
500
+ ):
501
+ r"""
502
+ Sets the attention processor to use to compute attention.
503
+
504
+ Parameters:
505
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
506
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
507
+ for **all** `Attention` layers.
508
+
509
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
510
+ processor. This is strongly recommended when setting trainable attention processors.
511
+
512
+ """
513
+ self.control_model.set_attn_processor(processor, _remove_lora)
514
+
515
+ def set_default_attn_processor(self):
516
+ """
517
+ Disables custom attention processors and sets the default attention implementation.
518
+ """
519
+ self.control_model.set_default_attn_processor()
520
+
521
+ def set_attention_slice(self, slice_size):
522
+ r"""
523
+ Enable sliced attention computation.
524
+
525
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
526
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
527
+
528
+ Args:
529
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
530
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
531
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
532
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
533
+ must be a multiple of `slice_size`.
534
+ """
535
+ self.control_model.set_attention_slice(slice_size)
536
+
537
+ def _set_gradient_checkpointing(self, module, value=False):
538
+ if isinstance(module, (UNet2DConditionModel)):
539
+ if value:
540
+ module.enable_gradient_checkpointing()
541
+ else:
542
+ module.disable_gradient_checkpointing()
543
+
544
+ def forward(
545
+ self,
546
+ base_model: UNet2DConditionModel,
547
+ sample: torch.FloatTensor,
548
+ timestep: Union[torch.Tensor, float, int],
549
+ encoder_hidden_states: Dict,
550
+ controlnet_cond: torch.Tensor,
551
+ conditioning_scale: float = 1.0,
552
+ class_labels: Optional[torch.Tensor] = None,
553
+ timestep_cond: Optional[torch.Tensor] = None,
554
+ attention_mask: Optional[torch.Tensor] = None,
555
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
556
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
557
+ return_dict: bool = True,
558
+ ) -> Union[ControlNetXSOutput, Tuple]:
559
+ """
560
+ The [`ControlNetModel`] forward method.
561
+
562
+ Args:
563
+ base_model (`UNet2DConditionModel`):
564
+ The base unet model we want to control.
565
+ sample (`torch.FloatTensor`):
566
+ The noisy input tensor.
567
+ timestep (`Union[torch.Tensor, float, int]`):
568
+ The number of timesteps to denoise an input.
569
+ encoder_hidden_states (`torch.Tensor`):
570
+ The encoder hidden states.
571
+ controlnet_cond (`torch.FloatTensor`):
572
+ The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
573
+ conditioning_scale (`float`, defaults to `1.0`):
574
+ How much the control model affects the base model outputs.
575
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
576
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
577
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
578
+ Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
579
+ timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
580
+ embeddings.
581
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
582
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
583
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
584
+ negative values to the attention scores corresponding to "discard" tokens.
585
+ added_cond_kwargs (`dict`):
586
+ Additional conditions for the Stable Diffusion XL UNet.
587
+ cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
588
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
589
+ return_dict (`bool`, defaults to `True`):
590
+ Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
591
+
592
+ Returns:
593
+ [`~models.controlnetxs.ControlNetXSOutput`] **or** `tuple`:
594
+ If `return_dict` is `True`, a [`~models.controlnetxs.ControlNetXSOutput`] is returned, otherwise a
595
+ tuple is returned where the first element is the sample tensor.
596
+ """
597
+ # check channel order
598
+ channel_order = self.config.controlnet_conditioning_channel_order
599
+
600
+ if channel_order == "rgb":
601
+ # in rgb order by default
602
+ ...
603
+ elif channel_order == "bgr":
604
+ controlnet_cond = torch.flip(controlnet_cond, dims=[1])
605
+ else:
606
+ raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
607
+
608
+ # scale control strength
609
+ n_connections = len(self.down_zero_convs_out) + 1 + len(self.up_zero_convs_out)
610
+ scale_list = torch.full((n_connections,), conditioning_scale)
611
+
612
+ # prepare attention_mask
613
+ if attention_mask is not None:
614
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
615
+ attention_mask = attention_mask.unsqueeze(1)
616
+
617
+ # 1. time
618
+ timesteps = timestep
619
+ if not torch.is_tensor(timesteps):
620
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
621
+ # This would be a good case for the `match` statement (Python 3.10+)
622
+ is_mps = sample.device.type == "mps"
623
+ if isinstance(timestep, float):
624
+ dtype = torch.float32 if is_mps else torch.float64
625
+ else:
626
+ dtype = torch.int32 if is_mps else torch.int64
627
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
628
+ elif len(timesteps.shape) == 0:
629
+ timesteps = timesteps[None].to(sample.device)
630
+
631
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
632
+ timesteps = timesteps.expand(sample.shape[0])
633
+
634
+ t_emb = base_model.time_proj(timesteps)
635
+
636
+ # timesteps does not contain any weights and will always return f32 tensors
637
+ # but time_embedding might actually be running in fp16. so we need to cast here.
638
+ # there might be better ways to encapsulate this.
639
+ t_emb = t_emb.to(dtype=sample.dtype)
640
+
641
+ if self.config.learn_embedding:
642
+ ctrl_temb = self.control_model.time_embedding(t_emb, timestep_cond)
643
+ base_temb = base_model.time_embedding(t_emb, timestep_cond)
644
+ interpolation_param = self.config.time_embedding_mix**0.3
645
+
646
+ temb = ctrl_temb * interpolation_param + base_temb * (1 - interpolation_param)
647
+ else:
648
+ temb = base_model.time_embedding(t_emb)
649
+
650
+ # added time & text embeddings
651
+ aug_emb = None
652
+
653
+ if base_model.class_embedding is not None:
654
+ if class_labels is None:
655
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
656
+
657
+ if base_model.config.class_embed_type == "timestep":
658
+ class_labels = base_model.time_proj(class_labels)
659
+
660
+ class_emb = base_model.class_embedding(class_labels).to(dtype=self.dtype)
661
+ temb = temb + class_emb
662
+
663
+ if base_model.config.addition_embed_type is not None:
664
+ if base_model.config.addition_embed_type == "text":
665
+ aug_emb = base_model.add_embedding(encoder_hidden_states["states"])
666
+ elif base_model.config.addition_embed_type == "text_image":
667
+ raise NotImplementedError()
668
+ elif base_model.config.addition_embed_type == "text_time":
669
+ # SDXL - style
670
+ if "text_embeds" not in added_cond_kwargs:
671
+ raise ValueError(
672
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
673
+ )
674
+ text_embeds = added_cond_kwargs.get("text_embeds")
675
+ if "time_ids" not in added_cond_kwargs:
676
+ raise ValueError(
677
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
678
+ )
679
+ time_ids = added_cond_kwargs.get("time_ids")
680
+ time_embeds = base_model.add_time_proj(time_ids.flatten())
681
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
682
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
683
+ add_embeds = add_embeds.to(temb.dtype)
684
+ aug_emb = base_model.add_embedding(add_embeds)
685
+ elif base_model.config.addition_embed_type == "image":
686
+ raise NotImplementedError()
687
+ elif base_model.config.addition_embed_type == "image_hint":
688
+ raise NotImplementedError()
689
+
690
+ temb = temb + aug_emb if aug_emb is not None else temb
691
+
692
+ # text embeddings
693
+ cemb = encoder_hidden_states["states"]
694
+
695
+ # Preparation
696
+ guided_hint = self.controlnet_cond_embedding(controlnet_cond)
697
+
698
+ h_ctrl = h_base = sample
699
+ hs_base, hs_ctrl = [], []
700
+ it_down_convs_in, it_down_convs_out, it_dec_convs_in, it_up_convs_out = map(
701
+ iter, (self.down_zero_convs_in, self.down_zero_convs_out, self.up_zero_convs_in, self.up_zero_convs_out)
702
+ )
703
+ scales = iter(scale_list)
704
+
705
+ base_down_subblocks = to_sub_blocks(base_model.down_blocks)
706
+ ctrl_down_subblocks = to_sub_blocks(self.control_model.down_blocks)
707
+ base_mid_subblocks = to_sub_blocks([base_model.mid_block])
708
+ ctrl_mid_subblocks = to_sub_blocks([self.control_model.mid_block])
709
+ base_up_subblocks = to_sub_blocks(base_model.up_blocks)
710
+
711
+ # Cross Control
712
+ # 0 - conv in
713
+ h_base = base_model.conv_in(h_base)
714
+ h_ctrl = self.control_model.conv_in(h_ctrl)
715
+ if guided_hint is not None:
716
+ h_ctrl += guided_hint
717
+ h_base = h_base + next(it_down_convs_out)(h_ctrl) * next(scales) # D - add ctrl -> base
718
+
719
+ hs_base.append(h_base)
720
+ hs_ctrl.append(h_ctrl)
721
+
722
+ # 1 - down
723
+ for m_base, m_ctrl in zip(base_down_subblocks, ctrl_down_subblocks):
724
+ h_ctrl = torch.cat([h_ctrl, next(it_down_convs_in)(h_base)], dim=1) # A - concat base -> ctrl
725
+ h_base = m_base(h_base, temb, cemb, attention_mask, cross_attention_kwargs) # B - apply base subblock
726
+ h_ctrl = m_ctrl(h_ctrl, temb, cemb, attention_mask, cross_attention_kwargs) # C - apply ctrl subblock
727
+ h_base = h_base + next(it_down_convs_out)(h_ctrl) * next(scales) # D - add ctrl -> base
728
+ hs_base.append(h_base)
729
+ hs_ctrl.append(h_ctrl)
730
+
731
+ # 2 - mid
732
+ h_ctrl = torch.cat([h_ctrl, next(it_down_convs_in)(h_base)], dim=1) # A - concat base -> ctrl
733
+ for m_base, m_ctrl in zip(base_mid_subblocks, ctrl_mid_subblocks):
734
+ h_base = m_base(h_base, temb, cemb, attention_mask, cross_attention_kwargs) # B - apply base subblock
735
+ h_ctrl = m_ctrl(h_ctrl, temb, cemb, attention_mask, cross_attention_kwargs) # C - apply ctrl subblock
736
+ h_base = h_base + self.middle_block_out(h_ctrl) * next(scales) # D - add ctrl -> base
737
+
738
+ # 3 - up
739
+ for i, m_base in enumerate(base_up_subblocks):
740
+ h_base = h_base + next(it_up_convs_out)(hs_ctrl.pop()) * next(scales) # add info from ctrl encoder
741
+ h_base = torch.cat([h_base, hs_base.pop()], dim=1) # concat info from base encoder+ctrl encoder
742
+ h_base = m_base(h_base, temb, cemb, attention_mask, cross_attention_kwargs)
743
+
744
+ h_base = base_model.conv_norm_out(h_base)
745
+ h_base = base_model.conv_act(h_base)
746
+ h_base = base_model.conv_out(h_base)
747
+
748
+ if not return_dict:
749
+ return h_base
750
+
751
+ return ControlNetXSOutput(sample=h_base)
752
+
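# Sketch of a single denoising call, assuming SD-1.5 shapes: 4 latent channels at
# 64x64, 77 CLIP tokens with 768-dim embeddings, and a 3-channel 512x512 control
# image (all tensors below are dummies). Note that this variant expects
# `encoder_hidden_states` as a dict with a "states" entry rather than a bare tensor.
import torch

latents = torch.randn(2, 4, 64, 64)
text_emb = {"states": torch.randn(2, 77, 768)}
control_image = torch.randn(2, 3, 512, 512)
out = control_model(
    base_model=base_unet,
    sample=latents,
    timestep=981,
    encoder_hidden_states=text_emb,
    controlnet_cond=control_image,
    conditioning_scale=1.0,
)
assert out.sample.shape == latents.shape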
753
+ def _make_zero_conv(self, in_channels, out_channels=None):
754
+ # keep a running track of channel sizes
755
+ self.in_channels = in_channels
756
+ self.out_channels = out_channels or in_channels
757
+
758
+ return zero_module(nn.Conv2d(in_channels, out_channels, 1, padding=0))
759
+
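# Small sketch of why the connections are zero-initialized 1x1 convolutions
# (`zero_module` is defined at the end of this file): at initialization the
# control branch contributes exactly nothing, so training starts from the
# unmodified base model.
import torch
from torch import nn

zconv = zero_module(nn.Conv2d(320, 320, 1, padding=0))
assert torch.all(zconv(torch.randn(1, 320, 8, 8)) == 0)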
760
+ @torch.no_grad()
761
+ def _check_if_vae_compatible(self, vae: AutoencoderKL):
762
+ condition_downscale_factor = 2 ** (len(self.config.conditioning_embedding_out_channels) - 1)
763
+ vae_downscale_factor = 2 ** (len(vae.config.block_out_channels) - 1)
764
+ compatible = condition_downscale_factor == vae_downscale_factor
765
+ return compatible, condition_downscale_factor, vae_downscale_factor
766
+
767
+
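# Sketch of the compatibility arithmetic above, assuming the standard SD VAE
# (block_out_channels of length 4) and the default conditioning embedding
# (out_channels of length 4): both sides downscale by a factor of 8.
condition_factor = 2 ** (len((16, 32, 96, 256)) - 1)    # 8
vae_factor = 2 ** (len((128, 256, 512, 512)) - 1)       # 8
assert condition_factor == vae_factor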
768
+ class SubBlock(nn.ModuleList):
769
+ """A SubBlock is the largest piece of either base or control model, that is executed independently of the other model respectively.
770
+ Before each subblock, information is concatenated from base to control; after each subblock, information is added from control to base.
771
+ """
772
+
773
+ def __init__(self, ms, *args, **kwargs):
774
+ if not is_iterable(ms):
775
+ ms = [ms]
776
+ super().__init__(ms, *args, **kwargs)
777
+
778
+ def forward(
779
+ self,
780
+ x: torch.Tensor,
781
+ temb: torch.Tensor,
782
+ cemb: torch.Tensor,
783
+ attention_mask: Optional[torch.Tensor] = None,
784
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
785
+ ):
786
+ """Iterate through children and pass correct information to each."""
787
+ for m in self:
788
+ if isinstance(m, ResnetBlock2D):
789
+ x = m(x, temb)
790
+ elif isinstance(m, Transformer2DModel):
791
+ x = m(x, cemb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs).sample
792
+ elif isinstance(m, Downsample2D):
793
+ x = m(x)
794
+ elif isinstance(m, Upsample2D):
795
+ x = m(x)
796
+ else:
797
+ raise ValueError(
798
+ f"Type of m is {type(m)} but should be `ResnetBlock2D`, `Transformer2DModel`, `Downsample2D` or `Upsample2D`"
799
+ )
800
+
801
+ return x
802
+
803
+
804
+ def adjust_time_dims(unet: UNet2DConditionModel, in_dim: int, out_dim: int):
805
+ unet.time_embedding.linear_1 = nn.Linear(in_dim, out_dim)
806
+
807
+
808
+ def increase_block_input_in_encoder_resnet(unet: UNet2DConditionModel, block_no, resnet_idx, by):
809
+ """Increase channels sizes to allow for additional concatted information from base model"""
810
+ r = unet.down_blocks[block_no].resnets[resnet_idx]
811
+ old_norm1, old_conv1 = r.norm1, r.conv1
812
+ # norm
813
+ norm_args = "num_groups num_channels eps affine".split(" ")
814
+ for a in norm_args:
815
+ assert hasattr(old_norm1, a)
816
+ norm_kwargs = {a: getattr(old_norm1, a) for a in norm_args}
817
+ norm_kwargs["num_channels"] += by # surgery done here
818
+ # conv1
819
+ conv1_args = [
820
+ "in_channels",
821
+ "out_channels",
822
+ "kernel_size",
823
+ "stride",
824
+ "padding",
825
+ "dilation",
826
+ "groups",
827
+ "bias",
828
+ "padding_mode",
829
+ ]
830
+ if not USE_PEFT_BACKEND:
831
+ conv1_args.append("lora_layer")
832
+
833
+ for a in conv1_args:
834
+ assert hasattr(old_conv1, a)
835
+
836
+ conv1_kwargs = {a: getattr(old_conv1, a) for a in conv1_args}
837
+ conv1_kwargs["bias"] = "bias" in conv1_kwargs # as param, bias is a boolean, but as attr, it's a tensor.
838
+ conv1_kwargs["in_channels"] += by # surgery done here
839
+ # conv_shortcut
840
+ # as we changed the input size of the block, the input and output sizes are likely different,
841
+ # therefore we need a conv_shortcut (simply adding won't work)
842
+ conv_shortcut_args_kwargs = {
843
+ "in_channels": conv1_kwargs["in_channels"],
844
+ "out_channels": conv1_kwargs["out_channels"],
845
+ # default arguments from resnet.__init__
846
+ "kernel_size": 1,
847
+ "stride": 1,
848
+ "padding": 0,
849
+ "bias": True,
850
+ }
851
+ # swap old with new modules
852
+ unet.down_blocks[block_no].resnets[resnet_idx].norm1 = GroupNorm(**norm_kwargs)
853
+ unet.down_blocks[block_no].resnets[resnet_idx].conv1 = (
854
+ nn.Conv2d(**conv1_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv1_kwargs)
855
+ )
856
+ unet.down_blocks[block_no].resnets[resnet_idx].conv_shortcut = (
857
+ nn.Conv2d(**conv_shortcut_args_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv_shortcut_args_kwargs)
858
+ )
859
+ unet.down_blocks[block_no].resnets[resnet_idx].in_channels += by # surgery done here
860
+
861
+
862
+ def increase_block_input_in_encoder_downsampler(unet: UNet2DConditionModel, block_no, by):
863
+ """Increase channels sizes to allow for additional concatted information from base model"""
864
+ old_down = unet.down_blocks[block_no].downsamplers[0].conv
865
+
866
+ args = [
867
+ "in_channels",
868
+ "out_channels",
869
+ "kernel_size",
870
+ "stride",
871
+ "padding",
872
+ "dilation",
873
+ "groups",
874
+ "bias",
875
+ "padding_mode",
876
+ ]
877
+ if not USE_PEFT_BACKEND:
878
+ args.append("lora_layer")
879
+
880
+ for a in args:
881
+ assert hasattr(old_down, a)
882
+ kwargs = {a: getattr(old_down, a) for a in args}
883
+ kwargs["bias"] = "bias" in kwargs # as param, bias is a boolean, but as attr, it's a tensor.
884
+ kwargs["in_channels"] += by # surgery done here
885
+ # swap old with new modules
886
+ unet.down_blocks[block_no].downsamplers[0].conv = (
887
+ nn.Conv2d(**kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**kwargs)
888
+ )
889
+ unet.down_blocks[block_no].downsamplers[0].channels += by # surgery done here
890
+
891
+
892
+ def increase_block_input_in_mid_resnet(unet: UNet2DConditionModel, by):
893
+ """Increase channels sizes to allow for additional concatted information from base model"""
894
+ m = unet.mid_block.resnets[0]
895
+ old_norm1, old_conv1 = m.norm1, m.conv1
896
+ # norm
897
+ norm_args = "num_groups num_channels eps affine".split(" ")
898
+ for a in norm_args:
899
+ assert hasattr(old_norm1, a)
900
+ norm_kwargs = {a: getattr(old_norm1, a) for a in norm_args}
901
+ norm_kwargs["num_channels"] += by # surgery done here
902
+ conv1_args = [
903
+ "in_channels",
904
+ "out_channels",
905
+ "kernel_size",
906
+ "stride",
907
+ "padding",
908
+ "dilation",
909
+ "groups",
910
+ "bias",
911
+ "padding_mode",
912
+ ]
913
+ if not USE_PEFT_BACKEND:
914
+ conv1_args.append("lora_layer")
915
+
916
+ conv1_kwargs = {a: getattr(old_conv1, a) for a in conv1_args}
917
+ conv1_kwargs["bias"] = "bias" in conv1_kwargs # as param, bias is a boolean, but as attr, it's a tensor.
918
+ conv1_kwargs["in_channels"] += by # surgery done here
919
+ # conv_shortcut
920
+ # as we changed the input size of the block, the input and output sizes are likely different,
921
+ # therefore we need a conv_shortcut (simply adding won't work)
922
+ conv_shortcut_args_kwargs = {
923
+ "in_channels": conv1_kwargs["in_channels"],
924
+ "out_channels": conv1_kwargs["out_channels"],
925
+ # default arguments from resnet.__init__
926
+ "kernel_size": 1,
927
+ "stride": 1,
928
+ "padding": 0,
929
+ "bias": True,
930
+ }
931
+ # swap old with new modules
932
+ unet.mid_block.resnets[0].norm1 = GroupNorm(**norm_kwargs)
933
+ unet.mid_block.resnets[0].conv1 = (
934
+ nn.Conv2d(**conv1_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv1_kwargs)
935
+ )
936
+ unet.mid_block.resnets[0].conv_shortcut = (
937
+ nn.Conv2d(**conv_shortcut_args_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv_shortcut_args_kwargs)
938
+ )
939
+ unet.mid_block.resnets[0].in_channels += by # surgery done here
940
+
941
+
942
+ def adjust_group_norms(unet: UNet2DConditionModel, max_num_group: int = 32):
943
+ def find_denominator(number, start):
944
+ if start >= number:
945
+ return number
946
+ while start != 0:
947
+ residual = number % start
948
+ if residual == 0:
949
+ return start
950
+ start -= 1
951
+
952
+ for block in [*unet.down_blocks, unet.mid_block]:
953
+ # resnets
954
+ for r in block.resnets:
955
+ if r.norm1.num_groups < max_num_group:
956
+ r.norm1.num_groups = find_denominator(r.norm1.num_channels, start=max_num_group)
957
+
958
+ if r.norm2.num_groups < max_num_group:
959
+ r.norm2.num_groups = find_denominator(r.norm2.num_channels, start=max_num_group)
960
+
961
+ # transformers
962
+ if hasattr(block, "attentions"):
963
+ for a in block.attentions:
964
+ if a.norm.num_groups < max_num_group:
965
+ a.norm.num_groups = find_denominator(a.norm.num_channels, start=max_num_group)
966
+
967
+
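# Standalone sketch of what the nested `find_denominator` helper above does: it
# returns the largest divisor of `number` that is at most `start`, so shrunken
# channel counts still get a valid GroupNorm group count. (Copied out of
# `adjust_group_norms` for illustration; the numbers are arbitrary.)
def _find_denominator(number, start):
    if start >= number:
        return number
    while start != 0:
        if number % start == 0:
            return start
        start -= 1

assert _find_denominator(96, start=32) == 32   # 32 already divides 96
assert _find_denominator(40, start=32) == 20   # falls back to the next divisor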
968
+ def is_iterable(o):
969
+ if isinstance(o, str):
970
+ return False
971
+ try:
972
+ iter(o)
973
+ return True
974
+ except TypeError:
975
+ return False
976
+
977
+
978
+ def to_sub_blocks(blocks):
979
+ if not is_iterable(blocks):
980
+ blocks = [blocks]
981
+
982
+ sub_blocks = []
983
+
984
+ for b in blocks:
985
+ if hasattr(b, "resnets"):
986
+ if hasattr(b, "attentions") and b.attentions is not None:
987
+ for r, a in zip(b.resnets, b.attentions):
988
+ sub_blocks.append([r, a])
989
+
990
+ num_resnets = len(b.resnets)
991
+ num_attns = len(b.attentions)
992
+
993
+ if num_resnets > num_attns:
994
+ # we can have more resnets than attentions, so add each remaining resnet as a separate subblock
995
+ for i in range(num_attns, num_resnets):
996
+ sub_blocks.append([b.resnets[i]])
997
+ else:
998
+ for r in b.resnets:
999
+ sub_blocks.append([r])
1000
+
1001
+ # upsamplers are part of the same subblock
1002
+ if hasattr(b, "upsamplers") and b.upsamplers is not None:
1003
+ for u in b.upsamplers:
1004
+ sub_blocks[-1].extend([u])
1005
+
1006
+ # downsamplers are own subblock
1007
+ if hasattr(b, "downsamplers") and b.downsamplers is not None:
1008
+ for d in b.downsamplers:
1009
+ sub_blocks.append([d])
1010
+
1011
+ return list(map(SubBlock, sub_blocks))
1012
+
1013
+
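# Sketch of how `to_sub_blocks` slices a UNet, assuming the SD-1.5 layout; the
# counts were worked out from that layout and are illustrative only.
down = to_sub_blocks(base_unet.down_blocks)  # 3 * (2 resnet+attn pairs + 1 downsampler) + 2 resnets = 11
mid = to_sub_blocks([base_unet.mid_block])   # 1 resnet+attn pair + 1 trailing resnet = 2
up = to_sub_blocks(base_unet.up_blocks)      # 4 blocks * 3 resnet(+attn) subblocks = 12
print(len(down), len(mid), len(up))          # expected: 11 2 12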
1014
+ def zero_module(module):
1015
+ for p in module.parameters():
1016
+ nn.init.zeros_(p)
1017
+ return module
modules/controlnetxs/pipeline_controlnet_xs.py ADDED
@@ -0,0 +1,1022 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ import torch.nn.functional as F
22
+ from controlnetxs import ControlNetXSModel
23
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
24
+
25
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
26
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
27
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
28
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
29
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
30
+ from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
31
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
32
+ from diffusers.schedulers import KarrasDiffusionSchedulers
33
+ from diffusers.utils import (
34
+ USE_PEFT_BACKEND,
35
+ deprecate,
36
+ logging,
37
+ scale_lora_layers,
38
+ unscale_lora_layers,
39
+ )
40
+ from diffusers.utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
41
+ from modules.prompt_parser import FrozenCLIPEmbedderWithCustomWords
42
+
43
+
44
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
+
46
+
47
+
48
+ # Support for finding the region of an object
49
+ def encode_sketchs(state, tokenizer, unet, scale_ratio=8, g_strength=1.0, text_ids=None):
50
+ uncond, cond = text_ids[0], text_ids[1]
51
+
52
+ img_state = []
53
+ if state is None:
54
+ return torch.FloatTensor(0)
55
+
56
+ for k, v in state.items():
57
+ if v["map"] is None:
58
+ continue
59
+
60
+ v_input = tokenizer(
61
+ k,
62
+ max_length=tokenizer.model_max_length,
63
+ truncation=True,
64
+ add_special_tokens=False,
65
+ ).input_ids
66
+
67
+ dotmap = v["map"] < 255
68
+ out = dotmap.astype(float)
69
+ if v["mask_outsides"]:
70
+ out[out==0] = -1
71
+
72
+ arr = torch.from_numpy(
73
+ out * float(v["weight"]) * g_strength
74
+ )
75
+ img_state.append((v_input, arr))
76
+
77
+ if len(img_state) == 0:
78
+ return torch.FloatTensor(0)
79
+
80
+ w_tensors = dict()
81
+ cond = cond.tolist()
82
+ uncond = uncond.tolist()
83
+ for layer in unet.down_blocks:
84
+ c = int(len(cond))
85
+ w, h = img_state[0][1].shape
86
+ w_r, h_r = w // scale_ratio, h // scale_ratio
87
+
88
+ ret_cond_tensor = torch.zeros((1, int(w_r * h_r), c), dtype=torch.float32)
89
+ ret_uncond_tensor = torch.zeros((1, int(w_r * h_r), c), dtype=torch.float32)
90
+
91
+ for v_as_tokens, img_where_color in img_state:
92
+ is_in = 0
93
+
94
+ ret = (
95
+ F.interpolate(
96
+ img_where_color.unsqueeze(0).unsqueeze(1),
97
+ scale_factor=1 / scale_ratio,
98
+ mode="bilinear",
99
+ align_corners=True,
100
+ )
101
+ .squeeze()
102
+ .reshape(-1, 1)
103
+ .repeat(1, len(v_as_tokens))
104
+ )
105
+
106
+ for idx, tok in enumerate(cond):
107
+ if cond[idx : idx + len(v_as_tokens)] == v_as_tokens:
108
+ is_in = 1
109
+ ret_cond_tensor[0, :, idx : idx + len(v_as_tokens)] += ret
110
+
111
+ for idx, tok in enumerate(uncond):
112
+ if uncond[idx : idx + len(v_as_tokens)] == v_as_tokens:
113
+ is_in = 1
114
+ ret_uncond_tensor[0, :, idx : idx + len(v_as_tokens)] += ret
115
+
116
+ if not is_in == 1:
117
+ print(f"tokens {v_as_tokens} not found in text")
118
+
119
+ w_tensors[w_r * h_r] = torch.cat([ret_uncond_tensor, ret_cond_tensor])
120
+ scale_ratio *= 2
121
+
122
+ return w_tensors
123
+
124
+
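# Sketch of the `state` format this helper expects (inferred from the loop above;
# the prompt, region and repo id are illustrative): each entry maps a phrase to a
# grayscale map in which pixels < 255 mark where that phrase should act. The
# result maps each latent resolution (64*64, 32*32, ...) to stacked
# (uncond, cond) token-weight tensors.
import numpy as np
from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
cond_ids = tok("a red ball on a table", padding="max_length",
               max_length=tok.model_max_length, truncation=True,
               return_tensors="pt").input_ids[0]
uncond_ids = tok("", padding="max_length", max_length=tok.model_max_length,
                 truncation=True, return_tensors="pt").input_ids[0]

region = np.full((512, 512), 255, dtype=np.uint8)
region[100:300, 100:300] = 0   # "a red ball" should act inside this square
state = {"a red ball": {"map": region, "weight": 1.0, "mask_outsides": False}}

# `base_unet` is the UNet from the earlier sketches (only its down_blocks are used)
weights = encode_sketchs(state, tok, base_unet, text_ids=(uncond_ids, cond_ids))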
125
+ class StableDiffusionControlNetXSPipeline(
126
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
127
+ ):
128
+ r"""
129
+ Pipeline for text-to-image generation using Stable Diffusion with ControlNet-XS guidance.
130
+
131
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
132
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
133
+
134
+ The pipeline also inherits the following loading methods:
135
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
136
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
137
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
138
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
139
+
140
+ Args:
141
+ vae ([`AutoencoderKL`]):
142
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
143
+ text_encoder ([`~transformers.CLIPTextModel`]):
144
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
145
+ tokenizer ([`~transformers.CLIPTokenizer`]):
146
+ A `CLIPTokenizer` to tokenize text.
147
+ unet ([`UNet2DConditionModel`]):
148
+ A `UNet2DConditionModel` to denoise the encoded image latents.
149
+ controlnet ([`ControlNetXSModel`]):
150
+ Provides additional conditioning to the `unet` during the denoising process.
151
+ scheduler ([`SchedulerMixin`]):
152
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
153
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
154
+ safety_checker ([`StableDiffusionSafetyChecker`]):
155
+ Classification module that estimates whether generated images could be considered offensive or harmful.
156
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
157
+ about a model's potential harms.
158
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
159
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
160
+ """
161
+
162
+ model_cpu_offload_seq = "text_encoder->unet->vae>controlnet"
163
+ _optional_components = ["safety_checker", "feature_extractor"]
164
+ _exclude_from_cpu_offload = ["safety_checker"]
165
+
166
+ def __init__(
167
+ self,
168
+ vae: AutoencoderKL,
169
+ text_encoder: CLIPTextModel,
170
+ tokenizer: CLIPTokenizer,
171
+ unet: UNet2DConditionModel,
172
+ controlnet: ControlNetXSModel,
173
+ scheduler: KarrasDiffusionSchedulers,
174
+ safety_checker: StableDiffusionSafetyChecker,
175
+ feature_extractor: CLIPImageProcessor,
176
+ requires_safety_checker: bool = True,
177
+ ):
178
+ super().__init__()
179
+
180
+ if safety_checker is None and requires_safety_checker:
181
+ logger.warning(
182
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
183
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
184
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
185
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
186
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
187
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
188
+ )
189
+
190
+ if safety_checker is not None and feature_extractor is None:
191
+ raise ValueError(
192
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
193
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
194
+ )
195
+
196
+ vae_compatible, cnxs_condition_downsample_factor, vae_downsample_factor = controlnet._check_if_vae_compatible(
197
+ vae
198
+ )
199
+ if not vae_compatible:
200
+ raise ValueError(
201
+ f"The downsampling factors of the VAE ({vae_downsample_factor}) and the conditioning part of ControlNetXS model {cnxs_condition_downsample_factor} need to be equal. Consider building the ControlNetXS model with different `conditioning_block_sizes`."
202
+ )
203
+
204
+ self.register_modules(
205
+ vae=vae,
206
+ text_encoder=text_encoder,
207
+ tokenizer=tokenizer,
208
+ unet=unet,
209
+ controlnet=controlnet,
210
+ scheduler=scheduler,
211
+ safety_checker=safety_checker,
212
+ feature_extractor=feature_extractor,
213
+ )
214
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
215
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
216
+ self.control_image_processor = VaeImageProcessor(
217
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
218
+ )
219
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
220
+
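# Minimal assembly sketch, assuming an SD-1.5 checkpoint and a ControlNet-XS model
# built with `from_unet` as in the sketches further above (the repo id and the
# choice to skip the safety checker are illustrative):
from diffusers import StableDiffusionPipeline

base = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
controlnet = ControlNetXSModel.from_unet(base.unet, size_ratio=0.1)
pipe = StableDiffusionControlNetXSPipeline(
    vae=base.vae,
    text_encoder=base.text_encoder,
    tokenizer=base.tokenizer,
    unet=base.unet,
    controlnet=controlnet,
    scheduler=base.scheduler,
    safety_checker=None,
    feature_extractor=None,
    requires_safety_checker=False,
)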
221
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
222
+ def enable_vae_slicing(self):
223
+ r"""
224
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
225
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
226
+ """
227
+ self.vae.enable_slicing()
228
+
229
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
230
+ def disable_vae_slicing(self):
231
+ r"""
232
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
233
+ computing decoding in one step.
234
+ """
235
+ self.vae.disable_slicing()
236
+
237
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
238
+ def enable_vae_tiling(self):
239
+ r"""
240
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
241
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
242
+ processing larger images.
243
+ """
244
+ self.vae.enable_tiling()
245
+
246
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
247
+ def disable_vae_tiling(self):
248
+ r"""
249
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
250
+ computing decoding in one step.
251
+ """
252
+ self.vae.disable_tiling()
253
+
254
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
255
+ def _encode_prompt(
256
+ self,
257
+ prompt,
258
+ device,
259
+ num_images_per_prompt,
260
+ do_classifier_free_guidance,
261
+ negative_prompt=None,
262
+ prompt_embeds: Optional[torch.FloatTensor] = None,
263
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
264
+ lora_scale: Optional[float] = None,
265
+ **kwargs,
266
+ ):
267
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
268
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
269
+
270
+ prompt_embeds_tuple = self.encode_prompt(
271
+ prompt=prompt,
272
+ device=device,
273
+ num_images_per_prompt=num_images_per_prompt,
274
+ do_classifier_free_guidance=do_classifier_free_guidance,
275
+ negative_prompt=negative_prompt,
276
+ prompt_embeds=prompt_embeds,
277
+ negative_prompt_embeds=negative_prompt_embeds,
278
+ lora_scale=lora_scale,
279
+ **kwargs,
280
+ )
281
+
282
+ # concatenate for backwards comp
283
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
284
+
285
+ return prompt_embeds
286
+
287
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
288
+ def encode_prompt(
289
+ self,
290
+ prompt,
291
+ device,
292
+ num_images_per_prompt,
293
+ do_classifier_free_guidance,
294
+ negative_prompt=None,
295
+ prompt_embeds: Optional[torch.FloatTensor] = None,
296
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
297
+ lora_scale: Optional[float] = None,
298
+ clip_skip: Optional[int] = None,
299
+ ):
300
+ r"""
301
+ Encodes the prompt into text encoder hidden states.
302
+
303
+ Args:
304
+ prompt (`str` or `List[str]`, *optional*):
305
+ prompt to be encoded
306
+ device: (`torch.device`):
307
+ torch device
308
+ num_images_per_prompt (`int`):
309
+ number of images that should be generated per prompt
310
+ do_classifier_free_guidance (`bool`):
311
+ whether to use classifier free guidance or not
312
+ negative_prompt (`str` or `List[str]`, *optional*):
313
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
314
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
315
+ less than `1`).
316
+ prompt_embeds (`torch.FloatTensor`, *optional*):
317
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
318
+ provided, text embeddings will be generated from `prompt` input argument.
319
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
320
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
321
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
322
+ argument.
323
+ lora_scale (`float`, *optional*):
324
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
325
+ clip_skip (`int`, *optional*):
326
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
327
+ the output of the pre-final layer will be used for computing the prompt embeddings.
328
+ """
329
+ # set lora scale so that monkey patched LoRA
330
+ # function of text encoder can correctly access it
331
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
332
+ self._lora_scale = lora_scale
333
+
334
+ # dynamically adjust the LoRA scale
335
+ if not USE_PEFT_BACKEND:
336
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
337
+ else:
338
+ scale_lora_layers(self.text_encoder, lora_scale)
339
+
340
+ if prompt is not None and isinstance(prompt, str):
341
+ batch_size = 1
342
+ elif prompt is not None and isinstance(prompt, list):
343
+ batch_size = len(prompt)
344
+ else:
345
+ batch_size = prompt_embeds.shape[0]
346
+
347
+ if prompt_embeds is None:
348
+ # textual inversion: procecss multi-vector tokens if necessary
349
+ if isinstance(self, TextualInversionLoaderMixin):
350
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
351
+
352
+ text_inputs = self.tokenizer(
353
+ prompt,
354
+ padding="max_length",
355
+ max_length=self.tokenizer.model_max_length,
356
+ truncation=True,
357
+ return_tensors="pt",
358
+ )
359
+ text_input_ids = text_inputs.input_ids
360
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
361
+
362
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
363
+ text_input_ids, untruncated_ids
364
+ ):
365
+ removed_text = self.tokenizer.batch_decode(
366
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
367
+ )
368
+ logger.warning(
369
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
370
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
371
+ )
372
+
373
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
374
+ attention_mask = text_inputs.attention_mask.to(device)
375
+ else:
376
+ attention_mask = None
377
+
378
+ if clip_skip is None:
379
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
380
+ prompt_embeds = prompt_embeds[0]
381
+ else:
382
+ prompt_embeds = self.text_encoder(
383
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
384
+ )
385
+ # Access the `hidden_states` first, that contains a tuple of
386
+ # all the hidden states from the encoder layers. Then index into
387
+ # the tuple to access the hidden states from the desired layer.
388
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
389
+ # We also need to apply the final LayerNorm here to not mess with the
390
+ # representations. The `last_hidden_states` that we typically use for
391
+ # obtaining the final prompt representations passes through the LayerNorm
392
+ # layer.
393
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
394
+
395
+ if self.text_encoder is not None:
396
+ prompt_embeds_dtype = self.text_encoder.dtype
397
+ elif self.unet is not None:
398
+ prompt_embeds_dtype = self.unet.dtype
399
+ else:
400
+ prompt_embeds_dtype = prompt_embeds.dtype
401
+
402
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
403
+
404
+ bs_embed, seq_len, _ = prompt_embeds.shape
405
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
406
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
407
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
408
+
409
+ # get unconditional embeddings for classifier free guidance
410
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
411
+ uncond_tokens: List[str]
412
+ if negative_prompt is None:
413
+ uncond_tokens = [""] * batch_size
414
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
415
+ raise TypeError(
416
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
417
+ f" {type(prompt)}."
418
+ )
419
+ elif isinstance(negative_prompt, str):
420
+ uncond_tokens = [negative_prompt]
421
+ elif batch_size != len(negative_prompt):
422
+ raise ValueError(
423
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
424
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
425
+ " the batch size of `prompt`."
426
+ )
427
+ else:
428
+ uncond_tokens = negative_prompt
429
+
430
+ # textual inversion: procecss multi-vector tokens if necessary
431
+ if isinstance(self, TextualInversionLoaderMixin):
432
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
433
+
434
+ max_length = prompt_embeds.shape[1]
435
+ uncond_input = self.tokenizer(
436
+ uncond_tokens,
437
+ padding="max_length",
438
+ max_length=max_length,
439
+ truncation=True,
440
+ return_tensors="pt",
441
+ )
442
+
443
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
444
+ attention_mask = uncond_input.attention_mask.to(device)
445
+ else:
446
+ attention_mask = None
447
+
448
+ negative_prompt_embeds = self.text_encoder(
449
+ uncond_input.input_ids.to(device),
450
+ attention_mask=attention_mask,
451
+ )
452
+ negative_prompt_embeds = negative_prompt_embeds[0]
453
+
454
+ if do_classifier_free_guidance:
455
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
456
+ seq_len = negative_prompt_embeds.shape[1]
457
+
458
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
459
+
460
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
461
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
462
+
463
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
464
+ # Retrieve the original scale by scaling back the LoRA layers
465
+ unscale_lora_layers(self.text_encoder, lora_scale)
466
+
467
+ return prompt_embeds, negative_prompt_embeds
468
+
469
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
470
+ def run_safety_checker(self, image, device, dtype):
471
+ if self.safety_checker is None:
472
+ has_nsfw_concept = None
473
+ else:
474
+ if torch.is_tensor(image):
475
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
476
+ else:
477
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
478
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
479
+ image, has_nsfw_concept = self.safety_checker(
480
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
481
+ )
482
+ return image, has_nsfw_concept
483
+
484
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
485
+ def decode_latents(self, latents):
486
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
487
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
488
+
489
+ latents = 1 / self.vae.config.scaling_factor * latents
490
+ image = self.vae.decode(latents, return_dict=False)[0]
491
+ image = (image / 2 + 0.5).clamp(0, 1)
492
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
493
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
494
+ return image
495
+
496
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
497
+ def prepare_extra_step_kwargs(self, generator, eta):
498
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
499
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
500
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
501
+ # and should be between [0, 1]
502
+
503
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
504
+ extra_step_kwargs = {}
505
+ if accepts_eta:
506
+ extra_step_kwargs["eta"] = eta
507
+
508
+ # check if the scheduler accepts generator
509
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
510
+ if accepts_generator:
511
+ extra_step_kwargs["generator"] = generator
512
+ return extra_step_kwargs
513
+
514
+ def check_inputs(
515
+ self,
516
+ prompt,
517
+ image,
518
+ callback_steps,
519
+ negative_prompt=None,
520
+ prompt_embeds=None,
521
+ negative_prompt_embeds=None,
522
+ controlnet_conditioning_scale=1.0,
523
+ control_guidance_start=0.0,
524
+ control_guidance_end=1.0,
525
+ ):
526
+ if (callback_steps is None) or (
527
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
528
+ ):
529
+ raise ValueError(
530
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
531
+ f" {type(callback_steps)}."
532
+ )
533
+
534
+ if prompt is not None and prompt_embeds is not None:
535
+ raise ValueError(
536
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
537
+ " only forward one of the two."
538
+ )
539
+ elif prompt is None and prompt_embeds is None:
540
+ raise ValueError(
541
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
542
+ )
543
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
544
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
545
+
546
+ if negative_prompt is not None and negative_prompt_embeds is not None:
547
+ raise ValueError(
548
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
549
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
550
+ )
551
+
552
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
553
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
554
+ raise ValueError(
555
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
556
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
557
+ f" {negative_prompt_embeds.shape}."
558
+ )
559
+
560
+ # Check `image`
561
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
562
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
563
+ )
564
+ if (
565
+ isinstance(self.controlnet, ControlNetXSModel)
566
+ or is_compiled
567
+ and isinstance(self.controlnet._orig_mod, ControlNetXSModel)
568
+ ):
569
+ self.check_image(image, prompt, prompt_embeds)
570
+ else:
571
+ assert False
572
+
573
+ # Check `controlnet_conditioning_scale`
574
+ if (
575
+ isinstance(self.controlnet, ControlNetXSModel)
576
+ or is_compiled
577
+ and isinstance(self.controlnet._orig_mod, ControlNetXSModel)
578
+ ):
579
+ if not isinstance(controlnet_conditioning_scale, float):
580
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
581
+ else:
582
+ assert False
583
+
584
+ start, end = control_guidance_start, control_guidance_end
585
+ if start >= end:
586
+ raise ValueError(
587
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
588
+ )
589
+ if start < 0.0:
590
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
591
+ if end > 1.0:
592
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
593
+
594
+ def check_image(self, image, prompt, prompt_embeds):
595
+ image_is_pil = isinstance(image, PIL.Image.Image)
596
+ image_is_tensor = isinstance(image, torch.Tensor)
597
+ image_is_np = isinstance(image, np.ndarray)
598
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
599
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
600
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
601
+
602
+ if (
603
+ not image_is_pil
604
+ and not image_is_tensor
605
+ and not image_is_np
606
+ and not image_is_pil_list
607
+ and not image_is_tensor_list
608
+ and not image_is_np_list
609
+ ):
610
+ raise TypeError(
611
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
612
+ )
613
+
614
+ if image_is_pil:
615
+ image_batch_size = 1
616
+ else:
617
+ image_batch_size = len(image)
618
+
619
+ if prompt is not None and isinstance(prompt, str):
620
+ prompt_batch_size = 1
621
+ elif prompt is not None and isinstance(prompt, list):
622
+ prompt_batch_size = len(prompt)
623
+ elif prompt_embeds is not None:
624
+ prompt_batch_size = prompt_embeds.shape[0]
625
+
626
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
627
+ raise ValueError(
628
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
629
+ )
630
+
631
+ def prepare_image(
632
+ self,
633
+ image,
634
+ width,
635
+ height,
636
+ batch_size,
637
+ num_images_per_prompt,
638
+ device,
639
+ dtype,
640
+ do_classifier_free_guidance=False,
641
+ ):
642
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
643
+ image_batch_size = image.shape[0]
644
+
645
+ if image_batch_size == 1:
646
+ repeat_by = batch_size
647
+ else:
648
+ # image batch size is the same as prompt batch size
649
+ repeat_by = num_images_per_prompt
650
+
651
+ image = image.repeat_interleave(repeat_by, dim=0)
652
+
653
+ image = image.to(device=device, dtype=dtype)
654
+
655
+ if do_classifier_free_guidance:
656
+ image = torch.cat([image] * 2)
657
+
658
+ return image
659
+
660
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
661
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
662
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
663
+ if isinstance(generator, list) and len(generator) != batch_size:
664
+ raise ValueError(
665
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
666
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
667
+ )
668
+
669
+ if latents is None:
670
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
671
+ else:
672
+ latents = latents.to(device)
673
+
674
+ # scale the initial noise by the standard deviation required by the scheduler
675
+ latents = latents * self.scheduler.init_noise_sigma
676
+ return latents
677
+
678
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
679
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
680
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
681
+
682
+ The suffixes after the scaling factors represent the stages where they are being applied.
683
+
684
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
685
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
686
+
687
+ Args:
688
+ s1 (`float`):
689
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
690
+ mitigate "oversmoothing effect" in the enhanced denoising process.
691
+ s2 (`float`):
692
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
693
+ mitigate "oversmoothing effect" in the enhanced denoising process.
694
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
695
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
696
+ """
697
+ if not hasattr(self, "unet"):
698
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
699
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
700
+
701
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
702
+ def disable_freeu(self):
703
+ """Disables the FreeU mechanism if enabled."""
704
+ self.unet.disable_freeu()
705
+
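For reference, a hedged sketch of toggling these FreeU hooks on a stock diffusers pipeline. The scaling factors are the SD 1.x starting values suggested by the FreeU authors, and the checkpoint id is purely illustrative, not something this codebase prescribes:

```python
# Hedged sketch: enable/disable FreeU on a standard pipeline. The scale values are
# the commonly suggested SD 1.x starting point from the FreeU repository.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6)  # damp skip features, boost backbone
image = pipe("a watercolor lighthouse at dusk").images[0]
pipe.disable_freeu()                               # restore the unmodified UNet
```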
706
+ def type_output(self,output_type,device,d_type,return_dict,latents,generator):
707
+ if not output_type == "latent":
708
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False,generator=generator)[0]
709
+ image, has_nsfw_concept = self.run_safety_checker(image, device, d_type)
710
+ else:
711
+ image = latents
712
+ has_nsfw_concept = None
713
+
714
+ if has_nsfw_concept is None:
715
+ do_denormalize = [True] * image.shape[0]
716
+ else:
717
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
718
+
719
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
720
+
721
+ # Offload all models
722
+ self.maybe_free_model_hooks()
723
+
724
+ if not return_dict:
725
+ return (image, has_nsfw_concept)
726
+
727
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
728
+
729
+ @torch.no_grad()
730
+ def __call__(
731
+ self,
732
+ prompt: Union[str, List[str]] = None,
733
+ image: PipelineImageInput = None,
734
+ height: Optional[int] = None,
735
+ width: Optional[int] = None,
736
+ num_inference_steps: int = 50,
737
+ guidance_scale: float = 7.5,
738
+ negative_prompt: Optional[Union[str, List[str]]] = None,
739
+ num_images_per_prompt: Optional[int] = 1,
740
+ eta: float = 0.0,
741
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
742
+ latents: Optional[torch.FloatTensor] = None,
743
+ prompt_embeds: Optional[torch.FloatTensor] = None,
744
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
745
+ output_type: Optional[str] = "pil",
746
+ return_dict: bool = True,
747
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
748
+ callback_steps: int = 1,
749
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
750
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
751
+ control_guidance_start: float = 0.0,
752
+ control_guidance_end: float = 1.0,
753
+ clip_skip: Optional[int] = 0,
754
+ pww_state=None,
755
+ pww_attn_weight=1.0,
756
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
757
+ latent_processing = 0,
758
+ ):
759
+ r"""
760
+ The call function to the pipeline for generation.
761
+
762
+ Args:
763
+ prompt (`str` or `List[str]`, *optional*):
764
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
765
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
766
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
767
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
768
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
769
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
770
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
771
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
772
+ input to a single ControlNet.
773
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
774
+ The height in pixels of the generated image.
775
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
776
+ The width in pixels of the generated image.
777
+ num_inference_steps (`int`, *optional*, defaults to 50):
778
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
779
+ expense of slower inference.
780
+ guidance_scale (`float`, *optional*, defaults to 7.5):
781
+ A higher guidance scale value encourages the model to generate images closely linked to the text
782
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
783
+ negative_prompt (`str` or `List[str]`, *optional*):
784
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
785
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
786
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
787
+ The number of images to generate per prompt.
788
+ eta (`float`, *optional*, defaults to 0.0):
789
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
790
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
791
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
792
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
793
+ generation deterministic.
794
+ latents (`torch.FloatTensor`, *optional*):
795
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
796
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
797
+ tensor is generated by sampling using the supplied random `generator`.
798
+ prompt_embeds (`torch.FloatTensor`, *optional*):
799
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
800
+ provided, text embeddings are generated from the `prompt` input argument.
801
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
802
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
803
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
804
+ output_type (`str`, *optional*, defaults to `"pil"`):
805
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
806
+ return_dict (`bool`, *optional*, defaults to `True`):
807
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
808
+ plain tuple.
809
+ callback (`Callable`, *optional*):
810
+ A function that calls every `callback_steps` steps during inference. The function is called with the
811
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
812
+ callback_steps (`int`, *optional*, defaults to 1):
813
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
814
+ every step.
815
+ cross_attention_kwargs (`dict`, *optional*):
816
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
817
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
818
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
819
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
820
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
821
+ the corresponding scale as a list.
822
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
823
+ The percentage of total steps at which the ControlNet starts applying.
824
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
825
+ The percentage of total steps at which the ControlNet stops applying.
826
+ clip_skip (`int`, *optional*):
827
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
828
+ the output of the pre-final layer will be used for computing the prompt embeddings.
829
+
830
+ Examples:
831
+
832
+ Returns:
833
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
834
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
835
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
836
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
837
+ "not-safe-for-work" (nsfw) content.
838
+ """
839
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
840
+
841
+ if height is None:
842
+ height = image.height
843
+ if width is None:
844
+ width = image.width
845
+
846
+ self.prompt_parser = FrozenCLIPEmbedderWithCustomWords(self.tokenizer, self.text_encoder,clip_skip+1)
847
+
848
+ # 1. Check inputs. Raise error if not correct
849
+ self.check_inputs(
850
+ prompt,
851
+ image,
852
+ callback_steps,
853
+ negative_prompt,
854
+ prompt_embeds,
855
+ negative_prompt_embeds,
856
+ controlnet_conditioning_scale,
857
+ control_guidance_start,
858
+ control_guidance_end,
859
+ )
860
+
861
+ # 2. Define call parameters
862
+ if prompt is not None and isinstance(prompt, str):
863
+ batch_size = 1
864
+ elif prompt is not None and isinstance(prompt, list):
865
+ batch_size = len(prompt)
866
+ else:
867
+ batch_size = prompt_embeds.shape[0]
868
+
869
+ device = self._execution_device
870
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
871
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
872
+ # corresponds to doing no classifier free guidance.
873
+ do_classifier_free_guidance = guidance_scale > 1.0
874
+
875
+ text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
876
+ text_embeddings = text_embeddings.to(self.unet.dtype)
877
+
878
+ # 3. Encode input prompt
879
+ text_encoder_lora_scale = (
880
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
881
+ )
882
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
883
+ prompt,
884
+ device,
885
+ num_images_per_prompt,
886
+ do_classifier_free_guidance,
887
+ negative_prompt,
888
+ prompt_embeds=prompt_embeds,
889
+ negative_prompt_embeds=negative_prompt_embeds,
890
+ lora_scale=text_encoder_lora_scale,
891
+ clip_skip=clip_skip,
892
+ )
893
+ # For classifier free guidance, we need to do two forward passes.
894
+ # Here we concatenate the unconditional and text embeddings into a single batch
895
+ # to avoid doing two forward passes
896
+ if do_classifier_free_guidance:
897
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
898
+
899
+ # 4. Prepare image
900
+ if isinstance(controlnet, ControlNetXSModel):
901
+ image = self.prepare_image(
902
+ image=image,
903
+ width=width,
904
+ height=height,
905
+ batch_size=batch_size * num_images_per_prompt,
906
+ num_images_per_prompt=num_images_per_prompt,
907
+ device=device,
908
+ dtype=controlnet.dtype,
909
+ do_classifier_free_guidance=do_classifier_free_guidance,
910
+ )
911
+ height, width = image.shape[-2:]
912
+ else:
913
+ assert False
914
+
915
+ # 5. Prepare timesteps
916
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
917
+ timesteps = self.scheduler.timesteps
918
+
919
+ # 6. Prepare latent variables
920
+ img_state = encode_sketchs(
921
+ pww_state,
922
+ tokenizer = self.tokenizer,
923
+ unet = self.unet,
924
+ g_strength=pww_attn_weight,
925
+ text_ids=text_ids,
926
+ )
927
+
928
+ num_channels_latents = self.unet.config.in_channels
929
+ latents = self.prepare_latents(
930
+ batch_size * num_images_per_prompt,
931
+ num_channels_latents,
932
+ height,
933
+ width,
934
+ prompt_embeds.dtype,
935
+ device,
936
+ generator,
937
+ latents,
938
+ )
939
+
940
+ if latent_processing == 1:
941
+ lst_latent = [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
942
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
943
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
944
+
945
+ # 8. Denoising loop
946
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
947
+ is_unet_compiled = is_compiled_module(self.unet)
948
+ is_controlnet_compiled = is_compiled_module(self.controlnet)
949
+ is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
950
+
951
+ if pww_state is not None:
952
+ prompt_embeds = text_embeddings.clone().detach()
953
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
954
+ step_x = 0
955
+ for i, t in enumerate(timesteps):
956
+ # Relevant thread:
957
+ # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
958
+ if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
959
+ torch._inductor.cudagraph_mark_step_begin()
960
+ # expand the latents if we are doing classifier free guidance
961
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
962
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
963
+
964
+ # predict the noise residual
965
+ dont_control = (
966
+ i / len(timesteps) < control_guidance_start or (i + 1) / len(timesteps) > control_guidance_end
967
+ )
968
+ encoder_state = {
969
+ "img_state": img_state,
970
+ "states": prompt_embeds,
971
+ "sigma": self.scheduler.sigmas[step_x],
972
+ "weight_func": weight_func,
973
+ }
974
+ step_x=step_x+1
975
+ if dont_control:
976
+ noise_pred = self.unet(
977
+ sample=latent_model_input,
978
+ timestep=t,
979
+ encoder_hidden_states=encoder_state,
980
+ cross_attention_kwargs=cross_attention_kwargs,
981
+ return_dict=True,
982
+ ).sample
983
+ else:
984
+ noise_pred = self.controlnet(
985
+ base_model=self.unet,
986
+ sample=latent_model_input,
987
+ timestep=t,
988
+ encoder_hidden_states=encoder_state,
989
+ controlnet_cond=image,
990
+ conditioning_scale=controlnet_conditioning_scale,
991
+ cross_attention_kwargs=cross_attention_kwargs,
992
+ return_dict=True,
993
+ ).sample
994
+
995
+ # perform guidance
996
+ if do_classifier_free_guidance:
997
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
998
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
999
+
1000
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1001
+
1002
+ # call the callback, if provided
1003
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1004
+ progress_bar.update()
1005
+ if callback is not None and i % callback_steps == 0:
1006
+ step_idx = i // getattr(self.scheduler, "order", 1)
1007
+ callback(step_idx, t, latents)
1008
+
1009
+ # If we do sequential model offloading, let's offload unet and controlnet
1010
+ # manually for max memory savings
1011
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1012
+ self.unet.to("cpu")
1013
+ self.controlnet.to("cpu")
1014
+ torch.cuda.empty_cache()
1015
+ if latent_processing == 1:
1016
+ if output_type == 'latent':
1017
+ lst_latent.append(self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0])
1018
+ return lst_latent
1019
+ if output_type == 'latent':
1020
+ return [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0],self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
1021
+ return [self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
1022
+
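The `pww_state`, `pww_attn_weight` and `weight_func` arguments of the `__call__` above drive the region-weighted attention. A hedged sketch of what that state looks like, based only on the keys the region encoder reads (`map`, `weight`, `mask_outsides`); the pipeline and ControlNet setup is omitted because it is repo-specific:

```python
# Illustrative construction of the region-control inputs consumed above. Each key of
# `pww_state` is a phrase that must also occur in the prompt; "map" is a grayscale
# mask where pixels < 255 mark the region, "weight" scales attention inside it and
# "mask_outsides" penalises the phrase outside it.
import numpy as np

region_map = np.full((512, 512), 255, dtype=np.uint8)  # white = outside the region
region_map[128:384, 128:384] = 0                       # black square = target region

pww_state = {
    "a red ball": {"map": region_map, "weight": 1.2, "mask_outsides": 0.2},
}

# Then passed alongside the usual arguments, e.g.:
#   pipe(prompt="a red ball on a wooden table", image=control_image,
#        pww_state=pww_state, pww_attn_weight=0.8,
#        weight_func=lambda w, sigma, qk: w * sigma * qk.std())
```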
modules/encode_region_map_function.py ADDED
@@ -0,0 +1,168 @@
 
 
1
+ from typing import Any, Callable, Dict, List, Optional, Union
2
+ import importlib
3
+ import inspect
4
+ import math
5
+ from pathlib import Path
6
+ import re
7
+ from collections import defaultdict
8
+ import cv2
9
+ import time
10
+ import numpy as np
11
+ import PIL
12
+ import torch
13
+ import torch.nn as nn
14
+ import torch.nn.functional as F
15
+ from torch import einsum
16
+ from torch.autograd.function import Function
17
+ from diffusers import DiffusionPipeline
18
+
19
+
20
+ # Support for finding the region associated with each prompted object
21
+ def encode_region_map_sp(state,tokenizer,unet,width,height, scale_ratio=8, text_ids=None,do_classifier_free_guidance = True):
22
+ if text_ids is None:
23
+ return torch.Tensor(0)
24
+ uncond, cond = text_ids[0], text_ids[1]
25
+
26
+ '''img_state = []
27
+
28
+
29
+ for k, v in state.items():
30
+ if v["map"] is None:
31
+ continue
32
+
33
+ v_input = tokenizer(
34
+ k,
35
+ max_length=tokenizer.model_max_length,
36
+ truncation=True,
37
+ add_special_tokens=False,
38
+ ).input_ids
39
+
40
+ dotmap = v["map"] < 255
41
+ out = dotmap.astype(float)
42
+ out = out * float(v["weight"]) * g_strength
43
+ #if v["mask_outsides"]:
44
+ out[out==0] = -1 * float(v["mask_outsides"])
45
+
46
+ arr = torch.from_numpy(
47
+ out
48
+ )
49
+ img_state.append((v_input, arr))
50
+
51
+ if len(img_state) == 0:
52
+ return torch.Tensor(0)'''
53
+
54
+ w_tensors = dict()
55
+ cond = cond.reshape(-1,).tolist() if isinstance(cond,np.ndarray) or isinstance(cond, torch.Tensor) else None
56
+ uncond = uncond.reshape(-1,).tolist() if isinstance(uncond,np.ndarray) or isinstance(uncond, torch.Tensor) else None
57
+ for layer in unet.down_blocks:
58
+ c = int(len(cond))
59
+ #w, h = img_state[0][1].shape
60
+ w_r, h_r = int(math.ceil(width / scale_ratio)), int(math.ceil(height / scale_ratio))
61
+
62
+ ret_cond_tensor = torch.zeros((1, int(w_r * h_r), c), dtype=torch.float32)
63
+ ret_uncond_tensor = torch.zeros((1, int(w_r * h_r), c), dtype=torch.float32)
64
+
65
+ #for v_as_tokens, img_where_color in img_state:
66
+ if state is not None:
67
+ for k, v in state.items():
68
+ if v["map"] is None:
69
+ continue
70
+ is_in = 0
71
+
72
+ k_as_tokens = tokenizer(
73
+ k,
74
+ max_length=tokenizer.model_max_length,
75
+ truncation=True,
76
+ add_special_tokens=False,
77
+ ).input_ids
78
+
79
+ region_map_resize = np.array(v["map"] < 255 ,dtype = np.uint8)
80
+ region_map_resize = cv2.resize(region_map_resize,(w_r,h_r),interpolation = cv2.INTER_CUBIC)
81
+ region_map_resize = (region_map_resize == np.max(region_map_resize)).astype(float)
82
+ region_map_resize = region_map_resize * float(v["weight"])
83
+ region_map_resize[region_map_resize==0] = -1 * float(v["mask_outsides"])
84
+ ret = torch.from_numpy(
85
+ region_map_resize
86
+ )
87
+ ret = ret.reshape(-1, 1).repeat(1, len(k_as_tokens))
88
+
89
+ '''ret = (
90
+ F.interpolate(
91
+ img_where_color.unsqueeze(0).unsqueeze(1),
92
+ scale_factor=1 / scale_ratio,
93
+ mode="bilinear",
94
+ align_corners=True,
95
+ )
96
+ .squeeze()
97
+ .reshape(-1, 1)
98
+ .repeat(1, len(v_as_tokens))
99
+ )'''
100
+
101
+ if cond is not None:
102
+ for idx, tok in enumerate(cond):
103
+ if cond[idx : idx + len(k_as_tokens)] == k_as_tokens:
104
+ is_in = 1
105
+ ret_cond_tensor[0, :, idx : idx + len(k_as_tokens)] += ret
106
+
107
+ if uncond is not None:
108
+ for idx, tok in enumerate(uncond):
109
+ if uncond[idx : idx + len(k_as_tokens)] == k_as_tokens:
110
+ is_in = 1
111
+ ret_uncond_tensor[0, :, idx : idx + len(k_as_tokens)] += ret
112
+
113
+ if not is_in == 1:
114
+ print(f"tokens {k_as_tokens} not found in text")
115
+
116
+ w_tensors[w_r * h_r] = torch.cat([ret_uncond_tensor, ret_cond_tensor]) if do_classifier_free_guidance else ret_cond_tensor
117
+ scale_ratio *= 2
118
+
119
+ return w_tensors
120
+
121
+ def encode_region_map(
122
+ pipe : DiffusionPipeline,
123
+ state,
124
+ width,
125
+ height,
126
+ num_images_per_prompt,
127
+ text_ids = None,
128
+ ):
129
+ negative_prompt_tokens_id, prompt_tokens_id = text_ids[0] , text_ids[1]
130
+ if prompt_tokens_id is None:
131
+ return torch.Tensor(0)
132
+ prompt_tokens_id = np.array(prompt_tokens_id)
133
+ negative_prompt_tokens_id = np.array(negative_prompt_tokens_id) if negative_prompt_tokens_id is not None else None
134
+
135
+ # Split the token ids per prompt
136
+ number_prompt = prompt_tokens_id.shape[0]
137
+ prompt_tokens_id = np.split(prompt_tokens_id,number_prompt)
138
+ negative_prompt_tokens_id = np.split(negative_prompt_tokens_id,number_prompt) if negative_prompt_tokens_id is not None else None
139
+ lst_prompt_map = []
140
+ if not isinstance(state,list):
141
+ state = [state]
142
+ if len(state) < number_prompt:
143
+ state = state + [None] * int(number_prompt - len(state))  # pad with None; `state` is already a list here
144
+ for i in range(0,number_prompt):
145
+ text_ids = [negative_prompt_tokens_id[i],prompt_tokens_id[i]] if negative_prompt_tokens_id is not None else [None,prompt_tokens_id[i]]
146
+ region_map = encode_region_map_sp(state[i],pipe.tokenizer,pipe.unet,width,height,scale_ratio = pipe.vae_scale_factor,text_ids = text_ids,do_classifier_free_guidance = pipe.do_classifier_free_guidance)
147
+ lst_prompt_map.append(region_map)
148
+
149
+ region_state_sp = {}
150
+ for d in lst_prompt_map:
151
+ for key, tensor in d.items():
152
+ if key in region_state_sp:
153
+ # If the key already exists, concatenate along the batch dimension
154
+ region_state_sp[key] = torch.cat((region_state_sp[key], tensor))
155
+ else:
156
+ # If the key does not exist yet, add it
157
+ region_state_sp[key] = tensor
158
+
159
+ # Expand each map for num_images_per_prompt
160
+ region_state = {}
161
+
162
+ for key, tensor in region_state_sp.items():
163
+ # Repeat along axis 0
164
+ region_state[key] = tensor.repeat(num_images_per_prompt,1,1)
165
+
166
+ return region_state
167
+
168
+
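As a sanity check on the bookkeeping in `encode_region_map_sp`, the mask is resized once per UNet down block, with `scale_ratio` doubling each time, and each result is keyed by the flattened latent size. A minimal standalone sketch, assuming an SD 1.x UNet with four down blocks and a VAE scale factor of 8:

```python
# Reproduces only the resolution schedule used by encode_region_map_sp above:
# one entry per UNet down block, with scale_ratio doubling each time.
import math

def layer_resolutions(width, height, num_down_blocks=4, scale_ratio=8):
    out = []
    for _ in range(num_down_blocks):
        w_r, h_r = math.ceil(width / scale_ratio), math.ceil(height / scale_ratio)
        out.append((w_r * h_r, (h_r, w_r)))  # key used in w_tensors, plus (rows, cols)
        scale_ratio *= 2
    return out

print(layer_resolutions(512, 512))
# [(4096, (64, 64)), (1024, (32, 32)), (256, (16, 16)), (64, (8, 8))]
```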
modules/encoder_prompt_modify.py ADDED
@@ -0,0 +1,831 @@
 
 
1
+ import re
2
+ import math
3
+ import numpy as np
4
+ import torch
5
+ from diffusers import DiffusionPipeline
6
+ from typing import Any, Callable, Dict, List, Optional, Union
7
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
8
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
9
+ from diffusers.utils import (
10
+ USE_PEFT_BACKEND,
11
+ deprecate,
12
+ logging,
13
+ replace_example_docstring,
14
+ scale_lora_layers,
15
+ unscale_lora_layers,
16
+ )
17
+ from .prompt_parser import FrozenCLIPEmbedderWithCustomWords
+
+ # module-level logger used for the truncation warnings in this file
+ logger = logging.get_logger(__name__)
18
+
19
+
20
+
21
+ re_attention = re.compile(
22
+ r"""
23
+ \\\(|
24
+ \\\)|
25
+ \\\[|
26
+ \\]|
27
+ \\\\|
28
+ \\|
29
+ \(|
30
+ \[|
31
+ :([+-]?[.\d]+)\)|
32
+ \)|
33
+ ]|
34
+ [^\\()\[\]:]+|
35
+ :
36
+ """,
37
+ re.X,
38
+ )
39
+
40
+
41
+ def parse_prompt_attention(text):
42
+ """
43
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
44
+ Accepted tokens are:
45
+ (abc) - increases attention to abc by a multiplier of 1.1
46
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
47
+ [abc] - decreases attention to abc by a multiplier of 1.1
48
+ \\( - literal character '('
49
+ \\[ - literal character '['
50
+ \\) - literal character ')'
51
+ \\] - literal character ']'
52
+ \\ - literal character '\'
53
+ anything else - just text
54
+ >>> parse_prompt_attention('normal text')
55
+ [['normal text', 1.0]]
56
+ >>> parse_prompt_attention('an (important) word')
57
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
58
+ >>> parse_prompt_attention('(unbalanced')
59
+ [['unbalanced', 1.1]]
60
+ >>> parse_prompt_attention('\\(literal\\]')
61
+ [['(literal]', 1.0]]
62
+ >>> parse_prompt_attention('(unnecessary)(parens)')
63
+ [['unnecessaryparens', 1.1]]
64
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
65
+ [['a ', 1.0],
66
+ ['house', 1.5730000000000004],
67
+ [' ', 1.1],
68
+ ['on', 1.0],
69
+ [' a ', 1.1],
70
+ ['hill', 0.55],
71
+ [', sun, ', 1.1],
72
+ ['sky', 1.4641000000000006],
73
+ ['.', 1.1]]
74
+ """
75
+
76
+ res = []
77
+ round_brackets = []
78
+ square_brackets = []
79
+
80
+ round_bracket_multiplier = 1.1
81
+ square_bracket_multiplier = 1 / 1.1
82
+
83
+ def multiply_range(start_position, multiplier):
84
+ for p in range(start_position, len(res)):
85
+ res[p][1] *= multiplier
86
+
87
+ for m in re_attention.finditer(text):
88
+ text = m.group(0)
89
+ weight = m.group(1)
90
+
91
+ if text.startswith("\\"):
92
+ res.append([text[1:], 1.0])
93
+ elif text == "(":
94
+ round_brackets.append(len(res))
95
+ elif text == "[":
96
+ square_brackets.append(len(res))
97
+ elif weight is not None and len(round_brackets) > 0:
98
+ multiply_range(round_brackets.pop(), float(weight))
99
+ elif text == ")" and len(round_brackets) > 0:
100
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
101
+ elif text == "]" and len(square_brackets) > 0:
102
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
103
+ else:
104
+ res.append([text, 1.0])
105
+
106
+ for pos in round_brackets:
107
+ multiply_range(pos, round_bracket_multiplier)
108
+
109
+ for pos in square_brackets:
110
+ multiply_range(pos, square_bracket_multiplier)
111
+
112
+ if len(res) == 0:
113
+ res = [["", 1.0]]
114
+
115
+ # merge runs of identical weights
116
+ i = 0
117
+ while i + 1 < len(res):
118
+ if res[i][1] == res[i + 1][1]:
119
+ res[i][0] += res[i + 1][0]
120
+ res.pop(i + 1)
121
+ else:
122
+ i += 1
123
+
124
+ return res
125
+
126
+
127
+ def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
128
+ r"""
129
+ Tokenize a list of prompts and return its tokens with weights of each token.
130
+
131
+ No padding, starting or ending token is included.
132
+ """
133
+ tokens = []
134
+ weights = []
135
+ truncated = False
136
+ for text in prompt:
137
+ texts_and_weights = parse_prompt_attention(text)
138
+ text_token = []
139
+ text_weight = []
140
+ for word, weight in texts_and_weights:
141
+ # tokenize and discard the starting and the ending token
142
+ token = pipe.tokenizer(word).input_ids[1:-1]
143
+ text_token += token
144
+ # copy the weight by length of token
145
+ text_weight += [weight] * len(token)
146
+ # stop if the text is too long (longer than truncation limit)
147
+ if len(text_token) > max_length:
148
+ truncated = True
149
+ break
150
+ # truncate
151
+ if len(text_token) > max_length:
152
+ truncated = True
153
+ text_token = text_token[:max_length]
154
+ text_weight = text_weight[:max_length]
155
+ tokens.append(text_token)
156
+ weights.append(text_weight)
157
+ if truncated:
158
+ logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
159
+ return tokens, weights
160
+
161
+
162
+ def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
163
+ r"""
164
+ Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
165
+ """
166
+ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
167
+ weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
168
+ for i in range(len(tokens)):
169
+ tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
170
+ if no_boseos_middle:
171
+ weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
172
+ else:
173
+ w = []
174
+ if len(weights[i]) == 0:
175
+ w = [1.0] * weights_length
176
+ else:
177
+ for j in range(max_embeddings_multiples):
178
+ w.append(1.0) # weight for starting token in this chunk
179
+ w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
180
+ w.append(1.0) # weight for ending token in this chunk
181
+ w += [1.0] * (weights_length - len(w))
182
+ weights[i] = w[:]
183
+
184
+ return tokens, weights
185
+
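A tiny worked example of the padding above, assuming CLIP-style special tokens (`bos=49406`, `eos=pad=49407`); the two prompt token ids are placeholders rather than real vocabulary entries:

```python
# Short prompt padded to one 77-token chunk; weights get 1.0 at the bos/eos/pad slots.
tokens = [[320, 1215]]      # placeholder ids for two prompt tokens
weights = [[1.0, 1.3]]      # e.g. "(word:1.3)" applied to the second token

tokens, weights = pad_tokens_and_weights(
    tokens, weights, max_length=77, bos=49406, eos=49407, pad=49407,
    no_boseos_middle=True, chunk_length=77,
)
print(len(tokens[0]), len(weights[0]))  # 77 77
print(tokens[0][:4], weights[0][:4])    # [49406, 320, 1215, 49407] [1.0, 1.0, 1.3, 1.0]
```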
186
+ def clip_skip_prompt(
187
+ pipe,
188
+ text_input,
189
+ clip_skip = None,
190
+ ):
191
+ # `text_input` is already a plain token-id tensor here (not a tokenizer output),
192
+ # so there is no tokenizer attention mask to forward to the text encoder
193
+ attention_mask = None
194
195
+ if clip_skip is not None and clip_skip > 1:
196
+ text_embedding = pipe.text_encoder(text_input, attention_mask=attention_mask, output_hidden_states=True)
197
+ # Access the `hidden_states` first, that contains a tuple of
198
+ # all the hidden states from the encoder layers. Then index into
199
+ # the tuple to access the hidden states from the desired layer.
200
+ text_embedding = text_embedding[-1][-clip_skip]
201
+ # We also need to apply the final LayerNorm here to not mess with the
202
+ # representations. The `last_hidden_states` that we typically use for
203
+ # obtaining the final prompt representations passes through the LayerNorm
204
+ # layer.
205
+ text_embedding = pipe.text_encoder.text_model.final_layer_norm(text_embedding)
206
+ else:
207
+ text_embedding = pipe.text_encoder(text_input, attention_mask=attention_mask)
208
+ text_embedding = text_embedding[0]
209
+
210
+ return text_embedding
211
+
212
+ def get_unweighted_text_embeddings(
213
+ pipe: DiffusionPipeline,
214
+ text_input: torch.Tensor,
215
+ chunk_length: int,
216
+ no_boseos_middle: Optional[bool] = True,
217
+ clip_skip : Optional[int] = None,
218
+ ):
219
+ """
220
+ When the length of tokens is a multiple of the capacity of the text encoder,
221
+ it should be split into chunks and sent to the text encoder individually.
222
+ """
223
+ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
224
+ if max_embeddings_multiples > 1:
225
+ text_embeddings = []
226
+ for i in range(max_embeddings_multiples):
227
+ # extract the i-th chunk
228
+ text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
229
+
230
+ # cover the head and the tail by the starting and the ending tokens
231
+ text_input_chunk[:, 0] = text_input[0, 0]
232
+ text_input_chunk[:, -1] = text_input[0, -1]
233
+
234
+ text_embedding = clip_skip_prompt(pipe,text_input_chunk,clip_skip)
235
+
236
+ if no_boseos_middle:
237
+ if i == 0:
238
+ # discard the ending token
239
+ text_embedding = text_embedding[:, :-1]
240
+ elif i == max_embeddings_multiples - 1:
241
+ # discard the starting token
242
+ text_embedding = text_embedding[:, 1:]
243
+ else:
244
+ # discard both starting and ending tokens
245
+ text_embedding = text_embedding[:, 1:-1]
246
+
247
+ text_embeddings.append(text_embedding)
248
+ text_embeddings = torch.concat(text_embeddings, axis=1)
249
+ else:
250
+ text_embeddings = clip_skip_prompt(pipe,text_input,clip_skip)
251
+ return text_embeddings
252
+
253
+
254
+ def get_weighted_text_embeddings(
255
+ pipe: DiffusionPipeline,
256
+ prompt: Union[str, List[str]],
257
+ uncond_prompt: Optional[Union[str, List[str]]] = None,
258
+ max_embeddings_multiples: Optional[int] = 3,
259
+ no_boseos_middle: Optional[bool] = False,
260
+ skip_parsing: Optional[bool] = False,
261
+ skip_weighting: Optional[bool] = False,
262
+ clip_skip : Optional[int] = None,
263
+ ):
264
+ r"""
265
+ Prompts can be assigned with local weights using brackets. For example,
266
+ prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
267
+ and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
268
+
269
+ Also, to regularize the embedding, the weighted embedding is rescaled to preserve the original mean.
270
+
271
+ Args:
272
+ pipe (`DiffusionPipeline`):
273
+ Pipe to provide access to the tokenizer and the text encoder.
274
+ prompt (`str` or `List[str]`):
275
+ The prompt or prompts to guide the image generation.
276
+ uncond_prompt (`str` or `List[str]`):
277
+ The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
278
+ is provided, the embeddings of prompt and uncond_prompt are concatenated.
279
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
280
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
281
+ no_boseos_middle (`bool`, *optional*, defaults to `False`):
282
+ If the length of the text tokens is a multiple of the text encoder's capacity, whether to keep the starting and
283
+ ending tokens in each of the chunks in the middle.
284
+ skip_parsing (`bool`, *optional*, defaults to `False`):
285
+ Skip the parsing of brackets.
286
+ skip_weighting (`bool`, *optional*, defaults to `False`):
287
+ Skip the weighting. When the parsing is skipped, it is forced True.
288
+ """
289
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
290
+ prompt_tokens_id = None
291
+ negative_prompt_tokens_id = None
292
+ if isinstance(prompt, str):
293
+ prompt = [prompt]
294
+
295
+ if not skip_parsing:
296
+ prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
297
+ if uncond_prompt is not None:
298
+ if isinstance(uncond_prompt, str):
299
+ uncond_prompt = [uncond_prompt]
300
+ uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
301
+ else:
302
+ prompt_tokens = [
303
+ token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
304
+ ]
305
+ prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
306
+ if uncond_prompt is not None:
307
+ if isinstance(uncond_prompt, str):
308
+ uncond_prompt = [uncond_prompt]
309
+ uncond_tokens = [
310
+ token[1:-1]
311
+ for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
312
+ ]
313
+ uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
314
+
315
+ # round up the longest length of tokens to a multiple of (model_max_length - 2)
316
+ max_length = max([len(token) for token in prompt_tokens])
317
+ if uncond_prompt is not None:
318
+ max_length = max(max_length, max([len(token) for token in uncond_tokens]))
319
+
320
+ max_embeddings_multiples = min(
321
+ max_embeddings_multiples,
322
+ (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
323
+ )
324
+ max_embeddings_multiples = max(1, max_embeddings_multiples)
325
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
326
+
327
+ # pad the length of tokens and weights
328
+ bos = pipe.tokenizer.bos_token_id
329
+ eos = pipe.tokenizer.eos_token_id
330
+ pad = getattr(pipe.tokenizer, "pad_token_id", eos)
331
+ prompt_tokens, prompt_weights = pad_tokens_and_weights(
332
+ prompt_tokens,
333
+ prompt_weights,
334
+ max_length,
335
+ bos,
336
+ eos,
337
+ pad,
338
+ no_boseos_middle=no_boseos_middle,
339
+ chunk_length=pipe.tokenizer.model_max_length,
340
+ )
341
+
342
+ prompt_tokens_id = np.array(prompt_tokens, dtype=np.int64)
343
+ prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
344
+ if uncond_prompt is not None:
345
+ uncond_tokens, uncond_weights = pad_tokens_and_weights(
346
+ uncond_tokens,
347
+ uncond_weights,
348
+ max_length,
349
+ bos,
350
+ eos,
351
+ pad,
352
+ no_boseos_middle=no_boseos_middle,
353
+ chunk_length=pipe.tokenizer.model_max_length,
354
+ )
355
+ negative_prompt_tokens_id = np.array(uncond_tokens, dtype=np.int64)
356
+ uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
357
+
358
+ # get the embeddings
359
+ text_embeddings = get_unweighted_text_embeddings(
360
+ pipe,
361
+ prompt_tokens,
362
+ pipe.tokenizer.model_max_length,
363
+ no_boseos_middle=no_boseos_middle,
364
+ clip_skip = clip_skip,
365
+ )
366
+ prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
367
+ if uncond_prompt is not None:
368
+ uncond_embeddings = get_unweighted_text_embeddings(
369
+ pipe,
370
+ uncond_tokens,
371
+ pipe.tokenizer.model_max_length,
372
+ no_boseos_middle=no_boseos_middle,
373
+ clip_skip = clip_skip,
374
+ )
375
+ uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
376
+
377
+ # assign weights to the prompts and normalize in the sense of mean
378
+ # TODO: should we normalize by chunk or in a whole (current implementation)?
379
+ if (not skip_parsing) and (not skip_weighting):
380
+ previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
381
+ text_embeddings *= prompt_weights.unsqueeze(-1)
382
+ current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
383
+ text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
384
+ if uncond_prompt is not None:
385
+ previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
386
+ uncond_embeddings *= uncond_weights.unsqueeze(-1)
387
+ current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
388
+ uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
389
+
390
+ if uncond_prompt is not None:
391
+ return text_embeddings, uncond_embeddings, negative_prompt_tokens_id, prompt_tokens_id
392
+ return text_embeddings, None, None, prompt_tokens_id
393
+
394
+
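A hedged usage sketch for `get_weighted_text_embeddings`; the checkpoint id is illustrative, and any SD 1.x pipeline that exposes `tokenizer` and `text_encoder` should work the same way:

```python
import torch
from diffusers import StableDiffusionPipeline

# Illustrative checkpoint; swap in whatever SD 1.x pipeline the app actually loads.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

cond, uncond, neg_ids, pos_ids = get_weighted_text_embeddings(
    pipe=pipe,
    prompt="a (very beautiful:1.3) castle, volumetric lighting, highly detailed",
    uncond_prompt="lowres, (bad anatomy:1.2)",
    max_embeddings_multiples=3,  # allow up to three 75-token chunks
    clip_skip=2,                 # take the penultimate CLIP hidden state
)
# cond/uncond feed the UNet as encoder hidden states; neg_ids/pos_ids are the padded
# token-id arrays that encode_region_map uses to locate region phrases in the prompt.
```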
395
+ def encoder_long_prompt(
396
+ pipe,
397
+ prompt,
398
+ device,
399
+ num_images_per_prompt,
400
+ do_classifier_free_guidance,
401
+ negative_prompt=None,
402
+ prompt_embeds: Optional[torch.Tensor] = None,
403
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
404
+ lora_scale: Optional[float] = None,
405
+ clip_skip : Optional[int] = None,
406
+ max_embeddings_multiples: Optional[int] = 3,
407
+ ):
408
+ r"""
409
+ Encodes the prompt into text encoder hidden states.
410
+
411
+ Args:
412
+ prompt (`str` or `list(int)`):
413
+ prompt to be encoded
414
+ device: (`torch.device`):
415
+ torch device
416
+ num_images_per_prompt (`int`):
417
+ number of images that should be generated per prompt
418
+ do_classifier_free_guidance (`bool`):
419
+ whether to use classifier free guidance or not
420
+ negative_prompt (`str` or `List[str]`):
421
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
422
+ if `guidance_scale` is less than `1`).
423
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
424
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
425
+ """
426
+
427
+ # set lora scale so that monkey patched LoRA
428
+ # function of text encoder can correctly access it
429
+ if lora_scale is not None and isinstance(pipe, LoraLoaderMixin):
430
+ pipe._lora_scale = lora_scale
431
+ # dynamically adjust the LoRA scale
432
+ if not USE_PEFT_BACKEND:
433
+ adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale)
434
+ else:
435
+ scale_lora_layers(pipe.text_encoder, lora_scale)
436
+ if prompt is not None and isinstance(prompt, str):
437
+ batch_size = 1
438
+ elif prompt is not None and isinstance(prompt, list):
439
+ batch_size = len(prompt)
440
+ else:
441
+ batch_size = prompt_embeds.shape[0]
442
+
443
+ negative_prompt_tokens_id, prompt_tokens_id = None, None
444
+ if negative_prompt_embeds is None:
445
+ if negative_prompt is None:
446
+ negative_prompt = [""] * batch_size
447
+ elif isinstance(negative_prompt, str):
448
+ negative_prompt = [negative_prompt] * batch_size
449
+ if batch_size != len(negative_prompt):
450
+ raise ValueError(
451
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
452
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
453
+ " the batch size of `prompt`."
454
+ )
455
+ if prompt_embeds is None or negative_prompt_embeds is None:
456
+ if isinstance(pipe, TextualInversionLoaderMixin):
457
+ prompt = pipe.maybe_convert_prompt(prompt, pipe.tokenizer)
458
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
459
+ negative_prompt = pipe.maybe_convert_prompt(negative_prompt, pipe.tokenizer)
460
+
461
+ prompt_embeds1, negative_prompt_embeds1, negative_prompt_tokens_id, prompt_tokens_id = get_weighted_text_embeddings(
462
+ pipe=pipe,
463
+ prompt=prompt,
464
+ uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
465
+ max_embeddings_multiples=int(max_embeddings_multiples),
466
+ clip_skip = clip_skip,
467
+ )
468
+ if prompt_embeds is None:
469
+ prompt_embeds = prompt_embeds1
470
+ if negative_prompt_embeds is None:
471
+ negative_prompt_embeds = negative_prompt_embeds1
472
+
473
+ bs_embed, seq_len, _ = prompt_embeds.shape
474
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
475
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
476
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
477
+
478
+ if do_classifier_free_guidance:
479
+ bs_embed, seq_len, _ = negative_prompt_embeds.shape
480
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
481
+ negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
482
+
483
+ if isinstance(pipe, LoraLoaderMixin) and USE_PEFT_BACKEND:
484
+ # Retrieve the original scale by scaling back the LoRA layers
485
+ unscale_lora_layers(pipe.text_encoder, lora_scale)
486
+
487
+ return prompt_embeds, negative_prompt_embeds, [negative_prompt_tokens_id, prompt_tokens_id]
488
+
489
+
490
+
491
+
492
+ def encode_short_prompt(
493
+ pipe,
494
+ prompt,
495
+ device,
496
+ num_images_per_prompt,
497
+ do_classifier_free_guidance,
498
+ negative_prompt=None,
499
+ prompt_embeds: Optional[torch.Tensor] = None,
500
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
501
+ lora_scale: Optional[float] = None,
502
+ clip_skip: Optional[int] = None,
503
+ ):
504
+ r"""
505
+ Encodes the prompt into text encoder hidden states.
506
+
507
+ Args:
508
+ prompt (`str` or `List[str]`, *optional*):
509
+ prompt to be encoded
510
+ device: (`torch.device`):
511
+ torch device
512
+ num_images_per_prompt (`int`):
513
+ number of images that should be generated per prompt
514
+ do_classifier_free_guidance (`bool`):
515
+ whether to use classifier free guidance or not
516
+ negative_prompt (`str` or `List[str]`, *optional*):
517
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
518
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
519
+ less than `1`).
520
+ prompt_embeds (`torch.Tensor`, *optional*):
521
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
522
+ provided, text embeddings will be generated from `prompt` input argument.
523
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
524
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
525
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
526
+ argument.
527
+ lora_scale (`float`, *optional*):
528
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
529
+ clip_skip (`int`, *optional*):
530
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
531
+ the output of the pre-final layer will be used for computing the prompt embeddings.
532
+ """
533
+ # set lora scale so that monkey patched LoRA
534
+ # function of text encoder can correctly access it
535
+ if lora_scale is not None and isinstance(pipe, LoraLoaderMixin):
536
+ pipe._lora_scale = lora_scale
537
+
538
+ # dynamically adjust the LoRA scale
539
+ if not USE_PEFT_BACKEND:
540
+ adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale)
541
+ else:
542
+ scale_lora_layers(pipe.text_encoder, lora_scale)
543
+
544
+ if prompt is not None and isinstance(prompt, str):
545
+ batch_size = 1
546
+ elif prompt is not None and isinstance(prompt, list):
547
+ batch_size = len(prompt)
548
+ else:
549
+ batch_size = prompt_embeds.shape[0]
550
+
551
+ prompt_tokens_id = None
552
+ negative_prompt_tokens_id = None
553
+
554
+ if prompt_embeds is None:
555
+ # textual inversion: process multi-vector tokens if necessary
556
+ if isinstance(pipe, TextualInversionLoaderMixin):
557
+ prompt = pipe.maybe_convert_prompt(prompt, pipe.tokenizer)
558
+
559
+ text_inputs = pipe.tokenizer(
560
+ prompt,
561
+ padding="max_length",
562
+ max_length=pipe.tokenizer.model_max_length,
563
+ truncation=True,
564
+ return_tensors="pt",
565
+ )
566
+ text_input_ids = text_inputs.input_ids
567
+ prompt_tokens_id = text_inputs.input_ids.detach().cpu().numpy()
568
+ untruncated_ids = pipe.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
569
+
570
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
571
+ text_input_ids, untruncated_ids
572
+ ):
573
+ removed_text = pipe.tokenizer.batch_decode(
574
+ untruncated_ids[:, pipe.tokenizer.model_max_length - 1 : -1]
575
+ )
576
+ logger.warning(
577
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
578
+ f" {pipe.tokenizer.model_max_length} tokens: {removed_text}"
579
+ )
580
+
581
+ if hasattr(pipe.text_encoder.config, "use_attention_mask") and pipe.text_encoder.config.use_attention_mask:
582
+ attention_mask = text_inputs.attention_mask.to(device)
583
+ else:
584
+ attention_mask = None
585
+
586
+ if clip_skip is not None and clip_skip > 1:
587
+ prompt_embeds = pipe.text_encoder(
588
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
589
+ )
590
+ # Access the `hidden_states` first, that contains a tuple of
591
+ # all the hidden states from the encoder layers. Then index into
592
+ # the tuple to access the hidden states from the desired layer.
593
+ prompt_embeds = prompt_embeds[-1][-clip_skip]
594
+ # We also need to apply the final LayerNorm here to not mess with the
595
+ # representations. The `last_hidden_states` that we typically use for
596
+ # obtaining the final prompt representations passes through the LayerNorm
597
+ # layer.
598
+ prompt_embeds = pipe.text_encoder.text_model.final_layer_norm(prompt_embeds)
599
+ else:
600
+ prompt_embeds = pipe.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
601
+ prompt_embeds = prompt_embeds[0]
602
+
603
+ if pipe.text_encoder is not None:
604
+ prompt_embeds_dtype = pipe.text_encoder.dtype
605
+ elif pipe.unet is not None:
606
+ prompt_embeds_dtype = pipe.unet.dtype
607
+ else:
608
+ prompt_embeds_dtype = prompt_embeds.dtype
609
+
610
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
611
+
612
+ bs_embed, seq_len, _ = prompt_embeds.shape
613
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
614
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
615
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
616
+
617
+ # get unconditional embeddings for classifier free guidance
618
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
619
+ uncond_tokens: List[str]
620
+ if negative_prompt is None:
621
+ uncond_tokens = [""] * batch_size
622
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
623
+ raise TypeError(
624
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
625
+ f" {type(prompt)}."
626
+ )
627
+ elif isinstance(negative_prompt, str):
628
+ uncond_tokens = [negative_prompt]
629
+ elif batch_size != len(negative_prompt):
630
+ raise ValueError(
631
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
632
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
633
+ " the batch size of `prompt`."
634
+ )
635
+ else:
636
+ uncond_tokens = negative_prompt
637
+
638
+ # textual inversion: process multi-vector tokens if necessary
639
+ if isinstance(pipe, TextualInversionLoaderMixin):
640
+ uncond_tokens = pipe.maybe_convert_prompt(uncond_tokens, pipe.tokenizer)
641
+
642
+ max_length = prompt_embeds.shape[1]
643
+ uncond_input = pipe.tokenizer(
644
+ uncond_tokens,
645
+ padding="max_length",
646
+ max_length=max_length,
647
+ truncation=True,
648
+ return_tensors="pt",
649
+ )
650
+ negative_prompt_tokens_id = uncond_input.input_ids.detach().cpu().numpy()
651
+
652
+ if hasattr(pipe.text_encoder.config, "use_attention_mask") and pipe.text_encoder.config.use_attention_mask:
653
+ attention_mask = uncond_input.attention_mask.to(device)
654
+ else:
655
+ attention_mask = None
656
+
657
+ if clip_skip is not None and clip_skip > 1:
658
+ negative_prompt_embeds = pipe.text_encoder(
659
+ uncond_input.input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
660
+ )
661
+ # Access the `hidden_states` first, that contains a tuple of
662
+ # all the hidden states from the encoder layers. Then index into
663
+ # the tuple to access the hidden states from the desired layer.
664
+ negative_prompt_embeds = negative_prompt_embeds[-1][-clip_skip]
665
+ # We also need to apply the final LayerNorm here to not mess with the
666
+ # representations. The `last_hidden_states` that we typically use for
667
+ # obtaining the final prompt representations passes through the LayerNorm
668
+ # layer.
669
+ negative_prompt_embeds = pipe.text_encoder.text_model.final_layer_norm(negative_prompt_embeds)
670
+ else:
671
+ negative_prompt_embeds = pipe.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask)
672
+ negative_prompt_embeds = negative_prompt_embeds[0]
673
+
674
+ if do_classifier_free_guidance:
675
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
676
+ seq_len = negative_prompt_embeds.shape[1]
677
+
678
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
679
+
680
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
681
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
682
+
683
+ if isinstance(pipe, LoraLoaderMixin) and USE_PEFT_BACKEND:
684
+ # Retrieve the original scale by scaling back the LoRA layers
685
+ unscale_lora_layers(pipe.text_encoder, lora_scale)
686
+
687
+ return prompt_embeds, negative_prompt_embeds, [negative_prompt_tokens_id, prompt_tokens_id]
688
+
689
+
690
+
691
+ def encode_prompt_automatic1111(
692
+ pipe,
693
+ prompt,
694
+ device,
695
+ num_images_per_prompt,
696
+ do_classifier_free_guidance,
697
+ negative_prompt=None,
698
+ prompt_embeds: Optional[torch.Tensor] = None,
699
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
700
+ lora_scale: Optional[float] = None,
701
+ clip_skip: Optional[int] = None,
702
+ ):
703
+ if lora_scale is not None and isinstance(pipe, LoraLoaderMixin):
704
+ pipe._lora_scale = lora_scale
705
+
706
+ # dynamically adjust the LoRA scale
707
+ if not USE_PEFT_BACKEND:
708
+ adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale)
709
+ else:
710
+ scale_lora_layers(pipe.text_encoder, lora_scale)
711
+
712
+ if prompt is not None and isinstance(prompt, str):
713
+ batch_size = 1
714
+ elif prompt is not None and isinstance(prompt, list):
715
+ batch_size = len(prompt)
716
+ else:
717
+ batch_size = prompt_embeds.shape[0]
718
+
719
+ prompt_tokens_id = None
720
+ negative_prompt_tokens_id = None
721
+
722
+
723
+ # get unconditional embeddings for classifier free guidance
724
+ uncond_tokens = []
725
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
726
+ if negative_prompt is None:
727
+ uncond_tokens = [""] * batch_size
728
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
729
+ raise TypeError(
730
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
731
+ f" {type(prompt)}."
732
+ )
733
+ elif isinstance(negative_prompt, str):
734
+ uncond_tokens = [negative_prompt] + [""] * (batch_size - 1)
735
+ elif batch_size != len(negative_prompt):
736
+ raise ValueError(
737
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
738
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
739
+ " the batch size of `prompt`."
740
+ )
741
+ else:
742
+ uncond_tokens = negative_prompt
743
+
744
+ # textual inversion: process multi-vector tokens if necessary
745
+ if isinstance(pipe, TextualInversionLoaderMixin):
746
+ uncond_tokens = pipe.maybe_convert_prompt(uncond_tokens, pipe.tokenizer)
747
+ if len(uncond_tokens) == 0:
748
+ uncond_tokens = [""]* batch_size
749
+ # textual inversion: process multi-vector tokens if necessary
750
+ if isinstance(pipe, TextualInversionLoaderMixin):
751
+ uncond_tokens = pipe.maybe_convert_prompt(uncond_tokens, pipe.tokenizer)
752
+
753
+ if prompt_embeds is None:
754
+ if not isinstance(prompt,list):
755
+ prompt = [prompt]
756
+ # textual inversion: process multi-vector tokens if necessary
757
+ if isinstance(pipe, TextualInversionLoaderMixin):
758
+ prompt = pipe.maybe_convert_prompt(prompt, pipe.tokenizer)
759
+
760
+ prompt_parser = FrozenCLIPEmbedderWithCustomWords(pipe.tokenizer, pipe.text_encoder,clip_skip)
761
+ prompt_embeds_lst = []
762
+ negative_prompt_embeds_lst =[]
763
+ negative_prompt_tokens_id_lst =[]
764
+ prompt_tokens_id_lst =[]
765
+ for i in range(0,batch_size):
766
+ text_ids, text_embeddings = prompt_parser([uncond_tokens[i], prompt[i]])
767
+ negative_prompt_embeddings, prompt_embeddings = torch.chunk(text_embeddings, 2, dim=0)
768
+ text_ids = np.split(text_ids,text_ids.shape[0])
769
+ negative_prompt_embeddings_id, prompt_embeddings_id = text_ids[0], text_ids[1]
770
+ prompt_embeds_lst.append(prompt_embeddings)
771
+ negative_prompt_embeds_lst.append(negative_prompt_embeddings)
772
+ negative_prompt_tokens_id_lst.append(negative_prompt_embeddings_id)
773
+ prompt_tokens_id_lst.append(prompt_embeddings_id)
774
+
775
+ if prompt_embeds is None:
776
+ prompt_embeds = torch.cat(prompt_embeds_lst)
777
+ prompt_tokens_id = np.concatenate(prompt_tokens_id_lst)
778
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
779
+ negative_prompt_embeds = torch.cat(negative_prompt_embeds_lst)
780
+ negative_prompt_tokens_id = np.concatenate(negative_prompt_tokens_id_lst)
781
+
782
+ if pipe.text_encoder is not None:
783
+ prompt_embeds_dtype = pipe.text_encoder.dtype
784
+ elif pipe.unet is not None:
785
+ prompt_embeds_dtype = pipe.unet.dtype
786
+ else:
787
+ prompt_embeds_dtype = prompt_embeds.dtype
788
+
789
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
790
+
791
+ bs_embed, seq_len, _ = prompt_embeds.shape
792
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
793
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
794
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
795
+
796
+ if do_classifier_free_guidance:
797
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
798
+ seq_len = negative_prompt_embeds.shape[1]
799
+
800
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
801
+
802
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
803
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
804
+
805
+ if isinstance(pipe, LoraLoaderMixin) and USE_PEFT_BACKEND:
806
+ # Retrieve the original scale by scaling back the LoRA layers
807
+ unscale_lora_layers(pipe.text_encoder, lora_scale)
808
+
809
+ return prompt_embeds, negative_prompt_embeds, [negative_prompt_tokens_id, prompt_tokens_id]
810
+
811
+
812
+
813
+
814
+ def encode_prompt_function(
815
+ pipe,
816
+ prompt,
817
+ device,
818
+ num_images_per_prompt,
819
+ do_classifier_free_guidance,
820
+ negative_prompt=None,
821
+ prompt_embeds: Optional[torch.Tensor] = None,
822
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
823
+ lora_scale: Optional[float] = None,
824
+ clip_skip: Optional[int] = None,
825
+ long_encode: Optional[int] = 0,
826
+ ):
827
+ if long_encode == 0:
828
+ return encode_prompt_automatic1111(pipe, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds, negative_prompt_embeds, lora_scale, clip_skip)
829
+ elif long_encode == 1:
830
+ return encoder_long_prompt(pipe, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds, negative_prompt_embeds, lora_scale, clip_skip)
831
+ return encode_short_prompt(pipe, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds, negative_prompt_embeds, lora_scale, clip_skip)
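A minimal usage sketch of the prompt-encoding dispatcher above, assuming these helpers are importable as `modules.attention_modify` and a Stable Diffusion 1.x checkpoint is available; the model id, prompts, and `clip_skip` value are illustrative, not part of the upload:

```python
import torch
from diffusers import StableDiffusionPipeline
from modules.attention_modify import encode_prompt_function  # assumed import path

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# long_encode selects the encoder: 0 = A1111-style weighted parsing,
# 1 = long-prompt chunking, anything else = the plain CLIP encoder.
prompt_embeds, negative_prompt_embeds, token_ids = encode_prompt_function(
    pipe,
    prompt="a watercolor painting of a lighthouse at dawn",
    device=pipe.device,
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,
    negative_prompt="blurry, low quality",
    clip_skip=2,
    long_encode=0,
)

image = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
).images[0]
```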
modules/external_k_diffusion.py ADDED
@@ -0,0 +1,182 @@
1
+ import math
2
+
3
+ import torch
4
+ from torch import nn
5
+ import k_diffusion
6
+ from k_diffusion import sampling, utils
7
+
8
+ class VDenoiser(nn.Module):
9
+ """A v-diffusion-pytorch model wrapper for k-diffusion."""
10
+
11
+ def __init__(self, inner_model):
12
+ super().__init__()
13
+ self.inner_model = inner_model
14
+ self.sigma_data = 1.
15
+
16
+ def get_scalings(self, sigma):
17
+ c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2)
18
+ c_out = -sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
19
+ c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
20
+ return c_skip, c_out, c_in
21
+
22
+ def sigma_to_t(self, sigma):
23
+ return sigma.atan() / math.pi * 2
24
+
25
+ def t_to_sigma(self, t):
26
+ return (t * math.pi / 2).tan()
27
+
28
+ def loss(self, input, noise, sigma, **kwargs):
29
+ c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
30
+ noised_input = input + noise * utils.append_dims(sigma, input.ndim)
31
+ model_output = self.inner_model(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
32
+ target = (input - c_skip * noised_input) / c_out
33
+ return (model_output - target).pow(2).flatten(1).mean(1)
34
+
35
+ def forward(self, input, sigma, **kwargs):
36
+ c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
37
+ return self.inner_model(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip
38
+
39
+
40
+ class DiscreteSchedule(nn.Module):
41
+ """A mapping between continuous noise levels (sigmas) and a list of discrete noise
42
+ levels."""
43
+
44
+ def __init__(self, sigmas, quantize):
45
+ super().__init__()
46
+ self.register_buffer('sigmas', sigmas)
47
+ self.register_buffer('log_sigmas', sigmas.log())
48
+ self.quantize = quantize
49
+
50
+ @property
51
+ def sigma_min(self):
52
+ return self.sigmas[0]
53
+
54
+ @property
55
+ def sigma_max(self):
56
+ return self.sigmas[-1]
57
+
58
+ def get_sigmas(self, n=None):
59
+ if n is None:
60
+ return sampling.append_zero(self.sigmas.flip(0))
61
+ t_max = len(self.sigmas) - 1
62
+ t = torch.linspace(t_max, 0, n, device=self.sigmas.device)
63
+ return sampling.append_zero(self.t_to_sigma(t))
64
+
65
+ def sigma_to_t(self, sigma, quantize=None):
66
+ quantize = self.quantize if quantize is None else quantize
67
+ log_sigma = sigma.log()
68
+ dists = log_sigma - self.log_sigmas[:, None]
69
+ if quantize:
70
+ return dists.abs().argmin(dim=0).view(sigma.shape)
71
+ low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
72
+ high_idx = low_idx + 1
73
+ low, high = self.log_sigmas[low_idx], self.log_sigmas[high_idx]
74
+ w = (low - log_sigma) / (low - high)
75
+ w = w.clamp(0, 1)
76
+ t = (1 - w) * low_idx + w * high_idx
77
+ return t.view(sigma.shape)
78
+
79
+ def t_to_sigma(self, t):
80
+ t = t.float()
81
+ low_idx, high_idx, w = t.floor().long(), t.ceil().long(), t.frac()
82
+ log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
83
+ return log_sigma.exp()
84
+
85
+
86
+ class DiscreteEpsDDPMDenoiser(DiscreteSchedule):
87
+ """A wrapper for discrete schedule DDPM models that output eps (the predicted
88
+ noise)."""
89
+
90
+ def __init__(self, model, alphas_cumprod, quantize):
91
+ super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize)
92
+ self.inner_model = model
93
+ self.sigma_data = 1.
94
+
95
+ def get_scalings(self, sigma):
96
+ c_out = -sigma
97
+ c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
98
+ return c_out, c_in
99
+
100
+ def get_eps(self, *args, **kwargs):
101
+ return self.inner_model(*args, **kwargs)
102
+
103
+ def loss(self, input, noise, sigma, **kwargs):
104
+ c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
105
+ noised_input = input + noise * utils.append_dims(sigma, input.ndim)
106
+ eps = self.get_eps(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
107
+ return (eps - noise).pow(2).flatten(1).mean(1)
108
+
109
+ def forward(self, input, sigma, **kwargs):
110
+ c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
111
+ eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
112
+ # !!! fix for special models (controlnet, inpaint, depth, ..)
113
+ input = input[:, :eps.shape[1],...]
114
+ return input + eps * c_out
115
+
116
+
117
+ class OpenAIDenoiser(DiscreteEpsDDPMDenoiser):
118
+ """A wrapper for OpenAI diffusion models."""
119
+
120
+ def __init__(self, model, diffusion, quantize=False, has_learned_sigmas=True, device='cpu'):
121
+ alphas_cumprod = torch.tensor(diffusion.alphas_cumprod, device=device, dtype=torch.float32)
122
+ super().__init__(model, alphas_cumprod, quantize=quantize)
123
+ self.has_learned_sigmas = has_learned_sigmas
124
+
125
+ def get_eps(self, *args, **kwargs):
126
+ model_output = self.inner_model(*args, **kwargs)
127
+ if self.has_learned_sigmas:
128
+ return model_output.chunk(2, dim=1)[0]
129
+ return model_output
130
+
131
+
132
+ class CompVisDenoiser(DiscreteEpsDDPMDenoiser):
133
+ """A wrapper for CompVis diffusion models."""
134
+
135
+ def __init__(self, model, quantize=False, device='cpu'):
136
+ super().__init__(model, model.alphas_cumprod, quantize=quantize)
137
+
138
+ def get_eps(self, *args, **kwargs):
139
+ return self.inner_model.apply_model(*args, **kwargs)
140
+
141
+
142
+ class DiscreteVDDPMDenoiser(DiscreteSchedule):
143
+ """A wrapper for discrete schedule DDPM models that output v."""
144
+
145
+ def __init__(self, model, alphas_cumprod, quantize):
146
+ super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize)
147
+ self.inner_model = model
148
+ self.sigma_data = 1.
149
+
150
+ def get_scalings(self, sigma):
151
+ c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2)
152
+ c_out = -sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
153
+ c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
154
+ return c_skip, c_out, c_in
155
+
156
+ def get_v(self, *args, **kwargs):
157
+ return self.inner_model(*args, **kwargs)
158
+
159
+ def loss(self, input, noise, sigma, **kwargs):
160
+ c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
161
+ noised_input = input + noise * utils.append_dims(sigma, input.ndim)
162
+ model_output = self.get_v(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
163
+ target = (input - c_skip * noised_input) / c_out
164
+ return (model_output - target).pow(2).flatten(1).mean(1)
165
+
166
+ def forward(self, input, sigma, **kwargs):
167
+ c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
168
+ vout = self.get_v(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out
169
+ # !!! fix for special models (controlnet, upscale, ..)
170
+ input = input[:, :vout.shape[1],...]
171
+ return vout + input * c_skip
172
+ #return self.get_v(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip
173
+
174
+
175
+ class CompVisVDenoiser(DiscreteVDDPMDenoiser):
176
+ """A wrapper for CompVis diffusion models that output v."""
177
+
178
+ def __init__(self, model, quantize=False, device='cpu'):
179
+ super().__init__(model, model.alphas_cumprod, quantize=quantize)
180
+
181
+ def get_v(self, x, t, cond, **kwargs):
182
+ return self.inner_model.apply_model(x, t, cond)
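A minimal sketch of how these wrappers plug a discrete eps-prediction model into k-diffusion's samplers; the toy model, beta schedule, and latent shape below are placeholders rather than anything shipped in this repo, and the import path is assumed from the file layout:

```python
import torch
from k_diffusion import sampling
from modules.external_k_diffusion import DiscreteEpsDDPMDenoiser  # assumed import path

class ToyEpsModel(torch.nn.Module):
    """Stand-in for a UNet: predicts the noise from (x, t)."""
    def forward(self, x, t, **kwargs):
        return torch.zeros_like(x)

# A DDPM-style alphas_cumprod schedule (1000 steps, linear betas).
betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

denoiser = DiscreteEpsDDPMDenoiser(ToyEpsModel(), alphas_cumprod, quantize=False)

sigmas = denoiser.get_sigmas(20)            # 20 sampling steps, descending, ending at 0
x = torch.randn(1, 4, 64, 64) * sigmas[0]   # start from pure noise at sigma_max
sample = sampling.sample_euler(denoiser, x, sigmas)
```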
modules/ip_adapter.py ADDED
@@ -0,0 +1,343 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from pathlib import Path
16
+ from typing import Callable, Dict, List, Optional, Union
17
+
18
+ import torch
19
+ import torch.nn.functional as F
20
+ from huggingface_hub.utils import validate_hf_hub_args
21
+ from safetensors import safe_open
22
+
23
+ from diffusers.models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_state_dict
24
+
25
+
26
+ from diffusers.utils import (
27
+ USE_PEFT_BACKEND,
28
+ _get_model_file,
29
+ is_accelerate_available,
30
+ is_torch_version,
31
+ is_transformers_available,
32
+ logging,
33
+ )
34
+
35
+ from diffusers.loaders.unet_loader_utils import _maybe_expand_lora_scales
36
+
37
+
38
+
39
+ if is_transformers_available():
40
+ from transformers import (
41
+ CLIPImageProcessor,
42
+ CLIPVisionModelWithProjection,
43
+ )
44
+
45
+ from .attention_modify import (
46
+ AttnProcessor,
47
+ IPAdapterAttnProcessor,
48
+ AttnProcessor2_0,
49
+ IPAdapterAttnProcessor2_0
50
+ )
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+
55
+ class IPAdapterMixin:
56
+ """Mixin for handling IP Adapters."""
57
+
58
+ @validate_hf_hub_args
59
+ def load_ip_adapter(
60
+ self,
61
+ pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]],
62
+ subfolder: Union[str, List[str]],
63
+ weight_name: Union[str, List[str]],
64
+ image_encoder_folder: Optional[str] = "image_encoder",
65
+ **kwargs,
66
+ ):
67
+ """
68
+ Parameters:
69
+ pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`):
70
+ Can be either:
71
+
72
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
73
+ the Hub.
74
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
75
+ with [`ModelMixin.save_pretrained`].
76
+ - A [torch state
77
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
78
+ subfolder (`str` or `List[str]`):
79
+ The subfolder location of a model file within a larger model repository on the Hub or locally. If a
80
+ list is passed, it should have the same length as `weight_name`.
81
+ weight_name (`str` or `List[str]`):
82
+ The name of the weight file to load. If a list is passed, it should have the same length as
83
+ `weight_name`.
84
+ image_encoder_folder (`str`, *optional*, defaults to `image_encoder`):
85
+ The subfolder location of the image encoder within a larger model repository on the Hub or locally.
86
+ Pass `None` to not load the image encoder. If the image encoder is located in a folder inside
87
+ `subfolder`, you only need to pass the name of the folder that contains image encoder weights, e.g.
88
+ `image_encoder_folder="image_encoder"`. If the image encoder is located in a folder other than
89
+ `subfolder`, you should pass the path to the folder that contains image encoder weights, for example,
90
+ `image_encoder_folder="different_subfolder/image_encoder"`.
91
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
92
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
93
+ is not used.
94
+ force_download (`bool`, *optional*, defaults to `False`):
95
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
96
+ cached versions if they exist.
97
+ resume_download:
98
+ Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1
99
+ of Diffusers.
100
+ proxies (`Dict[str, str]`, *optional*):
101
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
102
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
103
+ local_files_only (`bool`, *optional*, defaults to `False`):
104
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
105
+ won't be downloaded from the Hub.
106
+ token (`str` or *bool*, *optional*):
107
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
108
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
109
+ revision (`str`, *optional*, defaults to `"main"`):
110
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
111
+ allowed by Git.
112
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
113
+ Speed up model loading only loading the pretrained weights and not initializing the weights. This also
114
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
115
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
116
+ argument to `True` will raise an error.
117
+ """
118
+
119
+ # handle the list inputs for multiple IP Adapters
120
+ if not isinstance(weight_name, list):
121
+ weight_name = [weight_name]
122
+
123
+ if not isinstance(pretrained_model_name_or_path_or_dict, list):
124
+ pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict]
125
+ if len(pretrained_model_name_or_path_or_dict) == 1:
126
+ pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name)
127
+
128
+ if not isinstance(subfolder, list):
129
+ subfolder = [subfolder]
130
+ if len(subfolder) == 1:
131
+ subfolder = subfolder * len(weight_name)
132
+
133
+ if len(weight_name) != len(pretrained_model_name_or_path_or_dict):
134
+ raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.")
135
+
136
+ if len(weight_name) != len(subfolder):
137
+ raise ValueError("`weight_name` and `subfolder` must have the same length.")
138
+
139
+ # Load the main state dict first.
140
+ cache_dir = kwargs.pop("cache_dir", None)
141
+ force_download = kwargs.pop("force_download", False)
142
+ resume_download = kwargs.pop("resume_download", None)
143
+ proxies = kwargs.pop("proxies", None)
144
+ local_files_only = kwargs.pop("local_files_only", None)
145
+ token = kwargs.pop("token", None)
146
+ revision = kwargs.pop("revision", None)
147
+ low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
148
+
149
+ if low_cpu_mem_usage and not is_accelerate_available():
150
+ low_cpu_mem_usage = False
151
+ logger.warning(
152
+ "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
153
+ " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
154
+ " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
155
+ " install accelerate\n```\n."
156
+ )
157
+
158
+ if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
159
+ raise NotImplementedError(
160
+ "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
161
+ " `low_cpu_mem_usage=False`."
162
+ )
163
+
164
+ user_agent = {
165
+ "file_type": "attn_procs_weights",
166
+ "framework": "pytorch",
167
+ }
168
+ state_dicts = []
169
+ for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip(
170
+ pretrained_model_name_or_path_or_dict, weight_name, subfolder
171
+ ):
172
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
173
+ model_file = _get_model_file(
174
+ pretrained_model_name_or_path_or_dict,
175
+ weights_name=weight_name,
176
+ cache_dir=cache_dir,
177
+ force_download=force_download,
178
+ resume_download=resume_download,
179
+ proxies=proxies,
180
+ local_files_only=local_files_only,
181
+ token=token,
182
+ revision=revision,
183
+ subfolder=subfolder,
184
+ user_agent=user_agent,
185
+ )
186
+ if weight_name.endswith(".safetensors"):
187
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
188
+ with safe_open(model_file, framework="pt", device="cpu") as f:
189
+ for key in f.keys():
190
+ if key.startswith("image_proj."):
191
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
192
+ elif key.startswith("ip_adapter."):
193
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
194
+ else:
195
+ state_dict = load_state_dict(model_file)
196
+ else:
197
+ state_dict = pretrained_model_name_or_path_or_dict
198
+
199
+ keys = list(state_dict.keys())
200
+ if keys != ["image_proj", "ip_adapter"]:
201
+ raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
202
+
203
+ state_dicts.append(state_dict)
204
+
205
+ # load CLIP image encoder here if it has not been registered to the pipeline yet
206
+ if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
207
+ if image_encoder_folder is not None:
208
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
209
+ logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
210
+ if image_encoder_folder.count("/") == 0:
211
+ image_encoder_subfolder = Path(subfolder, image_encoder_folder).as_posix()
212
+ else:
213
+ image_encoder_subfolder = Path(image_encoder_folder).as_posix()
214
+
215
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(
216
+ pretrained_model_name_or_path_or_dict,
217
+ subfolder=image_encoder_subfolder,
218
+ low_cpu_mem_usage=low_cpu_mem_usage,
219
+ ).to(self.device, dtype=self.dtype)
220
+ self.register_modules(image_encoder=image_encoder)
221
+ else:
222
+ raise ValueError(
223
+ "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict."
224
+ )
225
+ else:
226
+ logger.warning(
227
+ "image_encoder is not loaded since `image_encoder_folder=None` passed. You will not be able to use `ip_adapter_image` when calling the pipeline with IP-Adapter."
228
+ "Use `ip_adapter_image_embeds` to pass pre-generated image embedding instead."
229
+ )
230
+
231
+ # create feature extractor if it has not been registered to the pipeline yet
232
+ if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
233
+ feature_extractor = CLIPImageProcessor()
234
+ self.register_modules(feature_extractor=feature_extractor)
235
+
236
+ # load ip-adapter into unet
237
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
238
+ unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage)
239
+
240
+ extra_loras = unet._load_ip_adapter_loras(state_dicts)
241
+ if extra_loras != {}:
242
+ if not USE_PEFT_BACKEND:
243
+ logger.warning("PEFT backend is required to load these weights.")
244
+ else:
245
+ # apply the IP Adapter Face ID LoRA weights
246
+ peft_config = getattr(unet, "peft_config", {})
247
+ for k, lora in extra_loras.items():
248
+ if f"faceid_{k}" not in peft_config:
249
+ self.load_lora_weights(lora, adapter_name=f"faceid_{k}")
250
+ self.set_adapters([f"faceid_{k}"], adapter_weights=[1.0])
251
+
252
+ def set_ip_adapter_scale(self, scale):
253
+ """
254
+ Set IP-Adapter scales per-transformer block. Input `scale` could be a single config or a list of configs for
255
+ granular control over each IP-Adapter behavior. A config can be a float or a dictionary.
256
+
257
+ Example:
258
+
259
+ ```py
260
+ # To use original IP-Adapter
261
+ scale = 1.0
262
+ pipeline.set_ip_adapter_scale(scale)
263
+
264
+ # To use style block only
265
+ scale = {
266
+ "up": {"block_0": [0.0, 1.0, 0.0]},
267
+ }
268
+ pipeline.set_ip_adapter_scale(scale)
269
+
270
+ # To use style+layout blocks
271
+ scale = {
272
+ "down": {"block_2": [0.0, 1.0]},
273
+ "up": {"block_0": [0.0, 1.0, 0.0]},
274
+ }
275
+ pipeline.set_ip_adapter_scale(scale)
276
+
277
+ # To use style and layout from 2 reference images
278
+ scales = [{"down": {"block_2": [0.0, 1.0]}}, {"up": {"block_0": [0.0, 1.0, 0.0]}}]
279
+ pipeline.set_ip_adapter_scale(scales)
280
+ ```
281
+ """
282
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
283
+ if not isinstance(scale, list):
284
+ scale = [scale]
285
+ scale_configs = _maybe_expand_lora_scales(unet, scale, default_scale=0.0)
286
+
287
+ for attn_name, attn_processor in unet.attn_processors.items():
288
+ if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
289
+ if len(scale_configs) != len(attn_processor.scale):
290
+ raise ValueError(
291
+ f"Cannot assign {len(scale_configs)} scale_configs to "
292
+ f"{len(attn_processor.scale)} IP-Adapter."
293
+ )
294
+ elif len(scale_configs) == 1:
295
+ scale_configs = scale_configs * len(attn_processor.scale)
296
+ for i, scale_config in enumerate(scale_configs):
297
+ if isinstance(scale_config, dict):
298
+ for k, s in scale_config.items():
299
+ if attn_name.startswith(k):
300
+ attn_processor.scale[i] = s
301
+ else:
302
+ attn_processor.scale[i] = scale_config
303
+
304
+ def unload_ip_adapter(self):
305
+ """
306
+ Unloads the IP Adapter weights
307
+
308
+ Examples:
309
+
310
+ ```python
311
+ >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
312
+ >>> pipeline.unload_ip_adapter()
313
+ >>> ...
314
+ ```
315
+ """
316
+ # remove CLIP image encoder
317
+ if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
318
+ self.image_encoder = None
319
+ self.register_to_config(image_encoder=[None, None])
320
+
321
+ # remove feature extractor only when safety_checker is None as safety_checker uses
322
+ # the feature_extractor later
323
+ if not hasattr(self, "safety_checker"):
324
+ if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None:
325
+ self.feature_extractor = None
326
+ self.register_to_config(feature_extractor=[None, None])
327
+
328
+ # remove hidden encoder
329
+ self.unet.encoder_hid_proj = None
330
+ self.config.encoder_hid_dim_type = None
331
+
332
+ # restore original Unet attention processors layers
333
+ attn_procs = {}
334
+ for name, value in self.unet.attn_processors.items():
335
+ attn_processor_class = (
336
+ AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnProcessor()
337
+ )
338
+ attn_procs[name] = (
339
+ attn_processor_class
340
+ if isinstance(value, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0))
341
+ else value.__class__()
342
+ )
343
+ self.unet.set_attn_processor(attn_procs)
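A usage sketch of the mixin as it is typically driven from a pipeline that includes it; the checkpoint names follow the common `h94/IP-Adapter` layout, and the reference image path, prompt, and 0.6 scale are illustrative:

```python
import torch
from PIL import Image
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load one IP-Adapter checkpoint; the CLIP image encoder is pulled from the same repo.
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipe.set_ip_adapter_scale(0.6)

reference = Image.open("reference.png").convert("RGB")  # any RGB reference image
image = pipe(
    prompt="best quality, a portrait photo",
    ip_adapter_image=reference,
    num_inference_steps=30,
).images[0]

pipe.unload_ip_adapter()  # restore the original attention processors
```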
modules/keypose/__init__.py ADDED
@@ -0,0 +1,216 @@
1
+ import numpy as np
2
+ import cv2
3
+ import torch
4
+
5
+ import os
6
+ #from modules import devices
7
+ #from annotator.annotator_path import models_path
8
+
9
+ import mmcv
10
+ from mmdet.apis import inference_detector, init_detector
11
+ from mmpose.apis import inference_top_down_pose_model
12
+ from mmpose.apis import init_pose_model, process_mmdet_results, vis_pose_result
13
+
14
+ device = "cpu"
15
+ if torch.cuda.is_available():
16
+ device = "cuda"
17
+
18
+ def preprocessing(image, device):
19
+ # Resize
20
+ scale = 640 / max(image.shape[:2])
21
+ image = cv2.resize(image, dsize=None, fx=scale, fy=scale)
22
+ raw_image = image.astype(np.uint8)
23
+
24
+ # Subtract mean values
25
+ image = image.astype(np.float32)
26
+ image -= np.array(
27
+ [
28
+ float(104.008),
29
+ float(116.669),
30
+ float(122.675),
31
+ ]
32
+ )
33
+
34
+ # Convert to torch.Tensor and add "batch" axis
35
+ image = torch.from_numpy(image.transpose(2, 0, 1)).float().unsqueeze(0)
36
+ image = image.to(device)
37
+
38
+ return image, raw_image
39
+
40
+
41
+ def imshow_keypoints(img,
42
+ pose_result,
43
+ skeleton=None,
44
+ kpt_score_thr=0.1,
45
+ pose_kpt_color=None,
46
+ pose_link_color=None,
47
+ radius=4,
48
+ thickness=1):
49
+ """Draw keypoints and links on an image.
50
+ Args:
51
+ img (ndarray): The image to draw poses on.
52
+ pose_result (list[kpts]): The poses to draw. Each element kpts is
53
+ a set of K keypoints as a Kx3 numpy.ndarray, where each
54
+ keypoint is represented as x, y, score.
55
+ kpt_score_thr (float, optional): Minimum score of keypoints
56
+ to be shown. Default: 0.1.
57
+ pose_kpt_color (np.array[Nx3]): Color of N keypoints. If None,
58
+ the keypoint will not be drawn.
59
+ pose_link_color (np.array[Mx3]): Color of M links. If None, the
60
+ links will not be drawn.
61
+ thickness (int): Thickness of lines.
62
+ """
63
+
64
+ img_h, img_w, _ = img.shape
65
+ img = np.zeros(img.shape)
66
+
67
+ for idx, kpts in enumerate(pose_result):
68
+ if idx > 1:
69
+ continue
70
+ kpts = kpts['keypoints']
71
+ # print(kpts)
72
+ kpts = np.array(kpts, copy=False)
73
+
74
+ # draw each point on image
75
+ if pose_kpt_color is not None:
76
+ assert len(pose_kpt_color) == len(kpts)
77
+
78
+ for kid, kpt in enumerate(kpts):
79
+ x_coord, y_coord, kpt_score = int(kpt[0]), int(kpt[1]), kpt[2]
80
+
81
+ if kpt_score < kpt_score_thr or pose_kpt_color[kid] is None:
82
+ # skip the point that should not be drawn
83
+ continue
84
+
85
+ color = tuple(int(c) for c in pose_kpt_color[kid])
86
+ cv2.circle(img, (int(x_coord), int(y_coord)),
87
+ radius, color, -1)
88
+
89
+ # draw links
90
+ if skeleton is not None and pose_link_color is not None:
91
+ assert len(pose_link_color) == len(skeleton)
92
+
93
+ for sk_id, sk in enumerate(skeleton):
94
+ pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1]))
95
+ pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1]))
96
+
97
+ if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 or pos1[1] >= img_h or pos2[0] <= 0
98
+ or pos2[0] >= img_w or pos2[1] <= 0 or pos2[1] >= img_h or kpts[sk[0], 2] < kpt_score_thr
99
+ or kpts[sk[1], 2] < kpt_score_thr or pose_link_color[sk_id] is None):
100
+ # skip the link that should not be drawn
101
+ continue
102
+ color = tuple(int(c) for c in pose_link_color[sk_id])
103
+ cv2.line(img, pos1, pos2, color, thickness=thickness)
104
+
105
+ return img
106
+
107
+
108
+ human_det, pose_model = None, None
109
+ det_model_path = "https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth"
110
+ pose_model_path = "https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth"
111
+
112
+ #modeldir = os.path.join(models_path, "keypose")
113
+ modeldir = os.getcwd()
114
+ old_modeldir = os.path.dirname(os.path.realpath(__file__))
115
+
116
+ det_config = 'faster_rcnn_r50_fpn_coco.py'
117
+ pose_config = 'hrnet_w48_coco_256x192.py'
118
+
119
+ det_checkpoint = 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
120
+ pose_checkpoint = 'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'
121
+ det_cat_id = 1
122
+ bbox_thr = 0.2
123
+
124
+ skeleton = [
125
+ [15, 13], [13, 11], [16, 14], [14, 12], [11, 12], [5, 11], [6, 12], [5, 6], [5, 7], [6, 8],
126
+ [7, 9], [8, 10],
127
+ [1, 2], [0, 1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6]
128
+ ]
129
+
130
+ pose_kpt_color = [
131
+ [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255],
132
+ [0, 255, 0],
133
+ [255, 128, 0], [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0], [0, 255, 0],
134
+ [255, 128, 0],
135
+ [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0]
136
+ ]
137
+
138
+ pose_link_color = [
139
+ [0, 255, 0], [0, 255, 0], [255, 128, 0], [255, 128, 0],
140
+ [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [0, 255, 0],
141
+ [255, 128, 0],
142
+ [0, 255, 0], [255, 128, 0], [51, 153, 255], [51, 153, 255], [51, 153, 255],
143
+ [51, 153, 255],
144
+ [51, 153, 255], [51, 153, 255], [51, 153, 255]
145
+ ]
146
+
147
+ def find_download_model(checkpoint, remote_path):
148
+ modelpath = os.path.join(modeldir, checkpoint)
149
+ old_modelpath = os.path.join(old_modeldir, checkpoint)
150
+
151
+ if os.path.exists(old_modelpath):
152
+ modelpath = old_modelpath
153
+ elif not os.path.exists(modelpath):
154
+ from basicsr.utils.download_util import load_file_from_url
155
+ load_file_from_url(remote_path, model_dir=modeldir)
156
+
157
+ return modelpath
158
+
159
+ def apply_keypose(input_image):
160
+ global human_det, pose_model, device
161
+ if human_det is None:
162
+ det_model_local = find_download_model(det_checkpoint, det_model_path)
163
+ hrnet_model_local = find_download_model(pose_checkpoint, pose_model_path)
164
+ det_config_mmcv = mmcv.Config.fromfile(det_config)
165
+ pose_config_mmcv = mmcv.Config.fromfile(pose_config)
166
+ human_det = init_detector(det_config_mmcv, det_model_local, device=device)
167
+ pose_model = init_pose_model(pose_config_mmcv, hrnet_model_local, device=device)
168
+
169
+ assert input_image.ndim == 3
170
+ input_image = input_image.copy()
171
+ with torch.no_grad():
172
+ image = torch.from_numpy(input_image).float().to(device)
173
+ image = image / 255.0
174
+ mmdet_results = inference_detector(human_det, image)
175
+
176
+ # keep the person class bounding boxes.
177
+ person_results = process_mmdet_results(mmdet_results, det_cat_id)
178
+
179
+ return_heatmap = False
180
+ dataset = pose_model.cfg.data['test']['type']
181
+
182
+ # e.g. use ('backbone', ) to return backbone feature
183
+ output_layer_names = None
184
+ pose_results, _ = inference_top_down_pose_model(
185
+ pose_model,
186
+ image,
187
+ person_results,
188
+ bbox_thr=bbox_thr,
189
+ format='xyxy',
190
+ dataset=dataset,
191
+ dataset_info=None,
192
+ return_heatmap=return_heatmap,
193
+ outputs=output_layer_names
194
+ )
195
+
196
+ im_keypose_out = imshow_keypoints(
197
+ image,
198
+ pose_results,
199
+ skeleton=skeleton,
200
+ pose_kpt_color=pose_kpt_color,
201
+ pose_link_color=pose_link_color,
202
+ radius=2,
203
+ thickness=2
204
+ )
205
+ im_keypose_out = im_keypose_out.astype(np.uint8)
206
+
207
+ # image_hed = rearrange(image_hed, 'h w c -> 1 c h w')
208
+ # edge = netNetwork(image_hed)[0]
209
+ # edge = (edge.cpu().numpy() * 255.0).clip(0, 255).astype(np.uint8)
210
+ return im_keypose_out
211
+
212
+
213
+ def unload_hed_model():
214
+ global human_det, pose_model
215
+ if human_det is not None: human_det.cpu()
216
+ if pose_model is not None: pose_model.cpu()
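The annotator resolves its two checkpoints lazily on first use; below is a small sketch of pre-fetching them ahead of time. The `modules.keypose` import path is assumed from the file layout, importing the package pulls in mmcv/mmdet/mmpose, and the download helper relies on `basicsr`:

```python
from modules.keypose import (  # assumed package path
    find_download_model,
    det_checkpoint, det_model_path,
    pose_checkpoint, pose_model_path,
)

det_weights = find_download_model(det_checkpoint, det_model_path)     # Faster R-CNN person detector
pose_weights = find_download_model(pose_checkpoint, pose_model_path)  # HRNet-W48 pose estimator
print("detector weights:", det_weights)
print("pose weights:", pose_weights)
```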
modules/keypose/faster_rcnn_r50_fpn_coco.py ADDED
@@ -0,0 +1,182 @@
1
+ checkpoint_config = dict(interval=1)
2
+ # yapf:disable
3
+ log_config = dict(
4
+ interval=50,
5
+ hooks=[
6
+ dict(type='TextLoggerHook'),
7
+ # dict(type='TensorboardLoggerHook')
8
+ ])
9
+ # yapf:enable
10
+ dist_params = dict(backend='nccl')
11
+ log_level = 'INFO'
12
+ load_from = None
13
+ resume_from = None
14
+ workflow = [('train', 1)]
15
+ # optimizer
16
+ optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
17
+ optimizer_config = dict(grad_clip=None)
18
+ # learning policy
19
+ lr_config = dict(
20
+ policy='step',
21
+ warmup='linear',
22
+ warmup_iters=500,
23
+ warmup_ratio=0.001,
24
+ step=[8, 11])
25
+ total_epochs = 12
26
+
27
+ model = dict(
28
+ type='FasterRCNN',
29
+ pretrained='torchvision://resnet50',
30
+ backbone=dict(
31
+ type='ResNet',
32
+ depth=50,
33
+ num_stages=4,
34
+ out_indices=(0, 1, 2, 3),
35
+ frozen_stages=1,
36
+ norm_cfg=dict(type='BN', requires_grad=True),
37
+ norm_eval=True,
38
+ style='pytorch'),
39
+ neck=dict(
40
+ type='FPN',
41
+ in_channels=[256, 512, 1024, 2048],
42
+ out_channels=256,
43
+ num_outs=5),
44
+ rpn_head=dict(
45
+ type='RPNHead',
46
+ in_channels=256,
47
+ feat_channels=256,
48
+ anchor_generator=dict(
49
+ type='AnchorGenerator',
50
+ scales=[8],
51
+ ratios=[0.5, 1.0, 2.0],
52
+ strides=[4, 8, 16, 32, 64]),
53
+ bbox_coder=dict(
54
+ type='DeltaXYWHBBoxCoder',
55
+ target_means=[.0, .0, .0, .0],
56
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
57
+ loss_cls=dict(
58
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
59
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
60
+ roi_head=dict(
61
+ type='StandardRoIHead',
62
+ bbox_roi_extractor=dict(
63
+ type='SingleRoIExtractor',
64
+ roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
65
+ out_channels=256,
66
+ featmap_strides=[4, 8, 16, 32]),
67
+ bbox_head=dict(
68
+ type='Shared2FCBBoxHead',
69
+ in_channels=256,
70
+ fc_out_channels=1024,
71
+ roi_feat_size=7,
72
+ num_classes=80,
73
+ bbox_coder=dict(
74
+ type='DeltaXYWHBBoxCoder',
75
+ target_means=[0., 0., 0., 0.],
76
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
77
+ reg_class_agnostic=False,
78
+ loss_cls=dict(
79
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
80
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
81
+ # model training and testing settings
82
+ train_cfg=dict(
83
+ rpn=dict(
84
+ assigner=dict(
85
+ type='MaxIoUAssigner',
86
+ pos_iou_thr=0.7,
87
+ neg_iou_thr=0.3,
88
+ min_pos_iou=0.3,
89
+ match_low_quality=True,
90
+ ignore_iof_thr=-1),
91
+ sampler=dict(
92
+ type='RandomSampler',
93
+ num=256,
94
+ pos_fraction=0.5,
95
+ neg_pos_ub=-1,
96
+ add_gt_as_proposals=False),
97
+ allowed_border=-1,
98
+ pos_weight=-1,
99
+ debug=False),
100
+ rpn_proposal=dict(
101
+ nms_pre=2000,
102
+ max_per_img=1000,
103
+ nms=dict(type='nms', iou_threshold=0.7),
104
+ min_bbox_size=0),
105
+ rcnn=dict(
106
+ assigner=dict(
107
+ type='MaxIoUAssigner',
108
+ pos_iou_thr=0.5,
109
+ neg_iou_thr=0.5,
110
+ min_pos_iou=0.5,
111
+ match_low_quality=False,
112
+ ignore_iof_thr=-1),
113
+ sampler=dict(
114
+ type='RandomSampler',
115
+ num=512,
116
+ pos_fraction=0.25,
117
+ neg_pos_ub=-1,
118
+ add_gt_as_proposals=True),
119
+ pos_weight=-1,
120
+ debug=False)),
121
+ test_cfg=dict(
122
+ rpn=dict(
123
+ nms_pre=1000,
124
+ max_per_img=1000,
125
+ nms=dict(type='nms', iou_threshold=0.7),
126
+ min_bbox_size=0),
127
+ rcnn=dict(
128
+ score_thr=0.05,
129
+ nms=dict(type='nms', iou_threshold=0.5),
130
+ max_per_img=100)
131
+ # soft-nms is also supported for rcnn testing
132
+ # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
133
+ ))
134
+
135
+ dataset_type = 'CocoDataset'
136
+ data_root = 'data/coco'
137
+ img_norm_cfg = dict(
138
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
139
+ train_pipeline = [
140
+ dict(type='LoadImageFromFile'),
141
+ dict(type='LoadAnnotations', with_bbox=True),
142
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
143
+ dict(type='RandomFlip', flip_ratio=0.5),
144
+ dict(type='Normalize', **img_norm_cfg),
145
+ dict(type='Pad', size_divisor=32),
146
+ dict(type='DefaultFormatBundle'),
147
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
148
+ ]
149
+ test_pipeline = [
150
+ dict(type='LoadImageFromFile'),
151
+ dict(
152
+ type='MultiScaleFlipAug',
153
+ img_scale=(1333, 800),
154
+ flip=False,
155
+ transforms=[
156
+ dict(type='Resize', keep_ratio=True),
157
+ dict(type='RandomFlip'),
158
+ dict(type='Normalize', **img_norm_cfg),
159
+ dict(type='Pad', size_divisor=32),
160
+ dict(type='DefaultFormatBundle'),
161
+ dict(type='Collect', keys=['img']),
162
+ ])
163
+ ]
164
+ data = dict(
165
+ samples_per_gpu=2,
166
+ workers_per_gpu=2,
167
+ train=dict(
168
+ type=dataset_type,
169
+ ann_file=f'{data_root}/annotations/instances_train2017.json',
170
+ img_prefix=f'{data_root}/train2017/',
171
+ pipeline=train_pipeline),
172
+ val=dict(
173
+ type=dataset_type,
174
+ ann_file=f'{data_root}/annotations/instances_val2017.json',
175
+ img_prefix=f'{data_root}/val2017/',
176
+ pipeline=test_pipeline),
177
+ test=dict(
178
+ type=dataset_type,
179
+ ann_file=f'{data_root}/annotations/instances_val2017.json',
180
+ img_prefix=f'{data_root}/val2017/',
181
+ pipeline=test_pipeline))
182
+ evaluation = dict(interval=1, metric='bbox')
modules/keypose/hrnet_w48_coco_256x192.py ADDED
@@ -0,0 +1,169 @@
1
+ # _base_ = [
2
+ # '../../../../_base_/default_runtime.py',
3
+ # '../../../../_base_/datasets/coco.py'
4
+ # ]
5
+ evaluation = dict(interval=10, metric='mAP', save_best='AP')
6
+
7
+ optimizer = dict(
8
+ type='Adam',
9
+ lr=5e-4,
10
+ )
11
+ optimizer_config = dict(grad_clip=None)
12
+ # learning policy
13
+ lr_config = dict(
14
+ policy='step',
15
+ warmup='linear',
16
+ warmup_iters=500,
17
+ warmup_ratio=0.001,
18
+ step=[170, 200])
19
+ total_epochs = 210
20
+ channel_cfg = dict(
21
+ num_output_channels=17,
22
+ dataset_joints=17,
23
+ dataset_channel=[
24
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
25
+ ],
26
+ inference_channel=[
27
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
28
+ ])
29
+
30
+ # model settings
31
+ model = dict(
32
+ type='TopDown',
33
+ pretrained='https://download.openmmlab.com/mmpose/'
34
+ 'pretrain_models/hrnet_w48-8ef0771d.pth',
35
+ backbone=dict(
36
+ type='HRNet',
37
+ in_channels=3,
38
+ extra=dict(
39
+ stage1=dict(
40
+ num_modules=1,
41
+ num_branches=1,
42
+ block='BOTTLENECK',
43
+ num_blocks=(4, ),
44
+ num_channels=(64, )),
45
+ stage2=dict(
46
+ num_modules=1,
47
+ num_branches=2,
48
+ block='BASIC',
49
+ num_blocks=(4, 4),
50
+ num_channels=(48, 96)),
51
+ stage3=dict(
52
+ num_modules=4,
53
+ num_branches=3,
54
+ block='BASIC',
55
+ num_blocks=(4, 4, 4),
56
+ num_channels=(48, 96, 192)),
57
+ stage4=dict(
58
+ num_modules=3,
59
+ num_branches=4,
60
+ block='BASIC',
61
+ num_blocks=(4, 4, 4, 4),
62
+ num_channels=(48, 96, 192, 384))),
63
+ ),
64
+ keypoint_head=dict(
65
+ type='TopdownHeatmapSimpleHead',
66
+ in_channels=48,
67
+ out_channels=channel_cfg['num_output_channels'],
68
+ num_deconv_layers=0,
69
+ extra=dict(final_conv_kernel=1, ),
70
+ loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
71
+ train_cfg=dict(),
72
+ test_cfg=dict(
73
+ flip_test=True,
74
+ post_process='default',
75
+ shift_heatmap=True,
76
+ modulate_kernel=11))
77
+
78
+ data_cfg = dict(
79
+ image_size=[192, 256],
80
+ heatmap_size=[48, 64],
81
+ num_output_channels=channel_cfg['num_output_channels'],
82
+ num_joints=channel_cfg['dataset_joints'],
83
+ dataset_channel=channel_cfg['dataset_channel'],
84
+ inference_channel=channel_cfg['inference_channel'],
85
+ soft_nms=False,
86
+ nms_thr=1.0,
87
+ oks_thr=0.9,
88
+ vis_thr=0.2,
89
+ use_gt_bbox=False,
90
+ det_bbox_thr=0.0,
91
+ bbox_file='data/coco/person_detection_results/'
92
+ 'COCO_val2017_detections_AP_H_56_person.json',
93
+ )
94
+
95
+ train_pipeline = [
96
+ dict(type='LoadImageFromFile'),
97
+ dict(type='TopDownGetBboxCenterScale', padding=1.25),
98
+ dict(type='TopDownRandomShiftBboxCenter', shift_factor=0.16, prob=0.3),
99
+ dict(type='TopDownRandomFlip', flip_prob=0.5),
100
+ dict(
101
+ type='TopDownHalfBodyTransform',
102
+ num_joints_half_body=8,
103
+ prob_half_body=0.3),
104
+ dict(
105
+ type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
106
+ dict(type='TopDownAffine'),
107
+ dict(type='ToTensor'),
108
+ dict(
109
+ type='NormalizeTensor',
110
+ mean=[0.485, 0.456, 0.406],
111
+ std=[0.229, 0.224, 0.225]),
112
+ dict(type='TopDownGenerateTarget', sigma=2),
113
+ dict(
114
+ type='Collect',
115
+ keys=['img', 'target', 'target_weight'],
116
+ meta_keys=[
117
+ 'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
118
+ 'rotation', 'bbox_score', 'flip_pairs'
119
+ ]),
120
+ ]
121
+
122
+ val_pipeline = [
123
+ dict(type='LoadImageFromFile'),
124
+ dict(type='TopDownGetBboxCenterScale', padding=1.25),
125
+ dict(type='TopDownAffine'),
126
+ dict(type='ToTensor'),
127
+ dict(
128
+ type='NormalizeTensor',
129
+ mean=[0.485, 0.456, 0.406],
130
+ std=[0.229, 0.224, 0.225]),
131
+ dict(
132
+ type='Collect',
133
+ keys=['img'],
134
+ meta_keys=[
135
+ 'image_file', 'center', 'scale', 'rotation', 'bbox_score',
136
+ 'flip_pairs'
137
+ ]),
138
+ ]
139
+
140
+ test_pipeline = val_pipeline
141
+
142
+ data_root = 'data/coco'
143
+ data = dict(
144
+ samples_per_gpu=32,
145
+ workers_per_gpu=2,
146
+ val_dataloader=dict(samples_per_gpu=32),
147
+ test_dataloader=dict(samples_per_gpu=32),
148
+ train=dict(
149
+ type='TopDownCocoDataset',
150
+ ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
151
+ img_prefix=f'{data_root}/train2017/',
152
+ data_cfg=data_cfg,
153
+ pipeline=train_pipeline,
154
+ dataset_info={{_base_.dataset_info}}),
155
+ val=dict(
156
+ type='TopDownCocoDataset',
157
+ ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
158
+ img_prefix=f'{data_root}/val2017/',
159
+ data_cfg=data_cfg,
160
+ pipeline=val_pipeline,
161
+ dataset_info={{_base_.dataset_info}}),
162
+ test=dict(
163
+ type='TopDownCocoDataset',
164
+ ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
165
+ img_prefix=f'{data_root}/val2017/',
166
+ data_cfg=data_cfg,
167
+ pipeline=test_pipeline,
168
+ dataset_info={{_base_.dataset_info}}),
169
+ )
modules/lora.py ADDED
@@ -0,0 +1,187 @@
 
1
+ # LoRA network module
2
+ # reference:
3
+ # https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
4
+ # https://github.com/cloneofsimo/lora/blob/master/lora_diffusion/lora.py
5
+ # https://github.com/bmaltais/kohya_ss/blob/master/networks/lora.py#L48
6
+
7
+ import math
8
+ import os
9
+ import torch
10
+ import diffusers
11
+ import modules.safe as _
12
+ from safetensors.torch import load_file
13
+
14
+
15
+ class LoRAModule(torch.nn.Module):
16
+ """
17
+ replaces forward method of the original Linear, instead of replacing the original Linear module.
18
+ """
19
+
20
+ def __init__(
21
+ self,
22
+ lora_name,
23
+ org_module: torch.nn.Module,
24
+ multiplier=1.0,
25
+ lora_dim=4,
26
+ alpha=1,
27
+ ):
28
+ """if alpha == 0 or None, alpha is rank (no scaling)."""
29
+ super().__init__()
30
+ self.lora_name = lora_name
31
+ self.lora_dim = lora_dim
32
+
33
+ if org_module.__class__.__name__ == "Conv2d":
34
+ in_dim = org_module.in_channels
35
+ out_dim = org_module.out_channels
36
+ self.lora_down = torch.nn.Conv2d(in_dim, lora_dim, (1, 1), bias=False)
37
+ self.lora_up = torch.nn.Conv2d(lora_dim, out_dim, (1, 1), bias=False)
38
+ else:
39
+ in_dim = org_module.in_features
40
+ out_dim = org_module.out_features
41
+ self.lora_down = torch.nn.Linear(in_dim, lora_dim, bias=False)
42
+ self.lora_up = torch.nn.Linear(lora_dim, out_dim, bias=False)
43
+
44
+ if type(alpha) == torch.Tensor:
45
+ alpha = alpha.detach().float().numpy() # without casting, bf16 causes error
46
+
47
+ alpha = lora_dim if alpha is None or alpha == 0 else alpha
48
+ self.scale = alpha / self.lora_dim
49
+ self.register_buffer("alpha", torch.tensor(alpha)) # 定数として扱える
50
+
51
+ # same as microsoft's
52
+ torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
53
+ torch.nn.init.zeros_(self.lora_up.weight)
54
+
55
+ self.multiplier = multiplier
56
+ self.org_module = org_module # remove in applying
57
+ self.enable = False
58
+
59
+ def resize(self, rank, alpha, multiplier):
60
+ self.alpha = alpha.clone().detach()
61
+ self.multiplier = multiplier
62
+ self.scale = alpha / rank
63
+ if self.lora_down.__class__.__name__ == "Conv2d":
64
+ in_dim = self.lora_down.in_channels
65
+ out_dim = self.lora_up.out_channels
66
+ self.lora_down = torch.nn.Conv2d(in_dim, rank, (1, 1), bias=False)
67
+ self.lora_up = torch.nn.Conv2d(rank, out_dim, (1, 1), bias=False)
68
+ else:
69
+ in_dim = self.lora_down.in_features
70
+ out_dim = self.lora_up.out_features
71
+ self.lora_down = torch.nn.Linear(in_dim, rank, bias=False)
72
+ self.lora_up = torch.nn.Linear(rank, out_dim, bias=False)
73
+
74
+ def apply(self):
75
+ if hasattr(self, "org_module"):
76
+ self.org_forward = self.org_module.forward
77
+ self.org_module.forward = self.forward
78
+ del self.org_module
79
+
80
+ def forward(self, x):
81
+ if self.enable:
82
+ return (
83
+ self.org_forward(x)
84
+ + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale
85
+ )
86
+ return self.org_forward(x)
87
+
88
+
89
+ class LoRANetwork(torch.nn.Module):
90
+ UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"]
91
+ TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
92
+ LORA_PREFIX_UNET = "lora_unet"
93
+ LORA_PREFIX_TEXT_ENCODER = "lora_te"
94
+
95
+ def __init__(self, text_encoder, unet, multiplier=1.0, lora_dim=4, alpha=1) -> None:
96
+ super().__init__()
97
+ self.multiplier = multiplier
98
+ self.lora_dim = lora_dim
99
+ self.alpha = alpha
100
+
101
+ # create module instances
102
+ def create_modules(prefix, root_module: torch.nn.Module, target_replace_modules):
103
+ loras = []
104
+ for name, module in root_module.named_modules():
105
+ if module.__class__.__name__ in target_replace_modules:
106
+ for child_name, child_module in module.named_modules():
107
+ if child_module.__class__.__name__ == "Linear" or (child_module.__class__.__name__ == "Conv2d" and child_module.kernel_size == (1, 1)):
108
+ lora_name = prefix + "." + name + "." + child_name
109
+ lora_name = lora_name.replace(".", "_")
110
+ lora = LoRAModule(lora_name, child_module, self.multiplier, self.lora_dim, self.alpha,)
111
+ loras.append(lora)
112
+ return loras
113
+
114
+ if isinstance(text_encoder, list):
115
+ self.text_encoder_loras = text_encoder
116
+ else:
117
+ self.text_encoder_loras = create_modules(LoRANetwork.LORA_PREFIX_TEXT_ENCODER, text_encoder, LoRANetwork.TEXT_ENCODER_TARGET_REPLACE_MODULE)
118
+ print(f"Create LoRA for Text Encoder: {len(self.text_encoder_loras)} modules.")
119
+
120
+ if diffusers.__version__ >= "0.15.0":
121
+ LoRANetwork.UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel"]
122
+
123
+ self.unet_loras = create_modules(LoRANetwork.LORA_PREFIX_UNET, unet, LoRANetwork.UNET_TARGET_REPLACE_MODULE)
124
+ print(f"Create LoRA for U-Net: {len(self.unet_loras)} modules.")
125
+
126
+ self.weights_sd = None
127
+
128
+ # assertion
129
+ names = set()
130
+ for lora in self.text_encoder_loras + self.unet_loras:
131
+ assert (lora.lora_name not in names), f"duplicated lora name: {lora.lora_name}"
132
+ names.add(lora.lora_name)
133
+
134
+ lora.apply()
135
+ self.add_module(lora.lora_name, lora)
136
+
137
+ def reset(self):
138
+ for lora in self.text_encoder_loras + self.unet_loras:
139
+ lora.enable = False
140
+
141
+ def load(self, file, scale):
142
+
143
+ weights = None
144
+ if os.path.splitext(file)[1] == ".safetensors":
145
+ weights = load_file(file)
146
+ else:
147
+ weights = torch.load(file, map_location="cpu")
148
+
149
+ if not weights:
150
+ return
151
+
152
+ network_alpha = None
153
+ network_dim = None
154
+ for key, value in weights.items():
155
+ if network_alpha is None and "alpha" in key:
156
+ network_alpha = value
157
+ if network_dim is None and "lora_down" in key and len(value.size()) == 2:
158
+ network_dim = value.size()[0]
159
+
160
+ if network_alpha is None:
161
+ network_alpha = network_dim
162
+
163
+ weights_has_text_encoder = weights_has_unet = False
164
+ weights_to_modify = []
165
+
166
+ for key in weights.keys():
167
+ if key.startswith(LoRANetwork.LORA_PREFIX_TEXT_ENCODER):
168
+ weights_has_text_encoder = True
169
+
170
+ if key.startswith(LoRANetwork.LORA_PREFIX_UNET):
171
+ weights_has_unet = True
172
+
173
+ if weights_has_text_encoder:
174
+ weights_to_modify += self.text_encoder_loras
175
+
176
+ if weights_has_unet:
177
+ weights_to_modify += self.unet_loras
178
+
179
+ for lora in self.text_encoder_loras + self.unet_loras:
180
+ lora.resize(network_dim, network_alpha, scale)
181
+ if lora in weights_to_modify:
182
+ lora.enable = True
183
+
184
+ info = self.load_state_dict(weights, False)
185
+ if len(info.unexpected_keys) > 0:
186
+ print(f"Weights are loaded. Unexpected keys={info.unexpected_keys}")
187
+
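A minimal usage sketch for the LoRANetwork class defined above, assuming it is importable from modules.attention_modify and that a standard diffusers StableDiffusionPipeline supplies the text encoder and U-Net; the base model id, LoRA file name, and scale are illustrative placeholders rather than values taken from this repository.

import torch
from diffusers import StableDiffusionPipeline
from modules.attention_modify import LoRANetwork  # assumed import path

# Load a base pipeline; the model id is only an example.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Constructing the network hooks a LoRAModule into every targeted
# Linear / 1x1 Conv2d layer of the text encoder and U-Net
# (LoRAModule.apply() is called inside __init__).
lora_network = LoRANetwork(pipe.text_encoder, pipe.unet)

# Load a LoRA checkpoint (.safetensors or .pt) and enable it at scale 1.0;
# load() resizes each module to the checkpoint's rank/alpha first.
lora_network.load("example_lora.safetensors", scale=1.0)

# Move the (re-created) LoRA layers to the same device/dtype as the U-Net.
lora_network.to(pipe.device, dtype=pipe.unet.dtype)

image = pipe("a photo of an astronaut riding a horse").images[0]

# Turn the LoRA contribution off again without unloading the weights.
lora_network.reset()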
modules/model_diffusers.py ADDED
The diff for this file is too large to render. See raw diff
 
modules/model_k_diffusion.py ADDED
@@ -0,0 +1,1960 @@
1
+ import importlib
2
+ import inspect
3
+ import math
4
+ from pathlib import Path
5
+ import re
6
+ from collections import defaultdict
7
+ from typing import List, Optional, Union
8
+ import cv2
9
+ import time
10
+ import k_diffusion
11
+ import numpy as np
12
+ import PIL
13
+ import torch
14
+ import torch.nn as nn
15
+ import torch.nn.functional as F
16
+ from einops import rearrange
17
+ from .external_k_diffusion import CompVisDenoiser, CompVisVDenoiser
18
+ #from .prompt_parser import FrozenCLIPEmbedderWithCustomWords
19
+ from torch import einsum
20
+ from torch.autograd.function import Function
21
+
22
+ from diffusers.utils import PIL_INTERPOLATION, is_accelerate_available
23
+ from diffusers.utils import logging
24
+ from diffusers.utils.torch_utils import randn_tensor,is_compiled_module
25
+ from diffusers.image_processor import VaeImageProcessor,PipelineImageInput
26
+ from safetensors.torch import load_file
27
+ from diffusers import ControlNetModel
28
+ from PIL import Image
29
+ import torchvision.transforms as transforms
30
+ from diffusers.models import AutoencoderKL, ImageProjection
31
+ from .ip_adapter import IPAdapterMixin
32
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
33
+ import gc
34
+ from .t2i_adapter import preprocessing_t2i_adapter,default_height_width
35
+ from .encoder_prompt_modify import encode_prompt_function
36
+ from .encode_region_map_function import encode_region_map
37
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
38
+ from diffusers.loaders import LoraLoaderMixin
39
+ from diffusers.loaders import TextualInversionLoaderMixin
40
+
41
+ def get_image_size(image):
42
+ height, width = None, None
43
+ if isinstance(image, Image.Image):
44
+ return image.size
45
+ elif isinstance(image, np.ndarray):
46
+ height, width = image.shape[:2]
47
+ return (width, height)
48
+ elif torch.is_tensor(image):
49
+ #RGB image
50
+ if len(image.shape) == 3:
51
+ _, height, width = image.shape
52
+ else:
53
+ height, width = image.shape
54
+ return (width, height)
55
+ else:
56
+ raise TypeError("The image must be an instance of PIL.Image, numpy.ndarray, or torch.Tensor.")
57
+
58
+
59
+ def retrieve_latents(
60
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
61
+ ):
62
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
63
+ return encoder_output.latent_dist.sample(generator)
64
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
65
+ return encoder_output.latent_dist.mode()
66
+ elif hasattr(encoder_output, "latents"):
67
+ return encoder_output.latents
68
+ else:
69
+ raise AttributeError("Could not access latents of provided encoder_output")
70
+
71
+ # from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
72
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
73
+ """
74
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
75
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
76
+ """
77
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
78
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
79
+ # rescale the results from guidance (fixes overexposure)
80
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
81
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
82
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
83
+ return noise_cfg
84
+
85
+
86
+ class ModelWrapper:
87
+ def __init__(self, model, alphas_cumprod):
88
+ self.model = model
89
+ self.alphas_cumprod = alphas_cumprod
90
+
91
+ def apply_model(self, *args, **kwargs):
92
+ if len(args) == 3:
93
+ encoder_hidden_states = args[-1]
94
+ args = args[:2]
95
+ if kwargs.get("cond", None) is not None:
96
+ encoder_hidden_states = kwargs.pop("cond")
97
+ return self.model(
98
+ *args, encoder_hidden_states=encoder_hidden_states, **kwargs
99
+ ).sample
100
+
101
+
102
+ class StableDiffusionPipeline(IPAdapterMixin,DiffusionPipeline,StableDiffusionMixin,LoraLoaderMixin,TextualInversionLoaderMixin):
103
+
104
+ _optional_components = ["safety_checker", "feature_extractor"]
105
+
106
+ def __init__(
107
+ self,
108
+ vae,
109
+ text_encoder,
110
+ tokenizer,
111
+ unet,
112
+ scheduler,
113
+ feature_extractor,
114
+ image_encoder = None,
115
+ ):
116
+ super().__init__()
117
+
118
+ # get correct sigmas from LMS
119
+ self.register_modules(
120
+ vae=vae,
121
+ text_encoder=text_encoder,
122
+ tokenizer=tokenizer,
123
+ unet=unet,
124
+ scheduler=scheduler,
125
+ feature_extractor=feature_extractor,
126
+ image_encoder=image_encoder,
127
+ )
128
+ self.controlnet = None
129
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
130
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
131
+ self.mask_processor = VaeImageProcessor(
132
+ vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
133
+ )
134
+ self.setup_unet(self.unet)
135
+ #self.setup_text_encoder()
136
+
137
+ '''def setup_text_encoder(self, n=1, new_encoder=None):
138
+ if new_encoder is not None:
139
+ self.text_encoder = new_encoder
140
+
141
+ self.prompt_parser = FrozenCLIPEmbedderWithCustomWords(self.tokenizer, self.text_encoder,n)'''
142
+ #self.prompt_parser.CLIP_stop_at_last_layers = n
143
+
144
+ def setup_unet(self, unet):
145
+ unet = unet.to(self.device)
146
+ model = ModelWrapper(unet, self.scheduler.alphas_cumprod)
147
+ if self.scheduler.config.prediction_type == "v_prediction":
148
+ self.k_diffusion_model = CompVisVDenoiser(model)
149
+ else:
150
+ self.k_diffusion_model = CompVisDenoiser(model)
151
+
152
+ def get_scheduler(self, scheduler_type: str):
153
+ library = importlib.import_module("k_diffusion")
154
+ sampling = getattr(library, "sampling")
155
+ return getattr(sampling, scheduler_type)
156
+
157
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
158
+ dtype = next(self.image_encoder.parameters()).dtype
159
+
160
+ if not isinstance(image, torch.Tensor):
161
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
162
+
163
+ image = image.to(device=device, dtype=dtype)
164
+ if output_hidden_states:
165
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
166
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
167
+ uncond_image_enc_hidden_states = self.image_encoder(
168
+ torch.zeros_like(image), output_hidden_states=True
169
+ ).hidden_states[-2]
170
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
171
+ num_images_per_prompt, dim=0
172
+ )
173
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
174
+ else:
175
+ image_embeds = self.image_encoder(image).image_embeds
176
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
177
+ uncond_image_embeds = torch.zeros_like(image_embeds)
178
+
179
+ return image_embeds, uncond_image_embeds
180
+
181
+
182
+ def prepare_ip_adapter_image_embeds(
183
+ self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
184
+ ):
185
+ if ip_adapter_image_embeds is None:
186
+ if not isinstance(ip_adapter_image, list):
187
+ ip_adapter_image = [ip_adapter_image]
188
+
189
+ if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
190
+ raise ValueError(
191
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
192
+ )
193
+
194
+ image_embeds = []
195
+ for single_ip_adapter_image, image_proj_layer in zip(
196
+ ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
197
+ ):
198
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
199
+ single_image_embeds, single_negative_image_embeds = self.encode_image(
200
+ single_ip_adapter_image, device, 1, output_hidden_state
201
+ )
202
+ single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
203
+ single_negative_image_embeds = torch.stack(
204
+ [single_negative_image_embeds] * num_images_per_prompt, dim=0
205
+ )
206
+
207
+ if do_classifier_free_guidance:
208
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
209
+ single_image_embeds = single_image_embeds.to(device)
210
+
211
+ image_embeds.append(single_image_embeds)
212
+ else:
213
+ repeat_dims = [1]
214
+ image_embeds = []
215
+ for single_image_embeds in ip_adapter_image_embeds:
216
+ if do_classifier_free_guidance:
217
+ single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
218
+ single_image_embeds = single_image_embeds.repeat(
219
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
220
+ )
221
+ single_negative_image_embeds = single_negative_image_embeds.repeat(
222
+ num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
223
+ )
224
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
225
+ else:
226
+ single_image_embeds = single_image_embeds.repeat(
227
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
228
+ )
229
+ image_embeds.append(single_image_embeds)
230
+
231
+ return image_embeds
232
+
233
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
234
+ r"""
235
+ Enable sliced attention computation.
236
+
237
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
238
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
239
+
240
+ Args:
241
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
242
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
243
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
244
+ `attention_head_dim` must be a multiple of `slice_size`.
245
+ """
246
+ if slice_size == "auto":
247
+ # half the attention head size is usually a good trade-off between
248
+ # speed and memory
249
+ slice_size = self.unet.config.attention_head_dim // 2
250
+ self.unet.set_attention_slice(slice_size)
251
+
252
+ def disable_attention_slicing(self):
253
+ r"""
254
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
255
+ back to computing attention in one step.
256
+ """
257
+ # set slice_size = `None` to disable `attention slicing`
258
+ self.enable_attention_slicing(None)
259
+
260
+ def enable_sequential_cpu_offload(self, gpu_id=0):
261
+ r"""
262
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
263
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
264
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
265
+ """
266
+ if is_accelerate_available():
267
+ from accelerate import cpu_offload
268
+ else:
269
+ raise ImportError("Please install accelerate via `pip install accelerate`")
270
+
271
+ device = torch.device(f"cuda:{gpu_id}")
272
+
273
+ for cpu_offloaded_model in [
274
+ self.unet,
275
+ self.text_encoder,
276
+ self.vae,
277
+ self.safety_checker,
278
+ ]:
279
+ if cpu_offloaded_model is not None:
280
+ cpu_offload(cpu_offloaded_model, device)
281
+
282
+ @property
283
+ def _execution_device(self):
284
+ r"""
285
+ Returns the device on which the pipeline's models will be executed. After calling
286
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
287
+ hooks.
288
+ """
289
+ if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
290
+ return self.device
291
+ for module in self.unet.modules():
292
+ if (
293
+ hasattr(module, "_hf_hook")
294
+ and hasattr(module._hf_hook, "execution_device")
295
+ and module._hf_hook.execution_device is not None
296
+ ):
297
+ return torch.device(module._hf_hook.execution_device)
298
+ return self.device
299
+
300
+ def decode_latents(self, latents):
301
+ latents = latents.to(self.device, dtype=self.vae.dtype)
302
+ #latents = 1 / 0.18215 * latents
303
+ latents = 1 / self.vae.config.scaling_factor * latents
304
+ image = self.vae.decode(latents).sample
305
+ image = (image / 2 + 0.5).clamp(0, 1)
306
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
307
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
308
+ return image
309
+
310
+
311
+ def _default_height_width(self, height, width, image):
312
+ if isinstance(image, list):
313
+ image = image[0]
314
+
315
+ if height is None:
316
+ if isinstance(image, PIL.Image.Image):
317
+ height = image.height
318
+ elif isinstance(image, torch.Tensor):
319
+ height = image.shape[3]
320
+
321
+ height = (height // 8) * 8 # round down to nearest multiple of 8
322
+
323
+ if width is None:
324
+ if isinstance(image, PIL.Image.Image):
325
+ width = image.width
326
+ elif isinstance(image, torch.Tensor):
327
+ width = image.shape[2]
328
+
329
+ width = (width // 8) * 8 # round down to nearest multiple of 8
330
+
331
+ return height, width
332
+
333
+ def check_inputs(self, prompt, height, width, callback_steps):
334
+ if not isinstance(prompt, str) and not isinstance(prompt, list):
335
+ raise ValueError(
336
+ f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
337
+ )
338
+
339
+ if height % 8 != 0 or width % 8 != 0:
340
+ raise ValueError(
341
+ f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
342
+ )
343
+
344
+ if (callback_steps is None) or (
345
+ callback_steps is not None
346
+ and (not isinstance(callback_steps, int) or callback_steps <= 0)
347
+ ):
348
+ raise ValueError(
349
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
350
+ f" {type(callback_steps)}."
351
+ )
352
+
353
+ @property
354
+ def do_classifier_free_guidance(self):
355
+ return self._do_classifier_free_guidance and self.unet.config.time_cond_proj_dim is None
356
+
357
+ def setup_controlnet(self,controlnet):
358
+ if isinstance(controlnet, (list, tuple)):
359
+ controlnet = MultiControlNetModel(controlnet)
360
+ self.register_modules(
361
+ controlnet=controlnet,
362
+ )
363
+
364
+ def preprocess_controlnet(self,controlnet_conditioning_scale,control_guidance_start,control_guidance_end,image,width,height,num_inference_steps,batch_size,num_images_per_prompt):
365
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
366
+
367
+ # align format for control guidance
368
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
369
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
370
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
371
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
372
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
373
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
374
+ control_guidance_start, control_guidance_end = (
375
+ mult * [control_guidance_start],
376
+ mult * [control_guidance_end],
377
+ )
378
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
379
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
380
+
381
+ global_pool_conditions = (
382
+ controlnet.config.global_pool_conditions
383
+ if isinstance(controlnet, ControlNetModel)
384
+ else controlnet.nets[0].config.global_pool_conditions
385
+ )
386
+ guess_mode = False or global_pool_conditions
387
+
388
+ # 4. Prepare image
389
+ if isinstance(controlnet, ControlNetModel):
390
+ image = self.prepare_image(
391
+ image=image,
392
+ width=width,
393
+ height=height,
394
+ batch_size=batch_size,
395
+ num_images_per_prompt=num_images_per_prompt,
396
+ device=self._execution_device,
397
+ dtype=controlnet.dtype,
398
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
399
+ guess_mode=guess_mode,
400
+ )
401
+ height, width = image.shape[-2:]
402
+ elif isinstance(controlnet, MultiControlNetModel):
403
+ images = []
404
+
405
+ for image_ in image:
406
+ image_ = self.prepare_image(
407
+ image=image_,
408
+ width=width,
409
+ height=height,
410
+ batch_size=batch_size,
411
+ num_images_per_prompt=num_images_per_prompt,
412
+ device=self._execution_device,
413
+ dtype=controlnet.dtype,
414
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
415
+ guess_mode=guess_mode,
416
+ )
417
+
418
+ images.append(image_)
419
+
420
+ image = images
421
+ height, width = image[0].shape[-2:]
422
+ else:
423
+ assert False
424
+
425
+ # 7.2 Create tensor stating which controlnets to keep
426
+ controlnet_keep = []
427
+ for i in range(num_inference_steps):
428
+ keeps = [
429
+ 1.0 - float(i / num_inference_steps < s or (i + 1) / num_inference_steps > e)
430
+ for s, e in zip(control_guidance_start, control_guidance_end)
431
+ ]
432
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
433
+ return image,controlnet_keep,guess_mode,controlnet_conditioning_scale
434
+
435
+
436
+
437
+ def prepare_latents(
438
+ self,
439
+ batch_size,
440
+ num_channels_latents,
441
+ height,
442
+ width,
443
+ dtype,
444
+ device,
445
+ generator,
446
+ latents=None,
447
+ ):
448
+ shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor)
449
+ if latents is None:
450
+ if device.type == "mps":
451
+ # randn does not work reproducibly on mps
452
+ latents = torch.randn(
453
+ shape, generator=generator, device="cpu", dtype=dtype
454
+ ).to(device)
455
+ else:
456
+ latents = torch.randn(
457
+ shape, generator=generator, device=device, dtype=dtype
458
+ )
459
+ else:
460
+ # if latents.shape != shape:
461
+ # raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
462
+ latents = latents.to(device)
463
+
464
+ # scale the initial noise by the standard deviation required by the scheduler
465
+ return latents
466
+
467
+ def preprocess(self, image):
468
+ if isinstance(image, torch.Tensor):
469
+ return image
470
+ elif isinstance(image, PIL.Image.Image):
471
+ image = [image]
472
+
473
+ if isinstance(image[0], PIL.Image.Image):
474
+ w, h = image[0].size
475
+ w, h = map(lambda x: x - x % 8, (w, h)) # resize to integer multiple of 8
476
+
477
+ image = [
478
+ np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[
479
+ None, :
480
+ ]
481
+ for i in image
482
+ ]
483
+ image = np.concatenate(image, axis=0)
484
+ image = np.array(image).astype(np.float32) / 255.0
485
+ image = image.transpose(0, 3, 1, 2)
486
+ image = 2.0 * image - 1.0
487
+ image = torch.from_numpy(image)
488
+ elif isinstance(image[0], torch.Tensor):
489
+ image = torch.cat(image, dim=0)
490
+ return image
491
+
492
+ def prepare_image(
493
+ self,
494
+ image,
495
+ width,
496
+ height,
497
+ batch_size,
498
+ num_images_per_prompt,
499
+ device,
500
+ dtype,
501
+ do_classifier_free_guidance=False,
502
+ guess_mode=False,
503
+ ):
504
+
505
+ self.control_image_processor = VaeImageProcessor(
506
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
507
+ )
508
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
509
+ image_batch_size = image.shape[0]
510
+
511
+ if image_batch_size == 1:
512
+ repeat_by = batch_size
513
+ else:
514
+ # image batch size is the same as prompt batch size
515
+ repeat_by = num_images_per_prompt
516
+
517
+ #image = image.repeat_interleave(repeat_by, dim=0)
518
+
519
+ image = image.to(device=device, dtype=dtype)
520
+
521
+ if do_classifier_free_guidance and not guess_mode:
522
+ image = torch.cat([image] * 2)
523
+
524
+ return image
525
+
526
+ def numpy_to_pil(self,images):
527
+ r"""
528
+ Convert a numpy image or a batch of images to a PIL image.
529
+ """
530
+ if images.ndim == 3:
531
+ images = images[None, ...]
532
+ #images = (images * 255).round().astype("uint8")
533
+ images = np.clip((images * 255).round(), 0, 255).astype("uint8")
534
+ if images.shape[-1] == 1:
535
+ # special case for grayscale (single channel) images
536
+ pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
537
+ else:
538
+ pil_images = [Image.fromarray(image) for image in images]
539
+
540
+ return pil_images
541
+
542
+ def latent_to_image(self,latent,output_type):
543
+ image = self.decode_latents(latent)
544
+ if output_type == "pil":
545
+ image = self.numpy_to_pil(image)
546
+ if len(image) > 1:
547
+ return image
548
+ return image[0]
549
+
550
+
551
+ @torch.no_grad()
552
+ def img2img(
553
+ self,
554
+ prompt: Union[str, List[str]],
555
+ num_inference_steps: int = 50,
556
+ guidance_scale: float = 7.5,
557
+ negative_prompt: Optional[Union[str, List[str]]] = None,
558
+ generator: Optional[torch.Generator] = None,
559
+ image: Optional[torch.Tensor] = None,
560
+ output_type: Optional[str] = "pil",
561
+ latents=None,
562
+ strength=1.0,
563
+ region_map_state=None,
564
+ sampler_name="",
565
+ sampler_opt={},
566
+ start_time=-1,
567
+ timeout=180,
568
+ scale_ratio=8.0,
569
+ latent_processing = 0,
570
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
571
+ upscale=False,
572
+ upscale_x: float = 2.0,
573
+ upscale_method: str = "bicubic",
574
+ upscale_antialias: bool = False,
575
+ upscale_denoising_strength: int = 0.7,
576
+ width = None,
577
+ height = None,
578
+ seed = 0,
579
+ sampler_name_hires="",
580
+ sampler_opt_hires= {},
581
+ latent_upscale_processing = False,
582
+ ip_adapter_image = None,
583
+ control_img = None,
584
+ controlnet_conditioning_scale = None,
585
+ control_guidance_start = None,
586
+ control_guidance_end = None,
587
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
588
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
589
+ adapter_conditioning_factor: float = 1.0,
590
+ guidance_rescale: float = 0.0,
591
+ cross_attention_kwargs = None,
592
+ clip_skip = None,
593
+ long_encode = 0,
594
+ num_images_per_prompt = 1,
595
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
596
+ ):
597
+ if isinstance(sampler_name, str):
598
+ sampler = self.get_scheduler(sampler_name)
599
+ else:
600
+ sampler = sampler_name
601
+ if height is None:
602
+ _,height = get_image_size(image)
603
+ height = int((height // 8)*8)
604
+ if width is None:
605
+ width,_ = get_image_size(image)
606
+ width = int((width // 8)*8)
607
+
608
+ if image_t2i_adapter is not None:
609
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
610
+ if image is not None:
611
+ image = self.preprocess(image)
612
+ image = image.to(self.vae.device, dtype=self.vae.dtype)
613
+
614
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
615
+ latents = 0.18215 * init_latents
616
+
617
+ # 2. Define call parameters
618
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
619
+ device = self._execution_device
620
+ latents = latents.to(device, dtype=self.unet.dtype)
621
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
622
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
623
+ # corresponds to doing no classifier free guidance.
624
+
625
+ lora_scale = (
626
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
627
+ )
628
+ self._do_classifier_free_guidance = False if guidance_scale <= 1.0 else True
629
+ '''if guidance_scale <= 1.0:
630
+ raise ValueError("has to use guidance_scale")'''
631
+ # 3. Encode input prompt
632
+
633
+ text_embeddings, negative_prompt_embeds, text_input_ids = encode_prompt_function(
634
+ self,
635
+ prompt,
636
+ device,
637
+ num_images_per_prompt,
638
+ self.do_classifier_free_guidance,
639
+ negative_prompt,
640
+ lora_scale = lora_scale,
641
+ clip_skip = clip_skip,
642
+ long_encode = long_encode,
643
+ )
644
+
645
+ if self.do_classifier_free_guidance:
646
+ text_embeddings = torch.cat([negative_prompt_embeds, text_embeddings])
647
+
648
+ #text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
649
+ text_embeddings = text_embeddings.to(self.unet.dtype)
650
+
651
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
652
+ t_start = max(num_inference_steps - init_timestep, 0)
653
+
654
+ sigmas = self.get_sigmas(num_inference_steps, sampler_opt).to(
655
+ text_embeddings.device, dtype=text_embeddings.dtype
656
+ )
657
+
658
+ sigma_sched = sigmas[t_start:]
659
+
660
+ noise = randn_tensor(
661
+ latents.shape,
662
+ generator=generator,
663
+ device=device,
664
+ dtype=text_embeddings.dtype,
665
+ )
666
+ latents = latents.to(device)
667
+ latents = latents + noise * (sigma_sched[0]**2 + 1) ** 0.5
668
+ #latents = latents + noise * sigma_sched[0] #Nearly
669
+ steps_denoising = len(sigma_sched)
670
+ # 5. Prepare latent variables
671
+ self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
672
+ self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(
673
+ latents.device
674
+ )
675
+
676
+ region_state = encode_region_map(
677
+ self,
678
+ region_map_state,
679
+ width = width,
680
+ height = height,
681
+ num_images_per_prompt = num_images_per_prompt,
682
+ text_ids=text_input_ids,
683
+ )
684
+ if cross_attention_kwargs is None:
685
+ cross_attention_kwargs ={}
686
+
687
+ controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy() if isinstance(controlnet_conditioning_scale, list) else controlnet_conditioning_scale
688
+ control_guidance_start_copy = control_guidance_start.copy() if isinstance(control_guidance_start, list) else control_guidance_start
689
+ control_guidance_end_copy = control_guidance_end.copy() if isinstance(control_guidance_end, list) else control_guidance_end
690
+ guess_mode = False
691
+
692
+ if self.controlnet is not None:
693
+ img_control,controlnet_keep,guess_mode,controlnet_conditioning_scale = self.preprocess_controlnet(controlnet_conditioning_scale,control_guidance_start,control_guidance_end,control_img,width,height,len(sigma_sched),batch_size,num_images_per_prompt)
694
+ #print(len(controlnet_keep))
695
+
696
+ #controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy()
697
+ #sp_control = 1
698
+
699
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
700
+ image_embeds = self.prepare_ip_adapter_image_embeds(
701
+ ip_adapter_image,
702
+ ip_adapter_image_embeds,
703
+ device,
704
+ batch_size * num_images_per_prompt,
705
+ self.do_classifier_free_guidance,
706
+ )
707
+ # 6.1 Add image embeds for IP-Adapter
708
+ added_cond_kwargs = (
709
+ {"image_embeds": image_embeds}
710
+ if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
711
+ else None
712
+ )
713
+ #if controlnet_img is not None:
714
+ #controlnet_img_processing = controlnet_img.convert("RGB")
715
+ #transform = transforms.Compose([transforms.PILToTensor()])
716
+ #controlnet_img_processing = transform(controlnet_img)
717
+ #controlnet_img_processing=controlnet_img_processing.to(device=device, dtype=self.cnet.dtype)
718
+ #controlnet_img = torch.from_numpy(controlnet_img).half()
719
+ #controlnet_img = controlnet_img.unsqueeze(0)
720
+ #controlnet_img = controlnet_img.repeat_interleave(3, dim=0)
721
+ #controlnet_img=controlnet_img.to(device)
722
+ #controlnet_img = controlnet_img.repeat_interleave(4 // len(controlnet_img), 0)
723
+ if latent_processing == 1:
724
+ latents_process = [self.latent_to_image(latents,output_type)]
725
+ lst_latent_sigma = []
726
+ step_control = -1
727
+ adapter_state = None
728
+ adapter_sp_count = []
729
+ if image_t2i_adapter is not None:
730
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,1)
731
+ def model_fn(x, sigma):
732
+ nonlocal step_control,lst_latent_sigma,adapter_sp_count
733
+
734
+ if start_time > 0 and timeout > 0:
735
+ assert (time.time() - start_time) < timeout, "inference process timed out"
736
+
737
+ latent_model_input = torch.cat([x] * 2) if self.do_classifier_free_guidance else x
738
+
739
+ region_prompt = {
740
+ "region_state": region_state,
741
+ "sigma": sigma[0],
742
+ "weight_func": weight_func,
743
+ }
744
+ cross_attention_kwargs["region_prompt"] = region_prompt
745
+
746
+ #print(self.k_diffusion_model.sigma_to_t(sigma[0]))
747
+
748
+ if latent_model_input.dtype != text_embeddings.dtype:
749
+ latent_model_input = latent_model_input.to(text_embeddings.dtype)
750
+ ukwargs = {}
751
+
752
+ down_intrablock_additional_residuals = None
753
+ if adapter_state is not None:
754
+ if len(adapter_sp_count) < int( steps_denoising* adapter_conditioning_factor):
755
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
756
+ else:
757
+ down_intrablock_additional_residuals = None
758
+ sigma_string_t2i = str(sigma.item())
759
+ if sigma_string_t2i not in adapter_sp_count:
760
+ adapter_sp_count.append(sigma_string_t2i)
761
+
762
+ if self.controlnet is not None :
763
+ sigma_string = str(sigma.item())
764
+ if sigma_string not in lst_latent_sigma:
765
+ #sigmas_sp = sigma.detach().clone()
766
+ step_control+=1
767
+ lst_latent_sigma.append(sigma_string)
768
+
769
+ if isinstance(controlnet_keep[step_control], list):
770
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[step_control])]
771
+ else:
772
+ controlnet_cond_scale = controlnet_conditioning_scale
773
+ if isinstance(controlnet_cond_scale, list):
774
+ controlnet_cond_scale = controlnet_cond_scale[0]
775
+ cond_scale = controlnet_cond_scale * controlnet_keep[step_control]
776
+
777
+ down_block_res_samples = None
778
+ mid_block_res_sample = None
779
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
780
+ latent_model_input / ((sigma**2 + 1) ** 0.5),
781
+ self.k_diffusion_model.sigma_to_t(sigma),
782
+ encoder_hidden_states=text_embeddings,
783
+ controlnet_cond=img_control,
784
+ conditioning_scale=cond_scale,
785
+ guess_mode=guess_mode,
786
+ return_dict=False,
787
+ )
788
+ if guess_mode and self.do_classifier_free_guidance:
789
+ # Inferred ControlNet only for the conditional batch.
790
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
791
+ # add 0 to the unconditional batch to keep it unchanged.
792
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
793
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
794
+ ukwargs ={
795
+ "down_block_additional_residuals": down_block_res_samples,
796
+ "mid_block_additional_residual":mid_block_res_sample,
797
+ }
798
+
799
+ noise_pred = self.k_diffusion_model(
800
+ latent_model_input, sigma, cond=text_embeddings,cross_attention_kwargs = cross_attention_kwargs,down_intrablock_additional_residuals = down_intrablock_additional_residuals,added_cond_kwargs=added_cond_kwargs, **ukwargs
801
+ )
802
+
803
+
804
+ if self.do_classifier_free_guidance:
805
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
806
+ noise_pred = noise_pred_uncond + guidance_scale * (
807
+ noise_pred_text - noise_pred_uncond
808
+ )
809
+
810
+ if guidance_rescale > 0.0:
811
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
812
+ if latent_processing == 1:
813
+ latents_process.append(self.latent_to_image(noise_pred,output_type))
814
+ # noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=0.7)
815
+ return noise_pred
816
+
817
+ sampler_args = self.get_sampler_extra_args_i2i(sigma_sched,len(sigma_sched),sampler_opt,latents,seed, sampler)
818
+ latents = sampler(model_fn, latents, **sampler_args)
819
+ self.maybe_free_model_hooks()
820
+ torch.cuda.empty_cache()
821
+ gc.collect()
822
+ if upscale:
823
+ vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
824
+ target_height = int(height * upscale_x // vae_scale_factor )* 8
825
+ target_width = int(width * upscale_x // vae_scale_factor)*8
826
+
827
+ latents = torch.nn.functional.interpolate(
828
+ latents,
829
+ size=(
830
+ int(target_height // vae_scale_factor),
831
+ int(target_width // vae_scale_factor),
832
+ ),
833
+ mode=upscale_method,
834
+ antialias=upscale_antialias,
835
+ )
836
+ #if controlnet_img is not None:
837
+ #controlnet_img = cv2.resize(controlnet_img,(latents.size(0), latents.size(1)))
838
+ #controlnet_img=controlnet_img.resize((latents.size(0), latents.size(1)), Image.LANCZOS)
839
+
840
+ #region_map_state = apply_size_sketch(int(target_width),int(target_height),region_map_state)
841
+ latent_reisze= self.img2img(
842
+ prompt=prompt,
843
+ num_inference_steps=num_inference_steps,
844
+ guidance_scale=guidance_scale,
845
+ negative_prompt=negative_prompt,
846
+ generator=generator,
847
+ latents=latents,
848
+ strength=upscale_denoising_strength,
849
+ sampler_name=sampler_name_hires,
850
+ sampler_opt=sampler_opt_hires,
851
+ region_map_state=region_map_state,
852
+ latent_processing = latent_upscale_processing,
853
+ width = int(target_width),
854
+ height = int(target_height),
855
+ seed = seed,
856
+ ip_adapter_image = ip_adapter_image,
857
+ control_img = control_img,
858
+ controlnet_conditioning_scale = controlnet_conditioning_scale_copy,
859
+ control_guidance_start = control_guidance_start_copy,
860
+ control_guidance_end = control_guidance_end_copy,
861
+ image_t2i_adapter= image_t2i_adapter,
862
+ adapter_conditioning_scale = adapter_conditioning_scale,
863
+ adapter_conditioning_factor = adapter_conditioning_factor,
864
+ guidance_rescale = guidance_rescale,
865
+ cross_attention_kwargs = cross_attention_kwargs,
866
+ clip_skip = clip_skip,
867
+ long_encode = long_encode,
868
+ num_images_per_prompt = num_images_per_prompt,
869
+ )
870
+ '''if latent_processing == 1:
871
+ latents = latents_process.copy()
872
+ images = []
873
+ for i in latents:
874
+ images.append(self.decode_latents(i))
875
+ image = []
876
+ if output_type == "pil":
877
+ for i in images:
878
+ image.append(self.numpy_to_pil(i))
879
+ image[-1] = latent_reisze
880
+ return image'''
881
+ if latent_processing == 1:
882
+ latents_process= latents_process+latent_reisze
883
+ return latents_process
884
+ torch.cuda.empty_cache()
885
+ gc.collect()
886
+ return latent_reisze
887
+
888
+ '''if latent_processing == 1:
889
+ latents = latents_process.copy()
890
+ images = []
891
+ for i in latents:
892
+ images.append(self.decode_latents(i))
893
+ image = []
894
+ # 10. Convert to PIL
895
+ if output_type == "pil":
896
+ for i in images:
897
+ image.append(self.numpy_to_pil(i))
898
+ else:
899
+ image = self.decode_latents(latents)
900
+ # 10. Convert to PIL
901
+ if output_type == "pil":
902
+ image = self.numpy_to_pil(image)'''
903
+ if latent_processing == 1:
904
+ return latents_process
905
+ self.maybe_free_model_hooks()
906
+ torch.cuda.empty_cache()
907
+ gc.collect()
908
+ return [self.latent_to_image(latents,output_type)]
909
+
910
+ def get_sigmas(self, steps, params):
911
+ discard_next_to_last_sigma = params.get("discard_next_to_last_sigma", False)
912
+ steps += 1 if discard_next_to_last_sigma else 0
913
+
914
+ if params.get("scheduler", None) == "karras":
915
+ sigma_min, sigma_max = (
916
+ self.k_diffusion_model.sigmas[0].item(),
917
+ self.k_diffusion_model.sigmas[-1].item(),
918
+ )
919
+ sigmas = k_diffusion.sampling.get_sigmas_karras(
920
+ n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=self.device
921
+ )
922
+ elif params.get("scheduler", None) == "exponential":
923
+ sigma_min, sigma_max = (
924
+ self.k_diffusion_model.sigmas[0].item(),
925
+ self.k_diffusion_model.sigmas[-1].item(),
926
+ )
927
+ sigmas = k_diffusion.sampling.get_sigmas_exponential(
928
+ n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=self.device
929
+ )
930
+ elif params.get("scheduler", None) == "polyexponential":
931
+ sigma_min, sigma_max = (
932
+ self.k_diffusion_model.sigmas[0].item(),
933
+ self.k_diffusion_model.sigmas[-1].item(),
934
+ )
935
+ sigmas = k_diffusion.sampling.get_sigmas_polyexponential(
936
+ n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=self.device
937
+ )
938
+ else:
939
+ sigmas = self.k_diffusion_model.get_sigmas(steps)
940
+
941
+ if discard_next_to_last_sigma:
942
+ sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
943
+
944
+ return sigmas
945
+
946
+ def create_noise_sampler(self, x, sigmas, p,seed):
947
+ """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
948
+
949
+ from k_diffusion.sampling import BrownianTreeNoiseSampler
950
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
951
+ #current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
952
+ return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed)
953
+
954
+ # https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/48a15821de768fea76e66f26df83df3fddf18f4b/modules/sd_samplers.py#L454
955
+ def get_sampler_extra_args_t2i(self, sigmas, eta, steps,sampler_opt,latents,seed, func):
956
+ extra_params_kwargs = {}
957
+
958
+ if "eta" in inspect.signature(func).parameters:
959
+ extra_params_kwargs["eta"] = eta
960
+
961
+ if "sigma_min" in inspect.signature(func).parameters:
962
+ extra_params_kwargs["sigma_min"] = sigmas[0].item()
963
+ extra_params_kwargs["sigma_max"] = sigmas[-1].item()
964
+
965
+ if "n" in inspect.signature(func).parameters:
966
+ extra_params_kwargs["n"] = steps
967
+ else:
968
+ extra_params_kwargs["sigmas"] = sigmas
969
+ if sampler_opt.get('brownian_noise', False):
970
+ noise_sampler = self.create_noise_sampler(latents, sigmas, steps,seed)
971
+ extra_params_kwargs['noise_sampler'] = noise_sampler
972
+ if sampler_opt.get('solver_type', None) == 'heun':
973
+ extra_params_kwargs['solver_type'] = 'heun'
974
+
975
+ return extra_params_kwargs
976
+
977
+ # https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/48a15821de768fea76e66f26df83df3fddf18f4b/modules/sd_samplers.py#L454
978
+ def get_sampler_extra_args_i2i(self, sigmas,steps,sampler_opt,latents,seed, func):
979
+ extra_params_kwargs = {}
980
+
981
+ if "sigma_min" in inspect.signature(func).parameters:
982
+ # the last sigma is zero, which isn't allowed by DPM Fast & Adaptive, so take the value before the last
983
+ extra_params_kwargs["sigma_min"] = sigmas[-2]
984
+
985
+ if "sigma_max" in inspect.signature(func).parameters:
986
+ extra_params_kwargs["sigma_max"] = sigmas[0]
987
+
988
+ if "n" in inspect.signature(func).parameters:
989
+ extra_params_kwargs["n"] = len(sigmas) - 1
990
+
991
+ if "sigma_sched" in inspect.signature(func).parameters:
992
+ extra_params_kwargs["sigma_sched"] = sigmas
993
+
994
+ if "sigmas" in inspect.signature(func).parameters:
995
+ extra_params_kwargs["sigmas"] = sigmas
996
+ if sampler_opt.get('brownian_noise', False):
997
+ noise_sampler = self.create_noise_sampler(latents, sigmas, steps,seed)
998
+ extra_params_kwargs['noise_sampler'] = noise_sampler
999
+ if sampler_opt.get('solver_type', None) == 'heun':
1000
+ extra_params_kwargs['solver_type'] = 'heun'
1001
+
1002
+ return extra_params_kwargs
1003
+
1004
+ @torch.no_grad()
1005
+ def txt2img(
1006
+ self,
1007
+ prompt: Union[str, List[str]],
1008
+ height: int = 512,
1009
+ width: int = 512,
1010
+ num_inference_steps: int = 50,
1011
+ guidance_scale: float = 7.5,
1012
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1013
+ eta: float = 0.0,
1014
+ generator: Optional[torch.Generator] = None,
1015
+ latents: Optional[torch.Tensor] = None,
1016
+ output_type: Optional[str] = "pil",
1017
+ callback_steps: Optional[int] = 1,
1018
+ upscale=False,
1019
+ upscale_x: float = 2.0,
1020
+ upscale_method: str = "bicubic",
1021
+ upscale_antialias: bool = False,
1022
+ upscale_denoising_strength: int = 0.7,
1023
+ region_map_state=None,
1024
+ sampler_name="",
1025
+ sampler_opt={},
1026
+ start_time=-1,
1027
+ timeout=180,
1028
+ latent_processing = 0,
1029
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
1030
+ seed = 0,
1031
+ sampler_name_hires= "",
1032
+ sampler_opt_hires= {},
1033
+ latent_upscale_processing = False,
1034
+ ip_adapter_image = None,
1035
+ control_img = None,
1036
+ controlnet_conditioning_scale = None,
1037
+ control_guidance_start = None,
1038
+ control_guidance_end = None,
1039
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
1040
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
1041
+ adapter_conditioning_factor: float = 1.0,
1042
+ guidance_rescale: float = 0.0,
1043
+ cross_attention_kwargs = None,
1044
+ clip_skip = None,
1045
+ long_encode = 0,
1046
+ num_images_per_prompt = 1,
1047
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
1048
+ ):
1049
+ height, width = self._default_height_width(height, width, None)
1050
+ if isinstance(sampler_name, str):
1051
+ sampler = self.get_scheduler(sampler_name)
1052
+ else:
1053
+ sampler = sampler_name
1054
+ # 1. Check inputs. Raise error if not correct
1055
+ if image_t2i_adapter is not None:
1056
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
1057
+ #print(default_height_width(self,height, width, image_t2i_adapter))
1058
+ self.check_inputs(prompt, height, width, callback_steps)
1059
+ # 2. Define call parameters
1060
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
1061
+ device = self._execution_device
1062
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1063
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1064
+ # corresponds to doing no classifier free guidance.
1065
+ '''do_classifier_free_guidance = True
1066
+ if guidance_scale <= 1.0:
1067
+ raise ValueError("has to use guidance_scale")'''
1068
+
1069
+ lora_scale = (
1070
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
1071
+ )
1072
+ self._do_classifier_free_guidance = False if guidance_scale <= 1.0 else True
1073
+ '''if guidance_scale <= 1.0:
1074
+ raise ValueError("has to use guidance_scale")'''
1075
+ # 3. Encode input prompt
1076
+
1077
+ text_embeddings, negative_prompt_embeds, text_input_ids = encode_prompt_function(
1078
+ self,
1079
+ prompt,
1080
+ device,
1081
+ num_images_per_prompt,
1082
+ self.do_classifier_free_guidance,
1083
+ negative_prompt,
1084
+ lora_scale = lora_scale,
1085
+ clip_skip = clip_skip,
1086
+ long_encode = long_encode,
1087
+ )
1088
+ if self.do_classifier_free_guidance:
1089
+ text_embeddings = torch.cat([negative_prompt_embeds, text_embeddings])
1090
+
1091
+ # 3. Encode input prompt
1092
+ #text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
1093
+ text_embeddings = text_embeddings.to(self.unet.dtype)
1094
+
1095
+ # 4. Prepare timesteps
1096
+ sigmas = self.get_sigmas(num_inference_steps, sampler_opt).to(
1097
+ text_embeddings.device, dtype=text_embeddings.dtype
1098
+ )
1099
+
1100
+ # 5. Prepare latent variables
1101
+ num_channels_latents = self.unet.config.in_channels
1102
+ latents = self.prepare_latents(
1103
+ batch_size * num_images_per_prompt,
1104
+ num_channels_latents,
1105
+ height,
1106
+ width,
1107
+ text_embeddings.dtype,
1108
+ device,
1109
+ generator,
1110
+ latents,
1111
+ )
1112
+ latents = latents * (sigmas[0]**2 + 1) ** 0.5
1113
+ #latents = latents * sigmas[0]#Nearly
1114
+ steps_denoising = len(sigmas)
1115
+ self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
1116
+ self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(
1117
+ latents.device
1118
+ )
1119
+
1120
+ region_state = encode_region_map(
1121
+ self,
1122
+ region_map_state,
1123
+ width = width,
1124
+ height = height,
1125
+ num_images_per_prompt = num_images_per_prompt,
1126
+ text_ids=text_input_ids,
1127
+ )
1128
+ if cross_attention_kwargs is None:
1129
+ cross_attention_kwargs ={}
1130
+ controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy() if isinstance(controlnet_conditioning_scale, list) else controlnet_conditioning_scale
1131
+ control_guidance_start_copy = control_guidance_start.copy() if isinstance(control_guidance_start, list) else control_guidance_start
1132
+ control_guidance_end_copy = control_guidance_end.copy() if isinstance(control_guidance_end, list) else control_guidance_end
1133
+ guess_mode = False
1134
+
1135
+ if self.controlnet is not None:
1136
+ img_control,controlnet_keep,guess_mode,controlnet_conditioning_scale = self.preprocess_controlnet(controlnet_conditioning_scale,control_guidance_start,control_guidance_end,control_img,width,height,num_inference_steps,batch_size,num_images_per_prompt)
1137
+ #print(len(controlnet_keep))
1138
+
1139
+ #controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy()
1140
+ #sp_control = 1
1141
+
1142
+
1143
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1144
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1145
+ ip_adapter_image,
1146
+ ip_adapter_image_embeds,
1147
+ device,
1148
+ batch_size * num_images_per_prompt,
1149
+ self.do_classifier_free_guidance,
1150
+ )
1151
+ # 6.1 Add image embeds for IP-Adapter
1152
+ added_cond_kwargs = (
1153
+ {"image_embeds": image_embeds}
1154
+ if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
1155
+ else None
1156
+ )
1157
+ #if controlnet_img is not None:
1158
+ #controlnet_img_processing = controlnet_img.convert("RGB")
1159
+ #transform = transforms.Compose([transforms.PILToTensor()])
1160
+ #controlnet_img_processing = transform(controlnet_img)
1161
+ #controlnet_img_processing=controlnet_img_processing.to(device=device, dtype=self.cnet.dtype)
1162
+ if latent_processing == 1:
1163
+ latents_process = [self.latent_to_image(latents,output_type)]
1164
+ #sp_find_new = None
1165
+ lst_latent_sigma = []
1166
+ step_control = -1
1167
+ adapter_state = None
1168
+ adapter_sp_count = []
1169
+ if image_t2i_adapter is not None:
1170
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,1)
1171
+ def model_fn(x, sigma):
1172
+ nonlocal step_control,lst_latent_sigma,adapter_sp_count
1173
+
1174
+ if start_time > 0 and timeout > 0:
1175
+ assert (time.time() - start_time) < timeout, "inference process timed out"
1176
+
1177
+ latent_model_input = torch.cat([x] * 2) if self.do_classifier_free_guidance else x
1178
+ region_prompt = {
1179
+ "region_state": region_state,
1180
+ "sigma": sigma[0],
1181
+ "weight_func": weight_func,
1182
+ }
1183
+ cross_attention_kwargs["region_prompt"] = region_prompt
1184
+
1185
+ #print(self.k_diffusion_model.sigma_to_t(sigma[0]))
1186
+
1187
+ if latent_model_input.dtype != text_embeddings.dtype:
1188
+ latent_model_input = latent_model_input.to(text_embeddings.dtype)
1189
+ ukwargs = {}
1190
+
1191
+ down_intrablock_additional_residuals = None
1192
+ if adapter_state is not None:
1193
+ if len(adapter_sp_count) < int( steps_denoising* adapter_conditioning_factor):
1194
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
1195
+ else:
1196
+ down_intrablock_additional_residuals = None
1197
+ sigma_string_t2i = str(sigma.item())
1198
+ if sigma_string_t2i not in adapter_sp_count:
1199
+ adapter_sp_count.append(sigma_string_t2i)
1200
+
1201
+ if self.controlnet is not None :
1202
+ sigma_string = str(sigma.item())
1203
+ if sigma_string not in lst_latent_sigma:
1204
+ #sigmas_sp = sigma.detach().clone()
1205
+ step_control+=1
1206
+ lst_latent_sigma.append(sigma_string)
1207
+
1208
+ if isinstance(controlnet_keep[step_control], list):
1209
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[step_control])]
1210
+ else:
1211
+ controlnet_cond_scale = controlnet_conditioning_scale
1212
+ if isinstance(controlnet_cond_scale, list):
1213
+ controlnet_cond_scale = controlnet_cond_scale[0]
1214
+ cond_scale = controlnet_cond_scale * controlnet_keep[step_control]
1215
+
1216
+ down_block_res_samples = None
1217
+ mid_block_res_sample = None
1218
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1219
+ latent_model_input / ((sigma**2 + 1) ** 0.5),
1220
+ self.k_diffusion_model.sigma_to_t(sigma),
1221
+ encoder_hidden_states=text_embeddings,
1222
+ controlnet_cond=img_control,
1223
+ conditioning_scale=cond_scale,
1224
+ guess_mode=guess_mode,
1225
+ return_dict=False,
1226
+ )
1227
+ if guess_mode and self.do_classifier_free_guidance:
1228
+ # Inferred ControlNet only for the conditional batch.
1229
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1230
+ # add 0 to the unconditional batch to keep it unchanged.
1231
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1232
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1233
+ ukwargs ={
1234
+ "down_block_additional_residuals": down_block_res_samples,
1235
+ "mid_block_additional_residual":mid_block_res_sample,
1236
+ }
1237
+
1238
+
1239
+ noise_pred = self.k_diffusion_model(
1240
+ latent_model_input, sigma, cond=text_embeddings,cross_attention_kwargs=cross_attention_kwargs,down_intrablock_additional_residuals=down_intrablock_additional_residuals,added_cond_kwargs=added_cond_kwargs, **ukwargs
1241
+ )
1242
+
1243
+
1244
+ if self.do_classifier_free_guidance:
1245
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1246
+ noise_pred = noise_pred_uncond + guidance_scale * (
1247
+ noise_pred_text - noise_pred_uncond
1248
+ )
1249
+ if guidance_rescale > 0.0:
1250
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1251
+ if latent_processing == 1:
1252
+ latents_process.append(self.latent_to_image(noise_pred,output_type))
1253
+ # noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=0.7)
1254
+ return noise_pred
1255
+ extra_args = self.get_sampler_extra_args_t2i(
1256
+ sigmas, eta, num_inference_steps,sampler_opt,latents,seed, sampler
1257
+ )
1258
+ latents = sampler(model_fn, latents, **extra_args)
1259
+ #latents = latents_process[0]
1260
+ #print(len(latents_process))
1261
+ self.maybe_free_model_hooks()
1262
+ torch.cuda.empty_cache()
1263
+ gc.collect()
1264
+ if upscale:
1265
+ vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
1266
+ target_height = int(height * upscale_x // vae_scale_factor )* 8
1267
+ target_width = int(width * upscale_x // vae_scale_factor)*8
1268
+ latents = torch.nn.functional.interpolate(
1269
+ latents,
1270
+ size=(
1271
+ int(target_height // vae_scale_factor),
1272
+ int(target_width // vae_scale_factor),
1273
+ ),
1274
+ mode=upscale_method,
1275
+ antialias=upscale_antialias,
1276
+ )
1277
+
1278
+ #if controlnet_img is not None:
1279
+ #controlnet_img = cv2.resize(controlnet_img,(latents.size(0), latents.size(1)))
1280
+ #controlnet_img=controlnet_img.resize((latents.size(0), latents.size(1)), Image.LANCZOS)
1281
+ latent_reisze= self.img2img(
1282
+ prompt=prompt,
1283
+ num_inference_steps=num_inference_steps,
1284
+ guidance_scale=guidance_scale,
1285
+ negative_prompt=negative_prompt,
1286
+ generator=generator,
1287
+ latents=latents,
1288
+ strength=upscale_denoising_strength,
1289
+ sampler_name=sampler_name_hires,
1290
+ sampler_opt=sampler_opt_hires,
1291
+ region_map_state = region_map_state,
1292
+ latent_processing = latent_upscale_processing,
1293
+ width = int(target_width),
1294
+ height = int(target_height),
1295
+ seed = seed,
1296
+ ip_adapter_image = ip_adapter_image,
1297
+ control_img = control_img,
1298
+ controlnet_conditioning_scale = controlnet_conditioning_scale_copy,
1299
+ control_guidance_start = control_guidance_start_copy,
1300
+ control_guidance_end = control_guidance_end_copy,
1301
+ image_t2i_adapter= image_t2i_adapter,
1302
+ adapter_conditioning_scale = adapter_conditioning_scale,
1303
+ adapter_conditioning_factor = adapter_conditioning_factor,
1304
+ guidance_rescale = guidance_rescale,
1305
+ cross_attention_kwargs = cross_attention_kwargs,
1306
+ clip_skip = clip_skip,
1307
+ long_encode = long_encode,
1308
+ num_images_per_prompt = num_images_per_prompt,
1309
+ )
1310
+ '''if latent_processing == 1:
1311
+ latents = latents_process.copy()
1312
+ images = []
1313
+ for i in latents:
1314
+ images.append(self.decode_latents(i))
1315
+ image = []
1316
+ if output_type == "pil":
1317
+ for i in images:
1318
+ image.append(self.numpy_to_pil(i))
1319
+ image[-1] = latent_reisze
1320
+ return image'''
1321
+ if latent_processing == 1:
1322
+ latents_process= latents_process+latent_reisze
1323
+ return latents_process
1324
+ torch.cuda.empty_cache()
1325
+ gc.collect()
1326
+ return latent_reisze
1327
+
1328
+ # 8. Post-processing
1329
+ '''if latent_processing == 1:
1330
+ latents = latents_process.copy()
1331
+ images = []
1332
+ for i in latents:
1333
+ images.append(self.decode_latents(i))
1334
+ image = []
1335
+ # 10. Convert to PIL
1336
+ if output_type == "pil":
1337
+ for i in images:
1338
+ image.append(self.numpy_to_pil(i))
1339
+ else:
1340
+ image = self.decode_latents(latents)
1341
+ # 10. Convert to PIL
1342
+ if output_type == "pil":
1343
+ image = self.numpy_to_pil(image)'''
1344
+ if latent_processing == 1:
1345
+ return latents_process
1346
+ return [self.latent_to_image(latents,output_type)]
1347
+
1348
+
1349
+ def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
1350
+ if isinstance(generator, list):
1351
+ image_latents = [
1352
+ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
1353
+ for i in range(image.shape[0])
1354
+ ]
1355
+ image_latents = torch.cat(image_latents, dim=0)
1356
+ else:
1357
+ image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
1358
+
1359
+ image_latents = self.vae.config.scaling_factor * image_latents
1360
+
1361
+ return image_latents
1362
+
1363
+ def prepare_mask_latents(
1364
+ self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
1365
+ ):
1366
+ # resize the mask to latents shape as we concatenate the mask to the latents
1367
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
1368
+ # and half precision
1369
+ mask = torch.nn.functional.interpolate(
1370
+ mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
1371
+ )
1372
+ mask = mask.to(device=device, dtype=dtype)
1373
+
1374
+ masked_image = masked_image.to(device=device, dtype=dtype)
1375
+
1376
+ if masked_image.shape[1] == 4:
1377
+ masked_image_latents = masked_image
1378
+ else:
1379
+ masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
1380
+
1381
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
1382
+ if mask.shape[0] < batch_size:
1383
+ if not batch_size % mask.shape[0] == 0:
1384
+ raise ValueError(
1385
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
1386
+ f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
1387
+ " of masks that you pass is divisible by the total requested batch size."
1388
+ )
1389
+ mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
1390
+ if masked_image_latents.shape[0] < batch_size:
1391
+ if not batch_size % masked_image_latents.shape[0] == 0:
1392
+ raise ValueError(
1393
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
1394
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
1395
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
1396
+ )
1397
+ masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
1398
+
1399
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
1400
+ masked_image_latents = (
1401
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
1402
+ )
1403
+
1404
+ # aligning device to prevent device errors when concatenating it with the latent model input
1405
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
1406
+ return mask, masked_image_latents
1407
+
1408
+ '''def get_image_latents(self,batch_size,image,device,dtype,generator):
1409
+ image = image.to(device=device, dtype=dtype)
1410
+
1411
+ if image.shape[1] == 4:
1412
+ image_latents = image
1413
+ else:
1414
+ image_latents = self._encode_vae_image(image=image, generator=generator)
1415
+ image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
1416
+ return image_latents'''
1417
+
1418
+ def _sigma_to_alpha_sigma_t(self, sigma):
1419
+ alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
1420
+ sigma_t = sigma * alpha_t
1421
+
1422
+ return alpha_t, sigma_t
1423
+
1424
+ def add_noise(self,init_latents_proper,noise,sigma):
1425
+ if isinstance(sigma, torch.Tensor) and sigma.numel() > 1:
1426
+ sigma,_ = sigma.sort(descending=True)
1427
+ sigma = sigma[0].item()
1428
+ #alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
1429
+ init_latents_proper = init_latents_proper + sigma * noise
1430
+ return init_latents_proper
1431
+
1432
+ def prepare_latents_inpating(
1433
+ self,
1434
+ batch_size,
1435
+ num_channels_latents,
1436
+ height,
1437
+ width,
1438
+ dtype,
1439
+ device,
1440
+ generator,
1441
+ latents=None,
1442
+ image=None,
1443
+ sigma=None,
1444
+ is_strength_max=True,
1445
+ return_noise=False,
1446
+ return_image_latents=False,
1447
+ ):
1448
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
1449
+ if isinstance(generator, list) and len(generator) != batch_size:
1450
+ raise ValueError(
1451
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
1452
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
1453
+ )
1454
+
1455
+ if (image is None or sigma is None) and not is_strength_max:
1456
+ raise ValueError(
1457
+ "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
1458
+ " However, either the image or the noise sigma has not been provided."
1459
+ )
1460
+
1461
+ if return_image_latents or (latents is None and not is_strength_max):
1462
+ image = image.to(device=device, dtype=dtype)
1463
+
1464
+ if image.shape[1] == 4:
1465
+ image_latents = image
1466
+ else:
1467
+ image_latents = self._encode_vae_image(image=image, generator=generator)
1468
+ image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
1469
+
1470
+ if latents is None:
1471
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
1472
+ # if strength is 1.0 then initialise the latents to noise, else initialise them to image + noise
1473
+ latents = noise if is_strength_max else self.add_noise(image_latents, noise, sigma)
1474
+ # if pure noise then scale the initial latents by the Scheduler's init sigma
1475
+ latents = latents * (sigma.item()**2 + 1) ** 0.5 if is_strength_max else latents
1476
+ #latents = latents * sigma.item() if is_strength_max else latents #Nearly
1477
+ else:
1478
+ noise = latents.to(device)
1479
+ latents = noise * (sigma.item()**2 + 1) ** 0.5
1480
+ #latents = noise * sigma.item() #Nearly
1481
+
1482
+ outputs = (latents,)
1483
+
1484
+ if return_noise:
1485
+ outputs += (noise,)
1486
+
1487
+ if return_image_latents:
1488
+ outputs += (image_latents,)
1489
+
1490
+ return outputs
1491
+
1492
+ @torch.no_grad()
1493
+ def inpaiting(
1494
+ self,
1495
+ prompt: Union[str, List[str]],
1496
+ height: int = 512,
1497
+ width: int = 512,
1498
+ num_inference_steps: int = 50,
1499
+ guidance_scale: float = 7.5,
1500
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1501
+ eta: float = 0.0,
1502
+ generator: Optional[torch.Generator] = None,
1503
+ latents: Optional[torch.Tensor] = None,
1504
+ output_type: Optional[str] = "pil",
1505
+ callback_steps: Optional[int] = 1,
1506
+ upscale=False,
1507
+ upscale_x: float = 2.0,
1508
+ upscale_method: str = "bicubic",
1509
+ upscale_antialias: bool = False,
1510
+ upscale_denoising_strength: int = 0.7,
1511
+ region_map_state=None,
1512
+ sampler_name="",
1513
+ sampler_opt={},
1514
+ start_time=-1,
1515
+ timeout=180,
1516
+ latent_processing = 0,
1517
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
1518
+ seed = 0,
1519
+ sampler_name_hires= "",
1520
+ sampler_opt_hires= {},
1521
+ latent_upscale_processing = False,
1522
+ ip_adapter_image = None,
1523
+ control_img = None,
1524
+ controlnet_conditioning_scale = None,
1525
+ control_guidance_start = None,
1526
+ control_guidance_end = None,
1527
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
1528
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
1529
+ adapter_conditioning_factor: float = 1.0,
1530
+ guidance_rescale: float = 0.0,
1531
+ cross_attention_kwargs = None,
1532
+ clip_skip = None,
1533
+ long_encode = 0,
1534
+ num_images_per_prompt = 1,
1535
+ image: Union[torch.Tensor, PIL.Image.Image] = None,
1536
+ mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
1537
+ masked_image_latents: torch.Tensor = None,
1538
+ padding_mask_crop: Optional[int] = None,
1539
+ strength: float = 1.0,
1540
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
1541
+ ):
1542
+ height, width = self._default_height_width(height, width, None)
1543
+ if isinstance(sampler_name, str):
1544
+ sampler = self.get_scheduler(sampler_name)
1545
+ else:
1546
+ sampler = sampler_name
1547
+ # 1. Check inputs. Raise error if not correct
1548
+ if image_t2i_adapter is not None:
1549
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
1550
+ #print(default_height_width(self,height, width, image_t2i_adapter))
1551
+ self.check_inputs(prompt, height, width, callback_steps)
1552
+ # 2. Define call parameters
1553
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
1554
+ device = self._execution_device
1555
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1556
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1557
+ # corresponds to doing no classifier free guidance.
1558
+ '''do_classifier_free_guidance = True
1559
+ if guidance_scale <= 1.0:
1560
+ raise ValueError("has to use guidance_scale")'''
1561
+
1562
+ lora_scale = (
1563
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
1564
+ )
1565
+ self._do_classifier_free_guidance = False if guidance_scale <= 1.0 else True
1566
+ '''if guidance_scale <= 1.0:
1567
+ raise ValueError("has to use guidance_scale")'''
1568
+ # 3. Encode input prompt
1569
+
1570
+ text_embeddings, negative_prompt_embeds, text_input_ids = encode_prompt_function(
1571
+ self,
1572
+ prompt,
1573
+ device,
1574
+ num_images_per_prompt,
1575
+ self.do_classifier_free_guidance,
1576
+ negative_prompt,
1577
+ lora_scale = lora_scale,
1578
+ clip_skip = clip_skip,
1579
+ long_encode = long_encode,
1580
+ )
1581
+ if self.do_classifier_free_guidance:
1582
+ text_embeddings = torch.cat([negative_prompt_embeds, text_embeddings])
1583
+
1584
+ text_embeddings = text_embeddings.to(self.unet.dtype)
1585
+
1586
+ # 4. Prepare timesteps
1587
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
1588
+ t_start = max(num_inference_steps - init_timestep, 0)
1589
+ sigmas = self.get_sigmas(num_inference_steps, sampler_opt).to(
1590
+ text_embeddings.device, dtype=text_embeddings.dtype
1591
+ )
1592
+ sigmas = sigmas[t_start:] if strength >= 0 and strength < 1.0 else sigmas
1593
+ is_strength_max = strength == 1.0
1594
+
1595
+ '''if latents is None:
1596
+ noise_inpaiting = randn_tensor((batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8), generator=generator, device=device, dtype=text_embeddings.dtype)
1597
+ else:
1598
+ noise_inpaiting = latents.to(device)'''
1599
+
1600
+
1601
+ # 5. Prepare mask, image,
1602
+ if padding_mask_crop is not None:
1603
+ crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
1604
+ resize_mode = "fill"
1605
+ else:
1606
+ crops_coords = None
1607
+ resize_mode = "default"
1608
+
1609
+ original_image = image
1610
+ init_image = self.image_processor.preprocess(
1611
+ image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
1612
+ )
1613
+ init_image = init_image.to(dtype=torch.float32)
1614
+
1615
+ # 6. Prepare latent variables
1616
+ num_channels_latents = self.vae.config.latent_channels
1617
+ num_channels_unet = self.unet.config.in_channels
1618
+ return_image_latents = num_channels_unet == 4
1619
+
1620
+ image_latents = None
1621
+ noise_inpaiting = None
1622
+
1623
+ '''latents = self.prepare_latents(
1624
+ batch_size * num_images_per_prompt,
1625
+ num_channels_unet,
1626
+ height,
1627
+ width,
1628
+ text_embeddings.dtype,
1629
+ device,
1630
+ generator,
1631
+ latents,
1632
+ )'''
1633
+ #latents = latents * sigmas[0]
1634
+
1635
+ latents_outputs = self.prepare_latents_inpating(
1636
+ batch_size * num_images_per_prompt,
1637
+ num_channels_latents,
1638
+ height,
1639
+ width,
1640
+ text_embeddings.dtype,
1641
+ device,
1642
+ generator,
1643
+ latents,
1644
+ image=init_image,
1645
+ sigma=sigmas[0],
1646
+ is_strength_max=is_strength_max,
1647
+ return_noise=True,
1648
+ return_image_latents=return_image_latents,
1649
+ )
1650
+
1651
+ if return_image_latents:
1652
+ latents, noise_inpaiting, image_latents = latents_outputs
1653
+ else:
1654
+ latents, noise_inpaiting = latents_outputs
1655
+
1656
+ # 7. Prepare mask latent variables
1657
+ mask_condition = self.mask_processor.preprocess(
1658
+ mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
1659
+ )
1660
+
1661
+ if masked_image_latents is None:
1662
+ masked_image = init_image * (mask_condition < 0.5)
1663
+ else:
1664
+ masked_image = masked_image_latents
1665
+
1666
+ mask, masked_image_latents = self.prepare_mask_latents(
1667
+ mask_condition,
1668
+ masked_image,
1669
+ batch_size * num_images_per_prompt,
1670
+ height,
1671
+ width,
1672
+ text_embeddings.dtype,
1673
+ device,
1674
+ generator,
1675
+ self.do_classifier_free_guidance,
1676
+ )
1677
+
1678
+ # 8. Check that sizes of mask, masked image and latents match
1679
+ if num_channels_unet == 9:
1680
+ # default case for runwayml/stable-diffusion-inpainting
1681
+ num_channels_mask = mask.shape[1]
1682
+ num_channels_masked_image = masked_image_latents.shape[1]
1683
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
1684
+ raise ValueError(
1685
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
1686
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
1687
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1688
+ f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
1689
+ " `pipeline.unet` or your `mask_image` or `image` input."
1690
+ )
1691
+ elif num_channels_unet != 4:
1692
+ raise ValueError(
1693
+ f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
1694
+ )
1695
+
1696
+ steps_denoising = len(sigmas)
1697
+ self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
1698
+ self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(
1699
+ latents.device
1700
+ )
1701
+
1702
+ region_state = encode_region_map(
1703
+ self,
1704
+ region_map_state,
1705
+ width = width,
1706
+ height = height,
1707
+ num_images_per_prompt = num_images_per_prompt,
1708
+ text_ids=text_input_ids,
1709
+ )
1710
+ if cross_attention_kwargs is None:
1711
+ cross_attention_kwargs ={}
1712
+ controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy() if isinstance(controlnet_conditioning_scale, list) else controlnet_conditioning_scale
1713
+ control_guidance_start_copy = control_guidance_start.copy() if isinstance(control_guidance_start, list) else control_guidance_start
1714
+ control_guidance_end_copy = control_guidance_end.copy() if isinstance(control_guidance_end, list) else control_guidance_end
1715
+ guess_mode = False
1716
+
1717
+ if self.controlnet is not None:
1718
+ img_control,controlnet_keep,guess_mode,controlnet_conditioning_scale = self.preprocess_controlnet(controlnet_conditioning_scale,control_guidance_start,control_guidance_end,control_img,width,height,num_inference_steps,batch_size,num_images_per_prompt)
1719
+ #print(len(controlnet_keep))
1720
+
1721
+ #controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy()
1722
+ #sp_control = 1
1723
+
1724
+
1725
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1726
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1727
+ ip_adapter_image,
1728
+ ip_adapter_image_embeds,
1729
+ device,
1730
+ batch_size * num_images_per_prompt,
1731
+ self.do_classifier_free_guidance,
1732
+ )
1733
+ # 6.1 Add image embeds for IP-Adapter
1734
+ added_cond_kwargs = (
1735
+ {"image_embeds": image_embeds}
1736
+ if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
1737
+ else None
1738
+ )
1739
+ #if controlnet_img is not None:
1740
+ #controlnet_img_processing = controlnet_img.convert("RGB")
1741
+ #transform = transforms.Compose([transforms.PILToTensor()])
1742
+ #controlnet_img_processing = transform(controlnet_img)
1743
+ #controlnet_img_processing=controlnet_img_processing.to(device=device, dtype=self.cnet.dtype)
1744
+ if latent_processing == 1:
1745
+ latents_process = [self.latent_to_image(latents,output_type)]
1746
+ #sp_find_new = None
1747
+ lst_latent_sigma = []
1748
+ step_control = -1
1749
+ adapter_state = None
1750
+ adapter_sp_count = []
1751
+ flag_add_noise_inpaiting = 0
1752
+ if image_t2i_adapter is not None:
1753
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,1)
1754
+ def model_fn(x, sigma):
1755
+ nonlocal step_control,lst_latent_sigma,adapter_sp_count,flag_add_noise_inpaiting
1756
+
1757
+ if start_time > 0 and timeout > 0:
1758
+ assert (time.time() - start_time) < timeout, "inference process timed out"
1759
+
1760
+ if num_channels_unet == 4 and flag_add_noise_inpaiting:
1761
+ init_latents_proper = image_latents
1762
+ if self.do_classifier_free_guidance:
1763
+ init_mask, _ = mask.chunk(2)
1764
+ else:
1765
+ init_mask = mask
1766
+
1767
+ if sigma.item() > sigmas[-1].item():
1768
+ #indices = torch.where(sigmas == sigma.item())[0]
1769
+ #sigma_next = sigmas[indices+1]
1770
+ alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma.item())
1771
+ init_latents_proper = alpha_t * init_latents_proper + sigma_t * noise_inpaiting
1772
+
1773
+ rate_latent_timestep_sigma = (sigma**2 + 1) ** 0.5
1774
+
1775
+ x = ((1 - init_mask) * init_latents_proper + init_mask * x/ rate_latent_timestep_sigma ) * rate_latent_timestep_sigma
1776
+
1777
+ non_inpainting_latent_model_input = (
1778
+ torch.cat([x] * 2) if self.do_classifier_free_guidance else x
1779
+ )
1780
+
1781
+ inpainting_latent_model_input = torch.cat(
1782
+ [non_inpainting_latent_model_input,mask, masked_image_latents], dim=1
1783
+ ) if num_channels_unet == 9 else non_inpainting_latent_model_input
1784
+ region_prompt = {
1785
+ "region_state": region_state,
1786
+ "sigma": sigma[0],
1787
+ "weight_func": weight_func,
1788
+ }
1789
+ cross_attention_kwargs["region_prompt"] = region_prompt
1790
+
1791
+ #print(self.k_diffusion_model.sigma_to_t(sigma[0]))
1792
+
1793
+ if non_inpainting_latent_model_input.dtype != text_embeddings.dtype:
1794
+ non_inpainting_latent_model_input = non_inpainting_latent_model_input.to(text_embeddings.dtype)
1795
+
1796
+ if inpainting_latent_model_input.dtype != text_embeddings.dtype:
1797
+ inpainting_latent_model_input = inpainting_latent_model_input.to(text_embeddings.dtype)
1798
+ ukwargs = {}
1799
+
1800
+ down_intrablock_additional_residuals = None
1801
+ if adapter_state is not None:
1802
+ if len(adapter_sp_count) < int( steps_denoising* adapter_conditioning_factor):
1803
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
1804
+ else:
1805
+ down_intrablock_additional_residuals = None
1806
+ sigma_string_t2i = str(sigma.item())
1807
+ if sigma_string_t2i not in adapter_sp_count:
1808
+ adapter_sp_count.append(sigma_string_t2i)
1809
+
1810
+ if self.controlnet is not None :
1811
+ sigma_string = str(sigma.item())
1812
+ if sigma_string not in lst_latent_sigma:
1813
+ #sigmas_sp = sigma.detach().clone()
1814
+ step_control+=1
1815
+ lst_latent_sigma.append(sigma_string)
1816
+
1817
+ if isinstance(controlnet_keep[step_control], list):
1818
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[step_control])]
1819
+ else:
1820
+ controlnet_cond_scale = controlnet_conditioning_scale
1821
+ if isinstance(controlnet_cond_scale, list):
1822
+ controlnet_cond_scale = controlnet_cond_scale[0]
1823
+ cond_scale = controlnet_cond_scale * controlnet_keep[step_control]
1824
+
1825
+ down_block_res_samples = None
1826
+ mid_block_res_sample = None
1827
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1828
+ non_inpainting_latent_model_input / ((sigma**2 + 1) ** 0.5),
1829
+ self.k_diffusion_model.sigma_to_t(sigma),
1830
+ encoder_hidden_states=text_embeddings,
1831
+ controlnet_cond=img_control,
1832
+ conditioning_scale=cond_scale,
1833
+ guess_mode=guess_mode,
1834
+ return_dict=False,
1835
+ )
1836
+ if guess_mode and self.do_classifier_free_guidance:
1837
+ # Inferred ControlNet only for the conditional batch.
1838
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1839
+ # add 0 to the unconditional batch to keep it unchanged.
1840
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1841
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1842
+ ukwargs ={
1843
+ "down_block_additional_residuals": down_block_res_samples,
1844
+ "mid_block_additional_residual":mid_block_res_sample,
1845
+ }
1846
+
1847
+
1848
+ noise_pred = self.k_diffusion_model(
1849
+ inpainting_latent_model_input, sigma, cond=text_embeddings,cross_attention_kwargs=cross_attention_kwargs,down_intrablock_additional_residuals=down_intrablock_additional_residuals,added_cond_kwargs=added_cond_kwargs, **ukwargs
1850
+ )
1851
+
1852
+
1853
+ if self.do_classifier_free_guidance:
1854
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1855
+ noise_pred = noise_pred_uncond + guidance_scale * (
1856
+ noise_pred_text - noise_pred_uncond
1857
+ )
1858
+ if guidance_rescale > 0.0:
1859
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1860
+
1861
+
1862
+ if latent_processing == 1:
1863
+ latents_process.append(self.latent_to_image(noise_pred,output_type))
1864
+ flag_add_noise_inpaiting = 1
1865
+ return noise_pred
1866
+ extra_args = self.get_sampler_extra_args_t2i(
1867
+ sigmas, eta, num_inference_steps,sampler_opt,latents,seed, sampler
1868
+ )
1869
+ latents = sampler(model_fn, latents, **extra_args)
1870
+ #latents = latents_process[0]
1871
+ #print(len(latents_process))
1872
+ self.maybe_free_model_hooks()
1873
+ torch.cuda.empty_cache()
1874
+ gc.collect()
1875
+ if upscale:
1876
+ vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
1877
+ target_height = int(height * upscale_x // vae_scale_factor )* 8
1878
+ target_width = int(width * upscale_x // vae_scale_factor)*8
1879
+ latents = torch.nn.functional.interpolate(
1880
+ latents,
1881
+ size=(
1882
+ int(target_height // vae_scale_factor),
1883
+ int(target_width // vae_scale_factor),
1884
+ ),
1885
+ mode=upscale_method,
1886
+ antialias=upscale_antialias,
1887
+ )
1888
+
1889
+ #if controlnet_img is not None:
1890
+ #controlnet_img = cv2.resize(controlnet_img,(latents.size(0), latents.size(1)))
1891
+ #controlnet_img=controlnet_img.resize((latents.size(0), latents.size(1)), Image.LANCZOS)
1892
+ latent_reisze= self.img2img(
1893
+ prompt=prompt,
1894
+ num_inference_steps=num_inference_steps,
1895
+ guidance_scale=guidance_scale,
1896
+ negative_prompt=negative_prompt,
1897
+ generator=generator,
1898
+ latents=latents,
1899
+ strength=upscale_denoising_strength,
1900
+ sampler_name=sampler_name_hires,
1901
+ sampler_opt=sampler_opt_hires,
1902
+ region_map_state = region_map_state,
1903
+ latent_processing = latent_upscale_processing,
1904
+ width = int(target_width),
1905
+ height = int(target_height),
1906
+ seed = seed,
1907
+ ip_adapter_image = ip_adapter_image,
1908
+ control_img = control_img,
1909
+ controlnet_conditioning_scale = controlnet_conditioning_scale_copy,
1910
+ control_guidance_start = control_guidance_start_copy,
1911
+ control_guidance_end = control_guidance_end_copy,
1912
+ image_t2i_adapter= image_t2i_adapter,
1913
+ adapter_conditioning_scale = adapter_conditioning_scale,
1914
+ adapter_conditioning_factor = adapter_conditioning_factor,
1915
+ guidance_rescale = guidance_rescale,
1916
+ cross_attention_kwargs = cross_attention_kwargs,
1917
+ clip_skip = clip_skip,
1918
+ long_encode = long_encode,
1919
+ num_images_per_prompt = num_images_per_prompt,
1920
+ )
1921
+ '''if latent_processing == 1:
1922
+ latents = latents_process.copy()
1923
+ images = []
1924
+ for i in latents:
1925
+ images.append(self.decode_latents(i))
1926
+ image = []
1927
+ if output_type == "pil":
1928
+ for i in images:
1929
+ image.append(self.numpy_to_pil(i))
1930
+ image[-1] = latent_reisze
1931
+ return image'''
1932
+ if latent_processing == 1:
1933
+ latents_process= latents_process+latent_reisze
1934
+ return latents_process
1935
+ torch.cuda.empty_cache()
1936
+ gc.collect()
1937
+ return latent_reisze
1938
+
1939
+ # 8. Post-processing
1940
+ '''if latent_processing == 1:
1941
+ latents = latents_process.copy()
1942
+ images = []
1943
+ for i in latents:
1944
+ images.append(self.decode_latents(i))
1945
+ image = []
1946
+ # 10. Convert to PIL
1947
+ if output_type == "pil":
1948
+ for i in images:
1949
+ image.append(self.numpy_to_pil(i))
1950
+ else:
1951
+ image = self.decode_latents(latents)
1952
+ # 10. Convert to PIL
1953
+ if output_type == "pil":
1954
+ image = self.numpy_to_pil(image)'''
1955
+ if latent_processing == 1:
1956
+ return latents_process
1957
+ return [self.latent_to_image(latents,output_type)]
1958
+
1959
+
1960
+
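The `model_fn` above keeps known image regions locked to the (re-noised) source latents on every sampler call when the UNet has only 4 input channels. Below is a minimal standalone sketch of that blending step, assuming k-diffusion's `sqrt(sigma^2 + 1)` latent scaling; the function and argument names are illustrative, not part of the pipeline's API.

```python
import torch

def blend_inpaint_latents(x, image_latents, mask, noise, sigma, final_sigma):
    # Sketch of the masked-latent blending used for 4-channel (non-inpainting) UNets.
    if sigma > final_sigma:
        # Re-noise the clean image latents to the current noise level.
        alpha_t = 1.0 / (sigma**2 + 1.0) ** 0.5
        sigma_t = sigma * alpha_t
        image_latents = alpha_t * image_latents + sigma_t * noise

    # Undo the k-diffusion input scaling, blend unmasked regions from the
    # source image with the current sample, then restore the scaling.
    rate = (sigma**2 + 1.0) ** 0.5
    return ((1 - mask) * image_latents + mask * (x / rate)) * rate
```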
modules/preprocessing_segmentation.py ADDED
@@ -0,0 +1,47 @@
1
+ import torch
2
+ import os
3
+ from PIL import Image
4
+ import numpy as np
5
+ from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
6
+ import random
7
+
8
+ lst_model_segmentation = {
9
+ "Convnet tiny": "openmmlab/upernet-convnext-tiny",
10
+ "Convnet small": "openmmlab/upernet-convnext-small",
11
+ "Convnet base": "openmmlab/upernet-convnext-base",
12
+ "Convnet large": "openmmlab/upernet-convnext-large",
13
+ "Convnet xlarge": "openmmlab/upernet-convnext-xlarge",
14
+ "Swin tiny": "openmmlab/upernet-swin-tiny",
15
+ "Swin small": "openmmlab/upernet-swin-small",
16
+ "Swin base": "openmmlab/upernet-swin-base",
17
+ "Swin large": "openmmlab/upernet-swin-large",
18
+ }
19
+
20
+ def preprocessing_segmentation(method,image):
21
+ global lst_model_segmentation
22
+ method = lst_model_segmentation[method]
23
+ device = 'cpu'
24
+ if torch.cuda.is_available():
25
+ device = 'cuda'
26
+ image_processor = AutoImageProcessor.from_pretrained(method)
27
+ image_segmentor = UperNetForSemanticSegmentation.from_pretrained(method).to(device)
28
+
29
+ pixel_values = image_processor(image, return_tensors="pt").pixel_values.to(device)
30
+ with torch.no_grad():
31
+ outputs = image_segmentor(pixel_values)
32
+ seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
33
+ color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3
34
+
35
+ seg = seg.to('cpu')
36
+ unique_values = torch.unique(seg)
37
+
38
+ lst_color = []
39
+ for i in unique_values:
40
+ color = [random.randrange(0,256), random.randrange(0,256), random.randrange(0,256)]
41
+ while color in lst_color:
42
+ color = [random.randrange(0,256), random.randrange(0,256), random.randrange(0,256)]
43
+ color_seg[seg == i, :] = color
44
+ lst_color.append(color)
45
+ color_seg = color_seg.astype(np.uint8)
46
+ control_image = Image.fromarray(color_seg)
47
+ return control_image
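For reference, `preprocessing_segmentation` maps one of the keys in `lst_model_segmentation` to an `openmmlab/upernet-*` checkpoint and returns a randomly colorized per-class segmentation map as a PIL image. A hedged usage sketch follows; the import path and the input file name are assumptions based on the repository layout.

```python
from PIL import Image
from modules.preprocessing_segmentation import preprocessing_segmentation  # assumed import path

image = Image.open("input.png").convert("RGB")                    # hypothetical input image
control_image = preprocessing_segmentation("Convnet tiny", image)
control_image.save("segmentation_control.png")                    # usable as a segmentation condition
```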
modules/prompt_parser.py ADDED
@@ -0,0 +1,392 @@
1
+
2
+ import re
3
+ import math
4
+ import numpy as np
5
+ import torch
6
+
7
+ # Code from https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/8e2aeee4a127b295bfc880800e4a312e0f049b85, modified.
8
+
9
+ class PromptChunk:
10
+ """
11
+ This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt.
12
+ If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary.
13
+ Each PromptChunk contains an exact number of tokens - 77, which includes one each for the start and end tokens,
14
+ so just 75 tokens come from the prompt.
15
+ """
16
+
17
+ def __init__(self):
18
+ self.tokens = []
19
+ self.multipliers = []
20
+ self.fixes = []
21
+
22
+
23
+ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
24
+ """A PyTorch module that wraps a FrozenCLIPEmbedder module. It enhances FrozenCLIPEmbedder, making it possible to
25
+ have unlimited prompt length and assign weights to tokens in prompt.
26
+ """
27
+
28
+ def __init__(self, text_encoder, enable_emphasis=True):
29
+ super().__init__()
30
+
31
+ self.device = lambda: text_encoder.device
32
+ self.enable_emphasis = enable_emphasis
33
+ """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation,
34
+ depending on model."""
35
+
36
+ self.chunk_length = 75
37
+
38
+ def empty_chunk(self):
39
+ """creates an empty PromptChunk and returns it"""
40
+
41
+ chunk = PromptChunk()
42
+ chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
43
+ chunk.multipliers = [1.0] * (self.chunk_length + 2)
44
+ return chunk
45
+
46
+ def get_target_prompt_token_count(self, token_count):
47
+ """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented"""
48
+
49
+ return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length
50
+
51
+ def tokenize_line(self, line):
52
+ """
53
+ this transforms a single prompt into a list of PromptChunk objects - as many as needed to
54
+ represent the prompt.
55
+ Returns the list and the total number of tokens in the prompt.
56
+ """
57
+
58
+ if self.enable_emphasis:
59
+ parsed = parse_prompt_attention(line)
60
+ else:
61
+ parsed = [[line, 1.0]]
62
+
63
+ tokenized = self.tokenize([text for text, _ in parsed])
64
+
65
+ chunks = []
66
+ chunk = PromptChunk()
67
+ token_count = 0
68
+ last_comma = -1
69
+
70
+ def next_chunk(is_last=False):
71
+ """puts current chunk into the list of results and produces the next one - empty;
72
+ if is_last is true, the <end-of-text> tokens at the end won't add to token_count"""
73
+ nonlocal token_count
74
+ nonlocal last_comma
75
+ nonlocal chunk
76
+
77
+ if is_last:
78
+ token_count += len(chunk.tokens)
79
+ else:
80
+ token_count += self.chunk_length
81
+
82
+ to_add = self.chunk_length - len(chunk.tokens)
83
+ if to_add > 0:
84
+ chunk.tokens += [self.id_end] * to_add
85
+ chunk.multipliers += [1.0] * to_add
86
+
87
+ chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
88
+ chunk.multipliers = [1.0] + chunk.multipliers + [1.0]
89
+
90
+ last_comma = -1
91
+ chunks.append(chunk)
92
+ chunk = PromptChunk()
93
+
94
+ comma_padding_backtrack = 20 # default value in https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/6cff4401824299a983c8e13424018efc347b4a2b/modules/shared.py#L410
95
+ for tokens, (text, weight) in zip(tokenized, parsed):
96
+ if text == "BREAK" and weight == -1:
97
+ next_chunk()
98
+ continue
99
+
100
+ position = 0
101
+ while position < len(tokens):
102
+ token = tokens[position]
103
+
104
+ if token == self.comma_token:
105
+ last_comma = len(chunk.tokens)
106
+
107
+ # this is when we are at the end of the allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
108
+ # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next.
109
+ elif (
110
+ comma_padding_backtrack != 0
111
+ and len(chunk.tokens) == self.chunk_length
112
+ and last_comma != -1
113
+ and len(chunk.tokens) - last_comma <= comma_padding_backtrack
114
+ ):
115
+ break_location = last_comma + 1
116
+
117
+ reloc_tokens = chunk.tokens[break_location:]
118
+ reloc_mults = chunk.multipliers[break_location:]
119
+
120
+ chunk.tokens = chunk.tokens[:break_location]
121
+ chunk.multipliers = chunk.multipliers[:break_location]
122
+
123
+ next_chunk()
124
+ chunk.tokens = reloc_tokens
125
+ chunk.multipliers = reloc_mults
126
+
127
+ if len(chunk.tokens) == self.chunk_length:
128
+ next_chunk()
129
+
130
+ chunk.tokens.append(token)
131
+ chunk.multipliers.append(weight)
132
+ position += 1
133
+
134
+ if len(chunk.tokens) > 0 or len(chunks) == 0:
135
+ next_chunk(is_last=True)
136
+
137
+ return chunks, token_count
138
+
139
+ def process_texts(self, texts):
140
+ """
141
+ Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum
142
+ length, in tokens, of all texts.
143
+ """
144
+
145
+ token_count = 0
146
+
147
+ cache = {}
148
+ batch_chunks = []
149
+ for line in texts:
150
+ if line in cache:
151
+ chunks = cache[line]
152
+ else:
153
+ chunks, current_token_count = self.tokenize_line(line)
154
+ token_count = max(current_token_count, token_count)
155
+
156
+ cache[line] = chunks
157
+
158
+ batch_chunks.append(chunks)
159
+
160
+ return batch_chunks, token_count
161
+
162
+ def forward(self, texts):
163
+ """
164
+ Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts.
165
+ Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will
166
+ be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, and for SD2 it's 1024.
167
+ An example shape returned by this function can be: (2, 77, 768).
168
+ Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element
169
+ is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
170
+ """
171
+
172
+ batch_chunks, token_count = self.process_texts(texts)
173
+ chunk_count = max([len(x) for x in batch_chunks])
174
+
175
+ zs = []
176
+ ts = []
177
+ for i in range(chunk_count):
178
+ batch_chunk = [
179
+ chunks[i] if i < len(chunks) else self.empty_chunk()
180
+ for chunks in batch_chunks
181
+ ]
182
+
183
+ tokens = [x.tokens for x in batch_chunk]
184
+ multipliers = [x.multipliers for x in batch_chunk]
185
+ # self.embeddings.fixes = [x.fixes for x in batch_chunk]
186
+
187
+ # for fixes in self.embeddings.fixes:
188
+ # for position, embedding in fixes:
189
+ # used_embeddings[embedding.name] = embedding
190
+
191
+ z = self.process_tokens(tokens, multipliers)
192
+ zs.append(z)
193
+ ts.append(tokens)
194
+
195
+ return np.hstack(ts), torch.hstack(zs)
196
+
197
+ def process_tokens(self, remade_batch_tokens, batch_multipliers):
198
+ """
199
+ sends a single prompt chunk to be encoded by the transformer network.
200
+ remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually
201
+ there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens.
202
+ Multipliers are used to give more or less weight to the outputs of transformers network. Each multiplier
203
+ corresponds to one token.
204
+ """
205
+ tokens = torch.asarray(remade_batch_tokens).to(self.device())
206
+
207
+ # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones.
208
+ if self.id_end != self.id_pad:
209
+ for batch_pos in range(len(remade_batch_tokens)):
210
+ index = remade_batch_tokens[batch_pos].index(self.id_end)
211
+ tokens[batch_pos, index + 1 : tokens.shape[1]] = self.id_pad
212
+
213
+ z = self.encode_with_transformers(tokens)
214
+
215
+ # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
216
+ batch_multipliers = torch.asarray(batch_multipliers).to(self.device())
217
+ original_mean = z.mean()
218
+ z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
219
+ new_mean = z.mean()
220
+ z = z * (original_mean / new_mean)
221
+
222
+ return z
223
+
224
+
225
+ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
226
+ def __init__(self, tokenizer, text_encoder,CLIP_stop_at_last_layers):
227
+ super().__init__(text_encoder)
228
+ self.tokenizer = tokenizer
229
+ self.text_encoder = text_encoder
230
+ self.CLIP_stop_at_last_layers = CLIP_stop_at_last_layers
231
+
232
+ vocab = self.tokenizer.get_vocab()
233
+
234
+ self.comma_token = vocab.get(",</w>", None)
235
+
236
+ self.token_mults = {}
237
+ tokens_with_parens = [
238
+ (k, v)
239
+ for k, v in vocab.items()
240
+ if "(" in k or ")" in k or "[" in k or "]" in k
241
+ ]
242
+ for text, ident in tokens_with_parens:
243
+ mult = 1.0
244
+ for c in text:
245
+ if c == "[":
246
+ mult /= 1.1
247
+ if c == "]":
248
+ mult *= 1.1
249
+ if c == "(":
250
+ mult *= 1.1
251
+ if c == ")":
252
+ mult /= 1.1
253
+
254
+ if mult != 1.0:
255
+ self.token_mults[ident] = mult
256
+
257
+ self.id_start = self.tokenizer.bos_token_id
258
+ self.id_end = self.tokenizer.eos_token_id
259
+ self.id_pad = self.id_end
260
+
261
+ def tokenize(self, texts):
262
+ tokenized = self.tokenizer(
263
+ texts, truncation=False, add_special_tokens=False
264
+ )["input_ids"]
265
+
266
+ return tokenized
267
+
268
+ def encode_with_transformers(self, tokens):
269
+ CLIP_stop_at_last_layers = self.CLIP_stop_at_last_layers
270
+ tokens = tokens.to(self.text_encoder.device)
271
+ outputs = self.text_encoder(tokens, output_hidden_states=True)
272
+
273
+ if CLIP_stop_at_last_layers > 1:
274
+ z = outputs.hidden_states[-CLIP_stop_at_last_layers]
275
+ z = self.text_encoder.text_model.final_layer_norm(z)
276
+ else:
277
+ z = outputs.last_hidden_state
278
+
279
+ return z
280
+
281
+
282
+ re_attention = re.compile(
283
+ r"""
284
+ \\\(|
285
+ \\\)|
286
+ \\\[|
287
+ \\]|
288
+ \\\\|
289
+ \\|
290
+ \(|
291
+ \[|
292
+ :([+-]?[.\d]+)\)|
293
+ \)|
294
+ ]|
295
+ [^\\()\[\]:]+|
296
+ :
297
+ """,
298
+ re.X,
299
+ )
300
+
301
+ re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
302
+
303
+
304
+ def parse_prompt_attention(text):
305
+ """
306
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
307
+ Accepted tokens are:
308
+ (abc) - increases attention to abc by a multiplier of 1.1
309
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
310
+ [abc] - decreases attention to abc by a multiplier of 1.1
311
+ \( - literal character '('
312
+ \[ - literal character '['
313
+ \) - literal character ')'
314
+ \] - literal character ']'
315
+ \\ - literal character '\'
316
+ anything else - just text
317
+
318
+ >>> parse_prompt_attention('normal text')
319
+ [['normal text', 1.0]]
320
+ >>> parse_prompt_attention('an (important) word')
321
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
322
+ >>> parse_prompt_attention('(unbalanced')
323
+ [['unbalanced', 1.1]]
324
+ >>> parse_prompt_attention('\(literal\]')
325
+ [['(literal]', 1.0]]
326
+ >>> parse_prompt_attention('(unnecessary)(parens)')
327
+ [['unnecessaryparens', 1.1]]
328
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
329
+ [['a ', 1.0],
330
+ ['house', 1.5730000000000004],
331
+ [' ', 1.1],
332
+ ['on', 1.0],
333
+ [' a ', 1.1],
334
+ ['hill', 0.55],
335
+ [', sun, ', 1.1],
336
+ ['sky', 1.4641000000000006],
337
+ ['.', 1.1]]
338
+ """
339
+
340
+ res = []
341
+ round_brackets = []
342
+ square_brackets = []
343
+
344
+ round_bracket_multiplier = 1.1
345
+ square_bracket_multiplier = 1 / 1.1
346
+
347
+ def multiply_range(start_position, multiplier):
348
+ for p in range(start_position, len(res)):
349
+ res[p][1] *= multiplier
350
+
351
+ for m in re_attention.finditer(text):
352
+ text = m.group(0)
353
+ weight = m.group(1)
354
+
355
+ if text.startswith("\\"):
356
+ res.append([text[1:], 1.0])
357
+ elif text == "(":
358
+ round_brackets.append(len(res))
359
+ elif text == "[":
360
+ square_brackets.append(len(res))
361
+ elif weight is not None and len(round_brackets) > 0:
362
+ multiply_range(round_brackets.pop(), float(weight))
363
+ elif text == ")" and len(round_brackets) > 0:
364
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
365
+ elif text == "]" and len(square_brackets) > 0:
366
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
367
+ else:
368
+ parts = re.split(re_break, text)
369
+ for i, part in enumerate(parts):
370
+ if i > 0:
371
+ res.append(["BREAK", -1])
372
+ res.append([part, 1.0])
373
+
374
+ for pos in round_brackets:
375
+ multiply_range(pos, round_bracket_multiplier)
376
+
377
+ for pos in square_brackets:
378
+ multiply_range(pos, square_bracket_multiplier)
379
+
380
+ if len(res) == 0:
381
+ res = [["", 1.0]]
382
+
383
+ # merge runs of identical weights
384
+ i = 0
385
+ while i + 1 < len(res):
386
+ if res[i][1] == res[i + 1][1]:
387
+ res[i][0] += res[i + 1][0]
388
+ res.pop(i + 1)
389
+ else:
390
+ i += 1
391
+
392
+ return res
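The classes above reproduce the AUTOMATIC1111 prompt-weighting behaviour on top of diffusers' tokenizer and text encoder. A rough usage sketch, assuming `pipe` is an already-loaded Stable Diffusion 1.x pipeline (that variable, and the 768-dim hidden size noted below, are assumptions rather than something this file guarantees):

```python
import torch
from modules.prompt_parser import FrozenCLIPEmbedderWithCustomWords, parse_prompt_attention

# Weighted-prompt parsing: (word:1.2) boosts, [word] attenuates by a factor of 1/1.1.
print(parse_prompt_attention("a (detailed:1.2) portrait, [blurry]"))
# -> [['a ', 1.0], ['detailed', 1.2], [' portrait, ', 1.0], ['blurry', 0.909...]]

# Unlimited-length weighted encoding with a clip-skip of 2.
clip = FrozenCLIPEmbedderWithCustomWords(pipe.tokenizer, pipe.text_encoder, CLIP_stop_at_last_layers=2)
with torch.no_grad():
    token_ids, embeddings = clip(["a (detailed:1.2) portrait, [blurry]"])
# embeddings: (batch, 77 * n_chunks, hidden_size), e.g. (1, 77, 768) for SD1 text encoders.
```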
modules/safe.py ADDED
@@ -0,0 +1,188 @@
1
+ # this code is adapted from the script contributed by anon from /h/
2
+ # modified, from https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/6cff4401824299a983c8e13424018efc347b4a2b/modules/safe.py
3
+
4
+ import io
5
+ import pickle
6
+ import collections
7
+ import sys
8
+ import traceback
9
+
10
+ import torch
11
+ import numpy
12
+ import _codecs
13
+ import zipfile
14
+ import re
15
+
16
+
17
+ # PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage
18
+ TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage
19
+
20
+
21
+ def encode(*args):
22
+ out = _codecs.encode(*args)
23
+ return out
24
+
25
+
26
+ class RestrictedUnpickler(pickle.Unpickler):
27
+ extra_handler = None
28
+
29
+ def persistent_load(self, saved_id):
30
+ assert saved_id[0] == 'storage'
31
+ return TypedStorage()
32
+
33
+ def find_class(self, module, name):
34
+ if self.extra_handler is not None:
35
+ res = self.extra_handler(module, name)
36
+ if res is not None:
37
+ return res
38
+
39
+ if module == 'collections' and name == 'OrderedDict':
40
+ return getattr(collections, name)
41
+ if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter', '_rebuild_device_tensor_from_numpy']:
42
+ return getattr(torch._utils, name)
43
+ if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage', 'float32']:
44
+ return getattr(torch, name)
45
+ if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
46
+ return getattr(torch.nn.modules.container, name)
47
+ if module == 'numpy.core.multiarray' and name in ['scalar', '_reconstruct']:
48
+ return getattr(numpy.core.multiarray, name)
49
+ if module == 'numpy' and name in ['dtype', 'ndarray']:
50
+ return getattr(numpy, name)
51
+ if module == '_codecs' and name == 'encode':
52
+ return encode
53
+ if module == "pytorch_lightning.callbacks" and name == 'model_checkpoint':
54
+ import pytorch_lightning.callbacks
55
+ return pytorch_lightning.callbacks.model_checkpoint
56
+ if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint':
57
+ import pytorch_lightning.callbacks.model_checkpoint
58
+ return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint
59
+ if module == "__builtin__" and name == 'set':
60
+ return set
61
+
62
+ # Forbid everything else.
63
+ raise Exception(f"global '{module}/{name}' is forbidden")
64
+
65
+
66
+ # Regular expression that accepts 'dirname/version', 'dirname/data.pkl', and 'dirname/data/<number>'
67
+ allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|(data\.pkl))$")
68
+ data_pkl_re = re.compile(r"^([^/]+)/data\.pkl$")
69
+
70
+ def check_zip_filenames(filename, names):
71
+ for name in names:
72
+ if allowed_zip_names_re.match(name):
73
+ continue
74
+
75
+ raise Exception(f"bad file inside {filename}: {name}")
76
+
77
+
78
+ def check_pt(filename, extra_handler):
79
+ try:
80
+
81
+ # new pytorch format is a zip file
82
+ with zipfile.ZipFile(filename) as z:
83
+ check_zip_filenames(filename, z.namelist())
84
+
85
+ # find filename of data.pkl in zip file: '<directory name>/data.pkl'
86
+ data_pkl_filenames = [f for f in z.namelist() if data_pkl_re.match(f)]
87
+ if len(data_pkl_filenames) == 0:
88
+ raise Exception(f"data.pkl not found in {filename}")
89
+ if len(data_pkl_filenames) > 1:
90
+ raise Exception(f"Multiple data.pkl found in {filename}")
91
+ with z.open(data_pkl_filenames[0]) as file:
92
+ unpickler = RestrictedUnpickler(file)
93
+ unpickler.extra_handler = extra_handler
94
+ unpickler.load()
95
+
96
+ except zipfile.BadZipfile:
97
+
98
+ # if it's not a zip file, it's the old PyTorch format, with five objects written to the pickle
99
+ with open(filename, "rb") as file:
100
+ unpickler = RestrictedUnpickler(file)
101
+ unpickler.extra_handler = extra_handler
102
+ for i in range(5):
103
+ unpickler.load()
104
+
105
+
106
+ def load(filename, *args, **kwargs):
107
+ return load_with_extra(filename, extra_handler=global_extra_handler, *args, **kwargs)
108
+
109
+
110
+ def load_with_extra(filename, extra_handler=None, *args, **kwargs):
111
+ """
112
+ this function is intended to be used by extensions that want to load models with
113
+ some extra classes in them that the usual unpickler would find suspicious.
114
+
115
+ Use the extra_handler argument to specify a function that takes module and field name as text,
116
+ and returns that field's value:
117
+
118
+ ```python
119
+ def extra(module, name):
120
+ if module == 'collections' and name == 'OrderedDict':
121
+ return collections.OrderedDict
122
+
123
+ return None
124
+
125
+ safe.load_with_extra('model.pt', extra_handler=extra)
126
+ ```
127
+
128
+ The alternative to this is just to use safe.unsafe_torch_load('model.pt'), which as the name implies is
129
+ definitely unsafe.
130
+ """
131
+
132
+ try:
133
+ check_pt(filename, extra_handler)
134
+
135
+ except pickle.UnpicklingError:
136
+ print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
137
+ print(traceback.format_exc(), file=sys.stderr)
138
+ print("The file is most likely corrupted.", file=sys.stderr)
139
+ return None
140
+
141
+ except Exception:
142
+ print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
143
+ print(traceback.format_exc(), file=sys.stderr)
144
+ print("\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
145
+ print("You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr)
146
+ return None
147
+
148
+ return unsafe_torch_load(filename, *args, **kwargs)
149
+
150
+
151
+ class Extra:
152
+ """
153
+ A class for temporarily setting the global handler for when you can't explicitly call load_with_extra
154
+ (because it's not your code making the torch.load call). The intended use is like this:
155
+
156
+ ```
157
+ import torch
158
+ from modules import safe
159
+
160
+ def handler(module, name):
161
+ if module == 'torch' and name in ['float64', 'float16']:
162
+ return getattr(torch, name)
163
+
164
+ return None
165
+
166
+ with safe.Extra(handler):
167
+ x = torch.load('model.pt')
168
+ ```
169
+ """
170
+
171
+ def __init__(self, handler):
172
+ self.handler = handler
173
+
174
+ def __enter__(self):
175
+ global global_extra_handler
176
+
177
+ assert global_extra_handler is None, 'already inside an Extra() block'
178
+ global_extra_handler = self.handler
179
+
180
+ def __exit__(self, exc_type, exc_val, exc_tb):
181
+ global global_extra_handler
182
+
183
+ global_extra_handler = None
184
+
185
+
186
+ unsafe_torch_load = torch.load
187
+ torch.load = load
188
+ global_extra_handler = None
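Note that importing this module patches `torch.load` globally (the last three lines above), so every checkpoint the app loads afterwards goes through the restricted unpickler, while the original loader stays reachable as `unsafe_torch_load`. A brief, hedged sketch of the effect; the file names are placeholders:

```python
import torch
from modules import safe  # after this import, torch.load is safe.load

state_dict = torch.load("model.ckpt", map_location="cpu")  # verified before unpickling
if state_dict is None:
    # safe.load prints a warning and returns None when verification fails
    raise RuntimeError("checkpoint rejected by the safe unpickler")

# Explicit bypass for checkpoints you trust (this is the original torch.load).
raw = safe.unsafe_torch_load("trusted.ckpt", map_location="cpu")
```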
modules/samplers_extra_k_diffusion.py ADDED
@@ -0,0 +1,176 @@
1
+ import torch
2
+ import tqdm
3
+ import k_diffusion.sampling
4
+ from k_diffusion.sampling import default_noise_sampler,to_d, get_sigmas_karras
5
+ from tqdm.auto import trange
6
+ @torch.no_grad()
7
+ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list=None):
8
+ """Implements restart sampling from "Restart Sampling for Improving Generative Processes" (2023).
9
+ Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}
10
+ If restart_list is None, it is chosen automatically; otherwise the given restart_list is used.
11
+ """
12
+ extra_args = {} if extra_args is None else extra_args
13
+ s_in = x.new_ones([x.shape[0]])
14
+ step_id = 0
15
+
16
+ def heun_step(x, old_sigma, new_sigma, second_order=True):
17
+ nonlocal step_id
18
+ denoised = model(x, old_sigma * s_in, **extra_args)
19
+ d = to_d(x, old_sigma, denoised)
20
+ if callback is not None:
21
+ callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised})
22
+ dt = new_sigma - old_sigma
23
+ if new_sigma == 0 or not second_order:
24
+ # Euler method
25
+ x = x + d * dt
26
+ else:
27
+ # Heun's method
28
+ x_2 = x + d * dt
29
+ denoised_2 = model(x_2, new_sigma * s_in, **extra_args)
30
+ d_2 = to_d(x_2, new_sigma, denoised_2)
31
+ d_prime = (d + d_2) / 2
32
+ x = x + d_prime * dt
33
+ step_id += 1
34
+ return x
35
+
36
+ steps = sigmas.shape[0] - 1
37
+ if restart_list is None:
38
+ if steps >= 20:
39
+ restart_steps = 9
40
+ restart_times = 1
41
+ if steps >= 36:
42
+ restart_steps = steps // 4
43
+ restart_times = 2
44
+ sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device)
45
+ restart_list = {0.1: [restart_steps + 1, restart_times, 2]}
46
+ else:
47
+ restart_list = {}
48
+
49
+ restart_list = {int(torch.argmin(abs(sigmas - key), dim=0)): value for key, value in restart_list.items()}
50
+
51
+ step_list = []
52
+ for i in range(len(sigmas) - 1):
53
+ step_list.append((sigmas[i], sigmas[i + 1]))
54
+ if i + 1 in restart_list:
55
+ restart_steps, restart_times, restart_max = restart_list[i + 1]
56
+ min_idx = i + 1
57
+ max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
58
+ if max_idx < min_idx:
59
+ sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1]
60
+ while restart_times > 0:
61
+ restart_times -= 1
62
+ step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])])
63
+
64
+ last_sigma = None
65
+ for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable):
66
+ if last_sigma is None:
67
+ last_sigma = old_sigma
68
+ elif last_sigma < old_sigma:
69
+ x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5
70
+ x = heun_step(x, old_sigma, new_sigma)
71
+ last_sigma = new_sigma
72
+
73
+ return x
74
+
75
+
76
+ def DDPMSampler_step(x, sigma, sigma_prev, noise, noise_sampler):
77
+ alpha_cumprod = 1 / ((sigma * sigma) + 1)
78
+ alpha_cumprod_prev = 1 / ((sigma_prev * sigma_prev) + 1)
79
+ alpha = (alpha_cumprod / alpha_cumprod_prev)
80
+
81
+ mu = (1.0 / alpha).sqrt() * (x - (1 - alpha) * noise / (1 - alpha_cumprod).sqrt())
82
+ if sigma_prev > 0:
83
+ mu += ((1 - alpha) * (1. - alpha_cumprod_prev) / (1. - alpha_cumprod)).sqrt() * noise_sampler(sigma, sigma_prev)
84
+ return mu
85
+
86
+
87
+ def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, step_function=None):
88
+ extra_args = {} if extra_args is None else extra_args
89
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
90
+ s_in = x.new_ones([x.shape[0]])
91
+
92
+ for i in trange(len(sigmas) - 1, disable=disable):
93
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
94
+ if callback is not None:
95
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
96
+ x = step_function(x / torch.sqrt(1.0 + sigmas[i] ** 2.0), sigmas[i], sigmas[i + 1], (x - denoised) / sigmas[i], noise_sampler)
97
+ if sigmas[i + 1] != 0:
98
+ x *= torch.sqrt(1.0 + sigmas[i + 1] ** 2.0)
99
+ return x
100
+
101
+
102
+ @torch.no_grad()
103
+ def sample_ddpm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
104
+ return generic_step_sampler(model, x, sigmas, extra_args, callback, disable, noise_sampler, DDPMSampler_step)
105
+
106
+
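`DDPMSampler_step` maps k-diffusion's variance-exploding sigma to the variance-preserving alpha-bar via `alpha_cumprod = 1 / (sigma**2 + 1)`, and `generic_step_sampler` applies the matching rescaling of `x` before and after each step. A small numerical check of that conversion (illustrative only):

```python
import torch

sigma = torch.tensor(2.0)
alpha_cumprod = 1.0 / (sigma * sigma + 1.0)                  # = 0.2, as in DDPMSampler_step
sigma_back = ((1.0 - alpha_cumprod) / alpha_cumprod).sqrt()  # recover the VE sigma
assert torch.allclose(sigma, sigma_back)                     # the two parameterizations agree
```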
107
+ @torch.no_grad()
108
+ def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
109
+ extra_args = {} if extra_args is None else extra_args
110
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
111
+ s_in = x.new_ones([x.shape[0]])
112
+ for i in trange(len(sigmas) - 1, disable=disable):
113
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
114
+ if callback is not None:
115
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
116
+
117
+ x = denoised
118
+ if sigmas[i + 1] > 0:
119
+ x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])
120
+ return x
121
+
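Each LCM iteration takes the model's one-step prediction as the new sample and, when steps remain, re-noises it to the next sigma. The core update, written out for a single toy step (values illustrative):

```python
import torch

denoised = torch.zeros(1, 4, 64, 64)    # the model's predicted clean latent for this step
sigma_next = torch.tensor(0.5)
x = denoised + sigma_next * torch.randn_like(denoised)   # ancestral re-noise to sigma_next
```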
122
+ @torch.no_grad()
123
+ def sample_heunpp2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
124
+ # From MIT licensed: https://github.com/Carzit/sd-webui-samplers-scheduler/
125
+ extra_args = {} if extra_args is None else extra_args
126
+ s_in = x.new_ones([x.shape[0]])
127
+ s_end = sigmas[-1]
128
+ for i in trange(len(sigmas) - 1, disable=disable):
129
+ gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
130
+ eps = torch.randn_like(x) * s_noise
131
+ sigma_hat = sigmas[i] * (gamma + 1)
132
+ if gamma > 0:
133
+ x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
134
+ denoised = model(x, sigma_hat * s_in, **extra_args)
135
+ d = to_d(x, sigma_hat, denoised)
136
+ if callback is not None:
137
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
138
+ dt = sigmas[i + 1] - sigma_hat
139
+ if sigmas[i + 1] == s_end:
140
+ # Euler method
141
+ x = x + d * dt
142
+ elif sigmas[i + 2] == s_end:
143
+
144
+ # Heun's method
145
+ x_2 = x + d * dt
146
+ denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
147
+ d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
148
+
149
+ w = 2 * sigmas[0]
150
+ w2 = sigmas[i + 1] / w
151
+ w1 = 1 - w2
152
+
153
+ d_prime = d * w1 + d_2 * w2
154
+
155
+
156
+ x = x + d_prime * dt
157
+
158
+ else:
159
+ # Heun++
160
+ x_2 = x + d * dt
161
+ denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
162
+ d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
163
+ dt_2 = sigmas[i + 2] - sigmas[i + 1]
164
+
165
+ x_3 = x_2 + d_2 * dt_2
166
+ denoised_3 = model(x_3, sigmas[i + 2] * s_in, **extra_args)
167
+ d_3 = to_d(x_3, sigmas[i + 2], denoised_3)
168
+
169
+ w = 3 * sigmas[0]
170
+ w2 = sigmas[i + 1] / w
171
+ w3 = sigmas[i + 2] / w
172
+ w1 = 1 - w2 - w3
173
+
174
+ d_prime = w1 * d + w2 * d_2 + w3 * d_3
175
+ x = x + d_prime * dt
176
+ return x
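In the Heun++ branch of `sample_heunpp2`, the first-, second- and third-order slope estimates are blended with weights taken from the sigma schedule, and the weights sum to one by construction. A quick check (illustrative):

```python
import torch
from k_diffusion.sampling import get_sigmas_karras

sigmas = get_sigmas_karras(20, 0.0292, 14.61, device="cpu")
i = 5
w = 3 * sigmas[0]
w2, w3 = sigmas[i + 1] / w, sigmas[i + 2] / w
w1 = 1 - w2 - w3
assert torch.isclose(w1 + w2 + w3, torch.tensor(1.0))
```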
modules/t2i_adapter.py ADDED
@@ -0,0 +1,144 @@
1
+ import importlib
2
+ import inspect
3
+ import math
4
+ from pathlib import Path
5
+ import re
6
+ from collections import defaultdict
7
+ from typing import List, Optional, Union
8
+ import cv2
9
+ import time
10
+ import k_diffusion
11
+ import numpy as np
12
+ import PIL
13
+ import torch
14
+ import torch.nn as nn
15
+ import torch.nn.functional as F
16
+ from einops import rearrange
17
+ from .external_k_diffusion import CompVisDenoiser, CompVisVDenoiser
18
+ from .prompt_parser import FrozenCLIPEmbedderWithCustomWords
19
+ from torch import einsum
20
+ from torch.autograd.function import Function
21
+
22
+ from diffusers import DiffusionPipeline
23
+ from diffusers.utils import PIL_INTERPOLATION, is_accelerate_available
24
+ from diffusers.utils import logging
25
+ from diffusers.utils.torch_utils import randn_tensor,is_compiled_module,is_torch_version
26
+ from diffusers.image_processor import VaeImageProcessor,PipelineImageInput
27
+ from safetensors.torch import load_file
28
+ from diffusers import ControlNetModel
29
+ from PIL import Image
30
+ import torchvision.transforms as transforms
31
+ from typing import Any, Callable, Dict, List, Optional, Union
32
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
33
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
34
+ from diffusers import AutoencoderKL, LMSDiscreteScheduler
35
+ from .u_net_condition_modify import UNet2DConditionModel
36
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
37
+ from diffusers.models import AutoencoderKL, ImageProjection, MultiAdapter, T2IAdapter
38
+ from diffusers.schedulers import KarrasDiffusionSchedulers
39
+ from diffusers.utils import (
40
+ PIL_INTERPOLATION,
41
+ USE_PEFT_BACKEND,
42
+ BaseOutput,
43
+ deprecate,
44
+ logging,
45
+ replace_example_docstring,
46
+ scale_lora_layers,
47
+ unscale_lora_layers,
48
+ )
49
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
50
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
51
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
52
+ from packaging import version
53
+ from diffusers.configuration_utils import FrozenDict
54
+
55
+ def _preprocess_adapter_image(image, height, width):
56
+ if isinstance(image, torch.Tensor):
57
+ return image
58
+ elif isinstance(image, PIL.Image.Image):
59
+ image = [image]
60
+
61
+ if isinstance(image[0], PIL.Image.Image):
62
+ image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
63
+ image = [
64
+ i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
65
+ ] # expand [h, w] or [h, w, c] to [b, h, w, c]
66
+ image = np.concatenate(image, axis=0)
67
+ image = np.array(image).astype(np.float32) / 255.0
68
+ image = image.transpose(0, 3, 1, 2)
69
+ image = torch.from_numpy(image)
70
+ elif isinstance(image[0], torch.Tensor):
71
+ if image[0].ndim == 3:
72
+ image = torch.stack(image, dim=0)
73
+ elif image[0].ndim == 4:
74
+ image = torch.cat(image, dim=0)
75
+ else:
76
+ raise ValueError(
77
+ f"Invalid image tensor! Expecting an image tensor with 3 or 4 dimensions, but received: {image[0].ndim}"
78
+ )
79
+ return image
80
+
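`_preprocess_adapter_image` normalizes a PIL image (or a list/tensor of them) into a float NCHW batch in [0, 1], resized with Lanczos. For a single grayscale control map (illustrative):

```python
import numpy as np
from PIL import Image

control = Image.fromarray(np.zeros((600, 400), dtype=np.uint8))   # e.g. a sketch / edge map
batch = _preprocess_adapter_image(control, height=512, width=512)
print(batch.shape, batch.dtype)   # torch.Size([1, 1, 512, 512]) torch.float32
```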
81
+ # T2I-Adapter setup
82
+ def setup_model_t2i_adapter(class_name,adapter = None):
83
+ if isinstance(adapter, (list, tuple)):
84
+ adapter = MultiAdapter(adapter)
85
+ class_name.adapter = adapter
86
+
87
+
88
+
89
+ def preprocessing_t2i_adapter(class_name,image,width,height,adapter_conditioning_scale,num_images_per_prompt = 1):
90
+ if isinstance(class_name.adapter, MultiAdapter):
91
+ adapter_input = []
92
+ for one_image in image:
93
+ one_image = _preprocess_adapter_image(one_image, height, width)
94
+ one_image = one_image.to(device=class_name.device, dtype=class_name.adapter.dtype)
95
+ adapter_input.append(one_image)
96
+ else:
97
+ adapter_input = _preprocess_adapter_image(image, height, width)
98
+ adapter_input = adapter_input.to(device=class_name.device, dtype=class_name.adapter.dtype)
99
+
100
+ if isinstance(class_name.adapter, MultiAdapter):
101
+ adapter_state = class_name.adapter(adapter_input, adapter_conditioning_scale)
102
+ for k, v in enumerate(adapter_state):
103
+ adapter_state[k] = v
104
+ else:
105
+ adapter_state = class_name.adapter(adapter_input)
106
+ for k, v in enumerate(adapter_state):
107
+ adapter_state[k] = v * adapter_conditioning_scale
108
+
109
+
110
+ if num_images_per_prompt > 1:
111
+ for k, v in enumerate(adapter_state):
112
+ adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
113
+ if class_name.do_classifier_free_guidance:
114
+ for k, v in enumerate(adapter_state):
115
+ adapter_state[k] = torch.cat([v] * 2, dim=0)
116
+ return adapter_state
117
+
118
+
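A hedged sketch of how these helpers could be wired together. `pipe` stands for the pipeline object in this repo (it must expose `device`, `adapter` and `do_classifier_free_guidance`), and the checkpoint id and `control_image` are illustrative:

```python
from diffusers import T2IAdapter

adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_canny_sd15v2")   # illustrative checkpoint
setup_model_t2i_adapter(pipe, adapter)
height, width = default_height_width(pipe, None, None, control_image)        # control_image: PIL image
adapter_state = preprocessing_t2i_adapter(pipe, control_image, width, height,
                                          adapter_conditioning_scale=1.0)
# adapter_state: list of feature maps, one per UNet down-block resolution,
# later handed to the modified UNet as additional residuals.
```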
119
+ def default_height_width(class_name, height, width, image):
120
+ # NOTE: It is possible that a list of images have different
121
+ # dimensions for each image, so just checking the first image
122
+ # is not _exactly_ correct, but it is simple.
123
+ while isinstance(image, list):
124
+ image = image[0]
125
+
126
+ if height is None:
127
+ if isinstance(image, PIL.Image.Image):
128
+ height = image.height
129
+ elif isinstance(image, torch.Tensor):
130
+ height = image.shape[-2]
131
+
132
+ # round down to nearest multiple of `self.adapter.downscale_factor`
133
+ height = (height // class_name.adapter.downscale_factor) * class_name.adapter.downscale_factor
134
+
135
+ if width is None:
136
+ if isinstance(image, PIL.Image.Image):
137
+ width = image.width
138
+ elif isinstance(image, torch.Tensor):
139
+ width = image.shape[-1]
140
+
141
+ # round down to nearest multiple of `self.adapter.downscale_factor`
142
+ width = (width // class_name.adapter.downscale_factor) * class_name.adapter.downscale_factor
143
+
144
+ return height, width
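The rounding above simply snaps the working resolution down to a multiple of the adapter's downscale factor so its feature maps align with the UNet blocks. For example, with a downscale factor of 8 (an illustrative value; in practice it comes from `adapter.downscale_factor`):

```python
downscale_factor = 8
height, width = 513, 600
height = (height // downscale_factor) * downscale_factor   # -> 512
width = (width // downscale_factor) * downscale_factor     # -> 600
```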
modules/u_net_condition_modify.py ADDED
@@ -0,0 +1,1318 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Any, Dict, List, Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.utils.checkpoint
20
+
21
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
22
+ from diffusers.loaders import PeftAdapterMixin
23
+ from .u_net_modify import UNet2DConditionLoadersMixin_modify
24
+ from diffusers.loaders.single_file_model import FromOriginalModelMixin
25
+ from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
26
+ from diffusers.models.activations import get_activation
27
+ from diffusers.models.attention_processor import (
28
+ ADDED_KV_ATTENTION_PROCESSORS,
29
+ CROSS_ATTENTION_PROCESSORS,
30
+ Attention,
31
+ AttentionProcessor,
32
+ AttnAddedKVProcessor,
33
+ AttnProcessor,
34
+ )
35
+
36
+ from diffusers.models.embeddings import (
37
+ GaussianFourierProjection,
38
+ GLIGENTextBoundingboxProjection,
39
+ ImageHintTimeEmbedding,
40
+ ImageProjection,
41
+ ImageTimeEmbedding,
42
+ TextImageProjection,
43
+ TextImageTimeEmbedding,
44
+ TextTimeEmbedding,
45
+ TimestepEmbedding,
46
+ Timesteps,
47
+ )
48
+ from diffusers.models.modeling_utils import ModelMixin
49
+ from diffusers.models.unets.unet_2d_blocks import (
50
+ get_down_block,
51
+ get_mid_block,
52
+ get_up_block,
53
+ )
54
+
55
+
56
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
57
+
58
+
59
+ @dataclass
60
+ class UNet2DConditionOutput(BaseOutput):
61
+ """
62
+ The output of [`UNet2DConditionModel`].
63
+
64
+ Args:
65
+ sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
66
+ The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
67
+ """
68
+
69
+ sample: torch.Tensor = None
70
+
71
+
72
+ class UNet2DConditionModel(
73
+ ModelMixin, ConfigMixin, FromOriginalModelMixin, UNet2DConditionLoadersMixin_modify, PeftAdapterMixin
74
+ ):
75
+ r"""
76
+ A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
77
+ shaped output.
78
+
79
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
80
+ for all models (such as downloading or saving).
81
+
82
+ Parameters:
83
+ sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
84
+ Height and width of input/output sample.
85
+ in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
86
+ out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
87
+ center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
88
+ flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
89
+ Whether to flip the sin to cos in the time embedding.
90
+ freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
91
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
92
+ The tuple of downsample blocks to use.
93
+ mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
94
+ Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
95
+ `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
96
+ up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
97
+ The tuple of upsample blocks to use.
98
+ only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
99
+ Whether to include self-attention in the basic transformer blocks, see
100
+ [`~models.attention.BasicTransformerBlock`].
101
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
102
+ The tuple of output channels for each block.
103
+ layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
104
+ downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
105
+ mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
106
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
107
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
108
+ norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
109
+ If `None`, normalization and activation layers is skipped in post-processing.
110
+ norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
111
+ cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
112
+ The dimension of the cross attention features.
113
+ transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
114
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
115
+ [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
116
+ [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
117
+ reverse_transformer_layers_per_block (`Tuple[Tuple]`, *optional*, defaults to `None`):
118
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
119
+ blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
120
+ [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
121
+ [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
122
+ encoder_hid_dim (`int`, *optional*, defaults to None):
123
+ If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
124
+ dimension to `cross_attention_dim`.
125
+ encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
126
+ If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
127
+ embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`.
128
+ attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
129
+ num_attention_heads (`int`, *optional*):
130
+ The number of attention heads. If not defined, defaults to `attention_head_dim`
131
+ resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
132
+ for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
133
+ class_embed_type (`str`, *optional*, defaults to `None`):
134
+ The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
135
+ `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
136
+ addition_embed_type (`str`, *optional*, defaults to `None`):
137
+ Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
138
+ "text". "text" will use the `TextTimeEmbedding` layer.
139
+ addition_time_embed_dim: (`int`, *optional*, defaults to `None`):
140
+ Dimension for the timestep embeddings.
141
+ num_class_embeds (`int`, *optional*, defaults to `None`):
142
+ Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
143
+ class conditioning with `class_embed_type` equal to `None`.
144
+ time_embedding_type (`str`, *optional*, defaults to `positional`):
145
+ The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
146
+ time_embedding_dim (`int`, *optional*, defaults to `None`):
147
+ An optional override for the dimension of the projected time embedding.
148
+ time_embedding_act_fn (`str`, *optional*, defaults to `None`):
149
+ Optional activation function to use only once on the time embeddings before they are passed to the rest of
150
+ the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
151
+ timestep_post_act (`str`, *optional*, defaults to `None`):
152
+ The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
153
+ time_cond_proj_dim (`int`, *optional*, defaults to `None`):
154
+ The dimension of `cond_proj` layer in the timestep embedding.
155
+ conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.
156
+ conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.
157
+ projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
158
+ `class_embed_type="projection"`. Required when `class_embed_type="projection"`.
159
+ class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
160
+ embeddings with the class embeddings.
161
+ mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
162
+ Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
163
+ `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
164
+ `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False`
165
+ otherwise.
166
+ """
167
+
168
+ _supports_gradient_checkpointing = True
169
+ _no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D", "CrossAttnUpBlock2D"]
170
+
171
+ @register_to_config
172
+ def __init__(
173
+ self,
174
+ sample_size: Optional[int] = None,
175
+ in_channels: int = 4,
176
+ out_channels: int = 4,
177
+ center_input_sample: bool = False,
178
+ flip_sin_to_cos: bool = True,
179
+ freq_shift: int = 0,
180
+ down_block_types: Tuple[str] = (
181
+ "CrossAttnDownBlock2D",
182
+ "CrossAttnDownBlock2D",
183
+ "CrossAttnDownBlock2D",
184
+ "DownBlock2D",
185
+ ),
186
+ mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
187
+ up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
188
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
189
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
190
+ layers_per_block: Union[int, Tuple[int]] = 2,
191
+ downsample_padding: int = 1,
192
+ mid_block_scale_factor: float = 1,
193
+ dropout: float = 0.0,
194
+ act_fn: str = "silu",
195
+ norm_num_groups: Optional[int] = 32,
196
+ norm_eps: float = 1e-5,
197
+ cross_attention_dim: Union[int, Tuple[int]] = 1280,
198
+ transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
199
+ reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
200
+ encoder_hid_dim: Optional[int] = None,
201
+ encoder_hid_dim_type: Optional[str] = None,
202
+ attention_head_dim: Union[int, Tuple[int]] = 8,
203
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
204
+ dual_cross_attention: bool = False,
205
+ use_linear_projection: bool = False,
206
+ class_embed_type: Optional[str] = None,
207
+ addition_embed_type: Optional[str] = None,
208
+ addition_time_embed_dim: Optional[int] = None,
209
+ num_class_embeds: Optional[int] = None,
210
+ upcast_attention: bool = False,
211
+ resnet_time_scale_shift: str = "default",
212
+ resnet_skip_time_act: bool = False,
213
+ resnet_out_scale_factor: float = 1.0,
214
+ time_embedding_type: str = "positional",
215
+ time_embedding_dim: Optional[int] = None,
216
+ time_embedding_act_fn: Optional[str] = None,
217
+ timestep_post_act: Optional[str] = None,
218
+ time_cond_proj_dim: Optional[int] = None,
219
+ conv_in_kernel: int = 3,
220
+ conv_out_kernel: int = 3,
221
+ projection_class_embeddings_input_dim: Optional[int] = None,
222
+ attention_type: str = "default",
223
+ class_embeddings_concat: bool = False,
224
+ mid_block_only_cross_attention: Optional[bool] = None,
225
+ cross_attention_norm: Optional[str] = None,
226
+ addition_embed_type_num_heads: int = 64,
227
+ ):
228
+ super().__init__()
229
+
230
+ self.sample_size = sample_size
231
+
232
+ if num_attention_heads is not None:
233
+ raise ValueError(
234
+ "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
235
+ )
236
+
237
+ # If `num_attention_heads` is not defined (which is the case for most models)
238
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
239
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
240
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
241
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
242
+ # which is why we correct for the naming here.
243
+ num_attention_heads = num_attention_heads or attention_head_dim
244
+
245
+ # Check inputs
246
+ self._check_config(
247
+ down_block_types=down_block_types,
248
+ up_block_types=up_block_types,
249
+ only_cross_attention=only_cross_attention,
250
+ block_out_channels=block_out_channels,
251
+ layers_per_block=layers_per_block,
252
+ cross_attention_dim=cross_attention_dim,
253
+ transformer_layers_per_block=transformer_layers_per_block,
254
+ reverse_transformer_layers_per_block=reverse_transformer_layers_per_block,
255
+ attention_head_dim=attention_head_dim,
256
+ num_attention_heads=num_attention_heads,
257
+ )
258
+
259
+ # input
260
+ conv_in_padding = (conv_in_kernel - 1) // 2
261
+ self.conv_in = nn.Conv2d(
262
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
263
+ )
264
+
265
+ # time
266
+ time_embed_dim, timestep_input_dim = self._set_time_proj(
267
+ time_embedding_type,
268
+ block_out_channels=block_out_channels,
269
+ flip_sin_to_cos=flip_sin_to_cos,
270
+ freq_shift=freq_shift,
271
+ time_embedding_dim=time_embedding_dim,
272
+ )
273
+
274
+ self.time_embedding = TimestepEmbedding(
275
+ timestep_input_dim,
276
+ time_embed_dim,
277
+ act_fn=act_fn,
278
+ post_act_fn=timestep_post_act,
279
+ cond_proj_dim=time_cond_proj_dim,
280
+ )
281
+
282
+ self._set_encoder_hid_proj(
283
+ encoder_hid_dim_type,
284
+ cross_attention_dim=cross_attention_dim,
285
+ encoder_hid_dim=encoder_hid_dim,
286
+ )
287
+
288
+ # class embedding
289
+ self._set_class_embedding(
290
+ class_embed_type,
291
+ act_fn=act_fn,
292
+ num_class_embeds=num_class_embeds,
293
+ projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
294
+ time_embed_dim=time_embed_dim,
295
+ timestep_input_dim=timestep_input_dim,
296
+ )
297
+
298
+ self._set_add_embedding(
299
+ addition_embed_type,
300
+ addition_embed_type_num_heads=addition_embed_type_num_heads,
301
+ addition_time_embed_dim=addition_time_embed_dim,
302
+ cross_attention_dim=cross_attention_dim,
303
+ encoder_hid_dim=encoder_hid_dim,
304
+ flip_sin_to_cos=flip_sin_to_cos,
305
+ freq_shift=freq_shift,
306
+ projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
307
+ time_embed_dim=time_embed_dim,
308
+ )
309
+
310
+ if time_embedding_act_fn is None:
311
+ self.time_embed_act = None
312
+ else:
313
+ self.time_embed_act = get_activation(time_embedding_act_fn)
314
+
315
+ self.down_blocks = nn.ModuleList([])
316
+ self.up_blocks = nn.ModuleList([])
317
+
318
+ if isinstance(only_cross_attention, bool):
319
+ if mid_block_only_cross_attention is None:
320
+ mid_block_only_cross_attention = only_cross_attention
321
+
322
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
323
+
324
+ if mid_block_only_cross_attention is None:
325
+ mid_block_only_cross_attention = False
326
+
327
+ if isinstance(num_attention_heads, int):
328
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
329
+
330
+ if isinstance(attention_head_dim, int):
331
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
332
+
333
+ if isinstance(cross_attention_dim, int):
334
+ cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
335
+
336
+ if isinstance(layers_per_block, int):
337
+ layers_per_block = [layers_per_block] * len(down_block_types)
338
+
339
+ if isinstance(transformer_layers_per_block, int):
340
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
341
+
342
+ if class_embeddings_concat:
343
+ # The time embeddings are concatenated with the class embeddings. The dimension of the
344
+ # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
345
+ # regular time embeddings
346
+ blocks_time_embed_dim = time_embed_dim * 2
347
+ else:
348
+ blocks_time_embed_dim = time_embed_dim
349
+
350
+ # down
351
+ output_channel = block_out_channels[0]
352
+ for i, down_block_type in enumerate(down_block_types):
353
+ input_channel = output_channel
354
+ output_channel = block_out_channels[i]
355
+ is_final_block = i == len(block_out_channels) - 1
356
+
357
+ down_block = get_down_block(
358
+ down_block_type,
359
+ num_layers=layers_per_block[i],
360
+ transformer_layers_per_block=transformer_layers_per_block[i],
361
+ in_channels=input_channel,
362
+ out_channels=output_channel,
363
+ temb_channels=blocks_time_embed_dim,
364
+ add_downsample=not is_final_block,
365
+ resnet_eps=norm_eps,
366
+ resnet_act_fn=act_fn,
367
+ resnet_groups=norm_num_groups,
368
+ cross_attention_dim=cross_attention_dim[i],
369
+ num_attention_heads=num_attention_heads[i],
370
+ downsample_padding=downsample_padding,
371
+ dual_cross_attention=dual_cross_attention,
372
+ use_linear_projection=use_linear_projection,
373
+ only_cross_attention=only_cross_attention[i],
374
+ upcast_attention=upcast_attention,
375
+ resnet_time_scale_shift=resnet_time_scale_shift,
376
+ attention_type=attention_type,
377
+ resnet_skip_time_act=resnet_skip_time_act,
378
+ resnet_out_scale_factor=resnet_out_scale_factor,
379
+ cross_attention_norm=cross_attention_norm,
380
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
381
+ dropout=dropout,
382
+ )
383
+ self.down_blocks.append(down_block)
384
+
385
+ # mid
386
+ self.mid_block = get_mid_block(
387
+ mid_block_type,
388
+ temb_channels=blocks_time_embed_dim,
389
+ in_channels=block_out_channels[-1],
390
+ resnet_eps=norm_eps,
391
+ resnet_act_fn=act_fn,
392
+ resnet_groups=norm_num_groups,
393
+ output_scale_factor=mid_block_scale_factor,
394
+ transformer_layers_per_block=transformer_layers_per_block[-1],
395
+ num_attention_heads=num_attention_heads[-1],
396
+ cross_attention_dim=cross_attention_dim[-1],
397
+ dual_cross_attention=dual_cross_attention,
398
+ use_linear_projection=use_linear_projection,
399
+ mid_block_only_cross_attention=mid_block_only_cross_attention,
400
+ upcast_attention=upcast_attention,
401
+ resnet_time_scale_shift=resnet_time_scale_shift,
402
+ attention_type=attention_type,
403
+ resnet_skip_time_act=resnet_skip_time_act,
404
+ cross_attention_norm=cross_attention_norm,
405
+ attention_head_dim=attention_head_dim[-1],
406
+ dropout=dropout,
407
+ )
408
+
409
+ # count how many layers upsample the images
410
+ self.num_upsamplers = 0
411
+
412
+ # up
413
+ reversed_block_out_channels = list(reversed(block_out_channels))
414
+ reversed_num_attention_heads = list(reversed(num_attention_heads))
415
+ reversed_layers_per_block = list(reversed(layers_per_block))
416
+ reversed_cross_attention_dim = list(reversed(cross_attention_dim))
417
+ reversed_transformer_layers_per_block = (
418
+ list(reversed(transformer_layers_per_block))
419
+ if reverse_transformer_layers_per_block is None
420
+ else reverse_transformer_layers_per_block
421
+ )
422
+ only_cross_attention = list(reversed(only_cross_attention))
423
+
424
+ output_channel = reversed_block_out_channels[0]
425
+ for i, up_block_type in enumerate(up_block_types):
426
+ is_final_block = i == len(block_out_channels) - 1
427
+
428
+ prev_output_channel = output_channel
429
+ output_channel = reversed_block_out_channels[i]
430
+ input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
431
+
432
+ # add upsample block for all BUT final layer
433
+ if not is_final_block:
434
+ add_upsample = True
435
+ self.num_upsamplers += 1
436
+ else:
437
+ add_upsample = False
438
+
439
+ up_block = get_up_block(
440
+ up_block_type,
441
+ num_layers=reversed_layers_per_block[i] + 1,
442
+ transformer_layers_per_block=reversed_transformer_layers_per_block[i],
443
+ in_channels=input_channel,
444
+ out_channels=output_channel,
445
+ prev_output_channel=prev_output_channel,
446
+ temb_channels=blocks_time_embed_dim,
447
+ add_upsample=add_upsample,
448
+ resnet_eps=norm_eps,
449
+ resnet_act_fn=act_fn,
450
+ resolution_idx=i,
451
+ resnet_groups=norm_num_groups,
452
+ cross_attention_dim=reversed_cross_attention_dim[i],
453
+ num_attention_heads=reversed_num_attention_heads[i],
454
+ dual_cross_attention=dual_cross_attention,
455
+ use_linear_projection=use_linear_projection,
456
+ only_cross_attention=only_cross_attention[i],
457
+ upcast_attention=upcast_attention,
458
+ resnet_time_scale_shift=resnet_time_scale_shift,
459
+ attention_type=attention_type,
460
+ resnet_skip_time_act=resnet_skip_time_act,
461
+ resnet_out_scale_factor=resnet_out_scale_factor,
462
+ cross_attention_norm=cross_attention_norm,
463
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
464
+ dropout=dropout,
465
+ )
466
+ self.up_blocks.append(up_block)
467
+ prev_output_channel = output_channel
468
+
469
+ # out
470
+ if norm_num_groups is not None:
471
+ self.conv_norm_out = nn.GroupNorm(
472
+ num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
473
+ )
474
+
475
+ self.conv_act = get_activation(act_fn)
476
+
477
+ else:
478
+ self.conv_norm_out = None
479
+ self.conv_act = None
480
+
481
+ conv_out_padding = (conv_out_kernel - 1) // 2
482
+ self.conv_out = nn.Conv2d(
483
+ block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
484
+ )
485
+
486
+ self._set_pos_net_if_use_gligen(attention_type=attention_type, cross_attention_dim=cross_attention_dim)
487
+
488
+ def _check_config(
489
+ self,
490
+ down_block_types: Tuple[str],
491
+ up_block_types: Tuple[str],
492
+ only_cross_attention: Union[bool, Tuple[bool]],
493
+ block_out_channels: Tuple[int],
494
+ layers_per_block: Union[int, Tuple[int]],
495
+ cross_attention_dim: Union[int, Tuple[int]],
496
+ transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]],
497
+ reverse_transformer_layers_per_block: bool,
498
+ attention_head_dim: int,
499
+ num_attention_heads: Optional[Union[int, Tuple[int]]],
500
+ ):
501
+ if len(down_block_types) != len(up_block_types):
502
+ raise ValueError(
503
+ f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
504
+ )
505
+
506
+ if len(block_out_channels) != len(down_block_types):
507
+ raise ValueError(
508
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
509
+ )
510
+
511
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
512
+ raise ValueError(
513
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
514
+ )
515
+
516
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
517
+ raise ValueError(
518
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
519
+ )
520
+
521
+ if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
522
+ raise ValueError(
523
+ f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
524
+ )
525
+
526
+ if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
527
+ raise ValueError(
528
+ f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
529
+ )
530
+
531
+ if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
532
+ raise ValueError(
533
+ f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
534
+ )
535
+ if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None:
536
+ for layer_number_per_block in transformer_layers_per_block:
537
+ if isinstance(layer_number_per_block, list):
538
+ raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.")
539
+
540
+ def _set_time_proj(
541
+ self,
542
+ time_embedding_type: str,
543
+ block_out_channels: int,
544
+ flip_sin_to_cos: bool,
545
+ freq_shift: float,
546
+ time_embedding_dim: int,
547
+ ) -> Tuple[int, int]:
548
+ if time_embedding_type == "fourier":
549
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
550
+ if time_embed_dim % 2 != 0:
551
+ raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
552
+ self.time_proj = GaussianFourierProjection(
553
+ time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
554
+ )
555
+ timestep_input_dim = time_embed_dim
556
+ elif time_embedding_type == "positional":
557
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
558
+
559
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
560
+ timestep_input_dim = block_out_channels[0]
561
+ else:
562
+ raise ValueError(
563
+ f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
564
+ )
565
+
566
+ return time_embed_dim, timestep_input_dim
567
+
568
+ def _set_encoder_hid_proj(
569
+ self,
570
+ encoder_hid_dim_type: Optional[str],
571
+ cross_attention_dim: Union[int, Tuple[int]],
572
+ encoder_hid_dim: Optional[int],
573
+ ):
574
+ if encoder_hid_dim_type is None and encoder_hid_dim is not None:
575
+ encoder_hid_dim_type = "text_proj"
576
+ self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
577
+ logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
578
+
579
+ if encoder_hid_dim is None and encoder_hid_dim_type is not None:
580
+ raise ValueError(
581
+ f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
582
+ )
583
+
584
+ if encoder_hid_dim_type == "text_proj":
585
+ self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
586
+ elif encoder_hid_dim_type == "text_image_proj":
587
+ # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
588
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
589
+ # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)`
590
+ self.encoder_hid_proj = TextImageProjection(
591
+ text_embed_dim=encoder_hid_dim,
592
+ image_embed_dim=cross_attention_dim,
593
+ cross_attention_dim=cross_attention_dim,
594
+ )
595
+ elif encoder_hid_dim_type == "image_proj":
596
+ # Kandinsky 2.2
597
+ self.encoder_hid_proj = ImageProjection(
598
+ image_embed_dim=encoder_hid_dim,
599
+ cross_attention_dim=cross_attention_dim,
600
+ )
601
+ elif encoder_hid_dim_type is not None:
602
+ raise ValueError(
603
+ f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj' or 'image_proj'."
604
+ )
605
+ else:
606
+ self.encoder_hid_proj = None
607
+
608
+ def _set_class_embedding(
609
+ self,
610
+ class_embed_type: Optional[str],
611
+ act_fn: str,
612
+ num_class_embeds: Optional[int],
613
+ projection_class_embeddings_input_dim: Optional[int],
614
+ time_embed_dim: int,
615
+ timestep_input_dim: int,
616
+ ):
617
+ if class_embed_type is None and num_class_embeds is not None:
618
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
619
+ elif class_embed_type == "timestep":
620
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
621
+ elif class_embed_type == "identity":
622
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
623
+ elif class_embed_type == "projection":
624
+ if projection_class_embeddings_input_dim is None:
625
+ raise ValueError(
626
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
627
+ )
628
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
629
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
630
+ # 2. it projects from an arbitrary input dimension.
631
+ #
632
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
633
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
634
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
635
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
636
+ elif class_embed_type == "simple_projection":
637
+ if projection_class_embeddings_input_dim is None:
638
+ raise ValueError(
639
+ "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
640
+ )
641
+ self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
642
+ else:
643
+ self.class_embedding = None
644
+
645
+ def _set_add_embedding(
646
+ self,
647
+ addition_embed_type: str,
648
+ addition_embed_type_num_heads: int,
649
+ addition_time_embed_dim: Optional[int],
650
+ flip_sin_to_cos: bool,
651
+ freq_shift: float,
652
+ cross_attention_dim: Optional[int],
653
+ encoder_hid_dim: Optional[int],
654
+ projection_class_embeddings_input_dim: Optional[int],
655
+ time_embed_dim: int,
656
+ ):
657
+ if addition_embed_type == "text":
658
+ if encoder_hid_dim is not None:
659
+ text_time_embedding_from_dim = encoder_hid_dim
660
+ else:
661
+ text_time_embedding_from_dim = cross_attention_dim
662
+
663
+ self.add_embedding = TextTimeEmbedding(
664
+ text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
665
+ )
666
+ elif addition_embed_type == "text_image":
667
+ # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
668
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
669
+ # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
670
+ self.add_embedding = TextImageTimeEmbedding(
671
+ text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
672
+ )
673
+ elif addition_embed_type == "text_time":
674
+ self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
675
+ self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
676
+ elif addition_embed_type == "image":
677
+ # Kandinsky 2.2
678
+ self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
679
+ elif addition_embed_type == "image_hint":
680
+ # Kandinsky 2.2 ControlNet
681
+ self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
682
+ elif addition_embed_type is not None:
683
+ raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image' or 'image_hint'.")
684
+
685
+ def _set_pos_net_if_use_gligen(self, attention_type: str, cross_attention_dim: int):
686
+ if attention_type in ["gated", "gated-text-image"]:
687
+ positive_len = 768
688
+ if isinstance(cross_attention_dim, int):
689
+ positive_len = cross_attention_dim
690
+ elif isinstance(cross_attention_dim, (list, tuple)):
691
+ positive_len = cross_attention_dim[0]
692
+
693
+ feature_type = "text-only" if attention_type == "gated" else "text-image"
694
+ self.position_net = GLIGENTextBoundingboxProjection(
695
+ positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
696
+ )
697
+
698
+ @property
699
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
700
+ r"""
701
+ Returns:
702
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
703
+ indexed by its weight name.
704
+ """
705
+ # set recursively
706
+ processors = {}
707
+
708
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
709
+ if hasattr(module, "get_processor"):
710
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
711
+
712
+ for sub_name, child in module.named_children():
713
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
714
+
715
+ return processors
716
+
717
+ for name, module in self.named_children():
718
+ fn_recursive_add_processors(name, module, processors)
719
+
720
+ return processors
721
+
722
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
723
+ r"""
724
+ Sets the attention processor to use to compute attention.
725
+
726
+ Parameters:
727
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
728
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
729
+ for **all** `Attention` layers.
730
+
731
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
732
+ processor. This is strongly recommended when setting trainable attention processors.
733
+
734
+ """
735
+ count = len(self.attn_processors.keys())
736
+
737
+ if isinstance(processor, dict) and len(processor) != count:
738
+ raise ValueError(
739
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
740
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
741
+ )
742
+
743
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
744
+ if hasattr(module, "set_processor"):
745
+ if not isinstance(processor, dict):
746
+ module.set_processor(processor)
747
+ else:
748
+ module.set_processor(processor.pop(f"{name}.processor"))
749
+
750
+ for sub_name, child in module.named_children():
751
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
752
+
753
+ for name, module in self.named_children():
754
+ fn_recursive_attn_processor(name, module, processor)
755
+
756
+ def set_default_attn_processor(self):
757
+ """
758
+ Disables custom attention processors and sets the default attention implementation.
759
+ """
760
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
761
+ processor = AttnAddedKVProcessor()
762
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
763
+ processor = AttnProcessor()
764
+ else:
765
+ raise ValueError(
766
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
767
+ )
768
+
769
+ self.set_attn_processor(processor)
770
+
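A hedged usage sketch on an instantiated model (`unet` is assumed to be an instance of the class above): inspect the per-layer processors, then reset them to the defaults.

```python
procs = unet.attn_processors              # dict keyed by module path -> AttentionProcessor
print(len(procs), next(iter(procs)))      # e.g. "down_blocks.0.attentions.0...processor"
unet.set_default_attn_processor()         # drop any custom / added-KV processors
```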
771
+ def set_attention_slice(self, slice_size: Union[str, int, List[int]] = "auto"):
772
+ r"""
773
+ Enable sliced attention computation.
774
+
775
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
776
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
777
+
778
+ Args:
779
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
780
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
781
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
782
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
783
+ must be a multiple of `slice_size`.
784
+ """
785
+ sliceable_head_dims = []
786
+
787
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
788
+ if hasattr(module, "set_attention_slice"):
789
+ sliceable_head_dims.append(module.sliceable_head_dim)
790
+
791
+ for child in module.children():
792
+ fn_recursive_retrieve_sliceable_dims(child)
793
+
794
+ # retrieve number of attention layers
795
+ for module in self.children():
796
+ fn_recursive_retrieve_sliceable_dims(module)
797
+
798
+ num_sliceable_layers = len(sliceable_head_dims)
799
+
800
+ if slice_size == "auto":
801
+ # half the attention head size is usually a good trade-off between
802
+ # speed and memory
803
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
804
+ elif slice_size == "max":
805
+ # make smallest slice possible
806
+ slice_size = num_sliceable_layers * [1]
807
+
808
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
809
+
810
+ if len(slice_size) != len(sliceable_head_dims):
811
+ raise ValueError(
812
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
813
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
814
+ )
815
+
816
+ for i in range(len(slice_size)):
817
+ size = slice_size[i]
818
+ dim = sliceable_head_dims[i]
819
+ if size is not None and size > dim:
820
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
821
+
822
+ # Recursively walk through all the children.
823
+ # Any children which exposes the set_attention_slice method
824
+ # gets the message
825
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
826
+ if hasattr(module, "set_attention_slice"):
827
+ module.set_attention_slice(slice_size.pop())
828
+
829
+ for child in module.children():
830
+ fn_recursive_set_attention_slice(child, slice_size)
831
+
832
+ reversed_slice_size = list(reversed(slice_size))
833
+ for module in self.children():
834
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
835
+
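Typical usage on an instantiated model (`unet` is assumed):

```python
unet.set_attention_slice("auto")   # compute attention in two slices per layer
# ... run inference with reduced peak attention memory ...
unet.set_attention_slice("max")    # slice size 1: slowest, but the most memory-frugal
```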
836
+ def _set_gradient_checkpointing(self, module, value=False):
837
+ if hasattr(module, "gradient_checkpointing"):
838
+ module.gradient_checkpointing = value
839
+
840
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
841
+ r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
842
+
843
+ The suffixes after the scaling factors represent the stage blocks where they are being applied.
844
+
845
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
846
+ are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
847
+
848
+ Args:
849
+ s1 (`float`):
850
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
851
+ mitigate the "oversmoothing effect" in the enhanced denoising process.
852
+ s2 (`float`):
853
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
854
+ mitigate the "oversmoothing effect" in the enhanced denoising process.
855
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
856
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
857
+ """
858
+ for i, upsample_block in enumerate(self.up_blocks):
859
+ setattr(upsample_block, "s1", s1)
860
+ setattr(upsample_block, "s2", s2)
861
+ setattr(upsample_block, "b1", b1)
862
+ setattr(upsample_block, "b2", b2)
863
+
864
+ def disable_freeu(self):
865
+ """Disables the FreeU mechanism."""
866
+ freeu_keys = {"s1", "s2", "b1", "b2"}
867
+ for i, upsample_block in enumerate(self.up_blocks):
868
+ for k in freeu_keys:
869
+ if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
870
+ setattr(upsample_block, k, None)
871
+
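A hedged example with values the FreeU authors suggest for SD 1.x (see the linked repository for other pipelines); `unet` is assumed to be an instance of the class above:

```python
unet.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6)   # SD 1.x values suggested upstream
# ... run inference ...
unet.disable_freeu()
```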
872
+ def fuse_qkv_projections(self):
873
+ """
874
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
875
+ are fused. For cross-attention modules, key and value projection matrices are fused.
876
+
877
+ <Tip warning={true}>
878
+
879
+ This API is 🧪 experimental.
880
+
881
+ </Tip>
882
+ """
883
+ self.original_attn_processors = None
884
+
885
+ for _, attn_processor in self.attn_processors.items():
886
+ if "Added" in str(attn_processor.__class__.__name__):
887
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
888
+
889
+ self.original_attn_processors = self.attn_processors
890
+
891
+ for module in self.modules():
892
+ if isinstance(module, Attention):
893
+ module.fuse_projections(fuse=True)
894
+
895
+ def unfuse_qkv_projections(self):
896
+ """Disables the fused QKV projection if enabled.
897
+
898
+ <Tip warning={true}>
899
+
900
+ This API is 🧪 experimental.
901
+
902
+ </Tip>
903
+
904
+ """
905
+ if self.original_attn_processors is not None:
906
+ self.set_attn_processor(self.original_attn_processors)
907
+
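Usage sketch for the experimental fused-QKV path (`unet` is assumed):

```python
unet.fuse_qkv_projections()     # fuse q/k/v (and k/v for cross-attention) into single matmuls
# ... run inference ...
unet.unfuse_qkv_projections()   # restore the original per-projection processors
```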
908
+ def unload_lora(self):
909
+ """Unloads LoRA weights."""
910
+ deprecate(
911
+ "unload_lora",
912
+ "0.28.0",
913
+ "Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters()`.",
914
+ )
915
+ for module in self.modules():
916
+ if hasattr(module, "set_lora_layer"):
917
+ module.set_lora_layer(None)
918
+
919
+ def get_time_embed(
920
+ self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int]
921
+ ) -> Optional[torch.Tensor]:
922
+ timesteps = timestep
923
+ if not torch.is_tensor(timesteps):
924
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
925
+ # This would be a good case for the `match` statement (Python 3.10+)
926
+ is_mps = sample.device.type == "mps"
927
+ if isinstance(timestep, float):
928
+ dtype = torch.float32 if is_mps else torch.float64
929
+ else:
930
+ dtype = torch.int32 if is_mps else torch.int64
931
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
932
+ elif len(timesteps.shape) == 0:
933
+ timesteps = timesteps[None].to(sample.device)
934
+
935
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
936
+ timesteps = timesteps.expand(sample.shape[0])
937
+
938
+ t_emb = self.time_proj(timesteps)
939
+ # `Timesteps` does not contain any weights and will always return f32 tensors
940
+ # but time_embedding might actually be running in fp16. so we need to cast here.
941
+ # there might be better ways to encapsulate this.
942
+ t_emb = t_emb.to(dtype=sample.dtype)
943
+ return t_emb
944
+
945
+ def get_class_embed(self, sample: torch.Tensor, class_labels: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
+ class_emb = None
+ if self.class_embedding is not None:
+ if class_labels is None:
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
+
+ if self.config.class_embed_type == "timestep":
+ class_labels = self.time_proj(class_labels)
+
+ # `Timesteps` does not contain any weights and will always return f32 tensors
+ # there might be better ways to encapsulate this.
+ class_labels = class_labels.to(dtype=sample.dtype)
+
+ class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
+ return class_emb
+
+ def get_aug_embed(
+ self, emb: torch.Tensor, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
+ ) -> Optional[torch.Tensor]:
+ aug_emb = None
+ if self.config.addition_embed_type == "text":
+ aug_emb = self.add_embedding(encoder_hidden_states)
+ elif self.config.addition_embed_type == "text_image":
+ # Kandinsky 2.1 - style
+ if "image_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
+ )
+
+ image_embs = added_cond_kwargs.get("image_embeds")
+ text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
+ aug_emb = self.add_embedding(text_embs, image_embs)
+ elif self.config.addition_embed_type == "text_time":
+ # SDXL - style
+ if "text_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
+ )
+ text_embeds = added_cond_kwargs.get("text_embeds")
+ if "time_ids" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
+ )
+ time_ids = added_cond_kwargs.get("time_ids")
+ time_embeds = self.add_time_proj(time_ids.flatten())
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
+ add_embeds = add_embeds.to(emb.dtype)
+ aug_emb = self.add_embedding(add_embeds)
+ elif self.config.addition_embed_type == "image":
+ # Kandinsky 2.2 - style
+ if "image_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
+ )
+ image_embs = added_cond_kwargs.get("image_embeds")
+ aug_emb = self.add_embedding(image_embs)
+ elif self.config.addition_embed_type == "image_hint":
+ # Kandinsky 2.2 - style
+ if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
+ )
+ image_embs = added_cond_kwargs.get("image_embeds")
+ hint = added_cond_kwargs.get("hint")
+ aug_emb = self.add_embedding(image_embs, hint)
+ return aug_emb
+
+ def process_encoder_hidden_states(
+ self, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
+ ) -> torch.Tensor:
+ if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
+ # Kandinsky 2.1 - style
+ if "image_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
+ )
+
+ image_embeds = added_cond_kwargs.get("image_embeds")
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
+ # Kandinsky 2.2 - style
+ if "image_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
+ )
+ image_embeds = added_cond_kwargs.get("image_embeds")
+ encoder_hidden_states = self.encoder_hid_proj(image_embeds)
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
+ if "image_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
+ )
+ image_embeds = added_cond_kwargs.get("image_embeds")
+ image_embeds = self.encoder_hid_proj(image_embeds)
+ encoder_hidden_states = (encoder_hidden_states, image_embeds)
+ return encoder_hidden_states
+
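To make the conditioning plumbing above concrete, here is a minimal sketch of the SDXL-style (`"text_time"`) `added_cond_kwargs` a caller would pass; the tensor shapes are illustrative for a single 1024x1024 sample and are not taken from this repository:

```python
import torch

# Hypothetical shapes for an SDXL UNet: pooled text embeds plus the 6 micro-conditioning ids.
added_cond_kwargs = {
    "text_embeds": torch.randn(1, 1280),                         # pooled prompt embedding
    "time_ids": torch.tensor([[1024, 1024, 0, 0, 1024, 1024]]),  # (orig_h, orig_w, crop_top, crop_left, target_h, target_w)
}
# get_aug_embed() projects time_ids, concatenates them with text_embeds and feeds add_embedding;
# process_encoder_hidden_states() would additionally bundle IP-Adapter image_embeds when
# encoder_hid_dim_type == "ip_image_proj".
```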
+ def forward(
+ self,
+ sample: torch.Tensor,
+ timestep: Union[torch.Tensor, float, int],
+ encoder_hidden_states: torch.Tensor,
+ class_labels: Optional[torch.Tensor] = None,
+ timestep_cond: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
+ down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
+ mid_block_additional_residual: Optional[torch.Tensor] = None,
+ down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ return_dict: bool = True,
+ ) -> Union[UNet2DConditionOutput, Tuple]:
+ r"""
+ The [`UNet2DConditionModel`] forward method.
+
+ Args:
+ sample (`torch.Tensor`):
+ The noisy input tensor with the following shape `(batch, channel, height, width)`.
+ timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
+ encoder_hidden_states (`torch.Tensor`):
+ The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
+ timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
+ Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
+ through the `self.time_embedding` layer to obtain the timestep embeddings.
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
+ negative values to the attention scores corresponding to "discard" tokens.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ added_cond_kwargs: (`dict`, *optional*):
+ A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
+ are passed along to the UNet blocks.
+ down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
+ A tuple of tensors that if specified are added to the residuals of down unet blocks.
+ mid_block_additional_residual: (`torch.Tensor`, *optional*):
+ A tensor that if specified is added to the residual of the middle unet block.
+ down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
+ additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
+ encoder_attention_mask (`torch.Tensor`):
+ A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
+ `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
+ which adds large negative values to the attention scores corresponding to "discard" tokens.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
+ tuple.
+
+ Returns:
+ [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
+ If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
+ otherwise a `tuple` is returned where the first element is the sample tensor.
+ """
+ # By default samples have to be at least a multiple of the overall upsampling factor.
+ # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
+ # However, the upsampling interpolation output size can be forced to fit any upsampling size
+ # on the fly if necessary.
+ default_overall_up_factor = 2**self.num_upsamplers
+
+ # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
+ forward_upsample_size = False
+ upsample_size = None
+
+ for dim in sample.shape[-2:]:
+ if dim % default_overall_up_factor != 0:
+ # Forward upsample size to force interpolation output size.
+ forward_upsample_size = True
+ break
+
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
+ # expects mask of shape:
+ # [batch, key_tokens]
+ # adds singleton query_tokens dimension:
+ # [batch, 1, key_tokens]
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
+ if attention_mask is not None:
+ # assume that mask is expressed as:
+ # (1 = keep, 0 = discard)
+ # convert mask into a bias that can be added to attention scores:
+ # (keep = +0, discard = -10000.0)
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
+ attention_mask = attention_mask.unsqueeze(1)
+
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
+ if encoder_attention_mask is not None:
+ encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
+
+ # 0. center input if necessary
+ if self.config.center_input_sample:
+ sample = 2 * sample - 1.0
+
+ # 1. time
+ t_emb = self.get_time_embed(sample=sample, timestep=timestep)
+ emb = self.time_embedding(t_emb, timestep_cond)
+ aug_emb = None
+
+ class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
+ if class_emb is not None:
+ if self.config.class_embeddings_concat:
+ emb = torch.cat([emb, class_emb], dim=-1)
+ else:
+ emb = emb + class_emb
+
+ aug_emb = self.get_aug_embed(
+ emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
+ )
+ if self.config.addition_embed_type == "image_hint":
+ aug_emb, hint = aug_emb
+ sample = torch.cat([sample, hint], dim=1)
+
+ emb = emb + aug_emb if aug_emb is not None else emb
+
+ if self.time_embed_act is not None:
+ emb = self.time_embed_act(emb)
+
+ encoder_hidden_states = self.process_encoder_hidden_states(
+ encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
+ )
+
+ # 2. pre-process
+ sample = self.conv_in(sample)
+
+ # 2.5 GLIGEN position net
+ if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
+ cross_attention_kwargs = cross_attention_kwargs.copy()
+ gligen_args = cross_attention_kwargs.pop("gligen")
+ cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
+
+ # 3. down
+ # we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
+ # to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
+ if cross_attention_kwargs is not None:
+ cross_attention_kwargs = cross_attention_kwargs.copy()
+ lora_scale = cross_attention_kwargs.pop("scale", 1.0)
+ else:
+ lora_scale = 1.0
+
+ if USE_PEFT_BACKEND:
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
+ scale_lora_layers(self, lora_scale)
+
+ is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
+ # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
+ is_adapter = down_intrablock_additional_residuals is not None
+ # maintain backward compatibility for legacy usage, where
+ # T2I-Adapter and ControlNet both use down_block_additional_residuals arg
+ # but can only use one or the other
+ if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
+ deprecate(
+ "T2I should not use down_block_additional_residuals",
+ "1.3.0",
+ "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
+ and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
+ for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ",
+ standard_warn=False,
+ )
+ down_intrablock_additional_residuals = down_block_additional_residuals
+ is_adapter = True
+
+ down_block_res_samples = (sample,)
+ for downsample_block in self.down_blocks:
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
+ # For t2i-adapter CrossAttnDownBlock2D
+ additional_residuals = {}
+ if is_adapter and len(down_intrablock_additional_residuals) > 0:
+ additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
+
+ sample, res_samples = downsample_block(
+ hidden_states=sample,
+ temb=emb,
+ encoder_hidden_states=encoder_hidden_states,
+ attention_mask=attention_mask,
+ cross_attention_kwargs=cross_attention_kwargs,
+ encoder_attention_mask=encoder_attention_mask,
+ **additional_residuals,
+ )
+ else:
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
+ if is_adapter and len(down_intrablock_additional_residuals) > 0:
+ sample += down_intrablock_additional_residuals.pop(0)
+
+ down_block_res_samples += res_samples
+
+ if is_controlnet:
+ new_down_block_res_samples = ()
+
+ for down_block_res_sample, down_block_additional_residual in zip(
+ down_block_res_samples, down_block_additional_residuals
+ ):
+ down_block_res_sample = down_block_res_sample + down_block_additional_residual
+ new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
+
+ down_block_res_samples = new_down_block_res_samples
+
+ # 4. mid
+ if self.mid_block is not None:
+ if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
+ sample = self.mid_block(
+ sample,
+ emb,
+ encoder_hidden_states=encoder_hidden_states,
+ attention_mask=attention_mask,
+ cross_attention_kwargs=cross_attention_kwargs,
+ encoder_attention_mask=encoder_attention_mask,
+ )
+ else:
+ sample = self.mid_block(sample, emb)
+
+ # To support T2I-Adapter-XL
+ if (
+ is_adapter
+ and len(down_intrablock_additional_residuals) > 0
+ and sample.shape == down_intrablock_additional_residuals[0].shape
+ ):
+ sample += down_intrablock_additional_residuals.pop(0)
+
+ if is_controlnet:
+ sample = sample + mid_block_additional_residual
+
+ # 5. up
+ for i, upsample_block in enumerate(self.up_blocks):
+ is_final_block = i == len(self.up_blocks) - 1
+
+ res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
+ down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
+
+ # if we have not reached the final block and need to forward the
+ # upsample size, we do it here
+ if not is_final_block and forward_upsample_size:
+ upsample_size = down_block_res_samples[-1].shape[2:]
+
+ if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
+ sample = upsample_block(
+ hidden_states=sample,
+ temb=emb,
+ res_hidden_states_tuple=res_samples,
+ encoder_hidden_states=encoder_hidden_states,
+ cross_attention_kwargs=cross_attention_kwargs,
+ upsample_size=upsample_size,
+ attention_mask=attention_mask,
+ encoder_attention_mask=encoder_attention_mask,
+ )
+ else:
+ sample = upsample_block(
+ hidden_states=sample,
+ temb=emb,
+ res_hidden_states_tuple=res_samples,
+ upsample_size=upsample_size,
+ )
+
+ # 6. post-process
+ if self.conv_norm_out:
+ sample = self.conv_norm_out(sample)
+ sample = self.conv_act(sample)
+ sample = self.conv_out(sample)
+
+ if USE_PEFT_BACKEND:
+ # remove `lora_scale` from each PEFT layer
+ unscale_lora_layers(self, lora_scale)
+
+ if not return_dict:
+ return (sample,)
+
+ return UNet2DConditionOutput(sample=sample)
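As a sanity check on the signature above, a minimal sketch of a single denoising step through this forward pass, continuing from the pipeline loaded in the earlier FreeU sketch; all shapes are illustrative, not taken from this repository:

```python
import torch

unet = pipe.unet  # UNet2DConditionModel with the forward pass shown above
latents = torch.randn(1, unet.config.in_channels, 64, 64, dtype=unet.dtype, device=unet.device)
prompt_embeds = torch.randn(1, 77, unet.config.cross_attention_dim, dtype=unet.dtype, device=unet.device)

with torch.no_grad():
    noise_pred = unet(latents, timestep=10, encoder_hidden_states=prompt_embeds).sample
print(noise_pred.shape)  # same spatial shape as the input latents
```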
modules/u_net_modify.py ADDED
@@ -0,0 +1,315 @@
+
+ import inspect
+ import os
+ from collections import defaultdict
+ from contextlib import nullcontext
+ from functools import partial
+ from pathlib import Path
+ from typing import Callable, Dict, List, Optional, Union
+
+
+ import safetensors
+ import torch
+ import torch.nn.functional as F
+ from huggingface_hub.utils import validate_hf_hub_args
+ from torch import nn
+
+ from diffusers.models.embeddings import (
+ ImageProjection,
+ IPAdapterFaceIDImageProjection,
+ IPAdapterFaceIDPlusImageProjection,
+ IPAdapterFullImageProjection,
+ IPAdapterPlusImageProjection,
+ MultiIPAdapterImageProjection,
+ )
+
+ from diffusers.models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta, load_state_dict
+
+ from diffusers.loaders.unet import UNet2DConditionLoadersMixin
+ from diffusers.utils import (
+ USE_PEFT_BACKEND,
+ _get_model_file,
+ delete_adapter_layers,
+ is_accelerate_available,
+ is_torch_version,
+ logging,
+ set_adapter_layers,
+ set_weights_and_activate_adapters,
+ )
+
+ from diffusers.loaders.utils import AttnProcsLayers
+
+ from .attention_modify import AttnProcessor, IPAdapterAttnProcessor, AttnProcessor2_0, IPAdapterAttnProcessor2_0
+
+ if is_accelerate_available():
+ from accelerate import init_empty_weights
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
+
+ logger = logging.get_logger(__name__)
+
+
+
+ class UNet2DConditionLoadersMixin_modify(UNet2DConditionLoadersMixin):
+ def _convert_ip_adapter_attn_to_diffusers(self, state_dicts, low_cpu_mem_usage=False):
+
+ if low_cpu_mem_usage:
+ if is_accelerate_available():
+ from accelerate import init_empty_weights
+
+ else:
+ low_cpu_mem_usage = False
+ logger.warning(
+ "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
+ " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
+ " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
+ " install accelerate\n```\n."
+ )
+
+ if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
+ raise NotImplementedError(
+ "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
+ " `low_cpu_mem_usage=False`."
+ )
+
+ # set ip-adapter cross-attention processors & load state_dict
+ attn_procs = {}
+ key_id = 1
+ init_context = init_empty_weights if low_cpu_mem_usage else nullcontext
+ for name in self.attn_processors.keys():
+ cross_attention_dim = None if name.endswith("attn1.processor") else self.config.cross_attention_dim
+ if name.startswith("mid_block"):
+ hidden_size = self.config.block_out_channels[-1]
+ elif name.startswith("up_blocks"):
+ block_id = int(name[len("up_blocks.")])
+ hidden_size = list(reversed(self.config.block_out_channels))[block_id]
+ elif name.startswith("down_blocks"):
+ block_id = int(name[len("down_blocks.")])
+ hidden_size = self.config.block_out_channels[block_id]
+
+ if cross_attention_dim is None or "motion_modules" in name:
+ attn_processor_class = (
+ AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor
+ )
+ attn_procs[name] = attn_processor_class()
+
+ else:
+ attn_processor_class = (
+ IPAdapterAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else IPAdapterAttnProcessor
+ )
+ num_image_text_embeds = []
+ for state_dict in state_dicts:
+ if "proj.weight" in state_dict["image_proj"]:
+ # IP-Adapter
+ num_image_text_embeds += [4]
+ elif "proj.3.weight" in state_dict["image_proj"]:
+ # IP-Adapter Full Face
+ num_image_text_embeds += [257] # 256 CLIP tokens + 1 CLS token
+ elif "perceiver_resampler.proj_in.weight" in state_dict["image_proj"]:
+ # IP-Adapter Face ID Plus
+ num_image_text_embeds += [4]
+ elif "norm.weight" in state_dict["image_proj"]:
+ # IP-Adapter Face ID
+ num_image_text_embeds += [4]
+ else:
+ # IP-Adapter Plus
+ num_image_text_embeds += [state_dict["image_proj"]["latents"].shape[1]]
+
+ with init_context():
+ attn_procs[name] = attn_processor_class(
+ hidden_size=hidden_size,
+ cross_attention_dim=cross_attention_dim,
+ scale=1.0,
+ num_tokens=num_image_text_embeds,
+ )
+
+ value_dict = {}
+ for i, state_dict in enumerate(state_dicts):
+ value_dict.update({f"to_k_ip.{i}.weight": state_dict["ip_adapter"][f"{key_id}.to_k_ip.weight"]})
+ value_dict.update({f"to_v_ip.{i}.weight": state_dict["ip_adapter"][f"{key_id}.to_v_ip.weight"]})
+
+ if not low_cpu_mem_usage:
+ attn_procs[name].load_state_dict(value_dict)
+ else:
+ device = next(iter(value_dict.values())).device
+ dtype = next(iter(value_dict.values())).dtype
+ load_model_dict_into_meta(attn_procs[name], value_dict, device=device, dtype=dtype)
+
+ key_id += 2
+
+ return attn_procs
+
+ def _load_ip_adapter_weights(self, state_dicts, low_cpu_mem_usage=False):
+ if not isinstance(state_dicts, list):
+ state_dicts = [state_dicts]
+ # Set encoder_hid_proj after loading ip_adapter weights,
+ # because `IPAdapterPlusImageProjection` also has `attn_processors`.
+ self.encoder_hid_proj = None
+
+ attn_procs = self._convert_ip_adapter_attn_to_diffusers(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage)
+ self.set_attn_processor(attn_procs)
+
+ # convert IP-Adapter Image Projection layers to diffusers
+ image_projection_layers = []
+ for state_dict in state_dicts:
+ image_projection_layer = self._convert_ip_adapter_image_proj_to_diffusers(
+ state_dict["image_proj"], low_cpu_mem_usage=low_cpu_mem_usage
+ )
+ image_projection_layers.append(image_projection_layer)
+
+ self.encoder_hid_proj = MultiIPAdapterImageProjection(image_projection_layers)
+ self.config.encoder_hid_dim_type = "ip_image_proj"
+
+ self.to(dtype=self.dtype, device=self.device)
+
+ def _load_ip_adapter_loras(self, state_dicts):
+ lora_dicts = {}
+ for key_id, name in enumerate(self.attn_processors.keys()):
+ for i, state_dict in enumerate(state_dicts):
+ if f"{key_id}.to_k_lora.down.weight" in state_dict["ip_adapter"]:
+ if i not in lora_dicts:
+ lora_dicts[i] = {}
+ lora_dicts[i].update(
+ {
+ f"unet.{name}.to_k_lora.down.weight": state_dict["ip_adapter"][
+ f"{key_id}.to_k_lora.down.weight"
+ ]
+ }
+ )
+ lora_dicts[i].update(
+ {
+ f"unet.{name}.to_q_lora.down.weight": state_dict["ip_adapter"][
+ f"{key_id}.to_q_lora.down.weight"
+ ]
+ }
+ )
+ lora_dicts[i].update(
+ {
+ f"unet.{name}.to_v_lora.down.weight": state_dict["ip_adapter"][
+ f"{key_id}.to_v_lora.down.weight"
+ ]
+ }
+ )
+ lora_dicts[i].update(
+ {
+ f"unet.{name}.to_out_lora.down.weight": state_dict["ip_adapter"][
+ f"{key_id}.to_out_lora.down.weight"
+ ]
+ }
+ )
+ lora_dicts[i].update(
+ {f"unet.{name}.to_k_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.up.weight"]}
+ )
+ lora_dicts[i].update(
+ {f"unet.{name}.to_q_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.up.weight"]}
+ )
+ lora_dicts[i].update(
+ {f"unet.{name}.to_v_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.up.weight"]}
+ )
+ lora_dicts[i].update(
+ {
+ f"unet.{name}.to_out_lora.up.weight": state_dict["ip_adapter"][
+ f"{key_id}.to_out_lora.up.weight"
+ ]
+ }
+ )
+ return lora_dicts
+
+
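For orientation, a minimal sketch of how this modified loader mixin might be wired onto a pipeline's UNet so that IP-Adapter loading goes through the custom attention processors from `attention_modify`; the method-rebinding approach and the adapter repo id are assumptions for illustration, not something this commit prescribes:

```python
# Hypothetical wiring: route IP-Adapter loading through the modified mixin methods.
from modules.u_net_modify import UNet2DConditionLoadersMixin_modify

unet = pipe.unet
# Rebind the overridden methods onto the existing UNet instance (one possible approach).
unet._convert_ip_adapter_attn_to_diffusers = (
    UNet2DConditionLoadersMixin_modify._convert_ip_adapter_attn_to_diffusers.__get__(unet)
)
unet._load_ip_adapter_weights = UNet2DConditionLoadersMixin_modify._load_ip_adapter_weights.__get__(unet)

# Loading an IP-Adapter now uses the custom AttnProcessor / IPAdapterAttnProcessor classes.
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
```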
+ class FromOriginalUNetMixin:
+ """
+ Load pretrained UNet model weights saved in the `.ckpt` or `.safetensors` format into a [`StableCascadeUNet`].
+ """
+
+ @classmethod
+ @validate_hf_hub_args
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
+ r"""
+ Instantiate a [`StableCascadeUNet`] from pretrained StableCascadeUNet weights saved in the original `.ckpt` or
+ `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default.
+
+ Parameters:
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
+ Can be either:
+ - A link to the `.ckpt` file (for example
+ `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
+ - A path to a *file* containing all pipeline weights.
+ config: (`dict`, *optional*):
+ Dictionary containing the configuration of the model:
+ torch_dtype (`str` or `torch.dtype`, *optional*):
+ Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
+ dtype is automatically derived from the model's weights.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+ cached versions if they exist.
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
+ is not used.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
+ incompletely downloaded files are deleted.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ local_files_only (`bool`, *optional*, defaults to `False`):
+ Whether to only load local model weights and configuration files or not. If set to True, the model
+ won't be downloaded from the Hub.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
+ allowed by Git.
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+ Can be used to overwrite load and saveable variables of the model.
+
+ """
+ class_name = cls.__name__
+ if class_name != "StableCascadeUNet":
+ raise ValueError("FromOriginalUNetMixin is currently only compatible with StableCascadeUNet")
+
+ config = kwargs.pop("config", None)
+ resume_download = kwargs.pop("resume_download", False)
+ force_download = kwargs.pop("force_download", False)
+ proxies = kwargs.pop("proxies", None)
+ token = kwargs.pop("token", None)
+ cache_dir = kwargs.pop("cache_dir", None)
+ local_files_only = kwargs.pop("local_files_only", None)
+ revision = kwargs.pop("revision", None)
+ torch_dtype = kwargs.pop("torch_dtype", None)
+
+ checkpoint = load_single_file_model_checkpoint(
+ pretrained_model_link_or_path,
+ resume_download=resume_download,
+ force_download=force_download,
+ proxies=proxies,
+ token=token,
+ cache_dir=cache_dir,
+ local_files_only=local_files_only,
+ revision=revision,
+ )
+
+ if config is None:
+ config = infer_stable_cascade_single_file_config(checkpoint)
+ model_config = cls.load_config(**config, **kwargs)
+ else:
+ model_config = config
+
+ ctx = init_empty_weights if is_accelerate_available() else nullcontext
+ with ctx():
+ model = cls.from_config(model_config, **kwargs)
+
+ diffusers_format_checkpoint = convert_stable_cascade_unet_single_file_to_diffusers(checkpoint)
+ if is_accelerate_available():
+ unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype)
+ if len(unexpected_keys) > 0:
+ logger.warn(
+ f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}"
+ )
+
+ else:
+ model.load_state_dict(diffusers_format_checkpoint)
+
+ if torch_dtype is not None:
+ model.to(torch_dtype)
+
+ return model
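And a minimal sketch of the single-file loading path documented above, assuming a `StableCascadeUNet` class that picks up a `from_single_file` of this shape (the import path and checkpoint URL are illustrative, not taken from this repository):

```python
# Hypothetical usage: load a Stable Cascade UNet from an original-format checkpoint.
import torch
from diffusers import StableCascadeUNet  # assumed to expose a from_single_file like the mixin above

unet = StableCascadeUNet.from_single_file(
    "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite.safetensors",
    torch_dtype=torch.float16,
)
```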
requirements.txt ADDED
@@ -0,0 +1,16 @@
+ torch
+ einops==0.8.0
+ diffusers==0.29.0
+ transformers==4.41.2
+ k_diffusion==0.1.1.post1
+ safetensors==0.4.3
+ gradio==3.44.4
+ timm==0.6.7
+ basicsr==1.4.2
+ controlnet-aux==0.0.9
+ mediapipe==0.10.14
+ kaleido==0.2.1
+ insightface==0.7.3
+ onnxruntime-gpu
+ peft
+ pytorch_lightning==2.2.5