MegaTronX committed on
Commit efe5ed1 · verified · 1 parent: 66a696f

Upload train_network.py

Files changed (1)
  1. train_network.py +1242 -0
train_network.py ADDED
@@ -0,0 +1,1242 @@
import importlib
import argparse
import math
import os
import sys
import random
import time
import json
from multiprocessing import Value
import toml

from tqdm import tqdm

import torch
from library.device_utils import init_ipex, clean_memory_on_device

init_ipex()

from accelerate.utils import set_seed
from diffusers import DDPMScheduler
from library import deepspeed_utils, model_util

import library.train_util as train_util
from library.train_util import DreamBoothDataset
import library.config_util as config_util
from library.config_util import (
    ConfigSanitizer,
    BlueprintGenerator,
)
import library.huggingface_util as huggingface_util
import library.custom_train_functions as custom_train_functions
from library.custom_train_functions import (
    apply_snr_weight,
    get_weighted_text_embeddings,
    prepare_scheduler_for_custom_training,
    scale_v_prediction_loss_like_noise_prediction,
    add_v_prediction_like_loss,
    apply_debiased_estimation,
    apply_masked_loss,
)
from library.utils import setup_logging, add_logging_arguments

setup_logging()
import logging

logger = logging.getLogger(__name__)
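
# NetworkTrainer implements the full additional-network training flow
# (LoRA and similar adapters): load the base Stable Diffusion model, build the
# network from --network_module, train only the network's parameters on top of
# the frozen base model, and save them with "ss_*" metadata.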


class NetworkTrainer:
    def __init__(self):
        self.vae_scale_factor = 0.18215
        self.is_sdxl = False

    # TODO: unify this with the other training scripts
    def generate_step_logs(
        self,
        args: argparse.Namespace,
        current_loss,
        avr_loss,
        lr_scheduler,
        lr_descriptions,
        keys_scaled=None,
        mean_norm=None,
        maximum_norm=None,
    ):
        logs = {"loss/current": current_loss, "loss/average": avr_loss}

        if keys_scaled is not None:
            logs["max_norm/keys_scaled"] = keys_scaled
            logs["max_norm/average_key_norm"] = mean_norm
            logs["max_norm/max_key_norm"] = maximum_norm

        lrs = lr_scheduler.get_last_lr()
        for i, lr in enumerate(lrs):
            if lr_descriptions is not None:
                lr_desc = lr_descriptions[i]
            else:
                # without descriptions, the first param group is the text encoder unless
                # training the U-Net only (the original `i - (0 if ... else -1)` could never
                # yield -1, which made the "textencoder" label unreachable)
                idx = i - (0 if args.network_train_unet_only else 1)
                if idx == -1:
                    lr_desc = "textencoder"
                else:
                    if len(lrs) > 2:
                        lr_desc = f"group{idx}"
                    else:
                        lr_desc = "unet"

            logs[f"lr/{lr_desc}"] = lr

            if args.optimizer_type.lower().startswith("dadapt") or args.optimizer_type.lower() == "prodigy":
                # tracking d*lr value
                logs[f"lr/d*lr/{lr_desc}"] = (
                    lr_scheduler.optimizers[-1].param_groups[i]["d"] * lr_scheduler.optimizers[-1].param_groups[i]["lr"]
                )

        return logs

    def assert_extra_args(self, args, train_dataset_group):
        train_dataset_group.verify_bucket_reso_steps(64)

    def load_target_model(self, args, weight_dtype, accelerator):
        text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype, accelerator)
        return model_util.get_model_version_str_for_sd1_sd2(args.v2, args.v_parameterization), text_encoder, vae, unet

    def load_tokenizer(self, args):
        tokenizer = train_util.load_tokenizer(args)
        return tokenizer

    def is_text_encoder_outputs_cached(self, args):
        return False

    def is_train_text_encoder(self, args):
        return not args.network_train_unet_only and not self.is_text_encoder_outputs_cached(args)

    def cache_text_encoder_outputs_if_needed(
        self, args, accelerator, unet, vae, tokenizers, text_encoders, data_loader, weight_dtype
    ):
        for t_enc in text_encoders:
            t_enc.to(accelerator.device, dtype=weight_dtype)

    def get_text_cond(self, args, accelerator, batch, tokenizers, text_encoders, weight_dtype):
        input_ids = batch["input_ids"].to(accelerator.device)
        encoder_hidden_states = train_util.get_hidden_states(args, input_ids, tokenizers[0], text_encoders[0], weight_dtype)
        return encoder_hidden_states

    def call_unet(self, args, accelerator, unet, noisy_latents, timesteps, text_conds, batch, weight_dtype):
        noise_pred = unet(noisy_latents, timesteps, text_conds).sample
        return noise_pred

    def all_reduce_network(self, accelerator, network):
        for param in network.parameters():
            if param.grad is not None:
                param.grad = accelerator.reduce(param.grad, reduction="mean")

    def sample_images(self, accelerator, args, epoch, global_step, device, vae, tokenizer, text_encoder, unet):
        train_util.sample_images(accelerator, args, epoch, global_step, device, vae, tokenizer, text_encoder, unet)
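
    # The methods above (load_target_model, get_text_cond, call_unet, ...) are the
    # extension points of this class; a subclass (for example an SDXL variant,
    # which would also set self.is_sdxl) can override them while reusing the
    # train() loop below unchanged.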

    def train(self, args):
        session_id = random.randint(0, 2**32)
        training_started_at = time.time()
        train_util.verify_training_args(args)
        train_util.prepare_dataset_args(args, True)
        deepspeed_utils.prepare_deepspeed_args(args)
        setup_logging(args, reset=True)

        cache_latents = args.cache_latents
        use_dreambooth_method = args.in_json is None
        use_user_config = args.dataset_config is not None

        if args.seed is None:
            args.seed = random.randint(0, 2**32)
        set_seed(args.seed)

        # tokenizer may be a single tokenizer or a list; tokenizers is always a list (kept for compatibility with existing code)
        tokenizer = self.load_tokenizer(args)
        tokenizers = tokenizer if isinstance(tokenizer, list) else [tokenizer]

        # prepare the dataset
        if args.dataset_class is None:
            blueprint_generator = BlueprintGenerator(ConfigSanitizer(True, True, args.masked_loss, True))
            if use_user_config:
                logger.info(f"Loading dataset config from {args.dataset_config}")
                user_config = config_util.load_user_config(args.dataset_config)
                ignored = ["train_data_dir", "reg_data_dir", "in_json"]
                if any(getattr(args, attr) is not None for attr in ignored):
                    logger.warning(
                        "ignoring the following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
                            ", ".join(ignored)
                        )
                    )
            else:
                if use_dreambooth_method:
                    logger.info("Using DreamBooth method.")
                    user_config = {
                        "datasets": [
                            {
                                "subsets": config_util.generate_dreambooth_subsets_config_by_subdirs(
                                    args.train_data_dir, args.reg_data_dir
                                )
                            }
                        ]
                    }
                else:
                    logger.info("Training with captions.")
                    user_config = {
                        "datasets": [
                            {
                                "subsets": [
                                    {
                                        "image_dir": args.train_data_dir,
                                        "metadata_file": args.in_json,
                                    }
                                ]
                            }
                        ]
                    }

            blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer)
            train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
        else:
            # use arbitrary dataset class
            train_dataset_group = train_util.load_arbitrary_dataset(args, tokenizer)

        current_epoch = Value("i", 0)
        current_step = Value("i", 0)
        ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
        collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
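
        # current_epoch / current_step are multiprocessing.Value objects so the
        # collator, which may run in DataLoader worker processes, can observe the
        # trainer's progress and keep epoch/step-dependent dataset behavior
        # (such as caption dropout scheduling) in sync.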

        if args.debug_dataset:
            train_util.debug_dataset(train_dataset_group)
            return
        if len(train_dataset_group) == 0:
            logger.error(
                "No data found. Please verify arguments (train_data_dir must be the parent of folders with images) / 画像がありません。引数指定を確認してください(train_data_dirには画像があるフォルダではなく、画像があるフォルダの親フォルダを指定する必要があります)"
            )
            return

        if cache_latents:
            assert (
                train_dataset_group.is_latent_cacheable()
            ), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"

        self.assert_extra_args(args, train_dataset_group)

        # prepare the accelerator
        logger.info("preparing accelerator")
        accelerator = train_util.prepare_accelerator(args)
        is_main_process = accelerator.is_main_process

        # prepare dtypes for mixed precision and cast as appropriate
        weight_dtype, save_dtype = train_util.prepare_dtype(args)
        vae_dtype = torch.float32 if args.no_half_vae else weight_dtype

        # load the model
        model_version, text_encoder, vae, unet = self.load_target_model(args, weight_dtype, accelerator)

        # text_encoder is List[CLIPTextModel] or CLIPTextModel
        text_encoders = text_encoder if isinstance(text_encoder, list) else [text_encoder]

        # apply xformers or memory efficient attention to the model
        train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers, args.sdpa)
        if torch.__version__ >= "2.0.0":  # usable with xformers builds that support PyTorch 2.0.0 or later
            vae.set_use_memory_efficient_attention_xformers(args.xformers)

        # load the network module for additional training
        sys.path.append(os.path.dirname(__file__))
        accelerator.print("import network module:", args.network_module)
        network_module = importlib.import_module(args.network_module)

        if args.base_weights is not None:
            # if base_weights is specified, load those weights and merge them into the model
            for i, weight_path in enumerate(args.base_weights):
                if args.base_weights_multiplier is None or len(args.base_weights_multiplier) <= i:
                    multiplier = 1.0
                else:
                    multiplier = args.base_weights_multiplier[i]

                accelerator.print(f"merging module: {weight_path} with multiplier {multiplier}")

                module, weights_sd = network_module.create_network_from_weights(
                    multiplier, weight_path, vae, text_encoder, unet, for_inference=True
                )
                module.merge_to(text_encoder, unet, weights_sd, weight_dtype, accelerator.device if args.lowram else "cpu")

            accelerator.print(f"all weights merged: {', '.join(args.base_weights)}")

        # prepare for training
        if cache_latents:
            vae.to(accelerator.device, dtype=vae_dtype)
            vae.requires_grad_(False)
            vae.eval()
            with torch.no_grad():
                train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_latents_to_disk, accelerator.is_main_process)
            vae.to("cpu")
            clean_memory_on_device(accelerator.device)

            accelerator.wait_for_everyone()
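
        # With cached latents the VAE is only needed for this single encoding pass:
        # it is moved to the device, used under no_grad, then returned to the CPU
        # and its device memory freed before the training loop starts.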

        # cache text encoder outputs if needed: the Text Encoder is moved to cpu or gpu
        self.cache_text_encoder_outputs_if_needed(
            args, accelerator, unet, vae, tokenizers, text_encoders, train_dataset_group, weight_dtype
        )

        # prepare network
        net_kwargs = {}
        if args.network_args is not None:
            for net_arg in args.network_args:
                key, value = net_arg.split("=")
                net_kwargs[key] = value

        # if a new network is added in future, add if ~ then blocks for each network (;'∀')
        if args.dim_from_weights:
            network, _ = network_module.create_network_from_weights(1, args.network_weights, vae, text_encoder, unet, **net_kwargs)
        else:
            if "dropout" not in net_kwargs:
                # workaround for LyCORIS (;^ω^)
                net_kwargs["dropout"] = args.network_dropout

            network = network_module.create_network(
                1.0,
                args.network_dim,
                args.network_alpha,
                vae,
                text_encoder,
                unet,
                neuron_dropout=args.network_dropout,
                **net_kwargs,
            )
        if network is None:
            return
        network_has_multiplier = hasattr(network, "set_multiplier")

        if hasattr(network, "prepare_network"):
            network.prepare_network(args)
        if args.scale_weight_norms and not hasattr(network, "apply_max_norm_regularization"):
            logger.warning(
                "warning: scale_weight_norms is specified but the network does not support it / scale_weight_normsが指定されていますが、ネットワークが対応していません"
            )
            args.scale_weight_norms = False

        train_unet = not args.network_train_text_encoder_only
        train_text_encoder = self.is_train_text_encoder(args)
        network.apply_to(text_encoder, unet, train_text_encoder, train_unet)
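
        # apply_to() injects the network's modules into the text encoder and/or
        # U-Net so that their forward passes go through the network from here on;
        # only the network's own parameters will receive gradients.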

        if args.network_weights is not None:
            # FIXME consider alpha of weights
            info = network.load_weights(args.network_weights)
            accelerator.print(f"load network weights from {args.network_weights}: {info}")

        if args.gradient_checkpointing:
            unet.enable_gradient_checkpointing()
            for t_enc in text_encoders:
                t_enc.gradient_checkpointing_enable()
            del t_enc
            network.enable_gradient_checkpointing()  # may have no effect

        # prepare the classes needed for training
        accelerator.print("prepare optimizer, data loader etc.")

        # keep backward compatibility with older network modules
        try:
            results = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr, args.learning_rate)
            if type(results) is tuple:
                trainable_params = results[0]
                lr_descriptions = results[1]
            else:
                trainable_params = results
                lr_descriptions = None
        except TypeError as e:
            # logger.warning(f"{e}")
            # accelerator.print(
            #     "Deprecated: use prepare_optimizer_params(text_encoder_lr, unet_lr, learning_rate) instead of prepare_optimizer_params(text_encoder_lr, unet_lr)"
            # )
            trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr)
            lr_descriptions = None

        # if len(trainable_params) == 0:
        #     accelerator.print("no trainable parameters found / 学習可能なパラメータが見つかりませんでした")
        # for params in trainable_params:
        #     for k, v in params.items():
        #         if type(v) == float:
        #             pass
        #         else:
        #             v = len(v)
        #         accelerator.print(f"trainable_params: {k} = {v}")

        optimizer_name, optimizer_args, optimizer = train_util.get_optimizer(args, trainable_params)

        # prepare the DataLoader
        # note: num_workers == 0 cannot be combined with persistent_workers
        n_workers = min(args.max_data_loader_n_workers, os.cpu_count())  # cpu_count or max_data_loader_n_workers

        train_dataloader = torch.utils.data.DataLoader(
            train_dataset_group,
            batch_size=1,
            shuffle=True,
            collate_fn=collator,
            num_workers=n_workers,
            persistent_workers=args.persistent_data_loader_workers,
        )
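
        # batch_size=1 here is not the effective batch size: each dataset item is
        # already a full (bucketed) batch, and the custom collator unwraps it.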

        # calculate the number of training steps
        if args.max_train_epochs is not None:
            args.max_train_steps = args.max_train_epochs * math.ceil(
                len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
            )
            accelerator.print(
                f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}"
            )

        # also notify the dataset of the number of training steps
        train_dataset_group.set_max_train_steps(args.max_train_steps)

        # prepare the lr scheduler
        lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)

        # experimental feature: fp16/bf16 training including gradients; cast the whole model to fp16/bf16
        if args.full_fp16:
            assert (
                args.mixed_precision == "fp16"
            ), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。"
            accelerator.print("enable full fp16 training.")
            network.to(weight_dtype)
        elif args.full_bf16:
            assert (
                args.mixed_precision == "bf16"
            ), "full_bf16 requires mixed precision='bf16' / full_bf16を使う場合はmixed_precision='bf16'を指定してください。"
            accelerator.print("enable full bf16 training.")
            network.to(weight_dtype)

        unet_weight_dtype = te_weight_dtype = weight_dtype
        # experimental feature: put the base model into fp8 to save VRAM
        if args.fp8_base:
            assert torch.__version__ >= "2.1.0", "fp8_base requires torch>=2.1.0 / fp8を使う場合はtorch>=2.1.0が必要です。"
            assert (
                args.mixed_precision != "no"
            ), "fp8_base requires mixed precision='fp16' or 'bf16' / fp8を使う場合はmixed_precision='fp16'または'bf16'が必要です。"
            accelerator.print("enable fp8 training.")
            unet_weight_dtype = torch.float8_e4m3fn
            te_weight_dtype = torch.float8_e4m3fn

        unet.requires_grad_(False)
        unet.to(dtype=unet_weight_dtype)
        for t_enc in text_encoders:
            t_enc.requires_grad_(False)

            # in case of cpu, dtype is already set to fp32 because cpu does not support fp8/fp16/bf16
            if t_enc.device.type != "cpu":
                t_enc.to(dtype=te_weight_dtype)
                # nn.Embedding does not support FP8, so keep the embeddings in weight_dtype
                t_enc.text_model.embeddings.to(dtype=(weight_dtype if te_weight_dtype != weight_dtype else te_weight_dtype))
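
        # With fp8_base the frozen base weights are only *stored* in float8_e4m3fn;
        # the actual computation still runs in the autocast dtype (fp16/bf16), so
        # this trades some casting overhead for a large VRAM saving.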

        # accelerator handles distributed wrapping and device placement for us
        if args.deepspeed:
            ds_model = deepspeed_utils.prepare_deepspeed_model(
                args,
                unet=unet if train_unet else None,
                text_encoder1=text_encoders[0] if train_text_encoder else None,
                text_encoder2=text_encoders[1] if train_text_encoder and len(text_encoders) > 1 else None,
                network=network,
            )
            ds_model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
                ds_model, optimizer, train_dataloader, lr_scheduler
            )
            training_model = ds_model
        else:
            if train_unet:
                unet = accelerator.prepare(unet)
            else:
                unet.to(accelerator.device, dtype=unet_weight_dtype)  # move to device because unet is not prepared by accelerator
            if train_text_encoder:
                if len(text_encoders) > 1:
                    text_encoder = text_encoders = [accelerator.prepare(t_enc) for t_enc in text_encoders]
                else:
                    text_encoder = accelerator.prepare(text_encoder)
                    text_encoders = [text_encoder]
            else:
                pass  # if text_encoder is not trained, no need to prepare. and device and dtype are already set

            network, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
                network, optimizer, train_dataloader, lr_scheduler
            )
            training_model = network
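
        # In the non-deepspeed path only the network is the "training model";
        # the U-Net and text encoders are prepared (or just moved to the device)
        # separately, since their weights stay frozen and only the injected
        # network learns.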

        if args.gradient_checkpointing:
            # according to TI example in Diffusers, train is required
            unet.train()
            for t_enc in text_encoders:
                t_enc.train()

                # set requires_grad=True on the top-level embeddings so gradient checkpointing works
                if train_text_encoder:
                    t_enc.text_model.embeddings.requires_grad_(True)

        else:
            unet.eval()
            for t_enc in text_encoders:
                t_enc.eval()

        del t_enc

        accelerator.unwrap_model(network).prepare_grad_etc(text_encoder, unet)

        if not cache_latents:  # if latents are not cached, the VAE is used during training, so prepare it
            vae.requires_grad_(False)
            vae.eval()
            vae.to(accelerator.device, dtype=vae_dtype)

        # experimental feature: fp16 training including gradients; patch PyTorch to enable grad scaling in fp16
        if args.full_fp16:
            train_util.patch_accelerator_for_fp16_training(accelerator)

        # before resuming make hook for saving/loading to save/load the network weights only
        def save_model_hook(models, weights, output_dir):
            # pop weights of other models than network to save only network weights
            # only main process or deepspeed https://github.com/huggingface/diffusers/issues/2606
            if accelerator.is_main_process or args.deepspeed:
                remove_indices = []
                for i, model in enumerate(models):
                    if not isinstance(model, type(accelerator.unwrap_model(network))):
                        remove_indices.append(i)
                for i in reversed(remove_indices):
                    if len(weights) > i:
                        weights.pop(i)
                # print(f"save model hook: {len(weights)} weights will be saved")

            # save current epoch and step
            train_state_file = os.path.join(output_dir, "train_state.json")
            # +1 is needed because the state is saved before current_step is set from global_step
            logger.info(f"save train state to {train_state_file} at epoch {current_epoch.value} step {current_step.value+1}")
            with open(train_state_file, "w", encoding="utf-8") as f:
                json.dump({"current_epoch": current_epoch.value, "current_step": current_step.value + 1}, f)

        steps_from_state = None

        def load_model_hook(models, input_dir):
            # remove models except network
            remove_indices = []
            for i, model in enumerate(models):
                if not isinstance(model, type(accelerator.unwrap_model(network))):
                    remove_indices.append(i)
            for i in reversed(remove_indices):
                models.pop(i)
            # print(f"load model hook: {len(models)} models will be loaded")

            # load current epoch and step
            nonlocal steps_from_state
            train_state_file = os.path.join(input_dir, "train_state.json")
            if os.path.exists(train_state_file):
                with open(train_state_file, "r", encoding="utf-8") as f:
                    data = json.load(f)
                steps_from_state = data["current_step"]
                logger.info(f"load train state from {train_state_file}: {data}")

        accelerator.register_save_state_pre_hook(save_model_hook)
        accelerator.register_load_state_pre_hook(load_model_hook)
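
        # These accelerate pre-hooks make save_state/load_state persist only the
        # network weights (plus a small train_state.json with the epoch/step),
        # instead of the full U-Net and text encoder weights.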

        # resume training if specified
        train_util.resume_from_local_or_hf_if_specified(accelerator, args)

        # calculate the number of epochs
        num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
        num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
        if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
            args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1

        # train
        # TODO: find a way to handle total batch size when there are multiple datasets
        total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

        accelerator.print("running training / 学習開始")
        accelerator.print(f"  num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
        accelerator.print(f"  num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
        accelerator.print(f"  num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
        accelerator.print(f"  num epochs / epoch数: {num_train_epochs}")
        accelerator.print(
            f"  batch size per device / バッチサイズ: {', '.join([str(d.batch_size) for d in train_dataset_group.datasets])}"
        )
        # accelerator.print(f"  total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
        accelerator.print(f"  gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
        accelerator.print(f"  total optimization steps / 学習ステップ数: {args.max_train_steps}")

        # TODO refactor metadata creation and move to util
        metadata = {
            "ss_session_id": session_id,  # random integer indicating which group of epochs the model came from
            "ss_training_started_at": training_started_at,  # unix timestamp
            "ss_output_name": args.output_name,
            "ss_learning_rate": args.learning_rate,
            "ss_text_encoder_lr": args.text_encoder_lr,
            "ss_unet_lr": args.unet_lr,
            "ss_num_train_images": train_dataset_group.num_train_images,
            "ss_num_reg_images": train_dataset_group.num_reg_images,
            "ss_num_batches_per_epoch": len(train_dataloader),
            "ss_num_epochs": num_train_epochs,
            "ss_gradient_checkpointing": args.gradient_checkpointing,
            "ss_gradient_accumulation_steps": args.gradient_accumulation_steps,
            "ss_max_train_steps": args.max_train_steps,
            "ss_lr_warmup_steps": args.lr_warmup_steps,
            "ss_lr_scheduler": args.lr_scheduler,
            "ss_network_module": args.network_module,
            "ss_network_dim": args.network_dim,  # None means default because another network than LoRA may have another default dim
            "ss_network_alpha": args.network_alpha,  # some networks may not have alpha
            "ss_network_dropout": args.network_dropout,  # some networks may not have dropout
            "ss_mixed_precision": args.mixed_precision,
            "ss_full_fp16": bool(args.full_fp16),
            "ss_v2": bool(args.v2),
            "ss_base_model_version": model_version,
            "ss_clip_skip": args.clip_skip,
            "ss_max_token_length": args.max_token_length,
            "ss_cache_latents": bool(args.cache_latents),
            "ss_seed": args.seed,
            "ss_lowram": args.lowram,
            "ss_noise_offset": args.noise_offset,
            "ss_multires_noise_iterations": args.multires_noise_iterations,
            "ss_multires_noise_discount": args.multires_noise_discount,
            "ss_adaptive_noise_scale": args.adaptive_noise_scale,
            "ss_zero_terminal_snr": args.zero_terminal_snr,
            "ss_training_comment": args.training_comment,  # will not be updated after training
            "ss_sd_scripts_commit_hash": train_util.get_git_revision_hash(),
            "ss_optimizer": optimizer_name + (f"({optimizer_args})" if len(optimizer_args) > 0 else ""),
            "ss_max_grad_norm": args.max_grad_norm,
            "ss_caption_dropout_rate": args.caption_dropout_rate,
            "ss_caption_dropout_every_n_epochs": args.caption_dropout_every_n_epochs,
            "ss_caption_tag_dropout_rate": args.caption_tag_dropout_rate,
            "ss_face_crop_aug_range": args.face_crop_aug_range,
            "ss_prior_loss_weight": args.prior_loss_weight,
            "ss_min_snr_gamma": args.min_snr_gamma,
            "ss_scale_weight_norms": args.scale_weight_norms,
            "ss_ip_noise_gamma": args.ip_noise_gamma,
            "ss_debiased_estimation": bool(args.debiased_estimation_loss),
            "ss_noise_offset_random_strength": args.noise_offset_random_strength,
            "ss_ip_noise_gamma_random_strength": args.ip_noise_gamma_random_strength,
            "ss_loss_type": args.loss_type,
            "ss_huber_schedule": args.huber_schedule,
            "ss_huber_c": args.huber_c,
        }
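
        # The "ss_*" metadata collected above is embedded into the saved network
        # file (see save_model below), so downstream tools can inspect how the
        # model was trained.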

        if use_user_config:
            # save metadata of multiple datasets
            # NOTE: pack "ss_datasets" value as json one time
            # or should also pack nested collections as json?
            datasets_metadata = []
            tag_frequency = {}  # merge tag frequency for metadata editor
            dataset_dirs_info = {}  # merge subset dirs for metadata editor

            for dataset in train_dataset_group.datasets:
                is_dreambooth_dataset = isinstance(dataset, DreamBoothDataset)
                dataset_metadata = {
                    "is_dreambooth": is_dreambooth_dataset,
                    "batch_size_per_device": dataset.batch_size,
                    "num_train_images": dataset.num_train_images,  # includes repeating
                    "num_reg_images": dataset.num_reg_images,
                    "resolution": (dataset.width, dataset.height),
                    "enable_bucket": bool(dataset.enable_bucket),
                    "min_bucket_reso": dataset.min_bucket_reso,
                    "max_bucket_reso": dataset.max_bucket_reso,
                    "tag_frequency": dataset.tag_frequency,
                    "bucket_info": dataset.bucket_info,
                }

                subsets_metadata = []
                for subset in dataset.subsets:
                    subset_metadata = {
                        "img_count": subset.img_count,
                        "num_repeats": subset.num_repeats,
                        "color_aug": bool(subset.color_aug),
                        "flip_aug": bool(subset.flip_aug),
                        "random_crop": bool(subset.random_crop),
                        "shuffle_caption": bool(subset.shuffle_caption),
                        "keep_tokens": subset.keep_tokens,
                        "keep_tokens_separator": subset.keep_tokens_separator,
                        "secondary_separator": subset.secondary_separator,
                        "enable_wildcard": bool(subset.enable_wildcard),
                        "caption_prefix": subset.caption_prefix,
                        "caption_suffix": subset.caption_suffix,
                    }

                    image_dir_or_metadata_file = None
                    if subset.image_dir:
                        image_dir = os.path.basename(subset.image_dir)
                        subset_metadata["image_dir"] = image_dir
                        image_dir_or_metadata_file = image_dir

                    if is_dreambooth_dataset:
                        subset_metadata["class_tokens"] = subset.class_tokens
                        subset_metadata["is_reg"] = subset.is_reg
                        if subset.is_reg:
                            image_dir_or_metadata_file = None  # not merging reg dataset
                    else:
                        metadata_file = os.path.basename(subset.metadata_file)
                        subset_metadata["metadata_file"] = metadata_file
                        image_dir_or_metadata_file = metadata_file  # may overwrite

                    subsets_metadata.append(subset_metadata)

                    # merge dataset dir: not reg subset only
                    # TODO update additional-network extension to show detailed dataset config from metadata
                    if image_dir_or_metadata_file is not None:
                        # datasets may have a certain dir multiple times
                        v = image_dir_or_metadata_file
                        i = 2
                        while v in dataset_dirs_info:
                            v = image_dir_or_metadata_file + f" ({i})"
                            i += 1
                        image_dir_or_metadata_file = v

                        dataset_dirs_info[image_dir_or_metadata_file] = {
                            "n_repeats": subset.num_repeats,
                            "img_count": subset.img_count,
                        }

                dataset_metadata["subsets"] = subsets_metadata
                datasets_metadata.append(dataset_metadata)

                # merge tag frequency:
                for ds_dir_name, ds_freq_for_dir in dataset.tag_frequency.items():
                    # if a directory is used by multiple datasets, count it only once.
                    # because num_repeats is applied, the number of times a tag appears in
                    # the captions does not match how often it is seen during training,
                    # so summing the counts across datasets would not mean much anyway
                    if ds_dir_name in tag_frequency:
                        continue
                    tag_frequency[ds_dir_name] = ds_freq_for_dir

            metadata["ss_datasets"] = json.dumps(datasets_metadata)
            metadata["ss_tag_frequency"] = json.dumps(tag_frequency)
            metadata["ss_dataset_dirs"] = json.dumps(dataset_dirs_info)
        else:
            # preserve backward compatibility when using train_dataset_dir and reg_dataset_dir
            assert (
                len(train_dataset_group.datasets) == 1
            ), f"There should be a single dataset but {len(train_dataset_group.datasets)} found. This seems to be a bug. / データセットは1個だけ存在するはずですが、実際には{len(train_dataset_group.datasets)}個でした。プログラムのバグかもしれません。"

            dataset = train_dataset_group.datasets[0]

            dataset_dirs_info = {}
            reg_dataset_dirs_info = {}
            if use_dreambooth_method:
                for subset in dataset.subsets:
                    info = reg_dataset_dirs_info if subset.is_reg else dataset_dirs_info
                    info[os.path.basename(subset.image_dir)] = {"n_repeats": subset.num_repeats, "img_count": subset.img_count}
            else:
                for subset in dataset.subsets:
                    dataset_dirs_info[os.path.basename(subset.metadata_file)] = {
                        "n_repeats": subset.num_repeats,
                        "img_count": subset.img_count,
                    }

            metadata.update(
                {
                    "ss_batch_size_per_device": args.train_batch_size,
                    "ss_total_batch_size": total_batch_size,
                    "ss_resolution": args.resolution,
                    "ss_color_aug": bool(args.color_aug),
                    "ss_flip_aug": bool(args.flip_aug),
                    "ss_random_crop": bool(args.random_crop),
                    "ss_shuffle_caption": bool(args.shuffle_caption),
                    "ss_enable_bucket": bool(dataset.enable_bucket),
                    "ss_bucket_no_upscale": bool(dataset.bucket_no_upscale),
                    "ss_min_bucket_reso": dataset.min_bucket_reso,
                    "ss_max_bucket_reso": dataset.max_bucket_reso,
                    "ss_keep_tokens": args.keep_tokens,
                    "ss_dataset_dirs": json.dumps(dataset_dirs_info),
                    "ss_reg_dataset_dirs": json.dumps(reg_dataset_dirs_info),
                    "ss_tag_frequency": json.dumps(dataset.tag_frequency),
                    "ss_bucket_info": json.dumps(dataset.bucket_info),
                }
            )

        # add extra args
        if args.network_args:
            metadata["ss_network_args"] = json.dumps(net_kwargs)

        # model name and hash
        if args.pretrained_model_name_or_path is not None:
            sd_model_name = args.pretrained_model_name_or_path
            if os.path.exists(sd_model_name):
                metadata["ss_sd_model_hash"] = train_util.model_hash(sd_model_name)
                metadata["ss_new_sd_model_hash"] = train_util.calculate_sha256(sd_model_name)
                sd_model_name = os.path.basename(sd_model_name)
            metadata["ss_sd_model_name"] = sd_model_name

        if args.vae is not None:
            vae_name = args.vae
            if os.path.exists(vae_name):
                metadata["ss_vae_hash"] = train_util.model_hash(vae_name)
                metadata["ss_new_vae_hash"] = train_util.calculate_sha256(vae_name)
                vae_name = os.path.basename(vae_name)
            metadata["ss_vae_name"] = vae_name

        metadata = {k: str(v) for k, v in metadata.items()}

        # make minimum metadata for filtering
        minimum_metadata = {}
        for key in train_util.SS_METADATA_MINIMUM_KEYS:
            if key in metadata:
                minimum_metadata[key] = metadata[key]

        # calculate steps to skip when resuming or starting from a specific step
        initial_step = 0
        if args.initial_epoch is not None or args.initial_step is not None:
            # if initial_epoch or initial_step is specified, steps_from_state is ignored even when resuming
            if steps_from_state is not None:
                logger.warning(
                    "steps from the state is ignored because initial_step is specified / initial_stepが指定されているため、stateからのステップ数は無視されます"
                )
            if args.initial_step is not None:
                initial_step = args.initial_step
            else:
                # num steps per epoch is calculated by num_processes and gradient_accumulation_steps
                initial_step = (args.initial_epoch - 1) * math.ceil(
                    len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
                )
        else:
            # if initial_epoch and initial_step are not specified, steps_from_state is used when resuming
            if steps_from_state is not None:
                initial_step = steps_from_state
                steps_from_state = None

        if initial_step > 0:
            assert (
                args.max_train_steps > initial_step
            ), f"max_train_steps should be greater than initial step / max_train_stepsは初期ステップより大きい必要があります: {args.max_train_steps} vs {initial_step}"

        progress_bar = tqdm(
            range(args.max_train_steps - initial_step), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps"
        )

        epoch_to_start = 0
        if initial_step > 0:
            if args.skip_until_initial_step:
                # if skip_until_initial_step is specified, load data and discard it to ensure the same data is used
                if not args.resume:
                    logger.info(
                        f"initial_step is specified but not resuming. lr scheduler will be started from the beginning / initial_stepが指定されていますがresumeしていないため、lr schedulerは最初から始まります"
                    )
                logger.info(f"skipping {initial_step} steps / {initial_step}ステップをスキップします")
                initial_step *= args.gradient_accumulation_steps

                # set epoch to start to make initial_step less than len(train_dataloader)
                epoch_to_start = initial_step // math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
            else:
                # if not, only the epoch number is skipped, for informative purposes
                epoch_to_start = initial_step // math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
                initial_step = 0  # do not skip
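
        # Two resume modes: with --skip_until_initial_step the dataloader replays
        # and discards batches so the data order matches the original run; without
        # it, only the epoch counter is fast-forwarded and no batches are skipped.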

        global_step = 0

        noise_scheduler = DDPMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
        )
        prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
        if args.zero_terminal_snr:
            custom_train_functions.fix_noise_scheduler_betas_for_zero_terminal_snr(noise_scheduler)
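
        # These DDPMScheduler betas are the standard Stable Diffusion training
        # schedule; --zero_terminal_snr rescales them so that the last timestep
        # has zero SNR (the fix proposed in "Common Diffusion Noise Schedules and
        # Sample Steps Are Flawed").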

        if accelerator.is_main_process:
            init_kwargs = {}
            if args.wandb_run_name:
                init_kwargs["wandb"] = {"name": args.wandb_run_name}
            if args.log_tracker_config is not None:
                init_kwargs = toml.load(args.log_tracker_config)
            accelerator.init_trackers(
                "network_train" if args.log_tracker_name is None else args.log_tracker_name,
                config=train_util.get_sanitized_config_or_none(args),
                init_kwargs=init_kwargs,
            )

        loss_recorder = train_util.LossRecorder()
        del train_dataset_group

        # callback for step start
        if hasattr(accelerator.unwrap_model(network), "on_step_start"):
            on_step_start = accelerator.unwrap_model(network).on_step_start
        else:
            on_step_start = lambda *args, **kwargs: None

        # function for saving/removing
        def save_model(ckpt_name, unwrapped_nw, steps, epoch_no, force_sync_upload=False):
            os.makedirs(args.output_dir, exist_ok=True)
            ckpt_file = os.path.join(args.output_dir, ckpt_name)

            accelerator.print(f"\nsaving checkpoint: {ckpt_file}")
            metadata["ss_training_finished_at"] = str(time.time())
            metadata["ss_steps"] = str(steps)
            metadata["ss_epoch"] = str(epoch_no)

            metadata_to_save = minimum_metadata if args.no_metadata else metadata
            sai_metadata = train_util.get_sai_model_spec(None, args, self.is_sdxl, True, False)
            metadata_to_save.update(sai_metadata)

            unwrapped_nw.save_weights(ckpt_file, save_dtype, metadata_to_save)
            if args.huggingface_repo_id is not None:
                huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)

        def remove_model(old_ckpt_name):
            old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name)
            if os.path.exists(old_ckpt_file):
                accelerator.print(f"removing old checkpoint: {old_ckpt_file}")
                os.remove(old_ckpt_file)

        # for --sample_at_first
        self.sample_images(accelerator, args, 0, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)

        # training loop
        if initial_step > 0:  # only if skip_until_initial_step is specified
            for skip_epoch in range(epoch_to_start):  # skip epochs
                logger.info(f"skipping epoch {skip_epoch+1} because initial_step (multiplied) is {initial_step}")
                initial_step -= len(train_dataloader)
            global_step = initial_step

        for epoch in range(epoch_to_start, num_train_epochs):
            accelerator.print(f"\nepoch {epoch+1}/{num_train_epochs}")
            current_epoch.value = epoch + 1

            metadata["ss_epoch"] = str(epoch + 1)

            accelerator.unwrap_model(network).on_epoch_start(text_encoder, unet)

            skipped_dataloader = None
            if initial_step > 0:
                skipped_dataloader = accelerator.skip_first_batches(train_dataloader, initial_step - 1)
                initial_step = 1

            for step, batch in enumerate(skipped_dataloader or train_dataloader):
                current_step.value = global_step
                if initial_step > 0:
                    initial_step -= 1
                    continue

                with accelerator.accumulate(training_model):
                    on_step_start(text_encoder, unet)

                    if "latents" in batch and batch["latents"] is not None:
                        latents = batch["latents"].to(accelerator.device).to(dtype=weight_dtype)
                    else:
                        with torch.no_grad():
                            # encode images into latents
                            latents = vae.encode(batch["images"].to(dtype=vae_dtype)).latent_dist.sample().to(dtype=weight_dtype)

                            # if NaNs are found, warn and replace them with zeros
                            if torch.any(torch.isnan(latents)):
                                accelerator.print("NaN found in latents, replacing with zeros")
                                latents = torch.nan_to_num(latents, 0, out=latents)
                    latents = latents * self.vae_scale_factor

                    # get multiplier for each sample
                    if network_has_multiplier:
                        multipliers = batch["network_multipliers"]
                        # if all multipliers are same, use single multiplier
                        if torch.all(multipliers == multipliers[0]):
                            multipliers = multipliers[0].item()
                        else:
                            raise NotImplementedError("multipliers for each sample is not supported yet")
                        # print(f"set multiplier: {multipliers}")
                        accelerator.unwrap_model(network).set_multiplier(multipliers)

                    with torch.set_grad_enabled(train_text_encoder), accelerator.autocast():
                        # get the text embedding for conditioning
                        if args.weighted_captions:
                            text_encoder_conds = get_weighted_text_embeddings(
                                tokenizer,
                                text_encoder,
                                batch["captions"],
                                accelerator.device,
                                args.max_token_length // 75 if args.max_token_length else 1,
                                clip_skip=args.clip_skip,
                            )
                        else:
                            text_encoder_conds = self.get_text_cond(
                                args, accelerator, batch, tokenizers, text_encoders, weight_dtype
                            )

                    # Sample noise, sample a random timestep for each image, and add noise to the latents,
                    # with noise offset and/or multires noise if specified
                    noise, noisy_latents, timesteps, huber_c = train_util.get_noise_noisy_latents_and_timesteps(
                        args, noise_scheduler, latents
                    )

                    # ensure the hidden state will require grad
                    if args.gradient_checkpointing:
                        for x in noisy_latents:
                            x.requires_grad_(True)
                        for t in text_encoder_conds:
                            t.requires_grad_(True)

                    # predict the noise residual
                    with accelerator.autocast():
                        noise_pred = self.call_unet(
                            args,
                            accelerator,
                            unet,
                            noisy_latents.requires_grad_(train_unet),
                            timesteps,
                            text_encoder_conds,
                            batch,
                            weight_dtype,
                        )

                    if args.v_parameterization:
                        # v-parameterization training
                        target = noise_scheduler.get_velocity(latents, noise, timesteps)
                    else:
                        target = noise

                    loss = train_util.conditional_loss(
                        noise_pred.float(), target.float(), reduction="none", loss_type=args.loss_type, huber_c=huber_c
                    )
                    if args.masked_loss or ("alpha_masks" in batch and batch["alpha_masks"] is not None):
                        loss = apply_masked_loss(loss, batch)
                    loss = loss.mean([1, 2, 3])

                    loss_weights = batch["loss_weights"]  # weight for each sample
                    loss = loss * loss_weights

                    if args.min_snr_gamma:
                        loss = apply_snr_weight(loss, timesteps, noise_scheduler, args.min_snr_gamma, args.v_parameterization)
                    if args.scale_v_pred_loss_like_noise_pred:
                        loss = scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler)
                    if args.v_pred_like_loss:
                        loss = add_v_prediction_like_loss(loss, timesteps, noise_scheduler, args.v_pred_like_loss)
                    if args.debiased_estimation_loss:
                        loss = apply_debiased_estimation(loss, timesteps, noise_scheduler, args.v_parameterization)

                    loss = loss.mean()  # already a mean, so no need to divide by batch size
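
                    # Loss pipeline: per-pixel loss -> optional mask -> per-sample
                    # mean -> per-sample weights -> optional min-SNR / v-pred /
                    # debiased-estimation reweighting -> scalar mean over the batch.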

                    accelerator.backward(loss)
                    if accelerator.sync_gradients:
                        self.all_reduce_network(accelerator, network)  # sync DDP grad manually
                        if args.max_grad_norm != 0.0:
                            params_to_clip = accelerator.unwrap_model(network).get_trainable_params()
                            accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad(set_to_none=True)

                if args.scale_weight_norms:
                    keys_scaled, mean_norm, maximum_norm = accelerator.unwrap_model(network).apply_max_norm_regularization(
                        args.scale_weight_norms, accelerator.device
                    )
                    max_mean_logs = {"Keys Scaled": keys_scaled, "Average key norm": mean_norm}
                else:
                    keys_scaled, mean_norm, maximum_norm = None, None, None

                # checks if the accelerator has performed an optimization step behind the scenes
                if accelerator.sync_gradients:
                    progress_bar.update(1)
                    global_step += 1

                    self.sample_images(accelerator, args, None, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)

                    # save the model every N steps if specified
                    if args.save_every_n_steps is not None and global_step % args.save_every_n_steps == 0:
                        accelerator.wait_for_everyone()
                        if accelerator.is_main_process:
                            ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, global_step)
                            save_model(ckpt_name, accelerator.unwrap_model(network), global_step, epoch)

                            if args.save_state:
                                train_util.save_and_remove_state_stepwise(args, accelerator, global_step)

                            remove_step_no = train_util.get_remove_step_no(args, global_step)
                            if remove_step_no is not None:
                                remove_ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, remove_step_no)
                                remove_model(remove_ckpt_name)

                current_loss = loss.detach().item()
                loss_recorder.add(epoch=epoch, step=step, loss=current_loss)
                avr_loss: float = loss_recorder.moving_average
                logs = {"avr_loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
                progress_bar.set_postfix(**logs)

                if args.scale_weight_norms:
                    progress_bar.set_postfix(**{**max_mean_logs, **logs})

                if args.logging_dir is not None:
                    logs = self.generate_step_logs(
                        args, current_loss, avr_loss, lr_scheduler, lr_descriptions, keys_scaled, mean_norm, maximum_norm
                    )
                    accelerator.log(logs, step=global_step)

                if global_step >= args.max_train_steps:
                    break

            if args.logging_dir is not None:
                logs = {"loss/epoch": loss_recorder.moving_average}
                accelerator.log(logs, step=epoch + 1)

            accelerator.wait_for_everyone()

            # save the model every N epochs if specified
            if args.save_every_n_epochs is not None:
                saving = (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs
                if is_main_process and saving:
                    ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, epoch + 1)
                    save_model(ckpt_name, accelerator.unwrap_model(network), global_step, epoch + 1)

                    remove_epoch_no = train_util.get_remove_epoch_no(args, epoch + 1)
                    if remove_epoch_no is not None:
                        remove_ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, remove_epoch_no)
                        remove_model(remove_ckpt_name)

                    if args.save_state:
                        train_util.save_and_remove_state_on_epoch_end(args, accelerator, epoch + 1)

            self.sample_images(accelerator, args, epoch + 1, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)

        # end of epoch

        # metadata["ss_epoch"] = str(num_train_epochs)
        metadata["ss_training_finished_at"] = str(time.time())

        if is_main_process:
            network = accelerator.unwrap_model(network)

        accelerator.end_training()

        if is_main_process and (args.save_state or args.save_state_on_train_end):
            train_util.save_state_on_train_end(args, accelerator)

        if is_main_process:
            ckpt_name = train_util.get_last_ckpt_name(args, "." + args.save_model_as)
            save_model(ckpt_name, network, global_step, num_train_epochs, force_sync_upload=True)

            logger.info("model saved.")


def setup_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()

    add_logging_arguments(parser)
    train_util.add_sd_models_arguments(parser)
    train_util.add_dataset_arguments(parser, True, True, True)
    train_util.add_training_arguments(parser, True)
    train_util.add_masked_loss_arguments(parser)
    deepspeed_utils.add_deepspeed_arguments(parser)
    train_util.add_optimizer_arguments(parser)
    config_util.add_config_arguments(parser)
    custom_train_functions.add_custom_train_arguments(parser)

    parser.add_argument(
        "--no_metadata", action="store_true", help="do not save metadata in output model / メタデータを出力先モデルに保存しない"
    )
    parser.add_argument(
        "--save_model_as",
        type=str,
        default="safetensors",
        choices=[None, "ckpt", "pt", "safetensors"],
        help="format to save the model (default is .safetensors) / モデル保存時の形式(デフォルトはsafetensors)",
    )

    parser.add_argument("--unet_lr", type=float, default=None, help="learning rate for U-Net / U-Netの学習率")
    parser.add_argument("--text_encoder_lr", type=float, default=None, help="learning rate for Text Encoder / Text Encoderの学習率")

    parser.add_argument(
        "--network_weights", type=str, default=None, help="pretrained weights for network / 学習するネットワークの初期重み"
    )
    parser.add_argument(
        "--network_module", type=str, default=None, help="network module to train / 学習対象のネットワークのモジュール"
    )
    parser.add_argument(
        "--network_dim",
        type=int,
        default=None,
        help="network dimensions (depends on each network) / モジュールの次元数(ネットワークにより定義は異なります)",
    )
    parser.add_argument(
        "--network_alpha",
        type=float,
        default=1,
        help="alpha for LoRA weight scaling, default 1 (same as network_dim for same behavior as old version) / LoRaの重み調整のalpha値、デフォルト1(旧バージョンと同じ動作をするにはnetwork_dimと同じ値を指定)",
    )
    parser.add_argument(
        "--network_dropout",
        type=float,
        default=None,
        help="Drops neurons out of training every step (0 or None is default behavior (no dropout), 1 would drop all neurons) / 訓練時に毎ステップでニューロンをdropする(0またはNoneはdropoutなし、1は全ニューロンをdropout)",
    )
    parser.add_argument(
        "--network_args",
        type=str,
        default=None,
        nargs="*",
        help="additional arguments for network (key=value) / ネットワークへの追加の引数",
    )
    parser.add_argument(
        "--network_train_unet_only", action="store_true", help="only training U-Net part / U-Net関連部分のみ学習する"
    )
    parser.add_argument(
        "--network_train_text_encoder_only",
        action="store_true",
        help="only training Text Encoder part / Text Encoder関連部分のみ学習する",
    )
    parser.add_argument(
        "--training_comment",
        type=str,
        default=None,
        help="arbitrary comment string stored in metadata / メタデータに記録する任意のコメント文字列",
    )
    parser.add_argument(
        "--dim_from_weights",
        action="store_true",
        help="automatically determine dim (rank) from network_weights / dim (rank)をnetwork_weightsで指定した重みから自動で決定する",
    )
    parser.add_argument(
        "--scale_weight_norms",
        type=float,
        default=None,
        help="Scale the weight of each key pair to help prevent overtraining via exploding gradients. (1 is a good starting point) / 重みの値をスケーリングして勾配爆発を防ぐ(1が初期値としては適当)",
    )
    parser.add_argument(
        "--base_weights",
        type=str,
        default=None,
        nargs="*",
        help="network weights to merge into the model before training / 学習前にあらかじめモデルにマージするnetworkの重みファイル",
    )
    parser.add_argument(
        "--base_weights_multiplier",
        type=float,
        default=None,
        nargs="*",
        help="multiplier for network weights to merge into the model before training / 学習前にあらかじめモデルにマージするnetworkの重みの倍率",
    )
    parser.add_argument(
        "--no_half_vae",
        action="store_true",
        help="do not use fp16/bf16 VAE in mixed precision (use float VAE) / mixed precisionでも fp16/bf16 VAEを使わずfloat VAEを使う",
    )
    parser.add_argument(
        "--skip_until_initial_step",
        action="store_true",
        help="skip training until initial_step is reached / initial_stepに到達するまで学習をスキップする",
    )
    parser.add_argument(
        "--initial_epoch",
        type=int,
        default=None,
        help="initial epoch number, 1 means first epoch (same as not specifying). NOTE: initial_epoch/step doesn't affect the lr scheduler, which means the lr scheduler will start from 0 without `--resume`."
        + " / 初期エポック数、1で最初のエポック(未指定時と同じ)。注意:initial_epoch/stepはlr schedulerに影響しないため、`--resume`しない場合はlr schedulerは0から始まる",
    )
    parser.add_argument(
        "--initial_step",
        type=int,
        default=None,
        help="initial step number including all epochs, 0 means first step (same as not specifying). overwrites initial_epoch."
        + " / 初期ステップ数、全エポックを含むステップ数、0で最初のステップ(未指定時と同じ)。initial_epochを上書きする",
    )
    # parser.add_argument("--loraplus_lr_ratio", default=None, type=float, help="LoRA+ learning rate ratio")
    # parser.add_argument("--loraplus_unet_lr_ratio", default=None, type=float, help="LoRA+ UNet learning rate ratio")
    # parser.add_argument("--loraplus_text_encoder_lr_ratio", default=None, type=float, help="LoRA+ text encoder learning rate ratio")
    return parser
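
# Example invocation (illustrative only; the model/dataset paths and the chosen
# values are assumptions, and networks.lora is the LoRA module commonly shipped
# with sd-scripts; see setup_parser() above for the full argument list):
#
#   accelerate launch train_network.py \
#     --pretrained_model_name_or_path=model.safetensors \
#     --dataset_config=dataset.toml \
#     --network_module=networks.lora --network_dim=16 --network_alpha=8 \
#     --output_dir=output --output_name=my_lora \
#     --learning_rate=1e-4 --max_train_steps=2000 --mixed_precision=fp16 \
#     --save_model_as=safetensors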


if __name__ == "__main__":
    parser = setup_parser()

    args = parser.parse_args()
    train_util.verify_command_line_training_args(args)
    args = train_util.read_config_from_file(args, parser)

    trainer = NetworkTrainer()
    trainer.train(args)