import argparse
import gc
import math
import os

from tqdm import tqdm
import torch
from accelerate.utils import set_seed
import diffusers
from diffusers import DDPMScheduler

import library.train_util as train_util
import library.config_util as config_util
from library.config_util import (
    ConfigSanitizer,
    BlueprintGenerator,
)


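# the dataset group assembles complete batches itself and the DataLoader below is
# created with batch_size=1, so collating just unwraps that single element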
def collate_fn(examples):
    return examples[0]


def train(args):
    train_util.verify_training_args(args)
    train_util.prepare_dataset_args(args, True)

    cache_latents = args.cache_latents

    if args.seed is not None:
        set_seed(args.seed)

    tokenizer = train_util.load_tokenizer(args)

    blueprint_generator = BlueprintGenerator(ConfigSanitizer(False, True, True))
    if args.dataset_config is not None:
        print(f"Load dataset config from {args.dataset_config}")
        user_config = config_util.load_user_config(args.dataset_config)
        ignored = ["train_data_dir", "in_json"]
        if any(getattr(args, attr) is not None for attr in ignored):
            print("ignore following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(', '.join(ignored)))
    else:
        user_config = {
            "datasets": [{
                "subsets": [{
                    "image_dir": args.train_data_dir,
                    "metadata_file": args.in_json,
                }]
            }]
        }

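    # merge the CLI options with the (optional) TOML dataset config into a
    # blueprint, then instantiate the actual dataset group from it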
    blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer)
    train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)

    if args.debug_dataset:
        train_util.debug_dataset(train_dataset_group)
        return
    if len(train_dataset_group) == 0:
        print("No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。")
        return

    if cache_latents:
        assert train_dataset_group.is_latent_cacheable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"

    print("prepare accelerator")
    accelerator, unwrap_model = train_util.prepare_accelerator(args)

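    # weight_dtype is the compute dtype implied by --mixed_precision; save_dtype
    # is the precision used when writing checkpoints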
    weight_dtype, save_dtype = train_util.prepare_dtype(args)

    text_encoder, vae, unet, load_stable_diffusion_format = train_util.load_target_model(args, weight_dtype)

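    # remember the source model location/format so that checkpoint saving can
    # carry over metadata from a .ckpt/.safetensors file or a Diffusers directory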
    if load_stable_diffusion_format:
        src_stable_diffusion_ckpt = args.pretrained_model_name_or_path
        src_diffusers_model_path = None
    else:
        src_stable_diffusion_ckpt = None
        src_diffusers_model_path = args.pretrained_model_name_or_path

    if args.save_model_as is None:
        save_stable_diffusion_format = load_stable_diffusion_format
        use_safetensors = args.use_safetensors
    else:
        save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'
        use_safetensors = args.use_safetensors or ("safetensors" in args.save_model_as.lower())

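    # recursively toggle Diffusers' built-in memory-efficient (xformers)
    # attention on every submodule that supports it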
    def set_diffusers_xformers_flag(model, valid):
        def fn_recursive_set_mem_eff(module: torch.nn.Module):
            if hasattr(module, "set_use_memory_efficient_attention_xformers"):
                module.set_use_memory_efficient_attention_xformers(valid)

            for child in module.children():
                fn_recursive_set_mem_eff(child)

        fn_recursive_set_mem_eff(model)

    if args.diffusers_xformers:
        print("Use xformers by Diffusers")
        set_diffusers_xformers_flag(unet, True)
    else:
        # fall back to this repo's own attention replacement (mem_eff_attn / xformers)
        print("Disable Diffusers' xformers")
        set_diffusers_xformers_flag(unet, False)
        train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)

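    # precompute VAE latents for the whole dataset once, then move the VAE off
    # the GPU; batches will carry cached latents instead of raw images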
    if cache_latents:
        vae.to(accelerator.device, dtype=weight_dtype)
        vae.requires_grad_(False)
        vae.eval()
        with torch.no_grad():
            train_dataset_group.cache_latents(vae)
        vae.to("cpu")
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()

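    # decide which models are trained: the U-Net always, the text encoder only
    # with --train_text_encoder; everything else stays frozen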
    training_models = []
    if args.gradient_checkpointing:
        unet.enable_gradient_checkpointing()
    training_models.append(unet)

    if args.train_text_encoder:
        print("enable text encoder training")
        if args.gradient_checkpointing:
            text_encoder.gradient_checkpointing_enable()
        training_models.append(text_encoder)
    else:
        text_encoder.to(accelerator.device, dtype=weight_dtype)
        text_encoder.requires_grad_(False)
        if args.gradient_checkpointing:
            text_encoder.gradient_checkpointing_enable()
            text_encoder.train()
        else:
            text_encoder.eval()

    if not cache_latents:
        vae.requires_grad_(False)
        vae.eval()
        vae.to(accelerator.device, dtype=weight_dtype)

    for m in training_models:
        m.requires_grad_(True)
    params = []
    for m in training_models:
        params.extend(m.parameters())
    params_to_optimize = params

    print("prepare optimizer, data loader etc.")
    _, _, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize)

    n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset_group, batch_size=1, shuffle=True, collate_fn=collate_fn,
        num_workers=n_workers, persistent_workers=args.persistent_data_loader_workers)

    if args.max_train_epochs is not None:
        args.max_train_steps = args.max_train_epochs * len(train_dataloader)
        print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")

    lr_scheduler = train_util.get_scheduler_fix(args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps,
                                                num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
                                                num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power)

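    # full_fp16 casts the model weights themselves to fp16 (not just autocast);
    # accelerate then needs the patch applied after prepare() below for training
    # to work in this mode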
    if args.full_fp16:
        assert args.mixed_precision == "fp16", "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。"
        print("enable full fp16 training.")
        unet.to(weight_dtype)
        text_encoder.to(weight_dtype)

    if args.train_text_encoder:
        unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
            unet, text_encoder, optimizer, train_dataloader, lr_scheduler)
    else:
        unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)

    if args.full_fp16:
        train_util.patch_accelerator_for_fp16_training(accelerator)

    if args.resume is not None:
        print(f"resume training from state: {args.resume}")
        accelerator.load_state(args.resume)

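    # derive the epoch count from the requested step budget; save_n_epoch_ratio
    # asks for roughly that many checkpoint saves spread over training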
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
        args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1

    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    print("running training / 学習開始")
    print(f" num examples / サンプル数: {train_dataset_group.num_train_images}")
    print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
    print(f" num epochs / epoch数: {num_train_epochs}")
    print(f" batch size per device / バッチサイズ: {args.train_batch_size}")
    print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
    print(f" gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
    print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")

    progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
    global_step = 0

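    # DDPM noise scheduler with the Stable Diffusion v1 training schedule:
    # scaled-linear betas from 0.00085 to 0.012 over 1000 timesteps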
    noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
                                    num_train_timesteps=1000, clip_sample=False)

    if accelerator.is_main_process:
        accelerator.init_trackers("finetuning")

    for epoch in range(num_train_epochs):
        print(f"epoch {epoch+1}/{num_train_epochs}")
        train_dataset_group.set_current_epoch(epoch + 1)

        for m in training_models:
            m.train()

        loss_total = 0
        for step, batch in enumerate(train_dataloader):
            with accelerator.accumulate(training_models[0]):
                with torch.no_grad():
                    if "latents" in batch and batch["latents"] is not None:
                        latents = batch["latents"].to(accelerator.device)
                    else:
                        # encode pixels to latents on the fly when caching is off
                        latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
                    # scale into the Stable Diffusion latent space (VAE scaling factor)
                    latents = latents * 0.18215
                b_size = latents.shape[0]

                with torch.set_grad_enabled(args.train_text_encoder):
                    # get the text embeddings; gradients flow only if the text encoder is trained
                    input_ids = batch["input_ids"].to(accelerator.device)
                    encoder_hidden_states = train_util.get_hidden_states(
                        args, input_ids, tokenizer, text_encoder, weight_dtype if args.full_fp16 else None)

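                # sample the noise added to the latents; --noise_offset perturbs
                # the per-sample channel means, which helps the model learn overall
                # brightness (very dark / very light images)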
                noise = torch.randn_like(latents, device=latents.device)
                if args.noise_offset:
                    noise += args.noise_offset * torch.randn((latents.shape[0], latents.shape[1], 1, 1), device=latents.device)

                # pick a random diffusion timestep for each sample in the batch
                timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)
                timesteps = timesteps.long()

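                # forward diffusion: noise the latents according to each sampled
                # timestep, then have the U-Net predict the added noise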
                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

                noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample

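                # with v-parameterization (e.g. SD 2.x 768-v models) the network
                # regresses the velocity target rather than the raw noise epsilon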
                if args.v_parameterization:
                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
                else:
                    target = noise

                loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction="mean")

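                # backward pass; clip gradients only on steps where accumulation
                # syncs, i.e. when an actual optimizer update is about to happen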
                accelerator.backward(loss)
                if accelerator.sync_gradients and args.max_grad_norm != 0.0:
                    params_to_clip = []
                    for m in training_models:
                        params_to_clip.extend(m.parameters())
                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad(set_to_none=True)

            # the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1

                train_util.sample_images(accelerator, args, None, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)

            current_loss = loss.detach().item()  # mean loss, so batch size does not matter
            if args.logging_dir is not None:
                logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
                if args.optimizer_type.lower() == "DAdaptation".lower():  # track D-Adaptation's effective lr (d * lr)
                    logs["lr/d*lr"] = lr_scheduler.optimizers[0].param_groups[0]['d'] * lr_scheduler.optimizers[0].param_groups[0]['lr']
                accelerator.log(logs, step=global_step)

            loss_total += current_loss
            avr_loss = loss_total / (step + 1)
            logs = {"loss": avr_loss}
            progress_bar.set_postfix(**logs)

            if global_step >= args.max_train_steps:
                break

        if args.logging_dir is not None:
            logs = {"loss/epoch": loss_total / len(train_dataloader)}
            accelerator.log(logs, step=epoch + 1)

        accelerator.wait_for_everyone()

        if args.save_every_n_epochs is not None:
            src_path = src_stable_diffusion_ckpt if save_stable_diffusion_format else src_diffusers_model_path
            train_util.save_sd_model_on_epoch_end(args, accelerator, src_path, save_stable_diffusion_format, use_safetensors,
                                                  save_dtype, epoch, num_train_epochs, global_step, unwrap_model(text_encoder), unwrap_model(unet), vae)

        train_util.sample_images(accelerator, args, epoch + 1, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)

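    # training is done: unwrap the models from their accelerate wrappers before saving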
    is_main_process = accelerator.is_main_process
    if is_main_process:
        unet = unwrap_model(unet)
        text_encoder = unwrap_model(text_encoder)

    accelerator.end_training()

    if args.save_state:
        train_util.save_state_on_train_end(args, accelerator)

    del accelerator  # release it here; the final save below needs the memory

    if is_main_process:
        src_path = src_stable_diffusion_ckpt if save_stable_diffusion_format else src_diffusers_model_path
        train_util.save_sd_model_on_train_end(args, src_path, save_stable_diffusion_format, use_safetensors,
                                              save_dtype, epoch, global_step, text_encoder, unet, vae)
        print("model saved.")


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    train_util.add_sd_models_arguments(parser)
    train_util.add_dataset_arguments(parser, False, True, True)
    train_util.add_training_arguments(parser, False)
    train_util.add_sd_saving_arguments(parser)
    train_util.add_optimizer_arguments(parser)
    config_util.add_config_arguments(parser)

    parser.add_argument("--diffusers_xformers", action='store_true',
                        help='use xformers by diffusers / Diffusersでxformersを使用する')
    parser.add_argument("--train_text_encoder", action="store_true", help="train text encoder / text encoderも学習する")

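    # a sketch of a typical invocation (script name and values are illustrative;
    # apart from the two flags defined above, flag names come from
    # library.train_util / library.config_util and may differ by version, so
    # check --help):
    #   accelerate launch fine_tune.py \
    #       --pretrained_model_name_or_path=model.safetensors \
    #       --in_json=meta_lat.json --train_data_dir=train_images \
    #       --output_dir=fine-tuned --mixed_precision=fp16 --max_train_steps=1600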
    args = parser.parse_args()
    train(args)