|
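# SDXL LoRA training config for kohya-ss sd-scripts (kohya-trainer Colab layout).
# Caching the text encoder outputs saves VRAM but is incompatible with caption
# shuffling, hence shuffle_caption = false. no_half_vae keeps the VAE in fp32
# (the stock SDXL VAE tends to produce NaNs in fp16), and min/max_timestep
# 0..1000 is the full, default timestep range.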
[sdxl_arguments]
cache_text_encoder_outputs = true
no_half_vae = true
min_timestep = 0
max_timestep = 1000
shuffle_caption = false
|
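# Base SDXL checkpoint and the fixed SDXL VAE; paths assume the Colab /content layout.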
[model_arguments]
pretrained_model_name_or_path = "/content/pretrained_model/sd_xl_base_1.0.safetensors"
vae = "/content/vae/sdxl_vae.safetensors"
|
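# Fine-tuning-style dataset: meta_lat.json holds the captions plus bucket and
# latent metadata produced by prepare_buckets_latents.py. Each image is seen
# dataset_repeats (20) times per epoch at SDXL's native 1024x1024.
# token_warmup_step = 0 effectively disables token warmup.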
[dataset_arguments]
debug_dataset = false
in_json = "/content/LoRA/meta_lat.json"
train_data_dir = "/content/LoRA/train_data"
dataset_repeats = 20
keep_tokens = 0
resolution = "1024,1024"
color_aug = false
token_warmup_min = 1
token_warmup_step = 0
|
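# Core run settings. sdpa = true uses PyTorch scaled-dot-product attention, so
# xformers and mem_eff_attn stay off; max_token_length = 225 enables extended
# (225-token) captions; gradient checkpointing trades speed for VRAM headroom.
# Optimizer steps per epoch come to roughly
# num_images * dataset_repeats / (train_batch_size * gradient_accumulation_steps).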
[training_arguments]
output_dir = "/content/drive/MyDrive/kohya-trainer/output"
output_name = "sdxl_lora_architecture_siheyuan"
save_precision = "fp16"
save_every_n_epochs = 1
train_batch_size = 4
max_token_length = 225
mem_eff_attn = false
sdpa = true
xformers = false
max_train_epochs = 10
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
gradient_checkpointing = true
gradient_accumulation_steps = 1
mixed_precision = "fp16"
|
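# Metrics go to Weights & Biases under this tracker name; requires the wandb
# package and a logged-in API key.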
[logging_arguments]
log_with = "wandb"
log_tracker_name = "sdxl_lora_architecture_siheyuan"
logging_dir = "/content/LoRA/logs"
|
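# Render sample images every epoch with the Euler a sampler; the prompts
# themselves come from a separate prompt file (e.g. passed via --sample_prompts).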
[sample_prompt_arguments]
sample_every_n_epochs = 1
sample_sampler = "euler_a"
|
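# Save LoRA weights in .safetensors format.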
[saving_arguments]
save_model_as = "safetensors"
|
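# Adafactor in fixed-LR mode: relative_step=False makes the explicit
# learning_rate and constant_with_warmup scheduler take effect, and
# max_grad_norm = 0 disables gradient clipping, the usual pairing for Adafactor.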
[optimizer_arguments]
optimizer_type = "AdaFactor"
learning_rate = 1e-5
max_grad_norm = 0
optimizer_args = [ "scale_parameter=False", "relative_step=False", "warmup_init=False" ]
lr_scheduler = "constant_with_warmup"
lr_warmup_steps = 100
|
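# Rank-32 LoRA with alpha 16 (effective scale alpha/rank = 0.5); conv_dim /
# conv_alpha extend LoRA to the conv layers (LoCon-style). Training the U-Net
# only is required here: the text encoders cannot be trained while their
# outputs are cached (cache_text_encoder_outputs = true above).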
[additional_network_arguments]
no_metadata = false
network_module = "networks.lora"
network_dim = 32
network_alpha = 16
network_args = [ "conv_dim=32", "conv_alpha=16" ]
network_train_unet_only = true
|
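# Noise shaping: noise_offset helps with very dark or very bright images, with
# adaptive_noise_scale adjusting the offset per batch based on the latents;
# min_snr_gamma = 5 applies Min-SNR loss weighting at the paper's recommended gamma.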
[advanced_training_config]
noise_offset = 0.1
adaptive_noise_scale = 0.01
min_snr_gamma = 5
|
|
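# A typical launch for this config (illustrative; assumes the kohya-trainer
# Colab layout, run from an sd-scripts checkout, and the .toml filename is hypothetical):
#   accelerate launch sdxl_train_network.py --config_file=/content/LoRA/config_file.toml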