# jerry_surtr_arknights / surtr_arknights.toml
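# LoRA training configuration for the Arknights operator Surtr.
# A minimal launch sketch, assuming kohya-ss sd-scripts (train_network.py reads
# TOML via --config_file); the command is illustrative, not part of this file:
#   accelerate launch train_network.py --config_file surtr_arknights.toml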
[Basics]
pretrained_model_name_or_path = "D:\\stable-diffusion-webui\\models\\Stable-diffusion\\animefull-latest.ckpt"
train_data_dir = "D:\\train_data\\surtr_arknights"
resolution = "512,768"
seed = 23
max_train_steps = 99999
max_train_epochs = 24
clip_skip = 2
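# Notes (kohya-ss sd-scripts semantics): resolution is "width,height"; clip_skip = 2
# matches the NAI animefull base model; when max_train_epochs is set it should take
# precedence, making max_train_steps = 99999 just a placeholder ceiling.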
[Save]
output_dir = "D:\\train_out\\lora"
output_name = "surtr_arknights"
save_precision = "fp16"
save_model_as = "safetensors"
save_every_n_epochs = 2
save_every_n_steps = 9999
save_state = false
save_last_n_steps_state = 1
save_last_n_steps = 200
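# Saves fp16 .safetensors every 2 epochs; save_every_n_steps = 9999 makes step-based
# saves rare, and save_last_n_steps = 200 prunes older ones. With save_state = false,
# the *_state options have no effect.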
[SDv2]
v2 = false
v_parameterization = false
scale_v_pred_loss_like_noise_pred = false
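# SD2-specific options are all off, consistent with the SD1.x base model above.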
[Network_setup]
network_dim = 4
network_alpha = 2
dim_from_weights = false
network_dropout = 0
network_train_unet_only = true
network_train_text_encoder_only = false
resume = false
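# network_dim = 4 with network_alpha = 2 gives the usual alpha/dim = 0.5 scaling of
# the LoRA update. With network_train_unet_only = true the text encoder is frozen,
# so text_encoder_lr below should have no effect.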
[LyCORIS]
network_module = "lycoris.kohya"
network_args = [ "preset=attn-mlp", "algo=lora",]
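# LyCORIS with algo=lora is conventional LoRA; the attn-mlp preset targets the
# attention and feed-forward (MLP) layers of the transformer blocks.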
[Optimizer]
train_batch_size = 8
gradient_checkpointing = true
gradient_accumulation_steps = 1
optimizer_type = "AdamW8bit"
unet_lr = 0.0006
text_encoder_lr = 0.0006
max_grad_norm = 1.0
optimizer_args = [ "weight_decay=0.1", "betas=0.9,0.99",]
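# Effective batch size = train_batch_size * gradient_accumulation_steps = 8.
# AdamW8bit requires bitsandbytes; optimizer_args strings are parsed into kwargs,
# e.g. betas = (0.9, 0.99).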
[Lr_scheduler]
lr_scheduler_type = ""
lr_scheduler = "constant"
lr_warmup_steps = 0
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1.0
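# With a constant schedule, warmup steps, num_cycles, and power are unused.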
[Training_precision]
mixed_precision = "fp16"
full_fp16 = false
full_bf16 = false
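# fp16 mixed precision; full_fp16/full_bf16 (off here) would also keep gradients in
# half precision.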
[Further_improvement]
min_snr_gamma = 0
multires_noise_discount = 0.3
multires_noise_iterations = 6
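# min_snr_gamma = 0 disables Min-SNR loss weighting; multiresolution (pyramid) noise
# is enabled: 6 iterations with a 0.3 discount per level.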
[ARB]
enable_bucket = true
min_bucket_reso = 320
max_bucket_reso = 960
bucket_reso_steps = 64
bucket_no_upscale = false
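# Aspect Ratio Bucketing: images are sorted into buckets from 320 to 960 px in 64 px
# steps; with bucket_no_upscale = false, smaller images may be upscaled to fit.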
[Captions]
shuffle_caption = true
caption_extension = ".txt"
keep_tokens = 1
caption_dropout_rate = 0.05
caption_dropout_every_n_epochs = 0
caption_tag_dropout_rate = 0.0
max_token_length = 150
weighted_captions = false
token_warmup_min = 1
token_warmup_step = 0
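# Tags are shuffled each step, but keep_tokens = 1 pins the first tag (the trigger
# word); whole captions are dropped 5% of the time. max_token_length = 150 extends
# the CLIP context to two 75-token chunks.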
[Attention]
mem_eff_attn = false
xformers = true
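# xformers memory-efficient attention; an alternative to mem_eff_attn, so only one
# of the two should be enabled.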
[Data_augmentation]
color_aug = false
flip_aug = false
random_crop = false
[Cache_latents]
cache_latents = true
vae_batch_size = 1
cache_latents_to_disk = true
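# Latents are cached to disk; this is why color_aug and random_crop above must stay
# off (flip_aug can still be applied to cached latents).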
[Sampling_during_training]
sample_sampler = "ddim"
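# Only the sampler is chosen; without sample_prompts and sample_every_n_steps/epochs,
# no preview images should be generated during training.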
[Logging]
logging_dir = "logs_training"
log_with = "tensorboard"
log_prefix = "lora_"
[Dataset]
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
dataset_repeats = 1
[Regularization]
prior_loss_weight = 1.0
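# prior_loss_weight only matters if regularization images are supplied.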
[Huggingface]
save_state_to_huggingface = false
resume_from_huggingface = false
async_upload = false
[Debugging]
debug_dataset = false
[Deprecated]
use_8bit_adam = false
use_lion_optimizer = false
learning_rate = 0.0002
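# learning_rate = 2e-4 is the generic fallback, overridden here by the explicit
# unet_lr; use_8bit_adam/use_lion_optimizer are superseded by optimizer_type above.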
[Others]
lowram = false
training_comment = "nebulae"