{
  "pretrained_model_name_or_path": "C:/Users/darkenlord1/Desktop/SD files/SD Models/NAI.ckpt",
  "v2": false,
  "v_parameterization": false,
  "logging_dir": "C:\\Users\\darkenlord1\\Desktop\\datasets\\eExpressions2/logging",
  "train_data_dir": "C:\\Users\\darkenlord1\\Desktop\\datasets\\eExpressions2/Images",
  "reg_data_dir": "",
  "output_dir": "C:\\Users\\darkenlord1\\Desktop\\datasets\\eExpressions2",
  "max_resolution": "768,768",
  "learning_rate": 1.0,
  "lr_scheduler": "constant_with_warmup",
  "lr_warmup": 10,
  "train_batch_size": 6,
  "epoch": 12,
  "save_every_n_epochs": 2,
  "mixed_precision": "bf16",
  "save_precision": "fp16",
  "seed": "228",
  "num_cpu_threads_per_process": 2,
  "cache_latents": true,
  "cache_latents_to_disk": false,
  "caption_extension": ".txt",
  "enable_bucket": true,
  "gradient_checkpointing": false,
  "full_fp16": false,
  "no_token_padding": false,
  "stop_text_encoder_training": 0,
  "xformers": true,
  "save_model_as": "safetensors",
  "shuffle_caption": true,
  "save_state": false,
  "resume": "",
  "prior_loss_weight": 1.0,
  "text_encoder_lr": 0.5,
  "unet_lr": 1.0,
  "network_dim": 32,
  "lora_network_weights": "",
  "color_aug": false,
  "flip_aug": false,
  "clip_skip": 2,
  "gradient_accumulation_steps": 1,
  "mem_eff_attn": false,
  "output_name": "eExpressions2",
  "model_list": "custom",
  "max_token_length": "150",
  "max_train_epochs": "",
  "max_data_loader_n_workers": "0",
  "network_alpha": 32,
  "training_comment": "",
  "keep_tokens": 2,
  "lr_scheduler_num_cycles": "3",
  "lr_scheduler_power": "",
  "persistent_data_loader_workers": false,
  "bucket_no_upscale": false,
  "random_crop": false,
  "bucket_reso_steps": 64,
  "caption_dropout_every_n_epochs": 0.0,
  "caption_dropout_rate": 0,
  "optimizer": "DAdaptation",
  "optimizer_args": "decouple=True weight_decay=0.02",
  "noise_offset": 0,
  "multires_noise_iterations": 0,
  "multires_noise_discount": 0,
  "LoRA_type": "Standard",
  "conv_dim": 1,
  "conv_alpha": 1,
  "sample_every_n_steps": 0,
  "sample_every_n_epochs": 0,
  "sample_sampler": "euler_a",
  "sample_prompts": "",
  "additional_parameters": "",
  "vae_batch_size": 0,
  "min_snr_gamma": 0,
  "down_lr_weight": "",
  "mid_lr_weight": "",
  "up_lr_weight": "",
  "block_lr_zero_threshold": "",
  "block_dims": "",
  "block_alphas": "",
  "conv_dims": "",
  "conv_alphas": "",
  "weighted_captions": false,
  "unit": 1,
  "save_every_n_steps": 0,
  "save_last_n_steps": 0,
  "save_last_n_steps_state": 0
}