feat: upload asahi lora model
asahi_config/config_file.toml
CHANGED

@@ -6,7 +6,7 @@ pretrained_model_name_or_path = "/content/pretrained_model/Animefull-final-prune
 [additional_network_arguments]
 no_metadata = false
 unet_lr = 1.0
-text_encoder_lr = 0
+text_encoder_lr = 1.0
 network_module = "networks.lora"
 network_dim = 32
 network_alpha = 32
@@ -14,10 +14,10 @@ network_train_unet_only = false
 network_train_text_encoder_only = false
 
 [optimizer_arguments]
-optimizer_type = "
+optimizer_type = "Prodigy"
 learning_rate = 1.0
 max_grad_norm = 1.0
-optimizer_args = [ "decouple=True", "weight_decay=0.01", "betas=0.9,0.99",]
+optimizer_args = [ "decouple=True", "weight_decay=0.01", "d_coef=2", "use_bias_correction=True", "safeguard_warmup=True", "betas=0.9,0.99",]
 lr_scheduler = "constant"
 lr_warmup_steps = 0
 
@@ -30,12 +30,12 @@ vae_batch_size = 4
 output_dir = "/content/LoRA/output"
 output_name = "asahi"
 save_precision = "fp16"
-save_every_n_epochs =
+save_every_n_epochs = 10
 train_batch_size = 3
 max_token_length = 225
 mem_eff_attn = false
 xformers = true
-max_train_epochs =
+max_train_epochs = 100
 max_data_loader_n_workers = 8
 persistent_data_loader_workers = true
 seed = 31337
@@ -48,7 +48,7 @@ log_prefix = "asahi"
 lowram = true
 
 [sample_prompt_arguments]
-sample_every_n_epochs =
+sample_every_n_epochs = 10
 sample_sampler = "ddim"
 
 [dreambooth_arguments]
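For context on the new optimizer settings: Prodigy is a learning-rate-free optimizer, which is why learning_rate, unet_lr, and text_encoder_lr are all left at 1.0 and the effective step size comes from the optimizer's internal d-estimate. Below is a minimal sketch (not the sd-scripts source) of how the optimizer_args strings above would map onto the Prodigy constructor from the prodigyopt package; the parse_optimizer_args helper and the Linear stand-in model are illustrative assumptions.

# Illustrative sketch only. Assumes: pip install torch prodigyopt
import torch
from prodigyopt import Prodigy

def parse_optimizer_args(pairs):
    # Convert ["decouple=True", "betas=0.9,0.99", ...] into a kwargs dict.
    kwargs = {}
    for pair in pairs:
        key, value = pair.split("=", 1)
        if key == "betas":
            kwargs[key] = tuple(float(x) for x in value.split(","))
        elif value in ("True", "False"):
            kwargs[key] = (value == "True")
        else:
            kwargs[key] = float(value)
    return kwargs

kwargs = parse_optimizer_args([
    "decouple=True", "weight_decay=0.01", "d_coef=2",
    "use_bias_correction=True", "safeguard_warmup=True", "betas=0.9,0.99",
])

params = torch.nn.Linear(8, 8).parameters()  # stand-in for the LoRA weights
# lr=1.0 mirrors learning_rate = 1.0 in the config: Prodigy adapts the
# step size itself, so the base rate stays at 1.0.
optimizer = Prodigy(params, lr=1.0, **kwargs)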
asahi_config/dataset_config.toml
CHANGED

@@ -10,7 +10,7 @@ color_aug = false
 [[datasets.subsets]]
 image_dir = "/content/LoRA/train_data"
 class_tokens = "mksks"
-num_repeats =
+num_repeats = 2
 
 
 [general]
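For reference, num_repeats controls how many times each training image is shown per epoch, so it multiplies the effective dataset size. As an illustrative calculation (the actual image count is not part of this diff): with 30 images, num_repeats = 2 and train_batch_size = 3 give 30 × 2 / 3 = 20 optimizer steps per epoch, or 2,000 steps over the configured max_train_epochs = 100.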