hanungaddi committed
Commit 1fd3737
1 Parent(s): 9a31d6d

jingliu 20 epoch testing

jingliu_sdxl_20_ep_config/.ipynb_checkpoints/config_file-checkpoint.toml ADDED
@@ -0,0 +1,69 @@
+ [sdxl_arguments]
+ cache_text_encoder_outputs = true
+ no_half_vae = true
+ min_timestep = 0
+ max_timestep = 1000
+ shuffle_caption = false
+
+ [model_arguments]
+ pretrained_model_name_or_path = "/workspace/pretrained_model/animagine-xl.safetensors"
+ vae = "/workspace/vae/sdxl_vae.safetensors"
+
+ [dataset_arguments]
+ debug_dataset = false
+ in_json = "/workspace/LoRA/meta_lat.json"
+ train_data_dir = "/workspace/LoRA/train_data"
+ dataset_repeats = 1
+ keep_tokens = 0
+ resolution = "1024,1024"
+ color_aug = false
+ token_warmup_min = 1
+ token_warmup_step = 0
+
+ [training_arguments]
+ output_dir = "/workspace/LoRA/outputs"
+ output_name = "jingliu_sdxl_lora"
+ save_precision = "fp16"
+ save_every_n_epochs = 4
+ train_batch_size = 1
+ max_token_length = 225
+ mem_eff_attn = false
+ sdpa = true
+ xformers = false
+ max_train_epochs = 20
+ max_data_loader_n_workers = 8
+ persistent_data_loader_workers = true
+ gradient_checkpointing = true
+ gradient_accumulation_steps = 1
+ mixed_precision = "fp16"
+
+ [logging_arguments]
+ log_with = "wandb"
+ log_tracker_name = "sdxl_lora"
+ logging_dir = "/workspace/LoRA/logs"
+
+ [sample_prompt_arguments]
+ sample_every_n_epochs = 4
+ sample_sampler = "euler_a"
+
+ [saving_arguments]
+ save_model_as = "safetensors"
+
+ [optimizer_arguments]
+ optimizer_type = "AdaFactor"
+ learning_rate = 4e-7
+ max_grad_norm = 1.0
+ optimizer_args = [ "scale_parameter=False", "relative_step=False", "warmup_init=False",]
+ lr_scheduler = "constant_with_warmup"
+ lr_warmup_steps = 100
+
+ [additional_network_arguments]
+ no_metadata = false
+ # network_weights = "network_weight"  # placeholder; set to an existing LoRA file to resume training (the original line used invalid TOML syntax)
+ network_module = "networks.lora"
+ network_dim = 64
+ network_alpha = 32
+ network_args = []
+ network_train_unet_only = true
+
+ [advanced_training_config]
jingliu_sdxl_20_ep_config/config_file.toml ADDED
@@ -0,0 +1,69 @@
+ [sdxl_arguments]
+ cache_text_encoder_outputs = true
+ no_half_vae = true
+ min_timestep = 0
+ max_timestep = 1000
+ shuffle_caption = false
+
+ [model_arguments]
+ pretrained_model_name_or_path = "/workspace/pretrained_model/animagine-xl.safetensors"
+ vae = "/workspace/vae/sdxl_vae.safetensors"
+
+ [dataset_arguments]
+ debug_dataset = false
+ in_json = "/workspace/LoRA/meta_lat.json"
+ train_data_dir = "/workspace/LoRA/train_data"
+ dataset_repeats = 1
+ keep_tokens = 0
+ resolution = "1024,1024"
+ color_aug = false
+ token_warmup_min = 1
+ token_warmup_step = 0
+
+ [training_arguments]
+ output_dir = "/workspace/LoRA/outputs"
+ output_name = "jingliu_sdxl_lora"
+ save_precision = "fp16"
+ save_every_n_epochs = 4
+ train_batch_size = 1
+ max_token_length = 225
+ mem_eff_attn = false
+ sdpa = true
+ xformers = false
+ max_train_epochs = 20
+ max_data_loader_n_workers = 8
+ persistent_data_loader_workers = true
+ gradient_checkpointing = true
+ gradient_accumulation_steps = 1
+ mixed_precision = "fp16"
+
+ [logging_arguments]
+ log_with = "wandb"
+ log_tracker_name = "sdxl_lora"
+ logging_dir = "/workspace/LoRA/logs"
+
+ [sample_prompt_arguments]
+ sample_every_n_epochs = 4
+ sample_sampler = "euler_a"
+
+ [saving_arguments]
+ save_model_as = "safetensors"
+
+ [optimizer_arguments]
+ optimizer_type = "AdaFactor"
+ learning_rate = 4e-7
+ max_grad_norm = 1.0
+ optimizer_args = [ "scale_parameter=False", "relative_step=False", "warmup_init=False",]
+ lr_scheduler = "constant_with_warmup"
+ lr_warmup_steps = 100
+
+ [additional_network_arguments]
+ no_metadata = false
+ # network_weights = "network_weight"  # placeholder; set to an existing LoRA file to resume training (the original line used invalid TOML syntax)
+ network_module = "networks.lora"
+ network_dim = 64
+ network_alpha = 32
+ network_args = []
+ network_train_unet_only = true
+
+ [advanced_training_config]
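
Note on the schedule these values imply: with train_batch_size = 1, gradient_accumulation_steps = 1 and dataset_repeats = 1, one epoch is roughly one optimizer step per training image, so max_train_epochs = 20 gives about 20 × (image count) total steps. A minimal sketch of that estimate, assuming Python 3.11+ (for tomllib), that the config is read from this repo path, and that every image file under train_data_dir is counted (aspect-ratio bucketing can shift the exact number):

# Minimal sketch: estimate total optimizer steps implied by config_file.toml.
import tomllib
from pathlib import Path

with open("jingliu_sdxl_20_ep_config/config_file.toml", "rb") as f:
    cfg = tomllib.load(f)

ds, tr = cfg["dataset_arguments"], cfg["training_arguments"]
# Count image files under the configured training directory.
images = sum(1 for p in Path(ds["train_data_dir"]).rglob("*")
             if p.suffix.lower() in {".png", ".jpg", ".jpeg", ".webp"})
steps_per_epoch = (images * ds["dataset_repeats"]) // (tr["train_batch_size"] * tr["gradient_accumulation_steps"])
print(f"{images} images -> ~{steps_per_epoch} steps/epoch, "
      f"~{steps_per_epoch * tr['max_train_epochs']} total steps over {tr['max_train_epochs']} epochs")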
jingliu_sdxl_20_ep_config/sample_prompt.toml ADDED
@@ -0,0 +1,12 @@
+ [prompt]
+ negative_prompt = "3d render, smooth, plastic, blurry, grainy, low-resolution, deep-fried, oversaturated"
+ width = 1024
+ height = 1024
+ scale = 7
+ sample_steps = 28
+ [[prompt.subset]]
+ prompt = "jingliu, 1girl, solo, long hair, breasts, bangs, gloves, dress, cleavage, hair between eyes, bare shoulders, medium breasts, closed mouth, blue hair, grey hair, detached sleeves, black gloves, blue dress, lantern"
+
+ [[prompt.subset]]
+ prompt = "jingliu, 1girl, solo, long hair, looking at viewer, bangs, red eyes, gloves, dress, hair between eyes, bare shoulders, closed mouth, blue hair, detached sleeves, cosplay"
+
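
For context, TOML files in this layout are meant to be consumed by the kohya-ss sd-scripts SDXL LoRA trainer. A minimal launch sketch, assuming an sd-scripts checkout at /workspace/sd-scripts and that its sdxl_train_network.py accepts --config_file and --sample_prompts (the flags this config format targets); adjust paths to your environment:

# Minimal sketch: launch SDXL LoRA training with these TOML files via kohya-ss sd-scripts.
# The sd-scripts location and flag names below are assumptions, not part of this commit.
import subprocess

cmd = [
    "accelerate", "launch", "--num_cpu_threads_per_process", "1",
    "/workspace/sd-scripts/sdxl_train_network.py",
    "--config_file", "jingliu_sdxl_20_ep_config/config_file.toml",
    "--sample_prompts", "jingliu_sdxl_20_ep_config/sample_prompt.toml",
]
subprocess.run(cmd, check=True)

With sample_every_n_epochs = 4 and save_every_n_epochs = 4, this run would emit sample images and a checkpoint every 4 of the 20 epochs.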