WangXFng committed
Commit 3ab9759 · verified · 1 Parent(s): 62bba30

Model save
README.md CHANGED
@@ -41,6 +41,7 @@ The following hyperparameters were used during training:
 - total_train_batch_size: 256
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
+- lr_scheduler_warmup_steps: 2
 - num_epochs: 4
 
 ### Training results
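As a cross-check, here is a minimal sketch of how this hyperparameter list maps onto a transformers TrainingArguments object. The learning_rate and the per-device/accumulation split are assumptions inferred from trainer_state.json below ("train_batch_size": 16, logged LRs consistent with a 1e-4 peak), not values shown in this hunk:

from transformers import TrainingArguments

# Sketch of the setup implied by the README hunk above.
# learning_rate=1e-4 and gradient_accumulation_steps=16 are assumptions:
# 16 per-device * 16 accumulation = total_train_batch_size 256 (the 256
# could equally be reached across multiple GPUs).
args = TrainingArguments(
    output_dir="out",
    per_device_train_batch_size=16,
    gradient_accumulation_steps=16,
    learning_rate=1e-4,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_steps=2,        # the newly added lr_scheduler_warmup_steps value
    num_train_epochs=4,
)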
adapter_config.json CHANGED
@@ -16,17 +16,17 @@
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 8,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "o_proj",
+    "v_proj",
+    "k_proj",
+    "q_proj",
     "down_proj",
-    "up_proj",
     "gate_proj",
-    "q_proj",
-    "k_proj",
-    "o_proj",
-    "v_proj"
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c7f823d070fb4bc434289fe85e13dbd360fc27d0ba59d70e964da5d34863719a
-size 1080767344
+oid sha256:28d18e25ccb1e93e19ba60ec0285d931a9b17f4425c3ee0b0242cd14be802c51
+size 1103312056
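The adapter file grows by 22,544,712 bytes (1,080,767,344 → 1,103,312,056). A rough consistency check, assuming 16-bit tensors and purely illustrative layer shapes: each targeted weight of shape (d_out, d_in) carries r * (d_in + d_out) LoRA parameters, so doubling r from 8 to 16 should add exactly one r=8 adapter's worth of parameters, about 11.3M here:

# Hypothetical sanity check; d_in/d_out are illustrative and not read
# from this repo's base model config.
def lora_params(d_in: int, d_out: int, r: int) -> int:
    # LoRA factorizes the weight update as B @ A,
    # with A of shape (r, d_in) and B of shape (d_out, r).
    return r * (d_in + d_out)

delta_bytes = 1_103_312_056 - 1_080_767_344
print(delta_bytes)       # 22,544,712
print(delta_bytes // 2)  # ~11.27M parameters added, at 2 bytes each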
trainer_state.json CHANGED
@@ -10,68 +10,68 @@
   "log_history": [
     {
       "epoch": 0.4854368932038835,
-      "grad_norm": 1.1607048511505127,
-      "learning_rate": 8.786407766990292e-05,
-      "loss": 0.8852,
+      "grad_norm": 0.7365666627883911,
+      "learning_rate": 8.794946550048592e-05,
+      "loss": 0.9374,
       "step": 250
     },
     {
       "epoch": 0.970873786407767,
-      "grad_norm": 0.9901185631752014,
-      "learning_rate": 7.572815533980583e-05,
-      "loss": 0.4521,
+      "grad_norm": 0.701321005821228,
+      "learning_rate": 7.580174927113704e-05,
+      "loss": 0.4669,
       "step": 500
     },
     {
       "epoch": 1.4563106796116505,
-      "grad_norm": 0.8445346355438232,
-      "learning_rate": 6.359223300970875e-05,
-      "loss": 0.3889,
+      "grad_norm": 0.5943931937217712,
+      "learning_rate": 6.365403304178815e-05,
+      "loss": 0.3933,
       "step": 750
     },
     {
       "epoch": 1.941747572815534,
-      "grad_norm": 0.7807681560516357,
-      "learning_rate": 5.145631067961165e-05,
-      "loss": 0.3685,
+      "grad_norm": 0.5661283135414124,
+      "learning_rate": 5.150631681243926e-05,
+      "loss": 0.3706,
       "step": 1000
     },
     {
       "epoch": 2.4271844660194173,
-      "grad_norm": 0.7362964153289795,
-      "learning_rate": 3.9320388349514564e-05,
-      "loss": 0.3581,
+      "grad_norm": 0.5260675549507141,
+      "learning_rate": 3.9358600583090386e-05,
+      "loss": 0.3599,
       "step": 1250
     },
     {
       "epoch": 2.912621359223301,
-      "grad_norm": 0.785736620426178,
-      "learning_rate": 2.7184466019417475e-05,
-      "loss": 0.3525,
+      "grad_norm": 0.5369626879692078,
+      "learning_rate": 2.72108843537415e-05,
+      "loss": 0.3546,
       "step": 1500
     },
     {
       "epoch": 3.3980582524271843,
-      "grad_norm": 0.7113440632820129,
-      "learning_rate": 1.5048543689320387e-05,
-      "loss": 0.3446,
+      "grad_norm": 0.48315396904945374,
+      "learning_rate": 1.5063168124392615e-05,
+      "loss": 0.3473,
       "step": 1750
     },
     {
       "epoch": 3.883495145631068,
-      "grad_norm": 0.6975888013839722,
-      "learning_rate": 2.912621359223301e-06,
-      "loss": 0.3404,
+      "grad_norm": 0.46571284532546997,
+      "learning_rate": 2.915451895043732e-06,
+      "loss": 0.3437,
       "step": 2000
     },
     {
       "epoch": 4.0,
       "step": 2060,
-      "total_flos": 4.996840775351132e+17,
-      "train_loss": 0.4334296365386074,
-      "train_runtime": 6711.8297,
-      "train_samples_per_second": 78.57,
-      "train_steps_per_second": 0.307
+      "total_flos": 5.02561395295531e+17,
+      "train_loss": 0.4436599824035052,
+      "train_runtime": 6742.0133,
+      "train_samples_per_second": 78.218,
+      "train_steps_per_second": 0.306
     }
   ],
   "logging_steps": 250,
@@ -91,7 +91,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.996840775351132e+17,
+  "total_flos": 5.02561395295531e+17,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4971a71404bdc4471db5496187a2a0962d0fcb9bda93b897a427de69a0807442
+oid sha256:6404f770a80f28004c986cae0c269c7894eeff9ed7b65250d652ac62b91db4e8
 size 5240