VERSIL91 committed
Commit
6f41e93
1 Parent(s): 7d38d54

Training in progress, step 15, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b32571d072a7e9e58ec9df4d64d37de73b74df93891cbf7e0583d4df0b73a131
+oid sha256:7e0a9c71cd47c0e98e4df8a7e78f4b523fcd607377b8ca17db6e416015c3207f
 size 50503544
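Only the LFS object id (`oid sha256:...`) changes for the adapter file; the weights themselves live in Git LFS and the recorded size stays at 50503544 bytes. Below is a minimal sketch for inspecting the updated adapter tensors once the LFS object has been fetched; the path is the one in this commit, and the use of safetensors here is our own illustration, not something prescribed by the repository.

```python
# Sketch: inspect the LoRA adapter tensors in the updated checkpoint.
# Assumes `git lfs pull` has already materialized the real file behind the pointer.
from safetensors.torch import load_file

state_dict = load_file("last-checkpoint/adapter_model.safetensors")

# Print each adapter tensor with its shape and dtype.
for name, tensor in state_dict.items():
    print(f"{name}: {tuple(tensor.shape)} {tensor.dtype}")

print(f"{len(state_dict)} tensors total")
```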
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4c7ccf4e08e09ae3c5b1c0681d904470cc25fc19ea1235c5e8961470a5e70349
+oid sha256:62ade698410c7671fb365897251dba414c6a38da64f7d2134b76611ae3967174
 size 25986148
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37869a54425cc950dac8e63e96f681db7fe42c26ab4cc02786464e2e696282b6
+oid sha256:3b21341e28491e7519aa247a7900dde9ffa5bc444ad819877747c1cba8a2f246
 size 15984
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4d08f99970b931c4a23943a3b8e04e9c1c8914f8013b03146cd0e95b5d5847f
+oid sha256:69ae5e0ac083cbd6919e32143a287b06120b3d4d3cefddc87a84a5a2240ee75a
 size 15984
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0470aafc112e73e386f9c0796c9f7d47f712e4ec1725a9a8b0a60b35785a396a
+oid sha256:e59a7c6fcdbe021eb09a9b26ec2b45b55030c986399c9694fff97c95b645c400
 size 15984
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7773c4114e9a6400f44047ebbc36e6f819b09f52624f9b08749e5591a2949255
+oid sha256:2c01e7d96866ffb2310854fcb4f079a0cd8597ced3b2cd3eb4cea82a53748202
 size 15984
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b773dd0352daf5f6d7b7dbf00ce8796025d702b6c9d7b887e60406bd73ff69d
+oid sha256:789b5ac6a3d26da907edb51db958ba71233b76a160341b9ee57b430ce74531e6
 size 15984
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:82f00a1ddd43d7cfda1c967aa1b3730028b00945a2e660902073d96e57b6ad89
+oid sha256:91edd18fb81e0ac338bab59cc220719cc869b33d86ada4ae9588d0548376122c
 size 15984
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0a2eccd85b6a03f9cbe971d14978d89812e95a786f876dd323caf4ba3a9f4275
+oid sha256:2f3c1c4cc28dd44ba1817dffea22d63df2cc40ceac5849d28972cf3ae287c3d4
 size 15984
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:01a84d1bc751dc4bb97f124f2adf51847b23eeacb71aa0e4f3bf9c9a3061cddf
+oid sha256:2f4d82c554fbc426dbdfa4a4ba0d2eda37c9813afb5b4534eea5b76764acddac
 size 15984
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4849a6ac0a1d895740f1ab4eba9d346b8d898008d0cfe93dd108cd928d7c63e
+oid sha256:ff2736979009751c0c6b0ddcc5f6544d6f723aa752b4798eab0b70fb76cf0083
 size 1064
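Every file above follows the same three-line Git LFS pointer format (`version`, `oid sha256:...`, `size`), so each diff only swaps the object hash while the byte size is unchanged. After `git lfs pull` has materialized the objects, the recorded hash and size can be checked against the working-tree file. A minimal sketch using the scheduler.pt values from the diff just above; the verification step itself is our own illustration, not part of Git LFS tooling:

```python
# Sketch: verify that a checked-out LFS file matches the pointer recorded in this
# commit (expected oid and size are copied from the scheduler.pt diff above).
import hashlib
from pathlib import Path

EXPECTED_OID = "ff2736979009751c0c6b0ddcc5f6544d6f723aa752b4798eab0b70fb76cf0083"
EXPECTED_SIZE = 1064

data = Path("last-checkpoint/scheduler.pt").read_bytes()

assert len(data) == EXPECTED_SIZE, f"size mismatch: {len(data)} != {EXPECTED_SIZE}"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("scheduler.pt matches its LFS pointer")
```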
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.012415612632885854,
+  "epoch": 0.01862341894932878,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 15,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,49 @@
       "eval_samples_per_second": 90.03,
       "eval_steps_per_second": 11.266,
       "step": 10
+    },
+    {
+      "epoch": 0.01365717389617444,
+      "grad_norm": 0.31225091218948364,
+      "learning_rate": 9.755282581475769e-05,
+      "loss": 0.8059,
+      "step": 11
+    },
+    {
+      "epoch": 0.014898735159463024,
+      "grad_norm": 0.33488965034484863,
+      "learning_rate": 9.045084971874738e-05,
+      "loss": 0.7773,
+      "step": 12
+    },
+    {
+      "epoch": 0.01614029642275161,
+      "grad_norm": 1.0021756887435913,
+      "learning_rate": 7.938926261462366e-05,
+      "loss": 0.8657,
+      "step": 13
+    },
+    {
+      "epoch": 0.017381857686040194,
+      "grad_norm": 0.3065239489078522,
+      "learning_rate": 6.545084971874738e-05,
+      "loss": 0.748,
+      "step": 14
+    },
+    {
+      "epoch": 0.01862341894932878,
+      "grad_norm": 0.3766428828239441,
+      "learning_rate": 5e-05,
+      "loss": 0.8213,
+      "step": 15
+    },
+    {
+      "epoch": 0.01862341894932878,
+      "eval_loss": NaN,
+      "eval_runtime": 60.2571,
+      "eval_samples_per_second": 90.047,
+      "eval_steps_per_second": 11.268,
+      "step": 15
     }
   ],
   "logging_steps": 1,
@@ -120,7 +163,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.293980070510592e+16,
+  "total_flos": 4.940970105765888e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null