PureFighter committed on
Commit
3fe4195
1 Parent(s): 2c8a6f4

Training in progress, step 1000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cd5dde95ad6c157a2fe95cc958dbb18c26699082da64c338dab9848d4c67c19a
+ oid sha256:afcd81be9b532c2a51334c9b2205e5e3b2dce963888b7a4276a6b07b3407d7e6
  size 540827680
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3ea50f6e51285e2a51883bc21cde22e02fb19d673bd8e39e2afff7b2db706d12
+ oid sha256:04e54ceee3605ff92754bffa78afc73aed9dbc57e9fd6a5462119c931d361160
  size 1081770746
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:06e55bfc8723f269a626afca0be6f7def5753f3bb265436b94c5580b703cfcc7
+ oid sha256:7bc6631138e7fa84a7a9d65acc91ce044a760be23ac04a06a280bdd1e0066a08
  size 13990
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b8967d5c23dc2de82573da94680305eacf32d7b908df4950b6f5a1314e5be47e
+ oid sha256:5e29425e5a8e130a3001b6471b34f1175bdf6c6c4e0d2fe18e68d527d7f30011
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.7994011976047904,
- "best_model_checkpoint": "./results/checkpoint-500",
- "epoch": 1.3333333333333333,
+ "best_metric": 0.8383233532934131,
+ "best_model_checkpoint": "./results/checkpoint-1000",
+ "epoch": 2.6666666666666665,
  "eval_steps": 250,
- "global_step": 500,
+ "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -144,6 +144,143 @@
  "eval_samples_per_second": 2.658,
  "eval_steps_per_second": 0.334,
  "step": 500
+ },
+ {
+ "epoch": 1.3599999999999999,
+ "grad_norm": 13.776785850524902,
+ "learning_rate": 1.0933333333333334e-05,
+ "loss": 0.4696,
+ "step": 510
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 24.468957901000977,
+ "learning_rate": 1.04e-05,
+ "loss": 0.617,
+ "step": 540
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 18.628131866455078,
+ "learning_rate": 9.866666666666668e-06,
+ "loss": 0.6078,
+ "step": 570
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 100.15174865722656,
+ "learning_rate": 9.333333333333334e-06,
+ "loss": 0.5287,
+ "step": 600
+ },
+ {
+ "epoch": 1.6800000000000002,
+ "grad_norm": 16.748079299926758,
+ "learning_rate": 8.8e-06,
+ "loss": 0.7787,
+ "step": 630
+ },
+ {
+ "epoch": 1.76,
+ "grad_norm": 19.134979248046875,
+ "learning_rate": 8.266666666666667e-06,
+ "loss": 0.4815,
+ "step": 660
+ },
+ {
+ "epoch": 1.8399999999999999,
+ "grad_norm": 6.146342754364014,
+ "learning_rate": 7.733333333333334e-06,
+ "loss": 0.5385,
+ "step": 690
+ },
+ {
+ "epoch": 1.92,
+ "grad_norm": 5.284756660461426,
+ "learning_rate": 7.2000000000000005e-06,
+ "loss": 0.6076,
+ "step": 720
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 13.146005630493164,
+ "learning_rate": 6.666666666666667e-06,
+ "loss": 0.5023,
+ "step": 750
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.8323353293413174,
+ "eval_loss": 0.6021029353141785,
+ "eval_runtime": 118.5789,
+ "eval_samples_per_second": 2.817,
+ "eval_steps_per_second": 0.354,
+ "step": 750
+ },
+ {
+ "epoch": 2.08,
+ "grad_norm": 15.934453010559082,
+ "learning_rate": 6.133333333333334e-06,
+ "loss": 0.4482,
+ "step": 780
+ },
+ {
+ "epoch": 2.16,
+ "grad_norm": 2.5525577068328857,
+ "learning_rate": 5.600000000000001e-06,
+ "loss": 0.4262,
+ "step": 810
+ },
+ {
+ "epoch": 2.24,
+ "grad_norm": 20.71769905090332,
+ "learning_rate": 5.0666666666666676e-06,
+ "loss": 0.3308,
+ "step": 840
+ },
+ {
+ "epoch": 2.32,
+ "grad_norm": 5.773500919342041,
+ "learning_rate": 4.533333333333334e-06,
+ "loss": 0.3913,
+ "step": 870
+ },
+ {
+ "epoch": 2.4,
+ "grad_norm": 23.659786224365234,
+ "learning_rate": 4.000000000000001e-06,
+ "loss": 0.4899,
+ "step": 900
+ },
+ {
+ "epoch": 2.48,
+ "grad_norm": 9.76993179321289,
+ "learning_rate": 3.4666666666666672e-06,
+ "loss": 0.3889,
+ "step": 930
+ },
+ {
+ "epoch": 2.56,
+ "grad_norm": 3.7444443702697754,
+ "learning_rate": 2.9333333333333338e-06,
+ "loss": 0.4644,
+ "step": 960
+ },
+ {
+ "epoch": 2.64,
+ "grad_norm": 15.488058090209961,
+ "learning_rate": 2.4000000000000003e-06,
+ "loss": 0.3985,
+ "step": 990
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "eval_accuracy": 0.8383233532934131,
+ "eval_loss": 0.5776464939117432,
+ "eval_runtime": 128.183,
+ "eval_samples_per_second": 2.606,
+ "eval_steps_per_second": 0.328,
+ "step": 1000
  }
  ],
  "logging_steps": 30,
@@ -172,7 +309,7 @@
  "attributes": {}
  }
  },
- "total_flos": 262998389326848.0,
+ "total_flos": 525996778653696.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null