dimasik2987 committed
Commit 7a15eb8 · verified · 1 Parent(s): 08cde1c

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5a4a0a95ac3cae47977b499fb7f3d5e63750c6aae56e4fdfeb36d68e80ccfae7
+ oid sha256:505cb13a465c9411cc4f15a9e86f85848c0b2b5a53badf92ff8e0bbb5ff206f8
  size 70430032
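
The checkpoint files in this commit are Git LFS pointers: the repository records only a sha256 oid and a byte size, while the payload lives in LFS storage. As a rough sketch (not part of the commit, and the local path is an assumption), a downloaded copy of the adapter weights can be checked against the updated oid above:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so large checkpoints do not need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid copied from the updated pointer above.
expected = "505cb13a465c9411cc4f15a9e86f85848c0b2b5a53badf92ff8e0bbb5ff206f8"
assert sha256_of("last-checkpoint/adapter_model.safetensors") == expected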
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:09a1945005fea60223a082b7fbb3a6a75fedd42bcea318d810d089371f192690
+ oid sha256:ab234809f7de65a347327bcd5c712cab956b8b7f6a84d571573bf1cc3e3eac41
  size 141053442
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:18db6c1bc743d48259413c2026540b18d05325bbab9ad615ffc96744e97a683b
+ oid sha256:1d164e3f3bc7b21f6311b4913902d267a8bc53f4a3b4b038f51ba32b099e5f78
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9ea7ff16b0c30a914eb0d145e3fb06ff9027c6cd2408e766ce8a09accab89a4d
+ oid sha256:e69e2b49ea642509f0c688c16fb190b7cf27dac0a18903a5e2d1467d0343d8b8
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.21676300578034682,
+ "epoch": 0.43352601156069365,
  "eval_steps": 6,
- "global_step": 25,
+ "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -222,6 +222,213 @@
  "learning_rate": 0.00010654031292301432,
  "loss": 3.1412,
  "step": 25
+ },
+ {
+ "epoch": 0.2254335260115607,
+ "grad_norm": 7.255655288696289,
+ "learning_rate": 0.0001,
+ "loss": 2.3499,
+ "step": 26
+ },
+ {
+ "epoch": 0.23410404624277456,
+ "grad_norm": 9.42827033996582,
+ "learning_rate": 9.345968707698569e-05,
+ "loss": 3.2037,
+ "step": 27
+ },
+ {
+ "epoch": 0.24277456647398843,
+ "grad_norm": 7.624701976776123,
+ "learning_rate": 8.694738077799488e-05,
+ "loss": 2.768,
+ "step": 28
+ },
+ {
+ "epoch": 0.2514450867052023,
+ "grad_norm": 7.891088962554932,
+ "learning_rate": 8.049096779838719e-05,
+ "loss": 2.3543,
+ "step": 29
+ },
+ {
+ "epoch": 0.26011560693641617,
+ "grad_norm": 6.974014759063721,
+ "learning_rate": 7.411809548974792e-05,
+ "loss": 2.1117,
+ "step": 30
+ },
+ {
+ "epoch": 0.26011560693641617,
+ "eval_loss": 2.6875665187835693,
+ "eval_runtime": 15.6483,
+ "eval_samples_per_second": 9.33,
+ "eval_steps_per_second": 2.364,
+ "step": 30
+ },
+ {
+ "epoch": 0.26878612716763006,
+ "grad_norm": 7.1580810546875,
+ "learning_rate": 6.785605346968386e-05,
+ "loss": 2.8485,
+ "step": 31
+ },
+ {
+ "epoch": 0.2774566473988439,
+ "grad_norm": 7.447386264801025,
+ "learning_rate": 6.173165676349103e-05,
+ "loss": 2.7971,
+ "step": 32
+ },
+ {
+ "epoch": 0.2861271676300578,
+ "grad_norm": 5.977284908294678,
+ "learning_rate": 5.577113097809989e-05,
+ "loss": 2.32,
+ "step": 33
+ },
+ {
+ "epoch": 0.2947976878612717,
+ "grad_norm": 6.685746192932129,
+ "learning_rate": 5.000000000000002e-05,
+ "loss": 2.5347,
+ "step": 34
+ },
+ {
+ "epoch": 0.30346820809248554,
+ "grad_norm": 12.432933807373047,
+ "learning_rate": 4.444297669803981e-05,
+ "loss": 3.2119,
+ "step": 35
+ },
+ {
+ "epoch": 0.31213872832369943,
+ "grad_norm": 7.956794261932373,
+ "learning_rate": 3.9123857099127936e-05,
+ "loss": 2.2774,
+ "step": 36
+ },
+ {
+ "epoch": 0.31213872832369943,
+ "eval_loss": 2.6305289268493652,
+ "eval_runtime": 15.5922,
+ "eval_samples_per_second": 9.364,
+ "eval_steps_per_second": 2.373,
+ "step": 36
+ },
+ {
+ "epoch": 0.3208092485549133,
+ "grad_norm": 7.3900580406188965,
+ "learning_rate": 3.406541848999312e-05,
+ "loss": 2.4076,
+ "step": 37
+ },
+ {
+ "epoch": 0.32947976878612717,
+ "grad_norm": 7.0221710205078125,
+ "learning_rate": 2.9289321881345254e-05,
+ "loss": 1.9486,
+ "step": 38
+ },
+ {
+ "epoch": 0.33815028901734107,
+ "grad_norm": 7.89264440536499,
+ "learning_rate": 2.4816019252102273e-05,
+ "loss": 2.6934,
+ "step": 39
+ },
+ {
+ "epoch": 0.3468208092485549,
+ "grad_norm": 7.8453521728515625,
+ "learning_rate": 2.0664665970876496e-05,
+ "loss": 2.1978,
+ "step": 40
+ },
+ {
+ "epoch": 0.3554913294797688,
+ "grad_norm": 6.693100452423096,
+ "learning_rate": 1.6853038769745467e-05,
+ "loss": 2.3828,
+ "step": 41
+ },
+ {
+ "epoch": 0.36416184971098264,
+ "grad_norm": 5.916697978973389,
+ "learning_rate": 1.339745962155613e-05,
+ "loss": 1.9406,
+ "step": 42
+ },
+ {
+ "epoch": 0.36416184971098264,
+ "eval_loss": 2.5845963954925537,
+ "eval_runtime": 16.1508,
+ "eval_samples_per_second": 9.04,
+ "eval_steps_per_second": 2.291,
+ "step": 42
+ },
+ {
+ "epoch": 0.37283236994219654,
+ "grad_norm": 6.838918685913086,
+ "learning_rate": 1.0312725846731175e-05,
+ "loss": 2.5254,
+ "step": 43
+ },
+ {
+ "epoch": 0.3815028901734104,
+ "grad_norm": 6.780750274658203,
+ "learning_rate": 7.612046748871327e-06,
+ "loss": 2.2767,
+ "step": 44
+ },
+ {
+ "epoch": 0.3901734104046243,
+ "grad_norm": 6.910787582397461,
+ "learning_rate": 5.306987050489442e-06,
+ "loss": 2.3037,
+ "step": 45
+ },
+ {
+ "epoch": 0.3988439306358382,
+ "grad_norm": 6.165891170501709,
+ "learning_rate": 3.40741737109318e-06,
+ "loss": 2.3262,
+ "step": 46
+ },
+ {
+ "epoch": 0.407514450867052,
+ "grad_norm": 6.632768630981445,
+ "learning_rate": 1.921471959676957e-06,
+ "loss": 2.2463,
+ "step": 47
+ },
+ {
+ "epoch": 0.4161849710982659,
+ "grad_norm": 6.605554580688477,
+ "learning_rate": 8.555138626189618e-07,
+ "loss": 2.4657,
+ "step": 48
+ },
+ {
+ "epoch": 0.4161849710982659,
+ "eval_loss": 2.575497627258301,
+ "eval_runtime": 15.6673,
+ "eval_samples_per_second": 9.319,
+ "eval_steps_per_second": 2.362,
+ "step": 48
+ },
+ {
+ "epoch": 0.42485549132947975,
+ "grad_norm": 7.213985919952393,
+ "learning_rate": 2.141076761396521e-07,
+ "loss": 3.1228,
+ "step": 49
+ },
+ {
+ "epoch": 0.43352601156069365,
+ "grad_norm": 7.434656620025635,
+ "learning_rate": 0.0,
+ "loss": 2.6095,
+ "step": 50
  }
  ],
  "logging_steps": 1,
@@ -236,12 +443,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 5536892112076800.0,
+ "total_flos": 1.10737842241536e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null