diaenra committed (verified)
Commit 985e9cf · 1 Parent(s): cef3289

Training in progress, step 6231, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:621b09bb6b65a042a5d1aa984f1ae3eb550edf44591c3ce5197e723ddb0e6861
+oid sha256:e6a61484c58f2aa5fbb2745552311c9a00798baf432791d1aac6c666b0179b94
 size 4286680
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:195d742d70a90ac9fb452296d7eed8e0fb39376173bfdf06b4bb9201d7b249d4
+oid sha256:ae7143c1ac9958839859c3afbfc24d10d8f0514fbf676a7e123d6adb0f7dcc43
 size 8583659
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0b384bb028283a2e480a81f100ae4848f22377168030cffb15ea66c728719f08
+oid sha256:9c1211409034a159c109d0b39b882945cb5b76df53eb397f96427985907baedf
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:38f6745e10494f0fc63c59b1b0a804a10928aac9839af7d5ce6caa7feff5dbdd
+oid sha256:28d686c5d1ce4e1ba39c740cc5b18a76a7041ecc94d8a5e148ede93ef04e7c8a
 size 1064
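Each of the checkpoint binaries above is stored as a Git LFS pointer: a small text file recording the LFS spec version, the SHA-256 of the real blob, and its size in bytes. As a quick sanity check after downloading, one can recompute the hash locally and compare it to the pointer. A minimal sketch, assuming the pointer text and blob have already been fetched to local paths (the paths and helper names below are illustrative, not part of this repo's tooling):

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse the key/value lines of a git-lfs pointer file (version, oid, size)."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: Path, blob_path: Path) -> bool:
    """Return True if blob_path matches the oid and size recorded in the pointer."""
    fields = parse_lfs_pointer(pointer_path.read_text())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    data = blob_path.read_bytes()
    actual_oid = hashlib.sha256(data).hexdigest()
    return actual_oid == expected_oid and len(data) == expected_size

# Hypothetical local paths; adjust to wherever the checkpoint was pulled.
# print(verify_blob(Path("adapter_model.safetensors.pointer"),
#                   Path("last-checkpoint/adapter_model.safetensors")))
```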
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.9971516829141092,
+  "epoch": 0.9998796485738356,
   "eval_steps": 500,
-  "global_step": 6214,
+  "global_step": 6231,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -43505,6 +43505,125 @@
       "learning_rate": 1.89702134845704e-09,
       "loss": 32.7734,
       "step": 6214
+    },
+    {
+      "epoch": 0.9973121514823284,
+      "grad_norm": 12.067755699157715,
+      "learning_rate": 1.680407667342676e-09,
+      "loss": 32.7109,
+      "step": 6215
+    },
+    {
+      "epoch": 0.9974726200505476,
+      "grad_norm": 12.38700008392334,
+      "learning_rate": 1.4769218031618349e-09,
+      "loss": 32.5391,
+      "step": 6216
+    },
+    {
+      "epoch": 0.9976330886187668,
+      "grad_norm": 12.381599426269531,
+      "learning_rate": 1.2865638093439992e-09,
+      "loss": 32.6172,
+      "step": 6217
+    },
+    {
+      "epoch": 0.997793557186986,
+      "grad_norm": 12.004240989685059,
+      "learning_rate": 1.1093337358658585e-09,
+      "loss": 32.6016,
+      "step": 6218
+    },
+    {
+      "epoch": 0.9979540257552052,
+      "grad_norm": 12.466684341430664,
+      "learning_rate": 9.45231629273513e-10,
+      "loss": 32.625,
+      "step": 6219
+    },
+    {
+      "epoch": 0.9981144943234244,
+      "grad_norm": 12.143556594848633,
+      "learning_rate": 7.942575326380653e-10,
+      "loss": 32.6562,
+      "step": 6220
+    },
+    {
+      "epoch": 0.9982749628916436,
+      "grad_norm": 12.071325302124023,
+      "learning_rate": 6.564114856111304e-10,
+      "loss": 32.7344,
+      "step": 6221
+    },
+    {
+      "epoch": 0.9984354314598628,
+      "grad_norm": 12.138935089111328,
+      "learning_rate": 5.316935243859789e-10,
+      "loss": 32.8047,
+      "step": 6222
+    },
+    {
+      "epoch": 0.998595900028082,
+      "grad_norm": 12.325695991516113,
+      "learning_rate": 4.20103681708639e-10,
+      "loss": 32.5938,
+      "step": 6223
+    },
+    {
+      "epoch": 0.9987563685963012,
+      "grad_norm": 12.195591926574707,
+      "learning_rate": 3.216419868723453e-10,
+      "loss": 32.5938,
+      "step": 6224
+    },
+    {
+      "epoch": 0.9989168371645204,
+      "grad_norm": 12.133679389953613,
+      "learning_rate": 2.3630846573419183e-10,
+      "loss": 32.6016,
+      "step": 6225
+    },
+    {
+      "epoch": 0.9990773057327396,
+      "grad_norm": 12.008955001831055,
+      "learning_rate": 1.6410314070403054e-10,
+      "loss": 32.625,
+      "step": 6226
+    },
+    {
+      "epoch": 0.9992377743009588,
+      "grad_norm": 12.204169273376465,
+      "learning_rate": 1.0502603073336836e-10,
+      "loss": 32.6797,
+      "step": 6227
+    },
+    {
+      "epoch": 0.999398242869178,
+      "grad_norm": 12.070178031921387,
+      "learning_rate": 5.907715133202096e-11,
+      "loss": 32.6016,
+      "step": 6228
+    },
+    {
+      "epoch": 0.9995587114373972,
+      "grad_norm": 12.140969276428223,
+      "learning_rate": 2.6256514579214854e-11,
+      "loss": 32.6406,
+      "step": 6229
+    },
+    {
+      "epoch": 0.9997191800056164,
+      "grad_norm": 12.390560150146484,
+      "learning_rate": 6.564129073627356e-12,
+      "loss": 32.6172,
+      "step": 6230
+    },
+    {
+      "epoch": 0.9998796485738356,
+      "grad_norm": 12.162409782409668,
+      "learning_rate": 0.0,
+      "loss": 32.75,
+      "step": 6231
     }
   ],
   "logging_steps": 1,
@@ -43519,12 +43638,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 2607409105207296.0,
+  "total_flos": 2614542345437184.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null
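The trainer_state.json update records the final 17 optimizer steps of the run: global_step advances from 6214 to 6231, the scheduler decays the learning rate to 0.0, and should_training_stop flips to true, so this checkpoint marks the end of the training epoch. A small sketch for inspecting the state file after downloading the checkpoint (the local path is illustrative):

```python
import json
from pathlib import Path

# Hypothetical location of the checkpoint directory from this commit.
state_path = Path("last-checkpoint/trainer_state.json")

state = json.loads(state_path.read_text())
last = state["log_history"][-1]  # most recent logged training step

print(f"global_step      : {state['global_step']}")
print(f"epoch            : {state['epoch']:.6f}")
print(f"last logged loss : {last['loss']}")
print(f"training stopped : "
      f"{state['stateful_callbacks']['TrainerControl']['args']['should_training_stop']}")
```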