vit-base-beans / trainer_state.json
{
"best_metric": 0.738013505935669,
"best_model_checkpoint": "./beans_outputs/checkpoint-1950",
"epoch": 15.0,
"eval_steps": 500,
"global_step": 1950,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07692307692307693,
"grad_norm": 2.099634885787964,
"learning_rate": 1.98974358974359e-05,
"loss": 1.1239,
"step": 10
},
{
"epoch": 0.15384615384615385,
"grad_norm": 1.7766097784042358,
"learning_rate": 1.9794871794871798e-05,
"loss": 1.1221,
"step": 20
},
{
"epoch": 0.23076923076923078,
"grad_norm": 1.9635370969772339,
"learning_rate": 1.9692307692307696e-05,
"loss": 1.1164,
"step": 30
},
{
"epoch": 0.3076923076923077,
"grad_norm": 2.459880828857422,
"learning_rate": 1.958974358974359e-05,
"loss": 1.1045,
"step": 40
},
{
"epoch": 0.38461538461538464,
"grad_norm": 1.7044235467910767,
"learning_rate": 1.9487179487179488e-05,
"loss": 1.1083,
"step": 50
},
{
"epoch": 0.46153846153846156,
"grad_norm": 1.4914859533309937,
"learning_rate": 1.9384615384615386e-05,
"loss": 1.1044,
"step": 60
},
{
"epoch": 0.5384615384615384,
"grad_norm": 2.6082077026367188,
"learning_rate": 1.9282051282051284e-05,
"loss": 1.1027,
"step": 70
},
{
"epoch": 0.6153846153846154,
"grad_norm": 2.722012996673584,
"learning_rate": 1.9179487179487182e-05,
"loss": 1.0908,
"step": 80
},
{
"epoch": 0.6923076923076923,
"grad_norm": 2.029851198196411,
"learning_rate": 1.907692307692308e-05,
"loss": 1.0941,
"step": 90
},
{
"epoch": 0.7692307692307693,
"grad_norm": 2.4984819889068604,
"learning_rate": 1.8974358974358975e-05,
"loss": 1.0913,
"step": 100
},
{
"epoch": 0.8461538461538461,
"grad_norm": 1.7019662857055664,
"learning_rate": 1.8871794871794873e-05,
"loss": 1.0945,
"step": 110
},
{
"epoch": 0.9230769230769231,
"grad_norm": 2.2935667037963867,
"learning_rate": 1.876923076923077e-05,
"loss": 1.0994,
"step": 120
},
{
"epoch": 1.0,
"grad_norm": 3.9286272525787354,
"learning_rate": 1.866666666666667e-05,
"loss": 1.0861,
"step": 130
},
{
"epoch": 1.0,
"eval_accuracy": 0.42857142857142855,
"eval_loss": 1.0880533456802368,
"eval_runtime": 0.7946,
"eval_samples_per_second": 167.381,
"eval_steps_per_second": 21.395,
"step": 130
},
{
"epoch": 1.0769230769230769,
"grad_norm": 2.083320140838623,
"learning_rate": 1.8564102564102567e-05,
"loss": 1.0772,
"step": 140
},
{
"epoch": 1.1538461538461537,
"grad_norm": 2.711829423904419,
"learning_rate": 1.8461538461538465e-05,
"loss": 1.072,
"step": 150
},
{
"epoch": 1.2307692307692308,
"grad_norm": 1.5575768947601318,
"learning_rate": 1.835897435897436e-05,
"loss": 1.0787,
"step": 160
},
{
"epoch": 1.3076923076923077,
"grad_norm": 2.3275976181030273,
"learning_rate": 1.8256410256410257e-05,
"loss": 1.0935,
"step": 170
},
{
"epoch": 1.3846153846153846,
"grad_norm": 2.1124396324157715,
"learning_rate": 1.8153846153846155e-05,
"loss": 1.0806,
"step": 180
},
{
"epoch": 1.4615384615384617,
"grad_norm": 2.1334290504455566,
"learning_rate": 1.8051282051282053e-05,
"loss": 1.0796,
"step": 190
},
{
"epoch": 1.5384615384615383,
"grad_norm": 1.7488656044006348,
"learning_rate": 1.794871794871795e-05,
"loss": 1.0762,
"step": 200
},
{
"epoch": 1.6153846153846154,
"grad_norm": 1.7498658895492554,
"learning_rate": 1.784615384615385e-05,
"loss": 1.0682,
"step": 210
},
{
"epoch": 1.6923076923076923,
"grad_norm": 3.5284996032714844,
"learning_rate": 1.7743589743589744e-05,
"loss": 1.0699,
"step": 220
},
{
"epoch": 1.7692307692307692,
"grad_norm": 2.088862895965576,
"learning_rate": 1.7641025641025642e-05,
"loss": 1.0707,
"step": 230
},
{
"epoch": 1.8461538461538463,
"grad_norm": 1.9373514652252197,
"learning_rate": 1.753846153846154e-05,
"loss": 1.0864,
"step": 240
},
{
"epoch": 1.9230769230769231,
"grad_norm": 2.00813889503479,
"learning_rate": 1.7435897435897438e-05,
"loss": 1.0681,
"step": 250
},
{
"epoch": 2.0,
"grad_norm": 3.0624523162841797,
"learning_rate": 1.7333333333333336e-05,
"loss": 1.0631,
"step": 260
},
{
"epoch": 2.0,
"eval_accuracy": 0.5413533834586466,
"eval_loss": 1.0597343444824219,
"eval_runtime": 0.7394,
"eval_samples_per_second": 179.866,
"eval_steps_per_second": 22.99,
"step": 260
},
{
"epoch": 2.076923076923077,
"grad_norm": 2.6268935203552246,
"learning_rate": 1.7230769230769234e-05,
"loss": 1.0716,
"step": 270
},
{
"epoch": 2.1538461538461537,
"grad_norm": 2.3874154090881348,
"learning_rate": 1.7128205128205128e-05,
"loss": 1.0599,
"step": 280
},
{
"epoch": 2.230769230769231,
"grad_norm": 2.3503990173339844,
"learning_rate": 1.7025641025641026e-05,
"loss": 1.054,
"step": 290
},
{
"epoch": 2.3076923076923075,
"grad_norm": 2.3312108516693115,
"learning_rate": 1.6923076923076924e-05,
"loss": 1.0436,
"step": 300
},
{
"epoch": 2.3846153846153846,
"grad_norm": 1.9090198278427124,
"learning_rate": 1.6820512820512822e-05,
"loss": 1.0569,
"step": 310
},
{
"epoch": 2.4615384615384617,
"grad_norm": 1.9505618810653687,
"learning_rate": 1.671794871794872e-05,
"loss": 1.0479,
"step": 320
},
{
"epoch": 2.5384615384615383,
"grad_norm": 1.4548966884613037,
"learning_rate": 1.6615384615384618e-05,
"loss": 1.0304,
"step": 330
},
{
"epoch": 2.6153846153846154,
"grad_norm": 2.1505134105682373,
"learning_rate": 1.6512820512820513e-05,
"loss": 1.0467,
"step": 340
},
{
"epoch": 2.6923076923076925,
"grad_norm": 2.8420169353485107,
"learning_rate": 1.641025641025641e-05,
"loss": 1.0509,
"step": 350
},
{
"epoch": 2.769230769230769,
"grad_norm": 1.8315626382827759,
"learning_rate": 1.630769230769231e-05,
"loss": 1.0407,
"step": 360
},
{
"epoch": 2.8461538461538463,
"grad_norm": 1.8499083518981934,
"learning_rate": 1.6205128205128207e-05,
"loss": 1.0344,
"step": 370
},
{
"epoch": 2.9230769230769234,
"grad_norm": 1.9010175466537476,
"learning_rate": 1.6102564102564105e-05,
"loss": 1.0219,
"step": 380
},
{
"epoch": 3.0,
"grad_norm": 4.809950828552246,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.0443,
"step": 390
},
{
"epoch": 3.0,
"eval_accuracy": 0.6691729323308271,
"eval_loss": 1.0225275754928589,
"eval_runtime": 0.7405,
"eval_samples_per_second": 179.608,
"eval_steps_per_second": 22.957,
"step": 390
},
{
"epoch": 3.076923076923077,
"grad_norm": 2.195730686187744,
"learning_rate": 1.5897435897435897e-05,
"loss": 1.0414,
"step": 400
},
{
"epoch": 3.1538461538461537,
"grad_norm": 2.1667442321777344,
"learning_rate": 1.5794871794871795e-05,
"loss": 1.0302,
"step": 410
},
{
"epoch": 3.230769230769231,
"grad_norm": 2.149664878845215,
"learning_rate": 1.5692307692307693e-05,
"loss": 1.0365,
"step": 420
},
{
"epoch": 3.3076923076923075,
"grad_norm": 2.2559187412261963,
"learning_rate": 1.558974358974359e-05,
"loss": 1.0159,
"step": 430
},
{
"epoch": 3.3846153846153846,
"grad_norm": 2.165260076522827,
"learning_rate": 1.548717948717949e-05,
"loss": 1.0274,
"step": 440
},
{
"epoch": 3.4615384615384617,
"grad_norm": 1.799578309059143,
"learning_rate": 1.5384615384615387e-05,
"loss": 1.0371,
"step": 450
},
{
"epoch": 3.5384615384615383,
"grad_norm": 2.441862106323242,
"learning_rate": 1.5282051282051282e-05,
"loss": 1.0322,
"step": 460
},
{
"epoch": 3.6153846153846154,
"grad_norm": 1.571476936340332,
"learning_rate": 1.517948717948718e-05,
"loss": 1.0046,
"step": 470
},
{
"epoch": 3.6923076923076925,
"grad_norm": 2.725184440612793,
"learning_rate": 1.5076923076923078e-05,
"loss": 1.0081,
"step": 480
},
{
"epoch": 3.769230769230769,
"grad_norm": 1.5710386037826538,
"learning_rate": 1.4974358974358976e-05,
"loss": 0.9977,
"step": 490
},
{
"epoch": 3.8461538461538463,
"grad_norm": 1.6186603307724,
"learning_rate": 1.4871794871794874e-05,
"loss": 1.0217,
"step": 500
},
{
"epoch": 3.9230769230769234,
"grad_norm": 1.8194940090179443,
"learning_rate": 1.4769230769230772e-05,
"loss": 0.997,
"step": 510
},
{
"epoch": 4.0,
"grad_norm": 5.478014945983887,
"learning_rate": 1.4666666666666666e-05,
"loss": 1.0218,
"step": 520
},
{
"epoch": 4.0,
"eval_accuracy": 0.6842105263157895,
"eval_loss": 0.9959980845451355,
"eval_runtime": 0.7638,
"eval_samples_per_second": 174.124,
"eval_steps_per_second": 22.256,
"step": 520
},
{
"epoch": 4.076923076923077,
"grad_norm": 2.7807815074920654,
"learning_rate": 1.4564102564102564e-05,
"loss": 1.0126,
"step": 530
},
{
"epoch": 4.153846153846154,
"grad_norm": 1.8810123205184937,
"learning_rate": 1.4461538461538462e-05,
"loss": 1.0142,
"step": 540
},
{
"epoch": 4.230769230769231,
"grad_norm": 2.737741708755493,
"learning_rate": 1.435897435897436e-05,
"loss": 1.0241,
"step": 550
},
{
"epoch": 4.3076923076923075,
"grad_norm": 2.183462381362915,
"learning_rate": 1.4256410256410258e-05,
"loss": 1.0324,
"step": 560
},
{
"epoch": 4.384615384615385,
"grad_norm": 1.9445362091064453,
"learning_rate": 1.4153846153846156e-05,
"loss": 0.9823,
"step": 570
},
{
"epoch": 4.461538461538462,
"grad_norm": 2.8362057209014893,
"learning_rate": 1.405128205128205e-05,
"loss": 0.9919,
"step": 580
},
{
"epoch": 4.538461538461538,
"grad_norm": 2.5588510036468506,
"learning_rate": 1.3948717948717949e-05,
"loss": 0.987,
"step": 590
},
{
"epoch": 4.615384615384615,
"grad_norm": 1.740641474723816,
"learning_rate": 1.3846153846153847e-05,
"loss": 0.9778,
"step": 600
},
{
"epoch": 4.6923076923076925,
"grad_norm": 3.247788429260254,
"learning_rate": 1.3743589743589745e-05,
"loss": 0.9978,
"step": 610
},
{
"epoch": 4.769230769230769,
"grad_norm": 2.136213779449463,
"learning_rate": 1.3641025641025643e-05,
"loss": 1.0067,
"step": 620
},
{
"epoch": 4.846153846153846,
"grad_norm": 1.8435287475585938,
"learning_rate": 1.353846153846154e-05,
"loss": 0.9691,
"step": 630
},
{
"epoch": 4.923076923076923,
"grad_norm": 2.3936328887939453,
"learning_rate": 1.3435897435897435e-05,
"loss": 0.9692,
"step": 640
},
{
"epoch": 5.0,
"grad_norm": 4.921462535858154,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.0028,
"step": 650
},
{
"epoch": 5.0,
"eval_accuracy": 0.7293233082706767,
"eval_loss": 0.9568173289299011,
"eval_runtime": 0.7715,
"eval_samples_per_second": 172.394,
"eval_steps_per_second": 22.035,
"step": 650
},
{
"epoch": 5.076923076923077,
"grad_norm": 2.051286458969116,
"learning_rate": 1.3230769230769231e-05,
"loss": 0.9638,
"step": 660
},
{
"epoch": 5.153846153846154,
"grad_norm": 2.7295546531677246,
"learning_rate": 1.312820512820513e-05,
"loss": 1.0056,
"step": 670
},
{
"epoch": 5.230769230769231,
"grad_norm": 2.3378870487213135,
"learning_rate": 1.3025641025641027e-05,
"loss": 0.9916,
"step": 680
},
{
"epoch": 5.3076923076923075,
"grad_norm": 2.754603862762451,
"learning_rate": 1.2923076923076925e-05,
"loss": 0.9869,
"step": 690
},
{
"epoch": 5.384615384615385,
"grad_norm": 2.6367554664611816,
"learning_rate": 1.2820512820512823e-05,
"loss": 0.9743,
"step": 700
},
{
"epoch": 5.461538461538462,
"grad_norm": 2.152855396270752,
"learning_rate": 1.2717948717948718e-05,
"loss": 0.9669,
"step": 710
},
{
"epoch": 5.538461538461538,
"grad_norm": 1.970173954963684,
"learning_rate": 1.2615384615384616e-05,
"loss": 0.9496,
"step": 720
},
{
"epoch": 5.615384615384615,
"grad_norm": 2.49542498588562,
"learning_rate": 1.2512820512820514e-05,
"loss": 0.9644,
"step": 730
},
{
"epoch": 5.6923076923076925,
"grad_norm": 2.5210750102996826,
"learning_rate": 1.2410256410256412e-05,
"loss": 0.9453,
"step": 740
},
{
"epoch": 5.769230769230769,
"grad_norm": 2.0949254035949707,
"learning_rate": 1.230769230769231e-05,
"loss": 0.95,
"step": 750
},
{
"epoch": 5.846153846153846,
"grad_norm": 2.167081594467163,
"learning_rate": 1.2205128205128208e-05,
"loss": 0.9519,
"step": 760
},
{
"epoch": 5.923076923076923,
"grad_norm": 3.6734519004821777,
"learning_rate": 1.2102564102564102e-05,
"loss": 0.9376,
"step": 770
},
{
"epoch": 6.0,
"grad_norm": 4.938449859619141,
"learning_rate": 1.2e-05,
"loss": 0.9752,
"step": 780
},
{
"epoch": 6.0,
"eval_accuracy": 0.7669172932330827,
"eval_loss": 0.9278604984283447,
"eval_runtime": 0.7739,
"eval_samples_per_second": 171.864,
"eval_steps_per_second": 21.968,
"step": 780
},
{
"epoch": 6.076923076923077,
"grad_norm": 2.8171768188476562,
"learning_rate": 1.1897435897435898e-05,
"loss": 0.9423,
"step": 790
},
{
"epoch": 6.153846153846154,
"grad_norm": 2.5053982734680176,
"learning_rate": 1.1794871794871796e-05,
"loss": 0.9382,
"step": 800
},
{
"epoch": 6.230769230769231,
"grad_norm": 1.875543475151062,
"learning_rate": 1.1692307692307694e-05,
"loss": 0.9318,
"step": 810
},
{
"epoch": 6.3076923076923075,
"grad_norm": 2.222604513168335,
"learning_rate": 1.1589743589743592e-05,
"loss": 0.9313,
"step": 820
},
{
"epoch": 6.384615384615385,
"grad_norm": 2.5009353160858154,
"learning_rate": 1.1487179487179487e-05,
"loss": 0.9354,
"step": 830
},
{
"epoch": 6.461538461538462,
"grad_norm": 3.2472548484802246,
"learning_rate": 1.1384615384615385e-05,
"loss": 0.9335,
"step": 840
},
{
"epoch": 6.538461538461538,
"grad_norm": 2.944819927215576,
"learning_rate": 1.1282051282051283e-05,
"loss": 0.9403,
"step": 850
},
{
"epoch": 6.615384615384615,
"grad_norm": 1.9217369556427002,
"learning_rate": 1.117948717948718e-05,
"loss": 0.9109,
"step": 860
},
{
"epoch": 6.6923076923076925,
"grad_norm": 2.2056405544281006,
"learning_rate": 1.1076923076923079e-05,
"loss": 0.9578,
"step": 870
},
{
"epoch": 6.769230769230769,
"grad_norm": 2.6266028881073,
"learning_rate": 1.0974358974358977e-05,
"loss": 0.9249,
"step": 880
},
{
"epoch": 6.846153846153846,
"grad_norm": 2.48388409614563,
"learning_rate": 1.0871794871794871e-05,
"loss": 0.9198,
"step": 890
},
{
"epoch": 6.923076923076923,
"grad_norm": 3.0685908794403076,
"learning_rate": 1.076923076923077e-05,
"loss": 0.9424,
"step": 900
},
{
"epoch": 7.0,
"grad_norm": 4.163443088531494,
"learning_rate": 1.0666666666666667e-05,
"loss": 0.924,
"step": 910
},
{
"epoch": 7.0,
"eval_accuracy": 0.8045112781954887,
"eval_loss": 0.8851932287216187,
"eval_runtime": 0.7436,
"eval_samples_per_second": 178.849,
"eval_steps_per_second": 22.86,
"step": 910
},
{
"epoch": 7.076923076923077,
"grad_norm": 2.746605634689331,
"learning_rate": 1.0564102564102565e-05,
"loss": 0.9102,
"step": 920
},
{
"epoch": 7.153846153846154,
"grad_norm": 3.5535519123077393,
"learning_rate": 1.0461538461538463e-05,
"loss": 0.9097,
"step": 930
},
{
"epoch": 7.230769230769231,
"grad_norm": 3.44745135307312,
"learning_rate": 1.0358974358974361e-05,
"loss": 0.899,
"step": 940
},
{
"epoch": 7.3076923076923075,
"grad_norm": 2.7519822120666504,
"learning_rate": 1.0256410256410256e-05,
"loss": 0.9072,
"step": 950
},
{
"epoch": 7.384615384615385,
"grad_norm": 2.7004168033599854,
"learning_rate": 1.0153846153846154e-05,
"loss": 0.9231,
"step": 960
},
{
"epoch": 7.461538461538462,
"grad_norm": 2.188138961791992,
"learning_rate": 1.0051282051282052e-05,
"loss": 0.8897,
"step": 970
},
{
"epoch": 7.538461538461538,
"grad_norm": 2.8544998168945312,
"learning_rate": 9.94871794871795e-06,
"loss": 0.9233,
"step": 980
},
{
"epoch": 7.615384615384615,
"grad_norm": 2.217107057571411,
"learning_rate": 9.846153846153848e-06,
"loss": 0.8854,
"step": 990
},
{
"epoch": 7.6923076923076925,
"grad_norm": 2.083819627761841,
"learning_rate": 9.743589743589744e-06,
"loss": 0.8808,
"step": 1000
},
{
"epoch": 7.769230769230769,
"grad_norm": 2.864044427871704,
"learning_rate": 9.641025641025642e-06,
"loss": 0.8818,
"step": 1010
},
{
"epoch": 7.846153846153846,
"grad_norm": 2.2901501655578613,
"learning_rate": 9.53846153846154e-06,
"loss": 0.9066,
"step": 1020
},
{
"epoch": 7.923076923076923,
"grad_norm": 2.5113565921783447,
"learning_rate": 9.435897435897436e-06,
"loss": 0.8577,
"step": 1030
},
{
"epoch": 8.0,
"grad_norm": 4.092830181121826,
"learning_rate": 9.333333333333334e-06,
"loss": 0.9179,
"step": 1040
},
{
"epoch": 8.0,
"eval_accuracy": 0.8120300751879699,
"eval_loss": 0.8504595160484314,
"eval_runtime": 0.7701,
"eval_samples_per_second": 172.703,
"eval_steps_per_second": 22.075,
"step": 1040
},
{
"epoch": 8.076923076923077,
"grad_norm": 1.9615790843963623,
"learning_rate": 9.230769230769232e-06,
"loss": 0.8931,
"step": 1050
},
{
"epoch": 8.153846153846153,
"grad_norm": 2.1320254802703857,
"learning_rate": 9.128205128205129e-06,
"loss": 0.8652,
"step": 1060
},
{
"epoch": 8.23076923076923,
"grad_norm": 2.0207359790802,
"learning_rate": 9.025641025641027e-06,
"loss": 0.8992,
"step": 1070
},
{
"epoch": 8.307692307692308,
"grad_norm": 1.9664427042007446,
"learning_rate": 8.923076923076925e-06,
"loss": 0.8552,
"step": 1080
},
{
"epoch": 8.384615384615385,
"grad_norm": 2.321737766265869,
"learning_rate": 8.820512820512821e-06,
"loss": 0.8973,
"step": 1090
},
{
"epoch": 8.461538461538462,
"grad_norm": 2.780322313308716,
"learning_rate": 8.717948717948719e-06,
"loss": 0.8695,
"step": 1100
},
{
"epoch": 8.538461538461538,
"grad_norm": 1.664422631263733,
"learning_rate": 8.615384615384617e-06,
"loss": 0.8711,
"step": 1110
},
{
"epoch": 8.615384615384615,
"grad_norm": 4.4292097091674805,
"learning_rate": 8.512820512820513e-06,
"loss": 0.8983,
"step": 1120
},
{
"epoch": 8.692307692307692,
"grad_norm": 3.7464237213134766,
"learning_rate": 8.410256410256411e-06,
"loss": 0.8831,
"step": 1130
},
{
"epoch": 8.76923076923077,
"grad_norm": 1.9225133657455444,
"learning_rate": 8.307692307692309e-06,
"loss": 0.8851,
"step": 1140
},
{
"epoch": 8.846153846153847,
"grad_norm": 2.6193883419036865,
"learning_rate": 8.205128205128205e-06,
"loss": 0.8718,
"step": 1150
},
{
"epoch": 8.923076923076923,
"grad_norm": 3.2045023441314697,
"learning_rate": 8.102564102564103e-06,
"loss": 0.8843,
"step": 1160
},
{
"epoch": 9.0,
"grad_norm": 4.040080547332764,
"learning_rate": 8.000000000000001e-06,
"loss": 0.9,
"step": 1170
},
{
"epoch": 9.0,
"eval_accuracy": 0.8045112781954887,
"eval_loss": 0.8327888250350952,
"eval_runtime": 0.764,
"eval_samples_per_second": 174.095,
"eval_steps_per_second": 22.253,
"step": 1170
},
{
"epoch": 9.076923076923077,
"grad_norm": 1.9053655862808228,
"learning_rate": 7.897435897435898e-06,
"loss": 0.8961,
"step": 1180
},
{
"epoch": 9.153846153846153,
"grad_norm": 2.056864023208618,
"learning_rate": 7.794871794871796e-06,
"loss": 0.8369,
"step": 1190
},
{
"epoch": 9.23076923076923,
"grad_norm": 2.1695327758789062,
"learning_rate": 7.692307692307694e-06,
"loss": 0.8496,
"step": 1200
},
{
"epoch": 9.307692307692308,
"grad_norm": 2.1930296421051025,
"learning_rate": 7.58974358974359e-06,
"loss": 0.8527,
"step": 1210
},
{
"epoch": 9.384615384615385,
"grad_norm": 2.0710272789001465,
"learning_rate": 7.487179487179488e-06,
"loss": 0.867,
"step": 1220
},
{
"epoch": 9.461538461538462,
"grad_norm": 3.325305938720703,
"learning_rate": 7.384615384615386e-06,
"loss": 0.8541,
"step": 1230
},
{
"epoch": 9.538461538461538,
"grad_norm": 2.5524134635925293,
"learning_rate": 7.282051282051282e-06,
"loss": 0.8711,
"step": 1240
},
{
"epoch": 9.615384615384615,
"grad_norm": 3.508930206298828,
"learning_rate": 7.17948717948718e-06,
"loss": 0.8263,
"step": 1250
},
{
"epoch": 9.692307692307692,
"grad_norm": 2.657597303390503,
"learning_rate": 7.076923076923078e-06,
"loss": 0.8519,
"step": 1260
},
{
"epoch": 9.76923076923077,
"grad_norm": 1.858414888381958,
"learning_rate": 6.974358974358974e-06,
"loss": 0.8306,
"step": 1270
},
{
"epoch": 9.846153846153847,
"grad_norm": 2.1380136013031006,
"learning_rate": 6.871794871794872e-06,
"loss": 0.8115,
"step": 1280
},
{
"epoch": 9.923076923076923,
"grad_norm": 2.1686654090881348,
"learning_rate": 6.76923076923077e-06,
"loss": 0.8366,
"step": 1290
},
{
"epoch": 10.0,
"grad_norm": 4.619501113891602,
"learning_rate": 6.666666666666667e-06,
"loss": 0.8084,
"step": 1300
},
{
"epoch": 10.0,
"eval_accuracy": 0.8421052631578947,
"eval_loss": 0.807080864906311,
"eval_runtime": 0.7501,
"eval_samples_per_second": 177.313,
"eval_steps_per_second": 22.664,
"step": 1300
},
{
"epoch": 10.076923076923077,
"grad_norm": 2.3092093467712402,
"learning_rate": 6.564102564102565e-06,
"loss": 0.8366,
"step": 1310
},
{
"epoch": 10.153846153846153,
"grad_norm": 2.2793080806732178,
"learning_rate": 6.461538461538463e-06,
"loss": 0.89,
"step": 1320
},
{
"epoch": 10.23076923076923,
"grad_norm": 2.369584560394287,
"learning_rate": 6.358974358974359e-06,
"loss": 0.8718,
"step": 1330
},
{
"epoch": 10.307692307692308,
"grad_norm": 2.307018995285034,
"learning_rate": 6.256410256410257e-06,
"loss": 0.8299,
"step": 1340
},
{
"epoch": 10.384615384615385,
"grad_norm": 2.086519718170166,
"learning_rate": 6.153846153846155e-06,
"loss": 0.8096,
"step": 1350
},
{
"epoch": 10.461538461538462,
"grad_norm": 2.7159907817840576,
"learning_rate": 6.051282051282051e-06,
"loss": 0.8442,
"step": 1360
},
{
"epoch": 10.538461538461538,
"grad_norm": 2.477699041366577,
"learning_rate": 5.948717948717949e-06,
"loss": 0.8133,
"step": 1370
},
{
"epoch": 10.615384615384615,
"grad_norm": 4.113278388977051,
"learning_rate": 5.846153846153847e-06,
"loss": 0.8159,
"step": 1380
},
{
"epoch": 10.692307692307692,
"grad_norm": 2.264570951461792,
"learning_rate": 5.743589743589743e-06,
"loss": 0.8086,
"step": 1390
},
{
"epoch": 10.76923076923077,
"grad_norm": 3.312915802001953,
"learning_rate": 5.641025641025641e-06,
"loss": 0.8668,
"step": 1400
},
{
"epoch": 10.846153846153847,
"grad_norm": 2.189178228378296,
"learning_rate": 5.538461538461539e-06,
"loss": 0.8298,
"step": 1410
},
{
"epoch": 10.923076923076923,
"grad_norm": 1.8542472124099731,
"learning_rate": 5.435897435897436e-06,
"loss": 0.8329,
"step": 1420
},
{
"epoch": 11.0,
"grad_norm": 4.028012752532959,
"learning_rate": 5.333333333333334e-06,
"loss": 0.8306,
"step": 1430
},
{
"epoch": 11.0,
"eval_accuracy": 0.8345864661654135,
"eval_loss": 0.7760081887245178,
"eval_runtime": 0.7954,
"eval_samples_per_second": 167.205,
"eval_steps_per_second": 21.372,
"step": 1430
},
{
"epoch": 11.076923076923077,
"grad_norm": 1.9505723714828491,
"learning_rate": 5.230769230769232e-06,
"loss": 0.8461,
"step": 1440
},
{
"epoch": 11.153846153846153,
"grad_norm": 3.543419122695923,
"learning_rate": 5.128205128205128e-06,
"loss": 0.8287,
"step": 1450
},
{
"epoch": 11.23076923076923,
"grad_norm": 2.6018764972686768,
"learning_rate": 5.025641025641026e-06,
"loss": 0.7996,
"step": 1460
},
{
"epoch": 11.307692307692308,
"grad_norm": 2.7204878330230713,
"learning_rate": 4.923076923076924e-06,
"loss": 0.8218,
"step": 1470
},
{
"epoch": 11.384615384615385,
"grad_norm": 2.6924397945404053,
"learning_rate": 4.820512820512821e-06,
"loss": 0.8152,
"step": 1480
},
{
"epoch": 11.461538461538462,
"grad_norm": 2.1294894218444824,
"learning_rate": 4.717948717948718e-06,
"loss": 0.8452,
"step": 1490
},
{
"epoch": 11.538461538461538,
"grad_norm": 2.45246958732605,
"learning_rate": 4.615384615384616e-06,
"loss": 0.8316,
"step": 1500
},
{
"epoch": 11.615384615384615,
"grad_norm": 3.2710139751434326,
"learning_rate": 4.512820512820513e-06,
"loss": 0.8272,
"step": 1510
},
{
"epoch": 11.692307692307692,
"grad_norm": 3.3108620643615723,
"learning_rate": 4.4102564102564104e-06,
"loss": 0.8539,
"step": 1520
},
{
"epoch": 11.76923076923077,
"grad_norm": 2.079916000366211,
"learning_rate": 4.307692307692308e-06,
"loss": 0.8067,
"step": 1530
},
{
"epoch": 11.846153846153847,
"grad_norm": 2.2175967693328857,
"learning_rate": 4.2051282051282055e-06,
"loss": 0.7943,
"step": 1540
},
{
"epoch": 11.923076923076923,
"grad_norm": 2.064258098602295,
"learning_rate": 4.102564102564103e-06,
"loss": 0.8268,
"step": 1550
},
{
"epoch": 12.0,
"grad_norm": 3.4313971996307373,
"learning_rate": 4.000000000000001e-06,
"loss": 0.8031,
"step": 1560
},
{
"epoch": 12.0,
"eval_accuracy": 0.8345864661654135,
"eval_loss": 0.7563135623931885,
"eval_runtime": 0.7817,
"eval_samples_per_second": 170.134,
"eval_steps_per_second": 21.746,
"step": 1560
},
{
"epoch": 12.076923076923077,
"grad_norm": 2.1071786880493164,
"learning_rate": 3.897435897435898e-06,
"loss": 0.7877,
"step": 1570
},
{
"epoch": 12.153846153846153,
"grad_norm": 2.6646640300750732,
"learning_rate": 3.794871794871795e-06,
"loss": 0.8223,
"step": 1580
},
{
"epoch": 12.23076923076923,
"grad_norm": 1.7268288135528564,
"learning_rate": 3.692307692307693e-06,
"loss": 0.8204,
"step": 1590
},
{
"epoch": 12.307692307692308,
"grad_norm": 2.982988119125366,
"learning_rate": 3.58974358974359e-06,
"loss": 0.8336,
"step": 1600
},
{
"epoch": 12.384615384615385,
"grad_norm": 2.559271812438965,
"learning_rate": 3.487179487179487e-06,
"loss": 0.8064,
"step": 1610
},
{
"epoch": 12.461538461538462,
"grad_norm": 2.528869390487671,
"learning_rate": 3.384615384615385e-06,
"loss": 0.793,
"step": 1620
},
{
"epoch": 12.538461538461538,
"grad_norm": 2.1543517112731934,
"learning_rate": 3.2820512820512823e-06,
"loss": 0.8031,
"step": 1630
},
{
"epoch": 12.615384615384615,
"grad_norm": 1.7778912782669067,
"learning_rate": 3.1794871794871795e-06,
"loss": 0.8086,
"step": 1640
},
{
"epoch": 12.692307692307692,
"grad_norm": 3.2962191104888916,
"learning_rate": 3.0769230769230774e-06,
"loss": 0.7617,
"step": 1650
},
{
"epoch": 12.76923076923077,
"grad_norm": 2.336732864379883,
"learning_rate": 2.9743589743589746e-06,
"loss": 0.8541,
"step": 1660
},
{
"epoch": 12.846153846153847,
"grad_norm": 1.9721437692642212,
"learning_rate": 2.8717948717948717e-06,
"loss": 0.8249,
"step": 1670
},
{
"epoch": 12.923076923076923,
"grad_norm": 2.0368008613586426,
"learning_rate": 2.7692307692307697e-06,
"loss": 0.795,
"step": 1680
},
{
"epoch": 13.0,
"grad_norm": 3.8102405071258545,
"learning_rate": 2.666666666666667e-06,
"loss": 0.8138,
"step": 1690
},
{
"epoch": 13.0,
"eval_accuracy": 0.8421052631578947,
"eval_loss": 0.753366231918335,
"eval_runtime": 0.7711,
"eval_samples_per_second": 172.483,
"eval_steps_per_second": 22.047,
"step": 1690
},
{
"epoch": 13.076923076923077,
"grad_norm": 3.0844614505767822,
"learning_rate": 2.564102564102564e-06,
"loss": 0.7901,
"step": 1700
},
{
"epoch": 13.153846153846153,
"grad_norm": 2.517000913619995,
"learning_rate": 2.461538461538462e-06,
"loss": 0.8205,
"step": 1710
},
{
"epoch": 13.23076923076923,
"grad_norm": 3.047574520111084,
"learning_rate": 2.358974358974359e-06,
"loss": 0.8113,
"step": 1720
},
{
"epoch": 13.307692307692308,
"grad_norm": 1.9097496271133423,
"learning_rate": 2.2564102564102566e-06,
"loss": 0.817,
"step": 1730
},
{
"epoch": 13.384615384615385,
"grad_norm": 2.553558826446533,
"learning_rate": 2.153846153846154e-06,
"loss": 0.7794,
"step": 1740
},
{
"epoch": 13.461538461538462,
"grad_norm": 3.915072202682495,
"learning_rate": 2.0512820512820513e-06,
"loss": 0.8238,
"step": 1750
},
{
"epoch": 13.538461538461538,
"grad_norm": 2.7563774585723877,
"learning_rate": 1.948717948717949e-06,
"loss": 0.8464,
"step": 1760
},
{
"epoch": 13.615384615384615,
"grad_norm": 1.9687329530715942,
"learning_rate": 1.8461538461538465e-06,
"loss": 0.7998,
"step": 1770
},
{
"epoch": 13.692307692307692,
"grad_norm": 1.9707388877868652,
"learning_rate": 1.7435897435897436e-06,
"loss": 0.7827,
"step": 1780
},
{
"epoch": 13.76923076923077,
"grad_norm": 1.8745958805084229,
"learning_rate": 1.6410256410256412e-06,
"loss": 0.8211,
"step": 1790
},
{
"epoch": 13.846153846153847,
"grad_norm": 3.193314552307129,
"learning_rate": 1.5384615384615387e-06,
"loss": 0.7874,
"step": 1800
},
{
"epoch": 13.923076923076923,
"grad_norm": 2.4726743698120117,
"learning_rate": 1.4358974358974359e-06,
"loss": 0.7323,
"step": 1810
},
{
"epoch": 14.0,
"grad_norm": 4.2939653396606445,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.8178,
"step": 1820
},
{
"epoch": 14.0,
"eval_accuracy": 0.8270676691729323,
"eval_loss": 0.7507623434066772,
"eval_runtime": 0.7401,
"eval_samples_per_second": 179.698,
"eval_steps_per_second": 22.969,
"step": 1820
},
{
"epoch": 14.076923076923077,
"grad_norm": 2.51151967048645,
"learning_rate": 1.230769230769231e-06,
"loss": 0.8201,
"step": 1830
},
{
"epoch": 14.153846153846153,
"grad_norm": 2.035071849822998,
"learning_rate": 1.1282051282051283e-06,
"loss": 0.7885,
"step": 1840
},
{
"epoch": 14.23076923076923,
"grad_norm": 2.924006938934326,
"learning_rate": 1.0256410256410257e-06,
"loss": 0.7802,
"step": 1850
},
{
"epoch": 14.307692307692308,
"grad_norm": 3.3788881301879883,
"learning_rate": 9.230769230769232e-07,
"loss": 0.7717,
"step": 1860
},
{
"epoch": 14.384615384615385,
"grad_norm": 2.6425819396972656,
"learning_rate": 8.205128205128206e-07,
"loss": 0.7871,
"step": 1870
},
{
"epoch": 14.461538461538462,
"grad_norm": 2.781729221343994,
"learning_rate": 7.179487179487179e-07,
"loss": 0.7859,
"step": 1880
},
{
"epoch": 14.538461538461538,
"grad_norm": 3.0325913429260254,
"learning_rate": 6.153846153846155e-07,
"loss": 0.7783,
"step": 1890
},
{
"epoch": 14.615384615384615,
"grad_norm": 2.7311503887176514,
"learning_rate": 5.128205128205128e-07,
"loss": 0.8273,
"step": 1900
},
{
"epoch": 14.692307692307692,
"grad_norm": 2.3128809928894043,
"learning_rate": 4.102564102564103e-07,
"loss": 0.7977,
"step": 1910
},
{
"epoch": 14.76923076923077,
"grad_norm": 3.4510748386383057,
"learning_rate": 3.0769230769230774e-07,
"loss": 0.8029,
"step": 1920
},
{
"epoch": 14.846153846153847,
"grad_norm": 3.1088345050811768,
"learning_rate": 2.0512820512820514e-07,
"loss": 0.8509,
"step": 1930
},
{
"epoch": 14.923076923076923,
"grad_norm": 2.3365509510040283,
"learning_rate": 1.0256410256410257e-07,
"loss": 0.7569,
"step": 1940
},
{
"epoch": 15.0,
"grad_norm": 4.560791015625,
"learning_rate": 0.0,
"loss": 0.7901,
"step": 1950
},
{
"epoch": 15.0,
"eval_accuracy": 0.8195488721804511,
"eval_loss": 0.738013505935669,
"eval_runtime": 0.796,
"eval_samples_per_second": 167.085,
"eval_steps_per_second": 21.357,
"step": 1950
},
{
"epoch": 15.0,
"step": 1950,
"total_flos": 1.5658365504595968e+17,
"train_loss": 0.9225189507313264,
"train_runtime": 140.2,
"train_samples_per_second": 110.628,
"train_steps_per_second": 13.909
}
],
"logging_steps": 10,
"max_steps": 1950,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5658365504595968e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
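
A minimal sketch (assuming only the Python standard library and a local copy of this file saved as trainer_state.json) of how one might read the per-epoch evaluation metrics recorded in log_history above; entries that contain "eval_accuracy" are the evaluation logs, the rest are step-wise training logs (loss, learning_rate, grad_norm):

import json

# Load the trainer state written at the end of training.
with open("trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric:", state["best_metric"])

# Print accuracy and loss for each evaluation pass (one per epoch here).
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:>4}: "
              f"accuracy={entry['eval_accuracy']:.4f}, loss={entry['eval_loss']:.4f}")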