vit-base-beans / trainer_state.json
{
"best_metric": 0.7388833165168762,
"best_model_checkpoint": "./beans_outputs/checkpoint-1950",
"epoch": 15.0,
"eval_steps": 500,
"global_step": 1950,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07692307692307693,
"grad_norm": 2.1216182708740234,
"learning_rate": 1.98974358974359e-05,
"loss": 1.1239,
"step": 10
},
{
"epoch": 0.15384615384615385,
"grad_norm": 1.8308407068252563,
"learning_rate": 1.9794871794871798e-05,
"loss": 1.1221,
"step": 20
},
{
"epoch": 0.23076923076923078,
"grad_norm": 1.9811038970947266,
"learning_rate": 1.9692307692307696e-05,
"loss": 1.1163,
"step": 30
},
{
"epoch": 0.3076923076923077,
"grad_norm": 2.4690616130828857,
"learning_rate": 1.958974358974359e-05,
"loss": 1.1046,
"step": 40
},
{
"epoch": 0.38461538461538464,
"grad_norm": 1.6778465509414673,
"learning_rate": 1.9487179487179488e-05,
"loss": 1.1082,
"step": 50
},
{
"epoch": 0.46153846153846156,
"grad_norm": 1.4839682579040527,
"learning_rate": 1.9384615384615386e-05,
"loss": 1.1043,
"step": 60
},
{
"epoch": 0.5384615384615384,
"grad_norm": 2.6265501976013184,
"learning_rate": 1.9282051282051284e-05,
"loss": 1.1028,
"step": 70
},
{
"epoch": 0.6153846153846154,
"grad_norm": 2.565593719482422,
"learning_rate": 1.9179487179487182e-05,
"loss": 1.0909,
"step": 80
},
{
"epoch": 0.6923076923076923,
"grad_norm": 2.020240545272827,
"learning_rate": 1.907692307692308e-05,
"loss": 1.0939,
"step": 90
},
{
"epoch": 0.7692307692307693,
"grad_norm": 2.49810791015625,
"learning_rate": 1.8974358974358975e-05,
"loss": 1.0917,
"step": 100
},
{
"epoch": 0.8461538461538461,
"grad_norm": 1.8923050165176392,
"learning_rate": 1.8871794871794873e-05,
"loss": 1.0937,
"step": 110
},
{
"epoch": 0.9230769230769231,
"grad_norm": 2.2869341373443604,
"learning_rate": 1.876923076923077e-05,
"loss": 1.1001,
"step": 120
},
{
"epoch": 1.0,
"grad_norm": 4.069129467010498,
"learning_rate": 1.866666666666667e-05,
"loss": 1.0863,
"step": 130
},
{
"epoch": 1.0,
"eval_accuracy": 0.42857142857142855,
"eval_loss": 1.0881553888320923,
"eval_runtime": 0.9817,
"eval_samples_per_second": 135.479,
"eval_steps_per_second": 17.317,
"step": 130
},
{
"epoch": 1.0769230769230769,
"grad_norm": 1.7320090532302856,
"learning_rate": 1.8564102564102567e-05,
"loss": 1.0792,
"step": 140
},
{
"epoch": 1.1538461538461537,
"grad_norm": 2.5494987964630127,
"learning_rate": 1.8461538461538465e-05,
"loss": 1.072,
"step": 150
},
{
"epoch": 1.2307692307692308,
"grad_norm": 1.4740803241729736,
"learning_rate": 1.835897435897436e-05,
"loss": 1.0773,
"step": 160
},
{
"epoch": 1.3076923076923077,
"grad_norm": 2.3478026390075684,
"learning_rate": 1.8256410256410257e-05,
"loss": 1.0907,
"step": 170
},
{
"epoch": 1.3846153846153846,
"grad_norm": 2.1786012649536133,
"learning_rate": 1.8153846153846155e-05,
"loss": 1.08,
"step": 180
},
{
"epoch": 1.4615384615384617,
"grad_norm": 2.1593339443206787,
"learning_rate": 1.8051282051282053e-05,
"loss": 1.0789,
"step": 190
},
{
"epoch": 1.5384615384615383,
"grad_norm": 2.1174795627593994,
"learning_rate": 1.794871794871795e-05,
"loss": 1.0779,
"step": 200
},
{
"epoch": 1.6153846153846154,
"grad_norm": 1.8251662254333496,
"learning_rate": 1.784615384615385e-05,
"loss": 1.067,
"step": 210
},
{
"epoch": 1.6923076923076923,
"grad_norm": 3.535820245742798,
"learning_rate": 1.7743589743589744e-05,
"loss": 1.0701,
"step": 220
},
{
"epoch": 1.7692307692307692,
"grad_norm": 2.0242528915405273,
"learning_rate": 1.7641025641025642e-05,
"loss": 1.0714,
"step": 230
},
{
"epoch": 1.8461538461538463,
"grad_norm": 2.1979262828826904,
"learning_rate": 1.753846153846154e-05,
"loss": 1.0868,
"step": 240
},
{
"epoch": 1.9230769230769231,
"grad_norm": 2.096327543258667,
"learning_rate": 1.7435897435897438e-05,
"loss": 1.0682,
"step": 250
},
{
"epoch": 2.0,
"grad_norm": 3.008599042892456,
"learning_rate": 1.7333333333333336e-05,
"loss": 1.063,
"step": 260
},
{
"epoch": 2.0,
"eval_accuracy": 0.5413533834586466,
"eval_loss": 1.0589852333068848,
"eval_runtime": 0.7866,
"eval_samples_per_second": 169.088,
"eval_steps_per_second": 21.613,
"step": 260
},
{
"epoch": 2.076923076923077,
"grad_norm": 2.03446888923645,
"learning_rate": 1.7230769230769234e-05,
"loss": 1.0712,
"step": 270
},
{
"epoch": 2.1538461538461537,
"grad_norm": 2.4047350883483887,
"learning_rate": 1.7128205128205128e-05,
"loss": 1.0593,
"step": 280
},
{
"epoch": 2.230769230769231,
"grad_norm": 2.37992262840271,
"learning_rate": 1.7025641025641026e-05,
"loss": 1.0577,
"step": 290
},
{
"epoch": 2.3076923076923075,
"grad_norm": 2.940575361251831,
"learning_rate": 1.6923076923076924e-05,
"loss": 1.044,
"step": 300
},
{
"epoch": 2.3846153846153846,
"grad_norm": 1.983780026435852,
"learning_rate": 1.6820512820512822e-05,
"loss": 1.0573,
"step": 310
},
{
"epoch": 2.4615384615384617,
"grad_norm": 1.9737600088119507,
"learning_rate": 1.671794871794872e-05,
"loss": 1.048,
"step": 320
},
{
"epoch": 2.5384615384615383,
"grad_norm": 1.4561364650726318,
"learning_rate": 1.6615384615384618e-05,
"loss": 1.0317,
"step": 330
},
{
"epoch": 2.6153846153846154,
"grad_norm": 2.187842845916748,
"learning_rate": 1.6512820512820513e-05,
"loss": 1.0482,
"step": 340
},
{
"epoch": 2.6923076923076925,
"grad_norm": 1.9264109134674072,
"learning_rate": 1.641025641025641e-05,
"loss": 1.0491,
"step": 350
},
{
"epoch": 2.769230769230769,
"grad_norm": 1.8492430448532104,
"learning_rate": 1.630769230769231e-05,
"loss": 1.0409,
"step": 360
},
{
"epoch": 2.8461538461538463,
"grad_norm": 2.1464972496032715,
"learning_rate": 1.6205128205128207e-05,
"loss": 1.0359,
"step": 370
},
{
"epoch": 2.9230769230769234,
"grad_norm": 1.8184483051300049,
"learning_rate": 1.6102564102564105e-05,
"loss": 1.0237,
"step": 380
},
{
"epoch": 3.0,
"grad_norm": 4.10977029800415,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.0447,
"step": 390
},
{
"epoch": 3.0,
"eval_accuracy": 0.6992481203007519,
"eval_loss": 1.022922396659851,
"eval_runtime": 0.831,
"eval_samples_per_second": 160.057,
"eval_steps_per_second": 20.458,
"step": 390
},
{
"epoch": 3.076923076923077,
"grad_norm": 2.4164669513702393,
"learning_rate": 1.5897435897435897e-05,
"loss": 1.0417,
"step": 400
},
{
"epoch": 3.1538461538461537,
"grad_norm": 1.9611194133758545,
"learning_rate": 1.5794871794871795e-05,
"loss": 1.03,
"step": 410
},
{
"epoch": 3.230769230769231,
"grad_norm": 2.1152236461639404,
"learning_rate": 1.5692307692307693e-05,
"loss": 1.038,
"step": 420
},
{
"epoch": 3.3076923076923075,
"grad_norm": 2.2656948566436768,
"learning_rate": 1.558974358974359e-05,
"loss": 1.0163,
"step": 430
},
{
"epoch": 3.3846153846153846,
"grad_norm": 2.1608102321624756,
"learning_rate": 1.548717948717949e-05,
"loss": 1.0277,
"step": 440
},
{
"epoch": 3.4615384615384617,
"grad_norm": 1.7433905601501465,
"learning_rate": 1.5384615384615387e-05,
"loss": 1.0379,
"step": 450
},
{
"epoch": 3.5384615384615383,
"grad_norm": 2.625889778137207,
"learning_rate": 1.5282051282051282e-05,
"loss": 1.0314,
"step": 460
},
{
"epoch": 3.6153846153846154,
"grad_norm": 1.6532872915267944,
"learning_rate": 1.517948717948718e-05,
"loss": 1.0052,
"step": 470
},
{
"epoch": 3.6923076923076925,
"grad_norm": 2.715625524520874,
"learning_rate": 1.5076923076923078e-05,
"loss": 1.0101,
"step": 480
},
{
"epoch": 3.769230769230769,
"grad_norm": 1.6237256526947021,
"learning_rate": 1.4974358974358976e-05,
"loss": 0.9981,
"step": 490
},
{
"epoch": 3.8461538461538463,
"grad_norm": 1.582321286201477,
"learning_rate": 1.4871794871794874e-05,
"loss": 1.0226,
"step": 500
},
{
"epoch": 3.9230769230769234,
"grad_norm": 1.7656166553497314,
"learning_rate": 1.4769230769230772e-05,
"loss": 0.9989,
"step": 510
},
{
"epoch": 4.0,
"grad_norm": 5.592857837677002,
"learning_rate": 1.4666666666666666e-05,
"loss": 1.0223,
"step": 520
},
{
"epoch": 4.0,
"eval_accuracy": 0.6917293233082706,
"eval_loss": 0.9968266487121582,
"eval_runtime": 0.8013,
"eval_samples_per_second": 165.971,
"eval_steps_per_second": 21.214,
"step": 520
},
{
"epoch": 4.076923076923077,
"grad_norm": 2.7708559036254883,
"learning_rate": 1.4564102564102564e-05,
"loss": 1.0136,
"step": 530
},
{
"epoch": 4.153846153846154,
"grad_norm": 1.880313515663147,
"learning_rate": 1.4461538461538462e-05,
"loss": 1.0166,
"step": 540
},
{
"epoch": 4.230769230769231,
"grad_norm": 2.6722023487091064,
"learning_rate": 1.435897435897436e-05,
"loss": 1.0238,
"step": 550
},
{
"epoch": 4.3076923076923075,
"grad_norm": 2.3504159450531006,
"learning_rate": 1.4256410256410258e-05,
"loss": 1.0338,
"step": 560
},
{
"epoch": 4.384615384615385,
"grad_norm": 2.2466416358947754,
"learning_rate": 1.4153846153846156e-05,
"loss": 0.9838,
"step": 570
},
{
"epoch": 4.461538461538462,
"grad_norm": 2.590592861175537,
"learning_rate": 1.405128205128205e-05,
"loss": 0.9915,
"step": 580
},
{
"epoch": 4.538461538461538,
"grad_norm": 2.3943469524383545,
"learning_rate": 1.3948717948717949e-05,
"loss": 0.9885,
"step": 590
},
{
"epoch": 4.615384615384615,
"grad_norm": 1.8981530666351318,
"learning_rate": 1.3846153846153847e-05,
"loss": 0.9802,
"step": 600
},
{
"epoch": 4.6923076923076925,
"grad_norm": 3.2020835876464844,
"learning_rate": 1.3743589743589745e-05,
"loss": 0.9993,
"step": 610
},
{
"epoch": 4.769230769230769,
"grad_norm": 2.171642541885376,
"learning_rate": 1.3641025641025643e-05,
"loss": 1.0033,
"step": 620
},
{
"epoch": 4.846153846153846,
"grad_norm": 1.8594719171524048,
"learning_rate": 1.353846153846154e-05,
"loss": 0.9689,
"step": 630
},
{
"epoch": 4.923076923076923,
"grad_norm": 2.3601503372192383,
"learning_rate": 1.3435897435897435e-05,
"loss": 0.9703,
"step": 640
},
{
"epoch": 5.0,
"grad_norm": 5.1482744216918945,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.0,
"step": 650
},
{
"epoch": 5.0,
"eval_accuracy": 0.7518796992481203,
"eval_loss": 0.9575085639953613,
"eval_runtime": 0.8114,
"eval_samples_per_second": 163.92,
"eval_steps_per_second": 20.952,
"step": 650
},
{
"epoch": 5.076923076923077,
"grad_norm": 1.8753620386123657,
"learning_rate": 1.3230769230769231e-05,
"loss": 0.9664,
"step": 660
},
{
"epoch": 5.153846153846154,
"grad_norm": 2.668313503265381,
"learning_rate": 1.312820512820513e-05,
"loss": 1.0055,
"step": 670
},
{
"epoch": 5.230769230769231,
"grad_norm": 2.544369697570801,
"learning_rate": 1.3025641025641027e-05,
"loss": 0.9936,
"step": 680
},
{
"epoch": 5.3076923076923075,
"grad_norm": 2.7921175956726074,
"learning_rate": 1.2923076923076925e-05,
"loss": 0.9852,
"step": 690
},
{
"epoch": 5.384615384615385,
"grad_norm": 2.6936609745025635,
"learning_rate": 1.2820512820512823e-05,
"loss": 0.9725,
"step": 700
},
{
"epoch": 5.461538461538462,
"grad_norm": 2.4392247200012207,
"learning_rate": 1.2717948717948718e-05,
"loss": 0.9653,
"step": 710
},
{
"epoch": 5.538461538461538,
"grad_norm": 1.9628013372421265,
"learning_rate": 1.2615384615384616e-05,
"loss": 0.9497,
"step": 720
},
{
"epoch": 5.615384615384615,
"grad_norm": 2.3558847904205322,
"learning_rate": 1.2512820512820514e-05,
"loss": 0.9646,
"step": 730
},
{
"epoch": 5.6923076923076925,
"grad_norm": 2.4973325729370117,
"learning_rate": 1.2410256410256412e-05,
"loss": 0.9458,
"step": 740
},
{
"epoch": 5.769230769230769,
"grad_norm": 2.091280221939087,
"learning_rate": 1.230769230769231e-05,
"loss": 0.9506,
"step": 750
},
{
"epoch": 5.846153846153846,
"grad_norm": 2.1584551334381104,
"learning_rate": 1.2205128205128208e-05,
"loss": 0.9517,
"step": 760
},
{
"epoch": 5.923076923076923,
"grad_norm": 3.8231847286224365,
"learning_rate": 1.2102564102564102e-05,
"loss": 0.9375,
"step": 770
},
{
"epoch": 6.0,
"grad_norm": 4.746103286743164,
"learning_rate": 1.2e-05,
"loss": 0.9726,
"step": 780
},
{
"epoch": 6.0,
"eval_accuracy": 0.7744360902255639,
"eval_loss": 0.92984938621521,
"eval_runtime": 0.8125,
"eval_samples_per_second": 163.686,
"eval_steps_per_second": 20.922,
"step": 780
},
{
"epoch": 6.076923076923077,
"grad_norm": 2.8764736652374268,
"learning_rate": 1.1897435897435898e-05,
"loss": 0.9424,
"step": 790
},
{
"epoch": 6.153846153846154,
"grad_norm": 2.512113332748413,
"learning_rate": 1.1794871794871796e-05,
"loss": 0.9378,
"step": 800
},
{
"epoch": 6.230769230769231,
"grad_norm": 1.8056284189224243,
"learning_rate": 1.1692307692307694e-05,
"loss": 0.9309,
"step": 810
},
{
"epoch": 6.3076923076923075,
"grad_norm": 2.3125550746917725,
"learning_rate": 1.1589743589743592e-05,
"loss": 0.9316,
"step": 820
},
{
"epoch": 6.384615384615385,
"grad_norm": 2.485017776489258,
"learning_rate": 1.1487179487179487e-05,
"loss": 0.9359,
"step": 830
},
{
"epoch": 6.461538461538462,
"grad_norm": 3.3460640907287598,
"learning_rate": 1.1384615384615385e-05,
"loss": 0.9316,
"step": 840
},
{
"epoch": 6.538461538461538,
"grad_norm": 2.9212265014648438,
"learning_rate": 1.1282051282051283e-05,
"loss": 0.9417,
"step": 850
},
{
"epoch": 6.615384615384615,
"grad_norm": 1.9060611724853516,
"learning_rate": 1.117948717948718e-05,
"loss": 0.9127,
"step": 860
},
{
"epoch": 6.6923076923076925,
"grad_norm": 2.729116201400757,
"learning_rate": 1.1076923076923079e-05,
"loss": 0.9581,
"step": 870
},
{
"epoch": 6.769230769230769,
"grad_norm": 2.170494794845581,
"learning_rate": 1.0974358974358977e-05,
"loss": 0.9267,
"step": 880
},
{
"epoch": 6.846153846153846,
"grad_norm": 2.36336350440979,
"learning_rate": 1.0871794871794871e-05,
"loss": 0.9209,
"step": 890
},
{
"epoch": 6.923076923076923,
"grad_norm": 3.067629098892212,
"learning_rate": 1.076923076923077e-05,
"loss": 0.9425,
"step": 900
},
{
"epoch": 7.0,
"grad_norm": 4.193312168121338,
"learning_rate": 1.0666666666666667e-05,
"loss": 0.9258,
"step": 910
},
{
"epoch": 7.0,
"eval_accuracy": 0.8045112781954887,
"eval_loss": 0.8871035575866699,
"eval_runtime": 0.9311,
"eval_samples_per_second": 142.845,
"eval_steps_per_second": 18.258,
"step": 910
},
{
"epoch": 7.076923076923077,
"grad_norm": 2.743016004562378,
"learning_rate": 1.0564102564102565e-05,
"loss": 0.9111,
"step": 920
},
{
"epoch": 7.153846153846154,
"grad_norm": 3.4307682514190674,
"learning_rate": 1.0461538461538463e-05,
"loss": 0.9108,
"step": 930
},
{
"epoch": 7.230769230769231,
"grad_norm": 3.426872968673706,
"learning_rate": 1.0358974358974361e-05,
"loss": 0.9011,
"step": 940
},
{
"epoch": 7.3076923076923075,
"grad_norm": 2.8081014156341553,
"learning_rate": 1.0256410256410256e-05,
"loss": 0.9076,
"step": 950
},
{
"epoch": 7.384615384615385,
"grad_norm": 2.9387893676757812,
"learning_rate": 1.0153846153846154e-05,
"loss": 0.9236,
"step": 960
},
{
"epoch": 7.461538461538462,
"grad_norm": 2.192082643508911,
"learning_rate": 1.0051282051282052e-05,
"loss": 0.8889,
"step": 970
},
{
"epoch": 7.538461538461538,
"grad_norm": 2.60426926612854,
"learning_rate": 9.94871794871795e-06,
"loss": 0.9243,
"step": 980
},
{
"epoch": 7.615384615384615,
"grad_norm": 2.2395219802856445,
"learning_rate": 9.846153846153848e-06,
"loss": 0.8859,
"step": 990
},
{
"epoch": 7.6923076923076925,
"grad_norm": 2.0519468784332275,
"learning_rate": 9.743589743589744e-06,
"loss": 0.8821,
"step": 1000
},
{
"epoch": 7.769230769230769,
"grad_norm": 3.1931142807006836,
"learning_rate": 9.641025641025642e-06,
"loss": 0.8825,
"step": 1010
},
{
"epoch": 7.846153846153846,
"grad_norm": 2.4902703762054443,
"learning_rate": 9.53846153846154e-06,
"loss": 0.9106,
"step": 1020
},
{
"epoch": 7.923076923076923,
"grad_norm": 2.7248635292053223,
"learning_rate": 9.435897435897436e-06,
"loss": 0.8597,
"step": 1030
},
{
"epoch": 8.0,
"grad_norm": 4.114623069763184,
"learning_rate": 9.333333333333334e-06,
"loss": 0.9203,
"step": 1040
},
{
"epoch": 8.0,
"eval_accuracy": 0.8345864661654135,
"eval_loss": 0.8486866354942322,
"eval_runtime": 0.8033,
"eval_samples_per_second": 165.565,
"eval_steps_per_second": 21.162,
"step": 1040
},
{
"epoch": 8.076923076923077,
"grad_norm": 1.7978770732879639,
"learning_rate": 9.230769230769232e-06,
"loss": 0.8922,
"step": 1050
},
{
"epoch": 8.153846153846153,
"grad_norm": 2.1388049125671387,
"learning_rate": 9.128205128205129e-06,
"loss": 0.8696,
"step": 1060
},
{
"epoch": 8.23076923076923,
"grad_norm": 3.935715913772583,
"learning_rate": 9.025641025641027e-06,
"loss": 0.9006,
"step": 1070
},
{
"epoch": 8.307692307692308,
"grad_norm": 2.129542112350464,
"learning_rate": 8.923076923076925e-06,
"loss": 0.8603,
"step": 1080
},
{
"epoch": 8.384615384615385,
"grad_norm": 2.4084651470184326,
"learning_rate": 8.820512820512821e-06,
"loss": 0.8989,
"step": 1090
},
{
"epoch": 8.461538461538462,
"grad_norm": 2.7610435485839844,
"learning_rate": 8.717948717948719e-06,
"loss": 0.8732,
"step": 1100
},
{
"epoch": 8.538461538461538,
"grad_norm": 1.6848160028457642,
"learning_rate": 8.615384615384617e-06,
"loss": 0.8715,
"step": 1110
},
{
"epoch": 8.615384615384615,
"grad_norm": 3.277689218521118,
"learning_rate": 8.512820512820513e-06,
"loss": 0.8989,
"step": 1120
},
{
"epoch": 8.692307692307692,
"grad_norm": 3.7184839248657227,
"learning_rate": 8.410256410256411e-06,
"loss": 0.8822,
"step": 1130
},
{
"epoch": 8.76923076923077,
"grad_norm": 2.3165555000305176,
"learning_rate": 8.307692307692309e-06,
"loss": 0.889,
"step": 1140
},
{
"epoch": 8.846153846153847,
"grad_norm": 2.629028558731079,
"learning_rate": 8.205128205128205e-06,
"loss": 0.8753,
"step": 1150
},
{
"epoch": 8.923076923076923,
"grad_norm": 2.8620731830596924,
"learning_rate": 8.102564102564103e-06,
"loss": 0.8855,
"step": 1160
},
{
"epoch": 9.0,
"grad_norm": 3.924490451812744,
"learning_rate": 8.000000000000001e-06,
"loss": 0.9038,
"step": 1170
},
{
"epoch": 9.0,
"eval_accuracy": 0.8120300751879699,
"eval_loss": 0.8329855799674988,
"eval_runtime": 0.781,
"eval_samples_per_second": 170.303,
"eval_steps_per_second": 21.768,
"step": 1170
},
{
"epoch": 9.076923076923077,
"grad_norm": 1.9303114414215088,
"learning_rate": 7.897435897435898e-06,
"loss": 0.8952,
"step": 1180
},
{
"epoch": 9.153846153846153,
"grad_norm": 2.1960716247558594,
"learning_rate": 7.794871794871796e-06,
"loss": 0.8401,
"step": 1190
},
{
"epoch": 9.23076923076923,
"grad_norm": 2.7040772438049316,
"learning_rate": 7.692307692307694e-06,
"loss": 0.8488,
"step": 1200
},
{
"epoch": 9.307692307692308,
"grad_norm": 2.097287654876709,
"learning_rate": 7.58974358974359e-06,
"loss": 0.8544,
"step": 1210
},
{
"epoch": 9.384615384615385,
"grad_norm": 2.012044906616211,
"learning_rate": 7.487179487179488e-06,
"loss": 0.8679,
"step": 1220
},
{
"epoch": 9.461538461538462,
"grad_norm": 3.2651612758636475,
"learning_rate": 7.384615384615386e-06,
"loss": 0.8558,
"step": 1230
},
{
"epoch": 9.538461538461538,
"grad_norm": 2.5597023963928223,
"learning_rate": 7.282051282051282e-06,
"loss": 0.8719,
"step": 1240
},
{
"epoch": 9.615384615384615,
"grad_norm": 2.756129503250122,
"learning_rate": 7.17948717948718e-06,
"loss": 0.8272,
"step": 1250
},
{
"epoch": 9.692307692307692,
"grad_norm": 2.7830724716186523,
"learning_rate": 7.076923076923078e-06,
"loss": 0.854,
"step": 1260
},
{
"epoch": 9.76923076923077,
"grad_norm": 1.841320514678955,
"learning_rate": 6.974358974358974e-06,
"loss": 0.8283,
"step": 1270
},
{
"epoch": 9.846153846153847,
"grad_norm": 2.4447691440582275,
"learning_rate": 6.871794871794872e-06,
"loss": 0.8179,
"step": 1280
},
{
"epoch": 9.923076923076923,
"grad_norm": 2.266535758972168,
"learning_rate": 6.76923076923077e-06,
"loss": 0.8368,
"step": 1290
},
{
"epoch": 10.0,
"grad_norm": 4.2985944747924805,
"learning_rate": 6.666666666666667e-06,
"loss": 0.8112,
"step": 1300
},
{
"epoch": 10.0,
"eval_accuracy": 0.8345864661654135,
"eval_loss": 0.8083619475364685,
"eval_runtime": 0.8269,
"eval_samples_per_second": 160.851,
"eval_steps_per_second": 20.56,
"step": 1300
},
{
"epoch": 10.076923076923077,
"grad_norm": 1.931143045425415,
"learning_rate": 6.564102564102565e-06,
"loss": 0.8378,
"step": 1310
},
{
"epoch": 10.153846153846153,
"grad_norm": 2.4910976886749268,
"learning_rate": 6.461538461538463e-06,
"loss": 0.8909,
"step": 1320
},
{
"epoch": 10.23076923076923,
"grad_norm": 2.3511204719543457,
"learning_rate": 6.358974358974359e-06,
"loss": 0.8715,
"step": 1330
},
{
"epoch": 10.307692307692308,
"grad_norm": 2.0618457794189453,
"learning_rate": 6.256410256410257e-06,
"loss": 0.833,
"step": 1340
},
{
"epoch": 10.384615384615385,
"grad_norm": 2.4037184715270996,
"learning_rate": 6.153846153846155e-06,
"loss": 0.809,
"step": 1350
},
{
"epoch": 10.461538461538462,
"grad_norm": 2.6920347213745117,
"learning_rate": 6.051282051282051e-06,
"loss": 0.8441,
"step": 1360
},
{
"epoch": 10.538461538461538,
"grad_norm": 2.5570709705352783,
"learning_rate": 5.948717948717949e-06,
"loss": 0.8166,
"step": 1370
},
{
"epoch": 10.615384615384615,
"grad_norm": 2.6978418827056885,
"learning_rate": 5.846153846153847e-06,
"loss": 0.8193,
"step": 1380
},
{
"epoch": 10.692307692307692,
"grad_norm": 2.370861291885376,
"learning_rate": 5.743589743589743e-06,
"loss": 0.81,
"step": 1390
},
{
"epoch": 10.76923076923077,
"grad_norm": 3.260789394378662,
"learning_rate": 5.641025641025641e-06,
"loss": 0.8671,
"step": 1400
},
{
"epoch": 10.846153846153847,
"grad_norm": 2.27559494972229,
"learning_rate": 5.538461538461539e-06,
"loss": 0.8321,
"step": 1410
},
{
"epoch": 10.923076923076923,
"grad_norm": 1.8184912204742432,
"learning_rate": 5.435897435897436e-06,
"loss": 0.8388,
"step": 1420
},
{
"epoch": 11.0,
"grad_norm": 4.08992862701416,
"learning_rate": 5.333333333333334e-06,
"loss": 0.8335,
"step": 1430
},
{
"epoch": 11.0,
"eval_accuracy": 0.8345864661654135,
"eval_loss": 0.7785491943359375,
"eval_runtime": 0.8255,
"eval_samples_per_second": 161.106,
"eval_steps_per_second": 20.592,
"step": 1430
},
{
"epoch": 11.076923076923077,
"grad_norm": 2.0166661739349365,
"learning_rate": 5.230769230769232e-06,
"loss": 0.8471,
"step": 1440
},
{
"epoch": 11.153846153846153,
"grad_norm": 3.2678654193878174,
"learning_rate": 5.128205128205128e-06,
"loss": 0.8298,
"step": 1450
},
{
"epoch": 11.23076923076923,
"grad_norm": 2.545058488845825,
"learning_rate": 5.025641025641026e-06,
"loss": 0.7991,
"step": 1460
},
{
"epoch": 11.307692307692308,
"grad_norm": 2.469082832336426,
"learning_rate": 4.923076923076924e-06,
"loss": 0.8256,
"step": 1470
},
{
"epoch": 11.384615384615385,
"grad_norm": 2.8207569122314453,
"learning_rate": 4.820512820512821e-06,
"loss": 0.8172,
"step": 1480
},
{
"epoch": 11.461538461538462,
"grad_norm": 2.142630100250244,
"learning_rate": 4.717948717948718e-06,
"loss": 0.8448,
"step": 1490
},
{
"epoch": 11.538461538461538,
"grad_norm": 2.3974075317382812,
"learning_rate": 4.615384615384616e-06,
"loss": 0.8292,
"step": 1500
},
{
"epoch": 11.615384615384615,
"grad_norm": 3.240954875946045,
"learning_rate": 4.512820512820513e-06,
"loss": 0.8275,
"step": 1510
},
{
"epoch": 11.692307692307692,
"grad_norm": 3.3133740425109863,
"learning_rate": 4.4102564102564104e-06,
"loss": 0.8569,
"step": 1520
},
{
"epoch": 11.76923076923077,
"grad_norm": 2.0835375785827637,
"learning_rate": 4.307692307692308e-06,
"loss": 0.8086,
"step": 1530
},
{
"epoch": 11.846153846153847,
"grad_norm": 2.20538067817688,
"learning_rate": 4.2051282051282055e-06,
"loss": 0.797,
"step": 1540
},
{
"epoch": 11.923076923076923,
"grad_norm": 2.1278247833251953,
"learning_rate": 4.102564102564103e-06,
"loss": 0.8301,
"step": 1550
},
{
"epoch": 12.0,
"grad_norm": 3.4031107425689697,
"learning_rate": 4.000000000000001e-06,
"loss": 0.8062,
"step": 1560
},
{
"epoch": 12.0,
"eval_accuracy": 0.8345864661654135,
"eval_loss": 0.7569367289543152,
"eval_runtime": 0.799,
"eval_samples_per_second": 166.458,
"eval_steps_per_second": 21.277,
"step": 1560
},
{
"epoch": 12.076923076923077,
"grad_norm": 2.181518793106079,
"learning_rate": 3.897435897435898e-06,
"loss": 0.7903,
"step": 1570
},
{
"epoch": 12.153846153846153,
"grad_norm": 2.4533674716949463,
"learning_rate": 3.794871794871795e-06,
"loss": 0.8228,
"step": 1580
},
{
"epoch": 12.23076923076923,
"grad_norm": 1.6895153522491455,
"learning_rate": 3.692307692307693e-06,
"loss": 0.8242,
"step": 1590
},
{
"epoch": 12.307692307692308,
"grad_norm": 2.1413424015045166,
"learning_rate": 3.58974358974359e-06,
"loss": 0.8378,
"step": 1600
},
{
"epoch": 12.384615384615385,
"grad_norm": 2.416987657546997,
"learning_rate": 3.487179487179487e-06,
"loss": 0.8088,
"step": 1610
},
{
"epoch": 12.461538461538462,
"grad_norm": 1.704318881034851,
"learning_rate": 3.384615384615385e-06,
"loss": 0.7953,
"step": 1620
},
{
"epoch": 12.538461538461538,
"grad_norm": 2.015375852584839,
"learning_rate": 3.2820512820512823e-06,
"loss": 0.799,
"step": 1630
},
{
"epoch": 12.615384615384615,
"grad_norm": 1.851975917816162,
"learning_rate": 3.1794871794871795e-06,
"loss": 0.813,
"step": 1640
},
{
"epoch": 12.692307692307692,
"grad_norm": 3.352241277694702,
"learning_rate": 3.0769230769230774e-06,
"loss": 0.763,
"step": 1650
},
{
"epoch": 12.76923076923077,
"grad_norm": 2.4369592666625977,
"learning_rate": 2.9743589743589746e-06,
"loss": 0.8583,
"step": 1660
},
{
"epoch": 12.846153846153847,
"grad_norm": 1.7659847736358643,
"learning_rate": 2.8717948717948717e-06,
"loss": 0.8354,
"step": 1670
},
{
"epoch": 12.923076923076923,
"grad_norm": 1.8726952075958252,
"learning_rate": 2.7692307692307697e-06,
"loss": 0.7972,
"step": 1680
},
{
"epoch": 13.0,
"grad_norm": 3.766446352005005,
"learning_rate": 2.666666666666667e-06,
"loss": 0.8141,
"step": 1690
},
{
"epoch": 13.0,
"eval_accuracy": 0.849624060150376,
"eval_loss": 0.7535560131072998,
"eval_runtime": 0.8236,
"eval_samples_per_second": 161.479,
"eval_steps_per_second": 20.64,
"step": 1690
},
{
"epoch": 13.076923076923077,
"grad_norm": 3.104832649230957,
"learning_rate": 2.564102564102564e-06,
"loss": 0.7937,
"step": 1700
},
{
"epoch": 13.153846153846153,
"grad_norm": 2.46419095993042,
"learning_rate": 2.461538461538462e-06,
"loss": 0.822,
"step": 1710
},
{
"epoch": 13.23076923076923,
"grad_norm": 3.035752296447754,
"learning_rate": 2.358974358974359e-06,
"loss": 0.8127,
"step": 1720
},
{
"epoch": 13.307692307692308,
"grad_norm": 1.9470882415771484,
"learning_rate": 2.2564102564102566e-06,
"loss": 0.8151,
"step": 1730
},
{
"epoch": 13.384615384615385,
"grad_norm": 2.086946964263916,
"learning_rate": 2.153846153846154e-06,
"loss": 0.7773,
"step": 1740
},
{
"epoch": 13.461538461538462,
"grad_norm": 3.7044293880462646,
"learning_rate": 2.0512820512820513e-06,
"loss": 0.8276,
"step": 1750
},
{
"epoch": 13.538461538461538,
"grad_norm": 2.735135555267334,
"learning_rate": 1.948717948717949e-06,
"loss": 0.8462,
"step": 1760
},
{
"epoch": 13.615384615384615,
"grad_norm": 2.065619468688965,
"learning_rate": 1.8461538461538465e-06,
"loss": 0.8024,
"step": 1770
},
{
"epoch": 13.692307692307692,
"grad_norm": 2.519625663757324,
"learning_rate": 1.7435897435897436e-06,
"loss": 0.7813,
"step": 1780
},
{
"epoch": 13.76923076923077,
"grad_norm": 1.8556421995162964,
"learning_rate": 1.6410256410256412e-06,
"loss": 0.8218,
"step": 1790
},
{
"epoch": 13.846153846153847,
"grad_norm": 3.1282896995544434,
"learning_rate": 1.5384615384615387e-06,
"loss": 0.7891,
"step": 1800
},
{
"epoch": 13.923076923076923,
"grad_norm": 2.363374948501587,
"learning_rate": 1.4358974358974359e-06,
"loss": 0.7341,
"step": 1810
},
{
"epoch": 14.0,
"grad_norm": 4.762740612030029,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.8172,
"step": 1820
},
{
"epoch": 14.0,
"eval_accuracy": 0.8270676691729323,
"eval_loss": 0.7531744837760925,
"eval_runtime": 0.8266,
"eval_samples_per_second": 160.91,
"eval_steps_per_second": 20.567,
"step": 1820
},
{
"epoch": 14.076923076923077,
"grad_norm": 1.8959217071533203,
"learning_rate": 1.230769230769231e-06,
"loss": 0.8201,
"step": 1830
},
{
"epoch": 14.153846153846153,
"grad_norm": 2.1285548210144043,
"learning_rate": 1.1282051282051283e-06,
"loss": 0.7906,
"step": 1840
},
{
"epoch": 14.23076923076923,
"grad_norm": 2.916303873062134,
"learning_rate": 1.0256410256410257e-06,
"loss": 0.7793,
"step": 1850
},
{
"epoch": 14.307692307692308,
"grad_norm": 2.9460341930389404,
"learning_rate": 9.230769230769232e-07,
"loss": 0.773,
"step": 1860
},
{
"epoch": 14.384615384615385,
"grad_norm": 2.4907443523406982,
"learning_rate": 8.205128205128206e-07,
"loss": 0.7887,
"step": 1870
},
{
"epoch": 14.461538461538462,
"grad_norm": 2.431607961654663,
"learning_rate": 7.179487179487179e-07,
"loss": 0.7886,
"step": 1880
},
{
"epoch": 14.538461538461538,
"grad_norm": 3.005627155303955,
"learning_rate": 6.153846153846155e-07,
"loss": 0.7789,
"step": 1890
},
{
"epoch": 14.615384615384615,
"grad_norm": 2.981405735015869,
"learning_rate": 5.128205128205128e-07,
"loss": 0.8262,
"step": 1900
},
{
"epoch": 14.692307692307692,
"grad_norm": 1.9199621677398682,
"learning_rate": 4.102564102564103e-07,
"loss": 0.8015,
"step": 1910
},
{
"epoch": 14.76923076923077,
"grad_norm": 3.6987714767456055,
"learning_rate": 3.0769230769230774e-07,
"loss": 0.7998,
"step": 1920
},
{
"epoch": 14.846153846153847,
"grad_norm": 3.5848920345306396,
"learning_rate": 2.0512820512820514e-07,
"loss": 0.8552,
"step": 1930
},
{
"epoch": 14.923076923076923,
"grad_norm": 2.4082224369049072,
"learning_rate": 1.0256410256410257e-07,
"loss": 0.759,
"step": 1940
},
{
"epoch": 15.0,
"grad_norm": 4.4640116691589355,
"learning_rate": 0.0,
"loss": 0.7896,
"step": 1950
},
{
"epoch": 15.0,
"eval_accuracy": 0.8045112781954887,
"eval_loss": 0.7388833165168762,
"eval_runtime": 0.8692,
"eval_samples_per_second": 153.016,
"eval_steps_per_second": 19.558,
"step": 1950
},
{
"epoch": 15.0,
"step": 1950,
"total_flos": 1.5658365504595968e+17,
"train_loss": 0.923483537771763,
"train_runtime": 151.8082,
"train_samples_per_second": 102.168,
"train_steps_per_second": 12.845
}
],
"logging_steps": 10,
"max_steps": 1950,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5658365504595968e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
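
The file above is the standard trainer_state.json that transformers.Trainer writes at the end of training. As a hedged illustration only (the file path and the printed format are assumptions, not part of the log itself), the following minimal Python sketch reads the file with the standard library and prints the per-epoch evaluation metrics recorded in log_history, plus the best checkpoint:

import json

# Load the trainer state (path is an assumption; adjust to where the file lives).
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves per-step training records and per-epoch eval records;
# only the eval records carry "eval_accuracy" / "eval_loss".
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f'epoch {entry["epoch"]:.0f}: '
              f'accuracy={entry["eval_accuracy"]:.4f}, loss={entry["eval_loss"]:.4f}')

print("best metric:", state["best_metric"], "from", state["best_model_checkpoint"])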