vit-base-beans / trainer_state.json
{
"best_metric": 0.8550169467926025,
"best_model_checkpoint": "./beans_outputs/checkpoint-1950",
"epoch": 15.0,
"eval_steps": 500,
"global_step": 1950,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07692307692307693,
"grad_norm": 2.0955963134765625,
"learning_rate": 1.9692307692307696e-05,
"loss": 1.1239,
"step": 10
},
{
"epoch": 0.15384615384615385,
"grad_norm": 1.8236017227172852,
"learning_rate": 1.9384615384615386e-05,
"loss": 1.1222,
"step": 20
},
{
"epoch": 0.23076923076923078,
"grad_norm": 1.9866633415222168,
"learning_rate": 1.907692307692308e-05,
"loss": 1.1165,
"step": 30
},
{
"epoch": 0.3076923076923077,
"grad_norm": 2.4556338787078857,
"learning_rate": 1.876923076923077e-05,
"loss": 1.1047,
"step": 40
},
{
"epoch": 0.38461538461538464,
"grad_norm": 1.6562598943710327,
"learning_rate": 1.8461538461538465e-05,
"loss": 1.1085,
"step": 50
},
{
"epoch": 0.46153846153846156,
"grad_norm": 1.501336932182312,
"learning_rate": 1.8153846153846155e-05,
"loss": 1.1048,
"step": 60
},
{
"epoch": 0.5384615384615384,
"grad_norm": 2.606107234954834,
"learning_rate": 1.784615384615385e-05,
"loss": 1.1031,
"step": 70
},
{
"epoch": 0.6153846153846154,
"grad_norm": 2.634584426879883,
"learning_rate": 1.753846153846154e-05,
"loss": 1.0916,
"step": 80
},
{
"epoch": 0.6923076923076923,
"grad_norm": 2.039886236190796,
"learning_rate": 1.7230769230769234e-05,
"loss": 1.095,
"step": 90
},
{
"epoch": 0.7692307692307693,
"grad_norm": 2.4800949096679688,
"learning_rate": 1.6923076923076924e-05,
"loss": 1.0916,
"step": 100
},
{
"epoch": 0.8461538461538461,
"grad_norm": 1.6766040325164795,
"learning_rate": 1.6615384615384618e-05,
"loss": 1.0959,
"step": 110
},
{
"epoch": 0.9230769230769231,
"grad_norm": 2.2848353385925293,
"learning_rate": 1.630769230769231e-05,
"loss": 1.1005,
"step": 120
},
{
"epoch": 1.0,
"grad_norm": 4.07199239730835,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.0881,
"step": 130
},
{
"epoch": 1.0,
"eval_accuracy": 0.41353383458646614,
"eval_loss": 1.0901767015457153,
"eval_runtime": 0.7894,
"eval_samples_per_second": 168.48,
"eval_steps_per_second": 21.535,
"step": 130
},
{
"epoch": 1.0769230769230769,
"grad_norm": 2.0711560249328613,
"learning_rate": 1.5692307692307693e-05,
"loss": 1.0794,
"step": 140
},
{
"epoch": 1.1538461538461537,
"grad_norm": 2.499732494354248,
"learning_rate": 1.5384615384615387e-05,
"loss": 1.0755,
"step": 150
},
{
"epoch": 1.2307692307692308,
"grad_norm": 1.4569323062896729,
"learning_rate": 1.5076923076923078e-05,
"loss": 1.0807,
"step": 160
},
{
"epoch": 1.3076923076923077,
"grad_norm": 2.351478338241577,
"learning_rate": 1.4769230769230772e-05,
"loss": 1.0965,
"step": 170
},
{
"epoch": 1.3846153846153846,
"grad_norm": 2.1514322757720947,
"learning_rate": 1.4461538461538462e-05,
"loss": 1.0839,
"step": 180
},
{
"epoch": 1.4615384615384617,
"grad_norm": 2.151601791381836,
"learning_rate": 1.4153846153846156e-05,
"loss": 1.0837,
"step": 190
},
{
"epoch": 1.5384615384615383,
"grad_norm": 1.797500729560852,
"learning_rate": 1.3846153846153847e-05,
"loss": 1.0807,
"step": 200
},
{
"epoch": 1.6153846153846154,
"grad_norm": 1.7583892345428467,
"learning_rate": 1.353846153846154e-05,
"loss": 1.073,
"step": 210
},
{
"epoch": 1.6923076923076923,
"grad_norm": 3.5006496906280518,
"learning_rate": 1.3230769230769231e-05,
"loss": 1.0741,
"step": 220
},
{
"epoch": 1.7692307692307692,
"grad_norm": 2.0179672241210938,
"learning_rate": 1.2923076923076925e-05,
"loss": 1.077,
"step": 230
},
{
"epoch": 1.8461538461538463,
"grad_norm": 2.057086944580078,
"learning_rate": 1.2615384615384616e-05,
"loss": 1.0925,
"step": 240
},
{
"epoch": 1.9230769230769231,
"grad_norm": 1.9710103273391724,
"learning_rate": 1.230769230769231e-05,
"loss": 1.0757,
"step": 250
},
{
"epoch": 2.0,
"grad_norm": 2.9315028190612793,
"learning_rate": 1.2e-05,
"loss": 1.0716,
"step": 260
},
{
"epoch": 2.0,
"eval_accuracy": 0.5037593984962406,
"eval_loss": 1.0685255527496338,
"eval_runtime": 0.7711,
"eval_samples_per_second": 172.471,
"eval_steps_per_second": 22.045,
"step": 260
},
{
"epoch": 2.076923076923077,
"grad_norm": 2.183527946472168,
"learning_rate": 1.1692307692307694e-05,
"loss": 1.0786,
"step": 270
},
{
"epoch": 2.1538461538461537,
"grad_norm": 2.379652500152588,
"learning_rate": 1.1384615384615385e-05,
"loss": 1.0676,
"step": 280
},
{
"epoch": 2.230769230769231,
"grad_norm": 2.387296438217163,
"learning_rate": 1.1076923076923079e-05,
"loss": 1.064,
"step": 290
},
{
"epoch": 2.3076923076923075,
"grad_norm": 2.8164947032928467,
"learning_rate": 1.076923076923077e-05,
"loss": 1.0548,
"step": 300
},
{
"epoch": 2.3846153846153846,
"grad_norm": 1.849363088607788,
"learning_rate": 1.0461538461538463e-05,
"loss": 1.0693,
"step": 310
},
{
"epoch": 2.4615384615384617,
"grad_norm": 2.0274300575256348,
"learning_rate": 1.0153846153846154e-05,
"loss": 1.0604,
"step": 320
},
{
"epoch": 2.5384615384615383,
"grad_norm": 1.4544223546981812,
"learning_rate": 9.846153846153848e-06,
"loss": 1.0458,
"step": 330
},
{
"epoch": 2.6153846153846154,
"grad_norm": 2.1712183952331543,
"learning_rate": 9.53846153846154e-06,
"loss": 1.0609,
"step": 340
},
{
"epoch": 2.6923076923076925,
"grad_norm": 1.922677993774414,
"learning_rate": 9.230769230769232e-06,
"loss": 1.0646,
"step": 350
},
{
"epoch": 2.769230769230769,
"grad_norm": 1.8288860321044922,
"learning_rate": 8.923076923076925e-06,
"loss": 1.0597,
"step": 360
},
{
"epoch": 2.8461538461538463,
"grad_norm": 2.123480796813965,
"learning_rate": 8.615384615384617e-06,
"loss": 1.052,
"step": 370
},
{
"epoch": 2.9230769230769234,
"grad_norm": 1.820168137550354,
"learning_rate": 8.307692307692309e-06,
"loss": 1.0434,
"step": 380
},
{
"epoch": 3.0,
"grad_norm": 4.973505020141602,
"learning_rate": 8.000000000000001e-06,
"loss": 1.061,
"step": 390
},
{
"epoch": 3.0,
"eval_accuracy": 0.6240601503759399,
"eval_loss": 1.0459144115447998,
"eval_runtime": 0.7618,
"eval_samples_per_second": 174.588,
"eval_steps_per_second": 22.316,
"step": 390
},
{
"epoch": 3.076923076923077,
"grad_norm": 2.1673223972320557,
"learning_rate": 7.692307692307694e-06,
"loss": 1.0616,
"step": 400
},
{
"epoch": 3.1538461538461537,
"grad_norm": 1.8567888736724854,
"learning_rate": 7.384615384615386e-06,
"loss": 1.0501,
"step": 410
},
{
"epoch": 3.230769230769231,
"grad_norm": 2.0640571117401123,
"learning_rate": 7.076923076923078e-06,
"loss": 1.0611,
"step": 420
},
{
"epoch": 3.3076923076923075,
"grad_norm": 2.215384006500244,
"learning_rate": 6.76923076923077e-06,
"loss": 1.0469,
"step": 430
},
{
"epoch": 3.3846153846153846,
"grad_norm": 2.1000049114227295,
"learning_rate": 6.461538461538463e-06,
"loss": 1.0499,
"step": 440
},
{
"epoch": 3.4615384615384617,
"grad_norm": 1.7218382358551025,
"learning_rate": 6.153846153846155e-06,
"loss": 1.0564,
"step": 450
},
{
"epoch": 3.5384615384615383,
"grad_norm": 2.3569300174713135,
"learning_rate": 5.846153846153847e-06,
"loss": 1.0599,
"step": 460
},
{
"epoch": 3.6153846153846154,
"grad_norm": 1.5210909843444824,
"learning_rate": 5.538461538461539e-06,
"loss": 1.0367,
"step": 470
},
{
"epoch": 3.6923076923076925,
"grad_norm": 2.7621657848358154,
"learning_rate": 5.230769230769232e-06,
"loss": 1.0421,
"step": 480
},
{
"epoch": 3.769230769230769,
"grad_norm": 1.5097808837890625,
"learning_rate": 4.923076923076924e-06,
"loss": 1.0362,
"step": 490
},
{
"epoch": 3.8461538461538463,
"grad_norm": 1.5118447542190552,
"learning_rate": 4.615384615384616e-06,
"loss": 1.0572,
"step": 500
},
{
"epoch": 3.9230769230769234,
"grad_norm": 1.7513490915298462,
"learning_rate": 4.307692307692308e-06,
"loss": 1.0361,
"step": 510
},
{
"epoch": 4.0,
"grad_norm": 5.398025035858154,
"learning_rate": 4.000000000000001e-06,
"loss": 1.0514,
"step": 520
},
{
"epoch": 4.0,
"eval_accuracy": 0.6015037593984962,
"eval_loss": 1.0407124757766724,
"eval_runtime": 0.7726,
"eval_samples_per_second": 172.155,
"eval_steps_per_second": 22.005,
"step": 520
},
{
"epoch": 4.076923076923077,
"grad_norm": 2.5516345500946045,
"learning_rate": 3.692307692307693e-06,
"loss": 1.0529,
"step": 530
},
{
"epoch": 4.153846153846154,
"grad_norm": 1.6976008415222168,
"learning_rate": 3.384615384615385e-06,
"loss": 1.0472,
"step": 540
},
{
"epoch": 4.230769230769231,
"grad_norm": 2.5672519207000732,
"learning_rate": 3.0769230769230774e-06,
"loss": 1.0565,
"step": 550
},
{
"epoch": 4.3076923076923075,
"grad_norm": 2.166529655456543,
"learning_rate": 2.7692307692307697e-06,
"loss": 1.0619,
"step": 560
},
{
"epoch": 4.384615384615385,
"grad_norm": 1.961472511291504,
"learning_rate": 2.461538461538462e-06,
"loss": 1.0322,
"step": 570
},
{
"epoch": 4.461538461538462,
"grad_norm": 2.392319440841675,
"learning_rate": 2.153846153846154e-06,
"loss": 1.0388,
"step": 580
},
{
"epoch": 4.538461538461538,
"grad_norm": 2.3034205436706543,
"learning_rate": 1.8461538461538465e-06,
"loss": 1.0358,
"step": 590
},
{
"epoch": 4.615384615384615,
"grad_norm": 2.037050247192383,
"learning_rate": 1.5384615384615387e-06,
"loss": 1.0334,
"step": 600
},
{
"epoch": 4.6923076923076925,
"grad_norm": 3.0737335681915283,
"learning_rate": 1.230769230769231e-06,
"loss": 1.0506,
"step": 610
},
{
"epoch": 4.769230769230769,
"grad_norm": 2.1824796199798584,
"learning_rate": 9.230769230769232e-07,
"loss": 1.0516,
"step": 620
},
{
"epoch": 4.846153846153846,
"grad_norm": 1.9239214658737183,
"learning_rate": 6.153846153846155e-07,
"loss": 1.0399,
"step": 630
},
{
"epoch": 4.923076923076923,
"grad_norm": 2.267302989959717,
"learning_rate": 3.0769230769230774e-07,
"loss": 1.0374,
"step": 640
},
{
"epoch": 5.0,
"grad_norm": 4.4957404136657715,
"learning_rate": 0.0,
"loss": 1.05,
"step": 650
},
{
"epoch": 5.0,
"eval_accuracy": 0.6766917293233082,
"eval_loss": 1.0332472324371338,
"eval_runtime": 0.8208,
"eval_samples_per_second": 162.035,
"eval_steps_per_second": 20.711,
"step": 650
},
{
"epoch": 5.076923076923077,
"grad_norm": 1.9310355186462402,
"learning_rate": 1.3230769230769231e-05,
"loss": 1.0281,
"step": 660
},
{
"epoch": 5.153846153846154,
"grad_norm": 2.36603045463562,
"learning_rate": 1.312820512820513e-05,
"loss": 1.0587,
"step": 670
},
{
"epoch": 5.230769230769231,
"grad_norm": 2.0758917331695557,
"learning_rate": 1.3025641025641027e-05,
"loss": 1.0534,
"step": 680
},
{
"epoch": 5.3076923076923075,
"grad_norm": 2.549725294113159,
"learning_rate": 1.2923076923076925e-05,
"loss": 1.0421,
"step": 690
},
{
"epoch": 5.384615384615385,
"grad_norm": 2.7183680534362793,
"learning_rate": 1.2820512820512823e-05,
"loss": 1.035,
"step": 700
},
{
"epoch": 5.461538461538462,
"grad_norm": 1.7176955938339233,
"learning_rate": 1.2717948717948718e-05,
"loss": 1.0268,
"step": 710
},
{
"epoch": 5.538461538461538,
"grad_norm": 1.8257861137390137,
"learning_rate": 1.2615384615384616e-05,
"loss": 1.0302,
"step": 720
},
{
"epoch": 5.615384615384615,
"grad_norm": 2.50368595123291,
"learning_rate": 1.2512820512820514e-05,
"loss": 1.0331,
"step": 730
},
{
"epoch": 5.6923076923076925,
"grad_norm": 2.4315121173858643,
"learning_rate": 1.2410256410256412e-05,
"loss": 1.0208,
"step": 740
},
{
"epoch": 5.769230769230769,
"grad_norm": 2.043854236602783,
"learning_rate": 1.230769230769231e-05,
"loss": 1.0241,
"step": 750
},
{
"epoch": 5.846153846153846,
"grad_norm": 2.0800740718841553,
"learning_rate": 1.2205128205128208e-05,
"loss": 1.0251,
"step": 760
},
{
"epoch": 5.923076923076923,
"grad_norm": 3.4356396198272705,
"learning_rate": 1.2102564102564102e-05,
"loss": 1.0104,
"step": 770
},
{
"epoch": 6.0,
"grad_norm": 4.49416971206665,
"learning_rate": 1.2e-05,
"loss": 1.0357,
"step": 780
},
{
"epoch": 6.0,
"eval_accuracy": 0.6541353383458647,
"eval_loss": 1.0109117031097412,
"eval_runtime": 0.7801,
"eval_samples_per_second": 170.483,
"eval_steps_per_second": 21.791,
"step": 780
},
{
"epoch": 6.076923076923077,
"grad_norm": 2.650513172149658,
"learning_rate": 1.1897435897435898e-05,
"loss": 1.0148,
"step": 790
},
{
"epoch": 6.153846153846154,
"grad_norm": 2.3682632446289062,
"learning_rate": 1.1794871794871796e-05,
"loss": 1.0094,
"step": 800
},
{
"epoch": 6.230769230769231,
"grad_norm": 1.6716077327728271,
"learning_rate": 1.1692307692307694e-05,
"loss": 1.0097,
"step": 810
},
{
"epoch": 6.3076923076923075,
"grad_norm": 2.4839890003204346,
"learning_rate": 1.1589743589743592e-05,
"loss": 1.01,
"step": 820
},
{
"epoch": 6.384615384615385,
"grad_norm": 2.4004769325256348,
"learning_rate": 1.1487179487179487e-05,
"loss": 1.0104,
"step": 830
},
{
"epoch": 6.461538461538462,
"grad_norm": 2.9597084522247314,
"learning_rate": 1.1384615384615385e-05,
"loss": 1.0137,
"step": 840
},
{
"epoch": 6.538461538461538,
"grad_norm": 2.680335760116577,
"learning_rate": 1.1282051282051283e-05,
"loss": 1.0147,
"step": 850
},
{
"epoch": 6.615384615384615,
"grad_norm": 1.7677160501480103,
"learning_rate": 1.117948717948718e-05,
"loss": 0.9947,
"step": 860
},
{
"epoch": 6.6923076923076925,
"grad_norm": 2.0484132766723633,
"learning_rate": 1.1076923076923079e-05,
"loss": 1.0169,
"step": 870
},
{
"epoch": 6.769230769230769,
"grad_norm": 2.1910479068756104,
"learning_rate": 1.0974358974358977e-05,
"loss": 1.0024,
"step": 880
},
{
"epoch": 6.846153846153846,
"grad_norm": 2.181236743927002,
"learning_rate": 1.0871794871794871e-05,
"loss": 0.9962,
"step": 890
},
{
"epoch": 6.923076923076923,
"grad_norm": 2.898885488510132,
"learning_rate": 1.076923076923077e-05,
"loss": 1.012,
"step": 900
},
{
"epoch": 7.0,
"grad_norm": 4.950052738189697,
"learning_rate": 1.0666666666666667e-05,
"loss": 1.0012,
"step": 910
},
{
"epoch": 7.0,
"eval_accuracy": 0.7368421052631579,
"eval_loss": 0.981479287147522,
"eval_runtime": 0.7693,
"eval_samples_per_second": 172.886,
"eval_steps_per_second": 22.098,
"step": 910
},
{
"epoch": 7.076923076923077,
"grad_norm": 2.692753553390503,
"learning_rate": 1.0564102564102565e-05,
"loss": 0.9889,
"step": 920
},
{
"epoch": 7.153846153846154,
"grad_norm": 2.9175124168395996,
"learning_rate": 1.0461538461538463e-05,
"loss": 0.9911,
"step": 930
},
{
"epoch": 7.230769230769231,
"grad_norm": 3.221527099609375,
"learning_rate": 1.0358974358974361e-05,
"loss": 0.9827,
"step": 940
},
{
"epoch": 7.3076923076923075,
"grad_norm": 2.507923126220703,
"learning_rate": 1.0256410256410256e-05,
"loss": 0.9919,
"step": 950
},
{
"epoch": 7.384615384615385,
"grad_norm": 2.4533870220184326,
"learning_rate": 1.0153846153846154e-05,
"loss": 0.9962,
"step": 960
},
{
"epoch": 7.461538461538462,
"grad_norm": 2.1032631397247314,
"learning_rate": 1.0051282051282052e-05,
"loss": 0.9751,
"step": 970
},
{
"epoch": 7.538461538461538,
"grad_norm": 2.5848186016082764,
"learning_rate": 9.94871794871795e-06,
"loss": 0.9939,
"step": 980
},
{
"epoch": 7.615384615384615,
"grad_norm": 2.17742919921875,
"learning_rate": 9.846153846153848e-06,
"loss": 0.9745,
"step": 990
},
{
"epoch": 7.6923076923076925,
"grad_norm": 1.9953967332839966,
"learning_rate": 9.743589743589744e-06,
"loss": 0.9665,
"step": 1000
},
{
"epoch": 7.769230769230769,
"grad_norm": 3.0263218879699707,
"learning_rate": 9.641025641025642e-06,
"loss": 0.9646,
"step": 1010
},
{
"epoch": 7.846153846153846,
"grad_norm": 2.3735406398773193,
"learning_rate": 9.53846153846154e-06,
"loss": 0.9836,
"step": 1020
},
{
"epoch": 7.923076923076923,
"grad_norm": 2.548480272293091,
"learning_rate": 9.435897435897436e-06,
"loss": 0.9546,
"step": 1030
},
{
"epoch": 8.0,
"grad_norm": 3.8450028896331787,
"learning_rate": 9.333333333333334e-06,
"loss": 0.9932,
"step": 1040
},
{
"epoch": 8.0,
"eval_accuracy": 0.7669172932330827,
"eval_loss": 0.9549766778945923,
"eval_runtime": 0.7536,
"eval_samples_per_second": 176.486,
"eval_steps_per_second": 22.558,
"step": 1040
},
{
"epoch": 8.076923076923077,
"grad_norm": 1.6870847940444946,
"learning_rate": 9.230769230769232e-06,
"loss": 0.9752,
"step": 1050
},
{
"epoch": 8.153846153846153,
"grad_norm": 2.1122217178344727,
"learning_rate": 9.128205128205129e-06,
"loss": 0.9573,
"step": 1060
},
{
"epoch": 8.23076923076923,
"grad_norm": 4.1552886962890625,
"learning_rate": 9.025641025641027e-06,
"loss": 0.9764,
"step": 1070
},
{
"epoch": 8.307692307692308,
"grad_norm": 1.7864203453063965,
"learning_rate": 8.923076923076925e-06,
"loss": 0.9434,
"step": 1080
},
{
"epoch": 8.384615384615385,
"grad_norm": 2.2091946601867676,
"learning_rate": 8.820512820512821e-06,
"loss": 0.974,
"step": 1090
},
{
"epoch": 8.461538461538462,
"grad_norm": 2.4063644409179688,
"learning_rate": 8.717948717948719e-06,
"loss": 0.9576,
"step": 1100
},
{
"epoch": 8.538461538461538,
"grad_norm": 1.6061931848526,
"learning_rate": 8.615384615384617e-06,
"loss": 0.9588,
"step": 1110
},
{
"epoch": 8.615384615384615,
"grad_norm": 2.8999595642089844,
"learning_rate": 8.512820512820513e-06,
"loss": 0.9791,
"step": 1120
},
{
"epoch": 8.692307692307692,
"grad_norm": 3.6554131507873535,
"learning_rate": 8.410256410256411e-06,
"loss": 0.9629,
"step": 1130
},
{
"epoch": 8.76923076923077,
"grad_norm": 1.7246966361999512,
"learning_rate": 8.307692307692309e-06,
"loss": 0.9707,
"step": 1140
},
{
"epoch": 8.846153846153847,
"grad_norm": 2.3160033226013184,
"learning_rate": 8.205128205128205e-06,
"loss": 0.9578,
"step": 1150
},
{
"epoch": 8.923076923076923,
"grad_norm": 2.685718059539795,
"learning_rate": 8.102564102564103e-06,
"loss": 0.972,
"step": 1160
},
{
"epoch": 9.0,
"grad_norm": 3.6465442180633545,
"learning_rate": 8.000000000000001e-06,
"loss": 0.9748,
"step": 1170
},
{
"epoch": 9.0,
"eval_accuracy": 0.7669172932330827,
"eval_loss": 0.9408761858940125,
"eval_runtime": 0.7615,
"eval_samples_per_second": 174.644,
"eval_steps_per_second": 22.323,
"step": 1170
},
{
"epoch": 9.076923076923077,
"grad_norm": 1.8668149709701538,
"learning_rate": 7.897435897435898e-06,
"loss": 0.9712,
"step": 1180
},
{
"epoch": 9.153846153846153,
"grad_norm": 2.042644739151001,
"learning_rate": 7.794871794871796e-06,
"loss": 0.9407,
"step": 1190
},
{
"epoch": 9.23076923076923,
"grad_norm": 1.967020869255066,
"learning_rate": 7.692307692307694e-06,
"loss": 0.9457,
"step": 1200
},
{
"epoch": 9.307692307692308,
"grad_norm": 2.147862672805786,
"learning_rate": 7.58974358974359e-06,
"loss": 0.9442,
"step": 1210
},
{
"epoch": 9.384615384615385,
"grad_norm": 1.8528053760528564,
"learning_rate": 7.487179487179488e-06,
"loss": 0.9526,
"step": 1220
},
{
"epoch": 9.461538461538462,
"grad_norm": 3.2000551223754883,
"learning_rate": 7.384615384615386e-06,
"loss": 0.9465,
"step": 1230
},
{
"epoch": 9.538461538461538,
"grad_norm": 2.259323835372925,
"learning_rate": 7.282051282051282e-06,
"loss": 0.9503,
"step": 1240
},
{
"epoch": 9.615384615384615,
"grad_norm": 2.4054858684539795,
"learning_rate": 7.17948717948718e-06,
"loss": 0.9274,
"step": 1250
},
{
"epoch": 9.692307692307692,
"grad_norm": 3.4811408519744873,
"learning_rate": 7.076923076923078e-06,
"loss": 0.943,
"step": 1260
},
{
"epoch": 9.76923076923077,
"grad_norm": 1.7080141305923462,
"learning_rate": 6.974358974358974e-06,
"loss": 0.9247,
"step": 1270
},
{
"epoch": 9.846153846153847,
"grad_norm": 2.0476508140563965,
"learning_rate": 6.871794871794872e-06,
"loss": 0.9194,
"step": 1280
},
{
"epoch": 9.923076923076923,
"grad_norm": 2.149641990661621,
"learning_rate": 6.76923076923077e-06,
"loss": 0.9269,
"step": 1290
},
{
"epoch": 10.0,
"grad_norm": 5.121323108673096,
"learning_rate": 6.666666666666667e-06,
"loss": 0.9113,
"step": 1300
},
{
"epoch": 10.0,
"eval_accuracy": 0.7819548872180451,
"eval_loss": 0.9149269461631775,
"eval_runtime": 0.7775,
"eval_samples_per_second": 171.06,
"eval_steps_per_second": 21.865,
"step": 1300
},
{
"epoch": 10.076923076923077,
"grad_norm": 2.06109881401062,
"learning_rate": 6.564102564102565e-06,
"loss": 0.925,
"step": 1310
},
{
"epoch": 10.153846153846153,
"grad_norm": 1.9137018918991089,
"learning_rate": 6.461538461538463e-06,
"loss": 0.9657,
"step": 1320
},
{
"epoch": 10.23076923076923,
"grad_norm": 2.0686280727386475,
"learning_rate": 6.358974358974359e-06,
"loss": 0.9565,
"step": 1330
},
{
"epoch": 10.307692307692308,
"grad_norm": 2.046623945236206,
"learning_rate": 6.256410256410257e-06,
"loss": 0.918,
"step": 1340
},
{
"epoch": 10.384615384615385,
"grad_norm": 2.281343936920166,
"learning_rate": 6.153846153846155e-06,
"loss": 0.9118,
"step": 1350
},
{
"epoch": 10.461538461538462,
"grad_norm": 2.694427728652954,
"learning_rate": 6.051282051282051e-06,
"loss": 0.9377,
"step": 1360
},
{
"epoch": 10.538461538461538,
"grad_norm": 2.3148765563964844,
"learning_rate": 5.948717948717949e-06,
"loss": 0.911,
"step": 1370
},
{
"epoch": 10.615384615384615,
"grad_norm": 2.595669746398926,
"learning_rate": 5.846153846153847e-06,
"loss": 0.9146,
"step": 1380
},
{
"epoch": 10.692307692307692,
"grad_norm": 2.136301279067993,
"learning_rate": 5.743589743589743e-06,
"loss": 0.9061,
"step": 1390
},
{
"epoch": 10.76923076923077,
"grad_norm": 3.0159363746643066,
"learning_rate": 5.641025641025641e-06,
"loss": 0.9365,
"step": 1400
},
{
"epoch": 10.846153846153847,
"grad_norm": 2.1136507987976074,
"learning_rate": 5.538461538461539e-06,
"loss": 0.9232,
"step": 1410
},
{
"epoch": 10.923076923076923,
"grad_norm": 1.713663101196289,
"learning_rate": 5.435897435897436e-06,
"loss": 0.9344,
"step": 1420
},
{
"epoch": 11.0,
"grad_norm": 4.04538631439209,
"learning_rate": 5.333333333333334e-06,
"loss": 0.9255,
"step": 1430
},
{
"epoch": 11.0,
"eval_accuracy": 0.7894736842105263,
"eval_loss": 0.8905543088912964,
"eval_runtime": 0.7659,
"eval_samples_per_second": 173.659,
"eval_steps_per_second": 22.197,
"step": 1430
},
{
"epoch": 11.076923076923077,
"grad_norm": 2.04194974899292,
"learning_rate": 5.230769230769232e-06,
"loss": 0.9333,
"step": 1440
},
{
"epoch": 11.153846153846153,
"grad_norm": 3.108344554901123,
"learning_rate": 5.128205128205128e-06,
"loss": 0.9174,
"step": 1450
},
{
"epoch": 11.23076923076923,
"grad_norm": 2.406233072280884,
"learning_rate": 5.025641025641026e-06,
"loss": 0.8948,
"step": 1460
},
{
"epoch": 11.307692307692308,
"grad_norm": 2.4100501537323,
"learning_rate": 4.923076923076924e-06,
"loss": 0.9155,
"step": 1470
},
{
"epoch": 11.384615384615385,
"grad_norm": 2.7117860317230225,
"learning_rate": 4.820512820512821e-06,
"loss": 0.9075,
"step": 1480
},
{
"epoch": 11.461538461538462,
"grad_norm": 2.0159695148468018,
"learning_rate": 4.717948717948718e-06,
"loss": 0.9338,
"step": 1490
},
{
"epoch": 11.538461538461538,
"grad_norm": 3.280245304107666,
"learning_rate": 4.615384615384616e-06,
"loss": 0.9243,
"step": 1500
},
{
"epoch": 11.615384615384615,
"grad_norm": 3.1355690956115723,
"learning_rate": 4.512820512820513e-06,
"loss": 0.9185,
"step": 1510
},
{
"epoch": 11.692307692307692,
"grad_norm": 3.0900094509124756,
"learning_rate": 4.4102564102564104e-06,
"loss": 0.937,
"step": 1520
},
{
"epoch": 11.76923076923077,
"grad_norm": 1.8758033514022827,
"learning_rate": 4.307692307692308e-06,
"loss": 0.9052,
"step": 1530
},
{
"epoch": 11.846153846153847,
"grad_norm": 2.0586955547332764,
"learning_rate": 4.2051282051282055e-06,
"loss": 0.8874,
"step": 1540
},
{
"epoch": 11.923076923076923,
"grad_norm": 2.0720062255859375,
"learning_rate": 4.102564102564103e-06,
"loss": 0.9141,
"step": 1550
},
{
"epoch": 12.0,
"grad_norm": 3.183523416519165,
"learning_rate": 4.000000000000001e-06,
"loss": 0.8877,
"step": 1560
},
{
"epoch": 12.0,
"eval_accuracy": 0.7894736842105263,
"eval_loss": 0.8749483823776245,
"eval_runtime": 0.7374,
"eval_samples_per_second": 180.372,
"eval_steps_per_second": 23.055,
"step": 1560
},
{
"epoch": 12.076923076923077,
"grad_norm": 2.0058720111846924,
"learning_rate": 3.897435897435898e-06,
"loss": 0.8829,
"step": 1570
},
{
"epoch": 12.153846153846153,
"grad_norm": 2.2991676330566406,
"learning_rate": 3.794871794871795e-06,
"loss": 0.9152,
"step": 1580
},
{
"epoch": 12.23076923076923,
"grad_norm": 1.5903538465499878,
"learning_rate": 3.692307692307693e-06,
"loss": 0.9149,
"step": 1590
},
{
"epoch": 12.307692307692308,
"grad_norm": 1.7883615493774414,
"learning_rate": 3.58974358974359e-06,
"loss": 0.9163,
"step": 1600
},
{
"epoch": 12.384615384615385,
"grad_norm": 2.2841601371765137,
"learning_rate": 3.487179487179487e-06,
"loss": 0.8958,
"step": 1610
},
{
"epoch": 12.461538461538462,
"grad_norm": 2.3814501762390137,
"learning_rate": 3.384615384615385e-06,
"loss": 0.8918,
"step": 1620
},
{
"epoch": 12.538461538461538,
"grad_norm": 1.9848734140396118,
"learning_rate": 3.2820512820512823e-06,
"loss": 0.889,
"step": 1630
},
{
"epoch": 12.615384615384615,
"grad_norm": 1.7236778736114502,
"learning_rate": 3.1794871794871795e-06,
"loss": 0.8979,
"step": 1640
},
{
"epoch": 12.692307692307692,
"grad_norm": 3.340665102005005,
"learning_rate": 3.0769230769230774e-06,
"loss": 0.8695,
"step": 1650
},
{
"epoch": 12.76923076923077,
"grad_norm": 2.127927780151367,
"learning_rate": 2.9743589743589746e-06,
"loss": 0.9323,
"step": 1660
},
{
"epoch": 12.846153846153847,
"grad_norm": 1.8213707208633423,
"learning_rate": 2.8717948717948717e-06,
"loss": 0.9178,
"step": 1670
},
{
"epoch": 12.923076923076923,
"grad_norm": 2.0011963844299316,
"learning_rate": 2.7692307692307697e-06,
"loss": 0.8872,
"step": 1680
},
{
"epoch": 13.0,
"grad_norm": 3.812871217727661,
"learning_rate": 2.666666666666667e-06,
"loss": 0.9032,
"step": 1690
},
{
"epoch": 13.0,
"eval_accuracy": 0.7969924812030075,
"eval_loss": 0.8698711395263672,
"eval_runtime": 0.7423,
"eval_samples_per_second": 179.165,
"eval_steps_per_second": 22.901,
"step": 1690
},
{
"epoch": 13.076923076923077,
"grad_norm": 2.8741540908813477,
"learning_rate": 2.564102564102564e-06,
"loss": 0.8842,
"step": 1700
},
{
"epoch": 13.153846153846153,
"grad_norm": 2.3278818130493164,
"learning_rate": 2.461538461538462e-06,
"loss": 0.9131,
"step": 1710
},
{
"epoch": 13.23076923076923,
"grad_norm": 2.8419501781463623,
"learning_rate": 2.358974358974359e-06,
"loss": 0.8965,
"step": 1720
},
{
"epoch": 13.307692307692308,
"grad_norm": 1.8506221771240234,
"learning_rate": 2.2564102564102566e-06,
"loss": 0.8967,
"step": 1730
},
{
"epoch": 13.384615384615385,
"grad_norm": 2.6166839599609375,
"learning_rate": 2.153846153846154e-06,
"loss": 0.8785,
"step": 1740
},
{
"epoch": 13.461538461538462,
"grad_norm": 4.287515640258789,
"learning_rate": 2.0512820512820513e-06,
"loss": 0.914,
"step": 1750
},
{
"epoch": 13.538461538461538,
"grad_norm": 2.516889810562134,
"learning_rate": 1.948717948717949e-06,
"loss": 0.9286,
"step": 1760
},
{
"epoch": 13.615384615384615,
"grad_norm": 1.8332946300506592,
"learning_rate": 1.8461538461538465e-06,
"loss": 0.8995,
"step": 1770
},
{
"epoch": 13.692307692307692,
"grad_norm": 2.2418551445007324,
"learning_rate": 1.7435897435897436e-06,
"loss": 0.8818,
"step": 1780
},
{
"epoch": 13.76923076923077,
"grad_norm": 1.794832706451416,
"learning_rate": 1.6410256410256412e-06,
"loss": 0.9044,
"step": 1790
},
{
"epoch": 13.846153846153847,
"grad_norm": 3.0142152309417725,
"learning_rate": 1.5384615384615387e-06,
"loss": 0.8826,
"step": 1800
},
{
"epoch": 13.923076923076923,
"grad_norm": 2.5891315937042236,
"learning_rate": 1.4358974358974359e-06,
"loss": 0.8387,
"step": 1810
},
{
"epoch": 14.0,
"grad_norm": 5.37412691116333,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.9001,
"step": 1820
},
{
"epoch": 14.0,
"eval_accuracy": 0.7819548872180451,
"eval_loss": 0.8673797845840454,
"eval_runtime": 0.7642,
"eval_samples_per_second": 174.027,
"eval_steps_per_second": 22.244,
"step": 1820
},
{
"epoch": 14.076923076923077,
"grad_norm": 1.8213236331939697,
"learning_rate": 1.230769230769231e-06,
"loss": 0.9047,
"step": 1830
},
{
"epoch": 14.153846153846153,
"grad_norm": 1.8006333112716675,
"learning_rate": 1.1282051282051283e-06,
"loss": 0.8768,
"step": 1840
},
{
"epoch": 14.23076923076923,
"grad_norm": 2.691574811935425,
"learning_rate": 1.0256410256410257e-06,
"loss": 0.8757,
"step": 1850
},
{
"epoch": 14.307692307692308,
"grad_norm": 5.015848636627197,
"learning_rate": 9.230769230769232e-07,
"loss": 0.8734,
"step": 1860
},
{
"epoch": 14.384615384615385,
"grad_norm": 2.5233821868896484,
"learning_rate": 8.205128205128206e-07,
"loss": 0.8787,
"step": 1870
},
{
"epoch": 14.461538461538462,
"grad_norm": 2.1718924045562744,
"learning_rate": 7.179487179487179e-07,
"loss": 0.8767,
"step": 1880
},
{
"epoch": 14.538461538461538,
"grad_norm": 3.0364015102386475,
"learning_rate": 6.153846153846155e-07,
"loss": 0.873,
"step": 1890
},
{
"epoch": 14.615384615384615,
"grad_norm": 2.5152034759521484,
"learning_rate": 5.128205128205128e-07,
"loss": 0.9096,
"step": 1900
},
{
"epoch": 14.692307692307692,
"grad_norm": 1.819096565246582,
"learning_rate": 4.102564102564103e-07,
"loss": 0.892,
"step": 1910
},
{
"epoch": 14.76923076923077,
"grad_norm": 3.512732982635498,
"learning_rate": 3.0769230769230774e-07,
"loss": 0.8937,
"step": 1920
},
{
"epoch": 14.846153846153847,
"grad_norm": 2.917677879333496,
"learning_rate": 2.0512820512820514e-07,
"loss": 0.9231,
"step": 1930
},
{
"epoch": 14.923076923076923,
"grad_norm": 2.0683395862579346,
"learning_rate": 1.0256410256410257e-07,
"loss": 0.8613,
"step": 1940
},
{
"epoch": 15.0,
"grad_norm": 4.704519271850586,
"learning_rate": 0.0,
"loss": 0.8842,
"step": 1950
},
{
"epoch": 15.0,
"eval_accuracy": 0.7894736842105263,
"eval_loss": 0.8550169467926025,
"eval_runtime": 0.7718,
"eval_samples_per_second": 172.329,
"eval_steps_per_second": 22.027,
"step": 1950
},
{
"epoch": 15.0,
"step": 1950,
"total_flos": 1.5658365504595968e+17,
"train_loss": 0.6299933981284117,
"train_runtime": 92.772,
"train_samples_per_second": 167.184,
"train_steps_per_second": 21.019
}
],
"logging_steps": 10,
"max_steps": 1950,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5658365504595968e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
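
Note: the JSON above is the Trainer state from the 15-epoch run (losses logged every 10 steps, one evaluation record per epoch). Below is a minimal, illustrative Python sketch for inspecting it offline; the local filename trainer_state.json is an assumption, not part of the file, and only the standard library is used.

# Minimal sketch: load the state file and list the per-epoch evaluation records.
# Assumes the file has been downloaded locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the log_history records that carry "eval_loss".
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

for entry in eval_logs:
    print(f'epoch {entry["epoch"]:>4.0f}  '
          f'eval_loss {entry["eval_loss"]:.4f}  '
          f'eval_accuracy {entry["eval_accuracy"]:.4f}')

# The record with the lowest eval_loss corresponds to "best_metric" and
# "best_model_checkpoint" at the top of the file.
best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f'lowest eval_loss {best["eval_loss"]:.4f} at step {best["step"]}')

Run against this file, the sketch should print one row per epoch and report step 1950, whose eval_loss (0.8550) matches best_metric and the checkpoint-1950 path recorded in best_model_checkpoint.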