{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.4615384615384617,
"eval_steps": 10,
"global_step": 800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006153846153846154,
"eval_loss": 1.845949411392212,
"eval_runtime": 1.9761,
"eval_samples_per_second": 57.69,
"eval_steps_per_second": 3.036,
"step": 2
},
{
"epoch": 0.03076923076923077,
"grad_norm": 29.46457290649414,
"learning_rate": 7.692307692307694e-07,
"loss": 1.944,
"step": 10
},
{
"epoch": 0.03076923076923077,
"eval_loss": 1.7965577840805054,
"eval_runtime": 1.9506,
"eval_samples_per_second": 58.444,
"eval_steps_per_second": 3.076,
"step": 10
},
{
"epoch": 0.06153846153846154,
"grad_norm": 14.49207878112793,
"learning_rate": 1.5384615384615387e-06,
"loss": 1.6999,
"step": 20
},
{
"epoch": 0.06153846153846154,
"eval_loss": 1.5032916069030762,
"eval_runtime": 1.9599,
"eval_samples_per_second": 58.168,
"eval_steps_per_second": 3.061,
"step": 20
},
{
"epoch": 0.09230769230769231,
"grad_norm": 11.355466842651367,
"learning_rate": 2.307692307692308e-06,
"loss": 1.1499,
"step": 30
},
{
"epoch": 0.09230769230769231,
"eval_loss": 0.8391174674034119,
"eval_runtime": 1.9437,
"eval_samples_per_second": 58.65,
"eval_steps_per_second": 3.087,
"step": 30
},
{
"epoch": 0.12307692307692308,
"grad_norm": 5.648468017578125,
"learning_rate": 3.0769230769230774e-06,
"loss": 0.7194,
"step": 40
},
{
"epoch": 0.12307692307692308,
"eval_loss": 0.6716201305389404,
"eval_runtime": 1.9517,
"eval_samples_per_second": 58.411,
"eval_steps_per_second": 3.074,
"step": 40
},
{
"epoch": 0.15384615384615385,
"grad_norm": 5.752841949462891,
"learning_rate": 3.846153846153847e-06,
"loss": 0.6811,
"step": 50
},
{
"epoch": 0.15384615384615385,
"eval_loss": 0.6289324164390564,
"eval_runtime": 1.952,
"eval_samples_per_second": 58.401,
"eval_steps_per_second": 3.074,
"step": 50
},
{
"epoch": 0.18461538461538463,
"grad_norm": 3.414722204208374,
"learning_rate": 4.615384615384616e-06,
"loss": 0.5924,
"step": 60
},
{
"epoch": 0.18461538461538463,
"eval_loss": 0.6140013337135315,
"eval_runtime": 1.94,
"eval_samples_per_second": 58.762,
"eval_steps_per_second": 3.093,
"step": 60
},
{
"epoch": 0.2153846153846154,
"grad_norm": 5.556036949157715,
"learning_rate": 5.384615384615385e-06,
"loss": 0.6135,
"step": 70
},
{
"epoch": 0.2153846153846154,
"eval_loss": 0.6054026484489441,
"eval_runtime": 1.9659,
"eval_samples_per_second": 57.988,
"eval_steps_per_second": 3.052,
"step": 70
},
{
"epoch": 0.24615384615384617,
"grad_norm": 4.436710834503174,
"learning_rate": 6.153846153846155e-06,
"loss": 0.5952,
"step": 80
},
{
"epoch": 0.24615384615384617,
"eval_loss": 0.5986860990524292,
"eval_runtime": 1.9428,
"eval_samples_per_second": 58.677,
"eval_steps_per_second": 3.088,
"step": 80
},
{
"epoch": 0.27692307692307694,
"grad_norm": 3.496018409729004,
"learning_rate": 6.923076923076923e-06,
"loss": 0.5887,
"step": 90
},
{
"epoch": 0.27692307692307694,
"eval_loss": 0.594973623752594,
"eval_runtime": 1.9468,
"eval_samples_per_second": 58.558,
"eval_steps_per_second": 3.082,
"step": 90
},
{
"epoch": 0.3076923076923077,
"grad_norm": 2.3540539741516113,
"learning_rate": 7.692307692307694e-06,
"loss": 0.5634,
"step": 100
},
{
"epoch": 0.3076923076923077,
"eval_loss": 0.5954164266586304,
"eval_runtime": 1.948,
"eval_samples_per_second": 58.521,
"eval_steps_per_second": 3.08,
"step": 100
},
{
"epoch": 0.3384615384615385,
"grad_norm": 2.9880635738372803,
"learning_rate": 8.461538461538462e-06,
"loss": 0.5827,
"step": 110
},
{
"epoch": 0.3384615384615385,
"eval_loss": 0.5955133438110352,
"eval_runtime": 1.9432,
"eval_samples_per_second": 58.667,
"eval_steps_per_second": 3.088,
"step": 110
},
{
"epoch": 0.36923076923076925,
"grad_norm": 1.9405996799468994,
"learning_rate": 9.230769230769232e-06,
"loss": 0.5726,
"step": 120
},
{
"epoch": 0.36923076923076925,
"eval_loss": 0.5951128602027893,
"eval_runtime": 1.9715,
"eval_samples_per_second": 57.824,
"eval_steps_per_second": 3.043,
"step": 120
},
{
"epoch": 0.4,
"grad_norm": 1.9948021173477173,
"learning_rate": 1e-05,
"loss": 0.6488,
"step": 130
},
{
"epoch": 0.4,
"eval_loss": 0.5964463949203491,
"eval_runtime": 2.1032,
"eval_samples_per_second": 54.203,
"eval_steps_per_second": 2.853,
"step": 130
},
{
"epoch": 0.4307692307692308,
"grad_norm": 1.95350980758667,
"learning_rate": 1.076923076923077e-05,
"loss": 0.6218,
"step": 140
},
{
"epoch": 0.4307692307692308,
"eval_loss": 0.5996471047401428,
"eval_runtime": 1.9539,
"eval_samples_per_second": 58.345,
"eval_steps_per_second": 3.071,
"step": 140
},
{
"epoch": 0.46153846153846156,
"grad_norm": 3.840015411376953,
"learning_rate": 1.1538461538461538e-05,
"loss": 0.5585,
"step": 150
},
{
"epoch": 0.46153846153846156,
"eval_loss": 0.6000372767448425,
"eval_runtime": 1.9457,
"eval_samples_per_second": 58.592,
"eval_steps_per_second": 3.084,
"step": 150
},
{
"epoch": 0.49230769230769234,
"grad_norm": 2.345364809036255,
"learning_rate": 1.230769230769231e-05,
"loss": 0.5314,
"step": 160
},
{
"epoch": 0.49230769230769234,
"eval_loss": 0.6025042533874512,
"eval_runtime": 1.9398,
"eval_samples_per_second": 58.768,
"eval_steps_per_second": 3.093,
"step": 160
},
{
"epoch": 0.5230769230769231,
"grad_norm": 3.162071704864502,
"learning_rate": 1.3076923076923078e-05,
"loss": 0.6901,
"step": 170
},
{
"epoch": 0.5230769230769231,
"eval_loss": 0.598171055316925,
"eval_runtime": 1.9419,
"eval_samples_per_second": 58.705,
"eval_steps_per_second": 3.09,
"step": 170
},
{
"epoch": 0.5538461538461539,
"grad_norm": 3.9432108402252197,
"learning_rate": 1.3846153846153847e-05,
"loss": 0.6604,
"step": 180
},
{
"epoch": 0.5538461538461539,
"eval_loss": 0.5974885821342468,
"eval_runtime": 1.9664,
"eval_samples_per_second": 57.975,
"eval_steps_per_second": 3.051,
"step": 180
},
{
"epoch": 0.5846153846153846,
"grad_norm": 1.904718279838562,
"learning_rate": 1.4615384615384615e-05,
"loss": 0.5806,
"step": 190
},
{
"epoch": 0.5846153846153846,
"eval_loss": 0.6012160778045654,
"eval_runtime": 2.082,
"eval_samples_per_second": 54.755,
"eval_steps_per_second": 2.882,
"step": 190
},
{
"epoch": 0.6153846153846154,
"grad_norm": 2.2823173999786377,
"learning_rate": 1.5384615384615387e-05,
"loss": 0.588,
"step": 200
},
{
"epoch": 0.6153846153846154,
"eval_loss": 0.6052933931350708,
"eval_runtime": 1.9613,
"eval_samples_per_second": 58.124,
"eval_steps_per_second": 3.059,
"step": 200
},
{
"epoch": 0.6461538461538462,
"grad_norm": 1.8020161390304565,
"learning_rate": 1.6153846153846154e-05,
"loss": 0.5826,
"step": 210
},
{
"epoch": 0.6461538461538462,
"eval_loss": 0.6047356724739075,
"eval_runtime": 1.9487,
"eval_samples_per_second": 58.5,
"eval_steps_per_second": 3.079,
"step": 210
},
{
"epoch": 0.676923076923077,
"grad_norm": 1.8297995328903198,
"learning_rate": 1.6923076923076924e-05,
"loss": 0.5442,
"step": 220
},
{
"epoch": 0.676923076923077,
"eval_loss": 0.6105689406394958,
"eval_runtime": 2.0567,
"eval_samples_per_second": 55.429,
"eval_steps_per_second": 2.917,
"step": 220
},
{
"epoch": 0.7076923076923077,
"grad_norm": 1.8141131401062012,
"learning_rate": 1.7692307692307694e-05,
"loss": 0.602,
"step": 230
},
{
"epoch": 0.7076923076923077,
"eval_loss": 0.6149886250495911,
"eval_runtime": 2.1951,
"eval_samples_per_second": 51.933,
"eval_steps_per_second": 2.733,
"step": 230
},
{
"epoch": 0.7384615384615385,
"grad_norm": 2.093683958053589,
"learning_rate": 1.8461538461538465e-05,
"loss": 0.6103,
"step": 240
},
{
"epoch": 0.7384615384615385,
"eval_loss": 0.621900200843811,
"eval_runtime": 1.9465,
"eval_samples_per_second": 58.567,
"eval_steps_per_second": 3.082,
"step": 240
},
{
"epoch": 0.7692307692307693,
"grad_norm": 1.840038776397705,
"learning_rate": 1.923076923076923e-05,
"loss": 0.5775,
"step": 250
},
{
"epoch": 0.7692307692307693,
"eval_loss": 0.6232128739356995,
"eval_runtime": 1.9883,
"eval_samples_per_second": 57.334,
"eval_steps_per_second": 3.018,
"step": 250
},
{
"epoch": 0.8,
"grad_norm": 1.7329185009002686,
"learning_rate": 2e-05,
"loss": 0.6808,
"step": 260
},
{
"epoch": 0.8,
"eval_loss": 0.6226441264152527,
"eval_runtime": 1.9469,
"eval_samples_per_second": 58.553,
"eval_steps_per_second": 3.082,
"step": 260
},
{
"epoch": 0.8307692307692308,
"grad_norm": 2.738678455352783,
"learning_rate": 1.999909877856721e-05,
"loss": 0.5833,
"step": 270
},
{
"epoch": 0.8307692307692308,
"eval_loss": 0.6240194439888,
"eval_runtime": 1.9419,
"eval_samples_per_second": 58.706,
"eval_steps_per_second": 3.09,
"step": 270
},
{
"epoch": 0.8615384615384616,
"grad_norm": 2.0359909534454346,
"learning_rate": 1.9996395276708856e-05,
"loss": 0.691,
"step": 280
},
{
"epoch": 0.8615384615384616,
"eval_loss": 0.6267117857933044,
"eval_runtime": 1.939,
"eval_samples_per_second": 58.794,
"eval_steps_per_second": 3.094,
"step": 280
},
{
"epoch": 0.8923076923076924,
"grad_norm": 1.7706685066223145,
"learning_rate": 1.9991889981715696e-05,
"loss": 0.6404,
"step": 290
},
{
"epoch": 0.8923076923076924,
"eval_loss": 0.6222097873687744,
"eval_runtime": 1.9533,
"eval_samples_per_second": 58.363,
"eval_steps_per_second": 3.072,
"step": 290
},
{
"epoch": 0.9230769230769231,
"grad_norm": 2.129652976989746,
"learning_rate": 1.9985583705641418e-05,
"loss": 0.7109,
"step": 300
},
{
"epoch": 0.9230769230769231,
"eval_loss": 0.6229674816131592,
"eval_runtime": 1.9467,
"eval_samples_per_second": 58.561,
"eval_steps_per_second": 3.082,
"step": 300
},
{
"epoch": 0.9538461538461539,
"grad_norm": 1.8439427614212036,
"learning_rate": 1.9977477585156252e-05,
"loss": 0.6623,
"step": 310
},
{
"epoch": 0.9538461538461539,
"eval_loss": 0.6273208856582642,
"eval_runtime": 2.0985,
"eval_samples_per_second": 54.324,
"eval_steps_per_second": 2.859,
"step": 310
},
{
"epoch": 0.9846153846153847,
"grad_norm": 2.1446306705474854,
"learning_rate": 1.9967573081342103e-05,
"loss": 0.5583,
"step": 320
},
{
"epoch": 0.9846153846153847,
"eval_loss": 0.6260280013084412,
"eval_runtime": 1.9521,
"eval_samples_per_second": 58.398,
"eval_steps_per_second": 3.074,
"step": 320
},
{
"epoch": 1.0153846153846153,
"grad_norm": 1.9259562492370605,
"learning_rate": 1.9955871979429188e-05,
"loss": 0.4857,
"step": 330
},
{
"epoch": 1.0153846153846153,
"eval_loss": 0.6497873067855835,
"eval_runtime": 1.9489,
"eval_samples_per_second": 58.496,
"eval_steps_per_second": 3.079,
"step": 330
},
{
"epoch": 1.0461538461538462,
"grad_norm": 1.8625404834747314,
"learning_rate": 1.9942376388474282e-05,
"loss": 0.4152,
"step": 340
},
{
"epoch": 1.0461538461538462,
"eval_loss": 0.6430822014808655,
"eval_runtime": 1.9534,
"eval_samples_per_second": 58.358,
"eval_steps_per_second": 3.071,
"step": 340
},
{
"epoch": 1.0769230769230769,
"grad_norm": 1.1712415218353271,
"learning_rate": 1.992708874098054e-05,
"loss": 0.3459,
"step": 350
},
{
"epoch": 1.0769230769230769,
"eval_loss": 0.671064019203186,
"eval_runtime": 1.9772,
"eval_samples_per_second": 57.658,
"eval_steps_per_second": 3.035,
"step": 350
},
{
"epoch": 1.1076923076923078,
"grad_norm": 1.0710129737854004,
"learning_rate": 1.9910011792459086e-05,
"loss": 0.4307,
"step": 360
},
{
"epoch": 1.1076923076923078,
"eval_loss": 0.6545295119285583,
"eval_runtime": 2.0822,
"eval_samples_per_second": 54.749,
"eval_steps_per_second": 2.882,
"step": 360
},
{
"epoch": 1.1384615384615384,
"grad_norm": 1.839811086654663,
"learning_rate": 1.989114862093232e-05,
"loss": 0.4084,
"step": 370
},
{
"epoch": 1.1384615384615384,
"eval_loss": 0.6743721961975098,
"eval_runtime": 1.962,
"eval_samples_per_second": 58.105,
"eval_steps_per_second": 3.058,
"step": 370
},
{
"epoch": 1.1692307692307693,
"grad_norm": 2.2416107654571533,
"learning_rate": 1.9870502626379127e-05,
"loss": 0.4203,
"step": 380
},
{
"epoch": 1.1692307692307693,
"eval_loss": 0.6703702211380005,
"eval_runtime": 2.0871,
"eval_samples_per_second": 54.621,
"eval_steps_per_second": 2.875,
"step": 380
},
{
"epoch": 1.2,
"grad_norm": 1.8775320053100586,
"learning_rate": 1.9848077530122083e-05,
"loss": 0.3056,
"step": 390
},
{
"epoch": 1.2,
"eval_loss": 0.680027425289154,
"eval_runtime": 2.0739,
"eval_samples_per_second": 54.97,
"eval_steps_per_second": 2.893,
"step": 390
},
{
"epoch": 1.2307692307692308,
"grad_norm": 2.1544764041900635,
"learning_rate": 1.9823877374156647e-05,
"loss": 0.4332,
"step": 400
},
{
"epoch": 1.2307692307692308,
"eval_loss": 0.6662920117378235,
"eval_runtime": 1.9498,
"eval_samples_per_second": 58.466,
"eval_steps_per_second": 3.077,
"step": 400
},
{
"epoch": 1.2615384615384615,
"grad_norm": 1.740652084350586,
"learning_rate": 1.979790652042268e-05,
"loss": 0.3707,
"step": 410
},
{
"epoch": 1.2615384615384615,
"eval_loss": 0.6721649765968323,
"eval_runtime": 1.9421,
"eval_samples_per_second": 58.699,
"eval_steps_per_second": 3.089,
"step": 410
},
{
"epoch": 1.2923076923076924,
"grad_norm": 1.8645464181900024,
"learning_rate": 1.977016965001817e-05,
"loss": 0.412,
"step": 420
},
{
"epoch": 1.2923076923076924,
"eval_loss": 0.6605477333068848,
"eval_runtime": 1.9455,
"eval_samples_per_second": 58.597,
"eval_steps_per_second": 3.084,
"step": 420
},
{
"epoch": 1.323076923076923,
"grad_norm": 1.3930943012237549,
"learning_rate": 1.9740671762355548e-05,
"loss": 0.388,
"step": 430
},
{
"epoch": 1.323076923076923,
"eval_loss": 0.6649342775344849,
"eval_runtime": 1.9497,
"eval_samples_per_second": 58.471,
"eval_steps_per_second": 3.077,
"step": 430
},
{
"epoch": 1.353846153846154,
"grad_norm": 1.8291655778884888,
"learning_rate": 1.9709418174260523e-05,
"loss": 0.4118,
"step": 440
},
{
"epoch": 1.353846153846154,
"eval_loss": 0.6680696606636047,
"eval_runtime": 2.0262,
"eval_samples_per_second": 56.262,
"eval_steps_per_second": 2.961,
"step": 440
},
{
"epoch": 1.3846153846153846,
"grad_norm": 1.1135263442993164,
"learning_rate": 1.9676414519013782e-05,
"loss": 0.3205,
"step": 450
},
{
"epoch": 1.3846153846153846,
"eval_loss": 0.6739374995231628,
"eval_runtime": 1.9443,
"eval_samples_per_second": 58.633,
"eval_steps_per_second": 3.086,
"step": 450
},
{
"epoch": 1.4153846153846155,
"grad_norm": 2.233457326889038,
"learning_rate": 1.9641666745335626e-05,
"loss": 0.3591,
"step": 460
},
{
"epoch": 1.4153846153846155,
"eval_loss": 0.6751876473426819,
"eval_runtime": 1.9447,
"eval_samples_per_second": 58.622,
"eval_steps_per_second": 3.085,
"step": 460
},
{
"epoch": 1.4461538461538461,
"grad_norm": 1.815677523612976,
"learning_rate": 1.9605181116313725e-05,
"loss": 0.4107,
"step": 470
},
{
"epoch": 1.4461538461538461,
"eval_loss": 0.6670271158218384,
"eval_runtime": 1.9448,
"eval_samples_per_second": 58.618,
"eval_steps_per_second": 3.085,
"step": 470
},
{
"epoch": 1.476923076923077,
"grad_norm": 2.0177175998687744,
"learning_rate": 1.9566964208274254e-05,
"loss": 0.3954,
"step": 480
},
{
"epoch": 1.476923076923077,
"eval_loss": 0.6706439256668091,
"eval_runtime": 2.1454,
"eval_samples_per_second": 53.137,
"eval_steps_per_second": 2.797,
"step": 480
},
{
"epoch": 1.5076923076923077,
"grad_norm": 2.2797820568084717,
"learning_rate": 1.9527022909596537e-05,
"loss": 0.349,
"step": 490
},
{
"epoch": 1.5076923076923077,
"eval_loss": 0.6916453242301941,
"eval_runtime": 1.9454,
"eval_samples_per_second": 58.599,
"eval_steps_per_second": 3.084,
"step": 490
},
{
"epoch": 1.5384615384615383,
"grad_norm": 1.6123872995376587,
"learning_rate": 1.9485364419471454e-05,
"loss": 0.3796,
"step": 500
},
{
"epoch": 1.5384615384615383,
"eval_loss": 0.6646614670753479,
"eval_runtime": 1.9525,
"eval_samples_per_second": 58.386,
"eval_steps_per_second": 3.073,
"step": 500
},
{
"epoch": 1.5692307692307692,
"grad_norm": 2.039036989212036,
"learning_rate": 1.9441996246603848e-05,
"loss": 0.4163,
"step": 510
},
{
"epoch": 1.5692307692307692,
"eval_loss": 0.6671331524848938,
"eval_runtime": 1.9375,
"eval_samples_per_second": 58.84,
"eval_steps_per_second": 3.097,
"step": 510
},
{
"epoch": 1.6,
"grad_norm": 1.8493613004684448,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.3768,
"step": 520
},
{
"epoch": 1.6,
"eval_loss": 0.6984831094741821,
"eval_runtime": 1.9384,
"eval_samples_per_second": 58.812,
"eval_steps_per_second": 3.095,
"step": 520
},
{
"epoch": 1.6307692307692307,
"grad_norm": 1.1424219608306885,
"learning_rate": 1.9350162426854152e-05,
"loss": 0.3985,
"step": 530
},
{
"epoch": 1.6307692307692307,
"eval_loss": 0.6629871726036072,
"eval_runtime": 1.9471,
"eval_samples_per_second": 58.549,
"eval_steps_per_second": 3.082,
"step": 530
},
{
"epoch": 1.6615384615384614,
"grad_norm": 1.8115930557250977,
"learning_rate": 1.9301713332493386e-05,
"loss": 0.397,
"step": 540
},
{
"epoch": 1.6615384615384614,
"eval_loss": 0.6737513542175293,
"eval_runtime": 1.9496,
"eval_samples_per_second": 58.473,
"eval_steps_per_second": 3.078,
"step": 540
},
{
"epoch": 1.6923076923076923,
"grad_norm": 1.7575629949569702,
"learning_rate": 1.925158765744924e-05,
"loss": 0.4624,
"step": 550
},
{
"epoch": 1.6923076923076923,
"eval_loss": 0.660962700843811,
"eval_runtime": 1.953,
"eval_samples_per_second": 58.372,
"eval_steps_per_second": 3.072,
"step": 550
},
{
"epoch": 1.7230769230769232,
"grad_norm": 1.7702680826187134,
"learning_rate": 1.9199794436588244e-05,
"loss": 0.3705,
"step": 560
},
{
"epoch": 1.7230769230769232,
"eval_loss": 0.663165807723999,
"eval_runtime": 1.9548,
"eval_samples_per_second": 58.319,
"eval_steps_per_second": 3.069,
"step": 560
},
{
"epoch": 1.7538461538461538,
"grad_norm": 1.4927833080291748,
"learning_rate": 1.9146343005342546e-05,
"loss": 0.3708,
"step": 570
},
{
"epoch": 1.7538461538461538,
"eval_loss": 0.673720121383667,
"eval_runtime": 1.9582,
"eval_samples_per_second": 58.216,
"eval_steps_per_second": 3.064,
"step": 570
},
{
"epoch": 1.7846153846153845,
"grad_norm": 1.6623133420944214,
"learning_rate": 1.909124299802724e-05,
"loss": 0.399,
"step": 580
},
{
"epoch": 1.7846153846153845,
"eval_loss": 0.6685453653335571,
"eval_runtime": 1.9645,
"eval_samples_per_second": 58.029,
"eval_steps_per_second": 3.054,
"step": 580
},
{
"epoch": 1.8153846153846154,
"grad_norm": 2.253755807876587,
"learning_rate": 1.9034504346103825e-05,
"loss": 0.3642,
"step": 590
},
{
"epoch": 1.8153846153846154,
"eval_loss": 0.6629040241241455,
"eval_runtime": 1.9527,
"eval_samples_per_second": 58.382,
"eval_steps_per_second": 3.073,
"step": 590
},
{
"epoch": 1.8461538461538463,
"grad_norm": 1.8123295307159424,
"learning_rate": 1.8976137276390145e-05,
"loss": 0.4213,
"step": 600
},
{
"epoch": 1.8461538461538463,
"eval_loss": 0.6636160016059875,
"eval_runtime": 2.1305,
"eval_samples_per_second": 53.508,
"eval_steps_per_second": 2.816,
"step": 600
},
{
"epoch": 1.876923076923077,
"grad_norm": 1.6377712488174438,
"learning_rate": 1.891615230921703e-05,
"loss": 0.3154,
"step": 610
},
{
"epoch": 1.876923076923077,
"eval_loss": 0.6621889472007751,
"eval_runtime": 1.9727,
"eval_samples_per_second": 57.789,
"eval_steps_per_second": 3.042,
"step": 610
},
{
"epoch": 1.9076923076923076,
"grad_norm": 2.1680402755737305,
"learning_rate": 1.8854560256532098e-05,
"loss": 0.421,
"step": 620
},
{
"epoch": 1.9076923076923076,
"eval_loss": 0.6584432721138,
"eval_runtime": 1.9532,
"eval_samples_per_second": 58.367,
"eval_steps_per_second": 3.072,
"step": 620
},
{
"epoch": 1.9384615384615385,
"grad_norm": 1.771596074104309,
"learning_rate": 1.879137221995095e-05,
"loss": 0.3625,
"step": 630
},
{
"epoch": 1.9384615384615385,
"eval_loss": 0.6574673652648926,
"eval_runtime": 1.9593,
"eval_samples_per_second": 58.185,
"eval_steps_per_second": 3.062,
"step": 630
},
{
"epoch": 1.9692307692307693,
"grad_norm": 1.6291016340255737,
"learning_rate": 1.8726599588756144e-05,
"loss": 0.3787,
"step": 640
},
{
"epoch": 1.9692307692307693,
"eval_loss": 0.6593620181083679,
"eval_runtime": 2.1362,
"eval_samples_per_second": 53.367,
"eval_steps_per_second": 2.809,
"step": 640
},
{
"epoch": 2.0,
"grad_norm": 1.721921682357788,
"learning_rate": 1.866025403784439e-05,
"loss": 0.3923,
"step": 650
},
{
"epoch": 2.0,
"eval_loss": 0.6485319137573242,
"eval_runtime": 1.967,
"eval_samples_per_second": 57.957,
"eval_steps_per_second": 3.05,
"step": 650
},
{
"epoch": 2.0307692307692307,
"grad_norm": 1.6391288042068481,
"learning_rate": 1.859234752562217e-05,
"loss": 0.2137,
"step": 660
},
{
"epoch": 2.0307692307692307,
"eval_loss": 0.7283760905265808,
"eval_runtime": 1.9428,
"eval_samples_per_second": 58.678,
"eval_steps_per_second": 3.088,
"step": 660
},
{
"epoch": 2.0615384615384613,
"grad_norm": 1.9791120290756226,
"learning_rate": 1.8522892291850335e-05,
"loss": 0.2221,
"step": 670
},
{
"epoch": 2.0615384615384613,
"eval_loss": 0.7443767189979553,
"eval_runtime": 1.9429,
"eval_samples_per_second": 58.676,
"eval_steps_per_second": 3.088,
"step": 670
},
{
"epoch": 2.0923076923076924,
"grad_norm": 2.0093741416931152,
"learning_rate": 1.845190085543795e-05,
"loss": 0.1777,
"step": 680
},
{
"epoch": 2.0923076923076924,
"eval_loss": 0.7445951700210571,
"eval_runtime": 1.9592,
"eval_samples_per_second": 58.186,
"eval_steps_per_second": 3.062,
"step": 680
},
{
"epoch": 2.123076923076923,
"grad_norm": 1.8424338102340698,
"learning_rate": 1.8379386012185813e-05,
"loss": 0.1755,
"step": 690
},
{
"epoch": 2.123076923076923,
"eval_loss": 0.7517443299293518,
"eval_runtime": 2.0747,
"eval_samples_per_second": 54.947,
"eval_steps_per_second": 2.892,
"step": 690
},
{
"epoch": 2.1538461538461537,
"grad_norm": 1.4635226726531982,
"learning_rate": 1.8305360832480118e-05,
"loss": 0.1742,
"step": 700
},
{
"epoch": 2.1538461538461537,
"eval_loss": 0.7649147510528564,
"eval_runtime": 1.9437,
"eval_samples_per_second": 58.65,
"eval_steps_per_second": 3.087,
"step": 700
},
{
"epoch": 2.184615384615385,
"grad_norm": 1.9029723405838013,
"learning_rate": 1.8229838658936566e-05,
"loss": 0.2037,
"step": 710
},
{
"epoch": 2.184615384615385,
"eval_loss": 0.7539044618606567,
"eval_runtime": 2.1243,
"eval_samples_per_second": 53.664,
"eval_steps_per_second": 2.824,
"step": 710
},
{
"epoch": 2.2153846153846155,
"grad_norm": 1.3570928573608398,
"learning_rate": 1.8152833103995443e-05,
"loss": 0.1954,
"step": 720
},
{
"epoch": 2.2153846153846155,
"eval_loss": 0.7460123300552368,
"eval_runtime": 2.1466,
"eval_samples_per_second": 53.108,
"eval_steps_per_second": 2.795,
"step": 720
},
{
"epoch": 2.246153846153846,
"grad_norm": 1.5055183172225952,
"learning_rate": 1.807435804746807e-05,
"loss": 0.2027,
"step": 730
},
{
"epoch": 2.246153846153846,
"eval_loss": 0.7585832476615906,
"eval_runtime": 2.1298,
"eval_samples_per_second": 53.525,
"eval_steps_per_second": 2.817,
"step": 730
},
{
"epoch": 2.276923076923077,
"grad_norm": 1.0890541076660156,
"learning_rate": 1.7994427634035016e-05,
"loss": 0.1867,
"step": 740
},
{
"epoch": 2.276923076923077,
"eval_loss": 0.7535340785980225,
"eval_runtime": 1.9471,
"eval_samples_per_second": 58.549,
"eval_steps_per_second": 3.082,
"step": 740
},
{
"epoch": 2.3076923076923075,
"grad_norm": 1.11017644405365,
"learning_rate": 1.791305627069662e-05,
"loss": 0.1875,
"step": 750
},
{
"epoch": 2.3076923076923075,
"eval_loss": 0.7587242126464844,
"eval_runtime": 1.9462,
"eval_samples_per_second": 58.575,
"eval_steps_per_second": 3.083,
"step": 750
},
{
"epoch": 2.3384615384615386,
"grad_norm": 1.7792816162109375,
"learning_rate": 1.7830258624176224e-05,
"loss": 0.1841,
"step": 760
},
{
"epoch": 2.3384615384615386,
"eval_loss": 0.7774962782859802,
"eval_runtime": 1.942,
"eval_samples_per_second": 58.703,
"eval_steps_per_second": 3.09,
"step": 760
},
{
"epoch": 2.3692307692307693,
"grad_norm": 1.384929895401001,
"learning_rate": 1.7746049618276545e-05,
"loss": 0.1724,
"step": 770
},
{
"epoch": 2.3692307692307693,
"eval_loss": 0.7517464756965637,
"eval_runtime": 2.0655,
"eval_samples_per_second": 55.191,
"eval_steps_per_second": 2.905,
"step": 770
},
{
"epoch": 2.4,
"grad_norm": 1.3608155250549316,
"learning_rate": 1.766044443118978e-05,
"loss": 0.1831,
"step": 780
},
{
"epoch": 2.4,
"eval_loss": 0.7726874351501465,
"eval_runtime": 1.952,
"eval_samples_per_second": 58.4,
"eval_steps_per_second": 3.074,
"step": 780
},
{
"epoch": 2.430769230769231,
"grad_norm": 0.9307472109794617,
"learning_rate": 1.7573458492761802e-05,
"loss": 0.1897,
"step": 790
},
{
"epoch": 2.430769230769231,
"eval_loss": 0.7573862075805664,
"eval_runtime": 1.9435,
"eval_samples_per_second": 58.658,
"eval_steps_per_second": 3.087,
"step": 790
},
{
"epoch": 2.4615384615384617,
"grad_norm": 1.0741816759109497,
"learning_rate": 1.7485107481711014e-05,
"loss": 0.1936,
"step": 800
},
{
"epoch": 2.4615384615384617,
"eval_loss": 0.7550384402275085,
"eval_runtime": 1.9506,
"eval_samples_per_second": 58.443,
"eval_steps_per_second": 3.076,
"step": 800
}
],
"logging_steps": 10,
"max_steps": 2600,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 50,
"total_flos": 6.974779907951821e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
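The state above follows the standard Hugging Face Trainer trainer_state.json schema: log_history interleaves training entries (loss, grad_norm, learning_rate) with periodic evaluation entries (eval_loss plus throughput fields). In this run the training loss falls steadily while eval_loss bottoms out near 0.59 around step 100 and then drifts up toward 0.75 by step 800. Below is a minimal, non-authoritative sketch for inspecting those curves from this file; it assumes the file is saved as trainer_state.json next to the checkpoint and that matplotlib is installed (both are assumptions, not part of the checkpoint itself).

# Sketch only: reads the trainer state dumped above and plots the two loss curves.
# Assumes the file path "trainer_state.json" and an available matplotlib install.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:  # hypothetical path, adjust to your checkpoint dir
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}, epoch={state['epoch']:.2f}")
print(f"last train loss: {train_log[-1]['loss']}")
print(f"last eval loss:  {eval_log[-1]['eval_loss']}")

# Plot both curves against step to see train loss falling while eval loss rises.
plt.plot([e["step"] for e in train_log], [e["loss"] for e in train_log], label="train loss")
plt.plot([e["step"] for e in eval_log], [e["eval_loss"] for e in eval_log], label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")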