{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1109,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009017132551848512,
"grad_norm": 1.8547641038894653,
"learning_rate": 9.00900900900901e-06,
"loss": 0.9349,
"step": 10
},
{
"epoch": 0.018034265103697024,
"grad_norm": 1.347398281097412,
"learning_rate": 1.801801801801802e-05,
"loss": 0.7242,
"step": 20
},
{
"epoch": 0.027051397655545536,
"grad_norm": 1.2487881183624268,
"learning_rate": 2.702702702702703e-05,
"loss": 0.4507,
"step": 30
},
{
"epoch": 0.03606853020739405,
"grad_norm": 1.1477875709533691,
"learning_rate": 3.603603603603604e-05,
"loss": 0.33,
"step": 40
},
{
"epoch": 0.04508566275924256,
"grad_norm": 0.923848569393158,
"learning_rate": 4.5045045045045046e-05,
"loss": 0.234,
"step": 50
},
{
"epoch": 0.05410279531109107,
"grad_norm": 0.9260658025741577,
"learning_rate": 5.405405405405406e-05,
"loss": 0.2077,
"step": 60
},
{
"epoch": 0.06311992786293959,
"grad_norm": 1.255967378616333,
"learning_rate": 6.306306306306306e-05,
"loss": 0.1886,
"step": 70
},
{
"epoch": 0.0721370604147881,
"grad_norm": 0.7384198904037476,
"learning_rate": 7.207207207207208e-05,
"loss": 0.1645,
"step": 80
},
{
"epoch": 0.0811541929666366,
"grad_norm": 0.672892153263092,
"learning_rate": 8.108108108108109e-05,
"loss": 0.1591,
"step": 90
},
{
"epoch": 0.09017132551848513,
"grad_norm": 0.8307783007621765,
"learning_rate": 9.009009009009009e-05,
"loss": 0.1595,
"step": 100
},
{
"epoch": 0.09918845807033363,
"grad_norm": 0.7313665151596069,
"learning_rate": 9.90990990990991e-05,
"loss": 0.1406,
"step": 110
},
{
"epoch": 0.10820559062218214,
"grad_norm": 0.985645592212677,
"learning_rate": 9.997993520894937e-05,
"loss": 0.1385,
"step": 120
},
{
"epoch": 0.11722272317403065,
"grad_norm": 0.9960182905197144,
"learning_rate": 9.99105961120544e-05,
"loss": 0.137,
"step": 130
},
{
"epoch": 0.12623985572587917,
"grad_norm": 0.5437267422676086,
"learning_rate": 9.979180368331558e-05,
"loss": 0.1269,
"step": 140
},
{
"epoch": 0.13525698827772767,
"grad_norm": 0.5780126452445984,
"learning_rate": 9.962367562682496e-05,
"loss": 0.1282,
"step": 150
},
{
"epoch": 0.1442741208295762,
"grad_norm": 0.8550663590431213,
"learning_rate": 9.940637853030572e-05,
"loss": 0.1267,
"step": 160
},
{
"epoch": 0.1532912533814247,
"grad_norm": 0.5869548916816711,
"learning_rate": 9.914012770005072e-05,
"loss": 0.1224,
"step": 170
},
{
"epoch": 0.1623083859332732,
"grad_norm": 0.6128000617027283,
"learning_rate": 9.882518694758875e-05,
"loss": 0.1059,
"step": 180
},
{
"epoch": 0.17132551848512173,
"grad_norm": 0.852254331111908,
"learning_rate": 9.846186832828989e-05,
"loss": 0.0997,
"step": 190
},
{
"epoch": 0.18034265103697025,
"grad_norm": 0.6299287676811218,
"learning_rate": 9.805053183216923e-05,
"loss": 0.1038,
"step": 200
},
{
"epoch": 0.18935978358881875,
"grad_norm": 0.8325192928314209,
"learning_rate": 9.759158502719481e-05,
"loss": 0.1207,
"step": 210
},
{
"epoch": 0.19837691614066727,
"grad_norm": 0.46743109822273254,
"learning_rate": 9.708548265545375e-05,
"loss": 0.105,
"step": 220
},
{
"epoch": 0.2073940486925158,
"grad_norm": 0.6069552302360535,
"learning_rate": 9.653272618257631e-05,
"loss": 0.0968,
"step": 230
},
{
"epoch": 0.2164111812443643,
"grad_norm": 0.6297630071640015,
"learning_rate": 9.593386330086458e-05,
"loss": 0.0982,
"step": 240
},
{
"epoch": 0.2254283137962128,
"grad_norm": 0.574450671672821,
"learning_rate": 9.528948738661784e-05,
"loss": 0.0996,
"step": 250
},
{
"epoch": 0.2344454463480613,
"grad_norm": 0.8703469038009644,
"learning_rate": 9.460023691219277e-05,
"loss": 0.0919,
"step": 260
},
{
"epoch": 0.24346257889990983,
"grad_norm": 0.6118870377540588,
"learning_rate": 9.386679481338033e-05,
"loss": 0.094,
"step": 270
},
{
"epoch": 0.25247971145175835,
"grad_norm": 0.47032231092453003,
"learning_rate": 9.308988781272694e-05,
"loss": 0.0918,
"step": 280
},
{
"epoch": 0.26149684400360684,
"grad_norm": 0.5307496786117554,
"learning_rate": 9.227028569946996e-05,
"loss": 0.095,
"step": 290
},
{
"epoch": 0.27051397655545534,
"grad_norm": 0.37769177556037903,
"learning_rate": 9.140880056680088e-05,
"loss": 0.0892,
"step": 300
},
{
"epoch": 0.2795311091073039,
"grad_norm": 0.5687931776046753,
"learning_rate": 9.050628600721234e-05,
"loss": 0.0853,
"step": 310
},
{
"epoch": 0.2885482416591524,
"grad_norm": 0.6189451217651367,
"learning_rate": 8.956363626672595e-05,
"loss": 0.0838,
"step": 320
},
{
"epoch": 0.2975653742110009,
"grad_norm": 0.6863543391227722,
"learning_rate": 8.858178535883905e-05,
"loss": 0.0825,
"step": 330
},
{
"epoch": 0.3065825067628494,
"grad_norm": 0.8236618041992188,
"learning_rate": 8.756170613906833e-05,
"loss": 0.0832,
"step": 340
},
{
"epoch": 0.3155996393146979,
"grad_norm": 0.5764484405517578,
"learning_rate": 8.650440934100728e-05,
"loss": 0.0775,
"step": 350
},
{
"epoch": 0.3246167718665464,
"grad_norm": 0.40208104252815247,
"learning_rate": 8.541094257485265e-05,
"loss": 0.0751,
"step": 360
},
{
"epoch": 0.33363390441839497,
"grad_norm": 0.6935873031616211,
"learning_rate": 8.428238928939207e-05,
"loss": 0.0793,
"step": 370
},
{
"epoch": 0.34265103697024346,
"grad_norm": 0.5251246094703674,
"learning_rate": 8.311986769848141e-05,
"loss": 0.0713,
"step": 380
},
{
"epoch": 0.35166816952209196,
"grad_norm": 0.4371775686740875,
"learning_rate": 8.192452967307576e-05,
"loss": 0.0712,
"step": 390
},
{
"epoch": 0.3606853020739405,
"grad_norm": 0.6153225302696228,
"learning_rate": 8.069755959991142e-05,
"loss": 0.0694,
"step": 400
},
{
"epoch": 0.369702434625789,
"grad_norm": 0.4341914653778076,
"learning_rate": 7.944017320797013e-05,
"loss": 0.075,
"step": 410
},
{
"epoch": 0.3787195671776375,
"grad_norm": 0.4805607199668884,
"learning_rate": 7.815361636388827e-05,
"loss": 0.0665,
"step": 420
},
{
"epoch": 0.38773669972948605,
"grad_norm": 0.508466362953186,
"learning_rate": 7.683916383750436e-05,
"loss": 0.0692,
"step": 430
},
{
"epoch": 0.39675383228133454,
"grad_norm": 0.7154425978660583,
"learning_rate": 7.549811803876825e-05,
"loss": 0.0693,
"step": 440
},
{
"epoch": 0.40577096483318303,
"grad_norm": 0.3025909960269928,
"learning_rate": 7.413180772726348e-05,
"loss": 0.0721,
"step": 450
},
{
"epoch": 0.4147880973850316,
"grad_norm": 0.5098099708557129,
"learning_rate": 7.274158669562126e-05,
"loss": 0.0691,
"step": 460
},
{
"epoch": 0.4238052299368801,
"grad_norm": 0.31458744406700134,
"learning_rate": 7.13288324281309e-05,
"loss": 0.0668,
"step": 470
},
{
"epoch": 0.4328223624887286,
"grad_norm": 0.5931514501571655,
"learning_rate": 6.989494473587554e-05,
"loss": 0.0656,
"step": 480
},
{
"epoch": 0.4418394950405771,
"grad_norm": 0.3595449924468994,
"learning_rate": 6.844134436974567e-05,
"loss": 0.0646,
"step": 490
},
{
"epoch": 0.4508566275924256,
"grad_norm": 0.5599433779716492,
"learning_rate": 6.696947161270476e-05,
"loss": 0.0687,
"step": 500
},
{
"epoch": 0.4598737601442741,
"grad_norm": 0.4869447946548462,
"learning_rate": 6.548078485270152e-05,
"loss": 0.0683,
"step": 510
},
{
"epoch": 0.4688908926961226,
"grad_norm": 0.36777040362358093,
"learning_rate": 6.397675913764347e-05,
"loss": 0.0658,
"step": 520
},
{
"epoch": 0.47790802524797116,
"grad_norm": 0.5203920602798462,
"learning_rate": 6.245888471386263e-05,
"loss": 0.0627,
"step": 530
},
{
"epoch": 0.48692515779981965,
"grad_norm": 0.46625274419784546,
"learning_rate": 6.0928665549522554e-05,
"loss": 0.0622,
"step": 540
},
{
"epoch": 0.49594229035166815,
"grad_norm": 0.7451359629631042,
"learning_rate": 5.9387617844429e-05,
"loss": 0.0618,
"step": 550
},
{
"epoch": 0.5049594229035167,
"grad_norm": 0.392704576253891,
"learning_rate": 5.78372685277209e-05,
"loss": 0.0589,
"step": 560
},
{
"epoch": 0.5139765554553652,
"grad_norm": 0.44599130749702454,
"learning_rate": 5.627915374493061e-05,
"loss": 0.0615,
"step": 570
},
{
"epoch": 0.5229936880072137,
"grad_norm": 0.3163304328918457,
"learning_rate": 5.4714817335911894e-05,
"loss": 0.0503,
"step": 580
},
{
"epoch": 0.5320108205590622,
"grad_norm": 0.6041306257247925,
"learning_rate": 5.314580930514431e-05,
"loss": 0.0598,
"step": 590
},
{
"epoch": 0.5410279531109107,
"grad_norm": 0.38383838534355164,
"learning_rate": 5.157368428592933e-05,
"loss": 0.0589,
"step": 600
},
{
"epoch": 0.5500450856627592,
"grad_norm": 0.2501381039619446,
"learning_rate": 5e-05,
"loss": 0.049,
"step": 610
},
{
"epoch": 0.5590622182146078,
"grad_norm": 0.3422386348247528,
"learning_rate": 4.8426315714070684e-05,
"loss": 0.0538,
"step": 620
},
{
"epoch": 0.5680793507664562,
"grad_norm": 0.5101218223571777,
"learning_rate": 4.6854190694855694e-05,
"loss": 0.0497,
"step": 630
},
{
"epoch": 0.5770964833183048,
"grad_norm": 0.37502148747444153,
"learning_rate": 4.528518266408811e-05,
"loss": 0.052,
"step": 640
},
{
"epoch": 0.5861136158701533,
"grad_norm": 0.3856571912765503,
"learning_rate": 4.3720846255069406e-05,
"loss": 0.0568,
"step": 650
},
{
"epoch": 0.5951307484220018,
"grad_norm": 0.3386954069137573,
"learning_rate": 4.21627314722791e-05,
"loss": 0.0493,
"step": 660
},
{
"epoch": 0.6041478809738503,
"grad_norm": 0.28927159309387207,
"learning_rate": 4.0612382155571026e-05,
"loss": 0.0574,
"step": 670
},
{
"epoch": 0.6131650135256989,
"grad_norm": 0.3935062289237976,
"learning_rate": 3.907133445047747e-05,
"loss": 0.0434,
"step": 680
},
{
"epoch": 0.6221821460775473,
"grad_norm": 0.39534249901771545,
"learning_rate": 3.75411152861374e-05,
"loss": 0.0487,
"step": 690
},
{
"epoch": 0.6311992786293958,
"grad_norm": 0.35437795519828796,
"learning_rate": 3.602324086235655e-05,
"loss": 0.0512,
"step": 700
},
{
"epoch": 0.6402164111812444,
"grad_norm": 0.2841149866580963,
"learning_rate": 3.451921514729848e-05,
"loss": 0.0391,
"step": 710
},
{
"epoch": 0.6492335437330928,
"grad_norm": 0.3514391779899597,
"learning_rate": 3.303052838729525e-05,
"loss": 0.0484,
"step": 720
},
{
"epoch": 0.6582506762849414,
"grad_norm": 0.29207444190979004,
"learning_rate": 3.155865563025433e-05,
"loss": 0.0443,
"step": 730
},
{
"epoch": 0.6672678088367899,
"grad_norm": 0.27113544940948486,
"learning_rate": 3.010505526412447e-05,
"loss": 0.0446,
"step": 740
},
{
"epoch": 0.6762849413886384,
"grad_norm": 0.4424648880958557,
"learning_rate": 2.867116757186911e-05,
"loss": 0.0431,
"step": 750
},
{
"epoch": 0.6853020739404869,
"grad_norm": 0.4714156687259674,
"learning_rate": 2.7258413304378734e-05,
"loss": 0.0468,
"step": 760
},
{
"epoch": 0.6943192064923355,
"grad_norm": 0.3349123001098633,
"learning_rate": 2.5868192272736514e-05,
"loss": 0.0433,
"step": 770
},
{
"epoch": 0.7033363390441839,
"grad_norm": 0.5246461629867554,
"learning_rate": 2.450188196123177e-05,
"loss": 0.041,
"step": 780
},
{
"epoch": 0.7123534715960325,
"grad_norm": 0.37693607807159424,
"learning_rate": 2.3160836162495653e-05,
"loss": 0.0512,
"step": 790
},
{
"epoch": 0.721370604147881,
"grad_norm": 0.33574390411376953,
"learning_rate": 2.1846383636111743e-05,
"loss": 0.0452,
"step": 800
},
{
"epoch": 0.7303877366997295,
"grad_norm": 0.22386135160923004,
"learning_rate": 2.0559826792029884e-05,
"loss": 0.0424,
"step": 810
},
{
"epoch": 0.739404869251578,
"grad_norm": 0.40371736884117126,
"learning_rate": 1.9302440400088606e-05,
"loss": 0.0357,
"step": 820
},
{
"epoch": 0.7484220018034266,
"grad_norm": 0.4526534676551819,
"learning_rate": 1.8075470326924243e-05,
"loss": 0.0432,
"step": 830
},
{
"epoch": 0.757439134355275,
"grad_norm": 0.2903415858745575,
"learning_rate": 1.6880132301518598e-05,
"loss": 0.0375,
"step": 840
},
{
"epoch": 0.7664562669071235,
"grad_norm": 0.30989566445350647,
"learning_rate": 1.5717610710607948e-05,
"loss": 0.0377,
"step": 850
},
{
"epoch": 0.7754733994589721,
"grad_norm": 0.3357095420360565,
"learning_rate": 1.458905742514734e-05,
"loss": 0.0376,
"step": 860
},
{
"epoch": 0.7844905320108205,
"grad_norm": 0.47947272658348083,
"learning_rate": 1.3495590658992718e-05,
"loss": 0.037,
"step": 870
},
{
"epoch": 0.7935076645626691,
"grad_norm": 0.3474498391151428,
"learning_rate": 1.2438293860931677e-05,
"loss": 0.0397,
"step": 880
},
{
"epoch": 0.8025247971145176,
"grad_norm": 0.4065302610397339,
"learning_rate": 1.1418214641160958e-05,
"loss": 0.0378,
"step": 890
},
{
"epoch": 0.8115419296663661,
"grad_norm": 0.20412218570709229,
"learning_rate": 1.0436363733274057e-05,
"loss": 0.0333,
"step": 900
},
{
"epoch": 0.8205590622182146,
"grad_norm": 0.2562933564186096,
"learning_rate": 9.493713992787672e-06,
"loss": 0.0396,
"step": 910
},
{
"epoch": 0.8295761947700632,
"grad_norm": 0.3250836133956909,
"learning_rate": 8.591199433199126e-06,
"loss": 0.0351,
"step": 920
},
{
"epoch": 0.8385933273219116,
"grad_norm": 0.31816214323043823,
"learning_rate": 7.72971430053005e-06,
"loss": 0.043,
"step": 930
},
{
"epoch": 0.8476104598737602,
"grad_norm": 0.28158149123191833,
"learning_rate": 6.910112187273066e-06,
"loss": 0.0324,
"step": 940
},
{
"epoch": 0.8566275924256087,
"grad_norm": 0.34491991996765137,
"learning_rate": 6.133205186619695e-06,
"loss": 0.046,
"step": 950
},
{
"epoch": 0.8656447249774571,
"grad_norm": 0.3850158154964447,
"learning_rate": 5.399763087807236e-06,
"loss": 0.0339,
"step": 960
},
{
"epoch": 0.8746618575293057,
"grad_norm": 0.3283858597278595,
"learning_rate": 4.710512613382151e-06,
"loss": 0.0388,
"step": 970
},
{
"epoch": 0.8836789900811542,
"grad_norm": 0.4049375653266907,
"learning_rate": 4.0661366991354365e-06,
"loss": 0.0346,
"step": 980
},
{
"epoch": 0.8926961226330027,
"grad_norm": 0.43446460366249084,
"learning_rate": 3.4672738174236884e-06,
"loss": 0.0346,
"step": 990
},
{
"epoch": 0.9017132551848512,
"grad_norm": 0.2909717857837677,
"learning_rate": 2.914517344546258e-06,
"loss": 0.0372,
"step": 1000
},
{
"epoch": 0.9107303877366997,
"grad_norm": 0.42221134901046753,
"learning_rate": 2.4084149728051952e-06,
"loss": 0.0317,
"step": 1010
},
{
"epoch": 0.9197475202885482,
"grad_norm": 0.3318589925765991,
"learning_rate": 1.9494681678307703e-06,
"loss": 0.0343,
"step": 1020
},
{
"epoch": 0.9287646528403968,
"grad_norm": 0.3418586552143097,
"learning_rate": 1.538131671710108e-06,
"loss": 0.0364,
"step": 1030
},
{
"epoch": 0.9377817853922452,
"grad_norm": 0.2581607699394226,
"learning_rate": 1.1748130524112666e-06,
"loss": 0.0347,
"step": 1040
},
{
"epoch": 0.9467989179440938,
"grad_norm": 0.37032172083854675,
"learning_rate": 8.59872299949288e-07,
"loss": 0.0402,
"step": 1050
},
{
"epoch": 0.9558160504959423,
"grad_norm": 0.2215413898229599,
"learning_rate": 5.936214696942887e-07,
"loss": 0.0357,
"step": 1060
},
{
"epoch": 0.9648331830477908,
"grad_norm": 0.30502429604530334,
"learning_rate": 3.7632437317505207e-07,
"loss": 0.0332,
"step": 1070
},
{
"epoch": 0.9738503155996393,
"grad_norm": 0.24881534278392792,
"learning_rate": 2.0819631668442253e-07,
"loss": 0.033,
"step": 1080
},
{
"epoch": 0.9828674481514879,
"grad_norm": 0.7615265846252441,
"learning_rate": 8.940388794559939e-08,
"loss": 0.0378,
"step": 1090
},
{
"epoch": 0.9918845807033363,
"grad_norm": 0.4428829848766327,
"learning_rate": 2.0064791050633526e-08,
"loss": 0.0334,
"step": 1100
},
{
"epoch": 1.0,
"step": 1109,
"total_flos": 2.052344509146071e+17,
"train_loss": 0.08958469805575578,
"train_runtime": 1690.0157,
"train_samples_per_second": 5.249,
"train_steps_per_second": 0.656
}
],
"logging_steps": 10,
"max_steps": 1109,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.052344509146071e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}