{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9981298423724285,
"eval_steps": 200,
"global_step": 467,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0021373230029388193,
"grad_norm": 0.0,
"learning_rate": 0.0,
"logits": -2.7276527881622314,
"logps": -123.19757843017578,
"loss": -0.9502,
"step": 1
},
{
"epoch": 0.010686615014694095,
"grad_norm": 0.0,
"learning_rate": 0.0,
"logits": -2.8715224266052246,
"logps": -234.59034729003906,
"loss": -0.9521,
"step": 5
},
{
"epoch": 0.02137323002938819,
"grad_norm": 0.0,
"learning_rate": 0.0,
"logits": -2.846045732498169,
"logps": -248.165771484375,
"loss": -0.9515,
"step": 10
},
{
"epoch": 0.03205984504408229,
"grad_norm": 0.0,
"learning_rate": 0.0,
"logits": -2.7775120735168457,
"logps": -229.2094268798828,
"loss": -0.9503,
"step": 15
},
{
"epoch": 0.04274646005877638,
"grad_norm": 0.0,
"learning_rate": 0.0,
"logits": -2.7634153366088867,
"logps": -203.92291259765625,
"loss": -0.9506,
"step": 20
},
{
"epoch": 0.053433075073470476,
"grad_norm": 15.734558829221458,
"learning_rate": 3.191489361702127e-08,
"logits": -2.917879581451416,
"logps": -291.4737854003906,
"loss": -0.9521,
"step": 25
},
{
"epoch": 0.06411969008816458,
"grad_norm": 11.680076224519096,
"learning_rate": 7.446808510638298e-08,
"logits": -2.877744197845459,
"logps": -280.48907470703125,
"loss": -0.9508,
"step": 30
},
{
"epoch": 0.07480630510285867,
"grad_norm": 8.443451358220528,
"learning_rate": 1.2765957446808508e-07,
"logits": -2.8596420288085938,
"logps": -237.61083984375,
"loss": -0.9589,
"step": 35
},
{
"epoch": 0.08549292011755276,
"grad_norm": 3.6112030581883396,
"learning_rate": 1.8085106382978725e-07,
"logits": -2.8875012397766113,
"logps": -238.05062866210938,
"loss": -0.9721,
"step": 40
},
{
"epoch": 0.09617953513224686,
"grad_norm": 2.330434375600566,
"learning_rate": 2.3404255319148937e-07,
"logits": -2.8423449993133545,
"logps": -254.7257537841797,
"loss": -0.9788,
"step": 45
},
{
"epoch": 0.10686615014694095,
"grad_norm": 1.8977765676364111,
"learning_rate": 2.872340425531915e-07,
"logits": -2.743157148361206,
"logps": -269.1846618652344,
"loss": -0.9811,
"step": 50
},
{
"epoch": 0.11755276516163506,
"grad_norm": 1.4953000184403984,
"learning_rate": 3.404255319148936e-07,
"logits": -2.9768242835998535,
"logps": -278.90875244140625,
"loss": -0.9838,
"step": 55
},
{
"epoch": 0.12823938017632916,
"grad_norm": 2.8096229198116363,
"learning_rate": 3.9361702127659574e-07,
"logits": -2.8828537464141846,
"logps": -244.09213256835938,
"loss": -0.9835,
"step": 60
},
{
"epoch": 0.13892599519102325,
"grad_norm": 1.5256462120284162,
"learning_rate": 4.4680851063829783e-07,
"logits": -2.9095747470855713,
"logps": -279.29400634765625,
"loss": -0.9854,
"step": 65
},
{
"epoch": 0.14961261020571734,
"grad_norm": 2.2701771747365225,
"learning_rate": 5e-07,
"logits": -2.7382285594940186,
"logps": -259.78839111328125,
"loss": -0.9835,
"step": 70
},
{
"epoch": 0.16029922522041143,
"grad_norm": 1.7763738397122832,
"learning_rate": 4.998251761970996e-07,
"logits": -2.598153591156006,
"logps": -253.6716766357422,
"loss": -0.9863,
"step": 75
},
{
"epoch": 0.17098584023510552,
"grad_norm": 1.4697077978466289,
"learning_rate": 4.993009492952949e-07,
"logits": -2.8250794410705566,
"logps": -295.9560852050781,
"loss": -0.9854,
"step": 80
},
{
"epoch": 0.18167245524979964,
"grad_norm": 1.4960127973989916,
"learning_rate": 4.984280524733107e-07,
"logits": -2.6275196075439453,
"logps": -290.86669921875,
"loss": -0.9874,
"step": 85
},
{
"epoch": 0.19235907026449373,
"grad_norm": 1.6048847263455288,
"learning_rate": 4.972077065562821e-07,
"logits": -2.801757335662842,
"logps": -231.8870849609375,
"loss": -0.9871,
"step": 90
},
{
"epoch": 0.20304568527918782,
"grad_norm": 1.258984494113581,
"learning_rate": 4.956416183083221e-07,
"logits": -2.640484094619751,
"logps": -286.9975891113281,
"loss": -0.9874,
"step": 95
},
{
"epoch": 0.2137323002938819,
"grad_norm": 1.0451102136142367,
"learning_rate": 4.937319780454559e-07,
"logits": -2.6160218715667725,
"logps": -235.74368286132812,
"loss": -0.9882,
"step": 100
},
{
"epoch": 0.224418915308576,
"grad_norm": 0.9560541281518616,
"learning_rate": 4.91481456572267e-07,
"logits": -2.621774196624756,
"logps": -288.15924072265625,
"loss": -0.9891,
"step": 105
},
{
"epoch": 0.2351055303232701,
"grad_norm": 1.086374805840901,
"learning_rate": 4.888932014465352e-07,
"logits": -2.499404191970825,
"logps": -306.10992431640625,
"loss": -0.9897,
"step": 110
},
{
"epoch": 0.2457921453379642,
"grad_norm": 1.8425882600326258,
"learning_rate": 4.859708325770919e-07,
"logits": -2.172153949737549,
"logps": -289.72344970703125,
"loss": -0.9889,
"step": 115
},
{
"epoch": 0.2564787603526583,
"grad_norm": 1.1791146646766286,
"learning_rate": 4.82718437161051e-07,
"logits": -2.1123390197753906,
"logps": -267.1687316894531,
"loss": -0.9891,
"step": 120
},
{
"epoch": 0.2671653753673524,
"grad_norm": 1.2181022008552986,
"learning_rate": 4.79140563967494e-07,
"logits": -2.1335525512695312,
"logps": -283.4874267578125,
"loss": -0.9899,
"step": 125
},
{
"epoch": 0.2778519903820465,
"grad_norm": 1.3131651758878582,
"learning_rate": 4.752422169756047e-07,
"logits": -2.1036856174468994,
"logps": -281.2633972167969,
"loss": -0.9889,
"step": 130
},
{
"epoch": 0.2885386053967406,
"grad_norm": 1.1246350463161292,
"learning_rate": 4.710288483761524e-07,
"logits": -2.0363128185272217,
"logps": -276.03009033203125,
"loss": -0.9892,
"step": 135
},
{
"epoch": 0.2992252204114347,
"grad_norm": 1.1744979116142513,
"learning_rate": 4.6650635094610966e-07,
"logits": -1.8472490310668945,
"logps": -320.5764465332031,
"loss": -0.99,
"step": 140
},
{
"epoch": 0.30991183542612877,
"grad_norm": 1.2335452788412884,
"learning_rate": 4.6168104980707103e-07,
"logits": -1.4329324960708618,
"logps": -354.47222900390625,
"loss": -0.9889,
"step": 145
},
{
"epoch": 0.32059845044082286,
"grad_norm": 1.3758262751007333,
"learning_rate": 4.565596935789987e-07,
"logits": -1.8408161401748657,
"logps": -306.6959228515625,
"loss": -0.9892,
"step": 150
},
{
"epoch": 0.33128506545551695,
"grad_norm": 0.8683791379292062,
"learning_rate": 4.511494449416671e-07,
"logits": -1.7431236505508423,
"logps": -328.41876220703125,
"loss": -0.9897,
"step": 155
},
{
"epoch": 0.34197168047021104,
"grad_norm": 1.185028787615611,
"learning_rate": 4.4545787061700746e-07,
"logits": -1.6122312545776367,
"logps": -362.706787109375,
"loss": -0.9901,
"step": 160
},
{
"epoch": 0.3526582954849052,
"grad_norm": 0.9402503644643285,
"learning_rate": 4.394929307863632e-07,
"logits": -1.8733676671981812,
"logps": -325.729736328125,
"loss": -0.9896,
"step": 165
},
{
"epoch": 0.36334491049959927,
"grad_norm": 0.9910810146608703,
"learning_rate": 4.332629679574565e-07,
"logits": -1.3691990375518799,
"logps": -309.1920471191406,
"loss": -0.9904,
"step": 170
},
{
"epoch": 0.37403152551429336,
"grad_norm": 1.0139742191324101,
"learning_rate": 4.2677669529663686e-07,
"logits": -1.2885710000991821,
"logps": -312.348876953125,
"loss": -0.9901,
"step": 175
},
{
"epoch": 0.38471814052898745,
"grad_norm": 1.0624962636145272,
"learning_rate": 4.200431844427298e-07,
"logits": -1.4409315586090088,
"logps": -323.0936279296875,
"loss": -0.9898,
"step": 180
},
{
"epoch": 0.39540475554368154,
"grad_norm": 0.9238164718732454,
"learning_rate": 4.130718528195303e-07,
"logits": -1.3237049579620361,
"logps": -271.7065124511719,
"loss": -0.9902,
"step": 185
},
{
"epoch": 0.40609137055837563,
"grad_norm": 1.6805087656261446,
"learning_rate": 4.058724504646834e-07,
"logits": -1.4338650703430176,
"logps": -314.31158447265625,
"loss": -0.9905,
"step": 190
},
{
"epoch": 0.4167779855730697,
"grad_norm": 1.0520666144071318,
"learning_rate": 3.9845504639337535e-07,
"logits": -1.1166661977767944,
"logps": -375.02099609375,
"loss": -0.9908,
"step": 195
},
{
"epoch": 0.4274646005877638,
"grad_norm": 1.8265551536946028,
"learning_rate": 3.908300145159055e-07,
"logits": -0.7473028302192688,
"logps": -326.22174072265625,
"loss": -0.9908,
"step": 200
},
{
"epoch": 0.4274646005877638,
"eval_logits": -0.6105663776397705,
"eval_logps": -365.471435546875,
"eval_loss": -0.9910063743591309,
"eval_runtime": 561.9474,
"eval_samples_per_second": 3.502,
"eval_steps_per_second": 0.219,
"step": 200
},
{
"epoch": 0.4381512156024579,
"grad_norm": 1.1571237674798547,
"learning_rate": 3.8300801912883414e-07,
"logits": -0.5235253572463989,
"logps": -333.2412414550781,
"loss": -0.9908,
"step": 205
},
{
"epoch": 0.448837830617152,
"grad_norm": 1.1146228462238759,
"learning_rate": 3.75e-07,
"logits": -0.4318702220916748,
"logps": -330.295654296875,
"loss": -0.9905,
"step": 210
},
{
"epoch": 0.45952444563184613,
"grad_norm": 1.2787439488099548,
"learning_rate": 3.668171570682655e-07,
"logits": -0.32339486479759216,
"logps": -314.34735107421875,
"loss": -0.9916,
"step": 215
},
{
"epoch": 0.4702110606465402,
"grad_norm": 1.2513111469300429,
"learning_rate": 3.584709347793895e-07,
"logits": -0.02529122307896614,
"logps": -309.16632080078125,
"loss": -0.9903,
"step": 220
},
{
"epoch": 0.4808976756612343,
"grad_norm": 1.8872538119047462,
"learning_rate": 3.499730060799352e-07,
"logits": -0.8243139386177063,
"logps": -361.0574035644531,
"loss": -0.9913,
"step": 225
},
{
"epoch": 0.4915842906759284,
"grad_norm": 1.3203520406448612,
"learning_rate": 3.413352560915988e-07,
"logits": -0.8895326852798462,
"logps": -372.61767578125,
"loss": -0.991,
"step": 230
},
{
"epoch": 0.5022709056906225,
"grad_norm": 1.0362622413225342,
"learning_rate": 3.325697654887918e-07,
"logits": -0.6164321899414062,
"logps": -319.2579650878906,
"loss": -0.9913,
"step": 235
},
{
"epoch": 0.5129575207053166,
"grad_norm": 1.6260362606620915,
"learning_rate": 3.2368879360272606e-07,
"logits": -0.3822157382965088,
"logps": -366.2575988769531,
"loss": -0.9914,
"step": 240
},
{
"epoch": 0.5236441357200107,
"grad_norm": 1.2889766602701416,
"learning_rate": 3.147047612756302e-07,
"logits": -0.968255341053009,
"logps": -340.54095458984375,
"loss": -0.9912,
"step": 245
},
{
"epoch": 0.5343307507347048,
"grad_norm": 0.8620079534338061,
"learning_rate": 3.056302334890786e-07,
"logits": -1.6567840576171875,
"logps": -310.63653564453125,
"loss": -0.9924,
"step": 250
},
{
"epoch": 0.5450173657493989,
"grad_norm": 1.3611897722808162,
"learning_rate": 2.964779017907287e-07,
"logits": -0.6750233769416809,
"logps": -349.7366638183594,
"loss": -0.9917,
"step": 255
},
{
"epoch": 0.555703980764093,
"grad_norm": 0.8337405600545534,
"learning_rate": 2.872605665440436e-07,
"logits": -0.4019729197025299,
"logps": -370.806640625,
"loss": -0.9914,
"step": 260
},
{
"epoch": 0.566390595778787,
"grad_norm": 0.9052623457524422,
"learning_rate": 2.7799111902582693e-07,
"logits": -1.068468451499939,
"logps": -334.2131652832031,
"loss": -0.9915,
"step": 265
},
{
"epoch": 0.5770772107934812,
"grad_norm": 2.046114501421888,
"learning_rate": 2.6868252339660607e-07,
"logits": -0.8225592374801636,
"logps": -329.8605041503906,
"loss": -0.9913,
"step": 270
},
{
"epoch": 0.5877638258081752,
"grad_norm": 1.273749104352798,
"learning_rate": 2.593477985690815e-07,
"logits": -1.324310064315796,
"logps": -343.4034118652344,
"loss": -0.9918,
"step": 275
},
{
"epoch": 0.5984504408228694,
"grad_norm": 1.214923788229,
"learning_rate": 2.5e-07,
"logits": -0.5436466932296753,
"logps": -387.28472900390625,
"loss": -0.9911,
"step": 280
},
{
"epoch": 0.6091370558375635,
"grad_norm": 1.1451126728510552,
"learning_rate": 2.406522014309186e-07,
"logits": -0.6337051391601562,
"logps": -327.0845947265625,
"loss": -0.9923,
"step": 285
},
{
"epoch": 0.6198236708522575,
"grad_norm": 0.8529696256174257,
"learning_rate": 2.3131747660339394e-07,
"logits": -0.6581249237060547,
"logps": -378.38848876953125,
"loss": -0.9918,
"step": 290
},
{
"epoch": 0.6305102858669517,
"grad_norm": 1.6433386031064567,
"learning_rate": 2.2200888097417302e-07,
"logits": -0.031244849786162376,
"logps": -328.96514892578125,
"loss": -0.9915,
"step": 295
},
{
"epoch": 0.6411969008816457,
"grad_norm": 0.8917092325354914,
"learning_rate": 2.1273943345595635e-07,
"logits": -0.49933141469955444,
"logps": -353.3529052734375,
"loss": -0.9916,
"step": 300
},
{
"epoch": 0.6518835158963399,
"grad_norm": 1.837252039541841,
"learning_rate": 2.0352209820927135e-07,
"logits": -0.6463004946708679,
"logps": -366.4661560058594,
"loss": -0.9913,
"step": 305
},
{
"epoch": 0.6625701309110339,
"grad_norm": 1.1401311407033219,
"learning_rate": 1.9436976651092142e-07,
"logits": -0.5396396517753601,
"logps": -346.47540283203125,
"loss": -0.9914,
"step": 310
},
{
"epoch": 0.673256745925728,
"grad_norm": 0.8968248109630922,
"learning_rate": 1.8529523872436977e-07,
"logits": -0.7154626250267029,
"logps": -387.49053955078125,
"loss": -0.9915,
"step": 315
},
{
"epoch": 0.6839433609404221,
"grad_norm": 1.3188728624613946,
"learning_rate": 1.763112063972739e-07,
"logits": -0.5408394932746887,
"logps": -344.79180908203125,
"loss": -0.9927,
"step": 320
},
{
"epoch": 0.6946299759551162,
"grad_norm": 0.7700933509536648,
"learning_rate": 1.674302345112083e-07,
"logits": -0.7688044309616089,
"logps": -396.57989501953125,
"loss": -0.9927,
"step": 325
},
{
"epoch": 0.7053165909698104,
"grad_norm": 1.4394660851768872,
"learning_rate": 1.5866474390840124e-07,
"logits": -0.7753411531448364,
"logps": -384.23236083984375,
"loss": -0.9923,
"step": 330
},
{
"epoch": 0.7160032059845044,
"grad_norm": 0.9447091695103774,
"learning_rate": 1.500269939200648e-07,
"logits": -0.09479126334190369,
"logps": -432.61029052734375,
"loss": -0.9935,
"step": 335
},
{
"epoch": 0.7266898209991985,
"grad_norm": 0.9304517566031328,
"learning_rate": 1.4152906522061047e-07,
"logits": -0.597342848777771,
"logps": -341.08721923828125,
"loss": -0.9926,
"step": 340
},
{
"epoch": 0.7373764360138926,
"grad_norm": 1.0332373400712702,
"learning_rate": 1.3318284293173449e-07,
"logits": -1.0064098834991455,
"logps": -376.7502136230469,
"loss": -0.9927,
"step": 345
},
{
"epoch": 0.7480630510285867,
"grad_norm": 0.9801926195059047,
"learning_rate": 1.2500000000000005e-07,
"logits": -0.6184051036834717,
"logps": -361.1688537597656,
"loss": -0.9933,
"step": 350
},
{
"epoch": 0.7587496660432808,
"grad_norm": 1.1560661623887567,
"learning_rate": 1.1699198087116588e-07,
"logits": -0.7793210744857788,
"logps": -349.13873291015625,
"loss": -0.9918,
"step": 355
},
{
"epoch": 0.7694362810579749,
"grad_norm": 1.0468793785520436,
"learning_rate": 1.0916998548409447e-07,
"logits": -0.33462679386138916,
"logps": -379.23602294921875,
"loss": -0.9925,
"step": 360
},
{
"epoch": 0.7801228960726689,
"grad_norm": 2.2699810159110534,
"learning_rate": 1.0154495360662463e-07,
"logits": -0.48606783151626587,
"logps": -368.26214599609375,
"loss": -0.992,
"step": 365
},
{
"epoch": 0.7908095110873631,
"grad_norm": 1.0786500613394177,
"learning_rate": 9.412754953531663e-08,
"logits": -0.26565030217170715,
"logps": -390.9700012207031,
"loss": -0.9921,
"step": 370
},
{
"epoch": 0.8014961261020572,
"grad_norm": 0.9910237834434807,
"learning_rate": 8.692814718046978e-08,
"logits": 0.028326725587248802,
"logps": -396.8414001464844,
"loss": -0.9923,
"step": 375
},
{
"epoch": 0.8121827411167513,
"grad_norm": 0.8386677992353745,
"learning_rate": 7.99568155572701e-08,
"logits": -0.024175168946385384,
"logps": -394.0411682128906,
"loss": -0.9927,
"step": 380
},
{
"epoch": 0.8228693561314454,
"grad_norm": 0.8224042152269938,
"learning_rate": 7.322330470336313e-08,
"logits": 0.3705129027366638,
"logps": -428.37664794921875,
"loss": -0.9922,
"step": 385
},
{
"epoch": 0.8335559711461394,
"grad_norm": 1.183204369689231,
"learning_rate": 6.673703204254347e-08,
"logits": 0.11569230258464813,
"logps": -389.3094787597656,
"loss": -0.9926,
"step": 390
},
{
"epoch": 0.8442425861608336,
"grad_norm": 0.9944059089182833,
"learning_rate": 6.050706921363672e-08,
"logits": -0.35151463747024536,
"logps": -354.1724853515625,
"loss": -0.9913,
"step": 395
},
{
"epoch": 0.8549292011755276,
"grad_norm": 1.4003912147296091,
"learning_rate": 5.454212938299255e-08,
"logits": -0.6611379981040955,
"logps": -382.3763427734375,
"loss": -0.9924,
"step": 400
},
{
"epoch": 0.8549292011755276,
"eval_logits": -0.6677941679954529,
"eval_logps": -393.3538513183594,
"eval_loss": -0.9923494458198547,
"eval_runtime": 513.6314,
"eval_samples_per_second": 3.832,
"eval_steps_per_second": 0.239,
"step": 400
},
{
"epoch": 0.8656158161902218,
"grad_norm": 2.1056427662724397,
"learning_rate": 4.885055505833291e-08,
"logits": -0.6193000078201294,
"logps": -392.50592041015625,
"loss": -0.9922,
"step": 405
},
{
"epoch": 0.8763024312049158,
"grad_norm": 0.7735856387160954,
"learning_rate": 4.3440306421001324e-08,
"logits": -0.22712135314941406,
"logps": -373.3880920410156,
"loss": -0.9922,
"step": 410
},
{
"epoch": 0.88698904621961,
"grad_norm": 1.3876415111401237,
"learning_rate": 3.831895019292897e-08,
"logits": -0.4037683606147766,
"logps": -356.56182861328125,
"loss": -0.9926,
"step": 415
},
{
"epoch": 0.897675661234304,
"grad_norm": 1.0176914347842085,
"learning_rate": 3.4434694900509345e-08,
"logits": -0.9573896527290344,
"logps": -360.57562255859375,
"loss": -0.9915,
"step": 420
},
{
"epoch": 0.9083622762489981,
"grad_norm": 1.203480672418267,
"learning_rate": 2.98511170358155e-08,
"logits": -0.5279964208602905,
"logps": -370.0226135253906,
"loss": -0.9925,
"step": 425
},
{
"epoch": 0.9190488912636923,
"grad_norm": 1.0489783355443068,
"learning_rate": 2.55754372905142e-08,
"logits": -0.6505440473556519,
"logps": -400.1978759765625,
"loss": -0.993,
"step": 430
},
{
"epoch": 0.9297355062783863,
"grad_norm": 0.7610142229014231,
"learning_rate": 2.1613635589349756e-08,
"logits": -0.8178772926330566,
"logps": -357.04608154296875,
"loss": -0.9922,
"step": 435
},
{
"epoch": 0.9404221212930804,
"grad_norm": 1.6402467715526239,
"learning_rate": 1.797125287024029e-08,
"logits": -0.5346024632453918,
"logps": -344.31884765625,
"loss": -0.9926,
"step": 440
},
{
"epoch": 0.9511087363077745,
"grad_norm": 0.8237683547839563,
"learning_rate": 1.4653383334774228e-08,
"logits": -0.6444264650344849,
"logps": -416.3868713378906,
"loss": -0.9921,
"step": 445
},
{
"epoch": 0.9617953513224686,
"grad_norm": 1.2323961485498716,
"learning_rate": 1.1664667323509347e-08,
"logits": -0.62237548828125,
"logps": -365.18109130859375,
"loss": -0.9915,
"step": 450
},
{
"epoch": 0.9724819663371627,
"grad_norm": 1.3354878112154691,
"learning_rate": 9.009284826036689e-09,
"logits": -0.22248443961143494,
"logps": -369.40570068359375,
"loss": -0.9927,
"step": 455
},
{
"epoch": 0.9831685813518568,
"grad_norm": 1.0035054571755926,
"learning_rate": 6.6909496348871445e-09,
"logits": -0.07116945087909698,
"logps": -352.45648193359375,
"loss": -0.9928,
"step": 460
},
{
"epoch": 0.9938551963665508,
"grad_norm": 2.832254415741253,
"learning_rate": 4.712904151456864e-09,
"logits": -0.6286806464195251,
"logps": -369.8558349609375,
"loss": -0.9917,
"step": 465
},
{
"epoch": 0.9981298423724285,
"step": 467,
"total_flos": 0.0,
"train_loss": -0.9873674071575386,
"train_runtime": 41077.5542,
"train_samples_per_second": 1.458,
"train_steps_per_second": 0.011
}
],
"logging_steps": 5,
"max_steps": 467,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 125,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}