{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.48685491723466406,
"eval_steps": 500,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0019474196689386564,
"grad_norm": 0.6627452969551086,
"learning_rate": 2.5e-05,
"loss": 1.8562,
"step": 1
},
{
"epoch": 0.0038948393378773127,
"grad_norm": 0.9325484037399292,
"learning_rate": 5e-05,
"loss": 2.0532,
"step": 2
},
{
"epoch": 0.005842259006815969,
"grad_norm": 0.8353978991508484,
"learning_rate": 7.500000000000001e-05,
"loss": 1.8732,
"step": 3
},
{
"epoch": 0.007789678675754625,
"grad_norm": 0.6053460836410522,
"learning_rate": 0.0001,
"loss": 1.942,
"step": 4
},
{
"epoch": 0.009737098344693282,
"grad_norm": 0.5565560460090637,
"learning_rate": 0.000125,
"loss": 1.6511,
"step": 5
},
{
"epoch": 0.011684518013631937,
"grad_norm": 0.9751281142234802,
"learning_rate": 0.00015000000000000001,
"loss": 1.9445,
"step": 6
},
{
"epoch": 0.013631937682570594,
"grad_norm": 0.730299711227417,
"learning_rate": 0.000175,
"loss": 1.3815,
"step": 7
},
{
"epoch": 0.01557935735150925,
"grad_norm": 0.753030002117157,
"learning_rate": 0.0002,
"loss": 1.1519,
"step": 8
},
{
"epoch": 0.017526777020447908,
"grad_norm": 0.8523620367050171,
"learning_rate": 0.00019917355371900828,
"loss": 1.2913,
"step": 9
},
{
"epoch": 0.019474196689386564,
"grad_norm": 1.002160906791687,
"learning_rate": 0.00019834710743801655,
"loss": 0.8663,
"step": 10
},
{
"epoch": 0.021421616358325218,
"grad_norm": 1.114942193031311,
"learning_rate": 0.00019752066115702482,
"loss": 0.7233,
"step": 11
},
{
"epoch": 0.023369036027263874,
"grad_norm": 1.1557896137237549,
"learning_rate": 0.0001966942148760331,
"loss": 0.9432,
"step": 12
},
{
"epoch": 0.02531645569620253,
"grad_norm": 2.1235923767089844,
"learning_rate": 0.00019586776859504133,
"loss": 0.6598,
"step": 13
},
{
"epoch": 0.027263875365141188,
"grad_norm": 1.2073708772659302,
"learning_rate": 0.0001950413223140496,
"loss": 1.0136,
"step": 14
},
{
"epoch": 0.029211295034079845,
"grad_norm": 1.1807072162628174,
"learning_rate": 0.00019421487603305787,
"loss": 1.0244,
"step": 15
},
{
"epoch": 0.0311587147030185,
"grad_norm": 0.9304102659225464,
"learning_rate": 0.0001933884297520661,
"loss": 0.5349,
"step": 16
},
{
"epoch": 0.033106134371957155,
"grad_norm": 0.7814887762069702,
"learning_rate": 0.00019256198347107438,
"loss": 0.5058,
"step": 17
},
{
"epoch": 0.035053554040895815,
"grad_norm": 0.8314672708511353,
"learning_rate": 0.00019173553719008265,
"loss": 0.3814,
"step": 18
},
{
"epoch": 0.03700097370983447,
"grad_norm": 0.916557788848877,
"learning_rate": 0.00019090909090909092,
"loss": 0.4953,
"step": 19
},
{
"epoch": 0.03894839337877313,
"grad_norm": 0.9891867637634277,
"learning_rate": 0.0001900826446280992,
"loss": 0.932,
"step": 20
},
{
"epoch": 0.04089581304771178,
"grad_norm": 0.9992872476577759,
"learning_rate": 0.00018925619834710743,
"loss": 0.8751,
"step": 21
},
{
"epoch": 0.042843232716650435,
"grad_norm": 0.5688315033912659,
"learning_rate": 0.0001884297520661157,
"loss": 0.3556,
"step": 22
},
{
"epoch": 0.044790652385589096,
"grad_norm": 0.5576704144477844,
"learning_rate": 0.00018760330578512397,
"loss": 0.2855,
"step": 23
},
{
"epoch": 0.04673807205452775,
"grad_norm": 0.8203558921813965,
"learning_rate": 0.00018677685950413224,
"loss": 0.9009,
"step": 24
},
{
"epoch": 0.04868549172346641,
"grad_norm": 0.7509244680404663,
"learning_rate": 0.0001859504132231405,
"loss": 0.5587,
"step": 25
},
{
"epoch": 0.05063291139240506,
"grad_norm": 0.9155460596084595,
"learning_rate": 0.00018512396694214878,
"loss": 0.7122,
"step": 26
},
{
"epoch": 0.05258033106134372,
"grad_norm": 0.5604537129402161,
"learning_rate": 0.00018429752066115705,
"loss": 0.4308,
"step": 27
},
{
"epoch": 0.054527750730282376,
"grad_norm": 0.902310311794281,
"learning_rate": 0.00018347107438016532,
"loss": 0.7039,
"step": 28
},
{
"epoch": 0.05647517039922103,
"grad_norm": 0.6863579154014587,
"learning_rate": 0.00018264462809917356,
"loss": 0.7063,
"step": 29
},
{
"epoch": 0.05842259006815969,
"grad_norm": 0.6778506636619568,
"learning_rate": 0.00018181818181818183,
"loss": 0.6357,
"step": 30
},
{
"epoch": 0.06037000973709834,
"grad_norm": 0.5920571684837341,
"learning_rate": 0.00018099173553719008,
"loss": 0.4388,
"step": 31
},
{
"epoch": 0.062317429406037,
"grad_norm": 0.5040196776390076,
"learning_rate": 0.00018016528925619835,
"loss": 0.4409,
"step": 32
},
{
"epoch": 0.06426484907497566,
"grad_norm": 0.5752211809158325,
"learning_rate": 0.00017933884297520662,
"loss": 0.4516,
"step": 33
},
{
"epoch": 0.06621226874391431,
"grad_norm": 0.7298389673233032,
"learning_rate": 0.00017851239669421489,
"loss": 0.571,
"step": 34
},
{
"epoch": 0.06815968841285297,
"grad_norm": 0.7012993693351746,
"learning_rate": 0.00017768595041322316,
"loss": 0.4716,
"step": 35
},
{
"epoch": 0.07010710808179163,
"grad_norm": 0.7051262855529785,
"learning_rate": 0.00017685950413223143,
"loss": 0.6295,
"step": 36
},
{
"epoch": 0.07205452775073028,
"grad_norm": 0.7011151909828186,
"learning_rate": 0.00017603305785123967,
"loss": 0.4605,
"step": 37
},
{
"epoch": 0.07400194741966894,
"grad_norm": 1.0121755599975586,
"learning_rate": 0.00017520661157024794,
"loss": 0.7132,
"step": 38
},
{
"epoch": 0.0759493670886076,
"grad_norm": 1.274100422859192,
"learning_rate": 0.0001743801652892562,
"loss": 1.0475,
"step": 39
},
{
"epoch": 0.07789678675754626,
"grad_norm": 1.2915902137756348,
"learning_rate": 0.00017355371900826448,
"loss": 0.9126,
"step": 40
},
{
"epoch": 0.0798442064264849,
"grad_norm": 1.1296463012695312,
"learning_rate": 0.00017272727272727275,
"loss": 1.0518,
"step": 41
},
{
"epoch": 0.08179162609542356,
"grad_norm": 0.7432142496109009,
"learning_rate": 0.00017190082644628102,
"loss": 0.5967,
"step": 42
},
{
"epoch": 0.08373904576436222,
"grad_norm": 0.878311812877655,
"learning_rate": 0.00017107438016528926,
"loss": 0.6643,
"step": 43
},
{
"epoch": 0.08568646543330087,
"grad_norm": 1.0814746618270874,
"learning_rate": 0.00017024793388429753,
"loss": 0.7934,
"step": 44
},
{
"epoch": 0.08763388510223953,
"grad_norm": 1.0332353115081787,
"learning_rate": 0.00016942148760330577,
"loss": 0.7566,
"step": 45
},
{
"epoch": 0.08958130477117819,
"grad_norm": 0.952813446521759,
"learning_rate": 0.00016859504132231404,
"loss": 0.6161,
"step": 46
},
{
"epoch": 0.09152872444011685,
"grad_norm": 0.9768489003181458,
"learning_rate": 0.0001677685950413223,
"loss": 0.8178,
"step": 47
},
{
"epoch": 0.0934761441090555,
"grad_norm": 0.7100651860237122,
"learning_rate": 0.00016694214876033058,
"loss": 0.6014,
"step": 48
},
{
"epoch": 0.09542356377799416,
"grad_norm": 0.906511127948761,
"learning_rate": 0.00016611570247933885,
"loss": 0.952,
"step": 49
},
{
"epoch": 0.09737098344693282,
"grad_norm": 1.0189696550369263,
"learning_rate": 0.00016528925619834712,
"loss": 0.8181,
"step": 50
},
{
"epoch": 0.09931840311587146,
"grad_norm": 0.8355782628059387,
"learning_rate": 0.0001644628099173554,
"loss": 0.6442,
"step": 51
},
{
"epoch": 0.10126582278481013,
"grad_norm": 0.747944176197052,
"learning_rate": 0.00016363636363636366,
"loss": 0.5864,
"step": 52
},
{
"epoch": 0.10321324245374879,
"grad_norm": 0.6874829530715942,
"learning_rate": 0.0001628099173553719,
"loss": 0.5721,
"step": 53
},
{
"epoch": 0.10516066212268745,
"grad_norm": 0.8766732811927795,
"learning_rate": 0.00016198347107438017,
"loss": 1.1118,
"step": 54
},
{
"epoch": 0.10710808179162609,
"grad_norm": 0.503648579120636,
"learning_rate": 0.00016115702479338844,
"loss": 0.4632,
"step": 55
},
{
"epoch": 0.10905550146056475,
"grad_norm": 0.5597368478775024,
"learning_rate": 0.0001603305785123967,
"loss": 0.6327,
"step": 56
},
{
"epoch": 0.11100292112950341,
"grad_norm": 0.5331040024757385,
"learning_rate": 0.00015950413223140498,
"loss": 0.4662,
"step": 57
},
{
"epoch": 0.11295034079844206,
"grad_norm": 0.4501107335090637,
"learning_rate": 0.00015867768595041322,
"loss": 0.4593,
"step": 58
},
{
"epoch": 0.11489776046738072,
"grad_norm": 0.5544815063476562,
"learning_rate": 0.0001578512396694215,
"loss": 0.6111,
"step": 59
},
{
"epoch": 0.11684518013631938,
"grad_norm": 0.5640352964401245,
"learning_rate": 0.00015702479338842976,
"loss": 0.5499,
"step": 60
},
{
"epoch": 0.11879259980525804,
"grad_norm": 0.6318275332450867,
"learning_rate": 0.000156198347107438,
"loss": 0.4993,
"step": 61
},
{
"epoch": 0.12074001947419669,
"grad_norm": 0.5284496545791626,
"learning_rate": 0.00015537190082644627,
"loss": 0.6777,
"step": 62
},
{
"epoch": 0.12268743914313535,
"grad_norm": 0.44762396812438965,
"learning_rate": 0.00015454545454545454,
"loss": 0.3336,
"step": 63
},
{
"epoch": 0.124634858812074,
"grad_norm": 0.7527077794075012,
"learning_rate": 0.00015371900826446281,
"loss": 0.5085,
"step": 64
},
{
"epoch": 0.12658227848101267,
"grad_norm": 0.5384949445724487,
"learning_rate": 0.00015289256198347108,
"loss": 0.4182,
"step": 65
},
{
"epoch": 0.12852969814995133,
"grad_norm": 0.6809192895889282,
"learning_rate": 0.00015206611570247935,
"loss": 0.5201,
"step": 66
},
{
"epoch": 0.13047711781888996,
"grad_norm": 0.6038883328437805,
"learning_rate": 0.00015123966942148762,
"loss": 0.4331,
"step": 67
},
{
"epoch": 0.13242453748782862,
"grad_norm": 0.489505797624588,
"learning_rate": 0.0001504132231404959,
"loss": 0.3439,
"step": 68
},
{
"epoch": 0.13437195715676728,
"grad_norm": 0.4495028555393219,
"learning_rate": 0.00014958677685950414,
"loss": 0.3788,
"step": 69
},
{
"epoch": 0.13631937682570594,
"grad_norm": 0.3689400255680084,
"learning_rate": 0.0001487603305785124,
"loss": 0.299,
"step": 70
},
{
"epoch": 0.1382667964946446,
"grad_norm": 0.440514475107193,
"learning_rate": 0.00014793388429752067,
"loss": 0.2871,
"step": 71
},
{
"epoch": 0.14021421616358326,
"grad_norm": 0.6603713035583496,
"learning_rate": 0.00014710743801652894,
"loss": 0.4321,
"step": 72
},
{
"epoch": 0.14216163583252192,
"grad_norm": 0.353661447763443,
"learning_rate": 0.0001462809917355372,
"loss": 0.2402,
"step": 73
},
{
"epoch": 0.14410905550146055,
"grad_norm": 0.7928922772407532,
"learning_rate": 0.00014545454545454546,
"loss": 0.8787,
"step": 74
},
{
"epoch": 0.1460564751703992,
"grad_norm": 0.502176821231842,
"learning_rate": 0.00014462809917355373,
"loss": 0.3636,
"step": 75
},
{
"epoch": 0.14800389483933787,
"grad_norm": 0.5314487814903259,
"learning_rate": 0.000143801652892562,
"loss": 0.409,
"step": 76
},
{
"epoch": 0.14995131450827653,
"grad_norm": 0.6997962594032288,
"learning_rate": 0.00014297520661157024,
"loss": 0.5591,
"step": 77
},
{
"epoch": 0.1518987341772152,
"grad_norm": 0.7340275645256042,
"learning_rate": 0.0001421487603305785,
"loss": 0.6306,
"step": 78
},
{
"epoch": 0.15384615384615385,
"grad_norm": 0.722694993019104,
"learning_rate": 0.00014132231404958678,
"loss": 0.5656,
"step": 79
},
{
"epoch": 0.15579357351509251,
"grad_norm": 0.7989778518676758,
"learning_rate": 0.00014049586776859505,
"loss": 0.8676,
"step": 80
},
{
"epoch": 0.15774099318403115,
"grad_norm": 0.514163613319397,
"learning_rate": 0.00013966942148760332,
"loss": 0.492,
"step": 81
},
{
"epoch": 0.1596884128529698,
"grad_norm": 0.78263920545578,
"learning_rate": 0.0001388429752066116,
"loss": 0.7545,
"step": 82
},
{
"epoch": 0.16163583252190847,
"grad_norm": 0.5553173422813416,
"learning_rate": 0.00013801652892561986,
"loss": 0.5447,
"step": 83
},
{
"epoch": 0.16358325219084713,
"grad_norm": 0.6660541892051697,
"learning_rate": 0.00013719008264462813,
"loss": 0.7586,
"step": 84
},
{
"epoch": 0.1655306718597858,
"grad_norm": 0.5211347341537476,
"learning_rate": 0.00013636363636363637,
"loss": 0.3946,
"step": 85
},
{
"epoch": 0.16747809152872445,
"grad_norm": 0.9146146774291992,
"learning_rate": 0.00013553719008264464,
"loss": 0.6544,
"step": 86
},
{
"epoch": 0.1694255111976631,
"grad_norm": 0.5467679500579834,
"learning_rate": 0.00013471074380165288,
"loss": 0.5245,
"step": 87
},
{
"epoch": 0.17137293086660174,
"grad_norm": 0.7478577494621277,
"learning_rate": 0.00013388429752066115,
"loss": 0.8132,
"step": 88
},
{
"epoch": 0.1733203505355404,
"grad_norm": 0.6577962636947632,
"learning_rate": 0.00013305785123966942,
"loss": 0.59,
"step": 89
},
{
"epoch": 0.17526777020447906,
"grad_norm": 0.775256335735321,
"learning_rate": 0.0001322314049586777,
"loss": 0.5544,
"step": 90
},
{
"epoch": 0.17721518987341772,
"grad_norm": 0.5346238613128662,
"learning_rate": 0.00013140495867768596,
"loss": 0.4337,
"step": 91
},
{
"epoch": 0.17916260954235638,
"grad_norm": 0.578333854675293,
"learning_rate": 0.00013057851239669423,
"loss": 0.4799,
"step": 92
},
{
"epoch": 0.18111002921129504,
"grad_norm": 0.6039044857025146,
"learning_rate": 0.00012975206611570247,
"loss": 0.4282,
"step": 93
},
{
"epoch": 0.1830574488802337,
"grad_norm": 0.6463829874992371,
"learning_rate": 0.00012892561983471074,
"loss": 0.6641,
"step": 94
},
{
"epoch": 0.18500486854917234,
"grad_norm": 0.9733583331108093,
"learning_rate": 0.000128099173553719,
"loss": 0.6536,
"step": 95
},
{
"epoch": 0.186952288218111,
"grad_norm": 0.6739547252655029,
"learning_rate": 0.00012727272727272728,
"loss": 0.6845,
"step": 96
},
{
"epoch": 0.18889970788704966,
"grad_norm": 0.7450870275497437,
"learning_rate": 0.00012644628099173555,
"loss": 0.7029,
"step": 97
},
{
"epoch": 0.19084712755598832,
"grad_norm": 0.7437999248504639,
"learning_rate": 0.00012561983471074382,
"loss": 0.6285,
"step": 98
},
{
"epoch": 0.19279454722492698,
"grad_norm": 0.6908058524131775,
"learning_rate": 0.0001247933884297521,
"loss": 0.6161,
"step": 99
},
{
"epoch": 0.19474196689386564,
"grad_norm": 0.6222776174545288,
"learning_rate": 0.00012396694214876033,
"loss": 0.5389,
"step": 100
},
{
"epoch": 0.1966893865628043,
"grad_norm": 0.7415236234664917,
"learning_rate": 0.0001231404958677686,
"loss": 0.7564,
"step": 101
},
{
"epoch": 0.19863680623174293,
"grad_norm": 0.725433349609375,
"learning_rate": 0.00012231404958677685,
"loss": 0.5624,
"step": 102
},
{
"epoch": 0.2005842259006816,
"grad_norm": 0.5979681611061096,
"learning_rate": 0.00012148760330578513,
"loss": 0.4344,
"step": 103
},
{
"epoch": 0.20253164556962025,
"grad_norm": 0.6501683592796326,
"learning_rate": 0.0001206611570247934,
"loss": 0.5137,
"step": 104
},
{
"epoch": 0.2044790652385589,
"grad_norm": 0.4350631535053253,
"learning_rate": 0.00011983471074380165,
"loss": 0.4055,
"step": 105
},
{
"epoch": 0.20642648490749757,
"grad_norm": 0.7034055590629578,
"learning_rate": 0.00011900826446280992,
"loss": 0.6413,
"step": 106
},
{
"epoch": 0.20837390457643623,
"grad_norm": 0.606842041015625,
"learning_rate": 0.0001181818181818182,
"loss": 0.6433,
"step": 107
},
{
"epoch": 0.2103213242453749,
"grad_norm": 0.6555774807929993,
"learning_rate": 0.00011735537190082646,
"loss": 0.8194,
"step": 108
},
{
"epoch": 0.21226874391431352,
"grad_norm": 0.6111577749252319,
"learning_rate": 0.0001165289256198347,
"loss": 0.5042,
"step": 109
},
{
"epoch": 0.21421616358325218,
"grad_norm": 0.6553054451942444,
"learning_rate": 0.00011570247933884298,
"loss": 0.6989,
"step": 110
},
{
"epoch": 0.21616358325219084,
"grad_norm": 0.4501146972179413,
"learning_rate": 0.00011487603305785125,
"loss": 0.3658,
"step": 111
},
{
"epoch": 0.2181110029211295,
"grad_norm": 0.44687238335609436,
"learning_rate": 0.0001140495867768595,
"loss": 0.3615,
"step": 112
},
{
"epoch": 0.22005842259006816,
"grad_norm": 0.7412980198860168,
"learning_rate": 0.00011322314049586777,
"loss": 0.8426,
"step": 113
},
{
"epoch": 0.22200584225900682,
"grad_norm": 0.4677373170852661,
"learning_rate": 0.00011239669421487604,
"loss": 0.3759,
"step": 114
},
{
"epoch": 0.22395326192794549,
"grad_norm": 0.5156281590461731,
"learning_rate": 0.00011157024793388431,
"loss": 0.4241,
"step": 115
},
{
"epoch": 0.22590068159688412,
"grad_norm": 0.655049741268158,
"learning_rate": 0.00011074380165289258,
"loss": 0.4153,
"step": 116
},
{
"epoch": 0.22784810126582278,
"grad_norm": 0.4297865927219391,
"learning_rate": 0.00010991735537190082,
"loss": 0.3184,
"step": 117
},
{
"epoch": 0.22979552093476144,
"grad_norm": 0.6195393204689026,
"learning_rate": 0.00010909090909090909,
"loss": 0.7035,
"step": 118
},
{
"epoch": 0.2317429406037001,
"grad_norm": 0.6508321762084961,
"learning_rate": 0.00010826446280991735,
"loss": 0.6435,
"step": 119
},
{
"epoch": 0.23369036027263876,
"grad_norm": 0.5999849438667297,
"learning_rate": 0.00010743801652892562,
"loss": 0.5618,
"step": 120
},
{
"epoch": 0.23563777994157742,
"grad_norm": 0.4445487856864929,
"learning_rate": 0.00010661157024793389,
"loss": 0.3008,
"step": 121
},
{
"epoch": 0.23758519961051608,
"grad_norm": 0.46502161026000977,
"learning_rate": 0.00010578512396694216,
"loss": 0.4989,
"step": 122
},
{
"epoch": 0.2395326192794547,
"grad_norm": 0.5087387561798096,
"learning_rate": 0.00010495867768595043,
"loss": 0.3852,
"step": 123
},
{
"epoch": 0.24148003894839337,
"grad_norm": 0.4807125926017761,
"learning_rate": 0.0001041322314049587,
"loss": 0.3579,
"step": 124
},
{
"epoch": 0.24342745861733203,
"grad_norm": 0.5348750948905945,
"learning_rate": 0.00010330578512396694,
"loss": 0.4919,
"step": 125
},
{
"epoch": 0.2453748782862707,
"grad_norm": 0.8607493042945862,
"learning_rate": 0.00010247933884297521,
"loss": 0.8142,
"step": 126
},
{
"epoch": 0.24732229795520935,
"grad_norm": 0.4636419415473938,
"learning_rate": 0.00010165289256198347,
"loss": 0.407,
"step": 127
},
{
"epoch": 0.249269717624148,
"grad_norm": 0.49384385347366333,
"learning_rate": 0.00010082644628099174,
"loss": 0.4263,
"step": 128
},
{
"epoch": 0.25121713729308665,
"grad_norm": 0.5093722343444824,
"learning_rate": 0.0001,
"loss": 0.3402,
"step": 129
},
{
"epoch": 0.25316455696202533,
"grad_norm": 0.6678729057312012,
"learning_rate": 9.917355371900827e-05,
"loss": 0.5644,
"step": 130
},
{
"epoch": 0.25511197663096397,
"grad_norm": 0.7193764448165894,
"learning_rate": 9.834710743801654e-05,
"loss": 0.6274,
"step": 131
},
{
"epoch": 0.25705939629990265,
"grad_norm": 0.3708731234073639,
"learning_rate": 9.75206611570248e-05,
"loss": 0.269,
"step": 132
},
{
"epoch": 0.2590068159688413,
"grad_norm": 0.6703848242759705,
"learning_rate": 9.669421487603306e-05,
"loss": 0.5984,
"step": 133
},
{
"epoch": 0.2609542356377799,
"grad_norm": 0.5617924928665161,
"learning_rate": 9.586776859504133e-05,
"loss": 0.5166,
"step": 134
},
{
"epoch": 0.2629016553067186,
"grad_norm": 0.6645628213882446,
"learning_rate": 9.50413223140496e-05,
"loss": 0.7676,
"step": 135
},
{
"epoch": 0.26484907497565724,
"grad_norm": 0.6777731776237488,
"learning_rate": 9.421487603305785e-05,
"loss": 0.4477,
"step": 136
},
{
"epoch": 0.2667964946445959,
"grad_norm": 1.0802561044692993,
"learning_rate": 9.338842975206612e-05,
"loss": 0.7692,
"step": 137
},
{
"epoch": 0.26874391431353456,
"grad_norm": 0.5928775072097778,
"learning_rate": 9.256198347107439e-05,
"loss": 0.5721,
"step": 138
},
{
"epoch": 0.27069133398247325,
"grad_norm": 0.73649001121521,
"learning_rate": 9.173553719008266e-05,
"loss": 0.6324,
"step": 139
},
{
"epoch": 0.2726387536514119,
"grad_norm": 0.6584858298301697,
"learning_rate": 9.090909090909092e-05,
"loss": 0.5763,
"step": 140
},
{
"epoch": 0.2745861733203505,
"grad_norm": 0.6551913619041443,
"learning_rate": 9.008264462809917e-05,
"loss": 0.6005,
"step": 141
},
{
"epoch": 0.2765335929892892,
"grad_norm": 0.5698264837265015,
"learning_rate": 8.925619834710744e-05,
"loss": 0.4164,
"step": 142
},
{
"epoch": 0.27848101265822783,
"grad_norm": 0.5793282985687256,
"learning_rate": 8.842975206611571e-05,
"loss": 0.4952,
"step": 143
},
{
"epoch": 0.2804284323271665,
"grad_norm": 0.52625972032547,
"learning_rate": 8.760330578512397e-05,
"loss": 0.4534,
"step": 144
},
{
"epoch": 0.28237585199610515,
"grad_norm": 0.6003084778785706,
"learning_rate": 8.677685950413224e-05,
"loss": 0.5206,
"step": 145
},
{
"epoch": 0.28432327166504384,
"grad_norm": 0.5583734512329102,
"learning_rate": 8.595041322314051e-05,
"loss": 0.5323,
"step": 146
},
{
"epoch": 0.2862706913339825,
"grad_norm": 0.600612461566925,
"learning_rate": 8.512396694214876e-05,
"loss": 0.5463,
"step": 147
},
{
"epoch": 0.2882181110029211,
"grad_norm": 0.8786905407905579,
"learning_rate": 8.429752066115702e-05,
"loss": 0.48,
"step": 148
},
{
"epoch": 0.2901655306718598,
"grad_norm": 0.6033667922019958,
"learning_rate": 8.347107438016529e-05,
"loss": 0.4997,
"step": 149
},
{
"epoch": 0.2921129503407984,
"grad_norm": 0.7072091102600098,
"learning_rate": 8.264462809917356e-05,
"loss": 0.5164,
"step": 150
},
{
"epoch": 0.2940603700097371,
"grad_norm": 0.5921626687049866,
"learning_rate": 8.181818181818183e-05,
"loss": 0.6378,
"step": 151
},
{
"epoch": 0.29600778967867575,
"grad_norm": 0.5669450759887695,
"learning_rate": 8.099173553719009e-05,
"loss": 0.501,
"step": 152
},
{
"epoch": 0.29795520934761444,
"grad_norm": 0.5064852237701416,
"learning_rate": 8.016528925619836e-05,
"loss": 0.3843,
"step": 153
},
{
"epoch": 0.29990262901655307,
"grad_norm": 0.5336524248123169,
"learning_rate": 7.933884297520661e-05,
"loss": 0.3716,
"step": 154
},
{
"epoch": 0.3018500486854917,
"grad_norm": 0.44651252031326294,
"learning_rate": 7.851239669421488e-05,
"loss": 0.3773,
"step": 155
},
{
"epoch": 0.3037974683544304,
"grad_norm": 0.38342341780662537,
"learning_rate": 7.768595041322314e-05,
"loss": 0.2936,
"step": 156
},
{
"epoch": 0.305744888023369,
"grad_norm": 0.4770042598247528,
"learning_rate": 7.685950413223141e-05,
"loss": 0.4883,
"step": 157
},
{
"epoch": 0.3076923076923077,
"grad_norm": 0.6555917263031006,
"learning_rate": 7.603305785123968e-05,
"loss": 0.606,
"step": 158
},
{
"epoch": 0.30963972736124634,
"grad_norm": 0.4627241790294647,
"learning_rate": 7.520661157024795e-05,
"loss": 0.3273,
"step": 159
},
{
"epoch": 0.31158714703018503,
"grad_norm": 0.3865076005458832,
"learning_rate": 7.43801652892562e-05,
"loss": 0.2885,
"step": 160
},
{
"epoch": 0.31353456669912366,
"grad_norm": 0.37422531843185425,
"learning_rate": 7.355371900826447e-05,
"loss": 0.3094,
"step": 161
},
{
"epoch": 0.3154819863680623,
"grad_norm": 0.4276966452598572,
"learning_rate": 7.272727272727273e-05,
"loss": 0.3022,
"step": 162
},
{
"epoch": 0.317429406037001,
"grad_norm": 0.31993845105171204,
"learning_rate": 7.1900826446281e-05,
"loss": 0.2376,
"step": 163
},
{
"epoch": 0.3193768257059396,
"grad_norm": 0.3512668013572693,
"learning_rate": 7.107438016528925e-05,
"loss": 0.2865,
"step": 164
},
{
"epoch": 0.3213242453748783,
"grad_norm": 1.2840955257415771,
"learning_rate": 7.024793388429752e-05,
"loss": 0.9994,
"step": 165
},
{
"epoch": 0.32327166504381694,
"grad_norm": 0.7181655168533325,
"learning_rate": 6.94214876033058e-05,
"loss": 1.3493,
"step": 166
},
{
"epoch": 0.3252190847127556,
"grad_norm": 0.5071792602539062,
"learning_rate": 6.859504132231406e-05,
"loss": 0.4438,
"step": 167
},
{
"epoch": 0.32716650438169426,
"grad_norm": 0.7322396636009216,
"learning_rate": 6.776859504132232e-05,
"loss": 0.6287,
"step": 168
},
{
"epoch": 0.3291139240506329,
"grad_norm": 0.26013123989105225,
"learning_rate": 6.694214876033058e-05,
"loss": 0.2015,
"step": 169
},
{
"epoch": 0.3310613437195716,
"grad_norm": 0.41022831201553345,
"learning_rate": 6.611570247933885e-05,
"loss": 0.2814,
"step": 170
},
{
"epoch": 0.3330087633885102,
"grad_norm": 0.4295414388179779,
"learning_rate": 6.528925619834711e-05,
"loss": 0.3253,
"step": 171
},
{
"epoch": 0.3349561830574489,
"grad_norm": 0.5953055024147034,
"learning_rate": 6.446280991735537e-05,
"loss": 0.5285,
"step": 172
},
{
"epoch": 0.33690360272638753,
"grad_norm": 0.585370659828186,
"learning_rate": 6.363636363636364e-05,
"loss": 0.5672,
"step": 173
},
{
"epoch": 0.3388510223953262,
"grad_norm": 0.469965398311615,
"learning_rate": 6.280991735537191e-05,
"loss": 0.3597,
"step": 174
},
{
"epoch": 0.34079844206426485,
"grad_norm": 0.5699323415756226,
"learning_rate": 6.198347107438017e-05,
"loss": 0.4781,
"step": 175
},
{
"epoch": 0.3427458617332035,
"grad_norm": 0.6034254431724548,
"learning_rate": 6.115702479338842e-05,
"loss": 0.468,
"step": 176
},
{
"epoch": 0.34469328140214217,
"grad_norm": 0.646748423576355,
"learning_rate": 6.03305785123967e-05,
"loss": 0.4904,
"step": 177
},
{
"epoch": 0.3466407010710808,
"grad_norm": 0.4674849212169647,
"learning_rate": 5.950413223140496e-05,
"loss": 0.4888,
"step": 178
},
{
"epoch": 0.3485881207400195,
"grad_norm": 0.5764815807342529,
"learning_rate": 5.867768595041323e-05,
"loss": 0.5268,
"step": 179
},
{
"epoch": 0.3505355404089581,
"grad_norm": 0.5898470878601074,
"learning_rate": 5.785123966942149e-05,
"loss": 0.4667,
"step": 180
},
{
"epoch": 0.3524829600778968,
"grad_norm": 0.38661783933639526,
"learning_rate": 5.702479338842975e-05,
"loss": 0.3538,
"step": 181
},
{
"epoch": 0.35443037974683544,
"grad_norm": 0.4027338922023773,
"learning_rate": 5.619834710743802e-05,
"loss": 0.3705,
"step": 182
},
{
"epoch": 0.3563777994157741,
"grad_norm": 0.3949303925037384,
"learning_rate": 5.537190082644629e-05,
"loss": 0.3463,
"step": 183
},
{
"epoch": 0.35832521908471276,
"grad_norm": 0.5721768736839294,
"learning_rate": 5.4545454545454546e-05,
"loss": 0.4461,
"step": 184
},
{
"epoch": 0.3602726387536514,
"grad_norm": 0.5568830966949463,
"learning_rate": 5.371900826446281e-05,
"loss": 0.3703,
"step": 185
},
{
"epoch": 0.3622200584225901,
"grad_norm": 0.4944753646850586,
"learning_rate": 5.289256198347108e-05,
"loss": 0.4531,
"step": 186
},
{
"epoch": 0.3641674780915287,
"grad_norm": 0.5557789206504822,
"learning_rate": 5.206611570247935e-05,
"loss": 0.5426,
"step": 187
},
{
"epoch": 0.3661148977604674,
"grad_norm": 0.6477212309837341,
"learning_rate": 5.1239669421487605e-05,
"loss": 0.5483,
"step": 188
},
{
"epoch": 0.36806231742940604,
"grad_norm": 0.7089900374412537,
"learning_rate": 5.041322314049587e-05,
"loss": 0.6709,
"step": 189
},
{
"epoch": 0.37000973709834467,
"grad_norm": 0.5330088138580322,
"learning_rate": 4.958677685950414e-05,
"loss": 0.7063,
"step": 190
},
{
"epoch": 0.37195715676728336,
"grad_norm": 0.5296370387077332,
"learning_rate": 4.87603305785124e-05,
"loss": 0.5056,
"step": 191
},
{
"epoch": 0.373904576436222,
"grad_norm": 0.8069382309913635,
"learning_rate": 4.793388429752066e-05,
"loss": 0.7653,
"step": 192
},
{
"epoch": 0.3758519961051607,
"grad_norm": 0.43892863392829895,
"learning_rate": 4.7107438016528926e-05,
"loss": 0.4423,
"step": 193
},
{
"epoch": 0.3777994157740993,
"grad_norm": 0.7146729826927185,
"learning_rate": 4.6280991735537196e-05,
"loss": 0.7297,
"step": 194
},
{
"epoch": 0.379746835443038,
"grad_norm": 0.41250815987586975,
"learning_rate": 4.545454545454546e-05,
"loss": 0.404,
"step": 195
},
{
"epoch": 0.38169425511197663,
"grad_norm": 0.4819222390651703,
"learning_rate": 4.462809917355372e-05,
"loss": 0.4106,
"step": 196
},
{
"epoch": 0.38364167478091526,
"grad_norm": 0.41315364837646484,
"learning_rate": 4.3801652892561984e-05,
"loss": 0.3445,
"step": 197
},
{
"epoch": 0.38558909444985395,
"grad_norm": 0.4681267738342285,
"learning_rate": 4.2975206611570254e-05,
"loss": 0.3611,
"step": 198
},
{
"epoch": 0.3875365141187926,
"grad_norm": 0.48512405157089233,
"learning_rate": 4.214876033057851e-05,
"loss": 0.3842,
"step": 199
},
{
"epoch": 0.3894839337877313,
"grad_norm": 0.49099263548851013,
"learning_rate": 4.132231404958678e-05,
"loss": 0.3824,
"step": 200
},
{
"epoch": 0.3914313534566699,
"grad_norm": 0.3325710892677307,
"learning_rate": 4.049586776859504e-05,
"loss": 0.2954,
"step": 201
},
{
"epoch": 0.3933787731256086,
"grad_norm": 0.30682113766670227,
"learning_rate": 3.9669421487603306e-05,
"loss": 0.2274,
"step": 202
},
{
"epoch": 0.3953261927945472,
"grad_norm": 0.3700462579727173,
"learning_rate": 3.884297520661157e-05,
"loss": 0.2983,
"step": 203
},
{
"epoch": 0.39727361246348586,
"grad_norm": 0.4219472110271454,
"learning_rate": 3.801652892561984e-05,
"loss": 0.3604,
"step": 204
},
{
"epoch": 0.39922103213242455,
"grad_norm": 0.2869436740875244,
"learning_rate": 3.71900826446281e-05,
"loss": 0.2366,
"step": 205
},
{
"epoch": 0.4011684518013632,
"grad_norm": 0.5798669457435608,
"learning_rate": 3.6363636363636364e-05,
"loss": 0.6483,
"step": 206
},
{
"epoch": 0.40311587147030187,
"grad_norm": 0.5161038637161255,
"learning_rate": 3.553719008264463e-05,
"loss": 0.557,
"step": 207
},
{
"epoch": 0.4050632911392405,
"grad_norm": 0.43804600834846497,
"learning_rate": 3.47107438016529e-05,
"loss": 0.2817,
"step": 208
},
{
"epoch": 0.4070107108081792,
"grad_norm": 0.2725582420825958,
"learning_rate": 3.388429752066116e-05,
"loss": 0.2317,
"step": 209
},
{
"epoch": 0.4089581304771178,
"grad_norm": 0.4551626443862915,
"learning_rate": 3.305785123966942e-05,
"loss": 0.3523,
"step": 210
},
{
"epoch": 0.41090555014605645,
"grad_norm": 0.4810575842857361,
"learning_rate": 3.2231404958677685e-05,
"loss": 0.4526,
"step": 211
},
{
"epoch": 0.41285296981499514,
"grad_norm": 0.6466739773750305,
"learning_rate": 3.1404958677685955e-05,
"loss": 0.8117,
"step": 212
},
{
"epoch": 0.4148003894839338,
"grad_norm": 0.5906726717948914,
"learning_rate": 3.057851239669421e-05,
"loss": 0.6446,
"step": 213
},
{
"epoch": 0.41674780915287246,
"grad_norm": 0.5657168626785278,
"learning_rate": 2.975206611570248e-05,
"loss": 0.4798,
"step": 214
},
{
"epoch": 0.4186952288218111,
"grad_norm": 0.45190712809562683,
"learning_rate": 2.8925619834710744e-05,
"loss": 0.4177,
"step": 215
},
{
"epoch": 0.4206426484907498,
"grad_norm": 0.36461302638053894,
"learning_rate": 2.809917355371901e-05,
"loss": 0.3125,
"step": 216
},
{
"epoch": 0.4225900681596884,
"grad_norm": 0.5353040099143982,
"learning_rate": 2.7272727272727273e-05,
"loss": 0.6517,
"step": 217
},
{
"epoch": 0.42453748782862705,
"grad_norm": 0.7112963795661926,
"learning_rate": 2.644628099173554e-05,
"loss": 0.6613,
"step": 218
},
{
"epoch": 0.42648490749756574,
"grad_norm": 0.4022853374481201,
"learning_rate": 2.5619834710743802e-05,
"loss": 0.3589,
"step": 219
},
{
"epoch": 0.42843232716650437,
"grad_norm": 0.4931185841560364,
"learning_rate": 2.479338842975207e-05,
"loss": 0.5696,
"step": 220
},
{
"epoch": 0.43037974683544306,
"grad_norm": 0.28436464071273804,
"learning_rate": 2.396694214876033e-05,
"loss": 0.2196,
"step": 221
},
{
"epoch": 0.4323271665043817,
"grad_norm": 0.4250311553478241,
"learning_rate": 2.3140495867768598e-05,
"loss": 0.3458,
"step": 222
},
{
"epoch": 0.4342745861733204,
"grad_norm": 0.23114222288131714,
"learning_rate": 2.231404958677686e-05,
"loss": 0.2124,
"step": 223
},
{
"epoch": 0.436222005842259,
"grad_norm": 0.46653562784194946,
"learning_rate": 2.1487603305785127e-05,
"loss": 0.3793,
"step": 224
},
{
"epoch": 0.43816942551119764,
"grad_norm": 0.7057761549949646,
"learning_rate": 2.066115702479339e-05,
"loss": 0.7023,
"step": 225
},
{
"epoch": 0.44011684518013633,
"grad_norm": 0.4195654094219208,
"learning_rate": 1.9834710743801653e-05,
"loss": 0.4148,
"step": 226
},
{
"epoch": 0.44206426484907496,
"grad_norm": 0.4446503221988678,
"learning_rate": 1.900826446280992e-05,
"loss": 0.4615,
"step": 227
},
{
"epoch": 0.44401168451801365,
"grad_norm": 0.28336820006370544,
"learning_rate": 1.8181818181818182e-05,
"loss": 0.1496,
"step": 228
},
{
"epoch": 0.4459591041869523,
"grad_norm": 0.5226246118545532,
"learning_rate": 1.735537190082645e-05,
"loss": 0.5196,
"step": 229
},
{
"epoch": 0.44790652385589097,
"grad_norm": 0.5695878267288208,
"learning_rate": 1.652892561983471e-05,
"loss": 0.5308,
"step": 230
},
{
"epoch": 0.4498539435248296,
"grad_norm": 0.5476521849632263,
"learning_rate": 1.5702479338842978e-05,
"loss": 0.4681,
"step": 231
},
{
"epoch": 0.45180136319376824,
"grad_norm": 0.5449410676956177,
"learning_rate": 1.487603305785124e-05,
"loss": 0.5434,
"step": 232
},
{
"epoch": 0.4537487828627069,
"grad_norm": 0.47294700145721436,
"learning_rate": 1.4049586776859505e-05,
"loss": 0.435,
"step": 233
},
{
"epoch": 0.45569620253164556,
"grad_norm": 0.6122575998306274,
"learning_rate": 1.322314049586777e-05,
"loss": 0.5793,
"step": 234
},
{
"epoch": 0.45764362220058424,
"grad_norm": 0.7652103900909424,
"learning_rate": 1.2396694214876034e-05,
"loss": 0.8786,
"step": 235
},
{
"epoch": 0.4595910418695229,
"grad_norm": 0.5390903949737549,
"learning_rate": 1.1570247933884299e-05,
"loss": 0.4212,
"step": 236
},
{
"epoch": 0.46153846153846156,
"grad_norm": 0.6218725442886353,
"learning_rate": 1.0743801652892564e-05,
"loss": 0.5875,
"step": 237
},
{
"epoch": 0.4634858812074002,
"grad_norm": 0.3942383825778961,
"learning_rate": 9.917355371900826e-06,
"loss": 0.3841,
"step": 238
},
{
"epoch": 0.46543330087633883,
"grad_norm": 0.34732937812805176,
"learning_rate": 9.090909090909091e-06,
"loss": 0.2677,
"step": 239
},
{
"epoch": 0.4673807205452775,
"grad_norm": 0.5432242751121521,
"learning_rate": 8.264462809917356e-06,
"loss": 0.5924,
"step": 240
},
{
"epoch": 0.46932814021421615,
"grad_norm": 0.6005178689956665,
"learning_rate": 7.43801652892562e-06,
"loss": 0.5235,
"step": 241
},
{
"epoch": 0.47127555988315484,
"grad_norm": 0.6554625630378723,
"learning_rate": 6.611570247933885e-06,
"loss": 0.6429,
"step": 242
},
{
"epoch": 0.47322297955209347,
"grad_norm": 0.3775584399700165,
"learning_rate": 5.7851239669421495e-06,
"loss": 0.3247,
"step": 243
},
{
"epoch": 0.47517039922103216,
"grad_norm": 0.5196499824523926,
"learning_rate": 4.958677685950413e-06,
"loss": 0.3589,
"step": 244
},
{
"epoch": 0.4771178188899708,
"grad_norm": 0.462444931268692,
"learning_rate": 4.132231404958678e-06,
"loss": 0.4217,
"step": 245
},
{
"epoch": 0.4790652385589094,
"grad_norm": 0.3943294286727905,
"learning_rate": 3.3057851239669424e-06,
"loss": 0.2937,
"step": 246
},
{
"epoch": 0.4810126582278481,
"grad_norm": 0.3740336298942566,
"learning_rate": 2.4793388429752066e-06,
"loss": 0.3033,
"step": 247
},
{
"epoch": 0.48296007789678674,
"grad_norm": 0.3156736493110657,
"learning_rate": 1.6528925619834712e-06,
"loss": 0.2708,
"step": 248
},
{
"epoch": 0.48490749756572543,
"grad_norm": 0.6795382499694824,
"learning_rate": 8.264462809917356e-07,
"loss": 0.5882,
"step": 249
},
{
"epoch": 0.48685491723466406,
"grad_norm": 0.45327121019363403,
"learning_rate": 0.0,
"loss": 0.3451,
"step": 250
}
],
"logging_steps": 1,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 250,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1165175740907520.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}