{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.999537251272559,
"eval_steps": 100,
"global_step": 900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005552984729291994,
"grad_norm": 5.378993948282789,
"learning_rate": 1.111111111111111e-06,
"loss": 1.3618,
"step": 5
},
{
"epoch": 0.011105969458583989,
"grad_norm": 4.007932344194533,
"learning_rate": 2.222222222222222e-06,
"loss": 1.3497,
"step": 10
},
{
"epoch": 0.016658954187875982,
"grad_norm": 2.870467768621527,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.3292,
"step": 15
},
{
"epoch": 0.022211938917167977,
"grad_norm": 2.9129656624638147,
"learning_rate": 4.444444444444444e-06,
"loss": 1.3086,
"step": 20
},
{
"epoch": 0.027764923646459973,
"grad_norm": 2.0216771653661767,
"learning_rate": 5.555555555555557e-06,
"loss": 1.1997,
"step": 25
},
{
"epoch": 0.033317908375751965,
"grad_norm": 1.7768511495820114,
"learning_rate": 6.666666666666667e-06,
"loss": 1.1742,
"step": 30
},
{
"epoch": 0.03887089310504396,
"grad_norm": 1.4804724166332308,
"learning_rate": 7.77777777777778e-06,
"loss": 1.1244,
"step": 35
},
{
"epoch": 0.044423877834335955,
"grad_norm": 1.5180593968939422,
"learning_rate": 8.888888888888888e-06,
"loss": 1.0946,
"step": 40
},
{
"epoch": 0.04997686256362795,
"grad_norm": 1.4879318494570208,
"learning_rate": 1e-05,
"loss": 1.1032,
"step": 45
},
{
"epoch": 0.055529847292919945,
"grad_norm": 1.449589161585785,
"learning_rate": 1.1111111111111113e-05,
"loss": 1.0751,
"step": 50
},
{
"epoch": 0.06108283202221194,
"grad_norm": 1.3300428861282843,
"learning_rate": 1.2222222222222224e-05,
"loss": 1.0704,
"step": 55
},
{
"epoch": 0.06663581675150393,
"grad_norm": 1.5071782473179958,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.0561,
"step": 60
},
{
"epoch": 0.07218880148079593,
"grad_norm": 1.3957616523065333,
"learning_rate": 1.4444444444444446e-05,
"loss": 1.0527,
"step": 65
},
{
"epoch": 0.07774178621008793,
"grad_norm": 1.3254556004203786,
"learning_rate": 1.555555555555556e-05,
"loss": 1.0107,
"step": 70
},
{
"epoch": 0.08329477093937991,
"grad_norm": 1.5821507688192253,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.035,
"step": 75
},
{
"epoch": 0.08884775566867191,
"grad_norm": 1.5770964291038414,
"learning_rate": 1.7777777777777777e-05,
"loss": 1.0285,
"step": 80
},
{
"epoch": 0.09440074039796391,
"grad_norm": 1.5156588992406825,
"learning_rate": 1.888888888888889e-05,
"loss": 1.0462,
"step": 85
},
{
"epoch": 0.0999537251272559,
"grad_norm": 1.8368814946604881,
"learning_rate": 2e-05,
"loss": 1.0321,
"step": 90
},
{
"epoch": 0.10550670985654789,
"grad_norm": 1.6154848357095026,
"learning_rate": 1.9998119704485016e-05,
"loss": 0.9962,
"step": 95
},
{
"epoch": 0.11105969458583989,
"grad_norm": 1.6218644276479086,
"learning_rate": 1.9992479525042305e-05,
"loss": 1.0216,
"step": 100
},
{
"epoch": 0.11105969458583989,
"eval_loss": 1.0477724075317383,
"eval_runtime": 14.0681,
"eval_samples_per_second": 18.197,
"eval_steps_per_second": 4.549,
"step": 100
},
{
"epoch": 0.11661267931513189,
"grad_norm": 1.4399255902202484,
"learning_rate": 1.9983081582712684e-05,
"loss": 1.0344,
"step": 105
},
{
"epoch": 0.12216566404442387,
"grad_norm": 1.8069324642784876,
"learning_rate": 1.996992941167792e-05,
"loss": 1.0204,
"step": 110
},
{
"epoch": 0.12771864877371586,
"grad_norm": 2.1003495945173865,
"learning_rate": 1.9953027957931658e-05,
"loss": 1.0169,
"step": 115
},
{
"epoch": 0.13327163350300786,
"grad_norm": 1.9735058045205416,
"learning_rate": 1.9932383577419432e-05,
"loss": 1.0039,
"step": 120
},
{
"epoch": 0.13882461823229986,
"grad_norm": 1.4822914643295109,
"learning_rate": 1.9908004033648452e-05,
"loss": 1.0025,
"step": 125
},
{
"epoch": 0.14437760296159186,
"grad_norm": 1.5730749273183444,
"learning_rate": 1.9879898494768093e-05,
"loss": 1.0014,
"step": 130
},
{
"epoch": 0.14993058769088385,
"grad_norm": 1.4317675236519043,
"learning_rate": 1.9848077530122083e-05,
"loss": 1.0055,
"step": 135
},
{
"epoch": 0.15548357242017585,
"grad_norm": 1.4305310143482486,
"learning_rate": 1.9812553106273848e-05,
"loss": 0.9772,
"step": 140
},
{
"epoch": 0.16103655714946785,
"grad_norm": 1.835206300273569,
"learning_rate": 1.9773338582506357e-05,
"loss": 1.0041,
"step": 145
},
{
"epoch": 0.16658954187875982,
"grad_norm": 1.346307644251197,
"learning_rate": 1.973044870579824e-05,
"loss": 0.9836,
"step": 150
},
{
"epoch": 0.17214252660805182,
"grad_norm": 1.3087431013115765,
"learning_rate": 1.9683899605278062e-05,
"loss": 0.9906,
"step": 155
},
{
"epoch": 0.17769551133734382,
"grad_norm": 1.6304479084212757,
"learning_rate": 1.9633708786158803e-05,
"loss": 0.9846,
"step": 160
},
{
"epoch": 0.18324849606663582,
"grad_norm": 1.4079238364588627,
"learning_rate": 1.957989512315489e-05,
"loss": 0.9953,
"step": 165
},
{
"epoch": 0.18880148079592782,
"grad_norm": 1.4160494597971853,
"learning_rate": 1.9522478853384154e-05,
"loss": 0.9728,
"step": 170
},
{
"epoch": 0.19435446552521982,
"grad_norm": 1.3831279332888735,
"learning_rate": 1.946148156875751e-05,
"loss": 0.9786,
"step": 175
},
{
"epoch": 0.1999074502545118,
"grad_norm": 1.3223005814385944,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.9915,
"step": 180
},
{
"epoch": 0.20546043498380379,
"grad_norm": 7.733682148553893,
"learning_rate": 1.932883704732001e-05,
"loss": 1.0145,
"step": 185
},
{
"epoch": 0.21101341971309578,
"grad_norm": 1.6669211194032283,
"learning_rate": 1.9257239692688907e-05,
"loss": 0.9862,
"step": 190
},
{
"epoch": 0.21656640444238778,
"grad_norm": 1.4461392109939817,
"learning_rate": 1.9182161068802742e-05,
"loss": 0.9944,
"step": 195
},
{
"epoch": 0.22211938917167978,
"grad_norm": 1.3816905654957743,
"learning_rate": 1.9103629409661468e-05,
"loss": 0.99,
"step": 200
},
{
"epoch": 0.22211938917167978,
"eval_loss": 1.0106741189956665,
"eval_runtime": 13.0423,
"eval_samples_per_second": 19.628,
"eval_steps_per_second": 4.907,
"step": 200
},
{
"epoch": 0.22767237390097178,
"grad_norm": 1.3296649623180647,
"learning_rate": 1.902167424781038e-05,
"loss": 0.9874,
"step": 205
},
{
"epoch": 0.23322535863026378,
"grad_norm": 1.4147575392387848,
"learning_rate": 1.8936326403234125e-05,
"loss": 0.9906,
"step": 210
},
{
"epoch": 0.23877834335955575,
"grad_norm": 1.5048777388561694,
"learning_rate": 1.8847617971766577e-05,
"loss": 0.9721,
"step": 215
},
{
"epoch": 0.24433132808884775,
"grad_norm": 1.3281954721580007,
"learning_rate": 1.8755582313020912e-05,
"loss": 0.9848,
"step": 220
},
{
"epoch": 0.24988431281813975,
"grad_norm": 1.3865529123681517,
"learning_rate": 1.866025403784439e-05,
"loss": 0.9885,
"step": 225
},
{
"epoch": 0.2554372975474317,
"grad_norm": 1.468591497661801,
"learning_rate": 1.8561668995302668e-05,
"loss": 0.9713,
"step": 230
},
{
"epoch": 0.2609902822767237,
"grad_norm": 1.3686298836564843,
"learning_rate": 1.845986425919841e-05,
"loss": 0.9579,
"step": 235
},
{
"epoch": 0.2665432670060157,
"grad_norm": 1.4148255169077197,
"learning_rate": 1.8354878114129368e-05,
"loss": 0.9506,
"step": 240
},
{
"epoch": 0.2720962517353077,
"grad_norm": 1.5172086489276786,
"learning_rate": 1.824675004109107e-05,
"loss": 0.99,
"step": 245
},
{
"epoch": 0.2776492364645997,
"grad_norm": 1.3436284741737878,
"learning_rate": 1.8135520702629677e-05,
"loss": 0.9654,
"step": 250
},
{
"epoch": 0.2832022211938917,
"grad_norm": 1.4414606449378646,
"learning_rate": 1.802123192755044e-05,
"loss": 0.9668,
"step": 255
},
{
"epoch": 0.2887552059231837,
"grad_norm": 1.4031899462530004,
"learning_rate": 1.7903926695187595e-05,
"loss": 0.9626,
"step": 260
},
{
"epoch": 0.2943081906524757,
"grad_norm": 1.4639958980701138,
"learning_rate": 1.7783649119241603e-05,
"loss": 0.9459,
"step": 265
},
{
"epoch": 0.2998611753817677,
"grad_norm": 1.426651386589118,
"learning_rate": 1.766044443118978e-05,
"loss": 0.9863,
"step": 270
},
{
"epoch": 0.3054141601110597,
"grad_norm": 1.4219614478552796,
"learning_rate": 1.7534358963276606e-05,
"loss": 0.9719,
"step": 275
},
{
"epoch": 0.3109671448403517,
"grad_norm": 1.3151103765065284,
"learning_rate": 1.740544013109005e-05,
"loss": 0.9903,
"step": 280
},
{
"epoch": 0.3165201295696437,
"grad_norm": 1.3354926740055781,
"learning_rate": 1.7273736415730488e-05,
"loss": 0.9681,
"step": 285
},
{
"epoch": 0.3220731142989357,
"grad_norm": 1.3717221395296357,
"learning_rate": 1.7139297345578992e-05,
"loss": 0.9456,
"step": 290
},
{
"epoch": 0.32762609902822765,
"grad_norm": 1.6411114205659896,
"learning_rate": 1.7002173477671685e-05,
"loss": 0.9591,
"step": 295
},
{
"epoch": 0.33317908375751965,
"grad_norm": 1.2826425067775913,
"learning_rate": 1.686241637868734e-05,
"loss": 0.9328,
"step": 300
},
{
"epoch": 0.33317908375751965,
"eval_loss": 0.99033522605896,
"eval_runtime": 13.2544,
"eval_samples_per_second": 19.314,
"eval_steps_per_second": 4.829,
"step": 300
},
{
"epoch": 0.33873206848681164,
"grad_norm": 1.4005676511844571,
"learning_rate": 1.6720078605555227e-05,
"loss": 0.9803,
"step": 305
},
{
"epoch": 0.34428505321610364,
"grad_norm": 1.3022121862564202,
"learning_rate": 1.657521368569064e-05,
"loss": 0.9622,
"step": 310
},
{
"epoch": 0.34983803794539564,
"grad_norm": 1.4147938551822972,
"learning_rate": 1.6427876096865394e-05,
"loss": 0.9733,
"step": 315
},
{
"epoch": 0.35539102267468764,
"grad_norm": 1.4563599805627543,
"learning_rate": 1.627812124672099e-05,
"loss": 0.9695,
"step": 320
},
{
"epoch": 0.36094400740397964,
"grad_norm": 1.2666931064627047,
"learning_rate": 1.6126005451932028e-05,
"loss": 0.9512,
"step": 325
},
{
"epoch": 0.36649699213327164,
"grad_norm": 1.3459612867606927,
"learning_rate": 1.5971585917027864e-05,
"loss": 0.9962,
"step": 330
},
{
"epoch": 0.37204997686256364,
"grad_norm": 1.4053767425847852,
"learning_rate": 1.5814920712880267e-05,
"loss": 0.9456,
"step": 335
},
{
"epoch": 0.37760296159185563,
"grad_norm": 1.2542630494816203,
"learning_rate": 1.5656068754865388e-05,
"loss": 0.9625,
"step": 340
},
{
"epoch": 0.38315594632114763,
"grad_norm": 1.2467268401516984,
"learning_rate": 1.5495089780708062e-05,
"loss": 0.9416,
"step": 345
},
{
"epoch": 0.38870893105043963,
"grad_norm": 1.3642420280912566,
"learning_rate": 1.5332044328016916e-05,
"loss": 0.9745,
"step": 350
},
{
"epoch": 0.39426191577973163,
"grad_norm": 1.370419991211419,
"learning_rate": 1.5166993711518631e-05,
"loss": 0.9235,
"step": 355
},
{
"epoch": 0.3998149005090236,
"grad_norm": 1.4175545037228292,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.9419,
"step": 360
},
{
"epoch": 0.4053678852383156,
"grad_norm": 1.5230125115585145,
"learning_rate": 1.4831125992966386e-05,
"loss": 0.9482,
"step": 365
},
{
"epoch": 0.41092086996760757,
"grad_norm": 1.3243119971736406,
"learning_rate": 1.4660435197025391e-05,
"loss": 0.9516,
"step": 370
},
{
"epoch": 0.41647385469689957,
"grad_norm": 1.3168758353005081,
"learning_rate": 1.4487991802004625e-05,
"loss": 0.9274,
"step": 375
},
{
"epoch": 0.42202683942619157,
"grad_norm": 1.3316158417964403,
"learning_rate": 1.4313860656812537e-05,
"loss": 0.9279,
"step": 380
},
{
"epoch": 0.42757982415548357,
"grad_norm": 1.2112610899992784,
"learning_rate": 1.4138107245051394e-05,
"loss": 0.9604,
"step": 385
},
{
"epoch": 0.43313280888477557,
"grad_norm": 1.4256068345744652,
"learning_rate": 1.396079766039157e-05,
"loss": 0.9504,
"step": 390
},
{
"epoch": 0.43868579361406757,
"grad_norm": 1.48676297842706,
"learning_rate": 1.3781998581716427e-05,
"loss": 0.9783,
"step": 395
},
{
"epoch": 0.44423877834335956,
"grad_norm": 1.3917645840392392,
"learning_rate": 1.3601777248047105e-05,
"loss": 0.9428,
"step": 400
},
{
"epoch": 0.44423877834335956,
"eval_loss": 0.975586473941803,
"eval_runtime": 13.0584,
"eval_samples_per_second": 19.604,
"eval_steps_per_second": 4.901,
"step": 400
},
{
"epoch": 0.44979176307265156,
"grad_norm": 1.3230682794396744,
"learning_rate": 1.342020143325669e-05,
"loss": 0.9477,
"step": 405
},
{
"epoch": 0.45534474780194356,
"grad_norm": 1.2045267312184351,
"learning_rate": 1.3237339420583213e-05,
"loss": 0.9568,
"step": 410
},
{
"epoch": 0.46089773253123556,
"grad_norm": 1.2839147189555775,
"learning_rate": 1.3053259976951134e-05,
"loss": 0.9256,
"step": 415
},
{
"epoch": 0.46645071726052756,
"grad_norm": 1.284551507215065,
"learning_rate": 1.2868032327110904e-05,
"loss": 0.9246,
"step": 420
},
{
"epoch": 0.4720037019898195,
"grad_norm": 1.3021372211969566,
"learning_rate": 1.2681726127606374e-05,
"loss": 0.9527,
"step": 425
},
{
"epoch": 0.4775566867191115,
"grad_norm": 1.3264954697360052,
"learning_rate": 1.2494411440579814e-05,
"loss": 0.9659,
"step": 430
},
{
"epoch": 0.4831096714484035,
"grad_norm": 1.2931203310254533,
"learning_rate": 1.2306158707424402e-05,
"loss": 0.9044,
"step": 435
},
{
"epoch": 0.4886626561776955,
"grad_norm": 1.5033726081614123,
"learning_rate": 1.211703872229411e-05,
"loss": 0.9135,
"step": 440
},
{
"epoch": 0.4942156409069875,
"grad_norm": 1.2465062347279259,
"learning_rate": 1.1927122605480899e-05,
"loss": 0.9382,
"step": 445
},
{
"epoch": 0.4997686256362795,
"grad_norm": 1.2206738144949079,
"learning_rate": 1.1736481776669307e-05,
"loss": 0.9429,
"step": 450
},
{
"epoch": 0.5053216103655715,
"grad_norm": 1.268246804784844,
"learning_rate": 1.1545187928078407e-05,
"loss": 0.9163,
"step": 455
},
{
"epoch": 0.5108745950948634,
"grad_norm": 1.1842171344447763,
"learning_rate": 1.1353312997501313e-05,
"loss": 0.9296,
"step": 460
},
{
"epoch": 0.5164275798241554,
"grad_norm": 1.3329158977326516,
"learning_rate": 1.1160929141252303e-05,
"loss": 0.9536,
"step": 465
},
{
"epoch": 0.5219805645534474,
"grad_norm": 1.3140264320853574,
"learning_rate": 1.0968108707031792e-05,
"loss": 0.9314,
"step": 470
},
{
"epoch": 0.5275335492827394,
"grad_norm": 1.2112452453436886,
"learning_rate": 1.077492420671931e-05,
"loss": 0.8858,
"step": 475
},
{
"epoch": 0.5330865340120314,
"grad_norm": 1.244066114064373,
"learning_rate": 1.0581448289104759e-05,
"loss": 0.938,
"step": 480
},
{
"epoch": 0.5386395187413234,
"grad_norm": 1.2958379438943313,
"learning_rate": 1.038775371256817e-05,
"loss": 0.9366,
"step": 485
},
{
"epoch": 0.5441925034706154,
"grad_norm": 1.2706792533588414,
"learning_rate": 1.0193913317718245e-05,
"loss": 0.9809,
"step": 490
},
{
"epoch": 0.5497454881999074,
"grad_norm": 1.2373793128051882,
"learning_rate": 1e-05,
"loss": 0.9107,
"step": 495
},
{
"epoch": 0.5552984729291994,
"grad_norm": 1.3259462807230906,
"learning_rate": 9.806086682281759e-06,
"loss": 0.9478,
"step": 500
},
{
"epoch": 0.5552984729291994,
"eval_loss": 0.9645185470581055,
"eval_runtime": 14.0973,
"eval_samples_per_second": 18.16,
"eval_steps_per_second": 4.54,
"step": 500
},
{
"epoch": 0.5608514576584914,
"grad_norm": 1.2818749562353056,
"learning_rate": 9.612246287431832e-06,
"loss": 0.9157,
"step": 505
},
{
"epoch": 0.5664044423877834,
"grad_norm": 1.2789066996692675,
"learning_rate": 9.418551710895243e-06,
"loss": 0.9715,
"step": 510
},
{
"epoch": 0.5719574271170754,
"grad_norm": 1.3159690301384173,
"learning_rate": 9.225075793280693e-06,
"loss": 0.9564,
"step": 515
},
{
"epoch": 0.5775104118463674,
"grad_norm": 1.1820999233185365,
"learning_rate": 9.03189129296821e-06,
"loss": 0.9333,
"step": 520
},
{
"epoch": 0.5830633965756594,
"grad_norm": 2.9705367947063595,
"learning_rate": 8.839070858747697e-06,
"loss": 0.9393,
"step": 525
},
{
"epoch": 0.5886163813049514,
"grad_norm": 1.1578484132538005,
"learning_rate": 8.646687002498692e-06,
"loss": 0.9386,
"step": 530
},
{
"epoch": 0.5941693660342434,
"grad_norm": 1.2495396181990488,
"learning_rate": 8.454812071921597e-06,
"loss": 0.9202,
"step": 535
},
{
"epoch": 0.5997223507635354,
"grad_norm": 1.18243382811664,
"learning_rate": 8.263518223330698e-06,
"loss": 0.9222,
"step": 540
},
{
"epoch": 0.6052753354928274,
"grad_norm": 1.2247856999180733,
"learning_rate": 8.072877394519103e-06,
"loss": 0.9426,
"step": 545
},
{
"epoch": 0.6108283202221194,
"grad_norm": 1.220145252189745,
"learning_rate": 7.882961277705897e-06,
"loss": 0.9161,
"step": 550
},
{
"epoch": 0.6163813049514114,
"grad_norm": 1.2450054960684753,
"learning_rate": 7.6938412925756e-06,
"loss": 0.9419,
"step": 555
},
{
"epoch": 0.6219342896807034,
"grad_norm": 1.2450637061923184,
"learning_rate": 7.505588559420188e-06,
"loss": 0.9471,
"step": 560
},
{
"epoch": 0.6274872744099954,
"grad_norm": 1.2160705969336116,
"learning_rate": 7.3182738723936255e-06,
"loss": 0.9446,
"step": 565
},
{
"epoch": 0.6330402591392874,
"grad_norm": 1.2857156644799776,
"learning_rate": 7.131967672889101e-06,
"loss": 0.9473,
"step": 570
},
{
"epoch": 0.6385932438685794,
"grad_norm": 1.1688923012156514,
"learning_rate": 6.94674002304887e-06,
"loss": 0.9193,
"step": 575
},
{
"epoch": 0.6441462285978714,
"grad_norm": 1.3240345620915759,
"learning_rate": 6.762660579416791e-06,
"loss": 0.955,
"step": 580
},
{
"epoch": 0.6496992133271634,
"grad_norm": 1.171487612102084,
"learning_rate": 6.579798566743314e-06,
"loss": 0.9345,
"step": 585
},
{
"epoch": 0.6552521980564553,
"grad_norm": 1.1809918233177483,
"learning_rate": 6.3982227519528986e-06,
"loss": 0.9317,
"step": 590
},
{
"epoch": 0.6608051827857473,
"grad_norm": 1.1849135550680583,
"learning_rate": 6.218001418283577e-06,
"loss": 0.9282,
"step": 595
},
{
"epoch": 0.6663581675150393,
"grad_norm": 1.2353428612054926,
"learning_rate": 6.039202339608432e-06,
"loss": 0.9186,
"step": 600
},
{
"epoch": 0.6663581675150393,
"eval_loss": 0.9549762010574341,
"eval_runtime": 13.1058,
"eval_samples_per_second": 19.533,
"eval_steps_per_second": 4.883,
"step": 600
},
{
"epoch": 0.6719111522443313,
"grad_norm": 1.2260206730700363,
"learning_rate": 5.8618927549486095e-06,
"loss": 0.91,
"step": 605
},
{
"epoch": 0.6774641369736233,
"grad_norm": 1.2511346515975978,
"learning_rate": 5.686139343187468e-06,
"loss": 0.9445,
"step": 610
},
{
"epoch": 0.6830171217029153,
"grad_norm": 1.1415920400151276,
"learning_rate": 5.512008197995379e-06,
"loss": 0.9267,
"step": 615
},
{
"epoch": 0.6885701064322073,
"grad_norm": 1.1718028560057823,
"learning_rate": 5.339564802974615e-06,
"loss": 0.9173,
"step": 620
},
{
"epoch": 0.6941230911614993,
"grad_norm": 1.1427826499364968,
"learning_rate": 5.168874007033615e-06,
"loss": 0.9113,
"step": 625
},
{
"epoch": 0.6996760758907913,
"grad_norm": 1.1646711162888848,
"learning_rate": 5.000000000000003e-06,
"loss": 0.9314,
"step": 630
},
{
"epoch": 0.7052290606200833,
"grad_norm": 1.1666389482954498,
"learning_rate": 4.8330062884813714e-06,
"loss": 0.949,
"step": 635
},
{
"epoch": 0.7107820453493753,
"grad_norm": 1.2669383225264854,
"learning_rate": 4.66795567198309e-06,
"loss": 0.9298,
"step": 640
},
{
"epoch": 0.7163350300786673,
"grad_norm": 1.1727436218210467,
"learning_rate": 4.504910219291941e-06,
"loss": 0.9384,
"step": 645
},
{
"epoch": 0.7218880148079593,
"grad_norm": 1.160141429869342,
"learning_rate": 4.343931245134616e-06,
"loss": 0.9231,
"step": 650
},
{
"epoch": 0.7274409995372513,
"grad_norm": 1.1571744280380951,
"learning_rate": 4.185079287119733e-06,
"loss": 0.9379,
"step": 655
},
{
"epoch": 0.7329939842665433,
"grad_norm": 1.2080957370089618,
"learning_rate": 4.028414082972141e-06,
"loss": 0.9087,
"step": 660
},
{
"epoch": 0.7385469689958353,
"grad_norm": 1.2262375492125892,
"learning_rate": 3.873994548067972e-06,
"loss": 0.9175,
"step": 665
},
{
"epoch": 0.7440999537251273,
"grad_norm": 1.1868816861234752,
"learning_rate": 3.7218787532790167e-06,
"loss": 0.915,
"step": 670
},
{
"epoch": 0.7496529384544193,
"grad_norm": 1.2149836180874647,
"learning_rate": 3.5721239031346067e-06,
"loss": 0.9359,
"step": 675
},
{
"epoch": 0.7552059231837113,
"grad_norm": 1.1401883150941166,
"learning_rate": 3.424786314309365e-06,
"loss": 0.8996,
"step": 680
},
{
"epoch": 0.7607589079130033,
"grad_norm": 1.1918737445166034,
"learning_rate": 3.279921394444776e-06,
"loss": 0.9478,
"step": 685
},
{
"epoch": 0.7663118926422953,
"grad_norm": 1.1601543056853199,
"learning_rate": 3.1375836213126653e-06,
"loss": 0.9144,
"step": 690
},
{
"epoch": 0.7718648773715873,
"grad_norm": 1.1849230333131153,
"learning_rate": 2.9978265223283152e-06,
"loss": 0.9134,
"step": 695
},
{
"epoch": 0.7774178621008793,
"grad_norm": 1.1922764155084293,
"learning_rate": 2.8607026544210115e-06,
"loss": 0.9184,
"step": 700
},
{
"epoch": 0.7774178621008793,
"eval_loss": 0.9487817287445068,
"eval_runtime": 12.9966,
"eval_samples_per_second": 19.697,
"eval_steps_per_second": 4.924,
"step": 700
},
{
"epoch": 0.7829708468301713,
"grad_norm": 1.0668112491118575,
"learning_rate": 2.726263584269513e-06,
"loss": 0.9038,
"step": 705
},
{
"epoch": 0.7885238315594633,
"grad_norm": 1.191610088893989,
"learning_rate": 2.594559868909956e-06,
"loss": 0.9003,
"step": 710
},
{
"epoch": 0.7940768162887553,
"grad_norm": 1.2465046516243472,
"learning_rate": 2.4656410367233928e-06,
"loss": 0.9215,
"step": 715
},
{
"epoch": 0.7996298010180471,
"grad_norm": 1.1463175579772371,
"learning_rate": 2.339555568810221e-06,
"loss": 0.9303,
"step": 720
},
{
"epoch": 0.8051827857473391,
"grad_norm": 1.1279074050372055,
"learning_rate": 2.2163508807584e-06,
"loss": 0.9294,
"step": 725
},
{
"epoch": 0.8107357704766311,
"grad_norm": 1.1668600336379225,
"learning_rate": 2.0960733048124082e-06,
"loss": 0.9082,
"step": 730
},
{
"epoch": 0.8162887552059231,
"grad_norm": 1.1280124295582716,
"learning_rate": 1.9787680724495617e-06,
"loss": 0.898,
"step": 735
},
{
"epoch": 0.8218417399352151,
"grad_norm": 1.2693061690074205,
"learning_rate": 1.8644792973703252e-06,
"loss": 0.9473,
"step": 740
},
{
"epoch": 0.8273947246645071,
"grad_norm": 1.0935688113313613,
"learning_rate": 1.7532499589089324e-06,
"loss": 0.9312,
"step": 745
},
{
"epoch": 0.8329477093937991,
"grad_norm": 1.1491375579091732,
"learning_rate": 1.6451218858706374e-06,
"loss": 0.9295,
"step": 750
},
{
"epoch": 0.8385006941230911,
"grad_norm": 1.1971381545639446,
"learning_rate": 1.5401357408015893e-06,
"loss": 0.8977,
"step": 755
},
{
"epoch": 0.8440536788523831,
"grad_norm": 1.2118976466189748,
"learning_rate": 1.4383310046973365e-06,
"loss": 0.9201,
"step": 760
},
{
"epoch": 0.8496066635816751,
"grad_norm": 1.1129578498310642,
"learning_rate": 1.339745962155613e-06,
"loss": 0.9235,
"step": 765
},
{
"epoch": 0.8551596483109671,
"grad_norm": 1.155034707390975,
"learning_rate": 1.2444176869790925e-06,
"loss": 0.906,
"step": 770
},
{
"epoch": 0.8607126330402591,
"grad_norm": 1.1211050312494577,
"learning_rate": 1.152382028233422e-06,
"loss": 0.9027,
"step": 775
},
{
"epoch": 0.8662656177695511,
"grad_norm": 1.161462499770876,
"learning_rate": 1.0636735967658785e-06,
"loss": 0.902,
"step": 780
},
{
"epoch": 0.8718186024988431,
"grad_norm": 1.2466679714276248,
"learning_rate": 9.783257521896228e-07,
"loss": 0.9312,
"step": 785
},
{
"epoch": 0.8773715872281351,
"grad_norm": 1.1688203046154668,
"learning_rate": 8.963705903385344e-07,
"loss": 0.9231,
"step": 790
},
{
"epoch": 0.8829245719574271,
"grad_norm": 1.1446515266652288,
"learning_rate": 8.178389311972612e-07,
"loss": 0.924,
"step": 795
},
{
"epoch": 0.8884775566867191,
"grad_norm": 1.1971045442791841,
"learning_rate": 7.427603073110967e-07,
"loss": 0.8564,
"step": 800
},
{
"epoch": 0.8884775566867191,
"eval_loss": 0.9455364346504211,
"eval_runtime": 13.3345,
"eval_samples_per_second": 19.198,
"eval_steps_per_second": 4.8,
"step": 800
},
{
"epoch": 0.8940305414160111,
"grad_norm": 1.158461858290895,
"learning_rate": 6.711629526799946e-07,
"loss": 0.9399,
"step": 805
},
{
"epoch": 0.8995835261453031,
"grad_norm": 1.1264872944659339,
"learning_rate": 6.030737921409169e-07,
"loss": 0.8946,
"step": 810
},
{
"epoch": 0.9051365108745951,
"grad_norm": 1.122295252126309,
"learning_rate": 5.385184312424973e-07,
"loss": 0.9228,
"step": 815
},
{
"epoch": 0.9106894956038871,
"grad_norm": 1.130327163633901,
"learning_rate": 4.775211466158469e-07,
"loss": 0.9278,
"step": 820
},
{
"epoch": 0.9162424803331791,
"grad_norm": 1.152980775746581,
"learning_rate": 4.2010487684511105e-07,
"loss": 0.9105,
"step": 825
},
{
"epoch": 0.9217954650624711,
"grad_norm": 1.2630255671771575,
"learning_rate": 3.662912138411967e-07,
"loss": 0.9103,
"step": 830
},
{
"epoch": 0.9273484497917631,
"grad_norm": 1.1212305446295874,
"learning_rate": 3.161003947219421e-07,
"loss": 0.8847,
"step": 835
},
{
"epoch": 0.9329014345210551,
"grad_norm": 1.1635748807768116,
"learning_rate": 2.6955129420176193e-07,
"loss": 0.9139,
"step": 840
},
{
"epoch": 0.9384544192503471,
"grad_norm": 1.101552910147697,
"learning_rate": 2.2666141749364434e-07,
"loss": 0.9233,
"step": 845
},
{
"epoch": 0.944007403979639,
"grad_norm": 1.1104286718600382,
"learning_rate": 1.874468937261531e-07,
"loss": 0.8872,
"step": 850
},
{
"epoch": 0.949560388708931,
"grad_norm": 1.1513796874736284,
"learning_rate": 1.519224698779198e-07,
"loss": 0.938,
"step": 855
},
{
"epoch": 0.955113373438223,
"grad_norm": 1.172996631179348,
"learning_rate": 1.201015052319099e-07,
"loss": 0.9366,
"step": 860
},
{
"epoch": 0.960666358167515,
"grad_norm": 1.178813337349509,
"learning_rate": 9.199596635154684e-08,
"loss": 0.9382,
"step": 865
},
{
"epoch": 0.966219342896807,
"grad_norm": 1.123260974533791,
"learning_rate": 6.761642258056977e-08,
"loss": 0.8969,
"step": 870
},
{
"epoch": 0.971772327626099,
"grad_norm": 1.2440553964560856,
"learning_rate": 4.6972042068341714e-08,
"loss": 0.9125,
"step": 875
},
{
"epoch": 0.977325312355391,
"grad_norm": 1.100622329056596,
"learning_rate": 3.0070588322079765e-08,
"loss": 0.9106,
"step": 880
},
{
"epoch": 0.982878297084683,
"grad_norm": 1.224884941200071,
"learning_rate": 1.6918417287318245e-08,
"loss": 0.9045,
"step": 885
},
{
"epoch": 0.988431281813975,
"grad_norm": 1.1506168883594277,
"learning_rate": 7.520474957699586e-09,
"loss": 0.9341,
"step": 890
},
{
"epoch": 0.993984266543267,
"grad_norm": 1.3030322318546892,
"learning_rate": 1.8802955149865854e-09,
"loss": 0.9093,
"step": 895
},
{
"epoch": 0.999537251272559,
"grad_norm": 1.164399511709177,
"learning_rate": 0.0,
"loss": 0.943,
"step": 900
},
{
"epoch": 0.999537251272559,
"eval_loss": 0.9448966979980469,
"eval_runtime": 12.8793,
"eval_samples_per_second": 19.877,
"eval_steps_per_second": 4.969,
"step": 900
},
{
"epoch": 0.999537251272559,
"step": 900,
"total_flos": 37979261239296.0,
"train_loss": 0.9636553647783067,
"train_runtime": 8704.1469,
"train_samples_per_second": 4.965,
"train_steps_per_second": 0.103
}
],
"logging_steps": 5,
"max_steps": 900,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 37979261239296.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}