{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.806451612903226,
"eval_steps": 16,
"global_step": 434,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016129032258064516,
"grad_norm": 5.020356178283691,
"learning_rate": 2e-05,
"loss": 2.1533,
"step": 1
},
{
"epoch": 0.016129032258064516,
"eval_loss": 2.183060884475708,
"eval_runtime": 261.5376,
"eval_samples_per_second": 4.053,
"eval_steps_per_second": 0.677,
"step": 1
},
{
"epoch": 0.03225806451612903,
"grad_norm": 5.117613315582275,
"learning_rate": 4e-05,
"loss": 2.1211,
"step": 2
},
{
"epoch": 0.04838709677419355,
"grad_norm": 4.581052303314209,
"learning_rate": 6e-05,
"loss": 2.1036,
"step": 3
},
{
"epoch": 0.06451612903225806,
"grad_norm": 3.6673312187194824,
"learning_rate": 8e-05,
"loss": 2.0632,
"step": 4
},
{
"epoch": 0.08064516129032258,
"grad_norm": 3.1003849506378174,
"learning_rate": 0.0001,
"loss": 2.021,
"step": 5
},
{
"epoch": 0.0967741935483871,
"grad_norm": 2.2640504837036133,
"learning_rate": 0.00012,
"loss": 1.9713,
"step": 6
},
{
"epoch": 0.11290322580645161,
"grad_norm": 2.594027519226074,
"learning_rate": 0.00014,
"loss": 1.8607,
"step": 7
},
{
"epoch": 0.12903225806451613,
"grad_norm": 2.064988136291504,
"learning_rate": 0.00016,
"loss": 1.8101,
"step": 8
},
{
"epoch": 0.14516129032258066,
"grad_norm": 1.8494470119476318,
"learning_rate": 0.00018,
"loss": 1.7351,
"step": 9
},
{
"epoch": 0.16129032258064516,
"grad_norm": 1.8000997304916382,
"learning_rate": 0.0002,
"loss": 1.7415,
"step": 10
},
{
"epoch": 0.1774193548387097,
"grad_norm": 1.7077090740203857,
"learning_rate": 0.0001999986737997063,
"loss": 1.7155,
"step": 11
},
{
"epoch": 0.1935483870967742,
"grad_norm": 1.8839540481567383,
"learning_rate": 0.00019999469523400122,
"loss": 1.7474,
"step": 12
},
{
"epoch": 0.20967741935483872,
"grad_norm": 1.7330329418182373,
"learning_rate": 0.00019998806440841234,
"loss": 1.6775,
"step": 13
},
{
"epoch": 0.22580645161290322,
"grad_norm": 1.4584600925445557,
"learning_rate": 0.00019997878149881574,
"loss": 1.7176,
"step": 14
},
{
"epoch": 0.24193548387096775,
"grad_norm": 1.4614368677139282,
"learning_rate": 0.0001999668467514313,
"loss": 1.6047,
"step": 15
},
{
"epoch": 0.25806451612903225,
"grad_norm": 1.3776947259902954,
"learning_rate": 0.0001999522604828164,
"loss": 1.6416,
"step": 16
},
{
"epoch": 0.25806451612903225,
"eval_loss": 1.7332706451416016,
"eval_runtime": 431.4384,
"eval_samples_per_second": 2.457,
"eval_steps_per_second": 0.41,
"step": 16
},
{
"epoch": 0.27419354838709675,
"grad_norm": 1.4929783344268799,
"learning_rate": 0.00019993502307985724,
"loss": 1.73,
"step": 17
},
{
"epoch": 0.2903225806451613,
"grad_norm": 1.3602776527404785,
"learning_rate": 0.00019991513499975882,
"loss": 1.6958,
"step": 18
},
{
"epoch": 0.3064516129032258,
"grad_norm": 1.4044336080551147,
"learning_rate": 0.00019989259677003274,
"loss": 1.6475,
"step": 19
},
{
"epoch": 0.3225806451612903,
"grad_norm": 1.3602012395858765,
"learning_rate": 0.00019986740898848306,
"loss": 1.7063,
"step": 20
},
{
"epoch": 0.3387096774193548,
"grad_norm": 1.3823881149291992,
"learning_rate": 0.0001998395723231907,
"loss": 1.7079,
"step": 21
},
{
"epoch": 0.3548387096774194,
"grad_norm": 1.3182601928710938,
"learning_rate": 0.00019980908751249555,
"loss": 1.672,
"step": 22
},
{
"epoch": 0.3709677419354839,
"grad_norm": 1.2420353889465332,
"learning_rate": 0.00019977595536497687,
"loss": 1.6393,
"step": 23
},
{
"epoch": 0.3870967741935484,
"grad_norm": 1.3861604928970337,
"learning_rate": 0.00019974017675943192,
"loss": 1.6904,
"step": 24
},
{
"epoch": 0.4032258064516129,
"grad_norm": 1.537832498550415,
"learning_rate": 0.00019970175264485266,
"loss": 1.6731,
"step": 25
},
{
"epoch": 0.41935483870967744,
"grad_norm": 1.2361652851104736,
"learning_rate": 0.0001996606840404006,
"loss": 1.6262,
"step": 26
},
{
"epoch": 0.43548387096774194,
"grad_norm": 1.253831148147583,
"learning_rate": 0.00019961697203537952,
"loss": 1.6068,
"step": 27
},
{
"epoch": 0.45161290322580644,
"grad_norm": 1.306778073310852,
"learning_rate": 0.00019957061778920701,
"loss": 1.6397,
"step": 28
},
{
"epoch": 0.46774193548387094,
"grad_norm": 1.1258474588394165,
"learning_rate": 0.0001995216225313833,
"loss": 1.5656,
"step": 29
},
{
"epoch": 0.4838709677419355,
"grad_norm": 1.198811411857605,
"learning_rate": 0.0001994699875614589,
"loss": 1.6768,
"step": 30
},
{
"epoch": 0.5,
"grad_norm": 1.4581488370895386,
"learning_rate": 0.00019941571424900013,
"loss": 1.5823,
"step": 31
},
{
"epoch": 0.5161290322580645,
"grad_norm": 1.2790226936340332,
"learning_rate": 0.00019935880403355253,
"loss": 1.6154,
"step": 32
},
{
"epoch": 0.5161290322580645,
"eval_loss": 1.64582359790802,
"eval_runtime": 430.558,
"eval_samples_per_second": 2.462,
"eval_steps_per_second": 0.411,
"step": 32
},
{
"epoch": 0.532258064516129,
"grad_norm": 1.1969035863876343,
"learning_rate": 0.0001992992584246031,
"loss": 1.6766,
"step": 33
},
{
"epoch": 0.5483870967741935,
"grad_norm": 1.4593340158462524,
"learning_rate": 0.00019923707900153982,
"loss": 1.6112,
"step": 34
},
{
"epoch": 0.5645161290322581,
"grad_norm": 1.306646704673767,
"learning_rate": 0.00019917226741361015,
"loss": 1.5671,
"step": 35
},
{
"epoch": 0.5806451612903226,
"grad_norm": 1.3403998613357544,
"learning_rate": 0.00019910482537987702,
"loss": 1.6703,
"step": 36
},
{
"epoch": 0.5967741935483871,
"grad_norm": 1.1974356174468994,
"learning_rate": 0.0001990347546891733,
"loss": 1.586,
"step": 37
},
{
"epoch": 0.6129032258064516,
"grad_norm": 1.3454561233520508,
"learning_rate": 0.0001989620572000544,
"loss": 1.6023,
"step": 38
},
{
"epoch": 0.6290322580645161,
"grad_norm": 1.2124018669128418,
"learning_rate": 0.000198886734840749,
"loss": 1.6405,
"step": 39
},
{
"epoch": 0.6451612903225806,
"grad_norm": 1.0802409648895264,
"learning_rate": 0.00019880878960910772,
"loss": 1.5191,
"step": 40
},
{
"epoch": 0.6612903225806451,
"grad_norm": 1.4059436321258545,
"learning_rate": 0.0001987282235725504,
"loss": 1.5969,
"step": 41
},
{
"epoch": 0.6774193548387096,
"grad_norm": 1.2623989582061768,
"learning_rate": 0.00019864503886801106,
"loss": 1.5921,
"step": 42
},
{
"epoch": 0.6935483870967742,
"grad_norm": 1.2654995918273926,
"learning_rate": 0.0001985592377018813,
"loss": 1.5707,
"step": 43
},
{
"epoch": 0.7096774193548387,
"grad_norm": 1.2158616781234741,
"learning_rate": 0.00019847082234995171,
"loss": 1.5141,
"step": 44
},
{
"epoch": 0.7258064516129032,
"grad_norm": 1.2419042587280273,
"learning_rate": 0.00019837979515735166,
"loss": 1.5541,
"step": 45
},
{
"epoch": 0.7419354838709677,
"grad_norm": 1.1441471576690674,
"learning_rate": 0.00019828615853848688,
"loss": 1.4788,
"step": 46
},
{
"epoch": 0.7580645161290323,
"grad_norm": 1.1924492120742798,
"learning_rate": 0.00019818991497697565,
"loss": 1.5335,
"step": 47
},
{
"epoch": 0.7741935483870968,
"grad_norm": 1.133042812347412,
"learning_rate": 0.00019809106702558277,
"loss": 1.5155,
"step": 48
},
{
"epoch": 0.7741935483870968,
"eval_loss": 1.5807298421859741,
"eval_runtime": 433.4129,
"eval_samples_per_second": 2.446,
"eval_steps_per_second": 0.408,
"step": 48
},
{
"epoch": 0.7903225806451613,
"grad_norm": 1.2338794469833374,
"learning_rate": 0.0001979896173061518,
"loss": 1.611,
"step": 49
},
{
"epoch": 0.8064516129032258,
"grad_norm": 1.306178331375122,
"learning_rate": 0.0001978855685095358,
"loss": 1.5046,
"step": 50
},
{
"epoch": 0.8225806451612904,
"grad_norm": 1.174604058265686,
"learning_rate": 0.00019777892339552559,
"loss": 1.5431,
"step": 51
},
{
"epoch": 0.8387096774193549,
"grad_norm": 1.3554447889328003,
"learning_rate": 0.00019766968479277683,
"loss": 1.5713,
"step": 52
},
{
"epoch": 0.8548387096774194,
"grad_norm": 1.111693263053894,
"learning_rate": 0.00019755785559873488,
"loss": 1.5173,
"step": 53
},
{
"epoch": 0.8709677419354839,
"grad_norm": 1.1916875839233398,
"learning_rate": 0.00019744343877955788,
"loss": 1.5706,
"step": 54
},
{
"epoch": 0.8870967741935484,
"grad_norm": 1.1925556659698486,
"learning_rate": 0.00019732643737003827,
"loss": 1.5466,
"step": 55
},
{
"epoch": 0.9032258064516129,
"grad_norm": 1.174657940864563,
"learning_rate": 0.00019720685447352209,
"loss": 1.5281,
"step": 56
},
{
"epoch": 0.9193548387096774,
"grad_norm": 1.1206492185592651,
"learning_rate": 0.00019708469326182678,
"loss": 1.5871,
"step": 57
},
{
"epoch": 0.9354838709677419,
"grad_norm": 1.089790940284729,
"learning_rate": 0.0001969599569751571,
"loss": 1.5988,
"step": 58
},
{
"epoch": 0.9516129032258065,
"grad_norm": 1.146849274635315,
"learning_rate": 0.000196832648922019,
"loss": 1.5066,
"step": 59
},
{
"epoch": 0.967741935483871,
"grad_norm": 1.0739450454711914,
"learning_rate": 0.00019670277247913205,
"loss": 1.5486,
"step": 60
},
{
"epoch": 0.9838709677419355,
"grad_norm": 1.0958797931671143,
"learning_rate": 0.00019657033109133975,
"loss": 1.5212,
"step": 61
},
{
"epoch": 1.0,
"grad_norm": 1.134975552558899,
"learning_rate": 0.0001964353282715183,
"loss": 1.5581,
"step": 62
},
{
"epoch": 1.0161290322580645,
"grad_norm": 1.1783857345581055,
"learning_rate": 0.0001962977676004832,
"loss": 1.4905,
"step": 63
},
{
"epoch": 1.032258064516129,
"grad_norm": 1.0987132787704468,
"learning_rate": 0.00019615765272689461,
"loss": 1.5359,
"step": 64
},
{
"epoch": 1.032258064516129,
"eval_loss": 1.5370668172836304,
"eval_runtime": 379.6837,
"eval_samples_per_second": 2.792,
"eval_steps_per_second": 0.466,
"step": 64
},
{
"epoch": 1.0161290322580645,
"grad_norm": 1.0743085145950317,
"learning_rate": 0.00019601498736716017,
"loss": 1.1431,
"step": 65
},
{
"epoch": 1.032258064516129,
"grad_norm": 0.9797853231430054,
"learning_rate": 0.00019586977530533677,
"loss": 1.0969,
"step": 66
},
{
"epoch": 1.0483870967741935,
"grad_norm": 1.1307501792907715,
"learning_rate": 0.00019572202039303,
"loss": 1.0826,
"step": 67
},
{
"epoch": 1.064516129032258,
"grad_norm": 1.0152018070220947,
"learning_rate": 0.00019557172654929196,
"loss": 1.1224,
"step": 68
},
{
"epoch": 1.0806451612903225,
"grad_norm": 1.0957362651824951,
"learning_rate": 0.0001954188977605175,
"loss": 1.0978,
"step": 69
},
{
"epoch": 1.096774193548387,
"grad_norm": 1.0177264213562012,
"learning_rate": 0.00019526353808033825,
"loss": 1.0882,
"step": 70
},
{
"epoch": 1.1129032258064515,
"grad_norm": 1.0174063444137573,
"learning_rate": 0.00019510565162951537,
"loss": 1.0331,
"step": 71
},
{
"epoch": 1.129032258064516,
"grad_norm": 1.0872232913970947,
"learning_rate": 0.00019494524259582992,
"loss": 1.0552,
"step": 72
},
{
"epoch": 1.1451612903225807,
"grad_norm": 1.0081063508987427,
"learning_rate": 0.00019478231523397215,
"loss": 1.023,
"step": 73
},
{
"epoch": 1.1612903225806452,
"grad_norm": 1.083362340927124,
"learning_rate": 0.00019461687386542826,
"loss": 1.0639,
"step": 74
},
{
"epoch": 1.1774193548387097,
"grad_norm": 1.1271260976791382,
"learning_rate": 0.00019444892287836613,
"loss": 1.053,
"step": 75
},
{
"epoch": 1.1935483870967742,
"grad_norm": 1.1567646265029907,
"learning_rate": 0.00019427846672751873,
"loss": 1.0214,
"step": 76
},
{
"epoch": 1.2096774193548387,
"grad_norm": 1.1053016185760498,
"learning_rate": 0.00019410550993406603,
"loss": 1.0552,
"step": 77
},
{
"epoch": 1.2258064516129032,
"grad_norm": 1.0573008060455322,
"learning_rate": 0.00019393005708551498,
"loss": 1.0223,
"step": 78
},
{
"epoch": 1.2419354838709677,
"grad_norm": 1.081141471862793,
"learning_rate": 0.00019375211283557798,
"loss": 1.0422,
"step": 79
},
{
"epoch": 1.2580645161290323,
"grad_norm": 1.154768943786621,
"learning_rate": 0.00019357168190404936,
"loss": 1.0746,
"step": 80
},
{
"epoch": 1.2580645161290323,
"eval_loss": 1.5887596607208252,
"eval_runtime": 379.2194,
"eval_samples_per_second": 2.795,
"eval_steps_per_second": 0.467,
"step": 80
},
{
"epoch": 1.2741935483870968,
"grad_norm": 1.0481725931167603,
"learning_rate": 0.00019338876907668026,
"loss": 1.0684,
"step": 81
},
{
"epoch": 1.2903225806451613,
"grad_norm": 1.1526888608932495,
"learning_rate": 0.00019320337920505153,
"loss": 1.104,
"step": 82
},
{
"epoch": 1.3064516129032258,
"grad_norm": 1.108069658279419,
"learning_rate": 0.00019301551720644523,
"loss": 1.0907,
"step": 83
},
{
"epoch": 1.3225806451612903,
"grad_norm": 1.1018728017807007,
"learning_rate": 0.00019282518806371414,
"loss": 1.0699,
"step": 84
},
{
"epoch": 1.3387096774193548,
"grad_norm": 1.157453179359436,
"learning_rate": 0.00019263239682514952,
"loss": 1.0589,
"step": 85
},
{
"epoch": 1.3548387096774195,
"grad_norm": 1.116068959236145,
"learning_rate": 0.0001924371486043473,
"loss": 1.0847,
"step": 86
},
{
"epoch": 1.370967741935484,
"grad_norm": 1.0575870275497437,
"learning_rate": 0.00019223944858007253,
"loss": 1.0688,
"step": 87
},
{
"epoch": 1.3870967741935485,
"grad_norm": 1.1120954751968384,
"learning_rate": 0.0001920393019961217,
"loss": 1.0829,
"step": 88
},
{
"epoch": 1.403225806451613,
"grad_norm": 1.0838677883148193,
"learning_rate": 0.00019183671416118405,
"loss": 1.0751,
"step": 89
},
{
"epoch": 1.4193548387096775,
"grad_norm": 1.1601375341415405,
"learning_rate": 0.0001916316904487005,
"loss": 1.0348,
"step": 90
},
{
"epoch": 1.435483870967742,
"grad_norm": 1.080993413925171,
"learning_rate": 0.00019142423629672117,
"loss": 1.0512,
"step": 91
},
{
"epoch": 1.4516129032258065,
"grad_norm": 1.4107691049575806,
"learning_rate": 0.00019121435720776122,
"loss": 1.0834,
"step": 92
},
{
"epoch": 1.467741935483871,
"grad_norm": 1.10901939868927,
"learning_rate": 0.00019100205874865485,
"loss": 1.127,
"step": 93
},
{
"epoch": 1.4838709677419355,
"grad_norm": 1.040759563446045,
"learning_rate": 0.0001907873465504076,
"loss": 1.0838,
"step": 94
},
{
"epoch": 1.5,
"grad_norm": 1.117697834968567,
"learning_rate": 0.00019057022630804716,
"loss": 1.094,
"step": 95
},
{
"epoch": 1.5161290322580645,
"grad_norm": 1.1124318838119507,
"learning_rate": 0.00019035070378047204,
"loss": 1.0806,
"step": 96
},
{
"epoch": 1.5161290322580645,
"eval_loss": 1.569594144821167,
"eval_runtime": 379.9276,
"eval_samples_per_second": 2.79,
"eval_steps_per_second": 0.466,
"step": 96
},
{
"epoch": 1.532258064516129,
"grad_norm": 1.0274182558059692,
"learning_rate": 0.00019012878479029906,
"loss": 1.0322,
"step": 97
},
{
"epoch": 1.5483870967741935,
"grad_norm": 1.214774250984192,
"learning_rate": 0.00018990447522370884,
"loss": 1.1029,
"step": 98
},
{
"epoch": 1.564516129032258,
"grad_norm": 1.146145224571228,
"learning_rate": 0.00018967778103028967,
"loss": 1.0939,
"step": 99
},
{
"epoch": 1.5806451612903225,
"grad_norm": 1.115272045135498,
"learning_rate": 0.00018944870822287956,
"loss": 1.0795,
"step": 100
},
{
"epoch": 1.596774193548387,
"grad_norm": 1.0580235719680786,
"learning_rate": 0.0001892172628774071,
"loss": 1.0887,
"step": 101
},
{
"epoch": 1.6129032258064515,
"grad_norm": 1.1681277751922607,
"learning_rate": 0.00018898345113272998,
"loss": 1.0781,
"step": 102
},
{
"epoch": 1.629032258064516,
"grad_norm": 1.1023615598678589,
"learning_rate": 0.00018874727919047227,
"loss": 1.0679,
"step": 103
},
{
"epoch": 1.6451612903225805,
"grad_norm": 1.139622688293457,
"learning_rate": 0.00018850875331485995,
"loss": 1.0795,
"step": 104
},
{
"epoch": 1.661290322580645,
"grad_norm": 1.0712463855743408,
"learning_rate": 0.00018826787983255473,
"loss": 1.11,
"step": 105
},
{
"epoch": 1.6774193548387095,
"grad_norm": 1.0515589714050293,
"learning_rate": 0.00018802466513248632,
"loss": 1.0896,
"step": 106
},
{
"epoch": 1.6935483870967742,
"grad_norm": 1.0597035884857178,
"learning_rate": 0.00018777911566568282,
"loss": 1.0761,
"step": 107
},
{
"epoch": 1.7096774193548387,
"grad_norm": 1.11446213722229,
"learning_rate": 0.00018753123794509974,
"loss": 1.0813,
"step": 108
},
{
"epoch": 1.7258064516129032,
"grad_norm": 1.0365442037582397,
"learning_rate": 0.0001872810385454472,
"loss": 1.1037,
"step": 109
},
{
"epoch": 1.7419354838709677,
"grad_norm": 1.0622906684875488,
"learning_rate": 0.00018702852410301554,
"loss": 1.085,
"step": 110
},
{
"epoch": 1.7580645161290323,
"grad_norm": 1.0445847511291504,
"learning_rate": 0.0001867737013154993,
"loss": 1.1109,
"step": 111
},
{
"epoch": 1.7741935483870968,
"grad_norm": 1.066573977470398,
"learning_rate": 0.0001865165769418196,
"loss": 1.0348,
"step": 112
},
{
"epoch": 1.7741935483870968,
"eval_loss": 1.5535695552825928,
"eval_runtime": 385.6979,
"eval_samples_per_second": 2.748,
"eval_steps_per_second": 0.459,
"step": 112
},
{
"epoch": 1.7903225806451613,
"grad_norm": 1.1161065101623535,
"learning_rate": 0.00018625715780194485,
"loss": 1.0733,
"step": 113
},
{
"epoch": 1.8064516129032258,
"grad_norm": 1.0873162746429443,
"learning_rate": 0.00018599545077670985,
"loss": 1.0875,
"step": 114
},
{
"epoch": 1.8225806451612905,
"grad_norm": 2.7381904125213623,
"learning_rate": 0.00018573146280763324,
"loss": 1.1284,
"step": 115
},
{
"epoch": 1.838709677419355,
"grad_norm": 1.0686196088790894,
"learning_rate": 0.0001854652008967335,
"loss": 1.064,
"step": 116
},
{
"epoch": 1.8548387096774195,
"grad_norm": 1.0897347927093506,
"learning_rate": 0.0001851966721063431,
"loss": 1.0418,
"step": 117
},
{
"epoch": 1.870967741935484,
"grad_norm": 1.092289686203003,
"learning_rate": 0.00018492588355892124,
"loss": 1.1247,
"step": 118
},
{
"epoch": 1.8870967741935485,
"grad_norm": 1.1446326971054077,
"learning_rate": 0.00018465284243686494,
"loss": 1.0789,
"step": 119
},
{
"epoch": 1.903225806451613,
"grad_norm": 1.052269458770752,
"learning_rate": 0.00018437755598231856,
"loss": 1.0792,
"step": 120
},
{
"epoch": 1.9193548387096775,
"grad_norm": 1.1960537433624268,
"learning_rate": 0.00018410003149698162,
"loss": 1.0551,
"step": 121
},
{
"epoch": 1.935483870967742,
"grad_norm": 1.2509639263153076,
"learning_rate": 0.00018382027634191524,
"loss": 1.1336,
"step": 122
},
{
"epoch": 1.9516129032258065,
"grad_norm": 1.1205637454986572,
"learning_rate": 0.00018353829793734669,
"loss": 1.082,
"step": 123
},
{
"epoch": 1.967741935483871,
"grad_norm": 1.087641954421997,
"learning_rate": 0.00018325410376247294,
"loss": 1.0999,
"step": 124
},
{
"epoch": 1.9838709677419355,
"grad_norm": 1.141137957572937,
"learning_rate": 0.0001829677013552619,
"loss": 1.1358,
"step": 125
},
{
"epoch": 2.0,
"grad_norm": 1.1019104719161987,
"learning_rate": 0.0001826790983122527,
"loss": 1.0671,
"step": 126
},
{
"epoch": 2.0161290322580645,
"grad_norm": 1.0500136613845825,
"learning_rate": 0.00018238830228835417,
"loss": 1.1061,
"step": 127
},
{
"epoch": 2.032258064516129,
"grad_norm": 1.0191640853881836,
"learning_rate": 0.00018209532099664174,
"loss": 1.0769,
"step": 128
},
{
"epoch": 2.032258064516129,
"eval_loss": 1.5341366529464722,
"eval_runtime": 377.1489,
"eval_samples_per_second": 2.811,
"eval_steps_per_second": 0.469,
"step": 128
},
{
"epoch": 2.0161290322580645,
"grad_norm": 0.8983094692230225,
"learning_rate": 0.00018180016220815292,
"loss": 0.6901,
"step": 129
},
{
"epoch": 2.032258064516129,
"grad_norm": 0.9102672934532166,
"learning_rate": 0.00018150283375168114,
"loss": 0.6643,
"step": 130
},
{
"epoch": 2.0483870967741935,
"grad_norm": 0.9043310284614563,
"learning_rate": 0.00018120334351356813,
"loss": 0.6813,
"step": 131
},
{
"epoch": 2.064516129032258,
"grad_norm": 0.920064389705658,
"learning_rate": 0.00018090169943749476,
"loss": 0.6422,
"step": 132
},
{
"epoch": 2.0806451612903225,
"grad_norm": 1.0108799934387207,
"learning_rate": 0.0001805979095242702,
"loss": 0.6727,
"step": 133
},
{
"epoch": 2.096774193548387,
"grad_norm": 0.9978906512260437,
"learning_rate": 0.00018029198183161998,
"loss": 0.6705,
"step": 134
},
{
"epoch": 2.1129032258064515,
"grad_norm": 0.9658149480819702,
"learning_rate": 0.00017998392447397197,
"loss": 0.6519,
"step": 135
},
{
"epoch": 2.129032258064516,
"grad_norm": 0.9641355872154236,
"learning_rate": 0.00017967374562224132,
"loss": 0.6408,
"step": 136
},
{
"epoch": 2.1451612903225805,
"grad_norm": 1.057494878768921,
"learning_rate": 0.0001793614535036137,
"loss": 0.6903,
"step": 137
},
{
"epoch": 2.161290322580645,
"grad_norm": 1.0118696689605713,
"learning_rate": 0.00017904705640132718,
"loss": 0.6606,
"step": 138
},
{
"epoch": 2.1774193548387095,
"grad_norm": 0.9846657514572144,
"learning_rate": 0.0001787305626544523,
"loss": 0.6712,
"step": 139
},
{
"epoch": 2.193548387096774,
"grad_norm": 1.151206374168396,
"learning_rate": 0.00017841198065767107,
"loss": 0.6781,
"step": 140
},
{
"epoch": 2.2096774193548385,
"grad_norm": 0.9539623856544495,
"learning_rate": 0.0001780913188610542,
"loss": 0.6253,
"step": 141
},
{
"epoch": 2.225806451612903,
"grad_norm": 1.044904351234436,
"learning_rate": 0.00017776858576983712,
"loss": 0.6617,
"step": 142
},
{
"epoch": 2.241935483870968,
"grad_norm": 0.9991489052772522,
"learning_rate": 0.0001774437899441942,
"loss": 0.6703,
"step": 143
},
{
"epoch": 2.258064516129032,
"grad_norm": 1.0453065633773804,
"learning_rate": 0.0001771169399990119,
"loss": 0.6608,
"step": 144
},
{
"epoch": 2.258064516129032,
"eval_loss": 1.6200847625732422,
"eval_runtime": 371.4426,
"eval_samples_per_second": 2.854,
"eval_steps_per_second": 0.477,
"step": 144
},
{
"epoch": 2.274193548387097,
"grad_norm": 0.9984713196754456,
"learning_rate": 0.00017678804460366,
"loss": 0.6976,
"step": 145
},
{
"epoch": 2.2903225806451615,
"grad_norm": 0.9991441369056702,
"learning_rate": 0.00017645711248176195,
"loss": 0.6872,
"step": 146
},
{
"epoch": 2.306451612903226,
"grad_norm": 2.5695972442626953,
"learning_rate": 0.00017612415241096327,
"loss": 0.6672,
"step": 147
},
{
"epoch": 2.3225806451612905,
"grad_norm": 0.9834401607513428,
"learning_rate": 0.00017578917322269886,
"loss": 0.6764,
"step": 148
},
{
"epoch": 2.338709677419355,
"grad_norm": 1.0587729215621948,
"learning_rate": 0.00017545218380195864,
"loss": 0.6741,
"step": 149
},
{
"epoch": 2.3548387096774195,
"grad_norm": 1.0295051336288452,
"learning_rate": 0.00017511319308705198,
"loss": 0.6963,
"step": 150
},
{
"epoch": 2.370967741935484,
"grad_norm": 1.0167349576950073,
"learning_rate": 0.00017477221006937066,
"loss": 0.7085,
"step": 151
},
{
"epoch": 2.3870967741935485,
"grad_norm": 1.0259524583816528,
"learning_rate": 0.0001744292437931502,
"loss": 0.7143,
"step": 152
},
{
"epoch": 2.403225806451613,
"grad_norm": 1.0057259798049927,
"learning_rate": 0.00017408430335523012,
"loss": 0.708,
"step": 153
},
{
"epoch": 2.4193548387096775,
"grad_norm": 0.975724995136261,
"learning_rate": 0.00017373739790481262,
"loss": 0.696,
"step": 154
},
{
"epoch": 2.435483870967742,
"grad_norm": 1.0695990324020386,
"learning_rate": 0.00017338853664321992,
"loss": 0.6734,
"step": 155
},
{
"epoch": 2.4516129032258065,
"grad_norm": 1.3752026557922363,
"learning_rate": 0.00017303772882365016,
"loss": 0.6976,
"step": 156
},
{
"epoch": 2.467741935483871,
"grad_norm": 2.8574488162994385,
"learning_rate": 0.00017268498375093202,
"loss": 0.7288,
"step": 157
},
{
"epoch": 2.4838709677419355,
"grad_norm": 1.0393046140670776,
"learning_rate": 0.00017233031078127788,
"loss": 0.6664,
"step": 158
},
{
"epoch": 2.5,
"grad_norm": 1.456668496131897,
"learning_rate": 0.00017197371932203568,
"loss": 0.6944,
"step": 159
},
{
"epoch": 2.5161290322580645,
"grad_norm": 1.0640977621078491,
"learning_rate": 0.00017161521883143934,
"loss": 0.6918,
"step": 160
},
{
"epoch": 2.5161290322580645,
"eval_loss": 1.6184865236282349,
"eval_runtime": 239.7727,
"eval_samples_per_second": 4.421,
"eval_steps_per_second": 0.738,
"step": 160
},
{
"epoch": 2.532258064516129,
"grad_norm": 1.1856733560562134,
"learning_rate": 0.000171254818818358,
"loss": 0.7186,
"step": 161
},
{
"epoch": 2.5483870967741935,
"grad_norm": 1.0068649053573608,
"learning_rate": 0.00017089252884204377,
"loss": 0.6656,
"step": 162
},
{
"epoch": 2.564516129032258,
"grad_norm": 1.055477499961853,
"learning_rate": 0.00017052835851187804,
"loss": 0.7235,
"step": 163
},
{
"epoch": 2.5806451612903225,
"grad_norm": 1.0402213335037231,
"learning_rate": 0.0001701623174871168,
"loss": 0.7212,
"step": 164
},
{
"epoch": 2.596774193548387,
"grad_norm": 1.0087556838989258,
"learning_rate": 0.00016979441547663435,
"loss": 0.7124,
"step": 165
},
{
"epoch": 2.6129032258064515,
"grad_norm": 1.9748854637145996,
"learning_rate": 0.0001694246622386658,
"loss": 0.732,
"step": 166
},
{
"epoch": 2.629032258064516,
"grad_norm": 1.0986181497573853,
"learning_rate": 0.0001690530675805482,
"loss": 0.7063,
"step": 167
},
{
"epoch": 2.6451612903225805,
"grad_norm": 0.9828779101371765,
"learning_rate": 0.00016867964135846043,
"loss": 0.7026,
"step": 168
},
{
"epoch": 2.661290322580645,
"grad_norm": 1.0073943138122559,
"learning_rate": 0.0001683043934771618,
"loss": 0.7084,
"step": 169
},
{
"epoch": 2.6774193548387095,
"grad_norm": 1.0138689279556274,
"learning_rate": 0.00016792733388972932,
"loss": 0.7144,
"step": 170
},
{
"epoch": 2.693548387096774,
"grad_norm": 1.0346741676330566,
"learning_rate": 0.00016754847259729369,
"loss": 0.686,
"step": 171
},
{
"epoch": 2.709677419354839,
"grad_norm": 1.0213960409164429,
"learning_rate": 0.0001671678196487741,
"loss": 0.6537,
"step": 172
},
{
"epoch": 2.725806451612903,
"grad_norm": 0.9593885540962219,
"learning_rate": 0.00016678538514061154,
"loss": 0.7029,
"step": 173
},
{
"epoch": 2.741935483870968,
"grad_norm": 1.0341320037841797,
"learning_rate": 0.00016640117921650117,
"loss": 0.6969,
"step": 174
},
{
"epoch": 2.758064516129032,
"grad_norm": 1.1095786094665527,
"learning_rate": 0.00016601521206712318,
"loss": 0.7048,
"step": 175
},
{
"epoch": 2.774193548387097,
"grad_norm": 0.9925594329833984,
"learning_rate": 0.00016562749392987254,
"loss": 0.7203,
"step": 176
},
{
"epoch": 2.774193548387097,
"eval_loss": 1.6154118776321411,
"eval_runtime": 232.8684,
"eval_samples_per_second": 4.552,
"eval_steps_per_second": 0.76,
"step": 176
},
{
"epoch": 2.790322580645161,
"grad_norm": 0.9950226545333862,
"learning_rate": 0.00016523803508858732,
"loss": 0.6853,
"step": 177
},
{
"epoch": 2.806451612903226,
"grad_norm": 1.0214858055114746,
"learning_rate": 0.0001648468458732762,
"loss": 0.7098,
"step": 178
},
{
"epoch": 2.8225806451612905,
"grad_norm": 1.0195788145065308,
"learning_rate": 0.00016445393665984417,
"loss": 0.6775,
"step": 179
},
{
"epoch": 2.838709677419355,
"grad_norm": 0.9992055892944336,
"learning_rate": 0.00016405931786981755,
"loss": 0.6981,
"step": 180
},
{
"epoch": 2.8548387096774195,
"grad_norm": 1.0090147256851196,
"learning_rate": 0.0001636629999700674,
"loss": 0.7083,
"step": 181
},
{
"epoch": 2.870967741935484,
"grad_norm": 1.0279518365859985,
"learning_rate": 0.00016326499347253207,
"loss": 0.7097,
"step": 182
},
{
"epoch": 2.8870967741935485,
"grad_norm": 0.9872741103172302,
"learning_rate": 0.00016286530893393818,
"loss": 0.7185,
"step": 183
},
{
"epoch": 2.903225806451613,
"grad_norm": 1.0596610307693481,
"learning_rate": 0.00016246395695552085,
"loss": 0.6818,
"step": 184
},
{
"epoch": 2.9193548387096775,
"grad_norm": 5.755076885223389,
"learning_rate": 0.00016206094818274229,
"loss": 0.7244,
"step": 185
},
{
"epoch": 2.935483870967742,
"grad_norm": 1.0628125667572021,
"learning_rate": 0.00016165629330500952,
"loss": 0.7094,
"step": 186
},
{
"epoch": 2.9516129032258065,
"grad_norm": 1.0117448568344116,
"learning_rate": 0.00016125000305539094,
"loss": 0.7052,
"step": 187
},
{
"epoch": 2.967741935483871,
"grad_norm": 1.0038176774978638,
"learning_rate": 0.0001608420882103315,
"loss": 0.6832,
"step": 188
},
{
"epoch": 2.9838709677419355,
"grad_norm": 1.0732207298278809,
"learning_rate": 0.00016043255958936693,
"loss": 0.7367,
"step": 189
},
{
"epoch": 3.0,
"grad_norm": 1.056867003440857,
"learning_rate": 0.00016002142805483685,
"loss": 0.674,
"step": 190
},
{
"epoch": 3.0161290322580645,
"grad_norm": 1.0539047718048096,
"learning_rate": 0.00015960870451159638,
"loss": 0.724,
"step": 191
},
{
"epoch": 3.032258064516129,
"grad_norm": 1.0756276845932007,
"learning_rate": 0.0001591943999067273,
"loss": 0.7172,
"step": 192
},
{
"epoch": 3.032258064516129,
"eval_loss": 1.6202313899993896,
"eval_runtime": 227.1127,
"eval_samples_per_second": 4.667,
"eval_steps_per_second": 0.779,
"step": 192
},
{
"epoch": 3.0161290322580645,
"grad_norm": 0.8275887966156006,
"learning_rate": 0.00015877852522924732,
"loss": 0.4273,
"step": 193
},
{
"epoch": 3.032258064516129,
"grad_norm": 0.8425289988517761,
"learning_rate": 0.00015836109150981886,
"loss": 0.3975,
"step": 194
},
{
"epoch": 3.0483870967741935,
"grad_norm": 0.8054454922676086,
"learning_rate": 0.00015794210982045636,
"loss": 0.4053,
"step": 195
},
{
"epoch": 3.064516129032258,
"grad_norm": 0.9864973425865173,
"learning_rate": 0.00015752159127423263,
"loss": 0.3912,
"step": 196
},
{
"epoch": 3.0806451612903225,
"grad_norm": 4.44207763671875,
"learning_rate": 0.00015709954702498397,
"loss": 0.4295,
"step": 197
},
{
"epoch": 3.096774193548387,
"grad_norm": 1.4279245138168335,
"learning_rate": 0.0001566759882670146,
"loss": 0.458,
"step": 198
},
{
"epoch": 3.1129032258064515,
"grad_norm": 0.9974954724311829,
"learning_rate": 0.0001562509262347995,
"loss": 0.4105,
"step": 199
},
{
"epoch": 3.129032258064516,
"grad_norm": 1.0291105508804321,
"learning_rate": 0.00015582437220268647,
"loss": 0.3779,
"step": 200
},
{
"epoch": 3.1451612903225805,
"grad_norm": 0.9659109711647034,
"learning_rate": 0.00015539633748459724,
"loss": 0.4046,
"step": 201
},
{
"epoch": 3.161290322580645,
"grad_norm": 1.0539616346359253,
"learning_rate": 0.0001549668334337271,
"loss": 0.4101,
"step": 202
},
{
"epoch": 3.1774193548387095,
"grad_norm": 1.0015851259231567,
"learning_rate": 0.00015453587144224407,
"loss": 0.4184,
"step": 203
},
{
"epoch": 3.193548387096774,
"grad_norm": 0.9041028618812561,
"learning_rate": 0.0001541034629409865,
"loss": 0.3943,
"step": 204
},
{
"epoch": 3.2096774193548385,
"grad_norm": 0.922798216342926,
"learning_rate": 0.00015366961939916008,
"loss": 0.3937,
"step": 205
},
{
"epoch": 3.225806451612903,
"grad_norm": 0.9305877685546875,
"learning_rate": 0.00015323435232403337,
"loss": 0.3796,
"step": 206
},
{
"epoch": 3.241935483870968,
"grad_norm": 0.9297038316726685,
"learning_rate": 0.00015279767326063296,
"loss": 0.404,
"step": 207
},
{
"epoch": 3.258064516129032,
"grad_norm": 0.8899175524711609,
"learning_rate": 0.00015235959379143678,
"loss": 0.3914,
"step": 208
},
{
"epoch": 3.258064516129032,
"eval_loss": 1.7161870002746582,
"eval_runtime": 361.1949,
"eval_samples_per_second": 2.935,
"eval_steps_per_second": 0.49,
"step": 208
},
{
"epoch": 3.274193548387097,
"grad_norm": 0.9059929847717285,
"learning_rate": 0.0001519201255360673,
"loss": 0.3761,
"step": 209
},
{
"epoch": 3.2903225806451615,
"grad_norm": 1.0878820419311523,
"learning_rate": 0.0001514792801509831,
"loss": 0.4018,
"step": 210
},
{
"epoch": 3.306451612903226,
"grad_norm": 0.9320698976516724,
"learning_rate": 0.00015103706932916974,
"loss": 0.398,
"step": 211
},
{
"epoch": 3.3225806451612905,
"grad_norm": 0.8879979252815247,
"learning_rate": 0.00015059350479982965,
"loss": 0.3848,
"step": 212
},
{
"epoch": 3.338709677419355,
"grad_norm": 0.8591129779815674,
"learning_rate": 0.00015014859832807105,
"loss": 0.3935,
"step": 213
},
{
"epoch": 3.3548387096774195,
"grad_norm": 0.8328726887702942,
"learning_rate": 0.0001497023617145958,
"loss": 0.4025,
"step": 214
},
{
"epoch": 3.370967741935484,
"grad_norm": 0.9114816188812256,
"learning_rate": 0.00014925480679538647,
"loss": 0.3978,
"step": 215
},
{
"epoch": 3.3870967741935485,
"grad_norm": 0.8772809505462646,
"learning_rate": 0.0001488059454413923,
"loss": 0.3936,
"step": 216
},
{
"epoch": 3.403225806451613,
"grad_norm": 0.96305251121521,
"learning_rate": 0.00014835578955821457,
"loss": 0.4098,
"step": 217
},
{
"epoch": 3.4193548387096775,
"grad_norm": 0.8828062415122986,
"learning_rate": 0.00014790435108579048,
"loss": 0.3994,
"step": 218
},
{
"epoch": 3.435483870967742,
"grad_norm": 1.0111607313156128,
"learning_rate": 0.00014745164199807682,
"loss": 0.4002,
"step": 219
},
{
"epoch": 3.4516129032258065,
"grad_norm": 0.9364872574806213,
"learning_rate": 0.000146997674302732,
"loss": 0.4115,
"step": 220
},
{
"epoch": 3.467741935483871,
"grad_norm": 0.9206238389015198,
"learning_rate": 0.00014654246004079793,
"loss": 0.3979,
"step": 221
},
{
"epoch": 3.4838709677419355,
"grad_norm": 0.910442590713501,
"learning_rate": 0.00014608601128638027,
"loss": 0.4041,
"step": 222
},
{
"epoch": 3.5,
"grad_norm": 0.9023163914680481,
"learning_rate": 0.00014562834014632853,
"loss": 0.3936,
"step": 223
},
{
"epoch": 3.5161290322580645,
"grad_norm": 1.0501468181610107,
"learning_rate": 0.00014516945875991472,
"loss": 0.4111,
"step": 224
},
{
"epoch": 3.5161290322580645,
"eval_loss": 1.711409091949463,
"eval_runtime": 360.3679,
"eval_samples_per_second": 2.941,
"eval_steps_per_second": 0.491,
"step": 224
},
{
"epoch": 3.532258064516129,
"grad_norm": 1.386006474494934,
"learning_rate": 0.0001447093792985114,
"loss": 0.4127,
"step": 225
},
{
"epoch": 3.5483870967741935,
"grad_norm": 0.8904193639755249,
"learning_rate": 0.00014424811396526892,
"loss": 0.4089,
"step": 226
},
{
"epoch": 3.564516129032258,
"grad_norm": 0.8737647533416748,
"learning_rate": 0.00014378567499479168,
"loss": 0.3884,
"step": 227
},
{
"epoch": 3.5806451612903225,
"grad_norm": 0.914535641670227,
"learning_rate": 0.00014332207465281364,
"loss": 0.4217,
"step": 228
},
{
"epoch": 3.596774193548387,
"grad_norm": 0.9375549554824829,
"learning_rate": 0.00014285732523587304,
"loss": 0.4092,
"step": 229
},
{
"epoch": 3.6129032258064515,
"grad_norm": 0.8925461173057556,
"learning_rate": 0.0001423914390709861,
"loss": 0.3956,
"step": 230
},
{
"epoch": 3.629032258064516,
"grad_norm": 0.8751139640808105,
"learning_rate": 0.00014192442851532018,
"loss": 0.413,
"step": 231
},
{
"epoch": 3.6451612903225805,
"grad_norm": 0.8749066591262817,
"learning_rate": 0.00014145630595586607,
"loss": 0.4015,
"step": 232
},
{
"epoch": 3.661290322580645,
"grad_norm": 0.9000979661941528,
"learning_rate": 0.0001409870838091092,
"loss": 0.4117,
"step": 233
},
{
"epoch": 3.6774193548387095,
"grad_norm": 0.909804105758667,
"learning_rate": 0.00014051677452070065,
"loss": 0.3782,
"step": 234
},
{
"epoch": 3.693548387096774,
"grad_norm": 1.7925716638565063,
"learning_rate": 0.00014004539056512667,
"loss": 0.4451,
"step": 235
},
{
"epoch": 3.709677419354839,
"grad_norm": 0.9784117937088013,
"learning_rate": 0.00013957294444537808,
"loss": 0.4252,
"step": 236
},
{
"epoch": 3.725806451612903,
"grad_norm": 0.9990687370300293,
"learning_rate": 0.00013909944869261855,
"loss": 0.4384,
"step": 237
},
{
"epoch": 3.741935483870968,
"grad_norm": 1.0078152418136597,
"learning_rate": 0.0001386249158658522,
"loss": 0.4193,
"step": 238
},
{
"epoch": 3.758064516129032,
"grad_norm": 0.9793704152107239,
"learning_rate": 0.00013814935855159055,
"loss": 0.4253,
"step": 239
},
{
"epoch": 3.774193548387097,
"grad_norm": 0.9232134819030762,
"learning_rate": 0.00013767278936351854,
"loss": 0.4091,
"step": 240
},
{
"epoch": 3.774193548387097,
"eval_loss": 1.717702865600586,
"eval_runtime": 359.9193,
"eval_samples_per_second": 2.945,
"eval_steps_per_second": 0.492,
"step": 240
},
{
"epoch": 3.790322580645161,
"grad_norm": 1.0362968444824219,
"learning_rate": 0.00013719522094216013,
"loss": 0.418,
"step": 241
},
{
"epoch": 3.806451612903226,
"grad_norm": 0.9517565369606018,
"learning_rate": 0.00013671666595454295,
"loss": 0.4343,
"step": 242
},
{
"epoch": 3.8225806451612905,
"grad_norm": 0.9661825895309448,
"learning_rate": 0.00013623713709386227,
"loss": 0.3973,
"step": 243
},
{
"epoch": 3.838709677419355,
"grad_norm": 0.9564940333366394,
"learning_rate": 0.00013575664707914448,
"loss": 0.3935,
"step": 244
},
{
"epoch": 3.8548387096774195,
"grad_norm": 0.9845961928367615,
"learning_rate": 0.0001352752086549095,
"loss": 0.4374,
"step": 245
},
{
"epoch": 3.870967741935484,
"grad_norm": 0.9213180541992188,
"learning_rate": 0.0001347928345908329,
"loss": 0.4253,
"step": 246
},
{
"epoch": 3.8870967741935485,
"grad_norm": 0.8919222950935364,
"learning_rate": 0.00013430953768140723,
"loss": 0.4148,
"step": 247
},
{
"epoch": 3.903225806451613,
"grad_norm": 0.899038553237915,
"learning_rate": 0.00013382533074560255,
"loss": 0.419,
"step": 248
},
{
"epoch": 3.9193548387096775,
"grad_norm": 0.9399131536483765,
"learning_rate": 0.00013334022662652649,
"loss": 0.4081,
"step": 249
},
{
"epoch": 3.935483870967742,
"grad_norm": 0.8963966965675354,
"learning_rate": 0.0001328542381910835,
"loss": 0.4036,
"step": 250
},
{
"epoch": 3.9516129032258065,
"grad_norm": 0.8994773626327515,
"learning_rate": 0.0001323673783296337,
"loss": 0.4138,
"step": 251
},
{
"epoch": 3.967741935483871,
"grad_norm": 0.9611584544181824,
"learning_rate": 0.00013187965995565098,
"loss": 0.4416,
"step": 252
},
{
"epoch": 3.9838709677419355,
"grad_norm": 0.9271295666694641,
"learning_rate": 0.00013139109600538028,
"loss": 0.4009,
"step": 253
},
{
"epoch": 4.0,
"grad_norm": 0.9294149875640869,
"learning_rate": 0.00013090169943749476,
"loss": 0.4187,
"step": 254
},
{
"epoch": 4.016129032258064,
"grad_norm": 0.9005634188652039,
"learning_rate": 0.0001304114832327518,
"loss": 0.4068,
"step": 255
},
{
"epoch": 4.032258064516129,
"grad_norm": 0.9278081655502319,
"learning_rate": 0.00012992046039364893,
"loss": 0.4103,
"step": 256
},
{
"epoch": 4.032258064516129,
"eval_loss": 1.719123363494873,
"eval_runtime": 366.7154,
"eval_samples_per_second": 2.891,
"eval_steps_per_second": 0.483,
"step": 256
},
{
"epoch": 4.016129032258065,
"grad_norm": 0.6807416677474976,
"learning_rate": 0.00012942864394407879,
"loss": 0.2192,
"step": 257
},
{
"epoch": 4.032258064516129,
"grad_norm": 0.7075343132019043,
"learning_rate": 0.0001289360469289838,
"loss": 0.2174,
"step": 258
},
{
"epoch": 4.048387096774194,
"grad_norm": 0.7534266114234924,
"learning_rate": 0.0001284426824140101,
"loss": 0.2102,
"step": 259
},
{
"epoch": 4.064516129032258,
"grad_norm": 0.80478435754776,
"learning_rate": 0.00012794856348516095,
"loss": 0.1996,
"step": 260
},
{
"epoch": 4.080645161290323,
"grad_norm": 0.8782503604888916,
"learning_rate": 0.00012745370324844974,
"loss": 0.2231,
"step": 261
},
{
"epoch": 4.096774193548387,
"grad_norm": 0.9023078083992004,
"learning_rate": 0.00012695811482955227,
"loss": 0.2127,
"step": 262
},
{
"epoch": 4.112903225806452,
"grad_norm": 1.4141674041748047,
"learning_rate": 0.0001264618113734587,
"loss": 0.2308,
"step": 263
},
{
"epoch": 4.129032258064516,
"grad_norm": 0.7676845192909241,
"learning_rate": 0.00012596480604412484,
"loss": 0.2044,
"step": 264
},
{
"epoch": 4.145161290322581,
"grad_norm": 0.7796346545219421,
"learning_rate": 0.00012546711202412287,
"loss": 0.2103,
"step": 265
},
{
"epoch": 4.161290322580645,
"grad_norm": 0.7459124326705933,
"learning_rate": 0.000124968742514292,
"loss": 0.1979,
"step": 266
},
{
"epoch": 4.17741935483871,
"grad_norm": 0.7819743156433105,
"learning_rate": 0.000124469710733388,
"loss": 0.1989,
"step": 267
},
{
"epoch": 4.193548387096774,
"grad_norm": 0.8068183660507202,
"learning_rate": 0.00012397002991773275,
"loss": 0.211,
"step": 268
},
{
"epoch": 4.209677419354839,
"grad_norm": 0.76580810546875,
"learning_rate": 0.00012346971332086317,
"loss": 0.2161,
"step": 269
},
{
"epoch": 4.225806451612903,
"grad_norm": 0.7457690238952637,
"learning_rate": 0.0001229687742131796,
"loss": 0.2082,
"step": 270
},
{
"epoch": 4.241935483870968,
"grad_norm": 0.7540544867515564,
"learning_rate": 0.0001224672258815938,
"loss": 0.1897,
"step": 271
},
{
"epoch": 4.258064516129032,
"grad_norm": 0.769862174987793,
"learning_rate": 0.00012196508162917677,
"loss": 0.1996,
"step": 272
},
{
"epoch": 4.258064516129032,
"eval_loss": 1.8386796712875366,
"eval_runtime": 360.7673,
"eval_samples_per_second": 2.938,
"eval_steps_per_second": 0.491,
"step": 272
},
{
"epoch": 4.274193548387097,
"grad_norm": 0.7516776919364929,
"learning_rate": 0.00012146235477480559,
"loss": 0.2048,
"step": 273
},
{
"epoch": 4.290322580645161,
"grad_norm": 0.8582746386528015,
"learning_rate": 0.00012095905865281025,
"loss": 0.2017,
"step": 274
},
{
"epoch": 4.306451612903226,
"grad_norm": 0.7552902102470398,
"learning_rate": 0.0001204552066126201,
"loss": 0.205,
"step": 275
},
{
"epoch": 4.32258064516129,
"grad_norm": 0.7861020565032959,
"learning_rate": 0.00011995081201840956,
"loss": 0.21,
"step": 276
},
{
"epoch": 4.338709677419355,
"grad_norm": 0.8263004422187805,
"learning_rate": 0.00011944588824874388,
"loss": 0.2006,
"step": 277
},
{
"epoch": 4.354838709677419,
"grad_norm": 3.678614616394043,
"learning_rate": 0.00011894044869622403,
"loss": 0.19,
"step": 278
},
{
"epoch": 4.370967741935484,
"grad_norm": 0.7940691709518433,
"learning_rate": 0.0001184345067671317,
"loss": 0.2141,
"step": 279
},
{
"epoch": 4.387096774193548,
"grad_norm": 0.7446163296699524,
"learning_rate": 0.00011792807588107357,
"loss": 0.192,
"step": 280
},
{
"epoch": 4.403225806451613,
"grad_norm": 0.7419606447219849,
"learning_rate": 0.00011742116947062545,
"loss": 0.2085,
"step": 281
},
{
"epoch": 4.419354838709677,
"grad_norm": 0.7716213464736938,
"learning_rate": 0.00011691380098097597,
"loss": 0.2126,
"step": 282
},
{
"epoch": 4.435483870967742,
"grad_norm": 0.9422786831855774,
"learning_rate": 0.00011640598386956992,
"loss": 0.2048,
"step": 283
},
{
"epoch": 4.451612903225806,
"grad_norm": 0.8216850161552429,
"learning_rate": 0.0001158977316057513,
"loss": 0.1977,
"step": 284
},
{
"epoch": 4.467741935483871,
"grad_norm": 0.7419984936714172,
"learning_rate": 0.0001153890576704062,
"loss": 0.2047,
"step": 285
},
{
"epoch": 4.483870967741936,
"grad_norm": 0.7556256651878357,
"learning_rate": 0.00011487997555560503,
"loss": 0.2094,
"step": 286
},
{
"epoch": 4.5,
"grad_norm": 0.8195300102233887,
"learning_rate": 0.0001143704987642448,
"loss": 0.199,
"step": 287
},
{
"epoch": 4.516129032258064,
"grad_norm": 0.7117184996604919,
"learning_rate": 0.00011386064080969094,
"loss": 0.1932,
"step": 288
},
{
"epoch": 4.516129032258064,
"eval_loss": 1.8438739776611328,
"eval_runtime": 361.0378,
"eval_samples_per_second": 2.936,
"eval_steps_per_second": 0.49,
"step": 288
},
{
"epoch": 4.532258064516129,
"grad_norm": 0.7270099520683289,
"learning_rate": 0.00011335041521541885,
"loss": 0.1916,
"step": 289
},
{
"epoch": 4.548387096774194,
"grad_norm": 0.7429494261741638,
"learning_rate": 0.00011283983551465511,
"loss": 0.1984,
"step": 290
},
{
"epoch": 4.564516129032258,
"grad_norm": 0.7204171419143677,
"learning_rate": 0.00011232891525001876,
"loss": 0.1814,
"step": 291
},
{
"epoch": 4.580645161290323,
"grad_norm": 0.7861455678939819,
"learning_rate": 0.0001118176679731619,
"loss": 0.2097,
"step": 292
},
{
"epoch": 4.596774193548387,
"grad_norm": 0.8191571831703186,
"learning_rate": 0.0001113061072444103,
"loss": 0.221,
"step": 293
},
{
"epoch": 4.612903225806452,
"grad_norm": 1.3524975776672363,
"learning_rate": 0.00011079424663240372,
"loss": 0.2066,
"step": 294
},
{
"epoch": 4.629032258064516,
"grad_norm": 0.7753690481185913,
"learning_rate": 0.00011028209971373605,
"loss": 0.2112,
"step": 295
},
{
"epoch": 4.645161290322581,
"grad_norm": 0.7331258654594421,
"learning_rate": 0.00010976968007259519,
"loss": 0.1993,
"step": 296
},
{
"epoch": 4.661290322580645,
"grad_norm": 0.7499488592147827,
"learning_rate": 0.00010925700130040266,
"loss": 0.1937,
"step": 297
},
{
"epoch": 4.67741935483871,
"grad_norm": 0.813107967376709,
"learning_rate": 0.00010874407699545328,
"loss": 0.2157,
"step": 298
},
{
"epoch": 4.693548387096774,
"grad_norm": 0.8471450805664062,
"learning_rate": 0.0001082309207625543,
"loss": 0.215,
"step": 299
},
{
"epoch": 4.709677419354839,
"grad_norm": 0.7541672587394714,
"learning_rate": 0.00010771754621266466,
"loss": 0.1968,
"step": 300
},
{
"epoch": 4.725806451612903,
"grad_norm": 0.8032663464546204,
"learning_rate": 0.00010720396696253393,
"loss": 0.2161,
"step": 301
},
{
"epoch": 4.741935483870968,
"grad_norm": 0.7688977718353271,
"learning_rate": 0.00010669019663434117,
"loss": 0.202,
"step": 302
},
{
"epoch": 4.758064516129032,
"grad_norm": 0.8105716109275818,
"learning_rate": 0.00010617624885533356,
"loss": 0.228,
"step": 303
},
{
"epoch": 4.774193548387097,
"grad_norm": 0.7785431146621704,
"learning_rate": 0.00010566213725746506,
"loss": 0.2185,
"step": 304
},
{
"epoch": 4.774193548387097,
"eval_loss": 1.8509935140609741,
"eval_runtime": 361.2404,
"eval_samples_per_second": 2.934,
"eval_steps_per_second": 0.49,
"step": 304
},
{
"epoch": 4.790322580645161,
"grad_norm": 0.865378737449646,
"learning_rate": 0.00010514787547703466,
"loss": 0.2095,
"step": 305
},
{
"epoch": 4.806451612903226,
"grad_norm": 1.0173976421356201,
"learning_rate": 0.00010463347715432488,
"loss": 0.2202,
"step": 306
},
{
"epoch": 4.82258064516129,
"grad_norm": 0.7536367774009705,
"learning_rate": 0.00010411895593323981,
"loss": 0.2072,
"step": 307
},
{
"epoch": 4.838709677419355,
"grad_norm": 0.7996053695678711,
"learning_rate": 0.00010360432546094341,
"loss": 0.209,
"step": 308
},
{
"epoch": 4.854838709677419,
"grad_norm": 0.8005889654159546,
"learning_rate": 0.00010308959938749729,
"loss": 0.217,
"step": 309
},
{
"epoch": 4.870967741935484,
"grad_norm": 0.7493289709091187,
"learning_rate": 0.00010257479136549889,
"loss": 0.2037,
"step": 310
},
{
"epoch": 4.887096774193548,
"grad_norm": 0.8069470524787903,
"learning_rate": 0.00010205991504971912,
"loss": 0.2189,
"step": 311
},
{
"epoch": 4.903225806451613,
"grad_norm": 0.7922778129577637,
"learning_rate": 0.00010154498409674051,
"loss": 0.2241,
"step": 312
},
{
"epoch": 4.919354838709677,
"grad_norm": 0.7668143510818481,
"learning_rate": 0.00010103001216459469,
"loss": 0.2202,
"step": 313
},
{
"epoch": 4.935483870967742,
"grad_norm": 0.7700707316398621,
"learning_rate": 0.00010051501291240008,
"loss": 0.2149,
"step": 314
},
{
"epoch": 4.951612903225806,
"grad_norm": 0.7833395004272461,
"learning_rate": 0.0001,
"loss": 0.2176,
"step": 315
},
{
"epoch": 4.967741935483871,
"grad_norm": 0.7745612263679504,
"learning_rate": 9.948498708759993e-05,
"loss": 0.2121,
"step": 316
},
{
"epoch": 4.983870967741936,
"grad_norm": 0.7606418132781982,
"learning_rate": 9.896998783540536e-05,
"loss": 0.1974,
"step": 317
},
{
"epoch": 5.0,
"grad_norm": 0.8445224165916443,
"learning_rate": 9.845501590325948e-05,
"loss": 0.2194,
"step": 318
},
{
"epoch": 5.016129032258064,
"grad_norm": 0.7644294500350952,
"learning_rate": 9.794008495028087e-05,
"loss": 0.2037,
"step": 319
},
{
"epoch": 5.032258064516129,
"grad_norm": 0.7571164965629578,
"learning_rate": 9.742520863450115e-05,
"loss": 0.2221,
"step": 320
},
{
"epoch": 5.032258064516129,
"eval_loss": 1.851494550704956,
"eval_runtime": 366.7021,
"eval_samples_per_second": 2.891,
"eval_steps_per_second": 0.483,
"step": 320
},
{
"epoch": 5.016129032258065,
"grad_norm": 0.591090977191925,
"learning_rate": 9.691040061250273e-05,
"loss": 0.1024,
"step": 321
},
{
"epoch": 5.032258064516129,
"grad_norm": 0.5239948630332947,
"learning_rate": 9.639567453905661e-05,
"loss": 0.0993,
"step": 322
},
{
"epoch": 5.048387096774194,
"grad_norm": 0.4719286561012268,
"learning_rate": 9.58810440667602e-05,
"loss": 0.0905,
"step": 323
},
{
"epoch": 5.064516129032258,
"grad_norm": 0.5142775177955627,
"learning_rate": 9.536652284567513e-05,
"loss": 0.0916,
"step": 324
},
{
"epoch": 5.080645161290323,
"grad_norm": 0.6494867205619812,
"learning_rate": 9.485212452296535e-05,
"loss": 0.1001,
"step": 325
},
{
"epoch": 5.096774193548387,
"grad_norm": 0.6289418935775757,
"learning_rate": 9.433786274253495e-05,
"loss": 0.0945,
"step": 326
},
{
"epoch": 5.112903225806452,
"grad_norm": 0.6664146780967712,
"learning_rate": 9.382375114466644e-05,
"loss": 0.0919,
"step": 327
},
{
"epoch": 5.129032258064516,
"grad_norm": 0.6793842911720276,
"learning_rate": 9.330980336565887e-05,
"loss": 0.1023,
"step": 328
},
{
"epoch": 5.145161290322581,
"grad_norm": 0.6552854776382446,
"learning_rate": 9.279603303746608e-05,
"loss": 0.0956,
"step": 329
},
{
"epoch": 5.161290322580645,
"grad_norm": 0.5973914861679077,
"learning_rate": 9.228245378733537e-05,
"loss": 0.0978,
"step": 330
},
{
"epoch": 5.17741935483871,
"grad_norm": 0.5700520277023315,
"learning_rate": 9.176907923744571e-05,
"loss": 0.0909,
"step": 331
},
{
"epoch": 5.193548387096774,
"grad_norm": 0.5783142447471619,
"learning_rate": 9.125592300454676e-05,
"loss": 0.094,
"step": 332
},
{
"epoch": 5.209677419354839,
"grad_norm": 0.5391398072242737,
"learning_rate": 9.074299869959738e-05,
"loss": 0.0885,
"step": 333
},
{
"epoch": 5.225806451612903,
"grad_norm": 0.5473701357841492,
"learning_rate": 9.023031992740488e-05,
"loss": 0.0917,
"step": 334
},
{
"epoch": 5.241935483870968,
"grad_norm": 0.5759130716323853,
"learning_rate": 8.971790028626395e-05,
"loss": 0.1002,
"step": 335
},
{
"epoch": 5.258064516129032,
"grad_norm": 0.5708579421043396,
"learning_rate": 8.920575336759629e-05,
"loss": 0.0968,
"step": 336
},
{
"epoch": 5.258064516129032,
"eval_loss": 2.0317347049713135,
"eval_runtime": 365.1587,
"eval_samples_per_second": 2.903,
"eval_steps_per_second": 0.485,
"step": 336
},
{
"epoch": 5.274193548387097,
"grad_norm": 0.6354628205299377,
"learning_rate": 8.869389275558972e-05,
"loss": 0.1,
"step": 337
},
{
"epoch": 5.290322580645161,
"grad_norm": 0.6346054077148438,
"learning_rate": 8.818233202683814e-05,
"loss": 0.097,
"step": 338
},
{
"epoch": 5.306451612903226,
"grad_norm": 0.5868246555328369,
"learning_rate": 8.767108474998128e-05,
"loss": 0.098,
"step": 339
},
{
"epoch": 5.32258064516129,
"grad_norm": 0.5682688355445862,
"learning_rate": 8.71601644853449e-05,
"loss": 0.0887,
"step": 340
},
{
"epoch": 5.338709677419355,
"grad_norm": 0.643573522567749,
"learning_rate": 8.664958478458117e-05,
"loss": 0.0969,
"step": 341
},
{
"epoch": 5.354838709677419,
"grad_norm": 1.490026593208313,
"learning_rate": 8.613935919030907e-05,
"loss": 0.0983,
"step": 342
},
{
"epoch": 5.370967741935484,
"grad_norm": 0.629753828048706,
"learning_rate": 8.562950123575523e-05,
"loss": 0.0991,
"step": 343
},
{
"epoch": 5.387096774193548,
"grad_norm": 0.6013917326927185,
"learning_rate": 8.512002444439502e-05,
"loss": 0.1026,
"step": 344
},
{
"epoch": 5.403225806451613,
"grad_norm": 0.5838581323623657,
"learning_rate": 8.461094232959381e-05,
"loss": 0.089,
"step": 345
},
{
"epoch": 5.419354838709677,
"grad_norm": 0.5835732221603394,
"learning_rate": 8.410226839424871e-05,
"loss": 0.0939,
"step": 346
},
{
"epoch": 5.435483870967742,
"grad_norm": 0.616823136806488,
"learning_rate": 8.359401613043012e-05,
"loss": 0.0936,
"step": 347
},
{
"epoch": 5.451612903225806,
"grad_norm": 0.6824636459350586,
"learning_rate": 8.308619901902406e-05,
"loss": 0.1023,
"step": 348
},
{
"epoch": 5.467741935483871,
"grad_norm": 0.5994443297386169,
"learning_rate": 8.257883052937456e-05,
"loss": 0.0975,
"step": 349
},
{
"epoch": 5.483870967741936,
"grad_norm": 0.7672178149223328,
"learning_rate": 8.207192411892646e-05,
"loss": 0.0983,
"step": 350
},
{
"epoch": 5.5,
"grad_norm": 1.1863083839416504,
"learning_rate": 8.156549323286831e-05,
"loss": 0.0951,
"step": 351
},
{
"epoch": 5.516129032258064,
"grad_norm": 0.5679525136947632,
"learning_rate": 8.1059551303776e-05,
"loss": 0.0937,
"step": 352
},
{
"epoch": 5.516129032258064,
"eval_loss": 2.013756036758423,
"eval_runtime": 365.2484,
"eval_samples_per_second": 2.902,
"eval_steps_per_second": 0.485,
"step": 352
},
{
"epoch": 5.532258064516129,
"grad_norm": 0.5959299802780151,
"learning_rate": 8.055411175125616e-05,
"loss": 0.086,
"step": 353
},
{
"epoch": 5.548387096774194,
"grad_norm": 0.5877125859260559,
"learning_rate": 8.004918798159045e-05,
"loss": 0.0911,
"step": 354
},
{
"epoch": 5.564516129032258,
"grad_norm": 0.5466898083686829,
"learning_rate": 7.954479338737995e-05,
"loss": 0.0845,
"step": 355
},
{
"epoch": 5.580645161290323,
"grad_norm": 0.5609007477760315,
"learning_rate": 7.904094134718976e-05,
"loss": 0.0883,
"step": 356
},
{
"epoch": 5.596774193548387,
"grad_norm": 0.6323772668838501,
"learning_rate": 7.853764522519445e-05,
"loss": 0.0944,
"step": 357
},
{
"epoch": 5.612903225806452,
"grad_norm": 0.6980358958244324,
"learning_rate": 7.803491837082324e-05,
"loss": 0.1007,
"step": 358
},
{
"epoch": 5.629032258064516,
"grad_norm": 0.6050729751586914,
"learning_rate": 7.753277411840622e-05,
"loss": 0.0927,
"step": 359
},
{
"epoch": 5.645161290322581,
"grad_norm": 0.5835074782371521,
"learning_rate": 7.703122578682046e-05,
"loss": 0.0876,
"step": 360
},
{
"epoch": 5.661290322580645,
"grad_norm": 0.5104186534881592,
"learning_rate": 7.653028667913686e-05,
"loss": 0.084,
"step": 361
},
{
"epoch": 5.67741935483871,
"grad_norm": 0.5861672163009644,
"learning_rate": 7.602997008226726e-05,
"loss": 0.0932,
"step": 362
},
{
"epoch": 5.693548387096774,
"grad_norm": 0.5980664491653442,
"learning_rate": 7.553028926661201e-05,
"loss": 0.0833,
"step": 363
},
{
"epoch": 5.709677419354839,
"grad_norm": 0.6397245526313782,
"learning_rate": 7.5031257485708e-05,
"loss": 0.088,
"step": 364
},
{
"epoch": 5.725806451612903,
"grad_norm": 0.6025028228759766,
"learning_rate": 7.453288797587714e-05,
"loss": 0.0949,
"step": 365
},
{
"epoch": 5.741935483870968,
"grad_norm": 0.6607884168624878,
"learning_rate": 7.403519395587521e-05,
"loss": 0.1029,
"step": 366
},
{
"epoch": 5.758064516129032,
"grad_norm": 0.6854023933410645,
"learning_rate": 7.353818862654129e-05,
"loss": 0.0984,
"step": 367
},
{
"epoch": 5.774193548387097,
"grad_norm": 0.5980477929115295,
"learning_rate": 7.304188517044774e-05,
"loss": 0.0973,
"step": 368
},
{
"epoch": 5.774193548387097,
"eval_loss": 2.027407646179199,
"eval_runtime": 229.0955,
"eval_samples_per_second": 4.627,
"eval_steps_per_second": 0.773,
"step": 368
},
{
"epoch": 5.790322580645161,
"grad_norm": 0.5738844871520996,
"learning_rate": 7.254629675155027e-05,
"loss": 0.0872,
"step": 369
},
{
"epoch": 5.806451612903226,
"grad_norm": 0.6093122363090515,
"learning_rate": 7.205143651483906e-05,
"loss": 0.0939,
"step": 370
},
{
"epoch": 5.82258064516129,
"grad_norm": 0.5988517999649048,
"learning_rate": 7.155731758598992e-05,
"loss": 0.0922,
"step": 371
},
{
"epoch": 5.838709677419355,
"grad_norm": 0.6104899644851685,
"learning_rate": 7.106395307101621e-05,
"loss": 0.0829,
"step": 372
},
{
"epoch": 5.854838709677419,
"grad_norm": 0.8592974543571472,
"learning_rate": 7.057135605592121e-05,
"loss": 0.1204,
"step": 373
},
{
"epoch": 5.870967741935484,
"grad_norm": 0.5683754682540894,
"learning_rate": 7.007953960635109e-05,
"loss": 0.0865,
"step": 374
},
{
"epoch": 5.887096774193548,
"grad_norm": 0.5832166075706482,
"learning_rate": 6.958851676724823e-05,
"loss": 0.0911,
"step": 375
},
{
"epoch": 5.903225806451613,
"grad_norm": 0.534551739692688,
"learning_rate": 6.909830056250527e-05,
"loss": 0.0861,
"step": 376
},
{
"epoch": 5.919354838709677,
"grad_norm": 0.546940803527832,
"learning_rate": 6.860890399461974e-05,
"loss": 0.0893,
"step": 377
},
{
"epoch": 5.935483870967742,
"grad_norm": 0.5596715211868286,
"learning_rate": 6.812034004434903e-05,
"loss": 0.0913,
"step": 378
},
{
"epoch": 5.951612903225806,
"grad_norm": 0.6050382852554321,
"learning_rate": 6.76326216703663e-05,
"loss": 0.0853,
"step": 379
},
{
"epoch": 5.967741935483871,
"grad_norm": 0.563949465751648,
"learning_rate": 6.714576180891654e-05,
"loss": 0.0814,
"step": 380
},
{
"epoch": 5.983870967741936,
"grad_norm": 0.6186781525611877,
"learning_rate": 6.665977337347354e-05,
"loss": 0.1003,
"step": 381
},
{
"epoch": 6.0,
"grad_norm": 0.583235502243042,
"learning_rate": 6.617466925439746e-05,
"loss": 0.0911,
"step": 382
},
{
"epoch": 6.016129032258064,
"grad_norm": 0.6332904696464539,
"learning_rate": 6.569046231859281e-05,
"loss": 0.0923,
"step": 383
},
{
"epoch": 6.032258064516129,
"grad_norm": 0.6747143864631653,
"learning_rate": 6.520716540916709e-05,
"loss": 0.083,
"step": 384
},
{
"epoch": 6.032258064516129,
"eval_loss": 2.0257039070129395,
"eval_runtime": 219.95,
"eval_samples_per_second": 4.819,
"eval_steps_per_second": 0.805,
"step": 384
},
{
"epoch": 6.016129032258065,
"grad_norm": 0.33682334423065186,
"learning_rate": 6.472479134509052e-05,
"loss": 0.0476,
"step": 385
},
{
"epoch": 6.032258064516129,
"grad_norm": 0.33974146842956543,
"learning_rate": 6.424335292085553e-05,
"loss": 0.0462,
"step": 386
},
{
"epoch": 6.048387096774194,
"grad_norm": 0.335627019405365,
"learning_rate": 6.376286290613776e-05,
"loss": 0.0437,
"step": 387
},
{
"epoch": 6.064516129032258,
"grad_norm": 0.3977552652359009,
"learning_rate": 6.32833340454571e-05,
"loss": 0.0446,
"step": 388
},
{
"epoch": 6.080645161290323,
"grad_norm": 0.3793430030345917,
"learning_rate": 6.280477905783988e-05,
"loss": 0.0437,
"step": 389
},
{
"epoch": 6.096774193548387,
"grad_norm": 0.38144728541374207,
"learning_rate": 6.232721063648148e-05,
"loss": 0.0388,
"step": 390
},
{
"epoch": 6.112903225806452,
"grad_norm": 0.3977310359477997,
"learning_rate": 6.185064144840948e-05,
"loss": 0.0412,
"step": 391
},
{
"epoch": 6.129032258064516,
"grad_norm": 0.4477882385253906,
"learning_rate": 6.137508413414784e-05,
"loss": 0.0422,
"step": 392
},
{
"epoch": 6.145161290322581,
"grad_norm": 0.4987168312072754,
"learning_rate": 6.0900551307381484e-05,
"loss": 0.0391,
"step": 393
},
{
"epoch": 6.161290322580645,
"grad_norm": 0.4499349892139435,
"learning_rate": 6.0427055554621913e-05,
"loss": 0.0382,
"step": 394
},
{
"epoch": 6.17741935483871,
"grad_norm": 0.39157646894454956,
"learning_rate": 5.9954609434873344e-05,
"loss": 0.0368,
"step": 395
},
{
"epoch": 6.193548387096774,
"grad_norm": 0.38924601674079895,
"learning_rate": 5.948322547929939e-05,
"loss": 0.0406,
"step": 396
},
{
"epoch": 6.209677419354839,
"grad_norm": 0.4060990810394287,
"learning_rate": 5.901291619089081e-05,
"loss": 0.0453,
"step": 397
},
{
"epoch": 6.225806451612903,
"grad_norm": 0.5091999173164368,
"learning_rate": 5.854369404413398e-05,
"loss": 0.0415,
"step": 398
},
{
"epoch": 6.241935483870968,
"grad_norm": 0.38994643092155457,
"learning_rate": 5.807557148467984e-05,
"loss": 0.0429,
"step": 399
},
{
"epoch": 6.258064516129032,
"grad_norm": 0.4015688896179199,
"learning_rate": 5.7608560929013946e-05,
"loss": 0.0385,
"step": 400
},
{
"epoch": 6.258064516129032,
"eval_loss": 2.1730642318725586,
"eval_runtime": 220.4701,
"eval_samples_per_second": 4.808,
"eval_steps_per_second": 0.803,
"step": 400
},
{
"epoch": 6.274193548387097,
"grad_norm": 0.41079944372177124,
"learning_rate": 5.7142674764127e-05,
"loss": 0.0426,
"step": 401
},
{
"epoch": 6.290322580645161,
"grad_norm": 0.382231205701828,
"learning_rate": 5.667792534718639e-05,
"loss": 0.0387,
"step": 402
},
{
"epoch": 6.306451612903226,
"grad_norm": 0.3789753019809723,
"learning_rate": 5.6214325005208355e-05,
"loss": 0.0411,
"step": 403
},
{
"epoch": 6.32258064516129,
"grad_norm": 0.36216092109680176,
"learning_rate": 5.5751886034731115e-05,
"loss": 0.0411,
"step": 404
},
{
"epoch": 6.338709677419355,
"grad_norm": 0.44374072551727295,
"learning_rate": 5.5290620701488594e-05,
"loss": 0.0408,
"step": 405
},
{
"epoch": 6.354838709677419,
"grad_norm": 0.46136826276779175,
"learning_rate": 5.483054124008528e-05,
"loss": 0.045,
"step": 406
},
{
"epoch": 6.370967741935484,
"grad_norm": 0.3711608052253723,
"learning_rate": 5.437165985367145e-05,
"loss": 0.0369,
"step": 407
},
{
"epoch": 6.387096774193548,
"grad_norm": 0.41239285469055176,
"learning_rate": 5.391398871361972e-05,
"loss": 0.0379,
"step": 408
},
{
"epoch": 6.403225806451613,
"grad_norm": 0.5803573727607727,
"learning_rate": 5.3457539959202086e-05,
"loss": 0.0515,
"step": 409
},
{
"epoch": 6.419354838709677,
"grad_norm": 0.4359411597251892,
"learning_rate": 5.300232569726804e-05,
"loss": 0.0427,
"step": 410
},
{
"epoch": 6.435483870967742,
"grad_norm": 0.4077586233615875,
"learning_rate": 5.2548358001923204e-05,
"loss": 0.0399,
"step": 411
},
{
"epoch": 6.451612903225806,
"grad_norm": 0.3488485813140869,
"learning_rate": 5.2095648914209525e-05,
"loss": 0.0396,
"step": 412
},
{
"epoch": 6.467741935483871,
"grad_norm": 0.39937523007392883,
"learning_rate": 5.1644210441785467e-05,
"loss": 0.039,
"step": 413
},
{
"epoch": 6.483870967741936,
"grad_norm": 0.43873974680900574,
"learning_rate": 5.119405455860772e-05,
"loss": 0.0366,
"step": 414
},
{
"epoch": 6.5,
"grad_norm": 0.356111079454422,
"learning_rate": 5.074519320461357e-05,
"loss": 0.0372,
"step": 415
},
{
"epoch": 6.516129032258064,
"grad_norm": 0.42101165652275085,
"learning_rate": 5.029763828540419e-05,
"loss": 0.0411,
"step": 416
},
{
"epoch": 6.516129032258064,
"eval_loss": 2.2114336490631104,
"eval_runtime": 358.1731,
"eval_samples_per_second": 2.959,
"eval_steps_per_second": 0.494,
"step": 416
},
{
"epoch": 6.532258064516129,
"grad_norm": 0.3797813653945923,
"learning_rate": 4.9851401671928934e-05,
"loss": 0.0431,
"step": 417
},
{
"epoch": 6.548387096774194,
"grad_norm": 0.48072004318237305,
"learning_rate": 4.940649520017035e-05,
"loss": 0.0359,
"step": 418
},
{
"epoch": 6.564516129032258,
"grad_norm": 0.38997748494148254,
"learning_rate": 4.896293067083032e-05,
"loss": 0.0386,
"step": 419
},
{
"epoch": 6.580645161290323,
"grad_norm": 0.35900595784187317,
"learning_rate": 4.852071984901696e-05,
"loss": 0.0412,
"step": 420
},
{
"epoch": 6.596774193548387,
"grad_norm": 0.3810931444168091,
"learning_rate": 4.807987446393275e-05,
"loss": 0.0356,
"step": 421
},
{
"epoch": 6.612903225806452,
"grad_norm": 0.6008711457252502,
"learning_rate": 4.7640406208563224e-05,
"loss": 0.0433,
"step": 422
},
{
"epoch": 6.629032258064516,
"grad_norm": 0.5079227685928345,
"learning_rate": 4.720232673936706e-05,
"loss": 0.0369,
"step": 423
},
{
"epoch": 6.645161290322581,
"grad_norm": 0.3621387183666229,
"learning_rate": 4.676564767596663e-05,
"loss": 0.0373,
"step": 424
},
{
"epoch": 6.661290322580645,
"grad_norm": 0.418442964553833,
"learning_rate": 4.633038060083996e-05,
"loss": 0.0445,
"step": 425
},
{
"epoch": 6.67741935483871,
"grad_norm": 0.37667208909988403,
"learning_rate": 4.5896537059013536e-05,
"loss": 0.0368,
"step": 426
},
{
"epoch": 6.693548387096774,
"grad_norm": 0.3964817523956299,
"learning_rate": 4.546412855775595e-05,
"loss": 0.0395,
"step": 427
},
{
"epoch": 6.709677419354839,
"grad_norm": 0.3593597710132599,
"learning_rate": 4.503316656627294e-05,
"loss": 0.038,
"step": 428
},
{
"epoch": 6.725806451612903,
"grad_norm": 0.6818132400512695,
"learning_rate": 4.460366251540281e-05,
"loss": 0.0504,
"step": 429
},
{
"epoch": 6.741935483870968,
"grad_norm": 0.41429397463798523,
"learning_rate": 4.417562779731355e-05,
"loss": 0.0405,
"step": 430
},
{
"epoch": 6.758064516129032,
"grad_norm": 0.5057131052017212,
"learning_rate": 4.374907376520054e-05,
"loss": 0.0395,
"step": 431
},
{
"epoch": 6.774193548387097,
"grad_norm": 0.5655283331871033,
"learning_rate": 4.3324011732985433e-05,
"loss": 0.0446,
"step": 432
},
{
"epoch": 6.774193548387097,
"eval_loss": 2.2080209255218506,
"eval_runtime": 357.0367,
"eval_samples_per_second": 2.969,
"eval_steps_per_second": 0.496,
"step": 432
},
{
"epoch": 6.790322580645161,
"grad_norm": 0.5865839719772339,
"learning_rate": 4.2900452975016037e-05,
"loss": 0.0479,
"step": 433
},
{
"epoch": 6.806451612903226,
"grad_norm": 0.43115484714508057,
"learning_rate": 4.247840872576739e-05,
"loss": 0.0429,
"step": 434
}
],
"logging_steps": 1,
"max_steps": 620,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 62,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.1927216431929754e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}