{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1176989848462557,
"eval_steps": 42,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002353979696925114,
"eval_loss": 1.5950701236724854,
"eval_runtime": 350.256,
"eval_samples_per_second": 20.428,
"eval_steps_per_second": 5.108,
"step": 1
},
{
"epoch": 0.0007061939090775342,
"grad_norm": 2.2252461910247803,
"learning_rate": 3e-05,
"loss": 1.686,
"step": 3
},
{
"epoch": 0.0014123878181550684,
"grad_norm": 1.464859127998352,
"learning_rate": 6e-05,
"loss": 1.4659,
"step": 6
},
{
"epoch": 0.0021185817272326027,
"grad_norm": 1.2264316082000732,
"learning_rate": 9e-05,
"loss": 1.3202,
"step": 9
},
{
"epoch": 0.002824775636310137,
"grad_norm": 1.0421457290649414,
"learning_rate": 9.999588943391597e-05,
"loss": 1.2312,
"step": 12
},
{
"epoch": 0.003530969545387671,
"grad_norm": 1.0033332109451294,
"learning_rate": 9.99743108100344e-05,
"loss": 1.162,
"step": 15
},
{
"epoch": 0.0042371634544652054,
"grad_norm": 0.922690749168396,
"learning_rate": 9.993424445916923e-05,
"loss": 1.1829,
"step": 18
},
{
"epoch": 0.004943357363542739,
"grad_norm": 0.8965157866477966,
"learning_rate": 9.987570520365104e-05,
"loss": 1.0968,
"step": 21
},
{
"epoch": 0.005649551272620274,
"grad_norm": 0.9128215312957764,
"learning_rate": 9.979871469976196e-05,
"loss": 1.1683,
"step": 24
},
{
"epoch": 0.006355745181697808,
"grad_norm": 0.8273465037345886,
"learning_rate": 9.970330142972401e-05,
"loss": 1.1221,
"step": 27
},
{
"epoch": 0.007061939090775342,
"grad_norm": 0.8535186052322388,
"learning_rate": 9.95895006911623e-05,
"loss": 1.1528,
"step": 30
},
{
"epoch": 0.007768132999852876,
"grad_norm": 0.8920127749443054,
"learning_rate": 9.945735458404681e-05,
"loss": 1.2226,
"step": 33
},
{
"epoch": 0.008474326908930411,
"grad_norm": 0.8245745301246643,
"learning_rate": 9.930691199511775e-05,
"loss": 1.1651,
"step": 36
},
{
"epoch": 0.009180520818007945,
"grad_norm": 0.836405873298645,
"learning_rate": 9.91382285798002e-05,
"loss": 1.1392,
"step": 39
},
{
"epoch": 0.009886714727085478,
"grad_norm": 0.9151057004928589,
"learning_rate": 9.895136674161465e-05,
"loss": 1.1205,
"step": 42
},
{
"epoch": 0.009886714727085478,
"eval_loss": 1.1383236646652222,
"eval_runtime": 269.5971,
"eval_samples_per_second": 26.54,
"eval_steps_per_second": 6.636,
"step": 42
},
{
"epoch": 0.010592908636163013,
"grad_norm": 0.9477565884590149,
"learning_rate": 9.874639560909117e-05,
"loss": 1.1265,
"step": 45
},
{
"epoch": 0.011299102545240547,
"grad_norm": 0.8438647389411926,
"learning_rate": 9.852339101019574e-05,
"loss": 1.0991,
"step": 48
},
{
"epoch": 0.012005296454318082,
"grad_norm": 0.887405514717102,
"learning_rate": 9.828243544427796e-05,
"loss": 1.0668,
"step": 51
},
{
"epoch": 0.012711490363395616,
"grad_norm": 0.8021948933601379,
"learning_rate": 9.802361805155097e-05,
"loss": 1.1643,
"step": 54
},
{
"epoch": 0.013417684272473149,
"grad_norm": 0.8580029606819153,
"learning_rate": 9.774703458011453e-05,
"loss": 1.1427,
"step": 57
},
{
"epoch": 0.014123878181550684,
"grad_norm": 0.8958735466003418,
"learning_rate": 9.745278735053343e-05,
"loss": 1.1193,
"step": 60
},
{
"epoch": 0.014830072090628218,
"grad_norm": 0.8596162796020508,
"learning_rate": 9.714098521798465e-05,
"loss": 1.2052,
"step": 63
},
{
"epoch": 0.015536265999705753,
"grad_norm": 0.903272271156311,
"learning_rate": 9.681174353198687e-05,
"loss": 1.1097,
"step": 66
},
{
"epoch": 0.016242459908783285,
"grad_norm": 0.9256662726402283,
"learning_rate": 9.64651840937276e-05,
"loss": 1.1135,
"step": 69
},
{
"epoch": 0.016948653817860822,
"grad_norm": 0.8818249106407166,
"learning_rate": 9.610143511100354e-05,
"loss": 1.0806,
"step": 72
},
{
"epoch": 0.017654847726938355,
"grad_norm": 0.8820203542709351,
"learning_rate": 9.572063115079063e-05,
"loss": 1.129,
"step": 75
},
{
"epoch": 0.01836104163601589,
"grad_norm": 0.7920622229576111,
"learning_rate": 9.53229130894619e-05,
"loss": 1.1205,
"step": 78
},
{
"epoch": 0.019067235545093424,
"grad_norm": 0.8157576322555542,
"learning_rate": 9.490842806067095e-05,
"loss": 1.1035,
"step": 81
},
{
"epoch": 0.019773429454170956,
"grad_norm": 0.9254652261734009,
"learning_rate": 9.44773294009206e-05,
"loss": 1.1034,
"step": 84
},
{
"epoch": 0.019773429454170956,
"eval_loss": 1.1102567911148071,
"eval_runtime": 269.1284,
"eval_samples_per_second": 26.586,
"eval_steps_per_second": 6.647,
"step": 84
},
{
"epoch": 0.020479623363248493,
"grad_norm": 0.8315055966377258,
"learning_rate": 9.40297765928369e-05,
"loss": 1.1383,
"step": 87
},
{
"epoch": 0.021185817272326025,
"grad_norm": 0.8498836159706116,
"learning_rate": 9.356593520616948e-05,
"loss": 1.1585,
"step": 90
},
{
"epoch": 0.02189201118140356,
"grad_norm": 0.7853305339813232,
"learning_rate": 9.308597683653975e-05,
"loss": 1.0736,
"step": 93
},
{
"epoch": 0.022598205090481095,
"grad_norm": 0.8707268238067627,
"learning_rate": 9.259007904196023e-05,
"loss": 1.1068,
"step": 96
},
{
"epoch": 0.023304398999558627,
"grad_norm": 0.7966411709785461,
"learning_rate": 9.207842527714767e-05,
"loss": 1.095,
"step": 99
},
{
"epoch": 0.024010592908636164,
"grad_norm": 0.8166768550872803,
"learning_rate": 9.155120482565521e-05,
"loss": 1.0826,
"step": 102
},
{
"epoch": 0.024716786817713696,
"grad_norm": 0.8242478370666504,
"learning_rate": 9.10086127298478e-05,
"loss": 1.0712,
"step": 105
},
{
"epoch": 0.025422980726791233,
"grad_norm": 0.8760331869125366,
"learning_rate": 9.045084971874738e-05,
"loss": 1.0798,
"step": 108
},
{
"epoch": 0.026129174635868765,
"grad_norm": 0.9729852080345154,
"learning_rate": 8.987812213377424e-05,
"loss": 1.1157,
"step": 111
},
{
"epoch": 0.026835368544946298,
"grad_norm": 0.8696463108062744,
"learning_rate": 8.929064185241213e-05,
"loss": 1.0699,
"step": 114
},
{
"epoch": 0.027541562454023834,
"grad_norm": 0.954184889793396,
"learning_rate": 8.868862620982534e-05,
"loss": 1.0641,
"step": 117
},
{
"epoch": 0.028247756363101367,
"grad_norm": 0.838320791721344,
"learning_rate": 8.807229791845673e-05,
"loss": 1.0967,
"step": 120
},
{
"epoch": 0.028953950272178904,
"grad_norm": 0.9018192887306213,
"learning_rate": 8.744188498563641e-05,
"loss": 1.119,
"step": 123
},
{
"epoch": 0.029660144181256436,
"grad_norm": 0.8872904777526855,
"learning_rate": 8.679762062923175e-05,
"loss": 1.1756,
"step": 126
},
{
"epoch": 0.029660144181256436,
"eval_loss": 1.0964943170547485,
"eval_runtime": 268.8289,
"eval_samples_per_second": 26.615,
"eval_steps_per_second": 6.655,
"step": 126
},
{
"epoch": 0.03036633809033397,
"grad_norm": 0.8306711912155151,
"learning_rate": 8.613974319136958e-05,
"loss": 1.1028,
"step": 129
},
{
"epoch": 0.031072531999411505,
"grad_norm": 0.7843056917190552,
"learning_rate": 8.54684960502629e-05,
"loss": 1.1047,
"step": 132
},
{
"epoch": 0.03177872590848904,
"grad_norm": 0.9554882049560547,
"learning_rate": 8.478412753017433e-05,
"loss": 1.0684,
"step": 135
},
{
"epoch": 0.03248491981756657,
"grad_norm": 0.8904004096984863,
"learning_rate": 8.408689080954998e-05,
"loss": 1.087,
"step": 138
},
{
"epoch": 0.03319111372664411,
"grad_norm": 0.8412317037582397,
"learning_rate": 8.33770438273574e-05,
"loss": 1.0588,
"step": 141
},
{
"epoch": 0.033897307635721644,
"grad_norm": 0.8437768220901489,
"learning_rate": 8.265484918766243e-05,
"loss": 1.1033,
"step": 144
},
{
"epoch": 0.03460350154479917,
"grad_norm": 0.9272513389587402,
"learning_rate": 8.192057406248028e-05,
"loss": 1.042,
"step": 147
},
{
"epoch": 0.03530969545387671,
"grad_norm": 0.8524516224861145,
"learning_rate": 8.117449009293668e-05,
"loss": 1.0728,
"step": 150
},
{
"epoch": 0.036015889362954245,
"grad_norm": 0.8371686339378357,
"learning_rate": 8.041687328877567e-05,
"loss": 1.0905,
"step": 153
},
{
"epoch": 0.03672208327203178,
"grad_norm": 0.8983803987503052,
"learning_rate": 7.964800392625129e-05,
"loss": 1.0301,
"step": 156
},
{
"epoch": 0.03742827718110931,
"grad_norm": 0.8502050042152405,
"learning_rate": 7.886816644444098e-05,
"loss": 1.0659,
"step": 159
},
{
"epoch": 0.03813447109018685,
"grad_norm": 0.8608352541923523,
"learning_rate": 7.807764934001874e-05,
"loss": 1.0618,
"step": 162
},
{
"epoch": 0.038840664999264383,
"grad_norm": 0.8520893454551697,
"learning_rate": 7.727674506052743e-05,
"loss": 1.1093,
"step": 165
},
{
"epoch": 0.03954685890834191,
"grad_norm": 0.8674155473709106,
"learning_rate": 7.646574989618938e-05,
"loss": 0.988,
"step": 168
},
{
"epoch": 0.03954685890834191,
"eval_loss": 1.0796748399734497,
"eval_runtime": 268.8776,
"eval_samples_per_second": 26.611,
"eval_steps_per_second": 6.654,
"step": 168
},
{
"epoch": 0.04025305281741945,
"grad_norm": 0.8685376644134521,
"learning_rate": 7.564496387029532e-05,
"loss": 1.0711,
"step": 171
},
{
"epoch": 0.040959246726496985,
"grad_norm": 0.8623842597007751,
"learning_rate": 7.481469062821252e-05,
"loss": 1.0819,
"step": 174
},
{
"epoch": 0.04166544063557452,
"grad_norm": 0.799719512462616,
"learning_rate": 7.39752373250527e-05,
"loss": 1.0751,
"step": 177
},
{
"epoch": 0.04237163454465205,
"grad_norm": 0.8547356724739075,
"learning_rate": 7.312691451204178e-05,
"loss": 1.0339,
"step": 180
},
{
"epoch": 0.04307782845372959,
"grad_norm": 0.9027285575866699,
"learning_rate": 7.227003602163295e-05,
"loss": 1.0092,
"step": 183
},
{
"epoch": 0.04378402236280712,
"grad_norm": 0.9481127858161926,
"learning_rate": 7.14049188514063e-05,
"loss": 1.1462,
"step": 186
},
{
"epoch": 0.04449021627188465,
"grad_norm": 0.87617027759552,
"learning_rate": 7.05318830467969e-05,
"loss": 1.0242,
"step": 189
},
{
"epoch": 0.04519641018096219,
"grad_norm": 0.8404496908187866,
"learning_rate": 6.965125158269619e-05,
"loss": 1.1622,
"step": 192
},
{
"epoch": 0.045902604090039725,
"grad_norm": 0.8150330185890198,
"learning_rate": 6.876335024396872e-05,
"loss": 1.0837,
"step": 195
},
{
"epoch": 0.046608797999117255,
"grad_norm": 0.8786975145339966,
"learning_rate": 6.786850750493006e-05,
"loss": 1.1104,
"step": 198
},
{
"epoch": 0.04731499190819479,
"grad_norm": 0.8775438666343689,
"learning_rate": 6.696705440782938e-05,
"loss": 1.0072,
"step": 201
},
{
"epoch": 0.04802118581727233,
"grad_norm": 0.965625524520874,
"learning_rate": 6.605932444038229e-05,
"loss": 1.05,
"step": 204
},
{
"epoch": 0.04872737972634986,
"grad_norm": 1.0058841705322266,
"learning_rate": 6.514565341239861e-05,
"loss": 1.0594,
"step": 207
},
{
"epoch": 0.04943357363542739,
"grad_norm": 0.8338814973831177,
"learning_rate": 6.422637933155162e-05,
"loss": 1.0185,
"step": 210
},
{
"epoch": 0.04943357363542739,
"eval_loss": 1.0691874027252197,
"eval_runtime": 269.2023,
"eval_samples_per_second": 26.579,
"eval_steps_per_second": 6.646,
"step": 210
},
{
"epoch": 0.05013976754450493,
"grad_norm": 0.8559284210205078,
"learning_rate": 6.330184227833376e-05,
"loss": 1.0554,
"step": 213
},
{
"epoch": 0.050845961453582465,
"grad_norm": 0.8256874680519104,
"learning_rate": 6.237238428024572e-05,
"loss": 1.0456,
"step": 216
},
{
"epoch": 0.051552155362659995,
"grad_norm": 0.8674404621124268,
"learning_rate": 6.143834918526527e-05,
"loss": 1.0081,
"step": 219
},
{
"epoch": 0.05225834927173753,
"grad_norm": 0.8470603227615356,
"learning_rate": 6.0500082534642464e-05,
"loss": 1.0314,
"step": 222
},
{
"epoch": 0.05296454318081507,
"grad_norm": 0.7734717726707458,
"learning_rate": 5.955793143506863e-05,
"loss": 0.9973,
"step": 225
},
{
"epoch": 0.053670737089892596,
"grad_norm": 0.8707345724105835,
"learning_rate": 5.861224443026595e-05,
"loss": 0.9939,
"step": 228
},
{
"epoch": 0.05437693099897013,
"grad_norm": 0.8519316911697388,
"learning_rate": 5.766337137204579e-05,
"loss": 1.0261,
"step": 231
},
{
"epoch": 0.05508312490804767,
"grad_norm": 0.9453819990158081,
"learning_rate": 5.6711663290882776e-05,
"loss": 1.0753,
"step": 234
},
{
"epoch": 0.055789318817125205,
"grad_norm": 0.8315773606300354,
"learning_rate": 5.575747226605298e-05,
"loss": 1.0459,
"step": 237
},
{
"epoch": 0.056495512726202735,
"grad_norm": 0.846106231212616,
"learning_rate": 5.480115129538409e-05,
"loss": 1.0934,
"step": 240
},
{
"epoch": 0.05720170663528027,
"grad_norm": 0.9331178665161133,
"learning_rate": 5.384305416466584e-05,
"loss": 1.1103,
"step": 243
},
{
"epoch": 0.05790790054435781,
"grad_norm": 1.0972535610198975,
"learning_rate": 5.288353531676873e-05,
"loss": 1.0382,
"step": 246
},
{
"epoch": 0.058614094453435336,
"grad_norm": 0.9295033812522888,
"learning_rate": 5.192294972051992e-05,
"loss": 1.1169,
"step": 249
},
{
"epoch": 0.05932028836251287,
"grad_norm": 0.8790277242660522,
"learning_rate": 5.0961652739384356e-05,
"loss": 1.0503,
"step": 252
},
{
"epoch": 0.05932028836251287,
"eval_loss": 1.0560417175292969,
"eval_runtime": 269.1877,
"eval_samples_per_second": 26.58,
"eval_steps_per_second": 6.646,
"step": 252
},
{
"epoch": 0.06002648227159041,
"grad_norm": 0.8657337427139282,
"learning_rate": 5e-05,
"loss": 0.982,
"step": 255
},
{
"epoch": 0.06073267618066794,
"grad_norm": 0.8750261664390564,
"learning_rate": 4.903834726061565e-05,
"loss": 1.0433,
"step": 258
},
{
"epoch": 0.061438870089745475,
"grad_norm": 1.0193053483963013,
"learning_rate": 4.807705027948008e-05,
"loss": 1.0615,
"step": 261
},
{
"epoch": 0.06214506399882301,
"grad_norm": 0.7748709321022034,
"learning_rate": 4.711646468323129e-05,
"loss": 1.0261,
"step": 264
},
{
"epoch": 0.06285125790790054,
"grad_norm": 0.8381453156471252,
"learning_rate": 4.6156945835334184e-05,
"loss": 1.0834,
"step": 267
},
{
"epoch": 0.06355745181697808,
"grad_norm": 0.8673277497291565,
"learning_rate": 4.5198848704615914e-05,
"loss": 1.0298,
"step": 270
},
{
"epoch": 0.06426364572605561,
"grad_norm": 0.8711371421813965,
"learning_rate": 4.424252773394704e-05,
"loss": 1.1021,
"step": 273
},
{
"epoch": 0.06496983963513314,
"grad_norm": 0.8081970810890198,
"learning_rate": 4.328833670911724e-05,
"loss": 1.0529,
"step": 276
},
{
"epoch": 0.06567603354421069,
"grad_norm": 0.8456825613975525,
"learning_rate": 4.23366286279542e-05,
"loss": 0.9749,
"step": 279
},
{
"epoch": 0.06638222745328821,
"grad_norm": 0.7953401803970337,
"learning_rate": 4.138775556973406e-05,
"loss": 1.0372,
"step": 282
},
{
"epoch": 0.06708842136236574,
"grad_norm": 1.081406593322754,
"learning_rate": 4.04420685649314e-05,
"loss": 1.0553,
"step": 285
},
{
"epoch": 0.06779461527144329,
"grad_norm": 0.851957380771637,
"learning_rate": 3.9499917465357534e-05,
"loss": 0.9904,
"step": 288
},
{
"epoch": 0.06850080918052082,
"grad_norm": 0.894390344619751,
"learning_rate": 3.856165081473474e-05,
"loss": 1.0338,
"step": 291
},
{
"epoch": 0.06920700308959835,
"grad_norm": 0.9085647463798523,
"learning_rate": 3.762761571975429e-05,
"loss": 0.9757,
"step": 294
},
{
"epoch": 0.06920700308959835,
"eval_loss": 1.0470167398452759,
"eval_runtime": 269.177,
"eval_samples_per_second": 26.581,
"eval_steps_per_second": 6.646,
"step": 294
},
{
"epoch": 0.06991319699867589,
"grad_norm": 0.9668933153152466,
"learning_rate": 3.6698157721666246e-05,
"loss": 1.0899,
"step": 297
},
{
"epoch": 0.07061939090775342,
"grad_norm": 0.8540582060813904,
"learning_rate": 3.5773620668448384e-05,
"loss": 1.0131,
"step": 300
},
{
"epoch": 0.07132558481683096,
"grad_norm": 0.8082262277603149,
"learning_rate": 3.48543465876014e-05,
"loss": 1.0107,
"step": 303
},
{
"epoch": 0.07203177872590849,
"grad_norm": 0.8167277574539185,
"learning_rate": 3.3940675559617724e-05,
"loss": 1.04,
"step": 306
},
{
"epoch": 0.07273797263498602,
"grad_norm": 0.8783098459243774,
"learning_rate": 3.303294559217063e-05,
"loss": 1.047,
"step": 309
},
{
"epoch": 0.07344416654406356,
"grad_norm": 0.891689121723175,
"learning_rate": 3.213149249506997e-05,
"loss": 1.0212,
"step": 312
},
{
"epoch": 0.07415036045314109,
"grad_norm": 0.8565186858177185,
"learning_rate": 3.12366497560313e-05,
"loss": 1.0649,
"step": 315
},
{
"epoch": 0.07485655436221862,
"grad_norm": 0.8088008761405945,
"learning_rate": 3.0348748417303823e-05,
"loss": 1.0795,
"step": 318
},
{
"epoch": 0.07556274827129617,
"grad_norm": 0.9278706908226013,
"learning_rate": 2.9468116953203107e-05,
"loss": 1.0993,
"step": 321
},
{
"epoch": 0.0762689421803737,
"grad_norm": 0.8478650450706482,
"learning_rate": 2.8595081148593738e-05,
"loss": 1.0914,
"step": 324
},
{
"epoch": 0.07697513608945122,
"grad_norm": 0.8993713855743408,
"learning_rate": 2.772996397836704e-05,
"loss": 1.0011,
"step": 327
},
{
"epoch": 0.07768132999852877,
"grad_norm": 0.9126960039138794,
"learning_rate": 2.687308548795825e-05,
"loss": 1.0809,
"step": 330
},
{
"epoch": 0.0783875239076063,
"grad_norm": 0.9319847822189331,
"learning_rate": 2.6024762674947313e-05,
"loss": 1.0285,
"step": 333
},
{
"epoch": 0.07909371781668383,
"grad_norm": 0.8791552782058716,
"learning_rate": 2.5185309371787513e-05,
"loss": 1.085,
"step": 336
},
{
"epoch": 0.07909371781668383,
"eval_loss": 1.0400582551956177,
"eval_runtime": 269.1233,
"eval_samples_per_second": 26.586,
"eval_steps_per_second": 6.648,
"step": 336
},
{
"epoch": 0.07979991172576137,
"grad_norm": 0.8961087465286255,
"learning_rate": 2.43550361297047e-05,
"loss": 1.0224,
"step": 339
},
{
"epoch": 0.0805061056348389,
"grad_norm": 0.8532075881958008,
"learning_rate": 2.353425010381063e-05,
"loss": 1.0061,
"step": 342
},
{
"epoch": 0.08121229954391643,
"grad_norm": 0.8397119641304016,
"learning_rate": 2.272325493947257e-05,
"loss": 1.0578,
"step": 345
},
{
"epoch": 0.08191849345299397,
"grad_norm": 0.8038586378097534,
"learning_rate": 2.192235065998126e-05,
"loss": 1.0712,
"step": 348
},
{
"epoch": 0.0826246873620715,
"grad_norm": 0.890216588973999,
"learning_rate": 2.1131833555559037e-05,
"loss": 1.0385,
"step": 351
},
{
"epoch": 0.08333088127114904,
"grad_norm": 0.9037936925888062,
"learning_rate": 2.0351996073748713e-05,
"loss": 1.0628,
"step": 354
},
{
"epoch": 0.08403707518022657,
"grad_norm": 0.8792752027511597,
"learning_rate": 1.9583126711224343e-05,
"loss": 1.0399,
"step": 357
},
{
"epoch": 0.0847432690893041,
"grad_norm": 0.9603466987609863,
"learning_rate": 1.8825509907063327e-05,
"loss": 1.0277,
"step": 360
},
{
"epoch": 0.08544946299838165,
"grad_norm": 0.9195833206176758,
"learning_rate": 1.807942593751973e-05,
"loss": 1.0849,
"step": 363
},
{
"epoch": 0.08615565690745917,
"grad_norm": 0.8159909248352051,
"learning_rate": 1.7345150812337564e-05,
"loss": 1.0185,
"step": 366
},
{
"epoch": 0.0868618508165367,
"grad_norm": 1.0280473232269287,
"learning_rate": 1.66229561726426e-05,
"loss": 0.9795,
"step": 369
},
{
"epoch": 0.08756804472561425,
"grad_norm": 0.9304172992706299,
"learning_rate": 1.5913109190450032e-05,
"loss": 1.0552,
"step": 372
},
{
"epoch": 0.08827423863469178,
"grad_norm": 0.8089962005615234,
"learning_rate": 1.5215872469825682e-05,
"loss": 1.049,
"step": 375
},
{
"epoch": 0.0889804325437693,
"grad_norm": 0.826493501663208,
"learning_rate": 1.4531503949737108e-05,
"loss": 1.0446,
"step": 378
},
{
"epoch": 0.0889804325437693,
"eval_loss": 1.034751534461975,
"eval_runtime": 269.0497,
"eval_samples_per_second": 26.594,
"eval_steps_per_second": 6.649,
"step": 378
},
{
"epoch": 0.08968662645284685,
"grad_norm": 0.842292308807373,
"learning_rate": 1.3860256808630428e-05,
"loss": 1.0038,
"step": 381
},
{
"epoch": 0.09039282036192438,
"grad_norm": 0.8173481225967407,
"learning_rate": 1.3202379370768252e-05,
"loss": 1.0147,
"step": 384
},
{
"epoch": 0.09109901427100191,
"grad_norm": 0.9445770978927612,
"learning_rate": 1.2558115014363592e-05,
"loss": 1.0801,
"step": 387
},
{
"epoch": 0.09180520818007945,
"grad_norm": 0.8653852939605713,
"learning_rate": 1.1927702081543279e-05,
"loss": 1.0846,
"step": 390
},
{
"epoch": 0.09251140208915698,
"grad_norm": 0.8680213093757629,
"learning_rate": 1.1311373790174657e-05,
"loss": 1.0537,
"step": 393
},
{
"epoch": 0.09321759599823451,
"grad_norm": 0.8509698510169983,
"learning_rate": 1.0709358147587884e-05,
"loss": 1.0221,
"step": 396
},
{
"epoch": 0.09392378990731205,
"grad_norm": 0.8282216787338257,
"learning_rate": 1.0121877866225781e-05,
"loss": 0.9938,
"step": 399
},
{
"epoch": 0.09462998381638958,
"grad_norm": 0.9085760712623596,
"learning_rate": 9.549150281252633e-06,
"loss": 0.9776,
"step": 402
},
{
"epoch": 0.09533617772546711,
"grad_norm": 0.8074911236763,
"learning_rate": 8.991387270152201e-06,
"loss": 1.0781,
"step": 405
},
{
"epoch": 0.09604237163454465,
"grad_norm": 0.8322492837905884,
"learning_rate": 8.448795174344804e-06,
"loss": 0.9819,
"step": 408
},
{
"epoch": 0.09674856554362218,
"grad_norm": 0.8055025339126587,
"learning_rate": 7.921574722852343e-06,
"loss": 1.029,
"step": 411
},
{
"epoch": 0.09745475945269973,
"grad_norm": 0.8876299858093262,
"learning_rate": 7.409920958039795e-06,
"loss": 1.0499,
"step": 414
},
{
"epoch": 0.09816095336177726,
"grad_norm": 0.7985761761665344,
"learning_rate": 6.9140231634602485e-06,
"loss": 0.9958,
"step": 417
},
{
"epoch": 0.09886714727085479,
"grad_norm": 1.0081782341003418,
"learning_rate": 6.43406479383053e-06,
"loss": 1.083,
"step": 420
},
{
"epoch": 0.09886714727085479,
"eval_loss": 1.0310063362121582,
"eval_runtime": 268.9096,
"eval_samples_per_second": 26.607,
"eval_steps_per_second": 6.653,
"step": 420
},
{
"epoch": 0.09957334117993233,
"grad_norm": 0.8535836338996887,
"learning_rate": 5.9702234071631e-06,
"loss": 0.9285,
"step": 423
},
{
"epoch": 0.10027953508900986,
"grad_norm": 0.8347278833389282,
"learning_rate": 5.5226705990794155e-06,
"loss": 0.955,
"step": 426
},
{
"epoch": 0.10098572899808739,
"grad_norm": 0.8799304962158203,
"learning_rate": 5.091571939329048e-06,
"loss": 1.0021,
"step": 429
},
{
"epoch": 0.10169192290716493,
"grad_norm": 0.9157566428184509,
"learning_rate": 4.677086910538092e-06,
"loss": 1.0896,
"step": 432
},
{
"epoch": 0.10239811681624246,
"grad_norm": 0.9026911854743958,
"learning_rate": 4.279368849209381e-06,
"loss": 1.0693,
"step": 435
},
{
"epoch": 0.10310431072531999,
"grad_norm": 0.887106716632843,
"learning_rate": 3.898564888996476e-06,
"loss": 1.0153,
"step": 438
},
{
"epoch": 0.10381050463439753,
"grad_norm": 0.895766019821167,
"learning_rate": 3.534815906272404e-06,
"loss": 0.9806,
"step": 441
},
{
"epoch": 0.10451669854347506,
"grad_norm": 0.9084652662277222,
"learning_rate": 3.18825646801314e-06,
"loss": 1.0601,
"step": 444
},
{
"epoch": 0.10522289245255259,
"grad_norm": 0.8326694965362549,
"learning_rate": 2.8590147820153513e-06,
"loss": 1.0726,
"step": 447
},
{
"epoch": 0.10592908636163013,
"grad_norm": 0.8262003064155579,
"learning_rate": 2.547212649466568e-06,
"loss": 0.9757,
"step": 450
},
{
"epoch": 0.10663528027070766,
"grad_norm": 0.8814327120780945,
"learning_rate": 2.2529654198854835e-06,
"loss": 1.0132,
"step": 453
},
{
"epoch": 0.10734147417978519,
"grad_norm": 0.8832114338874817,
"learning_rate": 1.9763819484490355e-06,
"loss": 1.0479,
"step": 456
},
{
"epoch": 0.10804766808886274,
"grad_norm": 0.8286268711090088,
"learning_rate": 1.7175645557220566e-06,
"loss": 1.0587,
"step": 459
},
{
"epoch": 0.10875386199794027,
"grad_norm": 0.9024969339370728,
"learning_rate": 1.4766089898042678e-06,
"loss": 1.0135,
"step": 462
},
{
"epoch": 0.10875386199794027,
"eval_loss": 1.0295510292053223,
"eval_runtime": 268.7586,
"eval_samples_per_second": 26.622,
"eval_steps_per_second": 6.657,
"step": 462
},
{
"epoch": 0.10946005590701781,
"grad_norm": 0.890392005443573,
"learning_rate": 1.2536043909088191e-06,
"loss": 1.0834,
"step": 465
},
{
"epoch": 0.11016624981609534,
"grad_norm": 0.8814263939857483,
"learning_rate": 1.0486332583853563e-06,
"loss": 1.0303,
"step": 468
},
{
"epoch": 0.11087244372517287,
"grad_norm": 0.8813495635986328,
"learning_rate": 8.617714201998084e-07,
"loss": 1.0092,
"step": 471
},
{
"epoch": 0.11157863763425041,
"grad_norm": 0.8011179566383362,
"learning_rate": 6.93088004882253e-07,
"loss": 1.0072,
"step": 474
},
{
"epoch": 0.11228483154332794,
"grad_norm": 0.9191976189613342,
"learning_rate": 5.426454159531913e-07,
"loss": 1.0898,
"step": 477
},
{
"epoch": 0.11299102545240547,
"grad_norm": 0.9359084963798523,
"learning_rate": 4.104993088376974e-07,
"loss": 1.0413,
"step": 480
},
{
"epoch": 0.11369721936148301,
"grad_norm": 0.8636139035224915,
"learning_rate": 2.966985702759828e-07,
"loss": 1.0287,
"step": 483
},
{
"epoch": 0.11440341327056054,
"grad_norm": 0.9202425479888916,
"learning_rate": 2.012853002380466e-07,
"loss": 1.1024,
"step": 486
},
{
"epoch": 0.11510960717963807,
"grad_norm": 0.928870677947998,
"learning_rate": 1.2429479634897267e-07,
"loss": 1.0219,
"step": 489
},
{
"epoch": 0.11581580108871561,
"grad_norm": 0.8760671019554138,
"learning_rate": 6.575554083078084e-08,
"loss": 1.034,
"step": 492
},
{
"epoch": 0.11652199499779314,
"grad_norm": 0.8538545966148376,
"learning_rate": 2.568918996560532e-08,
"loss": 1.0029,
"step": 495
},
{
"epoch": 0.11722818890687067,
"grad_norm": 0.8800035119056702,
"learning_rate": 4.110566084036816e-09,
"loss": 1.0125,
"step": 498
}
],
"logging_steps": 3,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 42,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.5547217133568e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}