{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.3189792663476874,
"eval_steps": 500,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001594896331738437,
"grad_norm": 9.529223442077637,
"learning_rate": 2.0000000000000003e-06,
"loss": 3.8964,
"step": 1
},
{
"epoch": 0.003189792663476874,
"grad_norm": 3.4800214767456055,
"learning_rate": 4.000000000000001e-06,
"loss": 2.2247,
"step": 2
},
{
"epoch": 0.004784688995215311,
"grad_norm": 3.026850461959839,
"learning_rate": 6e-06,
"loss": 2.2337,
"step": 3
},
{
"epoch": 0.006379585326953748,
"grad_norm": 7.065993785858154,
"learning_rate": 8.000000000000001e-06,
"loss": 1.9733,
"step": 4
},
{
"epoch": 0.007974481658692184,
"grad_norm": 5.338789463043213,
"learning_rate": 1e-05,
"loss": 2.2808,
"step": 5
},
{
"epoch": 0.009569377990430622,
"grad_norm": 12.933262825012207,
"learning_rate": 1.2e-05,
"loss": 2.2984,
"step": 6
},
{
"epoch": 0.011164274322169059,
"grad_norm": 3.535792350769043,
"learning_rate": 1.4000000000000001e-05,
"loss": 2.6236,
"step": 7
},
{
"epoch": 0.012759170653907496,
"grad_norm": 7.564616680145264,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.4849,
"step": 8
},
{
"epoch": 0.014354066985645933,
"grad_norm": 7.379055976867676,
"learning_rate": 1.8e-05,
"loss": 2.9017,
"step": 9
},
{
"epoch": 0.01594896331738437,
"grad_norm": 6.781640529632568,
"learning_rate": 2e-05,
"loss": 2.5836,
"step": 10
},
{
"epoch": 0.017543859649122806,
"grad_norm": null,
"learning_rate": 2e-05,
"loss": 2.6482,
"step": 11
},
{
"epoch": 0.019138755980861243,
"grad_norm": 9.666836738586426,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.5775,
"step": 12
},
{
"epoch": 0.02073365231259968,
"grad_norm": 7.910143852233887,
"learning_rate": 2.4e-05,
"loss": 2.259,
"step": 13
},
{
"epoch": 0.022328548644338118,
"grad_norm": 5.05098295211792,
"learning_rate": 2.6000000000000002e-05,
"loss": 2.1146,
"step": 14
},
{
"epoch": 0.023923444976076555,
"grad_norm": 4.457101345062256,
"learning_rate": 2.8000000000000003e-05,
"loss": 2.1058,
"step": 15
},
{
"epoch": 0.025518341307814992,
"grad_norm": null,
"learning_rate": 2.8000000000000003e-05,
"loss": 2.4278,
"step": 16
},
{
"epoch": 0.02711323763955343,
"grad_norm": 4.420550346374512,
"learning_rate": 3e-05,
"loss": 1.9174,
"step": 17
},
{
"epoch": 0.028708133971291867,
"grad_norm": 9.368809700012207,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.0925,
"step": 18
},
{
"epoch": 0.030303030303030304,
"grad_norm": 4.382827281951904,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.3716,
"step": 19
},
{
"epoch": 0.03189792663476874,
"grad_norm": 24.012392044067383,
"learning_rate": 3.6e-05,
"loss": 2.348,
"step": 20
},
{
"epoch": 0.03349282296650718,
"grad_norm": 2.227252721786499,
"learning_rate": 3.8e-05,
"loss": 2.0678,
"step": 21
},
{
"epoch": 0.03508771929824561,
"grad_norm": 6.851357460021973,
"learning_rate": 4e-05,
"loss": 3.2008,
"step": 22
},
{
"epoch": 0.03668261562998405,
"grad_norm": 5.75844144821167,
"learning_rate": 4.2e-05,
"loss": 1.8704,
"step": 23
},
{
"epoch": 0.03827751196172249,
"grad_norm": 14.692899703979492,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.9055,
"step": 24
},
{
"epoch": 0.03987240829346093,
"grad_norm": null,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.4497,
"step": 25
},
{
"epoch": 0.04146730462519936,
"grad_norm": null,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.4382,
"step": 26
},
{
"epoch": 0.0430622009569378,
"grad_norm": 6.065037727355957,
"learning_rate": 4.600000000000001e-05,
"loss": 2.105,
"step": 27
},
{
"epoch": 0.044657097288676235,
"grad_norm": 4.765100479125977,
"learning_rate": 4.8e-05,
"loss": 2.5292,
"step": 28
},
{
"epoch": 0.046251993620414676,
"grad_norm": 10.693521499633789,
"learning_rate": 5e-05,
"loss": 1.4471,
"step": 29
},
{
"epoch": 0.04784688995215311,
"grad_norm": 5.703505992889404,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.8759,
"step": 30
},
{
"epoch": 0.049441786283891544,
"grad_norm": 6.594554424285889,
"learning_rate": 5.4000000000000005e-05,
"loss": 2.3555,
"step": 31
},
{
"epoch": 0.051036682615629984,
"grad_norm": 10.944120407104492,
"learning_rate": 5.6000000000000006e-05,
"loss": 2.6607,
"step": 32
},
{
"epoch": 0.05263157894736842,
"grad_norm": 4.437515735626221,
"learning_rate": 5.8e-05,
"loss": 2.1366,
"step": 33
},
{
"epoch": 0.05422647527910686,
"grad_norm": 18.751192092895508,
"learning_rate": 6e-05,
"loss": 2.8504,
"step": 34
},
{
"epoch": 0.05582137161084529,
"grad_norm": 4.625219821929932,
"learning_rate": 6.2e-05,
"loss": 1.7409,
"step": 35
},
{
"epoch": 0.05741626794258373,
"grad_norm": 5.256422996520996,
"learning_rate": 6.400000000000001e-05,
"loss": 2.965,
"step": 36
},
{
"epoch": 0.05901116427432217,
"grad_norm": 5.383383274078369,
"learning_rate": 6.6e-05,
"loss": 2.5555,
"step": 37
},
{
"epoch": 0.06060606060606061,
"grad_norm": 20.616491317749023,
"learning_rate": 6.800000000000001e-05,
"loss": 2.1693,
"step": 38
},
{
"epoch": 0.06220095693779904,
"grad_norm": 14.188122749328613,
"learning_rate": 7e-05,
"loss": 3.2098,
"step": 39
},
{
"epoch": 0.06379585326953748,
"grad_norm": 7.294703483581543,
"learning_rate": 7.2e-05,
"loss": 2.8137,
"step": 40
},
{
"epoch": 0.06539074960127592,
"grad_norm": 16.109020233154297,
"learning_rate": 7.4e-05,
"loss": 3.4821,
"step": 41
},
{
"epoch": 0.06698564593301436,
"grad_norm": 12.77609920501709,
"learning_rate": 7.6e-05,
"loss": 3.2586,
"step": 42
},
{
"epoch": 0.0685805422647528,
"grad_norm": 7.247976303100586,
"learning_rate": 7.800000000000001e-05,
"loss": 2.1131,
"step": 43
},
{
"epoch": 0.07017543859649122,
"grad_norm": 3.0302693843841553,
"learning_rate": 8e-05,
"loss": 2.3047,
"step": 44
},
{
"epoch": 0.07177033492822966,
"grad_norm": 6.998560905456543,
"learning_rate": 8.2e-05,
"loss": 1.9685,
"step": 45
},
{
"epoch": 0.0733652312599681,
"grad_norm": 11.82168197631836,
"learning_rate": 8.4e-05,
"loss": 2.412,
"step": 46
},
{
"epoch": 0.07496012759170653,
"grad_norm": 2.4560766220092773,
"learning_rate": 8.6e-05,
"loss": 1.5546,
"step": 47
},
{
"epoch": 0.07655502392344497,
"grad_norm": 7.5531110763549805,
"learning_rate": 8.800000000000001e-05,
"loss": 2.0706,
"step": 48
},
{
"epoch": 0.07814992025518341,
"grad_norm": 9.41441535949707,
"learning_rate": 9e-05,
"loss": 2.0769,
"step": 49
},
{
"epoch": 0.07974481658692185,
"grad_norm": 5.027219295501709,
"learning_rate": 9.200000000000001e-05,
"loss": 1.0112,
"step": 50
},
{
"epoch": 0.08133971291866028,
"grad_norm": 4.590622901916504,
"learning_rate": 9.4e-05,
"loss": 2.4014,
"step": 51
},
{
"epoch": 0.08293460925039872,
"grad_norm": 110.29186248779297,
"learning_rate": 9.6e-05,
"loss": 2.2312,
"step": 52
},
{
"epoch": 0.08452950558213716,
"grad_norm": 13.907599449157715,
"learning_rate": 9.8e-05,
"loss": 3.74,
"step": 53
},
{
"epoch": 0.0861244019138756,
"grad_norm": 21.45476722717285,
"learning_rate": 0.0001,
"loss": 2.2449,
"step": 54
},
{
"epoch": 0.08771929824561403,
"grad_norm": 26.367050170898438,
"learning_rate": 0.00010200000000000001,
"loss": 2.4117,
"step": 55
},
{
"epoch": 0.08931419457735247,
"grad_norm": 11.473040580749512,
"learning_rate": 0.00010400000000000001,
"loss": 1.9575,
"step": 56
},
{
"epoch": 0.09090909090909091,
"grad_norm": 7.996298789978027,
"learning_rate": 0.00010600000000000002,
"loss": 2.2506,
"step": 57
},
{
"epoch": 0.09250398724082935,
"grad_norm": 5.780291557312012,
"learning_rate": 0.00010800000000000001,
"loss": 1.685,
"step": 58
},
{
"epoch": 0.09409888357256778,
"grad_norm": 14.359979629516602,
"learning_rate": 0.00011000000000000002,
"loss": 2.1997,
"step": 59
},
{
"epoch": 0.09569377990430622,
"grad_norm": 2.4853787422180176,
"learning_rate": 0.00011200000000000001,
"loss": 1.2731,
"step": 60
},
{
"epoch": 0.09728867623604466,
"grad_norm": 7.136777877807617,
"learning_rate": 0.00011399999999999999,
"loss": 2.1987,
"step": 61
},
{
"epoch": 0.09888357256778309,
"grad_norm": 7.219399929046631,
"learning_rate": 0.000116,
"loss": 2.3118,
"step": 62
},
{
"epoch": 0.10047846889952153,
"grad_norm": 15.516590118408203,
"learning_rate": 0.000118,
"loss": 2.2534,
"step": 63
},
{
"epoch": 0.10207336523125997,
"grad_norm": 4.05905818939209,
"learning_rate": 0.00012,
"loss": 2.0509,
"step": 64
},
{
"epoch": 0.10366826156299841,
"grad_norm": 51.53736877441406,
"learning_rate": 0.000122,
"loss": 3.7686,
"step": 65
},
{
"epoch": 0.10526315789473684,
"grad_norm": 4.316274642944336,
"learning_rate": 0.000124,
"loss": 1.332,
"step": 66
},
{
"epoch": 0.10685805422647528,
"grad_norm": 47.97142028808594,
"learning_rate": 0.000126,
"loss": 1.4256,
"step": 67
},
{
"epoch": 0.10845295055821372,
"grad_norm": 10.22885513305664,
"learning_rate": 0.00012800000000000002,
"loss": 1.4572,
"step": 68
},
{
"epoch": 0.11004784688995216,
"grad_norm": 5.966179370880127,
"learning_rate": 0.00013000000000000002,
"loss": 1.3998,
"step": 69
},
{
"epoch": 0.11164274322169059,
"grad_norm": 3.413966178894043,
"learning_rate": 0.000132,
"loss": 1.6306,
"step": 70
},
{
"epoch": 0.11323763955342903,
"grad_norm": 197.0709991455078,
"learning_rate": 0.000134,
"loss": 1.6587,
"step": 71
},
{
"epoch": 0.11483253588516747,
"grad_norm": 12.735696792602539,
"learning_rate": 0.00013600000000000003,
"loss": 2.0253,
"step": 72
},
{
"epoch": 0.11642743221690591,
"grad_norm": 14.38369083404541,
"learning_rate": 0.000138,
"loss": 2.0833,
"step": 73
},
{
"epoch": 0.11802232854864433,
"grad_norm": 5.746401786804199,
"learning_rate": 0.00014,
"loss": 1.959,
"step": 74
},
{
"epoch": 0.11961722488038277,
"grad_norm": 4.266767501831055,
"learning_rate": 0.000142,
"loss": 1.5752,
"step": 75
},
{
"epoch": 0.12121212121212122,
"grad_norm": 16.618648529052734,
"learning_rate": 0.000144,
"loss": 3.0947,
"step": 76
},
{
"epoch": 0.12280701754385964,
"grad_norm": 2.3666510581970215,
"learning_rate": 0.000146,
"loss": 2.0715,
"step": 77
},
{
"epoch": 0.12440191387559808,
"grad_norm": 6.622753143310547,
"learning_rate": 0.000148,
"loss": 1.8844,
"step": 78
},
{
"epoch": 0.12599681020733652,
"grad_norm": 43.816375732421875,
"learning_rate": 0.00015000000000000001,
"loss": 1.9108,
"step": 79
},
{
"epoch": 0.12759170653907495,
"grad_norm": 6.682168006896973,
"learning_rate": 0.000152,
"loss": 1.5588,
"step": 80
},
{
"epoch": 0.1291866028708134,
"grad_norm": 3.0997276306152344,
"learning_rate": 0.000154,
"loss": 2.1988,
"step": 81
},
{
"epoch": 0.13078149920255183,
"grad_norm": 4.309176445007324,
"learning_rate": 0.00015600000000000002,
"loss": 1.406,
"step": 82
},
{
"epoch": 0.13237639553429026,
"grad_norm": 5.416724681854248,
"learning_rate": 0.00015800000000000002,
"loss": 1.576,
"step": 83
},
{
"epoch": 0.1339712918660287,
"grad_norm": 4.361196517944336,
"learning_rate": 0.00016,
"loss": 2.2985,
"step": 84
},
{
"epoch": 0.13556618819776714,
"grad_norm": 7.655956745147705,
"learning_rate": 0.000162,
"loss": 1.3693,
"step": 85
},
{
"epoch": 0.1371610845295056,
"grad_norm": 5.973109245300293,
"learning_rate": 0.000164,
"loss": 2.4914,
"step": 86
},
{
"epoch": 0.13875598086124402,
"grad_norm": 5.1589436531066895,
"learning_rate": 0.000166,
"loss": 1.6638,
"step": 87
},
{
"epoch": 0.14035087719298245,
"grad_norm": 6.356327056884766,
"learning_rate": 0.000168,
"loss": 1.381,
"step": 88
},
{
"epoch": 0.1419457735247209,
"grad_norm": 1.5582388639450073,
"learning_rate": 0.00017,
"loss": 1.4864,
"step": 89
},
{
"epoch": 0.14354066985645933,
"grad_norm": 2.830798387527466,
"learning_rate": 0.000172,
"loss": 1.897,
"step": 90
},
{
"epoch": 0.14513556618819776,
"grad_norm": 20.24082374572754,
"learning_rate": 0.000174,
"loss": 1.7698,
"step": 91
},
{
"epoch": 0.1467304625199362,
"grad_norm": 35.429466247558594,
"learning_rate": 0.00017600000000000002,
"loss": 3.6051,
"step": 92
},
{
"epoch": 0.14832535885167464,
"grad_norm": 9.8428373336792,
"learning_rate": 0.00017800000000000002,
"loss": 1.9402,
"step": 93
},
{
"epoch": 0.14992025518341306,
"grad_norm": 3.7131340503692627,
"learning_rate": 0.00018,
"loss": 2.2192,
"step": 94
},
{
"epoch": 0.15151515151515152,
"grad_norm": 8.838521003723145,
"learning_rate": 0.000182,
"loss": 2.0167,
"step": 95
},
{
"epoch": 0.15311004784688995,
"grad_norm": 9.63149356842041,
"learning_rate": 0.00018400000000000003,
"loss": 1.6169,
"step": 96
},
{
"epoch": 0.1547049441786284,
"grad_norm": 6.012756824493408,
"learning_rate": 0.00018600000000000002,
"loss": 1.4477,
"step": 97
},
{
"epoch": 0.15629984051036683,
"grad_norm": 9.02092456817627,
"learning_rate": 0.000188,
"loss": 1.5999,
"step": 98
},
{
"epoch": 0.15789473684210525,
"grad_norm": 5.8646416664123535,
"learning_rate": 0.00019,
"loss": 1.2515,
"step": 99
},
{
"epoch": 0.1594896331738437,
"grad_norm": 3.3447864055633545,
"learning_rate": 0.000192,
"loss": 1.257,
"step": 100
},
{
"epoch": 0.16108452950558214,
"grad_norm": 8.706202507019043,
"learning_rate": 0.000194,
"loss": 2.0047,
"step": 101
},
{
"epoch": 0.16267942583732056,
"grad_norm": 17.863378524780273,
"learning_rate": 0.000196,
"loss": 2.421,
"step": 102
},
{
"epoch": 0.16427432216905902,
"grad_norm": 5.376917362213135,
"learning_rate": 0.00019800000000000002,
"loss": 2.1448,
"step": 103
},
{
"epoch": 0.16586921850079744,
"grad_norm": 3.9824860095977783,
"learning_rate": 0.0002,
"loss": 1.7987,
"step": 104
},
{
"epoch": 0.1674641148325359,
"grad_norm": 6.7771382331848145,
"learning_rate": 0.00019800000000000002,
"loss": 1.8173,
"step": 105
},
{
"epoch": 0.16905901116427433,
"grad_norm": 6.204737186431885,
"learning_rate": 0.000196,
"loss": 1.2615,
"step": 106
},
{
"epoch": 0.17065390749601275,
"grad_norm": 5.108335018157959,
"learning_rate": 0.000194,
"loss": 1.6966,
"step": 107
},
{
"epoch": 0.1722488038277512,
"grad_norm": 51.948585510253906,
"learning_rate": 0.000192,
"loss": 2.8226,
"step": 108
},
{
"epoch": 0.17384370015948963,
"grad_norm": 3.510647773742676,
"learning_rate": 0.00019,
"loss": 1.4454,
"step": 109
},
{
"epoch": 0.17543859649122806,
"grad_norm": 5.786037921905518,
"learning_rate": 0.000188,
"loss": 1.3979,
"step": 110
},
{
"epoch": 0.17703349282296652,
"grad_norm": 5.4267964363098145,
"learning_rate": 0.00018600000000000002,
"loss": 1.8775,
"step": 111
},
{
"epoch": 0.17862838915470494,
"grad_norm": 7.448083400726318,
"learning_rate": 0.00018400000000000003,
"loss": 1.8377,
"step": 112
},
{
"epoch": 0.18022328548644337,
"grad_norm": 6.855521202087402,
"learning_rate": 0.000182,
"loss": 1.7079,
"step": 113
},
{
"epoch": 0.18181818181818182,
"grad_norm": 6.257575988769531,
"learning_rate": 0.00018,
"loss": 2.2364,
"step": 114
},
{
"epoch": 0.18341307814992025,
"grad_norm": 15.47193431854248,
"learning_rate": 0.00017800000000000002,
"loss": 1.7272,
"step": 115
},
{
"epoch": 0.1850079744816587,
"grad_norm": 12.51389217376709,
"learning_rate": 0.00017600000000000002,
"loss": 1.8385,
"step": 116
},
{
"epoch": 0.18660287081339713,
"grad_norm": 8.150806427001953,
"learning_rate": 0.000174,
"loss": 2.429,
"step": 117
},
{
"epoch": 0.18819776714513556,
"grad_norm": 29.065210342407227,
"learning_rate": 0.000172,
"loss": 1.3318,
"step": 118
},
{
"epoch": 0.189792663476874,
"grad_norm": 5.457766532897949,
"learning_rate": 0.00017,
"loss": 1.2329,
"step": 119
},
{
"epoch": 0.19138755980861244,
"grad_norm": 3.7080814838409424,
"learning_rate": 0.000168,
"loss": 1.1145,
"step": 120
},
{
"epoch": 0.19298245614035087,
"grad_norm": 5.2060065269470215,
"learning_rate": 0.000166,
"loss": 1.6808,
"step": 121
},
{
"epoch": 0.19457735247208932,
"grad_norm": 15.03097152709961,
"learning_rate": 0.000164,
"loss": 2.207,
"step": 122
},
{
"epoch": 0.19617224880382775,
"grad_norm": 3.1523945331573486,
"learning_rate": 0.000162,
"loss": 0.8121,
"step": 123
},
{
"epoch": 0.19776714513556617,
"grad_norm": 12.531198501586914,
"learning_rate": 0.00016,
"loss": 1.6493,
"step": 124
},
{
"epoch": 0.19936204146730463,
"grad_norm": 18.13838768005371,
"learning_rate": 0.00015800000000000002,
"loss": 1.9895,
"step": 125
},
{
"epoch": 0.20095693779904306,
"grad_norm": 3.7068324089050293,
"learning_rate": 0.00015600000000000002,
"loss": 1.2397,
"step": 126
},
{
"epoch": 0.2025518341307815,
"grad_norm": 3.565275192260742,
"learning_rate": 0.000154,
"loss": 1.1711,
"step": 127
},
{
"epoch": 0.20414673046251994,
"grad_norm": 2.20239520072937,
"learning_rate": 0.000152,
"loss": 1.8436,
"step": 128
},
{
"epoch": 0.20574162679425836,
"grad_norm": 3.103001117706299,
"learning_rate": 0.00015000000000000001,
"loss": 1.0085,
"step": 129
},
{
"epoch": 0.20733652312599682,
"grad_norm": 3.8708302974700928,
"learning_rate": 0.000148,
"loss": 1.0336,
"step": 130
},
{
"epoch": 0.20893141945773525,
"grad_norm": 4.008622646331787,
"learning_rate": 0.000146,
"loss": 1.7669,
"step": 131
},
{
"epoch": 0.21052631578947367,
"grad_norm": 3.847217559814453,
"learning_rate": 0.000144,
"loss": 1.5163,
"step": 132
},
{
"epoch": 0.21212121212121213,
"grad_norm": 9.75152587890625,
"learning_rate": 0.000142,
"loss": 2.3286,
"step": 133
},
{
"epoch": 0.21371610845295055,
"grad_norm": 9.416215896606445,
"learning_rate": 0.00014,
"loss": 2.0397,
"step": 134
},
{
"epoch": 0.215311004784689,
"grad_norm": 8.208052635192871,
"learning_rate": 0.000138,
"loss": 1.7927,
"step": 135
},
{
"epoch": 0.21690590111642744,
"grad_norm": 11.827594757080078,
"learning_rate": 0.00013600000000000003,
"loss": 2.1958,
"step": 136
},
{
"epoch": 0.21850079744816586,
"grad_norm": 5.788820266723633,
"learning_rate": 0.000134,
"loss": 1.8336,
"step": 137
},
{
"epoch": 0.22009569377990432,
"grad_norm": 3.6653785705566406,
"learning_rate": 0.000132,
"loss": 1.0477,
"step": 138
},
{
"epoch": 0.22169059011164274,
"grad_norm": 63.57593536376953,
"learning_rate": 0.00013000000000000002,
"loss": 1.5667,
"step": 139
},
{
"epoch": 0.22328548644338117,
"grad_norm": 12.209519386291504,
"learning_rate": 0.00012800000000000002,
"loss": 1.6414,
"step": 140
},
{
"epoch": 0.22488038277511962,
"grad_norm": 8.834230422973633,
"learning_rate": 0.000126,
"loss": 1.1292,
"step": 141
},
{
"epoch": 0.22647527910685805,
"grad_norm": 3.4640305042266846,
"learning_rate": 0.000124,
"loss": 1.3858,
"step": 142
},
{
"epoch": 0.22807017543859648,
"grad_norm": 5.367834091186523,
"learning_rate": 0.000122,
"loss": 1.4735,
"step": 143
},
{
"epoch": 0.22966507177033493,
"grad_norm": 3.5475995540618896,
"learning_rate": 0.00012,
"loss": 1.8889,
"step": 144
},
{
"epoch": 0.23125996810207336,
"grad_norm": 4.0117621421813965,
"learning_rate": 0.000118,
"loss": 0.6609,
"step": 145
},
{
"epoch": 0.23285486443381181,
"grad_norm": 2.641918420791626,
"learning_rate": 0.000116,
"loss": 1.439,
"step": 146
},
{
"epoch": 0.23444976076555024,
"grad_norm": 24.166297912597656,
"learning_rate": 0.00011399999999999999,
"loss": 1.5829,
"step": 147
},
{
"epoch": 0.23604465709728867,
"grad_norm": 12.261051177978516,
"learning_rate": 0.00011200000000000001,
"loss": 2.6259,
"step": 148
},
{
"epoch": 0.23763955342902712,
"grad_norm": 4.844594955444336,
"learning_rate": 0.00011000000000000002,
"loss": 2.2162,
"step": 149
},
{
"epoch": 0.23923444976076555,
"grad_norm": 6.697972297668457,
"learning_rate": 0.00010800000000000001,
"loss": 2.7383,
"step": 150
},
{
"epoch": 0.24082934609250398,
"grad_norm": 4.018860340118408,
"learning_rate": 0.00010600000000000002,
"loss": 2.4861,
"step": 151
},
{
"epoch": 0.24242424242424243,
"grad_norm": 7.205791473388672,
"learning_rate": 0.00010400000000000001,
"loss": 1.4837,
"step": 152
},
{
"epoch": 0.24401913875598086,
"grad_norm": 8.773712158203125,
"learning_rate": 0.00010200000000000001,
"loss": 1.6268,
"step": 153
},
{
"epoch": 0.24561403508771928,
"grad_norm": 4.254454135894775,
"learning_rate": 0.0001,
"loss": 1.6405,
"step": 154
},
{
"epoch": 0.24720893141945774,
"grad_norm": 4.130655288696289,
"learning_rate": 9.8e-05,
"loss": 0.9554,
"step": 155
},
{
"epoch": 0.24880382775119617,
"grad_norm": 7.5216169357299805,
"learning_rate": 9.6e-05,
"loss": 1.3912,
"step": 156
},
{
"epoch": 0.2503987240829346,
"grad_norm": 70.0249252319336,
"learning_rate": 9.4e-05,
"loss": 1.8041,
"step": 157
},
{
"epoch": 0.25199362041467305,
"grad_norm": 3.9001526832580566,
"learning_rate": 9.200000000000001e-05,
"loss": 1.7109,
"step": 158
},
{
"epoch": 0.2535885167464115,
"grad_norm": 5.0965094566345215,
"learning_rate": 9e-05,
"loss": 1.0314,
"step": 159
},
{
"epoch": 0.2551834130781499,
"grad_norm": 3.0462706089019775,
"learning_rate": 8.800000000000001e-05,
"loss": 1.3541,
"step": 160
},
{
"epoch": 0.2567783094098884,
"grad_norm": 2.8334619998931885,
"learning_rate": 8.6e-05,
"loss": 1.9827,
"step": 161
},
{
"epoch": 0.2583732057416268,
"grad_norm": 5.991235256195068,
"learning_rate": 8.4e-05,
"loss": 1.4319,
"step": 162
},
{
"epoch": 0.25996810207336524,
"grad_norm": 6.401395320892334,
"learning_rate": 8.2e-05,
"loss": 1.912,
"step": 163
},
{
"epoch": 0.26156299840510366,
"grad_norm": 3.0269479751586914,
"learning_rate": 8e-05,
"loss": 1.5637,
"step": 164
},
{
"epoch": 0.2631578947368421,
"grad_norm": 10.380670547485352,
"learning_rate": 7.800000000000001e-05,
"loss": 1.8585,
"step": 165
},
{
"epoch": 0.2647527910685805,
"grad_norm": 7.375792503356934,
"learning_rate": 7.6e-05,
"loss": 1.8773,
"step": 166
},
{
"epoch": 0.266347687400319,
"grad_norm": 3.75229811668396,
"learning_rate": 7.4e-05,
"loss": 1.9936,
"step": 167
},
{
"epoch": 0.2679425837320574,
"grad_norm": 4.587918281555176,
"learning_rate": 7.2e-05,
"loss": 1.0402,
"step": 168
},
{
"epoch": 0.26953748006379585,
"grad_norm": 4.558103561401367,
"learning_rate": 7e-05,
"loss": 2.0408,
"step": 169
},
{
"epoch": 0.2711323763955343,
"grad_norm": 6.410488605499268,
"learning_rate": 6.800000000000001e-05,
"loss": 1.448,
"step": 170
},
{
"epoch": 0.2727272727272727,
"grad_norm": 9.21990966796875,
"learning_rate": 6.6e-05,
"loss": 0.963,
"step": 171
},
{
"epoch": 0.2743221690590112,
"grad_norm": 10.446578025817871,
"learning_rate": 6.400000000000001e-05,
"loss": 1.4381,
"step": 172
},
{
"epoch": 0.2759170653907496,
"grad_norm": 13.717737197875977,
"learning_rate": 6.2e-05,
"loss": 1.2965,
"step": 173
},
{
"epoch": 0.27751196172248804,
"grad_norm": 36.117401123046875,
"learning_rate": 6e-05,
"loss": 1.2144,
"step": 174
},
{
"epoch": 0.27910685805422647,
"grad_norm": 8.741774559020996,
"learning_rate": 5.8e-05,
"loss": 1.5059,
"step": 175
},
{
"epoch": 0.2807017543859649,
"grad_norm": 3.040428400039673,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.4881,
"step": 176
},
{
"epoch": 0.2822966507177033,
"grad_norm": 7.401648044586182,
"learning_rate": 5.4000000000000005e-05,
"loss": 0.7462,
"step": 177
},
{
"epoch": 0.2838915470494418,
"grad_norm": 3.3486616611480713,
"learning_rate": 5.2000000000000004e-05,
"loss": 0.8773,
"step": 178
},
{
"epoch": 0.28548644338118023,
"grad_norm": 9.647512435913086,
"learning_rate": 5e-05,
"loss": 1.3147,
"step": 179
},
{
"epoch": 0.28708133971291866,
"grad_norm": 4.068251132965088,
"learning_rate": 4.8e-05,
"loss": 1.4796,
"step": 180
},
{
"epoch": 0.2886762360446571,
"grad_norm": 5.119231700897217,
"learning_rate": 4.600000000000001e-05,
"loss": 1.2465,
"step": 181
},
{
"epoch": 0.2902711323763955,
"grad_norm": 11.434249877929688,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.0839,
"step": 182
},
{
"epoch": 0.291866028708134,
"grad_norm": 2.6633126735687256,
"learning_rate": 4.2e-05,
"loss": 1.1653,
"step": 183
},
{
"epoch": 0.2934609250398724,
"grad_norm": 8.366178512573242,
"learning_rate": 4e-05,
"loss": 1.8623,
"step": 184
},
{
"epoch": 0.29505582137161085,
"grad_norm": 13.434002876281738,
"learning_rate": 3.8e-05,
"loss": 1.4409,
"step": 185
},
{
"epoch": 0.2966507177033493,
"grad_norm": 3.535421371459961,
"learning_rate": 3.6e-05,
"loss": 1.2367,
"step": 186
},
{
"epoch": 0.2982456140350877,
"grad_norm": 5.332886219024658,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.4744,
"step": 187
},
{
"epoch": 0.29984051036682613,
"grad_norm": 3.7560222148895264,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.3489,
"step": 188
},
{
"epoch": 0.3014354066985646,
"grad_norm": 4.513449668884277,
"learning_rate": 3e-05,
"loss": 1.3738,
"step": 189
},
{
"epoch": 0.30303030303030304,
"grad_norm": 7.969113826751709,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.7352,
"step": 190
},
{
"epoch": 0.30462519936204147,
"grad_norm": 4.11577033996582,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.6171,
"step": 191
},
{
"epoch": 0.3062200956937799,
"grad_norm": 4.727830410003662,
"learning_rate": 2.4e-05,
"loss": 1.3359,
"step": 192
},
{
"epoch": 0.3078149920255183,
"grad_norm": 1.7988307476043701,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.834,
"step": 193
},
{
"epoch": 0.3094098883572568,
"grad_norm": 4.455323696136475,
"learning_rate": 2e-05,
"loss": 1.2367,
"step": 194
},
{
"epoch": 0.31100478468899523,
"grad_norm": 5.334070682525635,
"learning_rate": 1.8e-05,
"loss": 1.0581,
"step": 195
},
{
"epoch": 0.31259968102073366,
"grad_norm": 4.967501640319824,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.2129,
"step": 196
},
{
"epoch": 0.3141945773524721,
"grad_norm": 4.0120673179626465,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.2043,
"step": 197
},
{
"epoch": 0.3157894736842105,
"grad_norm": 4.491064548492432,
"learning_rate": 1.2e-05,
"loss": 1.3383,
"step": 198
},
{
"epoch": 0.31738437001594894,
"grad_norm": 3.6377949714660645,
"learning_rate": 1e-05,
"loss": 1.2878,
"step": 199
},
{
"epoch": 0.3189792663476874,
"grad_norm": 3.7990877628326416,
"learning_rate": 8.000000000000001e-06,
"loss": 1.7178,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1717079281606656.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}