{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 175,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005714285714285714,
"grad_norm": 70.73185729980469,
"learning_rate": 5.555555555555556e-06,
"loss": 7.814,
"step": 1
},
{
"epoch": 0.011428571428571429,
"grad_norm": 67.59354400634766,
"learning_rate": 1.1111111111111112e-05,
"loss": 7.4984,
"step": 2
},
{
"epoch": 0.017142857142857144,
"grad_norm": 33.69651794433594,
"learning_rate": 1.6666666666666667e-05,
"loss": 5.9383,
"step": 3
},
{
"epoch": 0.022857142857142857,
"grad_norm": 13.683563232421875,
"learning_rate": 2.2222222222222223e-05,
"loss": 5.1633,
"step": 4
},
{
"epoch": 0.02857142857142857,
"grad_norm": 14.047224044799805,
"learning_rate": 2.777777777777778e-05,
"loss": 5.2954,
"step": 5
},
{
"epoch": 0.03428571428571429,
"grad_norm": 13.862848281860352,
"learning_rate": 3.3333333333333335e-05,
"loss": 5.2335,
"step": 6
},
{
"epoch": 0.04,
"grad_norm": 11.201601028442383,
"learning_rate": 3.888888888888889e-05,
"loss": 5.1067,
"step": 7
},
{
"epoch": 0.045714285714285714,
"grad_norm": 9.068086624145508,
"learning_rate": 4.4444444444444447e-05,
"loss": 4.8728,
"step": 8
},
{
"epoch": 0.05142857142857143,
"grad_norm": 26.591472625732422,
"learning_rate": 5e-05,
"loss": 4.9554,
"step": 9
},
{
"epoch": 0.05714285714285714,
"grad_norm": 11.889716148376465,
"learning_rate": 5.555555555555556e-05,
"loss": 4.7776,
"step": 10
},
{
"epoch": 0.06285714285714286,
"grad_norm": 8.594311714172363,
"learning_rate": 6.111111111111112e-05,
"loss": 4.711,
"step": 11
},
{
"epoch": 0.06857142857142857,
"grad_norm": 6.407460689544678,
"learning_rate": 6.666666666666667e-05,
"loss": 4.5297,
"step": 12
},
{
"epoch": 0.07428571428571429,
"grad_norm": 5.835541248321533,
"learning_rate": 7.222222222222222e-05,
"loss": 4.5921,
"step": 13
},
{
"epoch": 0.08,
"grad_norm": 5.524861812591553,
"learning_rate": 7.777777777777778e-05,
"loss": 4.6319,
"step": 14
},
{
"epoch": 0.08571428571428572,
"grad_norm": 5.910550117492676,
"learning_rate": 8.333333333333334e-05,
"loss": 4.5386,
"step": 15
},
{
"epoch": 0.09142857142857143,
"grad_norm": 3.423496723175049,
"learning_rate": 8.888888888888889e-05,
"loss": 4.481,
"step": 16
},
{
"epoch": 0.09714285714285714,
"grad_norm": 14.430135726928711,
"learning_rate": 9.444444444444444e-05,
"loss": 4.2537,
"step": 17
},
{
"epoch": 0.10285714285714286,
"grad_norm": 4.56686544418335,
"learning_rate": 0.0001,
"loss": 4.5151,
"step": 18
},
{
"epoch": 0.10857142857142857,
"grad_norm": 3.4966490268707275,
"learning_rate": 9.999776148326216e-05,
"loss": 4.332,
"step": 19
},
{
"epoch": 0.11428571428571428,
"grad_norm": 7.750316619873047,
"learning_rate": 9.999104613348688e-05,
"loss": 4.361,
"step": 20
},
{
"epoch": 0.12,
"grad_norm": 4.739988803863525,
"learning_rate": 9.997985455197114e-05,
"loss": 4.3876,
"step": 21
},
{
"epoch": 0.12571428571428572,
"grad_norm": 6.870810508728027,
"learning_rate": 9.996418774081658e-05,
"loss": 3.8275,
"step": 22
},
{
"epoch": 0.13142857142857142,
"grad_norm": 3.8571834564208984,
"learning_rate": 9.994404710283998e-05,
"loss": 4.3033,
"step": 23
},
{
"epoch": 0.13714285714285715,
"grad_norm": 2.915670156478882,
"learning_rate": 9.991943444144757e-05,
"loss": 3.7222,
"step": 24
},
{
"epoch": 0.14285714285714285,
"grad_norm": 3.343397855758667,
"learning_rate": 9.98903519604735e-05,
"loss": 4.4069,
"step": 25
},
{
"epoch": 0.14857142857142858,
"grad_norm": 4.528430461883545,
"learning_rate": 9.985680226398261e-05,
"loss": 4.5253,
"step": 26
},
{
"epoch": 0.15428571428571428,
"grad_norm": 2.878030300140381,
"learning_rate": 9.981878835603717e-05,
"loss": 4.2524,
"step": 27
},
{
"epoch": 0.16,
"grad_norm": 4.2011637687683105,
"learning_rate": 9.977631364042795e-05,
"loss": 4.5261,
"step": 28
},
{
"epoch": 0.1657142857142857,
"grad_norm": 3.751654624938965,
"learning_rate": 9.972938192036944e-05,
"loss": 4.1648,
"step": 29
},
{
"epoch": 0.17142857142857143,
"grad_norm": 5.761055946350098,
"learning_rate": 9.967799739815925e-05,
"loss": 4.2666,
"step": 30
},
{
"epoch": 0.17714285714285713,
"grad_norm": 2.6174347400665283,
"learning_rate": 9.962216467480193e-05,
"loss": 4.4525,
"step": 31
},
{
"epoch": 0.18285714285714286,
"grad_norm": 3.8841474056243896,
"learning_rate": 9.956188874959687e-05,
"loss": 4.0747,
"step": 32
},
{
"epoch": 0.18857142857142858,
"grad_norm": 2.7967708110809326,
"learning_rate": 9.94971750196908e-05,
"loss": 4.4146,
"step": 33
},
{
"epoch": 0.19428571428571428,
"grad_norm": 3.007628917694092,
"learning_rate": 9.942802927959443e-05,
"loss": 4.2302,
"step": 34
},
{
"epoch": 0.2,
"grad_norm": 3.3915443420410156,
"learning_rate": 9.93544577206636e-05,
"loss": 4.3599,
"step": 35
},
{
"epoch": 0.2057142857142857,
"grad_norm": 3.1952192783355713,
"learning_rate": 9.927646693054496e-05,
"loss": 3.9553,
"step": 36
},
{
"epoch": 0.21142857142857144,
"grad_norm": 3.318091630935669,
"learning_rate": 9.919406389258607e-05,
"loss": 4.4174,
"step": 37
},
{
"epoch": 0.21714285714285714,
"grad_norm": 3.1411001682281494,
"learning_rate": 9.910725598521013e-05,
"loss": 4.3758,
"step": 38
},
{
"epoch": 0.22285714285714286,
"grad_norm": 2.8873131275177,
"learning_rate": 9.901605098125528e-05,
"loss": 4.0678,
"step": 39
},
{
"epoch": 0.22857142857142856,
"grad_norm": 2.7040855884552,
"learning_rate": 9.892045704727864e-05,
"loss": 4.1704,
"step": 40
},
{
"epoch": 0.2342857142857143,
"grad_norm": 2.8548882007598877,
"learning_rate": 9.882048274282505e-05,
"loss": 4.3153,
"step": 41
},
{
"epoch": 0.24,
"grad_norm": 2.8522284030914307,
"learning_rate": 9.871613701966067e-05,
"loss": 4.2952,
"step": 42
},
{
"epoch": 0.24571428571428572,
"grad_norm": 3.3185341358184814,
"learning_rate": 9.860742922097141e-05,
"loss": 4.292,
"step": 43
},
{
"epoch": 0.25142857142857145,
"grad_norm": 2.78251051902771,
"learning_rate": 9.849436908052636e-05,
"loss": 4.0693,
"step": 44
},
{
"epoch": 0.2571428571428571,
"grad_norm": 3.5805132389068604,
"learning_rate": 9.837696672180618e-05,
"loss": 4.2146,
"step": 45
},
{
"epoch": 0.26285714285714284,
"grad_norm": 3.7008020877838135,
"learning_rate": 9.825523265709666e-05,
"loss": 4.2144,
"step": 46
},
{
"epoch": 0.26857142857142857,
"grad_norm": 3.876337766647339,
"learning_rate": 9.812917778654748e-05,
"loss": 4.3116,
"step": 47
},
{
"epoch": 0.2742857142857143,
"grad_norm": 3.8221843242645264,
"learning_rate": 9.799881339719615e-05,
"loss": 3.788,
"step": 48
},
{
"epoch": 0.28,
"grad_norm": 3.6571123600006104,
"learning_rate": 9.786415116195732e-05,
"loss": 4.185,
"step": 49
},
{
"epoch": 0.2857142857142857,
"grad_norm": 3.0925450325012207,
"learning_rate": 9.772520313857775e-05,
"loss": 3.9539,
"step": 50
},
{
"epoch": 0.2914285714285714,
"grad_norm": 2.5549099445343018,
"learning_rate": 9.758198176855648e-05,
"loss": 3.8761,
"step": 51
},
{
"epoch": 0.29714285714285715,
"grad_norm": 3.2105484008789062,
"learning_rate": 9.743449987603083e-05,
"loss": 4.0286,
"step": 52
},
{
"epoch": 0.3028571428571429,
"grad_norm": 3.4165728092193604,
"learning_rate": 9.72827706666282e-05,
"loss": 3.9907,
"step": 53
},
{
"epoch": 0.30857142857142855,
"grad_norm": 3.0576295852661133,
"learning_rate": 9.712680772628364e-05,
"loss": 3.9951,
"step": 54
},
{
"epoch": 0.3142857142857143,
"grad_norm": 6.778007507324219,
"learning_rate": 9.69666250200232e-05,
"loss": 4.0592,
"step": 55
},
{
"epoch": 0.32,
"grad_norm": 5.007216453552246,
"learning_rate": 9.680223689071364e-05,
"loss": 4.1202,
"step": 56
},
{
"epoch": 0.32571428571428573,
"grad_norm": 4.383210182189941,
"learning_rate": 9.663365805777814e-05,
"loss": 4.1462,
"step": 57
},
{
"epoch": 0.3314285714285714,
"grad_norm": 3.4334166049957275,
"learning_rate": 9.646090361587827e-05,
"loss": 3.9832,
"step": 58
},
{
"epoch": 0.33714285714285713,
"grad_norm": 3.4360697269439697,
"learning_rate": 9.628398903356239e-05,
"loss": 3.9158,
"step": 59
},
{
"epoch": 0.34285714285714286,
"grad_norm": 2.9482150077819824,
"learning_rate": 9.610293015188067e-05,
"loss": 3.6752,
"step": 60
},
{
"epoch": 0.3485714285714286,
"grad_norm": 4.406130790710449,
"learning_rate": 9.591774318296661e-05,
"loss": 4.1023,
"step": 61
},
{
"epoch": 0.35428571428571426,
"grad_norm": 3.4706637859344482,
"learning_rate": 9.572844470858537e-05,
"loss": 3.9855,
"step": 62
},
{
"epoch": 0.36,
"grad_norm": 4.023850917816162,
"learning_rate": 9.553505167864908e-05,
"loss": 3.948,
"step": 63
},
{
"epoch": 0.3657142857142857,
"grad_norm": 2.9235880374908447,
"learning_rate": 9.533758140969912e-05,
"loss": 3.5046,
"step": 64
},
{
"epoch": 0.37142857142857144,
"grad_norm": 3.7228496074676514,
"learning_rate": 9.513605158335562e-05,
"loss": 3.6147,
"step": 65
},
{
"epoch": 0.37714285714285717,
"grad_norm": 3.482706308364868,
"learning_rate": 9.493048024473412e-05,
"loss": 3.5001,
"step": 66
},
{
"epoch": 0.38285714285714284,
"grad_norm": 4.889612674713135,
"learning_rate": 9.47208858008299e-05,
"loss": 3.941,
"step": 67
},
{
"epoch": 0.38857142857142857,
"grad_norm": 3.4616644382476807,
"learning_rate": 9.450728701886983e-05,
"loss": 3.7212,
"step": 68
},
{
"epoch": 0.3942857142857143,
"grad_norm": 3.7083253860473633,
"learning_rate": 9.428970302463185e-05,
"loss": 3.6287,
"step": 69
},
{
"epoch": 0.4,
"grad_norm": 3.255087375640869,
"learning_rate": 9.406815330073244e-05,
"loss": 3.7913,
"step": 70
},
{
"epoch": 0.4057142857142857,
"grad_norm": 3.504664897918701,
"learning_rate": 9.384265768488225e-05,
"loss": 3.892,
"step": 71
},
{
"epoch": 0.4114285714285714,
"grad_norm": 3.181671380996704,
"learning_rate": 9.36132363681097e-05,
"loss": 3.7464,
"step": 72
},
{
"epoch": 0.41714285714285715,
"grad_norm": 5.243124961853027,
"learning_rate": 9.337990989295306e-05,
"loss": 3.8959,
"step": 73
},
{
"epoch": 0.4228571428571429,
"grad_norm": 3.619094133377075,
"learning_rate": 9.314269915162114e-05,
"loss": 3.8042,
"step": 74
},
{
"epoch": 0.42857142857142855,
"grad_norm": 4.521401405334473,
"learning_rate": 9.290162538412256e-05,
"loss": 3.7167,
"step": 75
},
{
"epoch": 0.4342857142857143,
"grad_norm": 5.233442306518555,
"learning_rate": 9.265671017636383e-05,
"loss": 3.8335,
"step": 76
},
{
"epoch": 0.44,
"grad_norm": 4.076780796051025,
"learning_rate": 9.240797545821667e-05,
"loss": 3.7429,
"step": 77
},
{
"epoch": 0.44571428571428573,
"grad_norm": 3.2406275272369385,
"learning_rate": 9.215544350155422e-05,
"loss": 3.6701,
"step": 78
},
{
"epoch": 0.4514285714285714,
"grad_norm": 3.954871654510498,
"learning_rate": 9.1899136918257e-05,
"loss": 3.7933,
"step": 79
},
{
"epoch": 0.45714285714285713,
"grad_norm": 3.645928144454956,
"learning_rate": 9.163907865818806e-05,
"loss": 3.776,
"step": 80
},
{
"epoch": 0.46285714285714286,
"grad_norm": 2.8732380867004395,
"learning_rate": 9.13752920071381e-05,
"loss": 3.613,
"step": 81
},
{
"epoch": 0.4685714285714286,
"grad_norm": 2.751926898956299,
"learning_rate": 9.110780058474052e-05,
"loss": 3.5343,
"step": 82
},
{
"epoch": 0.4742857142857143,
"grad_norm": 3.1919469833374023,
"learning_rate": 9.08366283423563e-05,
"loss": 3.6823,
"step": 83
},
{
"epoch": 0.48,
"grad_norm": 3.450768232345581,
"learning_rate": 9.056179956092962e-05,
"loss": 3.6727,
"step": 84
},
{
"epoch": 0.4857142857142857,
"grad_norm": 4.474507808685303,
"learning_rate": 9.028333884881357e-05,
"loss": 3.6091,
"step": 85
},
{
"epoch": 0.49142857142857144,
"grad_norm": 4.046672344207764,
"learning_rate": 9.000127113956674e-05,
"loss": 3.6397,
"step": 86
},
{
"epoch": 0.49714285714285716,
"grad_norm": 3.351008176803589,
"learning_rate": 8.971562168972064e-05,
"loss": 3.6661,
"step": 87
},
{
"epoch": 0.5028571428571429,
"grad_norm": 4.021505355834961,
"learning_rate": 8.94264160765183e-05,
"loss": 3.5036,
"step": 88
},
{
"epoch": 0.5085714285714286,
"grad_norm": 2.808837413787842,
"learning_rate": 8.913368019562391e-05,
"loss": 3.4524,
"step": 89
},
{
"epoch": 0.5142857142857142,
"grad_norm": 2.7998015880584717,
"learning_rate": 8.883744025880428e-05,
"loss": 3.6086,
"step": 90
},
{
"epoch": 0.52,
"grad_norm": 2.3896753787994385,
"learning_rate": 8.853772279158166e-05,
"loss": 3.4425,
"step": 91
},
{
"epoch": 0.5257142857142857,
"grad_norm": 3.2213077545166016,
"learning_rate": 8.823455463085873e-05,
"loss": 3.5151,
"step": 92
},
{
"epoch": 0.5314285714285715,
"grad_norm": 4.131877422332764,
"learning_rate": 8.79279629225156e-05,
"loss": 3.6083,
"step": 93
},
{
"epoch": 0.5371428571428571,
"grad_norm": 3.334186553955078,
"learning_rate": 8.761797511897906e-05,
"loss": 3.488,
"step": 94
},
{
"epoch": 0.5428571428571428,
"grad_norm": 3.3978617191314697,
"learning_rate": 8.730461897676464e-05,
"loss": 3.7274,
"step": 95
},
{
"epoch": 0.5485714285714286,
"grad_norm": 3.625830888748169,
"learning_rate": 8.698792255399104e-05,
"loss": 3.6878,
"step": 96
},
{
"epoch": 0.5542857142857143,
"grad_norm": 3.0797600746154785,
"learning_rate": 8.666791420786803e-05,
"loss": 3.3222,
"step": 97
},
{
"epoch": 0.56,
"grad_norm": 3.047025442123413,
"learning_rate": 8.634462259215719e-05,
"loss": 3.6581,
"step": 98
},
{
"epoch": 0.5657142857142857,
"grad_norm": 3.139619827270508,
"learning_rate": 8.60180766546062e-05,
"loss": 3.655,
"step": 99
},
{
"epoch": 0.5714285714285714,
"grad_norm": 3.2921836376190186,
"learning_rate": 8.568830563435694e-05,
"loss": 3.5063,
"step": 100
},
{
"epoch": 0.5771428571428572,
"grad_norm": 3.49196195602417,
"learning_rate": 8.535533905932738e-05,
"loss": 3.5654,
"step": 101
},
{
"epoch": 0.5828571428571429,
"grad_norm": 3.840716600418091,
"learning_rate": 8.501920674356754e-05,
"loss": 3.4673,
"step": 102
},
{
"epoch": 0.5885714285714285,
"grad_norm": 2.9666836261749268,
"learning_rate": 8.467993878459004e-05,
"loss": 3.6019,
"step": 103
},
{
"epoch": 0.5942857142857143,
"grad_norm": 2.98416805267334,
"learning_rate": 8.433756556067506e-05,
"loss": 3.403,
"step": 104
},
{
"epoch": 0.6,
"grad_norm": 3.2488508224487305,
"learning_rate": 8.39921177281503e-05,
"loss": 3.4254,
"step": 105
},
{
"epoch": 0.6057142857142858,
"grad_norm": 4.834604263305664,
"learning_rate": 8.364362621864595e-05,
"loss": 3.6265,
"step": 106
},
{
"epoch": 0.6114285714285714,
"grad_norm": 3.29841947555542,
"learning_rate": 8.329212223632511e-05,
"loss": 3.5551,
"step": 107
},
{
"epoch": 0.6171428571428571,
"grad_norm": 3.66792631149292,
"learning_rate": 8.293763725508969e-05,
"loss": 3.5378,
"step": 108
},
{
"epoch": 0.6228571428571429,
"grad_norm": 3.1973721981048584,
"learning_rate": 8.258020301576224e-05,
"loss": 3.5083,
"step": 109
},
{
"epoch": 0.6285714285714286,
"grad_norm": 3.9324676990509033,
"learning_rate": 8.221985152324385e-05,
"loss": 3.4797,
"step": 110
},
{
"epoch": 0.6342857142857142,
"grad_norm": 3.004668951034546,
"learning_rate": 8.185661504364844e-05,
"loss": 3.5164,
"step": 111
},
{
"epoch": 0.64,
"grad_norm": 2.747321605682373,
"learning_rate": 8.149052610141357e-05,
"loss": 3.2127,
"step": 112
},
{
"epoch": 0.6457142857142857,
"grad_norm": 4.0046162605285645,
"learning_rate": 8.112161747638823e-05,
"loss": 3.5309,
"step": 113
},
{
"epoch": 0.6514285714285715,
"grad_norm": 2.192357063293457,
"learning_rate": 8.074992220089769e-05,
"loss": 3.0268,
"step": 114
},
{
"epoch": 0.6571428571428571,
"grad_norm": 3.4939377307891846,
"learning_rate": 8.037547355678577e-05,
"loss": 3.5529,
"step": 115
},
{
"epoch": 0.6628571428571428,
"grad_norm": 2.790357828140259,
"learning_rate": 7.999830507243478e-05,
"loss": 3.3849,
"step": 116
},
{
"epoch": 0.6685714285714286,
"grad_norm": 3.2865593433380127,
"learning_rate": 7.961845051976334e-05,
"loss": 3.4702,
"step": 117
},
{
"epoch": 0.6742857142857143,
"grad_norm": 3.31089186668396,
"learning_rate": 7.923594391120236e-05,
"loss": 3.4549,
"step": 118
},
{
"epoch": 0.68,
"grad_norm": 3.0583114624023438,
"learning_rate": 7.88508194966497e-05,
"loss": 3.496,
"step": 119
},
{
"epoch": 0.6857142857142857,
"grad_norm": 2.981785297393799,
"learning_rate": 7.846311176040331e-05,
"loss": 3.4234,
"step": 120
},
{
"epoch": 0.6914285714285714,
"grad_norm": 2.7434380054473877,
"learning_rate": 7.80728554180734e-05,
"loss": 3.3552,
"step": 121
},
{
"epoch": 0.6971428571428572,
"grad_norm": 9.255672454833984,
"learning_rate": 7.768008541347423e-05,
"loss": 3.5707,
"step": 122
},
{
"epoch": 0.7028571428571428,
"grad_norm": 3.796801805496216,
"learning_rate": 7.728483691549491e-05,
"loss": 3.4186,
"step": 123
},
{
"epoch": 0.7085714285714285,
"grad_norm": 2.910283327102661,
"learning_rate": 7.688714531495061e-05,
"loss": 3.3844,
"step": 124
},
{
"epoch": 0.7142857142857143,
"grad_norm": 2.9970083236694336,
"learning_rate": 7.648704622141347e-05,
"loss": 3.5279,
"step": 125
},
{
"epoch": 0.72,
"grad_norm": 2.8664133548736572,
"learning_rate": 7.608457546002424e-05,
"loss": 3.4568,
"step": 126
},
{
"epoch": 0.7257142857142858,
"grad_norm": 3.1223716735839844,
"learning_rate": 7.567976906828431e-05,
"loss": 3.2902,
"step": 127
},
{
"epoch": 0.7314285714285714,
"grad_norm": 2.87223482131958,
"learning_rate": 7.527266329282905e-05,
"loss": 3.2975,
"step": 128
},
{
"epoch": 0.7371428571428571,
"grad_norm": 2.5925962924957275,
"learning_rate": 7.486329458618215e-05,
"loss": 3.3436,
"step": 129
},
{
"epoch": 0.7428571428571429,
"grad_norm": 3.0222792625427246,
"learning_rate": 7.445169960349167e-05,
"loss": 3.4291,
"step": 130
},
{
"epoch": 0.7485714285714286,
"grad_norm": 3.4972734451293945,
"learning_rate": 7.403791519924794e-05,
"loss": 3.3198,
"step": 131
},
{
"epoch": 0.7542857142857143,
"grad_norm": 2.552716016769409,
"learning_rate": 7.362197842398355e-05,
"loss": 3.4851,
"step": 132
},
{
"epoch": 0.76,
"grad_norm": 3.033993721008301,
"learning_rate": 7.320392652095585e-05,
"loss": 3.2181,
"step": 133
},
{
"epoch": 0.7657142857142857,
"grad_norm": 3.41973876953125,
"learning_rate": 7.278379692281208e-05,
"loss": 3.319,
"step": 134
},
{
"epoch": 0.7714285714285715,
"grad_norm": 3.8678910732269287,
"learning_rate": 7.23616272482378e-05,
"loss": 3.4172,
"step": 135
},
{
"epoch": 0.7771428571428571,
"grad_norm": 2.930457830429077,
"learning_rate": 7.193745529858826e-05,
"loss": 3.3255,
"step": 136
},
{
"epoch": 0.7828571428571428,
"grad_norm": 2.9811675548553467,
"learning_rate": 7.151131905450386e-05,
"loss": 3.4066,
"step": 137
},
{
"epoch": 0.7885714285714286,
"grad_norm": 2.4796462059020996,
"learning_rate": 7.10832566725092e-05,
"loss": 3.4391,
"step": 138
},
{
"epoch": 0.7942857142857143,
"grad_norm": 3.213162899017334,
"learning_rate": 7.065330648159656e-05,
"loss": 3.2553,
"step": 139
},
{
"epoch": 0.8,
"grad_norm": 4.176576614379883,
"learning_rate": 7.022150697979384e-05,
"loss": 3.1612,
"step": 140
},
{
"epoch": 0.8057142857142857,
"grad_norm": 2.9058704376220703,
"learning_rate": 6.97878968307176e-05,
"loss": 3.2411,
"step": 141
},
{
"epoch": 0.8114285714285714,
"grad_norm": 2.4541189670562744,
"learning_rate": 6.935251486011087e-05,
"loss": 3.2884,
"step": 142
},
{
"epoch": 0.8171428571428572,
"grad_norm": 2.880113363265991,
"learning_rate": 6.891540005236675e-05,
"loss": 3.2428,
"step": 143
},
{
"epoch": 0.8228571428571428,
"grad_norm": 2.3897929191589355,
"learning_rate": 6.847659154703785e-05,
"loss": 3.3205,
"step": 144
},
{
"epoch": 0.8285714285714286,
"grad_norm": 2.817380905151367,
"learning_rate": 6.803612863533148e-05,
"loss": 3.1432,
"step": 145
},
{
"epoch": 0.8342857142857143,
"grad_norm": 2.5644307136535645,
"learning_rate": 6.759405075659166e-05,
"loss": 3.1011,
"step": 146
},
{
"epoch": 0.84,
"grad_norm": 3.0853381156921387,
"learning_rate": 6.715039749476763e-05,
"loss": 3.3309,
"step": 147
},
{
"epoch": 0.8457142857142858,
"grad_norm": 2.8046741485595703,
"learning_rate": 6.67052085748695e-05,
"loss": 3.2116,
"step": 148
},
{
"epoch": 0.8514285714285714,
"grad_norm": 2.470891237258911,
"learning_rate": 6.625852385941119e-05,
"loss": 3.013,
"step": 149
},
{
"epoch": 0.8571428571428571,
"grad_norm": 2.9108824729919434,
"learning_rate": 6.58103833448412e-05,
"loss": 3.3611,
"step": 150
},
{
"epoch": 0.8628571428571429,
"grad_norm": 2.715352773666382,
"learning_rate": 6.536082715796125e-05,
"loss": 3.2395,
"step": 151
},
{
"epoch": 0.8685714285714285,
"grad_norm": 3.0429553985595703,
"learning_rate": 6.490989555233327e-05,
"loss": 3.2163,
"step": 152
},
{
"epoch": 0.8742857142857143,
"grad_norm": 3.281540870666504,
"learning_rate": 6.445762890467517e-05,
"loss": 3.2273,
"step": 153
},
{
"epoch": 0.88,
"grad_norm": 2.3502442836761475,
"learning_rate": 6.400406771124536e-05,
"loss": 3.1729,
"step": 154
},
{
"epoch": 0.8857142857142857,
"grad_norm": 2.791484832763672,
"learning_rate": 6.354925258421675e-05,
"loss": 3.1035,
"step": 155
},
{
"epoch": 0.8914285714285715,
"grad_norm": 2.831284523010254,
"learning_rate": 6.309322424804034e-05,
"loss": 3.2658,
"step": 156
},
{
"epoch": 0.8971428571428571,
"grad_norm": 2.340580940246582,
"learning_rate": 6.263602353579868e-05,
"loss": 3.186,
"step": 157
},
{
"epoch": 0.9028571428571428,
"grad_norm": 2.8199057579040527,
"learning_rate": 6.21776913855496e-05,
"loss": 3.0878,
"step": 158
},
{
"epoch": 0.9085714285714286,
"grad_norm": 2.6800119876861572,
"learning_rate": 6.171826883666074e-05,
"loss": 3.2225,
"step": 159
},
{
"epoch": 0.9142857142857143,
"grad_norm": 3.132681369781494,
"learning_rate": 6.125779702613471e-05,
"loss": 3.0903,
"step": 160
},
{
"epoch": 0.92,
"grad_norm": 3.0272161960601807,
"learning_rate": 6.079631718492569e-05,
"loss": 3.2235,
"step": 161
},
{
"epoch": 0.9257142857142857,
"grad_norm": 2.942263603210449,
"learning_rate": 6.0333870634247645e-05,
"loss": 3.2786,
"step": 162
},
{
"epoch": 0.9314285714285714,
"grad_norm": 2.588752269744873,
"learning_rate": 5.9870498781874365e-05,
"loss": 3.1267,
"step": 163
},
{
"epoch": 0.9371428571428572,
"grad_norm": 2.386512279510498,
"learning_rate": 5.940624311843169e-05,
"loss": 3.1636,
"step": 164
},
{
"epoch": 0.9428571428571428,
"grad_norm": 2.2246899604797363,
"learning_rate": 5.8941145213682594e-05,
"loss": 3.1075,
"step": 165
},
{
"epoch": 0.9485714285714286,
"grad_norm": 2.6086435317993164,
"learning_rate": 5.847524671280484e-05,
"loss": 3.2346,
"step": 166
},
{
"epoch": 0.9542857142857143,
"grad_norm": 2.297578811645508,
"learning_rate": 5.8008589332662136e-05,
"loss": 3.1868,
"step": 167
},
{
"epoch": 0.96,
"grad_norm": 2.551158905029297,
"learning_rate": 5.7541214858068705e-05,
"loss": 3.0412,
"step": 168
},
{
"epoch": 0.9657142857142857,
"grad_norm": 2.5748233795166016,
"learning_rate": 5.7073165138047924e-05,
"loss": 3.0838,
"step": 169
},
{
"epoch": 0.9714285714285714,
"grad_norm": 2.5943634510040283,
"learning_rate": 5.660448208208513e-05,
"loss": 3.0985,
"step": 170
},
{
"epoch": 0.9771428571428571,
"grad_norm": 2.3920516967773438,
"learning_rate": 5.613520765637489e-05,
"loss": 3.1383,
"step": 171
},
{
"epoch": 0.9828571428571429,
"grad_norm": 2.8342676162719727,
"learning_rate": 5.56653838800635e-05,
"loss": 3.0972,
"step": 172
},
{
"epoch": 0.9885714285714285,
"grad_norm": 2.9993340969085693,
"learning_rate": 5.519505282148644e-05,
"loss": 3.0928,
"step": 173
},
{
"epoch": 0.9942857142857143,
"grad_norm": 2.4510772228240967,
"learning_rate": 5.472425659440157e-05,
"loss": 3.0435,
"step": 174
},
{
"epoch": 1.0,
"grad_norm": 2.8540945053100586,
"learning_rate": 5.425303735421828e-05,
"loss": 2.9636,
"step": 175
}
],
"logging_steps": 1,
"max_steps": 350,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 175,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.9559171544252416e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}