{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.5,
"eval_steps": 500,
"global_step": 375,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004,
"grad_norm": 2.7120276745865266,
"learning_rate": 1.2500000000000002e-07,
"loss": 0.9478,
"step": 1
},
{
"epoch": 0.008,
"grad_norm": 2.5535110233770237,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.962,
"step": 2
},
{
"epoch": 0.012,
"grad_norm": 2.6421564999361533,
"learning_rate": 3.75e-07,
"loss": 0.9483,
"step": 3
},
{
"epoch": 0.016,
"grad_norm": 2.5842713407482596,
"learning_rate": 5.000000000000001e-07,
"loss": 0.9484,
"step": 4
},
{
"epoch": 0.02,
"grad_norm": 2.5281267639976615,
"learning_rate": 6.25e-07,
"loss": 0.9512,
"step": 5
},
{
"epoch": 0.024,
"grad_norm": 2.55003561639048,
"learning_rate": 7.5e-07,
"loss": 0.9185,
"step": 6
},
{
"epoch": 0.028,
"grad_norm": 2.5014092783190947,
"learning_rate": 8.75e-07,
"loss": 0.9411,
"step": 7
},
{
"epoch": 0.032,
"grad_norm": 2.5247715994428046,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.9474,
"step": 8
},
{
"epoch": 0.036,
"grad_norm": 2.564090844776098,
"learning_rate": 1.125e-06,
"loss": 0.9246,
"step": 9
},
{
"epoch": 0.04,
"grad_norm": 2.5258707026382154,
"learning_rate": 1.25e-06,
"loss": 0.9078,
"step": 10
},
{
"epoch": 0.044,
"grad_norm": 2.5471156175078464,
"learning_rate": 1.3750000000000002e-06,
"loss": 0.9134,
"step": 11
},
{
"epoch": 0.048,
"grad_norm": 2.7125301372042467,
"learning_rate": 1.5e-06,
"loss": 0.8776,
"step": 12
},
{
"epoch": 0.052,
"grad_norm": 2.150392048325219,
"learning_rate": 1.6250000000000001e-06,
"loss": 0.8549,
"step": 13
},
{
"epoch": 0.056,
"grad_norm": 1.8184980673856375,
"learning_rate": 1.75e-06,
"loss": 0.8169,
"step": 14
},
{
"epoch": 0.06,
"grad_norm": 1.7529184087805771,
"learning_rate": 1.8750000000000003e-06,
"loss": 0.7975,
"step": 15
},
{
"epoch": 0.064,
"grad_norm": 1.7474840928555682,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.8204,
"step": 16
},
{
"epoch": 0.068,
"grad_norm": 1.522251241150409,
"learning_rate": 2.125e-06,
"loss": 0.8301,
"step": 17
},
{
"epoch": 0.072,
"grad_norm": 0.9782588646220091,
"learning_rate": 2.25e-06,
"loss": 0.7839,
"step": 18
},
{
"epoch": 0.076,
"grad_norm": 0.975162832649703,
"learning_rate": 2.375e-06,
"loss": 0.7876,
"step": 19
},
{
"epoch": 0.08,
"grad_norm": 0.8952616849532462,
"learning_rate": 2.5e-06,
"loss": 0.7589,
"step": 20
},
{
"epoch": 0.084,
"grad_norm": 0.8469187411478404,
"learning_rate": 2.6250000000000003e-06,
"loss": 0.7567,
"step": 21
},
{
"epoch": 0.088,
"grad_norm": 0.7683035597272257,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.7598,
"step": 22
},
{
"epoch": 0.092,
"grad_norm": 0.6779719620181055,
"learning_rate": 2.875e-06,
"loss": 0.7431,
"step": 23
},
{
"epoch": 0.096,
"grad_norm": 0.5998651036445499,
"learning_rate": 3e-06,
"loss": 0.7348,
"step": 24
},
{
"epoch": 0.1,
"grad_norm": 0.6798779251188986,
"learning_rate": 3.125e-06,
"loss": 0.7271,
"step": 25
},
{
"epoch": 0.104,
"grad_norm": 0.7459028436934305,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.7011,
"step": 26
},
{
"epoch": 0.108,
"grad_norm": 0.7661666147630528,
"learning_rate": 3.3750000000000003e-06,
"loss": 0.7209,
"step": 27
},
{
"epoch": 0.112,
"grad_norm": 0.7109730987249342,
"learning_rate": 3.5e-06,
"loss": 0.7118,
"step": 28
},
{
"epoch": 0.116,
"grad_norm": 0.6778217985942258,
"learning_rate": 3.625e-06,
"loss": 0.7295,
"step": 29
},
{
"epoch": 0.12,
"grad_norm": 0.6073429084137083,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.7114,
"step": 30
},
{
"epoch": 0.124,
"grad_norm": 0.5325413632521706,
"learning_rate": 3.875e-06,
"loss": 0.7117,
"step": 31
},
{
"epoch": 0.128,
"grad_norm": 0.5170445125401208,
"learning_rate": 4.000000000000001e-06,
"loss": 0.7132,
"step": 32
},
{
"epoch": 0.132,
"grad_norm": 0.47459002896590347,
"learning_rate": 4.125e-06,
"loss": 0.6913,
"step": 33
},
{
"epoch": 0.136,
"grad_norm": 0.46611890152884594,
"learning_rate": 4.25e-06,
"loss": 0.6893,
"step": 34
},
{
"epoch": 0.14,
"grad_norm": 0.5282002224942279,
"learning_rate": 4.3750000000000005e-06,
"loss": 0.7059,
"step": 35
},
{
"epoch": 0.144,
"grad_norm": 0.516098581069478,
"learning_rate": 4.5e-06,
"loss": 0.7068,
"step": 36
},
{
"epoch": 0.148,
"grad_norm": 0.4946613953020982,
"learning_rate": 4.625000000000001e-06,
"loss": 0.6854,
"step": 37
},
{
"epoch": 0.152,
"grad_norm": 0.45539519616935026,
"learning_rate": 4.75e-06,
"loss": 0.689,
"step": 38
},
{
"epoch": 0.156,
"grad_norm": 0.43098619176364883,
"learning_rate": 4.875e-06,
"loss": 0.6904,
"step": 39
},
{
"epoch": 0.16,
"grad_norm": 0.42693567901762375,
"learning_rate": 5e-06,
"loss": 0.6713,
"step": 40
},
{
"epoch": 0.164,
"grad_norm": 0.42916236211888303,
"learning_rate": 4.999941696797974e-06,
"loss": 0.6929,
"step": 41
},
{
"epoch": 0.168,
"grad_norm": 0.46320958381108696,
"learning_rate": 4.9997667899113055e-06,
"loss": 0.6601,
"step": 42
},
{
"epoch": 0.172,
"grad_norm": 0.4650598350234117,
"learning_rate": 4.9994752874981e-06,
"loss": 0.6627,
"step": 43
},
{
"epoch": 0.176,
"grad_norm": 0.4899967026584299,
"learning_rate": 4.999067203154777e-06,
"loss": 0.6583,
"step": 44
},
{
"epoch": 0.18,
"grad_norm": 0.4562804538758281,
"learning_rate": 4.998542555915435e-06,
"loss": 0.6712,
"step": 45
},
{
"epoch": 0.184,
"grad_norm": 0.36547047335003885,
"learning_rate": 4.997901370250966e-06,
"loss": 0.6495,
"step": 46
},
{
"epoch": 0.188,
"grad_norm": 0.37624066260466155,
"learning_rate": 4.997143676067913e-06,
"loss": 0.6703,
"step": 47
},
{
"epoch": 0.192,
"grad_norm": 0.36653954523845933,
"learning_rate": 4.99626950870707e-06,
"loss": 0.6781,
"step": 48
},
{
"epoch": 0.196,
"grad_norm": 0.3477765639450634,
"learning_rate": 4.995278908941845e-06,
"loss": 0.6745,
"step": 49
},
{
"epoch": 0.2,
"grad_norm": 0.3342296714847872,
"learning_rate": 4.994171922976349e-06,
"loss": 0.6771,
"step": 50
},
{
"epoch": 0.204,
"grad_norm": 0.37958792690250714,
"learning_rate": 4.9929486024432405e-06,
"loss": 0.6546,
"step": 51
},
{
"epoch": 0.208,
"grad_norm": 0.40870653964939113,
"learning_rate": 4.991609004401324e-06,
"loss": 0.6528,
"step": 52
},
{
"epoch": 0.212,
"grad_norm": 0.364192633459814,
"learning_rate": 4.990153191332885e-06,
"loss": 0.6458,
"step": 53
},
{
"epoch": 0.216,
"grad_norm": 0.30668416089546924,
"learning_rate": 4.988581231140772e-06,
"loss": 0.672,
"step": 54
},
{
"epoch": 0.22,
"grad_norm": 0.33255921507240616,
"learning_rate": 4.986893197145238e-06,
"loss": 0.6575,
"step": 55
},
{
"epoch": 0.224,
"grad_norm": 0.312428536241603,
"learning_rate": 4.985089168080509e-06,
"loss": 0.6629,
"step": 56
},
{
"epoch": 0.228,
"grad_norm": 0.2961748326875195,
"learning_rate": 4.983169228091125e-06,
"loss": 0.6514,
"step": 57
},
{
"epoch": 0.232,
"grad_norm": 0.30513153025857964,
"learning_rate": 4.981133466728004e-06,
"loss": 0.6547,
"step": 58
},
{
"epoch": 0.236,
"grad_norm": 0.3274000044829569,
"learning_rate": 4.978981978944271e-06,
"loss": 0.6514,
"step": 59
},
{
"epoch": 0.24,
"grad_norm": 0.3083977406610579,
"learning_rate": 4.976714865090827e-06,
"loss": 0.6422,
"step": 60
},
{
"epoch": 0.244,
"grad_norm": 0.3052292638265554,
"learning_rate": 4.97433223091167e-06,
"loss": 0.6355,
"step": 61
},
{
"epoch": 0.248,
"grad_norm": 0.2959593176715387,
"learning_rate": 4.971834187538963e-06,
"loss": 0.6551,
"step": 62
},
{
"epoch": 0.252,
"grad_norm": 0.29042322618949995,
"learning_rate": 4.9692208514878445e-06,
"loss": 0.6581,
"step": 63
},
{
"epoch": 0.256,
"grad_norm": 0.3149031676109748,
"learning_rate": 4.966492344651006e-06,
"loss": 0.6594,
"step": 64
},
{
"epoch": 0.26,
"grad_norm": 0.2874450280533471,
"learning_rate": 4.963648794292992e-06,
"loss": 0.6488,
"step": 65
},
{
"epoch": 0.264,
"grad_norm": 0.27325192501172824,
"learning_rate": 4.960690333044279e-06,
"loss": 0.6351,
"step": 66
},
{
"epoch": 0.268,
"grad_norm": 0.2817944445002008,
"learning_rate": 4.957617098895076e-06,
"loss": 0.6265,
"step": 67
},
{
"epoch": 0.272,
"grad_norm": 0.281980621240097,
"learning_rate": 4.954429235188897e-06,
"loss": 0.6432,
"step": 68
},
{
"epoch": 0.276,
"grad_norm": 0.2962526841715817,
"learning_rate": 4.951126890615871e-06,
"loss": 0.6354,
"step": 69
},
{
"epoch": 0.28,
"grad_norm": 0.28005347365677186,
"learning_rate": 4.947710219205808e-06,
"loss": 0.6548,
"step": 70
},
{
"epoch": 0.284,
"grad_norm": 0.26960325063378,
"learning_rate": 4.944179380321015e-06,
"loss": 0.6125,
"step": 71
},
{
"epoch": 0.288,
"grad_norm": 0.2848666105192882,
"learning_rate": 4.940534538648862e-06,
"loss": 0.6078,
"step": 72
},
{
"epoch": 0.292,
"grad_norm": 0.276353304899713,
"learning_rate": 4.936775864194101e-06,
"loss": 0.6524,
"step": 73
},
{
"epoch": 0.296,
"grad_norm": 0.27790715839174,
"learning_rate": 4.932903532270939e-06,
"loss": 0.6275,
"step": 74
},
{
"epoch": 0.3,
"grad_norm": 0.2636499704034946,
"learning_rate": 4.928917723494854e-06,
"loss": 0.6422,
"step": 75
},
{
"epoch": 0.304,
"grad_norm": 0.28133008746598603,
"learning_rate": 4.924818623774178e-06,
"loss": 0.6355,
"step": 76
},
{
"epoch": 0.308,
"grad_norm": 0.2779410264901997,
"learning_rate": 4.920606424301424e-06,
"loss": 0.6408,
"step": 77
},
{
"epoch": 0.312,
"grad_norm": 0.2897057665750394,
"learning_rate": 4.916281321544362e-06,
"loss": 0.6402,
"step": 78
},
{
"epoch": 0.316,
"grad_norm": 0.2761996704216673,
"learning_rate": 4.911843517236867e-06,
"loss": 0.653,
"step": 79
},
{
"epoch": 0.32,
"grad_norm": 0.29413202875425243,
"learning_rate": 4.907293218369499e-06,
"loss": 0.6298,
"step": 80
},
{
"epoch": 0.324,
"grad_norm": 0.2861680279307969,
"learning_rate": 4.9026306371798526e-06,
"loss": 0.6553,
"step": 81
},
{
"epoch": 0.328,
"grad_norm": 0.2848851913581759,
"learning_rate": 4.897855991142658e-06,
"loss": 0.6076,
"step": 82
},
{
"epoch": 0.332,
"grad_norm": 0.2687848093353044,
"learning_rate": 4.892969502959639e-06,
"loss": 0.6311,
"step": 83
},
{
"epoch": 0.336,
"grad_norm": 0.27061397240144314,
"learning_rate": 4.8879714005491205e-06,
"loss": 0.6148,
"step": 84
},
{
"epoch": 0.34,
"grad_norm": 0.2663532374960906,
"learning_rate": 4.882861917035403e-06,
"loss": 0.6412,
"step": 85
},
{
"epoch": 0.344,
"grad_norm": 0.27752950332016424,
"learning_rate": 4.8776412907378845e-06,
"loss": 0.642,
"step": 86
},
{
"epoch": 0.348,
"grad_norm": 0.2772792789989426,
"learning_rate": 4.87230976515995e-06,
"loss": 0.6184,
"step": 87
},
{
"epoch": 0.352,
"grad_norm": 0.29699617925202587,
"learning_rate": 4.8668675889776095e-06,
"loss": 0.6275,
"step": 88
},
{
"epoch": 0.356,
"grad_norm": 0.2736517750983177,
"learning_rate": 4.861315016027902e-06,
"loss": 0.6347,
"step": 89
},
{
"epoch": 0.36,
"grad_norm": 0.2871346395772671,
"learning_rate": 4.855652305297052e-06,
"loss": 0.6132,
"step": 90
},
{
"epoch": 0.364,
"grad_norm": 0.27131438304958966,
"learning_rate": 4.849879720908394e-06,
"loss": 0.6026,
"step": 91
},
{
"epoch": 0.368,
"grad_norm": 0.2701400539855458,
"learning_rate": 4.843997532110051e-06,
"loss": 0.6385,
"step": 92
},
{
"epoch": 0.372,
"grad_norm": 0.2870430498497826,
"learning_rate": 4.8380060132623776e-06,
"loss": 0.624,
"step": 93
},
{
"epoch": 0.376,
"grad_norm": 0.2889305346509075,
"learning_rate": 4.83190544382516e-06,
"loss": 0.6375,
"step": 94
},
{
"epoch": 0.38,
"grad_norm": 0.2832554152572974,
"learning_rate": 4.825696108344583e-06,
"loss": 0.6348,
"step": 95
},
{
"epoch": 0.384,
"grad_norm": 0.282963454209734,
"learning_rate": 4.819378296439962e-06,
"loss": 0.6425,
"step": 96
},
{
"epoch": 0.388,
"grad_norm": 0.28136329883499284,
"learning_rate": 4.812952302790226e-06,
"loss": 0.6238,
"step": 97
},
{
"epoch": 0.392,
"grad_norm": 0.2751896445755537,
"learning_rate": 4.80641842712018e-06,
"loss": 0.6453,
"step": 98
},
{
"epoch": 0.396,
"grad_norm": 0.2802941094985609,
"learning_rate": 4.799776974186523e-06,
"loss": 0.6362,
"step": 99
},
{
"epoch": 0.4,
"grad_norm": 0.2816828378634338,
"learning_rate": 4.793028253763633e-06,
"loss": 0.6394,
"step": 100
},
{
"epoch": 0.404,
"grad_norm": 0.28942806582562414,
"learning_rate": 4.786172580629118e-06,
"loss": 0.6106,
"step": 101
},
{
"epoch": 0.408,
"grad_norm": 0.28396225609673553,
"learning_rate": 4.7792102745491345e-06,
"loss": 0.6302,
"step": 102
},
{
"epoch": 0.412,
"grad_norm": 0.269131748983874,
"learning_rate": 4.772141660263472e-06,
"loss": 0.6247,
"step": 103
},
{
"epoch": 0.416,
"grad_norm": 0.28932510717249166,
"learning_rate": 4.764967067470409e-06,
"loss": 0.6201,
"step": 104
},
{
"epoch": 0.42,
"grad_norm": 0.2734040302258933,
"learning_rate": 4.757686830811332e-06,
"loss": 0.6092,
"step": 105
},
{
"epoch": 0.424,
"grad_norm": 0.28628481564556507,
"learning_rate": 4.750301289855128e-06,
"loss": 0.6284,
"step": 106
},
{
"epoch": 0.428,
"grad_norm": 0.30932729909286755,
"learning_rate": 4.742810789082345e-06,
"loss": 0.6332,
"step": 107
},
{
"epoch": 0.432,
"grad_norm": 0.2707515621211506,
"learning_rate": 4.735215677869129e-06,
"loss": 0.6107,
"step": 108
},
{
"epoch": 0.436,
"grad_norm": 0.29859925040242785,
"learning_rate": 4.72751631047092e-06,
"loss": 0.6477,
"step": 109
},
{
"epoch": 0.44,
"grad_norm": 0.2974079849667466,
"learning_rate": 4.7197130460059385e-06,
"loss": 0.632,
"step": 110
},
{
"epoch": 0.444,
"grad_norm": 0.28109309454092835,
"learning_rate": 4.711806248438428e-06,
"loss": 0.6308,
"step": 111
},
{
"epoch": 0.448,
"grad_norm": 0.287627270908265,
"learning_rate": 4.7037962865616795e-06,
"loss": 0.6322,
"step": 112
},
{
"epoch": 0.452,
"grad_norm": 0.2859506113795605,
"learning_rate": 4.695683533980835e-06,
"loss": 0.6196,
"step": 113
},
{
"epoch": 0.456,
"grad_norm": 0.27768505471724575,
"learning_rate": 4.687468369095457e-06,
"loss": 0.6107,
"step": 114
},
{
"epoch": 0.46,
"grad_norm": 0.2985912133376052,
"learning_rate": 4.679151175081879e-06,
"loss": 0.6316,
"step": 115
},
{
"epoch": 0.464,
"grad_norm": 0.27790254502289174,
"learning_rate": 4.6707323398753346e-06,
"loss": 0.6194,
"step": 116
},
{
"epoch": 0.468,
"grad_norm": 0.2770496916475714,
"learning_rate": 4.662212256151865e-06,
"loss": 0.5938,
"step": 117
},
{
"epoch": 0.472,
"grad_norm": 0.2750067208531331,
"learning_rate": 4.6535913213100005e-06,
"loss": 0.6125,
"step": 118
},
{
"epoch": 0.476,
"grad_norm": 0.2640533408637943,
"learning_rate": 4.644869937452224e-06,
"loss": 0.6245,
"step": 119
},
{
"epoch": 0.48,
"grad_norm": 0.27591929390872805,
"learning_rate": 4.636048511366222e-06,
"loss": 0.6186,
"step": 120
},
{
"epoch": 0.484,
"grad_norm": 0.29643778935625803,
"learning_rate": 4.627127454505902e-06,
"loss": 0.6086,
"step": 121
},
{
"epoch": 0.488,
"grad_norm": 0.28076149524670235,
"learning_rate": 4.618107182972209e-06,
"loss": 0.6158,
"step": 122
},
{
"epoch": 0.492,
"grad_norm": 0.28214439623978305,
"learning_rate": 4.6089881174937146e-06,
"loss": 0.6299,
"step": 123
},
{
"epoch": 0.496,
"grad_norm": 0.2885430172419291,
"learning_rate": 4.599770683406992e-06,
"loss": 0.6367,
"step": 124
},
{
"epoch": 0.5,
"grad_norm": 0.28978271139767015,
"learning_rate": 4.590455310636778e-06,
"loss": 0.6248,
"step": 125
},
{
"epoch": 0.504,
"grad_norm": 0.2763777911865909,
"learning_rate": 4.58104243367592e-06,
"loss": 0.6235,
"step": 126
},
{
"epoch": 0.508,
"grad_norm": 0.2788581607213461,
"learning_rate": 4.571532491565115e-06,
"loss": 0.6201,
"step": 127
},
{
"epoch": 0.512,
"grad_norm": 0.28522419436506885,
"learning_rate": 4.561925927872421e-06,
"loss": 0.6287,
"step": 128
},
{
"epoch": 0.516,
"grad_norm": 0.2759422238312871,
"learning_rate": 4.55222319067258e-06,
"loss": 0.6105,
"step": 129
},
{
"epoch": 0.52,
"grad_norm": 0.271521828303117,
"learning_rate": 4.542424732526105e-06,
"loss": 0.6004,
"step": 130
},
{
"epoch": 0.524,
"grad_norm": 0.2832768486502443,
"learning_rate": 4.532531010458188e-06,
"loss": 0.6438,
"step": 131
},
{
"epoch": 0.528,
"grad_norm": 0.28545986352466657,
"learning_rate": 4.522542485937369e-06,
"loss": 0.6147,
"step": 132
},
{
"epoch": 0.532,
"grad_norm": 0.2843650568512383,
"learning_rate": 4.512459624854017e-06,
"loss": 0.6347,
"step": 133
},
{
"epoch": 0.536,
"grad_norm": 0.2758779923686556,
"learning_rate": 4.5022828974986044e-06,
"loss": 0.6111,
"step": 134
},
{
"epoch": 0.54,
"grad_norm": 0.28471240078326554,
"learning_rate": 4.4920127785397615e-06,
"loss": 0.6161,
"step": 135
},
{
"epoch": 0.544,
"grad_norm": 0.27215538114487603,
"learning_rate": 4.481649747002146e-06,
"loss": 0.6019,
"step": 136
},
{
"epoch": 0.548,
"grad_norm": 0.27161590017753495,
"learning_rate": 4.471194286244094e-06,
"loss": 0.6229,
"step": 137
},
{
"epoch": 0.552,
"grad_norm": 0.2786884282741861,
"learning_rate": 4.460646883935079e-06,
"loss": 0.6217,
"step": 138
},
{
"epoch": 0.556,
"grad_norm": 0.29095908793086706,
"learning_rate": 4.4500080320329615e-06,
"loss": 0.6212,
"step": 139
},
{
"epoch": 0.56,
"grad_norm": 0.2797512942233689,
"learning_rate": 4.43927822676105e-06,
"loss": 0.6183,
"step": 140
},
{
"epoch": 0.564,
"grad_norm": 0.2701904530059608,
"learning_rate": 4.428457968584945e-06,
"loss": 0.6067,
"step": 141
},
{
"epoch": 0.568,
"grad_norm": 0.2924071263588622,
"learning_rate": 4.417547762189207e-06,
"loss": 0.6167,
"step": 142
},
{
"epoch": 0.572,
"grad_norm": 0.2684300131690406,
"learning_rate": 4.40654811645381e-06,
"loss": 0.6185,
"step": 143
},
{
"epoch": 0.576,
"grad_norm": 0.2774759359262972,
"learning_rate": 4.395459544430407e-06,
"loss": 0.602,
"step": 144
},
{
"epoch": 0.58,
"grad_norm": 0.2808643430953345,
"learning_rate": 4.384282563318403e-06,
"loss": 0.598,
"step": 145
},
{
"epoch": 0.584,
"grad_norm": 0.27015365579319356,
"learning_rate": 4.373017694440828e-06,
"loss": 0.5857,
"step": 146
},
{
"epoch": 0.588,
"grad_norm": 0.2856861787094523,
"learning_rate": 4.361665463220023e-06,
"loss": 0.6206,
"step": 147
},
{
"epoch": 0.592,
"grad_norm": 0.28199517014381215,
"learning_rate": 4.35022639915313e-06,
"loss": 0.6094,
"step": 148
},
{
"epoch": 0.596,
"grad_norm": 0.27010314199532126,
"learning_rate": 4.338701035787403e-06,
"loss": 0.5947,
"step": 149
},
{
"epoch": 0.6,
"grad_norm": 0.2601892508343049,
"learning_rate": 4.32708991069531e-06,
"loss": 0.5871,
"step": 150
},
{
"epoch": 0.604,
"grad_norm": 0.27825064672033506,
"learning_rate": 4.315393565449472e-06,
"loss": 0.6093,
"step": 151
},
{
"epoch": 0.608,
"grad_norm": 0.27958608277896724,
"learning_rate": 4.30361254559739e-06,
"loss": 0.5951,
"step": 152
},
{
"epoch": 0.612,
"grad_norm": 0.2758779818206466,
"learning_rate": 4.291747400636009e-06,
"loss": 0.6062,
"step": 153
},
{
"epoch": 0.616,
"grad_norm": 0.29572303953208817,
"learning_rate": 4.279798683986084e-06,
"loss": 0.605,
"step": 154
},
{
"epoch": 0.62,
"grad_norm": 0.28194612739384267,
"learning_rate": 4.267766952966369e-06,
"loss": 0.6078,
"step": 155
},
{
"epoch": 0.624,
"grad_norm": 0.28615376291544004,
"learning_rate": 4.255652768767619e-06,
"loss": 0.6319,
"step": 156
},
{
"epoch": 0.628,
"grad_norm": 0.26959219285273633,
"learning_rate": 4.243456696426415e-06,
"loss": 0.5968,
"step": 157
},
{
"epoch": 0.632,
"grad_norm": 0.27878753771339543,
"learning_rate": 4.2311793047988145e-06,
"loss": 0.6214,
"step": 158
},
{
"epoch": 0.636,
"grad_norm": 0.2779844282953486,
"learning_rate": 4.218821166533813e-06,
"loss": 0.5964,
"step": 159
},
{
"epoch": 0.64,
"grad_norm": 0.2767287929857217,
"learning_rate": 4.206382858046636e-06,
"loss": 0.6187,
"step": 160
},
{
"epoch": 0.644,
"grad_norm": 0.2652936251998452,
"learning_rate": 4.193864959491853e-06,
"loss": 0.5897,
"step": 161
},
{
"epoch": 0.648,
"grad_norm": 0.26227694980471933,
"learning_rate": 4.181268054736319e-06,
"loss": 0.6107,
"step": 162
},
{
"epoch": 0.652,
"grad_norm": 0.2690441499487734,
"learning_rate": 4.16859273133194e-06,
"loss": 0.6012,
"step": 163
},
{
"epoch": 0.656,
"grad_norm": 0.26934906424793176,
"learning_rate": 4.15583958048827e-06,
"loss": 0.6086,
"step": 164
},
{
"epoch": 0.66,
"grad_norm": 0.2727839454931186,
"learning_rate": 4.143009197044932e-06,
"loss": 0.6156,
"step": 165
},
{
"epoch": 0.664,
"grad_norm": 0.2767715664709689,
"learning_rate": 4.130102179443877e-06,
"loss": 0.607,
"step": 166
},
{
"epoch": 0.668,
"grad_norm": 0.27462389864805775,
"learning_rate": 4.117119129701468e-06,
"loss": 0.598,
"step": 167
},
{
"epoch": 0.672,
"grad_norm": 0.28124958800487015,
"learning_rate": 4.104060653380403e-06,
"loss": 0.6174,
"step": 168
},
{
"epoch": 0.676,
"grad_norm": 0.26867080247614167,
"learning_rate": 4.090927359561469e-06,
"loss": 0.6222,
"step": 169
},
{
"epoch": 0.68,
"grad_norm": 0.27329020109654967,
"learning_rate": 4.077719860815132e-06,
"loss": 0.6174,
"step": 170
},
{
"epoch": 0.684,
"grad_norm": 0.2598239429892548,
"learning_rate": 4.064438773172966e-06,
"loss": 0.5949,
"step": 171
},
{
"epoch": 0.688,
"grad_norm": 0.26610910407219807,
"learning_rate": 4.051084716098921e-06,
"loss": 0.5876,
"step": 172
},
{
"epoch": 0.692,
"grad_norm": 0.28832248653224085,
"learning_rate": 4.037658312460424e-06,
"loss": 0.6038,
"step": 173
},
{
"epoch": 0.696,
"grad_norm": 0.27995126745782395,
"learning_rate": 4.024160188499337e-06,
"loss": 0.6024,
"step": 174
},
{
"epoch": 0.7,
"grad_norm": 0.2708024464743442,
"learning_rate": 4.010590973802737e-06,
"loss": 0.6166,
"step": 175
},
{
"epoch": 0.704,
"grad_norm": 0.27379773644645394,
"learning_rate": 3.996951301273556e-06,
"loss": 0.6172,
"step": 176
},
{
"epoch": 0.708,
"grad_norm": 0.2704403624062539,
"learning_rate": 3.983241807101064e-06,
"loss": 0.5848,
"step": 177
},
{
"epoch": 0.712,
"grad_norm": 0.26367325554187204,
"learning_rate": 3.969463130731183e-06,
"loss": 0.6084,
"step": 178
},
{
"epoch": 0.716,
"grad_norm": 0.2714449492216179,
"learning_rate": 3.955615914836678e-06,
"loss": 0.6067,
"step": 179
},
{
"epoch": 0.72,
"grad_norm": 0.27396192782433526,
"learning_rate": 3.941700805287169e-06,
"loss": 0.6049,
"step": 180
},
{
"epoch": 0.724,
"grad_norm": 0.2712108680127688,
"learning_rate": 3.927718451119009e-06,
"loss": 0.5981,
"step": 181
},
{
"epoch": 0.728,
"grad_norm": 0.27016877733602884,
"learning_rate": 3.913669504505015e-06,
"loss": 0.6148,
"step": 182
},
{
"epoch": 0.732,
"grad_norm": 0.2986908827790219,
"learning_rate": 3.8995546207240455e-06,
"loss": 0.6293,
"step": 183
},
{
"epoch": 0.736,
"grad_norm": 0.27281610268420575,
"learning_rate": 3.8853744581304376e-06,
"loss": 0.5937,
"step": 184
},
{
"epoch": 0.74,
"grad_norm": 0.28387181164952147,
"learning_rate": 3.871129678123297e-06,
"loss": 0.6098,
"step": 185
},
{
"epoch": 0.744,
"grad_norm": 0.2740312224605285,
"learning_rate": 3.856820945115655e-06,
"loss": 0.6078,
"step": 186
},
{
"epoch": 0.748,
"grad_norm": 0.2662514099930545,
"learning_rate": 3.84244892650347e-06,
"loss": 0.6254,
"step": 187
},
{
"epoch": 0.752,
"grad_norm": 0.26802564374459203,
"learning_rate": 3.828014292634508e-06,
"loss": 0.6121,
"step": 188
},
{
"epoch": 0.756,
"grad_norm": 0.28248517647364846,
"learning_rate": 3.813517716777069e-06,
"loss": 0.6202,
"step": 189
},
{
"epoch": 0.76,
"grad_norm": 0.272622897496479,
"learning_rate": 3.798959875088584e-06,
"loss": 0.5901,
"step": 190
},
{
"epoch": 0.764,
"grad_norm": 0.27197490333330376,
"learning_rate": 3.7843414465840823e-06,
"loss": 0.5856,
"step": 191
},
{
"epoch": 0.768,
"grad_norm": 0.26663311683845875,
"learning_rate": 3.769663113104516e-06,
"loss": 0.5907,
"step": 192
},
{
"epoch": 0.772,
"grad_norm": 0.2714585682015405,
"learning_rate": 3.7549255592849575e-06,
"loss": 0.6072,
"step": 193
},
{
"epoch": 0.776,
"grad_norm": 0.2766267849608307,
"learning_rate": 3.7401294725226707e-06,
"loss": 0.6158,
"step": 194
},
{
"epoch": 0.78,
"grad_norm": 0.26291754258948374,
"learning_rate": 3.7252755429450437e-06,
"loss": 0.5921,
"step": 195
},
{
"epoch": 0.784,
"grad_norm": 0.26530027759256725,
"learning_rate": 3.7103644633774015e-06,
"loss": 0.5841,
"step": 196
},
{
"epoch": 0.788,
"grad_norm": 0.26634011298693916,
"learning_rate": 3.695396929310693e-06,
"loss": 0.6147,
"step": 197
},
{
"epoch": 0.792,
"grad_norm": 0.26354297421036926,
"learning_rate": 3.680373638869047e-06,
"loss": 0.6061,
"step": 198
},
{
"epoch": 0.796,
"grad_norm": 0.2738313781435172,
"learning_rate": 3.665295292777214e-06,
"loss": 0.5903,
"step": 199
},
{
"epoch": 0.8,
"grad_norm": 0.27041603557150606,
"learning_rate": 3.650162594327881e-06,
"loss": 0.6216,
"step": 200
},
{
"epoch": 0.804,
"grad_norm": 0.29233209893761647,
"learning_rate": 3.634976249348867e-06,
"loss": 0.6221,
"step": 201
},
{
"epoch": 0.808,
"grad_norm": 0.28236083977418097,
"learning_rate": 3.6197369661702052e-06,
"loss": 0.6048,
"step": 202
},
{
"epoch": 0.812,
"grad_norm": 0.2610570975246162,
"learning_rate": 3.604445455591099e-06,
"loss": 0.586,
"step": 203
},
{
"epoch": 0.816,
"grad_norm": 0.27792495585124566,
"learning_rate": 3.589102430846773e-06,
"loss": 0.6052,
"step": 204
},
{
"epoch": 0.82,
"grad_norm": 0.27390708264043134,
"learning_rate": 3.5737086075752054e-06,
"loss": 0.5968,
"step": 205
},
{
"epoch": 0.824,
"grad_norm": 0.26341409551542055,
"learning_rate": 3.5582647037837446e-06,
"loss": 0.6128,
"step": 206
},
{
"epoch": 0.828,
"grad_norm": 0.2659397773506794,
"learning_rate": 3.5427714398156267e-06,
"loss": 0.6171,
"step": 207
},
{
"epoch": 0.832,
"grad_norm": 0.278302186061793,
"learning_rate": 3.527229538316371e-06,
"loss": 0.6001,
"step": 208
},
{
"epoch": 0.836,
"grad_norm": 0.27935693059901906,
"learning_rate": 3.5116397242000748e-06,
"loss": 0.5915,
"step": 209
},
{
"epoch": 0.84,
"grad_norm": 0.2681762304000699,
"learning_rate": 3.4960027246156043e-06,
"loss": 0.5982,
"step": 210
},
{
"epoch": 0.844,
"grad_norm": 0.26833511905783713,
"learning_rate": 3.480319268912676e-06,
"loss": 0.5823,
"step": 211
},
{
"epoch": 0.848,
"grad_norm": 0.27282574698411466,
"learning_rate": 3.4645900886078388e-06,
"loss": 0.6098,
"step": 212
},
{
"epoch": 0.852,
"grad_norm": 0.2841325964241835,
"learning_rate": 3.448815917350355e-06,
"loss": 0.6054,
"step": 213
},
{
"epoch": 0.856,
"grad_norm": 0.27811416712297765,
"learning_rate": 3.432997490887979e-06,
"loss": 0.6071,
"step": 214
},
{
"epoch": 0.86,
"grad_norm": 0.26897184119138856,
"learning_rate": 3.417135547032642e-06,
"loss": 0.612,
"step": 215
},
{
"epoch": 0.864,
"grad_norm": 0.2752083088143504,
"learning_rate": 3.4012308256260366e-06,
"loss": 0.6189,
"step": 216
},
{
"epoch": 0.868,
"grad_norm": 0.27146348269262077,
"learning_rate": 3.385284068505113e-06,
"loss": 0.5914,
"step": 217
},
{
"epoch": 0.872,
"grad_norm": 0.2616553689610195,
"learning_rate": 3.369296019467473e-06,
"loss": 0.5935,
"step": 218
},
{
"epoch": 0.876,
"grad_norm": 0.27439799989957114,
"learning_rate": 3.3532674242366764e-06,
"loss": 0.5815,
"step": 219
},
{
"epoch": 0.88,
"grad_norm": 0.27322752130009204,
"learning_rate": 3.3371990304274654e-06,
"loss": 0.593,
"step": 220
},
{
"epoch": 0.884,
"grad_norm": 0.2776915569429837,
"learning_rate": 3.3210915875108895e-06,
"loss": 0.636,
"step": 221
},
{
"epoch": 0.888,
"grad_norm": 0.27021599999486623,
"learning_rate": 3.304945846779346e-06,
"loss": 0.618,
"step": 222
},
{
"epoch": 0.892,
"grad_norm": 0.27748742860539916,
"learning_rate": 3.2887625613115427e-06,
"loss": 0.5937,
"step": 223
},
{
"epoch": 0.896,
"grad_norm": 0.28302784990496294,
"learning_rate": 3.272542485937369e-06,
"loss": 0.6093,
"step": 224
},
{
"epoch": 0.9,
"grad_norm": 0.2759446162478654,
"learning_rate": 3.25628637720269e-06,
"loss": 0.6261,
"step": 225
},
{
"epoch": 0.904,
"grad_norm": 0.2794223898275526,
"learning_rate": 3.239994993334059e-06,
"loss": 0.6098,
"step": 226
},
{
"epoch": 0.908,
"grad_norm": 0.2863521541541822,
"learning_rate": 3.2236690942033523e-06,
"loss": 0.6122,
"step": 227
},
{
"epoch": 0.912,
"grad_norm": 0.2810413939687701,
"learning_rate": 3.207309441292325e-06,
"loss": 0.6193,
"step": 228
},
{
"epoch": 0.916,
"grad_norm": 0.2686724574589154,
"learning_rate": 3.1909167976570977e-06,
"loss": 0.5847,
"step": 229
},
{
"epoch": 0.92,
"grad_norm": 0.27668513115261734,
"learning_rate": 3.174491927892561e-06,
"loss": 0.6083,
"step": 230
},
{
"epoch": 0.924,
"grad_norm": 0.2838278304951842,
"learning_rate": 3.158035598096715e-06,
"loss": 0.597,
"step": 231
},
{
"epoch": 0.928,
"grad_norm": 0.2702238327135827,
"learning_rate": 3.1415485758349344e-06,
"loss": 0.5884,
"step": 232
},
{
"epoch": 0.932,
"grad_norm": 0.2677368696260293,
"learning_rate": 3.1250316301041727e-06,
"loss": 0.5835,
"step": 233
},
{
"epoch": 0.936,
"grad_norm": 0.2848905107931979,
"learning_rate": 3.1084855312970897e-06,
"loss": 0.6255,
"step": 234
},
{
"epoch": 0.94,
"grad_norm": 0.2788843417615313,
"learning_rate": 3.091911051166117e-06,
"loss": 0.6215,
"step": 235
},
{
"epoch": 0.944,
"grad_norm": 0.27680715854768223,
"learning_rate": 3.0753089627874668e-06,
"loss": 0.6022,
"step": 236
},
{
"epoch": 0.948,
"grad_norm": 0.26897413893634964,
"learning_rate": 3.0586800405250677e-06,
"loss": 0.6194,
"step": 237
},
{
"epoch": 0.952,
"grad_norm": 0.2657249388294363,
"learning_rate": 3.0420250599944525e-06,
"loss": 0.5884,
"step": 238
},
{
"epoch": 0.956,
"grad_norm": 0.26576882569871957,
"learning_rate": 3.0253447980265754e-06,
"loss": 0.5949,
"step": 239
},
{
"epoch": 0.96,
"grad_norm": 0.2754408264706426,
"learning_rate": 3.0086400326315853e-06,
"loss": 0.5747,
"step": 240
},
{
"epoch": 0.964,
"grad_norm": 0.26333215813151695,
"learning_rate": 2.9919115429625295e-06,
"loss": 0.6035,
"step": 241
},
{
"epoch": 0.968,
"grad_norm": 0.2687016604382336,
"learning_rate": 2.9751601092790185e-06,
"loss": 0.6057,
"step": 242
},
{
"epoch": 0.972,
"grad_norm": 0.28837866031127346,
"learning_rate": 2.958386512910831e-06,
"loss": 0.5737,
"step": 243
},
{
"epoch": 0.976,
"grad_norm": 0.27805411098388116,
"learning_rate": 2.941591536221469e-06,
"loss": 0.6022,
"step": 244
},
{
"epoch": 0.98,
"grad_norm": 0.27696163335286905,
"learning_rate": 2.924775962571667e-06,
"loss": 0.6081,
"step": 245
},
{
"epoch": 0.984,
"grad_norm": 0.28312029387331156,
"learning_rate": 2.907940576282856e-06,
"loss": 0.6178,
"step": 246
},
{
"epoch": 0.988,
"grad_norm": 0.2516197979856304,
"learning_rate": 2.8910861626005774e-06,
"loss": 0.5812,
"step": 247
},
{
"epoch": 0.992,
"grad_norm": 0.25765553700425475,
"learning_rate": 2.8742135076578608e-06,
"loss": 0.5868,
"step": 248
},
{
"epoch": 0.996,
"grad_norm": 0.27540024093918797,
"learning_rate": 2.857323398438554e-06,
"loss": 0.5976,
"step": 249
},
{
"epoch": 1.0,
"grad_norm": 0.2637219036362386,
"learning_rate": 2.840416622740617e-06,
"loss": 0.6106,
"step": 250
},
{
"epoch": 1.004,
"grad_norm": 0.3270308432987493,
"learning_rate": 2.8234939691393765e-06,
"loss": 0.5662,
"step": 251
},
{
"epoch": 1.008,
"grad_norm": 0.3032460726332443,
"learning_rate": 2.8065562269507464e-06,
"loss": 0.589,
"step": 252
},
{
"epoch": 1.012,
"grad_norm": 0.2761928570311231,
"learning_rate": 2.789604186194411e-06,
"loss": 0.5654,
"step": 253
},
{
"epoch": 1.016,
"grad_norm": 0.27190181641802363,
"learning_rate": 2.7726386375569748e-06,
"loss": 0.5908,
"step": 254
},
{
"epoch": 1.02,
"grad_norm": 0.27382272887323905,
"learning_rate": 2.7556603723550855e-06,
"loss": 0.553,
"step": 255
},
{
"epoch": 1.024,
"grad_norm": 0.2699869769385883,
"learning_rate": 2.7386701824985257e-06,
"loss": 0.5517,
"step": 256
},
{
"epoch": 1.028,
"grad_norm": 0.2740431770397761,
"learning_rate": 2.721668860453271e-06,
"loss": 0.5611,
"step": 257
},
{
"epoch": 1.032,
"grad_norm": 0.27006742872479766,
"learning_rate": 2.7046571992045334e-06,
"loss": 0.5609,
"step": 258
},
{
"epoch": 1.036,
"grad_norm": 0.2853628863130536,
"learning_rate": 2.6876359922197703e-06,
"loss": 0.5839,
"step": 259
},
{
"epoch": 1.04,
"grad_norm": 0.2674934886902205,
"learning_rate": 2.670606033411678e-06,
"loss": 0.5692,
"step": 260
},
{
"epoch": 1.044,
"grad_norm": 0.2717961476035451,
"learning_rate": 2.653568117101159e-06,
"loss": 0.5586,
"step": 261
},
{
"epoch": 1.048,
"grad_norm": 0.2886664885195019,
"learning_rate": 2.636523037980275e-06,
"loss": 0.5592,
"step": 262
},
{
"epoch": 1.052,
"grad_norm": 0.2913975175671213,
"learning_rate": 2.6194715910751806e-06,
"loss": 0.5588,
"step": 263
},
{
"epoch": 1.056,
"grad_norm": 0.26635335287134043,
"learning_rate": 2.602414571709036e-06,
"loss": 0.5518,
"step": 264
},
{
"epoch": 1.06,
"grad_norm": 0.27589381073650543,
"learning_rate": 2.58535277546492e-06,
"loss": 0.5588,
"step": 265
},
{
"epoch": 1.064,
"grad_norm": 0.26891839582782207,
"learning_rate": 2.5682869981487154e-06,
"loss": 0.5671,
"step": 266
},
{
"epoch": 1.068,
"grad_norm": 0.2699654255248958,
"learning_rate": 2.5512180357519913e-06,
"loss": 0.5663,
"step": 267
},
{
"epoch": 1.072,
"grad_norm": 0.27268944362101294,
"learning_rate": 2.5341466844148775e-06,
"loss": 0.5689,
"step": 268
},
{
"epoch": 1.076,
"grad_norm": 0.2895176432362157,
"learning_rate": 2.5170737403889334e-06,
"loss": 0.555,
"step": 269
},
{
"epoch": 1.08,
"grad_norm": 0.275834303493974,
"learning_rate": 2.5e-06,
"loss": 0.5652,
"step": 270
},
{
"epoch": 1.084,
"grad_norm": 0.27109148773857455,
"learning_rate": 2.4829262596110674e-06,
"loss": 0.5531,
"step": 271
},
{
"epoch": 1.088,
"grad_norm": 0.2885944970109634,
"learning_rate": 2.465853315585123e-06,
"loss": 0.5645,
"step": 272
},
{
"epoch": 1.092,
"grad_norm": 0.3325900759228949,
"learning_rate": 2.44878196424801e-06,
"loss": 0.567,
"step": 273
},
{
"epoch": 1.096,
"grad_norm": 0.2656725633978504,
"learning_rate": 2.431713001851286e-06,
"loss": 0.5645,
"step": 274
},
{
"epoch": 1.1,
"grad_norm": 0.28515147451371026,
"learning_rate": 2.4146472245350804e-06,
"loss": 0.5627,
"step": 275
},
{
"epoch": 1.104,
"grad_norm": 0.2772295325997671,
"learning_rate": 2.3975854282909645e-06,
"loss": 0.5606,
"step": 276
},
{
"epoch": 1.108,
"grad_norm": 0.2732258740956759,
"learning_rate": 2.3805284089248203e-06,
"loss": 0.558,
"step": 277
},
{
"epoch": 1.112,
"grad_norm": 0.2657630056993197,
"learning_rate": 2.3634769620197253e-06,
"loss": 0.5639,
"step": 278
},
{
"epoch": 1.116,
"grad_norm": 0.2741360647437433,
"learning_rate": 2.3464318828988416e-06,
"loss": 0.539,
"step": 279
},
{
"epoch": 1.12,
"grad_norm": 0.27924091679374063,
"learning_rate": 2.3293939665883233e-06,
"loss": 0.563,
"step": 280
},
{
"epoch": 1.124,
"grad_norm": 0.2663368697744116,
"learning_rate": 2.3123640077802305e-06,
"loss": 0.5639,
"step": 281
},
{
"epoch": 1.1280000000000001,
"grad_norm": 0.2635159398782791,
"learning_rate": 2.2953428007954682e-06,
"loss": 0.5225,
"step": 282
},
{
"epoch": 1.1320000000000001,
"grad_norm": 0.26790514294280293,
"learning_rate": 2.2783311395467304e-06,
"loss": 0.5556,
"step": 283
},
{
"epoch": 1.1360000000000001,
"grad_norm": 0.27007506595154834,
"learning_rate": 2.261329817501475e-06,
"loss": 0.545,
"step": 284
},
{
"epoch": 1.1400000000000001,
"grad_norm": 0.27274462523361476,
"learning_rate": 2.2443396276449145e-06,
"loss": 0.5724,
"step": 285
},
{
"epoch": 1.144,
"grad_norm": 0.25666672253669337,
"learning_rate": 2.2273613624430256e-06,
"loss": 0.541,
"step": 286
},
{
"epoch": 1.148,
"grad_norm": 0.30831325626025924,
"learning_rate": 2.2103958138055897e-06,
"loss": 0.5544,
"step": 287
},
{
"epoch": 1.152,
"grad_norm": 0.26679849744140743,
"learning_rate": 2.1934437730492544e-06,
"loss": 0.5544,
"step": 288
},
{
"epoch": 1.156,
"grad_norm": 0.26795918133510693,
"learning_rate": 2.1765060308606243e-06,
"loss": 0.5726,
"step": 289
},
{
"epoch": 1.16,
"grad_norm": 0.26692330671607206,
"learning_rate": 2.159583377259384e-06,
"loss": 0.5771,
"step": 290
},
{
"epoch": 1.164,
"grad_norm": 0.26800012106006976,
"learning_rate": 2.142676601561447e-06,
"loss": 0.5376,
"step": 291
},
{
"epoch": 1.168,
"grad_norm": 0.25520584565446575,
"learning_rate": 2.1257864923421405e-06,
"loss": 0.5439,
"step": 292
},
{
"epoch": 1.172,
"grad_norm": 0.26220655980779767,
"learning_rate": 2.1089138373994226e-06,
"loss": 0.552,
"step": 293
},
{
"epoch": 1.176,
"grad_norm": 0.2599229946929615,
"learning_rate": 2.092059423717145e-06,
"loss": 0.5591,
"step": 294
},
{
"epoch": 1.18,
"grad_norm": 0.26226465656771997,
"learning_rate": 2.0752240374283334e-06,
"loss": 0.5424,
"step": 295
},
{
"epoch": 1.184,
"grad_norm": 0.27097583397605235,
"learning_rate": 2.0584084637785316e-06,
"loss": 0.5613,
"step": 296
},
{
"epoch": 1.188,
"grad_norm": 0.25487494718289816,
"learning_rate": 2.0416134870891697e-06,
"loss": 0.5531,
"step": 297
},
{
"epoch": 1.192,
"grad_norm": 0.258627059695691,
"learning_rate": 2.0248398907209827e-06,
"loss": 0.5603,
"step": 298
},
{
"epoch": 1.196,
"grad_norm": 0.27660938201914487,
"learning_rate": 2.008088457037472e-06,
"loss": 0.5648,
"step": 299
},
{
"epoch": 1.2,
"grad_norm": 0.26596618229804614,
"learning_rate": 1.991359967368416e-06,
"loss": 0.574,
"step": 300
},
{
"epoch": 1.204,
"grad_norm": 0.2618039991994125,
"learning_rate": 1.9746552019734246e-06,
"loss": 0.5492,
"step": 301
},
{
"epoch": 1.208,
"grad_norm": 0.272654754048079,
"learning_rate": 1.957974940005548e-06,
"loss": 0.5767,
"step": 302
},
{
"epoch": 1.212,
"grad_norm": 0.26671171090168844,
"learning_rate": 1.9413199594749327e-06,
"loss": 0.5338,
"step": 303
},
{
"epoch": 1.216,
"grad_norm": 0.270832034752946,
"learning_rate": 1.9246910372125345e-06,
"loss": 0.5647,
"step": 304
},
{
"epoch": 1.22,
"grad_norm": 0.2643013014532281,
"learning_rate": 1.9080889488338833e-06,
"loss": 0.5597,
"step": 305
},
{
"epoch": 1.224,
"grad_norm": 0.27092629093151294,
"learning_rate": 1.8915144687029107e-06,
"loss": 0.5485,
"step": 306
},
{
"epoch": 1.228,
"grad_norm": 0.26613235310331923,
"learning_rate": 1.874968369895828e-06,
"loss": 0.552,
"step": 307
},
{
"epoch": 1.232,
"grad_norm": 0.2582406507610385,
"learning_rate": 1.8584514241650667e-06,
"loss": 0.5477,
"step": 308
},
{
"epoch": 1.236,
"grad_norm": 0.2687239346512157,
"learning_rate": 1.8419644019032868e-06,
"loss": 0.5716,
"step": 309
},
{
"epoch": 1.24,
"grad_norm": 0.2634130471010307,
"learning_rate": 1.8255080721074391e-06,
"loss": 0.5511,
"step": 310
},
{
"epoch": 1.244,
"grad_norm": 0.27164814594203673,
"learning_rate": 1.8090832023429022e-06,
"loss": 0.5605,
"step": 311
},
{
"epoch": 1.248,
"grad_norm": 0.26506994687071844,
"learning_rate": 1.792690558707675e-06,
"loss": 0.5522,
"step": 312
},
{
"epoch": 1.252,
"grad_norm": 0.2710404134246902,
"learning_rate": 1.7763309057966487e-06,
"loss": 0.5754,
"step": 313
},
{
"epoch": 1.256,
"grad_norm": 0.2644648635617096,
"learning_rate": 1.7600050066659418e-06,
"loss": 0.5542,
"step": 314
},
{
"epoch": 1.26,
"grad_norm": 0.2563441049550618,
"learning_rate": 1.7437136227973108e-06,
"loss": 0.5428,
"step": 315
},
{
"epoch": 1.264,
"grad_norm": 0.2781486176634604,
"learning_rate": 1.7274575140626318e-06,
"loss": 0.5469,
"step": 316
},
{
"epoch": 1.268,
"grad_norm": 0.26498925525964123,
"learning_rate": 1.7112374386884583e-06,
"loss": 0.5671,
"step": 317
},
{
"epoch": 1.272,
"grad_norm": 0.27752994625791155,
"learning_rate": 1.695054153220655e-06,
"loss": 0.5437,
"step": 318
},
{
"epoch": 1.276,
"grad_norm": 0.2602835745087367,
"learning_rate": 1.678908412489111e-06,
"loss": 0.5602,
"step": 319
},
{
"epoch": 1.28,
"grad_norm": 0.2665909274717665,
"learning_rate": 1.6628009695725348e-06,
"loss": 0.5762,
"step": 320
},
{
"epoch": 1.284,
"grad_norm": 0.2531045714341802,
"learning_rate": 1.6467325757633242e-06,
"loss": 0.5674,
"step": 321
},
{
"epoch": 1.288,
"grad_norm": 0.2621120701043234,
"learning_rate": 1.630703980532528e-06,
"loss": 0.5598,
"step": 322
},
{
"epoch": 1.292,
"grad_norm": 0.275203318844759,
"learning_rate": 1.6147159314948873e-06,
"loss": 0.564,
"step": 323
},
{
"epoch": 1.296,
"grad_norm": 0.2603786935372431,
"learning_rate": 1.5987691743739636e-06,
"loss": 0.5535,
"step": 324
},
{
"epoch": 1.3,
"grad_norm": 0.26776057829072847,
"learning_rate": 1.5828644529673592e-06,
"loss": 0.5627,
"step": 325
},
{
"epoch": 1.304,
"grad_norm": 0.2623048715119549,
"learning_rate": 1.5670025091120219e-06,
"loss": 0.5685,
"step": 326
},
{
"epoch": 1.308,
"grad_norm": 0.26608080729860306,
"learning_rate": 1.5511840826496462e-06,
"loss": 0.5695,
"step": 327
},
{
"epoch": 1.312,
"grad_norm": 0.2673157087455161,
"learning_rate": 1.5354099113921614e-06,
"loss": 0.5354,
"step": 328
},
{
"epoch": 1.316,
"grad_norm": 0.2754778027664838,
"learning_rate": 1.519680731087325e-06,
"loss": 0.5705,
"step": 329
},
{
"epoch": 1.32,
"grad_norm": 0.2697793809470272,
"learning_rate": 1.5039972753843966e-06,
"loss": 0.5748,
"step": 330
},
{
"epoch": 1.324,
"grad_norm": 0.2737068013002947,
"learning_rate": 1.488360275799926e-06,
"loss": 0.5554,
"step": 331
},
{
"epoch": 1.328,
"grad_norm": 0.2618826312103745,
"learning_rate": 1.4727704616836297e-06,
"loss": 0.5516,
"step": 332
},
{
"epoch": 1.332,
"grad_norm": 0.2610637671077678,
"learning_rate": 1.457228560184374e-06,
"loss": 0.5491,
"step": 333
},
{
"epoch": 1.336,
"grad_norm": 0.2615954274949455,
"learning_rate": 1.441735296216256e-06,
"loss": 0.5556,
"step": 334
},
{
"epoch": 1.34,
"grad_norm": 0.2650842665446321,
"learning_rate": 1.4262913924247956e-06,
"loss": 0.5475,
"step": 335
},
{
"epoch": 1.3439999999999999,
"grad_norm": 0.25585711289460494,
"learning_rate": 1.4108975691532273e-06,
"loss": 0.5549,
"step": 336
},
{
"epoch": 1.3479999999999999,
"grad_norm": 0.25250951856873494,
"learning_rate": 1.3955545444089017e-06,
"loss": 0.5601,
"step": 337
},
{
"epoch": 1.3519999999999999,
"grad_norm": 0.2569876765702743,
"learning_rate": 1.3802630338297956e-06,
"loss": 0.548,
"step": 338
},
{
"epoch": 1.3559999999999999,
"grad_norm": 0.28203813821342777,
"learning_rate": 1.3650237506511333e-06,
"loss": 0.566,
"step": 339
},
{
"epoch": 1.3599999999999999,
"grad_norm": 0.27081394807954995,
"learning_rate": 1.3498374056721198e-06,
"loss": 0.5594,
"step": 340
},
{
"epoch": 1.3639999999999999,
"grad_norm": 0.26811473148644754,
"learning_rate": 1.334704707222787e-06,
"loss": 0.5558,
"step": 341
},
{
"epoch": 1.3679999999999999,
"grad_norm": 0.2635422950339071,
"learning_rate": 1.3196263611309539e-06,
"loss": 0.5707,
"step": 342
},
{
"epoch": 1.3719999999999999,
"grad_norm": 0.25640933129102816,
"learning_rate": 1.3046030706893079e-06,
"loss": 0.5551,
"step": 343
},
{
"epoch": 1.376,
"grad_norm": 0.26144553856379565,
"learning_rate": 1.2896355366226e-06,
"loss": 0.5689,
"step": 344
},
{
"epoch": 1.38,
"grad_norm": 0.24921356601339556,
"learning_rate": 1.2747244570549578e-06,
"loss": 0.5223,
"step": 345
},
{
"epoch": 1.384,
"grad_norm": 0.2739025094336896,
"learning_rate": 1.2598705274773299e-06,
"loss": 0.5553,
"step": 346
},
{
"epoch": 1.388,
"grad_norm": 0.26711249323543973,
"learning_rate": 1.2450744407150427e-06,
"loss": 0.5411,
"step": 347
},
{
"epoch": 1.392,
"grad_norm": 0.2695324913604517,
"learning_rate": 1.2303368868954848e-06,
"loss": 0.567,
"step": 348
},
{
"epoch": 1.396,
"grad_norm": 0.25781431882124334,
"learning_rate": 1.215658553415918e-06,
"loss": 0.5674,
"step": 349
},
{
"epoch": 1.4,
"grad_norm": 0.2594657444454727,
"learning_rate": 1.2010401249114166e-06,
"loss": 0.5366,
"step": 350
},
{
"epoch": 1.404,
"grad_norm": 0.26265811514812376,
"learning_rate": 1.1864822832229319e-06,
"loss": 0.5465,
"step": 351
},
{
"epoch": 1.408,
"grad_norm": 0.26623313611028854,
"learning_rate": 1.1719857073654923e-06,
"loss": 0.564,
"step": 352
},
{
"epoch": 1.412,
"grad_norm": 0.2702849646886806,
"learning_rate": 1.1575510734965305e-06,
"loss": 0.5521,
"step": 353
},
{
"epoch": 1.416,
"grad_norm": 0.2689048371097966,
"learning_rate": 1.1431790548843464e-06,
"loss": 0.5527,
"step": 354
},
{
"epoch": 1.42,
"grad_norm": 0.25819241256540093,
"learning_rate": 1.1288703218767027e-06,
"loss": 0.54,
"step": 355
},
{
"epoch": 1.424,
"grad_norm": 0.25437896341243765,
"learning_rate": 1.1146255418695635e-06,
"loss": 0.528,
"step": 356
},
{
"epoch": 1.428,
"grad_norm": 0.25406879267543436,
"learning_rate": 1.1004453792759547e-06,
"loss": 0.5471,
"step": 357
},
{
"epoch": 1.432,
"grad_norm": 0.2562241134177116,
"learning_rate": 1.0863304954949856e-06,
"loss": 0.55,
"step": 358
},
{
"epoch": 1.436,
"grad_norm": 0.26542441620839063,
"learning_rate": 1.072281548880992e-06,
"loss": 0.5678,
"step": 359
},
{
"epoch": 1.44,
"grad_norm": 0.269318418298225,
"learning_rate": 1.0582991947128324e-06,
"loss": 0.5615,
"step": 360
},
{
"epoch": 1.444,
"grad_norm": 0.2691764014021926,
"learning_rate": 1.0443840851633227e-06,
"loss": 0.565,
"step": 361
},
{
"epoch": 1.448,
"grad_norm": 0.2594862635555543,
"learning_rate": 1.0305368692688175e-06,
"loss": 0.5666,
"step": 362
},
{
"epoch": 1.452,
"grad_norm": 0.25315547667760335,
"learning_rate": 1.0167581928989373e-06,
"loss": 0.5555,
"step": 363
},
{
"epoch": 1.456,
"grad_norm": 0.2613895044115319,
"learning_rate": 1.0030486987264436e-06,
"loss": 0.5776,
"step": 364
},
{
"epoch": 1.46,
"grad_norm": 0.2631431546714004,
"learning_rate": 9.89409026197264e-07,
"loss": 0.5365,
"step": 365
},
{
"epoch": 1.464,
"grad_norm": 0.2647751182203821,
"learning_rate": 9.758398115006637e-07,
"loss": 0.5573,
"step": 366
},
{
"epoch": 1.468,
"grad_norm": 0.26208143115660865,
"learning_rate": 9.623416875395763e-07,
"loss": 0.5605,
"step": 367
},
{
"epoch": 1.472,
"grad_norm": 0.2704474380702507,
"learning_rate": 9.489152839010799e-07,
"loss": 0.5697,
"step": 368
},
{
"epoch": 1.476,
"grad_norm": 0.275761932132816,
"learning_rate": 9.355612268270339e-07,
"loss": 0.5586,
"step": 369
},
{
"epoch": 1.48,
"grad_norm": 0.27242480864427504,
"learning_rate": 9.222801391848688e-07,
"loss": 0.5573,
"step": 370
},
{
"epoch": 1.484,
"grad_norm": 0.254674372509468,
"learning_rate": 9.090726404385319e-07,
"loss": 0.5377,
"step": 371
},
{
"epoch": 1.488,
"grad_norm": 0.25888213496864965,
"learning_rate": 8.959393466195973e-07,
"loss": 0.5278,
"step": 372
},
{
"epoch": 1.492,
"grad_norm": 0.2527033545759895,
"learning_rate": 8.828808702985325e-07,
"loss": 0.5475,
"step": 373
},
{
"epoch": 1.496,
"grad_norm": 0.2669973822070058,
"learning_rate": 8.69897820556124e-07,
"loss": 0.5652,
"step": 374
},
{
"epoch": 1.5,
"grad_norm": 0.26903334117277744,
"learning_rate": 8.569908029550686e-07,
"loss": 0.5555,
"step": 375
}
],
"logging_steps": 1,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 125,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.6053644267893555e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}