{
"best_metric": 1.2369053363800049,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.3420265070542967,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0017101325352714834,
"grad_norm": 14.640414237976074,
"learning_rate": 1e-05,
"loss": 5.4392,
"step": 1
},
{
"epoch": 0.0017101325352714834,
"eval_loss": 2.389522075653076,
"eval_runtime": 69.5278,
"eval_samples_per_second": 14.167,
"eval_steps_per_second": 3.553,
"step": 1
},
{
"epoch": 0.003420265070542967,
"grad_norm": 20.288902282714844,
"learning_rate": 2e-05,
"loss": 6.512,
"step": 2
},
{
"epoch": 0.005130397605814451,
"grad_norm": 18.305891036987305,
"learning_rate": 3e-05,
"loss": 6.8107,
"step": 3
},
{
"epoch": 0.006840530141085934,
"grad_norm": 16.469608306884766,
"learning_rate": 4e-05,
"loss": 7.0771,
"step": 4
},
{
"epoch": 0.008550662676357419,
"grad_norm": 16.029294967651367,
"learning_rate": 5e-05,
"loss": 6.9115,
"step": 5
},
{
"epoch": 0.010260795211628902,
"grad_norm": 13.417438507080078,
"learning_rate": 6e-05,
"loss": 6.5294,
"step": 6
},
{
"epoch": 0.011970927746900385,
"grad_norm": 13.111316680908203,
"learning_rate": 7e-05,
"loss": 6.4824,
"step": 7
},
{
"epoch": 0.013681060282171868,
"grad_norm": 12.419403076171875,
"learning_rate": 8e-05,
"loss": 6.3141,
"step": 8
},
{
"epoch": 0.015391192817443352,
"grad_norm": 11.091198921203613,
"learning_rate": 9e-05,
"loss": 6.2618,
"step": 9
},
{
"epoch": 0.017101325352714837,
"grad_norm": 10.392128944396973,
"learning_rate": 0.0001,
"loss": 6.3153,
"step": 10
},
{
"epoch": 0.01881145788798632,
"grad_norm": 9.125272750854492,
"learning_rate": 9.999316524962345e-05,
"loss": 5.9951,
"step": 11
},
{
"epoch": 0.020521590423257803,
"grad_norm": 9.861510276794434,
"learning_rate": 9.997266286704631e-05,
"loss": 6.1288,
"step": 12
},
{
"epoch": 0.022231722958529286,
"grad_norm": 8.92580509185791,
"learning_rate": 9.993849845741524e-05,
"loss": 5.7963,
"step": 13
},
{
"epoch": 0.02394185549380077,
"grad_norm": 9.063461303710938,
"learning_rate": 9.989068136093873e-05,
"loss": 5.8087,
"step": 14
},
{
"epoch": 0.025651988029072252,
"grad_norm": 9.55776596069336,
"learning_rate": 9.98292246503335e-05,
"loss": 5.9158,
"step": 15
},
{
"epoch": 0.027362120564343735,
"grad_norm": 11.029727935791016,
"learning_rate": 9.975414512725057e-05,
"loss": 5.8921,
"step": 16
},
{
"epoch": 0.02907225309961522,
"grad_norm": 9.607908248901367,
"learning_rate": 9.966546331768191e-05,
"loss": 5.6036,
"step": 17
},
{
"epoch": 0.030782385634886705,
"grad_norm": 9.793913841247559,
"learning_rate": 9.956320346634876e-05,
"loss": 5.6222,
"step": 18
},
{
"epoch": 0.032492518170158184,
"grad_norm": 8.611642837524414,
"learning_rate": 9.944739353007344e-05,
"loss": 5.7555,
"step": 19
},
{
"epoch": 0.034202650705429674,
"grad_norm": 8.97571849822998,
"learning_rate": 9.931806517013612e-05,
"loss": 5.6779,
"step": 20
},
{
"epoch": 0.03591278324070116,
"grad_norm": 9.353657722473145,
"learning_rate": 9.917525374361912e-05,
"loss": 5.8973,
"step": 21
},
{
"epoch": 0.03762291577597264,
"grad_norm": 9.22663688659668,
"learning_rate": 9.901899829374047e-05,
"loss": 5.8711,
"step": 22
},
{
"epoch": 0.03933304831124412,
"grad_norm": 8.428504943847656,
"learning_rate": 9.884934153917997e-05,
"loss": 5.8102,
"step": 23
},
{
"epoch": 0.041043180846515606,
"grad_norm": 7.905302047729492,
"learning_rate": 9.86663298624003e-05,
"loss": 5.4847,
"step": 24
},
{
"epoch": 0.04275331338178709,
"grad_norm": 8.299999237060547,
"learning_rate": 9.847001329696653e-05,
"loss": 5.8853,
"step": 25
},
{
"epoch": 0.04446344591705857,
"grad_norm": 8.881180763244629,
"learning_rate": 9.826044551386744e-05,
"loss": 5.7532,
"step": 26
},
{
"epoch": 0.046173578452330055,
"grad_norm": 8.510998725891113,
"learning_rate": 9.803768380684242e-05,
"loss": 5.2993,
"step": 27
},
{
"epoch": 0.04788371098760154,
"grad_norm": 8.11540412902832,
"learning_rate": 9.780178907671789e-05,
"loss": 5.3116,
"step": 28
},
{
"epoch": 0.04959384352287302,
"grad_norm": 8.081546783447266,
"learning_rate": 9.755282581475769e-05,
"loss": 5.7509,
"step": 29
},
{
"epoch": 0.051303976058144504,
"grad_norm": 8.157670021057129,
"learning_rate": 9.729086208503174e-05,
"loss": 5.7746,
"step": 30
},
{
"epoch": 0.05301410859341599,
"grad_norm": 8.227839469909668,
"learning_rate": 9.701596950580806e-05,
"loss": 5.8473,
"step": 31
},
{
"epoch": 0.05472424112868747,
"grad_norm": 9.177515983581543,
"learning_rate": 9.672822322997305e-05,
"loss": 5.8004,
"step": 32
},
{
"epoch": 0.05643437366395896,
"grad_norm": 8.275994300842285,
"learning_rate": 9.642770192448536e-05,
"loss": 6.0828,
"step": 33
},
{
"epoch": 0.05814450619923044,
"grad_norm": 8.03842544555664,
"learning_rate": 9.611448774886924e-05,
"loss": 5.2698,
"step": 34
},
{
"epoch": 0.059854638734501926,
"grad_norm": 9.835810661315918,
"learning_rate": 9.578866633275288e-05,
"loss": 5.5907,
"step": 35
},
{
"epoch": 0.06156477126977341,
"grad_norm": 8.554304122924805,
"learning_rate": 9.545032675245813e-05,
"loss": 5.8263,
"step": 36
},
{
"epoch": 0.06327490380504489,
"grad_norm": 9.51391315460205,
"learning_rate": 9.509956150664796e-05,
"loss": 6.0017,
"step": 37
},
{
"epoch": 0.06498503634031637,
"grad_norm": 8.211365699768066,
"learning_rate": 9.473646649103818e-05,
"loss": 5.254,
"step": 38
},
{
"epoch": 0.06669516887558785,
"grad_norm": 8.579219818115234,
"learning_rate": 9.43611409721806e-05,
"loss": 5.7198,
"step": 39
},
{
"epoch": 0.06840530141085935,
"grad_norm": 8.614923477172852,
"learning_rate": 9.397368756032445e-05,
"loss": 5.6871,
"step": 40
},
{
"epoch": 0.07011543394613083,
"grad_norm": 8.860878944396973,
"learning_rate": 9.357421218136386e-05,
"loss": 6.0608,
"step": 41
},
{
"epoch": 0.07182556648140231,
"grad_norm": 8.788886070251465,
"learning_rate": 9.316282404787871e-05,
"loss": 5.9044,
"step": 42
},
{
"epoch": 0.0735356990166738,
"grad_norm": 8.671177864074707,
"learning_rate": 9.273963562927695e-05,
"loss": 5.4956,
"step": 43
},
{
"epoch": 0.07524583155194528,
"grad_norm": 8.375917434692383,
"learning_rate": 9.230476262104677e-05,
"loss": 5.711,
"step": 44
},
{
"epoch": 0.07695596408721676,
"grad_norm": 8.910039901733398,
"learning_rate": 9.185832391312644e-05,
"loss": 5.908,
"step": 45
},
{
"epoch": 0.07866609662248825,
"grad_norm": 9.672930717468262,
"learning_rate": 9.140044155740101e-05,
"loss": 5.9495,
"step": 46
},
{
"epoch": 0.08037622915775973,
"grad_norm": 10.40195369720459,
"learning_rate": 9.093124073433463e-05,
"loss": 6.3817,
"step": 47
},
{
"epoch": 0.08208636169303121,
"grad_norm": 10.205389976501465,
"learning_rate": 9.045084971874738e-05,
"loss": 6.1616,
"step": 48
},
{
"epoch": 0.0837964942283027,
"grad_norm": 10.02330207824707,
"learning_rate": 8.995939984474624e-05,
"loss": 6.5455,
"step": 49
},
{
"epoch": 0.08550662676357418,
"grad_norm": 11.856907844543457,
"learning_rate": 8.945702546981969e-05,
"loss": 6.6414,
"step": 50
},
{
"epoch": 0.08550662676357418,
"eval_loss": 1.563002109527588,
"eval_runtime": 70.9713,
"eval_samples_per_second": 13.879,
"eval_steps_per_second": 3.48,
"step": 50
},
{
"epoch": 0.08721675929884566,
"grad_norm": 11.785542488098145,
"learning_rate": 8.894386393810563e-05,
"loss": 4.8819,
"step": 51
},
{
"epoch": 0.08892689183411714,
"grad_norm": 10.87253475189209,
"learning_rate": 8.842005554284296e-05,
"loss": 5.6858,
"step": 52
},
{
"epoch": 0.09063702436938863,
"grad_norm": 7.662486553192139,
"learning_rate": 8.788574348801675e-05,
"loss": 5.5863,
"step": 53
},
{
"epoch": 0.09234715690466011,
"grad_norm": 5.953812599182129,
"learning_rate": 8.73410738492077e-05,
"loss": 5.0459,
"step": 54
},
{
"epoch": 0.0940572894399316,
"grad_norm": 6.940700054168701,
"learning_rate": 8.678619553365659e-05,
"loss": 5.6281,
"step": 55
},
{
"epoch": 0.09576742197520308,
"grad_norm": 6.840532302856445,
"learning_rate": 8.622126023955446e-05,
"loss": 5.4419,
"step": 56
},
{
"epoch": 0.09747755451047456,
"grad_norm": 6.621339797973633,
"learning_rate": 8.564642241456986e-05,
"loss": 5.5128,
"step": 57
},
{
"epoch": 0.09918768704574604,
"grad_norm": 6.133358478546143,
"learning_rate": 8.506183921362443e-05,
"loss": 5.0469,
"step": 58
},
{
"epoch": 0.10089781958101753,
"grad_norm": 6.378569602966309,
"learning_rate": 8.44676704559283e-05,
"loss": 5.0157,
"step": 59
},
{
"epoch": 0.10260795211628901,
"grad_norm": 7.036313056945801,
"learning_rate": 8.386407858128706e-05,
"loss": 5.2026,
"step": 60
},
{
"epoch": 0.10431808465156049,
"grad_norm": 6.539918899536133,
"learning_rate": 8.32512286056924e-05,
"loss": 5.2365,
"step": 61
},
{
"epoch": 0.10602821718683197,
"grad_norm": 6.326776504516602,
"learning_rate": 8.262928807620843e-05,
"loss": 5.17,
"step": 62
},
{
"epoch": 0.10773834972210346,
"grad_norm": 6.600303649902344,
"learning_rate": 8.199842702516583e-05,
"loss": 5.4918,
"step": 63
},
{
"epoch": 0.10944848225737494,
"grad_norm": 6.266218185424805,
"learning_rate": 8.135881792367686e-05,
"loss": 5.1133,
"step": 64
},
{
"epoch": 0.11115861479264642,
"grad_norm": 6.132507801055908,
"learning_rate": 8.07106356344834e-05,
"loss": 5.1749,
"step": 65
},
{
"epoch": 0.11286874732791792,
"grad_norm": 6.380084037780762,
"learning_rate": 8.005405736415126e-05,
"loss": 5.1313,
"step": 66
},
{
"epoch": 0.1145788798631894,
"grad_norm": 6.203830242156982,
"learning_rate": 7.938926261462366e-05,
"loss": 5.1846,
"step": 67
},
{
"epoch": 0.11628901239846089,
"grad_norm": 6.547234535217285,
"learning_rate": 7.871643313414718e-05,
"loss": 5.0656,
"step": 68
},
{
"epoch": 0.11799914493373237,
"grad_norm": 6.533773422241211,
"learning_rate": 7.803575286758364e-05,
"loss": 5.0749,
"step": 69
},
{
"epoch": 0.11970927746900385,
"grad_norm": 7.144689083099365,
"learning_rate": 7.734740790612136e-05,
"loss": 5.3617,
"step": 70
},
{
"epoch": 0.12141941000427534,
"grad_norm": 7.121635437011719,
"learning_rate": 7.66515864363997e-05,
"loss": 5.5717,
"step": 71
},
{
"epoch": 0.12312954253954682,
"grad_norm": 6.523452281951904,
"learning_rate": 7.594847868906076e-05,
"loss": 5.0268,
"step": 72
},
{
"epoch": 0.1248396750748183,
"grad_norm": 6.674250602722168,
"learning_rate": 7.52382768867422e-05,
"loss": 5.5013,
"step": 73
},
{
"epoch": 0.12654980761008977,
"grad_norm": 6.321826934814453,
"learning_rate": 7.452117519152542e-05,
"loss": 5.2318,
"step": 74
},
{
"epoch": 0.12825994014536127,
"grad_norm": 6.396578788757324,
"learning_rate": 7.379736965185368e-05,
"loss": 5.3862,
"step": 75
},
{
"epoch": 0.12997007268063274,
"grad_norm": 6.630756378173828,
"learning_rate": 7.30670581489344e-05,
"loss": 5.5383,
"step": 76
},
{
"epoch": 0.13168020521590423,
"grad_norm": 6.786897659301758,
"learning_rate": 7.233044034264034e-05,
"loss": 5.6795,
"step": 77
},
{
"epoch": 0.1333903377511757,
"grad_norm": 6.6722025871276855,
"learning_rate": 7.158771761692464e-05,
"loss": 5.5393,
"step": 78
},
{
"epoch": 0.1351004702864472,
"grad_norm": 6.680119037628174,
"learning_rate": 7.083909302476453e-05,
"loss": 5.5529,
"step": 79
},
{
"epoch": 0.1368106028217187,
"grad_norm": 7.221715450286865,
"learning_rate": 7.008477123264848e-05,
"loss": 5.4678,
"step": 80
},
{
"epoch": 0.13852073535699017,
"grad_norm": 6.539522171020508,
"learning_rate": 6.932495846462261e-05,
"loss": 5.1555,
"step": 81
},
{
"epoch": 0.14023086789226166,
"grad_norm": 6.531249523162842,
"learning_rate": 6.855986244591104e-05,
"loss": 4.9377,
"step": 82
},
{
"epoch": 0.14194100042753313,
"grad_norm": 7.449616432189941,
"learning_rate": 6.778969234612584e-05,
"loss": 5.4777,
"step": 83
},
{
"epoch": 0.14365113296280463,
"grad_norm": 7.076162338256836,
"learning_rate": 6.701465872208216e-05,
"loss": 5.1931,
"step": 84
},
{
"epoch": 0.1453612654980761,
"grad_norm": 6.946402549743652,
"learning_rate": 6.623497346023418e-05,
"loss": 5.3551,
"step": 85
},
{
"epoch": 0.1470713980333476,
"grad_norm": 7.516958713531494,
"learning_rate": 6.545084971874738e-05,
"loss": 5.3214,
"step": 86
},
{
"epoch": 0.14878153056861906,
"grad_norm": 8.02112865447998,
"learning_rate": 6.466250186922325e-05,
"loss": 5.5993,
"step": 87
},
{
"epoch": 0.15049166310389056,
"grad_norm": 7.544280052185059,
"learning_rate": 6.387014543809223e-05,
"loss": 5.3809,
"step": 88
},
{
"epoch": 0.15220179563916203,
"grad_norm": 6.987375259399414,
"learning_rate": 6.307399704769099e-05,
"loss": 5.187,
"step": 89
},
{
"epoch": 0.15391192817443353,
"grad_norm": 7.001568794250488,
"learning_rate": 6.227427435703997e-05,
"loss": 5.6348,
"step": 90
},
{
"epoch": 0.155622060709705,
"grad_norm": 7.184516906738281,
"learning_rate": 6.147119600233758e-05,
"loss": 5.4621,
"step": 91
},
{
"epoch": 0.1573321932449765,
"grad_norm": 6.922083377838135,
"learning_rate": 6.066498153718735e-05,
"loss": 5.2475,
"step": 92
},
{
"epoch": 0.15904232578024796,
"grad_norm": 7.21707820892334,
"learning_rate": 5.985585137257401e-05,
"loss": 5.465,
"step": 93
},
{
"epoch": 0.16075245831551946,
"grad_norm": 7.266740798950195,
"learning_rate": 5.90440267166055e-05,
"loss": 5.1979,
"step": 94
},
{
"epoch": 0.16246259085079093,
"grad_norm": 8.004600524902344,
"learning_rate": 5.8229729514036705e-05,
"loss": 5.5634,
"step": 95
},
{
"epoch": 0.16417272338606242,
"grad_norm": 8.2750825881958,
"learning_rate": 5.74131823855921e-05,
"loss": 5.8962,
"step": 96
},
{
"epoch": 0.1658828559213339,
"grad_norm": 8.06142807006836,
"learning_rate": 5.6594608567103456e-05,
"loss": 5.5395,
"step": 97
},
{
"epoch": 0.1675929884566054,
"grad_norm": 9.92174243927002,
"learning_rate": 5.577423184847932e-05,
"loss": 6.3507,
"step": 98
},
{
"epoch": 0.16930312099187686,
"grad_norm": 9.798441886901855,
"learning_rate": 5.495227651252315e-05,
"loss": 6.3473,
"step": 99
},
{
"epoch": 0.17101325352714836,
"grad_norm": 11.396499633789062,
"learning_rate": 5.4128967273616625e-05,
"loss": 6.199,
"step": 100
},
{
"epoch": 0.17101325352714836,
"eval_loss": 1.3837385177612305,
"eval_runtime": 71.0039,
"eval_samples_per_second": 13.872,
"eval_steps_per_second": 3.479,
"step": 100
},
{
"epoch": 0.17272338606241983,
"grad_norm": 6.1180596351623535,
"learning_rate": 5.330452921628497e-05,
"loss": 4.1311,
"step": 101
},
{
"epoch": 0.17443351859769132,
"grad_norm": 6.361010551452637,
"learning_rate": 5.247918773366112e-05,
"loss": 4.5486,
"step": 102
},
{
"epoch": 0.1761436511329628,
"grad_norm": 6.283280372619629,
"learning_rate": 5.165316846586541e-05,
"loss": 4.9742,
"step": 103
},
{
"epoch": 0.1778537836682343,
"grad_norm": 5.876176357269287,
"learning_rate": 5.0826697238317935e-05,
"loss": 4.9586,
"step": 104
},
{
"epoch": 0.17956391620350576,
"grad_norm": 5.371125221252441,
"learning_rate": 5e-05,
"loss": 4.7882,
"step": 105
},
{
"epoch": 0.18127404873877725,
"grad_norm": 5.151697158813477,
"learning_rate": 4.917330276168208e-05,
"loss": 4.9775,
"step": 106
},
{
"epoch": 0.18298418127404875,
"grad_norm": 5.419619560241699,
"learning_rate": 4.834683153413459e-05,
"loss": 5.2023,
"step": 107
},
{
"epoch": 0.18469431380932022,
"grad_norm": 5.808403491973877,
"learning_rate": 4.7520812266338885e-05,
"loss": 5.1266,
"step": 108
},
{
"epoch": 0.18640444634459172,
"grad_norm": 5.708106517791748,
"learning_rate": 4.669547078371504e-05,
"loss": 4.7815,
"step": 109
},
{
"epoch": 0.1881145788798632,
"grad_norm": 5.750987529754639,
"learning_rate": 4.5871032726383386e-05,
"loss": 5.1993,
"step": 110
},
{
"epoch": 0.18982471141513468,
"grad_norm": 5.842879772186279,
"learning_rate": 4.504772348747687e-05,
"loss": 5.0834,
"step": 111
},
{
"epoch": 0.19153484395040615,
"grad_norm": 5.688647747039795,
"learning_rate": 4.4225768151520694e-05,
"loss": 5.1542,
"step": 112
},
{
"epoch": 0.19324497648567765,
"grad_norm": 6.085207939147949,
"learning_rate": 4.3405391432896555e-05,
"loss": 5.3708,
"step": 113
},
{
"epoch": 0.19495510902094912,
"grad_norm": 5.445624351501465,
"learning_rate": 4.2586817614407895e-05,
"loss": 4.8629,
"step": 114
},
{
"epoch": 0.19666524155622062,
"grad_norm": 5.688288688659668,
"learning_rate": 4.17702704859633e-05,
"loss": 4.9789,
"step": 115
},
{
"epoch": 0.19837537409149208,
"grad_norm": 6.147029399871826,
"learning_rate": 4.095597328339452e-05,
"loss": 5.0329,
"step": 116
},
{
"epoch": 0.20008550662676358,
"grad_norm": 5.8157196044921875,
"learning_rate": 4.0144148627425993e-05,
"loss": 4.9307,
"step": 117
},
{
"epoch": 0.20179563916203505,
"grad_norm": 5.963608741760254,
"learning_rate": 3.933501846281267e-05,
"loss": 5.11,
"step": 118
},
{
"epoch": 0.20350577169730655,
"grad_norm": 5.836517810821533,
"learning_rate": 3.852880399766243e-05,
"loss": 4.9338,
"step": 119
},
{
"epoch": 0.20521590423257802,
"grad_norm": 6.028753280639648,
"learning_rate": 3.772572564296005e-05,
"loss": 5.018,
"step": 120
},
{
"epoch": 0.2069260367678495,
"grad_norm": 5.885068893432617,
"learning_rate": 3.6926002952309016e-05,
"loss": 4.7368,
"step": 121
},
{
"epoch": 0.20863616930312098,
"grad_norm": 6.388590335845947,
"learning_rate": 3.612985456190778e-05,
"loss": 5.1395,
"step": 122
},
{
"epoch": 0.21034630183839248,
"grad_norm": 6.1952385902404785,
"learning_rate": 3.533749813077677e-05,
"loss": 5.1751,
"step": 123
},
{
"epoch": 0.21205643437366395,
"grad_norm": 6.2135443687438965,
"learning_rate": 3.4549150281252636e-05,
"loss": 5.012,
"step": 124
},
{
"epoch": 0.21376656690893545,
"grad_norm": 6.214436054229736,
"learning_rate": 3.3765026539765834e-05,
"loss": 5.0691,
"step": 125
},
{
"epoch": 0.21547669944420692,
"grad_norm": 6.635720252990723,
"learning_rate": 3.298534127791785e-05,
"loss": 5.3663,
"step": 126
},
{
"epoch": 0.2171868319794784,
"grad_norm": 6.239476203918457,
"learning_rate": 3.221030765387417e-05,
"loss": 5.0767,
"step": 127
},
{
"epoch": 0.21889696451474988,
"grad_norm": 6.299355983734131,
"learning_rate": 3.144013755408895e-05,
"loss": 5.2056,
"step": 128
},
{
"epoch": 0.22060709705002138,
"grad_norm": 6.062747955322266,
"learning_rate": 3.0675041535377405e-05,
"loss": 4.7423,
"step": 129
},
{
"epoch": 0.22231722958529285,
"grad_norm": 6.572103023529053,
"learning_rate": 2.991522876735154e-05,
"loss": 5.1702,
"step": 130
},
{
"epoch": 0.22402736212056434,
"grad_norm": 6.492870330810547,
"learning_rate": 2.916090697523549e-05,
"loss": 5.5961,
"step": 131
},
{
"epoch": 0.22573749465583584,
"grad_norm": 6.8842997550964355,
"learning_rate": 2.8412282383075363e-05,
"loss": 5.5233,
"step": 132
},
{
"epoch": 0.2274476271911073,
"grad_norm": 6.2079973220825195,
"learning_rate": 2.766955965735968e-05,
"loss": 4.9688,
"step": 133
},
{
"epoch": 0.2291577597263788,
"grad_norm": 6.081884384155273,
"learning_rate": 2.693294185106562e-05,
"loss": 5.1224,
"step": 134
},
{
"epoch": 0.23086789226165028,
"grad_norm": 6.672243595123291,
"learning_rate": 2.6202630348146324e-05,
"loss": 5.1432,
"step": 135
},
{
"epoch": 0.23257802479692177,
"grad_norm": 6.71869421005249,
"learning_rate": 2.547882480847461e-05,
"loss": 5.3586,
"step": 136
},
{
"epoch": 0.23428815733219324,
"grad_norm": 6.9483537673950195,
"learning_rate": 2.476172311325783e-05,
"loss": 5.4318,
"step": 137
},
{
"epoch": 0.23599828986746474,
"grad_norm": 6.899090766906738,
"learning_rate": 2.405152131093926e-05,
"loss": 5.2644,
"step": 138
},
{
"epoch": 0.2377084224027362,
"grad_norm": 7.219109535217285,
"learning_rate": 2.3348413563600325e-05,
"loss": 5.4093,
"step": 139
},
{
"epoch": 0.2394185549380077,
"grad_norm": 7.686084270477295,
"learning_rate": 2.2652592093878666e-05,
"loss": 5.2123,
"step": 140
},
{
"epoch": 0.24112868747327917,
"grad_norm": 6.914151668548584,
"learning_rate": 2.196424713241637e-05,
"loss": 5.3619,
"step": 141
},
{
"epoch": 0.24283882000855067,
"grad_norm": 7.846366882324219,
"learning_rate": 2.128356686585282e-05,
"loss": 5.3426,
"step": 142
},
{
"epoch": 0.24454895254382214,
"grad_norm": 7.538131237030029,
"learning_rate": 2.061073738537635e-05,
"loss": 5.6459,
"step": 143
},
{
"epoch": 0.24625908507909364,
"grad_norm": 7.204062461853027,
"learning_rate": 1.9945942635848748e-05,
"loss": 5.6492,
"step": 144
},
{
"epoch": 0.2479692176143651,
"grad_norm": 7.883687496185303,
"learning_rate": 1.928936436551661e-05,
"loss": 5.4739,
"step": 145
},
{
"epoch": 0.2496793501496366,
"grad_norm": 7.9401350021362305,
"learning_rate": 1.8641182076323148e-05,
"loss": 5.3524,
"step": 146
},
{
"epoch": 0.25138948268490807,
"grad_norm": 7.743297100067139,
"learning_rate": 1.800157297483417e-05,
"loss": 5.1239,
"step": 147
},
{
"epoch": 0.25309961522017954,
"grad_norm": 8.06339168548584,
"learning_rate": 1.7370711923791567e-05,
"loss": 5.6102,
"step": 148
},
{
"epoch": 0.25480974775545107,
"grad_norm": 8.759541511535645,
"learning_rate": 1.6748771394307585e-05,
"loss": 6.1376,
"step": 149
},
{
"epoch": 0.25651988029072254,
"grad_norm": 11.883188247680664,
"learning_rate": 1.6135921418712956e-05,
"loss": 6.0998,
"step": 150
},
{
"epoch": 0.25651988029072254,
"eval_loss": 1.2809289693832397,
"eval_runtime": 71.0145,
"eval_samples_per_second": 13.87,
"eval_steps_per_second": 3.478,
"step": 150
},
{
"epoch": 0.258230012825994,
"grad_norm": 4.4953413009643555,
"learning_rate": 1.553232954407171e-05,
"loss": 3.7884,
"step": 151
},
{
"epoch": 0.2599401453612655,
"grad_norm": 5.304215431213379,
"learning_rate": 1.4938160786375572e-05,
"loss": 4.2682,
"step": 152
},
{
"epoch": 0.261650277896537,
"grad_norm": 5.452952861785889,
"learning_rate": 1.435357758543015e-05,
"loss": 4.6377,
"step": 153
},
{
"epoch": 0.26336041043180847,
"grad_norm": 6.52045202255249,
"learning_rate": 1.3778739760445552e-05,
"loss": 4.8163,
"step": 154
},
{
"epoch": 0.26507054296707994,
"grad_norm": 6.399850845336914,
"learning_rate": 1.3213804466343421e-05,
"loss": 4.8225,
"step": 155
},
{
"epoch": 0.2667806755023514,
"grad_norm": 6.2536749839782715,
"learning_rate": 1.2658926150792322e-05,
"loss": 5.0223,
"step": 156
},
{
"epoch": 0.26849080803762293,
"grad_norm": 6.324411392211914,
"learning_rate": 1.2114256511983274e-05,
"loss": 5.4117,
"step": 157
},
{
"epoch": 0.2702009405728944,
"grad_norm": 5.510275363922119,
"learning_rate": 1.157994445715706e-05,
"loss": 4.8219,
"step": 158
},
{
"epoch": 0.27191107310816587,
"grad_norm": 5.786247730255127,
"learning_rate": 1.1056136061894384e-05,
"loss": 5.2305,
"step": 159
},
{
"epoch": 0.2736212056434374,
"grad_norm": 5.628164291381836,
"learning_rate": 1.0542974530180327e-05,
"loss": 5.0321,
"step": 160
},
{
"epoch": 0.27533133817870886,
"grad_norm": 5.594108581542969,
"learning_rate": 1.0040600155253765e-05,
"loss": 4.8256,
"step": 161
},
{
"epoch": 0.27704147071398033,
"grad_norm": 5.432445049285889,
"learning_rate": 9.549150281252633e-06,
"loss": 4.6674,
"step": 162
},
{
"epoch": 0.2787516032492518,
"grad_norm": 5.55719518661499,
"learning_rate": 9.068759265665384e-06,
"loss": 4.6913,
"step": 163
},
{
"epoch": 0.2804617357845233,
"grad_norm": 5.277993202209473,
"learning_rate": 8.599558442598998e-06,
"loss": 4.8593,
"step": 164
},
{
"epoch": 0.2821718683197948,
"grad_norm": 5.357100963592529,
"learning_rate": 8.141676086873572e-06,
"loss": 4.8837,
"step": 165
},
{
"epoch": 0.28388200085506626,
"grad_norm": 5.337014675140381,
"learning_rate": 7.695237378953223e-06,
"loss": 4.8787,
"step": 166
},
{
"epoch": 0.28559213339033773,
"grad_norm": 5.865081310272217,
"learning_rate": 7.260364370723044e-06,
"loss": 5.0684,
"step": 167
},
{
"epoch": 0.28730226592560926,
"grad_norm": 5.6288161277771,
"learning_rate": 6.837175952121306e-06,
"loss": 4.8526,
"step": 168
},
{
"epoch": 0.2890123984608807,
"grad_norm": 5.587432384490967,
"learning_rate": 6.425787818636131e-06,
"loss": 4.6702,
"step": 169
},
{
"epoch": 0.2907225309961522,
"grad_norm": 5.569537162780762,
"learning_rate": 6.026312439675552e-06,
"loss": 4.821,
"step": 170
},
{
"epoch": 0.29243266353142366,
"grad_norm": 5.6782989501953125,
"learning_rate": 5.6388590278194096e-06,
"loss": 5.0815,
"step": 171
},
{
"epoch": 0.2941427960666952,
"grad_norm": 5.838556289672852,
"learning_rate": 5.263533508961827e-06,
"loss": 5.0719,
"step": 172
},
{
"epoch": 0.29585292860196666,
"grad_norm": 5.764434814453125,
"learning_rate": 4.900438493352055e-06,
"loss": 4.8881,
"step": 173
},
{
"epoch": 0.2975630611372381,
"grad_norm": 5.603269577026367,
"learning_rate": 4.549673247541875e-06,
"loss": 4.9162,
"step": 174
},
{
"epoch": 0.2992731936725096,
"grad_norm": 5.911520481109619,
"learning_rate": 4.2113336672471245e-06,
"loss": 4.9465,
"step": 175
},
{
"epoch": 0.3009833262077811,
"grad_norm": 6.086195945739746,
"learning_rate": 3.885512251130763e-06,
"loss": 4.8536,
"step": 176
},
{
"epoch": 0.3026934587430526,
"grad_norm": 6.540954113006592,
"learning_rate": 3.5722980755146517e-06,
"loss": 5.3478,
"step": 177
},
{
"epoch": 0.30440359127832406,
"grad_norm": 6.309847354888916,
"learning_rate": 3.271776770026963e-06,
"loss": 5.2735,
"step": 178
},
{
"epoch": 0.30611372381359553,
"grad_norm": 6.2063775062561035,
"learning_rate": 2.9840304941919415e-06,
"loss": 4.9368,
"step": 179
},
{
"epoch": 0.30782385634886705,
"grad_norm": 6.023078441619873,
"learning_rate": 2.7091379149682685e-06,
"loss": 5.1261,
"step": 180
},
{
"epoch": 0.3095339888841385,
"grad_norm": 6.490723133087158,
"learning_rate": 2.4471741852423237e-06,
"loss": 5.1628,
"step": 181
},
{
"epoch": 0.31124412141941,
"grad_norm": 6.0313873291015625,
"learning_rate": 2.1982109232821178e-06,
"loss": 4.9787,
"step": 182
},
{
"epoch": 0.31295425395468146,
"grad_norm": 6.360531330108643,
"learning_rate": 1.962316193157593e-06,
"loss": 5.0005,
"step": 183
},
{
"epoch": 0.314664386489953,
"grad_norm": 6.467580318450928,
"learning_rate": 1.7395544861325718e-06,
"loss": 5.0739,
"step": 184
},
{
"epoch": 0.31637451902522445,
"grad_norm": 6.431023597717285,
"learning_rate": 1.5299867030334814e-06,
"loss": 5.0959,
"step": 185
},
{
"epoch": 0.3180846515604959,
"grad_norm": 6.457474708557129,
"learning_rate": 1.333670137599713e-06,
"loss": 5.2955,
"step": 186
},
{
"epoch": 0.31979478409576745,
"grad_norm": 6.595992565155029,
"learning_rate": 1.1506584608200367e-06,
"loss": 5.1507,
"step": 187
},
{
"epoch": 0.3215049166310389,
"grad_norm": 7.012680530548096,
"learning_rate": 9.810017062595322e-07,
"loss": 5.3988,
"step": 188
},
{
"epoch": 0.3232150491663104,
"grad_norm": 6.616781711578369,
"learning_rate": 8.247462563808817e-07,
"loss": 5.2315,
"step": 189
},
{
"epoch": 0.32492518170158186,
"grad_norm": 7.558001518249512,
"learning_rate": 6.819348298638839e-07,
"loss": 5.758,
"step": 190
},
{
"epoch": 0.3266353142368534,
"grad_norm": 6.658862590789795,
"learning_rate": 5.526064699265753e-07,
"loss": 5.1068,
"step": 191
},
{
"epoch": 0.32834544677212485,
"grad_norm": 7.025755882263184,
"learning_rate": 4.367965336512403e-07,
"loss": 5.3845,
"step": 192
},
{
"epoch": 0.3300555793073963,
"grad_norm": 7.665297508239746,
"learning_rate": 3.3453668231809286e-07,
"loss": 5.4417,
"step": 193
},
{
"epoch": 0.3317657118426678,
"grad_norm": 7.272705554962158,
"learning_rate": 2.458548727494292e-07,
"loss": 5.2698,
"step": 194
},
{
"epoch": 0.3334758443779393,
"grad_norm": 8.50095272064209,
"learning_rate": 1.7077534966650766e-07,
"loss": 5.3532,
"step": 195
},
{
"epoch": 0.3351859769132108,
"grad_norm": 7.427955627441406,
"learning_rate": 1.0931863906127327e-07,
"loss": 5.291,
"step": 196
},
{
"epoch": 0.33689610944848225,
"grad_norm": 8.0869779586792,
"learning_rate": 6.150154258476315e-08,
"loss": 5.5174,
"step": 197
},
{
"epoch": 0.3386062419837537,
"grad_norm": 9.528826713562012,
"learning_rate": 2.7337132953697554e-08,
"loss": 6.0544,
"step": 198
},
{
"epoch": 0.34031637451902524,
"grad_norm": 9.277633666992188,
"learning_rate": 6.834750376549792e-09,
"loss": 6.0752,
"step": 199
},
{
"epoch": 0.3420265070542967,
"grad_norm": 11.991414070129395,
"learning_rate": 0.0,
"loss": 6.2945,
"step": 200
},
{
"epoch": 0.3420265070542967,
"eval_loss": 1.2369053363800049,
"eval_runtime": 71.0035,
"eval_samples_per_second": 13.873,
"eval_steps_per_second": 3.479,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.863232968556544e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}