{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.523809523809524,
"eval_steps": 500,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06349206349206349,
"grad_norm": 57.57447834029504,
"learning_rate": 6.000000000000001e-08,
"loss": 4.0982,
"step": 1
},
{
"epoch": 0.12698412698412698,
"grad_norm": 57.2163757354146,
"learning_rate": 1.2000000000000002e-07,
"loss": 3.8959,
"step": 2
},
{
"epoch": 0.19047619047619047,
"grad_norm": 54.21668090486469,
"learning_rate": 1.8e-07,
"loss": 4.0292,
"step": 3
},
{
"epoch": 0.25396825396825395,
"grad_norm": 58.33661945400434,
"learning_rate": 2.4000000000000003e-07,
"loss": 4.1284,
"step": 4
},
{
"epoch": 0.31746031746031744,
"grad_norm": 63.01395357482665,
"learning_rate": 3.0000000000000004e-07,
"loss": 4.188,
"step": 5
},
{
"epoch": 0.38095238095238093,
"grad_norm": 57.71828103888573,
"learning_rate": 3.6e-07,
"loss": 3.9705,
"step": 6
},
{
"epoch": 0.4444444444444444,
"grad_norm": 51.089655348018326,
"learning_rate": 4.2000000000000006e-07,
"loss": 3.8838,
"step": 7
},
{
"epoch": 0.5079365079365079,
"grad_norm": 58.15717323097846,
"learning_rate": 4.800000000000001e-07,
"loss": 4.1963,
"step": 8
},
{
"epoch": 0.5714285714285714,
"grad_norm": 56.91779340921828,
"learning_rate": 5.4e-07,
"loss": 3.9283,
"step": 9
},
{
"epoch": 0.6349206349206349,
"grad_norm": 51.10490139053562,
"learning_rate": 6.000000000000001e-07,
"loss": 3.8356,
"step": 10
},
{
"epoch": 0.6984126984126984,
"grad_norm": 52.268827098443424,
"learning_rate": 6.6e-07,
"loss": 3.7757,
"step": 11
},
{
"epoch": 0.7619047619047619,
"grad_norm": 51.80062279098391,
"learning_rate": 7.2e-07,
"loss": 3.9636,
"step": 12
},
{
"epoch": 0.8253968253968254,
"grad_norm": 44.40217995730599,
"learning_rate": 7.8e-07,
"loss": 3.6742,
"step": 13
},
{
"epoch": 0.8888888888888888,
"grad_norm": 44.82724671161504,
"learning_rate": 8.400000000000001e-07,
"loss": 3.768,
"step": 14
},
{
"epoch": 0.9523809523809523,
"grad_norm": 42.24923860328258,
"learning_rate": 9e-07,
"loss": 3.8381,
"step": 15
},
{
"epoch": 1.0158730158730158,
"grad_norm": 42.90764521769713,
"learning_rate": 9.600000000000001e-07,
"loss": 3.7314,
"step": 16
},
{
"epoch": 1.0793650793650793,
"grad_norm": 45.24513919191596,
"learning_rate": 1.0200000000000002e-06,
"loss": 3.8718,
"step": 17
},
{
"epoch": 1.1428571428571428,
"grad_norm": 31.63482178736033,
"learning_rate": 1.08e-06,
"loss": 3.3766,
"step": 18
},
{
"epoch": 1.2063492063492063,
"grad_norm": 32.21560942157832,
"learning_rate": 1.14e-06,
"loss": 3.3699,
"step": 19
},
{
"epoch": 1.2698412698412698,
"grad_norm": 32.952289793686546,
"learning_rate": 1.2000000000000002e-06,
"loss": 3.4645,
"step": 20
},
{
"epoch": 1.3333333333333333,
"grad_norm": 29.56293796088727,
"learning_rate": 1.26e-06,
"loss": 3.2721,
"step": 21
},
{
"epoch": 1.3968253968253967,
"grad_norm": 28.09762343844428,
"learning_rate": 1.32e-06,
"loss": 3.1558,
"step": 22
},
{
"epoch": 1.4603174603174602,
"grad_norm": 27.621733664600917,
"learning_rate": 1.3800000000000001e-06,
"loss": 3.2164,
"step": 23
},
{
"epoch": 1.5238095238095237,
"grad_norm": 18.800357074717578,
"learning_rate": 1.44e-06,
"loss": 3.0664,
"step": 24
},
{
"epoch": 1.5873015873015874,
"grad_norm": 13.800920246917714,
"learning_rate": 1.5e-06,
"loss": 2.7536,
"step": 25
},
{
"epoch": 1.6507936507936507,
"grad_norm": 13.61642291684508,
"learning_rate": 1.56e-06,
"loss": 2.4849,
"step": 26
},
{
"epoch": 1.7142857142857144,
"grad_norm": 13.43480789920014,
"learning_rate": 1.6200000000000002e-06,
"loss": 2.6918,
"step": 27
},
{
"epoch": 1.7777777777777777,
"grad_norm": 14.095592842222162,
"learning_rate": 1.6800000000000002e-06,
"loss": 2.6273,
"step": 28
},
{
"epoch": 1.8412698412698414,
"grad_norm": 12.620058865170318,
"learning_rate": 1.7399999999999999e-06,
"loss": 2.5829,
"step": 29
},
{
"epoch": 1.9047619047619047,
"grad_norm": 9.873668620990621,
"learning_rate": 1.8e-06,
"loss": 2.6698,
"step": 30
},
{
"epoch": 1.9682539682539684,
"grad_norm": 10.645995976559039,
"learning_rate": 1.86e-06,
"loss": 2.7849,
"step": 31
},
{
"epoch": 2.0317460317460316,
"grad_norm": 10.770458724192382,
"learning_rate": 1.9200000000000003e-06,
"loss": 2.6262,
"step": 32
},
{
"epoch": 2.0952380952380953,
"grad_norm": 9.288284603719239,
"learning_rate": 1.98e-06,
"loss": 2.405,
"step": 33
},
{
"epoch": 2.1587301587301586,
"grad_norm": 11.040656078736276,
"learning_rate": 2.0400000000000004e-06,
"loss": 2.3917,
"step": 34
},
{
"epoch": 2.2222222222222223,
"grad_norm": 9.014452408383601,
"learning_rate": 2.1e-06,
"loss": 2.2996,
"step": 35
},
{
"epoch": 2.2857142857142856,
"grad_norm": 10.977152245763714,
"learning_rate": 2.16e-06,
"loss": 2.4617,
"step": 36
},
{
"epoch": 2.3492063492063493,
"grad_norm": 11.961573908046798,
"learning_rate": 2.22e-06,
"loss": 2.4845,
"step": 37
},
{
"epoch": 2.4126984126984126,
"grad_norm": 12.506586582376126,
"learning_rate": 2.28e-06,
"loss": 2.2293,
"step": 38
},
{
"epoch": 2.4761904761904763,
"grad_norm": 15.888921935838896,
"learning_rate": 2.34e-06,
"loss": 2.2445,
"step": 39
},
{
"epoch": 2.5396825396825395,
"grad_norm": 17.724634829521865,
"learning_rate": 2.4000000000000003e-06,
"loss": 2.5632,
"step": 40
},
{
"epoch": 2.6031746031746033,
"grad_norm": 16.577971095615666,
"learning_rate": 2.4599999999999997e-06,
"loss": 1.9954,
"step": 41
},
{
"epoch": 2.6666666666666665,
"grad_norm": 20.04871359977933,
"learning_rate": 2.52e-06,
"loss": 1.8782,
"step": 42
},
{
"epoch": 2.7301587301587302,
"grad_norm": 16.171304753721177,
"learning_rate": 2.58e-06,
"loss": 1.9382,
"step": 43
},
{
"epoch": 2.7936507936507935,
"grad_norm": 19.412153353563298,
"learning_rate": 2.64e-06,
"loss": 1.752,
"step": 44
},
{
"epoch": 2.857142857142857,
"grad_norm": 17.206107408812862,
"learning_rate": 2.7e-06,
"loss": 1.9237,
"step": 45
},
{
"epoch": 2.9206349206349205,
"grad_norm": 20.99711783167552,
"learning_rate": 2.7600000000000003e-06,
"loss": 1.5944,
"step": 46
},
{
"epoch": 2.984126984126984,
"grad_norm": 17.220612862140708,
"learning_rate": 2.82e-06,
"loss": 1.6411,
"step": 47
},
{
"epoch": 3.0476190476190474,
"grad_norm": 26.644954169593106,
"learning_rate": 2.88e-06,
"loss": 1.4512,
"step": 48
},
{
"epoch": 3.111111111111111,
"grad_norm": 65.10523878873882,
"learning_rate": 2.9400000000000002e-06,
"loss": 1.6913,
"step": 49
},
{
"epoch": 3.1746031746031744,
"grad_norm": 27.458331836448174,
"learning_rate": 3e-06,
"loss": 1.6158,
"step": 50
},
{
"epoch": 3.238095238095238,
"grad_norm": 17.879988381877414,
"learning_rate": 2.9992598405485977e-06,
"loss": 1.6304,
"step": 51
},
{
"epoch": 3.3015873015873014,
"grad_norm": 20.512906161569692,
"learning_rate": 2.9970400926424076e-06,
"loss": 1.3412,
"step": 52
},
{
"epoch": 3.365079365079365,
"grad_norm": 28.162531827787188,
"learning_rate": 2.99334294690462e-06,
"loss": 1.308,
"step": 53
},
{
"epoch": 3.4285714285714284,
"grad_norm": 20.08257176199191,
"learning_rate": 2.988172051971717e-06,
"loss": 1.2513,
"step": 54
},
{
"epoch": 3.492063492063492,
"grad_norm": 22.419438866388326,
"learning_rate": 2.981532510892707e-06,
"loss": 1.3811,
"step": 55
},
{
"epoch": 3.5555555555555554,
"grad_norm": 18.961751404248563,
"learning_rate": 2.9734308760930334e-06,
"loss": 1.2914,
"step": 56
},
{
"epoch": 3.619047619047619,
"grad_norm": 37.34845292165996,
"learning_rate": 2.9638751429081213e-06,
"loss": 1.2351,
"step": 57
},
{
"epoch": 3.682539682539683,
"grad_norm": 14.747371601837061,
"learning_rate": 2.9528747416929465e-06,
"loss": 1.2103,
"step": 58
},
{
"epoch": 3.746031746031746,
"grad_norm": 17.04660819603052,
"learning_rate": 2.9404405285154148e-06,
"loss": 1.235,
"step": 59
},
{
"epoch": 3.8095238095238093,
"grad_norm": 19.420464839345144,
"learning_rate": 2.9265847744427307e-06,
"loss": 1.1033,
"step": 60
},
{
"epoch": 3.873015873015873,
"grad_norm": 35.5290042332465,
"learning_rate": 2.9113211534313383e-06,
"loss": 1.2773,
"step": 61
},
{
"epoch": 3.9365079365079367,
"grad_norm": 22.266882915667832,
"learning_rate": 2.894664728832377e-06,
"loss": 1.1921,
"step": 62
},
{
"epoch": 4.0,
"grad_norm": 15.491496380357695,
"learning_rate": 2.8766319385259716e-06,
"loss": 1.0918,
"step": 63
},
{
"epoch": 4.063492063492063,
"grad_norm": 14.926456699177113,
"learning_rate": 2.8572405786990296e-06,
"loss": 0.9075,
"step": 64
},
{
"epoch": 4.1269841269841265,
"grad_norm": 13.661102423305078,
"learning_rate": 2.8365097862825516e-06,
"loss": 1.0097,
"step": 65
},
{
"epoch": 4.190476190476191,
"grad_norm": 13.631384875740038,
"learning_rate": 2.814460020065795e-06,
"loss": 0.9625,
"step": 66
},
{
"epoch": 4.253968253968254,
"grad_norm": 14.239925114199261,
"learning_rate": 2.7911130405059155e-06,
"loss": 0.9898,
"step": 67
},
{
"epoch": 4.317460317460317,
"grad_norm": 12.77560062730983,
"learning_rate": 2.7664918882530226e-06,
"loss": 0.978,
"step": 68
},
{
"epoch": 4.380952380952381,
"grad_norm": 14.003703335394645,
"learning_rate": 2.7406208614118425e-06,
"loss": 0.988,
"step": 69
},
{
"epoch": 4.444444444444445,
"grad_norm": 13.375052129651001,
"learning_rate": 2.713525491562421e-06,
"loss": 0.8734,
"step": 70
},
{
"epoch": 4.507936507936508,
"grad_norm": 12.679615580914351,
"learning_rate": 2.685232518563536e-06,
"loss": 0.7357,
"step": 71
},
{
"epoch": 4.571428571428571,
"grad_norm": 12.990007533232859,
"learning_rate": 2.655769864163684e-06,
"loss": 0.8909,
"step": 72
},
{
"epoch": 4.634920634920634,
"grad_norm": 12.221083897238143,
"learning_rate": 2.6251666044456895e-06,
"loss": 0.78,
"step": 73
},
{
"epoch": 4.698412698412699,
"grad_norm": 15.252264298495929,
"learning_rate": 2.5934529411321173e-06,
"loss": 0.6989,
"step": 74
},
{
"epoch": 4.761904761904762,
"grad_norm": 13.687902392472823,
"learning_rate": 2.5606601717798212e-06,
"loss": 0.9173,
"step": 75
},
{
"epoch": 4.825396825396825,
"grad_norm": 16.092259560486735,
"learning_rate": 2.526820658893033e-06,
"loss": 0.8555,
"step": 76
},
{
"epoch": 4.888888888888889,
"grad_norm": 11.153841319781899,
"learning_rate": 2.491967797985478e-06,
"loss": 0.768,
"step": 77
},
{
"epoch": 4.9523809523809526,
"grad_norm": 16.24314620878438,
"learning_rate": 2.456135984623035e-06,
"loss": 0.6937,
"step": 78
},
{
"epoch": 5.015873015873016,
"grad_norm": 12.98652461918944,
"learning_rate": 2.419360580479465e-06,
"loss": 0.8355,
"step": 79
},
{
"epoch": 5.079365079365079,
"grad_norm": 8.727731435832583,
"learning_rate": 2.3816778784387097e-06,
"loss": 0.6985,
"step": 80
},
{
"epoch": 5.142857142857143,
"grad_norm": 11.299395468739695,
"learning_rate": 2.343125066778196e-06,
"loss": 0.6097,
"step": 81
},
{
"epoch": 5.2063492063492065,
"grad_norm": 10.89035476736735,
"learning_rate": 2.303740192468495e-06,
"loss": 0.8029,
"step": 82
},
{
"epoch": 5.26984126984127,
"grad_norm": 8.148154849352979,
"learning_rate": 2.263562123625557e-06,
"loss": 0.5835,
"step": 83
},
{
"epoch": 5.333333333333333,
"grad_norm": 12.49282469909081,
"learning_rate": 2.222630511152573e-06,
"loss": 0.628,
"step": 84
},
{
"epoch": 5.396825396825397,
"grad_norm": 8.307586947822,
"learning_rate": 2.18098574960932e-06,
"loss": 0.7011,
"step": 85
},
{
"epoch": 5.4603174603174605,
"grad_norm": 8.270887155947076,
"learning_rate": 2.138668937347609e-06,
"loss": 0.5918,
"step": 86
},
{
"epoch": 5.523809523809524,
"grad_norm": 12.098343605915145,
"learning_rate": 2.0957218359521707e-06,
"loss": 0.4842,
"step": 87
},
{
"epoch": 5.587301587301587,
"grad_norm": 10.793266890734438,
"learning_rate": 2.0521868290270174e-06,
"loss": 0.7194,
"step": 88
},
{
"epoch": 5.650793650793651,
"grad_norm": 9.811217896188793,
"learning_rate": 2.0081068803679374e-06,
"loss": 0.5915,
"step": 89
},
{
"epoch": 5.714285714285714,
"grad_norm": 17.005853804161248,
"learning_rate": 1.963525491562421e-06,
"loss": 0.6159,
"step": 90
},
{
"epoch": 5.777777777777778,
"grad_norm": 11.50337527021372,
"learning_rate": 1.918486659058844e-06,
"loss": 0.6515,
"step": 91
},
{
"epoch": 5.841269841269841,
"grad_norm": 11.271416372518264,
"learning_rate": 1.8730348307472826e-06,
"loss": 0.4101,
"step": 92
},
{
"epoch": 5.904761904761905,
"grad_norm": 9.732208459925952,
"learning_rate": 1.827214862094814e-06,
"loss": 0.5937,
"step": 93
},
{
"epoch": 5.968253968253968,
"grad_norm": 9.882839220456333,
"learning_rate": 1.7810719718785873e-06,
"loss": 0.5382,
"step": 94
},
{
"epoch": 6.031746031746032,
"grad_norm": 10.962287605399686,
"learning_rate": 1.7346516975603465e-06,
"loss": 0.4762,
"step": 95
},
{
"epoch": 6.095238095238095,
"grad_norm": 8.430198784148699,
"learning_rate": 1.6879998503464564e-06,
"loss": 0.5713,
"step": 96
},
{
"epoch": 6.158730158730159,
"grad_norm": 10.894222790901619,
"learning_rate": 1.6411624699777718e-06,
"loss": 0.5565,
"step": 97
},
{
"epoch": 6.222222222222222,
"grad_norm": 12.878403484815633,
"learning_rate": 1.5941857792939703e-06,
"loss": 0.5058,
"step": 98
},
{
"epoch": 6.285714285714286,
"grad_norm": 6.577107386592493,
"learning_rate": 1.5471161386171925e-06,
"loss": 0.4028,
"step": 99
},
{
"epoch": 6.349206349206349,
"grad_norm": 8.332341230736613,
"learning_rate": 1.5e-06,
"loss": 0.4107,
"step": 100
},
{
"epoch": 6.412698412698413,
"grad_norm": 10.897392576231315,
"learning_rate": 1.4528838613828075e-06,
"loss": 0.4851,
"step": 101
},
{
"epoch": 6.476190476190476,
"grad_norm": 10.09022209906221,
"learning_rate": 1.40581422070603e-06,
"loss": 0.4957,
"step": 102
},
{
"epoch": 6.5396825396825395,
"grad_norm": 7.353237258425595,
"learning_rate": 1.3588375300222285e-06,
"loss": 0.4534,
"step": 103
},
{
"epoch": 6.603174603174603,
"grad_norm": 9.004695390709724,
"learning_rate": 1.3120001496535434e-06,
"loss": 0.4615,
"step": 104
},
{
"epoch": 6.666666666666667,
"grad_norm": 8.971252585562915,
"learning_rate": 1.2653483024396534e-06,
"loss": 0.3708,
"step": 105
},
{
"epoch": 6.73015873015873,
"grad_norm": 7.803315873016153,
"learning_rate": 1.2189280281214128e-06,
"loss": 0.3618,
"step": 106
},
{
"epoch": 6.7936507936507935,
"grad_norm": 10.1946500046286,
"learning_rate": 1.1727851379051866e-06,
"loss": 0.5127,
"step": 107
},
{
"epoch": 6.857142857142857,
"grad_norm": 8.988718449624637,
"learning_rate": 1.1269651692527181e-06,
"loss": 0.3902,
"step": 108
},
{
"epoch": 6.920634920634921,
"grad_norm": 7.493496463903085,
"learning_rate": 1.0815133409411564e-06,
"loss": 0.4069,
"step": 109
},
{
"epoch": 6.984126984126984,
"grad_norm": 8.348873531323628,
"learning_rate": 1.036474508437579e-06,
"loss": 0.3469,
"step": 110
},
{
"epoch": 7.0476190476190474,
"grad_norm": 9.038998481408308,
"learning_rate": 9.918931196320629e-07,
"loss": 0.3247,
"step": 111
},
{
"epoch": 7.111111111111111,
"grad_norm": 7.844047940277719,
"learning_rate": 9.478131709729831e-07,
"loss": 0.3509,
"step": 112
},
{
"epoch": 7.174603174603175,
"grad_norm": 7.425090910067537,
"learning_rate": 9.042781640478293e-07,
"loss": 0.3648,
"step": 113
},
{
"epoch": 7.238095238095238,
"grad_norm": 12.359869006545956,
"learning_rate": 8.613310626523911e-07,
"loss": 0.2861,
"step": 114
},
{
"epoch": 7.301587301587301,
"grad_norm": 11.80335907456322,
"learning_rate": 8.190142503906799e-07,
"loss": 0.4778,
"step": 115
},
{
"epoch": 7.365079365079365,
"grad_norm": 12.471156069815597,
"learning_rate": 7.773694888474268e-07,
"loss": 0.3658,
"step": 116
},
{
"epoch": 7.428571428571429,
"grad_norm": 9.243334578032606,
"learning_rate": 7.36437876374443e-07,
"loss": 0.3291,
"step": 117
},
{
"epoch": 7.492063492063492,
"grad_norm": 8.40873822616543,
"learning_rate": 6.962598075315047e-07,
"loss": 0.2889,
"step": 118
},
{
"epoch": 7.555555555555555,
"grad_norm": 10.437791126518906,
"learning_rate": 6.568749332218045e-07,
"loss": 0.4034,
"step": 119
},
{
"epoch": 7.619047619047619,
"grad_norm": 8.296256384898921,
"learning_rate": 6.183221215612905e-07,
"loss": 0.3604,
"step": 120
},
{
"epoch": 7.682539682539683,
"grad_norm": 8.052915673295223,
"learning_rate": 5.806394195205356e-07,
"loss": 0.3938,
"step": 121
},
{
"epoch": 7.746031746031746,
"grad_norm": 5.792626137273427,
"learning_rate": 5.438640153769653e-07,
"loss": 0.3031,
"step": 122
},
{
"epoch": 7.809523809523809,
"grad_norm": 7.495104357983482,
"learning_rate": 5.080322020145225e-07,
"loss": 0.3826,
"step": 123
},
{
"epoch": 7.8730158730158735,
"grad_norm": 8.81251429634198,
"learning_rate": 4.731793411069669e-07,
"loss": 0.292,
"step": 124
},
{
"epoch": 7.936507936507937,
"grad_norm": 8.034376369007889,
"learning_rate": 4.3933982822017883e-07,
"loss": 0.4526,
"step": 125
},
{
"epoch": 8.0,
"grad_norm": 6.333920405739056,
"learning_rate": 4.06547058867883e-07,
"loss": 0.3632,
"step": 126
},
{
"epoch": 8.063492063492063,
"grad_norm": 6.19175131287288,
"learning_rate": 3.748333955543106e-07,
"loss": 0.2562,
"step": 127
},
{
"epoch": 8.126984126984127,
"grad_norm": 6.481701827503514,
"learning_rate": 3.442301358363163e-07,
"loss": 0.237,
"step": 128
},
{
"epoch": 8.19047619047619,
"grad_norm": 6.479683967653911,
"learning_rate": 3.147674814364644e-07,
"loss": 0.2607,
"step": 129
},
{
"epoch": 8.253968253968253,
"grad_norm": 5.676011576423432,
"learning_rate": 2.86474508437579e-07,
"loss": 0.2384,
"step": 130
},
{
"epoch": 8.317460317460318,
"grad_norm": 5.757077383945261,
"learning_rate": 2.593791385881571e-07,
"loss": 0.3726,
"step": 131
},
{
"epoch": 8.380952380952381,
"grad_norm": 6.871067257863073,
"learning_rate": 2.3350811174697772e-07,
"loss": 0.3818,
"step": 132
},
{
"epoch": 8.444444444444445,
"grad_norm": 7.268914398833981,
"learning_rate": 2.0888695949408471e-07,
"loss": 0.4703,
"step": 133
},
{
"epoch": 8.507936507936508,
"grad_norm": 7.1532015341319575,
"learning_rate": 1.8553997993420495e-07,
"loss": 0.357,
"step": 134
},
{
"epoch": 8.571428571428571,
"grad_norm": 6.681075937364691,
"learning_rate": 1.634902137174483e-07,
"loss": 0.3128,
"step": 135
},
{
"epoch": 8.634920634920634,
"grad_norm": 7.434604156502297,
"learning_rate": 1.4275942130097098e-07,
"loss": 0.3583,
"step": 136
},
{
"epoch": 8.698412698412698,
"grad_norm": 6.334177324650447,
"learning_rate": 1.2336806147402828e-07,
"loss": 0.3273,
"step": 137
},
{
"epoch": 8.761904761904763,
"grad_norm": 6.460288148240497,
"learning_rate": 1.0533527116762298e-07,
"loss": 0.298,
"step": 138
},
{
"epoch": 8.825396825396826,
"grad_norm": 6.793693864896331,
"learning_rate": 8.867884656866182e-08,
"loss": 0.4737,
"step": 139
},
{
"epoch": 8.88888888888889,
"grad_norm": 6.087635250584677,
"learning_rate": 7.341522555726971e-08,
"loss": 0.2492,
"step": 140
},
{
"epoch": 8.952380952380953,
"grad_norm": 5.958472793551493,
"learning_rate": 5.9559471484585404e-08,
"loss": 0.2531,
"step": 141
},
{
"epoch": 9.015873015873016,
"grad_norm": 6.181482193201246,
"learning_rate": 4.712525830705339e-08,
"loss": 0.2074,
"step": 142
},
{
"epoch": 9.079365079365079,
"grad_norm": 4.95043049803299,
"learning_rate": 3.6124857091878847e-08,
"loss": 0.1696,
"step": 143
},
{
"epoch": 9.142857142857142,
"grad_norm": 5.624633357663993,
"learning_rate": 2.6569123906967087e-08,
"loss": 0.3333,
"step": 144
},
{
"epoch": 9.206349206349206,
"grad_norm": 6.112000874321065,
"learning_rate": 1.846748910729351e-08,
"loss": 0.4033,
"step": 145
},
{
"epoch": 9.26984126984127,
"grad_norm": 6.64513673651448,
"learning_rate": 1.1827948028283353e-08,
"loss": 0.3716,
"step": 146
},
{
"epoch": 9.333333333333334,
"grad_norm": 5.509458009190147,
"learning_rate": 6.657053095380006e-09,
"loss": 0.3588,
"step": 147
},
{
"epoch": 9.396825396825397,
"grad_norm": 5.1740170504268015,
"learning_rate": 2.9599073575926614e-09,
"loss": 0.2379,
"step": 148
},
{
"epoch": 9.46031746031746,
"grad_norm": 5.9471797504961,
"learning_rate": 7.401594514026e-10,
"loss": 0.2427,
"step": 149
},
{
"epoch": 9.523809523809524,
"grad_norm": 5.771578335180772,
"learning_rate": 0.0,
"loss": 0.2553,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 150,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 139054587576320.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}