{
"best_metric": 0.021347366273403168,
"best_model_checkpoint": "autotrain-78oj2-jlrbv/checkpoint-3639",
"epoch": 2.99938182567484,
"eval_steps": 500,
"global_step": 3639,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0206058108386565,
"grad_norm": 5.651961326599121,
"learning_rate": 3.021978021978022e-06,
"loss": 0.9893,
"step": 25
},
{
"epoch": 0.041211621677313,
"grad_norm": 3.856781005859375,
"learning_rate": 6.456043956043957e-06,
"loss": 0.8203,
"step": 50
},
{
"epoch": 0.0618174325159695,
"grad_norm": 3.5703439712524414,
"learning_rate": 9.89010989010989e-06,
"loss": 0.608,
"step": 75
},
{
"epoch": 0.082423243354626,
"grad_norm": 6.108644485473633,
"learning_rate": 1.3324175824175824e-05,
"loss": 0.6063,
"step": 100
},
{
"epoch": 0.10302905419328251,
"grad_norm": 3.141345739364624,
"learning_rate": 1.6758241758241757e-05,
"loss": 0.5388,
"step": 125
},
{
"epoch": 0.123634865031939,
"grad_norm": 4.041093349456787,
"learning_rate": 2.0192307692307694e-05,
"loss": 0.4164,
"step": 150
},
{
"epoch": 0.1442406758705955,
"grad_norm": 5.099579334259033,
"learning_rate": 2.348901098901099e-05,
"loss": 0.3599,
"step": 175
},
{
"epoch": 0.164846486709252,
"grad_norm": 2.3562018871307373,
"learning_rate": 2.6785714285714288e-05,
"loss": 0.3026,
"step": 200
},
{
"epoch": 0.1854522975479085,
"grad_norm": 1.3748841285705566,
"learning_rate": 3.021978021978022e-05,
"loss": 0.1953,
"step": 225
},
{
"epoch": 0.20605810838656502,
"grad_norm": 5.422688961029053,
"learning_rate": 3.365384615384616e-05,
"loss": 0.2107,
"step": 250
},
{
"epoch": 0.2266639192252215,
"grad_norm": 3.1931090354919434,
"learning_rate": 3.708791208791209e-05,
"loss": 0.1905,
"step": 275
},
{
"epoch": 0.247269730063878,
"grad_norm": 7.671177387237549,
"learning_rate": 4.052197802197803e-05,
"loss": 0.1454,
"step": 300
},
{
"epoch": 0.2678755409025345,
"grad_norm": 5.701260089874268,
"learning_rate": 4.3956043956043955e-05,
"loss": 0.1193,
"step": 325
},
{
"epoch": 0.288481351741191,
"grad_norm": 4.128372669219971,
"learning_rate": 4.739010989010989e-05,
"loss": 0.1242,
"step": 350
},
{
"epoch": 0.30908716257984753,
"grad_norm": 2.761212110519409,
"learning_rate": 4.990839694656489e-05,
"loss": 0.137,
"step": 375
},
{
"epoch": 0.329692973418504,
"grad_norm": 5.561426162719727,
"learning_rate": 4.952671755725191e-05,
"loss": 0.1326,
"step": 400
},
{
"epoch": 0.35029878425716054,
"grad_norm": 0.6887926459312439,
"learning_rate": 4.9145038167938934e-05,
"loss": 0.1149,
"step": 425
},
{
"epoch": 0.370904595095817,
"grad_norm": 3.1560258865356445,
"learning_rate": 4.8763358778625956e-05,
"loss": 0.1091,
"step": 450
},
{
"epoch": 0.3915104059344735,
"grad_norm": 3.2499308586120605,
"learning_rate": 4.838167938931298e-05,
"loss": 0.1192,
"step": 475
},
{
"epoch": 0.41211621677313004,
"grad_norm": 1.6396406888961792,
"learning_rate": 4.8e-05,
"loss": 0.098,
"step": 500
},
{
"epoch": 0.4327220276117865,
"grad_norm": 1.4734318256378174,
"learning_rate": 4.7618320610687024e-05,
"loss": 0.1064,
"step": 525
},
{
"epoch": 0.453327838450443,
"grad_norm": 2.2481157779693604,
"learning_rate": 4.723664122137405e-05,
"loss": 0.1099,
"step": 550
},
{
"epoch": 0.47393364928909953,
"grad_norm": 2.839649200439453,
"learning_rate": 4.6854961832061075e-05,
"loss": 0.0999,
"step": 575
},
{
"epoch": 0.494539460127756,
"grad_norm": 0.30553099513053894,
"learning_rate": 4.64732824427481e-05,
"loss": 0.0992,
"step": 600
},
{
"epoch": 0.5151452709664125,
"grad_norm": 0.589301347732544,
"learning_rate": 4.609160305343512e-05,
"loss": 0.0735,
"step": 625
},
{
"epoch": 0.535751081805069,
"grad_norm": 2.1147384643554688,
"learning_rate": 4.570992366412214e-05,
"loss": 0.0901,
"step": 650
},
{
"epoch": 0.5563568926437256,
"grad_norm": 0.805992841720581,
"learning_rate": 4.5328244274809165e-05,
"loss": 0.0747,
"step": 675
},
{
"epoch": 0.576962703482382,
"grad_norm": 1.168734073638916,
"learning_rate": 4.494656488549619e-05,
"loss": 0.067,
"step": 700
},
{
"epoch": 0.5975685143210385,
"grad_norm": 5.0603790283203125,
"learning_rate": 4.45648854961832e-05,
"loss": 0.0677,
"step": 725
},
{
"epoch": 0.6181743251596951,
"grad_norm": 5.00893497467041,
"learning_rate": 4.4183206106870225e-05,
"loss": 0.0538,
"step": 750
},
{
"epoch": 0.6387801359983515,
"grad_norm": 0.6863914132118225,
"learning_rate": 4.3801526717557255e-05,
"loss": 0.0596,
"step": 775
},
{
"epoch": 0.659385946837008,
"grad_norm": 3.027167797088623,
"learning_rate": 4.341984732824428e-05,
"loss": 0.0476,
"step": 800
},
{
"epoch": 0.6799917576756646,
"grad_norm": 3.794942617416382,
"learning_rate": 4.30381679389313e-05,
"loss": 0.0922,
"step": 825
},
{
"epoch": 0.7005975685143211,
"grad_norm": 1.6739851236343384,
"learning_rate": 4.265648854961832e-05,
"loss": 0.0601,
"step": 850
},
{
"epoch": 0.7212033793529775,
"grad_norm": 4.5667195320129395,
"learning_rate": 4.2274809160305344e-05,
"loss": 0.0606,
"step": 875
},
{
"epoch": 0.741809190191634,
"grad_norm": 1.4475433826446533,
"learning_rate": 4.189312977099237e-05,
"loss": 0.0488,
"step": 900
},
{
"epoch": 0.7624150010302906,
"grad_norm": 3.672123908996582,
"learning_rate": 4.151145038167939e-05,
"loss": 0.066,
"step": 925
},
{
"epoch": 0.783020811868947,
"grad_norm": 0.7629732489585876,
"learning_rate": 4.112977099236641e-05,
"loss": 0.0533,
"step": 950
},
{
"epoch": 0.8036266227076035,
"grad_norm": 3.216966390609741,
"learning_rate": 4.0748091603053434e-05,
"loss": 0.0524,
"step": 975
},
{
"epoch": 0.8242324335462601,
"grad_norm": 1.6424092054367065,
"learning_rate": 4.036641221374046e-05,
"loss": 0.0449,
"step": 1000
},
{
"epoch": 0.8448382443849165,
"grad_norm": 3.3341548442840576,
"learning_rate": 3.9984732824427486e-05,
"loss": 0.0595,
"step": 1025
},
{
"epoch": 0.865444055223573,
"grad_norm": 0.1322302669286728,
"learning_rate": 3.960305343511451e-05,
"loss": 0.0717,
"step": 1050
},
{
"epoch": 0.8860498660622296,
"grad_norm": 5.5867414474487305,
"learning_rate": 3.922137404580153e-05,
"loss": 0.0457,
"step": 1075
},
{
"epoch": 0.906655676900886,
"grad_norm": 1.4986252784729004,
"learning_rate": 3.883969465648855e-05,
"loss": 0.0395,
"step": 1100
},
{
"epoch": 0.9272614877395425,
"grad_norm": 0.7124060392379761,
"learning_rate": 3.8458015267175575e-05,
"loss": 0.0463,
"step": 1125
},
{
"epoch": 0.9478672985781991,
"grad_norm": 1.1682511568069458,
"learning_rate": 3.80763358778626e-05,
"loss": 0.0396,
"step": 1150
},
{
"epoch": 0.9684731094168556,
"grad_norm": 4.690053939819336,
"learning_rate": 3.770992366412214e-05,
"loss": 0.0428,
"step": 1175
},
{
"epoch": 0.989078920255512,
"grad_norm": 1.3264271020889282,
"learning_rate": 3.732824427480916e-05,
"loss": 0.06,
"step": 1200
},
{
"epoch": 0.9997939418916134,
"eval_accuracy": 0.9912411767736617,
"eval_f1_macro": 0.9877568131380815,
"eval_f1_micro": 0.9912411767736617,
"eval_f1_weighted": 0.9911910099696459,
"eval_loss": 0.03924937546253204,
"eval_precision_macro": 0.9939377252247231,
"eval_precision_micro": 0.9912411767736617,
"eval_precision_weighted": 0.9912837913018231,
"eval_recall_macro": 0.9818041226442024,
"eval_recall_micro": 0.9912411767736617,
"eval_recall_weighted": 0.9912411767736617,
"eval_runtime": 18.3314,
"eval_samples_per_second": 1058.782,
"eval_steps_per_second": 33.112,
"step": 1213
},
{
"epoch": 1.0096847310941686,
"grad_norm": 0.46682170033454895,
"learning_rate": 3.694656488549619e-05,
"loss": 0.0372,
"step": 1225
},
{
"epoch": 1.030290541932825,
"grad_norm": 3.3791985511779785,
"learning_rate": 3.656488549618321e-05,
"loss": 0.0439,
"step": 1250
},
{
"epoch": 1.0508963527714816,
"grad_norm": 3.6968090534210205,
"learning_rate": 3.6183206106870234e-05,
"loss": 0.0382,
"step": 1275
},
{
"epoch": 1.071502163610138,
"grad_norm": 2.349797487258911,
"learning_rate": 3.580152671755726e-05,
"loss": 0.0327,
"step": 1300
},
{
"epoch": 1.0921079744487945,
"grad_norm": 1.1067183017730713,
"learning_rate": 3.54351145038168e-05,
"loss": 0.0315,
"step": 1325
},
{
"epoch": 1.1127137852874511,
"grad_norm": 0.032840073108673096,
"learning_rate": 3.505343511450382e-05,
"loss": 0.0316,
"step": 1350
},
{
"epoch": 1.1333195961261076,
"grad_norm": 0.028824035078287125,
"learning_rate": 3.467175572519084e-05,
"loss": 0.0213,
"step": 1375
},
{
"epoch": 1.153925406964764,
"grad_norm": 0.5763944983482361,
"learning_rate": 3.4290076335877864e-05,
"loss": 0.026,
"step": 1400
},
{
"epoch": 1.1745312178034206,
"grad_norm": 0.08884817361831665,
"learning_rate": 3.3908396946564886e-05,
"loss": 0.025,
"step": 1425
},
{
"epoch": 1.195137028642077,
"grad_norm": 0.41536378860473633,
"learning_rate": 3.352671755725191e-05,
"loss": 0.0346,
"step": 1450
},
{
"epoch": 1.2157428394807335,
"grad_norm": 0.3112834393978119,
"learning_rate": 3.314503816793894e-05,
"loss": 0.0264,
"step": 1475
},
{
"epoch": 1.2363486503193901,
"grad_norm": 0.48356932401657104,
"learning_rate": 3.276335877862596e-05,
"loss": 0.0188,
"step": 1500
},
{
"epoch": 1.2569544611580465,
"grad_norm": 3.8283050060272217,
"learning_rate": 3.238167938931298e-05,
"loss": 0.038,
"step": 1525
},
{
"epoch": 1.2775602719967032,
"grad_norm": 0.06744606792926788,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.0299,
"step": 1550
},
{
"epoch": 1.2981660828353596,
"grad_norm": 5.736328125,
"learning_rate": 3.161832061068703e-05,
"loss": 0.0243,
"step": 1575
},
{
"epoch": 1.318771893674016,
"grad_norm": 0.07336083799600601,
"learning_rate": 3.123664122137404e-05,
"loss": 0.0268,
"step": 1600
},
{
"epoch": 1.3393777045126725,
"grad_norm": 0.10824728012084961,
"learning_rate": 3.0854961832061066e-05,
"loss": 0.0277,
"step": 1625
},
{
"epoch": 1.359983515351329,
"grad_norm": 2.3970558643341064,
"learning_rate": 3.047328244274809e-05,
"loss": 0.0247,
"step": 1650
},
{
"epoch": 1.3805893261899855,
"grad_norm": 0.14330969750881195,
"learning_rate": 3.0091603053435114e-05,
"loss": 0.0292,
"step": 1675
},
{
"epoch": 1.4011951370286422,
"grad_norm": 0.5449164509773254,
"learning_rate": 2.970992366412214e-05,
"loss": 0.026,
"step": 1700
},
{
"epoch": 1.4218009478672986,
"grad_norm": 0.13645227253437042,
"learning_rate": 2.9328244274809162e-05,
"loss": 0.0337,
"step": 1725
},
{
"epoch": 1.442406758705955,
"grad_norm": 0.2142195850610733,
"learning_rate": 2.8946564885496185e-05,
"loss": 0.0148,
"step": 1750
},
{
"epoch": 1.4630125695446115,
"grad_norm": 0.053218819200992584,
"learning_rate": 2.8564885496183207e-05,
"loss": 0.0168,
"step": 1775
},
{
"epoch": 1.483618380383268,
"grad_norm": 6.44566011428833,
"learning_rate": 2.818320610687023e-05,
"loss": 0.0177,
"step": 1800
},
{
"epoch": 1.5042241912219247,
"grad_norm": 0.035187333822250366,
"learning_rate": 2.7801526717557252e-05,
"loss": 0.0277,
"step": 1825
},
{
"epoch": 1.5248300020605812,
"grad_norm": 0.042209599167108536,
"learning_rate": 2.7419847328244274e-05,
"loss": 0.0349,
"step": 1850
},
{
"epoch": 1.5454358128992376,
"grad_norm": 4.258108139038086,
"learning_rate": 2.7038167938931297e-05,
"loss": 0.0232,
"step": 1875
},
{
"epoch": 1.566041623737894,
"grad_norm": 0.2930113673210144,
"learning_rate": 2.665648854961832e-05,
"loss": 0.0225,
"step": 1900
},
{
"epoch": 1.5866474345765504,
"grad_norm": 4.373762607574463,
"learning_rate": 2.627480916030535e-05,
"loss": 0.0216,
"step": 1925
},
{
"epoch": 1.607253245415207,
"grad_norm": 1.3070122003555298,
"learning_rate": 2.589312977099237e-05,
"loss": 0.0118,
"step": 1950
},
{
"epoch": 1.6278590562538637,
"grad_norm": 1.7738819122314453,
"learning_rate": 2.5511450381679393e-05,
"loss": 0.024,
"step": 1975
},
{
"epoch": 1.6484648670925202,
"grad_norm": 0.027883194386959076,
"learning_rate": 2.5129770992366412e-05,
"loss": 0.0186,
"step": 2000
},
{
"epoch": 1.6690706779311766,
"grad_norm": 0.030870651826262474,
"learning_rate": 2.4748091603053435e-05,
"loss": 0.0174,
"step": 2025
},
{
"epoch": 1.689676488769833,
"grad_norm": 0.11152543127536774,
"learning_rate": 2.4366412213740457e-05,
"loss": 0.0225,
"step": 2050
},
{
"epoch": 1.7102822996084894,
"grad_norm": 0.09863735735416412,
"learning_rate": 2.3984732824427483e-05,
"loss": 0.0154,
"step": 2075
},
{
"epoch": 1.730888110447146,
"grad_norm": 0.03743008151650429,
"learning_rate": 2.3603053435114505e-05,
"loss": 0.0145,
"step": 2100
},
{
"epoch": 1.7514939212858027,
"grad_norm": 0.037079595029354095,
"learning_rate": 2.3221374045801528e-05,
"loss": 0.0228,
"step": 2125
},
{
"epoch": 1.7720997321244591,
"grad_norm": 0.02248048223555088,
"learning_rate": 2.283969465648855e-05,
"loss": 0.0302,
"step": 2150
},
{
"epoch": 1.7927055429631156,
"grad_norm": 0.5228779911994934,
"learning_rate": 2.2458015267175573e-05,
"loss": 0.0304,
"step": 2175
},
{
"epoch": 1.813311353801772,
"grad_norm": 0.07086365669965744,
"learning_rate": 2.20763358778626e-05,
"loss": 0.0224,
"step": 2200
},
{
"epoch": 1.8339171646404286,
"grad_norm": 0.02493666112422943,
"learning_rate": 2.169465648854962e-05,
"loss": 0.0135,
"step": 2225
},
{
"epoch": 1.854522975479085,
"grad_norm": 1.703371286392212,
"learning_rate": 2.1312977099236643e-05,
"loss": 0.0213,
"step": 2250
},
{
"epoch": 1.8751287863177417,
"grad_norm": 2.37048602104187,
"learning_rate": 2.0931297709923666e-05,
"loss": 0.0245,
"step": 2275
},
{
"epoch": 1.8957345971563981,
"grad_norm": 1.9404609203338623,
"learning_rate": 2.0549618320610688e-05,
"loss": 0.0249,
"step": 2300
},
{
"epoch": 1.9163404079950546,
"grad_norm": 2.3614635467529297,
"learning_rate": 2.016793893129771e-05,
"loss": 0.0155,
"step": 2325
},
{
"epoch": 1.936946218833711,
"grad_norm": 0.010869844816625118,
"learning_rate": 1.9786259541984733e-05,
"loss": 0.011,
"step": 2350
},
{
"epoch": 1.9575520296723676,
"grad_norm": 3.8267769813537598,
"learning_rate": 1.9404580152671755e-05,
"loss": 0.0194,
"step": 2375
},
{
"epoch": 1.978157840511024,
"grad_norm": 1.0191699266433716,
"learning_rate": 1.902290076335878e-05,
"loss": 0.024,
"step": 2400
},
{
"epoch": 1.9987636513496807,
"grad_norm": 2.944854497909546,
"learning_rate": 1.8641221374045803e-05,
"loss": 0.0157,
"step": 2425
},
{
"epoch": 1.9995878837832268,
"eval_accuracy": 0.9954144984285641,
"eval_f1_macro": 0.9934889743246283,
"eval_f1_micro": 0.9954144984285641,
"eval_f1_weighted": 0.9954058869538126,
"eval_loss": 0.02238026075065136,
"eval_precision_macro": 0.9952598218686433,
"eval_precision_micro": 0.9954144984285641,
"eval_precision_weighted": 0.9954125216770092,
"eval_recall_macro": 0.9917415518445051,
"eval_recall_micro": 0.9954144984285641,
"eval_recall_weighted": 0.9954144984285641,
"eval_runtime": 18.0784,
"eval_samples_per_second": 1073.599,
"eval_steps_per_second": 33.576,
"step": 2426
},
{
"epoch": 2.019369462188337,
"grad_norm": 0.0203185323625803,
"learning_rate": 1.8259541984732826e-05,
"loss": 0.0163,
"step": 2450
},
{
"epoch": 2.0399752730269936,
"grad_norm": 0.050282154232263565,
"learning_rate": 1.787786259541985e-05,
"loss": 0.023,
"step": 2475
},
{
"epoch": 2.06058108386565,
"grad_norm": 1.1421475410461426,
"learning_rate": 1.749618320610687e-05,
"loss": 0.0076,
"step": 2500
},
{
"epoch": 2.0811868947043064,
"grad_norm": 1.4307163953781128,
"learning_rate": 1.712977099236641e-05,
"loss": 0.0237,
"step": 2525
},
{
"epoch": 2.1017927055429633,
"grad_norm": 1.7668225765228271,
"learning_rate": 1.6748091603053437e-05,
"loss": 0.0097,
"step": 2550
},
{
"epoch": 2.1223985163816197,
"grad_norm": 0.43096867203712463,
"learning_rate": 1.636641221374046e-05,
"loss": 0.0164,
"step": 2575
},
{
"epoch": 2.143004327220276,
"grad_norm": 0.009999148547649384,
"learning_rate": 1.598473282442748e-05,
"loss": 0.005,
"step": 2600
},
{
"epoch": 2.1636101380589325,
"grad_norm": 0.015757331624627113,
"learning_rate": 1.5603053435114504e-05,
"loss": 0.0053,
"step": 2625
},
{
"epoch": 2.184215948897589,
"grad_norm": 0.014865142293274403,
"learning_rate": 1.5221374045801528e-05,
"loss": 0.0085,
"step": 2650
},
{
"epoch": 2.2048217597362454,
"grad_norm": 0.25190040469169617,
"learning_rate": 1.483969465648855e-05,
"loss": 0.0108,
"step": 2675
},
{
"epoch": 2.2254275705749023,
"grad_norm": 0.048391640186309814,
"learning_rate": 1.4458015267175573e-05,
"loss": 0.0015,
"step": 2700
},
{
"epoch": 2.2460333814135587,
"grad_norm": 0.03182575851678848,
"learning_rate": 1.4076335877862595e-05,
"loss": 0.006,
"step": 2725
},
{
"epoch": 2.266639192252215,
"grad_norm": 0.03702850267291069,
"learning_rate": 1.3694656488549621e-05,
"loss": 0.0042,
"step": 2750
},
{
"epoch": 2.2872450030908715,
"grad_norm": 0.010030820034444332,
"learning_rate": 1.3312977099236642e-05,
"loss": 0.0091,
"step": 2775
},
{
"epoch": 2.307850813929528,
"grad_norm": 0.42647820711135864,
"learning_rate": 1.2931297709923664e-05,
"loss": 0.019,
"step": 2800
},
{
"epoch": 2.328456624768185,
"grad_norm": 0.012364137917757034,
"learning_rate": 1.2549618320610687e-05,
"loss": 0.0052,
"step": 2825
},
{
"epoch": 2.3490624356068412,
"grad_norm": 0.8119286298751831,
"learning_rate": 1.216793893129771e-05,
"loss": 0.0058,
"step": 2850
},
{
"epoch": 2.3696682464454977,
"grad_norm": 0.03282434120774269,
"learning_rate": 1.1786259541984733e-05,
"loss": 0.0114,
"step": 2875
},
{
"epoch": 2.390274057284154,
"grad_norm": 0.03597622364759445,
"learning_rate": 1.1404580152671757e-05,
"loss": 0.0122,
"step": 2900
},
{
"epoch": 2.4108798681228105,
"grad_norm": 0.17078496515750885,
"learning_rate": 1.1022900763358778e-05,
"loss": 0.0111,
"step": 2925
},
{
"epoch": 2.431485678961467,
"grad_norm": 4.740245342254639,
"learning_rate": 1.0641221374045802e-05,
"loss": 0.0082,
"step": 2950
},
{
"epoch": 2.452091489800124,
"grad_norm": 0.05649268627166748,
"learning_rate": 1.0259541984732825e-05,
"loss": 0.0118,
"step": 2975
},
{
"epoch": 2.4726973006387802,
"grad_norm": 0.04967363923788071,
"learning_rate": 9.877862595419849e-06,
"loss": 0.0183,
"step": 3000
},
{
"epoch": 2.4933031114774367,
"grad_norm": 0.007585471961647272,
"learning_rate": 9.496183206106871e-06,
"loss": 0.0044,
"step": 3025
},
{
"epoch": 2.513908922316093,
"grad_norm": 0.020672131329774857,
"learning_rate": 9.114503816793894e-06,
"loss": 0.0043,
"step": 3050
},
{
"epoch": 2.5345147331547495,
"grad_norm": 0.005783187225461006,
"learning_rate": 8.732824427480916e-06,
"loss": 0.0049,
"step": 3075
},
{
"epoch": 2.5551205439934064,
"grad_norm": 0.0074052768759429455,
"learning_rate": 8.351145038167938e-06,
"loss": 0.0016,
"step": 3100
},
{
"epoch": 2.575726354832063,
"grad_norm": 0.03194005414843559,
"learning_rate": 7.969465648854962e-06,
"loss": 0.0039,
"step": 3125
},
{
"epoch": 2.5963321656707192,
"grad_norm": 0.0073048705235123634,
"learning_rate": 7.587786259541985e-06,
"loss": 0.0042,
"step": 3150
},
{
"epoch": 2.6169379765093757,
"grad_norm": 3.5076446533203125,
"learning_rate": 7.206106870229008e-06,
"loss": 0.0052,
"step": 3175
},
{
"epoch": 2.637543787348032,
"grad_norm": 0.004831337369978428,
"learning_rate": 6.8244274809160305e-06,
"loss": 0.0122,
"step": 3200
},
{
"epoch": 2.6581495981866885,
"grad_norm": 0.06588663160800934,
"learning_rate": 6.442748091603054e-06,
"loss": 0.0126,
"step": 3225
},
{
"epoch": 2.678755409025345,
"grad_norm": 0.11881962418556213,
"learning_rate": 6.061068702290076e-06,
"loss": 0.0109,
"step": 3250
},
{
"epoch": 2.699361219864002,
"grad_norm": 0.03703455999493599,
"learning_rate": 5.6793893129770995e-06,
"loss": 0.0063,
"step": 3275
},
{
"epoch": 2.719967030702658,
"grad_norm": 0.015223699621856213,
"learning_rate": 5.297709923664122e-06,
"loss": 0.0059,
"step": 3300
},
{
"epoch": 2.7405728415413146,
"grad_norm": 2.5166127681732178,
"learning_rate": 4.916030534351145e-06,
"loss": 0.0084,
"step": 3325
},
{
"epoch": 2.761178652379971,
"grad_norm": 0.007914497517049313,
"learning_rate": 4.5343511450381684e-06,
"loss": 0.0038,
"step": 3350
},
{
"epoch": 2.7817844632186275,
"grad_norm": 0.015695059671998024,
"learning_rate": 4.152671755725191e-06,
"loss": 0.012,
"step": 3375
},
{
"epoch": 2.8023902740572844,
"grad_norm": 0.012181021273136139,
"learning_rate": 3.770992366412214e-06,
"loss": 0.0205,
"step": 3400
},
{
"epoch": 2.822996084895941,
"grad_norm": 2.1182310581207275,
"learning_rate": 3.389312977099237e-06,
"loss": 0.0109,
"step": 3425
},
{
"epoch": 2.843601895734597,
"grad_norm": 0.04122795909643173,
"learning_rate": 3.0076335877862594e-06,
"loss": 0.0112,
"step": 3450
},
{
"epoch": 2.8642077065732536,
"grad_norm": 0.8893581032752991,
"learning_rate": 2.6259541984732826e-06,
"loss": 0.0062,
"step": 3475
},
{
"epoch": 2.88481351741191,
"grad_norm": 11.825778007507324,
"learning_rate": 2.2442748091603055e-06,
"loss": 0.0104,
"step": 3500
},
{
"epoch": 2.9054193282505665,
"grad_norm": 0.019555876031517982,
"learning_rate": 1.8625954198473283e-06,
"loss": 0.0095,
"step": 3525
},
{
"epoch": 2.926025139089223,
"grad_norm": 0.054326750338077545,
"learning_rate": 1.4809160305343512e-06,
"loss": 0.0117,
"step": 3550
},
{
"epoch": 2.9466309499278798,
"grad_norm": 0.01002267561852932,
"learning_rate": 1.099236641221374e-06,
"loss": 0.011,
"step": 3575
},
{
"epoch": 2.967236760766536,
"grad_norm": 0.13304032385349274,
"learning_rate": 7.175572519083969e-07,
"loss": 0.0057,
"step": 3600
},
{
"epoch": 2.9878425716051926,
"grad_norm": 0.027920976281166077,
"learning_rate": 3.3587786259541984e-07,
"loss": 0.0033,
"step": 3625
},
{
"epoch": 2.99938182567484,
"eval_accuracy": 0.9955175434076975,
"eval_f1_macro": 0.9936121048770471,
"eval_f1_micro": 0.9955175434076975,
"eval_f1_weighted": 0.9955108152465474,
"eval_loss": 0.021347366273403168,
"eval_precision_macro": 0.9946094746601329,
"eval_precision_micro": 0.9955175434076975,
"eval_precision_weighted": 0.9955142956474508,
"eval_recall_macro": 0.9926308276207328,
"eval_recall_micro": 0.9955175434076975,
"eval_recall_weighted": 0.9955175434076975,
"eval_runtime": 18.1429,
"eval_samples_per_second": 1069.787,
"eval_steps_per_second": 33.457,
"step": 3639
}
],
"logging_steps": 25,
"max_steps": 3639,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.01
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5317936953288704e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}