whisper-large-v3-ft-btb-cv-ca-cy / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.034174125305126,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01017087062652563,
"grad_norm": 16.184080123901367,
"learning_rate": 4.800000000000001e-07,
"loss": 1.6215,
"step": 25
},
{
"epoch": 0.02034174125305126,
"grad_norm": 14.08956241607666,
"learning_rate": 9.800000000000001e-07,
"loss": 1.2721,
"step": 50
},
{
"epoch": 0.030512611879576892,
"grad_norm": 11.49959945678711,
"learning_rate": 1.48e-06,
"loss": 0.9884,
"step": 75
},
{
"epoch": 0.04068348250610252,
"grad_norm": 8.895913124084473,
"learning_rate": 1.98e-06,
"loss": 0.8726,
"step": 100
},
{
"epoch": 0.050854353132628156,
"grad_norm": 9.52476692199707,
"learning_rate": 2.4800000000000004e-06,
"loss": 0.8062,
"step": 125
},
{
"epoch": 0.061025223759153785,
"grad_norm": 9.508084297180176,
"learning_rate": 2.9800000000000003e-06,
"loss": 0.7792,
"step": 150
},
{
"epoch": 0.07119609438567942,
"grad_norm": 9.352806091308594,
"learning_rate": 3.48e-06,
"loss": 0.7541,
"step": 175
},
{
"epoch": 0.08136696501220504,
"grad_norm": 10.18264102935791,
"learning_rate": 3.980000000000001e-06,
"loss": 0.6983,
"step": 200
},
{
"epoch": 0.09153783563873068,
"grad_norm": 10.840176582336426,
"learning_rate": 4.48e-06,
"loss": 0.7416,
"step": 225
},
{
"epoch": 0.10170870626525631,
"grad_norm": 9.403505325317383,
"learning_rate": 4.980000000000001e-06,
"loss": 0.7154,
"step": 250
},
{
"epoch": 0.11187957689178193,
"grad_norm": 8.170647621154785,
"learning_rate": 5.480000000000001e-06,
"loss": 0.6564,
"step": 275
},
{
"epoch": 0.12205044751830757,
"grad_norm": 9.974289894104004,
"learning_rate": 5.98e-06,
"loss": 0.6401,
"step": 300
},
{
"epoch": 0.1322213181448332,
"grad_norm": 10.3145112991333,
"learning_rate": 6.480000000000001e-06,
"loss": 0.6564,
"step": 325
},
{
"epoch": 0.14239218877135884,
"grad_norm": 8.214014053344727,
"learning_rate": 6.98e-06,
"loss": 0.6191,
"step": 350
},
{
"epoch": 0.15256305939788445,
"grad_norm": 8.800423622131348,
"learning_rate": 7.48e-06,
"loss": 0.6492,
"step": 375
},
{
"epoch": 0.16273393002441008,
"grad_norm": 8.481072425842285,
"learning_rate": 7.980000000000002e-06,
"loss": 0.6295,
"step": 400
},
{
"epoch": 0.17290480065093572,
"grad_norm": 8.669546127319336,
"learning_rate": 8.48e-06,
"loss": 0.6167,
"step": 425
},
{
"epoch": 0.18307567127746135,
"grad_norm": 7.645444869995117,
"learning_rate": 8.98e-06,
"loss": 0.6021,
"step": 450
},
{
"epoch": 0.193246541903987,
"grad_norm": 11.719562530517578,
"learning_rate": 9.48e-06,
"loss": 0.6006,
"step": 475
},
{
"epoch": 0.20341741253051263,
"grad_norm": 9.42712116241455,
"learning_rate": 9.980000000000001e-06,
"loss": 0.5777,
"step": 500
},
{
"epoch": 0.21358828315703823,
"grad_norm": 7.9745988845825195,
"learning_rate": 9.946666666666667e-06,
"loss": 0.5928,
"step": 525
},
{
"epoch": 0.22375915378356387,
"grad_norm": 7.762495994567871,
"learning_rate": 9.891111111111113e-06,
"loss": 0.5735,
"step": 550
},
{
"epoch": 0.2339300244100895,
"grad_norm": 8.57645320892334,
"learning_rate": 9.835555555555556e-06,
"loss": 0.5613,
"step": 575
},
{
"epoch": 0.24410089503661514,
"grad_norm": 7.481997489929199,
"learning_rate": 9.780000000000001e-06,
"loss": 0.5496,
"step": 600
},
{
"epoch": 0.25427176566314075,
"grad_norm": 6.589779376983643,
"learning_rate": 9.724444444444445e-06,
"loss": 0.5746,
"step": 625
},
{
"epoch": 0.2644426362896664,
"grad_norm": 8.590812683105469,
"learning_rate": 9.66888888888889e-06,
"loss": 0.5385,
"step": 650
},
{
"epoch": 0.274613506916192,
"grad_norm": 7.343937397003174,
"learning_rate": 9.613333333333335e-06,
"loss": 0.5432,
"step": 675
},
{
"epoch": 0.2847843775427177,
"grad_norm": 7.952889919281006,
"learning_rate": 9.557777777777777e-06,
"loss": 0.5213,
"step": 700
},
{
"epoch": 0.2949552481692433,
"grad_norm": 8.083431243896484,
"learning_rate": 9.502222222222223e-06,
"loss": 0.5367,
"step": 725
},
{
"epoch": 0.3051261187957689,
"grad_norm": 7.172378063201904,
"learning_rate": 9.446666666666667e-06,
"loss": 0.5317,
"step": 750
},
{
"epoch": 0.31529698942229456,
"grad_norm": 7.955025672912598,
"learning_rate": 9.391111111111111e-06,
"loss": 0.5208,
"step": 775
},
{
"epoch": 0.32546786004882017,
"grad_norm": 7.041996479034424,
"learning_rate": 9.335555555555557e-06,
"loss": 0.5123,
"step": 800
},
{
"epoch": 0.33563873067534583,
"grad_norm": 7.7593607902526855,
"learning_rate": 9.280000000000001e-06,
"loss": 0.5019,
"step": 825
},
{
"epoch": 0.34580960130187144,
"grad_norm": 9.114460945129395,
"learning_rate": 9.224444444444445e-06,
"loss": 0.5189,
"step": 850
},
{
"epoch": 0.35598047192839705,
"grad_norm": 8.199577331542969,
"learning_rate": 9.171111111111112e-06,
"loss": 0.4844,
"step": 875
},
{
"epoch": 0.3661513425549227,
"grad_norm": 7.192209243774414,
"learning_rate": 9.115555555555556e-06,
"loss": 0.4867,
"step": 900
},
{
"epoch": 0.3763222131814483,
"grad_norm": 7.112555980682373,
"learning_rate": 9.060000000000001e-06,
"loss": 0.4673,
"step": 925
},
{
"epoch": 0.386493083807974,
"grad_norm": 6.723269462585449,
"learning_rate": 9.004444444444445e-06,
"loss": 0.4886,
"step": 950
},
{
"epoch": 0.3966639544344996,
"grad_norm": 6.772889137268066,
"learning_rate": 8.94888888888889e-06,
"loss": 0.5116,
"step": 975
},
{
"epoch": 0.40683482506102525,
"grad_norm": 7.985610485076904,
"learning_rate": 8.893333333333333e-06,
"loss": 0.5072,
"step": 1000
},
{
"epoch": 0.40683482506102525,
"eval_loss": 0.5020337700843811,
"eval_runtime": 1789.7876,
"eval_samples_per_second": 2.18,
"eval_steps_per_second": 0.136,
"eval_wer": 0.35670820052158797,
"step": 1000
},
{
"epoch": 0.41700569568755086,
"grad_norm": 10.103032112121582,
"learning_rate": 8.83777777777778e-06,
"loss": 0.4951,
"step": 1025
},
{
"epoch": 0.42717656631407647,
"grad_norm": 6.815390586853027,
"learning_rate": 8.782222222222223e-06,
"loss": 0.4682,
"step": 1050
},
{
"epoch": 0.43734743694060213,
"grad_norm": 7.63904333114624,
"learning_rate": 8.726666666666667e-06,
"loss": 0.4842,
"step": 1075
},
{
"epoch": 0.44751830756712774,
"grad_norm": 7.750236988067627,
"learning_rate": 8.671111111111113e-06,
"loss": 0.4823,
"step": 1100
},
{
"epoch": 0.4576891781936534,
"grad_norm": 6.178125858306885,
"learning_rate": 8.615555555555555e-06,
"loss": 0.4764,
"step": 1125
},
{
"epoch": 0.467860048820179,
"grad_norm": 6.7011284828186035,
"learning_rate": 8.560000000000001e-06,
"loss": 0.487,
"step": 1150
},
{
"epoch": 0.4780309194467046,
"grad_norm": 6.630568504333496,
"learning_rate": 8.504444444444445e-06,
"loss": 0.444,
"step": 1175
},
{
"epoch": 0.4882017900732303,
"grad_norm": 7.386712074279785,
"learning_rate": 8.448888888888889e-06,
"loss": 0.4372,
"step": 1200
},
{
"epoch": 0.4983726606997559,
"grad_norm": 6.448077201843262,
"learning_rate": 8.393333333333335e-06,
"loss": 0.4474,
"step": 1225
},
{
"epoch": 0.5085435313262815,
"grad_norm": 6.787522315979004,
"learning_rate": 8.337777777777777e-06,
"loss": 0.4692,
"step": 1250
},
{
"epoch": 0.5187144019528072,
"grad_norm": 6.614894866943359,
"learning_rate": 8.282222222222223e-06,
"loss": 0.4401,
"step": 1275
},
{
"epoch": 0.5288852725793328,
"grad_norm": 7.09251594543457,
"learning_rate": 8.226666666666667e-06,
"loss": 0.4419,
"step": 1300
},
{
"epoch": 0.5390561432058584,
"grad_norm": 7.047774791717529,
"learning_rate": 8.171111111111113e-06,
"loss": 0.4323,
"step": 1325
},
{
"epoch": 0.549227013832384,
"grad_norm": 6.616914749145508,
"learning_rate": 8.115555555555557e-06,
"loss": 0.4354,
"step": 1350
},
{
"epoch": 0.5593978844589097,
"grad_norm": 8.16896915435791,
"learning_rate": 8.06e-06,
"loss": 0.4299,
"step": 1375
},
{
"epoch": 0.5695687550854354,
"grad_norm": 7.04884672164917,
"learning_rate": 8.004444444444445e-06,
"loss": 0.4477,
"step": 1400
},
{
"epoch": 0.5797396257119609,
"grad_norm": 6.052219867706299,
"learning_rate": 7.948888888888889e-06,
"loss": 0.4561,
"step": 1425
},
{
"epoch": 0.5899104963384866,
"grad_norm": 6.257965087890625,
"learning_rate": 7.893333333333335e-06,
"loss": 0.4343,
"step": 1450
},
{
"epoch": 0.6000813669650122,
"grad_norm": 7.182107925415039,
"learning_rate": 7.837777777777779e-06,
"loss": 0.4215,
"step": 1475
},
{
"epoch": 0.6102522375915378,
"grad_norm": 6.276819705963135,
"learning_rate": 7.782222222222223e-06,
"loss": 0.4188,
"step": 1500
},
{
"epoch": 0.6204231082180635,
"grad_norm": 6.533692359924316,
"learning_rate": 7.726666666666667e-06,
"loss": 0.4855,
"step": 1525
},
{
"epoch": 0.6305939788445891,
"grad_norm": 6.108315467834473,
"learning_rate": 7.67111111111111e-06,
"loss": 0.4255,
"step": 1550
},
{
"epoch": 0.6407648494711147,
"grad_norm": 6.741276741027832,
"learning_rate": 7.6155555555555564e-06,
"loss": 0.4529,
"step": 1575
},
{
"epoch": 0.6509357200976403,
"grad_norm": 6.165073871612549,
"learning_rate": 7.5600000000000005e-06,
"loss": 0.4541,
"step": 1600
},
{
"epoch": 0.661106590724166,
"grad_norm": 7.246621608734131,
"learning_rate": 7.504444444444445e-06,
"loss": 0.4073,
"step": 1625
},
{
"epoch": 0.6712774613506917,
"grad_norm": 7.50054407119751,
"learning_rate": 7.44888888888889e-06,
"loss": 0.4143,
"step": 1650
},
{
"epoch": 0.6814483319772172,
"grad_norm": 6.225817680358887,
"learning_rate": 7.393333333333333e-06,
"loss": 0.438,
"step": 1675
},
{
"epoch": 0.6916192026037429,
"grad_norm": 6.704942226409912,
"learning_rate": 7.337777777777778e-06,
"loss": 0.4209,
"step": 1700
},
{
"epoch": 0.7017900732302685,
"grad_norm": 6.276001930236816,
"learning_rate": 7.282222222222222e-06,
"loss": 0.4033,
"step": 1725
},
{
"epoch": 0.7119609438567941,
"grad_norm": 7.256411075592041,
"learning_rate": 7.226666666666667e-06,
"loss": 0.416,
"step": 1750
},
{
"epoch": 0.7221318144833198,
"grad_norm": 6.347288608551025,
"learning_rate": 7.171111111111112e-06,
"loss": 0.4373,
"step": 1775
},
{
"epoch": 0.7323026851098454,
"grad_norm": 6.241955757141113,
"learning_rate": 7.115555555555557e-06,
"loss": 0.403,
"step": 1800
},
{
"epoch": 0.7424735557363711,
"grad_norm": 5.548867702484131,
"learning_rate": 7.06e-06,
"loss": 0.4034,
"step": 1825
},
{
"epoch": 0.7526444263628966,
"grad_norm": 7.0312113761901855,
"learning_rate": 7.004444444444445e-06,
"loss": 0.4037,
"step": 1850
},
{
"epoch": 0.7628152969894223,
"grad_norm": 7.7608256340026855,
"learning_rate": 6.948888888888889e-06,
"loss": 0.3939,
"step": 1875
},
{
"epoch": 0.772986167615948,
"grad_norm": 6.072271823883057,
"learning_rate": 6.893333333333334e-06,
"loss": 0.4067,
"step": 1900
},
{
"epoch": 0.7831570382424735,
"grad_norm": 6.9571123123168945,
"learning_rate": 6.837777777777779e-06,
"loss": 0.4107,
"step": 1925
},
{
"epoch": 0.7933279088689992,
"grad_norm": 5.884681224822998,
"learning_rate": 6.782222222222222e-06,
"loss": 0.3995,
"step": 1950
},
{
"epoch": 0.8034987794955248,
"grad_norm": 5.850441932678223,
"learning_rate": 6.726666666666667e-06,
"loss": 0.4017,
"step": 1975
},
{
"epoch": 0.8136696501220505,
"grad_norm": 6.021271705627441,
"learning_rate": 6.671111111111112e-06,
"loss": 0.3708,
"step": 2000
},
{
"epoch": 0.8136696501220505,
"eval_loss": 0.42604002356529236,
"eval_runtime": 1798.3084,
"eval_samples_per_second": 2.169,
"eval_steps_per_second": 0.136,
"eval_wer": 0.32584758041147494,
"step": 2000
},
{
"epoch": 0.823840520748576,
"grad_norm": 6.479699611663818,
"learning_rate": 6.615555555555556e-06,
"loss": 0.3914,
"step": 2025
},
{
"epoch": 0.8340113913751017,
"grad_norm": 6.589993476867676,
"learning_rate": 6.560000000000001e-06,
"loss": 0.401,
"step": 2050
},
{
"epoch": 0.8441822620016274,
"grad_norm": 5.026104927062988,
"learning_rate": 6.504444444444446e-06,
"loss": 0.38,
"step": 2075
},
{
"epoch": 0.8543531326281529,
"grad_norm": 6.458845138549805,
"learning_rate": 6.448888888888889e-06,
"loss": 0.3885,
"step": 2100
},
{
"epoch": 0.8645240032546786,
"grad_norm": 6.570550441741943,
"learning_rate": 6.393333333333334e-06,
"loss": 0.3992,
"step": 2125
},
{
"epoch": 0.8746948738812043,
"grad_norm": 6.160183906555176,
"learning_rate": 6.3377777777777786e-06,
"loss": 0.385,
"step": 2150
},
{
"epoch": 0.8848657445077298,
"grad_norm": 5.925890922546387,
"learning_rate": 6.282222222222223e-06,
"loss": 0.4082,
"step": 2175
},
{
"epoch": 0.8950366151342555,
"grad_norm": 6.508133888244629,
"learning_rate": 6.2266666666666675e-06,
"loss": 0.4043,
"step": 2200
},
{
"epoch": 0.9052074857607811,
"grad_norm": 6.300657272338867,
"learning_rate": 6.171111111111112e-06,
"loss": 0.3729,
"step": 2225
},
{
"epoch": 0.9153783563873068,
"grad_norm": 5.284305572509766,
"learning_rate": 6.1155555555555555e-06,
"loss": 0.4046,
"step": 2250
},
{
"epoch": 0.9255492270138324,
"grad_norm": 5.809286594390869,
"learning_rate": 6.0600000000000004e-06,
"loss": 0.3864,
"step": 2275
},
{
"epoch": 0.935720097640358,
"grad_norm": 6.667562007904053,
"learning_rate": 6.004444444444445e-06,
"loss": 0.3929,
"step": 2300
},
{
"epoch": 0.9458909682668837,
"grad_norm": 6.4545392990112305,
"learning_rate": 5.948888888888889e-06,
"loss": 0.3975,
"step": 2325
},
{
"epoch": 0.9560618388934092,
"grad_norm": 5.876199245452881,
"learning_rate": 5.893333333333334e-06,
"loss": 0.3769,
"step": 2350
},
{
"epoch": 0.9662327095199349,
"grad_norm": 5.008429527282715,
"learning_rate": 5.837777777777777e-06,
"loss": 0.4031,
"step": 2375
},
{
"epoch": 0.9764035801464606,
"grad_norm": 7.304080963134766,
"learning_rate": 5.782222222222222e-06,
"loss": 0.3804,
"step": 2400
},
{
"epoch": 0.9865744507729862,
"grad_norm": 7.11293888092041,
"learning_rate": 5.726666666666667e-06,
"loss": 0.3849,
"step": 2425
},
{
"epoch": 0.9967453213995118,
"grad_norm": 6.156993389129639,
"learning_rate": 5.671111111111112e-06,
"loss": 0.3804,
"step": 2450
},
{
"epoch": 1.0069161920260374,
"grad_norm": 4.09175968170166,
"learning_rate": 5.615555555555556e-06,
"loss": 0.3138,
"step": 2475
},
{
"epoch": 1.017087062652563,
"grad_norm": 5.104438304901123,
"learning_rate": 5.560000000000001e-06,
"loss": 0.2747,
"step": 2500
},
{
"epoch": 1.0272579332790888,
"grad_norm": 5.911495685577393,
"learning_rate": 5.504444444444444e-06,
"loss": 0.2825,
"step": 2525
},
{
"epoch": 1.0374288039056143,
"grad_norm": 5.619592189788818,
"learning_rate": 5.448888888888889e-06,
"loss": 0.2991,
"step": 2550
},
{
"epoch": 1.0475996745321399,
"grad_norm": 5.471043109893799,
"learning_rate": 5.393333333333334e-06,
"loss": 0.2766,
"step": 2575
},
{
"epoch": 1.0577705451586656,
"grad_norm": 4.353448867797852,
"learning_rate": 5.337777777777779e-06,
"loss": 0.2883,
"step": 2600
},
{
"epoch": 1.0679414157851912,
"grad_norm": 4.982877731323242,
"learning_rate": 5.282222222222223e-06,
"loss": 0.2888,
"step": 2625
},
{
"epoch": 1.0781122864117167,
"grad_norm": 5.228238582611084,
"learning_rate": 5.226666666666667e-06,
"loss": 0.2802,
"step": 2650
},
{
"epoch": 1.0882831570382425,
"grad_norm": 5.883535385131836,
"learning_rate": 5.171111111111111e-06,
"loss": 0.2779,
"step": 2675
},
{
"epoch": 1.098454027664768,
"grad_norm": 4.599557876586914,
"learning_rate": 5.115555555555556e-06,
"loss": 0.2817,
"step": 2700
},
{
"epoch": 1.1086248982912936,
"grad_norm": 5.081583023071289,
"learning_rate": 5.060000000000001e-06,
"loss": 0.2804,
"step": 2725
},
{
"epoch": 1.1187957689178194,
"grad_norm": 5.387673854827881,
"learning_rate": 5.004444444444445e-06,
"loss": 0.2676,
"step": 2750
},
{
"epoch": 1.128966639544345,
"grad_norm": 5.010275840759277,
"learning_rate": 4.94888888888889e-06,
"loss": 0.2752,
"step": 2775
},
{
"epoch": 1.1391375101708707,
"grad_norm": 5.828437805175781,
"learning_rate": 4.893333333333334e-06,
"loss": 0.2795,
"step": 2800
},
{
"epoch": 1.1493083807973963,
"grad_norm": 5.313021183013916,
"learning_rate": 4.837777777777778e-06,
"loss": 0.2876,
"step": 2825
},
{
"epoch": 1.1594792514239218,
"grad_norm": 5.790019512176514,
"learning_rate": 4.7822222222222226e-06,
"loss": 0.283,
"step": 2850
},
{
"epoch": 1.1696501220504476,
"grad_norm": 5.2214250564575195,
"learning_rate": 4.7266666666666674e-06,
"loss": 0.2762,
"step": 2875
},
{
"epoch": 1.1798209926769732,
"grad_norm": 6.338456630706787,
"learning_rate": 4.6711111111111115e-06,
"loss": 0.2588,
"step": 2900
},
{
"epoch": 1.1899918633034987,
"grad_norm": 6.858678340911865,
"learning_rate": 4.6155555555555555e-06,
"loss": 0.2724,
"step": 2925
},
{
"epoch": 1.2001627339300245,
"grad_norm": 5.54526948928833,
"learning_rate": 4.56e-06,
"loss": 0.2893,
"step": 2950
},
{
"epoch": 1.21033360455655,
"grad_norm": 5.165172100067139,
"learning_rate": 4.504444444444444e-06,
"loss": 0.2751,
"step": 2975
},
{
"epoch": 1.2205044751830756,
"grad_norm": 6.359204292297363,
"learning_rate": 4.448888888888889e-06,
"loss": 0.2599,
"step": 3000
},
{
"epoch": 1.2205044751830756,
"eval_loss": 0.39731866121292114,
"eval_runtime": 1796.0978,
"eval_samples_per_second": 2.172,
"eval_steps_per_second": 0.136,
"eval_wer": 0.30034772529701537,
"step": 3000
},
{
"epoch": 1.2306753458096014,
"grad_norm": 4.958346366882324,
"learning_rate": 4.393333333333334e-06,
"loss": 0.2754,
"step": 3025
},
{
"epoch": 1.240846216436127,
"grad_norm": 7.284126281738281,
"learning_rate": 4.337777777777778e-06,
"loss": 0.2764,
"step": 3050
},
{
"epoch": 1.2510170870626527,
"grad_norm": 5.106224536895752,
"learning_rate": 4.282222222222222e-06,
"loss": 0.2679,
"step": 3075
},
{
"epoch": 1.2611879576891782,
"grad_norm": 4.638324737548828,
"learning_rate": 4.226666666666667e-06,
"loss": 0.2678,
"step": 3100
},
{
"epoch": 1.2713588283157038,
"grad_norm": 5.0809245109558105,
"learning_rate": 4.171111111111111e-06,
"loss": 0.2679,
"step": 3125
},
{
"epoch": 1.2815296989422293,
"grad_norm": 5.177508354187012,
"learning_rate": 4.115555555555556e-06,
"loss": 0.2603,
"step": 3150
},
{
"epoch": 1.2917005695687551,
"grad_norm": 4.664310932159424,
"learning_rate": 4.060000000000001e-06,
"loss": 0.2659,
"step": 3175
},
{
"epoch": 1.3018714401952807,
"grad_norm": 5.230463981628418,
"learning_rate": 4.004444444444445e-06,
"loss": 0.271,
"step": 3200
},
{
"epoch": 1.3120423108218064,
"grad_norm": 5.396975994110107,
"learning_rate": 3.948888888888889e-06,
"loss": 0.2653,
"step": 3225
},
{
"epoch": 1.322213181448332,
"grad_norm": 4.997494220733643,
"learning_rate": 3.893333333333333e-06,
"loss": 0.2678,
"step": 3250
},
{
"epoch": 1.3323840520748575,
"grad_norm": 6.467133045196533,
"learning_rate": 3.837777777777778e-06,
"loss": 0.2628,
"step": 3275
},
{
"epoch": 1.342554922701383,
"grad_norm": 4.6464338302612305,
"learning_rate": 3.782222222222223e-06,
"loss": 0.2819,
"step": 3300
},
{
"epoch": 1.3527257933279089,
"grad_norm": 5.477710723876953,
"learning_rate": 3.726666666666667e-06,
"loss": 0.2694,
"step": 3325
},
{
"epoch": 1.3628966639544344,
"grad_norm": 5.745607376098633,
"learning_rate": 3.6711111111111113e-06,
"loss": 0.2731,
"step": 3350
},
{
"epoch": 1.3730675345809602,
"grad_norm": 4.110312461853027,
"learning_rate": 3.615555555555556e-06,
"loss": 0.2726,
"step": 3375
},
{
"epoch": 1.3832384052074858,
"grad_norm": 5.377216815948486,
"learning_rate": 3.5600000000000002e-06,
"loss": 0.2677,
"step": 3400
},
{
"epoch": 1.3934092758340113,
"grad_norm": 5.4512763023376465,
"learning_rate": 3.5044444444444447e-06,
"loss": 0.2621,
"step": 3425
},
{
"epoch": 1.403580146460537,
"grad_norm": 5.338203430175781,
"learning_rate": 3.4488888888888896e-06,
"loss": 0.2731,
"step": 3450
},
{
"epoch": 1.4137510170870626,
"grad_norm": 5.551027774810791,
"learning_rate": 3.3933333333333336e-06,
"loss": 0.2821,
"step": 3475
},
{
"epoch": 1.4239218877135884,
"grad_norm": 6.804419040679932,
"learning_rate": 3.337777777777778e-06,
"loss": 0.2652,
"step": 3500
},
{
"epoch": 1.434092758340114,
"grad_norm": 5.15053653717041,
"learning_rate": 3.282222222222223e-06,
"loss": 0.2626,
"step": 3525
},
{
"epoch": 1.4442636289666395,
"grad_norm": 4.987846851348877,
"learning_rate": 3.226666666666667e-06,
"loss": 0.2558,
"step": 3550
},
{
"epoch": 1.454434499593165,
"grad_norm": 5.460192680358887,
"learning_rate": 3.1711111111111114e-06,
"loss": 0.2688,
"step": 3575
},
{
"epoch": 1.4646053702196908,
"grad_norm": 5.20620059967041,
"learning_rate": 3.1155555555555555e-06,
"loss": 0.2541,
"step": 3600
},
{
"epoch": 1.4747762408462164,
"grad_norm": 5.177892208099365,
"learning_rate": 3.0600000000000003e-06,
"loss": 0.2546,
"step": 3625
},
{
"epoch": 1.4849471114727422,
"grad_norm": 5.275447845458984,
"learning_rate": 3.004444444444445e-06,
"loss": 0.2487,
"step": 3650
},
{
"epoch": 1.4951179820992677,
"grad_norm": 4.815367698669434,
"learning_rate": 2.948888888888889e-06,
"loss": 0.2505,
"step": 3675
},
{
"epoch": 1.5052888527257933,
"grad_norm": 4.586023330688477,
"learning_rate": 2.8933333333333337e-06,
"loss": 0.2385,
"step": 3700
},
{
"epoch": 1.5154597233523188,
"grad_norm": 5.920341491699219,
"learning_rate": 2.837777777777778e-06,
"loss": 0.2502,
"step": 3725
},
{
"epoch": 1.5256305939788446,
"grad_norm": 5.697859764099121,
"learning_rate": 2.7822222222222222e-06,
"loss": 0.2574,
"step": 3750
},
{
"epoch": 1.5358014646053704,
"grad_norm": 4.68679666519165,
"learning_rate": 2.726666666666667e-06,
"loss": 0.2543,
"step": 3775
},
{
"epoch": 1.545972335231896,
"grad_norm": 5.064055442810059,
"learning_rate": 2.6711111111111116e-06,
"loss": 0.2639,
"step": 3800
},
{
"epoch": 1.5561432058584215,
"grad_norm": 5.656905651092529,
"learning_rate": 2.6155555555555556e-06,
"loss": 0.2639,
"step": 3825
},
{
"epoch": 1.566314076484947,
"grad_norm": 5.397951126098633,
"learning_rate": 2.56e-06,
"loss": 0.271,
"step": 3850
},
{
"epoch": 1.5764849471114726,
"grad_norm": 6.378788948059082,
"learning_rate": 2.504444444444445e-06,
"loss": 0.2542,
"step": 3875
},
{
"epoch": 1.5866558177379984,
"grad_norm": 6.6806960105896,
"learning_rate": 2.448888888888889e-06,
"loss": 0.2471,
"step": 3900
},
{
"epoch": 1.5968266883645241,
"grad_norm": 4.837161064147949,
"learning_rate": 2.3933333333333334e-06,
"loss": 0.2497,
"step": 3925
},
{
"epoch": 1.6069975589910497,
"grad_norm": 5.817267894744873,
"learning_rate": 2.337777777777778e-06,
"loss": 0.2583,
"step": 3950
},
{
"epoch": 1.6171684296175752,
"grad_norm": 5.383573532104492,
"learning_rate": 2.2822222222222223e-06,
"loss": 0.2535,
"step": 3975
},
{
"epoch": 1.6273393002441008,
"grad_norm": 4.556544780731201,
"learning_rate": 2.226666666666667e-06,
"loss": 0.2618,
"step": 4000
},
{
"epoch": 1.6273393002441008,
"eval_loss": 0.37833017110824585,
"eval_runtime": 1788.9834,
"eval_samples_per_second": 2.181,
"eval_steps_per_second": 0.136,
"eval_wer": 0.2904713609581764,
"step": 4000
},
{
"epoch": 1.6375101708706266,
"grad_norm": 5.119606018066406,
"learning_rate": 2.1711111111111113e-06,
"loss": 0.2613,
"step": 4025
},
{
"epoch": 1.647681041497152,
"grad_norm": 5.435574054718018,
"learning_rate": 2.1155555555555557e-06,
"loss": 0.243,
"step": 4050
},
{
"epoch": 1.6578519121236779,
"grad_norm": 5.2191643714904785,
"learning_rate": 2.06e-06,
"loss": 0.2563,
"step": 4075
},
{
"epoch": 1.6680227827502034,
"grad_norm": 4.851512908935547,
"learning_rate": 2.0044444444444446e-06,
"loss": 0.2608,
"step": 4100
},
{
"epoch": 1.678193653376729,
"grad_norm": 5.570041179656982,
"learning_rate": 1.948888888888889e-06,
"loss": 0.2572,
"step": 4125
},
{
"epoch": 1.6883645240032545,
"grad_norm": 4.914021968841553,
"learning_rate": 1.8933333333333333e-06,
"loss": 0.2502,
"step": 4150
},
{
"epoch": 1.6985353946297803,
"grad_norm": 4.91426944732666,
"learning_rate": 1.837777777777778e-06,
"loss": 0.2484,
"step": 4175
},
{
"epoch": 1.708706265256306,
"grad_norm": 5.054934501647949,
"learning_rate": 1.7822222222222225e-06,
"loss": 0.2543,
"step": 4200
},
{
"epoch": 1.7188771358828316,
"grad_norm": 5.295421600341797,
"learning_rate": 1.7266666666666667e-06,
"loss": 0.2479,
"step": 4225
},
{
"epoch": 1.7290480065093572,
"grad_norm": 4.494225978851318,
"learning_rate": 1.6711111111111112e-06,
"loss": 0.2708,
"step": 4250
},
{
"epoch": 1.7392188771358827,
"grad_norm": 5.637074947357178,
"learning_rate": 1.6155555555555559e-06,
"loss": 0.2536,
"step": 4275
},
{
"epoch": 1.7493897477624083,
"grad_norm": 4.7007222175598145,
"learning_rate": 1.56e-06,
"loss": 0.2445,
"step": 4300
},
{
"epoch": 1.759560618388934,
"grad_norm": 5.922358989715576,
"learning_rate": 1.5044444444444446e-06,
"loss": 0.2629,
"step": 4325
},
{
"epoch": 1.7697314890154598,
"grad_norm": 5.818343162536621,
"learning_rate": 1.4488888888888892e-06,
"loss": 0.2462,
"step": 4350
},
{
"epoch": 1.7799023596419854,
"grad_norm": 4.40110969543457,
"learning_rate": 1.3933333333333335e-06,
"loss": 0.2389,
"step": 4375
},
{
"epoch": 1.790073230268511,
"grad_norm": 6.067351818084717,
"learning_rate": 1.337777777777778e-06,
"loss": 0.2662,
"step": 4400
},
{
"epoch": 1.8002441008950365,
"grad_norm": 4.228072166442871,
"learning_rate": 1.2822222222222222e-06,
"loss": 0.2611,
"step": 4425
},
{
"epoch": 1.8104149715215623,
"grad_norm": 5.135393142700195,
"learning_rate": 1.2266666666666666e-06,
"loss": 0.2607,
"step": 4450
},
{
"epoch": 1.8205858421480878,
"grad_norm": 5.195592403411865,
"learning_rate": 1.171111111111111e-06,
"loss": 0.2534,
"step": 4475
},
{
"epoch": 1.8307567127746136,
"grad_norm": 4.900667667388916,
"learning_rate": 1.1155555555555558e-06,
"loss": 0.2558,
"step": 4500
},
{
"epoch": 1.8409275834011392,
"grad_norm": 4.837711811065674,
"learning_rate": 1.06e-06,
"loss": 0.2476,
"step": 4525
},
{
"epoch": 1.8510984540276647,
"grad_norm": 5.072052955627441,
"learning_rate": 1.0044444444444445e-06,
"loss": 0.2333,
"step": 4550
},
{
"epoch": 1.8612693246541903,
"grad_norm": 5.385496139526367,
"learning_rate": 9.488888888888889e-07,
"loss": 0.2453,
"step": 4575
},
{
"epoch": 1.871440195280716,
"grad_norm": 5.36069393157959,
"learning_rate": 8.933333333333334e-07,
"loss": 0.2477,
"step": 4600
},
{
"epoch": 1.8816110659072418,
"grad_norm": 4.781914710998535,
"learning_rate": 8.37777777777778e-07,
"loss": 0.2494,
"step": 4625
},
{
"epoch": 1.8917819365337674,
"grad_norm": 5.366285800933838,
"learning_rate": 7.822222222222223e-07,
"loss": 0.2519,
"step": 4650
},
{
"epoch": 1.901952807160293,
"grad_norm": 4.479983806610107,
"learning_rate": 7.266666666666668e-07,
"loss": 0.241,
"step": 4675
},
{
"epoch": 1.9121236777868185,
"grad_norm": 5.053223133087158,
"learning_rate": 6.711111111111111e-07,
"loss": 0.2462,
"step": 4700
},
{
"epoch": 1.922294548413344,
"grad_norm": 4.833332061767578,
"learning_rate": 6.155555555555556e-07,
"loss": 0.2698,
"step": 4725
},
{
"epoch": 1.9324654190398698,
"grad_norm": 3.715808629989624,
"learning_rate": 5.6e-07,
"loss": 0.2338,
"step": 4750
},
{
"epoch": 1.9426362896663956,
"grad_norm": 5.295703887939453,
"learning_rate": 5.044444444444445e-07,
"loss": 0.2508,
"step": 4775
},
{
"epoch": 1.9528071602929211,
"grad_norm": 5.4455342292785645,
"learning_rate": 4.488888888888889e-07,
"loss": 0.2468,
"step": 4800
},
{
"epoch": 1.9629780309194467,
"grad_norm": 4.946452617645264,
"learning_rate": 3.9333333333333336e-07,
"loss": 0.2459,
"step": 4825
},
{
"epoch": 1.9731489015459722,
"grad_norm": 4.054887294769287,
"learning_rate": 3.3777777777777777e-07,
"loss": 0.2488,
"step": 4850
},
{
"epoch": 1.983319772172498,
"grad_norm": 5.577613830566406,
"learning_rate": 2.822222222222222e-07,
"loss": 0.2435,
"step": 4875
},
{
"epoch": 1.9934906427990235,
"grad_norm": 4.222945213317871,
"learning_rate": 2.266666666666667e-07,
"loss": 0.2399,
"step": 4900
},
{
"epoch": 2.0036615134255493,
"grad_norm": 4.715478897094727,
"learning_rate": 1.7111111111111114e-07,
"loss": 0.2385,
"step": 4925
},
{
"epoch": 2.013832384052075,
"grad_norm": 4.2399678230285645,
"learning_rate": 1.1555555555555556e-07,
"loss": 0.181,
"step": 4950
},
{
"epoch": 2.0240032546786004,
"grad_norm": 4.25973653793335,
"learning_rate": 6.000000000000001e-08,
"loss": 0.1792,
"step": 4975
},
{
"epoch": 2.034174125305126,
"grad_norm": 4.661466598510742,
"learning_rate": 4.444444444444445e-09,
"loss": 0.1846,
"step": 5000
},
{
"epoch": 2.034174125305126,
"eval_loss": 0.36655664443969727,
"eval_runtime": 1783.5562,
"eval_samples_per_second": 2.187,
"eval_steps_per_second": 0.137,
"eval_wer": 0.27733507195981844,
"step": 5000
},
{
"epoch": 2.034174125305126,
"step": 5000,
"total_flos": 5.435045990970163e+20,
"train_loss": 0.38478662071228026,
"train_runtime": 58003.8059,
"train_samples_per_second": 2.758,
"train_steps_per_second": 0.086
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.435045990970163e+20,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
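
A minimal sketch of how one might inspect this state file, assuming it has been saved locally as "trainer_state.json" (the filename and the printed columns are illustrative, not part of the original dump). It separates the evaluation checkpoints recorded every 1000 steps (the entries carrying "eval_wer") from the 25-step training-loss logs, and prints the WER progression.

import json

# Load the Trainer state dump (path is an assumption for this sketch).
with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries carry "eval_wer"; the rest of log_history is the
# per-25-step training loss plus a final summary record.
eval_logs = [e for e in state["log_history"] if "eval_wer" in e]

print(f"{'step':>6} {'eval_loss':>10} {'eval_wer':>10}")
for e in eval_logs:
    print(f"{e['step']:>6} {e['eval_loss']:>10.4f} {e['eval_wer']:>10.4f}")

# The last log entry holds the run-level summary (average train_loss, runtime).
print("final train_loss:", state["log_history"][-1].get("train_loss"))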