wav2vec2-large-xls-r-300m-slovak / trainer_state.json
Commit e1bc8ca ("End of training")
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 100.0,
"global_step": 15200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.66,
"learning_rate": 2.2399999999999997e-06,
"loss": 29.0164,
"step": 100
},
{
"epoch": 1.32,
"learning_rate": 4.5733333333333325e-06,
"loss": 19.5282,
"step": 200
},
{
"epoch": 1.97,
"learning_rate": 6.906666666666666e-06,
"loss": 10.0976,
"step": 300
},
{
"epoch": 2.63,
"learning_rate": 9.24e-06,
"loss": 7.1314,
"step": 400
},
{
"epoch": 3.29,
"learning_rate": 1.1573333333333332e-05,
"loss": 6.253,
"step": 500
},
{
"epoch": 3.95,
"learning_rate": 1.3906666666666665e-05,
"loss": 5.0478,
"step": 600
},
{
"epoch": 4.61,
"learning_rate": 1.624e-05,
"loss": 4.2473,
"step": 700
},
{
"epoch": 5.26,
"learning_rate": 1.857333333333333e-05,
"loss": 3.8186,
"step": 800
},
{
"epoch": 5.92,
"learning_rate": 2.0906666666666668e-05,
"loss": 3.561,
"step": 900
},
{
"epoch": 6.58,
"learning_rate": 2.3239999999999998e-05,
"loss": 3.4988,
"step": 1000
},
{
"epoch": 7.24,
"learning_rate": 2.557333333333333e-05,
"loss": 3.4272,
"step": 1100
},
{
"epoch": 7.89,
"learning_rate": 2.7906666666666665e-05,
"loss": 3.3321,
"step": 1200
},
{
"epoch": 8.55,
"learning_rate": 3.024e-05,
"loss": 3.2363,
"step": 1300
},
{
"epoch": 9.21,
"learning_rate": 3.257333333333333e-05,
"loss": 3.1793,
"step": 1400
},
{
"epoch": 9.87,
"learning_rate": 3.490666666666666e-05,
"loss": 3.1351,
"step": 1500
},
{
"epoch": 10.53,
"learning_rate": 3.7239999999999996e-05,
"loss": 3.0,
"step": 1600
},
{
"epoch": 11.18,
"learning_rate": 3.957333333333333e-05,
"loss": 2.7348,
"step": 1700
},
{
"epoch": 11.84,
"learning_rate": 4.190666666666666e-05,
"loss": 2.2213,
"step": 1800
},
{
"epoch": 12.5,
"learning_rate": 4.4239999999999997e-05,
"loss": 1.8715,
"step": 1900
},
{
"epoch": 13.16,
"learning_rate": 4.657333333333333e-05,
"loss": 1.6957,
"step": 2000
},
{
"epoch": 13.82,
"learning_rate": 4.8906666666666664e-05,
"loss": 1.5455,
"step": 2100
},
{
"epoch": 14.47,
"learning_rate": 5.124e-05,
"loss": 1.4168,
"step": 2200
},
{
"epoch": 15.13,
"learning_rate": 5.3573333333333324e-05,
"loss": 1.2693,
"step": 2300
},
{
"epoch": 15.79,
"learning_rate": 5.590666666666666e-05,
"loss": 1.2215,
"step": 2400
},
{
"epoch": 16.45,
"learning_rate": 5.823999999999999e-05,
"loss": 1.172,
"step": 2500
},
{
"epoch": 17.11,
"learning_rate": 6.0573333333333325e-05,
"loss": 1.0927,
"step": 2600
},
{
"epoch": 17.76,
"learning_rate": 6.290666666666665e-05,
"loss": 1.0716,
"step": 2700
},
{
"epoch": 18.42,
"learning_rate": 6.523999999999999e-05,
"loss": 1.0267,
"step": 2800
},
{
"epoch": 19.08,
"learning_rate": 6.757333333333333e-05,
"loss": 1.0305,
"step": 2900
},
{
"epoch": 19.74,
"learning_rate": 6.990666666666666e-05,
"loss": 1.0076,
"step": 3000
},
{
"epoch": 19.74,
"eval_loss": 0.3274237811565399,
"eval_runtime": 108.7735,
"eval_samples_per_second": 19.977,
"eval_steps_per_second": 19.977,
"eval_wer": 0.38060622685502676,
"step": 3000
},
{
"epoch": 20.39,
"learning_rate": 6.944918032786885e-05,
"loss": 0.9555,
"step": 3100
},
{
"epoch": 21.05,
"learning_rate": 6.887540983606556e-05,
"loss": 0.9422,
"step": 3200
},
{
"epoch": 21.71,
"learning_rate": 6.830163934426229e-05,
"loss": 0.9229,
"step": 3300
},
{
"epoch": 22.37,
"learning_rate": 6.772786885245901e-05,
"loss": 0.9312,
"step": 3400
},
{
"epoch": 23.03,
"learning_rate": 6.715409836065574e-05,
"loss": 0.9204,
"step": 3500
},
{
"epoch": 23.68,
"learning_rate": 6.658032786885245e-05,
"loss": 0.8966,
"step": 3600
},
{
"epoch": 24.34,
"learning_rate": 6.600655737704917e-05,
"loss": 0.8867,
"step": 3700
},
{
"epoch": 25.0,
"learning_rate": 6.543278688524589e-05,
"loss": 0.895,
"step": 3800
},
{
"epoch": 25.66,
"learning_rate": 6.485901639344262e-05,
"loss": 0.8397,
"step": 3900
},
{
"epoch": 26.32,
"learning_rate": 6.428524590163934e-05,
"loss": 0.8523,
"step": 4000
},
{
"epoch": 26.97,
"learning_rate": 6.371147540983606e-05,
"loss": 0.831,
"step": 4100
},
{
"epoch": 27.63,
"learning_rate": 6.313770491803279e-05,
"loss": 0.8191,
"step": 4200
},
{
"epoch": 28.29,
"learning_rate": 6.25639344262295e-05,
"loss": 0.8222,
"step": 4300
},
{
"epoch": 28.95,
"learning_rate": 6.199016393442622e-05,
"loss": 0.8067,
"step": 4400
},
{
"epoch": 29.61,
"learning_rate": 6.141639344262295e-05,
"loss": 0.7992,
"step": 4500
},
{
"epoch": 30.26,
"learning_rate": 6.084262295081967e-05,
"loss": 0.8202,
"step": 4600
},
{
"epoch": 30.92,
"learning_rate": 6.026885245901639e-05,
"loss": 0.7756,
"step": 4700
},
{
"epoch": 31.58,
"learning_rate": 5.969508196721311e-05,
"loss": 0.7759,
"step": 4800
},
{
"epoch": 32.24,
"learning_rate": 5.912131147540983e-05,
"loss": 0.7778,
"step": 4900
},
{
"epoch": 32.89,
"learning_rate": 5.854754098360655e-05,
"loss": 0.7528,
"step": 5000
},
{
"epoch": 33.55,
"learning_rate": 5.797377049180327e-05,
"loss": 0.7656,
"step": 5100
},
{
"epoch": 34.21,
"learning_rate": 5.739999999999999e-05,
"loss": 0.7529,
"step": 5200
},
{
"epoch": 34.87,
"learning_rate": 5.682622950819672e-05,
"loss": 0.7371,
"step": 5300
},
{
"epoch": 35.53,
"learning_rate": 5.625245901639344e-05,
"loss": 0.7432,
"step": 5400
},
{
"epoch": 36.18,
"learning_rate": 5.5678688524590164e-05,
"loss": 0.7285,
"step": 5500
},
{
"epoch": 36.84,
"learning_rate": 5.510491803278688e-05,
"loss": 0.7116,
"step": 5600
},
{
"epoch": 37.5,
"learning_rate": 5.4531147540983605e-05,
"loss": 0.7109,
"step": 5700
},
{
"epoch": 38.16,
"learning_rate": 5.395737704918032e-05,
"loss": 0.7084,
"step": 5800
},
{
"epoch": 38.82,
"learning_rate": 5.3383606557377046e-05,
"loss": 0.6951,
"step": 5900
},
{
"epoch": 39.47,
"learning_rate": 5.280983606557376e-05,
"loss": 0.6889,
"step": 6000
},
{
"epoch": 39.47,
"eval_loss": 0.2824408710002899,
"eval_runtime": 108.4203,
"eval_samples_per_second": 20.042,
"eval_steps_per_second": 20.042,
"eval_wer": 0.2941983267041558,
"step": 6000
},
{
"epoch": 40.13,
"learning_rate": 5.2236065573770486e-05,
"loss": 0.6805,
"step": 6100
},
{
"epoch": 40.79,
"learning_rate": 5.1662295081967203e-05,
"loss": 0.7018,
"step": 6200
},
{
"epoch": 41.45,
"learning_rate": 5.1094262295081965e-05,
"loss": 0.6773,
"step": 6300
},
{
"epoch": 42.11,
"learning_rate": 5.052049180327869e-05,
"loss": 0.6828,
"step": 6400
},
{
"epoch": 42.76,
"learning_rate": 4.9952459016393436e-05,
"loss": 0.6875,
"step": 6500
},
{
"epoch": 43.42,
"learning_rate": 4.937868852459015e-05,
"loss": 0.6846,
"step": 6600
},
{
"epoch": 44.08,
"learning_rate": 4.880491803278688e-05,
"loss": 0.6826,
"step": 6700
},
{
"epoch": 44.74,
"learning_rate": 4.823114754098361e-05,
"loss": 0.645,
"step": 6800
},
{
"epoch": 45.39,
"learning_rate": 4.7657377049180325e-05,
"loss": 0.6468,
"step": 6900
},
{
"epoch": 46.05,
"learning_rate": 4.708360655737705e-05,
"loss": 0.6638,
"step": 7000
},
{
"epoch": 46.71,
"learning_rate": 4.6509836065573765e-05,
"loss": 0.6451,
"step": 7100
},
{
"epoch": 47.37,
"learning_rate": 4.593606557377049e-05,
"loss": 0.6396,
"step": 7200
},
{
"epoch": 48.03,
"learning_rate": 4.5362295081967206e-05,
"loss": 0.6643,
"step": 7300
},
{
"epoch": 48.68,
"learning_rate": 4.478852459016393e-05,
"loss": 0.6453,
"step": 7400
},
{
"epoch": 49.34,
"learning_rate": 4.422049180327869e-05,
"loss": 0.6579,
"step": 7500
},
{
"epoch": 50.0,
"learning_rate": 4.364672131147541e-05,
"loss": 0.6433,
"step": 7600
},
{
"epoch": 50.66,
"learning_rate": 4.307295081967213e-05,
"loss": 0.615,
"step": 7700
},
{
"epoch": 51.32,
"learning_rate": 4.249918032786885e-05,
"loss": 0.6422,
"step": 7800
},
{
"epoch": 51.97,
"learning_rate": 4.192540983606557e-05,
"loss": 0.6204,
"step": 7900
},
{
"epoch": 52.63,
"learning_rate": 4.135163934426229e-05,
"loss": 0.6173,
"step": 8000
},
{
"epoch": 53.29,
"learning_rate": 4.0777868852459014e-05,
"loss": 0.597,
"step": 8100
},
{
"epoch": 53.95,
"learning_rate": 4.020409836065573e-05,
"loss": 0.6106,
"step": 8200
},
{
"epoch": 54.61,
"learning_rate": 3.9630327868852455e-05,
"loss": 0.5915,
"step": 8300
},
{
"epoch": 55.26,
"learning_rate": 3.905655737704917e-05,
"loss": 0.5934,
"step": 8400
},
{
"epoch": 55.92,
"learning_rate": 3.8482786885245895e-05,
"loss": 0.5833,
"step": 8500
},
{
"epoch": 56.58,
"learning_rate": 3.790901639344262e-05,
"loss": 0.591,
"step": 8600
},
{
"epoch": 57.24,
"learning_rate": 3.733524590163934e-05,
"loss": 0.5708,
"step": 8700
},
{
"epoch": 57.89,
"learning_rate": 3.676147540983607e-05,
"loss": 0.5774,
"step": 8800
},
{
"epoch": 58.55,
"learning_rate": 3.6187704918032784e-05,
"loss": 0.5838,
"step": 8900
},
{
"epoch": 59.21,
"learning_rate": 3.561393442622951e-05,
"loss": 0.5863,
"step": 9000
},
{
"epoch": 59.21,
"eval_loss": 0.2699650526046753,
"eval_runtime": 108.4786,
"eval_samples_per_second": 20.032,
"eval_steps_per_second": 20.032,
"eval_wer": 0.2734878617473598,
"step": 9000
},
{
"epoch": 59.87,
"learning_rate": 3.5040163934426225e-05,
"loss": 0.5803,
"step": 9100
},
{
"epoch": 60.53,
"learning_rate": 3.446639344262295e-05,
"loss": 0.577,
"step": 9200
},
{
"epoch": 61.18,
"learning_rate": 3.389262295081967e-05,
"loss": 0.5599,
"step": 9300
},
{
"epoch": 61.84,
"learning_rate": 3.331885245901639e-05,
"loss": 0.5712,
"step": 9400
},
{
"epoch": 62.5,
"learning_rate": 3.274508196721311e-05,
"loss": 0.5615,
"step": 9500
},
{
"epoch": 63.16,
"learning_rate": 3.217131147540984e-05,
"loss": 0.5427,
"step": 9600
},
{
"epoch": 63.82,
"learning_rate": 3.1597540983606554e-05,
"loss": 0.5617,
"step": 9700
},
{
"epoch": 64.47,
"learning_rate": 3.102377049180328e-05,
"loss": 0.5602,
"step": 9800
},
{
"epoch": 65.13,
"learning_rate": 3.0449999999999998e-05,
"loss": 0.5325,
"step": 9900
},
{
"epoch": 65.79,
"learning_rate": 2.987622950819672e-05,
"loss": 0.5438,
"step": 10000
},
{
"epoch": 66.45,
"learning_rate": 2.930245901639344e-05,
"loss": 0.5538,
"step": 10100
},
{
"epoch": 67.11,
"learning_rate": 2.8728688524590163e-05,
"loss": 0.5408,
"step": 10200
},
{
"epoch": 67.76,
"learning_rate": 2.8154918032786883e-05,
"loss": 0.5424,
"step": 10300
},
{
"epoch": 68.42,
"learning_rate": 2.7581147540983604e-05,
"loss": 0.5132,
"step": 10400
},
{
"epoch": 69.08,
"learning_rate": 2.7007377049180324e-05,
"loss": 0.5396,
"step": 10500
},
{
"epoch": 69.74,
"learning_rate": 2.6433606557377044e-05,
"loss": 0.5398,
"step": 10600
},
{
"epoch": 70.39,
"learning_rate": 2.5859836065573768e-05,
"loss": 0.5067,
"step": 10700
},
{
"epoch": 71.05,
"learning_rate": 2.5291803278688523e-05,
"loss": 0.5248,
"step": 10800
},
{
"epoch": 71.71,
"learning_rate": 2.4718032786885243e-05,
"loss": 0.5148,
"step": 10900
},
{
"epoch": 72.37,
"learning_rate": 2.4144262295081967e-05,
"loss": 0.5155,
"step": 11000
},
{
"epoch": 73.03,
"learning_rate": 2.3570491803278687e-05,
"loss": 0.5195,
"step": 11100
},
{
"epoch": 73.68,
"learning_rate": 2.2996721311475408e-05,
"loss": 0.5185,
"step": 11200
},
{
"epoch": 74.34,
"learning_rate": 2.2422950819672128e-05,
"loss": 0.5184,
"step": 11300
},
{
"epoch": 75.0,
"learning_rate": 2.184918032786885e-05,
"loss": 0.5122,
"step": 11400
},
{
"epoch": 75.66,
"learning_rate": 2.127540983606557e-05,
"loss": 0.498,
"step": 11500
},
{
"epoch": 76.32,
"learning_rate": 2.0701639344262296e-05,
"loss": 0.5075,
"step": 11600
},
{
"epoch": 76.97,
"learning_rate": 2.0127868852459017e-05,
"loss": 0.479,
"step": 11700
},
{
"epoch": 77.63,
"learning_rate": 1.9554098360655737e-05,
"loss": 0.4826,
"step": 11800
},
{
"epoch": 78.29,
"learning_rate": 1.8980327868852457e-05,
"loss": 0.5057,
"step": 11900
},
{
"epoch": 78.95,
"learning_rate": 1.8406557377049178e-05,
"loss": 0.4798,
"step": 12000
},
{
"epoch": 78.95,
"eval_loss": 0.2843836545944214,
"eval_runtime": 109.262,
"eval_samples_per_second": 19.888,
"eval_steps_per_second": 19.888,
"eval_wer": 0.26018378823206695,
"step": 12000
},
{
"epoch": 79.61,
"learning_rate": 1.7832786885245898e-05,
"loss": 0.495,
"step": 12100
},
{
"epoch": 80.26,
"learning_rate": 1.7264754098360656e-05,
"loss": 0.4784,
"step": 12200
},
{
"epoch": 80.92,
"learning_rate": 1.6690983606557376e-05,
"loss": 0.477,
"step": 12300
},
{
"epoch": 81.58,
"learning_rate": 1.6117213114754097e-05,
"loss": 0.4936,
"step": 12400
},
{
"epoch": 82.24,
"learning_rate": 1.5543442622950817e-05,
"loss": 0.4942,
"step": 12500
},
{
"epoch": 82.89,
"learning_rate": 1.496967213114754e-05,
"loss": 0.474,
"step": 12600
},
{
"epoch": 83.55,
"learning_rate": 1.4395901639344262e-05,
"loss": 0.4857,
"step": 12700
},
{
"epoch": 84.21,
"learning_rate": 1.3822131147540982e-05,
"loss": 0.4628,
"step": 12800
},
{
"epoch": 84.87,
"learning_rate": 1.3248360655737704e-05,
"loss": 0.4653,
"step": 12900
},
{
"epoch": 85.53,
"learning_rate": 1.2674590163934426e-05,
"loss": 0.4787,
"step": 13000
},
{
"epoch": 86.18,
"learning_rate": 1.2100819672131147e-05,
"loss": 0.4726,
"step": 13100
},
{
"epoch": 86.84,
"learning_rate": 1.1527049180327867e-05,
"loss": 0.4535,
"step": 13200
},
{
"epoch": 87.5,
"learning_rate": 1.095327868852459e-05,
"loss": 0.4648,
"step": 13300
},
{
"epoch": 88.16,
"learning_rate": 1.0379508196721311e-05,
"loss": 0.4681,
"step": 13400
},
{
"epoch": 88.82,
"learning_rate": 9.805737704918032e-06,
"loss": 0.4741,
"step": 13500
},
{
"epoch": 89.47,
"learning_rate": 9.231967213114752e-06,
"loss": 0.4682,
"step": 13600
},
{
"epoch": 90.13,
"learning_rate": 8.658196721311476e-06,
"loss": 0.4627,
"step": 13700
},
{
"epoch": 90.79,
"learning_rate": 8.084426229508196e-06,
"loss": 0.453,
"step": 13800
},
{
"epoch": 91.45,
"learning_rate": 7.5106557377049175e-06,
"loss": 0.451,
"step": 13900
},
{
"epoch": 92.11,
"learning_rate": 6.936885245901639e-06,
"loss": 0.4705,
"step": 14000
},
{
"epoch": 92.76,
"learning_rate": 6.36311475409836e-06,
"loss": 0.4615,
"step": 14100
},
{
"epoch": 93.42,
"learning_rate": 5.789344262295081e-06,
"loss": 0.4507,
"step": 14200
},
{
"epoch": 94.08,
"learning_rate": 5.2155737704918034e-06,
"loss": 0.4526,
"step": 14300
},
{
"epoch": 94.74,
"learning_rate": 4.641803278688524e-06,
"loss": 0.4299,
"step": 14400
},
{
"epoch": 95.39,
"learning_rate": 4.068032786885246e-06,
"loss": 0.4448,
"step": 14500
},
{
"epoch": 96.05,
"learning_rate": 3.494262295081967e-06,
"loss": 0.4414,
"step": 14600
},
{
"epoch": 96.71,
"learning_rate": 2.920491803278688e-06,
"loss": 0.43,
"step": 14700
},
{
"epoch": 97.37,
"learning_rate": 2.3467213114754098e-06,
"loss": 0.4374,
"step": 14800
},
{
"epoch": 98.03,
"learning_rate": 1.772950819672131e-06,
"loss": 0.4582,
"step": 14900
},
{
"epoch": 98.68,
"learning_rate": 1.1991803278688525e-06,
"loss": 0.4399,
"step": 15000
},
{
"epoch": 98.68,
"eval_loss": 0.2906634509563446,
"eval_runtime": 110.6588,
"eval_samples_per_second": 19.637,
"eval_steps_per_second": 19.637,
"eval_wer": 0.24893704567274721,
"step": 15000
},
{
"epoch": 99.34,
"learning_rate": 6.254098360655737e-07,
"loss": 0.4309,
"step": 15100
},
{
"epoch": 100.0,
"learning_rate": 5.1639344262295075e-08,
"loss": 0.4456,
"step": 15200
},
{
"epoch": 100.0,
"step": 15200,
"total_flos": 5.9242678559261245e+19,
"train_loss": 1.3542817559995148,
"train_runtime": 20867.045,
"train_samples_per_second": 23.233,
"train_steps_per_second": 0.728
}
],
"max_steps": 15200,
"num_train_epochs": 100,
"total_flos": 5.9242678559261245e+19,
"trial_name": null,
"trial_params": null
}
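
The `log_history` array above can be inspected programmatically, for example to list the evaluation WER at each logged checkpoint alongside the number of training-loss entries. Below is a minimal Python sketch; it assumes the JSON above has been saved locally as `trainer_state.json`, and the variable names are illustrative only.

```python
import json

# Load the trainer state written by the Hugging Face Trainer
# (assumes the JSON above is stored locally as trainer_state.json).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; evaluation entries carry "eval_wer".
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]

print(f"{len(train_log)} training-loss points over {state['epoch']:.0f} epochs")
for step, wer in eval_log:
    print(f"step {step:>6}: eval WER = {wer:.4f}")
```

For this file, the loop would print the five evaluation points (steps 3000 through 15000), showing the WER dropping from roughly 0.38 to 0.25 over the run.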