{
"best_metric": 0.3796495497226715,
"best_model_checkpoint": "/scratch/skscla001/speech/results/whisper-medium-nyagen-female-model/checkpoint-800",
"epoch": 8.284023668639053,
"eval_steps": 200,
"global_step": 1400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14792899408284024,
"grad_norm": 51.66197204589844,
"learning_rate": 4.2000000000000006e-07,
"loss": 5.8232,
"step": 25
},
{
"epoch": 0.2958579881656805,
"grad_norm": 44.10362243652344,
"learning_rate": 9.200000000000001e-07,
"loss": 4.7067,
"step": 50
},
{
"epoch": 0.4437869822485207,
"grad_norm": 28.569969177246094,
"learning_rate": 1.42e-06,
"loss": 3.4308,
"step": 75
},
{
"epoch": 0.591715976331361,
"grad_norm": 30.82530975341797,
"learning_rate": 1.9200000000000003e-06,
"loss": 2.6134,
"step": 100
},
{
"epoch": 0.7396449704142012,
"grad_norm": 25.239219665527344,
"learning_rate": 2.42e-06,
"loss": 1.9457,
"step": 125
},
{
"epoch": 0.8875739644970414,
"grad_norm": 22.162986755371094,
"learning_rate": 2.92e-06,
"loss": 1.6779,
"step": 150
},
{
"epoch": 1.0355029585798816,
"grad_norm": 20.636507034301758,
"learning_rate": 3.4200000000000007e-06,
"loss": 1.4326,
"step": 175
},
{
"epoch": 1.183431952662722,
"grad_norm": 26.6777400970459,
"learning_rate": 3.920000000000001e-06,
"loss": 1.1709,
"step": 200
},
{
"epoch": 1.183431952662722,
"eval_loss": 0.6064821481704712,
"eval_runtime": 82.6612,
"eval_samples_per_second": 2.044,
"eval_steps_per_second": 0.52,
"eval_wer": 0.39973142345568485,
"step": 200
},
{
"epoch": 1.331360946745562,
"grad_norm": 25.014854431152344,
"learning_rate": 4.42e-06,
"loss": 1.0305,
"step": 225
},
{
"epoch": 1.4792899408284024,
"grad_norm": 17.5407772064209,
"learning_rate": 4.92e-06,
"loss": 0.9408,
"step": 250
},
{
"epoch": 1.6272189349112427,
"grad_norm": 21.393421173095703,
"learning_rate": 5.420000000000001e-06,
"loss": 0.9531,
"step": 275
},
{
"epoch": 1.7751479289940828,
"grad_norm": 17.972383499145508,
"learning_rate": 5.92e-06,
"loss": 0.8432,
"step": 300
},
{
"epoch": 1.9230769230769231,
"grad_norm": 14.931800842285156,
"learning_rate": 6.42e-06,
"loss": 0.8653,
"step": 325
},
{
"epoch": 2.0710059171597632,
"grad_norm": 15.048805236816406,
"learning_rate": 6.92e-06,
"loss": 0.6679,
"step": 350
},
{
"epoch": 2.2189349112426036,
"grad_norm": 13.169198036193848,
"learning_rate": 7.420000000000001e-06,
"loss": 0.4734,
"step": 375
},
{
"epoch": 2.366863905325444,
"grad_norm": 9.416093826293945,
"learning_rate": 7.92e-06,
"loss": 0.4797,
"step": 400
},
{
"epoch": 2.366863905325444,
"eval_loss": 0.4227275848388672,
"eval_runtime": 88.219,
"eval_samples_per_second": 1.916,
"eval_steps_per_second": 0.487,
"eval_wer": 0.39077887197851385,
"step": 400
},
{
"epoch": 2.5147928994082838,
"grad_norm": 17.11431884765625,
"learning_rate": 8.42e-06,
"loss": 0.5092,
"step": 425
},
{
"epoch": 2.662721893491124,
"grad_norm": 12.54430103302002,
"learning_rate": 8.920000000000001e-06,
"loss": 0.4516,
"step": 450
},
{
"epoch": 2.8106508875739644,
"grad_norm": 12.5538911819458,
"learning_rate": 9.42e-06,
"loss": 0.4576,
"step": 475
},
{
"epoch": 2.9585798816568047,
"grad_norm": 13.34439754486084,
"learning_rate": 9.920000000000002e-06,
"loss": 0.4957,
"step": 500
},
{
"epoch": 3.106508875739645,
"grad_norm": 8.777567863464355,
"learning_rate": 9.953333333333333e-06,
"loss": 0.3319,
"step": 525
},
{
"epoch": 3.2544378698224854,
"grad_norm": 2.8179187774658203,
"learning_rate": 9.89777777777778e-06,
"loss": 0.2229,
"step": 550
},
{
"epoch": 3.4023668639053253,
"grad_norm": 6.126067161560059,
"learning_rate": 9.842222222222223e-06,
"loss": 0.2144,
"step": 575
},
{
"epoch": 3.5502958579881656,
"grad_norm": 8.303226470947266,
"learning_rate": 9.786666666666667e-06,
"loss": 0.2388,
"step": 600
},
{
"epoch": 3.5502958579881656,
"eval_loss": 0.39361897110939026,
"eval_runtime": 81.4549,
"eval_samples_per_second": 2.075,
"eval_steps_per_second": 0.528,
"eval_wer": 0.29319606087735006,
"step": 600
},
{
"epoch": 3.698224852071006,
"grad_norm": 14.067414283752441,
"learning_rate": 9.731111111111113e-06,
"loss": 0.2556,
"step": 625
},
{
"epoch": 3.8461538461538463,
"grad_norm": 10.964970588684082,
"learning_rate": 9.675555555555555e-06,
"loss": 0.2364,
"step": 650
},
{
"epoch": 3.994082840236686,
"grad_norm": 12.65548324584961,
"learning_rate": 9.620000000000001e-06,
"loss": 0.3048,
"step": 675
},
{
"epoch": 4.1420118343195265,
"grad_norm": 13.840071678161621,
"learning_rate": 9.564444444444445e-06,
"loss": 0.1798,
"step": 700
},
{
"epoch": 4.289940828402367,
"grad_norm": 5.432651042938232,
"learning_rate": 9.508888888888889e-06,
"loss": 0.1384,
"step": 725
},
{
"epoch": 4.437869822485207,
"grad_norm": 4.243218898773193,
"learning_rate": 9.453333333333335e-06,
"loss": 0.1131,
"step": 750
},
{
"epoch": 4.585798816568047,
"grad_norm": 8.833934783935547,
"learning_rate": 9.397777777777779e-06,
"loss": 0.1031,
"step": 775
},
{
"epoch": 4.733727810650888,
"grad_norm": 10.180987358093262,
"learning_rate": 9.342222222222223e-06,
"loss": 0.1376,
"step": 800
},
{
"epoch": 4.733727810650888,
"eval_loss": 0.3796495497226715,
"eval_runtime": 80.6054,
"eval_samples_per_second": 2.097,
"eval_steps_per_second": 0.533,
"eval_wer": 0.25738585496866606,
"step": 800
},
{
"epoch": 4.881656804733728,
"grad_norm": 7.904026985168457,
"learning_rate": 9.286666666666667e-06,
"loss": 0.1003,
"step": 825
},
{
"epoch": 5.029585798816568,
"grad_norm": 3.1111233234405518,
"learning_rate": 9.231111111111111e-06,
"loss": 0.1213,
"step": 850
},
{
"epoch": 5.177514792899408,
"grad_norm": 4.056166648864746,
"learning_rate": 9.175555555555557e-06,
"loss": 0.073,
"step": 875
},
{
"epoch": 5.325443786982248,
"grad_norm": 3.2874598503112793,
"learning_rate": 9.12e-06,
"loss": 0.0756,
"step": 900
},
{
"epoch": 5.4733727810650885,
"grad_norm": 6.1852641105651855,
"learning_rate": 9.064444444444447e-06,
"loss": 0.0681,
"step": 925
},
{
"epoch": 5.621301775147929,
"grad_norm": 8.725586891174316,
"learning_rate": 9.008888888888889e-06,
"loss": 0.08,
"step": 950
},
{
"epoch": 5.769230769230769,
"grad_norm": 6.852264881134033,
"learning_rate": 8.953333333333335e-06,
"loss": 0.0522,
"step": 975
},
{
"epoch": 5.9171597633136095,
"grad_norm": 5.19258975982666,
"learning_rate": 8.897777777777779e-06,
"loss": 0.0765,
"step": 1000
},
{
"epoch": 5.9171597633136095,
"eval_loss": 0.4010450541973114,
"eval_runtime": 83.2846,
"eval_samples_per_second": 2.029,
"eval_steps_per_second": 0.516,
"eval_wer": 0.28424350940017906,
"step": 1000
},
{
"epoch": 6.06508875739645,
"grad_norm": 9.290264129638672,
"learning_rate": 8.842222222222223e-06,
"loss": 0.056,
"step": 1025
},
{
"epoch": 6.21301775147929,
"grad_norm": 5.741577625274658,
"learning_rate": 8.786666666666668e-06,
"loss": 0.042,
"step": 1050
},
{
"epoch": 6.3609467455621305,
"grad_norm": 2.9526829719543457,
"learning_rate": 8.73111111111111e-06,
"loss": 0.0346,
"step": 1075
},
{
"epoch": 6.508875739644971,
"grad_norm": 4.198945999145508,
"learning_rate": 8.675555555555556e-06,
"loss": 0.049,
"step": 1100
},
{
"epoch": 6.65680473372781,
"grad_norm": 4.736867427825928,
"learning_rate": 8.62e-06,
"loss": 0.053,
"step": 1125
},
{
"epoch": 6.804733727810651,
"grad_norm": 4.120213985443115,
"learning_rate": 8.564444444444445e-06,
"loss": 0.054,
"step": 1150
},
{
"epoch": 6.952662721893491,
"grad_norm": 4.600209712982178,
"learning_rate": 8.50888888888889e-06,
"loss": 0.0383,
"step": 1175
},
{
"epoch": 7.100591715976331,
"grad_norm": 2.9512381553649902,
"learning_rate": 8.453333333333334e-06,
"loss": 0.0344,
"step": 1200
},
{
"epoch": 7.100591715976331,
"eval_loss": 0.4033794701099396,
"eval_runtime": 80.3421,
"eval_samples_per_second": 2.104,
"eval_steps_per_second": 0.535,
"eval_wer": 0.23813786929274844,
"step": 1200
},
{
"epoch": 7.2485207100591715,
"grad_norm": 2.454542636871338,
"learning_rate": 8.397777777777778e-06,
"loss": 0.0303,
"step": 1225
},
{
"epoch": 7.396449704142012,
"grad_norm": 3.2096927165985107,
"learning_rate": 8.342222222222222e-06,
"loss": 0.0315,
"step": 1250
},
{
"epoch": 7.544378698224852,
"grad_norm": 5.889292240142822,
"learning_rate": 8.286666666666668e-06,
"loss": 0.0183,
"step": 1275
},
{
"epoch": 7.6923076923076925,
"grad_norm": 4.196884632110596,
"learning_rate": 8.231111111111112e-06,
"loss": 0.0363,
"step": 1300
},
{
"epoch": 7.840236686390533,
"grad_norm": 5.16650390625,
"learning_rate": 8.175555555555556e-06,
"loss": 0.0376,
"step": 1325
},
{
"epoch": 7.988165680473373,
"grad_norm": 2.2055373191833496,
"learning_rate": 8.120000000000002e-06,
"loss": 0.025,
"step": 1350
},
{
"epoch": 8.136094674556213,
"grad_norm": 1.7722326517105103,
"learning_rate": 8.064444444444444e-06,
"loss": 0.0166,
"step": 1375
},
{
"epoch": 8.284023668639053,
"grad_norm": 2.3344950675964355,
"learning_rate": 8.00888888888889e-06,
"loss": 0.0207,
"step": 1400
},
{
"epoch": 8.284023668639053,
"eval_loss": 0.4384337067604065,
"eval_runtime": 79.5932,
"eval_samples_per_second": 2.123,
"eval_steps_per_second": 0.54,
"eval_wer": 0.2314234556848702,
"step": 1400
},
{
"epoch": 8.284023668639053,
"step": 1400,
"total_flos": 1.140628044644352e+19,
"train_loss": 0.6191167198973042,
"train_runtime": 2923.212,
"train_samples_per_second": 13.684,
"train_steps_per_second": 1.71
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 30,
"save_steps": 200,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 3
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.140628044644352e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}