{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 17.99960058580748,
  "global_step": 33786,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09,
      "learning_rate": 1.8712499999999997e-05,
      "loss": 6.2099,
      "step": 500
    },
    {
      "epoch": 0.09,
      "eval_loss": 3.41253662109375,
      "eval_runtime": 1358.074,
      "eval_samples_per_second": 14.868,
      "eval_steps_per_second": 0.465,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 0.18,
      "learning_rate": 3.7462499999999996e-05,
      "loss": 2.9961,
      "step": 1000
    },
    {
      "epoch": 0.18,
      "eval_loss": 2.9223873615264893,
      "eval_runtime": 1361.8836,
      "eval_samples_per_second": 14.827,
      "eval_steps_per_second": 0.463,
      "eval_wer": 1.0,
      "step": 1000
    },
    {
      "epoch": 0.26,
      "learning_rate": 5.62125e-05,
      "loss": 2.2147,
      "step": 1500
    },
    {
      "epoch": 0.26,
      "eval_loss": 0.6520791053771973,
      "eval_runtime": 1355.9385,
      "eval_samples_per_second": 14.892,
      "eval_steps_per_second": 0.465,
      "eval_wer": 0.5567824255052013,
      "step": 1500
    },
    {
      "epoch": 0.35,
      "learning_rate": 7.49625e-05,
      "loss": 1.3017,
      "step": 2000
    },
    {
      "epoch": 0.35,
      "eval_loss": 0.31525641679763794,
      "eval_runtime": 1371.0684,
      "eval_samples_per_second": 14.727,
      "eval_steps_per_second": 0.46,
      "eval_wer": 0.27607269814249236,
      "step": 2000
    },
    {
      "epoch": 0.44,
      "learning_rate": 7.431581352833637e-05,
      "loss": 1.1196,
      "step": 2500
    },
    {
      "epoch": 0.44,
      "eval_loss": 0.24444133043289185,
      "eval_runtime": 1363.4869,
      "eval_samples_per_second": 14.809,
      "eval_steps_per_second": 0.463,
      "eval_wer": 0.23673497683411066,
      "step": 2500
    },
    {
      "epoch": 0.53,
      "learning_rate": 7.363162705667275e-05,
      "loss": 1.0712,
      "step": 3000
    },
    {
      "epoch": 0.53,
      "eval_loss": 0.23236490786075592,
      "eval_runtime": 1368.4487,
      "eval_samples_per_second": 14.755,
      "eval_steps_per_second": 0.461,
      "eval_wer": 0.21321861436015543,
      "step": 3000
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.294606946983546e-05,
      "loss": 1.052,
      "step": 3500
    },
    {
      "epoch": 0.62,
      "eval_loss": 0.21734564006328583,
      "eval_runtime": 1358.6503,
      "eval_samples_per_second": 14.862,
      "eval_steps_per_second": 0.464,
      "eval_wer": 0.20317428528513493,
      "step": 3500
    },
    {
      "epoch": 2.13,
      "learning_rate": 3.375103277334068e-05,
      "loss": 1.2813,
      "step": 4000
    },
    {
      "epoch": 2.13,
      "eval_loss": 0.332599401473999,
      "eval_runtime": 393.989,
      "eval_samples_per_second": 10.906,
      "eval_steps_per_second": 0.343,
      "eval_wer": 0.20989847979467358,
      "step": 4000
    },
    {
      "epoch": 2.4,
      "learning_rate": 2.3423299366565682e-05,
      "loss": 1.2365,
      "step": 4500
    },
    {
      "epoch": 2.4,
      "eval_loss": 0.3223731815814972,
      "eval_runtime": 391.7507,
      "eval_samples_per_second": 10.969,
      "eval_steps_per_second": 0.345,
      "eval_wer": 0.2003283560376986,
      "step": 4500
    },
    {
      "epoch": 2.66,
      "learning_rate": 1.309556595979069e-05,
      "loss": 1.2193,
      "step": 5000
    },
    {
      "epoch": 2.66,
      "eval_loss": 0.31981074810028076,
      "eval_runtime": 402.4439,
      "eval_samples_per_second": 10.677,
      "eval_steps_per_second": 0.335,
      "eval_wer": 0.19565239980464894,
      "step": 5000
    },
    {
      "epoch": 2.93,
      "learning_rate": 2.767832553015698e-06,
      "loss": 1.2072,
      "step": 5500
    },
    {
      "epoch": 2.93,
      "eval_loss": 0.3063456118106842,
      "eval_runtime": 396.6525,
      "eval_samples_per_second": 10.833,
      "eval_steps_per_second": 0.34,
      "eval_wer": 0.19327285763271923,
      "step": 5500
    },
    {
      "epoch": 3.2,
      "learning_rate": 4.263388037141005e-05,
      "loss": 1.213,
      "step": 6000
    },
    {
      "epoch": 3.2,
      "eval_loss": 0.3051116168498993,
      "eval_runtime": 371.5477,
      "eval_samples_per_second": 11.565,
      "eval_steps_per_second": 0.363,
      "eval_wer": 0.19797998690732255,
      "step": 6000
    },
    {
      "epoch": 3.46,
      "learning_rate": 3.8585078816670256e-05,
      "loss": 1.2074,
      "step": 6500
    },
    {
      "epoch": 3.46,
      "eval_loss": 0.3011966645717621,
      "eval_runtime": 372.0587,
      "eval_samples_per_second": 11.549,
      "eval_steps_per_second": 0.363,
      "eval_wer": 0.18792148549934018,
      "step": 6500
    },
    {
      "epoch": 3.73,
      "learning_rate": 3.4536277261930465e-05,
      "loss": 1.1918,
      "step": 7000
    },
    {
      "epoch": 3.73,
      "eval_loss": 0.29471954703330994,
      "eval_runtime": 372.8767,
      "eval_samples_per_second": 11.524,
      "eval_steps_per_second": 0.362,
      "eval_wer": 0.18287145276764655,
      "step": 7000
    },
    {
      "epoch": 4.0,
      "learning_rate": 3.0487475707190668e-05,
      "loss": 1.1893,
      "step": 7500
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.28953900933265686,
      "eval_runtime": 372.4558,
      "eval_samples_per_second": 11.537,
      "eval_steps_per_second": 0.362,
      "eval_wer": 0.1807205129004437,
      "step": 7500
    },
    {
      "epoch": 4.26,
      "learning_rate": 2.643867415245087e-05,
      "loss": 1.1751,
      "step": 8000
    },
    {
      "epoch": 4.26,
      "eval_loss": 0.2877567410469055,
      "eval_runtime": 371.9244,
      "eval_samples_per_second": 11.553,
      "eval_steps_per_second": 0.363,
      "eval_wer": 0.17759281773122604,
      "step": 8000
    },
    {
      "epoch": 4.53,
      "learning_rate": 2.2389872597711073e-05,
      "loss": 1.1628,
      "step": 8500
    },
    {
      "epoch": 4.53,
      "eval_loss": 0.28352275490760803,
      "eval_runtime": 373.4388,
      "eval_samples_per_second": 11.507,
      "eval_steps_per_second": 0.362,
      "eval_wer": 0.1730727267059447,
      "step": 8500
    },
    {
      "epoch": 4.79,
      "learning_rate": 1.834107104297128e-05,
      "loss": 1.1577,
      "step": 9000
    },
    {
      "epoch": 4.79,
      "eval_loss": 0.28156954050064087,
      "eval_runtime": 371.5997,
      "eval_samples_per_second": 11.564,
      "eval_steps_per_second": 0.363,
      "eval_wer": 0.17605494768124527,
      "step": 9000
    },
    {
      "epoch": 5.06,
      "learning_rate": 1.4300367091340963e-05,
      "loss": 1.1448,
      "step": 9500
    },
    {
      "epoch": 5.06,
      "eval_loss": 0.2757455110549927,
      "eval_runtime": 373.7148,
      "eval_samples_per_second": 11.498,
      "eval_steps_per_second": 0.361,
      "eval_wer": 0.17397674491100096,
      "step": 9500
    },
    {
      "epoch": 5.33,
      "learning_rate": 1.0251565536601165e-05,
      "loss": 1.1407,
      "step": 10000
    },
    {
      "epoch": 5.33,
      "eval_loss": 0.2768358588218689,
      "eval_runtime": 373.2273,
      "eval_samples_per_second": 11.513,
      "eval_steps_per_second": 0.362,
      "eval_wer": 0.17976453962613131,
      "step": 10000
    },
    {
      "epoch": 5.59,
      "learning_rate": 6.202763981861369e-06,
      "loss": 1.1401,
      "step": 10500
    },
    {
      "epoch": 5.59,
      "eval_loss": 0.2780420780181885,
      "eval_runtime": 375.3753,
      "eval_samples_per_second": 11.447,
      "eval_steps_per_second": 0.36,
      "eval_wer": 0.18160374907779753,
      "step": 10500
    },
    {
      "epoch": 5.86,
      "learning_rate": 2.153962427121572e-06,
      "loss": 1.1333,
      "step": 11000
    },
    {
      "epoch": 5.86,
      "eval_loss": 0.2748132646083832,
      "eval_runtime": 371.6352,
      "eval_samples_per_second": 11.562,
      "eval_steps_per_second": 0.363,
      "eval_wer": 0.17495350021301578,
      "step": 11000
    },
    {
      "epoch": 6.13,
      "learning_rate": 4.029916195673358e-05,
      "loss": 1.1571,
      "step": 11500
    },
    {
      "epoch": 6.13,
      "eval_loss": 0.2807982861995697,
      "eval_runtime": 393.9701,
      "eval_samples_per_second": 10.907,
      "eval_steps_per_second": 0.343,
      "eval_wer": 0.17083865872793208,
      "step": 11500
    },
    {
      "epoch": 6.39,
      "learning_rate": 3.8472032742155524e-05,
      "loss": 1.1505,
      "step": 12000
    },
    {
      "epoch": 6.39,
      "eval_loss": 0.27258625626564026,
      "eval_runtime": 389.5937,
      "eval_samples_per_second": 11.029,
      "eval_steps_per_second": 0.347,
      "eval_wer": 0.16920726955329032,
      "step": 12000
    },
    {
      "epoch": 6.66,
      "learning_rate": 3.6648557786006625e-05,
      "loss": 1.1519,
      "step": 12500
    },
    {
      "epoch": 6.66,
      "eval_loss": 0.2748955190181732,
      "eval_runtime": 387.583,
      "eval_samples_per_second": 11.087,
      "eval_steps_per_second": 0.348,
      "eval_wer": 0.16539376746989204,
      "step": 12500
    },
    {
      "epoch": 6.93,
      "learning_rate": 3.482142857142857e-05,
      "loss": 1.136,
      "step": 13000
    },
    {
      "epoch": 6.93,
      "eval_loss": 0.276467502117157,
      "eval_runtime": 393.6733,
      "eval_samples_per_second": 10.915,
      "eval_steps_per_second": 0.343,
      "eval_wer": 0.16425075594625768,
      "step": 13000
    },
    {
      "epoch": 7.19,
      "learning_rate": 3.299429935685051e-05,
      "loss": 1.1326,
      "step": 13500
    },
    {
      "epoch": 7.19,
      "eval_loss": 0.27056241035461426,
      "eval_runtime": 391.6082,
      "eval_samples_per_second": 10.973,
      "eval_steps_per_second": 0.345,
      "eval_wer": 0.1668277273813606,
      "step": 13500
    },
    {
      "epoch": 7.46,
      "learning_rate": 3.116717014227246e-05,
      "loss": 1.1342,
      "step": 14000
    },
    {
      "epoch": 7.46,
      "eval_loss": 0.26654428243637085,
      "eval_runtime": 388.567,
      "eval_samples_per_second": 11.059,
      "eval_steps_per_second": 0.347,
      "eval_wer": 0.1638247243783576,
      "step": 14000
    },
    {
      "epoch": 7.72,
      "learning_rate": 2.9340040927694404e-05,
      "loss": 1.1286,
      "step": 14500
    },
    {
      "epoch": 7.72,
      "eval_loss": 0.2668865919113159,
      "eval_runtime": 384.4478,
      "eval_samples_per_second": 11.177,
      "eval_steps_per_second": 0.351,
      "eval_wer": 0.1635857310597795,
      "step": 14500
    },
    {
      "epoch": 7.99,
      "learning_rate": 2.7516565971545504e-05,
      "loss": 1.1243,
      "step": 15000
    },
    {
      "epoch": 7.99,
      "eval_loss": 0.2619013488292694,
      "eval_runtime": 392.1143,
      "eval_samples_per_second": 10.959,
      "eval_steps_per_second": 0.344,
      "eval_wer": 0.16234920041148415,
      "step": 15000
    },
    {
      "epoch": 8.26,
      "learning_rate": 2.5689436756967453e-05,
      "loss": 1.1173,
      "step": 15500
    },
    {
      "epoch": 8.26,
      "eval_loss": 0.2651631236076355,
      "eval_runtime": 388.7223,
      "eval_samples_per_second": 11.054,
      "eval_steps_per_second": 0.347,
      "eval_wer": 0.16037490777975208,
      "step": 15500
    },
    {
      "epoch": 8.52,
      "learning_rate": 2.3862307542389395e-05,
      "loss": 1.1129,
      "step": 16000
    },
    {
      "epoch": 8.52,
      "eval_loss": 0.26095372438430786,
      "eval_runtime": 387.1141,
      "eval_samples_per_second": 11.1,
      "eval_steps_per_second": 0.349,
      "eval_wer": 0.1597514469486788,
      "step": 16000
    },
    {
      "epoch": 8.79,
      "learning_rate": 2.203517832781134e-05,
      "loss": 1.1091,
      "step": 16500
    },
    {
      "epoch": 8.79,
      "eval_loss": 0.2608127295970917,
      "eval_runtime": 386.5271,
      "eval_samples_per_second": 11.117,
      "eval_steps_per_second": 0.349,
      "eval_wer": 0.15837983312031756,
      "step": 16500
    },
    {
      "epoch": 9.06,
      "learning_rate": 2.0208049113233286e-05,
      "loss": 1.1053,
      "step": 17000
    },
    {
      "epoch": 9.06,
      "eval_loss": 0.2632738947868347,
      "eval_runtime": 389.2784,
      "eval_samples_per_second": 11.038,
      "eval_steps_per_second": 0.347,
      "eval_wer": 0.1664016958134605,
      "step": 17000
    },
    {
      "epoch": 9.32,
      "learning_rate": 1.8384574157084387e-05,
      "loss": 1.1004,
      "step": 17500
    },
    {
      "epoch": 9.32,
      "eval_loss": 0.2594461441040039,
      "eval_runtime": 388.9349,
      "eval_samples_per_second": 11.048,
      "eval_steps_per_second": 0.347,
      "eval_wer": 0.16618348452258486,
      "step": 17500
    },
    {
      "epoch": 9.59,
      "learning_rate": 1.6557444942506332e-05,
      "loss": 1.0995,
      "step": 18000
    },
    {
      "epoch": 9.59,
      "eval_loss": 0.26234543323516846,
      "eval_runtime": 383.4387,
      "eval_samples_per_second": 11.206,
      "eval_steps_per_second": 0.352,
      "eval_wer": 0.15694587320884898,
      "step": 18000
    },
    {
      "epoch": 9.86,
      "learning_rate": 1.4730315727928276e-05,
      "loss": 1.0964,
      "step": 18500
    },
    {
      "epoch": 9.86,
      "eval_loss": 0.26244357228279114,
      "eval_runtime": 384.9493,
      "eval_samples_per_second": 11.163,
      "eval_steps_per_second": 0.351,
      "eval_wer": 0.15968910086557145,
      "step": 18500
    },
    {
      "epoch": 10.12,
      "learning_rate": 1.2903186513350224e-05,
      "loss": 1.09,
      "step": 19000
    },
    {
      "epoch": 10.12,
      "eval_loss": 0.2576591968536377,
      "eval_runtime": 391.5397,
      "eval_samples_per_second": 10.975,
      "eval_steps_per_second": 0.345,
      "eval_wer": 0.15782910938620281,
      "step": 19000
    },
    {
      "epoch": 10.39,
      "learning_rate": 1.108336581563048e-05,
      "loss": 1.089,
      "step": 19500
    },
    {
      "epoch": 10.39,
      "eval_loss": 0.25744903087615967,
      "eval_runtime": 384.7348,
      "eval_samples_per_second": 11.169,
      "eval_steps_per_second": 0.351,
      "eval_wer": 0.1531219801115995,
      "step": 19500
    },
    {
      "epoch": 10.66,
      "learning_rate": 9.256236601052426e-06,
      "loss": 1.0864,
      "step": 20000
    },
    {
      "epoch": 10.66,
      "eval_loss": 0.2556162476539612,
      "eval_runtime": 391.8515,
      "eval_samples_per_second": 10.966,
      "eval_steps_per_second": 0.345,
      "eval_wer": 0.15464945914772904,
      "step": 20000
    },
    {
      "epoch": 10.92,
      "learning_rate": 7.429107386474371e-06,
      "loss": 1.0806,
      "step": 20500
    },
    {
      "epoch": 10.92,
      "eval_loss": 0.2548317313194275,
      "eval_runtime": 384.3272,
      "eval_samples_per_second": 11.181,
      "eval_steps_per_second": 0.351,
      "eval_wer": 0.1583174870372102,
      "step": 20500
    },
    {
      "epoch": 11.19,
      "learning_rate": 5.6019781718963155e-06,
      "loss": 1.0842,
      "step": 21000
    },
    {
      "epoch": 11.19,
      "eval_loss": 0.25501224398612976,
      "eval_runtime": 383.8764,
      "eval_samples_per_second": 11.194,
      "eval_steps_per_second": 0.352,
      "eval_wer": 0.1542026455521265,
      "step": 21000
    },
    {
      "epoch": 11.45,
      "learning_rate": 3.774848957318261e-06,
      "loss": 1.0805,
      "step": 21500
    },
    {
      "epoch": 11.45,
      "eval_loss": 0.25607678294181824,
      "eval_runtime": 388.7678,
      "eval_samples_per_second": 11.053,
      "eval_steps_per_second": 0.347,
      "eval_wer": 0.15236343610046033,
      "step": 21500
    },
    {
      "epoch": 11.72,
      "learning_rate": 1.9477197427402063e-06,
      "loss": 1.0722,
      "step": 22000
    },
    {
      "epoch": 11.72,
      "eval_loss": 0.25402259826660156,
      "eval_runtime": 385.5053,
      "eval_samples_per_second": 11.146,
      "eval_steps_per_second": 0.35,
      "eval_wer": 0.1565614056963538,
      "step": 22000
    },
    {
      "epoch": 11.99,
      "learning_rate": 1.2059052816215163e-07,
      "loss": 1.0763,
      "step": 22500
    },
    {
      "epoch": 11.99,
      "eval_loss": 0.2549177408218384,
      "eval_runtime": 379.953,
      "eval_samples_per_second": 11.309,
      "eval_steps_per_second": 0.355,
      "eval_wer": 0.15720564855512953,
      "step": 22500
    },
    {
      "epoch": 12.25,
      "learning_rate": 2.5471119360724847e-05,
      "loss": 1.0835,
      "step": 23000
    },
    {
      "epoch": 12.25,
      "eval_loss": 0.25863561034202576,
      "eval_runtime": 369.3533,
      "eval_samples_per_second": 11.634,
      "eval_steps_per_second": 0.366,
      "eval_wer": 0.15212444278188222,
      "step": 23000
    },
    {
      "epoch": 12.52,
      "learning_rate": 2.4293714213804817e-05,
      "loss": 1.0883,
      "step": 23500
    },
    {
      "epoch": 12.52,
      "eval_loss": 0.25827670097351074,
      "eval_runtime": 370.2467,
      "eval_samples_per_second": 11.606,
      "eval_steps_per_second": 0.365,
      "eval_wer": 0.15193740453256024,
      "step": 23500
    },
    {
      "epoch": 12.79,
      "learning_rate": 2.3113949537532244e-05,
      "loss": 1.0888,
      "step": 24000
    },
    {
      "epoch": 12.79,
      "eval_loss": 0.2551300823688507,
      "eval_runtime": 367.9843,
      "eval_samples_per_second": 11.677,
      "eval_steps_per_second": 0.367,
      "eval_wer": 0.15819279487099555,
      "step": 24000
    },
    {
      "epoch": 13.05,
      "learning_rate": 2.1934184861259672e-05,
      "loss": 1.0933,
      "step": 24500
    },
    {
      "epoch": 13.05,
      "eval_loss": 0.2628032863140106,
      "eval_runtime": 369.9671,
      "eval_samples_per_second": 11.615,
      "eval_steps_per_second": 0.365,
      "eval_wer": 0.1537142679011191,
      "step": 24500
    },
    {
      "epoch": 13.32,
      "learning_rate": 2.07544201849871e-05,
      "loss": 1.0799,
      "step": 25000
    },
    {
      "epoch": 13.32,
      "eval_loss": 0.2600410580635071,
      "eval_runtime": 374.9827,
      "eval_samples_per_second": 11.459,
      "eval_steps_per_second": 0.36,
      "eval_wer": 0.150752828953521,
      "step": 25000
    },
    {
      "epoch": 13.59,
      "learning_rate": 1.957701503806707e-05,
      "loss": 1.0804,
      "step": 25500
    },
    {
      "epoch": 13.59,
      "eval_loss": 0.26200664043426514,
      "eval_runtime": 369.1646,
      "eval_samples_per_second": 11.64,
      "eval_steps_per_second": 0.366,
      "eval_wer": 0.14753161465964235,
      "step": 25500
    },
    {
      "epoch": 13.85,
      "learning_rate": 1.8397250361794498e-05,
      "loss": 1.0814,
      "step": 26000
    },
    {
      "epoch": 13.85,
      "eval_loss": 0.2537305951118469,
      "eval_runtime": 368.6655,
      "eval_samples_per_second": 11.656,
      "eval_steps_per_second": 0.366,
      "eval_wer": 0.15170880222783337,
      "step": 26000
    },
    {
      "epoch": 14.12,
      "learning_rate": 1.7217485685521926e-05,
      "loss": 1.0693,
      "step": 26500
    },
    {
      "epoch": 14.12,
      "eval_loss": 0.25602129101753235,
      "eval_runtime": 368.3159,
      "eval_samples_per_second": 11.667,
      "eval_steps_per_second": 0.367,
      "eval_wer": 0.15421303656597773,
      "step": 26500
    },
    {
      "epoch": 14.38,
      "learning_rate": 1.6037721009249354e-05,
      "loss": 1.0724,
      "step": 27000
    },
    {
      "epoch": 14.38,
      "eval_loss": 0.2540068030357361,
      "eval_runtime": 369.0094,
      "eval_samples_per_second": 11.645,
      "eval_steps_per_second": 0.366,
      "eval_wer": 0.15736151376289784,
      "step": 27000
    },
    {
      "epoch": 14.65,
      "learning_rate": 1.4857956332976782e-05,
      "loss": 1.0704,
      "step": 27500
    },
    {
      "epoch": 14.65,
      "eval_loss": 0.25483617186546326,
      "eval_runtime": 365.0658,
      "eval_samples_per_second": 11.77,
      "eval_steps_per_second": 0.37,
      "eval_wer": 0.16258819373006225,
      "step": 27500
    },
    {
      "epoch": 14.92,
      "learning_rate": 1.3678191656704208e-05,
      "loss": 1.0729,
      "step": 28000
    },
    {
      "epoch": 14.92,
      "eval_loss": 0.254844069480896,
      "eval_runtime": 367.5842,
      "eval_samples_per_second": 11.69,
      "eval_steps_per_second": 0.367,
      "eval_wer": 0.16009435040576908,
      "step": 28000
    },
    {
      "epoch": 15.18,
      "learning_rate": 1.2498426980431636e-05,
      "loss": 1.0724,
      "step": 28500
    },
    {
      "epoch": 15.18,
      "eval_loss": 0.25110504031181335,
      "eval_runtime": 367.3861,
      "eval_samples_per_second": 11.696,
      "eval_steps_per_second": 0.367,
      "eval_wer": 0.15124120660452842,
      "step": 28500
    },
    {
      "epoch": 15.45,
      "learning_rate": 1.1318662304159062e-05,
      "loss": 1.0655,
      "step": 29000
    },
    {
      "epoch": 15.45,
      "eval_loss": 0.24978148937225342,
      "eval_runtime": 375.4183,
      "eval_samples_per_second": 11.446,
      "eval_steps_per_second": 0.36,
      "eval_wer": 0.14903831166806944,
      "step": 29000
    },
    {
      "epoch": 15.98,
      "learning_rate": 8.963852010319007e-06,
      "loss": 1.0608,
      "step": 30000
    },
    {
      "epoch": 15.98,
      "eval_loss": 0.24873663485050201,
      "eval_runtime": 370.6074,
      "eval_samples_per_second": 11.594,
      "eval_steps_per_second": 0.364,
      "eval_wer": 0.14812390244916196,
      "step": 30000
    },
    {
      "epoch": 16.52,
      "learning_rate": 6.604322657773862e-06,
      "loss": 1.0541,
      "step": 31000
    },
    {
      "epoch": 16.52,
      "eval_loss": 0.2467627078294754,
      "eval_runtime": 371.5001,
      "eval_samples_per_second": 11.567,
      "eval_steps_per_second": 0.363,
      "eval_wer": 0.15039953448257948,
      "step": 31000
    },
    {
      "epoch": 17.05,
      "learning_rate": 4.244793305228717e-06,
      "loss": 1.0584,
      "step": 32000
    },
    {
      "epoch": 17.05,
      "eval_loss": 0.2466605007648468,
      "eval_runtime": 370.8863,
      "eval_samples_per_second": 11.586,
      "eval_steps_per_second": 0.364,
      "eval_wer": 0.1493084780282012,
      "step": 32000
    },
    {
      "epoch": 17.58,
      "learning_rate": 1.8852639526835713e-06,
      "loss": 1.0507,
      "step": 33000
    },
    {
      "epoch": 17.58,
      "eval_loss": 0.2480592578649521,
      "eval_runtime": 373.0281,
      "eval_samples_per_second": 11.519,
      "eval_steps_per_second": 0.362,
      "eval_wer": 0.15173997526938704,
      "step": 33000
    },
    {
      "epoch": 18.0,
      "step": 33786,
      "total_flos": 9.499341430600616e+20,
      "train_loss": 0.16521977071390054,
      "train_runtime": 71350.5908,
      "train_samples_per_second": 60.63,
      "train_steps_per_second": 0.474
    }
  ],
  "max_steps": 33786,
  "num_train_epochs": 18,
  "total_flos": 9.499341430600616e+20,
  "trial_name": null,
  "trial_params": null
}