{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 59.999647514980616,
  "global_step": 85080,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.7,
      "eval_loss": 0.5371278524398804,
      "eval_runtime": 423.1427,
      "eval_samples_per_second": 47.847,
      "eval_steps_per_second": 2.992,
      "eval_wer": 0.3810800894541897,
      "step": 1000
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.0009881184586108469,
      "loss": 1.3606,
      "step": 2000
    },
    {
      "epoch": 1.41,
      "eval_loss": 0.5247300267219543,
      "eval_runtime": 419.7941,
      "eval_samples_per_second": 48.228,
      "eval_steps_per_second": 3.016,
      "eval_wer": 0.3902264309176043,
      "step": 2000
    },
    {
      "epoch": 2.12,
      "eval_loss": 0.512639582157135,
      "eval_runtime": 421.2981,
      "eval_samples_per_second": 48.056,
      "eval_steps_per_second": 3.005,
      "eval_wer": 0.38586728632329304,
      "step": 3000
    },
    {
      "epoch": 2.82,
      "learning_rate": 0.0009643553758325404,
      "loss": 1.3671,
      "step": 4000
    },
    {
      "epoch": 2.82,
      "eval_loss": 0.5062423348426819,
      "eval_runtime": 423.4242,
      "eval_samples_per_second": 47.815,
      "eval_steps_per_second": 2.99,
      "eval_wer": 0.3827660912712279,
      "step": 4000
    },
    {
      "epoch": 3.53,
      "eval_loss": 0.49790534377098083,
      "eval_runtime": 423.631,
      "eval_samples_per_second": 47.792,
      "eval_steps_per_second": 2.988,
      "eval_wer": 0.3671902299252219,
      "step": 5000
    },
    {
      "epoch": 4.23,
      "learning_rate": 0.0009405803996194102,
      "loss": 1.3421,
      "step": 6000
    },
    {
      "epoch": 4.23,
      "eval_loss": 0.4905886650085449,
      "eval_runtime": 422.3048,
      "eval_samples_per_second": 47.942,
      "eval_steps_per_second": 2.998,
      "eval_wer": 0.38159549933608217,
      "step": 6000
    },
    {
      "epoch": 4.94,
      "eval_loss": 0.4783899188041687,
      "eval_runtime": 421.2427,
      "eval_samples_per_second": 48.063,
      "eval_steps_per_second": 3.005,
      "eval_wer": 0.36512859039765183,
      "step": 7000
    },
    {
      "epoch": 5.64,
      "learning_rate": 0.0009168054234062797,
      "loss": 1.328,
      "step": 8000
    },
    {
      "epoch": 5.64,
      "eval_loss": 0.48102879524230957,
      "eval_runtime": 420.6976,
      "eval_samples_per_second": 48.125,
      "eval_steps_per_second": 3.009,
      "eval_wer": 0.3669106855825005,
      "step": 8000
    },
    {
      "epoch": 6.35,
      "eval_loss": 0.47466185688972473,
      "eval_runtime": 421.0932,
      "eval_samples_per_second": 48.08,
      "eval_steps_per_second": 3.006,
      "eval_wer": 0.35974736180026556,
      "step": 9000
    },
    {
      "epoch": 7.05,
      "learning_rate": 0.0008930185537583254,
      "loss": 1.3109,
      "step": 10000
    },
    {
      "epoch": 7.05,
      "eval_loss": 0.4812825620174408,
      "eval_runtime": 417.8509,
      "eval_samples_per_second": 48.453,
      "eval_steps_per_second": 3.03,
      "eval_wer": 0.3808267523935984,
      "step": 10000
    },
    {
      "epoch": 7.76,
      "eval_loss": 0.46314355731010437,
      "eval_runtime": 421.3095,
      "eval_samples_per_second": 48.055,
      "eval_steps_per_second": 3.005,
      "eval_wer": 0.3560696065413376,
      "step": 11000
    },
    {
      "epoch": 8.46,
      "learning_rate": 0.0008692435775451952,
      "loss": 1.2873,
      "step": 12000
    },
    {
      "epoch": 8.46,
      "eval_loss": 0.4602561295032501,
      "eval_runtime": 419.2403,
      "eval_samples_per_second": 48.292,
      "eval_steps_per_second": 3.02,
      "eval_wer": 0.3430533230833741,
      "step": 12000
    },
    {
      "epoch": 9.17,
      "eval_loss": 0.4578526020050049,
      "eval_runtime": 418.3262,
      "eval_samples_per_second": 48.398,
      "eval_steps_per_second": 3.026,
      "eval_wer": 0.3532916346355441,
      "step": 13000
    },
    {
      "epoch": 9.87,
      "learning_rate": 0.0008454567078972407,
      "loss": 1.2661,
      "step": 14000
    },
    {
      "epoch": 9.87,
      "eval_loss": 0.44712555408477783,
      "eval_runtime": 420.1714,
      "eval_samples_per_second": 48.185,
      "eval_steps_per_second": 3.013,
      "eval_wer": 0.336475295268712,
      "step": 14000
    },
    {
      "epoch": 10.58,
      "eval_loss": 0.45836716890335083,
      "eval_runtime": 415.4093,
      "eval_samples_per_second": 48.737,
      "eval_steps_per_second": 3.048,
      "eval_wer": 0.3436910336152072,
      "step": 15000
    },
    {
      "epoch": 11.28,
      "learning_rate": 0.0008216817316841104,
      "loss": 1.249,
      "step": 16000
    },
    {
      "epoch": 11.28,
      "eval_loss": 0.4460853040218353,
      "eval_runtime": 419.1988,
      "eval_samples_per_second": 48.297,
      "eval_steps_per_second": 3.02,
      "eval_wer": 0.3454294499965057,
      "step": 16000
    },
    {
      "epoch": 11.99,
      "eval_loss": 0.44824984669685364,
      "eval_runtime": 419.1569,
      "eval_samples_per_second": 48.302,
      "eval_steps_per_second": 3.02,
      "eval_wer": 0.3367024250471731,
      "step": 17000
    },
    {
      "epoch": 12.69,
      "learning_rate": 0.00079790675547098,
      "loss": 1.2322,
      "step": 18000
    },
    {
      "epoch": 12.69,
      "eval_loss": 0.44639432430267334,
      "eval_runtime": 417.2494,
      "eval_samples_per_second": 48.523,
      "eval_steps_per_second": 3.034,
      "eval_wer": 0.33347019358445734,
      "step": 18000
    },
    {
      "epoch": 13.4,
      "eval_loss": 0.4426889717578888,
      "eval_runtime": 417.2512,
      "eval_samples_per_second": 48.522,
      "eval_steps_per_second": 3.034,
      "eval_wer": 0.3454469215179258,
      "step": 19000
    },
    {
      "epoch": 14.1,
      "learning_rate": 0.0007741317792578497,
      "loss": 1.22,
      "step": 20000
    },
    {
      "epoch": 14.1,
      "eval_loss": 0.44404253363609314,
      "eval_runtime": 419.2873,
      "eval_samples_per_second": 48.287,
      "eval_steps_per_second": 3.019,
      "eval_wer": 0.33950660423509676,
      "step": 20000
    },
    {
      "epoch": 14.81,
      "eval_loss": 0.44594520330429077,
      "eval_runtime": 418.1805,
      "eval_samples_per_second": 48.415,
      "eval_steps_per_second": 3.027,
      "eval_wer": 0.3378468097001887,
      "step": 21000
    },
    {
      "epoch": 15.51,
      "learning_rate": 0.0007503449096098953,
      "loss": 1.2044,
      "step": 22000
    },
    {
      "epoch": 15.51,
      "eval_loss": 0.4406070411205292,
      "eval_runtime": 418.2792,
      "eval_samples_per_second": 48.403,
      "eval_steps_per_second": 3.027,
      "eval_wer": 0.3199035572017611,
      "step": 22000
    },
    {
      "epoch": 16.22,
      "eval_loss": 0.4397943317890167,
      "eval_runtime": 419.2827,
      "eval_samples_per_second": 48.287,
      "eval_steps_per_second": 3.019,
      "eval_wer": 0.31545705500034943,
      "step": 23000
    },
    {
      "epoch": 16.92,
      "learning_rate": 0.000726581826831589,
      "loss": 1.1913,
      "step": 24000
    },
    {
      "epoch": 16.92,
      "eval_loss": 0.42369282245635986,
      "eval_runtime": 417.1191,
      "eval_samples_per_second": 48.538,
      "eval_steps_per_second": 3.035,
      "eval_wer": 0.314959116639877,
      "step": 24000
    },
    {
      "epoch": 17.63,
      "eval_loss": 0.4286690652370453,
      "eval_runtime": 422.4918,
      "eval_samples_per_second": 47.92,
      "eval_steps_per_second": 2.997,
      "eval_wer": 0.3278880424907401,
      "step": 25000
    },
    {
      "epoch": 18.34,
      "learning_rate": 0.0007028068506184586,
      "loss": 1.1705,
      "step": 26000
    },
    {
      "epoch": 18.34,
      "eval_loss": 0.4252525866031647,
      "eval_runtime": 419.581,
      "eval_samples_per_second": 48.253,
      "eval_steps_per_second": 3.017,
      "eval_wer": 0.31029422042071425,
      "step": 26000
    },
    {
      "epoch": 19.04,
      "eval_loss": 0.42342108488082886,
      "eval_runtime": 420.8706,
      "eval_samples_per_second": 48.105,
      "eval_steps_per_second": 3.008,
      "eval_wer": 0.30976133901740166,
      "step": 27000
    },
    {
      "epoch": 19.75,
      "learning_rate": 0.0006790199809705044,
      "loss": 1.1564,
      "step": 28000
    },
    {
      "epoch": 19.75,
      "eval_loss": 0.4174347519874573,
      "eval_runtime": 419.264,
      "eval_samples_per_second": 48.289,
      "eval_steps_per_second": 3.02,
      "eval_wer": 0.3076472849255713,
      "step": 28000
    },
    {
      "epoch": 20.45,
      "eval_loss": 0.42600032687187195,
      "eval_runtime": 415.7754,
      "eval_samples_per_second": 48.695,
      "eval_steps_per_second": 3.045,
      "eval_wer": 0.31604235096792227,
      "step": 29000
    },
    {
      "epoch": 21.16,
      "learning_rate": 0.0006552450047573739,
      "loss": 1.1461,
      "step": 30000
    },
    {
      "epoch": 21.16,
      "eval_loss": 0.42350149154663086,
      "eval_runtime": 419.5333,
      "eval_samples_per_second": 48.258,
      "eval_steps_per_second": 3.018,
      "eval_wer": 0.30363757075966175,
      "step": 30000
    },
    {
      "epoch": 21.86,
      "eval_loss": 0.43086713552474976,
      "eval_runtime": 421.0996,
      "eval_samples_per_second": 48.079,
      "eval_steps_per_second": 3.006,
      "eval_wer": 0.30550702355161086,
      "step": 31000
    },
    {
      "epoch": 22.57,
      "learning_rate": 0.0006314581351094196,
      "loss": 1.1285,
      "step": 32000
    },
    {
      "epoch": 22.57,
      "eval_loss": 0.4263738989830017,
      "eval_runtime": 418.0963,
      "eval_samples_per_second": 48.424,
      "eval_steps_per_second": 3.028,
      "eval_wer": 0.3006324690754071,
      "step": 32000
    },
    {
      "epoch": 23.27,
      "eval_loss": 0.420136034488678,
      "eval_runtime": 418.959,
      "eval_samples_per_second": 48.325,
      "eval_steps_per_second": 3.022,
      "eval_wer": 0.28796561604584525,
      "step": 33000
    },
    {
      "epoch": 23.98,
      "learning_rate": 0.0006076950523311132,
      "loss": 1.1135,
      "step": 34000
    },
    {
      "epoch": 23.98,
      "eval_loss": 0.41308358311653137,
      "eval_runtime": 419.6417,
      "eval_samples_per_second": 48.246,
      "eval_steps_per_second": 3.017,
      "eval_wer": 0.29749633098050177,
      "step": 34000
    },
    {
      "epoch": 24.68,
      "eval_loss": 0.42022430896759033,
      "eval_runtime": 415.5643,
      "eval_samples_per_second": 48.719,
      "eval_steps_per_second": 3.046,
      "eval_wer": 0.2848556852330701,
      "step": 35000
    },
    {
      "epoch": 25.39,
      "learning_rate": 0.0005839200761179829,
      "loss": 1.0968,
      "step": 36000
    },
    {
      "epoch": 25.39,
      "eval_loss": 0.41045960783958435,
      "eval_runtime": 416.7195,
      "eval_samples_per_second": 48.584,
      "eval_steps_per_second": 3.038,
      "eval_wer": 0.2887867775525893,
      "step": 36000
    },
    {
      "epoch": 26.09,
      "eval_loss": 0.4209502339363098,
      "eval_runtime": 419.7061,
      "eval_samples_per_second": 48.239,
      "eval_steps_per_second": 3.016,
      "eval_wer": 0.28344922775875325,
      "step": 37000
    },
    {
      "epoch": 26.8,
      "learning_rate": 0.0005601332064700286,
      "loss": 1.087,
      "step": 38000
    },
    {
      "epoch": 26.8,
      "eval_loss": 0.4122714400291443,
      "eval_runtime": 417.6093,
      "eval_samples_per_second": 48.481,
      "eval_steps_per_second": 3.032,
      "eval_wer": 0.2843228038297575,
      "step": 38000
    },
    {
      "epoch": 27.5,
      "eval_loss": 0.42156872153282166,
      "eval_runtime": 415.6358,
      "eval_samples_per_second": 48.711,
      "eval_steps_per_second": 3.046,
      "eval_wer": 0.2802694108602977,
      "step": 39000
    },
    {
      "epoch": 28.21,
      "learning_rate": 0.0005363582302568982,
      "loss": 1.0707,
      "step": 40000
    },
    {
      "epoch": 28.21,
      "eval_loss": 0.4161020517349243,
      "eval_runtime": 421.5813,
      "eval_samples_per_second": 48.024,
      "eval_steps_per_second": 3.003,
      "eval_wer": 0.2786707666503599,
      "step": 40000
    },
    {
      "epoch": 28.91,
      "eval_loss": 0.4186325967311859,
      "eval_runtime": 419.2011,
      "eval_samples_per_second": 48.297,
      "eval_steps_per_second": 3.02,
      "eval_wer": 0.27398839890977705,
      "step": 41000
    },
    {
      "epoch": 29.62,
      "learning_rate": 0.0005125713606089438,
      "loss": 1.0575,
      "step": 42000
    },
    {
      "epoch": 29.62,
      "eval_loss": 0.41177135705947876,
      "eval_runtime": 420.5227,
      "eval_samples_per_second": 48.145,
      "eval_steps_per_second": 3.011,
      "eval_wer": 0.2844625760011182,
      "step": 42000
    },
    {
      "epoch": 30.32,
      "eval_loss": 0.4242798388004303,
      "eval_runtime": 415.2438,
      "eval_samples_per_second": 48.757,
      "eval_steps_per_second": 3.049,
      "eval_wer": 0.27729925221888324,
      "step": 43000
    },
    {
      "epoch": 31.03,
      "learning_rate": 0.0004888082778306375,
      "loss": 1.0474,
      "step": 44000
    },
    {
      "epoch": 31.03,
      "eval_loss": 0.4220682382583618,
      "eval_runtime": 420.3236,
      "eval_samples_per_second": 48.168,
      "eval_steps_per_second": 3.012,
      "eval_wer": 0.27072122440422114,
      "step": 44000
    },
    {
      "epoch": 31.73,
      "eval_loss": 0.4138353765010834,
      "eval_runtime": 416.304,
      "eval_samples_per_second": 48.633,
      "eval_steps_per_second": 3.041,
      "eval_wer": 0.2699524774617374,
      "step": 45000
    },
    {
      "epoch": 32.44,
      "learning_rate": 0.00046502140818268316,
      "loss": 1.0333,
      "step": 46000
    },
    {
      "epoch": 32.44,
      "eval_loss": 0.41024765372276306,
      "eval_runtime": 419.6952,
      "eval_samples_per_second": 48.24,
      "eval_steps_per_second": 3.016,
      "eval_wer": 0.26381997344328745,
      "step": 46000
    },
    {
      "epoch": 33.15,
      "eval_loss": 0.4162220358848572,
      "eval_runtime": 421.7395,
      "eval_samples_per_second": 48.006,
      "eval_steps_per_second": 3.002,
      "eval_wer": 0.26496435809630303,
      "step": 47000
    },
    {
      "epoch": 33.85,
      "learning_rate": 0.00044123453853472884,
      "loss": 1.0191,
      "step": 48000
    },
    {
      "epoch": 33.85,
      "eval_loss": 0.4154505133628845,
      "eval_runtime": 415.0181,
      "eval_samples_per_second": 48.783,
      "eval_steps_per_second": 3.05,
      "eval_wer": 0.2636452582290866,
      "step": 48000
    },
    {
      "epoch": 34.56,
      "eval_loss": 0.41287967562675476,
      "eval_runtime": 419.9379,
      "eval_samples_per_second": 48.212,
      "eval_steps_per_second": 3.015,
      "eval_wer": 0.2655933328674261,
      "step": 49000
    },
    {
      "epoch": 35.26,
      "learning_rate": 0.0004174595623215985,
      "loss": 1.0087,
      "step": 50000
    },
    {
      "epoch": 35.26,
      "eval_loss": 0.4157230257987976,
      "eval_runtime": 417.0542,
      "eval_samples_per_second": 48.545,
      "eval_steps_per_second": 3.036,
      "eval_wer": 0.2631909986721644,
      "step": 50000
    },
    {
      "epoch": 35.97,
      "eval_loss": 0.40904876589775085,
      "eval_runtime": 417.7439,
      "eval_samples_per_second": 48.465,
      "eval_steps_per_second": 3.031,
      "eval_wer": 0.26537493884967506,
      "step": 51000
    },
    {
      "epoch": 36.67,
      "learning_rate": 0.0003936845861084681,
      "loss": 0.9901,
      "step": 52000
    },
    {
      "epoch": 36.67,
      "eval_loss": 0.41830816864967346,
      "eval_runtime": 417.9487,
      "eval_samples_per_second": 48.441,
      "eval_steps_per_second": 3.029,
      "eval_wer": 0.25867461038507233,
      "step": 52000
    },
    {
      "epoch": 37.38,
      "eval_loss": 0.4250655770301819,
      "eval_runtime": 417.46,
      "eval_samples_per_second": 48.498,
      "eval_steps_per_second": 3.033,
      "eval_wer": 0.2648420574463624,
      "step": 53000
    },
    {
      "epoch": 38.08,
      "learning_rate": 0.00036990960989533776,
      "loss": 0.9795,
      "step": 54000
    },
    {
      "epoch": 38.08,
      "eval_loss": 0.4228881299495697,
      "eval_runtime": 417.6821,
      "eval_samples_per_second": 48.472,
      "eval_steps_per_second": 3.031,
      "eval_wer": 0.25547732196519674,
      "step": 54000
    },
    {
      "epoch": 38.79,
      "eval_loss": 0.4176000952720642,
      "eval_runtime": 418.3393,
      "eval_samples_per_second": 48.396,
      "eval_steps_per_second": 3.026,
      "eval_wer": 0.2545513313299322,
      "step": 55000
    },
    {
      "epoch": 39.49,
      "learning_rate": 0.00034613463368220746,
      "loss": 0.9644,
      "step": 56000
    },
    {
      "epoch": 39.49,
      "eval_loss": 0.4222715497016907,
      "eval_runtime": 423.2092,
      "eval_samples_per_second": 47.839,
      "eval_steps_per_second": 2.991,
      "eval_wer": 0.25131036410650637,
      "step": 56000
    },
    {
      "epoch": 40.2,
      "eval_loss": 0.4243711531162262,
      "eval_runtime": 419.2699,
      "eval_samples_per_second": 48.289,
      "eval_steps_per_second": 3.02,
      "eval_wer": 0.2530138374449647,
      "step": 57000
    },
    {
      "epoch": 40.9,
      "learning_rate": 0.0003223477640342531,
      "loss": 0.9534,
      "step": 58000
    },
    {
      "epoch": 40.9,
      "eval_loss": 0.4174785912036896,
      "eval_runtime": 417.6661,
      "eval_samples_per_second": 48.474,
      "eval_steps_per_second": 3.031,
      "eval_wer": 0.2538349989517087,
      "step": 58000
    },
    {
      "epoch": 41.61,
      "eval_loss": 0.4212724566459656,
      "eval_runtime": 425.7723,
      "eval_samples_per_second": 47.551,
      "eval_steps_per_second": 2.973,
      "eval_wer": 0.2505416171640226,
      "step": 59000
    },
    {
      "epoch": 42.31,
      "learning_rate": 0.00029856089438629876,
      "loss": 0.9397,
      "step": 60000
    },
    {
      "epoch": 42.31,
      "eval_loss": 0.4275393486022949,
      "eval_runtime": 419.6181,
      "eval_samples_per_second": 48.249,
      "eval_steps_per_second": 3.017,
      "eval_wer": 0.2565343490111119,
      "step": 60000
    },
    {
      "epoch": 43.02,
      "eval_loss": 0.4315040111541748,
      "eval_runtime": 420.9924,
      "eval_samples_per_second": 48.091,
      "eval_steps_per_second": 3.007,
      "eval_wer": 0.2528129149486337,
      "step": 61000
    },
    {
      "epoch": 43.72,
      "learning_rate": 0.00027479781160799237,
      "loss": 0.9269,
      "step": 62000
    },
    {
      "epoch": 43.72,
      "eval_loss": 0.4316493570804596,
      "eval_runtime": 417.0076,
      "eval_samples_per_second": 48.551,
      "eval_steps_per_second": 3.036,
      "eval_wer": 0.2501048291285205,
      "step": 62000
    },
    {
      "epoch": 44.43,
      "eval_loss": 0.42470675706863403,
      "eval_runtime": 418.3345,
      "eval_samples_per_second": 48.397,
      "eval_steps_per_second": 3.026,
      "eval_wer": 0.24705604864071562,
      "step": 63000
    },
    {
      "epoch": 45.13,
      "learning_rate": 0.00025101094196003804,
      "loss": 0.9175,
      "step": 64000
    },
    {
      "epoch": 45.13,
      "eval_loss": 0.43763282895088196,
      "eval_runtime": 418.9083,
      "eval_samples_per_second": 48.33,
      "eval_steps_per_second": 3.022,
      "eval_wer": 0.24685512614438465,
      "step": 64000
    },
    {
      "epoch": 45.84,
      "eval_loss": 0.4334784150123596,
      "eval_runtime": 418.8375,
      "eval_samples_per_second": 48.339,
      "eval_steps_per_second": 3.023,
      "eval_wer": 0.24501188063456567,
      "step": 65000
    },
    {
      "epoch": 46.54,
      "learning_rate": 0.00022724785918173168,
      "loss": 0.9026,
      "step": 66000
    },
    {
      "epoch": 46.54,
      "eval_loss": 0.4336349070072174,
      "eval_runtime": 419.7669,
      "eval_samples_per_second": 48.232,
      "eval_steps_per_second": 3.016,
      "eval_wer": 0.24517786008805648,
      "step": 66000
    },
    {
      "epoch": 47.25,
      "eval_loss": 0.4399877190589905,
      "eval_runtime": 419.3411,
      "eval_samples_per_second": 48.281,
      "eval_steps_per_second": 3.019,
      "eval_wer": 0.24265322524285415,
      "step": 67000
    },
    {
      "epoch": 47.95,
      "learning_rate": 0.00020346098953377738,
      "loss": 0.8929,
      "step": 68000
    },
    {
      "epoch": 47.95,
      "eval_loss": 0.43824830651283264,
      "eval_runtime": 419.5549,
      "eval_samples_per_second": 48.256,
      "eval_steps_per_second": 3.017,
      "eval_wer": 0.24285414773918512,
      "step": 68000
    },
    {
      "epoch": 48.66,
      "eval_loss": 0.43613117933273315,
      "eval_runtime": 418.6436,
      "eval_samples_per_second": 48.361,
      "eval_steps_per_second": 3.024,
      "eval_wer": 0.24154378363267873,
      "step": 69000
    },
    {
      "epoch": 49.37,
      "learning_rate": 0.00017967411988582303,
      "loss": 0.8786,
      "step": 70000
    },
    {
      "epoch": 49.37,
      "eval_loss": 0.44130945205688477,
      "eval_runtime": 418.5492,
      "eval_samples_per_second": 48.372,
      "eval_steps_per_second": 3.025,
      "eval_wer": 0.23977915996925012,
      "step": 70000
    },
    {
      "epoch": 50.07,
      "eval_loss": 0.43924885988235474,
      "eval_runtime": 419.6699,
      "eval_samples_per_second": 48.243,
      "eval_steps_per_second": 3.017,
      "eval_wer": 0.2415001048291285,
      "step": 71000
    },
    {
      "epoch": 50.78,
      "learning_rate": 0.00015591103710751666,
      "loss": 0.8714,
      "step": 72000
    },
    {
      "epoch": 50.78,
      "eval_loss": 0.4345008134841919,
      "eval_runtime": 418.4339,
      "eval_samples_per_second": 48.385,
      "eval_steps_per_second": 3.026,
      "eval_wer": 0.24062652875812426,
      "step": 72000
    },
    {
      "epoch": 51.48,
      "eval_loss": 0.44752031564712524,
      "eval_runtime": 417.7023,
      "eval_samples_per_second": 48.47,
      "eval_steps_per_second": 3.031,
      "eval_wer": 0.24017226920120205,
      "step": 73000
    },
    {
      "epoch": 52.19,
      "learning_rate": 0.00013212416745956231,
      "loss": 0.8589,
      "step": 74000
    },
    {
      "epoch": 52.19,
      "eval_loss": 0.4473401606082916,
      "eval_runtime": 419.491,
      "eval_samples_per_second": 48.263,
      "eval_steps_per_second": 3.018,
      "eval_wer": 0.23740303305611854,
      "step": 74000
    },
    {
      "epoch": 52.89,
      "eval_loss": 0.4457215368747711,
      "eval_runtime": 416.4935,
      "eval_samples_per_second": 48.611,
      "eval_steps_per_second": 3.04,
      "eval_wer": 0.23568208819624012,
      "step": 75000
    },
    {
      "epoch": 53.6,
      "learning_rate": 0.00010834919124643197,
      "loss": 0.8493,
      "step": 76000
    },
    {
      "epoch": 53.6,
      "eval_loss": 0.44615164399147034,
      "eval_runtime": 416.8896,
      "eval_samples_per_second": 48.564,
      "eval_steps_per_second": 3.037,
      "eval_wer": 0.2365556642672444,
      "step": 76000
    },
    {
      "epoch": 54.3,
      "eval_loss": 0.44939640164375305,
      "eval_runtime": 416.8835,
      "eval_samples_per_second": 48.565,
      "eval_steps_per_second": 3.037,
      "eval_wer": 0.2355947305891397,
      "step": 77000
    },
    {
      "epoch": 55.01,
      "learning_rate": 8.456232159847763e-05,
      "loss": 0.8395,
      "step": 78000
    },
    {
      "epoch": 55.01,
      "eval_loss": 0.44722679257392883,
      "eval_runtime": 419.1529,
      "eval_samples_per_second": 48.302,
      "eval_steps_per_second": 3.02,
      "eval_wer": 0.23519288559647775,
      "step": 78000
    },
    {
      "epoch": 55.71,
      "eval_loss": 0.44897979497909546,
      "eval_runtime": 419.2062,
      "eval_samples_per_second": 48.296,
      "eval_steps_per_second": 3.02,
      "eval_wer": 0.23388252148997135,
      "step": 79000
    },
    {
      "epoch": 56.42,
      "learning_rate": 6.0799238820171265e-05,
      "loss": 0.8295,
      "step": 80000
    },
    {
      "epoch": 56.42,
      "eval_loss": 0.4489339590072632,
      "eval_runtime": 420.6011,
      "eval_samples_per_second": 48.136,
      "eval_steps_per_second": 3.01,
      "eval_wer": 0.23176846739814103,
      "step": 80000
    },
    {
      "epoch": 57.12,
      "eval_loss": 0.4468826949596405,
      "eval_runtime": 417.4618,
      "eval_samples_per_second": 48.498,
      "eval_steps_per_second": 3.033,
      "eval_wer": 0.23203927598015237,
      "step": 81000
    },
    {
      "epoch": 57.83,
      "learning_rate": 3.7012369172216936e-05,
      "loss": 0.8225,
      "step": 82000
    },
    {
      "epoch": 57.83,
      "eval_loss": 0.4478228688240051,
      "eval_runtime": 417.4954,
      "eval_samples_per_second": 48.494,
      "eval_steps_per_second": 3.032,
      "eval_wer": 0.23214410510867287,
      "step": 82000
    },
    {
      "epoch": 58.53,
      "eval_loss": 0.4525238573551178,
      "eval_runtime": 415.49,
      "eval_samples_per_second": 48.728,
      "eval_steps_per_second": 3.047,
      "eval_wer": 0.2326071004263051,
      "step": 83000
    },
    {
      "epoch": 59.24,
      "learning_rate": 1.3225499524262608e-05,
      "loss": 0.816,
      "step": 84000
    },
    {
      "epoch": 59.24,
      "eval_loss": 0.4532177150249481,
      "eval_runtime": 419.041,
      "eval_samples_per_second": 48.315,
      "eval_steps_per_second": 3.021,
      "eval_wer": 0.2315588091411,
      "step": 84000
    },
    {
      "epoch": 59.94,
      "eval_loss": 0.45018914341926575,
      "eval_runtime": 418.7584,
      "eval_samples_per_second": 48.348,
      "eval_steps_per_second": 3.023,
      "eval_wer": 0.23179467468027115,
      "step": 85000
    },
    {
      "epoch": 60.0,
      "step": 85080,
      "total_flos": 4.6401496923493315e+20,
      "train_loss": 1.0617500136590194,
      "train_runtime": 191647.3893,
      "train_samples_per_second": 28.42,
      "train_steps_per_second": 0.444
    }
  ],
  "max_steps": 85080,
  "num_train_epochs": 60,
  "total_flos": 4.6401496923493315e+20,
  "trial_name": null,
  "trial_params": null
}