{
  "best_metric": 93.19793063805328,
  "best_model_checkpoint": "./whisper-small-ha/checkpoint-3500",
  "epoch": 22.29299363057325,
  "eval_steps": 500,
  "global_step": 3500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1592356687898089,
      "grad_norm": 1.5306243896484375,
      "learning_rate": 5e-06,
      "loss": 0.0034,
      "step": 25
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 0.7897353172302246,
      "learning_rate": 1e-05,
      "loss": 0.0053,
      "step": 50
    },
    {
      "epoch": 0.47770700636942676,
      "grad_norm": 0.6488688588142395,
      "learning_rate": 1e-05,
      "loss": 0.0075,
      "step": 75
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 5.7697882652282715,
      "learning_rate": 1e-05,
      "loss": 0.0117,
      "step": 100
    },
    {
      "epoch": 0.7961783439490446,
      "grad_norm": 3.655069351196289,
      "learning_rate": 1e-05,
      "loss": 0.0112,
      "step": 125
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 0.6484639644622803,
      "learning_rate": 1e-05,
      "loss": 0.0119,
      "step": 150
    },
    {
      "epoch": 1.1146496815286624,
      "grad_norm": 1.6433720588684082,
      "learning_rate": 1e-05,
      "loss": 0.0078,
      "step": 175
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 0.5470134019851685,
      "learning_rate": 1e-05,
      "loss": 0.0111,
      "step": 200
    },
    {
      "epoch": 1.4331210191082802,
      "grad_norm": 7.9058685302734375,
      "learning_rate": 1e-05,
      "loss": 0.0105,
      "step": 225
    },
    {
      "epoch": 1.5923566878980893,
      "grad_norm": 0.44242119789123535,
      "learning_rate": 1e-05,
      "loss": 0.0083,
      "step": 250
    },
    {
      "epoch": 1.7515923566878981,
      "grad_norm": 0.560337483882904,
      "learning_rate": 1e-05,
      "loss": 0.0093,
      "step": 275
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 0.45471709966659546,
      "learning_rate": 1e-05,
      "loss": 0.0065,
      "step": 300
    },
    {
      "epoch": 2.070063694267516,
      "grad_norm": 0.8609573841094971,
      "learning_rate": 1e-05,
      "loss": 0.0071,
      "step": 325
    },
    {
      "epoch": 2.229299363057325,
      "grad_norm": 0.27363401651382446,
      "learning_rate": 1e-05,
      "loss": 0.0044,
      "step": 350
    },
    {
      "epoch": 2.388535031847134,
      "grad_norm": 0.8000548481941223,
      "learning_rate": 1e-05,
      "loss": 0.0057,
      "step": 375
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 13.804436683654785,
      "learning_rate": 1e-05,
      "loss": 0.0067,
      "step": 400
    },
    {
      "epoch": 2.7070063694267517,
      "grad_norm": 0.20495767891407013,
      "learning_rate": 1e-05,
      "loss": 0.0064,
      "step": 425
    },
    {
      "epoch": 2.8662420382165603,
      "grad_norm": 1.7401798963546753,
      "learning_rate": 1e-05,
      "loss": 0.0097,
      "step": 450
    },
    {
      "epoch": 3.0254777070063694,
      "grad_norm": 2.01271653175354,
      "learning_rate": 1e-05,
      "loss": 0.0067,
      "step": 475
    },
    {
      "epoch": 3.1847133757961785,
      "grad_norm": 0.6848011016845703,
      "learning_rate": 1e-05,
      "loss": 0.0081,
      "step": 500
    },
    {
      "epoch": 3.1847133757961785,
      "eval_loss": 2.595545768737793,
      "eval_runtime": 162.5603,
      "eval_samples_per_second": 4.06,
      "eval_steps_per_second": 0.258,
      "eval_wer": 100.24908986395862,
      "eval_wer_ortho": 101.58203125,
      "step": 500
    },
    {
      "epoch": 3.343949044585987,
      "grad_norm": 2.8130505084991455,
      "learning_rate": 1e-05,
      "loss": 0.0062,
      "step": 525
    },
    {
      "epoch": 3.5031847133757963,
      "grad_norm": 2.8670215606689453,
      "learning_rate": 1e-05,
      "loss": 0.0049,
      "step": 550
    },
    {
      "epoch": 3.662420382165605,
      "grad_norm": 0.6676667928695679,
      "learning_rate": 1e-05,
      "loss": 0.0052,
      "step": 575
    },
    {
      "epoch": 3.821656050955414,
      "grad_norm": 0.7465265393257141,
      "learning_rate": 1e-05,
      "loss": 0.0033,
      "step": 600
    },
    {
      "epoch": 3.980891719745223,
      "grad_norm": 0.9514827132225037,
      "learning_rate": 1e-05,
      "loss": 0.0037,
      "step": 625
    },
    {
      "epoch": 4.140127388535032,
      "grad_norm": 0.12443900853395462,
      "learning_rate": 1e-05,
      "loss": 0.0024,
      "step": 650
    },
    {
      "epoch": 4.2993630573248405,
      "grad_norm": 0.19353769719600677,
      "learning_rate": 1e-05,
      "loss": 0.0034,
      "step": 675
    },
    {
      "epoch": 4.45859872611465,
      "grad_norm": 3.0788159370422363,
      "learning_rate": 1e-05,
      "loss": 0.0051,
      "step": 700
    },
    {
      "epoch": 4.617834394904459,
      "grad_norm": 0.6144561171531677,
      "learning_rate": 1e-05,
      "loss": 0.003,
      "step": 725
    },
    {
      "epoch": 4.777070063694268,
      "grad_norm": 0.4218046963214874,
      "learning_rate": 1e-05,
      "loss": 0.0046,
      "step": 750
    },
    {
      "epoch": 4.936305732484076,
      "grad_norm": 13.958199501037598,
      "learning_rate": 1e-05,
      "loss": 0.0052,
      "step": 775
    },
    {
      "epoch": 5.095541401273885,
      "grad_norm": 5.515628337860107,
      "learning_rate": 1e-05,
      "loss": 0.005,
      "step": 800
    },
    {
      "epoch": 5.254777070063694,
      "grad_norm": 0.19730611145496368,
      "learning_rate": 1e-05,
      "loss": 0.0025,
      "step": 825
    },
    {
      "epoch": 5.414012738853503,
      "grad_norm": 0.5651189684867859,
      "learning_rate": 1e-05,
      "loss": 0.0084,
      "step": 850
    },
    {
      "epoch": 5.573248407643312,
      "grad_norm": 0.14254024624824524,
      "learning_rate": 1e-05,
      "loss": 0.0045,
      "step": 875
    },
    {
      "epoch": 5.732484076433121,
      "grad_norm": 0.863792359828949,
      "learning_rate": 1e-05,
      "loss": 0.0046,
      "step": 900
    },
    {
      "epoch": 5.89171974522293,
      "grad_norm": 1.5313643217086792,
      "learning_rate": 1e-05,
      "loss": 0.0036,
      "step": 925
    },
    {
      "epoch": 6.050955414012739,
      "grad_norm": 0.1208188533782959,
      "learning_rate": 1e-05,
      "loss": 0.0041,
      "step": 950
    },
    {
      "epoch": 6.210191082802548,
      "grad_norm": 0.26082348823547363,
      "learning_rate": 1e-05,
      "loss": 0.0036,
      "step": 975
    },
    {
      "epoch": 6.369426751592357,
      "grad_norm": 0.07662803679704666,
      "learning_rate": 1e-05,
      "loss": 0.0019,
      "step": 1000
    },
    {
      "epoch": 6.369426751592357,
      "eval_loss": 2.7293362617492676,
      "eval_runtime": 161.9823,
      "eval_samples_per_second": 4.075,
      "eval_steps_per_second": 0.259,
      "eval_wer": 98.2563709522897,
      "eval_wer_ortho": 99.53125,
      "step": 1000
    },
    {
      "epoch": 6.528662420382165,
      "grad_norm": 0.20079679787158966,
      "learning_rate": 1e-05,
      "loss": 0.0019,
      "step": 1025
    },
    {
      "epoch": 6.687898089171974,
      "grad_norm": 0.5479994416236877,
      "learning_rate": 1e-05,
      "loss": 0.0029,
      "step": 1050
    },
    {
      "epoch": 6.8471337579617835,
      "grad_norm": 2.995877504348755,
      "learning_rate": 1e-05,
      "loss": 0.0026,
      "step": 1075
    },
    {
      "epoch": 7.006369426751593,
      "grad_norm": 0.1092665046453476,
      "learning_rate": 1e-05,
      "loss": 0.0042,
      "step": 1100
    },
    {
      "epoch": 7.165605095541402,
      "grad_norm": 0.23875755071640015,
      "learning_rate": 1e-05,
      "loss": 0.0016,
      "step": 1125
    },
    {
      "epoch": 7.32484076433121,
      "grad_norm": 0.13069689273834229,
      "learning_rate": 1e-05,
      "loss": 0.0014,
      "step": 1150
    },
    {
      "epoch": 7.484076433121019,
      "grad_norm": 0.5974874496459961,
      "learning_rate": 1e-05,
      "loss": 0.0026,
      "step": 1175
    },
    {
      "epoch": 7.643312101910828,
      "grad_norm": 0.2607993185520172,
      "learning_rate": 1e-05,
      "loss": 0.0027,
      "step": 1200
    },
    {
      "epoch": 7.802547770700637,
      "grad_norm": 0.7289318442344666,
      "learning_rate": 1e-05,
      "loss": 0.0032,
      "step": 1225
    },
    {
      "epoch": 7.961783439490446,
      "grad_norm": 0.5595282316207886,
      "learning_rate": 1e-05,
      "loss": 0.003,
      "step": 1250
    },
    {
      "epoch": 8.121019108280255,
      "grad_norm": 1.0708633661270142,
      "learning_rate": 1e-05,
      "loss": 0.0066,
      "step": 1275
    },
    {
      "epoch": 8.280254777070065,
      "grad_norm": 0.12123806029558182,
      "learning_rate": 1e-05,
      "loss": 0.003,
      "step": 1300
    },
    {
      "epoch": 8.439490445859873,
      "grad_norm": 6.941540718078613,
      "learning_rate": 1e-05,
      "loss": 0.0042,
      "step": 1325
    },
    {
      "epoch": 8.598726114649681,
      "grad_norm": 8.278300285339355,
      "learning_rate": 1e-05,
      "loss": 0.0067,
      "step": 1350
    },
    {
      "epoch": 8.757961783439491,
      "grad_norm": 0.314845472574234,
      "learning_rate": 1e-05,
      "loss": 0.004,
      "step": 1375
    },
    {
      "epoch": 8.9171974522293,
      "grad_norm": 0.5092040300369263,
      "learning_rate": 1e-05,
      "loss": 0.0034,
      "step": 1400
    },
    {
      "epoch": 9.07643312101911,
      "grad_norm": 3.4508585929870605,
      "learning_rate": 1e-05,
      "loss": 0.0033,
      "step": 1425
    },
    {
      "epoch": 9.235668789808917,
      "grad_norm": 0.20290407538414001,
      "learning_rate": 1e-05,
      "loss": 0.0029,
      "step": 1450
    },
    {
      "epoch": 9.394904458598726,
      "grad_norm": 0.2386186718940735,
      "learning_rate": 1e-05,
      "loss": 0.0044,
      "step": 1475
    },
    {
      "epoch": 9.554140127388536,
      "grad_norm": 7.686614036560059,
      "learning_rate": 1e-05,
      "loss": 0.0025,
      "step": 1500
    },
    {
      "epoch": 9.554140127388536,
      "eval_loss": 2.748263120651245,
      "eval_runtime": 164.2494,
      "eval_samples_per_second": 4.018,
      "eval_steps_per_second": 0.256,
      "eval_wer": 100.3257328990228,
      "eval_wer_ortho": 101.953125,
      "step": 1500
    },
    {
      "epoch": 9.713375796178344,
      "grad_norm": 4.924851417541504,
      "learning_rate": 1e-05,
      "loss": 0.0038,
      "step": 1525
    },
    {
      "epoch": 9.872611464968152,
      "grad_norm": 0.2480151355266571,
      "learning_rate": 1e-05,
      "loss": 0.0063,
      "step": 1550
    },
    {
      "epoch": 10.031847133757962,
      "grad_norm": 0.3033640384674072,
      "learning_rate": 1e-05,
      "loss": 0.0028,
      "step": 1575
    },
    {
      "epoch": 10.19108280254777,
      "grad_norm": 5.167762756347656,
      "learning_rate": 1e-05,
      "loss": 0.0055,
      "step": 1600
    },
    {
      "epoch": 10.35031847133758,
      "grad_norm": 3.810326099395752,
      "learning_rate": 1e-05,
      "loss": 0.0086,
      "step": 1625
    },
    {
      "epoch": 10.509554140127388,
      "grad_norm": 1.039038062095642,
      "learning_rate": 1e-05,
      "loss": 0.0029,
      "step": 1650
    },
    {
      "epoch": 10.668789808917197,
      "grad_norm": 0.6598480939865112,
      "learning_rate": 1e-05,
      "loss": 0.0026,
      "step": 1675
    },
    {
      "epoch": 10.828025477707007,
      "grad_norm": 0.169340118765831,
      "learning_rate": 1e-05,
      "loss": 0.0042,
      "step": 1700
    },
    {
      "epoch": 10.987261146496815,
      "grad_norm": 0.8864091634750366,
      "learning_rate": 1e-05,
      "loss": 0.0024,
      "step": 1725
    },
    {
      "epoch": 11.146496815286625,
      "grad_norm": 5.09591817855835,
      "learning_rate": 1e-05,
      "loss": 0.0035,
      "step": 1750
    },
    {
      "epoch": 11.305732484076433,
      "grad_norm": 0.39931872487068176,
      "learning_rate": 1e-05,
      "loss": 0.0042,
      "step": 1775
    },
    {
      "epoch": 11.464968152866241,
      "grad_norm": 0.2659781873226166,
      "learning_rate": 1e-05,
      "loss": 0.0025,
      "step": 1800
    },
    {
      "epoch": 11.624203821656051,
      "grad_norm": 0.09195584058761597,
      "learning_rate": 1e-05,
      "loss": 0.0041,
      "step": 1825
    },
    {
      "epoch": 11.78343949044586,
      "grad_norm": 4.084526062011719,
      "learning_rate": 1e-05,
      "loss": 0.0035,
      "step": 1850
    },
    {
      "epoch": 11.94267515923567,
      "grad_norm": 0.3604657053947449,
      "learning_rate": 1e-05,
      "loss": 0.0051,
      "step": 1875
    },
    {
      "epoch": 12.101910828025478,
      "grad_norm": 2.5682971477508545,
      "learning_rate": 1e-05,
      "loss": 0.0062,
      "step": 1900
    },
    {
      "epoch": 12.261146496815286,
      "grad_norm": 0.22079519927501678,
      "learning_rate": 1e-05,
      "loss": 0.0047,
      "step": 1925
    },
    {
      "epoch": 12.420382165605096,
      "grad_norm": 2.0606675148010254,
      "learning_rate": 1e-05,
      "loss": 0.0016,
      "step": 1950
    },
    {
      "epoch": 12.579617834394904,
      "grad_norm": 0.8950979709625244,
      "learning_rate": 1e-05,
      "loss": 0.005,
      "step": 1975
    },
    {
      "epoch": 12.738853503184714,
      "grad_norm": 0.9102997183799744,
      "learning_rate": 1e-05,
      "loss": 0.0021,
      "step": 2000
    },
    {
      "epoch": 12.738853503184714,
      "eval_loss": 2.8374807834625244,
      "eval_runtime": 168.5918,
      "eval_samples_per_second": 3.915,
      "eval_steps_per_second": 0.249,
      "eval_wer": 97.16420770262502,
      "eval_wer_ortho": 99.0234375,
      "step": 2000
    },
    {
      "epoch": 12.898089171974522,
      "grad_norm": 0.8730576634407043,
      "learning_rate": 1e-05,
      "loss": 0.0042,
      "step": 2025
    },
    {
      "epoch": 13.05732484076433,
      "grad_norm": 0.2256990671157837,
      "learning_rate": 1e-05,
      "loss": 0.0032,
      "step": 2050
    },
    {
      "epoch": 13.21656050955414,
      "grad_norm": 0.15921200811862946,
      "learning_rate": 1e-05,
      "loss": 0.0027,
      "step": 2075
    },
    {
      "epoch": 13.375796178343949,
      "grad_norm": 0.06783568859100342,
      "learning_rate": 1e-05,
      "loss": 0.0026,
      "step": 2100
    },
    {
      "epoch": 13.535031847133759,
      "grad_norm": 7.764928817749023,
      "learning_rate": 1e-05,
      "loss": 0.0026,
      "step": 2125
    },
    {
      "epoch": 13.694267515923567,
      "grad_norm": 2.082003116607666,
      "learning_rate": 1e-05,
      "loss": 0.0049,
      "step": 2150
    },
    {
      "epoch": 13.853503184713375,
      "grad_norm": 0.10517149418592453,
      "learning_rate": 1e-05,
      "loss": 0.0034,
      "step": 2175
    },
    {
      "epoch": 14.012738853503185,
      "grad_norm": 0.05206575617194176,
      "learning_rate": 1e-05,
      "loss": 0.0022,
      "step": 2200
    },
    {
      "epoch": 14.171974522292993,
      "grad_norm": 0.4108193814754486,
      "learning_rate": 1e-05,
      "loss": 0.0012,
      "step": 2225
    },
    {
      "epoch": 14.331210191082803,
      "grad_norm": 0.15452294051647186,
      "learning_rate": 1e-05,
      "loss": 0.0023,
      "step": 2250
    },
    {
      "epoch": 14.490445859872612,
      "grad_norm": 0.8691730499267578,
      "learning_rate": 1e-05,
      "loss": 0.0033,
      "step": 2275
    },
    {
      "epoch": 14.64968152866242,
      "grad_norm": 0.13633207976818085,
      "learning_rate": 1e-05,
      "loss": 0.003,
      "step": 2300
    },
    {
      "epoch": 14.80891719745223,
      "grad_norm": 0.18140809237957,
      "learning_rate": 1e-05,
      "loss": 0.0033,
      "step": 2325
    },
    {
      "epoch": 14.968152866242038,
      "grad_norm": 0.180851548910141,
      "learning_rate": 1e-05,
      "loss": 0.0011,
      "step": 2350
    },
    {
      "epoch": 15.127388535031848,
      "grad_norm": 0.21481122076511383,
      "learning_rate": 1e-05,
      "loss": 0.0028,
      "step": 2375
    },
    {
      "epoch": 15.286624203821656,
      "grad_norm": 0.12347543984651566,
      "learning_rate": 1e-05,
      "loss": 0.0023,
      "step": 2400
    },
    {
      "epoch": 15.445859872611464,
      "grad_norm": 0.17947587370872498,
      "learning_rate": 1e-05,
      "loss": 0.0018,
      "step": 2425
    },
    {
      "epoch": 15.605095541401274,
      "grad_norm": 3.2563202381134033,
      "learning_rate": 1e-05,
      "loss": 0.0019,
      "step": 2450
    },
    {
      "epoch": 15.764331210191083,
      "grad_norm": 1.245087742805481,
      "learning_rate": 1e-05,
      "loss": 0.0021,
      "step": 2475
    },
    {
      "epoch": 15.923566878980893,
      "grad_norm": 0.23747095465660095,
      "learning_rate": 1e-05,
      "loss": 0.0044,
      "step": 2500
    },
    {
      "epoch": 15.923566878980893,
      "eval_loss": 2.879415512084961,
      "eval_runtime": 165.4139,
      "eval_samples_per_second": 3.99,
      "eval_steps_per_second": 0.254,
      "eval_wer": 99.38685571948649,
      "eval_wer_ortho": 100.95703125,
      "step": 2500
    },
    {
      "epoch": 16.0828025477707,
      "grad_norm": 0.09223847836256027,
      "learning_rate": 1e-05,
      "loss": 0.0021,
      "step": 2525
    },
    {
      "epoch": 16.24203821656051,
      "grad_norm": 0.11110839247703552,
      "learning_rate": 1e-05,
      "loss": 0.0021,
      "step": 2550
    },
    {
      "epoch": 16.401273885350317,
      "grad_norm": 0.8871801495552063,
      "learning_rate": 1e-05,
      "loss": 0.0017,
      "step": 2575
    },
    {
      "epoch": 16.56050955414013,
      "grad_norm": 4.709665775299072,
      "learning_rate": 1e-05,
      "loss": 0.0026,
      "step": 2600
    },
    {
      "epoch": 16.719745222929937,
      "grad_norm": 0.05977766588330269,
      "learning_rate": 1e-05,
      "loss": 0.0023,
      "step": 2625
    },
    {
      "epoch": 16.878980891719745,
      "grad_norm": 3.9311728477478027,
      "learning_rate": 1e-05,
      "loss": 0.0024,
      "step": 2650
    },
    {
      "epoch": 17.038216560509554,
      "grad_norm": 0.7477012872695923,
      "learning_rate": 1e-05,
      "loss": 0.0046,
      "step": 2675
    },
    {
      "epoch": 17.197452229299362,
      "grad_norm": 0.2451999932527542,
      "learning_rate": 1e-05,
      "loss": 0.0019,
      "step": 2700
    },
    {
      "epoch": 17.356687898089174,
      "grad_norm": 6.275203704833984,
      "learning_rate": 1e-05,
      "loss": 0.0025,
      "step": 2725
    },
    {
      "epoch": 17.515923566878982,
      "grad_norm": 0.22062252461910248,
      "learning_rate": 1e-05,
      "loss": 0.0031,
      "step": 2750
    },
    {
      "epoch": 17.67515923566879,
      "grad_norm": 0.08334895223379135,
      "learning_rate": 1e-05,
      "loss": 0.0014,
      "step": 2775
    },
    {
      "epoch": 17.8343949044586,
      "grad_norm": 2.0435831546783447,
      "learning_rate": 1e-05,
      "loss": 0.0013,
      "step": 2800
    },
    {
      "epoch": 17.993630573248407,
      "grad_norm": 0.09641116112470627,
      "learning_rate": 1e-05,
      "loss": 0.0028,
      "step": 2825
    },
    {
      "epoch": 18.15286624203822,
      "grad_norm": 0.36049848794937134,
      "learning_rate": 1e-05,
      "loss": 0.0009,
      "step": 2850
    },
    {
      "epoch": 18.312101910828027,
      "grad_norm": 1.4279415607452393,
      "learning_rate": 1e-05,
      "loss": 0.0038,
      "step": 2875
    },
    {
      "epoch": 18.471337579617835,
      "grad_norm": 0.22950223088264465,
      "learning_rate": 1e-05,
      "loss": 0.004,
      "step": 2900
    },
    {
      "epoch": 18.630573248407643,
      "grad_norm": 1.3637959957122803,
      "learning_rate": 1e-05,
      "loss": 0.0065,
      "step": 2925
    },
    {
      "epoch": 18.78980891719745,
      "grad_norm": 0.16725656390190125,
      "learning_rate": 1e-05,
      "loss": 0.0025,
      "step": 2950
    },
    {
      "epoch": 18.94904458598726,
      "grad_norm": 0.5503889322280884,
      "learning_rate": 1e-05,
      "loss": 0.0027,
      "step": 2975
    },
    {
      "epoch": 19.10828025477707,
      "grad_norm": 0.10171041637659073,
      "learning_rate": 1e-05,
      "loss": 0.0058,
      "step": 3000
    },
    {
      "epoch": 19.10828025477707,
      "eval_loss": 2.8631205558776855,
      "eval_runtime": 165.0208,
      "eval_samples_per_second": 3.999,
      "eval_steps_per_second": 0.255,
      "eval_wer": 96.6660279747078,
      "eval_wer_ortho": 98.5546875,
      "step": 3000
    },
    {
      "epoch": 19.26751592356688,
      "grad_norm": 3.3861842155456543,
      "learning_rate": 1e-05,
      "loss": 0.0033,
      "step": 3025
    },
    {
      "epoch": 19.426751592356688,
      "grad_norm": 0.24153462052345276,
      "learning_rate": 1e-05,
      "loss": 0.0017,
      "step": 3050
    },
    {
      "epoch": 19.585987261146496,
      "grad_norm": 0.1117340698838234,
      "learning_rate": 1e-05,
      "loss": 0.0043,
      "step": 3075
    },
    {
      "epoch": 19.745222929936304,
      "grad_norm": 0.3812161982059479,
      "learning_rate": 1e-05,
      "loss": 0.0029,
      "step": 3100
    },
    {
      "epoch": 19.904458598726116,
      "grad_norm": 0.31516993045806885,
      "learning_rate": 1e-05,
      "loss": 0.0026,
      "step": 3125
    },
    {
      "epoch": 20.063694267515924,
      "grad_norm": 0.08710745722055435,
      "learning_rate": 1e-05,
      "loss": 0.0018,
      "step": 3150
    },
    {
      "epoch": 20.222929936305732,
      "grad_norm": 0.3438999652862549,
      "learning_rate": 1e-05,
      "loss": 0.0019,
      "step": 3175
    },
    {
      "epoch": 20.38216560509554,
      "grad_norm": 0.04310673847794533,
      "learning_rate": 1e-05,
      "loss": 0.0013,
      "step": 3200
    },
    {
      "epoch": 20.54140127388535,
      "grad_norm": 0.045789364725351334,
      "learning_rate": 1e-05,
      "loss": 0.003,
      "step": 3225
    },
    {
      "epoch": 20.70063694267516,
      "grad_norm": 0.12968392670154572,
      "learning_rate": 1e-05,
      "loss": 0.0059,
      "step": 3250
    },
    {
      "epoch": 20.85987261146497,
      "grad_norm": 0.20701970160007477,
      "learning_rate": 1e-05,
      "loss": 0.0031,
      "step": 3275
    },
    {
      "epoch": 21.019108280254777,
      "grad_norm": 0.06687166541814804,
      "learning_rate": 1e-05,
      "loss": 0.0015,
      "step": 3300
    },
    {
      "epoch": 21.178343949044585,
      "grad_norm": 0.06841659545898438,
      "learning_rate": 1e-05,
      "loss": 0.003,
      "step": 3325
    },
    {
      "epoch": 21.337579617834393,
      "grad_norm": 0.05048463121056557,
      "learning_rate": 1e-05,
      "loss": 0.0012,
      "step": 3350
    },
    {
      "epoch": 21.496815286624205,
      "grad_norm": 7.001803398132324,
      "learning_rate": 1e-05,
      "loss": 0.0014,
      "step": 3375
    },
    {
      "epoch": 21.656050955414013,
      "grad_norm": 0.2879500687122345,
      "learning_rate": 1e-05,
      "loss": 0.0014,
      "step": 3400
    },
    {
      "epoch": 21.81528662420382,
      "grad_norm": 0.05168440565466881,
      "learning_rate": 1e-05,
      "loss": 0.0006,
      "step": 3425
    },
    {
      "epoch": 21.97452229299363,
      "grad_norm": 0.05985519662499428,
      "learning_rate": 1e-05,
      "loss": 0.0025,
      "step": 3450
    },
    {
      "epoch": 22.133757961783438,
      "grad_norm": 0.05931171402335167,
      "learning_rate": 1e-05,
      "loss": 0.0047,
      "step": 3475
    },
    {
      "epoch": 22.29299363057325,
      "grad_norm": 0.062061209231615067,
      "learning_rate": 1e-05,
      "loss": 0.0011,
      "step": 3500
    },
    {
      "epoch": 22.29299363057325,
      "eval_loss": 2.815406322479248,
      "eval_runtime": 163.1658,
      "eval_samples_per_second": 4.045,
      "eval_steps_per_second": 0.257,
      "eval_wer": 93.19793063805328,
      "eval_wer_ortho": 94.98046875,
      "step": 3500
    }
  ],
  "logging_steps": 25,
  "max_steps": 3611,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 23,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.62645210529792e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}