{
  "best_metric": 10.859939408063388,
  "best_model_checkpoint": "./final-whisper-for-initial-publish/checkpoint-500",
  "epoch": 0.8237232289950577,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04118616144975288,
      "grad_norm": 67.08393096923828,
      "learning_rate": 4.2000000000000006e-07,
      "loss": 2.8367,
      "step": 25
    },
    {
      "epoch": 0.08237232289950576,
      "grad_norm": 17.447174072265625,
      "learning_rate": 9.200000000000001e-07,
      "loss": 2.0282,
      "step": 50
    },
    {
      "epoch": 0.12355848434925865,
      "grad_norm": 12.4807710647583,
      "learning_rate": 1.42e-06,
      "loss": 1.3152,
      "step": 75
    },
    {
      "epoch": 0.16474464579901152,
      "grad_norm": 8.900789260864258,
      "learning_rate": 1.9200000000000003e-06,
      "loss": 0.7248,
      "step": 100
    },
    {
      "epoch": 0.20593080724876442,
      "grad_norm": 7.897016525268555,
      "learning_rate": 2.42e-06,
      "loss": 0.5731,
      "step": 125
    },
    {
      "epoch": 0.2471169686985173,
      "grad_norm": 9.260579109191895,
      "learning_rate": 2.92e-06,
      "loss": 0.474,
      "step": 150
    },
    {
      "epoch": 0.2883031301482702,
      "grad_norm": 6.941812515258789,
      "learning_rate": 3.4200000000000007e-06,
      "loss": 0.4304,
      "step": 175
    },
    {
      "epoch": 0.32948929159802304,
      "grad_norm": 6.919547080993652,
      "learning_rate": 3.920000000000001e-06,
      "loss": 0.3514,
      "step": 200
    },
    {
      "epoch": 0.37067545304777594,
      "grad_norm": 6.444908618927002,
      "learning_rate": 4.42e-06,
      "loss": 0.273,
      "step": 225
    },
    {
      "epoch": 0.41186161449752884,
      "grad_norm": 5.828249454498291,
      "learning_rate": 4.92e-06,
      "loss": 0.1655,
      "step": 250
    },
    {
      "epoch": 0.45304777594728174,
      "grad_norm": 2.492833137512207,
      "learning_rate": 5.420000000000001e-06,
      "loss": 0.0894,
      "step": 275
    },
    {
      "epoch": 0.4942339373970346,
      "grad_norm": 3.24698805809021,
      "learning_rate": 5.92e-06,
      "loss": 0.076,
      "step": 300
    },
    {
      "epoch": 0.5354200988467874,
      "grad_norm": 4.378988265991211,
      "learning_rate": 6.42e-06,
      "loss": 0.0763,
      "step": 325
    },
    {
      "epoch": 0.5766062602965404,
      "grad_norm": 3.146955728530884,
      "learning_rate": 6.92e-06,
      "loss": 0.0679,
      "step": 350
    },
    {
      "epoch": 0.6177924217462932,
      "grad_norm": 3.8782553672790527,
      "learning_rate": 7.420000000000001e-06,
      "loss": 0.0524,
      "step": 375
    },
    {
      "epoch": 0.6589785831960461,
      "grad_norm": 6.15730094909668,
      "learning_rate": 7.92e-06,
      "loss": 0.0554,
      "step": 400
    },
    {
      "epoch": 0.700164744645799,
      "grad_norm": 6.1213812828063965,
      "learning_rate": 8.42e-06,
      "loss": 0.0535,
      "step": 425
    },
    {
      "epoch": 0.7413509060955519,
      "grad_norm": 4.010768890380859,
      "learning_rate": 8.920000000000001e-06,
      "loss": 0.0465,
      "step": 450
    },
    {
      "epoch": 0.7825370675453048,
      "grad_norm": 3.509321451187134,
      "learning_rate": 9.42e-06,
      "loss": 0.0436,
      "step": 475
    },
    {
      "epoch": 0.8237232289950577,
      "grad_norm": 2.744082450866699,
      "learning_rate": 9.920000000000002e-06,
      "loss": 0.0468,
      "step": 500
    },
    {
      "epoch": 0.8237232289950577,
      "eval_loss": 0.04186907038092613,
      "eval_runtime": 1739.4189,
      "eval_samples_per_second": 1.395,
      "eval_steps_per_second": 0.175,
      "eval_wer": 10.859939408063388,
      "step": 500
    }
  ],
  "logging_steps": 25,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "total_flos": 2.30868320256e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}