{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.0,
  "global_step": 3300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.03,
      "learning_rate": 1.9200000000000003e-06,
      "loss": 10.6008,
      "step": 100
    },
    {
      "epoch": 6.06,
      "learning_rate": 3.920000000000001e-06,
      "loss": 2.9737,
      "step": 200
    },
    {
      "epoch": 9.09,
      "learning_rate": 5.92e-06,
      "loss": 2.8878,
      "step": 300
    },
    {
      "epoch": 12.12,
      "learning_rate": 7.92e-06,
      "loss": 2.7215,
      "step": 400
    },
    {
      "epoch": 15.15,
      "learning_rate": 9.920000000000002e-06,
      "loss": 2.2427,
      "step": 500
    },
    {
      "epoch": 15.15,
      "eval_loss": 1.4632482528686523,
      "eval_runtime": 26.2286,
      "eval_samples_per_second": 19.406,
      "eval_steps_per_second": 1.22,
      "eval_wer": 0.9480786154297448,
      "step": 500
    },
    {
      "epoch": 18.18,
      "learning_rate": 1.1920000000000001e-05,
      "loss": 1.8684,
      "step": 600
    },
    {
      "epoch": 21.21,
      "learning_rate": 1.392e-05,
      "loss": 1.6802,
      "step": 700
    },
    {
      "epoch": 24.24,
      "learning_rate": 1.5920000000000003e-05,
      "loss": 1.5608,
      "step": 800
    },
    {
      "epoch": 27.27,
      "learning_rate": 1.792e-05,
      "loss": 1.4351,
      "step": 900
    },
    {
      "epoch": 30.3,
      "learning_rate": 1.9920000000000002e-05,
      "loss": 1.3128,
      "step": 1000
    },
    {
      "epoch": 30.3,
      "eval_loss": 0.8661954402923584,
      "eval_runtime": 26.4182,
      "eval_samples_per_second": 19.267,
      "eval_steps_per_second": 1.211,
      "eval_wer": 0.6195365209738927,
      "step": 1000
    },
    {
      "epoch": 33.33,
      "learning_rate": 1.916521739130435e-05,
      "loss": 1.2289,
      "step": 1100
    },
    {
      "epoch": 36.36,
      "learning_rate": 1.8295652173913046e-05,
      "loss": 1.1687,
      "step": 1200
    },
    {
      "epoch": 39.39,
      "learning_rate": 1.742608695652174e-05,
      "loss": 1.0618,
      "step": 1300
    },
    {
      "epoch": 42.42,
      "learning_rate": 1.6556521739130437e-05,
      "loss": 0.9804,
      "step": 1400
    },
    {
      "epoch": 45.45,
      "learning_rate": 1.5686956521739133e-05,
      "loss": 0.9403,
      "step": 1500
    },
    {
      "epoch": 45.45,
      "eval_loss": 0.816250205039978,
      "eval_runtime": 26.1007,
      "eval_samples_per_second": 19.501,
      "eval_steps_per_second": 1.226,
      "eval_wer": 0.5168671164564388,
      "step": 1500
    },
    {
      "epoch": 48.48,
      "learning_rate": 1.4817391304347829e-05,
      "loss": 0.8791,
      "step": 1600
    },
    {
      "epoch": 51.52,
      "learning_rate": 1.3947826086956523e-05,
      "loss": 0.8258,
      "step": 1700
    },
    {
      "epoch": 54.55,
      "learning_rate": 1.3078260869565218e-05,
      "loss": 0.7785,
      "step": 1800
    },
    {
      "epoch": 57.58,
      "learning_rate": 1.2208695652173914e-05,
      "loss": 0.7478,
      "step": 1900
    },
    {
      "epoch": 60.61,
      "learning_rate": 1.133913043478261e-05,
      "loss": 0.6868,
      "step": 2000
    },
    {
      "epoch": 60.61,
      "eval_loss": 0.8661099672317505,
      "eval_runtime": 26.3982,
      "eval_samples_per_second": 19.282,
      "eval_steps_per_second": 1.212,
      "eval_wer": 0.48577295394543857,
      "step": 2000
    },
    {
      "epoch": 63.64,
      "learning_rate": 1.0469565217391304e-05,
      "loss": 0.6588,
      "step": 2100
    },
    {
      "epoch": 66.67,
      "learning_rate": 9.600000000000001e-06,
      "loss": 0.6304,
      "step": 2200
    },
    {
      "epoch": 69.7,
      "learning_rate": 8.730434782608697e-06,
      "loss": 0.6275,
      "step": 2300
    },
    {
      "epoch": 72.73,
      "learning_rate": 7.860869565217391e-06,
      "loss": 0.5777,
      "step": 2400
    },
    {
      "epoch": 75.76,
      "learning_rate": 6.991304347826088e-06,
      "loss": 0.563,
      "step": 2500
    },
    {
      "epoch": 75.76,
      "eval_loss": 0.9447136521339417,
      "eval_runtime": 26.3249,
      "eval_samples_per_second": 19.335,
      "eval_steps_per_second": 1.216,
      "eval_wer": 0.486652977412731,
      "step": 2500
    },
    {
      "epoch": 78.79,
      "learning_rate": 6.1217391304347825e-06,
      "loss": 0.5554,
      "step": 2600
    },
    {
      "epoch": 81.82,
      "learning_rate": 5.252173913043479e-06,
      "loss": 0.5418,
      "step": 2700
    },
    {
      "epoch": 84.85,
      "learning_rate": 4.382608695652174e-06,
      "loss": 0.5125,
      "step": 2800
    },
    {
      "epoch": 87.88,
      "learning_rate": 3.5130434782608697e-06,
      "loss": 0.4795,
      "step": 2900
    },
    {
      "epoch": 90.91,
      "learning_rate": 2.6434782608695654e-06,
      "loss": 0.4887,
      "step": 3000
    },
    {
      "epoch": 90.91,
      "eval_loss": 0.9649862051010132,
      "eval_runtime": 25.8471,
      "eval_samples_per_second": 19.693,
      "eval_steps_per_second": 1.238,
      "eval_wer": 0.4822528600762687,
      "step": 3000
    },
    {
      "epoch": 93.94,
      "learning_rate": 1.773913043478261e-06,
      "loss": 0.4584,
      "step": 3100
    },
    {
      "epoch": 96.97,
      "learning_rate": 9.043478260869566e-07,
      "loss": 0.4653,
      "step": 3200
    },
    {
      "epoch": 100.0,
      "learning_rate": 3.478260869565218e-08,
      "loss": 0.4529,
      "step": 3300
    },
    {
      "epoch": 100.0,
      "step": 3300,
      "total_flos": 3.894919778878361e+19,
      "train_loss": 1.3816304802172112,
      "train_runtime": 9602.9824,
      "train_samples_per_second": 10.892,
      "train_steps_per_second": 0.344
    }
  ],
  "max_steps": 3300,
  "num_train_epochs": 100,
  "total_flos": 3.894919778878361e+19,
  "trial_name": null,
  "trial_params": null
}