{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 5103,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "learning_rate": 0.00048,
      "loss": 7.0366,
      "step": 100
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0004933240611961058,
      "loss": 0.4467,
      "step": 200
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00048643949930458967,
      "loss": 0.3946,
      "step": 300
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0004794853963838665,
      "loss": 0.399,
      "step": 400
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.0004726008344923505,
      "loss": 0.3696,
      "step": 500
    },
    {
      "epoch": 0.69,
      "eval_loss": 0.3043254017829895,
      "eval_runtime": 65.1553,
      "eval_samples_per_second": 11.664,
      "eval_steps_per_second": 1.458,
      "eval_wer": 0.265993265993266,
      "step": 500
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.0004656467315716273,
      "loss": 0.3677,
      "step": 600
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00045869262865090403,
      "loss": 0.3534,
      "step": 700
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00045173852573018083,
      "loss": 0.34,
      "step": 800
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0004447844228094576,
      "loss": 0.3428,
      "step": 900
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.0004378998609179416,
      "loss": 0.3533,
      "step": 1000
    },
    {
      "epoch": 1.37,
      "eval_loss": 0.27337372303009033,
      "eval_runtime": 65.7997,
      "eval_samples_per_second": 11.55,
      "eval_steps_per_second": 1.444,
      "eval_wer": 0.23931623931623933,
      "step": 1000
    },
    {
      "epoch": 1.51,
      "learning_rate": 0.00043094575799721834,
      "loss": 0.336,
      "step": 1100
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.00042399165507649514,
      "loss": 0.3453,
      "step": 1200
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00041703755215577194,
      "loss": 0.342,
      "step": 1300
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.0004100834492350487,
      "loss": 0.3232,
      "step": 1400
    },
    {
      "epoch": 2.06,
      "learning_rate": 0.0004031293463143255,
      "loss": 0.3419,
      "step": 1500
    },
    {
      "epoch": 2.06,
      "eval_loss": 0.2687598764896393,
      "eval_runtime": 66.7716,
      "eval_samples_per_second": 11.382,
      "eval_steps_per_second": 1.423,
      "eval_wer": 0.23776223776223776,
      "step": 1500
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.0003961752433936022,
      "loss": 0.3045,
      "step": 1600
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.00038922114047287897,
      "loss": 0.327,
      "step": 1700
    },
    {
      "epoch": 2.47,
      "learning_rate": 0.0003822670375521558,
      "loss": 0.3162,
      "step": 1800
    },
    {
      "epoch": 2.61,
      "learning_rate": 0.00037531293463143256,
      "loss": 0.3111,
      "step": 1900
    },
    {
      "epoch": 2.74,
      "learning_rate": 0.0003684283727399166,
      "loss": 0.3399,
      "step": 2000
    },
    {
      "epoch": 2.74,
      "eval_loss": 0.2612040042877197,
      "eval_runtime": 66.305,
      "eval_samples_per_second": 11.462,
      "eval_steps_per_second": 1.433,
      "eval_wer": 0.22895622895622897,
      "step": 2000
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.00036147426981919333,
      "loss": 0.3113,
      "step": 2100
    },
    {
      "epoch": 3.02,
      "learning_rate": 0.00035452016689847013,
      "loss": 0.307,
      "step": 2200
    },
    {
      "epoch": 3.16,
      "learning_rate": 0.0003475660639777469,
      "loss": 0.3061,
      "step": 2300
    },
    {
      "epoch": 3.29,
      "learning_rate": 0.0003406119610570236,
      "loss": 0.2974,
      "step": 2400
    },
    {
      "epoch": 3.43,
      "learning_rate": 0.00033365785813630047,
      "loss": 0.3087,
      "step": 2500
    },
    {
      "epoch": 3.43,
      "eval_loss": 0.26245519518852234,
      "eval_runtime": 66.092,
      "eval_samples_per_second": 11.499,
      "eval_steps_per_second": 1.437,
      "eval_wer": 0.22986272986272988,
      "step": 2500
    },
    {
      "epoch": 3.57,
      "learning_rate": 0.0003267037552155772,
      "loss": 0.3007,
      "step": 2600
    },
    {
      "epoch": 3.7,
      "learning_rate": 0.00031974965229485396,
      "loss": 0.3073,
      "step": 2700
    },
    {
      "epoch": 3.84,
      "learning_rate": 0.00031279554937413076,
      "loss": 0.314,
      "step": 2800
    },
    {
      "epoch": 3.98,
      "learning_rate": 0.0003059109874826148,
      "loss": 0.2944,
      "step": 2900
    },
    {
      "epoch": 4.12,
      "learning_rate": 0.00029902642559109875,
      "loss": 0.2908,
      "step": 3000
    },
    {
      "epoch": 4.12,
      "eval_loss": 0.254683256149292,
      "eval_runtime": 65.5917,
      "eval_samples_per_second": 11.587,
      "eval_steps_per_second": 1.448,
      "eval_wer": 0.2220927220927221,
      "step": 3000
    },
    {
      "epoch": 4.25,
      "learning_rate": 0.00029207232267037554,
      "loss": 0.2906,
      "step": 3100
    },
    {
      "epoch": 4.39,
      "learning_rate": 0.0002851182197496523,
      "loss": 0.296,
      "step": 3200
    },
    {
      "epoch": 4.53,
      "learning_rate": 0.00027816411682892903,
      "loss": 0.2826,
      "step": 3300
    },
    {
      "epoch": 4.66,
      "learning_rate": 0.00027121001390820583,
      "loss": 0.2866,
      "step": 3400
    },
    {
      "epoch": 4.8,
      "learning_rate": 0.00026425591098748263,
      "loss": 0.2938,
      "step": 3500
    },
    {
      "epoch": 4.8,
      "eval_loss": 0.24941803514957428,
      "eval_runtime": 65.3746,
      "eval_samples_per_second": 11.625,
      "eval_steps_per_second": 1.453,
      "eval_wer": 0.21561771561771562,
      "step": 3500
    },
    {
      "epoch": 4.94,
      "learning_rate": 0.0002573018080667594,
      "loss": 0.2953,
      "step": 3600
    },
    {
      "epoch": 5.08,
      "learning_rate": 0.0002503477051460362,
      "loss": 0.2972,
      "step": 3700
    },
    {
      "epoch": 5.21,
      "learning_rate": 0.00024339360222531292,
      "loss": 0.2781,
      "step": 3800
    },
    {
      "epoch": 5.35,
      "learning_rate": 0.00023643949930458972,
      "loss": 0.2803,
      "step": 3900
    },
    {
      "epoch": 5.49,
      "learning_rate": 0.0002294853963838665,
      "loss": 0.275,
      "step": 4000
    },
    {
      "epoch": 5.49,
      "eval_loss": 0.2497035413980484,
      "eval_runtime": 65.425,
      "eval_samples_per_second": 11.616,
      "eval_steps_per_second": 1.452,
      "eval_wer": 0.21406371406371405,
      "step": 4000
    },
    {
      "epoch": 5.62,
      "learning_rate": 0.00022253129346314326,
      "loss": 0.2751,
      "step": 4100
    },
    {
      "epoch": 5.76,
      "learning_rate": 0.00021564673157162725,
      "loss": 0.2915,
      "step": 4200
    },
    {
      "epoch": 5.9,
      "learning_rate": 0.00020869262865090405,
      "loss": 0.2848,
      "step": 4300
    },
    {
      "epoch": 6.04,
      "learning_rate": 0.00020173852573018082,
      "loss": 0.2846,
      "step": 4400
    },
    {
      "epoch": 6.17,
      "learning_rate": 0.00019485396383866482,
      "loss": 0.2775,
      "step": 4500
    },
    {
      "epoch": 6.17,
      "eval_loss": 0.24489344656467438,
      "eval_runtime": 65.0133,
      "eval_samples_per_second": 11.69,
      "eval_steps_per_second": 1.461,
      "eval_wer": 0.21095571095571095,
      "step": 4500
    },
    {
      "epoch": 6.31,
      "learning_rate": 0.0001878998609179416,
      "loss": 0.2845,
      "step": 4600
    },
    {
      "epoch": 6.45,
      "learning_rate": 0.00018094575799721836,
      "loss": 0.2754,
      "step": 4700
    },
    {
      "epoch": 6.58,
      "learning_rate": 0.00017399165507649513,
      "loss": 0.2625,
      "step": 4800
    },
    {
      "epoch": 6.72,
      "learning_rate": 0.0001670375521557719,
      "loss": 0.2822,
      "step": 4900
    },
    {
      "epoch": 6.86,
      "learning_rate": 0.0001600834492350487,
      "loss": 0.2639,
      "step": 5000
    },
    {
      "epoch": 6.86,
      "eval_loss": 0.24407874047756195,
      "eval_runtime": 65.6065,
      "eval_samples_per_second": 11.584,
      "eval_steps_per_second": 1.448,
      "eval_wer": 0.2106967106967107,
      "step": 5000
    },
    {
      "epoch": 7.0,
      "learning_rate": 0.00015312934631432545,
      "loss": 0.2771,
      "step": 5100
    }
  ],
  "logging_steps": 100,
  "max_steps": 7290,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 3.5356221446356038e+19,
  "trial_name": null,
  "trial_params": null
}