{
  "best_metric": 21.4802,
  "best_model_checkpoint": "./tst-translation-output/checkpoint-16000",
  "epoch": 1.8563638473140736,
  "eval_steps": 2000,
  "global_step": 16000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 2.5e-05,
      "loss": 1.5441,
      "step": 500
    },
    {
      "epoch": 0.12,
      "learning_rate": 5e-05,
      "loss": 1.4175,
      "step": 1000
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.9927274842913664e-05,
      "loss": 1.4046,
      "step": 1500
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.985454968582732e-05,
      "loss": 1.3784,
      "step": 2000
    },
    {
      "epoch": 0.23,
      "eval_bleu": 18.2602,
      "eval_gen_len": 19.2938,
      "eval_loss": 1.5514309406280518,
      "eval_runtime": 1146.8459,
      "eval_samples_per_second": 15.03,
      "eval_steps_per_second": 0.94,
      "step": 2000
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.978182452874098e-05,
      "loss": 1.3367,
      "step": 2500
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.9709099371654644e-05,
      "loss": 1.3112,
      "step": 3000
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.9636374214568306e-05,
      "loss": 1.2933,
      "step": 3500
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.956364905748196e-05,
      "loss": 1.2953,
      "step": 4000
    },
    {
      "epoch": 0.46,
      "eval_bleu": 19.6277,
      "eval_gen_len": 18.7905,
      "eval_loss": 1.5005673170089722,
      "eval_runtime": 1112.1506,
      "eval_samples_per_second": 15.499,
      "eval_steps_per_second": 0.969,
      "step": 4000
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.9490923900395623e-05,
      "loss": 1.2817,
      "step": 4500
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.9418198743309286e-05,
      "loss": 1.2722,
      "step": 5000
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.9345473586222954e-05,
      "loss": 1.2491,
      "step": 5500
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.927274842913661e-05,
      "loss": 1.2446,
      "step": 6000
    },
    {
      "epoch": 0.7,
      "eval_bleu": 20.2667,
      "eval_gen_len": 19.2503,
      "eval_loss": 1.4663728475570679,
      "eval_runtime": 1107.4417,
      "eval_samples_per_second": 15.565,
      "eval_steps_per_second": 0.973,
      "step": 6000
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.920002327205027e-05,
      "loss": 1.2323,
      "step": 6500
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.9127298114963934e-05,
      "loss": 1.2242,
      "step": 7000
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.905457295787759e-05,
      "loss": 1.2223,
      "step": 7500
    },
    {
      "epoch": 0.93,
      "learning_rate": 4.898184780079125e-05,
      "loss": 1.2095,
      "step": 8000
    },
    {
      "epoch": 0.93,
      "eval_bleu": 20.8962,
      "eval_gen_len": 18.9352,
      "eval_loss": 1.448201298713684,
      "eval_runtime": 1086.8921,
      "eval_samples_per_second": 15.859,
      "eval_steps_per_second": 0.992,
      "step": 8000
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.8909122643704913e-05,
      "loss": 1.2022,
      "step": 8500
    },
    {
      "epoch": 1.04,
      "learning_rate": 4.8836397486618576e-05,
      "loss": 0.9756,
      "step": 9000
    },
    {
      "epoch": 1.1,
      "learning_rate": 4.876367232953223e-05,
      "loss": 0.9306,
      "step": 9500
    },
    {
      "epoch": 1.16,
      "learning_rate": 4.869094717244589e-05,
      "loss": 0.9279,
      "step": 10000
    },
    {
      "epoch": 1.16,
      "eval_bleu": 20.9876,
      "eval_gen_len": 19.093,
      "eval_loss": 1.4799305200576782,
      "eval_runtime": 1094.4619,
      "eval_samples_per_second": 15.749,
      "eval_steps_per_second": 0.985,
      "step": 10000
    },
    {
      "epoch": 1.22,
      "learning_rate": 4.8618222015359555e-05,
      "loss": 0.9366,
      "step": 10500
    },
    {
      "epoch": 1.28,
      "learning_rate": 4.854549685827322e-05,
      "loss": 0.9408,
      "step": 11000
    },
    {
      "epoch": 1.33,
      "learning_rate": 4.847277170118687e-05,
      "loss": 0.9456,
      "step": 11500
    },
    {
      "epoch": 1.39,
      "learning_rate": 4.8400046544100535e-05,
      "loss": 0.9604,
      "step": 12000
    },
    {
      "epoch": 1.39,
      "eval_bleu": 21.261,
      "eval_gen_len": 18.8735,
      "eval_loss": 1.4671677350997925,
      "eval_runtime": 1083.8306,
      "eval_samples_per_second": 15.904,
      "eval_steps_per_second": 0.995,
      "step": 12000
    },
    {
      "epoch": 1.45,
      "learning_rate": 4.83273213870142e-05,
      "loss": 0.9375,
      "step": 12500
    },
    {
      "epoch": 1.51,
      "learning_rate": 4.825459622992786e-05,
      "loss": 0.9412,
      "step": 13000
    },
    {
      "epoch": 1.57,
      "learning_rate": 4.818187107284152e-05,
      "loss": 0.9441,
      "step": 13500
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.810914591575518e-05,
      "loss": 0.9543,
      "step": 14000
    },
    {
      "epoch": 1.62,
      "eval_bleu": 21.1987,
      "eval_gen_len": 18.8396,
      "eval_loss": 1.4610902070999146,
      "eval_runtime": 1087.5897,
      "eval_samples_per_second": 15.849,
      "eval_steps_per_second": 0.991,
      "step": 14000
    },
    {
      "epoch": 1.68,
      "learning_rate": 4.8036420758668845e-05,
      "loss": 0.9561,
      "step": 14500
    },
    {
      "epoch": 1.74,
      "learning_rate": 4.79636956015825e-05,
      "loss": 0.9587,
      "step": 15000
    },
    {
      "epoch": 1.8,
      "learning_rate": 4.789097044449616e-05,
      "loss": 0.9553,
      "step": 15500
    },
    {
      "epoch": 1.86,
      "learning_rate": 4.7818245287409825e-05,
      "loss": 0.9532,
      "step": 16000
    },
    {
      "epoch": 1.86,
      "eval_bleu": 21.4802,
      "eval_gen_len": 18.8239,
      "eval_loss": 1.4428682327270508,
      "eval_runtime": 1090.8288,
      "eval_samples_per_second": 15.802,
      "eval_steps_per_second": 0.988,
      "step": 16000
    }
  ],
  "logging_steps": 500,
  "max_steps": 344760,
  "num_train_epochs": 40,
  "save_steps": 2000,
  "total_flos": 5.54785220591616e+17,
  "trial_name": null,
  "trial_params": null
}
|