{
  "best_metric": 1.531933307647705,
  "best_model_checkpoint": "./tst-translation-output/checkpoint-1500",
  "epoch": 5.609573672400898,
  "eval_steps": 1500,
  "global_step": 7500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.37,
      "learning_rate": 5e-05,
      "loss": 1.7074,
      "step": 500
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.952812382030955e-05,
      "loss": 1.6178,
      "step": 1000
    },
    {
      "epoch": 1.12,
      "learning_rate": 4.9056247640619104e-05,
      "loss": 1.4354,
      "step": 1500
    },
    {
      "epoch": 1.12,
      "eval_bleu": 11.2991,
      "eval_gen_len": 16.268,
      "eval_loss": 1.531933307647705,
      "eval_runtime": 168.1897,
      "eval_samples_per_second": 15.887,
      "eval_steps_per_second": 0.993,
      "step": 1500
    },
    {
      "epoch": 1.5,
      "learning_rate": 4.8584371460928654e-05,
      "loss": 1.1627,
      "step": 2000
    },
    {
      "epoch": 1.87,
      "learning_rate": 4.8112495281238205e-05,
      "loss": 1.1969,
      "step": 2500
    },
    {
      "epoch": 2.24,
      "learning_rate": 4.7640619101547756e-05,
      "loss": 0.9158,
      "step": 3000
    },
    {
      "epoch": 2.24,
      "eval_bleu": 11.9085,
      "eval_gen_len": 16.3331,
      "eval_loss": 1.5989255905151367,
      "eval_runtime": 174.1685,
      "eval_samples_per_second": 15.341,
      "eval_steps_per_second": 0.959,
      "step": 3000
    },
    {
      "epoch": 2.62,
      "learning_rate": 4.7168742921857306e-05,
      "loss": 0.7923,
      "step": 3500
    },
    {
      "epoch": 2.99,
      "learning_rate": 4.669686674216686e-05,
      "loss": 0.831,
      "step": 4000
    },
    {
      "epoch": 3.37,
      "learning_rate": 4.622499056247641e-05,
      "loss": 0.4823,
      "step": 4500
    },
    {
      "epoch": 3.37,
      "eval_bleu": 11.2452,
      "eval_gen_len": 15.771,
      "eval_loss": 1.7406657934188843,
      "eval_runtime": 159.7198,
      "eval_samples_per_second": 16.729,
      "eval_steps_per_second": 1.046,
      "step": 4500
    },
    {
      "epoch": 3.74,
      "learning_rate": 4.575311438278596e-05,
      "loss": 0.5185,
      "step": 5000
    },
    {
      "epoch": 4.11,
      "learning_rate": 4.528123820309551e-05,
      "loss": 0.4567,
      "step": 5500
    },
    {
      "epoch": 4.49,
      "learning_rate": 4.480936202340506e-05,
      "loss": 0.3005,
      "step": 6000
    },
    {
      "epoch": 4.49,
      "eval_bleu": 11.0872,
      "eval_gen_len": 16.3668,
      "eval_loss": 1.8923007249832153,
      "eval_runtime": 162.6697,
      "eval_samples_per_second": 16.426,
      "eval_steps_per_second": 1.027,
      "step": 6000
    },
    {
      "epoch": 4.86,
      "learning_rate": 4.433748584371461e-05,
      "loss": 0.3253,
      "step": 6500
    },
    {
      "epoch": 5.24,
      "learning_rate": 4.386560966402416e-05,
      "loss": 0.2367,
      "step": 7000
    },
    {
      "epoch": 5.61,
      "learning_rate": 4.339373348433372e-05,
      "loss": 0.1969,
      "step": 7500
    },
    {
      "epoch": 5.61,
      "eval_bleu": 11.449,
      "eval_gen_len": 15.8694,
      "eval_loss": 2.0163686275482178,
      "eval_runtime": 160.1148,
      "eval_samples_per_second": 16.688,
      "eval_steps_per_second": 1.043,
      "step": 7500
    }
  ],
  "logging_steps": 500,
  "max_steps": 53480,
  "num_train_epochs": 40,
  "save_steps": 1500,
  "total_flos": 2.6005557215232e+17,
  "trial_name": null,
  "trial_params": null
}