{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.8860759493670884,
  "eval_steps": 500,
  "global_step": 57,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 1.2923335171622337,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 2.1896,
      "step": 1
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.9912276172628393,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 2.3641,
      "step": 5
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.815034805697151,
      "learning_rate": 1.9697969360350098e-05,
      "loss": 2.4061,
      "step": 10
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.6786695631350969,
      "learning_rate": 1.8502171357296144e-05,
      "loss": 2.4036,
      "step": 15
    },
    {
      "epoch": 0.96,
      "eval_loss": 1.4486770629882812,
      "eval_runtime": 1.5127,
      "eval_samples_per_second": 5.289,
      "eval_steps_per_second": 0.661,
      "step": 19
    },
    {
      "epoch": 1.01,
      "grad_norm": 0.6621397636588793,
      "learning_rate": 1.650618300204242e-05,
      "loss": 2.4408,
      "step": 20
    },
    {
      "epoch": 1.27,
      "grad_norm": 0.8896671084173804,
      "learning_rate": 1.3897858732926794e-05,
      "loss": 2.1577,
      "step": 25
    },
    {
      "epoch": 1.52,
      "grad_norm": 0.7473486744572,
      "learning_rate": 1.092268359463302e-05,
      "loss": 1.9602,
      "step": 30
    },
    {
      "epoch": 1.77,
      "grad_norm": 0.7400600277246799,
      "learning_rate": 7.860669167935028e-06,
      "loss": 2.0418,
      "step": 35
    },
    {
      "epoch": 1.97,
      "eval_loss": 1.485201358795166,
      "eval_runtime": 1.4984,
      "eval_samples_per_second": 5.339,
      "eval_steps_per_second": 0.667,
      "step": 39
    },
    {
      "epoch": 2.03,
      "grad_norm": 0.8160514109181762,
      "learning_rate": 5.000000000000003e-06,
      "loss": 2.0077,
      "step": 40
    },
    {
      "epoch": 2.28,
      "grad_norm": 0.7939146315083865,
      "learning_rate": 2.6099108277934105e-06,
      "loss": 1.7591,
      "step": 45
    },
    {
      "epoch": 2.53,
      "grad_norm": 0.8611599565796522,
      "learning_rate": 9.153472818047627e-07,
      "loss": 1.8598,
      "step": 50
    },
    {
      "epoch": 2.78,
      "grad_norm": 0.809915034715595,
      "learning_rate": 7.579490328064265e-08,
      "loss": 1.8471,
      "step": 55
    },
    {
      "epoch": 2.89,
      "eval_loss": 1.5596716403961182,
      "eval_runtime": 1.7332,
      "eval_samples_per_second": 4.616,
      "eval_steps_per_second": 0.577,
      "step": 57
    },
    {
      "epoch": 2.89,
      "step": 57,
      "total_flos": 11882295459840.0,
      "train_loss": 2.103332046876874,
      "train_runtime": 1101.4918,
      "train_samples_per_second": 3.418,
      "train_steps_per_second": 0.052
    }
  ],
  "logging_steps": 5,
  "max_steps": 57,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 11882295459840.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}