{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "grad_norm": 1.0952688455581665,
      "learning_rate": 0.0002,
      "loss": 1.3815,
      "step": 25
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.8078024983406067,
      "learning_rate": 0.00017777777777777779,
      "loss": 0.9567,
      "step": 50
    },
    {
      "epoch": 1.5,
      "grad_norm": 1.228662133216858,
      "learning_rate": 0.00015555555555555556,
      "loss": 0.8409,
      "step": 75
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.9074092507362366,
      "learning_rate": 0.00013333333333333334,
      "loss": 0.8684,
      "step": 100
    },
    {
      "epoch": 2.5,
      "grad_norm": 1.2369757890701294,
      "learning_rate": 0.00011111111111111112,
      "loss": 0.7197,
      "step": 125
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.128630518913269,
      "learning_rate": 8.888888888888889e-05,
      "loss": 0.7832,
      "step": 150
    },
    {
      "epoch": 3.5,
      "grad_norm": 1.4580910205841064,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.6828,
      "step": 175
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.3242324590682983,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.6552,
      "step": 200
    },
    {
      "epoch": 4.5,
      "grad_norm": 1.412304162979126,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.6244,
      "step": 225
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.457279920578003,
      "learning_rate": 0.0,
      "loss": 0.5918,
      "step": 250
    }
  ],
  "logging_steps": 25,
  "max_steps": 250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 4.372977156096e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}