{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 121,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 1.5384615384615387e-06,
      "loss": 1.1746,
      "step": 1
    },
    {
      "epoch": 0.04,
      "learning_rate": 7.692307692307694e-06,
      "loss": 1.1302,
      "step": 5
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.9861,
      "step": 10
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.9983081582712684e-05,
      "loss": 0.9154,
      "step": 15
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.9793406217655516e-05,
      "loss": 0.8791,
      "step": 20
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.8825,
      "step": 25
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.880201391180111e-05,
      "loss": 0.8663,
      "step": 30
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.802123192755044e-05,
      "loss": 0.8349,
      "step": 35
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.7071067811865477e-05,
      "loss": 0.8493,
      "step": 40
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.5971585917027864e-05,
      "loss": 0.8408,
      "step": 45
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.4746003697476406e-05,
      "loss": 0.8537,
      "step": 50
    },
    {
      "epoch": 0.45,
      "learning_rate": 1.342020143325669e-05,
      "loss": 0.8231,
      "step": 55
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.2022175723320382e-05,
      "loss": 0.8292,
      "step": 60
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.0581448289104759e-05,
      "loss": 0.81,
      "step": 65
    },
    {
      "epoch": 0.58,
      "learning_rate": 9.128442572523418e-06,
      "loss": 0.8043,
      "step": 70
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.6938412925756e-06,
      "loss": 0.8119,
      "step": 75
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.3079385268731575e-06,
      "loss": 0.8052,
      "step": 80
    },
    {
      "epoch": 0.7,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.7873,
      "step": 85
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.797645087317401e-06,
      "loss": 0.8023,
      "step": 90
    },
    {
      "epoch": 0.79,
      "learning_rate": 2.726263584269513e-06,
      "loss": 0.8073,
      "step": 95
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.808479557110081e-06,
      "loss": 0.8061,
      "step": 100
    },
    {
      "epoch": 0.87,
      "learning_rate": 1.0636735967658785e-06,
      "loss": 0.7997,
      "step": 105
    },
    {
      "epoch": 0.91,
      "learning_rate": 5.075735642696611e-07,
      "loss": 0.7987,
      "step": 110
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.519224698779198e-07,
      "loss": 0.7965,
      "step": 115
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.230499177994007e-09,
      "loss": 0.8183,
      "step": 120
    },
    {
      "epoch": 1.0,
      "step": 121,
      "total_flos": 2375150469120.0,
      "train_loss": 0.8479397050605333,
      "train_runtime": 621.2868,
      "train_samples_per_second": 1.167,
      "train_steps_per_second": 0.195
    }
  ],
  "logging_steps": 5,
  "max_steps": 121,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 2375150469120.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}