{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 330,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06060606060606061,
      "grad_norm": 0.36308756470680237,
      "learning_rate": 1.5151515151515152e-06,
      "loss": 1.3855,
      "step": 20
    },
    {
      "epoch": 0.12121212121212122,
      "grad_norm": 0.3340895473957062,
      "learning_rate": 3.0303030303030305e-06,
      "loss": 1.3679,
      "step": 40
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 0.3473477363586426,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 1.3813,
      "step": 60
    },
    {
      "epoch": 0.24242424242424243,
      "grad_norm": 0.35594913363456726,
      "learning_rate": 4.965385884295467e-06,
      "loss": 1.2987,
      "step": 80
    },
    {
      "epoch": 0.30303030303030304,
      "grad_norm": 0.39734789729118347,
      "learning_rate": 4.798150758954164e-06,
      "loss": 1.237,
      "step": 100
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 0.42136889696121216,
      "learning_rate": 4.501353102310901e-06,
      "loss": 1.1311,
      "step": 120
    },
    {
      "epoch": 0.42424242424242425,
      "grad_norm": 0.557064414024353,
      "learning_rate": 4.091725435297721e-06,
      "loss": 1.0763,
      "step": 140
    },
    {
      "epoch": 0.48484848484848486,
      "grad_norm": 0.5050609707832336,
      "learning_rate": 3.5923612809233987e-06,
      "loss": 0.9135,
      "step": 160
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 0.43929287791252136,
      "learning_rate": 3.0314132238824416e-06,
      "loss": 0.7968,
      "step": 180
    },
    {
      "epoch": 0.6060606060606061,
      "grad_norm": 0.39808526635169983,
      "learning_rate": 2.440505756134732e-06,
      "loss": 0.74,
      "step": 200
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.45625489950180054,
      "learning_rate": 1.852952387243698e-06,
      "loss": 0.7124,
      "step": 220
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.4360494017601013,
      "learning_rate": 1.301877533199859e-06,
      "loss": 0.6832,
      "step": 240
    },
    {
      "epoch": 0.7878787878787878,
      "grad_norm": 0.38371819257736206,
      "learning_rate": 8.183490657468687e-07,
      "loss": 0.6885,
      "step": 260
    },
    {
      "epoch": 0.8484848484848485,
      "grad_norm": 0.36575645208358765,
      "learning_rate": 4.2962680322157335e-07,
      "loss": 0.6905,
      "step": 280
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 0.5191577672958374,
      "learning_rate": 1.5762568750059604e-07,
      "loss": 0.6939,
      "step": 300
    },
    {
      "epoch": 0.9696969696969697,
      "grad_norm": 0.42232680320739746,
      "learning_rate": 1.768028831677926e-08,
      "loss": 0.6645,
      "step": 320
    },
    {
      "epoch": 1.0,
      "step": 330,
      "total_flos": 1.519725494403072e+16,
      "train_loss": 0.958257312485666,
      "train_runtime": 404.051,
      "train_samples_per_second": 3.267,
      "train_steps_per_second": 0.817
    }
  ],
  "logging_steps": 20,
  "max_steps": 330,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.519725494403072e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}