{
  "best_metric": 1.324136734008789,
  "best_model_checkpoint": "miner_id_24/checkpoint-200",
  "epoch": 0.005578333751708364,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.7891668758541825e-05,
      "eval_loss": 2.138502359390259,
      "eval_runtime": 539.9687,
      "eval_samples_per_second": 27.959,
      "eval_steps_per_second": 6.991,
      "step": 1
    },
    {
      "epoch": 0.0002789166875854182,
      "grad_norm": 3.119084358215332,
      "learning_rate": 5.05e-06,
      "loss": 2.1448,
      "step": 10
    },
    {
      "epoch": 0.0005578333751708364,
      "grad_norm": 2.9193804264068604,
      "learning_rate": 1.01e-05,
      "loss": 1.8834,
      "step": 20
    },
    {
      "epoch": 0.0008367500627562547,
      "grad_norm": 3.0759785175323486,
      "learning_rate": 9.538888888888889e-06,
      "loss": 1.7763,
      "step": 30
    },
    {
      "epoch": 0.0011156667503416729,
      "grad_norm": 2.9843831062316895,
      "learning_rate": 8.977777777777778e-06,
      "loss": 1.6058,
      "step": 40
    },
    {
      "epoch": 0.001394583437927091,
      "grad_norm": 5.683198928833008,
      "learning_rate": 8.416666666666667e-06,
      "loss": 1.5341,
      "step": 50
    },
    {
      "epoch": 0.001394583437927091,
      "eval_loss": 1.4934452772140503,
      "eval_runtime": 540.9009,
      "eval_samples_per_second": 27.911,
      "eval_steps_per_second": 6.979,
      "step": 50
    },
    {
      "epoch": 0.0016735001255125093,
      "grad_norm": 2.7144954204559326,
      "learning_rate": 7.855555555555556e-06,
      "loss": 1.7181,
      "step": 60
    },
    {
      "epoch": 0.0019524168130979278,
      "grad_norm": 2.7628259658813477,
      "learning_rate": 7.294444444444444e-06,
      "loss": 1.4813,
      "step": 70
    },
    {
      "epoch": 0.0022313335006833458,
      "grad_norm": 2.849717140197754,
      "learning_rate": 6.733333333333333e-06,
      "loss": 1.3881,
      "step": 80
    },
    {
      "epoch": 0.002510250188268764,
      "grad_norm": 2.8136160373687744,
      "learning_rate": 6.172222222222223e-06,
      "loss": 1.3241,
      "step": 90
    },
    {
      "epoch": 0.002789166875854182,
      "grad_norm": 4.298107147216797,
      "learning_rate": 5.611111111111111e-06,
      "loss": 1.3305,
      "step": 100
    },
    {
      "epoch": 0.002789166875854182,
      "eval_loss": 1.3773066997528076,
      "eval_runtime": 542.8338,
      "eval_samples_per_second": 27.811,
      "eval_steps_per_second": 6.954,
      "step": 100
    },
    {
      "epoch": 0.0030680835634396006,
      "grad_norm": 2.741830587387085,
      "learning_rate": 5.05e-06,
      "loss": 1.6088,
      "step": 110
    },
    {
      "epoch": 0.0033470002510250186,
      "grad_norm": 2.751880407333374,
      "learning_rate": 4.488888888888889e-06,
      "loss": 1.2966,
      "step": 120
    },
    {
      "epoch": 0.003625916938610437,
      "grad_norm": 3.4367833137512207,
      "learning_rate": 3.927777777777778e-06,
      "loss": 1.349,
      "step": 130
    },
    {
      "epoch": 0.0039048336261958555,
      "grad_norm": 3.5134706497192383,
      "learning_rate": 3.3666666666666665e-06,
      "loss": 1.302,
      "step": 140
    },
    {
      "epoch": 0.0041837503137812735,
      "grad_norm": 5.4509100914001465,
      "learning_rate": 2.8055555555555555e-06,
      "loss": 1.2418,
      "step": 150
    },
    {
      "epoch": 0.0041837503137812735,
      "eval_loss": 1.336289644241333,
      "eval_runtime": 539.6746,
      "eval_samples_per_second": 27.974,
      "eval_steps_per_second": 6.995,
      "step": 150
    },
    {
      "epoch": 0.0044626670013666915,
      "grad_norm": 2.685837984085083,
      "learning_rate": 2.2444444444444445e-06,
      "loss": 1.5027,
      "step": 160
    },
    {
      "epoch": 0.00474158368895211,
      "grad_norm": 2.422490119934082,
      "learning_rate": 1.6833333333333332e-06,
      "loss": 1.3534,
      "step": 170
    },
    {
      "epoch": 0.005020500376537528,
      "grad_norm": 3.044349193572998,
      "learning_rate": 1.1222222222222222e-06,
      "loss": 1.2763,
      "step": 180
    },
    {
      "epoch": 0.005299417064122946,
      "grad_norm": 3.222191333770752,
      "learning_rate": 5.611111111111111e-07,
      "loss": 1.2248,
      "step": 190
    },
    {
      "epoch": 0.005578333751708364,
      "grad_norm": 7.070194721221924,
      "learning_rate": 0.0,
      "loss": 1.2512,
      "step": 200
    },
    {
      "epoch": 0.005578333751708364,
      "eval_loss": 1.324136734008789,
      "eval_runtime": 540.3338,
      "eval_samples_per_second": 27.94,
      "eval_steps_per_second": 6.986,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.506684316503245e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}