{
  "best_metric": 0.5904276967048645,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 2.2727272727272725,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 3.9250755310058594,
      "learning_rate": 5e-05,
      "loss": 2.7029,
      "step": 1
    },
    {
      "epoch": 0.09090909090909091,
      "eval_loss": 3.2133781909942627,
      "eval_runtime": 0.7306,
      "eval_samples_per_second": 101.292,
      "eval_steps_per_second": 13.688,
      "step": 1
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 4.883828163146973,
      "learning_rate": 0.0001,
      "loss": 3.1924,
      "step": 2
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 4.751261234283447,
      "learning_rate": 9.974346616959476e-05,
      "loss": 3.082,
      "step": 3
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 4.260494232177734,
      "learning_rate": 9.897649706262473e-05,
      "loss": 2.5029,
      "step": 4
    },
    {
      "epoch": 0.45454545454545453,
      "grad_norm": 5.264866828918457,
      "learning_rate": 9.770696282000244e-05,
      "loss": 2.5518,
      "step": 5
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 2.550161361694336,
      "learning_rate": 9.594789058101153e-05,
      "loss": 1.5278,
      "step": 6
    },
    {
      "epoch": 0.6363636363636364,
      "grad_norm": 2.883025646209717,
      "learning_rate": 9.371733080722911e-05,
      "loss": 1.5295,
      "step": 7
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 2.446653127670288,
      "learning_rate": 9.103817206036382e-05,
      "loss": 1.2206,
      "step": 8
    },
    {
      "epoch": 0.8181818181818182,
      "grad_norm": 2.4879133701324463,
      "learning_rate": 8.793790613463955e-05,
      "loss": 1.095,
      "step": 9
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 2.3682610988616943,
      "learning_rate": 8.444834595378434e-05,
      "loss": 1.1061,
      "step": 10
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.6341614723205566,
      "learning_rate": 8.060529912738315e-05,
      "loss": 0.935,
      "step": 11
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 1.5357123613357544,
      "learning_rate": 7.644820051634812e-05,
      "loss": 0.7651,
      "step": 12
    },
    {
      "epoch": 1.1818181818181819,
      "grad_norm": 1.6579945087432861,
      "learning_rate": 7.201970757788172e-05,
      "loss": 0.7296,
      "step": 13
    },
    {
      "epoch": 1.2727272727272727,
      "grad_norm": 2.005446434020996,
      "learning_rate": 6.736526264224101e-05,
      "loss": 0.9158,
      "step": 14
    },
    {
      "epoch": 1.3636363636363638,
      "grad_norm": 3.1605615615844727,
      "learning_rate": 6.253262661293604e-05,
      "loss": 0.8794,
      "step": 15
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 1.574661374092102,
      "learning_rate": 5.757138887522884e-05,
      "loss": 0.7897,
      "step": 16
    },
    {
      "epoch": 1.5454545454545454,
      "grad_norm": 1.2775368690490723,
      "learning_rate": 5.2532458441935636e-05,
      "loss": 0.7534,
      "step": 17
    },
    {
      "epoch": 1.6363636363636362,
      "grad_norm": 1.4721097946166992,
      "learning_rate": 4.746754155806437e-05,
      "loss": 0.5951,
      "step": 18
    },
    {
      "epoch": 1.7272727272727273,
      "grad_norm": 1.561679482460022,
      "learning_rate": 4.2428611124771184e-05,
      "loss": 0.7347,
      "step": 19
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 1.2978594303131104,
      "learning_rate": 3.746737338706397e-05,
      "loss": 0.6884,
      "step": 20
    },
    {
      "epoch": 1.9090909090909092,
      "grad_norm": 1.5109196901321411,
      "learning_rate": 3.263473735775899e-05,
      "loss": 0.7078,
      "step": 21
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.3557102680206299,
      "learning_rate": 2.798029242211828e-05,
      "loss": 0.6497,
      "step": 22
    },
    {
      "epoch": 2.090909090909091,
      "grad_norm": 1.1088306903839111,
      "learning_rate": 2.3551799483651894e-05,
      "loss": 0.5894,
      "step": 23
    },
    {
      "epoch": 2.1818181818181817,
      "grad_norm": 1.1537069082260132,
      "learning_rate": 1.9394700872616855e-05,
      "loss": 0.5904,
      "step": 24
    },
    {
      "epoch": 2.2727272727272725,
      "grad_norm": 1.1084684133529663,
      "learning_rate": 1.555165404621567e-05,
      "loss": 0.5657,
      "step": 25
    },
    {
      "epoch": 2.2727272727272725,
      "eval_loss": 0.5904276967048645,
      "eval_runtime": 0.7289,
      "eval_samples_per_second": 101.523,
      "eval_steps_per_second": 13.719,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 33,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.16708196040704e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}