{
  "best_metric": 10.346579551696777,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.06289308176100629,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0012578616352201257,
      "grad_norm": 0.0676870122551918,
      "learning_rate": 1e-05,
      "loss": 10.3781,
      "step": 1
    },
    {
      "epoch": 0.0012578616352201257,
      "eval_loss": 10.373727798461914,
      "eval_runtime": 2.8214,
      "eval_samples_per_second": 474.581,
      "eval_steps_per_second": 118.734,
      "step": 1
    },
    {
      "epoch": 0.0025157232704402514,
      "grad_norm": 0.053749579936265945,
      "learning_rate": 2e-05,
      "loss": 10.3781,
      "step": 2
    },
    {
      "epoch": 0.0037735849056603774,
      "grad_norm": 0.05700600892305374,
      "learning_rate": 3e-05,
      "loss": 10.3763,
      "step": 3
    },
    {
      "epoch": 0.005031446540880503,
      "grad_norm": 0.05451948195695877,
      "learning_rate": 4e-05,
      "loss": 10.376,
      "step": 4
    },
    {
      "epoch": 0.006289308176100629,
      "grad_norm": 0.05333434417843819,
      "learning_rate": 5e-05,
      "loss": 10.3767,
      "step": 5
    },
    {
      "epoch": 0.007547169811320755,
      "grad_norm": 0.053336936980485916,
      "learning_rate": 6e-05,
      "loss": 10.3776,
      "step": 6
    },
    {
      "epoch": 0.00880503144654088,
      "grad_norm": 0.050664711743593216,
      "learning_rate": 7e-05,
      "loss": 10.3781,
      "step": 7
    },
    {
      "epoch": 0.010062893081761006,
      "grad_norm": 0.053908322006464005,
      "learning_rate": 8e-05,
      "loss": 10.3772,
      "step": 8
    },
    {
      "epoch": 0.011320754716981131,
      "grad_norm": 0.05347241088747978,
      "learning_rate": 9e-05,
      "loss": 10.3755,
      "step": 9
    },
    {
      "epoch": 0.012578616352201259,
      "grad_norm": 0.06627130508422852,
      "learning_rate": 0.0001,
      "loss": 10.376,
      "step": 10
    },
    {
      "epoch": 0.013836477987421384,
      "grad_norm": 0.0658232793211937,
      "learning_rate": 9.999316524962345e-05,
      "loss": 10.3757,
      "step": 11
    },
    {
      "epoch": 0.01509433962264151,
      "grad_norm": 0.07336028665304184,
      "learning_rate": 9.997266286704631e-05,
      "loss": 10.3731,
      "step": 12
    },
    {
      "epoch": 0.016352201257861635,
      "grad_norm": 0.07335679978132248,
      "learning_rate": 9.993849845741524e-05,
      "loss": 10.3721,
      "step": 13
    },
    {
      "epoch": 0.01761006289308176,
      "grad_norm": 0.07778532058000565,
      "learning_rate": 9.989068136093873e-05,
      "loss": 10.3727,
      "step": 14
    },
    {
      "epoch": 0.018867924528301886,
      "grad_norm": 0.08464192599058151,
      "learning_rate": 9.98292246503335e-05,
      "loss": 10.3715,
      "step": 15
    },
    {
      "epoch": 0.02012578616352201,
      "grad_norm": 0.07666515558958054,
      "learning_rate": 9.975414512725057e-05,
      "loss": 10.3705,
      "step": 16
    },
    {
      "epoch": 0.021383647798742137,
      "grad_norm": 0.08507933467626572,
      "learning_rate": 9.966546331768191e-05,
      "loss": 10.3713,
      "step": 17
    },
    {
      "epoch": 0.022641509433962263,
      "grad_norm": 0.08501887321472168,
      "learning_rate": 9.956320346634876e-05,
      "loss": 10.3707,
      "step": 18
    },
    {
      "epoch": 0.02389937106918239,
      "grad_norm": 0.09447915107011795,
      "learning_rate": 9.944739353007344e-05,
      "loss": 10.3723,
      "step": 19
    },
    {
      "epoch": 0.025157232704402517,
      "grad_norm": 0.09398620575666428,
      "learning_rate": 9.931806517013612e-05,
      "loss": 10.3687,
      "step": 20
    },
    {
      "epoch": 0.026415094339622643,
      "grad_norm": 0.10066643357276917,
      "learning_rate": 9.917525374361912e-05,
      "loss": 10.3696,
      "step": 21
    },
    {
      "epoch": 0.027672955974842768,
      "grad_norm": 0.10919738560914993,
      "learning_rate": 9.901899829374047e-05,
      "loss": 10.3668,
      "step": 22
    },
    {
      "epoch": 0.028930817610062894,
      "grad_norm": 0.12229104340076447,
      "learning_rate": 9.884934153917997e-05,
      "loss": 10.3678,
      "step": 23
    },
    {
      "epoch": 0.03018867924528302,
      "grad_norm": 0.11512572318315506,
      "learning_rate": 9.86663298624003e-05,
      "loss": 10.3675,
      "step": 24
    },
    {
      "epoch": 0.031446540880503145,
      "grad_norm": 0.1288090944290161,
      "learning_rate": 9.847001329696653e-05,
      "loss": 10.3658,
      "step": 25
    },
    {
      "epoch": 0.03270440251572327,
      "grad_norm": 0.1288468837738037,
      "learning_rate": 9.826044551386744e-05,
      "loss": 10.3638,
      "step": 26
    },
    {
      "epoch": 0.033962264150943396,
      "grad_norm": 0.15287871658802032,
      "learning_rate": 9.803768380684242e-05,
      "loss": 10.3627,
      "step": 27
    },
    {
      "epoch": 0.03522012578616352,
      "grad_norm": 0.14995208382606506,
      "learning_rate": 9.780178907671789e-05,
      "loss": 10.3658,
      "step": 28
    },
    {
      "epoch": 0.03647798742138365,
      "grad_norm": 0.16352064907550812,
      "learning_rate": 9.755282581475769e-05,
      "loss": 10.363,
      "step": 29
    },
    {
      "epoch": 0.03773584905660377,
      "grad_norm": 0.1616298109292984,
      "learning_rate": 9.729086208503174e-05,
      "loss": 10.362,
      "step": 30
    },
    {
      "epoch": 0.0389937106918239,
      "grad_norm": 0.18207941949367523,
      "learning_rate": 9.701596950580806e-05,
      "loss": 10.3582,
      "step": 31
    },
    {
      "epoch": 0.04025157232704402,
      "grad_norm": 0.17258626222610474,
      "learning_rate": 9.672822322997305e-05,
      "loss": 10.3591,
      "step": 32
    },
    {
      "epoch": 0.04150943396226415,
      "grad_norm": 0.1723157912492752,
      "learning_rate": 9.642770192448536e-05,
      "loss": 10.3575,
      "step": 33
    },
    {
      "epoch": 0.042767295597484274,
      "grad_norm": 0.1635301411151886,
      "learning_rate": 9.611448774886924e-05,
      "loss": 10.3578,
      "step": 34
    },
    {
      "epoch": 0.0440251572327044,
      "grad_norm": 0.18812069296836853,
      "learning_rate": 9.578866633275288e-05,
      "loss": 10.3569,
      "step": 35
    },
    {
      "epoch": 0.045283018867924525,
      "grad_norm": 0.17699386179447174,
      "learning_rate": 9.545032675245813e-05,
      "loss": 10.3525,
      "step": 36
    },
    {
      "epoch": 0.04654088050314465,
      "grad_norm": 0.17810064554214478,
      "learning_rate": 9.509956150664796e-05,
      "loss": 10.3524,
      "step": 37
    },
    {
      "epoch": 0.04779874213836478,
      "grad_norm": 0.1659187227487564,
      "learning_rate": 9.473646649103818e-05,
      "loss": 10.3481,
      "step": 38
    },
    {
      "epoch": 0.04905660377358491,
      "grad_norm": 0.15193907916545868,
      "learning_rate": 9.43611409721806e-05,
      "loss": 10.3503,
      "step": 39
    },
    {
      "epoch": 0.050314465408805034,
      "grad_norm": 0.16581708192825317,
      "learning_rate": 9.397368756032445e-05,
      "loss": 10.3489,
      "step": 40
    },
    {
      "epoch": 0.05157232704402516,
      "grad_norm": 0.14731748402118683,
      "learning_rate": 9.357421218136386e-05,
      "loss": 10.3484,
      "step": 41
    },
    {
      "epoch": 0.052830188679245285,
      "grad_norm": 0.12889303267002106,
      "learning_rate": 9.316282404787871e-05,
      "loss": 10.3448,
      "step": 42
    },
    {
      "epoch": 0.05408805031446541,
      "grad_norm": 0.14560353755950928,
      "learning_rate": 9.273963562927695e-05,
      "loss": 10.3448,
      "step": 43
    },
    {
      "epoch": 0.055345911949685536,
      "grad_norm": 0.12265373766422272,
      "learning_rate": 9.230476262104677e-05,
      "loss": 10.3433,
      "step": 44
    },
    {
      "epoch": 0.05660377358490566,
      "grad_norm": 0.13917139172554016,
      "learning_rate": 9.185832391312644e-05,
      "loss": 10.34,
      "step": 45
    },
    {
      "epoch": 0.05786163522012579,
      "grad_norm": 0.10753589868545532,
      "learning_rate": 9.140044155740101e-05,
      "loss": 10.3415,
      "step": 46
    },
    {
      "epoch": 0.05911949685534591,
      "grad_norm": 0.0811968594789505,
      "learning_rate": 9.093124073433463e-05,
      "loss": 10.3417,
      "step": 47
    },
    {
      "epoch": 0.06037735849056604,
      "grad_norm": 0.11372454464435577,
      "learning_rate": 9.045084971874738e-05,
      "loss": 10.3455,
      "step": 48
    },
    {
      "epoch": 0.061635220125786164,
      "grad_norm": 0.1224861592054367,
      "learning_rate": 8.995939984474624e-05,
      "loss": 10.3371,
      "step": 49
    },
    {
      "epoch": 0.06289308176100629,
      "grad_norm": 0.15765050053596497,
      "learning_rate": 8.945702546981969e-05,
      "loss": 10.3347,
      "step": 50
    },
    {
      "epoch": 0.06289308176100629,
      "eval_loss": 10.346579551696777,
      "eval_runtime": 2.8076,
      "eval_samples_per_second": 476.923,
      "eval_steps_per_second": 119.32,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5605420695552.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}