{
  "best_metric": 10.360739707946777,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 1.8314350797266514,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03644646924829157,
      "grad_norm": 0.15206216275691986,
      "learning_rate": 5e-05,
      "loss": 10.3796,
      "step": 1
    },
    {
      "epoch": 0.03644646924829157,
      "eval_loss": 10.380887031555176,
      "eval_runtime": 0.1809,
      "eval_samples_per_second": 1022.859,
      "eval_steps_per_second": 132.695,
      "step": 1
    },
    {
      "epoch": 0.07289293849658314,
      "grad_norm": 0.1610165387392044,
      "learning_rate": 0.0001,
      "loss": 10.379,
      "step": 2
    },
    {
      "epoch": 0.10933940774487472,
      "grad_norm": 0.18909887969493866,
      "learning_rate": 9.989294616193017e-05,
      "loss": 10.3808,
      "step": 3
    },
    {
      "epoch": 0.14578587699316628,
      "grad_norm": 0.2205168455839157,
      "learning_rate": 9.957224306869053e-05,
      "loss": 10.3768,
      "step": 4
    },
    {
      "epoch": 0.18223234624145787,
      "grad_norm": 0.2703295648097992,
      "learning_rate": 9.903926402016153e-05,
      "loss": 10.3755,
      "step": 5
    },
    {
      "epoch": 0.21867881548974943,
      "grad_norm": 0.33437639474868774,
      "learning_rate": 9.829629131445342e-05,
      "loss": 10.3741,
      "step": 6
    },
    {
      "epoch": 0.255125284738041,
      "grad_norm": 0.2190495729446411,
      "learning_rate": 9.73465064747553e-05,
      "loss": 10.3759,
      "step": 7
    },
    {
      "epoch": 0.29157175398633256,
      "grad_norm": 0.15123555064201355,
      "learning_rate": 9.619397662556435e-05,
      "loss": 10.3765,
      "step": 8
    },
    {
      "epoch": 0.32801822323462415,
      "grad_norm": 0.1677154153585434,
      "learning_rate": 9.484363707663442e-05,
      "loss": 10.3756,
      "step": 9
    },
    {
      "epoch": 0.36446469248291574,
      "grad_norm": 0.18042495846748352,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.3742,
      "step": 10
    },
    {
      "epoch": 0.4009111617312073,
      "grad_norm": 0.22101934254169464,
      "learning_rate": 9.157348061512727e-05,
      "loss": 10.3725,
      "step": 11
    },
    {
      "epoch": 0.43735763097949887,
      "grad_norm": 0.2781515419483185,
      "learning_rate": 8.966766701456177e-05,
      "loss": 10.3691,
      "step": 12
    },
    {
      "epoch": 0.47380410022779046,
      "grad_norm": 0.33735859394073486,
      "learning_rate": 8.759199037394887e-05,
      "loss": 10.3659,
      "step": 13
    },
    {
      "epoch": 0.510250569476082,
      "grad_norm": 0.17510421574115753,
      "learning_rate": 8.535533905932738e-05,
      "loss": 10.3727,
      "step": 14
    },
    {
      "epoch": 0.5466970387243736,
      "grad_norm": 0.1429934948682785,
      "learning_rate": 8.296729075500344e-05,
      "loss": 10.3725,
      "step": 15
    },
    {
      "epoch": 0.5831435079726651,
      "grad_norm": 0.15244732797145844,
      "learning_rate": 8.043807145043604e-05,
      "loss": 10.3719,
      "step": 16
    },
    {
      "epoch": 0.6195899772209568,
      "grad_norm": 0.17936758697032928,
      "learning_rate": 7.777851165098012e-05,
      "loss": 10.3695,
      "step": 17
    },
    {
      "epoch": 0.6560364464692483,
      "grad_norm": 0.22151167690753937,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.3661,
      "step": 18
    },
    {
      "epoch": 0.6924829157175398,
      "grad_norm": 0.26017025113105774,
      "learning_rate": 7.211443451095007e-05,
      "loss": 10.3616,
      "step": 19
    },
    {
      "epoch": 0.7289293849658315,
      "grad_norm": 0.3470045328140259,
      "learning_rate": 6.91341716182545e-05,
      "loss": 10.355,
      "step": 20
    },
    {
      "epoch": 0.765375854214123,
      "grad_norm": 0.14400780200958252,
      "learning_rate": 6.607197326515808e-05,
      "loss": 10.37,
      "step": 21
    },
    {
      "epoch": 0.8018223234624146,
      "grad_norm": 0.15392181277275085,
      "learning_rate": 6.294095225512603e-05,
      "loss": 10.3677,
      "step": 22
    },
    {
      "epoch": 0.8382687927107062,
      "grad_norm": 0.16887319087982178,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 10.3667,
      "step": 23
    },
    {
      "epoch": 0.8747152619589977,
      "grad_norm": 0.1790841966867447,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 10.3659,
      "step": 24
    },
    {
      "epoch": 0.9111617312072893,
      "grad_norm": 0.2096974402666092,
      "learning_rate": 5.327015646150716e-05,
      "loss": 10.3644,
      "step": 25
    },
    {
      "epoch": 0.9111617312072893,
      "eval_loss": 10.36435317993164,
      "eval_runtime": 0.1772,
      "eval_samples_per_second": 1044.023,
      "eval_steps_per_second": 135.441,
      "step": 25
    },
    {
      "epoch": 0.9476082004555809,
      "grad_norm": 0.25196874141693115,
      "learning_rate": 5e-05,
      "loss": 10.3584,
      "step": 26
    },
    {
      "epoch": 0.9840546697038725,
      "grad_norm": 0.3380187153816223,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 10.353,
      "step": 27
    },
    {
      "epoch": 1.029612756264237,
      "grad_norm": 0.30297839641571045,
      "learning_rate": 4.347369038899744e-05,
      "loss": 19.0658,
      "step": 28
    },
    {
      "epoch": 1.0660592255125285,
      "grad_norm": 0.14603254199028015,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 10.264,
      "step": 29
    },
    {
      "epoch": 1.10250569476082,
      "grad_norm": 0.15991508960723877,
      "learning_rate": 3.705904774487396e-05,
      "loss": 10.3639,
      "step": 30
    },
    {
      "epoch": 1.1389521640091116,
      "grad_norm": 0.19101238250732422,
      "learning_rate": 3.392802673484193e-05,
      "loss": 10.3875,
      "step": 31
    },
    {
      "epoch": 1.1753986332574031,
      "grad_norm": 0.20625823736190796,
      "learning_rate": 3.086582838174551e-05,
      "loss": 10.3873,
      "step": 32
    },
    {
      "epoch": 1.2118451025056949,
      "grad_norm": 0.24794551730155945,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 10.3639,
      "step": 33
    },
    {
      "epoch": 1.2482915717539864,
      "grad_norm": 0.15549078583717346,
      "learning_rate": 2.500000000000001e-05,
      "loss": 6.8354,
      "step": 34
    },
    {
      "epoch": 1.284738041002278,
      "grad_norm": 0.19828249514102936,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 13.8538,
      "step": 35
    },
    {
      "epoch": 1.3211845102505695,
      "grad_norm": 0.15714150667190552,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 10.345,
      "step": 36
    },
    {
      "epoch": 1.357630979498861,
      "grad_norm": 0.1823427975177765,
      "learning_rate": 1.703270924499656e-05,
      "loss": 10.4047,
      "step": 37
    },
    {
      "epoch": 1.3940774487471526,
      "grad_norm": 0.1794886589050293,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 10.395,
      "step": 38
    },
    {
      "epoch": 1.430523917995444,
      "grad_norm": 0.22187049686908722,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 10.3162,
      "step": 39
    },
    {
      "epoch": 1.4669703872437356,
      "grad_norm": 0.2668076157569885,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 10.4429,
      "step": 40
    },
    {
      "epoch": 1.5034168564920274,
      "grad_norm": 0.15169256925582886,
      "learning_rate": 8.426519384872733e-06,
      "loss": 8.8337,
      "step": 41
    },
    {
      "epoch": 1.539863325740319,
      "grad_norm": 0.15748904645442963,
      "learning_rate": 6.698729810778065e-06,
      "loss": 11.7664,
      "step": 42
    },
    {
      "epoch": 1.5763097949886105,
      "grad_norm": 0.15284818410873413,
      "learning_rate": 5.156362923365588e-06,
      "loss": 10.4053,
      "step": 43
    },
    {
      "epoch": 1.6127562642369022,
      "grad_norm": 0.1644909381866455,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 10.3361,
      "step": 44
    },
    {
      "epoch": 1.6492027334851938,
      "grad_norm": 0.18126553297042847,
      "learning_rate": 2.653493525244721e-06,
      "loss": 10.3664,
      "step": 45
    },
    {
      "epoch": 1.6856492027334853,
      "grad_norm": 0.23220454156398773,
      "learning_rate": 1.70370868554659e-06,
      "loss": 10.3932,
      "step": 46
    },
    {
      "epoch": 1.7220956719817768,
      "grad_norm": 0.2918383777141571,
      "learning_rate": 9.607359798384785e-07,
      "loss": 10.4925,
      "step": 47
    },
    {
      "epoch": 1.7585421412300684,
      "grad_norm": 0.18016327917575836,
      "learning_rate": 4.277569313094809e-07,
      "loss": 9.7463,
      "step": 48
    },
    {
      "epoch": 1.79498861047836,
      "grad_norm": 0.166108638048172,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 10.7557,
      "step": 49
    },
    {
      "epoch": 1.8314350797266514,
      "grad_norm": 0.16405977308750153,
      "learning_rate": 0.0,
      "loss": 10.3726,
      "step": 50
    },
    {
      "epoch": 1.8314350797266514,
      "eval_loss": 10.360739707946777,
      "eval_runtime": 0.1753,
      "eval_samples_per_second": 1055.129,
      "eval_steps_per_second": 136.882,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 42768059596800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}