{
  "best_metric": 10.371416091918945,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.06811409110259685,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001362281822051937,
      "grad_norm": 0.026211107149720192,
      "learning_rate": 5e-05,
      "loss": 10.377,
      "step": 1
    },
    {
      "epoch": 0.001362281822051937,
      "eval_loss": 10.375895500183105,
      "eval_runtime": 4.8059,
      "eval_samples_per_second": 1029.156,
      "eval_steps_per_second": 128.801,
      "step": 1
    },
    {
      "epoch": 0.002724563644103874,
      "grad_norm": 0.026691626757383347,
      "learning_rate": 0.0001,
      "loss": 10.377,
      "step": 2
    },
    {
      "epoch": 0.004086845466155811,
      "grad_norm": 0.025942042469978333,
      "learning_rate": 9.989294616193017e-05,
      "loss": 10.3761,
      "step": 3
    },
    {
      "epoch": 0.005449127288207748,
      "grad_norm": 0.025576652958989143,
      "learning_rate": 9.957224306869053e-05,
      "loss": 10.377,
      "step": 4
    },
    {
      "epoch": 0.006811409110259685,
      "grad_norm": 0.025234706699848175,
      "learning_rate": 9.903926402016153e-05,
      "loss": 10.3773,
      "step": 5
    },
    {
      "epoch": 0.008173690932311623,
      "grad_norm": 0.029249094426631927,
      "learning_rate": 9.829629131445342e-05,
      "loss": 10.3771,
      "step": 6
    },
    {
      "epoch": 0.00953597275436356,
      "grad_norm": 0.02763037569820881,
      "learning_rate": 9.73465064747553e-05,
      "loss": 10.3758,
      "step": 7
    },
    {
      "epoch": 0.010898254576415497,
      "grad_norm": 0.029007017612457275,
      "learning_rate": 9.619397662556435e-05,
      "loss": 10.3764,
      "step": 8
    },
    {
      "epoch": 0.012260536398467433,
      "grad_norm": 0.027018466964364052,
      "learning_rate": 9.484363707663442e-05,
      "loss": 10.3753,
      "step": 9
    },
    {
      "epoch": 0.01362281822051937,
      "grad_norm": 0.030888764187693596,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.3739,
      "step": 10
    },
    {
      "epoch": 0.014985100042571307,
      "grad_norm": 0.03369227796792984,
      "learning_rate": 9.157348061512727e-05,
      "loss": 10.3744,
      "step": 11
    },
    {
      "epoch": 0.016347381864623246,
      "grad_norm": 0.03383956849575043,
      "learning_rate": 8.966766701456177e-05,
      "loss": 10.3725,
      "step": 12
    },
    {
      "epoch": 0.017709663686675182,
      "grad_norm": 0.031551700085401535,
      "learning_rate": 8.759199037394887e-05,
      "loss": 10.3754,
      "step": 13
    },
    {
      "epoch": 0.01907194550872712,
      "grad_norm": 0.034269507974386215,
      "learning_rate": 8.535533905932738e-05,
      "loss": 10.3754,
      "step": 14
    },
    {
      "epoch": 0.020434227330779056,
      "grad_norm": 0.03306145220994949,
      "learning_rate": 8.296729075500344e-05,
      "loss": 10.3745,
      "step": 15
    },
    {
      "epoch": 0.021796509152830993,
      "grad_norm": 0.03238727152347565,
      "learning_rate": 8.043807145043604e-05,
      "loss": 10.3747,
      "step": 16
    },
    {
      "epoch": 0.02315879097488293,
      "grad_norm": 0.03371455892920494,
      "learning_rate": 7.777851165098012e-05,
      "loss": 10.3749,
      "step": 17
    },
    {
      "epoch": 0.024521072796934867,
      "grad_norm": 0.035507142543792725,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.3749,
      "step": 18
    },
    {
      "epoch": 0.025883354618986804,
      "grad_norm": 0.03634075075387955,
      "learning_rate": 7.211443451095007e-05,
      "loss": 10.3745,
      "step": 19
    },
    {
      "epoch": 0.02724563644103874,
      "grad_norm": 0.037323951721191406,
      "learning_rate": 6.91341716182545e-05,
      "loss": 10.3745,
      "step": 20
    },
    {
      "epoch": 0.028607918263090677,
      "grad_norm": 0.038444288074970245,
      "learning_rate": 6.607197326515808e-05,
      "loss": 10.3761,
      "step": 21
    },
    {
      "epoch": 0.029970200085142614,
      "grad_norm": 0.038676731288433075,
      "learning_rate": 6.294095225512603e-05,
      "loss": 10.3727,
      "step": 22
    },
    {
      "epoch": 0.03133248190719455,
      "grad_norm": 0.046296995133161545,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 10.3717,
      "step": 23
    },
    {
      "epoch": 0.03269476372924649,
      "grad_norm": 0.046376023441553116,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 10.3702,
      "step": 24
    },
    {
      "epoch": 0.034057045551298425,
      "grad_norm": 0.058545101433992386,
      "learning_rate": 5.327015646150716e-05,
      "loss": 10.3673,
      "step": 25
    },
    {
      "epoch": 0.034057045551298425,
      "eval_loss": 10.37269401550293,
      "eval_runtime": 4.8093,
      "eval_samples_per_second": 1028.426,
      "eval_steps_per_second": 128.709,
      "step": 25
    },
    {
      "epoch": 0.035419327373350365,
      "grad_norm": 0.04315102472901344,
      "learning_rate": 5e-05,
      "loss": 10.373,
      "step": 26
    },
    {
      "epoch": 0.0367816091954023,
      "grad_norm": 0.04431917518377304,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 10.3733,
      "step": 27
    },
    {
      "epoch": 0.03814389101745424,
      "grad_norm": 0.04504556581377983,
      "learning_rate": 4.347369038899744e-05,
      "loss": 10.372,
      "step": 28
    },
    {
      "epoch": 0.03950617283950617,
      "grad_norm": 0.046898357570171356,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 10.3735,
      "step": 29
    },
    {
      "epoch": 0.04086845466155811,
      "grad_norm": 0.04973635450005531,
      "learning_rate": 3.705904774487396e-05,
      "loss": 10.3728,
      "step": 30
    },
    {
      "epoch": 0.042230736483610046,
      "grad_norm": 0.04653334617614746,
      "learning_rate": 3.392802673484193e-05,
      "loss": 10.373,
      "step": 31
    },
    {
      "epoch": 0.043593018305661986,
      "grad_norm": 0.0456802062690258,
      "learning_rate": 3.086582838174551e-05,
      "loss": 10.373,
      "step": 32
    },
    {
      "epoch": 0.04495530012771392,
      "grad_norm": 0.04771285504102707,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 10.3738,
      "step": 33
    },
    {
      "epoch": 0.04631758194976586,
      "grad_norm": 0.046772122383117676,
      "learning_rate": 2.500000000000001e-05,
      "loss": 10.3717,
      "step": 34
    },
    {
      "epoch": 0.04767986377181779,
      "grad_norm": 0.05056193843483925,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 10.3702,
      "step": 35
    },
    {
      "epoch": 0.04904214559386973,
      "grad_norm": 0.06328132003545761,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 10.3704,
      "step": 36
    },
    {
      "epoch": 0.05040442741592167,
      "grad_norm": 0.05825236067175865,
      "learning_rate": 1.703270924499656e-05,
      "loss": 10.3657,
      "step": 37
    },
    {
      "epoch": 0.05176670923797361,
      "grad_norm": 0.05023612454533577,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 10.3716,
      "step": 38
    },
    {
      "epoch": 0.05312899106002554,
      "grad_norm": 0.05372828617691994,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 10.3717,
      "step": 39
    },
    {
      "epoch": 0.05449127288207748,
      "grad_norm": 0.05338004603981972,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 10.3725,
      "step": 40
    },
    {
      "epoch": 0.055853554704129414,
      "grad_norm": 0.053346700966358185,
      "learning_rate": 8.426519384872733e-06,
      "loss": 10.3726,
      "step": 41
    },
    {
      "epoch": 0.057215836526181355,
      "grad_norm": 0.05252009630203247,
      "learning_rate": 6.698729810778065e-06,
      "loss": 10.3734,
      "step": 42
    },
    {
      "epoch": 0.05857811834823329,
      "grad_norm": 0.05296671763062477,
      "learning_rate": 5.156362923365588e-06,
      "loss": 10.3725,
      "step": 43
    },
    {
      "epoch": 0.05994040017028523,
      "grad_norm": 0.05633159726858139,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 10.3726,
      "step": 44
    },
    {
      "epoch": 0.06130268199233716,
      "grad_norm": 0.05360110104084015,
      "learning_rate": 2.653493525244721e-06,
      "loss": 10.3728,
      "step": 45
    },
    {
      "epoch": 0.0626649638143891,
      "grad_norm": 0.050015050917863846,
      "learning_rate": 1.70370868554659e-06,
      "loss": 10.3726,
      "step": 46
    },
    {
      "epoch": 0.06402724563644104,
      "grad_norm": 0.054285526275634766,
      "learning_rate": 9.607359798384785e-07,
      "loss": 10.3705,
      "step": 47
    },
    {
      "epoch": 0.06538952745849298,
      "grad_norm": 0.058426082134246826,
      "learning_rate": 4.277569313094809e-07,
      "loss": 10.3718,
      "step": 48
    },
    {
      "epoch": 0.06675180928054492,
      "grad_norm": 0.05342816188931465,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 10.3699,
      "step": 49
    },
    {
      "epoch": 0.06811409110259685,
      "grad_norm": 0.0683768019080162,
      "learning_rate": 0.0,
      "loss": 10.3662,
      "step": 50
    },
    {
      "epoch": 0.06811409110259685,
      "eval_loss": 10.371416091918945,
      "eval_runtime": 4.8187,
      "eval_samples_per_second": 1026.428,
      "eval_steps_per_second": 128.459,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 42768059596800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}