|
{
  "best_metric": 4.46038818359375,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.03422899195618689,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006845798391237378,
      "grad_norm": 1.8983988761901855,
      "learning_rate": 5e-05,
      "loss": 5.5369,
      "step": 1
    },
    {
      "epoch": 0.0006845798391237378,
      "eval_loss": 5.087055683135986,
      "eval_runtime": 1.8291,
      "eval_samples_per_second": 27.336,
      "eval_steps_per_second": 7.107,
      "step": 1
    },
    {
      "epoch": 0.0013691596782474755,
      "grad_norm": 1.7235485315322876,
      "learning_rate": 0.0001,
      "loss": 5.2483,
      "step": 2
    },
    {
      "epoch": 0.0020537395173712133,
      "grad_norm": 1.5233309268951416,
      "learning_rate": 9.990365154573717e-05,
      "loss": 5.085,
      "step": 3
    },
    {
      "epoch": 0.002738319356494951,
      "grad_norm": 1.1321425437927246,
      "learning_rate": 9.961501876182148e-05,
      "loss": 4.9184,
      "step": 4
    },
    {
      "epoch": 0.003422899195618689,
      "grad_norm": 2.263650417327881,
      "learning_rate": 9.913533761814537e-05,
      "loss": 4.8358,
      "step": 5
    },
    {
      "epoch": 0.004107479034742427,
      "grad_norm": 0.9546835422515869,
      "learning_rate": 9.846666218300807e-05,
      "loss": 4.8692,
      "step": 6
    },
    {
      "epoch": 0.004792058873866164,
      "grad_norm": 0.7501165270805359,
      "learning_rate": 9.761185582727977e-05,
      "loss": 4.9064,
      "step": 7
    },
    {
      "epoch": 0.005476638712989902,
      "grad_norm": 0.5767120122909546,
      "learning_rate": 9.657457896300791e-05,
      "loss": 4.8118,
      "step": 8
    },
    {
      "epoch": 0.00616121855211364,
      "grad_norm": 0.6856655478477478,
      "learning_rate": 9.535927336897098e-05,
      "loss": 4.8149,
      "step": 9
    },
    {
      "epoch": 0.006845798391237378,
      "grad_norm": 0.7905865907669067,
      "learning_rate": 9.397114317029975e-05,
      "loss": 4.8121,
      "step": 10
    },
    {
      "epoch": 0.0075303782303611155,
      "grad_norm": 1.2904866933822632,
      "learning_rate": 9.241613255361455e-05,
      "loss": 4.8751,
      "step": 11
    },
    {
      "epoch": 0.008214958069484853,
      "grad_norm": 0.7690590620040894,
      "learning_rate": 9.070090031310558e-05,
      "loss": 4.8562,
      "step": 12
    },
    {
      "epoch": 0.008899537908608592,
      "grad_norm": 1.3141318559646606,
      "learning_rate": 8.883279133655399e-05,
      "loss": 5.1032,
      "step": 13
    },
    {
      "epoch": 0.009584117747732329,
      "grad_norm": 1.111348271369934,
      "learning_rate": 8.681980515339464e-05,
      "loss": 5.0121,
      "step": 14
    },
    {
      "epoch": 0.010268697586856067,
      "grad_norm": 0.9109743237495422,
      "learning_rate": 8.467056167950311e-05,
      "loss": 4.7557,
      "step": 15
    },
    {
      "epoch": 0.010953277425979804,
      "grad_norm": 0.5635835528373718,
      "learning_rate": 8.239426430539243e-05,
      "loss": 4.6933,
      "step": 16
    },
    {
      "epoch": 0.011637857265103543,
      "grad_norm": 1.2304234504699707,
      "learning_rate": 8.000066048588211e-05,
      "loss": 4.5971,
      "step": 17
    },
    {
      "epoch": 0.01232243710422728,
      "grad_norm": 1.4028397798538208,
      "learning_rate": 7.75e-05,
      "loss": 4.6146,
      "step": 18
    },
    {
      "epoch": 0.013007016943351019,
      "grad_norm": 1.7947639226913452,
      "learning_rate": 7.490299105985507e-05,
      "loss": 4.611,
      "step": 19
    },
    {
      "epoch": 0.013691596782474755,
      "grad_norm": 1.5065919160842896,
      "learning_rate": 7.222075445642904e-05,
      "loss": 4.622,
      "step": 20
    },
    {
      "epoch": 0.014376176621598494,
      "grad_norm": 0.48089975118637085,
      "learning_rate": 6.946477593864228e-05,
      "loss": 4.6141,
      "step": 21
    },
    {
      "epoch": 0.015060756460722231,
      "grad_norm": 0.5148659348487854,
      "learning_rate": 6.664685702961344e-05,
      "loss": 4.6095,
      "step": 22
    },
    {
      "epoch": 0.01574533629984597,
      "grad_norm": 0.6612058281898499,
      "learning_rate": 6.377906449072578e-05,
      "loss": 4.5889,
      "step": 23
    },
    {
      "epoch": 0.016429916138969707,
      "grad_norm": 0.5617333650588989,
      "learning_rate": 6.087367864990233e-05,
      "loss": 4.6246,
      "step": 24
    },
    {
      "epoch": 0.017114495978093443,
      "grad_norm": 0.5102720260620117,
      "learning_rate": 5.794314081535644e-05,
      "loss": 4.6922,
      "step": 25
    },
    {
      "epoch": 0.017114495978093443,
      "eval_loss": 4.555571556091309,
      "eval_runtime": 0.3228,
      "eval_samples_per_second": 154.902,
      "eval_steps_per_second": 40.274,
      "step": 25
    },
    {
      "epoch": 0.017799075817217184,
      "grad_norm": 0.652305006980896,
      "learning_rate": 5.500000000000001e-05,
      "loss": 4.8983,
      "step": 26
    },
    {
      "epoch": 0.01848365565634092,
      "grad_norm": 0.7992231249809265,
      "learning_rate": 5.205685918464356e-05,
      "loss": 4.7073,
      "step": 27
    },
    {
      "epoch": 0.019168235495464658,
      "grad_norm": 0.38415586948394775,
      "learning_rate": 4.912632135009769e-05,
      "loss": 4.5686,
      "step": 28
    },
    {
      "epoch": 0.019852815334588398,
      "grad_norm": 0.38379648327827454,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 4.4937,
      "step": 29
    },
    {
      "epoch": 0.020537395173712135,
      "grad_norm": 0.38786450028419495,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 4.4419,
      "step": 30
    },
    {
      "epoch": 0.021221975012835872,
      "grad_norm": 0.49159541726112366,
      "learning_rate": 4.053522406135775e-05,
      "loss": 4.5172,
      "step": 31
    },
    {
      "epoch": 0.02190655485195961,
      "grad_norm": 0.5576838254928589,
      "learning_rate": 3.777924554357096e-05,
      "loss": 4.5385,
      "step": 32
    },
    {
      "epoch": 0.02259113469108335,
      "grad_norm": 0.3310554623603821,
      "learning_rate": 3.509700894014496e-05,
      "loss": 4.5236,
      "step": 33
    },
    {
      "epoch": 0.023275714530207086,
      "grad_norm": 0.3554631769657135,
      "learning_rate": 3.250000000000001e-05,
      "loss": 4.5445,
      "step": 34
    },
    {
      "epoch": 0.023960294369330823,
      "grad_norm": 0.3332033157348633,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 4.4697,
      "step": 35
    },
    {
      "epoch": 0.02464487420845456,
      "grad_norm": 0.4032423198223114,
      "learning_rate": 2.760573569460757e-05,
      "loss": 4.4963,
      "step": 36
    },
    {
      "epoch": 0.0253294540475783,
      "grad_norm": 0.48033788800239563,
      "learning_rate": 2.53294383204969e-05,
      "loss": 4.5964,
      "step": 37
    },
    {
      "epoch": 0.026014033886702037,
      "grad_norm": 0.6050848364830017,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 4.9098,
      "step": 38
    },
    {
      "epoch": 0.026698613725825774,
      "grad_norm": 0.5689930319786072,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 4.7628,
      "step": 39
    },
    {
      "epoch": 0.02738319356494951,
      "grad_norm": 0.35218316316604614,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 4.544,
      "step": 40
    },
    {
      "epoch": 0.02806777340407325,
      "grad_norm": 0.2895558774471283,
      "learning_rate": 1.758386744638546e-05,
      "loss": 4.4886,
      "step": 41
    },
    {
      "epoch": 0.028752353243196988,
      "grad_norm": 0.3760088384151459,
      "learning_rate": 1.602885682970026e-05,
      "loss": 4.4021,
      "step": 42
    },
    {
      "epoch": 0.029436933082320725,
      "grad_norm": 0.2829365134239197,
      "learning_rate": 1.464072663102903e-05,
      "loss": 4.4461,
      "step": 43
    },
    {
      "epoch": 0.030121512921444462,
      "grad_norm": 0.3910805583000183,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 4.4129,
      "step": 44
    },
    {
      "epoch": 0.030806092760568202,
      "grad_norm": 0.3998270034790039,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 4.4577,
      "step": 45
    },
    {
      "epoch": 0.03149067259969194,
      "grad_norm": 0.3465791344642639,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 4.4583,
      "step": 46
    },
    {
      "epoch": 0.03217525243881568,
      "grad_norm": 0.31564465165138245,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 4.4946,
      "step": 47
    },
    {
      "epoch": 0.03285983227793941,
      "grad_norm": 0.37373700737953186,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 4.4635,
      "step": 48
    },
    {
      "epoch": 0.03354441211706315,
      "grad_norm": 0.39719510078430176,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 4.5366,
      "step": 49
    },
    {
      "epoch": 0.03422899195618689,
      "grad_norm": 0.44478699564933777,
      "learning_rate": 1e-05,
      "loss": 4.6088,
      "step": 50
    },
    {
      "epoch": 0.03422899195618689,
      "eval_loss": 4.46038818359375,
      "eval_runtime": 0.3251,
      "eval_samples_per_second": 153.796,
      "eval_steps_per_second": 39.987,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.194596482678784e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|