{ "best_metric": 10.375041007995605, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.0963159162051529, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.001926318324103058, "grad_norm": 0.017134863883256912, "learning_rate": 5e-05, "loss": 10.3772, "step": 1 }, { "epoch": 0.001926318324103058, "eval_loss": 10.376731872558594, "eval_runtime": 4.3149, "eval_samples_per_second": 810.683, "eval_steps_per_second": 101.509, "step": 1 }, { "epoch": 0.003852636648206116, "grad_norm": 0.01785031147301197, "learning_rate": 0.0001, "loss": 10.3775, "step": 2 }, { "epoch": 0.005778954972309174, "grad_norm": 0.018595920875668526, "learning_rate": 9.989294616193017e-05, "loss": 10.3776, "step": 3 }, { "epoch": 0.007705273296412232, "grad_norm": 0.01824306882917881, "learning_rate": 9.957224306869053e-05, "loss": 10.3772, "step": 4 }, { "epoch": 0.00963159162051529, "grad_norm": 0.017736271023750305, "learning_rate": 9.903926402016153e-05, "loss": 10.3768, "step": 5 }, { "epoch": 0.011557909944618349, "grad_norm": 0.01673169620335102, "learning_rate": 9.829629131445342e-05, "loss": 10.3766, "step": 6 }, { "epoch": 0.013484228268721405, "grad_norm": 0.01604505628347397, "learning_rate": 9.73465064747553e-05, "loss": 10.3769, "step": 7 }, { "epoch": 0.015410546592824464, "grad_norm": 0.013165425509214401, "learning_rate": 9.619397662556435e-05, "loss": 10.3766, "step": 8 }, { "epoch": 0.017336864916927524, "grad_norm": 0.013424793258309364, "learning_rate": 9.484363707663442e-05, "loss": 10.3761, "step": 9 }, { "epoch": 0.01926318324103058, "grad_norm": 0.012556428089737892, "learning_rate": 9.330127018922194e-05, "loss": 10.3756, "step": 10 }, { "epoch": 0.021189501565133637, "grad_norm": 0.017751269042491913, "learning_rate": 9.157348061512727e-05, "loss": 10.3751, "step": 11 }, { "epoch": 0.023115819889236697, "grad_norm": 0.03788875415921211, "learning_rate": 8.966766701456177e-05, "loss": 10.3754, "step": 12 }, { "epoch": 0.025042138213339754, "grad_norm": 0.014531556516885757, "learning_rate": 8.759199037394887e-05, "loss": 10.3755, "step": 13 }, { "epoch": 0.02696845653744281, "grad_norm": 0.018963413313031197, "learning_rate": 8.535533905932738e-05, "loss": 10.3761, "step": 14 }, { "epoch": 0.02889477486154587, "grad_norm": 0.0179679486900568, "learning_rate": 8.296729075500344e-05, "loss": 10.3756, "step": 15 }, { "epoch": 0.030821093185648928, "grad_norm": 0.018420385196805, "learning_rate": 8.043807145043604e-05, "loss": 10.3764, "step": 16 }, { "epoch": 0.03274741150975199, "grad_norm": 0.01803528517484665, "learning_rate": 7.777851165098012e-05, "loss": 10.3763, "step": 17 }, { "epoch": 0.03467372983385505, "grad_norm": 0.017887067049741745, "learning_rate": 7.500000000000001e-05, "loss": 10.3761, "step": 18 }, { "epoch": 0.0366000481579581, "grad_norm": 0.01678159460425377, "learning_rate": 7.211443451095007e-05, "loss": 10.3757, "step": 19 }, { "epoch": 0.03852636648206116, "grad_norm": 0.016594473272562027, "learning_rate": 6.91341716182545e-05, "loss": 10.376, "step": 20 }, { "epoch": 0.04045268480616422, "grad_norm": 0.012984883040189743, "learning_rate": 6.607197326515808e-05, "loss": 10.3756, "step": 21 }, { "epoch": 0.042379003130267275, "grad_norm": 0.014251720160245895, "learning_rate": 6.294095225512603e-05, "loss": 10.376, "step": 22 }, { "epoch": 0.044305321454370335, "grad_norm": 0.014976712875068188, "learning_rate": 5.9754516100806423e-05, "loss": 
10.3756, "step": 23 }, { "epoch": 0.046231639778473395, "grad_norm": 0.029554875567555428, "learning_rate": 5.6526309611002594e-05, "loss": 10.3746, "step": 24 }, { "epoch": 0.04815795810257645, "grad_norm": 0.04060554876923561, "learning_rate": 5.327015646150716e-05, "loss": 10.3748, "step": 25 }, { "epoch": 0.04815795810257645, "eval_loss": 10.375431060791016, "eval_runtime": 4.5126, "eval_samples_per_second": 775.167, "eval_steps_per_second": 97.062, "step": 25 }, { "epoch": 0.05008427642667951, "grad_norm": 0.017374927178025246, "learning_rate": 5e-05, "loss": 10.3758, "step": 26 }, { "epoch": 0.05201059475078257, "grad_norm": 0.01883726380765438, "learning_rate": 4.6729843538492847e-05, "loss": 10.3753, "step": 27 }, { "epoch": 0.05393691307488562, "grad_norm": 0.0194549597799778, "learning_rate": 4.347369038899744e-05, "loss": 10.375, "step": 28 }, { "epoch": 0.05586323139898868, "grad_norm": 0.02029833383858204, "learning_rate": 4.0245483899193595e-05, "loss": 10.3754, "step": 29 }, { "epoch": 0.05778954972309174, "grad_norm": 0.02029760368168354, "learning_rate": 3.705904774487396e-05, "loss": 10.3755, "step": 30 }, { "epoch": 0.0597158680471948, "grad_norm": 0.019594980403780937, "learning_rate": 3.392802673484193e-05, "loss": 10.3748, "step": 31 }, { "epoch": 0.061642186371297855, "grad_norm": 0.016207123175263405, "learning_rate": 3.086582838174551e-05, "loss": 10.3758, "step": 32 }, { "epoch": 0.06356850469540092, "grad_norm": 0.014481107704341412, "learning_rate": 2.7885565489049946e-05, "loss": 10.3754, "step": 33 }, { "epoch": 0.06549482301950398, "grad_norm": 0.01565168984234333, "learning_rate": 2.500000000000001e-05, "loss": 10.3749, "step": 34 }, { "epoch": 0.06742114134360704, "grad_norm": 0.01421188935637474, "learning_rate": 2.2221488349019903e-05, "loss": 10.3751, "step": 35 }, { "epoch": 0.0693474596677101, "grad_norm": 0.020938636735081673, "learning_rate": 1.9561928549563968e-05, "loss": 10.3757, "step": 36 }, { "epoch": 0.07127377799181314, "grad_norm": 0.03991640731692314, "learning_rate": 1.703270924499656e-05, "loss": 10.3748, "step": 37 }, { "epoch": 0.0732000963159162, "grad_norm": 0.017153754830360413, "learning_rate": 1.4644660940672627e-05, "loss": 10.3752, "step": 38 }, { "epoch": 0.07512641464001926, "grad_norm": 0.020170168951153755, "learning_rate": 1.2408009626051137e-05, "loss": 10.3758, "step": 39 }, { "epoch": 0.07705273296412232, "grad_norm": 0.02071978710591793, "learning_rate": 1.0332332985438248e-05, "loss": 10.3752, "step": 40 }, { "epoch": 0.07897905128822538, "grad_norm": 0.021749036386609077, "learning_rate": 8.426519384872733e-06, "loss": 10.3754, "step": 41 }, { "epoch": 0.08090536961232844, "grad_norm": 0.02003314718604088, "learning_rate": 6.698729810778065e-06, "loss": 10.3752, "step": 42 }, { "epoch": 0.08283168793643149, "grad_norm": 0.02103583887219429, "learning_rate": 5.156362923365588e-06, "loss": 10.3756, "step": 43 }, { "epoch": 0.08475800626053455, "grad_norm": 0.01838034950196743, "learning_rate": 3.8060233744356633e-06, "loss": 10.3757, "step": 44 }, { "epoch": 0.08668432458463761, "grad_norm": 0.017888696864247322, "learning_rate": 2.653493525244721e-06, "loss": 10.3758, "step": 45 }, { "epoch": 0.08861064290874067, "grad_norm": 0.014699080027639866, "learning_rate": 1.70370868554659e-06, "loss": 10.375, "step": 46 }, { "epoch": 0.09053696123284373, "grad_norm": 0.014515848830342293, "learning_rate": 9.607359798384785e-07, "loss": 10.3747, "step": 47 }, { "epoch": 0.09246327955694679, "grad_norm": 0.01529916375875473, 
"learning_rate": 4.277569313094809e-07, "loss": 10.3752, "step": 48 }, { "epoch": 0.09438959788104985, "grad_norm": 0.030164409428834915, "learning_rate": 1.0705383806982606e-07, "loss": 10.3744, "step": 49 }, { "epoch": 0.0963159162051529, "grad_norm": 0.03701487183570862, "learning_rate": 0.0, "loss": 10.375, "step": 50 }, { "epoch": 0.0963159162051529, "eval_loss": 10.375041007995605, "eval_runtime": 4.3206, "eval_samples_per_second": 809.616, "eval_steps_per_second": 101.376, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 43369485434880.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }