{ "best_metric": 0.4569474458694458, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.008854846921833838, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00017709693843667677, "grad_norm": 117.74211883544922, "learning_rate": 5e-05, "loss": 1.4771, "step": 1 }, { "epoch": 0.00017709693843667677, "eval_loss": 1.7598378658294678, "eval_runtime": 1565.1778, "eval_samples_per_second": 24.305, "eval_steps_per_second": 3.039, "step": 1 }, { "epoch": 0.00035419387687335354, "grad_norm": 144.3221435546875, "learning_rate": 0.0001, "loss": 1.8027, "step": 2 }, { "epoch": 0.0005312908153100303, "grad_norm": 134.8468017578125, "learning_rate": 9.989294616193017e-05, "loss": 1.2757, "step": 3 }, { "epoch": 0.0007083877537467071, "grad_norm": 84.56224822998047, "learning_rate": 9.957224306869053e-05, "loss": 1.1062, "step": 4 }, { "epoch": 0.0008854846921833839, "grad_norm": 52.89167022705078, "learning_rate": 9.903926402016153e-05, "loss": 0.9609, "step": 5 }, { "epoch": 0.0010625816306200607, "grad_norm": 92.34407806396484, "learning_rate": 9.829629131445342e-05, "loss": 0.9017, "step": 6 }, { "epoch": 0.0012396785690567374, "grad_norm": 46.530521392822266, "learning_rate": 9.73465064747553e-05, "loss": 0.8768, "step": 7 }, { "epoch": 0.0014167755074934142, "grad_norm": 33.14015579223633, "learning_rate": 9.619397662556435e-05, "loss": 0.7773, "step": 8 }, { "epoch": 0.001593872445930091, "grad_norm": 34.68248748779297, "learning_rate": 9.484363707663442e-05, "loss": 0.7603, "step": 9 }, { "epoch": 0.0017709693843667679, "grad_norm": 31.410905838012695, "learning_rate": 9.330127018922194e-05, "loss": 0.7567, "step": 10 }, { "epoch": 0.0019480663228034446, "grad_norm": 30.384151458740234, "learning_rate": 9.157348061512727e-05, "loss": 0.7125, "step": 11 }, { "epoch": 0.0021251632612401213, "grad_norm": 43.795509338378906, "learning_rate": 8.966766701456177e-05, "loss": 0.7481, "step": 12 }, { "epoch": 0.002302260199676798, "grad_norm": 48.145652770996094, "learning_rate": 8.759199037394887e-05, "loss": 0.7942, "step": 13 }, { "epoch": 0.002479357138113475, "grad_norm": 43.399681091308594, "learning_rate": 8.535533905932738e-05, "loss": 0.6538, "step": 14 }, { "epoch": 0.0026564540765501516, "grad_norm": 35.737213134765625, "learning_rate": 8.296729075500344e-05, "loss": 0.6559, "step": 15 }, { "epoch": 0.0028335510149868283, "grad_norm": 29.4437198638916, "learning_rate": 8.043807145043604e-05, "loss": 0.5808, "step": 16 }, { "epoch": 0.003010647953423505, "grad_norm": 25.55437660217285, "learning_rate": 7.777851165098012e-05, "loss": 0.6096, "step": 17 }, { "epoch": 0.003187744891860182, "grad_norm": 22.801061630249023, "learning_rate": 7.500000000000001e-05, "loss": 0.5435, "step": 18 }, { "epoch": 0.0033648418302968585, "grad_norm": 23.629531860351562, "learning_rate": 7.211443451095007e-05, "loss": 0.5911, "step": 19 }, { "epoch": 0.0035419387687335357, "grad_norm": 24.318557739257812, "learning_rate": 6.91341716182545e-05, "loss": 0.5332, "step": 20 }, { "epoch": 0.0037190357071702125, "grad_norm": 22.01402473449707, "learning_rate": 6.607197326515808e-05, "loss": 0.5355, "step": 21 }, { "epoch": 0.003896132645606889, "grad_norm": 26.791213989257812, "learning_rate": 6.294095225512603e-05, "loss": 0.5295, "step": 22 }, { "epoch": 0.0040732295840435655, "grad_norm": 28.695446014404297, "learning_rate": 5.9754516100806423e-05, "loss": 0.6232, "step": 23 }, { "epoch": 
0.004250326522480243, "grad_norm": 30.2479248046875, "learning_rate": 5.6526309611002594e-05, "loss": 0.617, "step": 24 }, { "epoch": 0.004427423460916919, "grad_norm": 32.574432373046875, "learning_rate": 5.327015646150716e-05, "loss": 0.5934, "step": 25 }, { "epoch": 0.004427423460916919, "eval_loss": 0.555499255657196, "eval_runtime": 1566.0928, "eval_samples_per_second": 24.29, "eval_steps_per_second": 3.037, "step": 25 }, { "epoch": 0.004604520399353596, "grad_norm": 37.32914352416992, "learning_rate": 5e-05, "loss": 0.6278, "step": 26 }, { "epoch": 0.004781617337790273, "grad_norm": 30.841808319091797, "learning_rate": 4.6729843538492847e-05, "loss": 0.526, "step": 27 }, { "epoch": 0.00495871427622695, "grad_norm": 26.814208984375, "learning_rate": 4.347369038899744e-05, "loss": 0.5521, "step": 28 }, { "epoch": 0.005135811214663627, "grad_norm": 22.27334976196289, "learning_rate": 4.0245483899193595e-05, "loss": 0.5206, "step": 29 }, { "epoch": 0.005312908153100303, "grad_norm": 18.109580993652344, "learning_rate": 3.705904774487396e-05, "loss": 0.4236, "step": 30 }, { "epoch": 0.00549000509153698, "grad_norm": 21.037906646728516, "learning_rate": 3.392802673484193e-05, "loss": 0.4845, "step": 31 }, { "epoch": 0.005667102029973657, "grad_norm": 21.314884185791016, "learning_rate": 3.086582838174551e-05, "loss": 0.4973, "step": 32 }, { "epoch": 0.005844198968410334, "grad_norm": 23.060914993286133, "learning_rate": 2.7885565489049946e-05, "loss": 0.503, "step": 33 }, { "epoch": 0.00602129590684701, "grad_norm": 21.678466796875, "learning_rate": 2.500000000000001e-05, "loss": 0.5009, "step": 34 }, { "epoch": 0.006198392845283687, "grad_norm": 22.18929672241211, "learning_rate": 2.2221488349019903e-05, "loss": 0.4831, "step": 35 }, { "epoch": 0.006375489783720364, "grad_norm": 21.511735916137695, "learning_rate": 1.9561928549563968e-05, "loss": 0.4702, "step": 36 }, { "epoch": 0.006552586722157041, "grad_norm": 27.066511154174805, "learning_rate": 1.703270924499656e-05, "loss": 0.5608, "step": 37 }, { "epoch": 0.006729683660593717, "grad_norm": 22.099491119384766, "learning_rate": 1.4644660940672627e-05, "loss": 0.5373, "step": 38 }, { "epoch": 0.006906780599030394, "grad_norm": 21.110164642333984, "learning_rate": 1.2408009626051137e-05, "loss": 0.51, "step": 39 }, { "epoch": 0.007083877537467071, "grad_norm": 18.117944717407227, "learning_rate": 1.0332332985438248e-05, "loss": 0.4379, "step": 40 }, { "epoch": 0.007260974475903748, "grad_norm": 17.371549606323242, "learning_rate": 8.426519384872733e-06, "loss": 0.4025, "step": 41 }, { "epoch": 0.007438071414340425, "grad_norm": 18.518346786499023, "learning_rate": 6.698729810778065e-06, "loss": 0.4477, "step": 42 }, { "epoch": 0.007615168352777101, "grad_norm": 19.548995971679688, "learning_rate": 5.156362923365588e-06, "loss": 0.4838, "step": 43 }, { "epoch": 0.007792265291213778, "grad_norm": 20.07686424255371, "learning_rate": 3.8060233744356633e-06, "loss": 0.5081, "step": 44 }, { "epoch": 0.007969362229650456, "grad_norm": 20.308269500732422, "learning_rate": 2.653493525244721e-06, "loss": 0.4412, "step": 45 }, { "epoch": 0.008146459168087131, "grad_norm": 20.340423583984375, "learning_rate": 1.70370868554659e-06, "loss": 0.4678, "step": 46 }, { "epoch": 0.008323556106523808, "grad_norm": 20.577301025390625, "learning_rate": 9.607359798384785e-07, "loss": 0.4283, "step": 47 }, { "epoch": 0.008500653044960485, "grad_norm": 22.188573837280273, "learning_rate": 4.277569313094809e-07, "loss": 0.471, "step": 48 }, { "epoch": 
0.008677749983397163, "grad_norm": 24.928325653076172, "learning_rate": 1.0705383806982606e-07, "loss": 0.5276, "step": 49 }, { "epoch": 0.008854846921833838, "grad_norm": 27.5335636138916, "learning_rate": 0.0, "loss": 0.4987, "step": 50 }, { "epoch": 0.008854846921833838, "eval_loss": 0.4569474458694458, "eval_runtime": 1567.1076, "eval_samples_per_second": 24.275, "eval_steps_per_second": 3.035, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.658021338284032e+17, "train_batch_size": 2, "trial_name": null, "trial_params": null }