{
  "best_metric": 6.8076324462890625,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.05312084993359894,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0010624169986719787,
      "grad_norm": 0.5697853565216064,
      "learning_rate": 5e-05,
      "loss": 6.9091,
      "step": 1
    },
    {
      "epoch": 0.0010624169986719787,
      "eval_loss": 6.900913715362549,
      "eval_runtime": 0.1154,
      "eval_samples_per_second": 433.248,
      "eval_steps_per_second": 112.645,
      "step": 1
    },
    {
      "epoch": 0.0021248339973439574,
      "grad_norm": 0.5687248110771179,
      "learning_rate": 0.0001,
      "loss": 6.9113,
      "step": 2
    },
    {
      "epoch": 0.0031872509960159364,
      "grad_norm": 0.5834084153175354,
      "learning_rate": 9.990365154573717e-05,
      "loss": 6.9041,
      "step": 3
    },
    {
      "epoch": 0.004249667994687915,
      "grad_norm": 0.5844998955726624,
      "learning_rate": 9.961501876182148e-05,
      "loss": 6.9019,
      "step": 4
    },
    {
      "epoch": 0.005312084993359893,
      "grad_norm": 0.5626636743545532,
      "learning_rate": 9.913533761814537e-05,
      "loss": 6.8945,
      "step": 5
    },
    {
      "epoch": 0.006374501992031873,
      "grad_norm": 0.5599868893623352,
      "learning_rate": 9.846666218300807e-05,
      "loss": 6.8961,
      "step": 6
    },
    {
      "epoch": 0.007436918990703851,
      "grad_norm": 0.5682830214500427,
      "learning_rate": 9.761185582727977e-05,
      "loss": 6.8866,
      "step": 7
    },
    {
      "epoch": 0.00849933598937583,
      "grad_norm": 0.5642528533935547,
      "learning_rate": 9.657457896300791e-05,
      "loss": 6.8891,
      "step": 8
    },
    {
      "epoch": 0.009561752988047808,
      "grad_norm": 0.5969242453575134,
      "learning_rate": 9.535927336897098e-05,
      "loss": 6.879,
      "step": 9
    },
    {
      "epoch": 0.010624169986719787,
      "grad_norm": 0.557924211025238,
      "learning_rate": 9.397114317029975e-05,
      "loss": 6.8858,
      "step": 10
    },
    {
      "epoch": 0.011686586985391767,
      "grad_norm": 0.5749415159225464,
      "learning_rate": 9.241613255361455e-05,
      "loss": 6.881,
      "step": 11
    },
    {
      "epoch": 0.012749003984063745,
      "grad_norm": 0.5889608263969421,
      "learning_rate": 9.070090031310558e-05,
      "loss": 6.8736,
      "step": 12
    },
    {
      "epoch": 0.013811420982735724,
      "grad_norm": 0.535503625869751,
      "learning_rate": 8.883279133655399e-05,
      "loss": 6.8755,
      "step": 13
    },
    {
      "epoch": 0.014873837981407702,
      "grad_norm": 0.5333693027496338,
      "learning_rate": 8.681980515339464e-05,
      "loss": 6.8741,
      "step": 14
    },
    {
      "epoch": 0.01593625498007968,
      "grad_norm": 0.5248175263404846,
      "learning_rate": 8.467056167950311e-05,
      "loss": 6.8701,
      "step": 15
    },
    {
      "epoch": 0.01699867197875166,
      "grad_norm": 0.5040876865386963,
      "learning_rate": 8.239426430539243e-05,
      "loss": 6.8671,
      "step": 16
    },
    {
      "epoch": 0.018061088977423638,
      "grad_norm": 0.5249006152153015,
      "learning_rate": 8.000066048588211e-05,
      "loss": 6.8697,
      "step": 17
    },
    {
      "epoch": 0.019123505976095617,
      "grad_norm": 0.54627925157547,
      "learning_rate": 7.75e-05,
      "loss": 6.8586,
      "step": 18
    },
    {
      "epoch": 0.020185922974767595,
      "grad_norm": 0.562960147857666,
      "learning_rate": 7.490299105985507e-05,
      "loss": 6.8528,
      "step": 19
    },
    {
      "epoch": 0.021248339973439574,
      "grad_norm": 0.5442079901695251,
      "learning_rate": 7.222075445642904e-05,
      "loss": 6.8498,
      "step": 20
    },
    {
      "epoch": 0.022310756972111555,
      "grad_norm": 0.5389817357063293,
      "learning_rate": 6.946477593864228e-05,
      "loss": 6.8482,
      "step": 21
    },
    {
      "epoch": 0.023373173970783534,
      "grad_norm": 0.5449718832969666,
      "learning_rate": 6.664685702961344e-05,
      "loss": 6.8423,
      "step": 22
    },
    {
      "epoch": 0.024435590969455512,
      "grad_norm": 0.5577437877655029,
      "learning_rate": 6.377906449072578e-05,
      "loss": 6.8415,
      "step": 23
    },
    {
      "epoch": 0.02549800796812749,
      "grad_norm": 0.5578014254570007,
      "learning_rate": 6.087367864990233e-05,
      "loss": 6.8358,
      "step": 24
    },
    {
      "epoch": 0.02656042496679947,
      "grad_norm": 0.5776466727256775,
      "learning_rate": 5.794314081535644e-05,
      "loss": 6.8356,
      "step": 25
    },
    {
      "epoch": 0.02656042496679947,
      "eval_loss": 6.831989765167236,
      "eval_runtime": 0.1152,
      "eval_samples_per_second": 433.859,
      "eval_steps_per_second": 112.803,
      "step": 25
    },
    {
      "epoch": 0.027622841965471448,
      "grad_norm": 0.48254695534706116,
      "learning_rate": 5.500000000000001e-05,
      "loss": 6.8441,
      "step": 26
    },
    {
      "epoch": 0.028685258964143426,
      "grad_norm": 0.4874054193496704,
      "learning_rate": 5.205685918464356e-05,
      "loss": 6.8453,
      "step": 27
    },
    {
      "epoch": 0.029747675962815405,
      "grad_norm": 0.4814146161079407,
      "learning_rate": 4.912632135009769e-05,
      "loss": 6.8455,
      "step": 28
    },
    {
      "epoch": 0.030810092961487383,
      "grad_norm": 0.5245039463043213,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 6.8339,
      "step": 29
    },
    {
      "epoch": 0.03187250996015936,
      "grad_norm": 0.4892662465572357,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 6.8387,
      "step": 30
    },
    {
      "epoch": 0.032934926958831344,
      "grad_norm": 0.48735734820365906,
      "learning_rate": 4.053522406135775e-05,
      "loss": 6.8307,
      "step": 31
    },
    {
      "epoch": 0.03399734395750332,
      "grad_norm": 0.5039017200469971,
      "learning_rate": 3.777924554357096e-05,
      "loss": 6.8271,
      "step": 32
    },
    {
      "epoch": 0.0350597609561753,
      "grad_norm": 0.511929452419281,
      "learning_rate": 3.509700894014496e-05,
      "loss": 6.8286,
      "step": 33
    },
    {
      "epoch": 0.036122177954847276,
      "grad_norm": 0.45853519439697266,
      "learning_rate": 3.250000000000001e-05,
      "loss": 6.8392,
      "step": 34
    },
    {
      "epoch": 0.03718459495351926,
      "grad_norm": 0.5033186078071594,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 6.8262,
      "step": 35
    },
    {
      "epoch": 0.03824701195219123,
      "grad_norm": 0.5333366394042969,
      "learning_rate": 2.760573569460757e-05,
      "loss": 6.8221,
      "step": 36
    },
    {
      "epoch": 0.039309428950863215,
      "grad_norm": 0.5256316661834717,
      "learning_rate": 2.53294383204969e-05,
      "loss": 6.813,
      "step": 37
    },
    {
      "epoch": 0.04037184594953519,
      "grad_norm": 0.49777570366859436,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 6.8198,
      "step": 38
    },
    {
      "epoch": 0.04143426294820717,
      "grad_norm": 0.44153058528900146,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 6.8374,
      "step": 39
    },
    {
      "epoch": 0.04249667994687915,
      "grad_norm": 0.4926466643810272,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 6.8275,
      "step": 40
    },
    {
      "epoch": 0.04355909694555113,
      "grad_norm": 0.470235139131546,
      "learning_rate": 1.758386744638546e-05,
      "loss": 6.8234,
      "step": 41
    },
    {
      "epoch": 0.04462151394422311,
      "grad_norm": 0.47815701365470886,
      "learning_rate": 1.602885682970026e-05,
      "loss": 6.8224,
      "step": 42
    },
    {
      "epoch": 0.045683930942895086,
      "grad_norm": 0.4792591333389282,
      "learning_rate": 1.464072663102903e-05,
      "loss": 6.82,
      "step": 43
    },
    {
      "epoch": 0.04674634794156707,
      "grad_norm": 0.4888472855091095,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 6.8158,
      "step": 44
    },
    {
      "epoch": 0.04780876494023904,
      "grad_norm": 0.49055448174476624,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 6.8205,
      "step": 45
    },
    {
      "epoch": 0.048871181938911025,
      "grad_norm": 0.4726165235042572,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 6.826,
      "step": 46
    },
    {
      "epoch": 0.049933598937583,
      "grad_norm": 0.4675711393356323,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 6.8278,
      "step": 47
    },
    {
      "epoch": 0.05099601593625498,
      "grad_norm": 0.49473482370376587,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 6.8164,
      "step": 48
    },
    {
      "epoch": 0.05205843293492696,
      "grad_norm": 0.49276942014694214,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 6.8157,
      "step": 49
    },
    {
      "epoch": 0.05312084993359894,
      "grad_norm": 0.5099896192550659,
      "learning_rate": 1e-05,
      "loss": 6.8102,
      "step": 50
    },
    {
      "epoch": 0.05312084993359894,
      "eval_loss": 6.8076324462890625,
      "eval_runtime": 0.1099,
      "eval_samples_per_second": 454.851,
      "eval_steps_per_second": 118.261,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5706848927744.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}