{
"best_metric": 0.2209298312664032,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 0.02768549280177187,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005537098560354374,
"grad_norm": 83.63919067382812,
"learning_rate": 5e-05,
"loss": 5.3284,
"step": 1
},
{
"epoch": 0.0005537098560354374,
"eval_loss": 5.616427421569824,
"eval_runtime": 881.9672,
"eval_samples_per_second": 13.795,
"eval_steps_per_second": 1.725,
"step": 1
},
{
"epoch": 0.0011074197120708748,
"grad_norm": 81.07270812988281,
"learning_rate": 0.0001,
"loss": 5.0392,
"step": 2
},
{
"epoch": 0.0016611295681063123,
"grad_norm": 77.96538543701172,
"learning_rate": 9.989294616193017e-05,
"loss": 4.821,
"step": 3
},
{
"epoch": 0.0022148394241417496,
"grad_norm": 248.20559692382812,
"learning_rate": 9.957224306869053e-05,
"loss": 4.2793,
"step": 4
},
{
"epoch": 0.0027685492801771874,
"grad_norm": 470.4466552734375,
"learning_rate": 9.903926402016153e-05,
"loss": 3.1648,
"step": 5
},
{
"epoch": 0.0033222591362126247,
"grad_norm": 244.6553192138672,
"learning_rate": 9.829629131445342e-05,
"loss": 1.8212,
"step": 6
},
{
"epoch": 0.003875968992248062,
"grad_norm": 73.94499206542969,
"learning_rate": 9.73465064747553e-05,
"loss": 1.1359,
"step": 7
},
{
"epoch": 0.004429678848283499,
"grad_norm": 52.48252487182617,
"learning_rate": 9.619397662556435e-05,
"loss": 0.639,
"step": 8
},
{
"epoch": 0.0049833887043189366,
"grad_norm": 42.62266159057617,
"learning_rate": 9.484363707663442e-05,
"loss": 0.4366,
"step": 9
},
{
"epoch": 0.005537098560354375,
"grad_norm": 55.48554229736328,
"learning_rate": 9.330127018922194e-05,
"loss": 0.2331,
"step": 10
},
{
"epoch": 0.006090808416389812,
"grad_norm": 18.96228790283203,
"learning_rate": 9.157348061512727e-05,
"loss": 0.2517,
"step": 11
},
{
"epoch": 0.006644518272425249,
"grad_norm": 600.3218994140625,
"learning_rate": 8.966766701456177e-05,
"loss": 3.1156,
"step": 12
},
{
"epoch": 0.007198228128460687,
"grad_norm": 560.729736328125,
"learning_rate": 8.759199037394887e-05,
"loss": 2.4623,
"step": 13
},
{
"epoch": 0.007751937984496124,
"grad_norm": 50.38520050048828,
"learning_rate": 8.535533905932738e-05,
"loss": 0.47,
"step": 14
},
{
"epoch": 0.008305647840531562,
"grad_norm": 45.0614128112793,
"learning_rate": 8.296729075500344e-05,
"loss": 0.2445,
"step": 15
},
{
"epoch": 0.008859357696566999,
"grad_norm": 16.783288955688477,
"learning_rate": 8.043807145043604e-05,
"loss": 0.0527,
"step": 16
},
{
"epoch": 0.009413067552602437,
"grad_norm": 401.33197021484375,
"learning_rate": 7.777851165098012e-05,
"loss": 0.1234,
"step": 17
},
{
"epoch": 0.009966777408637873,
"grad_norm": 127.39128875732422,
"learning_rate": 7.500000000000001e-05,
"loss": 0.0367,
"step": 18
},
{
"epoch": 0.010520487264673311,
"grad_norm": 7.844943046569824,
"learning_rate": 7.211443451095007e-05,
"loss": 0.0225,
"step": 19
},
{
"epoch": 0.01107419712070875,
"grad_norm": 4.442848205566406,
"learning_rate": 6.91341716182545e-05,
"loss": 0.0251,
"step": 20
},
{
"epoch": 0.011627906976744186,
"grad_norm": 2.3369901180267334,
"learning_rate": 6.607197326515808e-05,
"loss": 0.0091,
"step": 21
},
{
"epoch": 0.012181616832779624,
"grad_norm": 0.6338233947753906,
"learning_rate": 6.294095225512603e-05,
"loss": 0.0014,
"step": 22
},
{
"epoch": 0.01273532668881506,
"grad_norm": 0.2525288164615631,
"learning_rate": 5.9754516100806423e-05,
"loss": 0.0004,
"step": 23
},
{
"epoch": 0.013289036544850499,
"grad_norm": 202.52098083496094,
"learning_rate": 5.6526309611002594e-05,
"loss": 1.0549,
"step": 24
},
{
"epoch": 0.013842746400885935,
"grad_norm": 749.843017578125,
"learning_rate": 5.327015646150716e-05,
"loss": 2.1561,
"step": 25
},
{
"epoch": 0.013842746400885935,
"eval_loss": 0.2209298312664032,
"eval_runtime": 882.1499,
"eval_samples_per_second": 13.792,
"eval_steps_per_second": 1.724,
"step": 25
},
{
"epoch": 0.014396456256921373,
"grad_norm": 98.28343963623047,
"learning_rate": 5e-05,
"loss": 0.8503,
"step": 26
},
{
"epoch": 0.014950166112956811,
"grad_norm": 14.48868465423584,
"learning_rate": 4.6729843538492847e-05,
"loss": 0.018,
"step": 27
},
{
"epoch": 0.015503875968992248,
"grad_norm": 5.126861095428467,
"learning_rate": 4.347369038899744e-05,
"loss": 0.0071,
"step": 28
},
{
"epoch": 0.016057585825027684,
"grad_norm": 1.48406982421875,
"learning_rate": 4.0245483899193595e-05,
"loss": 0.0024,
"step": 29
},
{
"epoch": 0.016611295681063124,
"grad_norm": 13.655170440673828,
"learning_rate": 3.705904774487396e-05,
"loss": 0.0077,
"step": 30
},
{
"epoch": 0.01716500553709856,
"grad_norm": 8.702022552490234,
"learning_rate": 3.392802673484193e-05,
"loss": 0.0046,
"step": 31
},
{
"epoch": 0.017718715393133997,
"grad_norm": 4.665962219238281,
"learning_rate": 3.086582838174551e-05,
"loss": 0.0023,
"step": 32
},
{
"epoch": 0.018272425249169437,
"grad_norm": 1.7277586460113525,
"learning_rate": 2.7885565489049946e-05,
"loss": 0.0009,
"step": 33
},
{
"epoch": 0.018826135105204873,
"grad_norm": 0.4850597381591797,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0004,
"step": 34
},
{
"epoch": 0.01937984496124031,
"grad_norm": 0.1928061842918396,
"learning_rate": 2.2221488349019903e-05,
"loss": 0.0001,
"step": 35
},
{
"epoch": 0.019933554817275746,
"grad_norm": 0.04373222589492798,
"learning_rate": 1.9561928549563968e-05,
"loss": 0.0001,
"step": 36
},
{
"epoch": 0.020487264673311186,
"grad_norm": 323.9327087402344,
"learning_rate": 1.703270924499656e-05,
"loss": 2.4818,
"step": 37
},
{
"epoch": 0.021040974529346623,
"grad_norm": 266.1087951660156,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.7784,
"step": 38
},
{
"epoch": 0.02159468438538206,
"grad_norm": 0.04026114568114281,
"learning_rate": 1.2408009626051137e-05,
"loss": 0.0001,
"step": 39
},
{
"epoch": 0.0221483942414175,
"grad_norm": 0.03933119773864746,
"learning_rate": 1.0332332985438248e-05,
"loss": 0.0001,
"step": 40
},
{
"epoch": 0.022702104097452935,
"grad_norm": 0.04323228821158409,
"learning_rate": 8.426519384872733e-06,
"loss": 0.0001,
"step": 41
},
{
"epoch": 0.023255813953488372,
"grad_norm": 0.06206133961677551,
"learning_rate": 6.698729810778065e-06,
"loss": 0.0001,
"step": 42
},
{
"epoch": 0.023809523809523808,
"grad_norm": 0.015565506182610989,
"learning_rate": 5.156362923365588e-06,
"loss": 0.0,
"step": 43
},
{
"epoch": 0.024363233665559248,
"grad_norm": 0.057457469403743744,
"learning_rate": 3.8060233744356633e-06,
"loss": 0.0001,
"step": 44
},
{
"epoch": 0.024916943521594685,
"grad_norm": 0.010861905291676521,
"learning_rate": 2.653493525244721e-06,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.02547065337763012,
"grad_norm": 0.016795886680483818,
"learning_rate": 1.70370868554659e-06,
"loss": 0.0,
"step": 46
},
{
"epoch": 0.02602436323366556,
"grad_norm": 0.04295491799712181,
"learning_rate": 9.607359798384785e-07,
"loss": 0.0001,
"step": 47
},
{
"epoch": 0.026578073089700997,
"grad_norm": 0.08125736564397812,
"learning_rate": 4.277569313094809e-07,
"loss": 0.0002,
"step": 48
},
{
"epoch": 0.027131782945736434,
"grad_norm": 101.67281341552734,
"learning_rate": 1.0705383806982606e-07,
"loss": 1.1258,
"step": 49
},
{
"epoch": 0.02768549280177187,
"grad_norm": 486.48394775390625,
"learning_rate": 0.0,
"loss": 2.7297,
"step": 50
},
{
"epoch": 0.02768549280177187,
"eval_loss": 0.3239246606826782,
"eval_runtime": 882.1015,
"eval_samples_per_second": 13.793,
"eval_steps_per_second": 1.724,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.967137622687744e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}