{
"best_metric": 10.34882926940918,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.01864193503285641,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0001864193503285641,
"grad_norm": 0.029469789937138557,
"learning_rate": 3.3333333333333333e-06,
"loss": 10.3815,
"step": 1
},
{
"epoch": 0.0001864193503285641,
"eval_loss": 10.381146430969238,
"eval_runtime": 27.3606,
"eval_samples_per_second": 330.22,
"eval_steps_per_second": 165.128,
"step": 1
},
{
"epoch": 0.0003728387006571282,
"grad_norm": 0.03511423617601395,
"learning_rate": 6.666666666666667e-06,
"loss": 10.3815,
"step": 2
},
{
"epoch": 0.0005592580509856923,
"grad_norm": 0.03218838572502136,
"learning_rate": 1e-05,
"loss": 10.3826,
"step": 3
},
{
"epoch": 0.0007456774013142564,
"grad_norm": 0.027689678594470024,
"learning_rate": 1.3333333333333333e-05,
"loss": 10.3792,
"step": 4
},
{
"epoch": 0.0009320967516428206,
"grad_norm": 0.03155677765607834,
"learning_rate": 1.6666666666666667e-05,
"loss": 10.3816,
"step": 5
},
{
"epoch": 0.0011185161019713846,
"grad_norm": 0.03052655979990959,
"learning_rate": 2e-05,
"loss": 10.38,
"step": 6
},
{
"epoch": 0.0013049354522999487,
"grad_norm": 0.0268377885222435,
"learning_rate": 2.3333333333333336e-05,
"loss": 10.3798,
"step": 7
},
{
"epoch": 0.0014913548026285128,
"grad_norm": 0.03491060808300972,
"learning_rate": 2.6666666666666667e-05,
"loss": 10.3801,
"step": 8
},
{
"epoch": 0.0016777741529570768,
"grad_norm": 0.03049907088279724,
"learning_rate": 3e-05,
"loss": 10.3793,
"step": 9
},
{
"epoch": 0.0018641935032856411,
"grad_norm": 0.03284202516078949,
"learning_rate": 3.3333333333333335e-05,
"loss": 10.3808,
"step": 10
},
{
"epoch": 0.002050612853614205,
"grad_norm": 0.03127828985452652,
"learning_rate": 3.6666666666666666e-05,
"loss": 10.3818,
"step": 11
},
{
"epoch": 0.0022370322039427693,
"grad_norm": 0.029472503811120987,
"learning_rate": 4e-05,
"loss": 10.3824,
"step": 12
},
{
"epoch": 0.0024234515542713333,
"grad_norm": 0.03045397251844406,
"learning_rate": 4.3333333333333334e-05,
"loss": 10.3807,
"step": 13
},
{
"epoch": 0.0026098709045998974,
"grad_norm": 0.030138535425066948,
"learning_rate": 4.666666666666667e-05,
"loss": 10.3783,
"step": 14
},
{
"epoch": 0.0027962902549284615,
"grad_norm": 0.033551353961229324,
"learning_rate": 5e-05,
"loss": 10.3795,
"step": 15
},
{
"epoch": 0.0029827096052570255,
"grad_norm": 0.04115981236100197,
"learning_rate": 5.333333333333333e-05,
"loss": 10.3785,
"step": 16
},
{
"epoch": 0.0031691289555855896,
"grad_norm": 0.036658305674791336,
"learning_rate": 5.666666666666667e-05,
"loss": 10.3802,
"step": 17
},
{
"epoch": 0.0033555483059141537,
"grad_norm": 0.03716996684670448,
"learning_rate": 6e-05,
"loss": 10.3815,
"step": 18
},
{
"epoch": 0.003541967656242718,
"grad_norm": 0.03936820104718208,
"learning_rate": 6.333333333333333e-05,
"loss": 10.3802,
"step": 19
},
{
"epoch": 0.0037283870065712823,
"grad_norm": 0.03620583936572075,
"learning_rate": 6.666666666666667e-05,
"loss": 10.3805,
"step": 20
},
{
"epoch": 0.003914806356899846,
"grad_norm": 0.03842416778206825,
"learning_rate": 7e-05,
"loss": 10.3793,
"step": 21
},
{
"epoch": 0.00410122570722841,
"grad_norm": 0.039217282086610794,
"learning_rate": 7.333333333333333e-05,
"loss": 10.3797,
"step": 22
},
{
"epoch": 0.0042876450575569745,
"grad_norm": 0.04393612593412399,
"learning_rate": 7.666666666666667e-05,
"loss": 10.3784,
"step": 23
},
{
"epoch": 0.0044740644078855385,
"grad_norm": 0.03723832592368126,
"learning_rate": 8e-05,
"loss": 10.3764,
"step": 24
},
{
"epoch": 0.004660483758214103,
"grad_norm": 0.05864546820521355,
"learning_rate": 8.333333333333334e-05,
"loss": 10.3784,
"step": 25
},
{
"epoch": 0.004846903108542667,
"grad_norm": 0.05707390606403351,
"learning_rate": 8.666666666666667e-05,
"loss": 10.3796,
"step": 26
},
{
"epoch": 0.005033322458871231,
"grad_norm": 0.05543961003422737,
"learning_rate": 9e-05,
"loss": 10.38,
"step": 27
},
{
"epoch": 0.005219741809199795,
"grad_norm": 0.06495416909456253,
"learning_rate": 9.333333333333334e-05,
"loss": 10.3752,
"step": 28
},
{
"epoch": 0.005406161159528359,
"grad_norm": 0.07095163315534592,
"learning_rate": 9.666666666666667e-05,
"loss": 10.376,
"step": 29
},
{
"epoch": 0.005592580509856923,
"grad_norm": 0.07251051068305969,
"learning_rate": 0.0001,
"loss": 10.3783,
"step": 30
},
{
"epoch": 0.005778999860185487,
"grad_norm": 0.0717591941356659,
"learning_rate": 9.994965332706573e-05,
"loss": 10.3761,
"step": 31
},
{
"epoch": 0.005965419210514051,
"grad_norm": 0.08555017411708832,
"learning_rate": 9.979871469976196e-05,
"loss": 10.3753,
"step": 32
},
{
"epoch": 0.006151838560842615,
"grad_norm": 0.11320396512746811,
"learning_rate": 9.954748808839674e-05,
"loss": 10.3745,
"step": 33
},
{
"epoch": 0.006338257911171179,
"grad_norm": 0.09341420978307724,
"learning_rate": 9.919647942993148e-05,
"loss": 10.3745,
"step": 34
},
{
"epoch": 0.006524677261499743,
"grad_norm": 0.11599099636077881,
"learning_rate": 9.874639560909117e-05,
"loss": 10.3704,
"step": 35
},
{
"epoch": 0.006711096611828307,
"grad_norm": 0.10898492485284805,
"learning_rate": 9.819814303479267e-05,
"loss": 10.3727,
"step": 36
},
{
"epoch": 0.006897515962156872,
"grad_norm": 0.14422820508480072,
"learning_rate": 9.755282581475769e-05,
"loss": 10.3704,
"step": 37
},
{
"epoch": 0.007083935312485436,
"grad_norm": 0.14940433204174042,
"learning_rate": 9.681174353198687e-05,
"loss": 10.3691,
"step": 38
},
{
"epoch": 0.0072703546628140004,
"grad_norm": 0.14902304112911224,
"learning_rate": 9.597638862757255e-05,
"loss": 10.3665,
"step": 39
},
{
"epoch": 0.0074567740131425645,
"grad_norm": 0.16568797826766968,
"learning_rate": 9.504844339512095e-05,
"loss": 10.3681,
"step": 40
},
{
"epoch": 0.007643193363471129,
"grad_norm": 0.17017991840839386,
"learning_rate": 9.40297765928369e-05,
"loss": 10.3653,
"step": 41
},
{
"epoch": 0.007829612713799693,
"grad_norm": 0.18572087585926056,
"learning_rate": 9.292243968009331e-05,
"loss": 10.3632,
"step": 42
},
{
"epoch": 0.008016032064128256,
"grad_norm": 0.22373846173286438,
"learning_rate": 9.172866268606513e-05,
"loss": 10.3638,
"step": 43
},
{
"epoch": 0.00820245141445682,
"grad_norm": 0.19732271134853363,
"learning_rate": 9.045084971874738e-05,
"loss": 10.3615,
"step": 44
},
{
"epoch": 0.008388870764785384,
"grad_norm": 0.19585733115673065,
"learning_rate": 8.90915741234015e-05,
"loss": 10.3552,
"step": 45
},
{
"epoch": 0.008575290115113949,
"grad_norm": 0.20070262253284454,
"learning_rate": 8.765357330018056e-05,
"loss": 10.361,
"step": 46
},
{
"epoch": 0.008761709465442512,
"grad_norm": 0.18280582129955292,
"learning_rate": 8.613974319136958e-05,
"loss": 10.358,
"step": 47
},
{
"epoch": 0.008948128815771077,
"grad_norm": 0.19768880307674408,
"learning_rate": 8.455313244934324e-05,
"loss": 10.3532,
"step": 48
},
{
"epoch": 0.009134548166099642,
"grad_norm": 0.1908486932516098,
"learning_rate": 8.289693629698564e-05,
"loss": 10.3526,
"step": 49
},
{
"epoch": 0.009320967516428205,
"grad_norm": 0.24415874481201172,
"learning_rate": 8.117449009293668e-05,
"loss": 10.3496,
"step": 50
},
{
"epoch": 0.009320967516428205,
"eval_loss": 10.3571138381958,
"eval_runtime": 27.4265,
"eval_samples_per_second": 329.426,
"eval_steps_per_second": 164.731,
"step": 50
},
{
"epoch": 0.00950738686675677,
"grad_norm": 0.1561504453420639,
"learning_rate": 7.938926261462366e-05,
"loss": 10.3609,
"step": 51
},
{
"epoch": 0.009693806217085333,
"grad_norm": 0.14776375889778137,
"learning_rate": 7.754484907260513e-05,
"loss": 10.3593,
"step": 52
},
{
"epoch": 0.009880225567413898,
"grad_norm": 0.14880362153053284,
"learning_rate": 7.564496387029532e-05,
"loss": 10.3593,
"step": 53
},
{
"epoch": 0.010066644917742461,
"grad_norm": 0.12313321232795715,
"learning_rate": 7.369343312364993e-05,
"loss": 10.3598,
"step": 54
},
{
"epoch": 0.010253064268071026,
"grad_norm": 0.14246243238449097,
"learning_rate": 7.169418695587791e-05,
"loss": 10.3582,
"step": 55
},
{
"epoch": 0.01043948361839959,
"grad_norm": 0.13358600437641144,
"learning_rate": 6.965125158269619e-05,
"loss": 10.3575,
"step": 56
},
{
"epoch": 0.010625902968728155,
"grad_norm": 0.10732542723417282,
"learning_rate": 6.756874120406714e-05,
"loss": 10.3576,
"step": 57
},
{
"epoch": 0.010812322319056718,
"grad_norm": 0.10971201956272125,
"learning_rate": 6.545084971874738e-05,
"loss": 10.3567,
"step": 58
},
{
"epoch": 0.010998741669385283,
"grad_norm": 0.09025903791189194,
"learning_rate": 6.330184227833376e-05,
"loss": 10.3538,
"step": 59
},
{
"epoch": 0.011185161019713846,
"grad_norm": 0.10398943722248077,
"learning_rate": 6.112604669781572e-05,
"loss": 10.3545,
"step": 60
},
{
"epoch": 0.01137158037004241,
"grad_norm": 0.10011833906173706,
"learning_rate": 5.8927844739931834e-05,
"loss": 10.3557,
"step": 61
},
{
"epoch": 0.011557999720370974,
"grad_norm": 0.10548334568738937,
"learning_rate": 5.6711663290882776e-05,
"loss": 10.3532,
"step": 62
},
{
"epoch": 0.011744419070699539,
"grad_norm": 0.08827032148838043,
"learning_rate": 5.448196544517168e-05,
"loss": 10.3518,
"step": 63
},
{
"epoch": 0.011930838421028102,
"grad_norm": 0.05861261487007141,
"learning_rate": 5.2243241517525754e-05,
"loss": 10.353,
"step": 64
},
{
"epoch": 0.012117257771356667,
"grad_norm": 0.08971337974071503,
"learning_rate": 5e-05,
"loss": 10.355,
"step": 65
},
{
"epoch": 0.01230367712168523,
"grad_norm": 0.08864837139844894,
"learning_rate": 4.775675848247427e-05,
"loss": 10.3548,
"step": 66
},
{
"epoch": 0.012490096472013795,
"grad_norm": 0.08446395397186279,
"learning_rate": 4.551803455482833e-05,
"loss": 10.3526,
"step": 67
},
{
"epoch": 0.012676515822342358,
"grad_norm": 0.06572958827018738,
"learning_rate": 4.328833670911724e-05,
"loss": 10.3523,
"step": 68
},
{
"epoch": 0.012862935172670923,
"grad_norm": 0.08103340864181519,
"learning_rate": 4.107215526006817e-05,
"loss": 10.3508,
"step": 69
},
{
"epoch": 0.013049354522999487,
"grad_norm": 0.06975308805704117,
"learning_rate": 3.887395330218429e-05,
"loss": 10.3515,
"step": 70
},
{
"epoch": 0.013235773873328052,
"grad_norm": 0.055503133684396744,
"learning_rate": 3.6698157721666246e-05,
"loss": 10.3512,
"step": 71
},
{
"epoch": 0.013422193223656615,
"grad_norm": 0.06876011937856674,
"learning_rate": 3.4549150281252636e-05,
"loss": 10.3516,
"step": 72
},
{
"epoch": 0.01360861257398518,
"grad_norm": 0.06682562828063965,
"learning_rate": 3.243125879593286e-05,
"loss": 10.3509,
"step": 73
},
{
"epoch": 0.013795031924313745,
"grad_norm": 0.0667678713798523,
"learning_rate": 3.0348748417303823e-05,
"loss": 10.3496,
"step": 74
},
{
"epoch": 0.013981451274642308,
"grad_norm": 0.05310027301311493,
"learning_rate": 2.8305813044122097e-05,
"loss": 10.3485,
"step": 75
},
{
"epoch": 0.014167870624970873,
"grad_norm": 0.07601341605186462,
"learning_rate": 2.630656687635007e-05,
"loss": 10.3489,
"step": 76
},
{
"epoch": 0.014354289975299436,
"grad_norm": 0.06028437986969948,
"learning_rate": 2.43550361297047e-05,
"loss": 10.3492,
"step": 77
},
{
"epoch": 0.014540709325628001,
"grad_norm": 0.054510705173015594,
"learning_rate": 2.245515092739488e-05,
"loss": 10.3496,
"step": 78
},
{
"epoch": 0.014727128675956564,
"grad_norm": 0.08019602298736572,
"learning_rate": 2.061073738537635e-05,
"loss": 10.3497,
"step": 79
},
{
"epoch": 0.014913548026285129,
"grad_norm": 0.0804019346833229,
"learning_rate": 1.8825509907063327e-05,
"loss": 10.3537,
"step": 80
},
{
"epoch": 0.015099967376613692,
"grad_norm": 0.07119160145521164,
"learning_rate": 1.7103063703014372e-05,
"loss": 10.3479,
"step": 81
},
{
"epoch": 0.015286386726942257,
"grad_norm": 0.0820232704281807,
"learning_rate": 1.544686755065677e-05,
"loss": 10.3519,
"step": 82
},
{
"epoch": 0.01547280607727082,
"grad_norm": 0.055926691740751266,
"learning_rate": 1.3860256808630428e-05,
"loss": 10.3495,
"step": 83
},
{
"epoch": 0.015659225427599385,
"grad_norm": 0.06931938230991364,
"learning_rate": 1.2346426699819458e-05,
"loss": 10.3468,
"step": 84
},
{
"epoch": 0.01584564477792795,
"grad_norm": 0.08054453134536743,
"learning_rate": 1.090842587659851e-05,
"loss": 10.3472,
"step": 85
},
{
"epoch": 0.01603206412825651,
"grad_norm": 0.07081463932991028,
"learning_rate": 9.549150281252633e-06,
"loss": 10.3477,
"step": 86
},
{
"epoch": 0.016218483478585077,
"grad_norm": 0.1176007091999054,
"learning_rate": 8.271337313934869e-06,
"loss": 10.3496,
"step": 87
},
{
"epoch": 0.01640490282891364,
"grad_norm": 0.06712072342634201,
"learning_rate": 7.077560319906695e-06,
"loss": 10.3454,
"step": 88
},
{
"epoch": 0.016591322179242207,
"grad_norm": 0.06814035773277283,
"learning_rate": 5.9702234071631e-06,
"loss": 10.3486,
"step": 89
},
{
"epoch": 0.016777741529570768,
"grad_norm": 0.09200986474752426,
"learning_rate": 4.951556604879048e-06,
"loss": 10.3454,
"step": 90
},
{
"epoch": 0.016964160879899333,
"grad_norm": 0.08182810246944427,
"learning_rate": 4.023611372427471e-06,
"loss": 10.3469,
"step": 91
},
{
"epoch": 0.017150580230227898,
"grad_norm": 0.0823381245136261,
"learning_rate": 3.18825646801314e-06,
"loss": 10.3436,
"step": 92
},
{
"epoch": 0.017336999580556463,
"grad_norm": 0.12770026922225952,
"learning_rate": 2.4471741852423237e-06,
"loss": 10.3455,
"step": 93
},
{
"epoch": 0.017523418930885024,
"grad_norm": 0.09665341675281525,
"learning_rate": 1.8018569652073381e-06,
"loss": 10.345,
"step": 94
},
{
"epoch": 0.01770983828121359,
"grad_norm": 0.08394477516412735,
"learning_rate": 1.2536043909088191e-06,
"loss": 10.342,
"step": 95
},
{
"epoch": 0.017896257631542154,
"grad_norm": 0.15145687758922577,
"learning_rate": 8.035205700685167e-07,
"loss": 10.3465,
"step": 96
},
{
"epoch": 0.01808267698187072,
"grad_norm": 0.11746706068515778,
"learning_rate": 4.52511911603265e-07,
"loss": 10.3434,
"step": 97
},
{
"epoch": 0.018269096332199284,
"grad_norm": 0.09703138470649719,
"learning_rate": 2.012853002380466e-07,
"loss": 10.3448,
"step": 98
},
{
"epoch": 0.018455515682527845,
"grad_norm": 0.15032333135604858,
"learning_rate": 5.0346672934270534e-08,
"loss": 10.3436,
"step": 99
},
{
"epoch": 0.01864193503285641,
"grad_norm": 0.18205277621746063,
"learning_rate": 0.0,
"loss": 10.3399,
"step": 100
},
{
"epoch": 0.01864193503285641,
"eval_loss": 10.34882926940918,
"eval_runtime": 27.4349,
"eval_samples_per_second": 329.326,
"eval_steps_per_second": 164.681,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 11740707028992.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}