Commit 3458e8c (verified): Training in progress, step 100, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.05116398055768739,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005116398055768739,
"grad_norm": 1.6488349437713623,
"learning_rate": 1e-05,
"loss": 2.1352,
"step": 1
},
{
"epoch": 0.0005116398055768739,
"eval_loss": 1.228289008140564,
"eval_runtime": 274.3879,
"eval_samples_per_second": 5.999,
"eval_steps_per_second": 0.751,
"step": 1
},
{
"epoch": 0.0010232796111537478,
"grad_norm": 1.8014962673187256,
"learning_rate": 2e-05,
"loss": 2.439,
"step": 2
},
{
"epoch": 0.0015349194167306216,
"grad_norm": 1.226792573928833,
"learning_rate": 3e-05,
"loss": 2.2876,
"step": 3
},
{
"epoch": 0.0020465592223074956,
"grad_norm": 1.5848373174667358,
"learning_rate": 4e-05,
"loss": 2.1205,
"step": 4
},
{
"epoch": 0.0025581990278843694,
"grad_norm": 3.6875414848327637,
"learning_rate": 5e-05,
"loss": 2.973,
"step": 5
},
{
"epoch": 0.0030698388334612432,
"grad_norm": 1.861879587173462,
"learning_rate": 6e-05,
"loss": 2.2063,
"step": 6
},
{
"epoch": 0.003581478639038117,
"grad_norm": 2.6092193126678467,
"learning_rate": 7e-05,
"loss": 2.5909,
"step": 7
},
{
"epoch": 0.004093118444614991,
"grad_norm": 2.884692668914795,
"learning_rate": 8e-05,
"loss": 2.3501,
"step": 8
},
{
"epoch": 0.004604758250191865,
"grad_norm": 1.9096957445144653,
"learning_rate": 9e-05,
"loss": 1.7905,
"step": 9
},
{
"epoch": 0.004604758250191865,
"eval_loss": 1.1009039878845215,
"eval_runtime": 274.3445,
"eval_samples_per_second": 6.0,
"eval_steps_per_second": 0.751,
"step": 9
},
{
"epoch": 0.005116398055768739,
"grad_norm": 2.686815023422241,
"learning_rate": 0.0001,
"loss": 2.0176,
"step": 10
},
{
"epoch": 0.005628037861345613,
"grad_norm": 1.8589001893997192,
"learning_rate": 9.99695413509548e-05,
"loss": 1.8315,
"step": 11
},
{
"epoch": 0.0061396776669224865,
"grad_norm": 1.6375495195388794,
"learning_rate": 9.987820251299122e-05,
"loss": 1.8289,
"step": 12
},
{
"epoch": 0.00665131747249936,
"grad_norm": 1.9399513006210327,
"learning_rate": 9.972609476841367e-05,
"loss": 1.5838,
"step": 13
},
{
"epoch": 0.007162957278076234,
"grad_norm": 2.0972728729248047,
"learning_rate": 9.951340343707852e-05,
"loss": 1.514,
"step": 14
},
{
"epoch": 0.007674597083653108,
"grad_norm": 2.4550724029541016,
"learning_rate": 9.924038765061042e-05,
"loss": 1.8727,
"step": 15
},
{
"epoch": 0.008186236889229983,
"grad_norm": 2.246565580368042,
"learning_rate": 9.890738003669029e-05,
"loss": 1.5098,
"step": 16
},
{
"epoch": 0.008697876694806856,
"grad_norm": 2.1880674362182617,
"learning_rate": 9.851478631379982e-05,
"loss": 1.5074,
"step": 17
},
{
"epoch": 0.00920951650038373,
"grad_norm": 2.00897216796875,
"learning_rate": 9.806308479691595e-05,
"loss": 1.7004,
"step": 18
},
{
"epoch": 0.00920951650038373,
"eval_loss": 0.8399195671081543,
"eval_runtime": 274.0678,
"eval_samples_per_second": 6.006,
"eval_steps_per_second": 0.752,
"step": 18
},
{
"epoch": 0.009721156305960604,
"grad_norm": 1.782171368598938,
"learning_rate": 9.755282581475769e-05,
"loss": 1.3817,
"step": 19
},
{
"epoch": 0.010232796111537478,
"grad_norm": 1.8624811172485352,
"learning_rate": 9.698463103929542e-05,
"loss": 1.4014,
"step": 20
},
{
"epoch": 0.010744435917114352,
"grad_norm": 2.667515516281128,
"learning_rate": 9.635919272833938e-05,
"loss": 1.771,
"step": 21
},
{
"epoch": 0.011256075722691225,
"grad_norm": 2.9106290340423584,
"learning_rate": 9.567727288213005e-05,
"loss": 1.4126,
"step": 22
},
{
"epoch": 0.0117677155282681,
"grad_norm": 2.704364776611328,
"learning_rate": 9.493970231495835e-05,
"loss": 1.3825,
"step": 23
},
{
"epoch": 0.012279355333844973,
"grad_norm": 2.78434419631958,
"learning_rate": 9.414737964294636e-05,
"loss": 1.5669,
"step": 24
},
{
"epoch": 0.012790995139421847,
"grad_norm": 2.526869058609009,
"learning_rate": 9.330127018922194e-05,
"loss": 1.4226,
"step": 25
},
{
"epoch": 0.01330263494499872,
"grad_norm": 1.8918830156326294,
"learning_rate": 9.24024048078213e-05,
"loss": 1.3571,
"step": 26
},
{
"epoch": 0.013814274750575594,
"grad_norm": 2.5752553939819336,
"learning_rate": 9.145187862775209e-05,
"loss": 1.2146,
"step": 27
},
{
"epoch": 0.013814274750575594,
"eval_loss": 0.7836452126502991,
"eval_runtime": 274.2003,
"eval_samples_per_second": 6.003,
"eval_steps_per_second": 0.751,
"step": 27
},
{
"epoch": 0.014325914556152468,
"grad_norm": 2.8034920692443848,
"learning_rate": 9.045084971874738e-05,
"loss": 1.6562,
"step": 28
},
{
"epoch": 0.014837554361729342,
"grad_norm": 2.0168240070343018,
"learning_rate": 8.940053768033609e-05,
"loss": 1.3663,
"step": 29
},
{
"epoch": 0.015349194167306216,
"grad_norm": 3.294177770614624,
"learning_rate": 8.83022221559489e-05,
"loss": 1.5281,
"step": 30
},
{
"epoch": 0.01586083397288309,
"grad_norm": 2.886345624923706,
"learning_rate": 8.715724127386972e-05,
"loss": 1.5451,
"step": 31
},
{
"epoch": 0.016372473778459965,
"grad_norm": 1.7247331142425537,
"learning_rate": 8.596699001693255e-05,
"loss": 1.0179,
"step": 32
},
{
"epoch": 0.01688411358403684,
"grad_norm": 1.886189579963684,
"learning_rate": 8.473291852294987e-05,
"loss": 1.3923,
"step": 33
},
{
"epoch": 0.017395753389613713,
"grad_norm": 2.080334424972534,
"learning_rate": 8.345653031794292e-05,
"loss": 1.2178,
"step": 34
},
{
"epoch": 0.017907393195190587,
"grad_norm": 2.708214282989502,
"learning_rate": 8.213938048432697e-05,
"loss": 1.2374,
"step": 35
},
{
"epoch": 0.01841903300076746,
"grad_norm": 2.5027191638946533,
"learning_rate": 8.07830737662829e-05,
"loss": 1.0007,
"step": 36
},
{
"epoch": 0.01841903300076746,
"eval_loss": 0.7541875243186951,
"eval_runtime": 274.1248,
"eval_samples_per_second": 6.005,
"eval_steps_per_second": 0.751,
"step": 36
},
{
"epoch": 0.018930672806344334,
"grad_norm": 1.938878059387207,
"learning_rate": 7.938926261462366e-05,
"loss": 1.2976,
"step": 37
},
{
"epoch": 0.019442312611921208,
"grad_norm": 2.1836838722229004,
"learning_rate": 7.795964517353735e-05,
"loss": 1.5049,
"step": 38
},
{
"epoch": 0.019953952417498082,
"grad_norm": 2.365196704864502,
"learning_rate": 7.649596321166024e-05,
"loss": 1.2362,
"step": 39
},
{
"epoch": 0.020465592223074956,
"grad_norm": 1.9262018203735352,
"learning_rate": 7.500000000000001e-05,
"loss": 1.7173,
"step": 40
},
{
"epoch": 0.02097723202865183,
"grad_norm": 2.8925254344940186,
"learning_rate": 7.347357813929454e-05,
"loss": 1.3671,
"step": 41
},
{
"epoch": 0.021488871834228703,
"grad_norm": 1.5692651271820068,
"learning_rate": 7.191855733945387e-05,
"loss": 1.1358,
"step": 42
},
{
"epoch": 0.022000511639805577,
"grad_norm": 2.8360183238983154,
"learning_rate": 7.033683215379002e-05,
"loss": 1.3804,
"step": 43
},
{
"epoch": 0.02251215144538245,
"grad_norm": 1.7869185209274292,
"learning_rate": 6.873032967079561e-05,
"loss": 1.2711,
"step": 44
},
{
"epoch": 0.023023791250959325,
"grad_norm": 2.3221404552459717,
"learning_rate": 6.710100716628344e-05,
"loss": 1.7126,
"step": 45
},
{
"epoch": 0.023023791250959325,
"eval_loss": 0.7342292070388794,
"eval_runtime": 274.1224,
"eval_samples_per_second": 6.005,
"eval_steps_per_second": 0.751,
"step": 45
},
{
"epoch": 0.0235354310565362,
"grad_norm": 4.136687278747559,
"learning_rate": 6.545084971874738e-05,
"loss": 1.7686,
"step": 46
},
{
"epoch": 0.024047070862113072,
"grad_norm": 2.2173714637756348,
"learning_rate": 6.378186779084995e-05,
"loss": 1.2285,
"step": 47
},
{
"epoch": 0.024558710667689946,
"grad_norm": 2.4053614139556885,
"learning_rate": 6.209609477998338e-05,
"loss": 1.4621,
"step": 48
},
{
"epoch": 0.02507035047326682,
"grad_norm": 2.109034299850464,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.4664,
"step": 49
},
{
"epoch": 0.025581990278843694,
"grad_norm": 1.8373016119003296,
"learning_rate": 5.868240888334653e-05,
"loss": 1.2307,
"step": 50
},
{
"epoch": 0.026093630084420567,
"grad_norm": 1.6459299325942993,
"learning_rate": 5.695865504800327e-05,
"loss": 1.1249,
"step": 51
},
{
"epoch": 0.02660526988999744,
"grad_norm": 1.93509042263031,
"learning_rate": 5.522642316338268e-05,
"loss": 1.4258,
"step": 52
},
{
"epoch": 0.027116909695574315,
"grad_norm": 3.230286121368408,
"learning_rate": 5.348782368720626e-05,
"loss": 1.4488,
"step": 53
},
{
"epoch": 0.02762854950115119,
"grad_norm": 1.5479521751403809,
"learning_rate": 5.174497483512506e-05,
"loss": 1.0165,
"step": 54
},
{
"epoch": 0.02762854950115119,
"eval_loss": 0.7164188623428345,
"eval_runtime": 274.108,
"eval_samples_per_second": 6.005,
"eval_steps_per_second": 0.752,
"step": 54
},
{
"epoch": 0.028140189306728063,
"grad_norm": 2.426544666290283,
"learning_rate": 5e-05,
"loss": 1.5535,
"step": 55
},
{
"epoch": 0.028651829112304936,
"grad_norm": 1.6888998746871948,
"learning_rate": 4.825502516487497e-05,
"loss": 1.2925,
"step": 56
},
{
"epoch": 0.02916346891788181,
"grad_norm": 1.6494107246398926,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.0268,
"step": 57
},
{
"epoch": 0.029675108723458684,
"grad_norm": 2.0526039600372314,
"learning_rate": 4.477357683661734e-05,
"loss": 1.3885,
"step": 58
},
{
"epoch": 0.030186748529035558,
"grad_norm": 2.0388901233673096,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.418,
"step": 59
},
{
"epoch": 0.03069838833461243,
"grad_norm": 2.1916987895965576,
"learning_rate": 4.131759111665349e-05,
"loss": 1.2828,
"step": 60
},
{
"epoch": 0.031210028140189305,
"grad_norm": 2.3835113048553467,
"learning_rate": 3.960441545911204e-05,
"loss": 1.5845,
"step": 61
},
{
"epoch": 0.03172166794576618,
"grad_norm": 2.7145097255706787,
"learning_rate": 3.790390522001662e-05,
"loss": 1.8305,
"step": 62
},
{
"epoch": 0.03223330775134305,
"grad_norm": 1.8284953832626343,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.3383,
"step": 63
},
{
"epoch": 0.03223330775134305,
"eval_loss": 0.708600640296936,
"eval_runtime": 274.0602,
"eval_samples_per_second": 6.006,
"eval_steps_per_second": 0.752,
"step": 63
},
{
"epoch": 0.03274494755691993,
"grad_norm": 2.4546236991882324,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.4384,
"step": 64
},
{
"epoch": 0.0332565873624968,
"grad_norm": 1.848671317100525,
"learning_rate": 3.289899283371657e-05,
"loss": 1.3614,
"step": 65
},
{
"epoch": 0.03376822716807368,
"grad_norm": 1.7728267908096313,
"learning_rate": 3.12696703292044e-05,
"loss": 1.3155,
"step": 66
},
{
"epoch": 0.03427986697365055,
"grad_norm": 2.5115482807159424,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.5332,
"step": 67
},
{
"epoch": 0.034791506779227425,
"grad_norm": 2.179997682571411,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.1482,
"step": 68
},
{
"epoch": 0.035303146584804296,
"grad_norm": 2.3714139461517334,
"learning_rate": 2.6526421860705473e-05,
"loss": 1.2697,
"step": 69
},
{
"epoch": 0.03581478639038117,
"grad_norm": 2.672269344329834,
"learning_rate": 2.500000000000001e-05,
"loss": 1.6706,
"step": 70
},
{
"epoch": 0.03632642619595804,
"grad_norm": 1.9255658388137817,
"learning_rate": 2.350403678833976e-05,
"loss": 1.1635,
"step": 71
},
{
"epoch": 0.03683806600153492,
"grad_norm": 1.1116821765899658,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.8958,
"step": 72
},
{
"epoch": 0.03683806600153492,
"eval_loss": 0.7018402814865112,
"eval_runtime": 274.1606,
"eval_samples_per_second": 6.004,
"eval_steps_per_second": 0.751,
"step": 72
},
{
"epoch": 0.03734970580711179,
"grad_norm": 1.3924734592437744,
"learning_rate": 2.061073738537635e-05,
"loss": 1.0895,
"step": 73
},
{
"epoch": 0.03786134561268867,
"grad_norm": 3.168287992477417,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.6212,
"step": 74
},
{
"epoch": 0.03837298541826554,
"grad_norm": 2.118021249771118,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.2363,
"step": 75
},
{
"epoch": 0.038884625223842416,
"grad_norm": 2.252734422683716,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.9702,
"step": 76
},
{
"epoch": 0.039396265029419286,
"grad_norm": 1.5854482650756836,
"learning_rate": 1.526708147705013e-05,
"loss": 1.4033,
"step": 77
},
{
"epoch": 0.039907904834996163,
"grad_norm": 2.2333011627197266,
"learning_rate": 1.4033009983067452e-05,
"loss": 1.3619,
"step": 78
},
{
"epoch": 0.040419544640573034,
"grad_norm": 2.2637572288513184,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.3294,
"step": 79
},
{
"epoch": 0.04093118444614991,
"grad_norm": 2.184448003768921,
"learning_rate": 1.1697777844051105e-05,
"loss": 1.3759,
"step": 80
},
{
"epoch": 0.04144282425172678,
"grad_norm": 2.1181156635284424,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.4547,
"step": 81
},
{
"epoch": 0.04144282425172678,
"eval_loss": 0.6984646916389465,
"eval_runtime": 274.0143,
"eval_samples_per_second": 6.007,
"eval_steps_per_second": 0.752,
"step": 81
},
{
"epoch": 0.04195446405730366,
"grad_norm": 2.124769926071167,
"learning_rate": 9.549150281252633e-06,
"loss": 1.711,
"step": 82
},
{
"epoch": 0.04246610386288053,
"grad_norm": 2.768644332885742,
"learning_rate": 8.548121372247918e-06,
"loss": 1.4417,
"step": 83
},
{
"epoch": 0.042977743668457406,
"grad_norm": 1.6614168882369995,
"learning_rate": 7.597595192178702e-06,
"loss": 1.1462,
"step": 84
},
{
"epoch": 0.04348938347403428,
"grad_norm": 1.6592438220977783,
"learning_rate": 6.698729810778065e-06,
"loss": 1.1821,
"step": 85
},
{
"epoch": 0.044001023279611154,
"grad_norm": 2.4975907802581787,
"learning_rate": 5.852620357053651e-06,
"loss": 1.5,
"step": 86
},
{
"epoch": 0.044512663085188024,
"grad_norm": 1.9415825605392456,
"learning_rate": 5.060297685041659e-06,
"loss": 1.3181,
"step": 87
},
{
"epoch": 0.0450243028907649,
"grad_norm": 1.8416827917099,
"learning_rate": 4.322727117869951e-06,
"loss": 1.1157,
"step": 88
},
{
"epoch": 0.04553594269634178,
"grad_norm": 2.9629881381988525,
"learning_rate": 3.6408072716606346e-06,
"loss": 1.6228,
"step": 89
},
{
"epoch": 0.04604758250191865,
"grad_norm": 1.538602590560913,
"learning_rate": 3.0153689607045845e-06,
"loss": 1.2217,
"step": 90
},
{
"epoch": 0.04604758250191865,
"eval_loss": 0.6968083381652832,
"eval_runtime": 274.0049,
"eval_samples_per_second": 6.007,
"eval_steps_per_second": 0.752,
"step": 90
},
{
"epoch": 0.046559222307495526,
"grad_norm": 3.1427130699157715,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.4783,
"step": 91
},
{
"epoch": 0.0470708621130724,
"grad_norm": 1.9339940547943115,
"learning_rate": 1.9369152030840556e-06,
"loss": 1.3989,
"step": 92
},
{
"epoch": 0.047582501918649274,
"grad_norm": 1.509247899055481,
"learning_rate": 1.4852136862001764e-06,
"loss": 1.277,
"step": 93
},
{
"epoch": 0.048094141724226144,
"grad_norm": 2.101160764694214,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.129,
"step": 94
},
{
"epoch": 0.04860578152980302,
"grad_norm": 1.8684135675430298,
"learning_rate": 7.596123493895991e-07,
"loss": 1.397,
"step": 95
},
{
"epoch": 0.04911742133537989,
"grad_norm": 2.9883639812469482,
"learning_rate": 4.865965629214819e-07,
"loss": 1.4069,
"step": 96
},
{
"epoch": 0.04962906114095677,
"grad_norm": 2.2074315547943115,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.4236,
"step": 97
},
{
"epoch": 0.05014070094653364,
"grad_norm": 2.8094723224639893,
"learning_rate": 1.2179748700879012e-07,
"loss": 1.4329,
"step": 98
},
{
"epoch": 0.05065234075211052,
"grad_norm": 1.6626664400100708,
"learning_rate": 3.04586490452119e-08,
"loss": 1.1333,
"step": 99
},
{
"epoch": 0.05065234075211052,
"eval_loss": 0.6963222026824951,
"eval_runtime": 274.0109,
"eval_samples_per_second": 6.007,
"eval_steps_per_second": 0.752,
"step": 99
},
{
"epoch": 0.05116398055768739,
"grad_norm": 2.4238545894622803,
"learning_rate": 0.0,
"loss": 1.3018,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.313992467283968e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
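
The JSON above is the trainer_state.json written by the Hugging Face Transformers Trainer for this checkpoint: 100 training steps (about 0.051 of one epoch), logging every step, evaluating every 9 steps, and saving every 25 steps. Below is a minimal sketch for inspecting the file offline with only the standard library; the local path checkpoint-100/trainer_state.json is an assumption about where the downloaded file lives, not part of this repository's layout.

# Minimal sketch: load the checkpoint state and summarize its log history.
# Assumes the file was downloaded to "checkpoint-100/trainer_state.json" (hypothetical path).
import json

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

print(f"global_step={state['global_step']}  epoch={state['epoch']:.4f}")
print(f"train loss: first={train_logs[0]['loss']}  last={train_logs[-1]['loss']}")
print("eval loss trajectory:", [entry["eval_loss"] for entry in eval_logs])

Run against this file, the script reports the eval loss falling from roughly 1.23 at step 1 to roughly 0.70 by step 99, matching the log_history entries above.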