{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.06872852233676977,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006872852233676976,
"grad_norm": 0.4530450403690338,
"learning_rate": 1e-05,
"loss": 3.5055,
"step": 1
},
{
"epoch": 0.0006872852233676976,
"eval_loss": 1.7271678447723389,
"eval_runtime": 252.7415,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.609,
"step": 1
},
{
"epoch": 0.0013745704467353953,
"grad_norm": 0.4202934205532074,
"learning_rate": 2e-05,
"loss": 3.4948,
"step": 2
},
{
"epoch": 0.002061855670103093,
"grad_norm": 0.4171874225139618,
"learning_rate": 3e-05,
"loss": 3.4219,
"step": 3
},
{
"epoch": 0.0027491408934707906,
"grad_norm": 0.5224863290786743,
"learning_rate": 4e-05,
"loss": 3.3726,
"step": 4
},
{
"epoch": 0.003436426116838488,
"grad_norm": 0.4832131862640381,
"learning_rate": 5e-05,
"loss": 3.3309,
"step": 5
},
{
"epoch": 0.004123711340206186,
"grad_norm": 0.5266788601875305,
"learning_rate": 6e-05,
"loss": 3.4297,
"step": 6
},
{
"epoch": 0.004810996563573883,
"grad_norm": 0.5814648270606995,
"learning_rate": 7e-05,
"loss": 3.3866,
"step": 7
},
{
"epoch": 0.005498281786941581,
"grad_norm": 0.7436932921409607,
"learning_rate": 8e-05,
"loss": 3.3173,
"step": 8
},
{
"epoch": 0.006185567010309278,
"grad_norm": 0.7650415897369385,
"learning_rate": 9e-05,
"loss": 3.0903,
"step": 9
},
{
"epoch": 0.006185567010309278,
"eval_loss": 1.549910068511963,
"eval_runtime": 253.3835,
"eval_samples_per_second": 4.839,
"eval_steps_per_second": 0.608,
"step": 9
},
{
"epoch": 0.006872852233676976,
"grad_norm": 0.8187753558158875,
"learning_rate": 0.0001,
"loss": 3.2222,
"step": 10
},
{
"epoch": 0.007560137457044674,
"grad_norm": 0.8003529906272888,
"learning_rate": 9.99695413509548e-05,
"loss": 2.7961,
"step": 11
},
{
"epoch": 0.008247422680412371,
"grad_norm": 0.813484251499176,
"learning_rate": 9.987820251299122e-05,
"loss": 2.6905,
"step": 12
},
{
"epoch": 0.008934707903780068,
"grad_norm": 0.7743918299674988,
"learning_rate": 9.972609476841367e-05,
"loss": 2.5702,
"step": 13
},
{
"epoch": 0.009621993127147767,
"grad_norm": 0.7361239194869995,
"learning_rate": 9.951340343707852e-05,
"loss": 2.3314,
"step": 14
},
{
"epoch": 0.010309278350515464,
"grad_norm": 1.0487244129180908,
"learning_rate": 9.924038765061042e-05,
"loss": 2.308,
"step": 15
},
{
"epoch": 0.010996563573883162,
"grad_norm": 0.8277301788330078,
"learning_rate": 9.890738003669029e-05,
"loss": 2.3839,
"step": 16
},
{
"epoch": 0.01168384879725086,
"grad_norm": 0.8363406658172607,
"learning_rate": 9.851478631379982e-05,
"loss": 2.1383,
"step": 17
},
{
"epoch": 0.012371134020618556,
"grad_norm": 0.7794749736785889,
"learning_rate": 9.806308479691595e-05,
"loss": 2.0868,
"step": 18
},
{
"epoch": 0.012371134020618556,
"eval_loss": 0.9877583980560303,
"eval_runtime": 254.3292,
"eval_samples_per_second": 4.821,
"eval_steps_per_second": 0.606,
"step": 18
},
{
"epoch": 0.013058419243986255,
"grad_norm": 0.8351041078567505,
"learning_rate": 9.755282581475769e-05,
"loss": 2.1081,
"step": 19
},
{
"epoch": 0.013745704467353952,
"grad_norm": 0.8560498356819153,
"learning_rate": 9.698463103929542e-05,
"loss": 1.8485,
"step": 20
},
{
"epoch": 0.01443298969072165,
"grad_norm": 0.7435014843940735,
"learning_rate": 9.635919272833938e-05,
"loss": 1.7601,
"step": 21
},
{
"epoch": 0.015120274914089347,
"grad_norm": 0.7566239833831787,
"learning_rate": 9.567727288213005e-05,
"loss": 1.6923,
"step": 22
},
{
"epoch": 0.015807560137457044,
"grad_norm": 0.8233651518821716,
"learning_rate": 9.493970231495835e-05,
"loss": 1.724,
"step": 23
},
{
"epoch": 0.016494845360824743,
"grad_norm": 0.8340699672698975,
"learning_rate": 9.414737964294636e-05,
"loss": 1.6131,
"step": 24
},
{
"epoch": 0.01718213058419244,
"grad_norm": 0.7858492136001587,
"learning_rate": 9.330127018922194e-05,
"loss": 1.528,
"step": 25
},
{
"epoch": 0.017869415807560136,
"grad_norm": 0.8513308763504028,
"learning_rate": 9.24024048078213e-05,
"loss": 1.5835,
"step": 26
},
{
"epoch": 0.018556701030927835,
"grad_norm": 0.6795158982276917,
"learning_rate": 9.145187862775209e-05,
"loss": 1.5386,
"step": 27
},
{
"epoch": 0.018556701030927835,
"eval_loss": 0.748094916343689,
"eval_runtime": 253.4746,
"eval_samples_per_second": 4.837,
"eval_steps_per_second": 0.608,
"step": 27
},
{
"epoch": 0.019243986254295534,
"grad_norm": 0.7328270077705383,
"learning_rate": 9.045084971874738e-05,
"loss": 1.551,
"step": 28
},
{
"epoch": 0.01993127147766323,
"grad_norm": 0.7274560928344727,
"learning_rate": 8.940053768033609e-05,
"loss": 1.4977,
"step": 29
},
{
"epoch": 0.020618556701030927,
"grad_norm": 0.7910637259483337,
"learning_rate": 8.83022221559489e-05,
"loss": 1.4872,
"step": 30
},
{
"epoch": 0.021305841924398626,
"grad_norm": 0.8576248288154602,
"learning_rate": 8.715724127386972e-05,
"loss": 1.4049,
"step": 31
},
{
"epoch": 0.021993127147766325,
"grad_norm": 0.9982084631919861,
"learning_rate": 8.596699001693255e-05,
"loss": 1.3457,
"step": 32
},
{
"epoch": 0.02268041237113402,
"grad_norm": 0.7275662422180176,
"learning_rate": 8.473291852294987e-05,
"loss": 1.345,
"step": 33
},
{
"epoch": 0.02336769759450172,
"grad_norm": 0.7337255477905273,
"learning_rate": 8.345653031794292e-05,
"loss": 1.3482,
"step": 34
},
{
"epoch": 0.024054982817869417,
"grad_norm": 0.8252787590026855,
"learning_rate": 8.213938048432697e-05,
"loss": 1.3413,
"step": 35
},
{
"epoch": 0.024742268041237112,
"grad_norm": 0.9308682084083557,
"learning_rate": 8.07830737662829e-05,
"loss": 1.3861,
"step": 36
},
{
"epoch": 0.024742268041237112,
"eval_loss": 0.6290791034698486,
"eval_runtime": 252.791,
"eval_samples_per_second": 4.85,
"eval_steps_per_second": 0.609,
"step": 36
},
{
"epoch": 0.02542955326460481,
"grad_norm": 0.8834632635116577,
"learning_rate": 7.938926261462366e-05,
"loss": 1.2901,
"step": 37
},
{
"epoch": 0.02611683848797251,
"grad_norm": 0.7757920026779175,
"learning_rate": 7.795964517353735e-05,
"loss": 1.238,
"step": 38
},
{
"epoch": 0.026804123711340205,
"grad_norm": 0.9437708854675293,
"learning_rate": 7.649596321166024e-05,
"loss": 1.1715,
"step": 39
},
{
"epoch": 0.027491408934707903,
"grad_norm": 0.9284512400627136,
"learning_rate": 7.500000000000001e-05,
"loss": 1.091,
"step": 40
},
{
"epoch": 0.028178694158075602,
"grad_norm": 0.8415759205818176,
"learning_rate": 7.347357813929454e-05,
"loss": 1.1651,
"step": 41
},
{
"epoch": 0.0288659793814433,
"grad_norm": 0.8733612895011902,
"learning_rate": 7.191855733945387e-05,
"loss": 1.1425,
"step": 42
},
{
"epoch": 0.029553264604810996,
"grad_norm": 0.9334592819213867,
"learning_rate": 7.033683215379002e-05,
"loss": 1.1424,
"step": 43
},
{
"epoch": 0.030240549828178694,
"grad_norm": 0.8202064037322998,
"learning_rate": 6.873032967079561e-05,
"loss": 1.1921,
"step": 44
},
{
"epoch": 0.030927835051546393,
"grad_norm": 0.7855863571166992,
"learning_rate": 6.710100716628344e-05,
"loss": 1.1513,
"step": 45
},
{
"epoch": 0.030927835051546393,
"eval_loss": 0.5508877038955688,
"eval_runtime": 252.7564,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.609,
"step": 45
},
{
"epoch": 0.03161512027491409,
"grad_norm": 0.8433973789215088,
"learning_rate": 6.545084971874738e-05,
"loss": 1.0264,
"step": 46
},
{
"epoch": 0.03230240549828179,
"grad_norm": 0.8501434922218323,
"learning_rate": 6.378186779084995e-05,
"loss": 1.0172,
"step": 47
},
{
"epoch": 0.032989690721649485,
"grad_norm": 0.8408077359199524,
"learning_rate": 6.209609477998338e-05,
"loss": 1.1105,
"step": 48
},
{
"epoch": 0.03367697594501718,
"grad_norm": 0.8546326160430908,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.1052,
"step": 49
},
{
"epoch": 0.03436426116838488,
"grad_norm": 0.8061237931251526,
"learning_rate": 5.868240888334653e-05,
"loss": 1.0345,
"step": 50
},
{
"epoch": 0.03505154639175258,
"grad_norm": 0.8400550484657288,
"learning_rate": 5.695865504800327e-05,
"loss": 0.9842,
"step": 51
},
{
"epoch": 0.03573883161512027,
"grad_norm": 0.8896331191062927,
"learning_rate": 5.522642316338268e-05,
"loss": 1.0689,
"step": 52
},
{
"epoch": 0.036426116838487975,
"grad_norm": 0.984061598777771,
"learning_rate": 5.348782368720626e-05,
"loss": 1.1267,
"step": 53
},
{
"epoch": 0.03711340206185567,
"grad_norm": 0.98240065574646,
"learning_rate": 5.174497483512506e-05,
"loss": 1.0024,
"step": 54
},
{
"epoch": 0.03711340206185567,
"eval_loss": 0.496385782957077,
"eval_runtime": 252.835,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.609,
"step": 54
},
{
"epoch": 0.037800687285223365,
"grad_norm": 0.9597200155258179,
"learning_rate": 5e-05,
"loss": 1.0872,
"step": 55
},
{
"epoch": 0.03848797250859107,
"grad_norm": 0.9907920360565186,
"learning_rate": 4.825502516487497e-05,
"loss": 0.9184,
"step": 56
},
{
"epoch": 0.03917525773195876,
"grad_norm": 1.0494588613510132,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.0417,
"step": 57
},
{
"epoch": 0.03986254295532646,
"grad_norm": 0.9095539450645447,
"learning_rate": 4.477357683661734e-05,
"loss": 0.8995,
"step": 58
},
{
"epoch": 0.04054982817869416,
"grad_norm": 1.0099577903747559,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.9593,
"step": 59
},
{
"epoch": 0.041237113402061855,
"grad_norm": 1.0103282928466797,
"learning_rate": 4.131759111665349e-05,
"loss": 0.9622,
"step": 60
},
{
"epoch": 0.04192439862542955,
"grad_norm": 0.9153605103492737,
"learning_rate": 3.960441545911204e-05,
"loss": 0.8628,
"step": 61
},
{
"epoch": 0.04261168384879725,
"grad_norm": 0.8908958435058594,
"learning_rate": 3.790390522001662e-05,
"loss": 0.922,
"step": 62
},
{
"epoch": 0.04329896907216495,
"grad_norm": 0.9071544408798218,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.9928,
"step": 63
},
{
"epoch": 0.04329896907216495,
"eval_loss": 0.464398592710495,
"eval_runtime": 252.7256,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.609,
"step": 63
},
{
"epoch": 0.04398625429553265,
"grad_norm": 0.9316085577011108,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.9378,
"step": 64
},
{
"epoch": 0.044673539518900345,
"grad_norm": 0.8639270663261414,
"learning_rate": 3.289899283371657e-05,
"loss": 0.9379,
"step": 65
},
{
"epoch": 0.04536082474226804,
"grad_norm": 0.9839565753936768,
"learning_rate": 3.12696703292044e-05,
"loss": 0.8882,
"step": 66
},
{
"epoch": 0.04604810996563574,
"grad_norm": 0.9108591079711914,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.9424,
"step": 67
},
{
"epoch": 0.04673539518900344,
"grad_norm": 0.9234270453453064,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.9626,
"step": 68
},
{
"epoch": 0.04742268041237113,
"grad_norm": 0.9421894550323486,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.8883,
"step": 69
},
{
"epoch": 0.048109965635738834,
"grad_norm": 0.9845061302185059,
"learning_rate": 2.500000000000001e-05,
"loss": 0.8485,
"step": 70
},
{
"epoch": 0.04879725085910653,
"grad_norm": 0.9633055329322815,
"learning_rate": 2.350403678833976e-05,
"loss": 0.9693,
"step": 71
},
{
"epoch": 0.049484536082474224,
"grad_norm": 0.9256303310394287,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.9221,
"step": 72
},
{
"epoch": 0.049484536082474224,
"eval_loss": 0.44281116127967834,
"eval_runtime": 252.7515,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.609,
"step": 72
},
{
"epoch": 0.05017182130584193,
"grad_norm": 0.9681574106216431,
"learning_rate": 2.061073738537635e-05,
"loss": 0.9509,
"step": 73
},
{
"epoch": 0.05085910652920962,
"grad_norm": 0.922154426574707,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.8309,
"step": 74
},
{
"epoch": 0.05154639175257732,
"grad_norm": 0.9173290729522705,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.9151,
"step": 75
},
{
"epoch": 0.05223367697594502,
"grad_norm": 0.9165816903114319,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.8993,
"step": 76
},
{
"epoch": 0.052920962199312714,
"grad_norm": 0.7988548874855042,
"learning_rate": 1.526708147705013e-05,
"loss": 0.7629,
"step": 77
},
{
"epoch": 0.05360824742268041,
"grad_norm": 0.8661845922470093,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.8056,
"step": 78
},
{
"epoch": 0.05429553264604811,
"grad_norm": 0.8931153416633606,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.8286,
"step": 79
},
{
"epoch": 0.054982817869415807,
"grad_norm": 0.9941338300704956,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.969,
"step": 80
},
{
"epoch": 0.05567010309278351,
"grad_norm": 0.9468693137168884,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.9096,
"step": 81
},
{
"epoch": 0.05567010309278351,
"eval_loss": 0.432786762714386,
"eval_runtime": 252.8196,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.609,
"step": 81
},
{
"epoch": 0.056357388316151204,
"grad_norm": 0.8518151640892029,
"learning_rate": 9.549150281252633e-06,
"loss": 0.738,
"step": 82
},
{
"epoch": 0.0570446735395189,
"grad_norm": 0.8456606268882751,
"learning_rate": 8.548121372247918e-06,
"loss": 0.8421,
"step": 83
},
{
"epoch": 0.0577319587628866,
"grad_norm": 0.916377067565918,
"learning_rate": 7.597595192178702e-06,
"loss": 0.7926,
"step": 84
},
{
"epoch": 0.058419243986254296,
"grad_norm": 0.8955652713775635,
"learning_rate": 6.698729810778065e-06,
"loss": 0.8337,
"step": 85
},
{
"epoch": 0.05910652920962199,
"grad_norm": 0.9039252996444702,
"learning_rate": 5.852620357053651e-06,
"loss": 0.8484,
"step": 86
},
{
"epoch": 0.05979381443298969,
"grad_norm": 0.9059133529663086,
"learning_rate": 5.060297685041659e-06,
"loss": 0.8716,
"step": 87
},
{
"epoch": 0.06048109965635739,
"grad_norm": 0.9183720350265503,
"learning_rate": 4.322727117869951e-06,
"loss": 0.9808,
"step": 88
},
{
"epoch": 0.061168384879725084,
"grad_norm": 0.9803336262702942,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.8317,
"step": 89
},
{
"epoch": 0.061855670103092786,
"grad_norm": 0.911630392074585,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.8261,
"step": 90
},
{
"epoch": 0.061855670103092786,
"eval_loss": 0.4276270270347595,
"eval_runtime": 252.8496,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.609,
"step": 90
},
{
"epoch": 0.06254295532646048,
"grad_norm": 1.0115582942962646,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.8234,
"step": 91
},
{
"epoch": 0.06323024054982818,
"grad_norm": 0.8366324305534363,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.8316,
"step": 92
},
{
"epoch": 0.06391752577319587,
"grad_norm": 0.9966782331466675,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.7705,
"step": 93
},
{
"epoch": 0.06460481099656358,
"grad_norm": 0.9167208075523376,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.8403,
"step": 94
},
{
"epoch": 0.06529209621993128,
"grad_norm": 0.9588520526885986,
"learning_rate": 7.596123493895991e-07,
"loss": 0.8928,
"step": 95
},
{
"epoch": 0.06597938144329897,
"grad_norm": 0.9358211159706116,
"learning_rate": 4.865965629214819e-07,
"loss": 0.8543,
"step": 96
},
{
"epoch": 0.06666666666666667,
"grad_norm": 0.8442792892456055,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.8146,
"step": 97
},
{
"epoch": 0.06735395189003436,
"grad_norm": 0.8882707357406616,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.8587,
"step": 98
},
{
"epoch": 0.06804123711340206,
"grad_norm": 1.0984888076782227,
"learning_rate": 3.04586490452119e-08,
"loss": 0.9505,
"step": 99
},
{
"epoch": 0.06804123711340206,
"eval_loss": 0.42630666494369507,
"eval_runtime": 253.3629,
"eval_samples_per_second": 4.839,
"eval_steps_per_second": 0.608,
"step": 99
},
{
"epoch": 0.06872852233676977,
"grad_norm": 0.9829119443893433,
"learning_rate": 0.0,
"loss": 0.9251,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.26956599246848e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}