{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.039846590626089556,
"eval_steps": 17,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00019923295313044778,
"eval_loss": 0.9888173341751099,
"eval_runtime": 244.5615,
"eval_samples_per_second": 34.568,
"eval_steps_per_second": 4.322,
"step": 1
},
{
"epoch": 0.0005976988593913434,
"grad_norm": 0.2933606505393982,
"learning_rate": 3e-05,
"loss": 0.9647,
"step": 3
},
{
"epoch": 0.0011953977187826868,
"grad_norm": 0.30875223875045776,
"learning_rate": 6e-05,
"loss": 0.9835,
"step": 6
},
{
"epoch": 0.00179309657817403,
"grad_norm": 0.27685415744781494,
"learning_rate": 9e-05,
"loss": 1.0392,
"step": 9
},
{
"epoch": 0.0023907954375653735,
"grad_norm": 0.19165818393230438,
"learning_rate": 9.997266286704631e-05,
"loss": 0.8622,
"step": 12
},
{
"epoch": 0.0029884942969567166,
"grad_norm": 0.21981056034564972,
"learning_rate": 9.98292246503335e-05,
"loss": 0.8396,
"step": 15
},
{
"epoch": 0.003386960203217612,
"eval_loss": 0.854656457901001,
"eval_runtime": 246.6307,
"eval_samples_per_second": 34.278,
"eval_steps_per_second": 4.286,
"step": 17
},
{
"epoch": 0.00358619315634806,
"grad_norm": 0.18528936803340912,
"learning_rate": 9.956320346634876e-05,
"loss": 0.8472,
"step": 18
},
{
"epoch": 0.0041838920157394035,
"grad_norm": 0.17711381614208221,
"learning_rate": 9.917525374361912e-05,
"loss": 0.8624,
"step": 21
},
{
"epoch": 0.004781590875130747,
"grad_norm": 0.18825452029705048,
"learning_rate": 9.86663298624003e-05,
"loss": 0.7784,
"step": 24
},
{
"epoch": 0.00537928973452209,
"grad_norm": 0.2445794641971588,
"learning_rate": 9.803768380684242e-05,
"loss": 0.8382,
"step": 27
},
{
"epoch": 0.005976988593913433,
"grad_norm": 0.25549814105033875,
"learning_rate": 9.729086208503174e-05,
"loss": 0.83,
"step": 30
},
{
"epoch": 0.006574687453304777,
"grad_norm": 0.2030678689479828,
"learning_rate": 9.642770192448536e-05,
"loss": 0.7497,
"step": 33
},
{
"epoch": 0.006773920406435224,
"eval_loss": 0.7824097871780396,
"eval_runtime": 246.0525,
"eval_samples_per_second": 34.359,
"eval_steps_per_second": 4.296,
"step": 34
},
{
"epoch": 0.00717238631269612,
"grad_norm": 0.24549758434295654,
"learning_rate": 9.545032675245813e-05,
"loss": 0.863,
"step": 36
},
{
"epoch": 0.0077700851720874636,
"grad_norm": 0.20169614255428314,
"learning_rate": 9.43611409721806e-05,
"loss": 0.7357,
"step": 39
},
{
"epoch": 0.008367784031478807,
"grad_norm": 0.1820153445005417,
"learning_rate": 9.316282404787871e-05,
"loss": 0.7357,
"step": 42
},
{
"epoch": 0.00896548289087015,
"grad_norm": 0.1945677399635315,
"learning_rate": 9.185832391312644e-05,
"loss": 0.764,
"step": 45
},
{
"epoch": 0.009563181750261494,
"grad_norm": 0.19976423680782318,
"learning_rate": 9.045084971874738e-05,
"loss": 0.7432,
"step": 48
},
{
"epoch": 0.010160880609652836,
"grad_norm": 0.2329975664615631,
"learning_rate": 8.894386393810563e-05,
"loss": 0.7484,
"step": 51
},
{
"epoch": 0.010160880609652836,
"eval_loss": 0.7552871704101562,
"eval_runtime": 246.9974,
"eval_samples_per_second": 34.227,
"eval_steps_per_second": 4.279,
"step": 51
},
{
"epoch": 0.01075857946904418,
"grad_norm": 0.24253146350383759,
"learning_rate": 8.73410738492077e-05,
"loss": 0.7833,
"step": 54
},
{
"epoch": 0.011356278328435523,
"grad_norm": 0.2245894819498062,
"learning_rate": 8.564642241456986e-05,
"loss": 0.7807,
"step": 57
},
{
"epoch": 0.011953977187826866,
"grad_norm": 0.2301652878522873,
"learning_rate": 8.386407858128706e-05,
"loss": 0.7355,
"step": 60
},
{
"epoch": 0.01255167604721821,
"grad_norm": 0.21688085794448853,
"learning_rate": 8.199842702516583e-05,
"loss": 0.7521,
"step": 63
},
{
"epoch": 0.013149374906609553,
"grad_norm": 0.2115517556667328,
"learning_rate": 8.005405736415126e-05,
"loss": 0.7837,
"step": 66
},
{
"epoch": 0.013547840812870448,
"eval_loss": 0.7411483526229858,
"eval_runtime": 246.9838,
"eval_samples_per_second": 34.229,
"eval_steps_per_second": 4.28,
"step": 68
},
{
"epoch": 0.013747073766000897,
"grad_norm": 0.2068549543619156,
"learning_rate": 7.803575286758364e-05,
"loss": 0.6639,
"step": 69
},
{
"epoch": 0.01434477262539224,
"grad_norm": 0.1979922354221344,
"learning_rate": 7.594847868906076e-05,
"loss": 0.7337,
"step": 72
},
{
"epoch": 0.014942471484783584,
"grad_norm": 0.20813895761966705,
"learning_rate": 7.379736965185368e-05,
"loss": 0.7659,
"step": 75
},
{
"epoch": 0.015540170344174927,
"grad_norm": 0.20922593772411346,
"learning_rate": 7.158771761692464e-05,
"loss": 0.7355,
"step": 78
},
{
"epoch": 0.01613786920356627,
"grad_norm": 0.1938929706811905,
"learning_rate": 6.932495846462261e-05,
"loss": 0.7499,
"step": 81
},
{
"epoch": 0.016735568062957614,
"grad_norm": 0.21271425485610962,
"learning_rate": 6.701465872208216e-05,
"loss": 0.6686,
"step": 84
},
{
"epoch": 0.01693480101608806,
"eval_loss": 0.7316516041755676,
"eval_runtime": 246.9875,
"eval_samples_per_second": 34.228,
"eval_steps_per_second": 4.28,
"step": 85
},
{
"epoch": 0.017333266922348958,
"grad_norm": 0.2120652049779892,
"learning_rate": 6.466250186922325e-05,
"loss": 0.7224,
"step": 87
},
{
"epoch": 0.0179309657817403,
"grad_norm": 0.22158583998680115,
"learning_rate": 6.227427435703997e-05,
"loss": 0.7273,
"step": 90
},
{
"epoch": 0.018528664641131645,
"grad_norm": 0.21288037300109863,
"learning_rate": 5.985585137257401e-05,
"loss": 0.7102,
"step": 93
},
{
"epoch": 0.019126363500522988,
"grad_norm": 0.20418782532215118,
"learning_rate": 5.74131823855921e-05,
"loss": 0.7021,
"step": 96
},
{
"epoch": 0.01972406235991433,
"grad_norm": 0.22563454508781433,
"learning_rate": 5.495227651252315e-05,
"loss": 0.6756,
"step": 99
},
{
"epoch": 0.02032176121930567,
"grad_norm": 0.21469104290008545,
"learning_rate": 5.247918773366112e-05,
"loss": 0.6921,
"step": 102
},
{
"epoch": 0.02032176121930567,
"eval_loss": 0.7249154448509216,
"eval_runtime": 246.7439,
"eval_samples_per_second": 34.262,
"eval_steps_per_second": 4.284,
"step": 102
},
{
"epoch": 0.020919460078697015,
"grad_norm": 0.2427963763475418,
"learning_rate": 5e-05,
"loss": 0.7018,
"step": 105
},
{
"epoch": 0.02151715893808836,
"grad_norm": 0.2553952932357788,
"learning_rate": 4.7520812266338885e-05,
"loss": 0.7219,
"step": 108
},
{
"epoch": 0.022114857797479702,
"grad_norm": 0.23926398158073425,
"learning_rate": 4.504772348747687e-05,
"loss": 0.7707,
"step": 111
},
{
"epoch": 0.022712556656871045,
"grad_norm": 0.24447274208068848,
"learning_rate": 4.2586817614407895e-05,
"loss": 0.7402,
"step": 114
},
{
"epoch": 0.02331025551626239,
"grad_norm": 0.218000590801239,
"learning_rate": 4.0144148627425993e-05,
"loss": 0.7143,
"step": 117
},
{
"epoch": 0.023708721422523286,
"eval_loss": 0.7203857898712158,
"eval_runtime": 246.9873,
"eval_samples_per_second": 34.228,
"eval_steps_per_second": 4.28,
"step": 119
},
{
"epoch": 0.023907954375653732,
"grad_norm": 0.21252404153347015,
"learning_rate": 3.772572564296005e-05,
"loss": 0.6713,
"step": 120
},
{
"epoch": 0.024505653235045076,
"grad_norm": 0.20347541570663452,
"learning_rate": 3.533749813077677e-05,
"loss": 0.7159,
"step": 123
},
{
"epoch": 0.02510335209443642,
"grad_norm": 0.2782200574874878,
"learning_rate": 3.298534127791785e-05,
"loss": 0.749,
"step": 126
},
{
"epoch": 0.025701050953827763,
"grad_norm": 0.21792350709438324,
"learning_rate": 3.0675041535377405e-05,
"loss": 0.6967,
"step": 129
},
{
"epoch": 0.026298749813219106,
"grad_norm": 0.19734562933444977,
"learning_rate": 2.8412282383075363e-05,
"loss": 0.6717,
"step": 132
},
{
"epoch": 0.02689644867261045,
"grad_norm": 0.25884565711021423,
"learning_rate": 2.6202630348146324e-05,
"loss": 0.7098,
"step": 135
},
{
"epoch": 0.027095681625740897,
"eval_loss": 0.7171828746795654,
"eval_runtime": 247.1294,
"eval_samples_per_second": 34.209,
"eval_steps_per_second": 4.277,
"step": 136
},
{
"epoch": 0.027494147532001793,
"grad_norm": 0.2502513527870178,
"learning_rate": 2.405152131093926e-05,
"loss": 0.7678,
"step": 138
},
{
"epoch": 0.028091846391393137,
"grad_norm": 0.21426473557949066,
"learning_rate": 2.196424713241637e-05,
"loss": 0.7028,
"step": 141
},
{
"epoch": 0.02868954525078448,
"grad_norm": 0.2242855429649353,
"learning_rate": 1.9945942635848748e-05,
"loss": 0.7267,
"step": 144
},
{
"epoch": 0.029287244110175824,
"grad_norm": 0.2083127796649933,
"learning_rate": 1.800157297483417e-05,
"loss": 0.6915,
"step": 147
},
{
"epoch": 0.029884942969567167,
"grad_norm": 0.20724868774414062,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.7249,
"step": 150
},
{
"epoch": 0.03048264182895851,
"grad_norm": 0.23638051748275757,
"learning_rate": 1.435357758543015e-05,
"loss": 0.6968,
"step": 153
},
{
"epoch": 0.03048264182895851,
"eval_loss": 0.7152303457260132,
"eval_runtime": 246.9292,
"eval_samples_per_second": 34.237,
"eval_steps_per_second": 4.281,
"step": 153
},
{
"epoch": 0.031080340688349854,
"grad_norm": 0.21780776977539062,
"learning_rate": 1.2658926150792322e-05,
"loss": 0.754,
"step": 156
},
{
"epoch": 0.0316780395477412,
"grad_norm": 0.23965197801589966,
"learning_rate": 1.1056136061894384e-05,
"loss": 0.6907,
"step": 159
},
{
"epoch": 0.03227573840713254,
"grad_norm": 0.21216127276420593,
"learning_rate": 9.549150281252633e-06,
"loss": 0.6891,
"step": 162
},
{
"epoch": 0.032873437266523885,
"grad_norm": 0.21560019254684448,
"learning_rate": 8.141676086873572e-06,
"loss": 0.7348,
"step": 165
},
{
"epoch": 0.03347113612591523,
"grad_norm": 0.24270105361938477,
"learning_rate": 6.837175952121306e-06,
"loss": 0.7507,
"step": 168
},
{
"epoch": 0.03386960203217612,
"eval_loss": 0.7143241167068481,
"eval_runtime": 246.9514,
"eval_samples_per_second": 34.233,
"eval_steps_per_second": 4.28,
"step": 170
},
{
"epoch": 0.03406883498530657,
"grad_norm": 0.19754469394683838,
"learning_rate": 5.6388590278194096e-06,
"loss": 0.7157,
"step": 171
},
{
"epoch": 0.034666533844697915,
"grad_norm": 0.21476249396800995,
"learning_rate": 4.549673247541875e-06,
"loss": 0.7158,
"step": 174
},
{
"epoch": 0.03526423270408926,
"grad_norm": 0.2122727334499359,
"learning_rate": 3.5722980755146517e-06,
"loss": 0.6921,
"step": 177
},
{
"epoch": 0.0358619315634806,
"grad_norm": 0.22367681562900543,
"learning_rate": 2.7091379149682685e-06,
"loss": 0.7906,
"step": 180
},
{
"epoch": 0.036459630422871946,
"grad_norm": 0.22603553533554077,
"learning_rate": 1.962316193157593e-06,
"loss": 0.6743,
"step": 183
},
{
"epoch": 0.03705732928226329,
"grad_norm": 0.23509639501571655,
"learning_rate": 1.333670137599713e-06,
"loss": 0.7062,
"step": 186
},
{
"epoch": 0.03725656223539373,
"eval_loss": 0.7139464020729065,
"eval_runtime": 246.9924,
"eval_samples_per_second": 34.228,
"eval_steps_per_second": 4.279,
"step": 187
},
{
"epoch": 0.03765502814165463,
"grad_norm": 0.221495121717453,
"learning_rate": 8.247462563808817e-07,
"loss": 0.7116,
"step": 189
},
{
"epoch": 0.038252727001045976,
"grad_norm": 0.22564756870269775,
"learning_rate": 4.367965336512403e-07,
"loss": 0.7366,
"step": 192
},
{
"epoch": 0.03885042586043732,
"grad_norm": 0.23282335698604584,
"learning_rate": 1.7077534966650766e-07,
"loss": 0.7022,
"step": 195
},
{
"epoch": 0.03944812471982866,
"grad_norm": 0.23939348757266998,
"learning_rate": 2.7337132953697554e-08,
"loss": 0.7262,
"step": 198
}
],
"logging_steps": 3,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 17,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.516788593636147e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}