{
  "best_metric": 0.9042022824287415,
  "best_model_checkpoint": "data/Llama-31-8B_task-1_120-samples_config-4_full/checkpoint-319",
  "epoch": 64.9090909090909,
  "eval_steps": 500,
  "global_step": 357,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.18181818181818182, "grad_norm": 1.9413753747940063, "learning_rate": 1.3333333333333336e-07, "loss": 2.4963, "step": 1 },
    { "epoch": 0.36363636363636365, "grad_norm": 1.9993785619735718, "learning_rate": 2.666666666666667e-07, "loss": 2.515, "step": 2 },
    { "epoch": 0.7272727272727273, "grad_norm": 1.8845915794372559, "learning_rate": 5.333333333333335e-07, "loss": 2.4687, "step": 4 },
    { "epoch": 0.9090909090909091, "eval_loss": 2.458869218826294, "eval_runtime": 9.6295, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 5 },
    { "epoch": 1.0909090909090908, "grad_norm": 2.0253942012786865, "learning_rate": 8.000000000000001e-07, "loss": 2.4759, "step": 6 },
    { "epoch": 1.4545454545454546, "grad_norm": 1.8723983764648438, "learning_rate": 1.066666666666667e-06, "loss": 2.4786, "step": 8 },
    { "epoch": 1.8181818181818183, "grad_norm": 1.924127459526062, "learning_rate": 1.3333333333333334e-06, "loss": 2.5083, "step": 10 },
    { "epoch": 2.0, "eval_loss": 2.443969488143921, "eval_runtime": 9.6222, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 11 },
    { "epoch": 2.1818181818181817, "grad_norm": 1.7455520629882812, "learning_rate": 1.6000000000000001e-06, "loss": 2.4117, "step": 12 },
    { "epoch": 2.5454545454545454, "grad_norm": 1.710787296295166, "learning_rate": 1.8666666666666669e-06, "loss": 2.4583, "step": 14 },
    { "epoch": 2.909090909090909, "grad_norm": 1.6907106637954712, "learning_rate": 2.133333333333334e-06, "loss": 2.4676, "step": 16 },
    { "epoch": 2.909090909090909, "eval_loss": 2.421785593032837, "eval_runtime": 9.6333, "eval_samples_per_second": 2.491, "eval_steps_per_second": 2.491, "step": 16 },
    { "epoch": 3.2727272727272725, "grad_norm": 1.5824416875839233, "learning_rate": 2.4000000000000003e-06, "loss": 2.4237, "step": 18 },
    { "epoch": 3.6363636363636362, "grad_norm": 1.59761643409729, "learning_rate": 2.666666666666667e-06, "loss": 2.4148, "step": 20 },
    { "epoch": 4.0, "grad_norm": 1.6097276210784912, "learning_rate": 2.9333333333333338e-06, "loss": 2.4562, "step": 22 },
    { "epoch": 4.0, "eval_loss": 2.3870151042938232, "eval_runtime": 9.6315, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 22 },
    { "epoch": 4.363636363636363, "grad_norm": 1.636257529258728, "learning_rate": 3.2000000000000003e-06, "loss": 2.4287, "step": 24 },
    { "epoch": 4.7272727272727275, "grad_norm": 1.5569593906402588, "learning_rate": 3.4666666666666672e-06, "loss": 2.377, "step": 26 },
    { "epoch": 4.909090909090909, "eval_loss": 2.3474807739257812, "eval_runtime": 9.6237, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 27 },
    { "epoch": 5.090909090909091, "grad_norm": 1.808140516281128, "learning_rate": 3.7333333333333337e-06, "loss": 2.3605, "step": 28 },
    { "epoch": 5.454545454545454, "grad_norm": 1.7729766368865967, "learning_rate": 4.000000000000001e-06, "loss": 2.3404, "step": 30 },
    { "epoch": 5.818181818181818, "grad_norm": 1.9055501222610474, "learning_rate": 4.266666666666668e-06, "loss": 2.3303, "step": 32 },
    { "epoch": 6.0, "eval_loss": 2.2793145179748535, "eval_runtime": 9.6178, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 33 },
    { "epoch": 6.181818181818182, "grad_norm": 1.628233790397644, "learning_rate": 4.533333333333334e-06, "loss": 2.2739, "step": 34 },
    { "epoch": 6.545454545454545, "grad_norm": 1.4241219758987427, "learning_rate": 4.800000000000001e-06, "loss": 2.3294, "step": 36 },
    { "epoch": 6.909090909090909, "grad_norm": 1.5400785207748413, "learning_rate": 5.0666666666666676e-06, "loss": 2.2553, "step": 38 },
    { "epoch": 6.909090909090909, "eval_loss": 2.2254207134246826, "eval_runtime": 9.6211, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 38 },
    { "epoch": 7.2727272727272725, "grad_norm": 1.4445594549179077, "learning_rate": 5.333333333333334e-06, "loss": 2.2498, "step": 40 },
    { "epoch": 7.636363636363637, "grad_norm": 1.7270216941833496, "learning_rate": 5.600000000000001e-06, "loss": 2.1853, "step": 42 },
    { "epoch": 8.0, "grad_norm": 2.017972707748413, "learning_rate": 5.8666666666666675e-06, "loss": 2.174, "step": 44 },
    { "epoch": 8.0, "eval_loss": 2.139225721359253, "eval_runtime": 9.6197, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 44 },
    { "epoch": 8.363636363636363, "grad_norm": 1.3301095962524414, "learning_rate": 6.133333333333334e-06, "loss": 2.1429, "step": 46 },
    { "epoch": 8.727272727272727, "grad_norm": 1.083661675453186, "learning_rate": 6.4000000000000006e-06, "loss": 2.131, "step": 48 },
    { "epoch": 8.909090909090908, "eval_loss": 2.0661048889160156, "eval_runtime": 9.6328, "eval_samples_per_second": 2.491, "eval_steps_per_second": 2.491, "step": 49 },
    { "epoch": 9.090909090909092, "grad_norm": 1.0499473810195923, "learning_rate": 6.666666666666667e-06, "loss": 2.0813, "step": 50 },
    { "epoch": 9.454545454545455, "grad_norm": 1.014916181564331, "learning_rate": 6.9333333333333344e-06, "loss": 2.0497, "step": 52 },
    { "epoch": 9.818181818181818, "grad_norm": 1.0278065204620361, "learning_rate": 7.2000000000000005e-06, "loss": 2.0142, "step": 54 },
    { "epoch": 10.0, "eval_loss": 1.9625515937805176, "eval_runtime": 9.6185, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 55 },
    { "epoch": 10.181818181818182, "grad_norm": 0.9720411896705627, "learning_rate": 7.4666666666666675e-06, "loss": 1.9759, "step": 56 },
    { "epoch": 10.545454545454545, "grad_norm": 0.9346638321876526, "learning_rate": 7.733333333333334e-06, "loss": 1.941, "step": 58 },
    { "epoch": 10.909090909090908, "grad_norm": 0.8559221029281616, "learning_rate": 8.000000000000001e-06, "loss": 1.8873, "step": 60 },
    { "epoch": 10.909090909090908, "eval_loss": 1.8745914697647095, "eval_runtime": 9.6239, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 60 },
    { "epoch": 11.272727272727273, "grad_norm": 0.8817884922027588, "learning_rate": 8.266666666666667e-06, "loss": 1.9132, "step": 62 },
    { "epoch": 11.636363636363637, "grad_norm": 0.8232048749923706, "learning_rate": 8.533333333333335e-06, "loss": 1.8387, "step": 64 },
    { "epoch": 12.0, "grad_norm": 0.8017051815986633, "learning_rate": 8.8e-06, "loss": 1.7633, "step": 66 },
    { "epoch": 12.0, "eval_loss": 1.7650254964828491, "eval_runtime": 9.6208, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 66 },
    { "epoch": 12.363636363636363, "grad_norm": 0.9119341373443604, "learning_rate": 9.066666666666667e-06, "loss": 1.72, "step": 68 },
    { "epoch": 12.727272727272727, "grad_norm": 0.8771039843559265, "learning_rate": 9.333333333333334e-06, "loss": 1.726, "step": 70 },
    { "epoch": 12.909090909090908, "eval_loss": 1.6562695503234863, "eval_runtime": 9.6212, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 71 },
    { "epoch": 13.090909090909092, "grad_norm": 0.9313778877258301, "learning_rate": 9.600000000000001e-06, "loss": 1.6816, "step": 72 },
    { "epoch": 13.454545454545455, "grad_norm": 1.1438463926315308, "learning_rate": 9.866666666666668e-06, "loss": 1.6168, "step": 74 },
    { "epoch": 13.818181818181818, "grad_norm": 1.0701647996902466, "learning_rate": 9.999945845889795e-06, "loss": 1.5711, "step": 76 },
    { "epoch": 14.0, "eval_loss": 1.5122851133346558, "eval_runtime": 9.627, "eval_samples_per_second": 2.493, "eval_steps_per_second": 2.493, "step": 77 },
    { "epoch": 14.181818181818182, "grad_norm": 0.9771044254302979, "learning_rate": 9.999512620046523e-06, "loss": 1.5488, "step": 78 },
    { "epoch": 14.545454545454545, "grad_norm": 0.91764235496521, "learning_rate": 9.99864620589731e-06, "loss": 1.4504, "step": 80 },
    { "epoch": 14.909090909090908, "grad_norm": 0.9226170182228088, "learning_rate": 9.99734667851357e-06, "loss": 1.4344, "step": 82 },
    { "epoch": 14.909090909090908, "eval_loss": 1.3950275182724, "eval_runtime": 9.6306, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 82 },
    { "epoch": 15.272727272727273, "grad_norm": 0.8576654195785522, "learning_rate": 9.995614150494293e-06, "loss": 1.3568, "step": 84 },
    { "epoch": 15.636363636363637, "grad_norm": 0.9888388514518738, "learning_rate": 9.993448771956285e-06, "loss": 1.3195, "step": 86 },
    { "epoch": 16.0, "grad_norm": 0.9264158010482788, "learning_rate": 9.99085073052117e-06, "loss": 1.3201, "step": 88 },
    { "epoch": 16.0, "eval_loss": 1.2661280632019043, "eval_runtime": 9.6187, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 88 },
    { "epoch": 16.363636363636363, "grad_norm": 0.8193872570991516, "learning_rate": 9.987820251299121e-06, "loss": 1.2496, "step": 90 },
    { "epoch": 16.727272727272727, "grad_norm": 0.7646848559379578, "learning_rate": 9.984357596869369e-06, "loss": 1.1787, "step": 92 },
    { "epoch": 16.90909090909091, "eval_loss": 1.18313729763031, "eval_runtime": 9.6587, "eval_samples_per_second": 2.485, "eval_steps_per_second": 2.485, "step": 93 },
    { "epoch": 17.09090909090909, "grad_norm": 0.7123040556907654, "learning_rate": 9.980463067257437e-06, "loss": 1.2232, "step": 94 },
    { "epoch": 17.454545454545453, "grad_norm": 0.6257199645042419, "learning_rate": 9.976136999909156e-06, "loss": 1.1068, "step": 96 },
    { "epoch": 17.818181818181817, "grad_norm": 0.7334635257720947, "learning_rate": 9.971379769661422e-06, "loss": 1.1444, "step": 98 },
    { "epoch": 18.0, "eval_loss": 1.1187793016433716, "eval_runtime": 9.6189, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 99 },
    { "epoch": 18.181818181818183, "grad_norm": 0.589821457862854, "learning_rate": 9.966191788709716e-06, "loss": 1.1334, "step": 100 },
    { "epoch": 18.545454545454547, "grad_norm": 0.5560262799263, "learning_rate": 9.960573506572391e-06, "loss": 1.0929, "step": 102 },
    { "epoch": 18.90909090909091, "grad_norm": 0.5528337359428406, "learning_rate": 9.95452541005172e-06, "loss": 1.0591, "step": 104 },
    { "epoch": 18.90909090909091, "eval_loss": 1.0836217403411865, "eval_runtime": 9.632, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 104 },
    { "epoch": 19.272727272727273, "grad_norm": 0.5791244506835938, "learning_rate": 9.948048023191728e-06, "loss": 1.0522, "step": 106 },
    { "epoch": 19.636363636363637, "grad_norm": 0.5012540817260742, "learning_rate": 9.941141907232766e-06, "loss": 1.0986, "step": 108 },
    { "epoch": 20.0, "grad_norm": 0.4489583969116211, "learning_rate": 9.933807660562898e-06, "loss": 1.0151, "step": 110 },
    { "epoch": 20.0, "eval_loss": 1.0539679527282715, "eval_runtime": 9.6338, "eval_samples_per_second": 2.491, "eval_steps_per_second": 2.491, "step": 110 },
    { "epoch": 20.363636363636363, "grad_norm": 0.384694367647171, "learning_rate": 9.926045918666045e-06, "loss": 1.0363, "step": 112 },
    { "epoch": 20.727272727272727, "grad_norm": 0.3782545030117035, "learning_rate": 9.91785735406693e-06, "loss": 1.0277, "step": 114 },
    { "epoch": 20.90909090909091, "eval_loss": 1.0388442277908325, "eval_runtime": 9.6309, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 115 },
    { "epoch": 21.09090909090909, "grad_norm": 0.3923183083534241, "learning_rate": 9.909242676272797e-06, "loss": 1.0385, "step": 116 },
    { "epoch": 21.454545454545453, "grad_norm": 0.3774013817310333, "learning_rate": 9.90020263171194e-06, "loss": 1.0073, "step": 118 },
    { "epoch": 21.818181818181817, "grad_norm": 0.3472942113876343, "learning_rate": 9.890738003669029e-06, "loss": 1.0025, "step": 120 },
    { "epoch": 22.0, "eval_loss": 1.0249600410461426, "eval_runtime": 9.6161, "eval_samples_per_second": 2.496, "eval_steps_per_second": 2.496, "step": 121 },
    { "epoch": 22.181818181818183, "grad_norm": 0.3280906081199646, "learning_rate": 9.880849612217238e-06, "loss": 0.9887, "step": 122 },
    { "epoch": 22.545454545454547, "grad_norm": 0.3648756742477417, "learning_rate": 9.870538314147194e-06, "loss": 1.0023, "step": 124 },
    { "epoch": 22.90909090909091, "grad_norm": 0.390601247549057, "learning_rate": 9.859805002892733e-06, "loss": 1.0161, "step": 126 },
    { "epoch": 22.90909090909091, "eval_loss": 1.0154192447662354, "eval_runtime": 9.6244, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 126 },
    { "epoch": 23.272727272727273, "grad_norm": 0.3790784180164337, "learning_rate": 9.84865060845349e-06, "loss": 0.9941, "step": 128 },
    { "epoch": 23.636363636363637, "grad_norm": 0.36824390292167664, "learning_rate": 9.83707609731432e-06, "loss": 0.9697, "step": 130 },
    { "epoch": 24.0, "grad_norm": 0.39850422739982605, "learning_rate": 9.825082472361558e-06, "loss": 0.9946, "step": 132 },
    { "epoch": 24.0, "eval_loss": 1.0047398805618286, "eval_runtime": 9.6214, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 132 },
    { "epoch": 24.363636363636363, "grad_norm": 0.34537094831466675, "learning_rate": 9.812670772796113e-06, "loss": 0.9699, "step": 134 },
    { "epoch": 24.727272727272727, "grad_norm": 0.38018471002578735, "learning_rate": 9.799842074043438e-06, "loss": 0.9773, "step": 136 },
    { "epoch": 24.90909090909091, "eval_loss": 0.9969633221626282, "eval_runtime": 9.6246, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 137 },
    { "epoch": 25.09090909090909, "grad_norm": 0.40856873989105225, "learning_rate": 9.786597487660336e-06, "loss": 0.9836, "step": 138 },
    { "epoch": 25.454545454545453, "grad_norm": 0.37090280652046204, "learning_rate": 9.77293816123866e-06, "loss": 0.9658, "step": 140 },
    { "epoch": 25.818181818181817, "grad_norm": 0.4068634808063507, "learning_rate": 9.75886527830587e-06, "loss": 0.9708, "step": 142 },
    { "epoch": 26.0, "eval_loss": 0.9890053272247314, "eval_runtime": 9.6268, "eval_samples_per_second": 2.493, "eval_steps_per_second": 2.493, "step": 143 },
    { "epoch": 26.181818181818183, "grad_norm": 0.38360726833343506, "learning_rate": 9.744380058222483e-06, "loss": 0.9676, "step": 144 },
    { "epoch": 26.545454545454547, "grad_norm": 0.38106632232666016, "learning_rate": 9.729483756076436e-06, "loss": 0.972, "step": 146 },
    { "epoch": 26.90909090909091, "grad_norm": 0.36939358711242676, "learning_rate": 9.714177662574316e-06, "loss": 0.9374, "step": 148 },
    { "epoch": 26.90909090909091, "eval_loss": 0.9821727275848389, "eval_runtime": 9.622, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 148 },
    { "epoch": 27.272727272727273, "grad_norm": 0.37566348910331726, "learning_rate": 9.698463103929542e-06, "loss": 0.9219, "step": 150 },
    { "epoch": 27.636363636363637, "grad_norm": 0.3677101731300354, "learning_rate": 9.682341441747446e-06, "loss": 0.9798, "step": 152 },
    { "epoch": 28.0, "grad_norm": 0.3695693016052246, "learning_rate": 9.665814072907293e-06, "loss": 0.9403, "step": 154 },
    { "epoch": 28.0, "eval_loss": 0.9750909209251404, "eval_runtime": 9.6244, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 154 },
    { "epoch": 28.363636363636363, "grad_norm": 0.42501190304756165, "learning_rate": 9.648882429441258e-06, "loss": 0.9428, "step": 156 },
    { "epoch": 28.727272727272727, "grad_norm": 0.3643590807914734, "learning_rate": 9.63154797841033e-06, "loss": 0.94, "step": 158 },
    { "epoch": 28.90909090909091, "eval_loss": 0.9702978134155273, "eval_runtime": 9.6253, "eval_samples_per_second": 2.493, "eval_steps_per_second": 2.493, "step": 159 },
    { "epoch": 29.09090909090909, "grad_norm": 0.3957996666431427, "learning_rate": 9.613812221777212e-06, "loss": 0.9274, "step": 160 },
    { "epoch": 29.454545454545453, "grad_norm": 0.4291062355041504, "learning_rate": 9.595676696276173e-06, "loss": 0.9886, "step": 162 },
    { "epoch": 29.818181818181817, "grad_norm": 0.5365828275680542, "learning_rate": 9.577142973279896e-06, "loss": 0.902, "step": 164 },
    { "epoch": 30.0, "eval_loss": 0.9632946848869324, "eval_runtime": 9.6187, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 165 },
    { "epoch": 30.181818181818183, "grad_norm": 0.38883283734321594, "learning_rate": 9.55821265866333e-06, "loss": 0.8967, "step": 166 },
    { "epoch": 30.545454545454547, "grad_norm": 0.41333243250846863, "learning_rate": 9.538887392664544e-06, "loss": 0.9142, "step": 168 },
    { "epoch": 30.90909090909091, "grad_norm": 0.4123990833759308, "learning_rate": 9.519168849742603e-06, "loss": 0.9215, "step": 170 },
    { "epoch": 30.90909090909091, "eval_loss": 0.9604056477546692, "eval_runtime": 9.627, "eval_samples_per_second": 2.493, "eval_steps_per_second": 2.493, "step": 170 },
    { "epoch": 31.272727272727273, "grad_norm": 0.407969206571579, "learning_rate": 9.499058738432492e-06, "loss": 0.9574, "step": 172 },
    { "epoch": 31.636363636363637, "grad_norm": 0.4867004156112671, "learning_rate": 9.478558801197065e-06, "loss": 0.9208, "step": 174 },
    { "epoch": 32.0, "grad_norm": 0.4684889316558838, "learning_rate": 9.457670814276083e-06, "loss": 0.8854, "step": 176 },
    { "epoch": 32.0, "eval_loss": 0.9548270106315613, "eval_runtime": 9.6173, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 176 },
    { "epoch": 32.36363636363637, "grad_norm": 0.527631938457489, "learning_rate": 9.436396587532297e-06, "loss": 0.8831, "step": 178 },
    { "epoch": 32.72727272727273, "grad_norm": 0.44928282499313354, "learning_rate": 9.414737964294636e-06, "loss": 0.96, "step": 180 },
    { "epoch": 32.90909090909091, "eval_loss": 0.9503173232078552, "eval_runtime": 9.624, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 181 },
    { "epoch": 33.09090909090909, "grad_norm": 0.5621985197067261, "learning_rate": 9.392696821198488e-06, "loss": 0.8666, "step": 182 },
    { "epoch": 33.45454545454545, "grad_norm": 0.523452877998352, "learning_rate": 9.370275068023097e-06, "loss": 0.922, "step": 184 },
    { "epoch": 33.81818181818182, "grad_norm": 0.5437294840812683, "learning_rate": 9.347474647526095e-06, "loss": 0.9162, "step": 186 },
    { "epoch": 34.0, "eval_loss": 0.9452812075614929, "eval_runtime": 9.6313, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 187 },
    { "epoch": 34.18181818181818, "grad_norm": 0.46963879466056824, "learning_rate": 9.324297535275156e-06, "loss": 0.8254, "step": 188 },
    { "epoch": 34.54545454545455, "grad_norm": 0.48245498538017273, "learning_rate": 9.30074573947683e-06, "loss": 0.9174, "step": 190 },
    { "epoch": 34.90909090909091, "grad_norm": 0.5139335989952087, "learning_rate": 9.276821300802535e-06, "loss": 0.8686, "step": 192 },
    { "epoch": 34.90909090909091, "eval_loss": 0.9428532719612122, "eval_runtime": 9.6309, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 192 },
    { "epoch": 35.27272727272727, "grad_norm": 0.45418813824653625, "learning_rate": 9.25252629221175e-06, "loss": 0.9011, "step": 194 },
    { "epoch": 35.63636363636363, "grad_norm": 0.5155036449432373, "learning_rate": 9.227862818772392e-06, "loss": 0.8754, "step": 196 },
    { "epoch": 36.0, "grad_norm": 0.4917118549346924, "learning_rate": 9.202833017478421e-06, "loss": 0.906, "step": 198 },
    { "epoch": 36.0, "eval_loss": 0.9385306239128113, "eval_runtime": 9.6304, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 198 },
    { "epoch": 36.36363636363637, "grad_norm": 0.5289394855499268, "learning_rate": 9.177439057064684e-06, "loss": 0.8751, "step": 200 },
    { "epoch": 36.72727272727273, "grad_norm": 0.5498368144035339, "learning_rate": 9.151683137818989e-06, "loss": 0.8762, "step": 202 },
    { "epoch": 36.90909090909091, "eval_loss": 0.9353806972503662, "eval_runtime": 9.6269, "eval_samples_per_second": 2.493, "eval_steps_per_second": 2.493, "step": 203 },
    { "epoch": 37.09090909090909, "grad_norm": 0.516069233417511, "learning_rate": 9.125567491391476e-06, "loss": 0.869, "step": 204 },
    { "epoch": 37.45454545454545, "grad_norm": 0.5102888345718384, "learning_rate": 9.099094380601244e-06, "loss": 0.8518, "step": 206 },
    { "epoch": 37.81818181818182, "grad_norm": 0.5379929542541504, "learning_rate": 9.072266099240286e-06, "loss": 0.8929, "step": 208 },
    { "epoch": 38.0, "eval_loss": 0.9331977963447571, "eval_runtime": 9.6277, "eval_samples_per_second": 2.493, "eval_steps_per_second": 2.493, "step": 209 },
    { "epoch": 38.18181818181818, "grad_norm": 0.6433578729629517, "learning_rate": 9.045084971874738e-06, "loss": 0.8756, "step": 210 },
    { "epoch": 38.54545454545455, "grad_norm": 0.6186140179634094, "learning_rate": 9.017553353643479e-06, "loss": 0.8582, "step": 212 },
    { "epoch": 38.90909090909091, "grad_norm": 0.608066976070404, "learning_rate": 8.989673630054044e-06, "loss": 0.8687, "step": 214 },
    { "epoch": 38.90909090909091, "eval_loss": 0.9301042556762695, "eval_runtime": 9.6307, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 214 },
    { "epoch": 39.27272727272727, "grad_norm": 0.6045626401901245, "learning_rate": 8.961448216775955e-06, "loss": 0.8119, "step": 216 },
    { "epoch": 39.63636363636363, "grad_norm": 0.6160129308700562, "learning_rate": 8.932879559431392e-06, "loss": 0.8301, "step": 218 },
    { "epoch": 40.0, "grad_norm": 0.6550566554069519, "learning_rate": 8.903970133383297e-06, "loss": 0.8933, "step": 220 },
    { "epoch": 40.0, "eval_loss": 0.9279410243034363, "eval_runtime": 9.6288, "eval_samples_per_second": 2.493, "eval_steps_per_second": 2.493, "step": 220 },
    { "epoch": 40.36363636363637, "grad_norm": 0.6415209770202637, "learning_rate": 8.874722443520898e-06, "loss": 0.8325, "step": 222 },
    { "epoch": 40.72727272727273, "grad_norm": 0.6836015582084656, "learning_rate": 8.845139024042664e-06, "loss": 0.858, "step": 224 },
    { "epoch": 40.90909090909091, "eval_loss": 0.9241297841072083, "eval_runtime": 9.6293, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 225 },
    { "epoch": 41.09090909090909, "grad_norm": 0.6644122004508972, "learning_rate": 8.815222438236726e-06, "loss": 0.8649, "step": 226 },
    { "epoch": 41.45454545454545, "grad_norm": 0.6619220972061157, "learning_rate": 8.784975278258783e-06, "loss": 0.8085, "step": 228 },
    { "epoch": 41.81818181818182, "grad_norm": 0.6005414724349976, "learning_rate": 8.754400164907496e-06, "loss": 0.8481, "step": 230 },
    { "epoch": 42.0, "eval_loss": 0.9222747683525085, "eval_runtime": 9.6426, "eval_samples_per_second": 2.489, "eval_steps_per_second": 2.489, "step": 231 },
    { "epoch": 42.18181818181818, "grad_norm": 0.722902238368988, "learning_rate": 8.723499747397415e-06, "loss": 0.8578, "step": 232 },
    { "epoch": 42.54545454545455, "grad_norm": 0.7436155080795288, "learning_rate": 8.692276703129421e-06, "loss": 0.7996, "step": 234 },
    { "epoch": 42.90909090909091, "grad_norm": 0.6658902168273926, "learning_rate": 8.660733737458751e-06, "loss": 0.8228, "step": 236 },
    { "epoch": 42.90909090909091, "eval_loss": 0.9217340350151062, "eval_runtime": 9.6277, "eval_samples_per_second": 2.493, "eval_steps_per_second": 2.493, "step": 236 },
    { "epoch": 43.27272727272727, "grad_norm": 0.6352283358573914, "learning_rate": 8.628873583460593e-06, "loss": 0.8113, "step": 238 },
    { "epoch": 43.63636363636363, "grad_norm": 1.0223489999771118, "learning_rate": 8.596699001693257e-06, "loss": 0.8149, "step": 240 },
    { "epoch": 44.0, "grad_norm": 0.7334797978401184, "learning_rate": 8.564212779959003e-06, "loss": 0.8593, "step": 242 },
    { "epoch": 44.0, "eval_loss": 0.9185922741889954, "eval_runtime": 9.6303, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 242 },
    { "epoch": 44.36363636363637, "grad_norm": 0.7544272541999817, "learning_rate": 8.531417733062476e-06, "loss": 0.7958, "step": 244 },
    { "epoch": 44.72727272727273, "grad_norm": 0.8189204335212708, "learning_rate": 8.498316702566828e-06, "loss": 0.8238, "step": 246 },
    { "epoch": 44.90909090909091, "eval_loss": 0.9156233668327332, "eval_runtime": 9.6451, "eval_samples_per_second": 2.488, "eval_steps_per_second": 2.488, "step": 247 },
    { "epoch": 45.09090909090909, "grad_norm": 0.6729193329811096, "learning_rate": 8.464912556547486e-06, "loss": 0.835, "step": 248 },
    { "epoch": 45.45454545454545, "grad_norm": 0.6723213195800781, "learning_rate": 8.43120818934367e-06, "loss": 0.7991, "step": 250 },
    { "epoch": 45.81818181818182, "grad_norm": 0.8917332887649536, "learning_rate": 8.397206521307584e-06, "loss": 0.8081, "step": 252 },
    { "epoch": 46.0, "eval_loss": 0.9161267876625061, "eval_runtime": 9.6325, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 253 },
    { "epoch": 46.18181818181818, "grad_norm": 0.7718498110771179, "learning_rate": 8.362910498551402e-06, "loss": 0.8071, "step": 254 },
    { "epoch": 46.54545454545455, "grad_norm": 0.7421916127204895, "learning_rate": 8.328323092691985e-06, "loss": 0.7838, "step": 256 },
    { "epoch": 46.90909090909091, "grad_norm": 0.775203287601471, "learning_rate": 8.293447300593402e-06, "loss": 0.8327, "step": 258 },
    { "epoch": 46.90909090909091, "eval_loss": 0.912854015827179, "eval_runtime": 9.6464, "eval_samples_per_second": 2.488, "eval_steps_per_second": 2.488, "step": 258 },
    { "epoch": 47.27272727272727, "grad_norm": 0.6994480490684509, "learning_rate": 8.258286144107277e-06, "loss": 0.7949, "step": 260 },
    { "epoch": 47.63636363636363, "grad_norm": 0.8607519865036011, "learning_rate": 8.222842669810936e-06, "loss": 0.7794, "step": 262 },
    { "epoch": 48.0, "grad_norm": 0.8172978758811951, "learning_rate": 8.18711994874345e-06, "loss": 0.8029, "step": 264 },
    { "epoch": 48.0, "eval_loss": 0.9110000133514404, "eval_runtime": 9.6168, "eval_samples_per_second": 2.496, "eval_steps_per_second": 2.496, "step": 264 },
    { "epoch": 48.36363636363637, "grad_norm": 0.8061463236808777, "learning_rate": 8.151121076139534e-06, "loss": 0.8099, "step": 266 },
    { "epoch": 48.72727272727273, "grad_norm": 0.9735673069953918, "learning_rate": 8.11484917116136e-06, "loss": 0.7909, "step": 268 },
    { "epoch": 48.90909090909091, "eval_loss": 0.9093864560127258, "eval_runtime": 9.6205, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 269 },
    { "epoch": 49.09090909090909, "grad_norm": 0.8723132014274597, "learning_rate": 8.078307376628292e-06, "loss": 0.7628, "step": 270 },
    { "epoch": 49.45454545454545, "grad_norm": 0.7607284188270569, "learning_rate": 8.041498858744572e-06, "loss": 0.7665, "step": 272 },
    { "epoch": 49.81818181818182, "grad_norm": 0.8277180194854736, "learning_rate": 8.004426806824985e-06, "loss": 0.7826, "step": 274 },
    { "epoch": 50.0, "eval_loss": 0.9079095721244812, "eval_runtime": 9.6142, "eval_samples_per_second": 2.496, "eval_steps_per_second": 2.496, "step": 275 },
    { "epoch": 50.18181818181818, "grad_norm": 0.8411371111869812, "learning_rate": 7.967094433018508e-06, "loss": 0.7943, "step": 276 },
    { "epoch": 50.54545454545455, "grad_norm": 0.834507167339325, "learning_rate": 7.929504972030003e-06, "loss": 0.7586, "step": 278 },
    { "epoch": 50.90909090909091, "grad_norm": 1.0113625526428223, "learning_rate": 7.891661680839932e-06, "loss": 0.773, "step": 280 },
    { "epoch": 50.90909090909091, "eval_loss": 0.9122073650360107, "eval_runtime": 9.6327, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 280 },
    { "epoch": 51.27272727272727, "grad_norm": 0.8380469083786011, "learning_rate": 7.85356783842216e-06, "loss": 0.7737, "step": 282 },
    { "epoch": 51.63636363636363, "grad_norm": 0.8534033894538879, "learning_rate": 7.815226745459831e-06, "loss": 0.7941, "step": 284 },
    { "epoch": 52.0, "grad_norm": 0.8890909552574158, "learning_rate": 7.776641724059398e-06, "loss": 0.7377, "step": 286 },
    { "epoch": 52.0, "eval_loss": 0.9077624678611755, "eval_runtime": 9.6175, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 286 },
    { "epoch": 52.36363636363637, "grad_norm": 0.9524686336517334, "learning_rate": 7.737816117462752e-06, "loss": 0.7699, "step": 288 },
    { "epoch": 52.72727272727273, "grad_norm": 0.8625631928443909, "learning_rate": 7.698753289757565e-06, "loss": 0.7491, "step": 290 },
    { "epoch": 52.90909090909091, "eval_loss": 0.9050046801567078, "eval_runtime": 9.6225, "eval_samples_per_second": 2.494, "eval_steps_per_second": 2.494, "step": 291 },
    { "epoch": 53.09090909090909, "grad_norm": 1.0375274419784546, "learning_rate": 7.65945662558579e-06, "loss": 0.7661, "step": 292 },
    { "epoch": 53.45454545454545, "grad_norm": 0.8255937695503235, "learning_rate": 7.619929529850397e-06, "loss": 0.7606, "step": 294 },
    { "epoch": 53.81818181818182, "grad_norm": 1.0094412565231323, "learning_rate": 7.580175427420358e-06, "loss": 0.7414, "step": 296 },
    { "epoch": 54.0, "eval_loss": 0.9093080163002014, "eval_runtime": 9.6164, "eval_samples_per_second": 2.496, "eval_steps_per_second": 2.496, "step": 297 },
    { "epoch": 54.18181818181818, "grad_norm": 0.8360889554023743, "learning_rate": 7.54019776283389e-06, "loss": 0.7467, "step": 298 },
    { "epoch": 54.54545454545455, "grad_norm": 0.9806857109069824, "learning_rate": 7.500000000000001e-06, "loss": 0.7445, "step": 300 },
    { "epoch": 54.90909090909091, "grad_norm": 1.0003647804260254, "learning_rate": 7.459585621898353e-06, "loss": 0.7275, "step": 302 },
    { "epoch": 54.90909090909091, "eval_loss": 0.9052907824516296, "eval_runtime": 9.6259, "eval_samples_per_second": 2.493, "eval_steps_per_second": 2.493, "step": 302 },
    { "epoch": 55.27272727272727, "grad_norm": 1.080519437789917, "learning_rate": 7.418958130277483e-06, "loss": 0.7526, "step": 304 },
    { "epoch": 55.63636363636363, "grad_norm": 1.2365224361419678, "learning_rate": 7.378121045351378e-06, "loss": 0.7289, "step": 306 },
    { "epoch": 56.0, "grad_norm": 0.9447788000106812, "learning_rate": 7.337077905494472e-06, "loss": 0.7198, "step": 308 },
    { "epoch": 56.0, "eval_loss": 0.9046055674552917, "eval_runtime": 9.6309, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 308 },
    { "epoch": 56.36363636363637, "grad_norm": 0.9524107575416565, "learning_rate": 7.295832266935059e-06, "loss": 0.766, "step": 310 },
    { "epoch": 56.72727272727273, "grad_norm": 0.9705169796943665, "learning_rate": 7.254387703447154e-06, "loss": 0.7203, "step": 312 },
    { "epoch": 56.90909090909091, "eval_loss": 0.9092791676521301, "eval_runtime": 9.6325, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 313 },
    { "epoch": 57.09090909090909, "grad_norm": 1.0789105892181396, "learning_rate": 7.212747806040845e-06, "loss": 0.6951, "step": 314 },
    { "epoch": 57.45454545454545, "grad_norm": 1.1204413175582886, "learning_rate": 7.170916182651141e-06, "loss": 0.7249, "step": 316 },
    { "epoch": 57.81818181818182, "grad_norm": 1.0801540613174438, "learning_rate": 7.128896457825364e-06, "loss": 0.6903, "step": 318 },
    { "epoch": 58.0, "eval_loss": 0.9042022824287415, "eval_runtime": 9.621, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 319 },
    { "epoch": 58.18181818181818, "grad_norm": 1.052799105644226, "learning_rate": 7.08669227240909e-06, "loss": 0.7306, "step": 320 },
    { "epoch": 58.54545454545455, "grad_norm": 1.020494818687439, "learning_rate": 7.04430728323069e-06, "loss": 0.7288, "step": 322 },
    { "epoch": 58.90909090909091, "grad_norm": 1.0670812129974365, "learning_rate": 7.0017451627844765e-06, "loss": 0.6987, "step": 324 },
    { "epoch": 58.90909090909091, "eval_loss": 0.9106718897819519, "eval_runtime": 9.6324, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 324 },
    { "epoch": 59.27272727272727, "grad_norm": 1.0681415796279907, "learning_rate": 6.959009598912493e-06, "loss": 0.6906, "step": 326 },
    { "epoch": 59.63636363636363, "grad_norm": 1.1053000688552856, "learning_rate": 6.916104294484988e-06, "loss": 0.7063, "step": 328 },
    { "epoch": 60.0, "grad_norm": 1.0191996097564697, "learning_rate": 6.873032967079562e-06, "loss": 0.7141, "step": 330 },
    { "epoch": 60.0, "eval_loss": 0.9078884124755859, "eval_runtime": 9.6326, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 330 },
    { "epoch": 60.36363636363637, "grad_norm": 1.0764459371566772, "learning_rate": 6.829799348659061e-06, "loss": 0.7079, "step": 332 },
    { "epoch": 60.72727272727273, "grad_norm": 1.146618366241455, "learning_rate": 6.7864071852482205e-06, "loss": 0.7023, "step": 334 },
    { "epoch": 60.90909090909091, "eval_loss": 0.9119828343391418, "eval_runtime": 9.6211, "eval_samples_per_second": 2.495, "eval_steps_per_second": 2.495, "step": 335 },
    { "epoch": 61.09090909090909, "grad_norm": 1.274398684501648, "learning_rate": 6.7428602366090764e-06, "loss": 0.6856, "step": 336 },
    { "epoch": 61.45454545454545, "grad_norm": 1.1239506006240845, "learning_rate": 6.699162275915208e-06, "loss": 0.6603, "step": 338 },
    { "epoch": 61.81818181818182, "grad_norm": 1.3075493574142456, "learning_rate": 6.655317089424791e-06, "loss": 0.6945, "step": 340 },
    { "epoch": 62.0, "eval_loss": 0.9086711406707764, "eval_runtime": 9.6164, "eval_samples_per_second": 2.496, "eval_steps_per_second": 2.496, "step": 341 },
    { "epoch": 62.18181818181818, "grad_norm": 1.1385191679000854, "learning_rate": 6.611328476152557e-06, "loss": 0.7058, "step": 342 },
    { "epoch": 62.54545454545455, "grad_norm": 1.2736291885375977, "learning_rate": 6.567200247540599e-06, "loss": 0.6662, "step": 344 },
    { "epoch": 62.90909090909091, "grad_norm": 1.1537060737609863, "learning_rate": 6.522936227128139e-06, "loss": 0.6897, "step": 346 },
    { "epoch": 62.90909090909091, "eval_loss": 0.9129719734191895, "eval_runtime": 9.6291, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 346 },
    { "epoch": 63.27272727272727, "grad_norm": 1.4773812294006348, "learning_rate": 6.4785402502202345e-06, "loss": 0.6822, "step": 348 },
    { "epoch": 63.63636363636363, "grad_norm": 1.3053030967712402, "learning_rate": 6.434016163555452e-06, "loss": 0.6596, "step": 350 },
    { "epoch": 64.0, "grad_norm": 1.1616432666778564, "learning_rate": 6.389367824972575e-06, "loss": 0.6597, "step": 352 },
    { "epoch": 64.0, "eval_loss": 0.9133894443511963, "eval_runtime": 9.6317, "eval_samples_per_second": 2.492, "eval_steps_per_second": 2.492, "step": 352 },
    { "epoch": 64.36363636363636, "grad_norm": 1.0900788307189941, "learning_rate": 6.344599103076329e-06, "loss": 0.6563, "step": 354 },
    { "epoch": 64.72727272727273, "grad_norm": 1.2286537885665894, "learning_rate": 6.299713876902188e-06, "loss": 0.6954, "step": 356 },
    { "epoch": 64.9090909090909, "eval_loss": 0.9120491147041321, "eval_runtime": 9.6347, "eval_samples_per_second": 2.491, "eval_steps_per_second": 2.491, "step": 357 },
    { "epoch": 64.9090909090909, "step": 357, "total_flos": 8.780093794235187e+16, "train_loss": 1.1563805774146436, "train_runtime": 6696.4639, "train_samples_per_second": 1.971, "train_steps_per_second": 0.112 }
  ],
  "logging_steps": 2,
  "max_steps": 750,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 150,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 7,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.780093794235187e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|