|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.006853853007699162,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 9.138470676932215e-05,
      "grad_norm": 14.466765403747559,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 6.6839,
      "step": 1
    },
    {
      "epoch": 9.138470676932215e-05,
      "eval_loss": 6.981680870056152,
      "eval_runtime": 1042.4912,
      "eval_samples_per_second": 8.839,
      "eval_steps_per_second": 4.42,
      "step": 1
    },
    {
      "epoch": 0.0001827694135386443,
      "grad_norm": 15.177668571472168,
      "learning_rate": 6.666666666666667e-05,
      "loss": 7.6193,
      "step": 2
    },
    {
      "epoch": 0.0002741541203079665,
      "grad_norm": 15.911011695861816,
      "learning_rate": 0.0001,
      "loss": 6.6159,
      "step": 3
    },
    {
      "epoch": 0.0003655388270772886,
      "grad_norm": 13.123141288757324,
      "learning_rate": 9.99524110790929e-05,
      "loss": 5.8581,
      "step": 4
    },
    {
      "epoch": 0.0004569235338466108,
      "grad_norm": 13.573848724365234,
      "learning_rate": 9.980973490458728e-05,
      "loss": 5.3065,
      "step": 5
    },
    {
      "epoch": 0.000548308240615933,
      "grad_norm": 11.101449966430664,
      "learning_rate": 9.957224306869053e-05,
      "loss": 4.1295,
      "step": 6
    },
    {
      "epoch": 0.0006396929473852551,
      "grad_norm": 8.28058910369873,
      "learning_rate": 9.924038765061042e-05,
      "loss": 3.1179,
      "step": 7
    },
    {
      "epoch": 0.0007310776541545772,
      "grad_norm": 9.096076965332031,
      "learning_rate": 9.881480035599667e-05,
      "loss": 3.0133,
      "step": 8
    },
    {
      "epoch": 0.0008224623609238994,
      "grad_norm": 8.431253433227539,
      "learning_rate": 9.829629131445342e-05,
      "loss": 2.8908,
      "step": 9
    },
    {
      "epoch": 0.0009138470676932215,
      "grad_norm": 7.56347131729126,
      "learning_rate": 9.768584753741134e-05,
      "loss": 2.4816,
      "step": 10
    },
    {
      "epoch": 0.0010052317744625437,
      "grad_norm": 9.71700668334961,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.5144,
      "step": 11
    },
    {
      "epoch": 0.001096616481231866,
      "grad_norm": 8.16706371307373,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.1208,
      "step": 12
    },
    {
      "epoch": 0.001188001188001188,
      "grad_norm": 6.899288654327393,
      "learning_rate": 9.53153893518325e-05,
      "loss": 1.9213,
      "step": 13
    },
    {
      "epoch": 0.0012793858947705101,
      "grad_norm": 7.026676654815674,
      "learning_rate": 9.435054165891109e-05,
      "loss": 1.8953,
      "step": 14
    },
    {
      "epoch": 0.0013707706015398324,
      "grad_norm": 6.5173563957214355,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.0217,
      "step": 15
    },
    {
      "epoch": 0.0014621553083091544,
      "grad_norm": 6.742232799530029,
      "learning_rate": 9.21695722906443e-05,
      "loss": 1.8169,
      "step": 16
    },
    {
      "epoch": 0.0015535400150784766,
      "grad_norm": 5.812262535095215,
      "learning_rate": 9.09576022144496e-05,
      "loss": 1.9582,
      "step": 17
    },
    {
      "epoch": 0.0016449247218477988,
      "grad_norm": 6.068929672241211,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.9146,
      "step": 18
    },
    {
      "epoch": 0.0017363094286171209,
      "grad_norm": 5.802151203155518,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.1055,
      "step": 19
    },
    {
      "epoch": 0.001827694135386443,
      "grad_norm": 6.522188663482666,
      "learning_rate": 8.68638668405062e-05,
      "loss": 1.6778,
      "step": 20
    },
    {
      "epoch": 0.0019190788421557653,
      "grad_norm": 5.945748805999756,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.891,
      "step": 21
    },
    {
      "epoch": 0.0020104635489250873,
      "grad_norm": 4.804516315460205,
      "learning_rate": 8.377951038078302e-05,
      "loss": 1.6599,
      "step": 22
    },
    {
      "epoch": 0.0021018482556944096,
      "grad_norm": 5.1375732421875,
      "learning_rate": 8.213938048432697e-05,
      "loss": 1.6169,
      "step": 23
    },
    {
      "epoch": 0.002193232962463732,
      "grad_norm": 4.924495220184326,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.4938,
      "step": 24
    },
    {
      "epoch": 0.002284617669233054,
      "grad_norm": 4.743460655212402,
      "learning_rate": 7.86788218175523e-05,
      "loss": 1.2229,
      "step": 25
    },
    {
      "epoch": 0.002284617669233054,
      "eval_loss": 1.457080364227295,
      "eval_runtime": 1043.5292,
      "eval_samples_per_second": 8.831,
      "eval_steps_per_second": 4.416,
      "step": 25
    },
    {
      "epoch": 0.002376002376002376,
      "grad_norm": 5.3718156814575195,
      "learning_rate": 7.68649804173412e-05,
      "loss": 1.4273,
      "step": 26
    },
    {
      "epoch": 0.002467387082771698,
      "grad_norm": 4.234831809997559,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.1389,
      "step": 27
    },
    {
      "epoch": 0.0025587717895410203,
      "grad_norm": 5.56032133102417,
      "learning_rate": 7.308743066175172e-05,
      "loss": 1.53,
      "step": 28
    },
    {
      "epoch": 0.0026501564963103425,
      "grad_norm": 5.804846286773682,
      "learning_rate": 7.113091308703498e-05,
      "loss": 1.2418,
      "step": 29
    },
    {
      "epoch": 0.0027415412030796647,
      "grad_norm": 4.4405317306518555,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.1612,
      "step": 30
    },
    {
      "epoch": 0.002832925909848987,
      "grad_norm": 5.623289585113525,
      "learning_rate": 6.710100716628344e-05,
      "loss": 1.4001,
      "step": 31
    },
    {
      "epoch": 0.0029243106166183088,
      "grad_norm": 5.767253875732422,
      "learning_rate": 6.503528997521366e-05,
      "loss": 1.5194,
      "step": 32
    },
    {
      "epoch": 0.003015695323387631,
      "grad_norm": 5.8981852531433105,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.3634,
      "step": 33
    },
    {
      "epoch": 0.0031070800301569532,
      "grad_norm": 6.315387725830078,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 1.2824,
      "step": 34
    },
    {
      "epoch": 0.0031984647369262755,
      "grad_norm": 5.918696403503418,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.2946,
      "step": 35
    },
    {
      "epoch": 0.0032898494436955977,
      "grad_norm": 5.576779842376709,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.227,
      "step": 36
    },
    {
      "epoch": 0.0033812341504649195,
      "grad_norm": 5.354219913482666,
      "learning_rate": 5.435778713738292e-05,
      "loss": 1.2608,
      "step": 37
    },
    {
      "epoch": 0.0034726188572342417,
      "grad_norm": 5.7497029304504395,
      "learning_rate": 5.218096936826681e-05,
      "loss": 1.5261,
      "step": 38
    },
    {
      "epoch": 0.003564003564003564,
      "grad_norm": 4.693356990814209,
      "learning_rate": 5e-05,
      "loss": 1.2093,
      "step": 39
    },
    {
      "epoch": 0.003655388270772886,
      "grad_norm": 5.870346546173096,
      "learning_rate": 4.781903063173321e-05,
      "loss": 1.5143,
      "step": 40
    },
    {
      "epoch": 0.0037467729775422084,
      "grad_norm": 5.203154563903809,
      "learning_rate": 4.564221286261709e-05,
      "loss": 1.0487,
      "step": 41
    },
    {
      "epoch": 0.0038381576843115306,
      "grad_norm": 5.0768585205078125,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.3043,
      "step": 42
    },
    {
      "epoch": 0.003929542391080853,
      "grad_norm": 4.440788269042969,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.9258,
      "step": 43
    },
    {
      "epoch": 0.004020927097850175,
      "grad_norm": 5.428691387176514,
      "learning_rate": 3.917801930309486e-05,
      "loss": 1.3457,
      "step": 44
    },
    {
      "epoch": 0.004112311804619497,
      "grad_norm": 4.545478343963623,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.9944,
      "step": 45
    },
    {
      "epoch": 0.004203696511388819,
      "grad_norm": 6.674732208251953,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 1.2533,
      "step": 46
    },
    {
      "epoch": 0.004295081218158141,
      "grad_norm": 5.448188304901123,
      "learning_rate": 3.289899283371657e-05,
      "loss": 1.247,
      "step": 47
    },
    {
      "epoch": 0.004386465924927464,
      "grad_norm": 5.576430797576904,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.2764,
      "step": 48
    },
    {
      "epoch": 0.004477850631696785,
      "grad_norm": 5.393462657928467,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.9273,
      "step": 49
    },
    {
      "epoch": 0.004569235338466108,
      "grad_norm": 5.323672771453857,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 1.0806,
      "step": 50
    },
    {
      "epoch": 0.004569235338466108,
      "eval_loss": 1.1224627494812012,
      "eval_runtime": 1043.091,
      "eval_samples_per_second": 8.834,
      "eval_steps_per_second": 4.418,
      "step": 50
    },
    {
      "epoch": 0.00466062004523543,
      "grad_norm": 5.613043308258057,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.1746,
      "step": 51
    },
    {
      "epoch": 0.004752004752004752,
      "grad_norm": 6.032951831817627,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 1.348,
      "step": 52
    },
    {
      "epoch": 0.004843389458774074,
      "grad_norm": 4.618442058563232,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.808,
      "step": 53
    },
    {
      "epoch": 0.004934774165543396,
      "grad_norm": 4.89528226852417,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.1702,
      "step": 54
    },
    {
      "epoch": 0.005026158872312719,
      "grad_norm": 5.89093017578125,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 1.1912,
      "step": 55
    },
    {
      "epoch": 0.005117543579082041,
      "grad_norm": 4.801693439483643,
      "learning_rate": 1.622048961921699e-05,
      "loss": 1.0937,
      "step": 56
    },
    {
      "epoch": 0.005208928285851362,
      "grad_norm": 6.408935070037842,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.3091,
      "step": 57
    },
    {
      "epoch": 0.005300312992620685,
      "grad_norm": 5.189580917358398,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.8866,
      "step": 58
    },
    {
      "epoch": 0.005391697699390007,
      "grad_norm": 5.219249248504639,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 1.0799,
      "step": 59
    },
    {
      "epoch": 0.0054830824061593295,
      "grad_norm": 5.454803466796875,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.0661,
      "step": 60
    },
    {
      "epoch": 0.005574467112928651,
      "grad_norm": 4.765921592712402,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.7579,
      "step": 61
    },
    {
      "epoch": 0.005665851819697974,
      "grad_norm": 5.725944519042969,
      "learning_rate": 7.830427709355725e-06,
      "loss": 1.0852,
      "step": 62
    },
    {
      "epoch": 0.005757236526467296,
      "grad_norm": 4.559783935546875,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.9034,
      "step": 63
    },
    {
      "epoch": 0.0058486212332366175,
      "grad_norm": 5.786202430725098,
      "learning_rate": 5.649458341088915e-06,
      "loss": 1.0369,
      "step": 64
    },
    {
      "epoch": 0.00594000594000594,
      "grad_norm": 4.688554286956787,
      "learning_rate": 4.684610648167503e-06,
      "loss": 1.0344,
      "step": 65
    },
    {
      "epoch": 0.006031390646775262,
      "grad_norm": 5.780782699584961,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.9494,
      "step": 66
    },
    {
      "epoch": 0.006122775353544585,
      "grad_norm": 5.427234649658203,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.9841,
      "step": 67
    },
    {
      "epoch": 0.0062141600603139065,
      "grad_norm": 5.6869215965271,
      "learning_rate": 2.314152462588659e-06,
      "loss": 1.0319,
      "step": 68
    },
    {
      "epoch": 0.006305544767083228,
      "grad_norm": 5.268839359283447,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.0643,
      "step": 69
    },
    {
      "epoch": 0.006396929473852551,
      "grad_norm": 5.248642921447754,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 1.174,
      "step": 70
    },
    {
      "epoch": 0.006488314180621873,
      "grad_norm": 6.898940086364746,
      "learning_rate": 7.596123493895991e-07,
      "loss": 1.2062,
      "step": 71
    },
    {
      "epoch": 0.006579698887391195,
      "grad_norm": 5.899970531463623,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.9557,
      "step": 72
    },
    {
      "epoch": 0.006671083594160517,
      "grad_norm": 5.415920734405518,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 1.3895,
      "step": 73
    },
    {
      "epoch": 0.006762468300929839,
      "grad_norm": 5.120940685272217,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.8528,
      "step": 74
    },
    {
      "epoch": 0.006853853007699162,
      "grad_norm": 5.29119873046875,
      "learning_rate": 0.0,
      "loss": 1.0819,
      "step": 75
    },
    {
      "epoch": 0.006853853007699162,
      "eval_loss": 1.0691897869110107,
      "eval_runtime": 1043.1267,
      "eval_samples_per_second": 8.834,
      "eval_steps_per_second": 4.417,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.01589487157248e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}