{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9952941176470587,
  "eval_steps": 500,
  "global_step": 424,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.023529411764705882,
      "grad_norm": 2.0912532806396484,
      "learning_rate": 4.998284588246634e-05,
      "loss": 1.5824,
      "num_input_tokens_seen": 47376,
      "step": 5
    },
    {
      "epoch": 0.047058823529411764,
      "grad_norm": 1.2301002740859985,
      "learning_rate": 4.9931407070965254e-05,
      "loss": 1.1238,
      "num_input_tokens_seen": 97888,
      "step": 10
    },
    {
      "epoch": 0.07058823529411765,
      "grad_norm": 1.0708130598068237,
      "learning_rate": 4.984575415649019e-05,
      "loss": 0.9474,
      "num_input_tokens_seen": 145664,
      "step": 15
    },
    {
      "epoch": 0.09411764705882353,
      "grad_norm": 0.8778848648071289,
      "learning_rate": 4.97260046830541e-05,
      "loss": 0.7893,
      "num_input_tokens_seen": 192544,
      "step": 20
    },
    {
      "epoch": 0.11764705882352941,
      "grad_norm": 0.9257701635360718,
      "learning_rate": 4.957232298638036e-05,
      "loss": 0.7478,
      "num_input_tokens_seen": 241760,
      "step": 25
    },
    {
      "epoch": 0.1411764705882353,
      "grad_norm": 0.8825921416282654,
      "learning_rate": 4.9384919968379945e-05,
      "loss": 0.6895,
      "num_input_tokens_seen": 289600,
      "step": 30
    },
    {
      "epoch": 0.16470588235294117,
      "grad_norm": 0.8157197833061218,
      "learning_rate": 4.916405280772462e-05,
      "loss": 0.6936,
      "num_input_tokens_seen": 340816,
      "step": 35
    },
    {
      "epoch": 0.18823529411764706,
      "grad_norm": 0.8392460346221924,
      "learning_rate": 4.891002460691306e-05,
      "loss": 0.6835,
      "num_input_tokens_seen": 389424,
      "step": 40
    },
    {
      "epoch": 0.21176470588235294,
      "grad_norm": 0.7645203471183777,
      "learning_rate": 4.862318397631434e-05,
      "loss": 0.6284,
      "num_input_tokens_seen": 438224,
      "step": 45
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 1.0171334743499756,
      "learning_rate": 4.83039245557597e-05,
      "loss": 0.6165,
      "num_input_tokens_seen": 486496,
      "step": 50
    },
    {
      "epoch": 0.25882352941176473,
      "grad_norm": 0.7693806290626526,
      "learning_rate": 4.795268447433907e-05,
      "loss": 0.5757,
      "num_input_tokens_seen": 537520,
      "step": 55
    },
    {
      "epoch": 0.2823529411764706,
      "grad_norm": 0.8031296133995056,
      "learning_rate": 4.756994574914359e-05,
      "loss": 0.5638,
      "num_input_tokens_seen": 584288,
      "step": 60
    },
    {
      "epoch": 0.3058823529411765,
      "grad_norm": 0.7544689774513245,
      "learning_rate": 4.715623362377939e-05,
      "loss": 0.5522,
      "num_input_tokens_seen": 633984,
      "step": 65
    },
    {
      "epoch": 0.32941176470588235,
      "grad_norm": 0.8555608987808228,
      "learning_rate": 4.6712115847560355e-05,
      "loss": 0.5217,
      "num_input_tokens_seen": 685600,
      "step": 70
    },
    {
      "epoch": 0.35294117647058826,
      "grad_norm": 0.8717510104179382,
      "learning_rate": 4.6238201896369055e-05,
      "loss": 0.5436,
      "num_input_tokens_seen": 734496,
      "step": 75
    },
    {
      "epoch": 0.3764705882352941,
      "grad_norm": 0.9173887372016907,
      "learning_rate": 4.573514213625505e-05,
      "loss": 0.514,
      "num_input_tokens_seen": 784720,
      "step": 80
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.8762289881706238,
      "learning_rate": 4.5203626930918455e-05,
      "loss": 0.5073,
      "num_input_tokens_seen": 837808,
      "step": 85
    },
    {
      "epoch": 0.4235294117647059,
      "grad_norm": 0.9715367555618286,
      "learning_rate": 4.464438569430354e-05,
      "loss": 0.5384,
      "num_input_tokens_seen": 888032,
      "step": 90
    },
    {
      "epoch": 0.4470588235294118,
      "grad_norm": 0.9435672163963318,
      "learning_rate": 4.40581858896025e-05,
      "loss": 0.521,
      "num_input_tokens_seen": 938032,
      "step": 95
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 0.8672516345977783,
      "learning_rate": 4.344583197604318e-05,
      "loss": 0.4765,
      "num_input_tokens_seen": 988176,
      "step": 100
    },
    {
      "epoch": 0.49411764705882355,
      "grad_norm": 1.0466370582580566,
      "learning_rate": 4.2808164304906026e-05,
      "loss": 0.5121,
      "num_input_tokens_seen": 1040016,
      "step": 105
    },
    {
      "epoch": 0.5176470588235295,
      "grad_norm": 0.9898332953453064,
      "learning_rate": 4.214605796628527e-05,
      "loss": 0.4972,
      "num_input_tokens_seen": 1090624,
      "step": 110
    },
    {
      "epoch": 0.5411764705882353,
      "grad_norm": 1.0176562070846558,
      "learning_rate": 4.14604215881771e-05,
      "loss": 0.488,
      "num_input_tokens_seen": 1143280,
      "step": 115
    },
    {
      "epoch": 0.5647058823529412,
      "grad_norm": 0.9806397557258606,
      "learning_rate": 4.075219608954278e-05,
      "loss": 0.489,
      "num_input_tokens_seen": 1191616,
      "step": 120
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 0.9629871249198914,
      "learning_rate": 4.00223533890578e-05,
      "loss": 0.4853,
      "num_input_tokens_seen": 1240144,
      "step": 125
    },
    {
      "epoch": 0.611764705882353,
      "grad_norm": 0.9678134918212891,
      "learning_rate": 3.927189507131938e-05,
      "loss": 0.4718,
      "num_input_tokens_seen": 1288912,
      "step": 130
    },
    {
      "epoch": 0.6352941176470588,
      "grad_norm": 1.0481890439987183,
      "learning_rate": 3.8501851012342446e-05,
      "loss": 0.4936,
      "num_input_tokens_seen": 1335472,
      "step": 135
    },
    {
      "epoch": 0.6588235294117647,
      "grad_norm": 0.9465301632881165,
      "learning_rate": 3.7713277966230514e-05,
      "loss": 0.4673,
      "num_input_tokens_seen": 1385568,
      "step": 140
    },
    {
      "epoch": 0.6823529411764706,
      "grad_norm": 1.0117136240005493,
      "learning_rate": 3.690725811496092e-05,
      "loss": 0.4676,
      "num_input_tokens_seen": 1434384,
      "step": 145
    },
    {
      "epoch": 0.7058823529411765,
      "grad_norm": 0.9891406297683716,
      "learning_rate": 3.608489758327472e-05,
      "loss": 0.5079,
      "num_input_tokens_seen": 1488448,
      "step": 150
    },
    {
      "epoch": 0.7294117647058823,
      "grad_norm": 0.9826067090034485,
      "learning_rate": 3.524732492070915e-05,
      "loss": 0.4713,
      "num_input_tokens_seen": 1537472,
      "step": 155
    },
    {
      "epoch": 0.7529411764705882,
      "grad_norm": 0.9447062611579895,
      "learning_rate": 3.4395689552855955e-05,
      "loss": 0.4616,
      "num_input_tokens_seen": 1587072,
      "step": 160
    },
    {
      "epoch": 0.7764705882352941,
      "grad_norm": 1.0301213264465332,
      "learning_rate": 3.3531160203970805e-05,
      "loss": 0.4445,
      "num_input_tokens_seen": 1636624,
      "step": 165
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.1652017831802368,
      "learning_rate": 3.265492329309867e-05,
      "loss": 0.4762,
      "num_input_tokens_seen": 1687904,
      "step": 170
    },
    {
      "epoch": 0.8235294117647058,
      "grad_norm": 1.1255770921707153,
      "learning_rate": 3.1768181305916066e-05,
      "loss": 0.4626,
      "num_input_tokens_seen": 1734544,
      "step": 175
    },
    {
      "epoch": 0.8470588235294118,
      "grad_norm": 1.0519174337387085,
      "learning_rate": 3.0872151144524595e-05,
      "loss": 0.4554,
      "num_input_tokens_seen": 1781216,
      "step": 180
    },
    {
      "epoch": 0.8705882352941177,
      "grad_norm": 1.1458220481872559,
      "learning_rate": 2.996806245746044e-05,
      "loss": 0.4418,
      "num_input_tokens_seen": 1830144,
      "step": 185
    },
    {
      "epoch": 0.8941176470588236,
      "grad_norm": 1.2313694953918457,
      "learning_rate": 2.9057155952211502e-05,
      "loss": 0.4676,
      "num_input_tokens_seen": 1876288,
      "step": 190
    },
    {
      "epoch": 0.9176470588235294,
      "grad_norm": 1.2109845876693726,
      "learning_rate": 2.8140681692558035e-05,
      "loss": 0.4425,
      "num_input_tokens_seen": 1925792,
      "step": 195
    },
    {
      "epoch": 0.9411764705882353,
      "grad_norm": 1.0616289377212524,
      "learning_rate": 2.7219897383073373e-05,
      "loss": 0.4548,
      "num_input_tokens_seen": 1974368,
      "step": 200
    },
    {
      "epoch": 0.9647058823529412,
      "grad_norm": 1.1032544374465942,
      "learning_rate": 2.629606664313896e-05,
      "loss": 0.4302,
      "num_input_tokens_seen": 2022080,
      "step": 205
    },
    {
      "epoch": 0.9882352941176471,
      "grad_norm": 1.2024751901626587,
      "learning_rate": 2.537045727284232e-05,
      "loss": 0.4132,
      "num_input_tokens_seen": 2069920,
      "step": 210
    },
    {
      "epoch": 1.011764705882353,
      "grad_norm": 1.0001943111419678,
      "learning_rate": 2.444433951313772e-05,
      "loss": 0.4214,
      "num_input_tokens_seen": 2120704,
      "step": 215
    },
    {
      "epoch": 1.035294117647059,
      "grad_norm": 1.0397372245788574,
      "learning_rate": 2.3518984302657146e-05,
      "loss": 0.4088,
      "num_input_tokens_seen": 2171968,
      "step": 220
    },
    {
      "epoch": 1.0588235294117647,
      "grad_norm": 1.1281183958053589,
      "learning_rate": 2.259566153356389e-05,
      "loss": 0.4199,
      "num_input_tokens_seen": 2223472,
      "step": 225
    },
    {
      "epoch": 1.0823529411764705,
      "grad_norm": 1.1637018918991089,
      "learning_rate": 2.1675638308842145e-05,
      "loss": 0.4273,
      "num_input_tokens_seen": 2273296,
      "step": 230
    },
    {
      "epoch": 1.1058823529411765,
      "grad_norm": 1.2093477249145508,
      "learning_rate": 2.0760177203414368e-05,
      "loss": 0.4042,
      "num_input_tokens_seen": 2323008,
      "step": 235
    },
    {
      "epoch": 1.1294117647058823,
      "grad_norm": 1.1976971626281738,
      "learning_rate": 1.9850534531472546e-05,
      "loss": 0.4275,
      "num_input_tokens_seen": 2373744,
      "step": 240
    },
    {
      "epoch": 1.1529411764705881,
      "grad_norm": 1.1152101755142212,
      "learning_rate": 1.8947958622401328e-05,
      "loss": 0.4483,
      "num_input_tokens_seen": 2425216,
      "step": 245
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 1.0972641706466675,
      "learning_rate": 1.8053688107658908e-05,
      "loss": 0.4024,
      "num_input_tokens_seen": 2472864,
      "step": 250
    },
    {
      "epoch": 1.2,
      "grad_norm": 1.12717604637146,
      "learning_rate": 1.7168950220966614e-05,
      "loss": 0.4363,
      "num_input_tokens_seen": 2523344,
      "step": 255
    },
    {
      "epoch": 1.223529411764706,
      "grad_norm": 1.2230875492095947,
      "learning_rate": 1.6294959114140034e-05,
      "loss": 0.383,
      "num_input_tokens_seen": 2571936,
      "step": 260
    },
    {
      "epoch": 1.2470588235294118,
      "grad_norm": 1.2656056880950928,
      "learning_rate": 1.5432914190872757e-05,
      "loss": 0.4287,
      "num_input_tokens_seen": 2622560,
      "step": 265
    },
    {
      "epoch": 1.2705882352941176,
      "grad_norm": 1.186041235923767,
      "learning_rate": 1.4583998460759424e-05,
      "loss": 0.3686,
      "num_input_tokens_seen": 2669232,
      "step": 270
    },
    {
      "epoch": 1.2941176470588236,
      "grad_norm": 1.2343655824661255,
      "learning_rate": 1.3749376915816886e-05,
      "loss": 0.4059,
      "num_input_tokens_seen": 2718128,
      "step": 275
    },
    {
      "epoch": 1.3176470588235294,
      "grad_norm": 1.131495714187622,
      "learning_rate": 1.2930194931731382e-05,
      "loss": 0.4242,
      "num_input_tokens_seen": 2766432,
      "step": 280
    },
    {
      "epoch": 1.3411764705882354,
      "grad_norm": 1.0469223260879517,
      "learning_rate": 1.2127576696025828e-05,
      "loss": 0.4326,
      "num_input_tokens_seen": 2819760,
      "step": 285
    },
    {
      "epoch": 1.3647058823529412,
      "grad_norm": 1.315108060836792,
      "learning_rate": 1.1342623665304209e-05,
      "loss": 0.4266,
      "num_input_tokens_seen": 2867920,
      "step": 290
    },
    {
      "epoch": 1.388235294117647,
      "grad_norm": 1.168626308441162,
      "learning_rate": 1.0576413053690327e-05,
      "loss": 0.4049,
      "num_input_tokens_seen": 2914944,
      "step": 295
    },
    {
      "epoch": 1.4117647058823528,
      "grad_norm": 1.13846755027771,
      "learning_rate": 9.829996354535172e-06,
      "loss": 0.4102,
      "num_input_tokens_seen": 2965600,
      "step": 300
    },
    {
      "epoch": 1.4352941176470588,
      "grad_norm": 1.24168062210083,
      "learning_rate": 9.104397897421623e-06,
      "loss": 0.4261,
      "num_input_tokens_seen": 3015728,
      "step": 305
    },
    {
      "epoch": 1.4588235294117646,
      "grad_norm": 1.1973497867584229,
      "learning_rate": 8.400613442446948e-06,
      "loss": 0.4109,
      "num_input_tokens_seen": 3065328,
      "step": 310
    },
    {
      "epoch": 1.4823529411764707,
      "grad_norm": 1.0736428499221802,
      "learning_rate": 7.719608813711848e-06,
      "loss": 0.3982,
      "num_input_tokens_seen": 3114720,
      "step": 315
    },
    {
      "epoch": 1.5058823529411764,
      "grad_norm": 1.2293542623519897,
      "learning_rate": 7.062318573891716e-06,
      "loss": 0.4265,
      "num_input_tokens_seen": 3163008,
      "step": 320
    },
    {
      "epoch": 1.5294117647058822,
      "grad_norm": 1.2456164360046387,
      "learning_rate": 6.429644741708779e-06,
      "loss": 0.4014,
      "num_input_tokens_seen": 3211408,
      "step": 325
    },
    {
      "epoch": 1.5529411764705883,
      "grad_norm": 1.1817117929458618,
      "learning_rate": 5.822455554065217e-06,
      "loss": 0.425,
      "num_input_tokens_seen": 3263808,
      "step": 330
    },
    {
      "epoch": 1.576470588235294,
      "grad_norm": 1.2817317247390747,
      "learning_rate": 5.241584274536259e-06,
      "loss": 0.3785,
      "num_input_tokens_seen": 3311056,
      "step": 335
    },
    {
      "epoch": 1.6,
      "grad_norm": 1.2116546630859375,
      "learning_rate": 4.687828049857967e-06,
      "loss": 0.4034,
      "num_input_tokens_seen": 3358224,
      "step": 340
    },
    {
      "epoch": 1.6235294117647059,
      "grad_norm": 1.3325591087341309,
      "learning_rate": 4.161946815979403e-06,
      "loss": 0.4075,
      "num_input_tokens_seen": 3409024,
      "step": 345
    },
    {
      "epoch": 1.6470588235294117,
      "grad_norm": 1.0810184478759766,
      "learning_rate": 3.6646622551801345e-06,
      "loss": 0.3496,
      "num_input_tokens_seen": 3457616,
      "step": 350
    },
    {
      "epoch": 1.6705882352941175,
      "grad_norm": 1.0979664325714111,
      "learning_rate": 3.19665680568445e-06,
      "loss": 0.4106,
      "num_input_tokens_seen": 3510768,
      "step": 355
    },
    {
      "epoch": 1.6941176470588235,
      "grad_norm": 1.143621563911438,
      "learning_rate": 2.75857272513132e-06,
      "loss": 0.4093,
      "num_input_tokens_seen": 3556912,
      "step": 360
    },
    {
      "epoch": 1.7176470588235295,
      "grad_norm": 1.2018797397613525,
      "learning_rate": 2.351011209185336e-06,
      "loss": 0.379,
      "num_input_tokens_seen": 3606016,
      "step": 365
    },
    {
      "epoch": 1.7411764705882353,
      "grad_norm": 1.0897295475006104,
      "learning_rate": 1.9745315664982276e-06,
      "loss": 0.3996,
      "num_input_tokens_seen": 3654256,
      "step": 370
    },
    {
      "epoch": 1.7647058823529411,
      "grad_norm": 1.2107083797454834,
      "learning_rate": 1.6296504511531836e-06,
      "loss": 0.4016,
      "num_input_tokens_seen": 3704528,
      "step": 375
    },
    {
      "epoch": 1.788235294117647,
      "grad_norm": 1.1322784423828125,
      "learning_rate": 1.3168411536452152e-06,
      "loss": 0.3769,
      "num_input_tokens_seen": 3752208,
      "step": 380
    },
    {
      "epoch": 1.811764705882353,
      "grad_norm": 1.1542903184890747,
      "learning_rate": 1.036532951370736e-06,
      "loss": 0.4045,
      "num_input_tokens_seen": 3805232,
      "step": 385
    },
    {
      "epoch": 1.835294117647059,
      "grad_norm": 1.2877768278121948,
      "learning_rate": 7.891105195175358e-07,
      "loss": 0.4051,
      "num_input_tokens_seen": 3854864,
      "step": 390
    },
    {
      "epoch": 1.8588235294117648,
      "grad_norm": 1.2475833892822266,
      "learning_rate": 5.749134031637349e-07,
      "loss": 0.3934,
      "num_input_tokens_seen": 3904880,
      "step": 395
    },
    {
      "epoch": 1.8823529411764706,
      "grad_norm": 1.152550458908081,
      "learning_rate": 3.9423555131007925e-07,
      "loss": 0.41,
      "num_input_tokens_seen": 3953728,
      "step": 400
    },
    {
      "epoch": 1.9058823529411764,
      "grad_norm": 1.1438477039337158,
      "learning_rate": 2.473249134850808e-07,
      "loss": 0.3902,
      "num_input_tokens_seen": 4002944,
      "step": 405
    },
    {
      "epoch": 1.9294117647058824,
      "grad_norm": 1.143480896949768,
      "learning_rate": 1.343830994765982e-07,
      "loss": 0.3766,
      "num_input_tokens_seen": 4050880,
      "step": 410
    },
    {
      "epoch": 1.9529411764705882,
      "grad_norm": 1.2639435529708862,
      "learning_rate": 5.5565102656787714e-08,
      "loss": 0.3826,
      "num_input_tokens_seen": 4098064,
      "step": 415
    },
    {
      "epoch": 1.9764705882352942,
      "grad_norm": 1.1847045421600342,
      "learning_rate": 1.0979087280141298e-08,
      "loss": 0.4366,
      "num_input_tokens_seen": 4147296,
      "step": 420
    },
    {
      "epoch": 1.9952941176470587,
      "num_input_tokens_seen": 4186528,
      "step": 424,
      "total_flos": 5.002960174252032e+16,
      "train_loss": 0.48991220936460317,
      "train_runtime": 1751.8031,
      "train_samples_per_second": 3.882,
      "train_steps_per_second": 0.242
    }
  ],
  "logging_steps": 5,
  "max_steps": 424,
  "num_input_tokens_seen": 4186528,
  "num_train_epochs": 2,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.002960174252032e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}