{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.4,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 0.4841386675834656,
      "learning_rate": 4.997807075247146e-05,
      "loss": 0.688,
      "num_input_tokens_seen": 46776,
      "step": 5
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.3365215063095093,
      "learning_rate": 4.991232148123761e-05,
      "loss": 0.763,
      "num_input_tokens_seen": 85136,
      "step": 10
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.5198595523834229,
      "learning_rate": 4.980286753286195e-05,
      "loss": 0.6882,
      "num_input_tokens_seen": 126584,
      "step": 15
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.40807807445526123,
      "learning_rate": 4.964990092676263e-05,
      "loss": 0.6951,
      "num_input_tokens_seen": 167968,
      "step": 20
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.439756840467453,
      "learning_rate": 4.9453690018345144e-05,
      "loss": 0.5008,
      "num_input_tokens_seen": 206832,
      "step": 25
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.45932796597480774,
      "learning_rate": 4.9214579028215776e-05,
      "loss": 0.542,
      "num_input_tokens_seen": 243656,
      "step": 30
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.3281826376914978,
      "learning_rate": 4.893298743830168e-05,
      "loss": 0.5369,
      "num_input_tokens_seen": 284016,
      "step": 35
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.2551577389240265,
      "learning_rate": 4.860940925593703e-05,
      "loss": 0.4948,
      "num_input_tokens_seen": 327408,
      "step": 40
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.5936368107795715,
      "learning_rate": 4.8244412147206284e-05,
      "loss": 0.5244,
      "num_input_tokens_seen": 363376,
      "step": 45
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.2480110377073288,
      "learning_rate": 4.783863644106502e-05,
      "loss": 0.421,
      "num_input_tokens_seen": 398840,
      "step": 50
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.4601157307624817,
      "learning_rate": 4.7392794005985326e-05,
      "loss": 0.4517,
      "num_input_tokens_seen": 436136,
      "step": 55
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.6387960314750671,
      "learning_rate": 4.690766700109659e-05,
      "loss": 0.4661,
      "num_input_tokens_seen": 475856,
      "step": 60
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.4365377128124237,
      "learning_rate": 4.638410650401267e-05,
      "loss": 0.4928,
      "num_input_tokens_seen": 514496,
      "step": 65
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.2992519438266754,
      "learning_rate": 4.5823031017752485e-05,
      "loss": 0.5424,
      "num_input_tokens_seen": 554424,
      "step": 70
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.828787088394165,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.5419,
      "num_input_tokens_seen": 593264,
      "step": 75
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.2943418622016907,
      "learning_rate": 4.4592336433146e-05,
      "loss": 0.4558,
      "num_input_tokens_seen": 630264,
      "step": 80
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.3707512617111206,
      "learning_rate": 4.3924876391293915e-05,
      "loss": 0.5656,
      "num_input_tokens_seen": 668864,
      "step": 85
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.4809654653072357,
      "learning_rate": 4.3224215685535294e-05,
      "loss": 0.4832,
      "num_input_tokens_seen": 712504,
      "step": 90
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.516292929649353,
      "learning_rate": 4.249158351283414e-05,
      "loss": 0.4626,
      "num_input_tokens_seen": 748872,
      "step": 95
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.5604212284088135,
      "learning_rate": 4.172826515897146e-05,
      "loss": 0.4837,
      "num_input_tokens_seen": 788408,
      "step": 100
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.7355071902275085,
      "learning_rate": 4.093559974371725e-05,
      "loss": 0.5144,
      "num_input_tokens_seen": 828448,
      "step": 105
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.46999862790107727,
      "learning_rate": 4.011497787155938e-05,
      "loss": 0.493,
      "num_input_tokens_seen": 864680,
      "step": 110
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.5488362312316895,
      "learning_rate": 3.92678391921108e-05,
      "loss": 0.4083,
      "num_input_tokens_seen": 902568,
      "step": 115
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.5053719282150269,
      "learning_rate": 3.8395669874474915e-05,
      "loss": 0.5172,
      "num_input_tokens_seen": 944752,
      "step": 120
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.44234323501586914,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.5843,
      "num_input_tokens_seen": 988656,
      "step": 125
    },
    {
      "epoch": 1.04,
      "grad_norm": 0.4518623352050781,
      "learning_rate": 3.6582400877996546e-05,
      "loss": 0.4567,
      "num_input_tokens_seen": 1028584,
      "step": 130
    },
    {
      "epoch": 1.08,
      "grad_norm": 0.39792686700820923,
      "learning_rate": 3.564448228912682e-05,
      "loss": 0.418,
      "num_input_tokens_seen": 1065104,
      "step": 135
    },
    {
      "epoch": 1.12,
      "grad_norm": 0.5141780376434326,
      "learning_rate": 3.4687889661302576e-05,
      "loss": 0.3785,
      "num_input_tokens_seen": 1106744,
      "step": 140
    },
    {
      "epoch": 1.16,
      "grad_norm": 0.5755515098571777,
      "learning_rate": 3.3714301183045385e-05,
      "loss": 0.4097,
      "num_input_tokens_seen": 1140672,
      "step": 145
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.4626426696777344,
      "learning_rate": 3.272542485937369e-05,
      "loss": 0.4507,
      "num_input_tokens_seen": 1177280,
      "step": 150
    },
    {
      "epoch": 1.24,
      "grad_norm": 0.49699896574020386,
      "learning_rate": 3.172299551538164e-05,
      "loss": 0.368,
      "num_input_tokens_seen": 1215744,
      "step": 155
    },
    {
      "epoch": 1.28,
      "grad_norm": 0.5842998027801514,
      "learning_rate": 3.0708771752766394e-05,
      "loss": 0.4301,
      "num_input_tokens_seen": 1257040,
      "step": 160
    },
    {
      "epoch": 1.32,
      "grad_norm": 0.46173322200775146,
      "learning_rate": 2.9684532864643122e-05,
      "loss": 0.4488,
      "num_input_tokens_seen": 1295992,
      "step": 165
    },
    {
      "epoch": 1.3599999999999999,
      "grad_norm": 0.4926168918609619,
      "learning_rate": 2.8652075714060295e-05,
      "loss": 0.4075,
      "num_input_tokens_seen": 1334672,
      "step": 170
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.5110897421836853,
      "learning_rate": 2.761321158169134e-05,
      "loss": 0.4991,
      "num_input_tokens_seen": 1379280,
      "step": 175
    },
    {
      "epoch": 1.44,
      "grad_norm": 0.33892518281936646,
      "learning_rate": 2.656976298823284e-05,
      "loss": 0.4894,
      "num_input_tokens_seen": 1419704,
      "step": 180
    },
    {
      "epoch": 1.48,
      "grad_norm": 0.46542254090309143,
      "learning_rate": 2.5523560497083926e-05,
      "loss": 0.4967,
      "num_input_tokens_seen": 1459256,
      "step": 185
    },
    {
      "epoch": 1.52,
      "grad_norm": 0.9100029468536377,
      "learning_rate": 2.447643950291608e-05,
      "loss": 0.5297,
      "num_input_tokens_seen": 1503576,
      "step": 190
    },
    {
      "epoch": 1.56,
      "grad_norm": 0.7347203493118286,
      "learning_rate": 2.3430237011767167e-05,
      "loss": 0.3939,
      "num_input_tokens_seen": 1539904,
      "step": 195
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.6078445315361023,
      "learning_rate": 2.238678841830867e-05,
      "loss": 0.461,
      "num_input_tokens_seen": 1580560,
      "step": 200
    },
    {
      "epoch": 1.6400000000000001,
      "grad_norm": 0.5878064632415771,
      "learning_rate": 2.1347924285939714e-05,
      "loss": 0.4622,
      "num_input_tokens_seen": 1623608,
      "step": 205
    },
    {
      "epoch": 1.6800000000000002,
      "grad_norm": 0.6516324877738953,
      "learning_rate": 2.031546713535688e-05,
      "loss": 0.4043,
      "num_input_tokens_seen": 1660744,
      "step": 210
    },
    {
      "epoch": 1.72,
      "grad_norm": 0.9136825203895569,
      "learning_rate": 1.9291228247233605e-05,
      "loss": 0.428,
      "num_input_tokens_seen": 1698784,
      "step": 215
    },
    {
      "epoch": 1.76,
      "grad_norm": 0.751567006111145,
      "learning_rate": 1.827700448461836e-05,
      "loss": 0.3779,
      "num_input_tokens_seen": 1734792,
      "step": 220
    },
    {
      "epoch": 1.8,
      "grad_norm": 0.8802843689918518,
      "learning_rate": 1.7274575140626318e-05,
      "loss": 0.4526,
      "num_input_tokens_seen": 1776144,
      "step": 225
    },
    {
      "epoch": 1.8399999999999999,
      "grad_norm": 0.6807656288146973,
      "learning_rate": 1.6285698816954624e-05,
      "loss": 0.4627,
      "num_input_tokens_seen": 1815864,
      "step": 230
    },
    {
      "epoch": 1.88,
      "grad_norm": 0.6222789883613586,
      "learning_rate": 1.5312110338697426e-05,
      "loss": 0.4873,
      "num_input_tokens_seen": 1856168,
      "step": 235
    },
    {
      "epoch": 1.92,
      "grad_norm": 0.5224676728248596,
      "learning_rate": 1.4355517710873184e-05,
      "loss": 0.3234,
      "num_input_tokens_seen": 1899120,
      "step": 240
    },
    {
      "epoch": 1.96,
      "grad_norm": 0.4191940724849701,
      "learning_rate": 1.3417599122003464e-05,
      "loss": 0.4438,
      "num_input_tokens_seen": 1936808,
      "step": 245
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.6064372062683105,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.4407,
      "num_input_tokens_seen": 1977312,
      "step": 250
    },
    {
      "epoch": 2.04,
      "grad_norm": 0.5188178420066833,
      "learning_rate": 1.1604330125525079e-05,
      "loss": 0.4401,
      "num_input_tokens_seen": 2018336,
      "step": 255
    },
    {
      "epoch": 2.08,
      "grad_norm": 0.5036932826042175,
      "learning_rate": 1.0732160807889211e-05,
      "loss": 0.3771,
      "num_input_tokens_seen": 2057776,
      "step": 260
    },
    {
      "epoch": 2.12,
      "grad_norm": 0.5732894539833069,
      "learning_rate": 9.88502212844063e-06,
      "loss": 0.4043,
      "num_input_tokens_seen": 2102312,
      "step": 265
    },
    {
      "epoch": 2.16,
      "grad_norm": 0.6521058082580566,
      "learning_rate": 9.064400256282757e-06,
      "loss": 0.4018,
      "num_input_tokens_seen": 2137304,
      "step": 270
    },
    {
      "epoch": 2.2,
      "grad_norm": 0.698835015296936,
      "learning_rate": 8.271734841028553e-06,
      "loss": 0.4258,
      "num_input_tokens_seen": 2176144,
      "step": 275
    },
    {
      "epoch": 2.24,
      "grad_norm": 0.7500255107879639,
      "learning_rate": 7.508416487165862e-06,
      "loss": 0.3912,
      "num_input_tokens_seen": 2211640,
      "step": 280
    },
    {
      "epoch": 2.2800000000000002,
      "grad_norm": 0.7223864793777466,
      "learning_rate": 6.775784314464717e-06,
      "loss": 0.3458,
      "num_input_tokens_seen": 2247272,
      "step": 285
    },
    {
      "epoch": 2.32,
      "grad_norm": 0.6346307992935181,
      "learning_rate": 6.075123608706093e-06,
      "loss": 0.4255,
      "num_input_tokens_seen": 2285776,
      "step": 290
    },
    {
      "epoch": 2.36,
      "grad_norm": 0.6048356890678406,
      "learning_rate": 5.4076635668540075e-06,
      "loss": 0.4222,
      "num_input_tokens_seen": 2326112,
      "step": 295
    },
    {
      "epoch": 2.4,
      "grad_norm": 1.0743402242660522,
      "learning_rate": 4.7745751406263165e-06,
      "loss": 0.399,
      "num_input_tokens_seen": 2362816,
      "step": 300
    }
  ],
  "logging_steps": 5,
  "max_steps": 375,
  "num_input_tokens_seen": 2362816,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.005255938455634e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}