{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.5080113100848256,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03770028275212064,
      "grad_norm": 2.404820680618286,
      "learning_rate": 4.9995083170283816e-05,
      "loss": 4.2968,
      "num_input_tokens_seen": 51872,
      "step": 5
    },
    {
      "epoch": 0.07540056550424128,
      "grad_norm": 1.080649971961975,
      "learning_rate": 4.998033461515242e-05,
      "loss": 3.9725,
      "num_input_tokens_seen": 107024,
      "step": 10
    },
    {
      "epoch": 0.11310084825636192,
      "grad_norm": 2.0493686199188232,
      "learning_rate": 4.9955760135896534e-05,
      "loss": 3.8314,
      "num_input_tokens_seen": 160080,
      "step": 15
    },
    {
      "epoch": 0.15080113100848255,
      "grad_norm": 0.927135169506073,
      "learning_rate": 4.992136939879856e-05,
      "loss": 3.7,
      "num_input_tokens_seen": 208816,
      "step": 20
    },
    {
      "epoch": 0.1885014137606032,
      "grad_norm": 0.9066221714019775,
      "learning_rate": 4.9877175931330346e-05,
      "loss": 3.6029,
      "num_input_tokens_seen": 262448,
      "step": 25
    },
    {
      "epoch": 0.22620169651272384,
      "grad_norm": 1.361427903175354,
      "learning_rate": 4.982319711683221e-05,
      "loss": 3.4859,
      "num_input_tokens_seen": 315552,
      "step": 30
    },
    {
      "epoch": 0.2639019792648445,
      "grad_norm": 6.180077075958252,
      "learning_rate": 4.975945418767529e-05,
      "loss": 3.4653,
      "num_input_tokens_seen": 366992,
      "step": 35
    },
    {
      "epoch": 0.3016022620169651,
      "grad_norm": 1.7034149169921875,
      "learning_rate": 4.968597221690986e-05,
      "loss": 3.485,
      "num_input_tokens_seen": 418816,
      "step": 40
    },
    {
      "epoch": 0.3393025447690858,
      "grad_norm": 2.64639949798584,
      "learning_rate": 4.96027801084029e-05,
      "loss": 3.4304,
      "num_input_tokens_seen": 469936,
      "step": 45
    },
    {
      "epoch": 0.3770028275212064,
      "grad_norm": 1.302827000617981,
      "learning_rate": 4.950991058546893e-05,
      "loss": 3.4635,
      "num_input_tokens_seen": 525120,
      "step": 50
    },
    {
      "epoch": 0.41470311027332707,
      "grad_norm": 1.5340975522994995,
      "learning_rate": 4.940740017799833e-05,
      "loss": 3.3626,
      "num_input_tokens_seen": 576944,
      "step": 55
    },
    {
      "epoch": 0.4524033930254477,
      "grad_norm": 1.2329437732696533,
      "learning_rate": 4.929528920808854e-05,
      "loss": 3.3347,
      "num_input_tokens_seen": 628544,
      "step": 60
    },
    {
      "epoch": 0.49010367577756836,
      "grad_norm": 1.7281228303909302,
      "learning_rate": 4.917362177418342e-05,
      "loss": 3.3347,
      "num_input_tokens_seen": 681392,
      "step": 65
    },
    {
      "epoch": 0.527803958529689,
      "grad_norm": 1.3272786140441895,
      "learning_rate": 4.904244573372733e-05,
      "loss": 3.3173,
      "num_input_tokens_seen": 734928,
      "step": 70
    },
    {
      "epoch": 0.5655042412818096,
      "grad_norm": 1.3057860136032104,
      "learning_rate": 4.8901812684340564e-05,
      "loss": 3.3863,
      "num_input_tokens_seen": 785840,
      "step": 75
    },
    {
      "epoch": 0.6032045240339302,
      "grad_norm": 1.3130522966384888,
      "learning_rate": 4.8751777943523634e-05,
      "loss": 3.2669,
      "num_input_tokens_seen": 838112,
      "step": 80
    },
    {
      "epoch": 0.6409048067860509,
      "grad_norm": 1.4002560377120972,
      "learning_rate": 4.8592400526898314e-05,
      "loss": 3.249,
      "num_input_tokens_seen": 886784,
      "step": 85
    },
    {
      "epoch": 0.6786050895381716,
      "grad_norm": 1.5897698402404785,
      "learning_rate": 4.842374312499405e-05,
      "loss": 3.3248,
      "num_input_tokens_seen": 936880,
      "step": 90
    },
    {
      "epoch": 0.7163053722902922,
      "grad_norm": 2.1904706954956055,
      "learning_rate": 4.824587207858888e-05,
      "loss": 3.1471,
      "num_input_tokens_seen": 988864,
      "step": 95
    },
    {
      "epoch": 0.7540056550424128,
      "grad_norm": 1.859395980834961,
      "learning_rate": 4.805885735261454e-05,
      "loss": 3.2406,
      "num_input_tokens_seen": 1044400,
      "step": 100
    },
    {
      "epoch": 0.7917059377945335,
      "grad_norm": 1.6112860441207886,
      "learning_rate": 4.786277250863599e-05,
      "loss": 3.1418,
      "num_input_tokens_seen": 1097376,
      "step": 105
    },
    {
      "epoch": 0.8294062205466541,
      "grad_norm": 1.3354580402374268,
      "learning_rate": 4.765769467591625e-05,
      "loss": 3.1452,
      "num_input_tokens_seen": 1153456,
      "step": 110
    },
    {
      "epoch": 0.8671065032987747,
      "grad_norm": 1.9464856386184692,
      "learning_rate": 4.744370452107789e-05,
      "loss": 3.2019,
      "num_input_tokens_seen": 1205536,
      "step": 115
    },
    {
      "epoch": 0.9048067860508954,
      "grad_norm": 1.3771345615386963,
      "learning_rate": 4.722088621637309e-05,
      "loss": 3.2528,
      "num_input_tokens_seen": 1256080,
      "step": 120
    },
    {
      "epoch": 0.942507068803016,
      "grad_norm": 1.5059542655944824,
      "learning_rate": 4.698932740657479e-05,
      "loss": 3.0824,
      "num_input_tokens_seen": 1308592,
      "step": 125
    },
    {
      "epoch": 0.9802073515551367,
      "grad_norm": 1.4657851457595825,
      "learning_rate": 4.6749119174501975e-05,
      "loss": 3.2487,
      "num_input_tokens_seen": 1355792,
      "step": 130
    },
    {
      "epoch": 1.0179076343072573,
      "grad_norm": 1.5458085536956787,
      "learning_rate": 4.6500356005192514e-05,
      "loss": 3.12,
      "num_input_tokens_seen": 1402560,
      "step": 135
    },
    {
      "epoch": 1.055607917059378,
      "grad_norm": 1.4856982231140137,
      "learning_rate": 4.6243135748737864e-05,
      "loss": 3.1617,
      "num_input_tokens_seen": 1453152,
      "step": 140
    },
    {
      "epoch": 1.0933081998114986,
      "grad_norm": 1.728014588356018,
      "learning_rate": 4.597755958179406e-05,
      "loss": 3.0912,
      "num_input_tokens_seen": 1505744,
      "step": 145
    },
    {
      "epoch": 1.1310084825636193,
      "grad_norm": 1.727301836013794,
      "learning_rate": 4.570373196778427e-05,
      "loss": 2.9448,
      "num_input_tokens_seen": 1559264,
      "step": 150
    },
    {
      "epoch": 1.1687087653157398,
      "grad_norm": 1.8649897575378418,
      "learning_rate": 4.5421760615808474e-05,
      "loss": 3.1383,
      "num_input_tokens_seen": 1604368,
      "step": 155
    },
    {
      "epoch": 1.2064090480678604,
      "grad_norm": 1.8682725429534912,
      "learning_rate": 4.513175643827647e-05,
      "loss": 3.0526,
      "num_input_tokens_seen": 1657168,
      "step": 160
    },
    {
      "epoch": 1.244109330819981,
      "grad_norm": 1.723254680633545,
      "learning_rate": 4.4833833507280884e-05,
      "loss": 3.0386,
      "num_input_tokens_seen": 1704960,
      "step": 165
    },
    {
      "epoch": 1.2818096135721018,
      "grad_norm": 1.6712509393692017,
      "learning_rate": 4.4528109009727336e-05,
      "loss": 2.9411,
      "num_input_tokens_seen": 1756400,
      "step": 170
    },
    {
      "epoch": 1.3195098963242224,
      "grad_norm": 2.0004794597625732,
      "learning_rate": 4.42147032012394e-05,
      "loss": 3.1312,
      "num_input_tokens_seen": 1806976,
      "step": 175
    },
    {
      "epoch": 1.3572101790763431,
      "grad_norm": 1.872934103012085,
      "learning_rate": 4.389373935885646e-05,
      "loss": 3.0785,
      "num_input_tokens_seen": 1861264,
      "step": 180
    },
    {
      "epoch": 1.3949104618284638,
      "grad_norm": 1.8472355604171753,
      "learning_rate": 4.356534373254316e-05,
      "loss": 3.0567,
      "num_input_tokens_seen": 1917872,
      "step": 185
    },
    {
      "epoch": 1.4326107445805842,
      "grad_norm": 2.1023237705230713,
      "learning_rate": 4.322964549552943e-05,
      "loss": 3.0526,
      "num_input_tokens_seen": 1971888,
      "step": 190
    },
    {
      "epoch": 1.4703110273327051,
      "grad_norm": 1.9663615226745605,
      "learning_rate": 4.288677669350066e-05,
      "loss": 3.0147,
      "num_input_tokens_seen": 2022112,
      "step": 195
    },
    {
      "epoch": 1.5080113100848256,
      "grad_norm": 2.509061574935913,
      "learning_rate": 4.2536872192658036e-05,
      "loss": 3.0681,
      "num_input_tokens_seen": 2073088,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 792,
  "num_input_tokens_seen": 2073088,
  "num_train_epochs": 6,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.819916695771546e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}