{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.3679890560875512,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022799817601459188,
      "grad_norm": 0.765625,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.2602,
      "step": 50
    },
    {
      "epoch": 0.045599635202918376,
      "grad_norm": 0.546875,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.1504,
      "step": 100
    },
    {
      "epoch": 0.06839945280437756,
      "grad_norm": 0.44140625,
      "learning_rate": 0.0001,
      "loss": 0.137,
      "step": 150
    },
    {
      "epoch": 0.09119927040583675,
      "grad_norm": 0.5390625,
      "learning_rate": 0.00013333333333333334,
      "loss": 0.1296,
      "step": 200
    },
    {
      "epoch": 0.11399908800729594,
      "grad_norm": 0.84765625,
      "learning_rate": 0.0001666666666666667,
      "loss": 0.1308,
      "step": 250
    },
    {
      "epoch": 0.13679890560875513,
      "grad_norm": 0.376953125,
      "learning_rate": 0.0002,
      "loss": 0.0811,
      "step": 300
    },
    {
      "epoch": 0.15959872321021432,
      "grad_norm": 0.439453125,
      "learning_rate": 0.00019988560970029743,
      "loss": 0.0842,
      "step": 350
    },
    {
      "epoch": 0.1823985408116735,
      "grad_norm": 0.5,
      "learning_rate": 0.00019977121940059484,
      "loss": 0.0772,
      "step": 400
    },
    {
      "epoch": 0.2051983584131327,
      "grad_norm": 0.44921875,
      "learning_rate": 0.00019965682910089226,
      "loss": 0.0799,
      "step": 450
    },
    {
      "epoch": 0.22799817601459188,
      "grad_norm": 0.5078125,
      "learning_rate": 0.00019954243880118967,
      "loss": 0.0733,
      "step": 500
    },
    {
      "epoch": 0.2507979936160511,
      "grad_norm": 0.4140625,
      "learning_rate": 0.0001994280485014871,
      "loss": 0.1053,
      "step": 550
    },
    {
      "epoch": 0.27359781121751026,
      "grad_norm": 0.431640625,
      "learning_rate": 0.00019931365820178448,
      "loss": 0.0868,
      "step": 600
    },
    {
      "epoch": 0.29639762881896947,
      "grad_norm": 0.87109375,
      "learning_rate": 0.00019919926790208192,
      "loss": 0.0824,
      "step": 650
    },
    {
      "epoch": 0.31919744642042863,
      "grad_norm": 1.0390625,
      "learning_rate": 0.00019908487760237934,
      "loss": 0.0667,
      "step": 700
    },
    {
      "epoch": 0.34199726402188785,
      "grad_norm": 0.8671875,
      "learning_rate": 0.00019897048730267673,
      "loss": 0.0742,
      "step": 750
    },
    {
      "epoch": 0.364797081623347,
      "grad_norm": 0.5703125,
      "learning_rate": 0.00019885609700297417,
      "loss": 0.0815,
      "step": 800
    },
    {
      "epoch": 0.3875968992248062,
      "grad_norm": 0.8203125,
      "learning_rate": 0.00019874170670327156,
      "loss": 0.0695,
      "step": 850
    },
    {
      "epoch": 0.4103967168262654,
      "grad_norm": 0.56640625,
      "learning_rate": 0.000198627316403569,
      "loss": 0.0564,
      "step": 900
    },
    {
      "epoch": 0.4331965344277246,
      "grad_norm": 0.427734375,
      "learning_rate": 0.0001985129261038664,
      "loss": 0.051,
      "step": 950
    },
    {
      "epoch": 0.45599635202918376,
      "grad_norm": 0.83203125,
      "learning_rate": 0.0001983985358041638,
      "loss": 0.0745,
      "step": 1000
    },
    {
      "epoch": 0.478796169630643,
      "grad_norm": 0.625,
      "learning_rate": 0.00019828414550446125,
      "loss": 0.0949,
      "step": 1050
    },
    {
      "epoch": 0.5015959872321022,
      "grad_norm": 0.337890625,
      "learning_rate": 0.00019816975520475864,
      "loss": 0.1331,
      "step": 1100
    },
    {
      "epoch": 0.5243958048335613,
      "grad_norm": 0.828125,
      "learning_rate": 0.00019805536490505606,
      "loss": 0.1326,
      "step": 1150
    },
    {
      "epoch": 0.5471956224350205,
      "grad_norm": 0.78515625,
      "learning_rate": 0.00019794097460535347,
      "loss": 0.1135,
      "step": 1200
    },
    {
      "epoch": 0.5699954400364797,
      "grad_norm": 0.66015625,
      "learning_rate": 0.0001978265843056509,
      "loss": 0.1085,
      "step": 1250
    },
    {
      "epoch": 0.5927952576379389,
      "grad_norm": 0.625,
      "learning_rate": 0.0001977121940059483,
      "loss": 0.1347,
      "step": 1300
    },
    {
      "epoch": 0.615595075239398,
      "grad_norm": 0.73828125,
      "learning_rate": 0.00019759780370624572,
      "loss": 0.102,
      "step": 1350
    },
    {
      "epoch": 0.6383948928408573,
      "grad_norm": 0.578125,
      "learning_rate": 0.00019748341340654314,
      "loss": 0.0991,
      "step": 1400
    },
    {
      "epoch": 0.6611947104423165,
      "grad_norm": 0.25390625,
      "learning_rate": 0.00019736902310684055,
      "loss": 0.1125,
      "step": 1450
    },
    {
      "epoch": 0.6839945280437757,
      "grad_norm": 0.87109375,
      "learning_rate": 0.00019725463280713797,
      "loss": 0.1183,
      "step": 1500
    },
    {
      "epoch": 0.7067943456452348,
      "grad_norm": 0.734375,
      "learning_rate": 0.00019714024250743536,
      "loss": 0.105,
      "step": 1550
    },
    {
      "epoch": 0.729594163246694,
      "grad_norm": 0.49609375,
      "learning_rate": 0.0001970258522077328,
      "loss": 0.1315,
      "step": 1600
    },
    {
      "epoch": 0.7523939808481532,
      "grad_norm": 0.59765625,
      "learning_rate": 0.00019691146190803022,
      "loss": 0.1133,
      "step": 1650
    },
    {
      "epoch": 0.7751937984496124,
      "grad_norm": 0.6484375,
      "learning_rate": 0.00019679707160832763,
      "loss": 0.1111,
      "step": 1700
    },
    {
      "epoch": 0.7979936160510716,
      "grad_norm": 0.671875,
      "learning_rate": 0.00019668268130862505,
      "loss": 0.1277,
      "step": 1750
    },
    {
      "epoch": 0.8207934336525308,
      "grad_norm": 0.392578125,
      "learning_rate": 0.00019656829100892244,
      "loss": 0.1145,
      "step": 1800
    },
    {
      "epoch": 0.84359325125399,
      "grad_norm": 0.63671875,
      "learning_rate": 0.00019645390070921988,
      "loss": 0.1144,
      "step": 1850
    },
    {
      "epoch": 0.8663930688554492,
      "grad_norm": 0.69921875,
      "learning_rate": 0.00019633951040951727,
      "loss": 0.0995,
      "step": 1900
    },
    {
      "epoch": 0.8891928864569083,
      "grad_norm": 0.8984375,
      "learning_rate": 0.00019622512010981468,
      "loss": 0.0972,
      "step": 1950
    },
    {
      "epoch": 0.9119927040583675,
      "grad_norm": 0.306640625,
      "learning_rate": 0.00019611072981011213,
      "loss": 0.105,
      "step": 2000
    },
    {
      "epoch": 0.9347925216598267,
      "grad_norm": 0.419921875,
      "learning_rate": 0.00019599633951040952,
      "loss": 0.1285,
      "step": 2050
    },
    {
      "epoch": 0.957592339261286,
      "grad_norm": 0.52734375,
      "learning_rate": 0.00019588194921070696,
      "loss": 0.1059,
      "step": 2100
    },
    {
      "epoch": 0.9803921568627451,
      "grad_norm": 0.79296875,
      "learning_rate": 0.00019576755891100435,
      "loss": 0.1013,
      "step": 2150
    },
    {
      "epoch": 1.0031919744642044,
      "grad_norm": 0.55078125,
      "learning_rate": 0.00019565316861130177,
      "loss": 0.0989,
      "step": 2200
    },
    {
      "epoch": 1.0259917920656634,
      "grad_norm": 0.84765625,
      "learning_rate": 0.00019553877831159918,
      "loss": 0.1024,
      "step": 2250
    },
    {
      "epoch": 1.0487916096671226,
      "grad_norm": 0.439453125,
      "learning_rate": 0.0001954243880118966,
      "loss": 0.0818,
      "step": 2300
    },
    {
      "epoch": 1.0715914272685818,
      "grad_norm": 0.59765625,
      "learning_rate": 0.000195309997712194,
      "loss": 0.0847,
      "step": 2350
    },
    {
      "epoch": 1.094391244870041,
      "grad_norm": 0.6640625,
      "learning_rate": 0.00019519560741249143,
      "loss": 0.0817,
      "step": 2400
    },
    {
      "epoch": 1.1171910624715002,
      "grad_norm": 0.61328125,
      "learning_rate": 0.00019508121711278885,
      "loss": 0.0742,
      "step": 2450
    },
    {
      "epoch": 1.1399908800729595,
      "grad_norm": 0.2265625,
      "learning_rate": 0.00019496682681308626,
      "loss": 0.0493,
      "step": 2500
    },
    {
      "epoch": 1.1627906976744187,
      "grad_norm": 0.4765625,
      "learning_rate": 0.00019485243651338368,
      "loss": 0.049,
      "step": 2550
    },
    {
      "epoch": 1.1855905152758779,
      "grad_norm": 0.451171875,
      "learning_rate": 0.0001947380462136811,
      "loss": 0.04,
      "step": 2600
    },
    {
      "epoch": 1.2083903328773369,
      "grad_norm": 0.39453125,
      "learning_rate": 0.0001946236559139785,
      "loss": 0.0446,
      "step": 2650
    },
    {
      "epoch": 1.231190150478796,
      "grad_norm": 0.5546875,
      "learning_rate": 0.00019450926561427593,
      "loss": 0.0398,
      "step": 2700
    },
    {
      "epoch": 1.2539899680802553,
      "grad_norm": 0.353515625,
      "learning_rate": 0.00019439487531457334,
      "loss": 0.0547,
      "step": 2750
    },
    {
      "epoch": 1.2767897856817145,
      "grad_norm": 0.83203125,
      "learning_rate": 0.00019428048501487076,
      "loss": 0.0503,
      "step": 2800
    },
    {
      "epoch": 1.2995896032831737,
      "grad_norm": 0.328125,
      "learning_rate": 0.00019416609471516815,
      "loss": 0.0378,
      "step": 2850
    },
    {
      "epoch": 1.322389420884633,
      "grad_norm": 0.54296875,
      "learning_rate": 0.0001940517044154656,
      "loss": 0.04,
      "step": 2900
    },
    {
      "epoch": 1.3451892384860922,
      "grad_norm": 0.82421875,
      "learning_rate": 0.00019393731411576298,
      "loss": 0.0362,
      "step": 2950
    },
    {
      "epoch": 1.3679890560875512,
      "grad_norm": 1.7890625,
      "learning_rate": 0.0001938229238160604,
      "loss": 0.0445,
      "step": 3000
    }
  ],
  "logging_steps": 50,
  "max_steps": 87720,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 200,
  "total_flos": 0.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": null
}
|