{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 478,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0020920502092050207,
"grad_norm": 9.099456092432153,
"learning_rate": 1.0416666666666666e-08,
"logits/chosen": -3.359375,
"logits/rejected": -3.3125,
"logps/chosen": -296.0,
"logps/rejected": -360.0,
"loss": 0.6914,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.02092050209205021,
"grad_norm": 8.217366878859176,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": -3.15625,
"logits/rejected": -3.1875,
"logps/chosen": -262.0,
"logps/rejected": -238.0,
"loss": 0.6917,
"rewards/accuracies": 0.2708333432674408,
"rewards/chosen": 0.000598907470703125,
"rewards/margins": 0.00058746337890625,
"rewards/rejected": 7.867813110351562e-06,
"step": 10
},
{
"epoch": 0.04184100418410042,
"grad_norm": 8.628513952797078,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": -3.21875,
"logits/rejected": -3.21875,
"logps/chosen": -282.0,
"logps/rejected": -286.0,
"loss": 0.6909,
"rewards/accuracies": 0.3687500059604645,
"rewards/chosen": -0.00173187255859375,
"rewards/margins": 0.0021820068359375,
"rewards/rejected": -0.00390625,
"step": 20
},
{
"epoch": 0.06276150627615062,
"grad_norm": 9.169127445477065,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -3.25,
"logits/rejected": -3.21875,
"logps/chosen": -300.0,
"logps/rejected": -300.0,
"loss": 0.6832,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": -0.0167236328125,
"rewards/margins": 0.01611328125,
"rewards/rejected": -0.032958984375,
"step": 30
},
{
"epoch": 0.08368200836820083,
"grad_norm": 9.282045835045837,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -3.09375,
"logits/rejected": -3.1875,
"logps/chosen": -270.0,
"logps/rejected": -268.0,
"loss": 0.6603,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.051513671875,
"rewards/margins": 0.0771484375,
"rewards/rejected": -0.12890625,
"step": 40
},
{
"epoch": 0.10460251046025104,
"grad_norm": 12.0230941013448,
"learning_rate": 4.999733114418725e-07,
"logits/chosen": -3.109375,
"logits/rejected": -3.046875,
"logps/chosen": -288.0,
"logps/rejected": -320.0,
"loss": 0.6096,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.056396484375,
"rewards/margins": 0.177734375,
"rewards/rejected": -0.2333984375,
"step": 50
},
{
"epoch": 0.12552301255230125,
"grad_norm": 19.700932387600457,
"learning_rate": 4.990398100856366e-07,
"logits/chosen": -3.1875,
"logits/rejected": -3.140625,
"logps/chosen": -284.0,
"logps/rejected": -354.0,
"loss": 0.5051,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": -0.1455078125,
"rewards/margins": 0.443359375,
"rewards/rejected": -0.5859375,
"step": 60
},
{
"epoch": 0.14644351464435146,
"grad_norm": 25.122385996985045,
"learning_rate": 4.967775735898179e-07,
"logits/chosen": -2.984375,
"logits/rejected": -3.0,
"logps/chosen": -306.0,
"logps/rejected": -422.0,
"loss": 0.3824,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -0.4140625,
"rewards/margins": 1.0234375,
"rewards/rejected": -1.4375,
"step": 70
},
{
"epoch": 0.16736401673640167,
"grad_norm": 29.28714176471108,
"learning_rate": 4.931986719649298e-07,
"logits/chosen": -3.09375,
"logits/rejected": -3.046875,
"logps/chosen": -406.0,
"logps/rejected": -544.0,
"loss": 0.2788,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -0.765625,
"rewards/margins": 1.7421875,
"rewards/rejected": -2.5,
"step": 80
},
{
"epoch": 0.18828451882845187,
"grad_norm": 30.45351538248815,
"learning_rate": 4.883222001996351e-07,
"logits/chosen": -3.015625,
"logits/rejected": -2.953125,
"logps/chosen": -356.0,
"logps/rejected": -560.0,
"loss": 0.251,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -0.91015625,
"rewards/margins": 2.1875,
"rewards/rejected": -3.09375,
"step": 90
},
{
"epoch": 0.20920502092050208,
"grad_norm": 25.879079240835583,
"learning_rate": 4.821741763807186e-07,
"logits/chosen": -2.828125,
"logits/rejected": -2.875,
"logps/chosen": -384.0,
"logps/rejected": -620.0,
"loss": 0.2895,
"rewards/accuracies": 0.875,
"rewards/chosen": -1.1796875,
"rewards/margins": 2.40625,
"rewards/rejected": -3.59375,
"step": 100
},
{
"epoch": 0.2301255230125523,
"grad_norm": 59.34338493619359,
"learning_rate": 4.747874028753375e-07,
"logits/chosen": -2.890625,
"logits/rejected": -2.78125,
"logps/chosen": -436.0,
"logps/rejected": -656.0,
"loss": 0.2514,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": -1.3203125,
"rewards/margins": 2.34375,
"rewards/rejected": -3.65625,
"step": 110
},
{
"epoch": 0.2510460251046025,
"grad_norm": 34.20651774561803,
"learning_rate": 4.662012913161997e-07,
"logits/chosen": -2.78125,
"logits/rejected": -2.6875,
"logps/chosen": -392.0,
"logps/rejected": -628.0,
"loss": 0.2079,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.09375,
"rewards/margins": 2.46875,
"rewards/rejected": -3.5625,
"step": 120
},
{
"epoch": 0.2719665271966527,
"grad_norm": 34.47099090322982,
"learning_rate": 4.5646165232345103e-07,
"logits/chosen": -2.8125,
"logits/rejected": -2.703125,
"logps/chosen": -392.0,
"logps/rejected": -664.0,
"loss": 0.2106,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.1328125,
"rewards/margins": 2.78125,
"rewards/rejected": -3.90625,
"step": 130
},
{
"epoch": 0.2928870292887029,
"grad_norm": 25.816296928179703,
"learning_rate": 4.456204510851956e-07,
"logits/chosen": -2.734375,
"logits/rejected": -2.671875,
"logps/chosen": -450.0,
"logps/rejected": -736.0,
"loss": 0.2027,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -1.3828125,
"rewards/margins": 3.140625,
"rewards/rejected": -4.5,
"step": 140
},
{
"epoch": 0.3138075313807531,
"grad_norm": 25.9983799849635,
"learning_rate": 4.337355301007335e-07,
"logits/chosen": -2.71875,
"logits/rejected": -2.53125,
"logps/chosen": -432.0,
"logps/rejected": -808.0,
"loss": 0.2011,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -1.453125,
"rewards/margins": 3.796875,
"rewards/rejected": -5.25,
"step": 150
},
{
"epoch": 0.33472803347280333,
"grad_norm": 25.6614814368812,
"learning_rate": 4.2087030056579986e-07,
"logits/chosen": -2.734375,
"logits/rejected": -2.59375,
"logps/chosen": -396.0,
"logps/rejected": -744.0,
"loss": 0.1733,
"rewards/accuracies": 0.90625,
"rewards/chosen": -1.2265625,
"rewards/margins": 3.453125,
"rewards/rejected": -4.6875,
"step": 160
},
{
"epoch": 0.35564853556485354,
"grad_norm": 23.516456639083984,
"learning_rate": 4.070934040463998e-07,
"logits/chosen": -2.578125,
"logits/rejected": -2.484375,
"logps/chosen": -402.0,
"logps/rejected": -760.0,
"loss": 0.1909,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.5,
"rewards/margins": 3.5,
"rewards/rejected": -5.0,
"step": 170
},
{
"epoch": 0.37656903765690375,
"grad_norm": 40.42551024225754,
"learning_rate": 3.9247834624635404e-07,
"logits/chosen": -2.625,
"logits/rejected": -2.53125,
"logps/chosen": -402.0,
"logps/rejected": -812.0,
"loss": 0.1967,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.4453125,
"rewards/margins": 4.03125,
"rewards/rejected": -5.5,
"step": 180
},
{
"epoch": 0.39748953974895396,
"grad_norm": 19.01883870968105,
"learning_rate": 3.7710310482256523e-07,
"logits/chosen": -2.6875,
"logits/rejected": -2.5,
"logps/chosen": -406.0,
"logps/rejected": -748.0,
"loss": 0.1892,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -1.2265625,
"rewards/margins": 3.453125,
"rewards/rejected": -4.6875,
"step": 190
},
{
"epoch": 0.41841004184100417,
"grad_norm": 58.64663183468128,
"learning_rate": 3.610497133404795e-07,
"logits/chosen": -2.515625,
"logits/rejected": -2.484375,
"logps/chosen": -424.0,
"logps/rejected": -836.0,
"loss": 0.1791,
"rewards/accuracies": 0.90625,
"rewards/chosen": -1.5859375,
"rewards/margins": 3.984375,
"rewards/rejected": -5.5625,
"step": 200
},
{
"epoch": 0.4393305439330544,
"grad_norm": 35.29196540474136,
"learning_rate": 3.4440382358952115e-07,
"logits/chosen": -2.6875,
"logits/rejected": -2.5,
"logps/chosen": -450.0,
"logps/rejected": -860.0,
"loss": 0.1883,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.5390625,
"rewards/margins": 4.09375,
"rewards/rejected": -5.65625,
"step": 210
},
{
"epoch": 0.4602510460251046,
"grad_norm": 42.58286407786604,
"learning_rate": 3.272542485937368e-07,
"logits/chosen": -2.5625,
"logits/rejected": -2.53125,
"logps/chosen": -400.0,
"logps/rejected": -776.0,
"loss": 0.1641,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.3671875,
"rewards/margins": 3.65625,
"rewards/rejected": -5.03125,
"step": 220
},
{
"epoch": 0.4811715481171548,
"grad_norm": 57.96355489388366,
"learning_rate": 3.096924887558854e-07,
"logits/chosen": -2.46875,
"logits/rejected": -2.40625,
"logps/chosen": -408.0,
"logps/rejected": -872.0,
"loss": 0.1766,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.625,
"rewards/margins": 4.59375,
"rewards/rejected": -6.21875,
"step": 230
},
{
"epoch": 0.502092050209205,
"grad_norm": 38.85695272700362,
"learning_rate": 2.9181224366319943e-07,
"logits/chosen": -2.578125,
"logits/rejected": -2.34375,
"logps/chosen": -428.0,
"logps/rejected": -880.0,
"loss": 0.1776,
"rewards/accuracies": 0.90625,
"rewards/chosen": -1.6015625,
"rewards/margins": 4.5625,
"rewards/rejected": -6.15625,
"step": 240
},
{
"epoch": 0.5230125523012552,
"grad_norm": 32.765054711715315,
"learning_rate": 2.7370891215954565e-07,
"logits/chosen": -2.59375,
"logits/rejected": -2.34375,
"logps/chosen": -436.0,
"logps/rejected": -852.0,
"loss": 0.1456,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -1.328125,
"rewards/margins": 4.4375,
"rewards/rejected": -5.78125,
"step": 250
},
{
"epoch": 0.5439330543933054,
"grad_norm": 38.81475856875557,
"learning_rate": 2.55479083351317e-07,
"logits/chosen": -2.484375,
"logits/rejected": -2.21875,
"logps/chosen": -502.0,
"logps/rejected": -1032.0,
"loss": 0.1545,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -2.078125,
"rewards/margins": 5.40625,
"rewards/rejected": -7.5,
"step": 260
},
{
"epoch": 0.5648535564853556,
"grad_norm": 49.7654758904445,
"learning_rate": 2.3722002126275822e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.09375,
"logps/chosen": -500.0,
"logps/rejected": -960.0,
"loss": 0.1565,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -2.21875,
"rewards/margins": 4.5625,
"rewards/rejected": -6.8125,
"step": 270
},
{
"epoch": 0.5857740585774058,
"grad_norm": 31.170463152628752,
"learning_rate": 2.19029145890313e-07,
"logits/chosen": -2.25,
"logits/rejected": -1.9921875,
"logps/chosen": -462.0,
"logps/rejected": -968.0,
"loss": 0.1641,
"rewards/accuracies": 0.9375,
"rewards/chosen": -1.984375,
"rewards/margins": 4.96875,
"rewards/rejected": -6.9375,
"step": 280
},
{
"epoch": 0.606694560669456,
"grad_norm": 27.441348723076615,
"learning_rate": 2.0100351342479216e-07,
"logits/chosen": -2.28125,
"logits/rejected": -1.953125,
"logps/chosen": -512.0,
"logps/rejected": -956.0,
"loss": 0.1675,
"rewards/accuracies": 0.90625,
"rewards/chosen": -2.53125,
"rewards/margins": 4.59375,
"rewards/rejected": -7.125,
"step": 290
},
{
"epoch": 0.6276150627615062,
"grad_norm": 20.30528868038641,
"learning_rate": 1.8323929841460178e-07,
"logits/chosen": -2.328125,
"logits/rejected": -1.890625,
"logps/chosen": -540.0,
"logps/rejected": -1088.0,
"loss": 0.1308,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.40625,
"rewards/margins": 5.75,
"rewards/rejected": -8.125,
"step": 300
},
{
"epoch": 0.6485355648535565,
"grad_norm": 34.670577413518515,
"learning_rate": 1.6583128063291573e-07,
"logits/chosen": -2.09375,
"logits/rejected": -1.875,
"logps/chosen": -592.0,
"logps/rejected": -1152.0,
"loss": 0.1346,
"rewards/accuracies": 0.9375,
"rewards/chosen": -2.953125,
"rewards/margins": 5.90625,
"rewards/rejected": -8.875,
"step": 310
},
{
"epoch": 0.6694560669456067,
"grad_norm": 29.612754262891425,
"learning_rate": 1.488723393865766e-07,
"logits/chosen": -2.265625,
"logits/rejected": -1.8125,
"logps/chosen": -604.0,
"logps/rejected": -1248.0,
"loss": 0.1433,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -3.09375,
"rewards/margins": 6.53125,
"rewards/rejected": -9.625,
"step": 320
},
{
"epoch": 0.6903765690376569,
"grad_norm": 25.080256733689154,
"learning_rate": 1.3245295796480788e-07,
"logits/chosen": -2.28125,
"logits/rejected": -1.9140625,
"logps/chosen": -572.0,
"logps/rejected": -1112.0,
"loss": 0.1432,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -3.015625,
"rewards/margins": 5.46875,
"rewards/rejected": -8.5,
"step": 330
},
{
"epoch": 0.7112970711297071,
"grad_norm": 35.91711456332533,
"learning_rate": 1.1666074087171627e-07,
"logits/chosen": -2.34375,
"logits/rejected": -1.9765625,
"logps/chosen": -632.0,
"logps/rejected": -1256.0,
"loss": 0.1378,
"rewards/accuracies": 0.9375,
"rewards/chosen": -3.328125,
"rewards/margins": 6.21875,
"rewards/rejected": -9.5625,
"step": 340
},
{
"epoch": 0.7322175732217573,
"grad_norm": 28.74300288909294,
"learning_rate": 1.0157994641835734e-07,
"logits/chosen": -2.109375,
"logits/rejected": -1.796875,
"logps/chosen": -604.0,
"logps/rejected": -1184.0,
"loss": 0.1044,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -3.390625,
"rewards/margins": 5.75,
"rewards/rejected": -9.125,
"step": 350
},
{
"epoch": 0.7531380753138075,
"grad_norm": 26.068963609585946,
"learning_rate": 8.729103716819111e-08,
"logits/chosen": -2.34375,
"logits/rejected": -1.8828125,
"logps/chosen": -680.0,
"logps/rejected": -1272.0,
"loss": 0.1278,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -3.75,
"rewards/margins": 6.09375,
"rewards/rejected": -9.8125,
"step": 360
},
{
"epoch": 0.7740585774058577,
"grad_norm": 88.91144605940063,
"learning_rate": 7.387025063449081e-08,
"logits/chosen": -2.171875,
"logits/rejected": -1.7890625,
"logps/chosen": -612.0,
"logps/rejected": -1192.0,
"loss": 0.1626,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -3.359375,
"rewards/margins": 6.1875,
"rewards/rejected": -9.5625,
"step": 370
},
{
"epoch": 0.7949790794979079,
"grad_norm": 16.463309523333024,
"learning_rate": 6.138919252022435e-08,
"logits/chosen": -2.09375,
"logits/rejected": -1.796875,
"logps/chosen": -552.0,
"logps/rejected": -1136.0,
"loss": 0.139,
"rewards/accuracies": 0.9375,
"rewards/chosen": -3.109375,
"rewards/margins": 5.5625,
"rewards/rejected": -8.6875,
"step": 380
},
{
"epoch": 0.8158995815899581,
"grad_norm": 37.449359063530714,
"learning_rate": 4.991445467064689e-08,
"logits/chosen": -2.3125,
"logits/rejected": -1.9375,
"logps/chosen": -644.0,
"logps/rejected": -1240.0,
"loss": 0.1281,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -3.34375,
"rewards/margins": 6.03125,
"rewards/rejected": -9.375,
"step": 390
},
{
"epoch": 0.8368200836820083,
"grad_norm": 24.80612572089887,
"learning_rate": 3.9507259776993954e-08,
"logits/chosen": -2.234375,
"logits/rejected": -1.9375,
"logps/chosen": -608.0,
"logps/rejected": -1240.0,
"loss": 0.1349,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -3.34375,
"rewards/margins": 6.21875,
"rewards/rejected": -9.5625,
"step": 400
},
{
"epoch": 0.8577405857740585,
"grad_norm": 24.11144809301846,
"learning_rate": 3.022313472693447e-08,
"logits/chosen": -2.34375,
"logits/rejected": -1.9921875,
"logps/chosen": -620.0,
"logps/rejected": -1208.0,
"loss": 0.1372,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -3.265625,
"rewards/margins": 6.21875,
"rewards/rejected": -9.4375,
"step": 410
},
{
"epoch": 0.8786610878661087,
"grad_norm": 20.712692773585854,
"learning_rate": 2.2111614344599684e-08,
"logits/chosen": -2.203125,
"logits/rejected": -2.0,
"logps/chosen": -628.0,
"logps/rejected": -1176.0,
"loss": 0.1391,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -3.203125,
"rewards/margins": 5.71875,
"rewards/rejected": -8.9375,
"step": 420
},
{
"epoch": 0.899581589958159,
"grad_norm": 14.713063691289868,
"learning_rate": 1.521597710086439e-08,
"logits/chosen": -2.15625,
"logits/rejected": -1.6796875,
"logps/chosen": -580.0,
"logps/rejected": -1192.0,
"loss": 0.1275,
"rewards/accuracies": 0.9375,
"rewards/chosen": -3.0,
"rewards/margins": 6.53125,
"rewards/rejected": -9.5,
"step": 430
},
{
"epoch": 0.9205020920502092,
"grad_norm": 42.78914335876745,
"learning_rate": 9.57301420397924e-09,
"logits/chosen": -2.25,
"logits/rejected": -1.9296875,
"logps/chosen": -604.0,
"logps/rejected": -1272.0,
"loss": 0.1322,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -3.140625,
"rewards/margins": 6.65625,
"rewards/rejected": -9.8125,
"step": 440
},
{
"epoch": 0.9414225941422594,
"grad_norm": 20.26822701097891,
"learning_rate": 5.212833302556258e-09,
"logits/chosen": -2.21875,
"logits/rejected": -1.890625,
"logps/chosen": -608.0,
"logps/rejected": -1264.0,
"loss": 0.1221,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -3.1875,
"rewards/margins": 6.5,
"rewards/rejected": -9.6875,
"step": 450
},
{
"epoch": 0.9623430962343096,
"grad_norm": 18.31779303714104,
"learning_rate": 2.158697848236607e-09,
"logits/chosen": -2.1875,
"logits/rejected": -1.8515625,
"logps/chosen": -604.0,
"logps/rejected": -1224.0,
"loss": 0.1222,
"rewards/accuracies": 0.9375,
"rewards/chosen": -3.28125,
"rewards/margins": 6.28125,
"rewards/rejected": -9.5625,
"step": 460
},
{
"epoch": 0.9832635983263598,
"grad_norm": 31.833980309732567,
"learning_rate": 4.269029751107489e-10,
"logits/chosen": -2.171875,
"logits/rejected": -1.84375,
"logps/chosen": -632.0,
"logps/rejected": -1168.0,
"loss": 0.1265,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -3.5,
"rewards/margins": 5.46875,
"rewards/rejected": -9.0,
"step": 470
},
{
"epoch": 1.0,
"step": 478,
"total_flos": 0.0,
"train_loss": 0.0488231029969379,
"train_runtime": 2602.953,
"train_samples_per_second": 23.486,
"train_steps_per_second": 0.184
}
],
"logging_steps": 10,
"max_steps": 478,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}