{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9995320542817033,
"eval_steps": 500,
"global_step": 1068,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009358914365933552,
"grad_norm": 438.15121357366934,
"learning_rate": 4.672897196261682e-08,
"logits/chosen": -2.27764630317688,
"logits/rejected": -2.233543634414673,
"logps/chosen": -181.85769653320312,
"logps/rejected": -160.63929748535156,
"loss": 0.6952,
"rewards/accuracies": 0.35624998807907104,
"rewards/chosen": -0.0006220912327989936,
"rewards/margins": -0.00201073894277215,
"rewards/rejected": 0.0013886478263884783,
"step": 10
},
{
"epoch": 0.018717828731867104,
"grad_norm": 392.19190635385917,
"learning_rate": 9.345794392523364e-08,
"logits/chosen": -2.1865861415863037,
"logits/rejected": -2.144514560699463,
"logps/chosen": -186.0104217529297,
"logps/rejected": -161.47190856933594,
"loss": 0.6858,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.014014361426234245,
"rewards/margins": 0.011283891275525093,
"rewards/rejected": 0.002730469685047865,
"step": 20
},
{
"epoch": 0.028076743097800654,
"grad_norm": 350.1425757201821,
"learning_rate": 1.4018691588785045e-07,
"logits/chosen": -2.2283122539520264,
"logits/rejected": -2.222783088684082,
"logps/chosen": -204.5108184814453,
"logps/rejected": -161.1767578125,
"loss": 0.6368,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": 0.14504937827587128,
"rewards/margins": 0.13457781076431274,
"rewards/rejected": 0.010471588931977749,
"step": 30
},
{
"epoch": 0.03743565746373421,
"grad_norm": 345.4533132113738,
"learning_rate": 1.8691588785046729e-07,
"logits/chosen": -2.2137038707733154,
"logits/rejected": -2.2049481868743896,
"logps/chosen": -183.1804962158203,
"logps/rejected": -182.72348022460938,
"loss": 0.5836,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 0.3550775945186615,
"rewards/margins": 0.3591943383216858,
"rewards/rejected": -0.0041167521849274635,
"step": 40
},
{
"epoch": 0.04679457182966776,
"grad_norm": 288.97151092196236,
"learning_rate": 2.336448598130841e-07,
"logits/chosen": -2.23293137550354,
"logits/rejected": -2.2262890338897705,
"logps/chosen": -187.1265869140625,
"logps/rejected": -173.95443725585938,
"loss": 0.5215,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.6253132820129395,
"rewards/margins": 0.6594952940940857,
"rewards/rejected": -0.03418206050992012,
"step": 50
},
{
"epoch": 0.05615348619560131,
"grad_norm": 404.3974395498644,
"learning_rate": 2.803738317757009e-07,
"logits/chosen": -2.178109884262085,
"logits/rejected": -2.1433067321777344,
"logps/chosen": -194.1527099609375,
"logps/rejected": -167.05271911621094,
"loss": 0.495,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.975957989692688,
"rewards/margins": 1.1514160633087158,
"rewards/rejected": -0.17545820772647858,
"step": 60
},
{
"epoch": 0.06551240056153486,
"grad_norm": 489.67076672915556,
"learning_rate": 3.271028037383177e-07,
"logits/chosen": -2.2335753440856934,
"logits/rejected": -2.2199859619140625,
"logps/chosen": -176.63388061523438,
"logps/rejected": -168.95254516601562,
"loss": 0.4915,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": 1.0813405513763428,
"rewards/margins": 1.4744064807891846,
"rewards/rejected": -0.39306575059890747,
"step": 70
},
{
"epoch": 0.07487131492746842,
"grad_norm": 268.7439784813086,
"learning_rate": 3.7383177570093457e-07,
"logits/chosen": -2.230722427368164,
"logits/rejected": -2.2134506702423096,
"logps/chosen": -179.94815063476562,
"logps/rejected": -163.42172241210938,
"loss": 0.4306,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": 0.9366798400878906,
"rewards/margins": 1.555740475654602,
"rewards/rejected": -0.6190606355667114,
"step": 80
},
{
"epoch": 0.08423022929340196,
"grad_norm": 407.10417831930414,
"learning_rate": 4.205607476635514e-07,
"logits/chosen": -2.239989757537842,
"logits/rejected": -2.2285830974578857,
"logps/chosen": -185.32884216308594,
"logps/rejected": -180.8179931640625,
"loss": 0.3964,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 0.9721916317939758,
"rewards/margins": 1.7323249578475952,
"rewards/rejected": -0.7601334452629089,
"step": 90
},
{
"epoch": 0.09358914365933552,
"grad_norm": 285.7378646864507,
"learning_rate": 4.672897196261682e-07,
"logits/chosen": -2.2750823497772217,
"logits/rejected": -2.257164716720581,
"logps/chosen": -173.72262573242188,
"logps/rejected": -175.11956787109375,
"loss": 0.3628,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.8861462473869324,
"rewards/margins": 2.0354702472686768,
"rewards/rejected": -1.1493237018585205,
"step": 100
},
{
"epoch": 0.10294805802526907,
"grad_norm": 326.35954947901496,
"learning_rate": 4.999879772999679e-07,
"logits/chosen": -2.205427646636963,
"logits/rejected": -2.2005703449249268,
"logps/chosen": -173.0555419921875,
"logps/rejected": -176.62982177734375,
"loss": 0.4143,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": 0.5173667073249817,
"rewards/margins": 1.9897832870483398,
"rewards/rejected": -1.472416639328003,
"step": 110
},
{
"epoch": 0.11230697239120262,
"grad_norm": 510.0144601970792,
"learning_rate": 4.997742725777528e-07,
"logits/chosen": -2.220973253250122,
"logits/rejected": -2.1889216899871826,
"logps/chosen": -182.09658813476562,
"logps/rejected": -182.0464324951172,
"loss": 0.3867,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.5381929874420166,
"rewards/margins": 1.9815146923065186,
"rewards/rejected": -1.4433214664459229,
"step": 120
},
{
"epoch": 0.12166588675713617,
"grad_norm": 296.75469977214414,
"learning_rate": 4.992936596071119e-07,
"logits/chosen": -2.277806043624878,
"logits/rejected": -2.266979455947876,
"logps/chosen": -184.37937927246094,
"logps/rejected": -196.90145874023438,
"loss": 0.3428,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 0.19669190049171448,
"rewards/margins": 2.241025447845459,
"rewards/rejected": -2.0443339347839355,
"step": 130
},
{
"epoch": 0.13102480112306972,
"grad_norm": 338.91893998964593,
"learning_rate": 4.985466519700276e-07,
"logits/chosen": -2.310959577560425,
"logits/rejected": -2.2651824951171875,
"logps/chosen": -206.59927368164062,
"logps/rejected": -198.52685546875,
"loss": 0.3218,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -0.12562061846256256,
"rewards/margins": 2.7015974521636963,
"rewards/rejected": -2.8272180557250977,
"step": 140
},
{
"epoch": 0.14038371548900327,
"grad_norm": 212.25106910764887,
"learning_rate": 4.975340479172484e-07,
"logits/chosen": -2.2387795448303223,
"logits/rejected": -2.2182071208953857,
"logps/chosen": -173.45755004882812,
"logps/rejected": -192.9697723388672,
"loss": 0.261,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -0.057323455810546875,
"rewards/margins": 2.911245584487915,
"rewards/rejected": -2.968569040298462,
"step": 150
},
{
"epoch": 0.14974262985493683,
"grad_norm": 449.1022472585512,
"learning_rate": 4.962569295152789e-07,
"logits/chosen": -2.269744634628296,
"logits/rejected": -2.2439522743225098,
"logps/chosen": -182.5315704345703,
"logps/rejected": -188.2720947265625,
"loss": 0.3652,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -0.4403308928012848,
"rewards/margins": 2.6585566997528076,
"rewards/rejected": -3.0988876819610596,
"step": 160
},
{
"epoch": 0.1591015442208704,
"grad_norm": 221.2797843804336,
"learning_rate": 4.947166614900862e-07,
"logits/chosen": -2.241987466812134,
"logits/rejected": -2.240777015686035,
"logps/chosen": -176.78402709960938,
"logps/rejected": -189.9019317626953,
"loss": 0.3148,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -0.5035785436630249,
"rewards/margins": 3.3230865001678467,
"rewards/rejected": -3.8266654014587402,
"step": 170
},
{
"epoch": 0.16846045858680392,
"grad_norm": 248.66687889155475,
"learning_rate": 4.929148897687566e-07,
"logits/chosen": -2.2612690925598145,
"logits/rejected": -2.2250888347625732,
"logps/chosen": -160.37701416015625,
"logps/rejected": -175.27114868164062,
"loss": 0.2945,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -0.7433215379714966,
"rewards/margins": 3.024913787841797,
"rewards/rejected": -3.768234968185425,
"step": 180
},
{
"epoch": 0.17781937295273748,
"grad_norm": 201.5607938140453,
"learning_rate": 4.908535397206616e-07,
"logits/chosen": -2.211106061935425,
"logits/rejected": -2.213930130004883,
"logps/chosen": -172.2119903564453,
"logps/rejected": -193.7111358642578,
"loss": 0.3633,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -0.5147860050201416,
"rewards/margins": 3.2517306804656982,
"rewards/rejected": -3.766516923904419,
"step": 190
},
{
"epoch": 0.18717828731867103,
"grad_norm": 242.24625564840164,
"learning_rate": 4.885348141000122e-07,
"logits/chosen": -2.246872901916504,
"logits/rejected": -2.2560267448425293,
"logps/chosen": -203.78329467773438,
"logps/rejected": -216.1337890625,
"loss": 0.2602,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": -0.6025756001472473,
"rewards/margins": 3.9953460693359375,
"rewards/rejected": -4.597921371459961,
"step": 200
},
{
"epoch": 0.1965372016846046,
"grad_norm": 373.54438813525843,
"learning_rate": 4.859611906920007e-07,
"logits/chosen": -2.2831497192382812,
"logits/rejected": -2.243542194366455,
"logps/chosen": -197.22555541992188,
"logps/rejected": -209.253662109375,
"loss": 0.2325,
"rewards/accuracies": 0.90625,
"rewards/chosen": -0.8766697645187378,
"rewards/margins": 3.9367973804473877,
"rewards/rejected": -4.813466548919678,
"step": 210
},
{
"epoch": 0.20589611605053815,
"grad_norm": 335.83234499510337,
"learning_rate": 4.831354196650446e-07,
"logits/chosen": -2.219947338104248,
"logits/rejected": -2.2009224891662598,
"logps/chosen": -195.6641387939453,
"logps/rejected": -214.68386840820312,
"loss": 0.2758,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -1.506487250328064,
"rewards/margins": 3.3951823711395264,
"rewards/rejected": -4.901669502258301,
"step": 220
},
{
"epoch": 0.21525503041647168,
"grad_norm": 217.04407322808672,
"learning_rate": 4.800605206319624e-07,
"logits/chosen": -2.290611743927002,
"logits/rejected": -2.2906360626220703,
"logps/chosen": -187.4936981201172,
"logps/rejected": -206.67166137695312,
"loss": 0.2448,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -0.8258201479911804,
"rewards/margins": 3.9309005737304688,
"rewards/rejected": -4.756721019744873,
"step": 230
},
{
"epoch": 0.22461394478240523,
"grad_norm": 268.14247446117065,
"learning_rate": 4.767397794232225e-07,
"logits/chosen": -2.2846646308898926,
"logits/rejected": -2.2862441539764404,
"logps/chosen": -204.64450073242188,
"logps/rejected": -207.92294311523438,
"loss": 0.227,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -0.8587066531181335,
"rewards/margins": 3.832045316696167,
"rewards/rejected": -4.690752029418945,
"step": 240
},
{
"epoch": 0.2339728591483388,
"grad_norm": 138.21085068846622,
"learning_rate": 4.731767445757111e-07,
"logits/chosen": -2.2376067638397217,
"logits/rejected": -2.2099392414093018,
"logps/chosen": -191.3949737548828,
"logps/rejected": -211.0519561767578,
"loss": 0.2885,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": -1.0055475234985352,
"rewards/margins": 4.678719997406006,
"rewards/rejected": -5.684267044067383,
"step": 250
},
{
"epoch": 0.24333177351427235,
"grad_norm": 220.922491224004,
"learning_rate": 4.6937522354077397e-07,
"logits/chosen": -2.2818140983581543,
"logits/rejected": -2.275104522705078,
"logps/chosen": -196.9884796142578,
"logps/rejected": -220.4885711669922,
"loss": 0.2789,
"rewards/accuracies": 0.875,
"rewards/chosen": -1.6425187587738037,
"rewards/margins": 4.461324214935303,
"rewards/rejected": -6.1038432121276855,
"step": 260
},
{
"epoch": 0.2526906878802059,
"grad_norm": 301.14297310445636,
"learning_rate": 4.6533927861558166e-07,
"logits/chosen": -2.2284903526306152,
"logits/rejected": -2.2001399993896484,
"logps/chosen": -200.37940979003906,
"logps/rejected": -221.6734619140625,
"loss": 0.2105,
"rewards/accuracies": 0.90625,
"rewards/chosen": -1.2222009897232056,
"rewards/margins": 4.569552421569824,
"rewards/rejected": -5.791752815246582,
"step": 270
},
{
"epoch": 0.26204960224613943,
"grad_norm": 173.76590512224945,
"learning_rate": 4.6107322260216787e-07,
"logits/chosen": -2.269810199737549,
"logits/rejected": -2.2810139656066895,
"logps/chosen": -185.00311279296875,
"logps/rejected": -212.89730834960938,
"loss": 0.3261,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": -1.5367414951324463,
"rewards/margins": 4.153133869171143,
"rewards/rejected": -5.68987512588501,
"step": 280
},
{
"epoch": 0.271408516612073,
"grad_norm": 306.28569991504236,
"learning_rate": 4.565816141987782e-07,
"logits/chosen": -2.2453150749206543,
"logits/rejected": -2.249136209487915,
"logps/chosen": -205.7445068359375,
"logps/rejected": -240.56655883789062,
"loss": 0.2769,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -1.5990461111068726,
"rewards/margins": 4.568573951721191,
"rewards/rejected": -6.167620658874512,
"step": 290
},
{
"epoch": 0.28076743097800655,
"grad_norm": 338.2369819535066,
"learning_rate": 4.518692531284555e-07,
"logits/chosen": -2.245236873626709,
"logits/rejected": -2.235701560974121,
"logps/chosen": -195.2344970703125,
"logps/rejected": -242.3723907470703,
"loss": 0.3172,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -1.8885822296142578,
"rewards/margins": 4.1689043045043945,
"rewards/rejected": -6.057486534118652,
"step": 300
},
{
"epoch": 0.2901263453439401,
"grad_norm": 259.98560817847823,
"learning_rate": 4.469411750100657e-07,
"logits/chosen": -2.271074056625366,
"logits/rejected": -2.259099245071411,
"logps/chosen": -177.983154296875,
"logps/rejected": -225.9912872314453,
"loss": 0.1695,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -1.4376739263534546,
"rewards/margins": 4.754927635192871,
"rewards/rejected": -6.192601680755615,
"step": 310
},
{
"epoch": 0.29948525970987366,
"grad_norm": 140.72410819610812,
"learning_rate": 4.418026459772465e-07,
"logits/chosen": -2.323652744293213,
"logits/rejected": -2.3101260662078857,
"logps/chosen": -190.6024169921875,
"logps/rejected": -238.22805786132812,
"loss": 0.211,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -1.3049207925796509,
"rewards/margins": 5.320730686187744,
"rewards/rejected": -6.6256513595581055,
"step": 320
},
{
"epoch": 0.3088441740758072,
"grad_norm": 170.38089922479452,
"learning_rate": 4.3645915705102875e-07,
"logits/chosen": -2.300119161605835,
"logits/rejected": -2.2830920219421387,
"logps/chosen": -193.13084411621094,
"logps/rejected": -223.0570831298828,
"loss": 0.244,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -2.01973295211792,
"rewards/margins": 4.6583662033081055,
"rewards/rejected": -6.678099155426025,
"step": 330
},
{
"epoch": 0.3182030884417408,
"grad_norm": 288.1744910027074,
"learning_rate": 4.3091641827214255e-07,
"logits/chosen": -2.339986562728882,
"logits/rejected": -2.3027901649475098,
"logps/chosen": -215.3566436767578,
"logps/rejected": -223.7361297607422,
"loss": 0.2796,
"rewards/accuracies": 0.90625,
"rewards/chosen": -2.0394206047058105,
"rewards/margins": 4.69008731842041,
"rewards/rejected": -6.729508399963379,
"step": 340
},
{
"epoch": 0.32756200280767434,
"grad_norm": 266.350804676357,
"learning_rate": 4.251803525992806e-07,
"logits/chosen": -2.4055254459381104,
"logits/rejected": -2.377108573913574,
"logps/chosen": -186.85452270507812,
"logps/rejected": -215.7018585205078,
"loss": 0.2159,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -2.4017603397369385,
"rewards/margins": 4.537654876708984,
"rewards/rejected": -6.939416408538818,
"step": 350
},
{
"epoch": 0.33692091717360784,
"grad_norm": 430.59991122058335,
"learning_rate": 4.192570895798369e-07,
"logits/chosen": -2.3556883335113525,
"logits/rejected": -2.3484387397766113,
"logps/chosen": -202.12490844726562,
"logps/rejected": -228.5521697998047,
"loss": 0.2098,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -2.2037243843078613,
"rewards/margins": 4.6899189949035645,
"rewards/rejected": -6.893643379211426,
"step": 360
},
{
"epoch": 0.3462798315395414,
"grad_norm": 208.29725176344635,
"learning_rate": 4.1315295879988603e-07,
"logits/chosen": -2.3661398887634277,
"logits/rejected": -2.3594870567321777,
"logps/chosen": -196.1697235107422,
"logps/rejected": -233.34750366210938,
"loss": 0.2062,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.0555663108825684,
"rewards/margins": 5.407596588134766,
"rewards/rejected": -7.463162899017334,
"step": 370
},
{
"epoch": 0.35563874590547495,
"grad_norm": 307.1229438949229,
"learning_rate": 4.06874483120401e-07,
"logits/chosen": -2.366969108581543,
"logits/rejected": -2.3364720344543457,
"logps/chosen": -191.20263671875,
"logps/rejected": -227.005615234375,
"loss": 0.1488,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -1.4671393632888794,
"rewards/margins": 5.521860599517822,
"rewards/rejected": -6.988999843597412,
"step": 380
},
{
"epoch": 0.3649976602714085,
"grad_norm": 343.77677311698415,
"learning_rate": 4.00428371706938e-07,
"logits/chosen": -2.4439613819122314,
"logits/rejected": -2.429218292236328,
"logps/chosen": -184.9285430908203,
"logps/rejected": -223.7628936767578,
"loss": 0.2388,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.4543546438217163,
"rewards/margins": 4.948115348815918,
"rewards/rejected": -6.402470588684082,
"step": 390
},
{
"epoch": 0.37435657463734207,
"grad_norm": 396.61140085928014,
"learning_rate": 3.9382151286023644e-07,
"logits/chosen": -2.3459649085998535,
"logits/rejected": -2.3251359462738037,
"logps/chosen": -215.3402862548828,
"logps/rejected": -227.07998657226562,
"loss": 0.2852,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": -1.916053056716919,
"rewards/margins": 4.319214820861816,
"rewards/rejected": -6.235267639160156,
"step": 400
},
{
"epoch": 0.3837154890032756,
"grad_norm": 438.08804622250545,
"learning_rate": 3.8706096665539617e-07,
"logits/chosen": -2.356903076171875,
"logits/rejected": -2.3512845039367676,
"logps/chosen": -200.12722778320312,
"logps/rejected": -226.90353393554688,
"loss": 0.3127,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -1.6871376037597656,
"rewards/margins": 4.592865943908691,
"rewards/rejected": -6.280003547668457,
"step": 410
},
{
"epoch": 0.3930744033692092,
"grad_norm": 120.04239302174234,
"learning_rate": 3.801539573974959e-07,
"logits/chosen": -2.3720829486846924,
"logits/rejected": -2.3591010570526123,
"logps/chosen": -189.6260528564453,
"logps/rejected": -217.00198364257812,
"loss": 0.1983,
"rewards/accuracies": 0.90625,
"rewards/chosen": -1.1001567840576172,
"rewards/margins": 4.759296894073486,
"rewards/rejected": -5.8594536781311035,
"step": 420
},
{
"epoch": 0.40243331773514274,
"grad_norm": 220.6642040395276,
"learning_rate": 3.7310786590171683e-07,
"logits/chosen": -2.3531527519226074,
"logits/rejected": -2.316413402557373,
"logps/chosen": -204.20730590820312,
"logps/rejected": -217.7911834716797,
"loss": 0.251,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.7743408679962158,
"rewards/margins": 4.7499566078186035,
"rewards/rejected": -6.524298191070557,
"step": 430
},
{
"epoch": 0.4117922321010763,
"grad_norm": 231.72106552607505,
"learning_rate": 3.659302216062191e-07,
"logits/chosen": -2.304792642593384,
"logits/rejected": -2.3092713356018066,
"logps/chosen": -193.0000457763672,
"logps/rejected": -231.2361602783203,
"loss": 0.1942,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.236583709716797,
"rewards/margins": 4.879889965057373,
"rewards/rejected": -7.116473197937012,
"step": 440
},
{
"epoch": 0.42115114646700985,
"grad_norm": 200.0155148050309,
"learning_rate": 3.586286945262007e-07,
"logits/chosen": -2.3446478843688965,
"logits/rejected": -2.346301555633545,
"logps/chosen": -202.21690368652344,
"logps/rejected": -237.6812744140625,
"loss": 0.1977,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.7922999858856201,
"rewards/margins": 5.313002109527588,
"rewards/rejected": -7.105301856994629,
"step": 450
},
{
"epoch": 0.43051006083294335,
"grad_norm": 243.59457753599952,
"learning_rate": 3.512110870577357e-07,
"logits/chosen": -2.329233169555664,
"logits/rejected": -2.316497564315796,
"logps/chosen": -200.449951171875,
"logps/rejected": -232.7522735595703,
"loss": 0.3232,
"rewards/accuracies": 0.90625,
"rewards/chosen": -2.161752939224243,
"rewards/margins": 4.482771873474121,
"rewards/rejected": -6.644524574279785,
"step": 460
},
{
"epoch": 0.4398689751988769,
"grad_norm": 236.19588125075293,
"learning_rate": 3.436853256401506e-07,
"logits/chosen": -2.426886796951294,
"logits/rejected": -2.4074816703796387,
"logps/chosen": -200.6336212158203,
"logps/rejected": -222.5684051513672,
"loss": 0.2956,
"rewards/accuracies": 0.875,
"rewards/chosen": -2.3288497924804688,
"rewards/margins": 4.197469234466553,
"rewards/rejected": -6.5263190269470215,
"step": 470
},
{
"epoch": 0.44922788956481047,
"grad_norm": 261.36723919084227,
"learning_rate": 3.3605945228584866e-07,
"logits/chosen": -2.3828203678131104,
"logits/rejected": -2.3804872035980225,
"logps/chosen": -183.07763671875,
"logps/rejected": -214.15109252929688,
"loss": 0.2362,
"rewards/accuracies": 0.90625,
"rewards/chosen": -2.2689507007598877,
"rewards/margins": 4.707828998565674,
"rewards/rejected": -6.976778984069824,
"step": 480
},
{
"epoch": 0.458586803930744,
"grad_norm": 357.0231496583015,
"learning_rate": 3.283416159866321e-07,
"logits/chosen": -2.3662495613098145,
"logits/rejected": -2.356351613998413,
"logps/chosen": -213.04910278320312,
"logps/rejected": -235.6800994873047,
"loss": 0.2538,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.9594087600708008,
"rewards/margins": 5.065524101257324,
"rewards/rejected": -7.024932861328125,
"step": 490
},
{
"epoch": 0.4679457182966776,
"grad_norm": 178.83525635594185,
"learning_rate": 3.205400640057081e-07,
"logits/chosen": -2.34678053855896,
"logits/rejected": -2.3390913009643555,
"logps/chosen": -200.38333129882812,
"logps/rejected": -236.71630859375,
"loss": 0.2016,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.038482189178467,
"rewards/margins": 4.947923183441162,
"rewards/rejected": -6.986405849456787,
"step": 500
},
{
"epoch": 0.47730463266261114,
"grad_norm": 154.56461882085978,
"learning_rate": 3.126631330646801e-07,
"logits/chosen": -2.380345106124878,
"logits/rejected": -2.366577625274658,
"logps/chosen": -220.55148315429688,
"logps/rejected": -243.6335906982422,
"loss": 0.228,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -1.79801344871521,
"rewards/margins": 4.938762187957764,
"rewards/rejected": -6.736774444580078,
"step": 510
},
{
"epoch": 0.4866635470285447,
"grad_norm": 272.7303604765264,
"learning_rate": 3.0471924043494595e-07,
"logits/chosen": -2.34561824798584,
"logits/rejected": -2.343247890472412,
"logps/chosen": -188.9125213623047,
"logps/rejected": -234.4366912841797,
"loss": 0.2892,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -2.0266098976135254,
"rewards/margins": 4.852522850036621,
"rewards/rejected": -6.8791327476501465,
"step": 520
},
{
"epoch": 0.49602246139447825,
"grad_norm": 188.04497446499659,
"learning_rate": 2.967168749430191e-07,
"logits/chosen": -2.376474618911743,
"logits/rejected": -2.3322389125823975,
"logps/chosen": -197.1897430419922,
"logps/rejected": -208.17019653320312,
"loss": 0.2509,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.9680665731430054,
"rewards/margins": 4.385739326477051,
"rewards/rejected": -6.353806495666504,
"step": 530
},
{
"epoch": 0.5053813757604118,
"grad_norm": 141.3355114298549,
"learning_rate": 2.8866458789938774e-07,
"logits/chosen": -2.341230869293213,
"logits/rejected": -2.3447136878967285,
"logps/chosen": -192.13973999023438,
"logps/rejected": -221.07467651367188,
"loss": 0.1769,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.8019930124282837,
"rewards/margins": 5.1995649337768555,
"rewards/rejected": -7.00155782699585,
"step": 540
},
{
"epoch": 0.5147402901263454,
"grad_norm": 353.0896169961274,
"learning_rate": 2.8057098396060196e-07,
"logits/chosen": -2.3936164379119873,
"logits/rejected": -2.3707709312438965,
"logps/chosen": -213.8645477294922,
"logps/rejected": -229.2753143310547,
"loss": 0.2376,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -2.169435739517212,
"rewards/margins": 4.956913948059082,
"rewards/rejected": -7.126348972320557,
"step": 550
},
{
"epoch": 0.5240992044922789,
"grad_norm": 175.32011122486495,
"learning_rate": 2.724447119343572e-07,
"logits/chosen": -2.3987956047058105,
"logits/rejected": -2.3760597705841064,
"logps/chosen": -211.10073852539062,
"logps/rejected": -236.55307006835938,
"loss": 0.2425,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": -2.3528664112091064,
"rewards/margins": 5.059988975524902,
"rewards/rejected": -7.412856101989746,
"step": 560
},
{
"epoch": 0.5334581188582125,
"grad_norm": 196.64668218353924,
"learning_rate": 2.642944555373965e-07,
"logits/chosen": -2.3882896900177,
"logits/rejected": -2.3592185974121094,
"logps/chosen": -217.615966796875,
"logps/rejected": -237.7814483642578,
"loss": 0.2914,
"rewards/accuracies": 0.90625,
"rewards/chosen": -2.401543378829956,
"rewards/margins": 4.712543964385986,
"rewards/rejected": -7.1140875816345215,
"step": 570
},
{
"epoch": 0.542817033224146,
"grad_norm": 275.5508915566092,
"learning_rate": 2.561289241161095e-07,
"logits/chosen": -2.42510986328125,
"logits/rejected": -2.3965210914611816,
"logps/chosen": -184.4060516357422,
"logps/rejected": -224.3748016357422,
"loss": 0.1981,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.069643020629883,
"rewards/margins": 4.64445686340332,
"rewards/rejected": -6.714099884033203,
"step": 580
},
{
"epoch": 0.5521759475900796,
"grad_norm": 122.43757714489868,
"learning_rate": 2.479568433397441e-07,
"logits/chosen": -2.441179037094116,
"logits/rejected": -2.418757200241089,
"logps/chosen": -178.72349548339844,
"logps/rejected": -230.2123565673828,
"loss": 0.2123,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.8525559902191162,
"rewards/margins": 4.985072135925293,
"rewards/rejected": -6.837628364562988,
"step": 590
},
{
"epoch": 0.5615348619560131,
"grad_norm": 236.4733706875993,
"learning_rate": 2.3978694587617473e-07,
"logits/chosen": -2.5012760162353516,
"logits/rejected": -2.5047078132629395,
"logps/chosen": -190.4002685546875,
"logps/rejected": -224.8535614013672,
"loss": 0.2095,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.8381601572036743,
"rewards/margins": 4.416454315185547,
"rewards/rejected": -6.25461483001709,
"step": 600
},
{
"epoch": 0.5708937763219466,
"grad_norm": 305.5301632445064,
"learning_rate": 2.3162796206019266e-07,
"logits/chosen": -2.513767957687378,
"logits/rejected": -2.4798645973205566,
"logps/chosen": -197.41751098632812,
"logps/rejected": -212.202392578125,
"loss": 0.2902,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.4360084533691406,
"rewards/margins": 4.890429496765137,
"rewards/rejected": -6.326437950134277,
"step": 610
},
{
"epoch": 0.5802526906878802,
"grad_norm": 204.01758629819477,
"learning_rate": 2.2348861056428868e-07,
"logits/chosen": -2.417910575866699,
"logits/rejected": -2.396533727645874,
"logps/chosen": -198.9875030517578,
"logps/rejected": -234.6140899658203,
"loss": 0.1734,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -1.8775478601455688,
"rewards/margins": 5.081615924835205,
"rewards/rejected": -6.959162712097168,
"step": 620
},
{
"epoch": 0.5896116050538137,
"grad_norm": 249.61384281322438,
"learning_rate": 2.153775890818989e-07,
"logits/chosen": -2.372988700866699,
"logits/rejected": -2.374239444732666,
"logps/chosen": -199.1547088623047,
"logps/rejected": -237.0546112060547,
"loss": 0.1912,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -2.228351593017578,
"rewards/margins": 5.300154685974121,
"rewards/rejected": -7.528506278991699,
"step": 630
},
{
"epoch": 0.5989705194197473,
"grad_norm": 233.18324352334685,
"learning_rate": 2.0730356503306806e-07,
"logits/chosen": -2.3942298889160156,
"logits/rejected": -2.3816661834716797,
"logps/chosen": -206.57546997070312,
"logps/rejected": -220.2229766845703,
"loss": 0.1882,
"rewards/accuracies": 0.9375,
"rewards/chosen": -1.8596980571746826,
"rewards/margins": 4.845181465148926,
"rewards/rejected": -6.7048797607421875,
"step": 640
},
{
"epoch": 0.6083294337856808,
"grad_norm": 284.0861886390374,
"learning_rate": 1.9927516630246335e-07,
"logits/chosen": -2.374399185180664,
"logits/rejected": -2.3482818603515625,
"logps/chosen": -202.29544067382812,
"logps/rejected": -216.13223266601562,
"loss": 0.1947,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -2.2385735511779785,
"rewards/margins": 5.253697395324707,
"rewards/rejected": -7.492271423339844,
"step": 650
},
{
"epoch": 0.6176883481516144,
"grad_norm": 122.40127647049943,
"learning_rate": 1.9130097201963545e-07,
"logits/chosen": -2.3976006507873535,
"logits/rejected": -2.373926877975464,
"logps/chosen": -202.98617553710938,
"logps/rejected": -230.7165069580078,
"loss": 0.1549,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -2.2640860080718994,
"rewards/margins": 5.643507957458496,
"rewards/rejected": -7.907593727111816,
"step": 660
},
{
"epoch": 0.6270472625175479,
"grad_norm": 253.97537353410252,
"learning_rate": 1.833895033913789e-07,
"logits/chosen": -2.401292324066162,
"logits/rejected": -2.388359785079956,
"logps/chosen": -204.93606567382812,
"logps/rejected": -233.9709014892578,
"loss": 0.2199,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -2.0720014572143555,
"rewards/margins": 5.02842903137207,
"rewards/rejected": -7.100430488586426,
"step": 670
},
{
"epoch": 0.6364061768834816,
"grad_norm": 160.3951062625682,
"learning_rate": 1.755492145959896e-07,
"logits/chosen": -2.4025282859802246,
"logits/rejected": -2.397082567214966,
"logps/chosen": -184.88265991210938,
"logps/rejected": -211.2412872314453,
"loss": 0.215,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -1.7722076177597046,
"rewards/margins": 4.766100883483887,
"rewards/rejected": -6.538309574127197,
"step": 680
},
{
"epoch": 0.6457650912494151,
"grad_norm": 183.24580011494754,
"learning_rate": 1.6778848374914728e-07,
"logits/chosen": -2.356121778488159,
"logits/rejected": -2.343468427658081,
"logps/chosen": -202.14730834960938,
"logps/rejected": -237.6175994873047,
"loss": 0.1561,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -2.2485437393188477,
"rewards/margins": 4.805045127868652,
"rewards/rejected": -7.0535888671875,
"step": 690
},
{
"epoch": 0.6551240056153487,
"grad_norm": 331.5265540619454,
"learning_rate": 1.6011560395107998e-07,
"logits/chosen": -2.309534788131714,
"logits/rejected": -2.313741445541382,
"logps/chosen": -219.30227661132812,
"logps/rejected": -265.6573486328125,
"loss": 0.1999,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -2.002448797225952,
"rewards/margins": 5.976955890655518,
"rewards/rejected": -7.979405403137207,
"step": 700
},
{
"epoch": 0.6644829199812822,
"grad_norm": 230.35235648135276,
"learning_rate": 1.5253877442457446e-07,
"logits/chosen": -2.380941390991211,
"logits/rejected": -2.3474934101104736,
"logps/chosen": -195.14735412597656,
"logps/rejected": -228.6056671142578,
"loss": 0.1553,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -2.0050787925720215,
"rewards/margins": 5.060803413391113,
"rewards/rejected": -7.065882682800293,
"step": 710
},
{
"epoch": 0.6738418343472157,
"grad_norm": 264.0638798597405,
"learning_rate": 1.450660917533048e-07,
"logits/chosen": -2.383948802947998,
"logits/rejected": -2.370701313018799,
"logps/chosen": -200.4253692626953,
"logps/rejected": -228.43276977539062,
"loss": 0.2231,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.5557230710983276,
"rewards/margins": 5.590066432952881,
"rewards/rejected": -7.145790100097656,
"step": 720
},
{
"epoch": 0.6832007487131493,
"grad_norm": 301.1690624380923,
"learning_rate": 1.377055412298402e-07,
"logits/chosen": -2.315328359603882,
"logits/rejected": -2.3330445289611816,
"logps/chosen": -184.9591827392578,
"logps/rejected": -221.6781463623047,
"loss": 0.2814,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -1.6768786907196045,
"rewards/margins": 4.688675880432129,
"rewards/rejected": -6.3655548095703125,
"step": 730
},
{
"epoch": 0.6925596630790828,
"grad_norm": 138.29726172165815,
"learning_rate": 1.3046498832257924e-07,
"logits/chosen": -2.261472463607788,
"logits/rejected": -2.2410614490509033,
"logps/chosen": -180.821044921875,
"logps/rejected": -227.9903564453125,
"loss": 0.2016,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.7726589441299438,
"rewards/margins": 5.4179792404174805,
"rewards/rejected": -7.190638542175293,
"step": 740
},
{
"epoch": 0.7019185774450164,
"grad_norm": 159.55209169708692,
"learning_rate": 1.233521702707264e-07,
"logits/chosen": -2.3717551231384277,
"logits/rejected": -2.3354125022888184,
"logps/chosen": -218.03079223632812,
"logps/rejected": -241.95547485351562,
"loss": 0.1846,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -1.688469648361206,
"rewards/margins": 5.713479518890381,
"rewards/rejected": -7.401949405670166,
"step": 750
},
{
"epoch": 0.7112774918109499,
"grad_norm": 148.40838445395983,
"learning_rate": 1.1637468781629567e-07,
"logits/chosen": -2.3702948093414307,
"logits/rejected": -2.3584303855895996,
"logps/chosen": -200.7034912109375,
"logps/rejected": -240.5320281982422,
"loss": 0.2118,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.420879602432251,
"rewards/margins": 4.9278974533081055,
"rewards/rejected": -7.348776817321777,
"step": 760
},
{
"epoch": 0.7206364061768835,
"grad_norm": 117.9389721926925,
"learning_rate": 1.0953999708197404e-07,
"logits/chosen": -2.3842949867248535,
"logits/rejected": -2.3798604011535645,
"logps/chosen": -177.48667907714844,
"logps/rejected": -225.3169403076172,
"loss": 0.1628,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -2.098729372024536,
"rewards/margins": 5.115200996398926,
"rewards/rejected": -7.213929653167725,
"step": 770
},
{
"epoch": 0.729995320542817,
"grad_norm": 97.92207841158323,
"learning_rate": 1.0285540160352404e-07,
"logits/chosen": -2.386287212371826,
"logits/rejected": -2.376877546310425,
"logps/chosen": -207.9984893798828,
"logps/rejected": -236.9729766845703,
"loss": 0.1808,
"rewards/accuracies": 0.9375,
"rewards/chosen": -1.8446118831634521,
"rewards/margins": 4.870266914367676,
"rewards/rejected": -6.714879035949707,
"step": 780
},
{
"epoch": 0.7393542349087506,
"grad_norm": 192.7463006493741,
"learning_rate": 9.632804452524256e-08,
"logits/chosen": -2.3924126625061035,
"logits/rejected": -2.3695528507232666,
"logps/chosen": -194.1177978515625,
"logps/rejected": -225.58261108398438,
"loss": 0.1798,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -1.8828728199005127,
"rewards/margins": 4.728604316711426,
"rewards/rejected": -6.611476898193359,
"step": 790
},
{
"epoch": 0.7487131492746841,
"grad_norm": 252.49293961822633,
"learning_rate": 8.996490096681109e-08,
"logits/chosen": -2.332010269165039,
"logits/rejected": -2.331939935684204,
"logps/chosen": -192.48745727539062,
"logps/rejected": -239.68603515625,
"loss": 0.2127,
"rewards/accuracies": 0.9375,
"rewards/chosen": -1.7579126358032227,
"rewards/margins": 5.512737274169922,
"rewards/rejected": -7.2706499099731445,
"step": 800
},
{
"epoch": 0.7580720636406177,
"grad_norm": 286.82155890951486,
"learning_rate": 8.377277056969842e-08,
"logits/chosen": -2.3116650581359863,
"logits/rejected": -2.294281482696533,
"logps/chosen": -200.29153442382812,
"logps/rejected": -229.8260040283203,
"loss": 0.2065,
"rewards/accuracies": 0.96875,
"rewards/chosen": -2.109861135482788,
"rewards/margins": 5.406586647033691,
"rewards/rejected": -7.5164475440979,
"step": 810
},
{
"epoch": 0.7674309780065512,
"grad_norm": 184.48830075035403,
"learning_rate": 7.775827023107834e-08,
"logits/chosen": -2.3696084022521973,
"logits/rejected": -2.3516430854797363,
"logps/chosen": -199.99896240234375,
"logps/rejected": -222.9709014892578,
"loss": 0.1882,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -1.6485363245010376,
"rewards/margins": 5.333346843719482,
"rewards/rejected": -6.981882572174072,
"step": 820
},
{
"epoch": 0.7767898923724847,
"grad_norm": 132.0229952982291,
"learning_rate": 7.192782703302785e-08,
"logits/chosen": -2.4008779525756836,
"logits/rejected": -2.4027934074401855,
"logps/chosen": -204.44485473632812,
"logps/rejected": -239.76693725585938,
"loss": 0.1912,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.0589280128479004,
"rewards/margins": 4.891021251678467,
"rewards/rejected": -6.949949741363525,
"step": 830
},
{
"epoch": 0.7861488067384184,
"grad_norm": 235.57073722900367,
"learning_rate": 6.628767137456067e-08,
"logits/chosen": -2.3839497566223145,
"logits/rejected": -2.3741023540496826,
"logps/chosen": -214.9656219482422,
"logps/rejected": -243.2672882080078,
"loss": 0.2597,
"rewards/accuracies": 0.90625,
"rewards/chosen": -1.827707290649414,
"rewards/margins": 5.181296348571777,
"rewards/rejected": -7.009003639221191,
"step": 840
},
{
"epoch": 0.7955077211043519,
"grad_norm": 158.98404160783085,
"learning_rate": 6.08438303138365e-08,
"logits/chosen": -2.3602254390716553,
"logits/rejected": -2.350632429122925,
"logps/chosen": -197.31517028808594,
"logps/rejected": -231.42343139648438,
"loss": 0.2479,
"rewards/accuracies": 0.90625,
"rewards/chosen": -2.3168513774871826,
"rewards/margins": 4.414270877838135,
"rewards/rejected": -6.731122016906738,
"step": 850
},
{
"epoch": 0.8048666354702855,
"grad_norm": 177.35304007639718,
"learning_rate": 5.560212112766011e-08,
"logits/chosen": -2.3707616329193115,
"logits/rejected": -2.3783464431762695,
"logps/chosen": -198.06982421875,
"logps/rejected": -237.296875,
"loss": 0.2453,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -1.8742965459823608,
"rewards/margins": 5.264628887176514,
"rewards/rejected": -7.138925075531006,
"step": 860
},
{
"epoch": 0.814225549836219,
"grad_norm": 271.13283993407134,
"learning_rate": 5.056814509515092e-08,
"logits/chosen": -2.428950071334839,
"logits/rejected": -2.3831164836883545,
"logps/chosen": -200.1393585205078,
"logps/rejected": -216.57424926757812,
"loss": 0.1966,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.7213060855865479,
"rewards/margins": 5.350318431854248,
"rewards/rejected": -7.071624755859375,
"step": 870
},
{
"epoch": 0.8235844642021526,
"grad_norm": 203.86764722500251,
"learning_rate": 4.57472815122294e-08,
"logits/chosen": -2.366086483001709,
"logits/rejected": -2.3685665130615234,
"logps/chosen": -190.0081787109375,
"logps/rejected": -232.5431365966797,
"loss": 0.2183,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -1.7962220907211304,
"rewards/margins": 5.570892333984375,
"rewards/rejected": -7.367114067077637,
"step": 880
},
{
"epoch": 0.8329433785680861,
"grad_norm": 271.41817915302744,
"learning_rate": 4.1144681943312135e-08,
"logits/chosen": -2.3388266563415527,
"logits/rejected": -2.342984676361084,
"logps/chosen": -193.9663848876953,
"logps/rejected": -224.80859375,
"loss": 0.262,
"rewards/accuracies": 0.90625,
"rewards/chosen": -2.0019173622131348,
"rewards/margins": 4.8847246170043945,
"rewards/rejected": -6.886641502380371,
"step": 890
},
{
"epoch": 0.8423022929340197,
"grad_norm": 308.2778765171746,
"learning_rate": 3.676526471636168e-08,
"logits/chosen": -2.3499531745910645,
"logits/rejected": -2.3488576412200928,
"logps/chosen": -193.8671112060547,
"logps/rejected": -219.2283172607422,
"loss": 0.1793,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -1.8417354822158813,
"rewards/margins": 4.747663974761963,
"rewards/rejected": -6.5893988609313965,
"step": 900
},
{
"epoch": 0.8516612072999532,
"grad_norm": 153.53030663881188,
"learning_rate": 3.2613709667171893e-08,
"logits/chosen": -2.388249635696411,
"logits/rejected": -2.360759735107422,
"logps/chosen": -199.05209350585938,
"logps/rejected": -228.4853973388672,
"loss": 0.1961,
"rewards/accuracies": 0.90625,
"rewards/chosen": -1.754990577697754,
"rewards/margins": 5.208621978759766,
"rewards/rejected": -6.9636125564575195,
"step": 910
},
{
"epoch": 0.8610201216658867,
"grad_norm": 291.40469015284674,
"learning_rate": 2.8694453138505296e-08,
"logits/chosen": -2.370460033416748,
"logits/rejected": -2.35982346534729,
"logps/chosen": -189.83285522460938,
"logps/rejected": -222.2431182861328,
"loss": 0.2066,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -2.2980434894561768,
"rewards/margins": 4.7502241134643555,
"rewards/rejected": -7.048267364501953,
"step": 920
},
{
"epoch": 0.8703790360318203,
"grad_norm": 290.7230352789667,
"learning_rate": 2.5011683239426847e-08,
"logits/chosen": -2.3978443145751953,
"logits/rejected": -2.3811287879943848,
"logps/chosen": -215.52938842773438,
"logps/rejected": -239.8271026611328,
"loss": 0.237,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -2.1587111949920654,
"rewards/margins": 5.388065338134766,
"rewards/rejected": -7.546775817871094,
"step": 930
},
{
"epoch": 0.8797379503977538,
"grad_norm": 220.6414563408128,
"learning_rate": 2.1569335369899884e-08,
"logits/chosen": -2.418283700942993,
"logits/rejected": -2.3877856731414795,
"logps/chosen": -211.22250366210938,
"logps/rejected": -232.1291046142578,
"loss": 0.2113,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -1.8959633111953735,
"rewards/margins": 5.136784553527832,
"rewards/rejected": -7.032748222351074,
"step": 940
},
{
"epoch": 0.8890968647636874,
"grad_norm": 143.48169448012388,
"learning_rate": 1.837108801542589e-08,
"logits/chosen": -2.411691904067993,
"logits/rejected": -2.4168848991394043,
"logps/chosen": -205.9098358154297,
"logps/rejected": -232.7529296875,
"loss": 0.1393,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.89556086063385,
"rewards/margins": 5.379176139831543,
"rewards/rejected": -7.2747368812561035,
"step": 950
},
{
"epoch": 0.8984557791296209,
"grad_norm": 276.0510358237357,
"learning_rate": 1.5420358816223e-08,
"logits/chosen": -2.351065158843994,
"logits/rejected": -2.3105835914611816,
"logps/chosen": -194.69497680664062,
"logps/rejected": -240.65762329101562,
"loss": 0.1762,
"rewards/accuracies": 0.96875,
"rewards/chosen": -1.7103493213653564,
"rewards/margins": 5.52829122543335,
"rewards/rejected": -7.238640785217285,
"step": 960
},
{
"epoch": 0.9078146934955545,
"grad_norm": 286.7038886752733,
"learning_rate": 1.2720300915142978e-08,
"logits/chosen": -2.3931686878204346,
"logits/rejected": -2.379987955093384,
"logps/chosen": -212.43826293945312,
"logps/rejected": -237.0255584716797,
"loss": 0.2259,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.4316928386688232,
"rewards/margins": 4.873816967010498,
"rewards/rejected": -7.3055100440979,
"step": 970
},
{
"epoch": 0.917173607861488,
"grad_norm": 227.75719345945672,
"learning_rate": 1.0273799588229659e-08,
"logits/chosen": -2.3514316082000732,
"logits/rejected": -2.3695566654205322,
"logps/chosen": -182.806884765625,
"logps/rejected": -219.46005249023438,
"loss": 0.1828,
"rewards/accuracies": 0.9375,
"rewards/chosen": -1.490412712097168,
"rewards/margins": 5.141217231750488,
"rewards/rejected": -6.631629943847656,
"step": 980
},
{
"epoch": 0.9265325222274217,
"grad_norm": 109.72235164161634,
"learning_rate": 8.08346916151903e-09,
"logits/chosen": -2.309913396835327,
"logits/rejected": -2.304335355758667,
"logps/chosen": -185.7053985595703,
"logps/rejected": -225.95895385742188,
"loss": 0.2306,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -1.6385961771011353,
"rewards/margins": 5.16446590423584,
"rewards/rejected": -6.803062438964844,
"step": 990
},
{
"epoch": 0.9358914365933552,
"grad_norm": 120.18799699014491,
"learning_rate": 6.151650217376347e-09,
"logits/chosen": -2.404142141342163,
"logits/rejected": -2.379453420639038,
"logps/chosen": -192.36676025390625,
"logps/rejected": -228.187744140625,
"loss": 0.2018,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.6089038848876953,
"rewards/margins": 5.643777370452881,
"rewards/rejected": -7.252681732177734,
"step": 1000
},
{
"epoch": 0.9452503509592888,
"grad_norm": 386.22845252678115,
"learning_rate": 4.480407093354566e-09,
"logits/chosen": -2.3859081268310547,
"logits/rejected": -2.392927646636963,
"logps/chosen": -207.25112915039062,
"logps/rejected": -233.6548614501953,
"loss": 0.204,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.6337274312973022,
"rewards/margins": 5.299923896789551,
"rewards/rejected": -6.933651924133301,
"step": 1010
},
{
"epoch": 0.9546092653252223,
"grad_norm": 186.14758486308767,
"learning_rate": 3.0715256762478825e-09,
"logits/chosen": -2.4298458099365234,
"logits/rejected": -2.4073410034179688,
"logps/chosen": -183.422607421875,
"logps/rejected": -216.18246459960938,
"loss": 0.216,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -1.9498409032821655,
"rewards/margins": 4.482772350311279,
"rewards/rejected": -6.432612419128418,
"step": 1020
},
{
"epoch": 0.9639681796911558,
"grad_norm": 123.36716955534204,
"learning_rate": 1.926511493696936e-09,
"logits/chosen": -2.3117165565490723,
"logits/rejected": -2.295056104660034,
"logps/chosen": -188.56947326660156,
"logps/rejected": -231.18417358398438,
"loss": 0.1393,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.7056289911270142,
"rewards/margins": 5.4376044273376465,
"rewards/rejected": -7.143233299255371,
"step": 1030
},
{
"epoch": 0.9733270940570894,
"grad_norm": 255.97860575442272,
"learning_rate": 1.04658810538516e-09,
"logits/chosen": -2.280712127685547,
"logits/rejected": -2.2452359199523926,
"logps/chosen": -218.2684326171875,
"logps/rejected": -241.2434539794922,
"loss": 0.2111,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -1.9387584924697876,
"rewards/margins": 5.03714656829834,
"rewards/rejected": -6.975905418395996,
"step": 1040
},
{
"epoch": 0.9826860084230229,
"grad_norm": 278.59930069753267,
"learning_rate": 4.3269579554558455e-10,
"logits/chosen": -2.375357151031494,
"logits/rejected": -2.3615028858184814,
"logps/chosen": -197.4428253173828,
"logps/rejected": -236.6564483642578,
"loss": 0.2418,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -2.3001036643981934,
"rewards/margins": 4.696293354034424,
"rewards/rejected": -6.996397495269775,
"step": 1050
},
{
"epoch": 0.9920449227889565,
"grad_norm": 267.76889839293335,
"learning_rate": 8.549056817513944e-11,
"logits/chosen": -2.360644817352295,
"logits/rejected": -2.354513645172119,
"logps/chosen": -188.0636749267578,
"logps/rejected": -226.8789520263672,
"loss": 0.221,
"rewards/accuracies": 0.90625,
"rewards/chosen": -2.1550252437591553,
"rewards/margins": 4.53070592880249,
"rewards/rejected": -6.685731410980225,
"step": 1060
},
{
"epoch": 0.9995320542817033,
"step": 1068,
"total_flos": 0.0,
"train_loss": 0.2609846996010913,
"train_runtime": 23133.9065,
"train_samples_per_second": 1.478,
"train_steps_per_second": 0.046
}
],
"logging_steps": 10,
"max_steps": 1068,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}