Training in progress, step 900, checkpoint
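The body below is the Trainer state JSON saved with this checkpoint: a few run-level fields (epoch, eval_steps, global_step) followed by a log_history list, where training entries log pairwise-preference metrics (loss, grad_norm, logps/chosen, logps/rejected, rewards/chosen, rewards/rejected, rewards/margins, rewards/accuracies) every 2 steps and evaluation entries (eval_loss, eval_rewards/*) every 100 steps. A minimal sketch of reading it, assuming the body is saved locally as trainer_state.json (the file name the Hugging Face Trainer writes inside a checkpoint folder):

    import json

    # Load the checkpoint's trainer state (assumed local path).
    with open("trainer_state.json") as f:
        state = json.load(f)

    # Training entries carry "loss"; evaluation entries carry "eval_loss".
    train_log = [e for e in state["log_history"] if "loss" in e]
    eval_log = [e for e in state["log_history"] if "eval_loss" in e]

    # Print the evaluation trajectory recorded every eval_steps (100) steps.
    for e in eval_log:
        print(f'step {e["step"]}: eval_loss={e["eval_loss"]:.4f}, '
              f'eval_rewards/margins={e["eval_rewards/margins"]:.4f}')

This only reads the log; it does not reproduce or modify the training run.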
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0262989095574087,
"eval_steps": 100,
"global_step": 900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002280664243460908,
"grad_norm": 104.30809234123367,
"learning_rate": 4e-09,
"logits/chosen": -1.353676199913025,
"logits/rejected": -1.4426417350769043,
"logps/chosen": -169.59426879882812,
"logps/rejected": -212.90069580078125,
"loss": 0.7312,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.23574542999267578,
"rewards/margins": -0.06179435923695564,
"rewards/rejected": 0.29753977060317993,
"step": 2
},
{
"epoch": 0.004561328486921816,
"grad_norm": 92.96399840391902,
"learning_rate": 8e-09,
"logits/chosen": -1.2849147319793701,
"logits/rejected": -1.3181504011154175,
"logps/chosen": -156.07391357421875,
"logps/rejected": -180.74073791503906,
"loss": 0.6935,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.32024019956588745,
"rewards/margins": 0.08485272526741028,
"rewards/rejected": 0.23538745939731598,
"step": 4
},
{
"epoch": 0.006841992730382724,
"grad_norm": 98.95959882761899,
"learning_rate": 1.1999999999999998e-08,
"logits/chosen": -1.4182077646255493,
"logits/rejected": -1.4656966924667358,
"logps/chosen": -162.13934326171875,
"logps/rejected": -188.761474609375,
"loss": 0.6766,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.4807170629501343,
"rewards/margins": 0.1233808621764183,
"rewards/rejected": 0.35733622312545776,
"step": 6
},
{
"epoch": 0.009122656973843632,
"grad_norm": 100.15592083854503,
"learning_rate": 1.6e-08,
"logits/chosen": -1.3926836252212524,
"logits/rejected": -1.401774287223816,
"logps/chosen": -170.7425079345703,
"logps/rejected": -190.57833862304688,
"loss": 0.7039,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.4466077983379364,
"rewards/margins": 0.06900040060281754,
"rewards/rejected": 0.37760740518569946,
"step": 8
},
{
"epoch": 0.01140332121730454,
"grad_norm": 97.0889776355223,
"learning_rate": 2e-08,
"logits/chosen": -1.376230239868164,
"logits/rejected": -1.409171223640442,
"logps/chosen": -169.57582092285156,
"logps/rejected": -185.82571411132812,
"loss": 0.7113,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.39193734526634216,
"rewards/margins": 0.3205062747001648,
"rewards/rejected": 0.07143110036849976,
"step": 10
},
{
"epoch": 0.013683985460765448,
"grad_norm": 92.66648488760384,
"learning_rate": 2.3999999999999997e-08,
"logits/chosen": -1.3613837957382202,
"logits/rejected": -1.4228042364120483,
"logps/chosen": -142.47850036621094,
"logps/rejected": -195.29649353027344,
"loss": 0.6867,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.19295203685760498,
"rewards/margins": -0.01336541399359703,
"rewards/rejected": 0.20631742477416992,
"step": 12
},
{
"epoch": 0.015964649704226355,
"grad_norm": 88.1654009314199,
"learning_rate": 2.8000000000000003e-08,
"logits/chosen": -1.5071678161621094,
"logits/rejected": -1.5190939903259277,
"logps/chosen": -128.34288024902344,
"logps/rejected": -131.07789611816406,
"loss": 0.6954,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.23906439542770386,
"rewards/margins": 0.07392804324626923,
"rewards/rejected": 0.16513636708259583,
"step": 14
},
{
"epoch": 0.018245313947687265,
"grad_norm": 83.0913005177034,
"learning_rate": 3.2e-08,
"logits/chosen": -1.2882771492004395,
"logits/rejected": -1.3209519386291504,
"logps/chosen": -125.15751647949219,
"logps/rejected": -132.21255493164062,
"loss": 0.6652,
"rewards/accuracies": 0.46875,
"rewards/chosen": 0.18403516709804535,
"rewards/margins": -0.032971642911434174,
"rewards/rejected": 0.21700681746006012,
"step": 16
},
{
"epoch": 0.02052597819114817,
"grad_norm": 92.77604284067941,
"learning_rate": 3.6e-08,
"logits/chosen": -1.3992947340011597,
"logits/rejected": -1.473150610923767,
"logps/chosen": -173.55206298828125,
"logps/rejected": -202.7044677734375,
"loss": 0.7212,
"rewards/accuracies": 0.34375,
"rewards/chosen": 0.4242628216743469,
"rewards/margins": -0.0976889505982399,
"rewards/rejected": 0.5219517350196838,
"step": 18
},
{
"epoch": 0.02280664243460908,
"grad_norm": 88.66990299818134,
"learning_rate": 4e-08,
"logits/chosen": -1.432981014251709,
"logits/rejected": -1.5539088249206543,
"logps/chosen": -182.9879913330078,
"logps/rejected": -203.99020385742188,
"loss": 0.7184,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.4513806104660034,
"rewards/margins": 0.20164981484413147,
"rewards/rejected": 0.24973079562187195,
"step": 20
},
{
"epoch": 0.02508730667806999,
"grad_norm": 77.91111396247051,
"learning_rate": 4.4e-08,
"logits/chosen": -1.3197808265686035,
"logits/rejected": -1.3996787071228027,
"logps/chosen": -154.55003356933594,
"logps/rejected": -232.81744384765625,
"loss": 0.6479,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.3307289481163025,
"rewards/margins": 0.18726389110088348,
"rewards/rejected": 0.14346502721309662,
"step": 22
},
{
"epoch": 0.027367970921530895,
"grad_norm": 99.08871355118758,
"learning_rate": 4.799999999999999e-08,
"logits/chosen": -1.3024228811264038,
"logits/rejected": -1.3258510828018188,
"logps/chosen": -131.18356323242188,
"logps/rejected": -148.7147216796875,
"loss": 0.7046,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.421053409576416,
"rewards/margins": 0.3122374713420868,
"rewards/rejected": 0.10881592333316803,
"step": 24
},
{
"epoch": 0.029648635164991805,
"grad_norm": 89.36423052851278,
"learning_rate": 5.2e-08,
"logits/chosen": -1.329531192779541,
"logits/rejected": -1.4354490041732788,
"logps/chosen": -168.1116180419922,
"logps/rejected": -207.8105010986328,
"loss": 0.6619,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.40395674109458923,
"rewards/margins": 0.26189538836479187,
"rewards/rejected": 0.14206132292747498,
"step": 26
},
{
"epoch": 0.03192929940845271,
"grad_norm": 99.33427508045129,
"learning_rate": 5.6000000000000005e-08,
"logits/chosen": -1.3796099424362183,
"logits/rejected": -1.3620996475219727,
"logps/chosen": -131.89183044433594,
"logps/rejected": -133.18844604492188,
"loss": 0.6782,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.37585678696632385,
"rewards/margins": 0.27094465494155884,
"rewards/rejected": 0.10491211712360382,
"step": 28
},
{
"epoch": 0.03420996365191362,
"grad_norm": 98.1725487266389,
"learning_rate": 6e-08,
"logits/chosen": -1.4092419147491455,
"logits/rejected": -1.4259543418884277,
"logps/chosen": -228.17083740234375,
"logps/rejected": -249.2948455810547,
"loss": 0.6923,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.4407646358013153,
"rewards/margins": 0.335997998714447,
"rewards/rejected": 0.10476663708686829,
"step": 30
},
{
"epoch": 0.03649062789537453,
"grad_norm": 75.77879619863296,
"learning_rate": 6.4e-08,
"logits/chosen": -1.364654541015625,
"logits/rejected": -1.4574649333953857,
"logps/chosen": -134.63072204589844,
"logps/rejected": -159.65719604492188,
"loss": 0.6454,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.28305765986442566,
"rewards/margins": 0.14592355489730835,
"rewards/rejected": 0.1371341049671173,
"step": 32
},
{
"epoch": 0.038771292138835435,
"grad_norm": 100.55666520099189,
"learning_rate": 6.8e-08,
"logits/chosen": -1.3500841856002808,
"logits/rejected": -1.4460691213607788,
"logps/chosen": -134.4729461669922,
"logps/rejected": -159.3612823486328,
"loss": 0.716,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.44891929626464844,
"rewards/margins": 0.12445079535245895,
"rewards/rejected": 0.3244684934616089,
"step": 34
},
{
"epoch": 0.04105195638229634,
"grad_norm": 104.71963688579538,
"learning_rate": 7.2e-08,
"logits/chosen": -1.3853086233139038,
"logits/rejected": -1.434361457824707,
"logps/chosen": -139.8603057861328,
"logps/rejected": -171.86366271972656,
"loss": 0.6988,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.3810410499572754,
"rewards/margins": 0.2534639537334442,
"rewards/rejected": 0.12757712602615356,
"step": 36
},
{
"epoch": 0.043332620625757254,
"grad_norm": 77.00258209889141,
"learning_rate": 7.599999999999999e-08,
"logits/chosen": -1.3689879179000854,
"logits/rejected": -1.3514069318771362,
"logps/chosen": -208.98460388183594,
"logps/rejected": -216.9095458984375,
"loss": 0.6691,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.5943199396133423,
"rewards/margins": 0.26101234555244446,
"rewards/rejected": 0.33330756425857544,
"step": 38
},
{
"epoch": 0.04561328486921816,
"grad_norm": 83.25872912559916,
"learning_rate": 8e-08,
"logits/chosen": -1.3037664890289307,
"logits/rejected": -1.3465715646743774,
"logps/chosen": -160.97340393066406,
"logps/rejected": -178.60267639160156,
"loss": 0.6319,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.4052881896495819,
"rewards/margins": 0.16212955117225647,
"rewards/rejected": 0.24315865337848663,
"step": 40
},
{
"epoch": 0.047893949112679066,
"grad_norm": 113.55879280714245,
"learning_rate": 8.4e-08,
"logits/chosen": -1.3739519119262695,
"logits/rejected": -1.442929744720459,
"logps/chosen": -163.52279663085938,
"logps/rejected": -217.51824951171875,
"loss": 0.7171,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.13579119741916656,
"rewards/margins": 0.14747555553913116,
"rewards/rejected": -0.011684387922286987,
"step": 42
},
{
"epoch": 0.05017461335613998,
"grad_norm": 81.14122988211874,
"learning_rate": 8.8e-08,
"logits/chosen": -1.373199701309204,
"logits/rejected": -1.4750025272369385,
"logps/chosen": -132.52114868164062,
"logps/rejected": -154.50648498535156,
"loss": 0.6947,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.4790118932723999,
"rewards/margins": 0.16499680280685425,
"rewards/rejected": 0.31401512026786804,
"step": 44
},
{
"epoch": 0.052455277599600884,
"grad_norm": 89.9603968710281,
"learning_rate": 9.2e-08,
"logits/chosen": -1.3388586044311523,
"logits/rejected": -1.4220290184020996,
"logps/chosen": -181.95082092285156,
"logps/rejected": -226.30923461914062,
"loss": 0.6941,
"rewards/accuracies": 0.46875,
"rewards/chosen": 0.4439522325992584,
"rewards/margins": 0.17398414015769958,
"rewards/rejected": 0.26996806263923645,
"step": 46
},
{
"epoch": 0.05473594184306179,
"grad_norm": 104.88400068525443,
"learning_rate": 9.599999999999999e-08,
"logits/chosen": -1.456312894821167,
"logits/rejected": -1.451519250869751,
"logps/chosen": -199.40493774414062,
"logps/rejected": -216.68386840820312,
"loss": 0.7537,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.4792614281177521,
"rewards/margins": 0.112982377409935,
"rewards/rejected": 0.36627906560897827,
"step": 48
},
{
"epoch": 0.0570166060865227,
"grad_norm": 73.96994315968016,
"learning_rate": 1e-07,
"logits/chosen": -1.3086615800857544,
"logits/rejected": -1.3628607988357544,
"logps/chosen": -151.69989013671875,
"logps/rejected": -171.46029663085938,
"loss": 0.6314,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.1983395218849182,
"rewards/margins": 0.22184070944786072,
"rewards/rejected": -0.023501206189393997,
"step": 50
},
{
"epoch": 0.05929727032998361,
"grad_norm": 100.64321021539473,
"learning_rate": 1.04e-07,
"logits/chosen": -1.5143202543258667,
"logits/rejected": -1.5794754028320312,
"logps/chosen": -118.7998046875,
"logps/rejected": -149.1954803466797,
"loss": 0.6704,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.34698718786239624,
"rewards/margins": 0.06618239730596542,
"rewards/rejected": 0.2808048129081726,
"step": 52
},
{
"epoch": 0.061577934573444515,
"grad_norm": 96.37635530030595,
"learning_rate": 1.08e-07,
"logits/chosen": -1.254115104675293,
"logits/rejected": -1.3123385906219482,
"logps/chosen": -152.57516479492188,
"logps/rejected": -172.66200256347656,
"loss": 0.6966,
"rewards/accuracies": 0.46875,
"rewards/chosen": 0.28736555576324463,
"rewards/margins": 0.0115619245916605,
"rewards/rejected": 0.2758035957813263,
"step": 54
},
{
"epoch": 0.06385859881690542,
"grad_norm": 99.11153586277212,
"learning_rate": 1.1200000000000001e-07,
"logits/chosen": -1.3405977487564087,
"logits/rejected": -1.4054450988769531,
"logps/chosen": -194.3892822265625,
"logps/rejected": -219.24171447753906,
"loss": 0.6625,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.382589727640152,
"rewards/margins": 0.24182234704494476,
"rewards/rejected": 0.14076738059520721,
"step": 56
},
{
"epoch": 0.06613926306036633,
"grad_norm": 82.95051663248452,
"learning_rate": 1.1599999999999999e-07,
"logits/chosen": -1.4561753273010254,
"logits/rejected": -1.4183000326156616,
"logps/chosen": -189.18923950195312,
"logps/rejected": -186.99264526367188,
"loss": 0.6206,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.4602745771408081,
"rewards/margins": 0.18983443081378937,
"rewards/rejected": 0.27044013142585754,
"step": 58
},
{
"epoch": 0.06841992730382725,
"grad_norm": 85.63947077517764,
"learning_rate": 1.2e-07,
"logits/chosen": -1.4239516258239746,
"logits/rejected": -1.5685184001922607,
"logps/chosen": -140.7595977783203,
"logps/rejected": -184.33067321777344,
"loss": 0.7255,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.2823754847049713,
"rewards/margins": 0.02492373436689377,
"rewards/rejected": 0.25745177268981934,
"step": 60
},
{
"epoch": 0.07070059154728815,
"grad_norm": 97.67365684595,
"learning_rate": 1.24e-07,
"logits/chosen": -1.350022554397583,
"logits/rejected": -1.3478128910064697,
"logps/chosen": -171.09677124023438,
"logps/rejected": -165.71694946289062,
"loss": 0.6819,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.3082874119281769,
"rewards/margins": 0.11261623352766037,
"rewards/rejected": 0.1956711858510971,
"step": 62
},
{
"epoch": 0.07298125579074906,
"grad_norm": 99.3804737289846,
"learning_rate": 1.28e-07,
"logits/chosen": -1.4815456867218018,
"logits/rejected": -1.4830272197723389,
"logps/chosen": -107.2784194946289,
"logps/rejected": -121.1039047241211,
"loss": 0.7165,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.31323567032814026,
"rewards/margins": 0.11968746036291122,
"rewards/rejected": 0.19354820251464844,
"step": 64
},
{
"epoch": 0.07526192003420996,
"grad_norm": 94.44141503559203,
"learning_rate": 1.32e-07,
"logits/chosen": -1.493153691291809,
"logits/rejected": -1.4850858449935913,
"logps/chosen": -164.4472198486328,
"logps/rejected": -169.9815673828125,
"loss": 0.6558,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.5161038637161255,
"rewards/margins": 0.30025073885917664,
"rewards/rejected": 0.21585312485694885,
"step": 66
},
{
"epoch": 0.07754258427767087,
"grad_norm": 86.21425469616315,
"learning_rate": 1.36e-07,
"logits/chosen": -1.333409070968628,
"logits/rejected": -1.3404844999313354,
"logps/chosen": -153.38101196289062,
"logps/rejected": -159.85690307617188,
"loss": 0.6781,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.3244006931781769,
"rewards/margins": 0.22184959053993225,
"rewards/rejected": 0.10255111753940582,
"step": 68
},
{
"epoch": 0.07982324852113178,
"grad_norm": 90.03565465587579,
"learning_rate": 1.3999999999999998e-07,
"logits/chosen": -1.5341051816940308,
"logits/rejected": -1.5601752996444702,
"logps/chosen": -149.2415008544922,
"logps/rejected": -156.4012908935547,
"loss": 0.6936,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.27590540051460266,
"rewards/margins": 0.12058861553668976,
"rewards/rejected": 0.1553167849779129,
"step": 70
},
{
"epoch": 0.08210391276459268,
"grad_norm": 80.65697099385187,
"learning_rate": 1.44e-07,
"logits/chosen": -1.3111834526062012,
"logits/rejected": -1.3080787658691406,
"logps/chosen": -152.25611877441406,
"logps/rejected": -169.93919372558594,
"loss": 0.6471,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.34636569023132324,
"rewards/margins": 0.3154309093952179,
"rewards/rejected": 0.03093479573726654,
"step": 72
},
{
"epoch": 0.0843845770080536,
"grad_norm": 116.72716177672243,
"learning_rate": 1.48e-07,
"logits/chosen": -1.3597153425216675,
"logits/rejected": -1.4166897535324097,
"logps/chosen": -174.33534240722656,
"logps/rejected": -200.9926300048828,
"loss": 0.6831,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.2202744334936142,
"rewards/margins": 0.13681186735630035,
"rewards/rejected": 0.08346255123615265,
"step": 74
},
{
"epoch": 0.08666524125151451,
"grad_norm": 99.62367819711024,
"learning_rate": 1.5199999999999998e-07,
"logits/chosen": -1.3973196744918823,
"logits/rejected": -1.4653477668762207,
"logps/chosen": -189.6744384765625,
"logps/rejected": -224.8593292236328,
"loss": 0.7086,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.33062875270843506,
"rewards/margins": 0.11676961928606033,
"rewards/rejected": 0.21385914087295532,
"step": 76
},
{
"epoch": 0.08894590549497541,
"grad_norm": 86.47188266565705,
"learning_rate": 1.56e-07,
"logits/chosen": -1.3268086910247803,
"logits/rejected": -1.302788257598877,
"logps/chosen": -135.62147521972656,
"logps/rejected": -153.5860137939453,
"loss": 0.6835,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.2682960629463196,
"rewards/margins": 0.05024400353431702,
"rewards/rejected": 0.21805202960968018,
"step": 78
},
{
"epoch": 0.09122656973843632,
"grad_norm": 125.16786139574279,
"learning_rate": 1.6e-07,
"logits/chosen": -1.2104482650756836,
"logits/rejected": -1.3274182081222534,
"logps/chosen": -161.164794921875,
"logps/rejected": -210.53590393066406,
"loss": 0.7174,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.2355586439371109,
"rewards/margins": 0.07326290011405945,
"rewards/rejected": 0.16229577362537384,
"step": 80
},
{
"epoch": 0.09350723398189723,
"grad_norm": 98.85832957481998,
"learning_rate": 1.6399999999999999e-07,
"logits/chosen": -1.3579671382904053,
"logits/rejected": -1.4671956300735474,
"logps/chosen": -153.21591186523438,
"logps/rejected": -173.60589599609375,
"loss": 0.6862,
"rewards/accuracies": 0.46875,
"rewards/chosen": 0.2675905227661133,
"rewards/margins": 0.046474047005176544,
"rewards/rejected": 0.22111651301383972,
"step": 82
},
{
"epoch": 0.09578789822535813,
"grad_norm": 83.11445245790541,
"learning_rate": 1.68e-07,
"logits/chosen": -1.2888550758361816,
"logits/rejected": -1.3688700199127197,
"logps/chosen": -145.24754333496094,
"logps/rejected": -174.91700744628906,
"loss": 0.637,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.36274462938308716,
"rewards/margins": 0.15029877424240112,
"rewards/rejected": 0.21244585514068604,
"step": 84
},
{
"epoch": 0.09806856246881904,
"grad_norm": 82.49239750970325,
"learning_rate": 1.7199999999999998e-07,
"logits/chosen": -1.3739551305770874,
"logits/rejected": -1.3799883127212524,
"logps/chosen": -178.80386352539062,
"logps/rejected": -215.7256317138672,
"loss": 0.6907,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.32459211349487305,
"rewards/margins": 0.16312535107135773,
"rewards/rejected": 0.1614667773246765,
"step": 86
},
{
"epoch": 0.10034922671227996,
"grad_norm": 105.64550946139919,
"learning_rate": 1.76e-07,
"logits/chosen": -1.4653171300888062,
"logits/rejected": -1.508003830909729,
"logps/chosen": -192.55154418945312,
"logps/rejected": -214.61546325683594,
"loss": 0.7401,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.43679386377334595,
"rewards/margins": 0.1878117173910141,
"rewards/rejected": 0.24898216128349304,
"step": 88
},
{
"epoch": 0.10262989095574086,
"grad_norm": 106.99677192741333,
"learning_rate": 1.8e-07,
"logits/chosen": -1.3307772874832153,
"logits/rejected": -1.4872570037841797,
"logps/chosen": -226.3917236328125,
"logps/rejected": -265.8712158203125,
"loss": 0.6758,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.42067086696624756,
"rewards/margins": 0.07930833846330643,
"rewards/rejected": 0.34136250615119934,
"step": 90
},
{
"epoch": 0.10491055519920177,
"grad_norm": 104.67741510645097,
"learning_rate": 1.84e-07,
"logits/chosen": -1.3470810651779175,
"logits/rejected": -1.3835241794586182,
"logps/chosen": -162.22958374023438,
"logps/rejected": -184.17697143554688,
"loss": 0.6993,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.25471583008766174,
"rewards/margins": 0.08957283943891525,
"rewards/rejected": 0.1651429831981659,
"step": 92
},
{
"epoch": 0.10719121944266267,
"grad_norm": 82.2596269922547,
"learning_rate": 1.88e-07,
"logits/chosen": -1.3317389488220215,
"logits/rejected": -1.367332935333252,
"logps/chosen": -138.1045684814453,
"logps/rejected": -213.2273712158203,
"loss": 0.5933,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.27782779932022095,
"rewards/margins": 0.2610364258289337,
"rewards/rejected": 0.01679137721657753,
"step": 94
},
{
"epoch": 0.10947188368612358,
"grad_norm": 106.46518450026352,
"learning_rate": 1.9199999999999997e-07,
"logits/chosen": -1.3722081184387207,
"logits/rejected": -1.4072837829589844,
"logps/chosen": -185.40281677246094,
"logps/rejected": -205.63037109375,
"loss": 0.7079,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.4706501066684723,
"rewards/margins": 0.29465728998184204,
"rewards/rejected": 0.17599281668663025,
"step": 96
},
{
"epoch": 0.11175254792958449,
"grad_norm": 79.05608727628055,
"learning_rate": 1.9599999999999998e-07,
"logits/chosen": -1.2723050117492676,
"logits/rejected": -1.2712621688842773,
"logps/chosen": -170.6747589111328,
"logps/rejected": -172.70376586914062,
"loss": 0.6713,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.4158933460712433,
"rewards/margins": 0.22599810361862183,
"rewards/rejected": 0.18989527225494385,
"step": 98
},
{
"epoch": 0.1140332121730454,
"grad_norm": 102.50537690642588,
"learning_rate": 2e-07,
"logits/chosen": -1.2609540224075317,
"logits/rejected": -1.3507779836654663,
"logps/chosen": -126.81893157958984,
"logps/rejected": -172.69830322265625,
"loss": 0.7344,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.2087049037218094,
"rewards/margins": 0.05775710940361023,
"rewards/rejected": 0.15094780921936035,
"step": 100
},
{
"epoch": 0.1140332121730454,
"eval_logits/chosen": -1.465891718864441,
"eval_logits/rejected": -1.452298879623413,
"eval_logps/chosen": -126.79114532470703,
"eval_logps/rejected": -128.49049377441406,
"eval_loss": 0.7120834589004517,
"eval_rewards/accuracies": 0.4000000059604645,
"eval_rewards/chosen": 0.3106829524040222,
"eval_rewards/margins": -0.04654194042086601,
"eval_rewards/rejected": 0.3572249114513397,
"eval_runtime": 22.2628,
"eval_samples_per_second": 4.492,
"eval_steps_per_second": 1.123,
"step": 100
},
{
"epoch": 0.11631387641650631,
"grad_norm": 115.65869747987765,
"learning_rate": 1.9999927671554446e-07,
"logits/chosen": -1.4759191274642944,
"logits/rejected": -1.458261251449585,
"logps/chosen": -200.50059509277344,
"logps/rejected": -213.1103973388672,
"loss": 0.6618,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.3923037648200989,
"rewards/margins": 0.1664755940437317,
"rewards/rejected": 0.2258281409740448,
"step": 102
},
{
"epoch": 0.11859454065996722,
"grad_norm": 93.12582037555201,
"learning_rate": 1.9999710687264071e-07,
"logits/chosen": -1.311631441116333,
"logits/rejected": -1.3278183937072754,
"logps/chosen": -132.20401000976562,
"logps/rejected": -157.69627380371094,
"loss": 0.7135,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.19503216445446014,
"rewards/margins": -0.017576567828655243,
"rewards/rejected": 0.21260873973369598,
"step": 104
},
{
"epoch": 0.12087520490342812,
"grad_norm": 76.40018414480076,
"learning_rate": 1.9999349050267698e-07,
"logits/chosen": -1.291077733039856,
"logits/rejected": -1.3958327770233154,
"logps/chosen": -158.81573486328125,
"logps/rejected": -199.284423828125,
"loss": 0.6252,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.25081124901771545,
"rewards/margins": 0.25090575218200684,
"rewards/rejected": -9.45068895816803e-05,
"step": 106
},
{
"epoch": 0.12315586914688903,
"grad_norm": 90.49004280394047,
"learning_rate": 1.9998842765796658e-07,
"logits/chosen": -1.4403332471847534,
"logits/rejected": -1.4392634630203247,
"logps/chosen": -143.04052734375,
"logps/rejected": -156.4816436767578,
"loss": 0.6816,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.10938511788845062,
"rewards/margins": 0.17133575677871704,
"rewards/rejected": -0.06195063889026642,
"step": 108
},
{
"epoch": 0.12543653339034994,
"grad_norm": 81.5018944914688,
"learning_rate": 1.9998191841174702e-07,
"logits/chosen": -1.4619545936584473,
"logits/rejected": -1.483961582183838,
"logps/chosen": -120.11573028564453,
"logps/rejected": -121.2934341430664,
"loss": 0.6783,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.4725874662399292,
"rewards/margins": 0.37433862686157227,
"rewards/rejected": 0.09824882447719574,
"step": 110
},
{
"epoch": 0.12771719763381084,
"grad_norm": 91.38630396004429,
"learning_rate": 1.9997396285817904e-07,
"logits/chosen": -1.152282953262329,
"logits/rejected": -1.252787470817566,
"logps/chosen": -145.3370361328125,
"logps/rejected": -174.6603240966797,
"loss": 0.6269,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.40825897455215454,
"rewards/margins": 0.25122010707855225,
"rewards/rejected": 0.1570388376712799,
"step": 112
},
{
"epoch": 0.12999786187727175,
"grad_norm": 97.62629025517847,
"learning_rate": 1.9996456111234526e-07,
"logits/chosen": -1.393836259841919,
"logits/rejected": -1.5118399858474731,
"logps/chosen": -173.96847534179688,
"logps/rejected": -229.94232177734375,
"loss": 0.6697,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.21259565651416779,
"rewards/margins": 0.18360112607479095,
"rewards/rejected": 0.028994524851441383,
"step": 114
},
{
"epoch": 0.13227852612073265,
"grad_norm": 96.19706483741392,
"learning_rate": 1.9995371331024833e-07,
"logits/chosen": -1.299769401550293,
"logits/rejected": -1.3910255432128906,
"logps/chosen": -112.2242660522461,
"logps/rejected": -130.05230712890625,
"loss": 0.7283,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.14800208806991577,
"rewards/margins": -0.08275800198316574,
"rewards/rejected": 0.23076008260250092,
"step": 116
},
{
"epoch": 0.13455919036419356,
"grad_norm": 114.37970019595372,
"learning_rate": 1.999414196088092e-07,
"logits/chosen": -1.3472720384597778,
"logits/rejected": -1.4912211894989014,
"logps/chosen": -202.3058319091797,
"logps/rejected": -244.92904663085938,
"loss": 0.7194,
"rewards/accuracies": 0.46875,
"rewards/chosen": 0.3503240644931793,
"rewards/margins": 0.024903126060962677,
"rewards/rejected": 0.32542091608047485,
"step": 118
},
{
"epoch": 0.1368398546076545,
"grad_norm": 87.71566928589093,
"learning_rate": 1.9992768018586478e-07,
"logits/chosen": -1.3490660190582275,
"logits/rejected": -1.3772616386413574,
"logps/chosen": -146.59048461914062,
"logps/rejected": -159.0959930419922,
"loss": 0.6736,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.3947565257549286,
"rewards/margins": 0.15041925013065338,
"rewards/rejected": 0.244337260723114,
"step": 120
},
{
"epoch": 0.1391205188511154,
"grad_norm": 109.98286067474862,
"learning_rate": 1.9991249524016528e-07,
"logits/chosen": -1.2736543416976929,
"logits/rejected": -1.4115746021270752,
"logps/chosen": -150.3526611328125,
"logps/rejected": -189.20545959472656,
"loss": 0.761,
"rewards/accuracies": 0.46875,
"rewards/chosen": 0.28761598467826843,
"rewards/margins": -0.15278732776641846,
"rewards/rejected": 0.44040337204933167,
"step": 122
},
{
"epoch": 0.1414011830945763,
"grad_norm": 108.93431507211488,
"learning_rate": 1.9989586499137135e-07,
"logits/chosen": -1.2978495359420776,
"logits/rejected": -1.3472024202346802,
"logps/chosen": -179.5664825439453,
"logps/rejected": -203.53968811035156,
"loss": 0.7215,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.39709562063217163,
"rewards/margins": 0.20268934965133667,
"rewards/rejected": 0.19440627098083496,
"step": 124
},
{
"epoch": 0.1436818473380372,
"grad_norm": 99.31026397357329,
"learning_rate": 1.9987778968005106e-07,
"logits/chosen": -1.2714462280273438,
"logits/rejected": -1.4020087718963623,
"logps/chosen": -142.49758911132812,
"logps/rejected": -262.75775146484375,
"loss": 0.7111,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.1395024061203003,
"rewards/margins": -0.11353214085102081,
"rewards/rejected": 0.2530345320701599,
"step": 126
},
{
"epoch": 0.14596251158149812,
"grad_norm": 92.0460328359884,
"learning_rate": 1.9985826956767617e-07,
"logits/chosen": -1.474685788154602,
"logits/rejected": -1.5739442110061646,
"logps/chosen": -135.28616333007812,
"logps/rejected": -165.1414031982422,
"loss": 0.6874,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.14916732907295227,
"rewards/margins": 0.06242816895246506,
"rewards/rejected": 0.0867391973733902,
"step": 128
},
{
"epoch": 0.14824317582495902,
"grad_norm": 96.22054918626448,
"learning_rate": 1.9983730493661865e-07,
"logits/chosen": -1.276041030883789,
"logits/rejected": -1.3194851875305176,
"logps/chosen": -221.15992736816406,
"logps/rejected": -242.77647399902344,
"loss": 0.6457,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.260553777217865,
"rewards/margins": 0.26679596304893494,
"rewards/rejected": -0.006242218893021345,
"step": 130
},
{
"epoch": 0.15052384006841993,
"grad_norm": 78.01858280488834,
"learning_rate": 1.998148960901463e-07,
"logits/chosen": -1.393154263496399,
"logits/rejected": -1.423519492149353,
"logps/chosen": -201.9635467529297,
"logps/rejected": -235.25816345214844,
"loss": 0.6893,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.382313072681427,
"rewards/margins": 0.3356338441371918,
"rewards/rejected": 0.04667920991778374,
"step": 132
},
{
"epoch": 0.15280450431188083,
"grad_norm": 103.00857915327929,
"learning_rate": 1.997910433524185e-07,
"logits/chosen": -1.3205957412719727,
"logits/rejected": -1.3766120672225952,
"logps/chosen": -189.32142639160156,
"logps/rejected": -217.68544006347656,
"loss": 0.724,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.25681746006011963,
"rewards/margins": 0.15590888261795044,
"rewards/rejected": 0.10090853273868561,
"step": 134
},
{
"epoch": 0.15508516855534174,
"grad_norm": 90.52690719042177,
"learning_rate": 1.9976574706848153e-07,
"logits/chosen": -1.4535361528396606,
"logits/rejected": -1.4828790426254272,
"logps/chosen": -171.7893829345703,
"logps/rejected": -213.03846740722656,
"loss": 0.6788,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.33188748359680176,
"rewards/margins": 0.07647443562746048,
"rewards/rejected": 0.2554129958152771,
"step": 136
},
{
"epoch": 0.15736583279880265,
"grad_norm": 101.69492728925366,
"learning_rate": 1.9973900760426363e-07,
"logits/chosen": -1.4226858615875244,
"logits/rejected": -1.4269222021102905,
"logps/chosen": -166.29103088378906,
"logps/rejected": -268.772705078125,
"loss": 0.678,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.3539034128189087,
"rewards/margins": 0.052625708281993866,
"rewards/rejected": 0.3012777268886566,
"step": 138
},
{
"epoch": 0.15964649704226355,
"grad_norm": 83.07835169069685,
"learning_rate": 1.9971082534656955e-07,
"logits/chosen": -1.37977933883667,
"logits/rejected": -1.466927409172058,
"logps/chosen": -113.37686157226562,
"logps/rejected": -142.82138061523438,
"loss": 0.6442,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.15869390964508057,
"rewards/margins": 0.20419231057167053,
"rewards/rejected": -0.045498400926589966,
"step": 140
},
{
"epoch": 0.16192716128572446,
"grad_norm": 103.54665420816978,
"learning_rate": 1.99681200703075e-07,
"logits/chosen": -1.4178937673568726,
"logits/rejected": -1.4681955575942993,
"logps/chosen": -158.346435546875,
"logps/rejected": -180.1523895263672,
"loss": 0.7127,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.2817228436470032,
"rewards/margins": 0.11103209853172302,
"rewards/rejected": 0.17069074511528015,
"step": 142
},
{
"epoch": 0.16420782552918536,
"grad_norm": 77.71797198863521,
"learning_rate": 1.9965013410232097e-07,
"logits/chosen": -1.40109384059906,
"logits/rejected": -1.4553627967834473,
"logps/chosen": -172.07899475097656,
"logps/rejected": -222.21438598632812,
"loss": 0.6728,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.27610713243484497,
"rewards/margins": 0.35402336716651917,
"rewards/rejected": -0.077916219830513,
"step": 144
},
{
"epoch": 0.16648848977264627,
"grad_norm": 91.27829192980697,
"learning_rate": 1.996176259937072e-07,
"logits/chosen": -1.3127285242080688,
"logits/rejected": -1.3604404926300049,
"logps/chosen": -168.33004760742188,
"logps/rejected": -193.3489227294922,
"loss": 0.679,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.18943265080451965,
"rewards/margins": 0.11781211942434311,
"rewards/rejected": 0.07162053138017654,
"step": 146
},
{
"epoch": 0.1687691540161072,
"grad_norm": 105.10949942036537,
"learning_rate": 1.9958367684748586e-07,
"logits/chosen": -1.301593542098999,
"logits/rejected": -1.3123453855514526,
"logps/chosen": -153.8624267578125,
"logps/rejected": -172.3568115234375,
"loss": 0.6824,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.022907603532075882,
"rewards/margins": 0.08112253248691559,
"rewards/rejected": -0.058214932680130005,
"step": 148
},
{
"epoch": 0.1710498182595681,
"grad_norm": 99.17231684606652,
"learning_rate": 1.995482871547548e-07,
"logits/chosen": -1.2876176834106445,
"logits/rejected": -1.3324649333953857,
"logps/chosen": -127.41693115234375,
"logps/rejected": -158.8002166748047,
"loss": 0.6995,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.21076691150665283,
"rewards/margins": 0.24259139597415924,
"rewards/rejected": -0.031824491918087006,
"step": 150
},
{
"epoch": 0.17333048250302902,
"grad_norm": 91.41364046696481,
"learning_rate": 1.9951145742745024e-07,
"logits/chosen": -1.2649379968643188,
"logits/rejected": -1.4087891578674316,
"logps/chosen": -130.08468627929688,
"logps/rejected": -158.69525146484375,
"loss": 0.7016,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.23989444971084595,
"rewards/margins": 0.030172180384397507,
"rewards/rejected": 0.20972229540348053,
"step": 152
},
{
"epoch": 0.17561114674648992,
"grad_norm": 75.7202165191751,
"learning_rate": 1.9947318819833962e-07,
"logits/chosen": -1.3358944654464722,
"logits/rejected": -1.3563066720962524,
"logps/chosen": -106.91460418701172,
"logps/rejected": -113.2139892578125,
"loss": 0.6607,
"rewards/accuracies": 0.46875,
"rewards/chosen": 0.14014360308647156,
"rewards/margins": -0.031036917120218277,
"rewards/rejected": 0.17118054628372192,
"step": 154
},
{
"epoch": 0.17789181098995083,
"grad_norm": 109.3316094106793,
"learning_rate": 1.994334800210137e-07,
"logits/chosen": -1.3152189254760742,
"logits/rejected": -1.3776334524154663,
"logps/chosen": -160.3906707763672,
"logps/rejected": -232.65725708007812,
"loss": 0.7105,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.10319292545318604,
"rewards/margins": -0.037254467606544495,
"rewards/rejected": 0.14044740796089172,
"step": 156
},
{
"epoch": 0.18017247523341173,
"grad_norm": 84.0665837850057,
"learning_rate": 1.9939233346987862e-07,
"logits/chosen": -1.4214388132095337,
"logits/rejected": -1.437930703163147,
"logps/chosen": -172.41867065429688,
"logps/rejected": -182.8797607421875,
"loss": 0.6807,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.16843558847904205,
"rewards/margins": 0.16182081401348114,
"rewards/rejected": 0.0066147902980446815,
"step": 158
},
{
"epoch": 0.18245313947687264,
"grad_norm": 93.91811474012694,
"learning_rate": 1.9934974914014762e-07,
"logits/chosen": -1.3304110765457153,
"logits/rejected": -1.386664628982544,
"logps/chosen": -112.3897933959961,
"logps/rejected": -133.53810119628906,
"loss": 0.7031,
"rewards/accuracies": 0.40625,
"rewards/chosen": 0.19350233674049377,
"rewards/margins": -0.08095596730709076,
"rewards/rejected": 0.27445828914642334,
"step": 160
},
{
"epoch": 0.18473380372033354,
"grad_norm": 90.17492690496479,
"learning_rate": 1.9930572764783236e-07,
"logits/chosen": -1.2432975769042969,
"logits/rejected": -1.2546998262405396,
"logps/chosen": -167.08847045898438,
"logps/rejected": -183.3931884765625,
"loss": 0.6979,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.05589159578084946,
"rewards/margins": 0.17001007497310638,
"rewards/rejected": -0.11411847919225693,
"step": 162
},
{
"epoch": 0.18701446796379445,
"grad_norm": 95.44656254255437,
"learning_rate": 1.9926026962973403e-07,
"logits/chosen": -1.326186180114746,
"logits/rejected": -1.4297109842300415,
"logps/chosen": -183.224853515625,
"logps/rejected": -239.97793579101562,
"loss": 0.6436,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.16740243136882782,
"rewards/margins": 0.23012831807136536,
"rewards/rejected": -0.06272587180137634,
"step": 164
},
{
"epoch": 0.18929513220725536,
"grad_norm": 92.54409107962749,
"learning_rate": 1.992133757434342e-07,
"logits/chosen": -1.3230640888214111,
"logits/rejected": -1.31871497631073,
"logps/chosen": -226.2008819580078,
"logps/rejected": -243.1304931640625,
"loss": 0.6795,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.1608559638261795,
"rewards/margins": 0.35546940565109253,
"rewards/rejected": -0.1946134865283966,
"step": 166
},
{
"epoch": 0.19157579645071626,
"grad_norm": 93.78690688865437,
"learning_rate": 1.991650466672853e-07,
"logits/chosen": -1.2681585550308228,
"logits/rejected": -1.27677321434021,
"logps/chosen": -151.71714782714844,
"logps/rejected": -171.88832092285156,
"loss": 0.6772,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.21696476638317108,
"rewards/margins": 0.30003517866134644,
"rewards/rejected": -0.08307043462991714,
"step": 168
},
{
"epoch": 0.19385646069417717,
"grad_norm": 79.03039382866096,
"learning_rate": 1.991152831004007e-07,
"logits/chosen": -1.4594199657440186,
"logits/rejected": -1.456141710281372,
"logps/chosen": -169.4336700439453,
"logps/rejected": -180.23756408691406,
"loss": 0.695,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.091593436896801,
"rewards/margins": 0.008565463125705719,
"rewards/rejected": 0.08302795141935349,
"step": 170
},
{
"epoch": 0.19613712493763807,
"grad_norm": 84.01165418355862,
"learning_rate": 1.9906408576264467e-07,
"logits/chosen": -1.272233247756958,
"logits/rejected": -1.309181809425354,
"logps/chosen": -182.87887573242188,
"logps/rejected": -218.7847442626953,
"loss": 0.6868,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.13641594350337982,
"rewards/margins": 0.20986737310886383,
"rewards/rejected": -0.07345142215490341,
"step": 172
},
{
"epoch": 0.198417789181099,
"grad_norm": 95.73538074987611,
"learning_rate": 1.9901145539462197e-07,
"logits/chosen": -1.2831114530563354,
"logits/rejected": -1.302902340888977,
"logps/chosen": -147.5471954345703,
"logps/rejected": -178.70321655273438,
"loss": 0.6013,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.2684887647628784,
"rewards/margins": 0.47333818674087524,
"rewards/rejected": -0.20484942197799683,
"step": 174
},
{
"epoch": 0.20069845342455991,
"grad_norm": 102.90991917593935,
"learning_rate": 1.9895739275766715e-07,
"logits/chosen": -1.3818506002426147,
"logits/rejected": -1.4092711210250854,
"logps/chosen": -227.61679077148438,
"logps/rejected": -226.88558959960938,
"loss": 0.737,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.13025416433811188,
"rewards/margins": 0.26329731941223145,
"rewards/rejected": -0.13304319977760315,
"step": 176
},
{
"epoch": 0.20297911766802082,
"grad_norm": 105.89850456514735,
"learning_rate": 1.9890189863383353e-07,
"logits/chosen": -1.4667214155197144,
"logits/rejected": -1.5101617574691772,
"logps/chosen": -185.08575439453125,
"logps/rejected": -237.65597534179688,
"loss": 0.7025,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.2359275072813034,
"rewards/margins": 0.08144809305667877,
"rewards/rejected": 0.15447941422462463,
"step": 178
},
{
"epoch": 0.20525978191148173,
"grad_norm": 82.46406340773291,
"learning_rate": 1.9884497382588183e-07,
"logits/chosen": -1.2853760719299316,
"logits/rejected": -1.3631393909454346,
"logps/chosen": -140.35952758789062,
"logps/rejected": -153.50978088378906,
"loss": 0.6683,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.1511276513338089,
"rewards/margins": 0.06823855638504028,
"rewards/rejected": 0.08288908749818802,
"step": 180
},
{
"epoch": 0.20754044615494263,
"grad_norm": 90.32390874613019,
"learning_rate": 1.9878661915726865e-07,
"logits/chosen": -1.4392482042312622,
"logits/rejected": -1.4409726858139038,
"logps/chosen": -171.09303283691406,
"logps/rejected": -182.13232421875,
"loss": 0.6691,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.20515748858451843,
"rewards/margins": 0.10885492712259293,
"rewards/rejected": 0.0963025838136673,
"step": 182
},
{
"epoch": 0.20982111039840354,
"grad_norm": 75.10429123148643,
"learning_rate": 1.9872683547213446e-07,
"logits/chosen": -1.3081107139587402,
"logits/rejected": -1.317389965057373,
"logps/chosen": -174.84463500976562,
"logps/rejected": -210.18130493164062,
"loss": 0.6158,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.18892019987106323,
"rewards/margins": 0.4061851501464844,
"rewards/rejected": -0.21726495027542114,
"step": 184
},
{
"epoch": 0.21210177464186444,
"grad_norm": 107.2216966034497,
"learning_rate": 1.9866562363529144e-07,
"logits/chosen": -1.2954977750778198,
"logits/rejected": -1.3127189874649048,
"logps/chosen": -197.97561645507812,
"logps/rejected": -204.29013061523438,
"loss": 0.7154,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.17145520448684692,
"rewards/margins": -0.03724336624145508,
"rewards/rejected": 0.208698570728302,
"step": 186
},
{
"epoch": 0.21438243888532535,
"grad_norm": 96.60657963150389,
"learning_rate": 1.9860298453221107e-07,
"logits/chosen": -1.3616856336593628,
"logits/rejected": -1.3934649229049683,
"logps/chosen": -179.2027130126953,
"logps/rejected": -187.0948028564453,
"loss": 0.6701,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.0954272449016571,
"rewards/margins": -0.021092543378472328,
"rewards/rejected": 0.11651977896690369,
"step": 188
},
{
"epoch": 0.21666310312878626,
"grad_norm": 87.8142315385882,
"learning_rate": 1.9853891906901108e-07,
"logits/chosen": -1.3980423212051392,
"logits/rejected": -1.4947185516357422,
"logps/chosen": -142.8896942138672,
"logps/rejected": -180.8528594970703,
"loss": 0.6415,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.13048064708709717,
"rewards/margins": 0.3490259051322937,
"rewards/rejected": -0.21854525804519653,
"step": 190
},
{
"epoch": 0.21894376737224716,
"grad_norm": 91.22189567457308,
"learning_rate": 1.9847342817244254e-07,
"logits/chosen": -1.3708425760269165,
"logits/rejected": -1.3748908042907715,
"logps/chosen": -155.70945739746094,
"logps/rejected": -175.20013427734375,
"loss": 0.6507,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.3499588370323181,
"rewards/margins": 0.12689019739627838,
"rewards/rejected": 0.22306862473487854,
"step": 192
},
{
"epoch": 0.22122443161570807,
"grad_norm": 103.90331869438803,
"learning_rate": 1.9840651278987642e-07,
"logits/chosen": -1.3758065700531006,
"logits/rejected": -1.3824987411499023,
"logps/chosen": -181.644775390625,
"logps/rejected": -178.72109985351562,
"loss": 0.6563,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.29924964904785156,
"rewards/margins": 0.3290708065032959,
"rewards/rejected": -0.02982119843363762,
"step": 194
},
{
"epoch": 0.22350509585916897,
"grad_norm": 103.8705512922574,
"learning_rate": 1.9833817388928984e-07,
"logits/chosen": -1.4562551975250244,
"logits/rejected": -1.511305809020996,
"logps/chosen": -168.09715270996094,
"logps/rejected": -189.35787963867188,
"loss": 0.6926,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.31778958439826965,
"rewards/margins": 0.04199263080954552,
"rewards/rejected": 0.27579694986343384,
"step": 196
},
{
"epoch": 0.22578576010262988,
"grad_norm": 103.40331674623164,
"learning_rate": 1.982684124592521e-07,
"logits/chosen": -1.3754979372024536,
"logits/rejected": -1.347690463066101,
"logps/chosen": -168.61148071289062,
"logps/rejected": -186.8193359375,
"loss": 0.7315,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.14702042937278748,
"rewards/margins": 0.059913113713264465,
"rewards/rejected": 0.08710730820894241,
"step": 198
},
{
"epoch": 0.2280664243460908,
"grad_norm": 102.65405433185128,
"learning_rate": 1.981972295089103e-07,
"logits/chosen": -1.4264771938323975,
"logits/rejected": -1.5102254152297974,
"logps/chosen": -160.4220428466797,
"logps/rejected": -173.62269592285156,
"loss": 0.6553,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.3021972179412842,
"rewards/margins": 0.10177014023065567,
"rewards/rejected": 0.2004270851612091,
"step": 200
},
{
"epoch": 0.2280664243460908,
"eval_logits/chosen": -1.4570631980895996,
"eval_logits/rejected": -1.442795753479004,
"eval_logps/chosen": -127.66089630126953,
"eval_logps/rejected": -129.5493621826172,
"eval_loss": 0.6965904235839844,
"eval_rewards/accuracies": 0.5199999809265137,
"eval_rewards/chosen": 0.22370710968971252,
"eval_rewards/margins": -0.02763114497065544,
"eval_rewards/rejected": 0.2513382136821747,
"eval_runtime": 22.1102,
"eval_samples_per_second": 4.523,
"eval_steps_per_second": 1.131,
"step": 200
},
{
"epoch": 0.23034708858955172,
"grad_norm": 70.85461032661505,
"learning_rate": 1.9812462606797498e-07,
"logits/chosen": -1.1571446657180786,
"logits/rejected": -1.2567036151885986,
"logps/chosen": -170.15008544921875,
"logps/rejected": -181.82806396484375,
"loss": 0.6236,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.13917526602745056,
"rewards/margins": 0.30568423867225647,
"rewards/rejected": -0.1665089875459671,
"step": 202
},
{
"epoch": 0.23262775283301262,
"grad_norm": 101.20833615282186,
"learning_rate": 1.9805060318670485e-07,
"logits/chosen": -1.3708992004394531,
"logits/rejected": -1.3186278343200684,
"logps/chosen": -154.29226684570312,
"logps/rejected": -148.5332794189453,
"loss": 0.6319,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.09488646686077118,
"rewards/margins": 0.11530336737632751,
"rewards/rejected": -0.020416898652911186,
"step": 204
},
{
"epoch": 0.23490841707647353,
"grad_norm": 96.32887019136864,
"learning_rate": 1.9797516193589192e-07,
"logits/chosen": -1.3597060441970825,
"logits/rejected": -1.3876497745513916,
"logps/chosen": -101.66006469726562,
"logps/rejected": -131.19041442871094,
"loss": 0.7312,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.20645485818386078,
"rewards/margins": 0.06082191318273544,
"rewards/rejected": 0.14563296735286713,
"step": 206
},
{
"epoch": 0.23718908131993444,
"grad_norm": 91.31654055574913,
"learning_rate": 1.978983034068459e-07,
"logits/chosen": -1.3642208576202393,
"logits/rejected": -1.405060052871704,
"logps/chosen": -188.59420776367188,
"logps/rejected": -199.42637634277344,
"loss": 0.6922,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.13877716660499573,
"rewards/margins": 0.17159023880958557,
"rewards/rejected": -0.03281306475400925,
"step": 208
},
{
"epoch": 0.23946974556339534,
"grad_norm": 99.31042679769261,
"learning_rate": 1.9782002871137832e-07,
"logits/chosen": -1.3214318752288818,
"logits/rejected": -1.3617138862609863,
"logps/chosen": -195.82009887695312,
"logps/rejected": -214.06614685058594,
"loss": 0.6673,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.16799718141555786,
"rewards/margins": 0.25821763277053833,
"rewards/rejected": -0.09022042155265808,
"step": 210
},
{
"epoch": 0.24175040980685625,
"grad_norm": 103.3617201342953,
"learning_rate": 1.9774033898178666e-07,
"logits/chosen": -1.4174686670303345,
"logits/rejected": -1.420326590538025,
"logps/chosen": -129.22848510742188,
"logps/rejected": -145.9871826171875,
"loss": 0.6716,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.3314517140388489,
"rewards/margins": 0.1635245978832245,
"rewards/rejected": 0.167927086353302,
"step": 212
},
{
"epoch": 0.24403107405031715,
"grad_norm": 85.24728778878888,
"learning_rate": 1.9765923537083774e-07,
"logits/chosen": -1.243302583694458,
"logits/rejected": -1.2585264444351196,
"logps/chosen": -181.84434509277344,
"logps/rejected": -238.97323608398438,
"loss": 0.6159,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.0770760029554367,
"rewards/margins": 0.2519676089286804,
"rewards/rejected": -0.17489157617092133,
"step": 214
},
{
"epoch": 0.24631173829377806,
"grad_norm": 96.37551956943565,
"learning_rate": 1.9757671905175115e-07,
"logits/chosen": -1.3951995372772217,
"logits/rejected": -1.4096182584762573,
"logps/chosen": -119.3514404296875,
"logps/rejected": -131.9920196533203,
"loss": 0.6021,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.2738375961780548,
"rewards/margins": 0.2932060658931732,
"rewards/rejected": -0.01936846226453781,
"step": 216
},
{
"epoch": 0.24859240253723897,
"grad_norm": 90.01951430126202,
"learning_rate": 1.9749279121818234e-07,
"logits/chosen": -1.3084944486618042,
"logits/rejected": -1.3836894035339355,
"logps/chosen": -155.07765197753906,
"logps/rejected": -191.67471313476562,
"loss": 0.6953,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.12750835716724396,
"rewards/margins": 0.31851378083229065,
"rewards/rejected": -0.1910054087638855,
"step": 218
},
{
"epoch": 0.25087306678069987,
"grad_norm": 107.56947817547281,
"learning_rate": 1.9740745308420528e-07,
"logits/chosen": -1.4428541660308838,
"logits/rejected": -1.4522430896759033,
"logps/chosen": -159.34136962890625,
"logps/rejected": -201.03256225585938,
"loss": 0.7085,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.08158425986766815,
"rewards/margins": 0.062202394008636475,
"rewards/rejected": 0.019381869584321976,
"step": 220
},
{
"epoch": 0.2531537310241608,
"grad_norm": 83.84650346626758,
"learning_rate": 1.9732070588429487e-07,
"logits/chosen": -1.4249211549758911,
"logits/rejected": -1.4040560722351074,
"logps/chosen": -139.1720733642578,
"logps/rejected": -150.09683227539062,
"loss": 0.6528,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.25594449043273926,
"rewards/margins": 0.27077823877334595,
"rewards/rejected": -0.014833783730864525,
"step": 222
},
{
"epoch": 0.2554343952676217,
"grad_norm": 95.84521399355921,
"learning_rate": 1.972325508733091e-07,
"logits/chosen": -1.4068944454193115,
"logits/rejected": -1.4884459972381592,
"logps/chosen": -178.6918182373047,
"logps/rejected": -178.89633178710938,
"loss": 0.6725,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.17113548517227173,
"rewards/margins": 0.35012179613113403,
"rewards/rejected": -0.1789863258600235,
"step": 224
},
{
"epoch": 0.2577150595110826,
"grad_norm": 90.48418130257966,
"learning_rate": 1.9714298932647098e-07,
"logits/chosen": -1.3818295001983643,
"logits/rejected": -1.4686387777328491,
"logps/chosen": -203.4779510498047,
"logps/rejected": -231.25755310058594,
"loss": 0.6145,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.2646425664424896,
"rewards/margins": 0.30655670166015625,
"rewards/rejected": -0.04191412031650543,
"step": 226
},
{
"epoch": 0.2599957237545435,
"grad_norm": 96.02415026722944,
"learning_rate": 1.9705202253935004e-07,
"logits/chosen": -1.459298849105835,
"logits/rejected": -1.4874234199523926,
"logps/chosen": -190.43370056152344,
"logps/rejected": -208.5654296875,
"loss": 0.6259,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.2924637198448181,
"rewards/margins": 0.3736059367656708,
"rewards/rejected": -0.08114223182201385,
"step": 228
},
{
"epoch": 0.2622763879980044,
"grad_norm": 94.75504400212658,
"learning_rate": 1.9695965182784343e-07,
"logits/chosen": -1.37540864944458,
"logits/rejected": -1.4327397346496582,
"logps/chosen": -158.9178924560547,
"logps/rejected": -187.00572204589844,
"loss": 0.6413,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.031393878161907196,
"rewards/margins": 0.2354445904493332,
"rewards/rejected": -0.2040507048368454,
"step": 230
},
{
"epoch": 0.2645570522414653,
"grad_norm": 88.68651480780905,
"learning_rate": 1.968658785281573e-07,
"logits/chosen": -1.4091265201568604,
"logits/rejected": -1.3750542402267456,
"logps/chosen": -159.5924530029297,
"logps/rejected": -157.25701904296875,
"loss": 0.6956,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.05787750333547592,
"rewards/margins": 0.07036175578832626,
"rewards/rejected": -0.012484237551689148,
"step": 232
},
{
"epoch": 0.2668377164849262,
"grad_norm": 87.80878700459785,
"learning_rate": 1.9677070399678693e-07,
"logits/chosen": -1.3906813859939575,
"logits/rejected": -1.4185580015182495,
"logps/chosen": -170.7882843017578,
"logps/rejected": -192.6103515625,
"loss": 0.6498,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.026271583512425423,
"rewards/margins": 0.08472327888011932,
"rewards/rejected": -0.058451712131500244,
"step": 234
},
{
"epoch": 0.2691183807283871,
"grad_norm": 95.5482100990242,
"learning_rate": 1.9667412961049754e-07,
"logits/chosen": -1.4015161991119385,
"logits/rejected": -1.4669052362442017,
"logps/chosen": -180.34584045410156,
"logps/rejected": -205.79164123535156,
"loss": 0.6991,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.12791365385055542,
"rewards/margins": 0.28303825855255127,
"rewards/rejected": -0.15512457489967346,
"step": 236
},
{
"epoch": 0.271399044971848,
"grad_norm": 88.45630335400043,
"learning_rate": 1.9657615676630418e-07,
"logits/chosen": -1.2741804122924805,
"logits/rejected": -1.2581013441085815,
"logps/chosen": -184.23312377929688,
"logps/rejected": -201.00103759765625,
"loss": 0.6514,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.036131352186203,
"rewards/margins": 0.508301317691803,
"rewards/rejected": -0.4721699059009552,
"step": 238
},
{
"epoch": 0.273679709215309,
"grad_norm": 99.3871661426225,
"learning_rate": 1.9647678688145159e-07,
"logits/chosen": -1.327433466911316,
"logits/rejected": -1.4025075435638428,
"logps/chosen": -167.82005310058594,
"logps/rejected": -194.77706909179688,
"loss": 0.6544,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.01046083215624094,
"rewards/margins": 0.2960438132286072,
"rewards/rejected": -0.30650466680526733,
"step": 240
},
{
"epoch": 0.2759603734587699,
"grad_norm": 129.18874275194366,
"learning_rate": 1.9637602139339355e-07,
"logits/chosen": -1.1904761791229248,
"logits/rejected": -1.2964377403259277,
"logps/chosen": -198.83749389648438,
"logps/rejected": -213.1676025390625,
"loss": 0.7065,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.018888656049966812,
"rewards/margins": 0.0817704051733017,
"rewards/rejected": -0.06288175284862518,
"step": 242
},
{
"epoch": 0.2782410377022308,
"grad_norm": 78.46019369342993,
"learning_rate": 1.9627386175977238e-07,
"logits/chosen": -1.363703966140747,
"logits/rejected": -1.385565161705017,
"logps/chosen": -158.67543029785156,
"logps/rejected": -171.18373107910156,
"loss": 0.6328,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.0427667461335659,
"rewards/margins": 0.24535968899726868,
"rewards/rejected": -0.20259293913841248,
"step": 244
},
{
"epoch": 0.2805217019456917,
"grad_norm": 93.42637414011723,
"learning_rate": 1.9617030945839747e-07,
"logits/chosen": -1.4135775566101074,
"logits/rejected": -1.482433557510376,
"logps/chosen": -201.14869689941406,
"logps/rejected": -240.91909790039062,
"loss": 0.6572,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.14201030135154724,
"rewards/margins": 0.3642757833003998,
"rewards/rejected": -0.22226551175117493,
"step": 246
},
{
"epoch": 0.2828023661891526,
"grad_norm": 94.8034192613356,
"learning_rate": 1.9606536598722431e-07,
"logits/chosen": -1.3897498846054077,
"logits/rejected": -1.4480516910552979,
"logps/chosen": -211.97976684570312,
"logps/rejected": -213.59344482421875,
"loss": 0.6947,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.1269560605287552,
"rewards/margins": 0.18549999594688416,
"rewards/rejected": -0.05854398384690285,
"step": 248
},
{
"epoch": 0.2850830304326135,
"grad_norm": 107.48956854367718,
"learning_rate": 1.9595903286433253e-07,
"logits/chosen": -1.3192163705825806,
"logits/rejected": -1.3453290462493896,
"logps/chosen": -166.43765258789062,
"logps/rejected": -185.8678436279297,
"loss": 0.6446,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.22617535293102264,
"rewards/margins": 0.19618834555149078,
"rewards/rejected": -0.4223636984825134,
"step": 250
},
{
"epoch": 0.2873636946760744,
"grad_norm": 97.18344743194635,
"learning_rate": 1.9585131162790395e-07,
"logits/chosen": -1.2871501445770264,
"logits/rejected": -1.3303256034851074,
"logps/chosen": -169.388671875,
"logps/rejected": -222.89968872070312,
"loss": 0.6597,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.016080807894468307,
"rewards/margins": 0.22609557211399078,
"rewards/rejected": -0.21001477539539337,
"step": 252
},
{
"epoch": 0.2896443589195353,
"grad_norm": 102.3447949736702,
"learning_rate": 1.9574220383620054e-07,
"logits/chosen": -1.332809329032898,
"logits/rejected": -1.3781144618988037,
"logps/chosen": -192.14796447753906,
"logps/rejected": -229.9279327392578,
"loss": 0.6849,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.12542986869812012,
"rewards/margins": 0.2946632206439972,
"rewards/rejected": -0.4200930893421173,
"step": 254
},
{
"epoch": 0.29192502316299623,
"grad_norm": 95.89945385250834,
"learning_rate": 1.956317110675417e-07,
"logits/chosen": -1.3515238761901855,
"logits/rejected": -1.3907783031463623,
"logps/chosen": -135.85316467285156,
"logps/rejected": -168.83544921875,
"loss": 0.6599,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.2081318199634552,
"rewards/margins": 0.3642672896385193,
"rewards/rejected": -0.15613549947738647,
"step": 256
},
{
"epoch": 0.29420568740645714,
"grad_norm": 104.38674521939737,
"learning_rate": 1.9551983492028144e-07,
"logits/chosen": -1.3251550197601318,
"logits/rejected": -1.418371319770813,
"logps/chosen": -172.90769958496094,
"logps/rejected": -200.890380859375,
"loss": 0.6987,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.051881127059459686,
"rewards/margins": 0.18907570838928223,
"rewards/rejected": -0.24095678329467773,
"step": 258
},
{
"epoch": 0.29648635164991805,
"grad_norm": 110.83873967012946,
"learning_rate": 1.9540657701278533e-07,
"logits/chosen": -1.4099152088165283,
"logits/rejected": -1.4348920583724976,
"logps/chosen": -196.08592224121094,
"logps/rejected": -237.6608428955078,
"loss": 0.6704,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.22441606223583221,
"rewards/margins": 0.2391555905342102,
"rewards/rejected": -0.4635716676712036,
"step": 260
},
{
"epoch": 0.29876701589337895,
"grad_norm": 103.49844878423605,
"learning_rate": 1.952919389834071e-07,
"logits/chosen": -1.2992289066314697,
"logits/rejected": -1.3971199989318848,
"logps/chosen": -173.28128051757812,
"logps/rejected": -213.29893493652344,
"loss": 0.6847,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.19963645935058594,
"rewards/margins": 0.11320970952510834,
"rewards/rejected": 0.0864267572760582,
"step": 262
},
{
"epoch": 0.30104768013683986,
"grad_norm": 99.13994519459824,
"learning_rate": 1.9517592249046475e-07,
"logits/chosen": -1.2953108549118042,
"logits/rejected": -1.3212617635726929,
"logps/chosen": -133.38534545898438,
"logps/rejected": -153.64303588867188,
"loss": 0.6807,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.2558751106262207,
"rewards/margins": 0.31515762209892273,
"rewards/rejected": -0.059282511472702026,
"step": 264
},
{
"epoch": 0.30332834438030076,
"grad_norm": 105.16807067574956,
"learning_rate": 1.950585292122169e-07,
"logits/chosen": -1.380258560180664,
"logits/rejected": -1.381796956062317,
"logps/chosen": -265.73822021484375,
"logps/rejected": -279.2653503417969,
"loss": 0.6065,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.020402366295456886,
"rewards/margins": 0.3811405599117279,
"rewards/rejected": -0.36073821783065796,
"step": 266
},
{
"epoch": 0.30560900862376167,
"grad_norm": 83.84465669976083,
"learning_rate": 1.9493976084683813e-07,
"logits/chosen": -1.3013784885406494,
"logits/rejected": -1.3473682403564453,
"logps/chosen": -143.68994140625,
"logps/rejected": -161.75987243652344,
"loss": 0.6565,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.09173958003520966,
"rewards/margins": 0.3339024484157562,
"rewards/rejected": -0.4256420433521271,
"step": 268
},
{
"epoch": 0.3078896728672226,
"grad_norm": 97.4032150220662,
"learning_rate": 1.9481961911239475e-07,
"logits/chosen": -1.3531267642974854,
"logits/rejected": -1.3918657302856445,
"logps/chosen": -158.61968994140625,
"logps/rejected": -196.80203247070312,
"loss": 0.6872,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.018977202475070953,
"rewards/margins": 0.31147533655166626,
"rewards/rejected": -0.330452561378479,
"step": 270
},
{
"epoch": 0.3101703371106835,
"grad_norm": 102.05098535340969,
"learning_rate": 1.9469810574681968e-07,
"logits/chosen": -1.2518314123153687,
"logits/rejected": -1.373193383216858,
"logps/chosen": -208.13992309570312,
"logps/rejected": -265.2911376953125,
"loss": 0.6599,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.10957180708646774,
"rewards/margins": 0.4111281931400299,
"rewards/rejected": -0.5207000374794006,
"step": 272
},
{
"epoch": 0.3124510013541444,
"grad_norm": 101.2119365968094,
"learning_rate": 1.9457522250788756e-07,
"logits/chosen": -1.4733508825302124,
"logits/rejected": -1.4335081577301025,
"logps/chosen": -152.27394104003906,
"logps/rejected": -149.08934020996094,
"loss": 0.6512,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.0012259185314178467,
"rewards/margins": 0.06475882232189178,
"rewards/rejected": -0.06353290379047394,
"step": 274
},
{
"epoch": 0.3147316655976053,
"grad_norm": 102.70187300991479,
"learning_rate": 1.9445097117318912e-07,
"logits/chosen": -1.2339739799499512,
"logits/rejected": -1.2878998517990112,
"logps/chosen": -154.39866638183594,
"logps/rejected": -169.81304931640625,
"loss": 0.6069,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.030073221772909164,
"rewards/margins": 0.06755417585372925,
"rewards/rejected": -0.09762738645076752,
"step": 276
},
{
"epoch": 0.3170123298410662,
"grad_norm": 92.89132088481786,
"learning_rate": 1.9432535354010545e-07,
"logits/chosen": -1.381090521812439,
"logits/rejected": -1.408646583557129,
"logps/chosen": -193.48159790039062,
"logps/rejected": -218.6549530029297,
"loss": 0.721,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.018713481724262238,
"rewards/margins": 0.14033427834510803,
"rewards/rejected": -0.15904778242111206,
"step": 278
},
{
"epoch": 0.3192929940845271,
"grad_norm": 92.57611013262112,
"learning_rate": 1.9419837142578226e-07,
"logits/chosen": -1.3027143478393555,
"logits/rejected": -1.2925523519515991,
"logps/chosen": -159.863525390625,
"logps/rejected": -182.41249084472656,
"loss": 0.6453,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.16124297678470612,
"rewards/margins": 0.37421250343322754,
"rewards/rejected": -0.21296954154968262,
"step": 280
},
{
"epoch": 0.321573658327988,
"grad_norm": 88.34893118769534,
"learning_rate": 1.9407002666710333e-07,
"logits/chosen": -1.2514938116073608,
"logits/rejected": -1.2840971946716309,
"logps/chosen": -167.23568725585938,
"logps/rejected": -185.19515991210938,
"loss": 0.6776,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.0749707818031311,
"rewards/margins": 0.2606170177459717,
"rewards/rejected": -0.18564626574516296,
"step": 282
},
{
"epoch": 0.3238543225714489,
"grad_norm": 87.77224347782207,
"learning_rate": 1.93940321120664e-07,
"logits/chosen": -1.234694242477417,
"logits/rejected": -1.245900273323059,
"logps/chosen": -181.2189178466797,
"logps/rejected": -201.54295349121094,
"loss": 0.656,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.03384766727685928,
"rewards/margins": 0.15053534507751465,
"rewards/rejected": -0.18438303470611572,
"step": 284
},
{
"epoch": 0.3261349868149098,
"grad_norm": 102.48592883969225,
"learning_rate": 1.9380925666274443e-07,
"logits/chosen": -1.29789400100708,
"logits/rejected": -1.3402178287506104,
"logps/chosen": -200.9523468017578,
"logps/rejected": -274.8287658691406,
"loss": 0.6817,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.21383914351463318,
"rewards/margins": 0.0331435352563858,
"rewards/rejected": -0.24698270857334137,
"step": 286
},
{
"epoch": 0.32841565105837073,
"grad_norm": 98.03735612419584,
"learning_rate": 1.9367683518928225e-07,
"logits/chosen": -1.3940753936767578,
"logits/rejected": -1.464486837387085,
"logps/chosen": -124.9957504272461,
"logps/rejected": -153.5186767578125,
"loss": 0.6812,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.005818367004394531,
"rewards/margins": 0.20061178505420685,
"rewards/rejected": -0.20643013715744019,
"step": 288
},
{
"epoch": 0.33069631530183163,
"grad_norm": 103.78616077769101,
"learning_rate": 1.935430586158454e-07,
"logits/chosen": -1.1263610124588013,
"logits/rejected": -1.1101529598236084,
"logps/chosen": -117.66938018798828,
"logps/rejected": -164.22933959960938,
"loss": 0.6884,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.0009657144546508789,
"rewards/margins": 0.10708783566951752,
"rewards/rejected": -0.1080535426735878,
"step": 290
},
{
"epoch": 0.33297697954529254,
"grad_norm": 110.5063902175955,
"learning_rate": 1.9340792887760412e-07,
"logits/chosen": -1.324884295463562,
"logits/rejected": -1.3702059984207153,
"logps/chosen": -194.0547332763672,
"logps/rejected": -223.9933624267578,
"loss": 0.6334,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.0599634051322937,
"rewards/margins": 0.17343786358833313,
"rewards/rejected": -0.23340126872062683,
"step": 292
},
{
"epoch": 0.3352576437887535,
"grad_norm": 85.87924361401205,
"learning_rate": 1.932714479293033e-07,
"logits/chosen": -1.2896180152893066,
"logits/rejected": -1.3805060386657715,
"logps/chosen": -182.65460205078125,
"logps/rejected": -212.5992889404297,
"loss": 0.6151,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.10547913610935211,
"rewards/margins": 0.20131757855415344,
"rewards/rejected": -0.30679669976234436,
"step": 294
},
{
"epoch": 0.3375383080322144,
"grad_norm": 108.47177643694172,
"learning_rate": 1.9313361774523386e-07,
"logits/chosen": -1.4138604402542114,
"logits/rejected": -1.402896523475647,
"logps/chosen": -147.80795288085938,
"logps/rejected": -165.24566650390625,
"loss": 0.7377,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.21476852893829346,
"rewards/margins": 0.08000420778989792,
"rewards/rejected": -0.2947727143764496,
"step": 296
},
{
"epoch": 0.3398189722756753,
"grad_norm": 111.8896748854085,
"learning_rate": 1.9299444031920436e-07,
"logits/chosen": -1.2892051935195923,
"logits/rejected": -1.380386233329773,
"logps/chosen": -190.3002471923828,
"logps/rejected": -246.29263305664062,
"loss": 0.6347,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.08067283034324646,
"rewards/margins": 0.4462183713912964,
"rewards/rejected": -0.5268911719322205,
"step": 298
},
{
"epoch": 0.3420996365191362,
"grad_norm": 96.80869969762502,
"learning_rate": 1.9285391766451217e-07,
"logits/chosen": -1.3364348411560059,
"logits/rejected": -1.3174083232879639,
"logps/chosen": -161.74612426757812,
"logps/rejected": -179.8140106201172,
"loss": 0.669,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.1118321567773819,
"rewards/margins": 0.23199215531349182,
"rewards/rejected": -0.34382426738739014,
"step": 300
},
{
"epoch": 0.3420996365191362,
"eval_logits/chosen": -1.4281266927719116,
"eval_logits/rejected": -1.4122414588928223,
"eval_logps/chosen": -128.86663818359375,
"eval_logps/rejected": -131.4814910888672,
"eval_loss": 0.6797733306884766,
"eval_rewards/accuracies": 0.5199999809265137,
"eval_rewards/chosen": 0.10313291847705841,
"eval_rewards/margins": 0.045006848871707916,
"eval_rewards/rejected": 0.05812607705593109,
"eval_runtime": 21.8566,
"eval_samples_per_second": 4.575,
"eval_steps_per_second": 1.144,
"step": 300
},
{
"epoch": 0.3443803007625971,
"grad_norm": 84.25578372291146,
"learning_rate": 1.927120518139144e-07,
"logits/chosen": -1.3597999811172485,
"logits/rejected": -1.4241136312484741,
"logps/chosen": -180.1940460205078,
"logps/rejected": -199.74412536621094,
"loss": 0.6421,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.04787690192461014,
"rewards/margins": 0.13789615035057068,
"rewards/rejected": -0.18577302992343903,
"step": 302
},
{
"epoch": 0.34666096500605803,
"grad_norm": 89.24528063714898,
"learning_rate": 1.925688448195983e-07,
"logits/chosen": -1.1900302171707153,
"logits/rejected": -1.3024637699127197,
"logps/chosen": -130.7202606201172,
"logps/rejected": -170.3651123046875,
"loss": 0.6326,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.08976805210113525,
"rewards/margins": 0.30762431025505066,
"rewards/rejected": -0.2178562879562378,
"step": 304
},
{
"epoch": 0.34894162924951894,
"grad_norm": 86.12361588594867,
"learning_rate": 1.924242987531517e-07,
"logits/chosen": -1.3129158020019531,
"logits/rejected": -1.2686495780944824,
"logps/chosen": -170.01808166503906,
"logps/rejected": -172.82835388183594,
"loss": 0.6621,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.010694226250052452,
"rewards/margins": 0.25143933296203613,
"rewards/rejected": -0.26213353872299194,
"step": 306
},
{
"epoch": 0.35122229349297984,
"grad_norm": 80.23753458776699,
"learning_rate": 1.922784157055331e-07,
"logits/chosen": -1.3715269565582275,
"logits/rejected": -1.3273383378982544,
"logps/chosen": -190.35610961914062,
"logps/rejected": -180.68020629882812,
"loss": 0.6522,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.09160695225000381,
"rewards/margins": 0.13991902768611908,
"rewards/rejected": -0.2315259724855423,
"step": 308
},
{
"epoch": 0.35350295773644075,
"grad_norm": 98.71267017165292,
"learning_rate": 1.9213119778704127e-07,
"logits/chosen": -1.3962262868881226,
"logits/rejected": -1.4428459405899048,
"logps/chosen": -222.09481811523438,
"logps/rejected": -248.44686889648438,
"loss": 0.6602,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.001766495406627655,
"rewards/margins": 0.569767415523529,
"rewards/rejected": -0.5715339183807373,
"step": 310
},
{
"epoch": 0.35578362197990165,
"grad_norm": 88.65548046490639,
"learning_rate": 1.919826471272849e-07,
"logits/chosen": -1.3152942657470703,
"logits/rejected": -1.365240216255188,
"logps/chosen": -128.6586151123047,
"logps/rejected": -141.93685913085938,
"loss": 0.6252,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.05175452306866646,
"rewards/margins": 0.1226281225681305,
"rewards/rejected": -0.17438265681266785,
"step": 312
},
{
"epoch": 0.35806428622336256,
"grad_norm": 84.35369248301531,
"learning_rate": 1.9183276587515165e-07,
"logits/chosen": -1.2944692373275757,
"logits/rejected": -1.3323063850402832,
"logps/chosen": -165.85975646972656,
"logps/rejected": -191.24041748046875,
"loss": 0.6487,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.07458823919296265,
"rewards/margins": 0.3784411549568176,
"rewards/rejected": -0.4530293941497803,
"step": 314
},
{
"epoch": 0.36034495046682347,
"grad_norm": 86.49341842550122,
"learning_rate": 1.9168155619877707e-07,
"logits/chosen": -1.2946043014526367,
"logits/rejected": -1.4176356792449951,
"logps/chosen": -130.90985107421875,
"logps/rejected": -162.9954071044922,
"loss": 0.6467,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.011317873373627663,
"rewards/margins": 0.4069710373878479,
"rewards/rejected": -0.39565321803092957,
"step": 316
},
{
"epoch": 0.36262561471028437,
"grad_norm": 102.96541053849566,
"learning_rate": 1.9152902028551335e-07,
"logits/chosen": -1.3063163757324219,
"logits/rejected": -1.3172858953475952,
"logps/chosen": -157.0022735595703,
"logps/rejected": -165.20347595214844,
"loss": 0.6669,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.07298893481492996,
"rewards/margins": 0.17434003949165344,
"rewards/rejected": -0.2473289668560028,
"step": 318
},
{
"epoch": 0.3649062789537453,
"grad_norm": 76.39528252287091,
"learning_rate": 1.9137516034189765e-07,
"logits/chosen": -1.3794448375701904,
"logits/rejected": -1.4233444929122925,
"logps/chosen": -164.51431274414062,
"logps/rejected": -191.4024658203125,
"loss": 0.629,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.07598446309566498,
"rewards/margins": 0.355991005897522,
"rewards/rejected": -0.2800065577030182,
"step": 320
},
{
"epoch": 0.3671869431972062,
"grad_norm": 90.58917607125439,
"learning_rate": 1.9121997859361997e-07,
"logits/chosen": -1.3726584911346436,
"logits/rejected": -1.4218438863754272,
"logps/chosen": -161.8759765625,
"logps/rejected": -191.62745666503906,
"loss": 0.6777,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.03798535838723183,
"rewards/margins": 0.2353454828262329,
"rewards/rejected": -0.27333083748817444,
"step": 322
},
{
"epoch": 0.3694676074406671,
"grad_norm": 88.70829690205912,
"learning_rate": 1.9106347728549132e-07,
"logits/chosen": -1.4393202066421509,
"logits/rejected": -1.4652281999588013,
"logps/chosen": -159.0114288330078,
"logps/rejected": -169.2324981689453,
"loss": 0.6457,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.03970104455947876,
"rewards/margins": 0.19649550318717957,
"rewards/rejected": -0.23619654774665833,
"step": 324
},
{
"epoch": 0.371748271684128,
"grad_norm": 97.10274345850128,
"learning_rate": 1.9090565868141093e-07,
"logits/chosen": -1.415880560874939,
"logits/rejected": -1.4168412685394287,
"logps/chosen": -200.85858154296875,
"logps/rejected": -214.74472045898438,
"loss": 0.6936,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.0973915159702301,
"rewards/margins": -0.0053062885999679565,
"rewards/rejected": -0.09208524972200394,
"step": 326
},
{
"epoch": 0.3740289359275889,
"grad_norm": 79.53686855879208,
"learning_rate": 1.9074652506433367e-07,
"logits/chosen": -1.2665050029754639,
"logits/rejected": -1.2990330457687378,
"logps/chosen": -188.47377014160156,
"logps/rejected": -196.28854370117188,
"loss": 0.6894,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.2655767500400543,
"rewards/margins": 0.0786442756652832,
"rewards/rejected": -0.3442210257053375,
"step": 328
},
{
"epoch": 0.3763096001710498,
"grad_norm": 92.25716890573669,
"learning_rate": 1.9058607873623696e-07,
"logits/chosen": -1.2212181091308594,
"logits/rejected": -1.2750239372253418,
"logps/chosen": -136.28663635253906,
"logps/rejected": -182.32699584960938,
"loss": 0.6309,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.14970597624778748,
"rewards/margins": 0.13012784719467163,
"rewards/rejected": -0.2798338234424591,
"step": 330
},
{
"epoch": 0.3785902644145107,
"grad_norm": 96.38565738306772,
"learning_rate": 1.9042432201808753e-07,
"logits/chosen": -1.3811233043670654,
"logits/rejected": -1.3999402523040771,
"logps/chosen": -153.0084686279297,
"logps/rejected": -174.6513671875,
"loss": 0.6334,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.06558946520090103,
"rewards/margins": 0.20774412155151367,
"rewards/rejected": -0.2733335494995117,
"step": 332
},
{
"epoch": 0.3808709286579716,
"grad_norm": 101.76003883417016,
"learning_rate": 1.9026125724980777e-07,
"logits/chosen": -1.3316706418991089,
"logits/rejected": -1.2979425191879272,
"logps/chosen": -172.97792053222656,
"logps/rejected": -187.61904907226562,
"loss": 0.635,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.022555537521839142,
"rewards/margins": 0.31808096170425415,
"rewards/rejected": -0.2955254316329956,
"step": 334
},
{
"epoch": 0.3831515929014325,
"grad_norm": 89.57559954819274,
"learning_rate": 1.900968867902419e-07,
"logits/chosen": -1.392217993736267,
"logits/rejected": -1.4426984786987305,
"logps/chosen": -171.40127563476562,
"logps/rejected": -171.21238708496094,
"loss": 0.6236,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.11336915194988251,
"rewards/margins": 0.3153633177280426,
"rewards/rejected": -0.2019941657781601,
"step": 336
},
{
"epoch": 0.38543225714489343,
"grad_norm": 95.74770834081339,
"learning_rate": 1.899312130171219e-07,
"logits/chosen": -1.410531759262085,
"logits/rejected": -1.4363325834274292,
"logps/chosen": -176.63296508789062,
"logps/rejected": -194.51480102539062,
"loss": 0.6307,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.0749322921037674,
"rewards/margins": 0.2967851161956787,
"rewards/rejected": -0.3717173933982849,
"step": 338
},
{
"epoch": 0.38771292138835434,
"grad_norm": 94.33644084218925,
"learning_rate": 1.897642383270331e-07,
"logits/chosen": -1.4411017894744873,
"logits/rejected": -1.5141332149505615,
"logps/chosen": -220.8849639892578,
"logps/rejected": -249.29640197753906,
"loss": 0.6396,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.1280645728111267,
"rewards/margins": 0.4015355110168457,
"rewards/rejected": -0.5296000838279724,
"step": 340
},
{
"epoch": 0.38999358563181524,
"grad_norm": 93.41769187789713,
"learning_rate": 1.8959596513537937e-07,
"logits/chosen": -1.4379459619522095,
"logits/rejected": -1.463719367980957,
"logps/chosen": -136.88717651367188,
"logps/rejected": -146.65672302246094,
"loss": 0.6624,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.024333830922842026,
"rewards/margins": 0.15197159349918365,
"rewards/rejected": -0.12763777375221252,
"step": 342
},
{
"epoch": 0.39227424987527615,
"grad_norm": 110.48154958430904,
"learning_rate": 1.894263958763485e-07,
"logits/chosen": -1.477798581123352,
"logits/rejected": -1.4197784662246704,
"logps/chosen": -192.24853515625,
"logps/rejected": -182.48162841796875,
"loss": 0.6866,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.029505489394068718,
"rewards/margins": 0.263489693403244,
"rewards/rejected": -0.23398420214653015,
"step": 344
},
{
"epoch": 0.39455491411873705,
"grad_norm": 120.46352804291743,
"learning_rate": 1.892555330028766e-07,
"logits/chosen": -1.2521153688430786,
"logits/rejected": -1.2826614379882812,
"logps/chosen": -217.5478515625,
"logps/rejected": -273.1899719238281,
"loss": 0.6418,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.2787259817123413,
"rewards/margins": 0.3908158242702484,
"rewards/rejected": -0.6695418357849121,
"step": 346
},
{
"epoch": 0.396835578362198,
"grad_norm": 94.24036369589392,
"learning_rate": 1.8908337898661285e-07,
"logits/chosen": -1.3746845722198486,
"logits/rejected": -1.46570885181427,
"logps/chosen": -179.17138671875,
"logps/rejected": -217.95236206054688,
"loss": 0.6201,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.12253744155168533,
"rewards/margins": 0.39310359954833984,
"rewards/rejected": -0.5156410336494446,
"step": 348
},
{
"epoch": 0.3991162426056589,
"grad_norm": 88.9093668290791,
"learning_rate": 1.889099363178838e-07,
"logits/chosen": -1.445816993713379,
"logits/rejected": -1.4381108283996582,
"logps/chosen": -132.04539489746094,
"logps/rejected": -139.59835815429688,
"loss": 0.6502,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.10454928129911423,
"rewards/margins": 0.15687233209609985,
"rewards/rejected": -0.2614216208457947,
"step": 350
},
{
"epoch": 0.40139690684911983,
"grad_norm": 118.96237042903994,
"learning_rate": 1.8873520750565714e-07,
"logits/chosen": -1.3293050527572632,
"logits/rejected": -1.3804844617843628,
"logps/chosen": -172.6136474609375,
"logps/rejected": -184.08212280273438,
"loss": 0.6857,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.09095679968595505,
"rewards/margins": 0.1739913374185562,
"rewards/rejected": -0.26494812965393066,
"step": 352
},
{
"epoch": 0.40367757109258073,
"grad_norm": 123.21030025798191,
"learning_rate": 1.8855919507750556e-07,
"logits/chosen": -1.299286127090454,
"logits/rejected": -1.2867302894592285,
"logps/chosen": -228.6288604736328,
"logps/rejected": -253.30572509765625,
"loss": 0.6819,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.27850618958473206,
"rewards/margins": 0.12168295681476593,
"rewards/rejected": -0.40018919110298157,
"step": 354
},
{
"epoch": 0.40595823533604164,
"grad_norm": 93.75103343413197,
"learning_rate": 1.883819015795701e-07,
"logits/chosen": -1.3721405267715454,
"logits/rejected": -1.4811532497406006,
"logps/chosen": -158.45986938476562,
"logps/rejected": -191.99571228027344,
"loss": 0.6742,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.06369534879922867,
"rewards/margins": 0.13870595395565033,
"rewards/rejected": -0.07501061260700226,
"step": 356
},
{
"epoch": 0.40823889957950255,
"grad_norm": 102.06521296332116,
"learning_rate": 1.8820332957652343e-07,
"logits/chosen": -1.3636136054992676,
"logits/rejected": -1.4187768697738647,
"logps/chosen": -145.48468017578125,
"logps/rejected": -148.94664001464844,
"loss": 0.6414,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.0663696825504303,
"rewards/margins": 0.29655539989471436,
"rewards/rejected": -0.23018570244312286,
"step": 358
},
{
"epoch": 0.41051956382296345,
"grad_norm": 80.21120922967526,
"learning_rate": 1.8802348165153257e-07,
"logits/chosen": -1.0907145738601685,
"logits/rejected": -1.2451905012130737,
"logps/chosen": -140.98895263671875,
"logps/rejected": -169.56390380859375,
"loss": 0.6267,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.1324671506881714,
"rewards/margins": 0.2304615080356598,
"rewards/rejected": -0.0979943573474884,
"step": 360
},
{
"epoch": 0.41280022806642436,
"grad_norm": 90.74971301077287,
"learning_rate": 1.8784236040622173e-07,
"logits/chosen": -1.4046244621276855,
"logits/rejected": -1.5294733047485352,
"logps/chosen": -220.828125,
"logps/rejected": -252.51620483398438,
"loss": 0.6485,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.029995199292898178,
"rewards/margins": 0.10216841101646423,
"rewards/rejected": -0.1321636140346527,
"step": 362
},
{
"epoch": 0.41508089230988526,
"grad_norm": 116.10619062475209,
"learning_rate": 1.8765996846063453e-07,
"logits/chosen": -1.3053789138793945,
"logits/rejected": -1.331100344657898,
"logps/chosen": -145.552978515625,
"logps/rejected": -175.96580505371094,
"loss": 0.7355,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.08064378052949905,
"rewards/margins": 0.18256865441799164,
"rewards/rejected": -0.2632124125957489,
"step": 364
},
{
"epoch": 0.41736155655334617,
"grad_norm": 85.92321632812764,
"learning_rate": 1.8747630845319612e-07,
"logits/chosen": -1.3430732488632202,
"logits/rejected": -1.321002721786499,
"logps/chosen": -149.8716278076172,
"logps/rejected": -168.82972717285156,
"loss": 0.6414,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.08440528810024261,
"rewards/margins": 0.3168274164199829,
"rewards/rejected": -0.23242215812206268,
"step": 366
},
{
"epoch": 0.4196422207968071,
"grad_norm": 89.28728181459165,
"learning_rate": 1.8729138304067512e-07,
"logits/chosen": -1.464853286743164,
"logits/rejected": -1.478898286819458,
"logps/chosen": -157.5703887939453,
"logps/rejected": -171.20423889160156,
"loss": 0.6186,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.0049234069883823395,
"rewards/margins": 0.2588205635547638,
"rewards/rejected": -0.25389716029167175,
"step": 368
},
{
"epoch": 0.421922885040268,
"grad_norm": 86.83502819065235,
"learning_rate": 1.8710519489814503e-07,
"logits/chosen": -1.2088496685028076,
"logits/rejected": -1.238593578338623,
"logps/chosen": -156.05690002441406,
"logps/rejected": -176.14474487304688,
"loss": 0.7105,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.01236521452665329,
"rewards/margins": 0.17280086874961853,
"rewards/rejected": -0.18516604602336884,
"step": 370
},
{
"epoch": 0.4242035492837289,
"grad_norm": 101.40852416620743,
"learning_rate": 1.869177467189456e-07,
"logits/chosen": -1.4426904916763306,
"logits/rejected": -1.409334659576416,
"logps/chosen": -182.2246551513672,
"logps/rejected": -194.63980102539062,
"loss": 0.6937,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.13504472374916077,
"rewards/margins": 0.12622104585170746,
"rewards/rejected": -0.2612657845020294,
"step": 372
},
{
"epoch": 0.4264842135271898,
"grad_norm": 78.92812775165392,
"learning_rate": 1.8672904121464402e-07,
"logits/chosen": -1.3653035163879395,
"logits/rejected": -1.3275575637817383,
"logps/chosen": -180.02243041992188,
"logps/rejected": -195.43106079101562,
"loss": 0.6208,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.13813042640686035,
"rewards/margins": 0.25349733233451843,
"rewards/rejected": -0.3916277587413788,
"step": 374
},
{
"epoch": 0.4287648777706507,
"grad_norm": 85.95171696022348,
"learning_rate": 1.8653908111499531e-07,
"logits/chosen": -1.4379856586456299,
"logits/rejected": -1.5062627792358398,
"logps/chosen": -123.3829345703125,
"logps/rejected": -155.55897521972656,
"loss": 0.6752,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.02849392406642437,
"rewards/margins": 0.1816612333059311,
"rewards/rejected": -0.2101551592350006,
"step": 376
},
{
"epoch": 0.4310455420141116,
"grad_norm": 74.55363107506629,
"learning_rate": 1.8634786916790331e-07,
"logits/chosen": -1.306863784790039,
"logits/rejected": -1.4182804822921753,
"logps/chosen": -185.36831665039062,
"logps/rejected": -205.2194366455078,
"loss": 0.602,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.23371680080890656,
"rewards/margins": 0.4712272882461548,
"rewards/rejected": -0.23751050233840942,
"step": 378
},
{
"epoch": 0.4333262062575725,
"grad_norm": 98.09376209675088,
"learning_rate": 1.861554081393806e-07,
"logits/chosen": -1.298234462738037,
"logits/rejected": -1.320231556892395,
"logps/chosen": -170.1300506591797,
"logps/rejected": -201.2412567138672,
"loss": 0.6166,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.11016674339771271,
"rewards/margins": 0.3684804141521454,
"rewards/rejected": -0.25831368565559387,
"step": 380
},
{
"epoch": 0.4356068705010334,
"grad_norm": 95.75031333883096,
"learning_rate": 1.8596170081350855e-07,
"logits/chosen": -1.4295341968536377,
"logits/rejected": -1.4480311870574951,
"logps/chosen": -158.36959838867188,
"logps/rejected": -185.86700439453125,
"loss": 0.6506,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.11881276965141296,
"rewards/margins": 0.2408534288406372,
"rewards/rejected": -0.12204068899154663,
"step": 382
},
{
"epoch": 0.4378875347444943,
"grad_norm": 89.97741935605185,
"learning_rate": 1.8576674999239713e-07,
"logits/chosen": -1.2293694019317627,
"logits/rejected": -1.319061040878296,
"logps/chosen": -168.48960876464844,
"logps/rejected": -213.40200805664062,
"loss": 0.6379,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.07465538382530212,
"rewards/margins": 0.39287707209587097,
"rewards/rejected": -0.31822171807289124,
"step": 384
},
{
"epoch": 0.44016819898795523,
"grad_norm": 84.57348322808303,
"learning_rate": 1.8557055849614428e-07,
"logits/chosen": -1.2857288122177124,
"logits/rejected": -1.317692518234253,
"logps/chosen": -195.7303009033203,
"logps/rejected": -239.7130126953125,
"loss": 0.5716,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.07659934461116791,
"rewards/margins": 0.5603799819946289,
"rewards/rejected": -0.636979341506958,
"step": 386
},
{
"epoch": 0.44244886323141613,
"grad_norm": 85.15633806373397,
"learning_rate": 1.8537312916279523e-07,
"logits/chosen": -1.3817092180252075,
"logits/rejected": -1.501309871673584,
"logps/chosen": -146.41033935546875,
"logps/rejected": -171.60218811035156,
"loss": 0.6673,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.07926470041275024,
"rewards/margins": 0.26870349049568176,
"rewards/rejected": -0.347968190908432,
"step": 388
},
{
"epoch": 0.44472952747487704,
"grad_norm": 89.75323963715459,
"learning_rate": 1.8517446484830136e-07,
"logits/chosen": -1.4567725658416748,
"logits/rejected": -1.5358428955078125,
"logps/chosen": -172.92111206054688,
"logps/rejected": -192.05184936523438,
"loss": 0.6553,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.1497884839773178,
"rewards/margins": 0.06301924586296082,
"rewards/rejected": -0.21280772984027863,
"step": 390
},
{
"epoch": 0.44701019171833795,
"grad_norm": 105.51876340320463,
"learning_rate": 1.8497456842647878e-07,
"logits/chosen": -1.2944555282592773,
"logits/rejected": -1.3529748916625977,
"logps/chosen": -157.77642822265625,
"logps/rejected": -176.93093872070312,
"loss": 0.682,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.07403340190649033,
"rewards/margins": 0.10720521211624146,
"rewards/rejected": -0.18123860657215118,
"step": 392
},
{
"epoch": 0.44929085596179885,
"grad_norm": 85.63092920252892,
"learning_rate": 1.8477344278896706e-07,
"logits/chosen": -1.4767736196517944,
"logits/rejected": -1.5019617080688477,
"logps/chosen": -173.24639892578125,
"logps/rejected": -193.271240234375,
"loss": 0.6935,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.13190394639968872,
"rewards/margins": 0.48726946115493774,
"rewards/rejected": -0.355365514755249,
"step": 394
},
{
"epoch": 0.45157152020525976,
"grad_norm": 100.4356452029139,
"learning_rate": 1.8457109084518718e-07,
"logits/chosen": -1.4190278053283691,
"logits/rejected": -1.3654460906982422,
"logps/chosen": -227.8990020751953,
"logps/rejected": -215.39312744140625,
"loss": 0.6697,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.15163551270961761,
"rewards/margins": 0.19186191260814667,
"rewards/rejected": -0.3434974253177643,
"step": 396
},
{
"epoch": 0.45385218444872066,
"grad_norm": 89.70522326638444,
"learning_rate": 1.8436751552229938e-07,
"logits/chosen": -1.3089743852615356,
"logits/rejected": -1.340443730354309,
"logps/chosen": -145.11398315429688,
"logps/rejected": -172.60635375976562,
"loss": 0.6571,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.20591212809085846,
"rewards/margins": 0.34871169924736023,
"rewards/rejected": -0.5546237230300903,
"step": 398
},
{
"epoch": 0.4561328486921816,
"grad_norm": 84.89862446594006,
"learning_rate": 1.84162719765161e-07,
"logits/chosen": -1.2627918720245361,
"logits/rejected": -1.2893693447113037,
"logps/chosen": -130.3240966796875,
"logps/rejected": -160.49227905273438,
"loss": 0.6402,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.0029626190662384033,
"rewards/margins": 0.4357815384864807,
"rewards/rejected": -0.4328188896179199,
"step": 400
},
{
"epoch": 0.4561328486921816,
"eval_logits/chosen": -1.4406086206436157,
"eval_logits/rejected": -1.4253712892532349,
"eval_logps/chosen": -129.20408630371094,
"eval_logps/rejected": -132.1772003173828,
"eval_loss": 0.659533679485321,
"eval_rewards/accuracies": 0.6399999856948853,
"eval_rewards/chosen": 0.06938672810792923,
"eval_rewards/margins": 0.08083352446556091,
"eval_rewards/rejected": -0.011446798220276833,
"eval_runtime": 21.3693,
"eval_samples_per_second": 4.68,
"eval_steps_per_second": 1.17,
"step": 400
},
{
"epoch": 0.45841351293564253,
"grad_norm": 86.51322437777803,
"learning_rate": 1.839567065362838e-07,
"logits/chosen": -1.368638038635254,
"logits/rejected": -1.3851830959320068,
"logps/chosen": -169.3848876953125,
"logps/rejected": -175.4376678466797,
"loss": 0.635,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.0501415841281414,
"rewards/margins": 0.1427716761827469,
"rewards/rejected": -0.1929132342338562,
"step": 402
},
{
"epoch": 0.46069417717910344,
"grad_norm": 101.82366291792587,
"learning_rate": 1.8374947881579112e-07,
"logits/chosen": -1.201027512550354,
"logits/rejected": -1.2725883722305298,
"logps/chosen": -191.0504150390625,
"logps/rejected": -233.1793212890625,
"loss": 0.6195,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.2396811544895172,
"rewards/margins": 0.2062177062034607,
"rewards/rejected": -0.4458988606929779,
"step": 404
},
{
"epoch": 0.46297484142256434,
"grad_norm": 108.6063244513385,
"learning_rate": 1.8354103960137473e-07,
"logits/chosen": -1.4036638736724854,
"logits/rejected": -1.4488118886947632,
"logps/chosen": -230.92767333984375,
"logps/rejected": -239.439208984375,
"loss": 0.6795,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.32087260484695435,
"rewards/margins": 0.10698382556438446,
"rewards/rejected": -0.4278564453125,
"step": 406
},
{
"epoch": 0.46525550566602525,
"grad_norm": 86.59174830548812,
"learning_rate": 1.833313919082515e-07,
"logits/chosen": -1.1800165176391602,
"logits/rejected": -1.3113179206848145,
"logps/chosen": -161.62985229492188,
"logps/rejected": -202.1416473388672,
"loss": 0.6378,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.14624057710170746,
"rewards/margins": 0.3915923237800598,
"rewards/rejected": -0.5378329157829285,
"step": 408
},
{
"epoch": 0.46753616990948615,
"grad_norm": 92.36447587751275,
"learning_rate": 1.8312053876911977e-07,
"logits/chosen": -1.360296368598938,
"logits/rejected": -1.3766343593597412,
"logps/chosen": -203.7003173828125,
"logps/rejected": -233.6885223388672,
"loss": 0.6525,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.13416332006454468,
"rewards/margins": 0.36563166975975037,
"rewards/rejected": -0.49979501962661743,
"step": 410
},
{
"epoch": 0.46981683415294706,
"grad_norm": 79.86005120772512,
"learning_rate": 1.8290848323411553e-07,
"logits/chosen": -1.4597082138061523,
"logits/rejected": -1.4648618698120117,
"logps/chosen": -151.07449340820312,
"logps/rejected": -151.00494384765625,
"loss": 0.7101,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.05570954829454422,
"rewards/margins": 0.08499263972043991,
"rewards/rejected": -0.02928308956325054,
"step": 412
},
{
"epoch": 0.47209749839640797,
"grad_norm": 70.72545574881394,
"learning_rate": 1.8269522837076817e-07,
"logits/chosen": -1.4345017671585083,
"logits/rejected": -1.498835802078247,
"logps/chosen": -180.71951293945312,
"logps/rejected": -195.18490600585938,
"loss": 0.6126,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.060558006167411804,
"rewards/margins": 0.23918625712394714,
"rewards/rejected": -0.29974424839019775,
"step": 414
},
{
"epoch": 0.47437816263986887,
"grad_norm": 91.45944584413142,
"learning_rate": 1.8248077726395631e-07,
"logits/chosen": -1.381393313407898,
"logits/rejected": -1.3962711095809937,
"logps/chosen": -139.09548950195312,
"logps/rejected": -154.32943725585938,
"loss": 0.644,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.08523928374052048,
"rewards/margins": 0.4487174153327942,
"rewards/rejected": -0.3634781539440155,
"step": 416
},
{
"epoch": 0.4766588268833298,
"grad_norm": 118.15308317787861,
"learning_rate": 1.8226513301586297e-07,
"logits/chosen": -1.1643140316009521,
"logits/rejected": -1.1523492336273193,
"logps/chosen": -181.3363494873047,
"logps/rejected": -208.14474487304688,
"loss": 0.6631,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.18151137232780457,
"rewards/margins": 0.11719075590372086,
"rewards/rejected": -0.2987021207809448,
"step": 418
},
{
"epoch": 0.4789394911267907,
"grad_norm": 87.18607024470172,
"learning_rate": 1.820482987459308e-07,
"logits/chosen": -1.2820664644241333,
"logits/rejected": -1.340864658355713,
"logps/chosen": -152.3217315673828,
"logps/rejected": -166.40199279785156,
"loss": 0.6306,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.20804628729820251,
"rewards/margins": 0.08607995510101318,
"rewards/rejected": -0.2941262423992157,
"step": 420
},
{
"epoch": 0.4812201553702516,
"grad_norm": 101.2197264458815,
"learning_rate": 1.818302775908169e-07,
"logits/chosen": -1.3642746210098267,
"logits/rejected": -1.4530210494995117,
"logps/chosen": -177.27783203125,
"logps/rejected": -209.4615478515625,
"loss": 0.6002,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.06609566509723663,
"rewards/margins": 0.24875135719776154,
"rewards/rejected": -0.31484702229499817,
"step": 422
},
{
"epoch": 0.4835008196137125,
"grad_norm": 102.24009472626234,
"learning_rate": 1.8161107270434757e-07,
"logits/chosen": -1.322772741317749,
"logits/rejected": -1.3549699783325195,
"logps/chosen": -175.10890197753906,
"logps/rejected": -189.1114959716797,
"loss": 0.7052,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.2034468799829483,
"rewards/margins": -0.003699667751789093,
"rewards/rejected": -0.199747234582901,
"step": 424
},
{
"epoch": 0.4857814838571734,
"grad_norm": 88.40359207083654,
"learning_rate": 1.8139068725747251e-07,
"logits/chosen": -1.2980220317840576,
"logits/rejected": -1.328129768371582,
"logps/chosen": -127.48295593261719,
"logps/rejected": -145.63137817382812,
"loss": 0.6353,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.17486554384231567,
"rewards/margins": 0.16773542761802673,
"rewards/rejected": -0.3426010012626648,
"step": 426
},
{
"epoch": 0.4880621481006343,
"grad_norm": 85.01546232524993,
"learning_rate": 1.811691244382191e-07,
"logits/chosen": -1.240834355354309,
"logits/rejected": -1.315454125404358,
"logps/chosen": -171.84417724609375,
"logps/rejected": -196.60919189453125,
"loss": 0.6332,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.043825387954711914,
"rewards/margins": 0.35121482610702515,
"rewards/rejected": -0.39504021406173706,
"step": 428
},
{
"epoch": 0.4903428123440952,
"grad_norm": 92.50180646153169,
"learning_rate": 1.8094638745164619e-07,
"logits/chosen": -1.368233561515808,
"logits/rejected": -1.4737908840179443,
"logps/chosen": -231.7982177734375,
"logps/rejected": -257.540771484375,
"loss": 0.6393,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.41820961236953735,
"rewards/margins": 0.03227778524160385,
"rewards/rejected": -0.4504873752593994,
"step": 430
},
{
"epoch": 0.4926234765875561,
"grad_norm": 93.17182791936958,
"learning_rate": 1.8072247951979782e-07,
"logits/chosen": -1.218642234802246,
"logits/rejected": -1.3522933721542358,
"logps/chosen": -161.9176025390625,
"logps/rejected": -191.59205627441406,
"loss": 0.6532,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.2410704791545868,
"rewards/margins": 0.09214940667152405,
"rewards/rejected": -0.33321988582611084,
"step": 432
},
{
"epoch": 0.494904140831017,
"grad_norm": 85.22671128905367,
"learning_rate": 1.8049740388165646e-07,
"logits/chosen": -1.3610548973083496,
"logits/rejected": -1.369373083114624,
"logps/chosen": -170.4285888671875,
"logps/rejected": -203.44435119628906,
"loss": 0.6788,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.12429852783679962,
"rewards/margins": 0.06445370614528656,
"rewards/rejected": -0.18875223398208618,
"step": 434
},
{
"epoch": 0.49718480507447793,
"grad_norm": 106.2899671913355,
"learning_rate": 1.8027116379309635e-07,
"logits/chosen": -1.3293408155441284,
"logits/rejected": -1.3227120637893677,
"logps/chosen": -189.62835693359375,
"logps/rejected": -196.80776977539062,
"loss": 0.6658,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.36924922466278076,
"rewards/margins": 0.04584325850009918,
"rewards/rejected": -0.41509246826171875,
"step": 436
},
{
"epoch": 0.49946546931793884,
"grad_norm": 97.4627918918024,
"learning_rate": 1.8004376252683629e-07,
"logits/chosen": -1.3211669921875,
"logits/rejected": -1.318382740020752,
"logps/chosen": -170.44007873535156,
"logps/rejected": -177.15567016601562,
"loss": 0.652,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.0650164932012558,
"rewards/margins": 0.2474796026945114,
"rewards/rejected": -0.3124960958957672,
"step": 438
},
{
"epoch": 0.5017461335613997,
"grad_norm": 94.35984756124336,
"learning_rate": 1.7981520337239229e-07,
"logits/chosen": -1.4195128679275513,
"logits/rejected": -1.517584204673767,
"logps/chosen": -124.39069366455078,
"logps/rejected": -148.39462280273438,
"loss": 0.6071,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.07836849987506866,
"rewards/margins": 0.2909252941608429,
"rewards/rejected": -0.21255679428577423,
"step": 440
},
{
"epoch": 0.5040267978048607,
"grad_norm": 105.60849785169918,
"learning_rate": 1.7958548963603e-07,
"logits/chosen": -1.3688621520996094,
"logits/rejected": -1.3791152238845825,
"logps/chosen": -193.5460968017578,
"logps/rejected": -198.70162963867188,
"loss": 0.6155,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.006075154058635235,
"rewards/margins": 0.4186677634716034,
"rewards/rejected": -0.41259267926216125,
"step": 442
},
{
"epoch": 0.5063074620483216,
"grad_norm": 105.19505214300226,
"learning_rate": 1.7935462464071694e-07,
"logits/chosen": -1.4063529968261719,
"logits/rejected": -1.4609131813049316,
"logps/chosen": -136.6985321044922,
"logps/rejected": -142.0517120361328,
"loss": 0.6638,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.021208282560110092,
"rewards/margins": 0.23011060059070587,
"rewards/rejected": -0.25131887197494507,
"step": 444
},
{
"epoch": 0.5085881262917825,
"grad_norm": 80.98126219811707,
"learning_rate": 1.7912261172607434e-07,
"logits/chosen": -1.4103684425354004,
"logits/rejected": -1.4659305810928345,
"logps/chosen": -181.66671752929688,
"logps/rejected": -211.0421142578125,
"loss": 0.667,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.0669156014919281,
"rewards/margins": 0.23970045149326324,
"rewards/rejected": -0.30661600828170776,
"step": 446
},
{
"epoch": 0.5108687905352434,
"grad_norm": 86.7187808268374,
"learning_rate": 1.7888945424832892e-07,
"logits/chosen": -1.2415688037872314,
"logits/rejected": -1.2903611660003662,
"logps/chosen": -195.07174682617188,
"logps/rejected": -223.18804931640625,
"loss": 0.6344,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.26416096091270447,
"rewards/margins": 0.23690298199653625,
"rewards/rejected": -0.5010639429092407,
"step": 448
},
{
"epoch": 0.5131494547787043,
"grad_norm": 91.51916447980646,
"learning_rate": 1.7865515558026425e-07,
"logits/chosen": -1.2545722723007202,
"logits/rejected": -1.320673942565918,
"logps/chosen": -190.4950714111328,
"logps/rejected": -196.8434600830078,
"loss": 0.645,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.12865591049194336,
"rewards/margins": 0.2827560007572174,
"rewards/rejected": -0.41141194105148315,
"step": 450
},
{
"epoch": 0.5154301190221652,
"grad_norm": 78.95115222899102,
"learning_rate": 1.78419719111172e-07,
"logits/chosen": -1.2161290645599365,
"logits/rejected": -1.2955366373062134,
"logps/chosen": -133.3023223876953,
"logps/rejected": -173.38735961914062,
"loss": 0.6202,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.031168239191174507,
"rewards/margins": 0.3425666093826294,
"rewards/rejected": -0.37373483180999756,
"step": 452
},
{
"epoch": 0.5177107832656261,
"grad_norm": 85.03550287122602,
"learning_rate": 1.78183148246803e-07,
"logits/chosen": -1.3149534463882446,
"logits/rejected": -1.3734480142593384,
"logps/chosen": -181.44749450683594,
"logps/rejected": -190.66098022460938,
"loss": 0.6579,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.02150813303887844,
"rewards/margins": 0.09824002534151077,
"rewards/rejected": -0.11974816769361496,
"step": 454
},
{
"epoch": 0.519991447509087,
"grad_norm": 101.82433211173363,
"learning_rate": 1.779454464093177e-07,
"logits/chosen": -1.2327746152877808,
"logits/rejected": -1.2479290962219238,
"logps/chosen": -123.62596130371094,
"logps/rejected": -145.822021484375,
"loss": 0.71,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.008450210094451904,
"rewards/margins": 0.1642961949110031,
"rewards/rejected": -0.1558459848165512,
"step": 456
},
{
"epoch": 0.522272111752548,
"grad_norm": 86.25249319253086,
"learning_rate": 1.7770661703723716e-07,
"logits/chosen": -1.4179208278656006,
"logits/rejected": -1.4704089164733887,
"logps/chosen": -155.395263671875,
"logps/rejected": -241.91619873046875,
"loss": 0.6544,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.0192805714905262,
"rewards/margins": 0.4996611773967743,
"rewards/rejected": -0.5189418792724609,
"step": 458
},
{
"epoch": 0.5245527759960088,
"grad_norm": 95.73185772448069,
"learning_rate": 1.7746666358539268e-07,
"logits/chosen": -1.324285864830017,
"logits/rejected": -1.3773202896118164,
"logps/chosen": -138.01214599609375,
"logps/rejected": -187.76605224609375,
"loss": 0.6781,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.11806146800518036,
"rewards/margins": 0.12408170104026794,
"rewards/rejected": -0.2421431541442871,
"step": 460
},
{
"epoch": 0.5268334402394698,
"grad_norm": 83.04802100256194,
"learning_rate": 1.7722558952487637e-07,
"logits/chosen": -1.2252494096755981,
"logits/rejected": -1.2402747869491577,
"logps/chosen": -145.02134704589844,
"logps/rejected": -157.798095703125,
"loss": 0.6658,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.16228270530700684,
"rewards/margins": -0.006807304918766022,
"rewards/rejected": -0.1554754078388214,
"step": 462
},
{
"epoch": 0.5291141044829306,
"grad_norm": 106.4784679324614,
"learning_rate": 1.7698339834299062e-07,
"logits/chosen": -1.2572526931762695,
"logits/rejected": -1.2763482332229614,
"logps/chosen": -205.0850067138672,
"logps/rejected": -216.61383056640625,
"loss": 0.6758,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.10254046320915222,
"rewards/margins": 0.47703590989112854,
"rewards/rejected": -0.5795763731002808,
"step": 464
},
{
"epoch": 0.5313947687263916,
"grad_norm": 88.23039919938233,
"learning_rate": 1.7674009354319776e-07,
"logits/chosen": -1.2906126976013184,
"logits/rejected": -1.3812601566314697,
"logps/chosen": -182.90606689453125,
"logps/rejected": -192.94520568847656,
"loss": 0.5967,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.12573322653770447,
"rewards/margins": 0.15527519583702087,
"rewards/rejected": -0.28100842237472534,
"step": 466
},
{
"epoch": 0.5336754329698524,
"grad_norm": 96.24054074341065,
"learning_rate": 1.764956786450694e-07,
"logits/chosen": -1.2798047065734863,
"logits/rejected": -1.3678182363510132,
"logps/chosen": -160.85719299316406,
"logps/rejected": -189.01951599121094,
"loss": 0.6321,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.01916433498263359,
"rewards/margins": 0.328861802816391,
"rewards/rejected": -0.3480261266231537,
"step": 468
},
{
"epoch": 0.5359560972133134,
"grad_norm": 87.51604676838089,
"learning_rate": 1.7625015718423548e-07,
"logits/chosen": -1.3382031917572021,
"logits/rejected": -1.4446802139282227,
"logps/chosen": -152.4910125732422,
"logps/rejected": -192.94979858398438,
"loss": 0.6364,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.021129202097654343,
"rewards/margins": 0.3612152636051178,
"rewards/rejected": -0.38234445452690125,
"step": 470
},
{
"epoch": 0.5382367614567742,
"grad_norm": 118.40842425272356,
"learning_rate": 1.7600353271233312e-07,
"logits/chosen": -1.319710373878479,
"logits/rejected": -1.284549355506897,
"logps/chosen": -152.95933532714844,
"logps/rejected": -156.8072052001953,
"loss": 0.7236,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.10047149658203125,
"rewards/margins": 0.010637946426868439,
"rewards/rejected": -0.11110943555831909,
"step": 472
},
{
"epoch": 0.5405174257002352,
"grad_norm": 91.83509455780727,
"learning_rate": 1.7575580879695522e-07,
"logits/chosen": -1.445102572441101,
"logits/rejected": -1.4954876899719238,
"logps/chosen": -196.539306640625,
"logps/rejected": -211.00616455078125,
"loss": 0.6409,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.04536067694425583,
"rewards/margins": 0.1365879625082016,
"rewards/rejected": -0.18194863200187683,
"step": 474
},
{
"epoch": 0.542798089943696,
"grad_norm": 90.74830800218245,
"learning_rate": 1.7550698902159894e-07,
"logits/chosen": -1.3519353866577148,
"logits/rejected": -1.4368261098861694,
"logps/chosen": -228.4932098388672,
"logps/rejected": -261.87109375,
"loss": 0.6373,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.1739276647567749,
"rewards/margins": 0.011357128620147705,
"rewards/rejected": -0.1852847784757614,
"step": 476
},
{
"epoch": 0.545078754187157,
"grad_norm": 114.14083612276099,
"learning_rate": 1.7525707698561384e-07,
"logits/chosen": -1.296964168548584,
"logits/rejected": -1.3777580261230469,
"logps/chosen": -178.81484985351562,
"logps/rejected": -220.63436889648438,
"loss": 0.6515,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.10972025990486145,
"rewards/margins": 0.45839735865592957,
"rewards/rejected": -0.5681176781654358,
"step": 478
},
{
"epoch": 0.547359418430618,
"grad_norm": 106.70099483754956,
"learning_rate": 1.750060763041497e-07,
"logits/chosen": -1.3242031335830688,
"logits/rejected": -1.3594613075256348,
"logps/chosen": -139.518310546875,
"logps/rejected": -171.3057403564453,
"loss": 0.6932,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.09948215633630753,
"rewards/margins": 0.13577090203762054,
"rewards/rejected": -0.23525305092334747,
"step": 480
},
{
"epoch": 0.5496400826740788,
"grad_norm": 93.76871490458784,
"learning_rate": 1.7475399060810435e-07,
"logits/chosen": -1.5048158168792725,
"logits/rejected": -1.567376971244812,
"logps/chosen": -175.3671112060547,
"logps/rejected": -195.3438262939453,
"loss": 0.6284,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.08378006517887115,
"rewards/margins": 0.32175135612487793,
"rewards/rejected": -0.23797132074832916,
"step": 482
},
{
"epoch": 0.5519207469175398,
"grad_norm": 87.30478432077047,
"learning_rate": 1.7450082354407107e-07,
"logits/chosen": -1.3454846143722534,
"logits/rejected": -1.3264262676239014,
"logps/chosen": -150.41058349609375,
"logps/rejected": -158.28414916992188,
"loss": 0.6392,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.06661003828048706,
"rewards/margins": 0.05708365514874458,
"rewards/rejected": 0.009526383131742477,
"step": 484
},
{
"epoch": 0.5542014111610006,
"grad_norm": 97.70119982177076,
"learning_rate": 1.7424657877428594e-07,
"logits/chosen": -1.4438061714172363,
"logits/rejected": -1.5046687126159668,
"logps/chosen": -210.83221435546875,
"logps/rejected": -220.98056030273438,
"loss": 0.6711,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.02250128984451294,
"rewards/margins": 0.25783565640449524,
"rewards/rejected": -0.23533441126346588,
"step": 486
},
{
"epoch": 0.5564820754044616,
"grad_norm": 92.7470076491593,
"learning_rate": 1.7399125997657475e-07,
"logits/chosen": -1.4578030109405518,
"logits/rejected": -1.5047590732574463,
"logps/chosen": -173.8478546142578,
"logps/rejected": -218.70567321777344,
"loss": 0.6464,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.005423933267593384,
"rewards/margins": 0.22333881258964539,
"rewards/rejected": -0.22876277565956116,
"step": 488
},
{
"epoch": 0.5587627396479224,
"grad_norm": 80.43868813641923,
"learning_rate": 1.7373487084429986e-07,
"logits/chosen": -1.4293080568313599,
"logits/rejected": -1.3778735399246216,
"logps/chosen": -137.72915649414062,
"logps/rejected": -144.8123779296875,
"loss": 0.6175,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.04381662234663963,
"rewards/margins": 0.17386358976364136,
"rewards/rejected": -0.2176802158355713,
"step": 490
},
{
"epoch": 0.5610434038913834,
"grad_norm": 88.32928179136898,
"learning_rate": 1.734774150863067e-07,
"logits/chosen": -1.4234795570373535,
"logits/rejected": -1.4454480409622192,
"logps/chosen": -168.4951171875,
"logps/rejected": -183.8735809326172,
"loss": 0.6522,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.008489616215229034,
"rewards/margins": 0.20173315703868866,
"rewards/rejected": -0.19324351847171783,
"step": 492
},
{
"epoch": 0.5633240681348443,
"grad_norm": 93.76675043932717,
"learning_rate": 1.732188964268703e-07,
"logits/chosen": -1.2950657606124878,
"logits/rejected": -1.3514856100082397,
"logps/chosen": -174.49368286132812,
"logps/rejected": -198.4128875732422,
"loss": 0.6429,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.17520295083522797,
"rewards/margins": 0.17245244979858398,
"rewards/rejected": -0.34765538573265076,
"step": 494
},
{
"epoch": 0.5656047323783052,
"grad_norm": 124.66844580791373,
"learning_rate": 1.7295931860564118e-07,
"logits/chosen": -1.3973047733306885,
"logits/rejected": -1.3848158121109009,
"logps/chosen": -187.01031494140625,
"logps/rejected": -201.88232421875,
"loss": 0.661,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.4809916317462921,
"rewards/margins": -0.037112899124622345,
"rewards/rejected": -0.443878710269928,
"step": 496
},
{
"epoch": 0.5678853966217661,
"grad_norm": 98.27416452810739,
"learning_rate": 1.7269868537759138e-07,
"logits/chosen": -1.4414607286453247,
"logits/rejected": -1.4525971412658691,
"logps/chosen": -186.87220764160156,
"logps/rejected": -201.6339874267578,
"loss": 0.6551,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.048535339534282684,
"rewards/margins": 0.2541671097278595,
"rewards/rejected": -0.20563176274299622,
"step": 498
},
{
"epoch": 0.570166060865227,
"grad_norm": 81.20755107084783,
"learning_rate": 1.7243700051296016e-07,
"logits/chosen": -1.4566954374313354,
"logits/rejected": -1.458418369293213,
"logps/chosen": -186.07911682128906,
"logps/rejected": -181.54051208496094,
"loss": 0.6716,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.08042742311954498,
"rewards/margins": 0.003591485321521759,
"rewards/rejected": -0.08401892334222794,
"step": 500
},
{
"epoch": 0.570166060865227,
"eval_logits/chosen": -1.4689228534698486,
"eval_logits/rejected": -1.4550352096557617,
"eval_logps/chosen": -128.87640380859375,
"eval_logps/rejected": -132.28384399414062,
"eval_loss": 0.6351009011268616,
"eval_rewards/accuracies": 0.6399999856948853,
"eval_rewards/chosen": 0.10215496271848679,
"eval_rewards/margins": 0.12426460534334183,
"eval_rewards/rejected": -0.022109635174274445,
"eval_runtime": 20.6735,
"eval_samples_per_second": 4.837,
"eval_steps_per_second": 1.209,
"step": 500
},
{
"epoch": 0.5724467251086879,
"grad_norm": 87.15380241514247,
"learning_rate": 1.7217426779719944e-07,
"logits/chosen": -1.461560606956482,
"logits/rejected": -1.4978594779968262,
"logps/chosen": -145.8336639404297,
"logps/rejected": -164.410888671875,
"loss": 0.623,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.030693676322698593,
"rewards/margins": 0.31371259689331055,
"rewards/rejected": -0.34440624713897705,
"step": 502
},
{
"epoch": 0.5747273893521488,
"grad_norm": 109.6734083472702,
"learning_rate": 1.71910491030919e-07,
"logits/chosen": -1.4200553894042969,
"logits/rejected": -1.4383351802825928,
"logps/chosen": -185.2432861328125,
"logps/rejected": -198.3214569091797,
"loss": 0.6592,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.09178508818149567,
"rewards/margins": 0.1750696301460266,
"rewards/rejected": -0.2668547034263611,
"step": 504
},
{
"epoch": 0.5770080535956097,
"grad_norm": 95.33984559143799,
"learning_rate": 1.716456740298315e-07,
"logits/chosen": -1.356040120124817,
"logits/rejected": -1.3635413646697998,
"logps/chosen": -221.06320190429688,
"logps/rejected": -246.34091186523438,
"loss": 0.6466,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.224991112947464,
"rewards/margins": 0.12949411571025848,
"rewards/rejected": -0.35448524355888367,
"step": 506
},
{
"epoch": 0.5792887178390707,
"grad_norm": 104.51344570968739,
"learning_rate": 1.7137982062469739e-07,
"logits/chosen": -1.199120044708252,
"logits/rejected": -1.2254160642623901,
"logps/chosen": -182.35678100585938,
"logps/rejected": -205.9598388671875,
"loss": 0.6302,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.01045935694128275,
"rewards/margins": 0.4567835330963135,
"rewards/rejected": -0.46724286675453186,
"step": 508
},
{
"epoch": 0.5815693820825315,
"grad_norm": 80.61166146639357,
"learning_rate": 1.7111293466126936e-07,
"logits/chosen": -1.4290285110473633,
"logits/rejected": -1.465309977531433,
"logps/chosen": -183.41546630859375,
"logps/rejected": -190.38209533691406,
"loss": 0.6303,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.04882081598043442,
"rewards/margins": 0.2510834336280823,
"rewards/rejected": -0.2999042272567749,
"step": 510
},
{
"epoch": 0.5838500463259925,
"grad_norm": 81.58673928063743,
"learning_rate": 1.7084502000023678e-07,
"logits/chosen": -1.3187546730041504,
"logits/rejected": -1.3721165657043457,
"logps/chosen": -166.35317993164062,
"logps/rejected": -205.14459228515625,
"loss": 0.6609,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.06919605284929276,
"rewards/margins": 0.25993865728378296,
"rewards/rejected": -0.1907426118850708,
"step": 512
},
{
"epoch": 0.5861307105694533,
"grad_norm": 107.99959199152349,
"learning_rate": 1.7057608051716987e-07,
"logits/chosen": -1.2958874702453613,
"logits/rejected": -1.3409713506698608,
"logps/chosen": -189.3512420654297,
"logps/rejected": -229.98101806640625,
"loss": 0.6805,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.1412532776594162,
"rewards/margins": 0.1916051059961319,
"rewards/rejected": -0.3328583836555481,
"step": 514
},
{
"epoch": 0.5884113748129143,
"grad_norm": 103.31085588981476,
"learning_rate": 1.7030612010246357e-07,
"logits/chosen": -1.4542144536972046,
"logits/rejected": -1.4537440538406372,
"logps/chosen": -187.4440155029297,
"logps/rejected": -197.0413818359375,
"loss": 0.6901,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.20216982066631317,
"rewards/margins": -0.05732645466923714,
"rewards/rejected": -0.14484335482120514,
"step": 516
},
{
"epoch": 0.5906920390563751,
"grad_norm": 93.7004047268541,
"learning_rate": 1.7003514266128128e-07,
"logits/chosen": -1.3705557584762573,
"logits/rejected": -1.4125196933746338,
"logps/chosen": -176.84820556640625,
"logps/rejected": -218.4021453857422,
"loss": 0.688,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.13926447927951813,
"rewards/margins": 0.1602565199136734,
"rewards/rejected": -0.2995210289955139,
"step": 518
},
{
"epoch": 0.5929727032998361,
"grad_norm": 76.7210943847488,
"learning_rate": 1.6976315211349848e-07,
"logits/chosen": -1.402063250541687,
"logits/rejected": -1.5004804134368896,
"logps/chosen": -183.45787048339844,
"logps/rejected": -226.31532287597656,
"loss": 0.6405,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.08890336751937866,
"rewards/margins": 0.2809264659881592,
"rewards/rejected": -0.19202306866645813,
"step": 520
},
{
"epoch": 0.5952533675432969,
"grad_norm": 79.99952128240997,
"learning_rate": 1.694901523936458e-07,
"logits/chosen": -1.3303985595703125,
"logits/rejected": -1.4111953973770142,
"logps/chosen": -161.44390869140625,
"logps/rejected": -201.76593017578125,
"loss": 0.601,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.12671524286270142,
"rewards/margins": 0.4966307282447815,
"rewards/rejected": -0.3699154853820801,
"step": 522
},
{
"epoch": 0.5975340317867579,
"grad_norm": 89.60709297255305,
"learning_rate": 1.6921614745085235e-07,
"logits/chosen": -1.4100148677825928,
"logits/rejected": -1.4378656148910522,
"logps/chosen": -193.54835510253906,
"logps/rejected": -202.9269256591797,
"loss": 0.6182,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.035577308386564255,
"rewards/margins": 0.3006408214569092,
"rewards/rejected": -0.33621811866760254,
"step": 524
},
{
"epoch": 0.5998146960302188,
"grad_norm": 104.27911734761277,
"learning_rate": 1.689411412487885e-07,
"logits/chosen": -1.3611271381378174,
"logits/rejected": -1.3725131750106812,
"logps/chosen": -166.821533203125,
"logps/rejected": -197.65394592285156,
"loss": 0.6274,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.09458069503307343,
"rewards/margins": 0.400374174118042,
"rewards/rejected": -0.30579352378845215,
"step": 526
},
{
"epoch": 0.6020953602736797,
"grad_norm": 77.9183795766502,
"learning_rate": 1.6866513776560841e-07,
"logits/chosen": -1.2477551698684692,
"logits/rejected": -1.2647314071655273,
"logps/chosen": -143.03448486328125,
"logps/rejected": -149.70594787597656,
"loss": 0.6035,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.14375856518745422,
"rewards/margins": 0.17092323303222656,
"rewards/rejected": -0.02716466784477234,
"step": 528
},
{
"epoch": 0.6043760245171406,
"grad_norm": 79.08140854679833,
"learning_rate": 1.6838814099389265e-07,
"logits/chosen": -1.379310965538025,
"logits/rejected": -1.49821138381958,
"logps/chosen": -176.78890991210938,
"logps/rejected": -228.0948028564453,
"loss": 0.6114,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.18548306822776794,
"rewards/margins": 0.3985109329223633,
"rewards/rejected": -0.5839939117431641,
"step": 530
},
{
"epoch": 0.6066566887606015,
"grad_norm": 86.55652744872312,
"learning_rate": 1.6811015494059045e-07,
"logits/chosen": -1.340354323387146,
"logits/rejected": -1.372084617614746,
"logps/chosen": -139.92453002929688,
"logps/rejected": -170.04629516601562,
"loss": 0.5944,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.09279327094554901,
"rewards/margins": 0.36332762241363525,
"rewards/rejected": -0.27053436636924744,
"step": 532
},
{
"epoch": 0.6089373530040625,
"grad_norm": 86.16548448796566,
"learning_rate": 1.678311836269616e-07,
"logits/chosen": -1.2494535446166992,
"logits/rejected": -1.3522768020629883,
"logps/chosen": -133.36306762695312,
"logps/rejected": -140.9103240966797,
"loss": 0.626,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.22486190497875214,
"rewards/margins": 0.10757801681756973,
"rewards/rejected": -0.3324398994445801,
"step": 534
},
{
"epoch": 0.6112180172475233,
"grad_norm": 78.5036790731728,
"learning_rate": 1.6755123108851842e-07,
"logits/chosen": -1.2597969770431519,
"logits/rejected": -1.3842471837997437,
"logps/chosen": -102.91565704345703,
"logps/rejected": -149.77996826171875,
"loss": 0.6558,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.049101147800683975,
"rewards/margins": 0.2616952359676361,
"rewards/rejected": -0.21259410679340363,
"step": 536
},
{
"epoch": 0.6134986814909843,
"grad_norm": 97.00034287566903,
"learning_rate": 1.6727030137496727e-07,
"logits/chosen": -1.333500862121582,
"logits/rejected": -1.407701015472412,
"logps/chosen": -168.34678649902344,
"logps/rejected": -212.6487579345703,
"loss": 0.6333,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.23587258160114288,
"rewards/margins": 0.40141719579696655,
"rewards/rejected": -0.637289822101593,
"step": 538
},
{
"epoch": 0.6157793457344451,
"grad_norm": 93.14382617410926,
"learning_rate": 1.6698839855015007e-07,
"logits/chosen": -1.3826831579208374,
"logits/rejected": -1.3780534267425537,
"logps/chosen": -197.9063262939453,
"logps/rejected": -201.8406982421875,
"loss": 0.653,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.19411635398864746,
"rewards/margins": 0.06094612553715706,
"rewards/rejected": -0.2550624907016754,
"step": 540
},
{
"epoch": 0.6180600099779061,
"grad_norm": 84.06073173280129,
"learning_rate": 1.6670552669198546e-07,
"logits/chosen": -1.459622859954834,
"logits/rejected": -1.5475270748138428,
"logps/chosen": -149.38450622558594,
"logps/rejected": -174.00827026367188,
"loss": 0.6878,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.22387176752090454,
"rewards/margins": 0.04707152396440506,
"rewards/rejected": -0.2709433138370514,
"step": 542
},
{
"epoch": 0.620340674221367,
"grad_norm": 75.86401555746195,
"learning_rate": 1.6642168989240974e-07,
"logits/chosen": -1.3617055416107178,
"logits/rejected": -1.3725388050079346,
"logps/chosen": -177.85269165039062,
"logps/rejected": -204.47271728515625,
"loss": 0.6393,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.2369232326745987,
"rewards/margins": 0.4420955181121826,
"rewards/rejected": -0.6790187358856201,
"step": 544
},
{
"epoch": 0.6226213384648279,
"grad_norm": 99.08946862124856,
"learning_rate": 1.6613689225731787e-07,
"logits/chosen": -1.437178611755371,
"logits/rejected": -1.4497863054275513,
"logps/chosen": -152.61231994628906,
"logps/rejected": -163.36196899414062,
"loss": 0.6514,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.24256393313407898,
"rewards/margins": 0.14335207641124725,
"rewards/rejected": -0.3859160244464874,
"step": 546
},
{
"epoch": 0.6249020027082888,
"grad_norm": 82.17381709045306,
"learning_rate": 1.6585113790650386e-07,
"logits/chosen": -1.3418258428573608,
"logits/rejected": -1.357533574104309,
"logps/chosen": -150.5135955810547,
"logps/rejected": -168.92135620117188,
"loss": 0.6404,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.1530638188123703,
"rewards/margins": 0.16317017376422882,
"rewards/rejected": -0.3162340223789215,
"step": 548
},
{
"epoch": 0.6271826669517497,
"grad_norm": 98.6032079855375,
"learning_rate": 1.6556443097360133e-07,
"logits/chosen": -1.305914282798767,
"logits/rejected": -1.3782196044921875,
"logps/chosen": -187.4833526611328,
"logps/rejected": -213.8814697265625,
"loss": 0.6415,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.03956926241517067,
"rewards/margins": 0.45714423060417175,
"rewards/rejected": -0.4967134892940521,
"step": 550
},
{
"epoch": 0.6294633311952106,
"grad_norm": 89.5606898436777,
"learning_rate": 1.6527677560602363e-07,
"logits/chosen": -1.4830090999603271,
"logits/rejected": -1.4891180992126465,
"logps/chosen": -178.29112243652344,
"logps/rejected": -182.18019104003906,
"loss": 0.658,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.10583019256591797,
"rewards/margins": 0.16508930921554565,
"rewards/rejected": -0.270919531583786,
"step": 552
},
{
"epoch": 0.6317439954386715,
"grad_norm": 94.29213072448552,
"learning_rate": 1.6498817596490388e-07,
"logits/chosen": -1.3679401874542236,
"logits/rejected": -1.397881031036377,
"logps/chosen": -170.22218322753906,
"logps/rejected": -190.22608947753906,
"loss": 0.6632,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.07908223569393158,
"rewards/margins": 0.2489246428012848,
"rewards/rejected": -0.32800689339637756,
"step": 554
},
{
"epoch": 0.6340246596821324,
"grad_norm": 97.6468147380264,
"learning_rate": 1.6469863622503475e-07,
"logits/chosen": -1.3809480667114258,
"logits/rejected": -1.3862886428833008,
"logps/chosen": -161.6693878173828,
"logps/rejected": -186.37608337402344,
"loss": 0.6762,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.08833756297826767,
"rewards/margins": 0.3514275848865509,
"rewards/rejected": -0.4397651255130768,
"step": 556
},
{
"epoch": 0.6363053239255934,
"grad_norm": 90.63822694085678,
"learning_rate": 1.6440816057480812e-07,
"logits/chosen": -1.4811036586761475,
"logits/rejected": -1.554297924041748,
"logps/chosen": -214.304931640625,
"logps/rejected": -267.9236145019531,
"loss": 0.638,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.08694766461849213,
"rewards/margins": 0.4463149309158325,
"rewards/rejected": -0.359367311000824,
"step": 558
},
{
"epoch": 0.6385859881690542,
"grad_norm": 109.09749536253133,
"learning_rate": 1.641167532161545e-07,
"logits/chosen": -1.3816839456558228,
"logits/rejected": -1.3854830265045166,
"logps/chosen": -146.0878448486328,
"logps/rejected": -151.40838623046875,
"loss": 0.6199,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.2251240313053131,
"rewards/margins": 0.2835414409637451,
"rewards/rejected": -0.5086655020713806,
"step": 560
},
{
"epoch": 0.6408666524125152,
"grad_norm": 85.56418756561474,
"learning_rate": 1.63824418364482e-07,
"logits/chosen": -1.3435323238372803,
"logits/rejected": -1.4011439085006714,
"logps/chosen": -171.78982543945312,
"logps/rejected": -188.12899780273438,
"loss": 0.6342,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.03885346278548241,
"rewards/margins": 0.2555912137031555,
"rewards/rejected": -0.29444462060928345,
"step": 562
},
{
"epoch": 0.643147316655976,
"grad_norm": 91.83203136203487,
"learning_rate": 1.6353116024861583e-07,
"logits/chosen": -1.3896362781524658,
"logits/rejected": -1.3640623092651367,
"logps/chosen": -186.40646362304688,
"logps/rejected": -194.1944580078125,
"loss": 0.6166,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.14515984058380127,
"rewards/margins": 0.4773363471031189,
"rewards/rejected": -0.6224961876869202,
"step": 564
},
{
"epoch": 0.645427980899437,
"grad_norm": 81.53572929971928,
"learning_rate": 1.6323698311073665e-07,
"logits/chosen": -1.2487475872039795,
"logits/rejected": -1.298531413078308,
"logps/chosen": -157.927734375,
"logps/rejected": -172.3856658935547,
"loss": 0.6697,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.2098311483860016,
"rewards/margins": 0.2927185297012329,
"rewards/rejected": -0.5025496482849121,
"step": 566
},
{
"epoch": 0.6477086451428978,
"grad_norm": 114.13313398743495,
"learning_rate": 1.6294189120631955e-07,
"logits/chosen": -1.4274886846542358,
"logits/rejected": -1.5344736576080322,
"logps/chosen": -182.54502868652344,
"logps/rejected": -216.51434326171875,
"loss": 0.6837,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.11003254354000092,
"rewards/margins": 0.4595148265361786,
"rewards/rejected": -0.5695474147796631,
"step": 568
},
{
"epoch": 0.6499893093863588,
"grad_norm": 91.72506895488603,
"learning_rate": 1.6264588880407218e-07,
"logits/chosen": -1.3987553119659424,
"logits/rejected": -1.375367283821106,
"logps/chosen": -150.62136840820312,
"logps/rejected": -147.3133087158203,
"loss": 0.655,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.012118123471736908,
"rewards/margins": 0.23711130023002625,
"rewards/rejected": -0.22499315440654755,
"step": 570
},
{
"epoch": 0.6522699736298196,
"grad_norm": 107.88986883450896,
"learning_rate": 1.6234898018587336e-07,
"logits/chosen": -1.2692500352859497,
"logits/rejected": -1.369061827659607,
"logps/chosen": -139.50912475585938,
"logps/rejected": -160.3131561279297,
"loss": 0.6783,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.21677826344966888,
"rewards/margins": 0.14994385838508606,
"rewards/rejected": -0.36672213673591614,
"step": 572
},
{
"epoch": 0.6545506378732806,
"grad_norm": 78.69743235560887,
"learning_rate": 1.620511696467108e-07,
"logits/chosen": -1.4847999811172485,
"logits/rejected": -1.4455361366271973,
"logps/chosen": -143.21315002441406,
"logps/rejected": -142.0272674560547,
"loss": 0.5931,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.023255709558725357,
"rewards/margins": 0.20145481824874878,
"rewards/rejected": -0.17819911241531372,
"step": 574
},
{
"epoch": 0.6568313021167415,
"grad_norm": 88.95142783400384,
"learning_rate": 1.6175246149461918e-07,
"logits/chosen": -1.2740530967712402,
"logits/rejected": -1.454386830329895,
"logps/chosen": -151.84759521484375,
"logps/rejected": -200.50332641601562,
"loss": 0.6686,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.14912785589694977,
"rewards/margins": 0.19120272994041443,
"rewards/rejected": -0.3403306007385254,
"step": 576
},
{
"epoch": 0.6591119663602024,
"grad_norm": 106.08082954445281,
"learning_rate": 1.614528600506178e-07,
"logits/chosen": -1.3865227699279785,
"logits/rejected": -1.375576138496399,
"logps/chosen": -190.82093811035156,
"logps/rejected": -221.74673461914062,
"loss": 0.6078,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.02210266701877117,
"rewards/margins": 0.2864355444908142,
"rewards/rejected": -0.2643328607082367,
"step": 578
},
{
"epoch": 0.6613926306036633,
"grad_norm": 87.3395501088531,
"learning_rate": 1.6115236964864796e-07,
"logits/chosen": -1.4399878978729248,
"logits/rejected": -1.4395971298217773,
"logps/chosen": -173.22372436523438,
"logps/rejected": -176.63436889648438,
"loss": 0.6855,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.08596701920032501,
"rewards/margins": 0.2504124939441681,
"rewards/rejected": -0.3363794982433319,
"step": 580
},
{
"epoch": 0.6636732948471242,
"grad_norm": 106.76668867973154,
"learning_rate": 1.6085099463551042e-07,
"logits/chosen": -1.4452544450759888,
"logits/rejected": -1.5641038417816162,
"logps/chosen": -168.6453094482422,
"logps/rejected": -173.80926513671875,
"loss": 0.6725,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.173700749874115,
"rewards/margins": 0.12216023355722427,
"rewards/rejected": -0.29586100578308105,
"step": 582
},
{
"epoch": 0.6659539590905851,
"grad_norm": 87.05178703169928,
"learning_rate": 1.6054873937080243e-07,
"logits/chosen": -1.304762601852417,
"logits/rejected": -1.368096113204956,
"logps/chosen": -172.9827423095703,
"logps/rejected": -219.77606201171875,
"loss": 0.6104,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.15153777599334717,
"rewards/margins": 0.4469776451587677,
"rewards/rejected": -0.5985154509544373,
"step": 584
},
{
"epoch": 0.668234623334046,
"grad_norm": 110.06770845486187,
"learning_rate": 1.6024560822685464e-07,
"logits/chosen": -1.2189353704452515,
"logits/rejected": -1.2109813690185547,
"logps/chosen": -124.88150024414062,
"logps/rejected": -134.57229614257812,
"loss": 0.6398,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.04388846457004547,
"rewards/margins": 0.09341102838516235,
"rewards/rejected": -0.13729947805404663,
"step": 586
},
{
"epoch": 0.670515287577507,
"grad_norm": 96.86048631567245,
"learning_rate": 1.5994160558866803e-07,
"logits/chosen": -1.4283928871154785,
"logits/rejected": -1.454673171043396,
"logps/chosen": -141.67201232910156,
"logps/rejected": -165.86553955078125,
"loss": 0.6099,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.005323350429534912,
"rewards/margins": 0.37266457080841064,
"rewards/rejected": -0.37798795104026794,
"step": 588
},
{
"epoch": 0.6727959518209679,
"grad_norm": 87.33194066270343,
"learning_rate": 1.5963673585385014e-07,
"logits/chosen": -1.3119065761566162,
"logits/rejected": -1.3611897230148315,
"logps/chosen": -149.3885498046875,
"logps/rejected": -233.38589477539062,
"loss": 0.6465,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.1451117843389511,
"rewards/margins": 0.5241594314575195,
"rewards/rejected": -0.6692711710929871,
"step": 590
},
{
"epoch": 0.6750766160644288,
"grad_norm": 89.80632782962597,
"learning_rate": 1.5933100343255182e-07,
"logits/chosen": -1.478175401687622,
"logits/rejected": -1.5088427066802979,
"logps/chosen": -160.82077026367188,
"logps/rejected": -175.51266479492188,
"loss": 0.6521,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.026576366275548935,
"rewards/margins": 0.050725750625133514,
"rewards/rejected": -0.07730211317539215,
"step": 592
},
{
"epoch": 0.6773572803078897,
"grad_norm": 74.35318626181845,
"learning_rate": 1.5902441274740328e-07,
"logits/chosen": -1.3951764106750488,
"logits/rejected": -1.5363534688949585,
"logps/chosen": -199.1707000732422,
"logps/rejected": -223.73233032226562,
"loss": 0.6397,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.12182176113128662,
"rewards/margins": 0.23543262481689453,
"rewards/rejected": -0.35725438594818115,
"step": 594
},
{
"epoch": 0.6796379445513506,
"grad_norm": 94.79924897563647,
"learning_rate": 1.5871696823344998e-07,
"logits/chosen": -1.296017050743103,
"logits/rejected": -1.3113356828689575,
"logps/chosen": -217.41751098632812,
"logps/rejected": -248.5911865234375,
"loss": 0.6955,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.08776041865348816,
"rewards/margins": -0.076215460896492,
"rewards/rejected": -0.011544965207576752,
"step": 596
},
{
"epoch": 0.6819186087948115,
"grad_norm": 92.59481105091255,
"learning_rate": 1.584086743380887e-07,
"logits/chosen": -1.4293127059936523,
"logits/rejected": -1.4433321952819824,
"logps/chosen": -118.54342651367188,
"logps/rejected": -134.85447692871094,
"loss": 0.6368,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.17175720632076263,
"rewards/margins": 0.14412283897399902,
"rewards/rejected": -0.31588006019592285,
"step": 598
},
{
"epoch": 0.6841992730382724,
"grad_norm": 89.66618993893586,
"learning_rate": 1.580995355210031e-07,
"logits/chosen": -1.2930192947387695,
"logits/rejected": -1.368417501449585,
"logps/chosen": -156.90586853027344,
"logps/rejected": -204.53976440429688,
"loss": 0.655,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.10334792733192444,
"rewards/margins": 0.27799686789512634,
"rewards/rejected": -0.3813447952270508,
"step": 600
},
{
"epoch": 0.6841992730382724,
"eval_logits/chosen": -1.4766055345535278,
"eval_logits/rejected": -1.4625221490859985,
"eval_logps/chosen": -128.85867309570312,
"eval_logps/rejected": -132.3487091064453,
"eval_loss": 0.6277530789375305,
"eval_rewards/accuracies": 0.6000000238418579,
"eval_rewards/chosen": 0.10392862558364868,
"eval_rewards/margins": 0.13252468407154083,
"eval_rewards/rejected": -0.028596054762601852,
"eval_runtime": 20.497,
"eval_samples_per_second": 4.879,
"eval_steps_per_second": 1.22,
"step": 600
},
{
"epoch": 0.6864799372817333,
"grad_norm": 99.81191522440201,
"learning_rate": 1.577895562540992e-07,
"logits/chosen": -1.5043866634368896,
"logits/rejected": -1.5639445781707764,
"logps/chosen": -176.97195434570312,
"logps/rejected": -200.05955505371094,
"loss": 0.6298,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.08353392779827118,
"rewards/margins": 0.1179167851805687,
"rewards/rejected": -0.20145073533058167,
"step": 602
},
{
"epoch": 0.6887606015251942,
"grad_norm": 76.70830865796096,
"learning_rate": 1.574787410214407e-07,
"logits/chosen": -1.409602165222168,
"logits/rejected": -1.4615368843078613,
"logps/chosen": -155.466064453125,
"logps/rejected": -179.92701721191406,
"loss": 0.5929,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.06697350740432739,
"rewards/margins": 0.43057531118392944,
"rewards/rejected": -0.4975488483905792,
"step": 604
},
{
"epoch": 0.6910412657686551,
"grad_norm": 94.3888285857028,
"learning_rate": 1.571670943191841e-07,
"logits/chosen": -1.4215627908706665,
"logits/rejected": -1.4542145729064941,
"logps/chosen": -179.4984130859375,
"logps/rejected": -194.54330444335938,
"loss": 0.6238,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.06045013293623924,
"rewards/margins": 0.22754909098148346,
"rewards/rejected": -0.2879992127418518,
"step": 606
},
{
"epoch": 0.6933219300121161,
"grad_norm": 85.0198174251867,
"learning_rate": 1.5685462065551372e-07,
"logits/chosen": -1.3846269845962524,
"logits/rejected": -1.403570532798767,
"logps/chosen": -119.62281799316406,
"logps/rejected": -122.60442352294922,
"loss": 0.6716,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.05273966118693352,
"rewards/margins": 0.23132431507110596,
"rewards/rejected": -0.17858465015888214,
"step": 608
},
{
"epoch": 0.6956025942555769,
"grad_norm": 106.39373760782738,
"learning_rate": 1.5654132455057648e-07,
"logits/chosen": -1.4965307712554932,
"logits/rejected": -1.5246574878692627,
"logps/chosen": -172.1771240234375,
"logps/rejected": -178.22027587890625,
"loss": 0.6898,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.24939650297164917,
"rewards/margins": -0.02339550480246544,
"rewards/rejected": -0.22600099444389343,
"step": 610
},
{
"epoch": 0.6978832584990379,
"grad_norm": 90.59146954300502,
"learning_rate": 1.562272105364164e-07,
"logits/chosen": -1.2977298498153687,
"logits/rejected": -1.356859803199768,
"logps/chosen": -184.52252197265625,
"logps/rejected": -211.69007873535156,
"loss": 0.6426,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.14194564521312714,
"rewards/margins": 0.10268077999353409,
"rewards/rejected": -0.24462641775608063,
"step": 612
},
{
"epoch": 0.7001639227424987,
"grad_norm": 132.7396200312291,
"learning_rate": 1.5591228315690912e-07,
"logits/chosen": -1.312849760055542,
"logits/rejected": -1.3643529415130615,
"logps/chosen": -183.94964599609375,
"logps/rejected": -215.9215087890625,
"loss": 0.6904,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.048665039241313934,
"rewards/margins": 0.34853097796440125,
"rewards/rejected": -0.3971959948539734,
"step": 614
},
{
"epoch": 0.7024445869859597,
"grad_norm": 85.32152495878141,
"learning_rate": 1.5559654696769627e-07,
"logits/chosen": -1.3156163692474365,
"logits/rejected": -1.3534281253814697,
"logps/chosen": -135.82110595703125,
"logps/rejected": -146.7247314453125,
"loss": 0.6404,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.19835428893566132,
"rewards/margins": 0.1589125692844391,
"rewards/rejected": -0.3572668731212616,
"step": 616
},
{
"epoch": 0.7047252512294205,
"grad_norm": 97.72254578285961,
"learning_rate": 1.5528000653611933e-07,
"logits/chosen": -1.3361620903015137,
"logits/rejected": -1.3171348571777344,
"logps/chosen": -133.0448760986328,
"logps/rejected": -157.213623046875,
"loss": 0.6296,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.2093026041984558,
"rewards/margins": 0.04571903124451637,
"rewards/rejected": -0.2550216317176819,
"step": 618
},
{
"epoch": 0.7070059154728815,
"grad_norm": 81.04476758820772,
"learning_rate": 1.5496266644115383e-07,
"logits/chosen": -1.335296869277954,
"logits/rejected": -1.4067720174789429,
"logps/chosen": -166.6826934814453,
"logps/rejected": -201.6751251220703,
"loss": 0.5956,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.02005193382501602,
"rewards/margins": 0.4108864665031433,
"rewards/rejected": -0.3908345103263855,
"step": 620
},
{
"epoch": 0.7092865797163423,
"grad_norm": 117.48862378162204,
"learning_rate": 1.5464453127334292e-07,
"logits/chosen": -1.3234319686889648,
"logits/rejected": -1.3931816816329956,
"logps/chosen": -211.87327575683594,
"logps/rejected": -244.9843292236328,
"loss": 0.6435,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.23849377036094666,
"rewards/margins": 0.13237711787223816,
"rewards/rejected": -0.3708708584308624,
"step": 622
},
{
"epoch": 0.7115672439598033,
"grad_norm": 97.28790849798257,
"learning_rate": 1.5432560563473102e-07,
"logits/chosen": -1.3715243339538574,
"logits/rejected": -1.4115769863128662,
"logps/chosen": -167.96871948242188,
"logps/rejected": -195.65823364257812,
"loss": 0.6435,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.18149994313716888,
"rewards/margins": 0.21998891234397888,
"rewards/rejected": -0.4014888405799866,
"step": 624
},
{
"epoch": 0.7138479082032642,
"grad_norm": 74.77329414265908,
"learning_rate": 1.5400589413879728e-07,
"logits/chosen": -1.3941603899002075,
"logits/rejected": -1.4356334209442139,
"logps/chosen": -146.8162841796875,
"logps/rejected": -166.09603881835938,
"loss": 0.6115,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.0008982233703136444,
"rewards/margins": 0.2602536678314209,
"rewards/rejected": -0.25935542583465576,
"step": 626
},
{
"epoch": 0.7161285724467251,
"grad_norm": 102.40110691278403,
"learning_rate": 1.5368540141038877e-07,
"logits/chosen": -1.33980393409729,
"logits/rejected": -1.3720303773880005,
"logps/chosen": -177.0899658203125,
"logps/rejected": -177.4815216064453,
"loss": 0.6489,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.17625108361244202,
"rewards/margins": 0.22537095844745636,
"rewards/rejected": -0.40162205696105957,
"step": 628
},
{
"epoch": 0.718409236690186,
"grad_norm": 77.60752469183791,
"learning_rate": 1.5336413208565372e-07,
"logits/chosen": -1.424604058265686,
"logits/rejected": -1.4951881170272827,
"logps/chosen": -217.2845458984375,
"logps/rejected": -298.6929016113281,
"loss": 0.5522,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.31031134724617004,
"rewards/margins": 0.769706130027771,
"rewards/rejected": -1.0800174474716187,
"step": 630
},
{
"epoch": 0.7206899009336469,
"grad_norm": 86.60572317721935,
"learning_rate": 1.5304209081197424e-07,
"logits/chosen": -1.2321975231170654,
"logits/rejected": -1.1972179412841797,
"logps/chosen": -162.22976684570312,
"logps/rejected": -173.07118225097656,
"loss": 0.651,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.0614384189248085,
"rewards/margins": 0.27045127749443054,
"rewards/rejected": -0.33188968896865845,
"step": 632
},
{
"epoch": 0.7229705651771078,
"grad_norm": 105.40040114011227,
"learning_rate": 1.5271928224789933e-07,
"logits/chosen": -1.3975963592529297,
"logits/rejected": -1.4231470823287964,
"logps/chosen": -202.03836059570312,
"logps/rejected": -236.25344848632812,
"loss": 0.6755,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.32155218720436096,
"rewards/margins": 0.17959336936473846,
"rewards/rejected": -0.5011455416679382,
"step": 634
},
{
"epoch": 0.7252512294205687,
"grad_norm": 81.36310946045379,
"learning_rate": 1.5239571106307727e-07,
"logits/chosen": -1.3148503303527832,
"logits/rejected": -1.3296858072280884,
"logps/chosen": -97.38423919677734,
"logps/rejected": -119.96273803710938,
"loss": 0.632,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.0010671375785022974,
"rewards/margins": 0.2470298409461975,
"rewards/rejected": -0.2459626942873001,
"step": 636
},
{
"epoch": 0.7275318936640296,
"grad_norm": 96.83510386866747,
"learning_rate": 1.5207138193818824e-07,
"logits/chosen": -1.4544883966445923,
"logits/rejected": -1.4232516288757324,
"logps/chosen": -173.56967163085938,
"logps/rejected": -202.63575744628906,
"loss": 0.618,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.11890554428100586,
"rewards/margins": 0.4641531705856323,
"rewards/rejected": -0.5830587148666382,
"step": 638
},
{
"epoch": 0.7298125579074906,
"grad_norm": 98.36905363836429,
"learning_rate": 1.5174629956487657e-07,
"logits/chosen": -1.3621355295181274,
"logits/rejected": -1.470801591873169,
"logps/chosen": -152.83348083496094,
"logps/rejected": -180.09095764160156,
"loss": 0.6283,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.1331956833600998,
"rewards/margins": 0.12644681334495544,
"rewards/rejected": -0.25964248180389404,
"step": 640
},
{
"epoch": 0.7320932221509515,
"grad_norm": 99.21748796643946,
"learning_rate": 1.514204686456828e-07,
"logits/chosen": -1.4174845218658447,
"logits/rejected": -1.5134220123291016,
"logps/chosen": -199.45867919921875,
"logps/rejected": -233.53509521484375,
"loss": 0.6424,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.029536306858062744,
"rewards/margins": 0.40678656101226807,
"rewards/rejected": -0.3772502541542053,
"step": 642
},
{
"epoch": 0.7343738863944124,
"grad_norm": 104.85775879115107,
"learning_rate": 1.5109389389397564e-07,
"logits/chosen": -1.309433102607727,
"logits/rejected": -1.3806891441345215,
"logps/chosen": -198.72012329101562,
"logps/rejected": -229.50213623046875,
"loss": 0.636,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.2979825735092163,
"rewards/margins": 0.2780360281467438,
"rewards/rejected": -0.5760185718536377,
"step": 644
},
{
"epoch": 0.7366545506378733,
"grad_norm": 83.70012784480859,
"learning_rate": 1.50766580033884e-07,
"logits/chosen": -1.3813220262527466,
"logits/rejected": -1.4268507957458496,
"logps/chosen": -144.8968048095703,
"logps/rejected": -168.29891967773438,
"loss": 0.6301,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.2506921887397766,
"rewards/margins": 0.17526662349700928,
"rewards/rejected": -0.4259588122367859,
"step": 646
},
{
"epoch": 0.7389352148813342,
"grad_norm": 76.15531374501609,
"learning_rate": 1.5043853180022838e-07,
"logits/chosen": -1.3109767436981201,
"logits/rejected": -1.393604040145874,
"logps/chosen": -123.48320770263672,
"logps/rejected": -146.57994079589844,
"loss": 0.627,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.030930444598197937,
"rewards/margins": 0.237480029463768,
"rewards/rejected": -0.26841047406196594,
"step": 648
},
{
"epoch": 0.7412158791247951,
"grad_norm": 80.96947358910683,
"learning_rate": 1.5010975393845257e-07,
"logits/chosen": -1.249889612197876,
"logits/rejected": -1.2592819929122925,
"logps/chosen": -210.37823486328125,
"logps/rejected": -225.4598388671875,
"loss": 0.6471,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.39817529916763306,
"rewards/margins": 0.08833743631839752,
"rewards/rejected": -0.4865127205848694,
"step": 650
},
{
"epoch": 0.743496543368256,
"grad_norm": 87.1921118015952,
"learning_rate": 1.4978025120455482e-07,
"logits/chosen": -1.3601963520050049,
"logits/rejected": -1.376826286315918,
"logps/chosen": -167.55612182617188,
"logps/rejected": -175.45620727539062,
"loss": 0.6323,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.03152412921190262,
"rewards/margins": 0.33548736572265625,
"rewards/rejected": -0.30396321415901184,
"step": 652
},
{
"epoch": 0.745777207611717,
"grad_norm": 101.45108780703592,
"learning_rate": 1.4945002836501935e-07,
"logits/chosen": -1.4448864459991455,
"logits/rejected": -1.5169280767440796,
"logps/chosen": -251.17156982421875,
"logps/rejected": -303.9085693359375,
"loss": 0.6505,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.29235783219337463,
"rewards/margins": 0.28852301836013794,
"rewards/rejected": -0.580880880355835,
"step": 654
},
{
"epoch": 0.7480578718551778,
"grad_norm": 78.1814307088951,
"learning_rate": 1.4911909019674702e-07,
"logits/chosen": -1.3950117826461792,
"logits/rejected": -1.3936371803283691,
"logps/chosen": -221.95086669921875,
"logps/rejected": -246.12425231933594,
"loss": 0.7095,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.40981170535087585,
"rewards/margins": 0.04004380851984024,
"rewards/rejected": -0.4498555362224579,
"step": 656
},
{
"epoch": 0.7503385360986388,
"grad_norm": 86.3290682570179,
"learning_rate": 1.4878744148698654e-07,
"logits/chosen": -1.4538379907608032,
"logits/rejected": -1.4509309530258179,
"logps/chosen": -240.70957946777344,
"logps/rejected": -277.9938049316406,
"loss": 0.6274,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.10867351293563843,
"rewards/margins": 0.7040268182754517,
"rewards/rejected": -0.8127003908157349,
"step": 658
},
{
"epoch": 0.7526192003420996,
"grad_norm": 76.663224249533,
"learning_rate": 1.48455087033265e-07,
"logits/chosen": -1.347740888595581,
"logits/rejected": -1.4315268993377686,
"logps/chosen": -175.90298461914062,
"logps/rejected": -204.00741577148438,
"loss": 0.589,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.0198584645986557,
"rewards/margins": 0.3997357189655304,
"rewards/rejected": -0.3798772692680359,
"step": 660
},
{
"epoch": 0.7548998645855606,
"grad_norm": 89.45613833312,
"learning_rate": 1.4812203164331865e-07,
"logits/chosen": -1.3782517910003662,
"logits/rejected": -1.3579304218292236,
"logps/chosen": -106.40101623535156,
"logps/rejected": -115.47900390625,
"loss": 0.6276,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.002452205866575241,
"rewards/margins": 0.2672392427921295,
"rewards/rejected": -0.2647870182991028,
"step": 662
},
{
"epoch": 0.7571805288290214,
"grad_norm": 90.62698292022132,
"learning_rate": 1.4778828013502315e-07,
"logits/chosen": -1.3181474208831787,
"logits/rejected": -1.2984850406646729,
"logps/chosen": -121.110595703125,
"logps/rejected": -144.5844268798828,
"loss": 0.6345,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.14049722254276276,
"rewards/margins": 0.16425339877605438,
"rewards/rejected": -0.30475062131881714,
"step": 664
},
{
"epoch": 0.7594611930724824,
"grad_norm": 90.46387262398376,
"learning_rate": 1.474538373363241e-07,
"logits/chosen": -1.2038509845733643,
"logits/rejected": -1.1771519184112549,
"logps/chosen": -123.54405212402344,
"logps/rejected": -128.04415893554688,
"loss": 0.6513,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.0023752544075250626,
"rewards/margins": 0.20540934801101685,
"rewards/rejected": -0.20778462290763855,
"step": 666
},
{
"epoch": 0.7617418573159432,
"grad_norm": 98.8814238604518,
"learning_rate": 1.4711870808516705e-07,
"logits/chosen": -1.3800256252288818,
"logits/rejected": -1.4212613105773926,
"logps/chosen": -143.5724639892578,
"logps/rejected": -149.91879272460938,
"loss": 0.6367,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.25999924540519714,
"rewards/margins": 0.30095675587654114,
"rewards/rejected": -0.5609559416770935,
"step": 668
},
{
"epoch": 0.7640225215594042,
"grad_norm": 91.14237505783925,
"learning_rate": 1.4678289722942755e-07,
"logits/chosen": -1.4362088441848755,
"logits/rejected": -1.4652812480926514,
"logps/chosen": -238.3679962158203,
"logps/rejected": -245.81390380859375,
"loss": 0.6248,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.2839330732822418,
"rewards/margins": 0.23081044852733612,
"rewards/rejected": -0.5147435069084167,
"step": 670
},
{
"epoch": 0.766303185802865,
"grad_norm": 92.40909256739194,
"learning_rate": 1.4644640962684106e-07,
"logits/chosen": -1.2180765867233276,
"logits/rejected": -1.2128483057022095,
"logps/chosen": -139.85572814941406,
"logps/rejected": -155.68014526367188,
"loss": 0.6594,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.31525713205337524,
"rewards/margins": 0.2499125599861145,
"rewards/rejected": -0.5651696920394897,
"step": 672
},
{
"epoch": 0.768583850046326,
"grad_norm": 85.52183536423232,
"learning_rate": 1.461092501449326e-07,
"logits/chosen": -1.432080864906311,
"logits/rejected": -1.4694151878356934,
"logps/chosen": -192.5015411376953,
"logps/rejected": -206.3730926513672,
"loss": 0.6026,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.06770830601453781,
"rewards/margins": 0.42051053047180176,
"rewards/rejected": -0.48821884393692017,
"step": 674
},
{
"epoch": 0.7708645142897869,
"grad_norm": 90.10321239819011,
"learning_rate": 1.4577142366094641e-07,
"logits/chosen": -1.357291579246521,
"logits/rejected": -1.4703514575958252,
"logps/chosen": -159.84365844726562,
"logps/rejected": -179.19932556152344,
"loss": 0.6417,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.26335883140563965,
"rewards/margins": 0.19603171944618225,
"rewards/rejected": -0.4593905210494995,
"step": 676
},
{
"epoch": 0.7731451785332478,
"grad_norm": 89.71495801422127,
"learning_rate": 1.454329350617754e-07,
"logits/chosen": -1.2440226078033447,
"logits/rejected": -1.2795155048370361,
"logps/chosen": -142.0009765625,
"logps/rejected": -174.45877075195312,
"loss": 0.8425,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.18753905594348907,
"rewards/margins": 0.41140902042388916,
"rewards/rejected": -0.598948061466217,
"step": 678
},
{
"epoch": 0.7754258427767087,
"grad_norm": 86.78570750881197,
"learning_rate": 1.4509378924389042e-07,
"logits/chosen": -1.3585668802261353,
"logits/rejected": -1.4263851642608643,
"logps/chosen": -138.0133056640625,
"logps/rejected": -149.3664093017578,
"loss": 0.6293,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.08084426820278168,
"rewards/margins": 0.16473475098609924,
"rewards/rejected": -0.24557898938655853,
"step": 680
},
{
"epoch": 0.7777065070201696,
"grad_norm": 107.27075654983396,
"learning_rate": 1.4475399111326942e-07,
"logits/chosen": -1.3274388313293457,
"logits/rejected": -1.4092543125152588,
"logps/chosen": -205.84066772460938,
"logps/rejected": -229.88511657714844,
"loss": 0.6449,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.28960123658180237,
"rewards/margins": 0.23577138781547546,
"rewards/rejected": -0.5253726243972778,
"step": 682
},
{
"epoch": 0.7799871712636305,
"grad_norm": 78.77386252056206,
"learning_rate": 1.4441354558532653e-07,
"logits/chosen": -1.3599066734313965,
"logits/rejected": -1.4156947135925293,
"logps/chosen": -178.33787536621094,
"logps/rejected": -178.77232360839844,
"loss": 0.644,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.20014306902885437,
"rewards/margins": 0.3175792396068573,
"rewards/rejected": -0.5177222490310669,
"step": 684
},
{
"epoch": 0.7822678355070914,
"grad_norm": 96.56956615564326,
"learning_rate": 1.4407245758484092e-07,
"logits/chosen": -1.4600579738616943,
"logits/rejected": -1.4812790155410767,
"logps/chosen": -200.6736602783203,
"logps/rejected": -215.11302185058594,
"loss": 0.6408,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.31072649359703064,
"rewards/margins": 0.2427929937839508,
"rewards/rejected": -0.5535194873809814,
"step": 686
},
{
"epoch": 0.7845484997505523,
"grad_norm": 93.7282766427954,
"learning_rate": 1.4373073204588556e-07,
"logits/chosen": -1.392540454864502,
"logits/rejected": -1.5576982498168945,
"logps/chosen": -173.74102783203125,
"logps/rejected": -209.58255004882812,
"loss": 0.6396,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.07411661744117737,
"rewards/margins": 0.2528524100780487,
"rewards/rejected": -0.32696905732154846,
"step": 688
},
{
"epoch": 0.7868291639940133,
"grad_norm": 78.03441039978401,
"learning_rate": 1.433883739117558e-07,
"logits/chosen": -1.4191378355026245,
"logits/rejected": -1.376889705657959,
"logps/chosen": -167.5023651123047,
"logps/rejected": -175.2780303955078,
"loss": 0.6232,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.10266311466693878,
"rewards/margins": 0.33145037293434143,
"rewards/rejected": -0.22878730297088623,
"step": 690
},
{
"epoch": 0.7891098282374741,
"grad_norm": 102.68741637989442,
"learning_rate": 1.4304538813489807e-07,
"logits/chosen": -1.365789771080017,
"logits/rejected": -1.4465529918670654,
"logps/chosen": -241.91314697265625,
"logps/rejected": -287.3153381347656,
"loss": 0.626,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.22250030934810638,
"rewards/margins": 0.3011859655380249,
"rewards/rejected": -0.5236862301826477,
"step": 692
},
{
"epoch": 0.7913904924809351,
"grad_norm": 126.01863649654494,
"learning_rate": 1.4270177967683795e-07,
"logits/chosen": -1.1870087385177612,
"logits/rejected": -1.2576615810394287,
"logps/chosen": -193.8482666015625,
"logps/rejected": -214.00135803222656,
"loss": 0.683,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.6029391884803772,
"rewards/margins": 0.08865418285131454,
"rewards/rejected": -0.69159334897995,
"step": 694
},
{
"epoch": 0.793671156724396,
"grad_norm": 105.78445666178517,
"learning_rate": 1.4235755350810852e-07,
"logits/chosen": -1.4039561748504639,
"logits/rejected": -1.483564853668213,
"logps/chosen": -131.8105926513672,
"logps/rejected": -161.2996368408203,
"loss": 0.5968,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.06916500627994537,
"rewards/margins": 0.4792707562446594,
"rewards/rejected": -0.5484358072280884,
"step": 696
},
{
"epoch": 0.7959518209678569,
"grad_norm": 111.82639934813994,
"learning_rate": 1.420127146081786e-07,
"logits/chosen": -1.182243824005127,
"logits/rejected": -1.2272214889526367,
"logps/chosen": -142.89569091796875,
"logps/rejected": -167.72943115234375,
"loss": 0.7077,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.33521801233291626,
"rewards/margins": 0.031463623046875,
"rewards/rejected": -0.36668163537979126,
"step": 698
},
{
"epoch": 0.7982324852113178,
"grad_norm": 93.0448822586256,
"learning_rate": 1.4166726796538043e-07,
"logits/chosen": -1.337456464767456,
"logits/rejected": -1.38818359375,
"logps/chosen": -161.9310760498047,
"logps/rejected": -177.00717163085938,
"loss": 0.5943,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.11526203155517578,
"rewards/margins": 0.27307793498039246,
"rewards/rejected": -0.38833993673324585,
"step": 700
},
{
"epoch": 0.7982324852113178,
"eval_logits/chosen": -1.4622485637664795,
"eval_logits/rejected": -1.4484994411468506,
"eval_logps/chosen": -129.2548065185547,
"eval_logps/rejected": -133.13601684570312,
"eval_loss": 0.6083559989929199,
"eval_rewards/accuracies": 0.6399999856948853,
"eval_rewards/chosen": 0.06431641429662704,
"eval_rewards/margins": 0.17164374887943268,
"eval_rewards/rejected": -0.10732734948396683,
"eval_runtime": 19.822,
"eval_samples_per_second": 5.045,
"eval_steps_per_second": 1.261,
"step": 700
},
{
"epoch": 0.8005131494547787,
"grad_norm": 81.54591894736863,
"learning_rate": 1.413212185768378e-07,
"logits/chosen": -1.3647618293762207,
"logits/rejected": -1.4832388162612915,
"logps/chosen": -163.0651092529297,
"logps/rejected": -199.4841766357422,
"loss": 0.6247,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.14452999830245972,
"rewards/margins": 0.4548623561859131,
"rewards/rejected": -0.31033238768577576,
"step": 702
},
{
"epoch": 0.8027938136982397,
"grad_norm": 88.3455225094938,
"learning_rate": 1.409745714483936e-07,
"logits/chosen": -1.3221628665924072,
"logits/rejected": -1.379399299621582,
"logps/chosen": -124.75774383544922,
"logps/rejected": -142.49942016601562,
"loss": 0.6477,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.004875157028436661,
"rewards/margins": 0.23325148224830627,
"rewards/rejected": -0.23812663555145264,
"step": 704
},
{
"epoch": 0.8050744779417005,
"grad_norm": 104.14495646431855,
"learning_rate": 1.406273315945374e-07,
"logits/chosen": -1.4262161254882812,
"logits/rejected": -1.460605502128601,
"logps/chosen": -188.75218200683594,
"logps/rejected": -196.2736358642578,
"loss": 0.6307,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.09237577021121979,
"rewards/margins": 0.26734021306037903,
"rewards/rejected": -0.35971593856811523,
"step": 706
},
{
"epoch": 0.8073551421851615,
"grad_norm": 88.27883050256283,
"learning_rate": 1.4027950403833294e-07,
"logits/chosen": -1.3447182178497314,
"logits/rejected": -1.3849260807037354,
"logps/chosen": -194.97628784179688,
"logps/rejected": -231.31488037109375,
"loss": 0.608,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.018238365650177002,
"rewards/margins": 0.4262167513370514,
"rewards/rejected": -0.444455087184906,
"step": 708
},
{
"epoch": 0.8096358064286223,
"grad_norm": 83.81720367834012,
"learning_rate": 1.3993109381134552e-07,
"logits/chosen": -1.458938717842102,
"logits/rejected": -1.4975433349609375,
"logps/chosen": -217.40234375,
"logps/rejected": -230.56849670410156,
"loss": 0.5997,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.06927414983510971,
"rewards/margins": 0.34377092123031616,
"rewards/rejected": -0.41304510831832886,
"step": 710
},
{
"epoch": 0.8119164706720833,
"grad_norm": 83.92843092918012,
"learning_rate": 1.3958210595356923e-07,
"logits/chosen": -1.295701265335083,
"logits/rejected": -1.3689717054367065,
"logps/chosen": -184.71035766601562,
"logps/rejected": -231.5858154296875,
"loss": 0.6496,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.07217526435852051,
"rewards/margins": 0.5205077528953552,
"rewards/rejected": -0.5926830768585205,
"step": 712
},
{
"epoch": 0.8141971349155441,
"grad_norm": 64.10905806570788,
"learning_rate": 1.3923254551335385e-07,
"logits/chosen": -1.3552354574203491,
"logits/rejected": -1.5121649503707886,
"logps/chosen": -162.65443420410156,
"logps/rejected": -202.2783203125,
"loss": 0.6038,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.21346963942050934,
"rewards/margins": 0.2602553367614746,
"rewards/rejected": -0.47372499108314514,
"step": 714
},
{
"epoch": 0.8164777991590051,
"grad_norm": 96.65072480066111,
"learning_rate": 1.388824175473321e-07,
"logits/chosen": -1.4826998710632324,
"logits/rejected": -1.525536060333252,
"logps/chosen": -157.91253662109375,
"logps/rejected": -189.42970275878906,
"loss": 0.6418,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.15659984946250916,
"rewards/margins": 0.5801843404769897,
"rewards/rejected": -0.7367842197418213,
"step": 716
},
{
"epoch": 0.8187584634024659,
"grad_norm": 133.38988345223416,
"learning_rate": 1.3853172712034618e-07,
"logits/chosen": -1.453382968902588,
"logits/rejected": -1.4554516077041626,
"logps/chosen": -123.51729583740234,
"logps/rejected": -133.98687744140625,
"loss": 0.6316,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.17259877920150757,
"rewards/margins": 0.09528855979442596,
"rewards/rejected": -0.26788732409477234,
"step": 718
},
{
"epoch": 0.8210391276459269,
"grad_norm": 103.6833810750344,
"learning_rate": 1.3818047930537489e-07,
"logits/chosen": -1.4238344430923462,
"logits/rejected": -1.4430863857269287,
"logps/chosen": -185.29090881347656,
"logps/rejected": -190.6168212890625,
"loss": 0.6817,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.1492576152086258,
"rewards/margins": 0.1548936814069748,
"rewards/rejected": -0.304151326417923,
"step": 720
},
{
"epoch": 0.8233197918893878,
"grad_norm": 104.78480311225195,
"learning_rate": 1.3782867918345986e-07,
"logits/chosen": -1.4426988363265991,
"logits/rejected": -1.474928855895996,
"logps/chosen": -182.13693237304688,
"logps/rejected": -207.8540802001953,
"loss": 0.688,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.28544071316719055,
"rewards/margins": 0.010512780398130417,
"rewards/rejected": -0.2959534525871277,
"step": 722
},
{
"epoch": 0.8256004561328487,
"grad_norm": 83.99721091366573,
"learning_rate": 1.374763318436323e-07,
"logits/chosen": -1.3846015930175781,
"logits/rejected": -1.374580979347229,
"logps/chosen": -188.639892578125,
"logps/rejected": -203.4746856689453,
"loss": 0.6455,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.09084916859865189,
"rewards/margins": 0.13614203035831451,
"rewards/rejected": -0.2269912213087082,
"step": 724
},
{
"epoch": 0.8278811203763096,
"grad_norm": 100.80568473474456,
"learning_rate": 1.371234423828393e-07,
"logits/chosen": -1.3973044157028198,
"logits/rejected": -1.4167506694793701,
"logps/chosen": -221.90057373046875,
"logps/rejected": -223.55625915527344,
"loss": 0.6358,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.5376750826835632,
"rewards/margins": 0.1100195124745369,
"rewards/rejected": -0.6476945281028748,
"step": 726
},
{
"epoch": 0.8301617846197705,
"grad_norm": 95.2695276072168,
"learning_rate": 1.367700159058701e-07,
"logits/chosen": -1.304479718208313,
"logits/rejected": -1.3024879693984985,
"logps/chosen": -132.629150390625,
"logps/rejected": -157.943115234375,
"loss": 0.6385,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.17815637588500977,
"rewards/margins": 0.16010738909244537,
"rewards/rejected": -0.33826377987861633,
"step": 728
},
{
"epoch": 0.8324424488632314,
"grad_norm": 84.57675708069675,
"learning_rate": 1.3641605752528223e-07,
"logits/chosen": -1.345677137374878,
"logits/rejected": -1.369272232055664,
"logps/chosen": -174.67979431152344,
"logps/rejected": -177.95265197753906,
"loss": 0.6631,
"rewards/accuracies": 0.375,
"rewards/chosen": -0.33565065264701843,
"rewards/margins": -0.10611479729413986,
"rewards/rejected": -0.22953587770462036,
"step": 730
},
{
"epoch": 0.8347231131066923,
"grad_norm": 95.98437315557469,
"learning_rate": 1.3606157236132753e-07,
"logits/chosen": -1.3388769626617432,
"logits/rejected": -1.4168953895568848,
"logps/chosen": -128.86917114257812,
"logps/rejected": -155.26756286621094,
"loss": 0.5995,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.12645135819911957,
"rewards/margins": 0.4350703954696655,
"rewards/rejected": -0.5615217685699463,
"step": 732
},
{
"epoch": 0.8370037773501532,
"grad_norm": 98.72975210848782,
"learning_rate": 1.3570656554187823e-07,
"logits/chosen": -1.293025255203247,
"logits/rejected": -1.3117451667785645,
"logps/chosen": -198.599609375,
"logps/rejected": -195.0568084716797,
"loss": 0.6664,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.3894595205783844,
"rewards/margins": 0.01842033676803112,
"rewards/rejected": -0.40787985920906067,
"step": 734
},
{
"epoch": 0.8392844415936142,
"grad_norm": 83.44989797339352,
"learning_rate": 1.353510422023526e-07,
"logits/chosen": -1.3290454149246216,
"logits/rejected": -1.3762670755386353,
"logps/chosen": -108.33480834960938,
"logps/rejected": -132.43887329101562,
"loss": 0.6699,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.07590903341770172,
"rewards/margins": 0.31655895709991455,
"rewards/rejected": -0.3924679458141327,
"step": 736
},
{
"epoch": 0.841565105837075,
"grad_norm": 94.8310467886239,
"learning_rate": 1.3499500748564075e-07,
"logits/chosen": -1.4289618730545044,
"logits/rejected": -1.4254275560379028,
"logps/chosen": -111.20790100097656,
"logps/rejected": -120.1327896118164,
"loss": 0.6651,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.06255226582288742,
"rewards/margins": 0.16382303833961487,
"rewards/rejected": -0.2263752967119217,
"step": 738
},
{
"epoch": 0.843845770080536,
"grad_norm": 106.55945054665669,
"learning_rate": 1.346384665420302e-07,
"logits/chosen": -1.2947108745574951,
"logits/rejected": -1.3571364879608154,
"logps/chosen": -157.76895141601562,
"logps/rejected": -187.6517333984375,
"loss": 0.6321,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.14570750296115875,
"rewards/margins": 0.39257413148880005,
"rewards/rejected": -0.5382816791534424,
"step": 740
},
{
"epoch": 0.8461264343239968,
"grad_norm": 112.76083171183551,
"learning_rate": 1.3428142452913133e-07,
"logits/chosen": -1.4128669500350952,
"logits/rejected": -1.4613770246505737,
"logps/chosen": -202.3125457763672,
"logps/rejected": -242.2345428466797,
"loss": 0.6542,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.27778810262680054,
"rewards/margins": 0.3144941031932831,
"rewards/rejected": -0.592282235622406,
"step": 742
},
{
"epoch": 0.8484070985674578,
"grad_norm": 100.11503458436644,
"learning_rate": 1.3392388661180302e-07,
"logits/chosen": -1.3573894500732422,
"logits/rejected": -1.344806432723999,
"logps/chosen": -186.35108947753906,
"logps/rejected": -188.49490356445312,
"loss": 0.6701,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.16072505712509155,
"rewards/margins": 0.19017082452774048,
"rewards/rejected": -0.3508959114551544,
"step": 744
},
{
"epoch": 0.8506877628109187,
"grad_norm": 91.60219394383267,
"learning_rate": 1.3356585796207755e-07,
"logits/chosen": -1.3965177536010742,
"logits/rejected": -1.4244239330291748,
"logps/chosen": -201.50318908691406,
"logps/rejected": -217.1870574951172,
"loss": 0.6803,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.007203985005617142,
"rewards/margins": 0.4475135803222656,
"rewards/rejected": -0.44030967354774475,
"step": 746
},
{
"epoch": 0.8529684270543796,
"grad_norm": 93.27202711374441,
"learning_rate": 1.3320734375908606e-07,
"logits/chosen": -1.4266613721847534,
"logits/rejected": -1.4168901443481445,
"logps/chosen": -208.13624572753906,
"logps/rejected": -238.91502380371094,
"loss": 0.6581,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.2992842197418213,
"rewards/margins": 0.2575336992740631,
"rewards/rejected": -0.556817889213562,
"step": 748
},
{
"epoch": 0.8552490912978405,
"grad_norm": 99.8766408873739,
"learning_rate": 1.328483491889836e-07,
"logits/chosen": -1.427262306213379,
"logits/rejected": -1.4984745979309082,
"logps/chosen": -215.9974365234375,
"logps/rejected": -236.14759826660156,
"loss": 0.6592,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.048569947481155396,
"rewards/margins": 0.15008097887039185,
"rewards/rejected": -0.19865092635154724,
"step": 750
},
{
"epoch": 0.8575297555413014,
"grad_norm": 82.7550556411054,
"learning_rate": 1.3248887944487393e-07,
"logits/chosen": -1.410501480102539,
"logits/rejected": -1.452683687210083,
"logps/chosen": -122.24147033691406,
"logps/rejected": -137.63116455078125,
"loss": 0.6134,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.11725883185863495,
"rewards/margins": 0.1699836403131485,
"rewards/rejected": -0.05272480472922325,
"step": 752
},
{
"epoch": 0.8598104197847624,
"grad_norm": 88.9952584850963,
"learning_rate": 1.321289397267347e-07,
"logits/chosen": -1.4281740188598633,
"logits/rejected": -1.445003628730774,
"logps/chosen": -177.8570556640625,
"logps/rejected": -181.70663452148438,
"loss": 0.6919,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.2182515561580658,
"rewards/margins": 0.0669536367058754,
"rewards/rejected": -0.28520524501800537,
"step": 754
},
{
"epoch": 0.8620910840282232,
"grad_norm": 100.22335380917765,
"learning_rate": 1.3176853524134198e-07,
"logits/chosen": -1.391309142112732,
"logits/rejected": -1.4018808603286743,
"logps/chosen": -164.9562530517578,
"logps/rejected": -178.225341796875,
"loss": 0.6706,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.04197298735380173,
"rewards/margins": 0.15188831090927124,
"rewards/rejected": -0.19386129081249237,
"step": 756
},
{
"epoch": 0.8643717482716842,
"grad_norm": 100.1727346446802,
"learning_rate": 1.314076712021949e-07,
"logits/chosen": -1.35850989818573,
"logits/rejected": -1.420215129852295,
"logps/chosen": -157.33843994140625,
"logps/rejected": -192.61769104003906,
"loss": 0.6228,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.10978934168815613,
"rewards/margins": 0.24940092861652374,
"rewards/rejected": -0.3591902256011963,
"step": 758
},
{
"epoch": 0.866652412515145,
"grad_norm": 89.59203114100742,
"learning_rate": 1.3104635282944052e-07,
"logits/chosen": -1.4790459871292114,
"logits/rejected": -1.4666205644607544,
"logps/chosen": -161.86483764648438,
"logps/rejected": -211.35423278808594,
"loss": 0.6404,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.004301717504858971,
"rewards/margins": 0.35839876532554626,
"rewards/rejected": -0.35409703850746155,
"step": 760
},
{
"epoch": 0.868933076758606,
"grad_norm": 94.6581914253334,
"learning_rate": 1.3068458534979812e-07,
"logits/chosen": -1.3811688423156738,
"logits/rejected": -1.4190725088119507,
"logps/chosen": -80.00721740722656,
"logps/rejected": -101.73347473144531,
"loss": 0.6688,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.02035554125905037,
"rewards/margins": 0.07091270387172699,
"rewards/rejected": -0.09126824140548706,
"step": 762
},
{
"epoch": 0.8712137410020668,
"grad_norm": 84.05822092329534,
"learning_rate": 1.3032237399648357e-07,
"logits/chosen": -1.3661377429962158,
"logits/rejected": -1.3815851211547852,
"logps/chosen": -106.82656860351562,
"logps/rejected": -128.75656127929688,
"loss": 0.6309,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.03877873718738556,
"rewards/margins": 0.08257640898227692,
"rewards/rejected": -0.12135513871908188,
"step": 764
},
{
"epoch": 0.8734944052455278,
"grad_norm": 99.58060667623182,
"learning_rate": 1.2995972400913367e-07,
"logits/chosen": -1.4144562482833862,
"logits/rejected": -1.4412004947662354,
"logps/chosen": -136.15481567382812,
"logps/rejected": -141.8118438720703,
"loss": 0.6589,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.0782175287604332,
"rewards/margins": 0.17598725855350494,
"rewards/rejected": -0.25420477986335754,
"step": 766
},
{
"epoch": 0.8757750694889886,
"grad_norm": 80.17753876582776,
"learning_rate": 1.2959664063373042e-07,
"logits/chosen": -1.3012042045593262,
"logits/rejected": -1.2482383251190186,
"logps/chosen": -100.65060424804688,
"logps/rejected": -112.65821838378906,
"loss": 0.5368,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.04776890203356743,
"rewards/margins": 0.4462125897407532,
"rewards/rejected": -0.39844369888305664,
"step": 768
},
{
"epoch": 0.8780557337324496,
"grad_norm": 88.16104383702894,
"learning_rate": 1.2923312912252506e-07,
"logits/chosen": -1.4704458713531494,
"logits/rejected": -1.5337769985198975,
"logps/chosen": -118.58712768554688,
"logps/rejected": -128.90980529785156,
"loss": 0.6971,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.005523152183741331,
"rewards/margins": 0.031648553907871246,
"rewards/rejected": -0.02612539939582348,
"step": 770
},
{
"epoch": 0.8803363979759105,
"grad_norm": 88.57684727206627,
"learning_rate": 1.288691947339621e-07,
"logits/chosen": -1.4641995429992676,
"logits/rejected": -1.4404255151748657,
"logps/chosen": -188.07818603515625,
"logps/rejected": -189.4072265625,
"loss": 0.6435,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.11661610007286072,
"rewards/margins": 0.22115452587604523,
"rewards/rejected": -0.33777061104774475,
"step": 772
},
{
"epoch": 0.8826170622193714,
"grad_norm": 88.03404600894119,
"learning_rate": 1.2850484273260325e-07,
"logits/chosen": -1.2959342002868652,
"logits/rejected": -1.3783100843429565,
"logps/chosen": -205.9984130859375,
"logps/rejected": -227.6925506591797,
"loss": 0.6664,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.16467887163162231,
"rewards/margins": 0.33329781889915466,
"rewards/rejected": -0.497976690530777,
"step": 774
},
{
"epoch": 0.8848977264628323,
"grad_norm": 85.2906054415725,
"learning_rate": 1.2814007838905129e-07,
"logits/chosen": -1.3777484893798828,
"logits/rejected": -1.4851555824279785,
"logps/chosen": -169.7268524169922,
"logps/rejected": -183.84872436523438,
"loss": 0.5976,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.050482071936130524,
"rewards/margins": 0.21894827485084534,
"rewards/rejected": -0.26943036913871765,
"step": 776
},
{
"epoch": 0.8871783907062932,
"grad_norm": 113.67510179823725,
"learning_rate": 1.2777490697987375e-07,
"logits/chosen": -1.361803412437439,
"logits/rejected": -1.4689048528671265,
"logps/chosen": -188.6908416748047,
"logps/rejected": -239.35243225097656,
"loss": 0.6593,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.024178601801395416,
"rewards/margins": 0.5260721445083618,
"rewards/rejected": -0.5018935203552246,
"step": 778
},
{
"epoch": 0.8894590549497541,
"grad_norm": 77.93754881822308,
"learning_rate": 1.2740933378752683e-07,
"logits/chosen": -1.4177353382110596,
"logits/rejected": -1.4752013683319092,
"logps/chosen": -196.52670288085938,
"logps/rejected": -216.70066833496094,
"loss": 0.5885,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.18388278782367706,
"rewards/margins": 0.34562817215919495,
"rewards/rejected": -0.5295109748840332,
"step": 780
},
{
"epoch": 0.891739719193215,
"grad_norm": 85.24959090845118,
"learning_rate": 1.2704336410027862e-07,
"logits/chosen": -1.4634852409362793,
"logits/rejected": -1.5205847024917603,
"logps/chosen": -250.54759216308594,
"logps/rejected": -285.6510009765625,
"loss": 0.6189,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.32170242071151733,
"rewards/margins": 0.21656639873981476,
"rewards/rejected": -0.5382688045501709,
"step": 782
},
{
"epoch": 0.8940203834366759,
"grad_norm": 94.62957856064564,
"learning_rate": 1.2667700321213279e-07,
"logits/chosen": -1.216140627861023,
"logits/rejected": -1.3199231624603271,
"logps/chosen": -163.63653564453125,
"logps/rejected": -182.559814453125,
"loss": 0.6221,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.21417541801929474,
"rewards/margins": 0.07622166723012924,
"rewards/rejected": -0.29039713740348816,
"step": 784
},
{
"epoch": 0.8963010476801369,
"grad_norm": 84.13982267155843,
"learning_rate": 1.263102564227521e-07,
"logits/chosen": -1.3924446105957031,
"logits/rejected": -1.3973028659820557,
"logps/chosen": -164.72738647460938,
"logps/rejected": -185.91358947753906,
"loss": 0.6378,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.017229687422513962,
"rewards/margins": 0.29563748836517334,
"rewards/rejected": -0.3128671646118164,
"step": 786
},
{
"epoch": 0.8985817119235977,
"grad_norm": 92.91613247214089,
"learning_rate": 1.2594312903738162e-07,
"logits/chosen": -1.4232394695281982,
"logits/rejected": -1.5436615943908691,
"logps/chosen": -137.60520935058594,
"logps/rejected": -159.5936737060547,
"loss": 0.6099,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.14493076503276825,
"rewards/margins": 0.23380112648010254,
"rewards/rejected": -0.3787318468093872,
"step": 788
},
{
"epoch": 0.9008623761670587,
"grad_norm": 109.24475281169839,
"learning_rate": 1.2557562636677192e-07,
"logits/chosen": -1.3665200471878052,
"logits/rejected": -1.4350488185882568,
"logps/chosen": -191.39637756347656,
"logps/rejected": -255.76321411132812,
"loss": 0.6655,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.16933086514472961,
"rewards/margins": 0.5223888158798218,
"rewards/rejected": -0.691719651222229,
"step": 790
},
{
"epoch": 0.9031430404105195,
"grad_norm": 66.209268910187,
"learning_rate": 1.252077537271024e-07,
"logits/chosen": -1.318701982498169,
"logits/rejected": -1.3234184980392456,
"logps/chosen": -180.39767456054688,
"logps/rejected": -182.42808532714844,
"loss": 0.5986,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.012874465435743332,
"rewards/margins": 0.4246137738227844,
"rewards/rejected": -0.43748825788497925,
"step": 792
},
{
"epoch": 0.9054237046539805,
"grad_norm": 110.64800895876068,
"learning_rate": 1.2483951643990425e-07,
"logits/chosen": -1.427907943725586,
"logits/rejected": -1.4352999925613403,
"logps/chosen": -234.02430725097656,
"logps/rejected": -252.70291137695312,
"loss": 0.6882,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.23474670946598053,
"rewards/margins": 0.3372371196746826,
"rewards/rejected": -0.571983814239502,
"step": 794
},
{
"epoch": 0.9077043688974413,
"grad_norm": 71.22024881783034,
"learning_rate": 1.2447091983198366e-07,
"logits/chosen": -1.4595139026641846,
"logits/rejected": -1.494476556777954,
"logps/chosen": -177.576904296875,
"logps/rejected": -200.62152099609375,
"loss": 0.622,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.10567592829465866,
"rewards/margins": 0.4641418755054474,
"rewards/rejected": -0.5698177814483643,
"step": 796
},
{
"epoch": 0.9099850331409023,
"grad_norm": 77.66730114746076,
"learning_rate": 1.2410196923534453e-07,
"logits/chosen": -1.3751342296600342,
"logits/rejected": -1.4557826519012451,
"logps/chosen": -156.71981811523438,
"logps/rejected": -175.78079223632812,
"loss": 0.6241,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.17370270192623138,
"rewards/margins": 0.5142669081687927,
"rewards/rejected": -0.6879696249961853,
"step": 798
},
{
"epoch": 0.9122656973843632,
"grad_norm": 79.01703720689792,
"learning_rate": 1.237326699871115e-07,
"logits/chosen": -1.3648015260696411,
"logits/rejected": -1.4242901802062988,
"logps/chosen": -139.64234924316406,
"logps/rejected": -162.11544799804688,
"loss": 0.6048,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.2178276777267456,
"rewards/margins": 0.2655281722545624,
"rewards/rejected": -0.48335587978363037,
"step": 800
},
{
"epoch": 0.9122656973843632,
"eval_logits/chosen": -1.4735329151153564,
"eval_logits/rejected": -1.460737705230713,
"eval_logps/chosen": -128.9962158203125,
"eval_logps/rejected": -133.23785400390625,
"eval_loss": 0.6001502871513367,
"eval_rewards/accuracies": 0.6800000071525574,
"eval_rewards/chosen": 0.09017454832792282,
"eval_rewards/margins": 0.20768418908119202,
"eval_rewards/rejected": -0.11750967055559158,
"eval_runtime": 19.4284,
"eval_samples_per_second": 5.147,
"eval_steps_per_second": 1.287,
"step": 800
},
{
"epoch": 0.9145463616278241,
"grad_norm": 114.49291402716334,
"learning_rate": 1.2336302742945265e-07,
"logits/chosen": -1.1926456689834595,
"logits/rejected": -1.1694424152374268,
"logps/chosen": -151.49020385742188,
"logps/rejected": -221.17051696777344,
"loss": 0.6541,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.3034975230693817,
"rewards/margins": 0.10761985182762146,
"rewards/rejected": -0.41111740469932556,
"step": 802
},
{
"epoch": 0.9168270258712851,
"grad_norm": 87.25759889144325,
"learning_rate": 1.2299304690950234e-07,
"logits/chosen": -1.314978837966919,
"logits/rejected": -1.3282618522644043,
"logps/chosen": -130.7468719482422,
"logps/rejected": -169.4188690185547,
"loss": 0.6349,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.07509980350732803,
"rewards/margins": 0.23689687252044678,
"rewards/rejected": -0.3119966685771942,
"step": 804
},
{
"epoch": 0.9191076901147459,
"grad_norm": 90.2755897487053,
"learning_rate": 1.2262273377928374e-07,
"logits/chosen": -1.4546884298324585,
"logits/rejected": -1.5022200345993042,
"logps/chosen": -155.28553771972656,
"logps/rejected": -186.28756713867188,
"loss": 0.6376,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.20369568467140198,
"rewards/margins": 0.15442761778831482,
"rewards/rejected": -0.3581232726573944,
"step": 806
},
{
"epoch": 0.9213883543582069,
"grad_norm": 87.61957852766155,
"learning_rate": 1.2225209339563143e-07,
"logits/chosen": -1.1901319026947021,
"logits/rejected": -1.2732369899749756,
"logps/chosen": -126.2178726196289,
"logps/rejected": -131.4507598876953,
"loss": 0.6119,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.06662334501743317,
"rewards/margins": 0.1810164749622345,
"rewards/rejected": -0.24763983488082886,
"step": 808
},
{
"epoch": 0.9236690186016677,
"grad_norm": 123.86797719697702,
"learning_rate": 1.2188113112011404e-07,
"logits/chosen": -1.260641098022461,
"logits/rejected": -1.3186612129211426,
"logps/chosen": -247.8528594970703,
"logps/rejected": -279.6898498535156,
"loss": 0.6643,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.3332889676094055,
"rewards/margins": 0.2615528106689453,
"rewards/rejected": -0.594841718673706,
"step": 810
},
{
"epoch": 0.9259496828451287,
"grad_norm": 92.05739920896082,
"learning_rate": 1.2150985231895645e-07,
"logits/chosen": -1.5352187156677246,
"logits/rejected": -1.5089805126190186,
"logps/chosen": -168.19351196289062,
"logps/rejected": -155.74728393554688,
"loss": 0.6464,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.05093672126531601,
"rewards/margins": 0.23030051589012146,
"rewards/rejected": -0.2812372148036957,
"step": 812
},
{
"epoch": 0.9282303470885895,
"grad_norm": 93.01772334051867,
"learning_rate": 1.2113826236296244e-07,
"logits/chosen": -1.4838998317718506,
"logits/rejected": -1.4913954734802246,
"logps/chosen": -145.08023071289062,
"logps/rejected": -237.1237030029297,
"loss": 0.63,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.10176398605108261,
"rewards/margins": 0.37156644463539124,
"rewards/rejected": -0.47333040833473206,
"step": 814
},
{
"epoch": 0.9305110113320505,
"grad_norm": 104.77223934596387,
"learning_rate": 1.207663666274367e-07,
"logits/chosen": -1.3763738870620728,
"logits/rejected": -1.3552130460739136,
"logps/chosen": -177.25376892089844,
"logps/rejected": -221.85498046875,
"loss": 0.6857,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.18679024279117584,
"rewards/margins": 0.05987125262618065,
"rewards/rejected": -0.2466614842414856,
"step": 816
},
{
"epoch": 0.9327916755755113,
"grad_norm": 103.0413802693114,
"learning_rate": 1.2039417049210742e-07,
"logits/chosen": -1.3318116664886475,
"logits/rejected": -1.3942524194717407,
"logps/chosen": -142.11920166015625,
"logps/rejected": -182.98092651367188,
"loss": 0.6606,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.08502069115638733,
"rewards/margins": 0.2950421869754791,
"rewards/rejected": -0.38006284832954407,
"step": 818
},
{
"epoch": 0.9350723398189723,
"grad_norm": 85.38211010208585,
"learning_rate": 1.2002167934104814e-07,
"logits/chosen": -1.3738641738891602,
"logits/rejected": -1.3845763206481934,
"logps/chosen": -120.14466857910156,
"logps/rejected": -148.59951782226562,
"loss": 0.6025,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.09485463052988052,
"rewards/margins": 0.2216353416442871,
"rewards/rejected": -0.31648993492126465,
"step": 820
},
{
"epoch": 0.9373530040624332,
"grad_norm": 78.59698161262021,
"learning_rate": 1.1964889856260001e-07,
"logits/chosen": -1.405772089958191,
"logits/rejected": -1.4993829727172852,
"logps/chosen": -135.0908203125,
"logps/rejected": -158.73516845703125,
"loss": 0.6449,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.057033490389585495,
"rewards/margins": 0.19890281558036804,
"rewards/rejected": -0.14186930656433105,
"step": 822
},
{
"epoch": 0.9396336683058941,
"grad_norm": 77.09101408818802,
"learning_rate": 1.1927583354929392e-07,
"logits/chosen": -1.2669503688812256,
"logits/rejected": -1.3981753587722778,
"logps/chosen": -185.62281799316406,
"logps/rejected": -229.72349548339844,
"loss": 0.5898,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.05814466252923012,
"rewards/margins": 0.5641728043556213,
"rewards/rejected": -0.6223174333572388,
"step": 824
},
{
"epoch": 0.941914332549355,
"grad_norm": 80.62233528509246,
"learning_rate": 1.1890248969777239e-07,
"logits/chosen": -1.4591572284698486,
"logits/rejected": -1.4886600971221924,
"logps/chosen": -133.4588165283203,
"logps/rejected": -161.96484375,
"loss": 0.6355,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.1525450050830841,
"rewards/margins": 0.35945460200309753,
"rewards/rejected": -0.5119996666908264,
"step": 826
},
{
"epoch": 0.9441949967928159,
"grad_norm": 82.91104665388441,
"learning_rate": 1.1852887240871144e-07,
"logits/chosen": -1.3768235445022583,
"logits/rejected": -1.4797062873840332,
"logps/chosen": -168.19825744628906,
"logps/rejected": -195.83607482910156,
"loss": 0.6328,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.021572627127170563,
"rewards/margins": 0.4793768525123596,
"rewards/rejected": -0.45780429244041443,
"step": 828
},
{
"epoch": 0.9464756610362768,
"grad_norm": 99.28432995782116,
"learning_rate": 1.1815498708674265e-07,
"logits/chosen": -1.3641839027404785,
"logits/rejected": -1.5057623386383057,
"logps/chosen": -188.45806884765625,
"logps/rejected": -205.60987854003906,
"loss": 0.6396,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.27040964365005493,
"rewards/margins": 0.16016271710395813,
"rewards/rejected": -0.4305723309516907,
"step": 830
},
{
"epoch": 0.9487563252797377,
"grad_norm": 91.59735539302851,
"learning_rate": 1.1778083914037489e-07,
"logits/chosen": -1.4061131477355957,
"logits/rejected": -1.425171136856079,
"logps/chosen": -127.95852661132812,
"logps/rejected": -161.05148315429688,
"loss": 0.6552,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.016702737659215927,
"rewards/margins": 0.2845785915851593,
"rewards/rejected": -0.2678758502006531,
"step": 832
},
{
"epoch": 0.9510369895231986,
"grad_norm": 86.74397487981402,
"learning_rate": 1.17406433981916e-07,
"logits/chosen": -1.3160350322723389,
"logits/rejected": -1.3871701955795288,
"logps/chosen": -176.73638916015625,
"logps/rejected": -211.8169403076172,
"loss": 0.5804,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.43320897221565247,
"rewards/margins": 0.319485068321228,
"rewards/rejected": -0.7526940107345581,
"step": 834
},
{
"epoch": 0.9533176537666596,
"grad_norm": 95.1172952955302,
"learning_rate": 1.1703177702739459e-07,
"logits/chosen": -1.4391237497329712,
"logits/rejected": -1.4496339559555054,
"logps/chosen": -170.07687377929688,
"logps/rejected": -187.2700958251953,
"loss": 0.6198,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.08456218987703323,
"rewards/margins": 0.2940000295639038,
"rewards/rejected": -0.37856221199035645,
"step": 836
},
{
"epoch": 0.9555983180101204,
"grad_norm": 88.76345502432684,
"learning_rate": 1.1665687369648172e-07,
"logits/chosen": -1.323454737663269,
"logits/rejected": -1.330315351486206,
"logps/chosen": -124.01346588134766,
"logps/rejected": -141.37887573242188,
"loss": 0.6466,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.2537275552749634,
"rewards/margins": -0.010971667245030403,
"rewards/rejected": -0.24275588989257812,
"step": 838
},
{
"epoch": 0.9578789822535814,
"grad_norm": 88.84157969865495,
"learning_rate": 1.1628172941241239e-07,
"logits/chosen": -1.2515794038772583,
"logits/rejected": -1.2858678102493286,
"logps/chosen": -161.89578247070312,
"logps/rejected": -192.5193328857422,
"loss": 0.5897,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.062142424285411835,
"rewards/margins": 0.38304537534713745,
"rewards/rejected": -0.44518783688545227,
"step": 840
},
{
"epoch": 0.9601596464970422,
"grad_norm": 77.39513907682372,
"learning_rate": 1.159063496019072e-07,
"logits/chosen": -1.3906288146972656,
"logits/rejected": -1.529634952545166,
"logps/chosen": -149.05487060546875,
"logps/rejected": -175.32589721679688,
"loss": 0.607,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.14833964407444,
"rewards/margins": 0.27842655777931213,
"rewards/rejected": -0.42676615715026855,
"step": 842
},
{
"epoch": 0.9624403107405032,
"grad_norm": 95.00453640521121,
"learning_rate": 1.1553073969509382e-07,
"logits/chosen": -1.3770264387130737,
"logits/rejected": -1.4574774503707886,
"logps/chosen": -154.23617553710938,
"logps/rejected": -165.87744140625,
"loss": 0.6221,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.23750467598438263,
"rewards/margins": 0.23638057708740234,
"rewards/rejected": -0.47388529777526855,
"step": 844
},
{
"epoch": 0.964720974983964,
"grad_norm": 106.04741673208737,
"learning_rate": 1.1515490512542831e-07,
"logits/chosen": -1.467265248298645,
"logits/rejected": -1.4767037630081177,
"logps/chosen": -154.89556884765625,
"logps/rejected": -169.7006072998047,
"loss": 0.6862,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.292231023311615,
"rewards/margins": 0.32214629650115967,
"rewards/rejected": -0.6143773198127747,
"step": 846
},
{
"epoch": 0.967001639227425,
"grad_norm": 105.71509066302269,
"learning_rate": 1.1477885132961677e-07,
"logits/chosen": -1.3954505920410156,
"logits/rejected": -1.4496076107025146,
"logps/chosen": -135.10621643066406,
"logps/rejected": -155.632080078125,
"loss": 0.6695,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.11768642067909241,
"rewards/margins": 0.2924567759037018,
"rewards/rejected": -0.4101432263851166,
"step": 848
},
{
"epoch": 0.9692823034708858,
"grad_norm": 83.14192239326704,
"learning_rate": 1.1440258374753649e-07,
"logits/chosen": -1.2167763710021973,
"logits/rejected": -1.2719136476516724,
"logps/chosen": -161.74093627929688,
"logps/rejected": -186.17892456054688,
"loss": 0.6215,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.2962597608566284,
"rewards/margins": 0.19979730248451233,
"rewards/rejected": -0.49605703353881836,
"step": 850
},
{
"epoch": 0.9715629677143468,
"grad_norm": 91.00056492305369,
"learning_rate": 1.1402610782215733e-07,
"logits/chosen": -1.338948369026184,
"logits/rejected": -1.4312294721603394,
"logps/chosen": -184.9549560546875,
"logps/rejected": -219.67489624023438,
"loss": 0.6323,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.3477857708930969,
"rewards/margins": 0.11345378309488297,
"rewards/rejected": -0.46123960614204407,
"step": 852
},
{
"epoch": 0.9738436319578078,
"grad_norm": 83.43376640938686,
"learning_rate": 1.1364942899946299e-07,
"logits/chosen": -1.4227657318115234,
"logits/rejected": -1.4756063222885132,
"logps/chosen": -176.06297302246094,
"logps/rejected": -190.72410583496094,
"loss": 0.6332,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.2725405991077423,
"rewards/margins": 0.23061612248420715,
"rewards/rejected": -0.5031567215919495,
"step": 854
},
{
"epoch": 0.9761242962012686,
"grad_norm": 80.98108181650579,
"learning_rate": 1.132725527283722e-07,
"logits/chosen": -1.390743613243103,
"logits/rejected": -1.4082766771316528,
"logps/chosen": -148.54055786132812,
"logps/rejected": -182.93699645996094,
"loss": 0.6133,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.07731722295284271,
"rewards/margins": 0.25885114073753357,
"rewards/rejected": -0.3361683785915375,
"step": 856
},
{
"epoch": 0.9784049604447296,
"grad_norm": 84.02405944942352,
"learning_rate": 1.1289548446065993e-07,
"logits/chosen": -1.3017364740371704,
"logits/rejected": -1.3097370862960815,
"logps/chosen": -162.28753662109375,
"logps/rejected": -187.27439880371094,
"loss": 0.6506,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.13367247581481934,
"rewards/margins": 0.2926619052886963,
"rewards/rejected": -0.426334410905838,
"step": 858
},
{
"epoch": 0.9806856246881904,
"grad_norm": 92.76881750126093,
"learning_rate": 1.1251822965087854e-07,
"logits/chosen": -1.2643218040466309,
"logits/rejected": -1.4101805686950684,
"logps/chosen": -162.29969787597656,
"logps/rejected": -204.03713989257812,
"loss": 0.6025,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.28711238503456116,
"rewards/margins": 0.36070847511291504,
"rewards/rejected": -0.6478208899497986,
"step": 860
},
{
"epoch": 0.9829662889316514,
"grad_norm": 118.85388390167415,
"learning_rate": 1.1214079375627883e-07,
"logits/chosen": -1.4487006664276123,
"logits/rejected": -1.474176049232483,
"logps/chosen": -271.73126220703125,
"logps/rejected": -296.800537109375,
"loss": 0.6742,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.5793529152870178,
"rewards/margins": 0.24643933773040771,
"rewards/rejected": -0.8257922530174255,
"step": 862
},
{
"epoch": 0.9852469531751122,
"grad_norm": 86.67252914799577,
"learning_rate": 1.1176318223673105e-07,
"logits/chosen": -1.4337862730026245,
"logits/rejected": -1.4527971744537354,
"logps/chosen": -200.96485900878906,
"logps/rejected": -221.7783660888672,
"loss": 0.6498,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.2869877815246582,
"rewards/margins": 0.2341936230659485,
"rewards/rejected": -0.5211814045906067,
"step": 864
},
{
"epoch": 0.9875276174185732,
"grad_norm": 113.98036843821767,
"learning_rate": 1.1138540055464609e-07,
"logits/chosen": -1.3671294450759888,
"logits/rejected": -1.368064522743225,
"logps/chosen": -163.43055725097656,
"logps/rejected": -175.2020721435547,
"loss": 0.5793,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.022854965180158615,
"rewards/margins": 0.4233308434486389,
"rewards/rejected": -0.44618579745292664,
"step": 866
},
{
"epoch": 0.989808281662034,
"grad_norm": 80.29058110520482,
"learning_rate": 1.110074541748963e-07,
"logits/chosen": -1.346540093421936,
"logits/rejected": -1.3472204208374023,
"logps/chosen": -186.08084106445312,
"logps/rejected": -207.60470581054688,
"loss": 0.6321,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.25492382049560547,
"rewards/margins": 0.35832715034484863,
"rewards/rejected": -0.6132509708404541,
"step": 868
},
{
"epoch": 0.992088945905495,
"grad_norm": 90.95115779695014,
"learning_rate": 1.1062934856473653e-07,
"logits/chosen": -1.4387091398239136,
"logits/rejected": -1.455345630645752,
"logps/chosen": -98.49878692626953,
"logps/rejected": -172.24810791015625,
"loss": 0.6444,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.03821319341659546,
"rewards/margins": 0.8005204796791077,
"rewards/rejected": -0.8387336730957031,
"step": 870
},
{
"epoch": 0.9943696101489559,
"grad_norm": 100.52505053286734,
"learning_rate": 1.1025108919372499e-07,
"logits/chosen": -1.3714473247528076,
"logits/rejected": -1.3371658325195312,
"logps/chosen": -167.58447265625,
"logps/rejected": -186.04751586914062,
"loss": 0.6675,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.16858822107315063,
"rewards/margins": 0.19196297228336334,
"rewards/rejected": -0.36055123805999756,
"step": 872
},
{
"epoch": 0.9966502743924168,
"grad_norm": 110.94600706114623,
"learning_rate": 1.0987268153364411e-07,
"logits/chosen": -1.3899271488189697,
"logits/rejected": -1.3658580780029297,
"logps/chosen": -130.26220703125,
"logps/rejected": -134.71063232421875,
"loss": 0.66,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.10100418329238892,
"rewards/margins": 0.2500309348106384,
"rewards/rejected": -0.35103511810302734,
"step": 874
},
{
"epoch": 0.9989309386358777,
"grad_norm": 105.88720383741304,
"learning_rate": 1.0949413105842146e-07,
"logits/chosen": -1.4607656002044678,
"logits/rejected": -1.5963387489318848,
"logps/chosen": -196.40066528320312,
"logps/rejected": -225.38638305664062,
"loss": 0.6348,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.15517286956310272,
"rewards/margins": 0.15176478028297424,
"rewards/rejected": -0.30693763494491577,
"step": 876
},
{
"epoch": 1.0012116028793385,
"grad_norm": 56.3694410581267,
"learning_rate": 1.091154432440506e-07,
"logits/chosen": -1.389705777168274,
"logits/rejected": -1.4425851106643677,
"logps/chosen": -184.08709716796875,
"logps/rejected": -215.4456024169922,
"loss": 0.5323,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.004449841566383839,
"rewards/margins": 0.635086178779602,
"rewards/rejected": -0.6306363344192505,
"step": 878
},
{
"epoch": 1.0034922671227995,
"grad_norm": 59.14871364307595,
"learning_rate": 1.0873662356851164e-07,
"logits/chosen": -1.2264068126678467,
"logits/rejected": -1.2039250135421753,
"logps/chosen": -143.2238311767578,
"logps/rejected": -148.1532745361328,
"loss": 0.4487,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.14279066026210785,
"rewards/margins": 0.6712049841880798,
"rewards/rejected": -0.528414249420166,
"step": 880
},
{
"epoch": 1.0057729313662604,
"grad_norm": 56.08399590075636,
"learning_rate": 1.0835767751169225e-07,
"logits/chosen": -1.3238468170166016,
"logits/rejected": -1.4208531379699707,
"logps/chosen": -172.80247497558594,
"logps/rejected": -221.4132843017578,
"loss": 0.4898,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.04028752073645592,
"rewards/margins": 0.7049790024757385,
"rewards/rejected": -0.7452664375305176,
"step": 882
},
{
"epoch": 1.0080535956097214,
"grad_norm": 52.855939708340124,
"learning_rate": 1.0797861055530831e-07,
"logits/chosen": -1.3633054494857788,
"logits/rejected": -1.3413807153701782,
"logps/chosen": -146.97352600097656,
"logps/rejected": -183.86334228515625,
"loss": 0.4712,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.06403888016939163,
"rewards/margins": 0.8986248970031738,
"rewards/rejected": -0.9626636505126953,
"step": 884
},
{
"epoch": 1.0103342598531821,
"grad_norm": 66.20782299721449,
"learning_rate": 1.0759942818282453e-07,
"logits/chosen": -1.2454776763916016,
"logits/rejected": -1.3409074544906616,
"logps/chosen": -165.26669311523438,
"logps/rejected": -199.02630615234375,
"loss": 0.4379,
"rewards/accuracies": 0.90625,
"rewards/chosen": -0.18418747186660767,
"rewards/margins": 0.8542241454124451,
"rewards/rejected": -1.0384116172790527,
"step": 886
},
{
"epoch": 1.012614924096643,
"grad_norm": 61.538116919835716,
"learning_rate": 1.0722013587937526e-07,
"logits/chosen": -1.345977783203125,
"logits/rejected": -1.3574477434158325,
"logps/chosen": -140.93460083007812,
"logps/rejected": -170.11268615722656,
"loss": 0.4964,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.001187225803732872,
"rewards/margins": 0.6773791313171387,
"rewards/rejected": -0.6761919260025024,
"step": 888
},
{
"epoch": 1.014895588340104,
"grad_norm": 66.18054031071868,
"learning_rate": 1.0684073913168501e-07,
"logits/chosen": -1.506268858909607,
"logits/rejected": -1.5550968647003174,
"logps/chosen": -227.16659545898438,
"logps/rejected": -231.74356079101562,
"loss": 0.4649,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.04133976250886917,
"rewards/margins": 0.7655045986175537,
"rewards/rejected": -0.8068442940711975,
"step": 890
},
{
"epoch": 1.017176252583565,
"grad_norm": 63.53563886574213,
"learning_rate": 1.0646124342798919e-07,
"logits/chosen": -1.2807819843292236,
"logits/rejected": -1.3626489639282227,
"logps/chosen": -149.43206787109375,
"logps/rejected": -194.97207641601562,
"loss": 0.4679,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.026193831115961075,
"rewards/margins": 0.7646716237068176,
"rewards/rejected": -0.7384778261184692,
"step": 892
},
{
"epoch": 1.0194569168270258,
"grad_norm": 63.988279055249954,
"learning_rate": 1.0608165425795468e-07,
"logits/chosen": -1.5072122812271118,
"logits/rejected": -1.5509088039398193,
"logps/chosen": -185.84625244140625,
"logps/rejected": -213.9545440673828,
"loss": 0.5205,
"rewards/accuracies": 0.84375,
"rewards/chosen": -0.03688034415245056,
"rewards/margins": 0.8168615102767944,
"rewards/rejected": -0.8537418842315674,
"step": 894
},
{
"epoch": 1.0217375810704867,
"grad_norm": 58.69101964281229,
"learning_rate": 1.0570197711260038e-07,
"logits/chosen": -1.388396978378296,
"logits/rejected": -1.3820849657058716,
"logps/chosen": -127.9742431640625,
"logps/rejected": -137.78054809570312,
"loss": 0.4494,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.10387776046991348,
"rewards/margins": 0.5781891942024231,
"rewards/rejected": -0.47431135177612305,
"step": 896
},
{
"epoch": 1.0240182453139477,
"grad_norm": 74.70102903392454,
"learning_rate": 1.0532221748421786e-07,
"logits/chosen": -1.3617676496505737,
"logits/rejected": -1.4110198020935059,
"logps/chosen": -160.6496124267578,
"logps/rejected": -182.05860900878906,
"loss": 0.4881,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.21052153408527374,
"rewards/margins": 0.5095985531806946,
"rewards/rejected": -0.7201201319694519,
"step": 898
},
{
"epoch": 1.0262989095574087,
"grad_norm": 61.26905190831476,
"learning_rate": 1.0494238086629183e-07,
"logits/chosen": -1.4342904090881348,
"logits/rejected": -1.4664863348007202,
"logps/chosen": -173.26043701171875,
"logps/rejected": -202.07130432128906,
"loss": 0.4934,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.21440905332565308,
"rewards/margins": 0.7449226379394531,
"rewards/rejected": -0.9593316316604614,
"step": 900
},
{
"epoch": 1.0262989095574087,
"eval_logits/chosen": -1.4491240978240967,
"eval_logits/rejected": -1.4348580837249756,
"eval_logps/chosen": -129.59962463378906,
"eval_logps/rejected": -134.80783081054688,
"eval_loss": 0.5797997713088989,
"eval_rewards/accuracies": 0.7200000286102295,
"eval_rewards/chosen": 0.029834765940904617,
"eval_rewards/margins": 0.3043439984321594,
"eval_rewards/rejected": -0.2745092511177063,
"eval_runtime": 20.081,
"eval_samples_per_second": 4.98,
"eval_steps_per_second": 1.245,
"step": 900
}
],
"logging_steps": 2,
"max_steps": 1752,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 300,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}