zephyr-7b-MI1-SELM3 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.998691442030882,
"eval_steps": 500,
"global_step": 477,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010468463752944255,
"grad_norm": 15.176345540381961,
"learning_rate": 5.208333333333333e-08,
"logits/chosen": -2.7708451747894287,
"logits/rejected": -2.7284560203552246,
"logps/chosen": -1.0280715227127075,
"logps/rejected": -1.1731975078582764,
"loss": 2.0284,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -1.0280715227127075,
"rewards/margins": 0.14512601494789124,
"rewards/rejected": -1.1731975078582764,
"step": 5
},
{
"epoch": 0.02093692750588851,
"grad_norm": 13.877817908572124,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": -2.7508482933044434,
"logits/rejected": -2.725215196609497,
"logps/chosen": -0.9954730868339539,
"logps/rejected": -1.071991205215454,
"loss": 2.002,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.9954730868339539,
"rewards/margins": 0.0765179991722107,
"rewards/rejected": -1.071991205215454,
"step": 10
},
{
"epoch": 0.031405391258832765,
"grad_norm": 13.915470437591527,
"learning_rate": 1.5624999999999999e-07,
"logits/chosen": -2.730905294418335,
"logits/rejected": -2.697235584259033,
"logps/chosen": -0.9896604418754578,
"logps/rejected": -1.1480659246444702,
"loss": 1.9874,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -0.9896604418754578,
"rewards/margins": 0.15840543806552887,
"rewards/rejected": -1.1480659246444702,
"step": 15
},
{
"epoch": 0.04187385501177702,
"grad_norm": 13.632313250241245,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": -2.8031985759735107,
"logits/rejected": -2.7098159790039062,
"logps/chosen": -1.022659420967102,
"logps/rejected": -1.1234548091888428,
"loss": 2.0265,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": -1.022659420967102,
"rewards/margins": 0.10079552233219147,
"rewards/rejected": -1.1234548091888428,
"step": 20
},
{
"epoch": 0.05234231876472128,
"grad_norm": 14.107067164134795,
"learning_rate": 2.604166666666667e-07,
"logits/chosen": -2.799668073654175,
"logits/rejected": -2.7763009071350098,
"logps/chosen": -1.0291378498077393,
"logps/rejected": -1.086358904838562,
"loss": 2.0092,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": -1.0291378498077393,
"rewards/margins": 0.0572209358215332,
"rewards/rejected": -1.086358904838562,
"step": 25
},
{
"epoch": 0.06281078251766553,
"grad_norm": 15.048800842708138,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -2.7735042572021484,
"logits/rejected": -2.731670379638672,
"logps/chosen": -0.9846929311752319,
"logps/rejected": -1.0904910564422607,
"loss": 2.0118,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": -0.9846929311752319,
"rewards/margins": 0.10579802095890045,
"rewards/rejected": -1.0904910564422607,
"step": 30
},
{
"epoch": 0.07327924627060979,
"grad_norm": 14.073259274006203,
"learning_rate": 3.645833333333333e-07,
"logits/chosen": -2.777062177658081,
"logits/rejected": -2.690617084503174,
"logps/chosen": -1.0319979190826416,
"logps/rejected": -1.1718026399612427,
"loss": 1.9903,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -1.0319979190826416,
"rewards/margins": 0.13980472087860107,
"rewards/rejected": -1.1718026399612427,
"step": 35
},
{
"epoch": 0.08374771002355404,
"grad_norm": 19.704149826171133,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -2.7942392826080322,
"logits/rejected": -2.697408437728882,
"logps/chosen": -1.011974573135376,
"logps/rejected": -1.2997562885284424,
"loss": 1.9462,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.011974573135376,
"rewards/margins": 0.28778162598609924,
"rewards/rejected": -1.2997562885284424,
"step": 40
},
{
"epoch": 0.0942161737764983,
"grad_norm": 15.255628868525834,
"learning_rate": 4.6874999999999996e-07,
"logits/chosen": -2.805002450942993,
"logits/rejected": -2.7587850093841553,
"logps/chosen": -0.963559627532959,
"logps/rejected": -1.1423972845077515,
"loss": 1.9511,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": -0.963559627532959,
"rewards/margins": 0.17883776128292084,
"rewards/rejected": -1.1423972845077515,
"step": 45
},
{
"epoch": 0.10468463752944256,
"grad_norm": 20.885465849075064,
"learning_rate": 4.999731868769026e-07,
"logits/chosen": -2.761730194091797,
"logits/rejected": -2.693643093109131,
"logps/chosen": -1.0126588344573975,
"logps/rejected": -1.3037517070770264,
"loss": 1.9653,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -1.0126588344573975,
"rewards/margins": 0.2910929024219513,
"rewards/rejected": -1.3037517070770264,
"step": 50
},
{
"epoch": 0.11515310128238682,
"grad_norm": 16.254626281156803,
"learning_rate": 4.996716052911017e-07,
"logits/chosen": -2.7590675354003906,
"logits/rejected": -2.723374843597412,
"logps/chosen": -1.0168770551681519,
"logps/rejected": -1.268347978591919,
"loss": 1.9612,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": -1.0168770551681519,
"rewards/margins": 0.25147074460983276,
"rewards/rejected": -1.268347978591919,
"step": 55
},
{
"epoch": 0.12562156503533106,
"grad_norm": 16.222896740457696,
"learning_rate": 4.990353313429303e-07,
"logits/chosen": -2.809765338897705,
"logits/rejected": -2.7633471488952637,
"logps/chosen": -1.0493232011795044,
"logps/rejected": -1.2437658309936523,
"loss": 1.9157,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -1.0493232011795044,
"rewards/margins": 0.1944427341222763,
"rewards/rejected": -1.2437658309936523,
"step": 60
},
{
"epoch": 0.1360900287882753,
"grad_norm": 15.145405551888192,
"learning_rate": 4.980652179769217e-07,
"logits/chosen": -2.8015551567077637,
"logits/rejected": -2.6827645301818848,
"logps/chosen": -1.0214020013809204,
"logps/rejected": -1.5461061000823975,
"loss": 1.9028,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -1.0214020013809204,
"rewards/margins": 0.524704098701477,
"rewards/rejected": -1.5461061000823975,
"step": 65
},
{
"epoch": 0.14655849254121958,
"grad_norm": 20.7647978251983,
"learning_rate": 4.967625656594781e-07,
"logits/chosen": -2.6961591243743896,
"logits/rejected": -2.6422698497772217,
"logps/chosen": -1.0481222867965698,
"logps/rejected": -1.3449742794036865,
"loss": 1.9095,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": -1.0481222867965698,
"rewards/margins": 0.29685190320014954,
"rewards/rejected": -1.3449742794036865,
"step": 70
},
{
"epoch": 0.15702695629416383,
"grad_norm": 29.67977958863066,
"learning_rate": 4.951291206355559e-07,
"logits/chosen": -2.7507481575012207,
"logits/rejected": -2.6747162342071533,
"logps/chosen": -0.9930871725082397,
"logps/rejected": -1.354509711265564,
"loss": 1.8891,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.9930871725082397,
"rewards/margins": 0.36142244935035706,
"rewards/rejected": -1.354509711265564,
"step": 75
},
{
"epoch": 0.16749542004710807,
"grad_norm": 22.015004964913427,
"learning_rate": 4.93167072587771e-07,
"logits/chosen": -2.709141254425049,
"logits/rejected": -2.6435017585754395,
"logps/chosen": -0.9975327253341675,
"logps/rejected": -1.4075746536254883,
"loss": 1.9339,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.9975327253341675,
"rewards/margins": 0.4100419878959656,
"rewards/rejected": -1.4075746536254883,
"step": 80
},
{
"epoch": 0.17796388380005235,
"grad_norm": 22.42041956383337,
"learning_rate": 4.908790517010636e-07,
"logits/chosen": -2.7113754749298096,
"logits/rejected": -2.6602649688720703,
"logps/chosen": -0.9959508776664734,
"logps/rejected": -1.3768701553344727,
"loss": 1.9555,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -0.9959508776664734,
"rewards/margins": 0.3809191882610321,
"rewards/rejected": -1.3768701553344727,
"step": 85
},
{
"epoch": 0.1884323475529966,
"grad_norm": 21.065770503793715,
"learning_rate": 4.882681251368548e-07,
"logits/chosen": -2.689040184020996,
"logits/rejected": -2.6611132621765137,
"logps/chosen": -1.0262080430984497,
"logps/rejected": -1.4091228246688843,
"loss": 1.8793,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -1.0262080430984497,
"rewards/margins": 0.38291481137275696,
"rewards/rejected": -1.4091228246688843,
"step": 90
},
{
"epoch": 0.19890081130594087,
"grad_norm": 31.8448042024589,
"learning_rate": 4.853377929214243e-07,
"logits/chosen": -2.7642765045166016,
"logits/rejected": -2.6598925590515137,
"logps/chosen": -1.0126625299453735,
"logps/rejected": -1.3561102151870728,
"loss": 1.9092,
"rewards/accuracies": 0.59375,
"rewards/chosen": -1.0126625299453735,
"rewards/margins": 0.34344789385795593,
"rewards/rejected": -1.3561102151870728,
"step": 95
},
{
"epoch": 0.2093692750588851,
"grad_norm": 25.05097837239479,
"learning_rate": 4.820919832540181e-07,
"logits/chosen": -2.6988279819488525,
"logits/rejected": -2.597813844680786,
"logps/chosen": -1.0824997425079346,
"logps/rejected": -1.5376174449920654,
"loss": 1.9333,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": -1.0824997425079346,
"rewards/margins": 0.45511776208877563,
"rewards/rejected": -1.5376174449920654,
"step": 100
},
{
"epoch": 0.21983773881182936,
"grad_norm": 26.762526766885564,
"learning_rate": 4.785350472409791e-07,
"logits/chosen": -2.709327220916748,
"logits/rejected": -2.6909279823303223,
"logps/chosen": -0.9367496371269226,
"logps/rejected": -1.3381364345550537,
"loss": 1.8988,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -0.9367496371269226,
"rewards/margins": 0.40138688683509827,
"rewards/rejected": -1.3381364345550537,
"step": 105
},
{
"epoch": 0.23030620256477363,
"grad_norm": 24.759397812808643,
"learning_rate": 4.7467175306295647e-07,
"logits/chosen": -2.785827398300171,
"logits/rejected": -2.7306325435638428,
"logps/chosen": -1.0359995365142822,
"logps/rejected": -1.4377586841583252,
"loss": 1.8787,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -1.0359995365142822,
"rewards/margins": 0.40175899863243103,
"rewards/rejected": -1.4377586841583252,
"step": 110
},
{
"epoch": 0.24077466631771788,
"grad_norm": 22.275954156633127,
"learning_rate": 4.70507279583015e-07,
"logits/chosen": -2.734710693359375,
"logits/rejected": -2.67667818069458,
"logps/chosen": -1.0577763319015503,
"logps/rejected": -1.6186084747314453,
"loss": 1.875,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.0577763319015503,
"rewards/margins": 0.560832142829895,
"rewards/rejected": -1.6186084747314453,
"step": 115
},
{
"epoch": 0.2512431300706621,
"grad_norm": 31.253706152387053,
"learning_rate": 4.6604720940421207e-07,
"logits/chosen": -2.747194766998291,
"logits/rejected": -2.7159297466278076,
"logps/chosen": -1.061531662940979,
"logps/rejected": -1.4542839527130127,
"loss": 1.8991,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -1.061531662940979,
"rewards/margins": 0.39275234937667847,
"rewards/rejected": -1.4542839527130127,
"step": 120
},
{
"epoch": 0.26171159382360637,
"grad_norm": 24.012313672135768,
"learning_rate": 4.612975213859487e-07,
"logits/chosen": -2.809326648712158,
"logits/rejected": -2.786160707473755,
"logps/chosen": -1.0037329196929932,
"logps/rejected": -1.2904164791107178,
"loss": 1.9171,
"rewards/accuracies": 0.59375,
"rewards/chosen": -1.0037329196929932,
"rewards/margins": 0.28668367862701416,
"rewards/rejected": -1.2904164791107178,
"step": 125
},
{
"epoch": 0.2721800575765506,
"grad_norm": 27.105267597546742,
"learning_rate": 4.5626458262912735e-07,
"logits/chosen": -2.7714219093322754,
"logits/rejected": -2.7451789379119873,
"logps/chosen": -1.0449589490890503,
"logps/rejected": -1.2745070457458496,
"loss": 1.9054,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -1.0449589490890503,
"rewards/margins": 0.22954817116260529,
"rewards/rejected": -1.2745070457458496,
"step": 130
},
{
"epoch": 0.2826485213294949,
"grad_norm": 34.82290559167317,
"learning_rate": 4.5095513994085974e-07,
"logits/chosen": -2.783876419067383,
"logits/rejected": -2.67991304397583,
"logps/chosen": -1.1126677989959717,
"logps/rejected": -1.8345043659210205,
"loss": 1.8811,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -1.1126677989959717,
"rewards/margins": 0.7218365669250488,
"rewards/rejected": -1.8345043659210205,
"step": 135
},
{
"epoch": 0.29311698508243916,
"grad_norm": 25.33210409531245,
"learning_rate": 4.453763107901675e-07,
"logits/chosen": -2.8026270866394043,
"logits/rejected": -2.7463619709014893,
"logps/chosen": -1.0125950574874878,
"logps/rejected": -1.5290297269821167,
"loss": 1.815,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -1.0125950574874878,
"rewards/margins": 0.5164345502853394,
"rewards/rejected": -1.5290297269821167,
"step": 140
},
{
"epoch": 0.3035854488353834,
"grad_norm": 25.346225453192677,
"learning_rate": 4.395355737667985e-07,
"logits/chosen": -2.775475025177002,
"logits/rejected": -2.719919204711914,
"logps/chosen": -0.9933168292045593,
"logps/rejected": -1.3926680088043213,
"loss": 1.8662,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.9933168292045593,
"rewards/margins": 0.3993511199951172,
"rewards/rejected": -1.3926680088043213,
"step": 145
},
{
"epoch": 0.31405391258832765,
"grad_norm": 25.05252990164211,
"learning_rate": 4.3344075855595097e-07,
"logits/chosen": -2.8103270530700684,
"logits/rejected": -2.7926547527313232,
"logps/chosen": -0.9610552787780762,
"logps/rejected": -1.3762198686599731,
"loss": 1.8355,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.9610552787780762,
"rewards/margins": 0.4151645302772522,
"rewards/rejected": -1.3762198686599731,
"step": 150
},
{
"epoch": 0.3245223763412719,
"grad_norm": 27.37807184294238,
"learning_rate": 4.271000354423425e-07,
"logits/chosen": -2.7627320289611816,
"logits/rejected": -2.735795497894287,
"logps/chosen": -1.078357458114624,
"logps/rejected": -1.435006856918335,
"loss": 1.8878,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -1.078357458114624,
"rewards/margins": 0.3566494882106781,
"rewards/rejected": -1.435006856918335,
"step": 155
},
{
"epoch": 0.33499084009421615,
"grad_norm": 31.453776336135004,
"learning_rate": 4.2052190435769554e-07,
"logits/chosen": -2.744175434112549,
"logits/rejected": -2.718656063079834,
"logps/chosen": -1.1089026927947998,
"logps/rejected": -1.5105451345443726,
"loss": 1.893,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -1.1089026927947998,
"rewards/margins": 0.4016422629356384,
"rewards/rejected": -1.5105451345443726,
"step": 160
},
{
"epoch": 0.34545930384716045,
"grad_norm": 25.427022864790626,
"learning_rate": 4.137151834863213e-07,
"logits/chosen": -2.7565693855285645,
"logits/rejected": -2.6773271560668945,
"logps/chosen": -0.9760361909866333,
"logps/rejected": -1.4186967611312866,
"loss": 1.8368,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -0.9760361909866333,
"rewards/margins": 0.4426605701446533,
"rewards/rejected": -1.4186967611312866,
"step": 165
},
{
"epoch": 0.3559277676001047,
"grad_norm": 25.0313872723157,
"learning_rate": 4.0668899744407567e-07,
"logits/chosen": -2.7782142162323,
"logits/rejected": -2.706545829772949,
"logps/chosen": -1.0534151792526245,
"logps/rejected": -1.450411081314087,
"loss": 1.8695,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -1.0534151792526245,
"rewards/margins": 0.39699599146842957,
"rewards/rejected": -1.450411081314087,
"step": 170
},
{
"epoch": 0.36639623135304894,
"grad_norm": 22.99470630834903,
"learning_rate": 3.994527650465352e-07,
"logits/chosen": -2.7585906982421875,
"logits/rejected": -2.6691181659698486,
"logps/chosen": -1.0702944993972778,
"logps/rejected": -1.4621968269348145,
"loss": 1.8958,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -1.0702944993972778,
"rewards/margins": 0.39190229773521423,
"rewards/rejected": -1.4621968269348145,
"step": 175
},
{
"epoch": 0.3768646951059932,
"grad_norm": 25.69330171727391,
"learning_rate": 3.920161866827889e-07,
"logits/chosen": -2.740964889526367,
"logits/rejected": -2.7247314453125,
"logps/chosen": -1.0360349416732788,
"logps/rejected": -1.5588700771331787,
"loss": 1.8488,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -1.0360349416732788,
"rewards/margins": 0.5228349566459656,
"rewards/rejected": -1.5588700771331787,
"step": 180
},
{
"epoch": 0.38733315885893743,
"grad_norm": 26.206907546340837,
"learning_rate": 3.8438923131177237e-07,
"logits/chosen": -2.7401633262634277,
"logits/rejected": -2.637202501296997,
"logps/chosen": -1.0996856689453125,
"logps/rejected": -1.7466999292373657,
"loss": 1.8192,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -1.0996856689453125,
"rewards/margins": 0.6470142602920532,
"rewards/rejected": -1.7466999292373657,
"step": 185
},
{
"epoch": 0.39780162261188173,
"grad_norm": 27.521588311939052,
"learning_rate": 3.765821230985757e-07,
"logits/chosen": -2.743375301361084,
"logits/rejected": -2.69993257522583,
"logps/chosen": -1.086106300354004,
"logps/rejected": -1.5462232828140259,
"loss": 1.9254,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -1.086106300354004,
"rewards/margins": 0.4601168632507324,
"rewards/rejected": -1.5462232828140259,
"step": 190
},
{
"epoch": 0.408270086364826,
"grad_norm": 27.446157471379898,
"learning_rate": 3.6860532770864005e-07,
"logits/chosen": -2.684860944747925,
"logits/rejected": -2.650524616241455,
"logps/chosen": -1.0963340997695923,
"logps/rejected": -1.4702621698379517,
"loss": 1.8975,
"rewards/accuracies": 0.65625,
"rewards/chosen": -1.0963340997695923,
"rewards/margins": 0.3739281892776489,
"rewards/rejected": -1.4702621698379517,
"step": 195
},
{
"epoch": 0.4187385501177702,
"grad_norm": 24.602486055800032,
"learning_rate": 3.604695382782159e-07,
"logits/chosen": -2.7188162803649902,
"logits/rejected": -2.712054967880249,
"logps/chosen": -1.0557594299316406,
"logps/rejected": -1.4215106964111328,
"loss": 1.8755,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -1.0557594299316406,
"rewards/margins": 0.36575111746788025,
"rewards/rejected": -1.4215106964111328,
"step": 200
},
{
"epoch": 0.42920701387071447,
"grad_norm": 27.200176778512795,
"learning_rate": 3.5218566107988867e-07,
"logits/chosen": -2.6477105617523193,
"logits/rejected": -2.5724520683288574,
"logps/chosen": -0.9946632385253906,
"logps/rejected": -1.4712902307510376,
"loss": 1.8403,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.9946632385253906,
"rewards/margins": 0.4766268730163574,
"rewards/rejected": -1.4712902307510376,
"step": 205
},
{
"epoch": 0.4396754776236587,
"grad_norm": 36.0559472403249,
"learning_rate": 3.4376480090239047e-07,
"logits/chosen": -2.657755136489868,
"logits/rejected": -2.61205792427063,
"logps/chosen": -1.0976860523223877,
"logps/rejected": -1.4535160064697266,
"loss": 1.8803,
"rewards/accuracies": 0.606249988079071,
"rewards/chosen": -1.0976860523223877,
"rewards/margins": 0.35582998394966125,
"rewards/rejected": -1.4535160064697266,
"step": 210
},
{
"epoch": 0.45014394137660296,
"grad_norm": 22.419086184833272,
"learning_rate": 3.3521824616429284e-07,
"logits/chosen": -2.690389633178711,
"logits/rejected": -2.669494152069092,
"logps/chosen": -0.9983667135238647,
"logps/rejected": -1.5212194919586182,
"loss": 1.8208,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -0.9983667135238647,
"rewards/margins": 0.5228530168533325,
"rewards/rejected": -1.5212194919586182,
"step": 215
},
{
"epoch": 0.46061240512954726,
"grad_norm": 30.606607589341355,
"learning_rate": 3.265574537815398e-07,
"logits/chosen": -2.7263293266296387,
"logits/rejected": -2.649702787399292,
"logps/chosen": -1.0452382564544678,
"logps/rejected": -1.5576632022857666,
"loss": 1.8431,
"rewards/accuracies": 0.65625,
"rewards/chosen": -1.0452382564544678,
"rewards/margins": 0.5124249458312988,
"rewards/rejected": -1.5576632022857666,
"step": 220
},
{
"epoch": 0.4710808688824915,
"grad_norm": 27.672871069441197,
"learning_rate": 3.1779403380910425e-07,
"logits/chosen": -2.7160162925720215,
"logits/rejected": -2.6121773719787598,
"logps/chosen": -1.0075920820236206,
"logps/rejected": -1.576601505279541,
"loss": 1.8271,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -1.0075920820236206,
"rewards/margins": 0.5690094232559204,
"rewards/rejected": -1.576601505279541,
"step": 225
},
{
"epoch": 0.48154933263543576,
"grad_norm": 25.08698759897845,
"learning_rate": 3.0893973387735683e-07,
"logits/chosen": -2.7268662452697754,
"logits/rejected": -2.6340160369873047,
"logps/chosen": -1.0295268297195435,
"logps/rejected": -1.7138006687164307,
"loss": 1.8333,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -1.0295268297195435,
"rewards/margins": 0.6842738389968872,
"rewards/rejected": -1.7138006687164307,
"step": 230
},
{
"epoch": 0.49201779638838,
"grad_norm": 24.9727125758065,
"learning_rate": 3.000064234440111e-07,
"logits/chosen": -2.7256393432617188,
"logits/rejected": -2.67500638961792,
"logps/chosen": -1.0034698247909546,
"logps/rejected": -1.5250154733657837,
"loss": 1.8603,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.0034698247909546,
"rewards/margins": 0.5215455889701843,
"rewards/rejected": -1.5250154733657837,
"step": 235
},
{
"epoch": 0.5024862601413242,
"grad_norm": 23.709236995530965,
"learning_rate": 2.910060778827554e-07,
"logits/chosen": -2.667459011077881,
"logits/rejected": -2.622892379760742,
"logps/chosen": -0.998893141746521,
"logps/rejected": -1.5178442001342773,
"loss": 1.8707,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -0.998893141746521,
"rewards/margins": 0.5189512372016907,
"rewards/rejected": -1.5178442001342773,
"step": 240
},
{
"epoch": 0.5129547238942685,
"grad_norm": 23.55356072022147,
"learning_rate": 2.8195076242990116e-07,
"logits/chosen": -2.6935830116271973,
"logits/rejected": -2.5973896980285645,
"logps/chosen": -1.1026113033294678,
"logps/rejected": -1.5293716192245483,
"loss": 1.8533,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -1.1026113033294678,
"rewards/margins": 0.4267602562904358,
"rewards/rejected": -1.5293716192245483,
"step": 245
},
{
"epoch": 0.5234231876472127,
"grad_norm": 33.32048093164131,
"learning_rate": 2.7285261601056697e-07,
"logits/chosen": -2.5986247062683105,
"logits/rejected": -2.545224905014038,
"logps/chosen": -1.019071340560913,
"logps/rejected": -1.5590027570724487,
"loss": 1.8581,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.019071340560913,
"rewards/margins": 0.5399314761161804,
"rewards/rejected": -1.5590027570724487,
"step": 250
},
{
"epoch": 0.533891651400157,
"grad_norm": 24.969288713955503,
"learning_rate": 2.6372383496608186e-07,
"logits/chosen": -2.647831678390503,
"logits/rejected": -2.5525565147399902,
"logps/chosen": -1.0960899591445923,
"logps/rejected": -1.7084300518035889,
"loss": 1.7984,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.0960899591445923,
"rewards/margins": 0.6123402714729309,
"rewards/rejected": -1.7084300518035889,
"step": 255
},
{
"epoch": 0.5443601151531012,
"grad_norm": 24.978593158166646,
"learning_rate": 2.5457665670441937e-07,
"logits/chosen": -2.6099724769592285,
"logits/rejected": -2.503551959991455,
"logps/chosen": -0.94499671459198,
"logps/rejected": -1.6384252309799194,
"loss": 1.8396,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.94499671459198,
"rewards/margins": 0.6934284567832947,
"rewards/rejected": -1.6384252309799194,
"step": 260
},
{
"epoch": 0.5548285789060455,
"grad_norm": 27.898305291163492,
"learning_rate": 2.454233432955807e-07,
"logits/chosen": -2.661271095275879,
"logits/rejected": -2.6026952266693115,
"logps/chosen": -1.0296385288238525,
"logps/rejected": -1.382299780845642,
"loss": 1.8101,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.0296385288238525,
"rewards/margins": 0.3526611924171448,
"rewards/rejected": -1.382299780845642,
"step": 265
},
{
"epoch": 0.5652970426589898,
"grad_norm": 35.74021390544709,
"learning_rate": 2.3627616503391812e-07,
"logits/chosen": -2.6500651836395264,
"logits/rejected": -2.5953967571258545,
"logps/chosen": -1.1358740329742432,
"logps/rejected": -1.5125305652618408,
"loss": 1.8849,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -1.1358740329742432,
"rewards/margins": 0.376656711101532,
"rewards/rejected": -1.5125305652618408,
"step": 270
},
{
"epoch": 0.575765506411934,
"grad_norm": 25.432184339952187,
"learning_rate": 2.2714738398943308e-07,
"logits/chosen": -2.7251439094543457,
"logits/rejected": -2.6094861030578613,
"logps/chosen": -1.018061637878418,
"logps/rejected": -1.627947449684143,
"loss": 1.8328,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -1.018061637878418,
"rewards/margins": 0.6098858118057251,
"rewards/rejected": -1.627947449684143,
"step": 275
},
{
"epoch": 0.5862339701648783,
"grad_norm": 23.87099932259008,
"learning_rate": 2.1804923757009882e-07,
"logits/chosen": -2.746220827102661,
"logits/rejected": -2.6774742603302,
"logps/chosen": -0.9781770706176758,
"logps/rejected": -1.3979579210281372,
"loss": 1.8368,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.9781770706176758,
"rewards/margins": 0.4197807312011719,
"rewards/rejected": -1.3979579210281372,
"step": 280
},
{
"epoch": 0.5967024339178225,
"grad_norm": 26.340595120664386,
"learning_rate": 2.089939221172446e-07,
"logits/chosen": -2.6835036277770996,
"logits/rejected": -2.6886842250823975,
"logps/chosen": -1.0875818729400635,
"logps/rejected": -1.583901047706604,
"loss": 1.8233,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -1.0875818729400635,
"rewards/margins": 0.49631914496421814,
"rewards/rejected": -1.583901047706604,
"step": 285
},
{
"epoch": 0.6071708976707668,
"grad_norm": 29.695224998335657,
"learning_rate": 1.9999357655598891e-07,
"logits/chosen": -2.7280991077423096,
"logits/rejected": -2.6861159801483154,
"logps/chosen": -1.0748018026351929,
"logps/rejected": -1.6494481563568115,
"loss": 1.8022,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.0748018026351929,
"rewards/margins": 0.5746463537216187,
"rewards/rejected": -1.6494481563568115,
"step": 290
},
{
"epoch": 0.6176393614237111,
"grad_norm": 29.67617284412597,
"learning_rate": 1.9106026612264315e-07,
"logits/chosen": -2.7601213455200195,
"logits/rejected": -2.6855008602142334,
"logps/chosen": -1.0902409553527832,
"logps/rejected": -1.6281505823135376,
"loss": 1.8578,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -1.0902409553527832,
"rewards/margins": 0.5379096269607544,
"rewards/rejected": -1.6281505823135376,
"step": 295
},
{
"epoch": 0.6281078251766553,
"grad_norm": 27.162465615999373,
"learning_rate": 1.8220596619089573e-07,
"logits/chosen": -2.66314435005188,
"logits/rejected": -2.6144497394561768,
"logps/chosen": -1.0360552072525024,
"logps/rejected": -1.5152963399887085,
"loss": 1.8344,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -1.0360552072525024,
"rewards/margins": 0.47924113273620605,
"rewards/rejected": -1.5152963399887085,
"step": 300
},
{
"epoch": 0.6385762889295996,
"grad_norm": 31.001026793963305,
"learning_rate": 1.7344254621846017e-07,
"logits/chosen": -2.6350555419921875,
"logits/rejected": -2.5956974029541016,
"logps/chosen": -1.0529329776763916,
"logps/rejected": -1.6219732761383057,
"loss": 1.8196,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.0529329776763916,
"rewards/margins": 0.5690402388572693,
"rewards/rejected": -1.6219732761383057,
"step": 305
},
{
"epoch": 0.6490447526825438,
"grad_norm": 27.186267806704134,
"learning_rate": 1.647817538357072e-07,
"logits/chosen": -2.646355390548706,
"logits/rejected": -2.649137258529663,
"logps/chosen": -0.9727001190185547,
"logps/rejected": -1.5282304286956787,
"loss": 1.8154,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.9727001190185547,
"rewards/margins": 0.5555303692817688,
"rewards/rejected": -1.5282304286956787,
"step": 310
},
{
"epoch": 0.6595132164354881,
"grad_norm": 33.037660975429695,
"learning_rate": 1.562351990976095e-07,
"logits/chosen": -2.666133403778076,
"logits/rejected": -2.6141715049743652,
"logps/chosen": -0.9768770933151245,
"logps/rejected": -1.6563736200332642,
"loss": 1.819,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -0.9768770933151245,
"rewards/margins": 0.6794965863227844,
"rewards/rejected": -1.6563736200332642,
"step": 315
},
{
"epoch": 0.6699816801884323,
"grad_norm": 37.124618715177,
"learning_rate": 1.478143389201113e-07,
"logits/chosen": -2.6524291038513184,
"logits/rejected": -2.6353344917297363,
"logps/chosen": -1.0379709005355835,
"logps/rejected": -1.5560357570648193,
"loss": 1.7814,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.0379709005355835,
"rewards/margins": 0.5180650353431702,
"rewards/rejected": -1.5560357570648193,
"step": 320
},
{
"epoch": 0.6804501439413766,
"grad_norm": 29.546811615709533,
"learning_rate": 1.3953046172178413e-07,
"logits/chosen": -2.6703402996063232,
"logits/rejected": -2.5877861976623535,
"logps/chosen": -1.0295711755752563,
"logps/rejected": -1.6383397579193115,
"loss": 1.7933,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -1.0295711755752563,
"rewards/margins": 0.6087687611579895,
"rewards/rejected": -1.6383397579193115,
"step": 325
},
{
"epoch": 0.6909186076943209,
"grad_norm": 27.130577131118542,
"learning_rate": 1.3139467229135998e-07,
"logits/chosen": -2.6374616622924805,
"logits/rejected": -2.5912885665893555,
"logps/chosen": -1.0082510709762573,
"logps/rejected": -1.3762824535369873,
"loss": 1.8378,
"rewards/accuracies": 0.625,
"rewards/chosen": -1.0082510709762573,
"rewards/margins": 0.36803150177001953,
"rewards/rejected": -1.3762824535369873,
"step": 330
},
{
"epoch": 0.7013870714472651,
"grad_norm": 26.495064774873583,
"learning_rate": 1.2341787690142435e-07,
"logits/chosen": -2.751105308532715,
"logits/rejected": -2.673309564590454,
"logps/chosen": -0.9556055068969727,
"logps/rejected": -1.5157105922698975,
"loss": 1.8293,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.9556055068969727,
"rewards/margins": 0.56010502576828,
"rewards/rejected": -1.5157105922698975,
"step": 335
},
{
"epoch": 0.7118555352002094,
"grad_norm": 26.629021066262204,
"learning_rate": 1.1561076868822755e-07,
"logits/chosen": -2.6683545112609863,
"logits/rejected": -2.600292921066284,
"logps/chosen": -1.1952508687973022,
"logps/rejected": -1.6229918003082275,
"loss": 1.8878,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.1952508687973022,
"rewards/margins": 0.4277407228946686,
"rewards/rejected": -1.6229918003082275,
"step": 340
},
{
"epoch": 0.7223239989531536,
"grad_norm": 34.902511984640945,
"learning_rate": 1.0798381331721107e-07,
"logits/chosen": -2.702152729034424,
"logits/rejected": -2.656289577484131,
"logps/chosen": -1.0419316291809082,
"logps/rejected": -1.6374496221542358,
"loss": 1.8455,
"rewards/accuracies": 0.625,
"rewards/chosen": -1.0419316291809082,
"rewards/margins": 0.5955179929733276,
"rewards/rejected": -1.6374496221542358,
"step": 345
},
{
"epoch": 0.7327924627060979,
"grad_norm": 30.383520283042905,
"learning_rate": 1.0054723495346482e-07,
"logits/chosen": -2.6637139320373535,
"logits/rejected": -2.640423536300659,
"logps/chosen": -1.0635532140731812,
"logps/rejected": -1.55173659324646,
"loss": 1.8762,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -1.0635532140731812,
"rewards/margins": 0.4881834387779236,
"rewards/rejected": -1.55173659324646,
"step": 350
},
{
"epoch": 0.7432609264590422,
"grad_norm": 28.202614006747883,
"learning_rate": 9.331100255592436e-08,
"logits/chosen": -2.672135591506958,
"logits/rejected": -2.614642858505249,
"logps/chosen": -0.9285499453544617,
"logps/rejected": -1.6109098196029663,
"loss": 1.7749,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.9285499453544617,
"rewards/margins": 0.6823598146438599,
"rewards/rejected": -1.6109098196029663,
"step": 355
},
{
"epoch": 0.7537293902119864,
"grad_norm": 23.48852239281184,
"learning_rate": 8.628481651367875e-08,
"logits/chosen": -2.701026439666748,
"logits/rejected": -2.645697832107544,
"logps/chosen": -0.9583543539047241,
"logps/rejected": -1.4256336688995361,
"loss": 1.7696,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.9583543539047241,
"rewards/margins": 0.46727919578552246,
"rewards/rejected": -1.4256336688995361,
"step": 360
},
{
"epoch": 0.7641978539649307,
"grad_norm": 29.4764609142287,
"learning_rate": 7.947809564230445e-08,
"logits/chosen": -2.6285886764526367,
"logits/rejected": -2.5465035438537598,
"logps/chosen": -1.07431960105896,
"logps/rejected": -1.6673939228057861,
"loss": 1.854,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.07431960105896,
"rewards/margins": 0.5930741429328918,
"rewards/rejected": -1.6673939228057861,
"step": 365
},
{
"epoch": 0.7746663177178749,
"grad_norm": 32.53243633682995,
"learning_rate": 7.289996455765748e-08,
"logits/chosen": -2.672539234161377,
"logits/rejected": -2.612757682800293,
"logps/chosen": -1.0804438591003418,
"logps/rejected": -1.7286722660064697,
"loss": 1.8244,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -1.0804438591003418,
"rewards/margins": 0.6482284069061279,
"rewards/rejected": -1.7286722660064697,
"step": 370
},
{
"epoch": 0.7851347814708192,
"grad_norm": 26.086052446415493,
"learning_rate": 6.655924144404906e-08,
"logits/chosen": -2.656698226928711,
"logits/rejected": -2.681257724761963,
"logps/chosen": -1.0645740032196045,
"logps/rejected": -1.5776240825653076,
"loss": 1.8108,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -1.0645740032196045,
"rewards/margins": 0.5130501985549927,
"rewards/rejected": -1.5776240825653076,
"step": 375
},
{
"epoch": 0.7956032452237635,
"grad_norm": 29.301563375037563,
"learning_rate": 6.046442623320145e-08,
"logits/chosen": -2.6668169498443604,
"logits/rejected": -2.6132140159606934,
"logps/chosen": -1.0790109634399414,
"logps/rejected": -1.5554571151733398,
"loss": 1.782,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -1.0790109634399414,
"rewards/margins": 0.47644609212875366,
"rewards/rejected": -1.5554571151733398,
"step": 380
},
{
"epoch": 0.8060717089767077,
"grad_norm": 24.668280208736856,
"learning_rate": 5.4623689209832484e-08,
"logits/chosen": -2.677908420562744,
"logits/rejected": -2.607048511505127,
"logps/chosen": -1.0273312330245972,
"logps/rejected": -1.5443226099014282,
"loss": 1.7947,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": -1.0273312330245972,
"rewards/margins": 0.5169912576675415,
"rewards/rejected": -1.5443226099014282,
"step": 385
},
{
"epoch": 0.816540172729652,
"grad_norm": 27.191462805016396,
"learning_rate": 4.904486005914027e-08,
"logits/chosen": -2.7038891315460205,
"logits/rejected": -2.6414623260498047,
"logps/chosen": -1.0105482339859009,
"logps/rejected": -1.7210209369659424,
"loss": 1.7734,
"rewards/accuracies": 0.71875,
"rewards/chosen": -1.0105482339859009,
"rewards/margins": 0.7104724645614624,
"rewards/rejected": -1.7210209369659424,
"step": 390
},
{
"epoch": 0.8270086364825961,
"grad_norm": 31.43323955162571,
"learning_rate": 4.373541737087263e-08,
"logits/chosen": -2.636841058731079,
"logits/rejected": -2.556833267211914,
"logps/chosen": -1.0063461065292358,
"logps/rejected": -1.5682775974273682,
"loss": 1.8075,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -1.0063461065292358,
"rewards/margins": 0.5619313716888428,
"rewards/rejected": -1.5682775974273682,
"step": 395
},
{
"epoch": 0.8374771002355405,
"grad_norm": 39.225726689241675,
"learning_rate": 3.8702478614051345e-08,
"logits/chosen": -2.6019065380096436,
"logits/rejected": -2.5374197959899902,
"logps/chosen": -0.9893720746040344,
"logps/rejected": -1.5351721048355103,
"loss": 1.8109,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.9893720746040344,
"rewards/margins": 0.5458000898361206,
"rewards/rejected": -1.5351721048355103,
"step": 400
},
{
"epoch": 0.8479455639884846,
"grad_norm": 42.21715206184894,
"learning_rate": 3.3952790595787986e-08,
"logits/chosen": -2.6662096977233887,
"logits/rejected": -2.62931489944458,
"logps/chosen": -1.0822862386703491,
"logps/rejected": -1.6576595306396484,
"loss": 1.8405,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -1.0822862386703491,
"rewards/margins": 0.5753732323646545,
"rewards/rejected": -1.6576595306396484,
"step": 405
},
{
"epoch": 0.8584140277414289,
"grad_norm": 35.10610264317854,
"learning_rate": 2.9492720416985e-08,
"logits/chosen": -2.678342819213867,
"logits/rejected": -2.634387493133545,
"logps/chosen": -0.9690961837768555,
"logps/rejected": -1.4596083164215088,
"loss": 1.8145,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.9690961837768555,
"rewards/margins": 0.4905119836330414,
"rewards/rejected": -1.4596083164215088,
"step": 410
},
{
"epoch": 0.8688824914943732,
"grad_norm": 27.87180494304035,
"learning_rate": 2.5328246937043525e-08,
"logits/chosen": -2.702768564224243,
"logits/rejected": -2.636719226837158,
"logps/chosen": -1.0192323923110962,
"logps/rejected": -1.6251684427261353,
"loss": 1.77,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.0192323923110962,
"rewards/margins": 0.6059359312057495,
"rewards/rejected": -1.6251684427261353,
"step": 415
},
{
"epoch": 0.8793509552473174,
"grad_norm": 25.60878775790235,
"learning_rate": 2.1464952759020856e-08,
"logits/chosen": -2.6637589931488037,
"logits/rejected": -2.6058096885681152,
"logps/chosen": -1.0046476125717163,
"logps/rejected": -1.5859066247940063,
"loss": 1.8408,
"rewards/accuracies": 0.65625,
"rewards/chosen": -1.0046476125717163,
"rewards/margins": 0.5812588930130005,
"rewards/rejected": -1.5859066247940063,
"step": 420
},
{
"epoch": 0.8898194190002617,
"grad_norm": 34.39189970937955,
"learning_rate": 1.7908016745981856e-08,
"logits/chosen": -2.6160888671875,
"logits/rejected": -2.5980353355407715,
"logps/chosen": -1.1391981840133667,
"logps/rejected": -1.5944236516952515,
"loss": 1.8251,
"rewards/accuracies": 0.625,
"rewards/chosen": -1.1391981840133667,
"rewards/margins": 0.4552256464958191,
"rewards/rejected": -1.5944236516952515,
"step": 425
},
{
"epoch": 0.9002878827532059,
"grad_norm": 33.16075597765061,
"learning_rate": 1.4662207078575684e-08,
"logits/chosen": -2.6214401721954346,
"logits/rejected": -2.570112705230713,
"logps/chosen": -1.0948253870010376,
"logps/rejected": -1.6313488483428955,
"loss": 1.8008,
"rewards/accuracies": 0.65625,
"rewards/chosen": -1.0948253870010376,
"rewards/margins": 0.5365234613418579,
"rewards/rejected": -1.6313488483428955,
"step": 430
},
{
"epoch": 0.9107563465061502,
"grad_norm": 33.93315468108953,
"learning_rate": 1.1731874863145142e-08,
"logits/chosen": -2.6755282878875732,
"logits/rejected": -2.637303590774536,
"logps/chosen": -1.0409172773361206,
"logps/rejected": -1.7293403148651123,
"loss": 1.8172,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.0409172773361206,
"rewards/margins": 0.6884229779243469,
"rewards/rejected": -1.7293403148651123,
"step": 435
},
{
"epoch": 0.9212248102590945,
"grad_norm": 32.567789743092646,
"learning_rate": 9.12094829893642e-09,
"logits/chosen": -2.617215871810913,
"logits/rejected": -2.5979318618774414,
"logps/chosen": -1.0106241703033447,
"logps/rejected": -1.5670998096466064,
"loss": 1.8428,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -1.0106241703033447,
"rewards/margins": 0.5564756989479065,
"rewards/rejected": -1.5670998096466064,
"step": 440
},
{
"epoch": 0.9316932740120387,
"grad_norm": 30.42876501735674,
"learning_rate": 6.832927412229017e-09,
"logits/chosen": -2.681229591369629,
"logits/rejected": -2.612400531768799,
"logps/chosen": -1.06952965259552,
"logps/rejected": -1.5481148958206177,
"loss": 1.8604,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -1.06952965259552,
"rewards/margins": 0.4785851836204529,
"rewards/rejected": -1.5481148958206177,
"step": 445
},
{
"epoch": 0.942161737764983,
"grad_norm": 27.381238378090455,
"learning_rate": 4.8708793644441086e-09,
"logits/chosen": -2.579571485519409,
"logits/rejected": -2.544320821762085,
"logps/chosen": -1.0337889194488525,
"logps/rejected": -1.5412318706512451,
"loss": 1.7854,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -1.0337889194488525,
"rewards/margins": 0.5074428915977478,
"rewards/rejected": -1.5412318706512451,
"step": 450
},
{
"epoch": 0.9526302015179272,
"grad_norm": 31.78517016150814,
"learning_rate": 3.2374343405217884e-09,
"logits/chosen": -2.6410415172576904,
"logits/rejected": -2.5741779804229736,
"logps/chosen": -1.1349095106124878,
"logps/rejected": -1.5367697477340698,
"loss": 1.8237,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.1349095106124878,
"rewards/margins": 0.40186023712158203,
"rewards/rejected": -1.5367697477340698,
"step": 455
},
{
"epoch": 0.9630986652708715,
"grad_norm": 29.1380242330253,
"learning_rate": 1.9347820230782295e-09,
"logits/chosen": -2.6333887577056885,
"logits/rejected": -2.594615936279297,
"logps/chosen": -0.9948427081108093,
"logps/rejected": -1.598851203918457,
"loss": 1.7821,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.9948427081108093,
"rewards/margins": 0.6040085554122925,
"rewards/rejected": -1.598851203918457,
"step": 460
},
{
"epoch": 0.9735671290238157,
"grad_norm": 34.467412911376215,
"learning_rate": 9.64668657069706e-10,
"logits/chosen": -2.69240140914917,
"logits/rejected": -2.6058619022369385,
"logps/chosen": -1.087109923362732,
"logps/rejected": -1.6203782558441162,
"loss": 1.8436,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -1.087109923362732,
"rewards/margins": 0.5332685112953186,
"rewards/rejected": -1.6203782558441162,
"step": 465
},
{
"epoch": 0.98403559277676,
"grad_norm": 27.439754854696236,
"learning_rate": 3.2839470889836627e-10,
"logits/chosen": -2.6679489612579346,
"logits/rejected": -2.603280544281006,
"logps/chosen": -0.9998431205749512,
"logps/rejected": -1.458508849143982,
"loss": 1.8712,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.9998431205749512,
"rewards/margins": 0.45866554975509644,
"rewards/rejected": -1.458508849143982,
"step": 470
},
{
"epoch": 0.9945040565297043,
"grad_norm": 31.225346161878143,
"learning_rate": 2.6813123097352287e-11,
"logits/chosen": -2.714961290359497,
"logits/rejected": -2.6193995475769043,
"logps/chosen": -1.041459083557129,
"logps/rejected": -1.5316205024719238,
"loss": 1.8186,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -1.041459083557129,
"rewards/margins": 0.4901614189147949,
"rewards/rejected": -1.5316205024719238,
"step": 475
},
{
"epoch": 0.998691442030882,
"step": 477,
"total_flos": 0.0,
"train_loss": 1.8633759126723188,
"train_runtime": 7578.2385,
"train_samples_per_second": 8.067,
"train_steps_per_second": 0.063
}
],
"logging_steps": 5,
"max_steps": 477,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000000,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}