llama3_8b_dpo_iter2 / trainer_state.json
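The JSON below is the Hugging Face Trainer state for this DPO run: each entry in `log_history` records, every two optimizer steps, the DPO loss, gradient norm, learning rate, policy log-probabilities, and the chosen/rejected reward statistics. A minimal sketch for inspecting it, assuming a local copy of this file and matplotlib for plotting (neither is part of the original state file):

```python
# Sketch: load trainer_state.json and plot DPO reward margin and loss per step.
# The file path and the choice of metrics to plot are assumptions for illustration.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:  # assumed local path to this file
    state = json.load(f)

# Keep only the periodic training-log entries that carry DPO metrics.
entries = [e for e in state["log_history"] if "rewards/margins" in e]

steps = [e["step"] for e in entries]
margins = [e["rewards/margins"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, margins, label="rewards/margins")
plt.plot(steps, losses, label="loss")
plt.xlabel("global step")
plt.legend()
plt.savefig("dpo_training_curves.png")
```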
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 888888,
"global_step": 308,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.012987012987012988,
"grad_norm": 30.409713806756372,
"learning_rate": 4.999479820473016e-07,
"logits/chosen": -1.1957358121871948,
"logits/rejected": -1.1989657878875732,
"logps/chosen": -236.6458282470703,
"logps/rejected": -274.4889221191406,
"loss": 0.7214,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.9376516938209534,
"rewards/margins": 0.12735898792743683,
"rewards/rejected": 0.810292661190033,
"step": 2
},
{
"epoch": 0.025974025974025976,
"grad_norm": 32.07895252539544,
"learning_rate": 4.997919498361457e-07,
"logits/chosen": -1.2326594591140747,
"logits/rejected": -1.2664812803268433,
"logps/chosen": -257.2532958984375,
"logps/rejected": -252.87025451660156,
"loss": 0.727,
"rewards/accuracies": 0.59375,
"rewards/chosen": 1.142688512802124,
"rewards/margins": 0.25847145915031433,
"rewards/rejected": 0.8842170238494873,
"step": 4
},
{
"epoch": 0.03896103896103896,
"grad_norm": 31.29868726835709,
"learning_rate": 4.995319682983417e-07,
"logits/chosen": -1.183756947517395,
"logits/rejected": -1.1092337369918823,
"logps/chosen": -191.75558471679688,
"logps/rejected": -230.58950805664062,
"loss": 0.7031,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.7358960509300232,
"rewards/margins": -0.0961524099111557,
"rewards/rejected": 0.8320484757423401,
"step": 6
},
{
"epoch": 0.05194805194805195,
"grad_norm": 32.01008811759358,
"learning_rate": 4.991681456235483e-07,
"logits/chosen": -1.2155265808105469,
"logits/rejected": -1.3162301778793335,
"logps/chosen": -202.1005859375,
"logps/rejected": -232.53140258789062,
"loss": 0.6947,
"rewards/accuracies": 0.65625,
"rewards/chosen": 1.0601692199707031,
"rewards/margins": 0.3322027921676636,
"rewards/rejected": 0.72796630859375,
"step": 8
},
{
"epoch": 0.06493506493506493,
"grad_norm": 26.584702872904153,
"learning_rate": 4.98700633214251e-07,
"logits/chosen": -1.08168363571167,
"logits/rejected": -1.21769118309021,
"logps/chosen": -217.7674560546875,
"logps/rejected": -195.05032348632812,
"loss": 0.6823,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.8856821060180664,
"rewards/margins": 0.06800435483455658,
"rewards/rejected": 0.8176776170730591,
"step": 10
},
{
"epoch": 0.07792207792207792,
"grad_norm": 31.831697368646147,
"learning_rate": 4.981296256227569e-07,
"logits/chosen": -0.912403404712677,
"logits/rejected": -0.9036965370178223,
"logps/chosen": -204.28919982910156,
"logps/rejected": -211.12686157226562,
"loss": 0.7049,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.849972665309906,
"rewards/margins": 0.017677173018455505,
"rewards/rejected": 0.8322954177856445,
"step": 12
},
{
"epoch": 0.09090909090909091,
"grad_norm": 29.9192585773342,
"learning_rate": 4.974553604702332e-07,
"logits/chosen": -1.0211938619613647,
"logits/rejected": -1.0898373126983643,
"logps/chosen": -166.66610717773438,
"logps/rejected": -154.6410675048828,
"loss": 0.6707,
"rewards/accuracies": 0.6875,
"rewards/chosen": 1.0837771892547607,
"rewards/margins": 0.4414757192134857,
"rewards/rejected": 0.6423014402389526,
"step": 14
},
{
"epoch": 0.1038961038961039,
"grad_norm": 29.446896480009652,
"learning_rate": 4.966781183478222e-07,
"logits/chosen": -1.1182293891906738,
"logits/rejected": -1.116457223892212,
"logps/chosen": -201.10256958007812,
"logps/rejected": -219.7225341796875,
"loss": 0.6796,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.78659588098526,
"rewards/margins": 0.23196566104888916,
"rewards/rejected": 0.5546301603317261,
"step": 16
},
{
"epoch": 0.11688311688311688,
"grad_norm": 30.21023973321977,
"learning_rate": 4.957982226998757e-07,
"logits/chosen": -1.31354820728302,
"logits/rejected": -1.2536346912384033,
"logps/chosen": -265.8379821777344,
"logps/rejected": -281.1967468261719,
"loss": 0.7288,
"rewards/accuracies": 0.59375,
"rewards/chosen": 1.1193804740905762,
"rewards/margins": 0.1973106861114502,
"rewards/rejected": 0.922069787979126,
"step": 18
},
{
"epoch": 0.12987012987012986,
"grad_norm": 30.94898503765605,
"learning_rate": 4.948160396893553e-07,
"logits/chosen": -1.2565226554870605,
"logits/rejected": -1.15552818775177,
"logps/chosen": -174.21267700195312,
"logps/rejected": -179.50820922851562,
"loss": 0.6927,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.931135892868042,
"rewards/margins": 0.1621209681034088,
"rewards/rejected": 0.7690149545669556,
"step": 20
},
{
"epoch": 0.14285714285714285,
"grad_norm": 29.667041326907114,
"learning_rate": 4.937319780454559e-07,
"logits/chosen": -1.3007498979568481,
"logits/rejected": -1.1949548721313477,
"logps/chosen": -230.96115112304688,
"logps/rejected": -248.40798950195312,
"loss": 0.7216,
"rewards/accuracies": 0.34375,
"rewards/chosen": 0.7717787027359009,
"rewards/margins": -0.18773330748081207,
"rewards/rejected": 0.9595120549201965,
"step": 22
},
{
"epoch": 0.15584415584415584,
"grad_norm": 33.222954526021944,
"learning_rate": 4.925464888935161e-07,
"logits/chosen": -1.1974416971206665,
"logits/rejected": -1.2200247049331665,
"logps/chosen": -198.02139282226562,
"logps/rejected": -242.11419677734375,
"loss": 0.7295,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.8105030655860901,
"rewards/margins": 0.041422925889492035,
"rewards/rejected": 0.7690802216529846,
"step": 24
},
{
"epoch": 0.16883116883116883,
"grad_norm": 31.901066989434995,
"learning_rate": 4.912600655672849e-07,
"logits/chosen": -1.240085482597351,
"logits/rejected": -1.2631834745407104,
"logps/chosen": -264.38067626953125,
"logps/rejected": -317.8161926269531,
"loss": 0.6988,
"rewards/accuracies": 0.53125,
"rewards/chosen": 1.0404579639434814,
"rewards/margins": 0.05802140012383461,
"rewards/rejected": 0.9824365377426147,
"step": 26
},
{
"epoch": 0.18181818181818182,
"grad_norm": 34.21159593307014,
"learning_rate": 4.898732434036243e-07,
"logits/chosen": -1.189288854598999,
"logits/rejected": -1.048648476600647,
"logps/chosen": -262.818115234375,
"logps/rejected": -301.31927490234375,
"loss": 0.7616,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.8156523108482361,
"rewards/margins": -0.08142556995153427,
"rewards/rejected": 0.8970779180526733,
"step": 28
},
{
"epoch": 0.19480519480519481,
"grad_norm": 32.29301620044956,
"learning_rate": 4.883865995197318e-07,
"logits/chosen": -1.1413640975952148,
"logits/rejected": -1.2122159004211426,
"logps/chosen": -217.71823120117188,
"logps/rejected": -221.3170166015625,
"loss": 0.7261,
"rewards/accuracies": 0.78125,
"rewards/chosen": 1.1059789657592773,
"rewards/margins": 0.40076321363449097,
"rewards/rejected": 0.7052158713340759,
"step": 30
},
{
"epoch": 0.2077922077922078,
"grad_norm": 30.16619750369487,
"learning_rate": 4.868007525729775e-07,
"logits/chosen": -1.36598801612854,
"logits/rejected": -1.3407963514328003,
"logps/chosen": -239.17111206054688,
"logps/rejected": -226.31658935546875,
"loss": 0.6825,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.7918877005577087,
"rewards/margins": 0.05119353532791138,
"rewards/rejected": 0.7406941056251526,
"step": 32
},
{
"epoch": 0.22077922077922077,
"grad_norm": 30.605215606478595,
"learning_rate": 4.851163625034529e-07,
"logits/chosen": -1.1574623584747314,
"logits/rejected": -1.193921685218811,
"logps/chosen": -230.02108764648438,
"logps/rejected": -244.59841918945312,
"loss": 0.6851,
"rewards/accuracies": 0.65625,
"rewards/chosen": 1.080081820487976,
"rewards/margins": 0.3703171908855438,
"rewards/rejected": 0.7097645998001099,
"step": 34
},
{
"epoch": 0.23376623376623376,
"grad_norm": 31.802179808840666,
"learning_rate": 4.833341302593417e-07,
"logits/chosen": -1.159416913986206,
"logits/rejected": -1.1420115232467651,
"logps/chosen": -140.0688018798828,
"logps/rejected": -152.40975952148438,
"loss": 0.6732,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.8693720102310181,
"rewards/margins": 0.11663462221622467,
"rewards/rejected": 0.7527374625205994,
"step": 36
},
{
"epoch": 0.24675324675324675,
"grad_norm": 30.94540714928562,
"learning_rate": 4.814547975052244e-07,
"logits/chosen": -1.3302018642425537,
"logits/rejected": -1.3434113264083862,
"logps/chosen": -191.34698486328125,
"logps/rejected": -196.69509887695312,
"loss": 0.6714,
"rewards/accuracies": 0.46875,
"rewards/chosen": 0.7995274662971497,
"rewards/margins": 0.047811880707740784,
"rewards/rejected": 0.7517155408859253,
"step": 38
},
{
"epoch": 0.2597402597402597,
"grad_norm": 29.865098693610847,
"learning_rate": 4.794791463134399e-07,
"logits/chosen": -1.2271629571914673,
"logits/rejected": -1.3550831079483032,
"logps/chosen": -226.42410278320312,
"logps/rejected": -214.7489013671875,
"loss": 0.6975,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.8646127581596375,
"rewards/margins": 0.22612789273262024,
"rewards/rejected": 0.6384848356246948,
"step": 40
},
{
"epoch": 0.2727272727272727,
"grad_norm": 30.43675432858084,
"learning_rate": 4.774079988386296e-07,
"logits/chosen": -1.1719427108764648,
"logits/rejected": -1.2684495449066162,
"logps/chosen": -208.20394897460938,
"logps/rejected": -235.50634765625,
"loss": 0.6855,
"rewards/accuracies": 0.6875,
"rewards/chosen": 1.0848238468170166,
"rewards/margins": 0.3408503830432892,
"rewards/rejected": 0.7439733743667603,
"step": 42
},
{
"epoch": 0.2857142857142857,
"grad_norm": 31.511971354878987,
"learning_rate": 4.752422169756047e-07,
"logits/chosen": -1.2040029764175415,
"logits/rejected": -1.2218215465545654,
"logps/chosen": -259.76959228515625,
"logps/rejected": -279.75946044921875,
"loss": 0.6772,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.9812463521957397,
"rewards/margins": 0.2615824341773987,
"rewards/rejected": 0.7196639776229858,
"step": 44
},
{
"epoch": 0.2987012987012987,
"grad_norm": 35.50720072355199,
"learning_rate": 4.729827020006735e-07,
"logits/chosen": -1.2218239307403564,
"logits/rejected": -1.2937262058258057,
"logps/chosen": -239.9827880859375,
"logps/rejected": -250.3217315673828,
"loss": 0.7242,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.8138805627822876,
"rewards/margins": 0.09423254430294037,
"rewards/rejected": 0.719648003578186,
"step": 46
},
{
"epoch": 0.3116883116883117,
"grad_norm": 32.207699613934025,
"learning_rate": 4.706303941965803e-07,
"logits/chosen": -1.174890398979187,
"logits/rejected": -1.1500227451324463,
"logps/chosen": -257.21533203125,
"logps/rejected": -302.0556945800781,
"loss": 0.6856,
"rewards/accuracies": 0.6875,
"rewards/chosen": 1.0188751220703125,
"rewards/margins": 0.20580947399139404,
"rewards/rejected": 0.8130655884742737,
"step": 48
},
{
"epoch": 0.3246753246753247,
"grad_norm": 31.074659437805654,
"learning_rate": 4.68186272461214e-07,
"logits/chosen": -1.2761106491088867,
"logits/rejected": -1.307037353515625,
"logps/chosen": -228.11666870117188,
"logps/rejected": -254.00048828125,
"loss": 0.6711,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.9066404104232788,
"rewards/margins": 0.32165050506591797,
"rewards/rejected": 0.5849898457527161,
"step": 50
},
{
"epoch": 0.33766233766233766,
"grad_norm": 31.027447722981428,
"learning_rate": 4.656513539002451e-07,
"logits/chosen": -1.2342908382415771,
"logits/rejected": -1.2500003576278687,
"logps/chosen": -182.6434783935547,
"logps/rejected": -210.83444213867188,
"loss": 0.6867,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.8957956433296204,
"rewards/margins": 0.21347832679748535,
"rewards/rejected": 0.6823172569274902,
"step": 52
},
{
"epoch": 0.35064935064935066,
"grad_norm": 28.860294980547383,
"learning_rate": 4.6302669340386415e-07,
"logits/chosen": -1.2332104444503784,
"logits/rejected": -1.1100932359695435,
"logps/chosen": -194.90341186523438,
"logps/rejected": -220.88478088378906,
"loss": 0.692,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.7950139045715332,
"rewards/margins": 0.09959140419960022,
"rewards/rejected": 0.6954225897789001,
"step": 54
},
{
"epoch": 0.36363636363636365,
"grad_norm": 37.00612209924237,
"learning_rate": 4.603133832077953e-07,
"logits/chosen": -1.2518322467803955,
"logits/rejected": -1.272511601448059,
"logps/chosen": -219.75762939453125,
"logps/rejected": -225.9881591796875,
"loss": 0.7341,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.8070573806762695,
"rewards/margins": 0.15917816758155823,
"rewards/rejected": 0.6478793025016785,
"step": 56
},
{
"epoch": 0.37662337662337664,
"grad_norm": 34.181154948444195,
"learning_rate": 4.575125524387701e-07,
"logits/chosen": -1.0896148681640625,
"logits/rejected": -1.0761992931365967,
"logps/chosen": -246.28172302246094,
"logps/rejected": -231.1036376953125,
"loss": 0.6999,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.6937105059623718,
"rewards/margins": 0.09433312714099884,
"rewards/rejected": 0.5993773937225342,
"step": 58
},
{
"epoch": 0.38961038961038963,
"grad_norm": 33.21940371087566,
"learning_rate": 4.5462536664464836e-07,
"logits/chosen": -1.2278883457183838,
"logits/rejected": -1.2770620584487915,
"logps/chosen": -325.75714111328125,
"logps/rejected": -309.88836669921875,
"loss": 0.685,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.9801526069641113,
"rewards/margins": 0.22209185361862183,
"rewards/rejected": 0.7580606937408447,
"step": 60
},
{
"epoch": 0.4025974025974026,
"grad_norm": 29.951734183402802,
"learning_rate": 4.516530273093825e-07,
"logits/chosen": -1.3827568292617798,
"logits/rejected": -1.3074092864990234,
"logps/chosen": -226.56942749023438,
"logps/rejected": -261.1898498535156,
"loss": 0.6715,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.8671592473983765,
"rewards/margins": 0.1708277314901352,
"rewards/rejected": 0.6963315010070801,
"step": 62
},
{
"epoch": 0.4155844155844156,
"grad_norm": 35.00256057592286,
"learning_rate": 4.485967713530281e-07,
"logits/chosen": -1.2592735290527344,
"logits/rejected": -1.276845097541809,
"logps/chosen": -280.13421630859375,
"logps/rejected": -272.318359375,
"loss": 0.6797,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.9574460983276367,
"rewards/margins": 0.2645747661590576,
"rewards/rejected": 0.6928713321685791,
"step": 64
},
{
"epoch": 0.42857142857142855,
"grad_norm": 130.44143506253818,
"learning_rate": 4.4545787061700746e-07,
"logits/chosen": -1.201524019241333,
"logits/rejected": -1.2254564762115479,
"logps/chosen": -222.44644165039062,
"logps/rejected": -215.9447021484375,
"loss": 0.6865,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.8678699731826782,
"rewards/margins": 0.32531124353408813,
"rewards/rejected": 0.5425587296485901,
"step": 66
},
{
"epoch": 0.44155844155844154,
"grad_norm": 30.886647455965967,
"learning_rate": 4.422376313348405e-07,
"logits/chosen": -1.2102560997009277,
"logits/rejected": -1.1355280876159668,
"logps/chosen": -211.4716796875,
"logps/rejected": -246.92666625976562,
"loss": 0.6728,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.9252184629440308,
"rewards/margins": 0.4986411929130554,
"rewards/rejected": 0.4265773296356201,
"step": 68
},
{
"epoch": 0.45454545454545453,
"grad_norm": 38.68657958814148,
"learning_rate": 4.3893739358856455e-07,
"logits/chosen": -1.0511149168014526,
"logits/rejected": -1.1264652013778687,
"logps/chosen": -193.53610229492188,
"logps/rejected": -218.5620880126953,
"loss": 0.701,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.7159083485603333,
"rewards/margins": 0.046296779066324234,
"rewards/rejected": 0.6696116328239441,
"step": 70
},
{
"epoch": 0.4675324675324675,
"grad_norm": 30.130370931610912,
"learning_rate": 4.355585307510675e-07,
"logits/chosen": -1.246776819229126,
"logits/rejected": -1.2227742671966553,
"logps/chosen": -159.0496826171875,
"logps/rejected": -173.915771484375,
"loss": 0.6643,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.7583217620849609,
"rewards/margins": 0.24899302423000336,
"rewards/rejected": 0.5093286633491516,
"step": 72
},
{
"epoch": 0.4805194805194805,
"grad_norm": 33.33819375543502,
"learning_rate": 4.3210244891456725e-07,
"logits/chosen": -1.3857276439666748,
"logits/rejected": -1.2871744632720947,
"logps/chosen": -237.01812744140625,
"logps/rejected": -222.86903381347656,
"loss": 0.6923,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.6103696227073669,
"rewards/margins": 0.10617219656705856,
"rewards/rejected": 0.5041974186897278,
"step": 74
},
{
"epoch": 0.4935064935064935,
"grad_norm": 31.374872695083415,
"learning_rate": 4.2857058630547586e-07,
"logits/chosen": -1.1448109149932861,
"logits/rejected": -1.0983011722564697,
"logps/chosen": -251.7313690185547,
"logps/rejected": -263.6651306152344,
"loss": 0.6743,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.733715295791626,
"rewards/margins": 0.12528231739997864,
"rewards/rejected": 0.608432948589325,
"step": 76
},
{
"epoch": 0.5064935064935064,
"grad_norm": 29.77623703163317,
"learning_rate": 4.2496441268589047e-07,
"logits/chosen": -1.0607296228408813,
"logits/rejected": -1.2361115217208862,
"logps/chosen": -208.2403564453125,
"logps/rejected": -205.75625610351562,
"loss": 0.6756,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.9102213382720947,
"rewards/margins": 0.41290420293807983,
"rewards/rejected": 0.4973170757293701,
"step": 78
},
{
"epoch": 0.5194805194805194,
"grad_norm": 29.02788697480319,
"learning_rate": 4.2128542874196107e-07,
"logits/chosen": -1.0309326648712158,
"logits/rejected": -1.143478274345398,
"logps/chosen": -173.32151794433594,
"logps/rejected": -161.68482971191406,
"loss": 0.6691,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.7697413563728333,
"rewards/margins": 0.14579711854457855,
"rewards/rejected": 0.6239442229270935,
"step": 80
},
{
"epoch": 0.5324675324675324,
"grad_norm": 29.784869306609654,
"learning_rate": 4.1753516545938986e-07,
"logits/chosen": -1.2103991508483887,
"logits/rejected": -1.3010125160217285,
"logps/chosen": -190.08010864257812,
"logps/rejected": -182.82606506347656,
"loss": 0.6618,
"rewards/accuracies": 0.40625,
"rewards/chosen": 0.6524017453193665,
"rewards/margins": 0.07943220436573029,
"rewards/rejected": 0.5729695558547974,
"step": 82
},
{
"epoch": 0.5454545454545454,
"grad_norm": 32.96758632787842,
"learning_rate": 4.137151834863213e-07,
"logits/chosen": -1.1179546117782593,
"logits/rejected": -1.1667120456695557,
"logps/chosen": -252.30087280273438,
"logps/rejected": -258.8082275390625,
"loss": 0.6678,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.7305130958557129,
"rewards/margins": 0.33587121963500977,
"rewards/rejected": 0.39464184641838074,
"step": 84
},
{
"epoch": 0.5584415584415584,
"grad_norm": 29.473194289500984,
"learning_rate": 4.098270724838879e-07,
"logits/chosen": -1.2531524896621704,
"logits/rejected": -1.2107303142547607,
"logps/chosen": -257.7474365234375,
"logps/rejected": -297.6868591308594,
"loss": 0.6752,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.9224438667297363,
"rewards/margins": 0.45967769622802734,
"rewards/rejected": 0.4627661406993866,
"step": 86
},
{
"epoch": 0.5714285714285714,
"grad_norm": 25.375543282285655,
"learning_rate": 4.058724504646834e-07,
"logits/chosen": -1.212619662284851,
"logits/rejected": -1.218203067779541,
"logps/chosen": -242.98947143554688,
"logps/rejected": -272.5478515625,
"loss": 0.629,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.7270645499229431,
"rewards/margins": 0.3879895508289337,
"rewards/rejected": 0.33907490968704224,
"step": 88
},
{
"epoch": 0.5844155844155844,
"grad_norm": 33.30211893834397,
"learning_rate": 4.018529631194369e-07,
"logits/chosen": -1.3774781227111816,
"logits/rejected": -1.4289326667785645,
"logps/chosen": -341.9387512207031,
"logps/rejected": -348.09283447265625,
"loss": 0.6743,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.8079380989074707,
"rewards/margins": 0.1343669593334198,
"rewards/rejected": 0.6735711097717285,
"step": 90
},
{
"epoch": 0.5974025974025974,
"grad_norm": 33.04398574915242,
"learning_rate": 3.9777028313216913e-07,
"logits/chosen": -1.1324537992477417,
"logits/rejected": -1.2160053253173828,
"logps/chosen": -216.34042358398438,
"logps/rejected": -239.47853088378906,
"loss": 0.6886,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.5493214726448059,
"rewards/margins": 0.09607386589050293,
"rewards/rejected": 0.4532475769519806,
"step": 92
},
{
"epoch": 0.6103896103896104,
"grad_norm": 27.7423356377416,
"learning_rate": 3.9362610948411584e-07,
"logits/chosen": -1.273106336593628,
"logits/rejected": -1.278911828994751,
"logps/chosen": -298.0914306640625,
"logps/rejected": -303.9120788574219,
"loss": 0.6527,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.9156701564788818,
"rewards/margins": 0.36599063873291016,
"rewards/rejected": 0.5496795177459717,
"step": 94
},
{
"epoch": 0.6233766233766234,
"grad_norm": 28.666974084256797,
"learning_rate": 3.8942216674670737e-07,
"logits/chosen": -1.1729968786239624,
"logits/rejected": -1.2594560384750366,
"logps/chosen": -201.3909454345703,
"logps/rejected": -202.02003479003906,
"loss": 0.6969,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.5368354320526123,
"rewards/margins": 0.11352111399173737,
"rewards/rejected": 0.4233143925666809,
"step": 96
},
{
"epoch": 0.6363636363636364,
"grad_norm": 31.936323967340297,
"learning_rate": 3.851602043638994e-07,
"logits/chosen": -1.2385543584823608,
"logits/rejected": -1.3384761810302734,
"logps/chosen": -247.0762939453125,
"logps/rejected": -246.0072021484375,
"loss": 0.7103,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.6583046317100525,
"rewards/margins": 0.3179153501987457,
"rewards/rejected": 0.3403893709182739,
"step": 98
},
{
"epoch": 0.6493506493506493,
"grad_norm": 32.627149716732674,
"learning_rate": 3.80841995924153e-07,
"logits/chosen": -1.2925565242767334,
"logits/rejected": -1.22373628616333,
"logps/chosen": -183.79818725585938,
"logps/rejected": -207.68206787109375,
"loss": 0.6958,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.48615562915802,
"rewards/margins": 0.16806553304195404,
"rewards/rejected": 0.3180900812149048,
"step": 100
},
{
"epoch": 0.6623376623376623,
"grad_norm": 35.550165524685106,
"learning_rate": 3.7646933842236707e-07,
"logits/chosen": -1.3273155689239502,
"logits/rejected": -1.2905724048614502,
"logps/chosen": -210.8981475830078,
"logps/rejected": -253.29486083984375,
"loss": 0.674,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.7722782492637634,
"rewards/margins": 0.2569873034954071,
"rewards/rejected": 0.5152909755706787,
"step": 102
},
{
"epoch": 0.6753246753246753,
"grad_norm": 37.08342788520365,
"learning_rate": 3.720440515120703e-07,
"logits/chosen": -1.0195050239562988,
"logits/rejected": -1.0432817935943604,
"logps/chosen": -190.58543395996094,
"logps/rejected": -212.59231567382812,
"loss": 0.6802,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.7294161915779114,
"rewards/margins": 0.22578200697898865,
"rewards/rejected": 0.5036342144012451,
"step": 104
},
{
"epoch": 0.6883116883116883,
"grad_norm": 31.103871645303624,
"learning_rate": 3.6756797674818417e-07,
"logits/chosen": -1.1702743768692017,
"logits/rejected": -1.2568817138671875,
"logps/chosen": -288.2113037109375,
"logps/rejected": -276.70550537109375,
"loss": 0.667,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.7896406650543213,
"rewards/margins": 0.35757139325141907,
"rewards/rejected": 0.43206924200057983,
"step": 106
},
{
"epoch": 0.7012987012987013,
"grad_norm": 33.27213712499375,
"learning_rate": 3.630429768206714e-07,
"logits/chosen": -1.1950292587280273,
"logits/rejected": -1.2156962156295776,
"logps/chosen": -260.03192138671875,
"logps/rejected": -280.8499450683594,
"loss": 0.6698,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.6720072627067566,
"rewards/margins": 0.1990586519241333,
"rewards/rejected": 0.4729485809803009,
"step": 108
},
{
"epoch": 0.7142857142857143,
"grad_norm": 34.347475898145326,
"learning_rate": 3.584709347793895e-07,
"logits/chosen": -1.2583609819412231,
"logits/rejected": -1.2271919250488281,
"logps/chosen": -218.08056640625,
"logps/rejected": -265.71612548828125,
"loss": 0.6771,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.7517715692520142,
"rewards/margins": 0.31436362862586975,
"rewards/rejected": 0.4374079704284668,
"step": 110
},
{
"epoch": 0.7272727272727273,
"grad_norm": 28.787797168356807,
"learning_rate": 3.5385375325047163e-07,
"logits/chosen": -1.301234245300293,
"logits/rejected": -1.3219377994537354,
"logps/chosen": -198.1061553955078,
"logps/rejected": -215.04071044921875,
"loss": 0.6702,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.6467806696891785,
"rewards/margins": 0.201984703540802,
"rewards/rejected": 0.44479596614837646,
"step": 112
},
{
"epoch": 0.7402597402597403,
"grad_norm": 31.164777041001017,
"learning_rate": 3.491933536445606e-07,
"logits/chosen": -1.3274401426315308,
"logits/rejected": -1.2342091798782349,
"logps/chosen": -153.36151123046875,
"logps/rejected": -195.70001220703125,
"loss": 0.6501,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.6609562635421753,
"rewards/margins": 0.36767348647117615,
"rewards/rejected": 0.2932826578617096,
"step": 114
},
{
"epoch": 0.7532467532467533,
"grad_norm": 28.597437073725573,
"learning_rate": 3.4449167535722664e-07,
"logits/chosen": -1.0330864191055298,
"logits/rejected": -1.1435695886611938,
"logps/chosen": -273.2784423828125,
"logps/rejected": -281.6010437011719,
"loss": 0.6587,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.6794790029525757,
"rewards/margins": 0.4547131061553955,
"rewards/rejected": 0.22476595640182495,
"step": 116
},
{
"epoch": 0.7662337662337663,
"grad_norm": 32.62513636994299,
"learning_rate": 3.3975067496189963e-07,
"logits/chosen": -1.2125685214996338,
"logits/rejected": -1.3594530820846558,
"logps/chosen": -317.5354919433594,
"logps/rejected": -315.6915588378906,
"loss": 0.65,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.8727169036865234,
"rewards/margins": 0.7781965136528015,
"rewards/rejected": 0.09452031552791595,
"step": 118
},
{
"epoch": 0.7792207792207793,
"grad_norm": 32.86814732954617,
"learning_rate": 3.349723253956541e-07,
"logits/chosen": -1.0453016757965088,
"logits/rejected": -1.1661120653152466,
"logps/chosen": -145.11253356933594,
"logps/rejected": -178.77328491210938,
"loss": 0.6635,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.7022942900657654,
"rewards/margins": 0.42999541759490967,
"rewards/rejected": 0.2722988426685333,
"step": 120
},
{
"epoch": 0.7922077922077922,
"grad_norm": 31.986096882532095,
"learning_rate": 3.3015861513818383e-07,
"logits/chosen": -1.1393004655838013,
"logits/rejected": -1.071953296661377,
"logps/chosen": -257.8083801269531,
"logps/rejected": -292.0323791503906,
"loss": 0.66,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.6557089686393738,
"rewards/margins": 0.2878965139389038,
"rewards/rejected": 0.3678124248981476,
"step": 122
},
{
"epoch": 0.8051948051948052,
"grad_norm": 32.75329554864317,
"learning_rate": 3.2531154738430853e-07,
"logits/chosen": -1.2495208978652954,
"logits/rejected": -1.1901376247406006,
"logps/chosen": -277.14208984375,
"logps/rejected": -308.6678161621094,
"loss": 0.6484,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.6612053513526917,
"rewards/margins": 0.33286944031715393,
"rewards/rejected": 0.32833582162857056,
"step": 124
},
{
"epoch": 0.8181818181818182,
"grad_norm": 35.313227849390415,
"learning_rate": 3.204331392103574e-07,
"logits/chosen": -1.0496852397918701,
"logits/rejected": -1.0938959121704102,
"logps/chosen": -183.056884765625,
"logps/rejected": -218.71853637695312,
"loss": 0.6607,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.7791882157325745,
"rewards/margins": 0.2896581292152405,
"rewards/rejected": 0.489530086517334,
"step": 126
},
{
"epoch": 0.8311688311688312,
"grad_norm": 32.66165401072115,
"learning_rate": 3.155254207347755e-07,
"logits/chosen": -1.154829740524292,
"logits/rejected": -1.0259711742401123,
"logps/chosen": -193.2096710205078,
"logps/rejected": -234.06759643554688,
"loss": 0.6539,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.7302087545394897,
"rewards/margins": 0.37150081992149353,
"rewards/rejected": 0.3587079346179962,
"step": 128
},
{
"epoch": 0.8441558441558441,
"grad_norm": 34.704917232369,
"learning_rate": 3.1059043427330314e-07,
"logits/chosen": -1.179085373878479,
"logits/rejected": -1.1466186046600342,
"logps/chosen": -290.7761535644531,
"logps/rejected": -247.35595703125,
"loss": 0.689,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.7207962274551392,
"rewards/margins": 0.21811310946941376,
"rewards/rejected": 0.5026831030845642,
"step": 130
},
{
"epoch": 0.8571428571428571,
"grad_norm": 31.672800837060226,
"learning_rate": 3.056302334890786e-07,
"logits/chosen": -1.0660974979400635,
"logits/rejected": -1.1430083513259888,
"logps/chosen": -186.87071228027344,
"logps/rejected": -191.1336669921875,
"loss": 0.6728,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.577528178691864,
"rewards/margins": 0.338672012090683,
"rewards/rejected": 0.23885619640350342,
"step": 132
},
{
"epoch": 0.8701298701298701,
"grad_norm": 31.658738700878413,
"learning_rate": 3.0064688253802024e-07,
"logits/chosen": -1.0814266204833984,
"logits/rejected": -1.141867756843567,
"logps/chosen": -219.94424438476562,
"logps/rejected": -249.24383544921875,
"loss": 0.6633,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.6220312118530273,
"rewards/margins": 0.4069012999534607,
"rewards/rejected": 0.21512994170188904,
"step": 134
},
{
"epoch": 0.8831168831168831,
"grad_norm": 33.16970393800331,
"learning_rate": 2.956424552098404e-07,
"logits/chosen": -1.313258171081543,
"logits/rejected": -1.3151941299438477,
"logps/chosen": -300.03961181640625,
"logps/rejected": -340.99566650390625,
"loss": 0.6998,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.7123932242393494,
"rewards/margins": 0.30548784136772156,
"rewards/rejected": 0.4069053828716278,
"step": 136
},
{
"epoch": 0.8961038961038961,
"grad_norm": 27.93166457525636,
"learning_rate": 2.9061903406505153e-07,
"logits/chosen": -1.141364574432373,
"logits/rejected": -1.0247657299041748,
"logps/chosen": -200.64974975585938,
"logps/rejected": -197.97593688964844,
"loss": 0.6424,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.630258321762085,
"rewards/margins": 0.3348933160305023,
"rewards/rejected": 0.29536497592926025,
"step": 138
},
{
"epoch": 0.9090909090909091,
"grad_norm": 28.870063032351457,
"learning_rate": 2.8557870956832133e-07,
"logits/chosen": -1.274742603302002,
"logits/rejected": -1.2089191675186157,
"logps/chosen": -197.0199737548828,
"logps/rejected": -220.10044860839844,
"loss": 0.642,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.6193188428878784,
"rewards/margins": 0.4841911494731903,
"rewards/rejected": 0.1351277232170105,
"step": 140
},
{
"epoch": 0.922077922077922,
"grad_norm": 34.83723284802987,
"learning_rate": 2.8052357921854e-07,
"logits/chosen": -1.0989036560058594,
"logits/rejected": -1.2587521076202393,
"logps/chosen": -167.8390350341797,
"logps/rejected": -161.0415496826172,
"loss": 0.6461,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.7375519275665283,
"rewards/margins": 0.4270685017108917,
"rewards/rejected": 0.3104833960533142,
"step": 142
},
{
"epoch": 0.935064935064935,
"grad_norm": 32.08059076203408,
"learning_rate": 2.754557466759589e-07,
"logits/chosen": -1.296328067779541,
"logits/rejected": -1.3102202415466309,
"logps/chosen": -209.74093627929688,
"logps/rejected": -220.9139862060547,
"loss": 0.6487,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.611607551574707,
"rewards/margins": 0.38875120878219604,
"rewards/rejected": 0.22285637259483337,
"step": 144
},
{
"epoch": 0.948051948051948,
"grad_norm": 30.710641933445263,
"learning_rate": 2.703773208867658e-07,
"logits/chosen": -1.2812106609344482,
"logits/rejected": -1.2686681747436523,
"logps/chosen": -289.8981018066406,
"logps/rejected": -298.4099426269531,
"loss": 0.6562,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.6197431087493896,
"rewards/margins": 0.27826988697052,
"rewards/rejected": 0.341473251581192,
"step": 146
},
{
"epoch": 0.961038961038961,
"grad_norm": 32.51693326041137,
"learning_rate": 2.652904152054607e-07,
"logits/chosen": -1.2882963418960571,
"logits/rejected": -1.234848141670227,
"logps/chosen": -212.38536071777344,
"logps/rejected": -235.0468292236328,
"loss": 0.657,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.8348413109779358,
"rewards/margins": 0.48668912053108215,
"rewards/rejected": 0.34815219044685364,
"step": 148
},
{
"epoch": 0.974025974025974,
"grad_norm": 33.13371475089395,
"learning_rate": 2.6019714651539645e-07,
"logits/chosen": -1.3393000364303589,
"logits/rejected": -1.3905009031295776,
"logps/chosen": -223.33636474609375,
"logps/rejected": -243.6895294189453,
"loss": 0.6381,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.5773805379867554,
"rewards/margins": 0.328535795211792,
"rewards/rejected": 0.24884477257728577,
"step": 150
},
{
"epoch": 0.987012987012987,
"grad_norm": 29.25513766134415,
"learning_rate": 2.550996343478514e-07,
"logits/chosen": -1.210328459739685,
"logits/rejected": -1.1693918704986572,
"logps/chosen": -266.2218933105469,
"logps/rejected": -318.95904541015625,
"loss": 0.6367,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.5575440526008606,
"rewards/margins": 0.3903830647468567,
"rewards/rejected": 0.1671610325574875,
"step": 152
},
{
"epoch": 1.0,
"grad_norm": 33.4683934031025,
"learning_rate": 2.5e-07,
"logits/chosen": -1.2191190719604492,
"logits/rejected": -1.215990424156189,
"logps/chosen": -267.1896667480469,
"logps/rejected": -297.6923828125,
"loss": 0.6461,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.5746477246284485,
"rewards/margins": 0.4244813323020935,
"rewards/rejected": 0.15016639232635498,
"step": 154
},
{
"epoch": 1.0129870129870129,
"grad_norm": 22.52614785258628,
"learning_rate": 2.449003656521487e-07,
"logits/chosen": -1.052141785621643,
"logits/rejected": -1.2538810968399048,
"logps/chosen": -262.6394958496094,
"logps/rejected": -265.5911560058594,
"loss": 0.5416,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.68734210729599,
"rewards/margins": 0.7715808749198914,
"rewards/rejected": -0.08423884212970734,
"step": 156
},
{
"epoch": 1.025974025974026,
"grad_norm": 17.71525419241131,
"learning_rate": 2.3980285348460363e-07,
"logits/chosen": -1.2752505540847778,
"logits/rejected": -1.297239065170288,
"logps/chosen": -180.13455200195312,
"logps/rejected": -194.86289978027344,
"loss": 0.5164,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.637496292591095,
"rewards/margins": 0.591891884803772,
"rewards/rejected": 0.045604437589645386,
"step": 158
},
{
"epoch": 1.0389610389610389,
"grad_norm": 17.624016800680035,
"learning_rate": 2.3470958479453937e-07,
"logits/chosen": -1.3482556343078613,
"logits/rejected": -1.2880206108093262,
"logps/chosen": -183.36322021484375,
"logps/rejected": -208.04705810546875,
"loss": 0.524,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.6870389580726624,
"rewards/margins": 0.8608755469322205,
"rewards/rejected": -0.17383655905723572,
"step": 160
},
{
"epoch": 1.051948051948052,
"grad_norm": 16.65756304879653,
"learning_rate": 2.296226791132342e-07,
"logits/chosen": -1.240028977394104,
"logits/rejected": -1.2343027591705322,
"logps/chosen": -233.16773986816406,
"logps/rejected": -250.71063232421875,
"loss": 0.5179,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.796766996383667,
"rewards/margins": 0.684018611907959,
"rewards/rejected": 0.11274835467338562,
"step": 162
},
{
"epoch": 1.0649350649350648,
"grad_norm": 18.048904117373002,
"learning_rate": 2.245442533240412e-07,
"logits/chosen": -1.2781399488449097,
"logits/rejected": -1.2129652500152588,
"logps/chosen": -244.93289184570312,
"logps/rejected": -284.34075927734375,
"loss": 0.5295,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.6863990426063538,
"rewards/margins": 0.871242344379425,
"rewards/rejected": -0.1848432570695877,
"step": 164
},
{
"epoch": 1.077922077922078,
"grad_norm": 20.34696734972406,
"learning_rate": 2.1947642078146e-07,
"logits/chosen": -1.322028398513794,
"logits/rejected": -1.3042186498641968,
"logps/chosen": -271.44500732421875,
"logps/rejected": -270.650146484375,
"loss": 0.5102,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.853551983833313,
"rewards/margins": 0.9862384796142578,
"rewards/rejected": -0.1326863318681717,
"step": 166
},
{
"epoch": 1.0909090909090908,
"grad_norm": 17.55986753162257,
"learning_rate": 2.1442129043167873e-07,
"logits/chosen": -1.328125,
"logits/rejected": -1.3073420524597168,
"logps/chosen": -206.6400909423828,
"logps/rejected": -220.58033752441406,
"loss": 0.5149,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.7431928515434265,
"rewards/margins": 0.8430217504501343,
"rewards/rejected": -0.09982895851135254,
"step": 168
},
{
"epoch": 1.103896103896104,
"grad_norm": 17.856674110170477,
"learning_rate": 2.0938096593494853e-07,
"logits/chosen": -0.9651075005531311,
"logits/rejected": -1.0858749151229858,
"logps/chosen": -185.41217041015625,
"logps/rejected": -206.42196655273438,
"loss": 0.5435,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.7554150819778442,
"rewards/margins": 0.6504155397415161,
"rewards/rejected": 0.10499954968690872,
"step": 170
},
{
"epoch": 1.1168831168831168,
"grad_norm": 19.78444270286199,
"learning_rate": 2.043575447901596e-07,
"logits/chosen": -1.22752845287323,
"logits/rejected": -1.306286334991455,
"logps/chosen": -281.8180236816406,
"logps/rejected": -299.3645935058594,
"loss": 0.5278,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.7911855578422546,
"rewards/margins": 0.6221764087677002,
"rewards/rejected": 0.16900911927223206,
"step": 172
},
{
"epoch": 1.12987012987013,
"grad_norm": 20.24750613090253,
"learning_rate": 1.9935311746197976e-07,
"logits/chosen": -1.0992745161056519,
"logits/rejected": -1.0037593841552734,
"logps/chosen": -219.87008666992188,
"logps/rejected": -246.12489318847656,
"loss": 0.5273,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.8513480424880981,
"rewards/margins": 0.9342952370643616,
"rewards/rejected": -0.08294717967510223,
"step": 174
},
{
"epoch": 1.1428571428571428,
"grad_norm": 19.866822049463956,
"learning_rate": 1.9436976651092142e-07,
"logits/chosen": -1.3867590427398682,
"logits/rejected": -1.2858481407165527,
"logps/chosen": -258.7950439453125,
"logps/rejected": -296.06842041015625,
"loss": 0.5338,
"rewards/accuracies": 0.90625,
"rewards/chosen": 0.7717873454093933,
"rewards/margins": 0.8829994201660156,
"rewards/rejected": -0.11121205985546112,
"step": 176
},
{
"epoch": 1.155844155844156,
"grad_norm": 18.093341348498807,
"learning_rate": 1.8940956572669692e-07,
"logits/chosen": -1.0915521383285522,
"logits/rejected": -1.1871168613433838,
"logps/chosen": -247.3667755126953,
"logps/rejected": -254.37728881835938,
"loss": 0.4999,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.7344910502433777,
"rewards/margins": 0.7577481865882874,
"rewards/rejected": -0.023257076740264893,
"step": 178
},
{
"epoch": 1.1688311688311688,
"grad_norm": 19.525075850508752,
"learning_rate": 1.8447457926522452e-07,
"logits/chosen": -0.9950531721115112,
"logits/rejected": -0.994107186794281,
"logps/chosen": -259.6279296875,
"logps/rejected": -308.6029052734375,
"loss": 0.5492,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.7692892551422119,
"rewards/margins": 0.9317154884338379,
"rewards/rejected": -0.16242632269859314,
"step": 180
},
{
"epoch": 1.1818181818181819,
"grad_norm": 19.472127981634152,
"learning_rate": 1.7956686078964255e-07,
"logits/chosen": -1.3088796138763428,
"logits/rejected": -1.4440947771072388,
"logps/chosen": -304.2322082519531,
"logps/rejected": -302.7728271484375,
"loss": 0.5274,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.815383791923523,
"rewards/margins": 0.9992111921310425,
"rewards/rejected": -0.18382729589939117,
"step": 182
},
{
"epoch": 1.1948051948051948,
"grad_norm": 17.961643519105607,
"learning_rate": 1.7468845261569147e-07,
"logits/chosen": -1.3604259490966797,
"logits/rejected": -1.1473455429077148,
"logps/chosen": -278.00860595703125,
"logps/rejected": -333.7747802734375,
"loss": 0.5072,
"rewards/accuracies": 0.90625,
"rewards/chosen": 0.8540161848068237,
"rewards/margins": 0.9145561456680298,
"rewards/rejected": -0.060539960861206055,
"step": 184
},
{
"epoch": 1.2077922077922079,
"grad_norm": 17.34162227747004,
"learning_rate": 1.698413848618161e-07,
"logits/chosen": -1.2001099586486816,
"logits/rejected": -1.1247773170471191,
"logps/chosen": -219.12808227539062,
"logps/rejected": -240.87974548339844,
"loss": 0.5124,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.792206883430481,
"rewards/margins": 0.9953896403312683,
"rewards/rejected": -0.20318272709846497,
"step": 186
},
{
"epoch": 1.2207792207792207,
"grad_norm": 18.899208870167122,
"learning_rate": 1.6502767460434585e-07,
"logits/chosen": -1.1938526630401611,
"logits/rejected": -1.2088103294372559,
"logps/chosen": -343.2425537109375,
"logps/rejected": -352.4480895996094,
"loss": 0.5276,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.7310408353805542,
"rewards/margins": 0.948591947555542,
"rewards/rejected": -0.21755114197731018,
"step": 188
},
{
"epoch": 1.2337662337662338,
"grad_norm": 21.1592051257712,
"learning_rate": 1.602493250381003e-07,
"logits/chosen": -1.2564481496810913,
"logits/rejected": -1.3384077548980713,
"logps/chosen": -211.84091186523438,
"logps/rejected": -175.0691375732422,
"loss": 0.5227,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.6941075325012207,
"rewards/margins": 0.8241455554962158,
"rewards/rejected": -0.13003802299499512,
"step": 190
},
{
"epoch": 1.2467532467532467,
"grad_norm": 17.37996372066179,
"learning_rate": 1.555083246427734e-07,
"logits/chosen": -1.1990139484405518,
"logits/rejected": -1.3094348907470703,
"logps/chosen": -227.32859802246094,
"logps/rejected": -240.32440185546875,
"loss": 0.5063,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.8420521020889282,
"rewards/margins": 0.7185032367706299,
"rewards/rejected": 0.12354880571365356,
"step": 192
},
{
"epoch": 1.2597402597402598,
"grad_norm": 19.348154887947697,
"learning_rate": 1.5080664635543932e-07,
"logits/chosen": -1.2713487148284912,
"logits/rejected": -1.1336112022399902,
"logps/chosen": -217.08712768554688,
"logps/rejected": -286.9374084472656,
"loss": 0.5347,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.9916350245475769,
"rewards/margins": 0.9090840220451355,
"rewards/rejected": 0.08255089819431305,
"step": 194
},
{
"epoch": 1.2727272727272727,
"grad_norm": 18.399160019731372,
"learning_rate": 1.461462467495284e-07,
"logits/chosen": -1.1585338115692139,
"logits/rejected": -1.1779534816741943,
"logps/chosen": -184.70875549316406,
"logps/rejected": -199.60733032226562,
"loss": 0.5194,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.6531062722206116,
"rewards/margins": 0.5796053409576416,
"rewards/rejected": 0.07350096851587296,
"step": 196
},
{
"epoch": 1.2857142857142856,
"grad_norm": 17.142926149258614,
"learning_rate": 1.4152906522061047e-07,
"logits/chosen": -1.145853877067566,
"logits/rejected": -1.1914734840393066,
"logps/chosen": -244.29345703125,
"logps/rejected": -252.35826110839844,
"loss": 0.5101,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.8408298492431641,
"rewards/margins": 0.9886577725410461,
"rewards/rejected": -0.14782798290252686,
"step": 198
},
{
"epoch": 1.2987012987012987,
"grad_norm": 19.542604645193908,
"learning_rate": 1.369570231793286e-07,
"logits/chosen": -1.2806802988052368,
"logits/rejected": -1.2916889190673828,
"logps/chosen": -233.01324462890625,
"logps/rejected": -256.65887451171875,
"loss": 0.515,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.8423460721969604,
"rewards/margins": 0.8902460336685181,
"rewards/rejected": -0.04789996147155762,
"step": 200
},
{
"epoch": 1.3116883116883118,
"grad_norm": 18.81859489288187,
"learning_rate": 1.3243202325181578e-07,
"logits/chosen": -1.0917601585388184,
"logits/rejected": -1.1492509841918945,
"logps/chosen": -241.70211791992188,
"logps/rejected": -247.6362762451172,
"loss": 0.5173,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.8184989094734192,
"rewards/margins": 0.8419989347457886,
"rewards/rejected": -0.023499924689531326,
"step": 202
},
{
"epoch": 1.3246753246753247,
"grad_norm": 21.98362763457428,
"learning_rate": 1.2795594848792974e-07,
"logits/chosen": -1.2968580722808838,
"logits/rejected": -1.3090989589691162,
"logps/chosen": -209.9777374267578,
"logps/rejected": -249.19898986816406,
"loss": 0.5144,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.6729121208190918,
"rewards/margins": 0.8331139087677002,
"rewards/rejected": -0.16020171344280243,
"step": 204
},
{
"epoch": 1.3376623376623376,
"grad_norm": 19.163927573547067,
"learning_rate": 1.2353066157763304e-07,
"logits/chosen": -1.165097713470459,
"logits/rejected": -1.0794737339019775,
"logps/chosen": -243.25994873046875,
"logps/rejected": -296.20458984375,
"loss": 0.5074,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.8912444710731506,
"rewards/margins": 1.0708703994750977,
"rewards/rejected": -0.17962592840194702,
"step": 206
},
{
"epoch": 1.3506493506493507,
"grad_norm": 19.327223964730898,
"learning_rate": 1.1915800407584702e-07,
"logits/chosen": -1.2650034427642822,
"logits/rejected": -1.260176181793213,
"logps/chosen": -297.4647216796875,
"logps/rejected": -283.3460998535156,
"loss": 0.5184,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.607912540435791,
"rewards/margins": 0.5631698369979858,
"rewards/rejected": 0.04474277421832085,
"step": 208
},
{
"epoch": 1.3636363636363638,
"grad_norm": 17.1533818598118,
"learning_rate": 1.1483979563610069e-07,
"logits/chosen": -1.3990474939346313,
"logits/rejected": -1.1992498636245728,
"logps/chosen": -179.99732971191406,
"logps/rejected": -254.8299560546875,
"loss": 0.5,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.6216685771942139,
"rewards/margins": 0.651849627494812,
"rewards/rejected": -0.03018118068575859,
"step": 210
},
{
"epoch": 1.3766233766233766,
"grad_norm": 18.960132927151744,
"learning_rate": 1.1057783325329267e-07,
"logits/chosen": -1.1669042110443115,
"logits/rejected": -1.221852421760559,
"logps/chosen": -292.6320495605469,
"logps/rejected": -333.9742736816406,
"loss": 0.505,
"rewards/accuracies": 0.90625,
"rewards/chosen": 0.997809648513794,
"rewards/margins": 1.133697748184204,
"rewards/rejected": -0.13588815927505493,
"step": 212
},
{
"epoch": 1.3896103896103895,
"grad_norm": 16.02074293153386,
"learning_rate": 1.0637389051588425e-07,
"logits/chosen": -1.1035025119781494,
"logits/rejected": -1.113924503326416,
"logps/chosen": -217.35040283203125,
"logps/rejected": -243.62997436523438,
"loss": 0.5012,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.7329657673835754,
"rewards/margins": 0.827349066734314,
"rewards/rejected": -0.09438329190015793,
"step": 214
},
{
"epoch": 1.4025974025974026,
"grad_norm": 16.742187965244536,
"learning_rate": 1.0222971686783089e-07,
"logits/chosen": -1.392643690109253,
"logits/rejected": -1.4035465717315674,
"logps/chosen": -252.4545440673828,
"logps/rejected": -249.23336791992188,
"loss": 0.5069,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.7851105332374573,
"rewards/margins": 0.8927165865898132,
"rewards/rejected": -0.10760608315467834,
"step": 216
},
{
"epoch": 1.4155844155844157,
"grad_norm": 20.851477095001684,
"learning_rate": 9.814703688056319e-08,
"logits/chosen": -1.1926751136779785,
"logits/rejected": -1.2301442623138428,
"logps/chosen": -165.44703674316406,
"logps/rejected": -194.3583221435547,
"loss": 0.5326,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.8975037336349487,
"rewards/margins": 0.9634904265403748,
"rewards/rejected": -0.065986767411232,
"step": 218
},
{
"epoch": 1.4285714285714286,
"grad_norm": 16.315287900746362,
"learning_rate": 9.412754953531663e-08,
"logits/chosen": -1.3106112480163574,
"logits/rejected": -1.3371086120605469,
"logps/chosen": -255.00607299804688,
"logps/rejected": -258.26361083984375,
"loss": 0.5037,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.6847482919692993,
"rewards/margins": 0.697127103805542,
"rewards/rejected": -0.012378760613501072,
"step": 220
},
{
"epoch": 1.4415584415584415,
"grad_norm": 15.292540532901763,
"learning_rate": 9.017292751611219e-08,
"logits/chosen": -0.9778515100479126,
"logits/rejected": -0.9790754318237305,
"logps/chosen": -189.63674926757812,
"logps/rejected": -222.9846649169922,
"loss": 0.5021,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.6293007135391235,
"rewards/margins": 0.9083631038665771,
"rewards/rejected": -0.27906233072280884,
"step": 222
},
{
"epoch": 1.4545454545454546,
"grad_norm": 17.14845228574236,
"learning_rate": 8.628481651367875e-08,
"logits/chosen": -1.227216362953186,
"logits/rejected": -1.306528091430664,
"logps/chosen": -212.0201873779297,
"logps/rejected": -204.0389404296875,
"loss": 0.5137,
"rewards/accuracies": 0.90625,
"rewards/chosen": 0.9179770946502686,
"rewards/margins": 0.7093281745910645,
"rewards/rejected": 0.20864906907081604,
"step": 224
},
{
"epoch": 1.4675324675324675,
"grad_norm": 18.007638315726304,
"learning_rate": 8.246483454061015e-08,
"logits/chosen": -1.3051916360855103,
"logits/rejected": -1.121058702468872,
"logps/chosen": -251.83587646484375,
"logps/rejected": -286.392822265625,
"loss": 0.5128,
"rewards/accuracies": 0.96875,
"rewards/chosen": 0.7694352269172668,
"rewards/margins": 1.2012625932693481,
"rewards/rejected": -0.43182751536369324,
"step": 226
},
{
"epoch": 1.4805194805194806,
"grad_norm": 16.89635894695866,
"learning_rate": 7.871457125803896e-08,
"logits/chosen": -1.3657732009887695,
"logits/rejected": -1.4764338731765747,
"logps/chosen": -242.94061279296875,
"logps/rejected": -254.3301239013672,
"loss": 0.4956,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.9610437750816345,
"rewards/margins": 1.0195040702819824,
"rewards/rejected": -0.058460310101509094,
"step": 228
},
{
"epoch": 1.4935064935064934,
"grad_norm": 18.122088719985634,
"learning_rate": 7.503558731410958e-08,
"logits/chosen": -1.1523171663284302,
"logits/rejected": -1.183262586593628,
"logps/chosen": -255.1221466064453,
"logps/rejected": -274.8419494628906,
"loss": 0.5169,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.6205594539642334,
"rewards/margins": 0.730150580406189,
"rewards/rejected": -0.10959106683731079,
"step": 230
},
{
"epoch": 1.5064935064935066,
"grad_norm": 17.706521703256886,
"learning_rate": 7.14294136945241e-08,
"logits/chosen": -1.226336121559143,
"logits/rejected": -1.068296194076538,
"logps/chosen": -210.2167510986328,
"logps/rejected": -264.81085205078125,
"loss": 0.5089,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.7191243171691895,
"rewards/margins": 1.1839776039123535,
"rewards/rejected": -0.46485334634780884,
"step": 232
},
{
"epoch": 1.5194805194805194,
"grad_norm": 16.66406313160021,
"learning_rate": 6.789755108543274e-08,
"logits/chosen": -1.228325605392456,
"logits/rejected": -1.3681098222732544,
"logps/chosen": -172.350830078125,
"logps/rejected": -175.53936767578125,
"loss": 0.5051,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.6207537651062012,
"rewards/margins": 0.7215192317962646,
"rewards/rejected": -0.10076545178890228,
"step": 234
},
{
"epoch": 1.5324675324675323,
"grad_norm": 18.51693060533818,
"learning_rate": 6.444146924893251e-08,
"logits/chosen": -1.101006269454956,
"logits/rejected": -1.0755804777145386,
"logps/chosen": -233.41761779785156,
"logps/rejected": -264.3063659667969,
"loss": 0.5173,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.8273410797119141,
"rewards/margins": 1.0633716583251953,
"rewards/rejected": -0.23603063821792603,
"step": 236
},
{
"epoch": 1.5454545454545454,
"grad_norm": 18.052624843204704,
"learning_rate": 6.106260641143546e-08,
"logits/chosen": -1.1633551120758057,
"logits/rejected": -1.1994410753250122,
"logps/chosen": -269.3063659667969,
"logps/rejected": -271.7884216308594,
"loss": 0.4853,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.6259198188781738,
"rewards/margins": 0.9741422533988953,
"rewards/rejected": -0.34822240471839905,
"step": 238
},
{
"epoch": 1.5584415584415585,
"grad_norm": 24.12818089831554,
"learning_rate": 5.776236866515946e-08,
"logits/chosen": -1.0952610969543457,
"logits/rejected": -1.2616946697235107,
"logps/chosen": -209.5765380859375,
"logps/rejected": -185.8994140625,
"loss": 0.5069,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.6148033738136292,
"rewards/margins": 0.7846094369888306,
"rewards/rejected": -0.16980606317520142,
"step": 240
},
{
"epoch": 1.5714285714285714,
"grad_norm": 17.25585157151766,
"learning_rate": 5.454212938299255e-08,
"logits/chosen": -1.220323920249939,
"logits/rejected": -1.1233211755752563,
"logps/chosen": -210.1920623779297,
"logps/rejected": -257.0649719238281,
"loss": 0.4983,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.8331396579742432,
"rewards/margins": 0.8172454833984375,
"rewards/rejected": 0.01589421182870865,
"step": 242
},
{
"epoch": 1.5844155844155843,
"grad_norm": 18.694264506215937,
"learning_rate": 5.140322864697183e-08,
"logits/chosen": -1.2713161706924438,
"logits/rejected": -1.1681023836135864,
"logps/chosen": -250.68112182617188,
"logps/rejected": -248.07406616210938,
"loss": 0.5152,
"rewards/accuracies": 0.90625,
"rewards/chosen": 0.8290749192237854,
"rewards/margins": 0.7929165363311768,
"rewards/rejected": 0.036158446222543716,
"step": 244
},
{
"epoch": 1.5974025974025974,
"grad_norm": 18.951231695832167,
"learning_rate": 4.8346972690617494e-08,
"logits/chosen": -1.2086520195007324,
"logits/rejected": -1.2194980382919312,
"logps/chosen": -237.5225067138672,
"logps/rejected": -294.5182800292969,
"loss": 0.4988,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.823986291885376,
"rewards/margins": 1.3896714448928833,
"rewards/rejected": -0.5656850934028625,
"step": 246
},
{
"epoch": 1.6103896103896105,
"grad_norm": 17.838639398070846,
"learning_rate": 4.53746333553516e-08,
"logits/chosen": -1.3609139919281006,
"logits/rejected": -1.3907623291015625,
"logps/chosen": -254.64718627929688,
"logps/rejected": -267.22265625,
"loss": 0.499,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.7264419794082642,
"rewards/margins": 0.9065679311752319,
"rewards/rejected": -0.18012598156929016,
"step": 248
},
{
"epoch": 1.6233766233766234,
"grad_norm": 15.282492364953745,
"learning_rate": 4.248744756122985e-08,
"logits/chosen": -1.4239810705184937,
"logits/rejected": -1.3679332733154297,
"logps/chosen": -231.71435546875,
"logps/rejected": -287.30572509765625,
"loss": 0.5073,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.5901214480400085,
"rewards/margins": 0.7662074565887451,
"rewards/rejected": -0.17608599364757538,
"step": 250
},
{
"epoch": 1.6363636363636362,
"grad_norm": 18.90237932042577,
"learning_rate": 3.968661679220467e-08,
"logits/chosen": -1.0735975503921509,
"logits/rejected": -1.167972207069397,
"logps/chosen": -188.3458709716797,
"logps/rejected": -179.90293884277344,
"loss": 0.5256,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.6942843794822693,
"rewards/margins": 0.7579864859580994,
"rewards/rejected": -0.06370209902524948,
"step": 252
},
{
"epoch": 1.6493506493506493,
"grad_norm": 17.45584095837454,
"learning_rate": 3.6973306596135873e-08,
"logits/chosen": -1.3392971754074097,
"logits/rejected": -1.358009696006775,
"logps/chosen": -254.31295776367188,
"logps/rejected": -254.20733642578125,
"loss": 0.502,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.7509973645210266,
"rewards/margins": 0.8210946917533875,
"rewards/rejected": -0.07009733468294144,
"step": 254
},
{
"epoch": 1.6623376623376624,
"grad_norm": 19.754012599719967,
"learning_rate": 3.43486460997548e-08,
"logits/chosen": -1.2106963396072388,
"logits/rejected": -1.2819626331329346,
"logps/chosen": -253.831787109375,
"logps/rejected": -303.4576721191406,
"loss": 0.5023,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.732483446598053,
"rewards/margins": 1.113276720046997,
"rewards/rejected": -0.38079336285591125,
"step": 256
},
{
"epoch": 1.6753246753246753,
"grad_norm": 18.3258569948234,
"learning_rate": 3.1813727538785943e-08,
"logits/chosen": -1.3981492519378662,
"logits/rejected": -1.4056671857833862,
"logps/chosen": -356.7002868652344,
"logps/rejected": -362.1513366699219,
"loss": 0.494,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.6453557014465332,
"rewards/margins": 1.1145414113998413,
"rewards/rejected": -0.46918565034866333,
"step": 258
},
{
"epoch": 1.6883116883116882,
"grad_norm": 19.670963229677074,
"learning_rate": 2.936960580341971e-08,
"logits/chosen": -1.1725897789001465,
"logits/rejected": -1.261063575744629,
"logps/chosen": -216.58990478515625,
"logps/rejected": -220.63986206054688,
"loss": 0.5231,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.7476328015327454,
"rewards/margins": 0.7463090419769287,
"rewards/rejected": 0.0013237213715910912,
"step": 260
},
{
"epoch": 1.7012987012987013,
"grad_norm": 17.983233221990766,
"learning_rate": 2.701729799932653e-08,
"logits/chosen": -1.3052499294281006,
"logits/rejected": -1.333134412765503,
"logps/chosen": -277.6193542480469,
"logps/rejected": -287.4527893066406,
"loss": 0.4954,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.726482629776001,
"rewards/margins": 0.9591711759567261,
"rewards/rejected": -0.23268845677375793,
"step": 262
},
{
"epoch": 1.7142857142857144,
"grad_norm": 17.467592052173433,
"learning_rate": 2.475778302439524e-08,
"logits/chosen": -1.2939479351043701,
"logits/rejected": -1.3279248476028442,
"logps/chosen": -207.76170349121094,
"logps/rejected": -233.1382598876953,
"loss": 0.472,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.8571805953979492,
"rewards/margins": 0.9823651313781738,
"rewards/rejected": -0.12518461048603058,
"step": 264
},
{
"epoch": 1.7272727272727273,
"grad_norm": 16.46335190300962,
"learning_rate": 2.259200116137039e-08,
"logits/chosen": -1.1292874813079834,
"logits/rejected": -1.2274911403656006,
"logps/chosen": -277.028564453125,
"logps/rejected": -308.0982971191406,
"loss": 0.4976,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.9631680846214294,
"rewards/margins": 1.1252551078796387,
"rewards/rejected": -0.16208705306053162,
"step": 266
},
{
"epoch": 1.7402597402597402,
"grad_norm": 18.56546944179356,
"learning_rate": 2.0520853686560175e-08,
"logits/chosen": -1.2864351272583008,
"logits/rejected": -1.2748289108276367,
"logps/chosen": -203.02589416503906,
"logps/rejected": -217.65682983398438,
"loss": 0.5072,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.5683714747428894,
"rewards/margins": 0.9107145667076111,
"rewards/rejected": -0.3423430323600769,
"step": 268
},
{
"epoch": 1.7532467532467533,
"grad_norm": 16.59250739344183,
"learning_rate": 1.854520249477551e-08,
"logits/chosen": -1.3317527770996094,
"logits/rejected": -1.3174333572387695,
"logps/chosen": -205.539306640625,
"logps/rejected": -236.86062622070312,
"loss": 0.487,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.8909409642219543,
"rewards/margins": 1.0206615924835205,
"rewards/rejected": -0.1297205686569214,
"step": 270
},
{
"epoch": 1.7662337662337664,
"grad_norm": 18.312480737747663,
"learning_rate": 1.666586974065831e-08,
"logits/chosen": -1.1755964756011963,
"logits/rejected": -1.2511848211288452,
"logps/chosen": -244.85443115234375,
"logps/rejected": -236.78126525878906,
"loss": 0.5334,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.5452998876571655,
"rewards/margins": 0.6891751885414124,
"rewards/rejected": -0.14387527108192444,
"step": 272
},
{
"epoch": 1.7792207792207793,
"grad_norm": 18.481484735169893,
"learning_rate": 1.4883637496547141e-08,
"logits/chosen": -1.2515636682510376,
"logits/rejected": -1.2902557849884033,
"logps/chosen": -254.2708740234375,
"logps/rejected": -258.9367980957031,
"loss": 0.5094,
"rewards/accuracies": 0.90625,
"rewards/chosen": 0.7975621223449707,
"rewards/margins": 1.1254695653915405,
"rewards/rejected": -0.327907532453537,
"step": 274
},
{
"epoch": 1.7922077922077921,
"grad_norm": 18.685754884224313,
"learning_rate": 1.3199247427022526e-08,
"logits/chosen": -1.2295678853988647,
"logits/rejected": -1.2680904865264893,
"logps/chosen": -196.48304748535156,
"logps/rejected": -207.93121337890625,
"loss": 0.5151,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.8263696432113647,
"rewards/margins": 0.9237637519836426,
"rewards/rejected": -0.09739402681589127,
"step": 276
},
{
"epoch": 1.8051948051948052,
"grad_norm": 18.498386316775253,
"learning_rate": 1.16134004802681e-08,
"logits/chosen": -1.0846458673477173,
"logits/rejected": -0.9547269940376282,
"logps/chosen": -231.32818603515625,
"logps/rejected": -263.1034851074219,
"loss": 0.5058,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.6925534605979919,
"rewards/margins": 1.1582170724868774,
"rewards/rejected": -0.4656636714935303,
"step": 278
},
{
"epoch": 1.8181818181818183,
"grad_norm": 17.340516668358195,
"learning_rate": 1.0126756596375685e-08,
"logits/chosen": -1.3053959608078003,
"logits/rejected": -1.2687203884124756,
"logps/chosen": -206.0025177001953,
"logps/rejected": -235.99029541015625,
"loss": 0.499,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.8264460563659668,
"rewards/margins": 0.9402331113815308,
"rewards/rejected": -0.11378702521324158,
"step": 280
},
{
"epoch": 1.8311688311688312,
"grad_norm": 20.777883031718197,
"learning_rate": 8.739934432715034e-09,
"logits/chosen": -1.2384371757507324,
"logits/rejected": -1.3831499814987183,
"logps/chosen": -226.1611785888672,
"logps/rejected": -219.2269287109375,
"loss": 0.5138,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.514397382736206,
"rewards/margins": 0.7629111409187317,
"rewards/rejected": -0.24851378798484802,
"step": 282
},
{
"epoch": 1.844155844155844,
"grad_norm": 18.170821830096596,
"learning_rate": 7.453511106483901e-09,
"logits/chosen": -1.4039347171783447,
"logits/rejected": -1.4059028625488281,
"logps/chosen": -269.5606689453125,
"logps/rejected": -254.4373779296875,
"loss": 0.5001,
"rewards/accuracies": 0.90625,
"rewards/chosen": 0.7399085164070129,
"rewards/margins": 1.1216527223587036,
"rewards/rejected": -0.3817441165447235,
"step": 284
},
{
"epoch": 1.8571428571428572,
"grad_norm": 21.929296495653098,
"learning_rate": 6.268021954544095e-09,
"logits/chosen": -1.2451683282852173,
"logits/rejected": -1.2341443300247192,
"logps/chosen": -249.87173461914062,
"logps/rejected": -256.73297119140625,
"loss": 0.5052,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.665867805480957,
"rewards/margins": 0.8860350251197815,
"rewards/rejected": -0.22016723453998566,
"step": 286
},
{
"epoch": 1.87012987012987,
"grad_norm": 17.66627473591946,
"learning_rate": 5.1839603106447475e-09,
"logits/chosen": -1.1848328113555908,
"logits/rejected": -1.2601234912872314,
"logps/chosen": -274.2748718261719,
"logps/rejected": -276.2279968261719,
"loss": 0.5143,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.40582898259162903,
"rewards/margins": 0.7143059372901917,
"rewards/rejected": -0.3084769546985626,
"step": 288
},
{
"epoch": 1.883116883116883,
"grad_norm": 17.62107555046559,
"learning_rate": 4.201777300124249e-09,
"logits/chosen": -1.3278290033340454,
"logits/rejected": -1.412712574005127,
"logps/chosen": -232.41680908203125,
"logps/rejected": -261.15911865234375,
"loss": 0.4936,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.6634901165962219,
"rewards/margins": 1.093658685684204,
"rewards/rejected": -0.43016865849494934,
"step": 290
},
{
"epoch": 1.896103896103896,
"grad_norm": 15.858708451261625,
"learning_rate": 3.3218816521777827e-09,
"logits/chosen": -1.1245518922805786,
"logits/rejected": -1.1566898822784424,
"logps/chosen": -204.53695678710938,
"logps/rejected": -195.2686309814453,
"loss": 0.5017,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.6790772080421448,
"rewards/margins": 0.7251225709915161,
"rewards/rejected": -0.04604536294937134,
"step": 292
},
{
"epoch": 1.9090909090909092,
"grad_norm": 18.1779116105005,
"learning_rate": 2.5446395297668287e-09,
"logits/chosen": -1.2490640878677368,
"logits/rejected": -1.3185060024261475,
"logps/chosen": -197.9644775390625,
"logps/rejected": -211.1698455810547,
"loss": 0.503,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.6239749789237976,
"rewards/margins": 1.010934591293335,
"rewards/rejected": -0.38695967197418213,
"step": 294
},
{
"epoch": 1.922077922077922,
"grad_norm": 18.8639806757367,
"learning_rate": 1.870374377243078e-09,
"logits/chosen": -1.2287623882293701,
"logits/rejected": -1.214994192123413,
"logps/chosen": -183.43939208984375,
"logps/rejected": -200.90785217285156,
"loss": 0.5078,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.3988313674926758,
"rewards/margins": 0.920490026473999,
"rewards/rejected": -0.5216587781906128,
"step": 296
},
{
"epoch": 1.935064935064935,
"grad_norm": 19.016439869677555,
"learning_rate": 1.2993667857489898e-09,
"logits/chosen": -1.365774393081665,
"logits/rejected": -1.4144957065582275,
"logps/chosen": -299.92120361328125,
"logps/rejected": -313.76287841796875,
"loss": 0.492,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.790663480758667,
"rewards/margins": 1.3730117082595825,
"rewards/rejected": -0.5823482275009155,
"step": 298
},
{
"epoch": 1.948051948051948,
"grad_norm": 14.859752452370639,
"learning_rate": 8.318543764516961e-10,
"logits/chosen": -1.3680145740509033,
"logits/rejected": -1.3740514516830444,
"logps/chosen": -264.17486572265625,
"logps/rejected": -253.31149291992188,
"loss": 0.5,
"rewards/accuracies": 0.90625,
"rewards/chosen": 0.784439206123352,
"rewards/margins": 1.083054542541504,
"rewards/rejected": -0.29861530661582947,
"step": 300
},
{
"epoch": 1.9610389610389611,
"grad_norm": 18.397492445789904,
"learning_rate": 4.680317016582669e-10,
"logits/chosen": -1.3651466369628906,
"logits/rejected": -1.2224037647247314,
"logps/chosen": -203.9071502685547,
"logps/rejected": -270.9750061035156,
"loss": 0.4975,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.7834948301315308,
"rewards/margins": 0.9753363132476807,
"rewards/rejected": -0.1918414682149887,
"step": 302
},
{
"epoch": 1.974025974025974,
"grad_norm": 17.010218612769055,
"learning_rate": 2.0805016385427865e-10,
"logits/chosen": -1.2519904375076294,
"logits/rejected": -1.3218713998794556,
"logps/chosen": -171.00653076171875,
"logps/rejected": -186.12115478515625,
"loss": 0.5156,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.7086281776428223,
"rewards/margins": 0.9142274856567383,
"rewards/rejected": -0.20559939742088318,
"step": 304
},
{
"epoch": 1.987012987012987,
"grad_norm": 20.168300406467708,
"learning_rate": 5.2017952698379943e-11,
"logits/chosen": -1.107275128364563,
"logits/rejected": -1.237258791923523,
"logps/chosen": -269.6531982421875,
"logps/rejected": -244.50665283203125,
"loss": 0.5078,
"rewards/accuracies": 0.90625,
"rewards/chosen": 0.7939411997795105,
"rewards/margins": 1.0421514511108398,
"rewards/rejected": -0.24821028113365173,
"step": 306
},
{
"epoch": 2.0,
"grad_norm": 19.11260041379628,
"learning_rate": 0.0,
"logits/chosen": -1.115605115890503,
"logits/rejected": -1.0857875347137451,
"logps/chosen": -228.82167053222656,
"logps/rejected": -256.9181823730469,
"loss": 0.5006,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.7351489067077637,
"rewards/margins": 0.7865853905677795,
"rewards/rejected": -0.051436468958854675,
"step": 308
}
],
"logging_steps": 2,
"max_steps": 308,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}