{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 80000000000,
"global_step": 1718,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 47.75,
"learning_rate": 2.9069767441860468e-08,
"logits/chosen": -0.30066978931427,
"logits/rejected": 0.05270648002624512,
"logps/chosen": -292.3870544433594,
"logps/rejected": -519.2880859375,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": 0.0,
"rewards/student_margin": 0.0,
"rewards/teacher_margin": 0.0,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 50.75,
"learning_rate": 2.906976744186047e-07,
"logits/chosen": -0.37918758392333984,
"logits/rejected": -0.042655572295188904,
"logps/chosen": -231.2487030029297,
"logps/rejected": -516.9002685546875,
"loss": 0.726,
"rewards/accuracies": 0.3333333432674408,
"rewards/chosen": 0.05387793853878975,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": 0.07821387052536011,
"rewards/student_margin": -0.024335933849215508,
"rewards/teacher_margin": 0.0,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 42.75,
"learning_rate": 5.813953488372094e-07,
"logits/chosen": -0.7622300386428833,
"logits/rejected": 0.022924324497580528,
"logps/chosen": -245.8932647705078,
"logps/rejected": -559.360595703125,
"loss": 0.7117,
"rewards/accuracies": 0.4333333373069763,
"rewards/chosen": 0.04971719905734062,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -0.02675577439367771,
"rewards/student_margin": 0.07647298276424408,
"rewards/teacher_margin": 0.0,
"step": 20
},
{
"epoch": 0.02,
"grad_norm": 34.25,
"learning_rate": 8.720930232558141e-07,
"logits/chosen": -0.6480653285980225,
"logits/rejected": -0.05629728361964226,
"logps/chosen": -207.52798461914062,
"logps/rejected": -569.8907470703125,
"loss": 0.6704,
"rewards/accuracies": 0.6666666865348816,
"rewards/chosen": -0.04712574928998947,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -0.31786206364631653,
"rewards/student_margin": 0.27073630690574646,
"rewards/teacher_margin": 0.0,
"step": 30
},
{
"epoch": 0.02,
"grad_norm": 33.25,
"learning_rate": 1.1627906976744188e-06,
"logits/chosen": -0.46851086616516113,
"logits/rejected": 0.08061075955629349,
"logps/chosen": -213.6239013671875,
"logps/rejected": -531.3410034179688,
"loss": 0.5929,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -0.09198583662509918,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -0.5451609492301941,
"rewards/student_margin": 0.4531751275062561,
"rewards/teacher_margin": 0.0,
"step": 40
},
{
"epoch": 0.03,
"grad_norm": 28.125,
"learning_rate": 1.4534883720930234e-06,
"logits/chosen": -0.6713495850563049,
"logits/rejected": -0.03319794684648514,
"logps/chosen": -178.42832946777344,
"logps/rejected": -427.0347595214844,
"loss": 0.5249,
"rewards/accuracies": 0.5666666626930237,
"rewards/chosen": -0.16660943627357483,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -0.6883589029312134,
"rewards/student_margin": 0.5217493772506714,
"rewards/teacher_margin": 0.0,
"step": 50
},
{
"epoch": 0.03,
"grad_norm": 23.625,
"learning_rate": 1.7441860465116282e-06,
"logits/chosen": -0.8985533714294434,
"logits/rejected": 0.039773982018232346,
"logps/chosen": -175.9773406982422,
"logps/rejected": -535.4810791015625,
"loss": 0.4963,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -0.20179271697998047,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.35043203830719,
"rewards/student_margin": 1.148639440536499,
"rewards/teacher_margin": 0.0,
"step": 60
},
{
"epoch": 0.04,
"grad_norm": 27.875,
"learning_rate": 2.0348837209302328e-06,
"logits/chosen": -0.5139203071594238,
"logits/rejected": 0.02224038355052471,
"logps/chosen": -244.5893096923828,
"logps/rejected": -573.5228271484375,
"loss": 0.4526,
"rewards/accuracies": 0.7999999523162842,
"rewards/chosen": -0.348739355802536,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.8875904083251953,
"rewards/student_margin": 1.5388509035110474,
"rewards/teacher_margin": 0.0,
"step": 70
},
{
"epoch": 0.05,
"grad_norm": 18.875,
"learning_rate": 2.3255813953488376e-06,
"logits/chosen": -0.7934290170669556,
"logits/rejected": -0.0012261777883395553,
"logps/chosen": -182.20286560058594,
"logps/rejected": -427.95037841796875,
"loss": 0.4462,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -0.0741361603140831,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.3484480381011963,
"rewards/student_margin": 1.2743120193481445,
"rewards/teacher_margin": 0.0,
"step": 80
},
{
"epoch": 0.05,
"grad_norm": 26.25,
"learning_rate": 2.6162790697674424e-06,
"logits/chosen": -0.7236814498901367,
"logits/rejected": 0.027469992637634277,
"logps/chosen": -186.38136291503906,
"logps/rejected": -399.2889099121094,
"loss": 0.4499,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -0.031203720718622208,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.2050061225891113,
"rewards/student_margin": 1.1738024950027466,
"rewards/teacher_margin": 0.0,
"step": 90
},
{
"epoch": 0.06,
"grad_norm": 30.0,
"learning_rate": 2.9069767441860468e-06,
"logits/chosen": -0.4575803279876709,
"logits/rejected": -0.1135629191994667,
"logps/chosen": -259.8243713378906,
"logps/rejected": -481.0874938964844,
"loss": 0.4093,
"rewards/accuracies": 0.8666666746139526,
"rewards/chosen": -0.21589164435863495,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.5411407947540283,
"rewards/student_margin": 1.3252493143081665,
"rewards/teacher_margin": 0.0,
"step": 100
},
{
"epoch": 0.06,
"grad_norm": 20.5,
"learning_rate": 3.1976744186046516e-06,
"logits/chosen": -0.3742029070854187,
"logits/rejected": -0.12473519891500473,
"logps/chosen": -186.20205688476562,
"logps/rejected": -392.6462097167969,
"loss": 0.3743,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.20217347145080566,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.5840258598327637,
"rewards/student_margin": 1.7861993312835693,
"rewards/teacher_margin": 0.0,
"step": 110
},
{
"epoch": 0.07,
"grad_norm": 19.125,
"learning_rate": 3.4883720930232564e-06,
"logits/chosen": -0.6747792959213257,
"logits/rejected": 0.16625359654426575,
"logps/chosen": -229.23672485351562,
"logps/rejected": -525.2789306640625,
"loss": 0.3796,
"rewards/accuracies": 0.8333333730697632,
"rewards/chosen": -0.06482670456171036,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.0777878761291504,
"rewards/student_margin": 2.012960910797119,
"rewards/teacher_margin": 0.0,
"step": 120
},
{
"epoch": 0.08,
"grad_norm": 36.5,
"learning_rate": 3.7790697674418607e-06,
"logits/chosen": -0.6423686742782593,
"logits/rejected": -0.049126945436000824,
"logps/chosen": -207.14761352539062,
"logps/rejected": -422.570556640625,
"loss": 0.3735,
"rewards/accuracies": 0.9000000953674316,
"rewards/chosen": -0.0891571044921875,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.900564193725586,
"rewards/student_margin": 1.8114070892333984,
"rewards/teacher_margin": 0.0,
"step": 130
},
{
"epoch": 0.08,
"grad_norm": 22.625,
"learning_rate": 4.0697674418604655e-06,
"logits/chosen": -0.6022151112556458,
"logits/rejected": -0.05691419914364815,
"logps/chosen": -294.1002502441406,
"logps/rejected": -565.58642578125,
"loss": 0.3309,
"rewards/accuracies": 0.7999999523162842,
"rewards/chosen": -0.2062998265028,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.2702088356018066,
"rewards/student_margin": 2.0639092922210693,
"rewards/teacher_margin": 0.0,
"step": 140
},
{
"epoch": 0.09,
"grad_norm": 21.5,
"learning_rate": 4.36046511627907e-06,
"logits/chosen": -0.5255838632583618,
"logits/rejected": -0.025744330137968063,
"logps/chosen": -206.9980010986328,
"logps/rejected": -500.0045471191406,
"loss": 0.4189,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.19613902270793915,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.1442222595214844,
"rewards/student_margin": 2.3403611183166504,
"rewards/teacher_margin": 0.0,
"step": 150
},
{
"epoch": 0.09,
"grad_norm": 28.625,
"learning_rate": 4.651162790697675e-06,
"logits/chosen": -0.595037043094635,
"logits/rejected": -0.05052659660577774,
"logps/chosen": -228.1767578125,
"logps/rejected": -517.8463134765625,
"loss": 0.3434,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": 0.011402442120015621,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.753488302230835,
"rewards/student_margin": 2.764890670776367,
"rewards/teacher_margin": 0.0,
"step": 160
},
{
"epoch": 0.1,
"grad_norm": 22.625,
"learning_rate": 4.941860465116279e-06,
"logits/chosen": -0.8135232925415039,
"logits/rejected": 0.09407637268304825,
"logps/chosen": -175.84527587890625,
"logps/rejected": -517.9186401367188,
"loss": 0.3348,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.3143172264099121,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.1565933227539062,
"rewards/student_margin": 3.4709103107452393,
"rewards/teacher_margin": 0.0,
"step": 170
},
{
"epoch": 0.1,
"grad_norm": 31.25,
"learning_rate": 4.999669659981585e-06,
"logits/chosen": -0.6553431153297424,
"logits/rejected": 0.02322854846715927,
"logps/chosen": -190.44459533691406,
"logps/rejected": -498.41845703125,
"loss": 0.3838,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -0.03836626559495926,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.3416810035705566,
"rewards/student_margin": 2.303314685821533,
"rewards/teacher_margin": 0.0,
"step": 180
},
{
"epoch": 0.11,
"grad_norm": 23.625,
"learning_rate": 4.998327803275328e-06,
"logits/chosen": -0.5196131467819214,
"logits/rejected": 0.03156811743974686,
"logps/chosen": -244.62362670898438,
"logps/rejected": -550.338134765625,
"loss": 0.3397,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 0.1986195147037506,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.157165050506592,
"rewards/student_margin": 2.3557844161987305,
"rewards/teacher_margin": 0.0,
"step": 190
},
{
"epoch": 0.12,
"grad_norm": 23.0,
"learning_rate": 4.995954337283145e-06,
"logits/chosen": -0.46547913551330566,
"logits/rejected": 0.002417749259620905,
"logps/chosen": -220.58139038085938,
"logps/rejected": -457.37957763671875,
"loss": 0.3513,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": 0.23564238846302032,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.046499729156494,
"rewards/student_margin": 2.282142162322998,
"rewards/teacher_margin": 0.0,
"step": 200
},
{
"epoch": 0.12,
"grad_norm": 29.625,
"learning_rate": 4.992550242056393e-06,
"logits/chosen": -0.7077560424804688,
"logits/rejected": 0.23227286338806152,
"logps/chosen": -250.8577117919922,
"logps/rejected": -470.998046875,
"loss": 0.3619,
"rewards/accuracies": 0.76666659116745,
"rewards/chosen": 0.30192822217941284,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.5922541618347168,
"rewards/student_margin": 1.8941824436187744,
"rewards/teacher_margin": 0.0,
"step": 210
},
{
"epoch": 0.13,
"grad_norm": 13.4375,
"learning_rate": 4.988116923213761e-06,
"logits/chosen": -0.5510163903236389,
"logits/rejected": 0.061091721057891846,
"logps/chosen": -231.3572235107422,
"logps/rejected": -456.94219970703125,
"loss": 0.3619,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": 0.7294235825538635,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.8068240880966187,
"rewards/student_margin": 2.536247491836548,
"rewards/teacher_margin": 0.0,
"step": 220
},
{
"epoch": 0.13,
"grad_norm": 23.125,
"learning_rate": 4.982656211360852e-06,
"logits/chosen": -0.6307552456855774,
"logits/rejected": 0.11015477031469345,
"logps/chosen": -210.99728393554688,
"logps/rejected": -536.0760498046875,
"loss": 0.3218,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 0.6962422132492065,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.4401557445526123,
"rewards/student_margin": 3.1363983154296875,
"rewards/teacher_margin": 0.0,
"step": 230
},
{
"epoch": 0.14,
"grad_norm": 24.75,
"learning_rate": 4.976170361334305e-06,
"logits/chosen": -0.47195425629615784,
"logits/rejected": -0.022416137158870697,
"logps/chosen": -255.0712127685547,
"logps/rejected": -450.69989013671875,
"loss": 0.3947,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": 0.3207937180995941,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.8813111782073975,
"rewards/student_margin": 2.2021050453186035,
"rewards/teacher_margin": 0.0,
"step": 240
},
{
"epoch": 0.15,
"grad_norm": 39.25,
"learning_rate": 4.968662051270712e-06,
"logits/chosen": -0.7277335524559021,
"logits/rejected": 0.09811758249998093,
"logps/chosen": -212.38381958007812,
"logps/rejected": -535.5774536132812,
"loss": 0.3208,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": 0.7789496183395386,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.0195579528808594,
"rewards/student_margin": 2.7985076904296875,
"rewards/teacher_margin": 0.0,
"step": 250
},
{
"epoch": 0.15,
"grad_norm": 17.125,
"learning_rate": 4.960134381500771e-06,
"logits/chosen": -0.6490304470062256,
"logits/rejected": 0.1224866732954979,
"logps/chosen": -208.1616973876953,
"logps/rejected": -549.3804931640625,
"loss": 0.3667,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 1.0950639247894287,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.4546568393707275,
"rewards/student_margin": 3.5497207641601562,
"rewards/teacher_margin": 0.0,
"step": 260
},
{
"epoch": 0.16,
"grad_norm": 27.5,
"learning_rate": 4.950590873269098e-06,
"logits/chosen": -0.5456488132476807,
"logits/rejected": 0.020637910813093185,
"logps/chosen": -259.57196044921875,
"logps/rejected": -409.33123779296875,
"loss": 0.3453,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": 0.4798993468284607,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.331217885017395,
"rewards/student_margin": 1.811117172241211,
"rewards/teacher_margin": 0.0,
"step": 270
},
{
"epoch": 0.16,
"grad_norm": 35.0,
"learning_rate": 4.940035467280229e-06,
"logits/chosen": -0.4896344542503357,
"logits/rejected": 0.024026235565543175,
"logps/chosen": -237.4656524658203,
"logps/rejected": -509.08404541015625,
"loss": 0.3292,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 0.728676974773407,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.34112811088562,
"rewards/student_margin": 3.069805145263672,
"rewards/teacher_margin": 0.0,
"step": 280
},
{
"epoch": 0.17,
"grad_norm": 26.5,
"learning_rate": 4.928472522071432e-06,
"logits/chosen": -0.6238890290260315,
"logits/rejected": 0.1964757889509201,
"logps/chosen": -213.3905029296875,
"logps/rejected": -478.8919372558594,
"loss": 0.3695,
"rewards/accuracies": 0.76666659116745,
"rewards/chosen": 0.6496525406837463,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.2729125022888184,
"rewards/student_margin": 1.9225651025772095,
"rewards/teacher_margin": 0.0,
"step": 290
},
{
"epoch": 0.17,
"grad_norm": 29.375,
"learning_rate": 4.915906812212976e-06,
"logits/chosen": -0.6193239092826843,
"logits/rejected": 0.05876749008893967,
"logps/chosen": -202.52391052246094,
"logps/rejected": -458.86492919921875,
"loss": 0.2946,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 0.8241189122200012,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.658251166343689,
"rewards/student_margin": 2.482369899749756,
"rewards/teacher_margin": 0.0,
"step": 300
},
{
"epoch": 0.18,
"grad_norm": 16.125,
"learning_rate": 4.9023435263366145e-06,
"logits/chosen": -0.9334913492202759,
"logits/rejected": -0.058604609221220016,
"logps/chosen": -168.749267578125,
"logps/rejected": -640.4456176757812,
"loss": 0.2114,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 0.5594726800918579,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.131923198699951,
"rewards/student_margin": 3.6913959980010986,
"rewards/teacher_margin": 0.0,
"step": 310
},
{
"epoch": 0.19,
"grad_norm": 15.125,
"learning_rate": 4.887788264993099e-06,
"logits/chosen": -0.6044551730155945,
"logits/rejected": -0.04338273033499718,
"logps/chosen": -224.3017578125,
"logps/rejected": -549.9353637695312,
"loss": 0.3094,
"rewards/accuracies": 0.8333333730697632,
"rewards/chosen": 0.7394543290138245,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.3911728858947754,
"rewards/student_margin": 3.130627393722534,
"rewards/teacher_margin": 0.0,
"step": 320
},
{
"epoch": 0.19,
"grad_norm": 23.375,
"learning_rate": 4.872247038339603e-06,
"logits/chosen": -0.7818952798843384,
"logits/rejected": 0.06497599929571152,
"logps/chosen": -192.23880004882812,
"logps/rejected": -626.657470703125,
"loss": 0.2646,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.0649678707122803,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.6517505645751953,
"rewards/student_margin": 4.716717720031738,
"rewards/teacher_margin": 0.0,
"step": 330
},
{
"epoch": 0.2,
"grad_norm": 30.625,
"learning_rate": 4.855726263657999e-06,
"logits/chosen": -0.29226094484329224,
"logits/rejected": -0.13234995305538177,
"logps/chosen": -264.1004333496094,
"logps/rejected": -362.9111633300781,
"loss": 0.4197,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.6886833906173706,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.4170998334884644,
"rewards/student_margin": 2.105782985687256,
"rewards/teacher_margin": 0.0,
"step": 340
},
{
"epoch": 0.2,
"grad_norm": 24.25,
"learning_rate": 4.838232762705044e-06,
"logits/chosen": -0.4636480212211609,
"logits/rejected": 0.027485806494951248,
"logps/chosen": -194.08128356933594,
"logps/rejected": -531.1504516601562,
"loss": 0.2651,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 1.0223414897918701,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.3282663822174072,
"rewards/student_margin": 3.3506076335906982,
"rewards/teacher_margin": 0.0,
"step": 350
},
{
"epoch": 0.21,
"grad_norm": 27.5,
"learning_rate": 4.819773758895538e-06,
"logits/chosen": -0.7649397253990173,
"logits/rejected": 0.06376661360263824,
"logps/chosen": -163.4183807373047,
"logps/rejected": -592.5127563476562,
"loss": 0.3665,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 0.8098726272583008,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.550152540206909,
"rewards/student_margin": 4.360025405883789,
"rewards/teacher_margin": 0.0,
"step": 360
},
{
"epoch": 0.22,
"grad_norm": 22.125,
"learning_rate": 4.8003568743196294e-06,
"logits/chosen": -0.5304759740829468,
"logits/rejected": 0.034225065261125565,
"logps/chosen": -187.56813049316406,
"logps/rejected": -569.3456420898438,
"loss": 0.2797,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 0.6876587867736816,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.1767399311065674,
"rewards/student_margin": 2.864398956298828,
"rewards/teacher_margin": 0.0,
"step": 370
},
{
"epoch": 0.22,
"grad_norm": 29.625,
"learning_rate": 4.779990126595505e-06,
"logits/chosen": -0.5957476496696472,
"logits/rejected": -0.04448353499174118,
"logps/chosen": -200.004150390625,
"logps/rejected": -613.2229614257812,
"loss": 0.3936,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 0.7026926279067993,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.0374011993408203,
"rewards/student_margin": 2.740093946456909,
"rewards/teacher_margin": 0.0,
"step": 380
},
{
"epoch": 0.23,
"grad_norm": 26.375,
"learning_rate": 4.758681925558756e-06,
"logits/chosen": -0.5602328777313232,
"logits/rejected": -0.012245875783264637,
"logps/chosen": -185.31021118164062,
"logps/rejected": -407.41766357421875,
"loss": 0.3227,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 0.620998740196228,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.894086480140686,
"rewards/student_margin": 2.515085458755493,
"rewards/teacher_margin": 0.0,
"step": 390
},
{
"epoch": 0.23,
"grad_norm": 20.125,
"learning_rate": 4.736441069789786e-06,
"logits/chosen": -0.46345287561416626,
"logits/rejected": 0.001672474667429924,
"logps/chosen": -228.4940948486328,
"logps/rejected": -361.48175048828125,
"loss": 0.2846,
"rewards/accuracies": 0.6999999284744263,
"rewards/chosen": 0.4205475449562073,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.4956649541854858,
"rewards/student_margin": 1.9162124395370483,
"rewards/teacher_margin": 0.0,
"step": 400
},
{
"epoch": 0.24,
"grad_norm": 16.5,
"learning_rate": 4.713276742980708e-06,
"logits/chosen": -0.4360102713108063,
"logits/rejected": -0.10899927467107773,
"logps/chosen": -249.0923309326172,
"logps/rejected": -560.4241333007812,
"loss": 0.2865,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.8636870384216309,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.2510733604431152,
"rewards/student_margin": 3.114760637283325,
"rewards/teacher_margin": 0.0,
"step": 410
},
{
"epoch": 0.24,
"grad_norm": 30.0,
"learning_rate": 4.6891985101432085e-06,
"logits/chosen": -0.3729998469352722,
"logits/rejected": -0.028511980548501015,
"logps/chosen": -207.8327178955078,
"logps/rejected": -356.3282775878906,
"loss": 0.3602,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": 0.9164339303970337,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -0.7431859374046326,
"rewards/student_margin": 1.659619927406311,
"rewards/teacher_margin": 0.0,
"step": 420
},
{
"epoch": 0.25,
"grad_norm": 29.0,
"learning_rate": 4.664216313658969e-06,
"logits/chosen": -0.324567973613739,
"logits/rejected": 0.016650067642331123,
"logps/chosen": -206.4794921875,
"logps/rejected": -445.509033203125,
"loss": 0.3204,
"rewards/accuracies": 0.8666666746139526,
"rewards/chosen": 1.2572410106658936,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -0.936699390411377,
"rewards/student_margin": 2.1939404010772705,
"rewards/teacher_margin": 0.0,
"step": 430
},
{
"epoch": 0.26,
"grad_norm": 28.0,
"learning_rate": 4.638340469174256e-06,
"logits/chosen": -0.5071316957473755,
"logits/rejected": 0.044037409126758575,
"logps/chosen": -180.57435607910156,
"logps/rejected": -601.9741821289062,
"loss": 0.2881,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.1313551664352417,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.505422830581665,
"rewards/student_margin": 3.6367783546447754,
"rewards/teacher_margin": 0.0,
"step": 440
},
{
"epoch": 0.26,
"grad_norm": 18.625,
"learning_rate": 4.611581661340386e-06,
"logits/chosen": -0.5572972297668457,
"logits/rejected": 0.05740237236022949,
"logps/chosen": -202.59849548339844,
"logps/rejected": -579.298095703125,
"loss": 0.2705,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.076784372329712,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.998802661895752,
"rewards/student_margin": 4.075586795806885,
"rewards/teacher_margin": 0.0,
"step": 450
},
{
"epoch": 0.27,
"grad_norm": 23.625,
"learning_rate": 4.5839509394018185e-06,
"logits/chosen": -0.37410736083984375,
"logits/rejected": 0.017481276765465736,
"logps/chosen": -243.3333282470703,
"logps/rejected": -485.5693359375,
"loss": 0.2463,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 0.691962480545044,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.156360149383545,
"rewards/student_margin": 2.8483223915100098,
"rewards/teacher_margin": 0.0,
"step": 460
},
{
"epoch": 0.27,
"grad_norm": 11.9375,
"learning_rate": 4.555459712633707e-06,
"logits/chosen": -0.6188653707504272,
"logits/rejected": 0.16356995701789856,
"logps/chosen": -238.00772094726562,
"logps/rejected": -447.47442626953125,
"loss": 0.273,
"rewards/accuracies": 0.8333333730697632,
"rewards/chosen": 1.3531479835510254,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.5606005191802979,
"rewards/student_margin": 2.913748264312744,
"rewards/teacher_margin": 0.0,
"step": 470
},
{
"epoch": 0.28,
"grad_norm": 15.5625,
"learning_rate": 4.526119745630779e-06,
"logits/chosen": -0.5436848402023315,
"logits/rejected": 0.04653601720929146,
"logps/chosen": -228.1831817626953,
"logps/rejected": -620.6707153320312,
"loss": 0.2793,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.1916218996047974,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.1671457290649414,
"rewards/student_margin": 3.3587677478790283,
"rewards/teacher_margin": 0.0,
"step": 480
},
{
"epoch": 0.29,
"grad_norm": 20.125,
"learning_rate": 4.4959431534495015e-06,
"logits/chosen": -0.1781628429889679,
"logits/rejected": 0.06491532176733017,
"logps/chosen": -266.6177673339844,
"logps/rejected": -403.18841552734375,
"loss": 0.2836,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.089085340499878,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -0.6247254610061646,
"rewards/student_margin": 2.713810682296753,
"rewards/teacher_margin": 0.0,
"step": 490
},
{
"epoch": 0.29,
"grad_norm": 28.625,
"learning_rate": 4.464942396605531e-06,
"logits/chosen": -0.5647803544998169,
"logits/rejected": 0.09310182183980942,
"logps/chosen": -185.3010711669922,
"logps/rejected": -453.74188232421875,
"loss": 0.2228,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.3027641773223877,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.6533056497573853,
"rewards/student_margin": 2.9560697078704834,
"rewards/teacher_margin": 0.0,
"step": 500
},
{
"epoch": 0.3,
"grad_norm": 27.625,
"learning_rate": 4.433130275928522e-06,
"logits/chosen": -0.508124828338623,
"logits/rejected": 0.05562957376241684,
"logps/chosen": -202.5834197998047,
"logps/rejected": -409.6363220214844,
"loss": 0.2788,
"rewards/accuracies": 0.8333333730697632,
"rewards/chosen": 1.8609619140625,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.7967350482940674,
"rewards/student_margin": 3.6576972007751465,
"rewards/teacher_margin": 0.0,
"step": 510
},
{
"epoch": 0.3,
"grad_norm": 17.0,
"learning_rate": 4.400519927276411e-06,
"logits/chosen": -0.48706334829330444,
"logits/rejected": -0.07246413826942444,
"logps/chosen": -216.9883270263672,
"logps/rejected": -562.3739013671875,
"loss": 0.2144,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.0860559940338135,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.157426357269287,
"rewards/student_margin": 4.2434821128845215,
"rewards/teacher_margin": 0.0,
"step": 520
},
{
"epoch": 0.31,
"grad_norm": 21.5,
"learning_rate": 4.367124816111351e-06,
"logits/chosen": -0.3559921979904175,
"logits/rejected": -0.11402563750743866,
"logps/chosen": -218.14559936523438,
"logps/rejected": -421.6756286621094,
"loss": 0.2122,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 1.3421014547348022,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.9218908548355103,
"rewards/student_margin": 3.2639918327331543,
"rewards/teacher_margin": 0.0,
"step": 530
},
{
"epoch": 0.31,
"grad_norm": 11.375,
"learning_rate": 4.332958731939562e-06,
"logits/chosen": -0.7146890163421631,
"logits/rejected": 0.2430504560470581,
"logps/chosen": -173.04917907714844,
"logps/rejected": -493.38641357421875,
"loss": 0.2895,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": 1.2438677549362183,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.6418049335479736,
"rewards/student_margin": 2.8856725692749023,
"rewards/teacher_margin": 0.0,
"step": 540
},
{
"epoch": 0.32,
"grad_norm": 29.75,
"learning_rate": 4.2980357826173665e-06,
"logits/chosen": -0.37195292115211487,
"logits/rejected": 0.02155585028231144,
"logps/chosen": -223.03817749023438,
"logps/rejected": -389.7994689941406,
"loss": 0.3108,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.6178417205810547,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.4387261867523193,
"rewards/student_margin": 3.056567907333374,
"rewards/teacher_margin": 0.0,
"step": 550
},
{
"epoch": 0.33,
"grad_norm": 21.875,
"learning_rate": 4.262370388525777e-06,
"logits/chosen": -0.3869599401950836,
"logits/rejected": 0.07817984372377396,
"logps/chosen": -175.74595642089844,
"logps/rejected": -394.0006408691406,
"loss": 0.2445,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 2.0001840591430664,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.1199393272399902,
"rewards/student_margin": 4.120123863220215,
"rewards/teacher_margin": 0.0,
"step": 560
},
{
"epoch": 0.33,
"grad_norm": 32.25,
"learning_rate": 4.225977276616028e-06,
"logits/chosen": -0.5689536333084106,
"logits/rejected": 0.11373758316040039,
"logps/chosen": -219.41482543945312,
"logps/rejected": -426.211669921875,
"loss": 0.2718,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.7283222675323486,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.4601569175720215,
"rewards/student_margin": 4.188479423522949,
"rewards/teacher_margin": 0.0,
"step": 570
},
{
"epoch": 0.34,
"grad_norm": 12.5,
"learning_rate": 4.1888714743285305e-06,
"logits/chosen": -0.3077758550643921,
"logits/rejected": -0.01714705489575863,
"logps/chosen": -228.143798828125,
"logps/rejected": -587.8424682617188,
"loss": 0.2311,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.6572096347808838,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.5316357612609863,
"rewards/student_margin": 5.188845157623291,
"rewards/teacher_margin": 0.0,
"step": 580
},
{
"epoch": 0.34,
"grad_norm": 15.1875,
"learning_rate": 4.151068303387739e-06,
"logits/chosen": -0.442771852016449,
"logits/rejected": 0.08343404531478882,
"logps/chosen": -172.50587463378906,
"logps/rejected": -430.36859130859375,
"loss": 0.2294,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.2904380559921265,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.5538195371627808,
"rewards/student_margin": 2.8442578315734863,
"rewards/teacher_margin": 0.0,
"step": 590
},
{
"epoch": 0.35,
"grad_norm": 17.125,
"learning_rate": 4.1125833734755115e-06,
"logits/chosen": -0.36492887139320374,
"logits/rejected": 0.17705991864204407,
"logps/chosen": -279.50311279296875,
"logps/rejected": -664.5555419921875,
"loss": 0.2703,
"rewards/accuracies": 0.9000000953674316,
"rewards/chosen": 1.2323994636535645,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.9321751594543457,
"rewards/student_margin": 4.16457462310791,
"rewards/teacher_margin": 0.0,
"step": 600
},
{
"epoch": 0.36,
"grad_norm": 13.75,
"learning_rate": 4.073432575785554e-06,
"logits/chosen": -0.1946389228105545,
"logits/rejected": 0.013921762816607952,
"logps/chosen": -248.22073364257812,
"logps/rejected": -516.2274169921875,
"loss": 0.2433,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": 1.737766981124878,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.9633979797363281,
"rewards/student_margin": 3.7011654376983643,
"rewards/teacher_margin": 0.0,
"step": 610
},
{
"epoch": 0.36,
"grad_norm": 27.125,
"learning_rate": 4.033632076461634e-06,
"logits/chosen": -0.35111454129219055,
"logits/rejected": 0.09419815242290497,
"logps/chosen": -237.6559295654297,
"logps/rejected": -470.4676818847656,
"loss": 0.3378,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 1.6999142169952393,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.3078672885894775,
"rewards/student_margin": 3.007781505584717,
"rewards/teacher_margin": 0.0,
"step": 620
},
{
"epoch": 0.37,
"grad_norm": 15.1875,
"learning_rate": 3.9931983099222595e-06,
"logits/chosen": -0.47325119376182556,
"logits/rejected": -0.02582048997282982,
"logps/chosen": -222.41415405273438,
"logps/rejected": -465.8368225097656,
"loss": 0.2413,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.1449840068817139,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.2412731647491455,
"rewards/student_margin": 2.3862571716308594,
"rewards/teacher_margin": 0.0,
"step": 630
},
{
"epoch": 0.37,
"grad_norm": 18.875,
"learning_rate": 3.9521479720745785e-06,
"logits/chosen": -0.45588645339012146,
"logits/rejected": 0.014990913681685925,
"logps/chosen": -208.7266845703125,
"logps/rejected": -565.1739501953125,
"loss": 0.2011,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.211085319519043,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.549320936203003,
"rewards/student_margin": 3.760406017303467,
"rewards/teacher_margin": 0.0,
"step": 640
},
{
"epoch": 0.38,
"grad_norm": 20.25,
"learning_rate": 3.910498013420303e-06,
"logits/chosen": -0.5036629438400269,
"logits/rejected": -0.04427925869822502,
"logps/chosen": -158.07492065429688,
"logps/rejected": -435.2080078125,
"loss": 0.2739,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.043824315071106,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.8605940341949463,
"rewards/student_margin": 2.9044182300567627,
"rewards/teacher_margin": 0.0,
"step": 650
},
{
"epoch": 0.38,
"grad_norm": 29.0,
"learning_rate": 3.8682656320565205e-06,
"logits/chosen": -0.41261768341064453,
"logits/rejected": 0.030822629109025,
"logps/chosen": -225.4918670654297,
"logps/rejected": -392.59613037109375,
"loss": 0.2616,
"rewards/accuracies": 0.8666666746139526,
"rewards/chosen": 1.8061786890029907,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.9655482769012451,
"rewards/student_margin": 3.771726608276367,
"rewards/teacher_margin": 0.0,
"step": 660
},
{
"epoch": 0.39,
"grad_norm": 22.0,
"learning_rate": 3.8254682665742425e-06,
"logits/chosen": -0.31573912501335144,
"logits/rejected": 0.005792762152850628,
"logps/chosen": -240.8995361328125,
"logps/rejected": -505.09735107421875,
"loss": 0.2795,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": 1.1914173364639282,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.8682523965835571,
"rewards/student_margin": 3.0596697330474854,
"rewards/teacher_margin": 0.0,
"step": 670
},
{
"epoch": 0.4,
"grad_norm": 18.5,
"learning_rate": 3.7821235888576744e-06,
"logits/chosen": -0.2197073996067047,
"logits/rejected": 0.02696964144706726,
"logps/chosen": -244.7580108642578,
"logps/rejected": -460.04400634765625,
"loss": 0.1711,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.5450682640075684,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.898803472518921,
"rewards/student_margin": 3.4438719749450684,
"rewards/teacher_margin": 0.0,
"step": 680
},
{
"epoch": 0.4,
"grad_norm": 9.5625,
"learning_rate": 3.7382494967871384e-06,
"logits/chosen": -0.4361536502838135,
"logits/rejected": 0.13731250166893005,
"logps/chosen": -246.27395629882812,
"logps/rejected": -633.4682006835938,
"loss": 0.278,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.635087251663208,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.891167640686035,
"rewards/student_margin": 4.526254653930664,
"rewards/teacher_margin": 0.0,
"step": 690
},
{
"epoch": 0.41,
"grad_norm": 16.125,
"learning_rate": 3.693864106848683e-06,
"logits/chosen": -0.4011663794517517,
"logits/rejected": 0.015058115124702454,
"logps/chosen": -248.08349609375,
"logps/rejected": -427.1165466308594,
"loss": 0.1977,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.8699365854263306,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.281233787536621,
"rewards/student_margin": 3.151170253753662,
"rewards/teacher_margin": 0.0,
"step": 700
},
{
"epoch": 0.41,
"grad_norm": 24.375,
"learning_rate": 3.648985746653424e-06,
"logits/chosen": -0.3061038553714752,
"logits/rejected": -0.029941242188215256,
"logps/chosen": -201.59161376953125,
"logps/rejected": -463.8720703125,
"loss": 0.1973,
"rewards/accuracies": 0.8666666746139526,
"rewards/chosen": 1.5523725748062134,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.9691741466522217,
"rewards/student_margin": 3.521547317504883,
"rewards/teacher_margin": 0.0,
"step": 710
},
{
"epoch": 0.42,
"grad_norm": 12.875,
"learning_rate": 3.6036329473697103e-06,
"logits/chosen": -0.613228976726532,
"logits/rejected": 0.009662175551056862,
"logps/chosen": -196.20706176757812,
"logps/rejected": -525.05908203125,
"loss": 0.1909,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.3759281635284424,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.8588900566101074,
"rewards/student_margin": 4.234818458557129,
"rewards/teacher_margin": 0.0,
"step": 720
},
{
"epoch": 0.42,
"grad_norm": 12.9375,
"learning_rate": 3.5578244360712373e-06,
"logits/chosen": -0.5177701115608215,
"logits/rejected": 0.176934152841568,
"logps/chosen": -206.4003143310547,
"logps/rejected": -487.99249267578125,
"loss": 0.2496,
"rewards/accuracies": 0.9000000953674316,
"rewards/chosen": 1.4539085626602173,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.346587657928467,
"rewards/student_margin": 3.8004963397979736,
"rewards/teacher_margin": 0.0,
"step": 730
},
{
"epoch": 0.43,
"grad_norm": 8.375,
"learning_rate": 3.511579128004261e-06,
"logits/chosen": -0.44068461656570435,
"logits/rejected": 0.005051338579505682,
"logps/chosen": -272.7444763183594,
"logps/rejected": -478.147705078125,
"loss": 0.2245,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.060945987701416,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.0382800102233887,
"rewards/student_margin": 3.0992257595062256,
"rewards/teacher_margin": 0.0,
"step": 740
},
{
"epoch": 0.44,
"grad_norm": 18.0,
"learning_rate": 3.464916118777121e-06,
"logits/chosen": -0.5848614573478699,
"logits/rejected": 0.30563992261886597,
"logps/chosen": -184.22329711914062,
"logps/rejected": -468.75970458984375,
"loss": 0.2374,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.795701026916504,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.5491726398468018,
"rewards/student_margin": 3.3448734283447266,
"rewards/teacher_margin": 0.0,
"step": 750
},
{
"epoch": 0.44,
"grad_norm": 8.1875,
"learning_rate": 3.417854676475279e-06,
"logits/chosen": -0.38186392188072205,
"logits/rejected": 0.059945814311504364,
"logps/chosen": -260.58978271484375,
"logps/rejected": -594.833251953125,
"loss": 0.2078,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.7202898263931274,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.4396731853485107,
"rewards/student_margin": 4.159963130950928,
"rewards/teacher_margin": 0.0,
"step": 760
},
{
"epoch": 0.45,
"grad_norm": 16.0,
"learning_rate": 3.370414233705147e-06,
"logits/chosen": -0.2833537757396698,
"logits/rejected": 0.09210254997015,
"logps/chosen": -223.1533203125,
"logps/rejected": -582.6676025390625,
"loss": 0.2096,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 1.4522992372512817,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.2473137378692627,
"rewards/student_margin": 4.699612617492676,
"rewards/teacher_margin": 0.0,
"step": 770
},
{
"epoch": 0.45,
"grad_norm": 8.375,
"learning_rate": 3.322614379569975e-06,
"logits/chosen": -0.302879273891449,
"logits/rejected": -0.14690950512886047,
"logps/chosen": -225.3035888671875,
"logps/rejected": -357.20330810546875,
"loss": 0.2293,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": 1.1200844049453735,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.0596327781677246,
"rewards/student_margin": 3.1797170639038086,
"rewards/teacher_margin": 0.0,
"step": 780
},
{
"epoch": 0.46,
"grad_norm": 19.5,
"learning_rate": 3.27447485158112e-06,
"logits/chosen": -0.4661865830421448,
"logits/rejected": 0.12939366698265076,
"logps/chosen": -226.401611328125,
"logps/rejected": -421.63592529296875,
"loss": 0.1703,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 1.3359860181808472,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.47092604637146,
"rewards/student_margin": 3.806912660598755,
"rewards/teacher_margin": 0.0,
"step": 790
},
{
"epoch": 0.47,
"grad_norm": 24.125,
"learning_rate": 3.226015527508036e-06,
"logits/chosen": -0.5169921517372131,
"logits/rejected": -0.004798925016075373,
"logps/chosen": -241.3819580078125,
"logps/rejected": -335.36492919921875,
"loss": 0.2056,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.6830694675445557,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.760565996170044,
"rewards/student_margin": 3.4436354637145996,
"rewards/teacher_margin": 0.0,
"step": 800
},
{
"epoch": 0.47,
"grad_norm": 19.875,
"learning_rate": 3.1772564171703475e-06,
"logits/chosen": -0.28334909677505493,
"logits/rejected": -0.021695634350180626,
"logps/chosen": -255.8955078125,
"logps/rejected": -581.0257568359375,
"loss": 0.3028,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 1.906981110572815,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.3529152870178223,
"rewards/student_margin": 5.259896278381348,
"rewards/teacher_margin": 0.0,
"step": 810
},
{
"epoch": 0.48,
"grad_norm": 17.875,
"learning_rate": 3.128217654175393e-06,
"logits/chosen": -0.3028213381767273,
"logits/rejected": 0.1735200583934784,
"logps/chosen": -269.35162353515625,
"logps/rejected": -464.0060119628906,
"loss": 0.1942,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.3699723482131958,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.393336057662964,
"rewards/student_margin": 3.7633087635040283,
"rewards/teacher_margin": 0.0,
"step": 820
},
{
"epoch": 0.48,
"grad_norm": 18.5,
"learning_rate": 3.078919487604661e-06,
"logits/chosen": -0.43081316351890564,
"logits/rejected": -0.09694792330265045,
"logps/chosen": -211.90707397460938,
"logps/rejected": -447.4751892089844,
"loss": 0.1913,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.855574369430542,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.7887474298477173,
"rewards/student_margin": 3.644321918487549,
"rewards/teacher_margin": 0.0,
"step": 830
},
{
"epoch": 0.49,
"grad_norm": 12.75,
"learning_rate": 3.029382273652544e-06,
"logits/chosen": -0.4358023703098297,
"logits/rejected": 0.1673169583082199,
"logps/chosen": -241.4596710205078,
"logps/rejected": -499.11956787109375,
"loss": 0.1966,
"rewards/accuracies": 0.8666666746139526,
"rewards/chosen": 1.5806983709335327,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.8262617588043213,
"rewards/student_margin": 3.4069600105285645,
"rewards/teacher_margin": 0.0,
"step": 840
},
{
"epoch": 0.49,
"grad_norm": 16.875,
"learning_rate": 2.9796264672208535e-06,
"logits/chosen": -0.44691354036331177,
"logits/rejected": 0.14132077991962433,
"logps/chosen": -217.2314453125,
"logps/rejected": -476.49859619140625,
"loss": 0.1794,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.7376991510391235,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.2412829399108887,
"rewards/student_margin": 3.9789822101593018,
"rewards/teacher_margin": 0.0,
"step": 850
},
{
"epoch": 0.5,
"grad_norm": 12.375,
"learning_rate": 2.929672613472589e-06,
"logits/chosen": -0.6897366642951965,
"logits/rejected": 0.006776002235710621,
"logps/chosen": -216.3780517578125,
"logps/rejected": -620.7020263671875,
"loss": 0.1916,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 1.432846188545227,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.6632869243621826,
"rewards/student_margin": 5.096132755279541,
"rewards/teacher_margin": 0.0,
"step": 860
},
{
"epoch": 0.51,
"grad_norm": 13.375,
"learning_rate": 2.879541339348429e-06,
"logits/chosen": -0.5285438895225525,
"logits/rejected": 0.00928882509469986,
"logps/chosen": -242.85098266601562,
"logps/rejected": -481.8531188964844,
"loss": 0.1813,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.5595648288726807,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.5762245655059814,
"rewards/student_margin": 4.135788440704346,
"rewards/teacher_margin": 0.0,
"step": 870
},
{
"epoch": 0.51,
"grad_norm": 24.25,
"learning_rate": 2.829253345049458e-06,
"logits/chosen": -0.31855055689811707,
"logits/rejected": 0.0941370353102684,
"logps/chosen": -240.16650390625,
"logps/rejected": -531.5496826171875,
"loss": 0.2001,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.016098976135254,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.6434473991394043,
"rewards/student_margin": 4.659546375274658,
"rewards/teacher_margin": 0.0,
"step": 880
},
{
"epoch": 0.52,
"grad_norm": 5.5,
"learning_rate": 2.77882939548964e-06,
"logits/chosen": -0.4587094187736511,
"logits/rejected": 0.0055079348385334015,
"logps/chosen": -223.0956573486328,
"logps/rejected": -570.2337036132812,
"loss": 0.2067,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.6644607782363892,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.6545493602752686,
"rewards/student_margin": 4.3190107345581055,
"rewards/teacher_margin": 0.0,
"step": 890
},
{
"epoch": 0.52,
"grad_norm": 20.625,
"learning_rate": 2.7282903117215724e-06,
"logits/chosen": -0.26384082436561584,
"logits/rejected": 0.112058125436306,
"logps/chosen": -246.3653106689453,
"logps/rejected": -487.1790466308594,
"loss": 0.1951,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 1.8497905731201172,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.262843608856201,
"rewards/student_margin": 4.112634658813477,
"rewards/teacher_margin": 0.0,
"step": 900
},
{
"epoch": 0.53,
"grad_norm": 17.75,
"learning_rate": 2.6776569623390615e-06,
"logits/chosen": -0.3648647367954254,
"logits/rejected": 0.17572906613349915,
"logps/chosen": -262.3744201660156,
"logps/rejected": -555.7116088867188,
"loss": 0.171,
"rewards/accuracies": 0.8666666746139526,
"rewards/chosen": 1.8194630146026611,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.9717178344726562,
"rewards/student_margin": 4.791180610656738,
"rewards/teacher_margin": 0.0,
"step": 910
},
{
"epoch": 0.54,
"grad_norm": 12.9375,
"learning_rate": 2.626950254860059e-06,
"logits/chosen": -0.5600097179412842,
"logits/rejected": 0.1498565971851349,
"logps/chosen": -180.4383544921875,
"logps/rejected": -550.03857421875,
"loss": 0.2459,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 2.4329237937927246,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.4012227058410645,
"rewards/student_margin": 4.834146499633789,
"rewards/teacher_margin": 0.0,
"step": 920
},
{
"epoch": 0.54,
"grad_norm": 15.875,
"learning_rate": 2.576191127093538e-06,
"logits/chosen": -0.3777901232242584,
"logits/rejected": 0.046824149787425995,
"logps/chosen": -214.46310424804688,
"logps/rejected": -476.24627685546875,
"loss": 0.1634,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": 1.7608407735824585,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.415520191192627,
"rewards/student_margin": 4.176361083984375,
"rewards/teacher_margin": 0.0,
"step": 930
},
{
"epoch": 0.55,
"grad_norm": 26.0,
"learning_rate": 2.5254005384938495e-06,
"logits/chosen": -0.49377965927124023,
"logits/rejected": 0.03530919924378395,
"logps/chosen": -214.46923828125,
"logps/rejected": -443.4176330566406,
"loss": 0.231,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.699275255203247,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.4718739986419678,
"rewards/student_margin": 4.171149253845215,
"rewards/teacher_margin": 0.0,
"step": 940
},
{
"epoch": 0.55,
"grad_norm": 9.5,
"learning_rate": 2.474599461506151e-06,
"logits/chosen": -0.6275911331176758,
"logits/rejected": 0.08756984770298004,
"logps/chosen": -198.088623046875,
"logps/rejected": -556.3299560546875,
"loss": 0.1402,
"rewards/accuracies": 1.0,
"rewards/chosen": 1.5414060354232788,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.8458423614501953,
"rewards/student_margin": 5.387248516082764,
"rewards/teacher_margin": 0.0,
"step": 950
},
{
"epoch": 0.56,
"grad_norm": 5.71875,
"learning_rate": 2.4238088729064625e-06,
"logits/chosen": -0.031927138566970825,
"logits/rejected": -0.28637224435806274,
"logps/chosen": -263.4320373535156,
"logps/rejected": -391.6306457519531,
"loss": 0.1843,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 1.5371907949447632,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.255502939224243,
"rewards/student_margin": 3.7926928997039795,
"rewards/teacher_margin": 0.0,
"step": 960
},
{
"epoch": 0.56,
"grad_norm": 62.25,
"learning_rate": 2.3730497451399416e-06,
"logits/chosen": -0.39691585302352905,
"logits/rejected": 0.12687797844409943,
"logps/chosen": -187.50283813476562,
"logps/rejected": -645.21533203125,
"loss": 0.2059,
"rewards/accuracies": 1.0,
"rewards/chosen": 1.4940803050994873,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -4.447799205780029,
"rewards/student_margin": 5.941879749298096,
"rewards/teacher_margin": 0.0,
"step": 970
},
{
"epoch": 0.57,
"grad_norm": 17.0,
"learning_rate": 2.3223430376609397e-06,
"logits/chosen": -0.31980419158935547,
"logits/rejected": 0.05356071516871452,
"logps/chosen": -222.4664764404297,
"logps/rejected": -434.98681640625,
"loss": 0.2345,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.9460868835449219,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.792879104614258,
"rewards/student_margin": 4.7389655113220215,
"rewards/teacher_margin": 0.0,
"step": 980
},
{
"epoch": 0.58,
"grad_norm": 8.9375,
"learning_rate": 2.2717096882784275e-06,
"logits/chosen": -0.5998212099075317,
"logits/rejected": 0.12909957766532898,
"logps/chosen": -176.48876953125,
"logps/rejected": -488.4345703125,
"loss": 0.2036,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 2.0280308723449707,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.9598499536514282,
"rewards/student_margin": 3.9878811836242676,
"rewards/teacher_margin": 0.0,
"step": 990
},
{
"epoch": 0.58,
"grad_norm": 14.6875,
"learning_rate": 2.2211706045103607e-06,
"logits/chosen": -0.49985045194625854,
"logits/rejected": 0.06370660662651062,
"logps/chosen": -219.44418334960938,
"logps/rejected": -586.0706787109375,
"loss": 0.1749,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.355344295501709,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.49812650680542,
"rewards/student_margin": 4.853470802307129,
"rewards/teacher_margin": 0.0,
"step": 1000
},
{
"epoch": 0.59,
"grad_norm": 12.5625,
"learning_rate": 2.1707466549505428e-06,
"logits/chosen": -0.5008007287979126,
"logits/rejected": -0.06781232357025146,
"logps/chosen": -212.249267578125,
"logps/rejected": -466.44964599609375,
"loss": 0.1597,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.9009497165679932,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.340704917907715,
"rewards/student_margin": 4.241654872894287,
"rewards/teacher_margin": 0.0,
"step": 1010
},
{
"epoch": 0.59,
"grad_norm": 7.78125,
"learning_rate": 2.120458660651572e-06,
"logits/chosen": -0.5323157906532288,
"logits/rejected": -0.03192181885242462,
"logps/chosen": -211.4275360107422,
"logps/rejected": -644.5025024414062,
"loss": 0.1576,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.192241668701172,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.6768863201141357,
"rewards/student_margin": 5.869128227233887,
"rewards/teacher_margin": 0.0,
"step": 1020
},
{
"epoch": 0.6,
"grad_norm": 27.375,
"learning_rate": 2.070327386527412e-06,
"logits/chosen": -0.3082345128059387,
"logits/rejected": -0.09356869012117386,
"logps/chosen": -276.2046203613281,
"logps/rejected": -543.6260375976562,
"loss": 0.2272,
"rewards/accuracies": 0.8666666746139526,
"rewards/chosen": 1.5081729888916016,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.5921823978424072,
"rewards/student_margin": 4.100355625152588,
"rewards/teacher_margin": 0.0,
"step": 1030
},
{
"epoch": 0.61,
"grad_norm": 13.25,
"learning_rate": 2.020373532779147e-06,
"logits/chosen": -0.44038814306259155,
"logits/rejected": 0.11050190776586533,
"logps/chosen": -222.35556030273438,
"logps/rejected": -402.10443115234375,
"loss": 0.2101,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 2.202882766723633,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.7409263849258423,
"rewards/student_margin": 3.9438090324401855,
"rewards/teacher_margin": 0.0,
"step": 1040
},
{
"epoch": 0.61,
"grad_norm": 11.1875,
"learning_rate": 1.9706177263474567e-06,
"logits/chosen": -0.4345908761024475,
"logits/rejected": 0.14452026784420013,
"logps/chosen": -266.3000793457031,
"logps/rejected": -518.9310913085938,
"loss": 0.1659,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 2.2457222938537598,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.8994420766830444,
"rewards/student_margin": 4.145164489746094,
"rewards/teacher_margin": 0.0,
"step": 1050
},
{
"epoch": 0.62,
"grad_norm": 26.0,
"learning_rate": 1.9210805123953397e-06,
"logits/chosen": -0.47684893012046814,
"logits/rejected": 0.010342525318264961,
"logps/chosen": -221.49465942382812,
"logps/rejected": -640.6463623046875,
"loss": 0.1841,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.7234092950820923,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.9107723236083984,
"rewards/student_margin": 4.634181499481201,
"rewards/teacher_margin": 0.0,
"step": 1060
},
{
"epoch": 0.62,
"grad_norm": 19.25,
"learning_rate": 1.8717823458246082e-06,
"logits/chosen": -0.4791850447654724,
"logits/rejected": 0.2243192493915558,
"logps/chosen": -210.71337890625,
"logps/rejected": -536.1077880859375,
"loss": 0.1652,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 1.8451074361801147,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.979658842086792,
"rewards/student_margin": 4.824766159057617,
"rewards/teacher_margin": 0.0,
"step": 1070
},
{
"epoch": 0.63,
"grad_norm": 22.625,
"learning_rate": 1.8227435828296525e-06,
"logits/chosen": -0.7112604379653931,
"logits/rejected": 0.23266398906707764,
"logps/chosen": -200.66549682617188,
"logps/rejected": -564.5877685546875,
"loss": 0.1927,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 2.2678866386413574,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.094907283782959,
"rewards/student_margin": 5.362793922424316,
"rewards/teacher_margin": 0.0,
"step": 1080
},
{
"epoch": 0.63,
"grad_norm": 17.625,
"learning_rate": 1.7739844724919636e-06,
"logits/chosen": -0.18364885449409485,
"logits/rejected": -0.07437516748905182,
"logps/chosen": -236.36068725585938,
"logps/rejected": -423.6537170410156,
"loss": 0.1789,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 2.1759696006774902,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.076977014541626,
"rewards/student_margin": 4.252946376800537,
"rewards/teacher_margin": 0.0,
"step": 1090
},
{
"epoch": 0.64,
"grad_norm": 18.375,
"learning_rate": 1.72552514841888e-06,
"logits/chosen": -0.5487465858459473,
"logits/rejected": -0.06100478023290634,
"logps/chosen": -208.61264038085938,
"logps/rejected": -503.5950622558594,
"loss": 0.1366,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.9314453601837158,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.7049808502197266,
"rewards/student_margin": 4.636425971984863,
"rewards/teacher_margin": 0.0,
"step": 1100
},
{
"epoch": 0.65,
"grad_norm": 14.125,
"learning_rate": 1.6773856204300259e-06,
"logits/chosen": -0.48537248373031616,
"logits/rejected": 0.20526167750358582,
"logps/chosen": -204.2689208984375,
"logps/rejected": -438.7137145996094,
"loss": 0.1781,
"rewards/accuracies": 1.0,
"rewards/chosen": 1.7250961065292358,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.3652234077453613,
"rewards/student_margin": 4.090319633483887,
"rewards/teacher_margin": 0.0,
"step": 1110
},
{
"epoch": 0.65,
"grad_norm": 20.75,
"learning_rate": 1.6295857662948535e-06,
"logits/chosen": -0.41923952102661133,
"logits/rejected": 0.08341416716575623,
"logps/chosen": -190.0411376953125,
"logps/rejected": -548.9283447265625,
"loss": 0.19,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.4958313703536987,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.9623820781707764,
"rewards/student_margin": 4.458213806152344,
"rewards/teacher_margin": 0.0,
"step": 1120
},
{
"epoch": 0.66,
"grad_norm": 20.75,
"learning_rate": 1.5821453235247217e-06,
"logits/chosen": -0.053016532212495804,
"logits/rejected": 0.08401618152856827,
"logps/chosen": -312.3536682128906,
"logps/rejected": -552.4027099609375,
"loss": 0.198,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.9978468418121338,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.899606227874756,
"rewards/student_margin": 4.897453784942627,
"rewards/teacher_margin": 0.0,
"step": 1130
},
{
"epoch": 0.66,
"grad_norm": 11.375,
"learning_rate": 1.5350838812228796e-06,
"logits/chosen": -0.5477980375289917,
"logits/rejected": 0.18308475613594055,
"logps/chosen": -194.05624389648438,
"logps/rejected": -481.322265625,
"loss": 0.1794,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 2.268648624420166,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.6922143697738647,
"rewards/student_margin": 3.9608635902404785,
"rewards/teacher_margin": 0.0,
"step": 1140
},
{
"epoch": 0.67,
"grad_norm": 11.5625,
"learning_rate": 1.4884208719957396e-06,
"logits/chosen": -0.4524906277656555,
"logits/rejected": 0.048188693821430206,
"logps/chosen": -240.8212890625,
"logps/rejected": -457.342041015625,
"loss": 0.1424,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 2.082411527633667,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.1976187229156494,
"rewards/student_margin": 4.280030727386475,
"rewards/teacher_margin": 0.0,
"step": 1150
},
{
"epoch": 0.68,
"grad_norm": 23.125,
"learning_rate": 1.4421755639287633e-06,
"logits/chosen": -0.735596239566803,
"logits/rejected": 0.0022054375149309635,
"logps/chosen": -182.4052734375,
"logps/rejected": -511.3103942871094,
"loss": 0.1992,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.068901538848877,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.3740670680999756,
"rewards/student_margin": 4.442968845367432,
"rewards/teacher_margin": 0.0,
"step": 1160
},
{
"epoch": 0.68,
"grad_norm": 7.28125,
"learning_rate": 1.39636705263029e-06,
"logits/chosen": -0.09597094357013702,
"logits/rejected": -0.051341939717531204,
"logps/chosen": -257.4917297363281,
"logps/rejected": -411.2127990722656,
"loss": 0.1143,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 2.1275782585144043,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.4862334728240967,
"rewards/student_margin": 3.613811492919922,
"rewards/teacher_margin": 0.0,
"step": 1170
},
{
"epoch": 0.69,
"grad_norm": 4.875,
"learning_rate": 1.3510142533465764e-06,
"logits/chosen": -0.35563239455223083,
"logits/rejected": 0.13452188670635223,
"logps/chosen": -267.82806396484375,
"logps/rejected": -522.6319580078125,
"loss": 0.1118,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 2.2255101203918457,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.5478758811950684,
"rewards/student_margin": 4.773386478424072,
"rewards/teacher_margin": 0.0,
"step": 1180
},
{
"epoch": 0.69,
"grad_norm": 28.5,
"learning_rate": 1.306135893151318e-06,
"logits/chosen": -0.37067538499832153,
"logits/rejected": 0.1407340168952942,
"logps/chosen": -292.832763671875,
"logps/rejected": -464.6031799316406,
"loss": 0.2175,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.117187976837158,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.6492514610290527,
"rewards/student_margin": 4.766438961029053,
"rewards/teacher_margin": 0.0,
"step": 1190
},
{
"epoch": 0.7,
"grad_norm": 11.9375,
"learning_rate": 1.2617505032128618e-06,
"logits/chosen": -0.2240295708179474,
"logits/rejected": 0.03423571586608887,
"logps/chosen": -241.01223754882812,
"logps/rejected": -511.5538635253906,
"loss": 0.1503,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.904809594154358,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.1720213890075684,
"rewards/student_margin": 4.076830863952637,
"rewards/teacher_margin": 0.0,
"step": 1200
},
{
"epoch": 0.7,
"grad_norm": 22.5,
"learning_rate": 1.2178764111423258e-06,
"logits/chosen": -0.453156054019928,
"logits/rejected": -0.015243321657180786,
"logps/chosen": -261.2116394042969,
"logps/rejected": -541.9698486328125,
"loss": 0.1609,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 2.2009761333465576,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.577160120010376,
"rewards/student_margin": 4.778136253356934,
"rewards/teacher_margin": 0.0,
"step": 1210
},
{
"epoch": 0.71,
"grad_norm": 11.625,
"learning_rate": 1.1745317334257577e-06,
"logits/chosen": -0.3408849537372589,
"logits/rejected": 0.10163626819849014,
"logps/chosen": -180.9221649169922,
"logps/rejected": -456.72412109375,
"loss": 0.1747,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.4770768880844116,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.7443153858184814,
"rewards/student_margin": 4.221392631530762,
"rewards/teacher_margin": 0.0,
"step": 1220
},
{
"epoch": 0.72,
"grad_norm": 18.0,
"learning_rate": 1.1317343679434797e-06,
"logits/chosen": -0.39155808091163635,
"logits/rejected": 0.03318040445446968,
"logps/chosen": -243.98617553710938,
"logps/rejected": -480.6688537597656,
"loss": 0.1238,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.888183355331421,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.6680731773376465,
"rewards/student_margin": 4.556256294250488,
"rewards/teacher_margin": 0.0,
"step": 1230
},
{
"epoch": 0.72,
"grad_norm": 10.1875,
"learning_rate": 1.0895019865796974e-06,
"logits/chosen": -0.35370463132858276,
"logits/rejected": -0.06857095658779144,
"logps/chosen": -214.5222930908203,
"logps/rejected": -411.405517578125,
"loss": 0.2088,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 1.605942726135254,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.65793776512146,
"rewards/student_margin": 3.263880968093872,
"rewards/teacher_margin": 0.0,
"step": 1240
},
{
"epoch": 0.73,
"grad_norm": 7.96875,
"learning_rate": 1.0478520279254232e-06,
"logits/chosen": -0.4441911578178406,
"logits/rejected": 0.0003550305846147239,
"logps/chosen": -173.74652099609375,
"logps/rejected": -404.5919494628906,
"loss": 0.1417,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.5375311374664307,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.0653061866760254,
"rewards/student_margin": 3.602837324142456,
"rewards/teacher_margin": 0.0,
"step": 1250
},
{
"epoch": 0.73,
"grad_norm": 11.0,
"learning_rate": 1.0068016900777411e-06,
"logits/chosen": -0.14587707817554474,
"logits/rejected": 0.1403418779373169,
"logps/chosen": -265.76385498046875,
"logps/rejected": -488.88360595703125,
"loss": 0.1327,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.8325278759002686,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.28810453414917,
"rewards/student_margin": 4.120633125305176,
"rewards/teacher_margin": 0.0,
"step": 1260
},
{
"epoch": 0.74,
"grad_norm": 15.875,
"learning_rate": 9.663679235383663e-07,
"logits/chosen": -0.4032668173313141,
"logits/rejected": 0.15200474858283997,
"logps/chosen": -253.056396484375,
"logps/rejected": -567.1189575195312,
"loss": 0.1393,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.7767219543457031,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.897172451019287,
"rewards/student_margin": 4.67389440536499,
"rewards/teacher_margin": 0.0,
"step": 1270
},
{
"epoch": 0.75,
"grad_norm": 13.25,
"learning_rate": 9.265674242144462e-07,
"logits/chosen": -0.5168894529342651,
"logits/rejected": 0.1268787384033203,
"logps/chosen": -241.34536743164062,
"logps/rejected": -535.0253295898438,
"loss": 0.167,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 2.2057933807373047,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.5597376823425293,
"rewards/student_margin": 4.765531539916992,
"rewards/teacher_margin": 0.0,
"step": 1280
},
{
"epoch": 0.75,
"grad_norm": 11.375,
"learning_rate": 8.874166265244891e-07,
"logits/chosen": -0.2563822865486145,
"logits/rejected": -0.007131014950573444,
"logps/chosen": -241.32510375976562,
"logps/rejected": -438.27783203125,
"loss": 0.1496,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 2.1465556621551514,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.53509783744812,
"rewards/student_margin": 4.681653022766113,
"rewards/teacher_margin": 0.0,
"step": 1290
},
{
"epoch": 0.76,
"grad_norm": 5.71875,
"learning_rate": 8.48931696612261e-07,
"logits/chosen": -0.03185834363102913,
"logits/rejected": 0.0335637666285038,
"logps/chosen": -292.47320556640625,
"logps/rejected": -396.5122375488281,
"loss": 0.1623,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.144620656967163,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.0947091579437256,
"rewards/student_margin": 4.239329814910889,
"rewards/teacher_margin": 0.0,
"step": 1300
},
{
"epoch": 0.76,
"grad_norm": 11.1875,
"learning_rate": 8.111285256714702e-07,
"logits/chosen": -0.5409881472587585,
"logits/rejected": -0.020885199308395386,
"logps/chosen": -259.0535583496094,
"logps/rejected": -619.1828002929688,
"loss": 0.1752,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.047285795211792,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.6237800121307373,
"rewards/student_margin": 5.6710662841796875,
"rewards/teacher_margin": 0.0,
"step": 1310
},
{
"epoch": 0.77,
"grad_norm": 11.3125,
"learning_rate": 7.740227233839726e-07,
"logits/chosen": -0.23395772278308868,
"logits/rejected": 0.0602710135281086,
"logps/chosen": -236.9947967529297,
"logps/rejected": -399.87115478515625,
"loss": 0.1768,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.6983884572982788,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.7023701667785645,
"rewards/student_margin": 4.400758743286133,
"rewards/teacher_margin": 0.0,
"step": 1320
},
{
"epoch": 0.77,
"grad_norm": 8.5,
"learning_rate": 7.376296114742231e-07,
"logits/chosen": -0.5401469469070435,
"logits/rejected": 0.14081577956676483,
"logps/chosen": -179.28860473632812,
"logps/rejected": -485.43218994140625,
"loss": 0.1542,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.387885808944702,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.904003620147705,
"rewards/student_margin": 5.291889190673828,
"rewards/teacher_margin": 0.0,
"step": 1330
},
{
"epoch": 0.78,
"grad_norm": 8.3125,
"learning_rate": 7.019642173826335e-07,
"logits/chosen": -0.42929285764694214,
"logits/rejected": 0.14102640748023987,
"logps/chosen": -236.9450225830078,
"logps/rejected": -510.2264099121094,
"loss": 0.1831,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 2.448880672454834,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.9888415336608887,
"rewards/student_margin": 5.437722206115723,
"rewards/teacher_margin": 0.0,
"step": 1340
},
{
"epoch": 0.79,
"grad_norm": 12.125,
"learning_rate": 6.670412680604379e-07,
"logits/chosen": -0.25430089235305786,
"logits/rejected": 0.12993714213371277,
"logps/chosen": -257.48284912109375,
"logps/rejected": -497.19110107421875,
"loss": 0.1467,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 2.0826573371887207,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.528546094894409,
"rewards/student_margin": 4.611203193664551,
"rewards/teacher_margin": 0.0,
"step": 1350
},
{
"epoch": 0.79,
"grad_norm": 22.875,
"learning_rate": 6.328751838886496e-07,
"logits/chosen": -0.2653670907020569,
"logits/rejected": 0.04784447327256203,
"logps/chosen": -231.7043914794922,
"logps/rejected": -405.35430908203125,
"loss": 0.1555,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.0900282859802246,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.961517572402954,
"rewards/student_margin": 4.0515456199646,
"rewards/teacher_margin": 0.0,
"step": 1360
},
{
"epoch": 0.8,
"grad_norm": 12.0625,
"learning_rate": 5.994800727235894e-07,
"logits/chosen": -0.6459251642227173,
"logits/rejected": -0.028898591175675392,
"logps/chosen": -167.8853759765625,
"logps/rejected": -490.18621826171875,
"loss": 0.1421,
"rewards/accuracies": 1.0,
"rewards/chosen": 1.746824026107788,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.833235502243042,
"rewards/student_margin": 4.580060005187988,
"rewards/teacher_margin": 0.0,
"step": 1370
},
{
"epoch": 0.8,
"grad_norm": 11.4375,
"learning_rate": 5.668697240714782e-07,
"logits/chosen": -0.736299991607666,
"logits/rejected": -0.027988219633698463,
"logps/chosen": -166.20730590820312,
"logps/rejected": -544.7757568359375,
"loss": 0.1785,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.0296387672424316,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.3135805130004883,
"rewards/student_margin": 4.34321928024292,
"rewards/teacher_margin": 0.0,
"step": 1380
},
{
"epoch": 0.81,
"grad_norm": 26.0,
"learning_rate": 5.350576033944705e-07,
"logits/chosen": -0.4334324896335602,
"logits/rejected": 0.09274639934301376,
"logps/chosen": -197.7583465576172,
"logps/rejected": -443.3536071777344,
"loss": 0.1698,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 2.787088632583618,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.1186861991882324,
"rewards/student_margin": 4.9057745933532715,
"rewards/teacher_margin": 0.0,
"step": 1390
},
{
"epoch": 0.81,
"grad_norm": 12.5,
"learning_rate": 5.040568465504991e-07,
"logits/chosen": -0.23930701613426208,
"logits/rejected": 0.13585281372070312,
"logps/chosen": -247.615478515625,
"logps/rejected": -547.8809814453125,
"loss": 0.2305,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 2.4914164543151855,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.0706675052642822,
"rewards/student_margin": 4.562083721160889,
"rewards/teacher_margin": 0.0,
"step": 1400
},
{
"epoch": 0.82,
"grad_norm": 6.71875,
"learning_rate": 4.738802543692214e-07,
"logits/chosen": -0.33991652727127075,
"logits/rejected": 0.23731207847595215,
"logps/chosen": -219.88528442382812,
"logps/rejected": -498.65618896484375,
"loss": 0.1918,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 2.3377609252929688,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.0855703353881836,
"rewards/student_margin": 4.423331260681152,
"rewards/teacher_margin": 0.0,
"step": 1410
},
{
"epoch": 0.83,
"grad_norm": 17.625,
"learning_rate": 4.445402873662932e-07,
"logits/chosen": -0.33060532808303833,
"logits/rejected": 0.07881384342908859,
"logps/chosen": -235.1278839111328,
"logps/rejected": -671.2401123046875,
"loss": 0.1594,
"rewards/accuracies": 0.8666666746139526,
"rewards/chosen": 1.9622802734375,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.477834701538086,
"rewards/student_margin": 4.440115451812744,
"rewards/teacher_margin": 0.0,
"step": 1420
},
{
"epoch": 0.83,
"grad_norm": 18.125,
"learning_rate": 4.1604906059818234e-07,
"logits/chosen": -0.5972268581390381,
"logits/rejected": -0.00915351789444685,
"logps/chosen": -217.2305450439453,
"logps/rejected": -514.9454956054688,
"loss": 0.1546,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.6480636596679688,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.5352678298950195,
"rewards/student_margin": 4.183331489562988,
"rewards/teacher_margin": 0.0,
"step": 1430
},
{
"epoch": 0.84,
"grad_norm": 9.0625,
"learning_rate": 3.8841833865961435e-07,
"logits/chosen": -0.19409717619419098,
"logits/rejected": 0.05707187205553055,
"logps/chosen": -290.650390625,
"logps/rejected": -460.62548828125,
"loss": 0.1939,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 1.422797441482544,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.440934896469116,
"rewards/student_margin": 3.8637325763702393,
"rewards/teacher_margin": 0.0,
"step": 1440
},
{
"epoch": 0.84,
"grad_norm": 11.5,
"learning_rate": 3.6165953082574453e-07,
"logits/chosen": -0.34082725644111633,
"logits/rejected": 0.08756278455257416,
"logps/chosen": -258.0528259277344,
"logps/rejected": -627.5899047851562,
"loss": 0.1415,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.9923893213272095,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.4780945777893066,
"rewards/student_margin": 4.470483303070068,
"rewards/teacher_margin": 0.0,
"step": 1450
},
{
"epoch": 0.85,
"grad_norm": 14.25,
"learning_rate": 3.3578368634103133e-07,
"logits/chosen": -0.5575006008148193,
"logits/rejected": 0.1742563545703888,
"logps/chosen": -224.60537719726562,
"logps/rejected": -587.3863525390625,
"loss": 0.1389,
"rewards/accuracies": 0.8666666746139526,
"rewards/chosen": 1.9285333156585693,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -3.4504916667938232,
"rewards/student_margin": 5.379024982452393,
"rewards/teacher_margin": 0.0,
"step": 1460
},
{
"epoch": 0.86,
"grad_norm": 15.1875,
"learning_rate": 3.108014898567921e-07,
"logits/chosen": -0.32235223054885864,
"logits/rejected": 0.23691675066947937,
"logps/chosen": -289.0404052734375,
"logps/rejected": -518.9627685546875,
"loss": 0.1884,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.5728108882904053,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.671419143676758,
"rewards/student_margin": 4.244229793548584,
"rewards/teacher_margin": 0.0,
"step": 1470
},
{
"epoch": 0.86,
"grad_norm": 12.0,
"learning_rate": 2.867232570192932e-07,
"logits/chosen": -0.4675559103488922,
"logits/rejected": 0.2386462688446045,
"logps/chosen": -238.5657501220703,
"logps/rejected": -556.7369384765625,
"loss": 0.172,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 2.0571231842041016,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.1405348777770996,
"rewards/student_margin": 4.197658061981201,
"rewards/teacher_margin": 0.0,
"step": 1480
},
{
"epoch": 0.87,
"grad_norm": 9.6875,
"learning_rate": 2.635589302102143e-07,
"logits/chosen": -0.5631746053695679,
"logits/rejected": 0.1842821091413498,
"logps/chosen": -202.35910034179688,
"logps/rejected": -496.7588806152344,
"loss": 0.1308,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.9014427661895752,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.115988254547119,
"rewards/student_margin": 4.017430782318115,
"rewards/teacher_margin": 0.0,
"step": 1490
},
{
"epoch": 0.87,
"grad_norm": 12.875,
"learning_rate": 2.413180744412449e-07,
"logits/chosen": -0.45950451493263245,
"logits/rejected": -0.005202120635658503,
"logps/chosen": -199.29319763183594,
"logps/rejected": -393.0441589355469,
"loss": 0.1418,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.295112371444702,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.899126648902893,
"rewards/student_margin": 4.194239139556885,
"rewards/teacher_margin": 0.0,
"step": 1500
},
{
"epoch": 0.88,
"grad_norm": 7.25,
"learning_rate": 2.200098734044953e-07,
"logits/chosen": -0.3245978355407715,
"logits/rejected": 0.15231186151504517,
"logps/chosen": -282.319091796875,
"logps/rejected": -564.8612060546875,
"loss": 0.1391,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.9999994039535522,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.712496757507324,
"rewards/student_margin": 4.712496757507324,
"rewards/teacher_margin": 0.0,
"step": 1510
},
{
"epoch": 0.88,
"grad_norm": 20.5,
"learning_rate": 1.9964312568037102e-07,
"logits/chosen": -0.43997129797935486,
"logits/rejected": 0.11200802028179169,
"logps/chosen": -215.91390991210938,
"logps/rejected": -454.8565979003906,
"loss": 0.2001,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.7922544479370117,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.544246196746826,
"rewards/student_margin": 4.336500644683838,
"rewards/teacher_margin": 0.0,
"step": 1520
},
{
"epoch": 0.89,
"grad_norm": 7.9375,
"learning_rate": 1.8022624110446223e-07,
"logits/chosen": -0.23303954303264618,
"logits/rejected": -0.25525903701782227,
"logps/chosen": -233.7492218017578,
"logps/rejected": -434.97943115234375,
"loss": 0.1737,
"rewards/accuracies": 0.9000000953674316,
"rewards/chosen": 2.0419070720672607,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.659355878829956,
"rewards/student_margin": 3.701263427734375,
"rewards/teacher_margin": 0.0,
"step": 1530
},
{
"epoch": 0.9,
"grad_norm": 11.3125,
"learning_rate": 1.617672372949558e-07,
"logits/chosen": -0.4891526699066162,
"logits/rejected": 0.0062074740417301655,
"logps/chosen": -184.78028869628906,
"logps/rejected": -495.7847595214844,
"loss": 0.1925,
"rewards/accuracies": 0.8666666150093079,
"rewards/chosen": 1.5070412158966064,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.09743070602417,
"rewards/student_margin": 3.6044719219207764,
"rewards/teacher_margin": 0.0,
"step": 1540
},
{
"epoch": 0.9,
"grad_norm": 8.8125,
"learning_rate": 1.442737363420016e-07,
"logits/chosen": -0.47575002908706665,
"logits/rejected": 0.09344211965799332,
"logps/chosen": -215.34390258789062,
"logps/rejected": -447.3370056152344,
"loss": 0.1637,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 1.5486562252044678,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.476374864578247,
"rewards/student_margin": 4.025030612945557,
"rewards/teacher_margin": 0.0,
"step": 1550
},
{
"epoch": 0.91,
"grad_norm": 5.09375,
"learning_rate": 1.2775296166039759e-07,
"logits/chosen": -0.34430769085884094,
"logits/rejected": 0.11993454396724701,
"logps/chosen": -198.77247619628906,
"logps/rejected": -414.20648193359375,
"loss": 0.1491,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 2.136995553970337,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.7208058834075928,
"rewards/student_margin": 3.8578014373779297,
"rewards/teacher_margin": 0.0,
"step": 1560
},
{
"epoch": 0.91,
"grad_norm": 9.125,
"learning_rate": 1.1221173500690074e-07,
"logits/chosen": -0.43167194724082947,
"logits/rejected": 0.05056245997548103,
"logps/chosen": -202.82408142089844,
"logps/rejected": -457.1270446777344,
"loss": 0.1328,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 1.7842384576797485,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -1.8982183933258057,
"rewards/student_margin": 3.6824564933776855,
"rewards/teacher_margin": 0.0,
"step": 1570
},
{
"epoch": 0.92,
"grad_norm": 13.4375,
"learning_rate": 9.765647366338615e-08,
"logits/chosen": -0.617282509803772,
"logits/rejected": 0.20323173701763153,
"logps/chosen": -215.5322723388672,
"logps/rejected": -516.0162353515625,
"loss": 0.1082,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.109098196029663,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.6933035850524902,
"rewards/student_margin": 4.802401542663574,
"rewards/teacher_margin": 0.0,
"step": 1580
},
{
"epoch": 0.93,
"grad_norm": 13.1875,
"learning_rate": 8.409318778702436e-08,
"logits/chosen": -0.24509771168231964,
"logits/rejected": 0.05450732633471489,
"logps/chosen": -244.3855438232422,
"logps/rejected": -399.8455505371094,
"loss": 0.1269,
"rewards/accuracies": 0.966666579246521,
"rewards/chosen": 2.09488582611084,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.1163201332092285,
"rewards/student_margin": 4.21120548248291,
"rewards/teacher_margin": 0.0,
"step": 1590
},
{
"epoch": 0.93,
"grad_norm": 12.625,
"learning_rate": 7.152747792856795e-08,
"logits/chosen": -0.5267202854156494,
"logits/rejected": 0.10546509921550751,
"logps/chosen": -246.46163940429688,
"logps/rejected": -488.86358642578125,
"loss": 0.1929,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 2.5057289600372314,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.4191794395446777,
"rewards/student_margin": 4.924908638000488,
"rewards/teacher_margin": 0.0,
"step": 1600
},
{
"epoch": 0.94,
"grad_norm": 8.8125,
"learning_rate": 5.99645327197712e-08,
"logits/chosen": -0.5035473108291626,
"logits/rejected": 0.06472502648830414,
"logps/chosen": -175.3248748779297,
"logps/rejected": -475.020263671875,
"loss": 0.1096,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 2.2377569675445557,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.6387767791748047,
"rewards/student_margin": 4.8765339851379395,
"rewards/teacher_margin": 0.0,
"step": 1610
},
{
"epoch": 0.94,
"grad_norm": 14.625,
"learning_rate": 4.94091267309027e-08,
"logits/chosen": -0.5968758463859558,
"logits/rejected": 0.08792127668857574,
"logps/chosen": -183.85728454589844,
"logps/rejected": -632.9024658203125,
"loss": 0.1953,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 2.25777530670166,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.826479434967041,
"rewards/student_margin": 5.084254741668701,
"rewards/teacher_margin": 0.0,
"step": 1620
},
{
"epoch": 0.95,
"grad_norm": 14.125,
"learning_rate": 3.9865618499229094e-08,
"logits/chosen": -0.41523703932762146,
"logits/rejected": -0.0010458737378939986,
"logps/chosen": -246.58242797851562,
"logps/rejected": -524.6209716796875,
"loss": 0.1439,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 1.9719102382659912,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.955763339996338,
"rewards/student_margin": 4.927673816680908,
"rewards/teacher_margin": 0.0,
"step": 1630
},
{
"epoch": 0.95,
"grad_norm": 7.59375,
"learning_rate": 3.1337948729288633e-08,
"logits/chosen": -0.45982232689857483,
"logits/rejected": 0.08918474614620209,
"logps/chosen": -246.1114501953125,
"logps/rejected": -527.5103149414062,
"loss": 0.1411,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.207768440246582,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.371577501296997,
"rewards/student_margin": 4.579345703125,
"rewards/teacher_margin": 0.0,
"step": 1640
},
{
"epoch": 0.96,
"grad_norm": 12.5625,
"learning_rate": 2.3829638665695544e-08,
"logits/chosen": -0.5832090377807617,
"logits/rejected": 0.012857144698500633,
"logps/chosen": -177.62669372558594,
"logps/rejected": -534.47314453125,
"loss": 0.1282,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 1.428046703338623,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.5331838130950928,
"rewards/student_margin": 3.961230516433716,
"rewards/teacher_margin": 0.0,
"step": 1650
},
{
"epoch": 0.97,
"grad_norm": 9.6875,
"learning_rate": 1.734378863914782e-08,
"logits/chosen": -0.4249907433986664,
"logits/rejected": 0.0664374977350235,
"logps/chosen": -245.0904083251953,
"logps/rejected": -566.2081909179688,
"loss": 0.1449,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 2.0259430408477783,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.927861452102661,
"rewards/student_margin": 4.953804969787598,
"rewards/teacher_margin": 0.0,
"step": 1660
},
{
"epoch": 0.97,
"grad_norm": 15.75,
"learning_rate": 1.1883076786240177e-08,
"logits/chosen": -0.5717610716819763,
"logits/rejected": 0.050547920167446136,
"logps/chosen": -175.99942016601562,
"logps/rejected": -478.18524169921875,
"loss": 0.1616,
"rewards/accuracies": 0.9333333969116211,
"rewards/chosen": 1.8787494897842407,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.2613539695739746,
"rewards/student_margin": 4.140103340148926,
"rewards/teacher_margin": 0.0,
"step": 1670
},
{
"epoch": 0.98,
"grad_norm": 25.25,
"learning_rate": 7.449757943606972e-09,
"logits/chosen": -0.5395152568817139,
"logits/rejected": 0.1687006652355194,
"logps/chosen": -226.3690643310547,
"logps/rejected": -453.1436462402344,
"loss": 0.1588,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.7440685033798218,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.6785857677459717,
"rewards/student_margin": 4.422654151916504,
"rewards/teacher_margin": 0.0,
"step": 1680
},
{
"epoch": 0.98,
"grad_norm": 16.0,
"learning_rate": 4.045662716855303e-09,
"logits/chosen": -0.2058514803647995,
"logits/rejected": 0.07757274806499481,
"logps/chosen": -237.3645477294922,
"logps/rejected": -580.2776489257812,
"loss": 0.1531,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.9641157388687134,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.1610915660858154,
"rewards/student_margin": 4.12520694732666,
"rewards/teacher_margin": 0.0,
"step": 1690
},
{
"epoch": 0.99,
"grad_norm": 12.625,
"learning_rate": 1.6721967246721704e-09,
"logits/chosen": -0.36075156927108765,
"logits/rejected": 0.044158972799777985,
"logps/chosen": -210.068359375,
"logps/rejected": -480.38726806640625,
"loss": 0.1368,
"rewards/accuracies": 0.9333332777023315,
"rewards/chosen": 1.983689308166504,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.8466620445251465,
"rewards/student_margin": 4.830351829528809,
"rewards/teacher_margin": 0.0,
"step": 1700
},
{
"epoch": 1.0,
"grad_norm": 10.6875,
"learning_rate": 3.30340018415698e-10,
"logits/chosen": -0.4800366461277008,
"logits/rejected": 0.04889531061053276,
"logps/chosen": -193.82672119140625,
"logps/rejected": -558.3578491210938,
"loss": 0.1185,
"rewards/accuracies": 0.9666666984558105,
"rewards/chosen": 2.046447515487671,
"rewards/diff": 0.0,
"rewards/diff_abs": 0.0,
"rewards/rejected": -2.4438791275024414,
"rewards/student_margin": 4.490326881408691,
"rewards/teacher_margin": 0.0,
"step": 1710
},
{
"epoch": 1.0,
"step": 1718,
"total_flos": 0.0,
"train_loss": 0.24084537111144683,
"train_runtime": 1647.7299,
"train_samples_per_second": 25.016,
"train_steps_per_second": 1.043
}
],
"logging_steps": 10,
"max_steps": 1718,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 400,
"total_flos": 0.0,
"train_batch_size": 3,
"trial_name": null,
"trial_params": null
}