{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.997207114508305,
"eval_steps": 1,
"global_step": 850,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023519035719535498,
"grad_norm": 4.1415791511535645,
"learning_rate": 3.060971262053483e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -624.0,
"logps/rejected": -452.0,
"loss": 0.6933,
"rewards/accuracies": 0.08749999850988388,
"rewards/chosen": -0.007080078125,
"rewards/margins": -0.0089111328125,
"rewards/rejected": 0.00183868408203125,
"step": 10
},
{
"epoch": 0.047038071439070996,
"grad_norm": 3.7295737266540527,
"learning_rate": 3.9824154277970135e-06,
"logits/chosen": -4.53125,
"logits/rejected": -4.46875,
"logps/chosen": -552.0,
"logps/rejected": -422.0,
"loss": 0.6766,
"rewards/accuracies": 0.296875,
"rewards/chosen": 0.0263671875,
"rewards/margins": 0.044677734375,
"rewards/rejected": -0.018310546875,
"step": 20
},
{
"epoch": 0.0705571071586065,
"grad_norm": 3.65460205078125,
"learning_rate": 4.521425711265269e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.40625,
"logps/chosen": -592.0,
"logps/rejected": -436.0,
"loss": 0.6273,
"rewards/accuracies": 0.5718749761581421,
"rewards/chosen": 0.09423828125,
"rewards/margins": 0.158203125,
"rewards/rejected": -0.0634765625,
"step": 30
},
{
"epoch": 0.09407614287814199,
"grad_norm": 3.148322343826294,
"learning_rate": 4.903859593540544e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -596.0,
"logps/rejected": -456.0,
"loss": 0.5544,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.185546875,
"rewards/margins": 0.35546875,
"rewards/rejected": -0.169921875,
"step": 40
},
{
"epoch": 0.1175951785976775,
"grad_norm": 4.775664806365967,
"learning_rate": 4.9628252788104095e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -576.0,
"logps/rejected": -444.0,
"loss": 0.4791,
"rewards/accuracies": 0.793749988079071,
"rewards/chosen": 0.2578125,
"rewards/margins": 0.55859375,
"rewards/rejected": -0.30078125,
"step": 50
},
{
"epoch": 0.141114214317213,
"grad_norm": 2.693018913269043,
"learning_rate": 4.9008674101610905e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -584.0,
"logps/rejected": -444.0,
"loss": 0.4168,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.39453125,
"rewards/margins": 0.87890625,
"rewards/rejected": -0.484375,
"step": 60
},
{
"epoch": 0.1646332500367485,
"grad_norm": 2.0642476081848145,
"learning_rate": 4.838909541511772e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -596.0,
"logps/rejected": -454.0,
"loss": 0.3891,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 0.388671875,
"rewards/margins": 1.0078125,
"rewards/rejected": -0.625,
"step": 70
},
{
"epoch": 0.18815228575628398,
"grad_norm": 2.0007693767547607,
"learning_rate": 4.776951672862453e-06,
"logits/chosen": -4.53125,
"logits/rejected": -4.5,
"logps/chosen": -600.0,
"logps/rejected": -492.0,
"loss": 0.3605,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.49609375,
"rewards/margins": 1.3515625,
"rewards/rejected": -0.8515625,
"step": 80
},
{
"epoch": 0.2116713214758195,
"grad_norm": 2.3289802074432373,
"learning_rate": 4.714993804213135e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -584.0,
"logps/rejected": -468.0,
"loss": 0.3619,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.396484375,
"rewards/margins": 1.34375,
"rewards/rejected": -0.9453125,
"step": 90
},
{
"epoch": 0.235190357195355,
"grad_norm": 2.2528398036956787,
"learning_rate": 4.653035935563817e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.46875,
"logps/chosen": -620.0,
"logps/rejected": -468.0,
"loss": 0.3334,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 0.59765625,
"rewards/margins": 1.609375,
"rewards/rejected": -1.0078125,
"step": 100
},
{
"epoch": 0.2587093929148905,
"grad_norm": 1.8650789260864258,
"learning_rate": 4.591078066914498e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.375,
"logps/chosen": -596.0,
"logps/rejected": -416.0,
"loss": 0.2843,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.55859375,
"rewards/margins": 1.8125,
"rewards/rejected": -1.25,
"step": 110
},
{
"epoch": 0.282228428634426,
"grad_norm": 2.2233972549438477,
"learning_rate": 4.52912019826518e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -596.0,
"logps/rejected": -476.0,
"loss": 0.2863,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 0.63671875,
"rewards/margins": 1.8984375,
"rewards/rejected": -1.2578125,
"step": 120
},
{
"epoch": 0.3057474643539615,
"grad_norm": 2.017451286315918,
"learning_rate": 4.467162329615862e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.5,
"logps/chosen": -612.0,
"logps/rejected": -448.0,
"loss": 0.2741,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.64453125,
"rewards/margins": 1.9453125,
"rewards/rejected": -1.3046875,
"step": 130
},
{
"epoch": 0.329266500073497,
"grad_norm": 2.127619743347168,
"learning_rate": 4.405204460966543e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -616.0,
"logps/rejected": -454.0,
"loss": 0.2705,
"rewards/accuracies": 0.8843749761581421,
"rewards/chosen": 0.8828125,
"rewards/margins": 2.265625,
"rewards/rejected": -1.375,
"step": 140
},
{
"epoch": 0.3527855357930325,
"grad_norm": 1.8466318845748901,
"learning_rate": 4.343246592317225e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.40625,
"logps/chosen": -624.0,
"logps/rejected": -492.0,
"loss": 0.2757,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.84375,
"rewards/margins": 2.28125,
"rewards/rejected": -1.4375,
"step": 150
},
{
"epoch": 0.37630457151256796,
"grad_norm": 1.8905202150344849,
"learning_rate": 4.2812887236679065e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.375,
"logps/chosen": -556.0,
"logps/rejected": -450.0,
"loss": 0.2506,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.94140625,
"rewards/margins": 2.328125,
"rewards/rejected": -1.390625,
"step": 160
},
{
"epoch": 0.3998236072321035,
"grad_norm": 1.59074866771698,
"learning_rate": 4.219330855018588e-06,
"logits/chosen": -4.53125,
"logits/rejected": -4.4375,
"logps/chosen": -576.0,
"logps/rejected": -452.0,
"loss": 0.2633,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 0.9453125,
"rewards/margins": 2.328125,
"rewards/rejected": -1.390625,
"step": 170
},
{
"epoch": 0.423342642951639,
"grad_norm": 1.7788141965866089,
"learning_rate": 4.157372986369269e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -600.0,
"logps/rejected": -500.0,
"loss": 0.2619,
"rewards/accuracies": 0.859375,
"rewards/chosen": 0.9765625,
"rewards/margins": 2.375,
"rewards/rejected": -1.3984375,
"step": 180
},
{
"epoch": 0.4468616786711745,
"grad_norm": 2.1362152099609375,
"learning_rate": 4.095415117719951e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.46875,
"logps/chosen": -592.0,
"logps/rejected": -472.0,
"loss": 0.2743,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.09375,
"rewards/margins": 2.578125,
"rewards/rejected": -1.4921875,
"step": 190
},
{
"epoch": 0.47038071439071,
"grad_norm": 1.8583593368530273,
"learning_rate": 4.033457249070632e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.40625,
"logps/chosen": -588.0,
"logps/rejected": -460.0,
"loss": 0.2359,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.2265625,
"rewards/margins": 2.640625,
"rewards/rejected": -1.4140625,
"step": 200
},
{
"epoch": 0.4938997501102455,
"grad_norm": 2.6427791118621826,
"learning_rate": 3.971499380421314e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.5,
"logps/chosen": -568.0,
"logps/rejected": -456.0,
"loss": 0.2418,
"rewards/accuracies": 0.8968750238418579,
"rewards/chosen": 1.2578125,
"rewards/margins": 2.4375,
"rewards/rejected": -1.1796875,
"step": 210
},
{
"epoch": 0.517418785829781,
"grad_norm": 1.6700748205184937,
"learning_rate": 3.909541511771995e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -580.0,
"logps/rejected": -442.0,
"loss": 0.2332,
"rewards/accuracies": 0.8968750238418579,
"rewards/chosen": 1.140625,
"rewards/margins": 2.8125,
"rewards/rejected": -1.671875,
"step": 220
},
{
"epoch": 0.5409378215493165,
"grad_norm": 2.4413912296295166,
"learning_rate": 3.847583643122677e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -612.0,
"logps/rejected": -484.0,
"loss": 0.2338,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.3125,
"rewards/margins": 2.765625,
"rewards/rejected": -1.453125,
"step": 230
},
{
"epoch": 0.564456857268852,
"grad_norm": 1.718529462814331,
"learning_rate": 3.7856257744733583e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.46875,
"logps/chosen": -592.0,
"logps/rejected": -432.0,
"loss": 0.2044,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.5546875,
"rewards/margins": 3.09375,
"rewards/rejected": -1.5390625,
"step": 240
},
{
"epoch": 0.5879758929883875,
"grad_norm": 2.0037829875946045,
"learning_rate": 3.7236679058240398e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -584.0,
"logps/rejected": -458.0,
"loss": 0.221,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.375,
"rewards/margins": 3.03125,
"rewards/rejected": -1.6484375,
"step": 250
},
{
"epoch": 0.611494928707923,
"grad_norm": 1.673599362373352,
"learning_rate": 3.6617100371747216e-06,
"logits/chosen": -4.53125,
"logits/rejected": -4.5,
"logps/chosen": -564.0,
"logps/rejected": -468.0,
"loss": 0.2305,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.453125,
"rewards/margins": 2.96875,
"rewards/rejected": -1.515625,
"step": 260
},
{
"epoch": 0.6350139644274585,
"grad_norm": 1.8723180294036865,
"learning_rate": 3.5997521685254026e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.46875,
"logps/chosen": -564.0,
"logps/rejected": -430.0,
"loss": 0.2233,
"rewards/accuracies": 0.921875,
"rewards/chosen": 1.4296875,
"rewards/margins": 3.0625,
"rewards/rejected": -1.6328125,
"step": 270
},
{
"epoch": 0.658533000146994,
"grad_norm": 1.8895870447158813,
"learning_rate": 3.5377942998760845e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -584.0,
"logps/rejected": -468.0,
"loss": 0.2191,
"rewards/accuracies": 0.909375011920929,
"rewards/chosen": 1.5234375,
"rewards/margins": 3.21875,
"rewards/rejected": -1.6953125,
"step": 280
},
{
"epoch": 0.6820520358665295,
"grad_norm": 1.5127581357955933,
"learning_rate": 3.4758364312267663e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -596.0,
"logps/rejected": -450.0,
"loss": 0.213,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.4609375,
"rewards/margins": 2.9375,
"rewards/rejected": -1.46875,
"step": 290
},
{
"epoch": 0.705571071586065,
"grad_norm": 2.202705144882202,
"learning_rate": 3.4138785625774478e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.40625,
"logps/chosen": -576.0,
"logps/rejected": -468.0,
"loss": 0.2195,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.578125,
"rewards/margins": 3.0,
"rewards/rejected": -1.421875,
"step": 300
},
{
"epoch": 0.7290901073056004,
"grad_norm": 1.8141247034072876,
"learning_rate": 3.351920693928129e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -560.0,
"logps/rejected": -436.0,
"loss": 0.2075,
"rewards/accuracies": 0.9156249761581421,
"rewards/chosen": 1.578125,
"rewards/margins": 3.1875,
"rewards/rejected": -1.609375,
"step": 310
},
{
"epoch": 0.7526091430251359,
"grad_norm": 1.7854108810424805,
"learning_rate": 3.2899628252788106e-06,
"logits/chosen": -4.34375,
"logits/rejected": -4.375,
"logps/chosen": -616.0,
"logps/rejected": -462.0,
"loss": 0.2124,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.5859375,
"rewards/margins": 3.203125,
"rewards/rejected": -1.6171875,
"step": 320
},
{
"epoch": 0.7761281787446714,
"grad_norm": 1.5797982215881348,
"learning_rate": 3.2280049566294925e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.46875,
"logps/chosen": -620.0,
"logps/rejected": -496.0,
"loss": 0.2276,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.7734375,
"rewards/margins": 3.515625,
"rewards/rejected": -1.7421875,
"step": 330
},
{
"epoch": 0.799647214464207,
"grad_norm": 1.9662494659423828,
"learning_rate": 3.1660470879801735e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.4375,
"logps/chosen": -568.0,
"logps/rejected": -456.0,
"loss": 0.2161,
"rewards/accuracies": 0.903124988079071,
"rewards/chosen": 1.65625,
"rewards/margins": 3.0625,
"rewards/rejected": -1.3984375,
"step": 340
},
{
"epoch": 0.8231662501837425,
"grad_norm": 1.7154417037963867,
"learning_rate": 3.1040892193308553e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -572.0,
"logps/rejected": -462.0,
"loss": 0.1925,
"rewards/accuracies": 0.909375011920929,
"rewards/chosen": 1.6640625,
"rewards/margins": 3.15625,
"rewards/rejected": -1.4921875,
"step": 350
},
{
"epoch": 0.846685285903278,
"grad_norm": 2.1363542079925537,
"learning_rate": 3.042131350681537e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.375,
"logps/chosen": -520.0,
"logps/rejected": -410.0,
"loss": 0.225,
"rewards/accuracies": 0.9156249761581421,
"rewards/chosen": 1.4609375,
"rewards/margins": 3.171875,
"rewards/rejected": -1.703125,
"step": 360
},
{
"epoch": 0.8702043216228135,
"grad_norm": 1.9305567741394043,
"learning_rate": 2.980173482032218e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -572.0,
"logps/rejected": -452.0,
"loss": 0.1921,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.671875,
"rewards/margins": 3.453125,
"rewards/rejected": -1.7890625,
"step": 370
},
{
"epoch": 0.893723357342349,
"grad_norm": 1.7296861410140991,
"learning_rate": 2.9182156133829e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.5,
"logps/chosen": -592.0,
"logps/rejected": -450.0,
"loss": 0.2142,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.59375,
"rewards/margins": 3.296875,
"rewards/rejected": -1.703125,
"step": 380
},
{
"epoch": 0.9172423930618845,
"grad_norm": 1.9843934774398804,
"learning_rate": 2.8562577447335815e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.46875,
"logps/chosen": -576.0,
"logps/rejected": -426.0,
"loss": 0.213,
"rewards/accuracies": 0.9281250238418579,
"rewards/chosen": 1.625,
"rewards/margins": 3.390625,
"rewards/rejected": -1.7578125,
"step": 390
},
{
"epoch": 0.94076142878142,
"grad_norm": 2.4459681510925293,
"learning_rate": 2.7942998760842625e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.34375,
"logps/chosen": -576.0,
"logps/rejected": -496.0,
"loss": 0.1966,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.75,
"rewards/margins": 3.515625,
"rewards/rejected": -1.765625,
"step": 400
},
{
"epoch": 0.9642804645009555,
"grad_norm": 2.3723385334014893,
"learning_rate": 2.7323420074349443e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -556.0,
"logps/rejected": -452.0,
"loss": 0.2096,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.5703125,
"rewards/margins": 3.234375,
"rewards/rejected": -1.671875,
"step": 410
},
{
"epoch": 0.987799500220491,
"grad_norm": 2.0075254440307617,
"learning_rate": 2.670384138785626e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.40625,
"logps/chosen": -588.0,
"logps/rejected": -492.0,
"loss": 0.219,
"rewards/accuracies": 0.890625,
"rewards/chosen": 1.7265625,
"rewards/margins": 3.4375,
"rewards/rejected": -1.7109375,
"step": 420
},
{
"epoch": 1.0094076142878141,
"grad_norm": 0.9782338738441467,
"learning_rate": 2.6146220570012394e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -576.0,
"logps/rejected": -480.0,
"loss": 0.1502,
"rewards/accuracies": 0.9319728016853333,
"rewards/chosen": 1.78125,
"rewards/margins": 3.625,
"rewards/rejected": -1.84375,
"step": 430
},
{
"epoch": 1.0329266500073497,
"grad_norm": 0.8791906833648682,
"learning_rate": 2.552664188351921e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.5,
"logps/chosen": -568.0,
"logps/rejected": -436.0,
"loss": 0.1162,
"rewards/accuracies": 0.971875011920929,
"rewards/chosen": 1.890625,
"rewards/margins": 4.125,
"rewards/rejected": -2.25,
"step": 440
},
{
"epoch": 1.0564456857268851,
"grad_norm": 0.9551910161972046,
"learning_rate": 2.4907063197026023e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -568.0,
"logps/rejected": -434.0,
"loss": 0.1221,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": 1.8359375,
"rewards/margins": 3.84375,
"rewards/rejected": -2.015625,
"step": 450
},
{
"epoch": 1.0799647214464208,
"grad_norm": 1.0174229145050049,
"learning_rate": 2.4287484510532837e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.53125,
"logps/chosen": -576.0,
"logps/rejected": -446.0,
"loss": 0.122,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 1.953125,
"rewards/margins": 4.0,
"rewards/rejected": -2.046875,
"step": 460
},
{
"epoch": 1.1034837571659561,
"grad_norm": 1.0029239654541016,
"learning_rate": 2.3667905824039656e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.46875,
"logps/chosen": -592.0,
"logps/rejected": -458.0,
"loss": 0.1126,
"rewards/accuracies": 0.971875011920929,
"rewards/chosen": 1.921875,
"rewards/margins": 4.21875,
"rewards/rejected": -2.296875,
"step": 470
},
{
"epoch": 1.1270027928854918,
"grad_norm": 1.4849553108215332,
"learning_rate": 2.304832713754647e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.375,
"logps/chosen": -600.0,
"logps/rejected": -504.0,
"loss": 0.1161,
"rewards/accuracies": 0.9781249761581421,
"rewards/chosen": 2.09375,
"rewards/margins": 4.40625,
"rewards/rejected": -2.3125,
"step": 480
},
{
"epoch": 1.1505218286050272,
"grad_norm": 0.9113921523094177,
"learning_rate": 2.242874845105329e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.40625,
"logps/chosen": -584.0,
"logps/rejected": -474.0,
"loss": 0.1067,
"rewards/accuracies": 0.965624988079071,
"rewards/chosen": 1.921875,
"rewards/margins": 4.125,
"rewards/rejected": -2.203125,
"step": 490
},
{
"epoch": 1.1740408643245628,
"grad_norm": 1.4038139581680298,
"learning_rate": 2.1809169764560103e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -596.0,
"logps/rejected": -450.0,
"loss": 0.1208,
"rewards/accuracies": 0.96875,
"rewards/chosen": 1.9375,
"rewards/margins": 4.21875,
"rewards/rejected": -2.265625,
"step": 500
},
{
"epoch": 1.1975599000440982,
"grad_norm": 0.9754643440246582,
"learning_rate": 2.1189591078066917e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.375,
"logps/chosen": -584.0,
"logps/rejected": -456.0,
"loss": 0.1145,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 1.8515625,
"rewards/margins": 4.0625,
"rewards/rejected": -2.203125,
"step": 510
},
{
"epoch": 1.2210789357636336,
"grad_norm": 0.7279557585716248,
"learning_rate": 2.057001239157373e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.375,
"logps/chosen": -588.0,
"logps/rejected": -460.0,
"loss": 0.111,
"rewards/accuracies": 0.984375,
"rewards/chosen": 1.765625,
"rewards/margins": 4.21875,
"rewards/rejected": -2.46875,
"step": 520
},
{
"epoch": 1.2445979714831692,
"grad_norm": 1.1293213367462158,
"learning_rate": 1.9950433705080545e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.46875,
"logps/chosen": -572.0,
"logps/rejected": -428.0,
"loss": 0.1192,
"rewards/accuracies": 0.981249988079071,
"rewards/chosen": 1.9921875,
"rewards/margins": 4.28125,
"rewards/rejected": -2.296875,
"step": 530
},
{
"epoch": 1.2681170072027048,
"grad_norm": 0.7407346963882446,
"learning_rate": 1.9330855018587364e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.40625,
"logps/chosen": -596.0,
"logps/rejected": -456.0,
"loss": 0.1107,
"rewards/accuracies": 0.965624988079071,
"rewards/chosen": 2.015625,
"rewards/margins": 4.25,
"rewards/rejected": -2.25,
"step": 540
},
{
"epoch": 1.2916360429222402,
"grad_norm": 1.1689890623092651,
"learning_rate": 1.8711276332094178e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.4375,
"logps/chosen": -600.0,
"logps/rejected": -472.0,
"loss": 0.116,
"rewards/accuracies": 0.9468749761581421,
"rewards/chosen": 1.828125,
"rewards/margins": 4.21875,
"rewards/rejected": -2.375,
"step": 550
},
{
"epoch": 1.3151550786417756,
"grad_norm": 1.0410897731781006,
"learning_rate": 1.8091697645600993e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.46875,
"logps/chosen": -584.0,
"logps/rejected": -436.0,
"loss": 0.1166,
"rewards/accuracies": 0.965624988079071,
"rewards/chosen": 1.9375,
"rewards/margins": 4.34375,
"rewards/rejected": -2.40625,
"step": 560
},
{
"epoch": 1.3386741143613112,
"grad_norm": 0.8815547227859497,
"learning_rate": 1.7472118959107809e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -580.0,
"logps/rejected": -482.0,
"loss": 0.1003,
"rewards/accuracies": 0.953125,
"rewards/chosen": 1.96875,
"rewards/margins": 4.25,
"rewards/rejected": -2.28125,
"step": 570
},
{
"epoch": 1.3621931500808466,
"grad_norm": 1.0066012144088745,
"learning_rate": 1.6852540272614623e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -576.0,
"logps/rejected": -516.0,
"loss": 0.1079,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": 1.8671875,
"rewards/margins": 4.03125,
"rewards/rejected": -2.15625,
"step": 580
},
{
"epoch": 1.3857121858003822,
"grad_norm": 1.2409735918045044,
"learning_rate": 1.623296158612144e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.46875,
"logps/chosen": -600.0,
"logps/rejected": -464.0,
"loss": 0.1281,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 1.890625,
"rewards/margins": 4.71875,
"rewards/rejected": -2.828125,
"step": 590
},
{
"epoch": 1.4092312215199176,
"grad_norm": 1.059398889541626,
"learning_rate": 1.5613382899628254e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -568.0,
"logps/rejected": -452.0,
"loss": 0.1181,
"rewards/accuracies": 0.953125,
"rewards/chosen": 1.921875,
"rewards/margins": 4.34375,
"rewards/rejected": -2.421875,
"step": 600
},
{
"epoch": 1.4327502572394533,
"grad_norm": 0.8506196141242981,
"learning_rate": 1.4993804213135068e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -576.0,
"logps/rejected": -450.0,
"loss": 0.1021,
"rewards/accuracies": 0.96875,
"rewards/chosen": 1.921875,
"rewards/margins": 4.40625,
"rewards/rejected": -2.484375,
"step": 610
},
{
"epoch": 1.4562692929589887,
"grad_norm": 1.0724049806594849,
"learning_rate": 1.4374225526641887e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.5,
"logps/chosen": -576.0,
"logps/rejected": -462.0,
"loss": 0.1089,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 1.796875,
"rewards/margins": 4.21875,
"rewards/rejected": -2.40625,
"step": 620
},
{
"epoch": 1.479788328678524,
"grad_norm": 0.8412142992019653,
"learning_rate": 1.37546468401487e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -576.0,
"logps/rejected": -440.0,
"loss": 0.1178,
"rewards/accuracies": 0.953125,
"rewards/chosen": 1.9140625,
"rewards/margins": 4.09375,
"rewards/rejected": -2.1875,
"step": 630
},
{
"epoch": 1.5033073643980597,
"grad_norm": 1.080023169517517,
"learning_rate": 1.3135068153655513e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -600.0,
"logps/rejected": -452.0,
"loss": 0.1097,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 2.0,
"rewards/margins": 4.5625,
"rewards/rejected": -2.546875,
"step": 640
},
{
"epoch": 1.5268264001175953,
"grad_norm": 1.2255281209945679,
"learning_rate": 1.2515489467162332e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.40625,
"logps/chosen": -572.0,
"logps/rejected": -456.0,
"loss": 0.1078,
"rewards/accuracies": 0.971875011920929,
"rewards/chosen": 1.96875,
"rewards/margins": 4.40625,
"rewards/rejected": -2.421875,
"step": 650
},
{
"epoch": 1.5503454358371307,
"grad_norm": 0.9179627299308777,
"learning_rate": 1.1895910780669146e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -568.0,
"logps/rejected": -444.0,
"loss": 0.1105,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 1.890625,
"rewards/margins": 4.28125,
"rewards/rejected": -2.40625,
"step": 660
},
{
"epoch": 1.573864471556666,
"grad_norm": 1.0199528932571411,
"learning_rate": 1.127633209417596e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.46875,
"logps/chosen": -548.0,
"logps/rejected": -450.0,
"loss": 0.1132,
"rewards/accuracies": 0.965624988079071,
"rewards/chosen": 1.8984375,
"rewards/margins": 4.21875,
"rewards/rejected": -2.3125,
"step": 670
},
{
"epoch": 1.5973835072762017,
"grad_norm": 1.190191626548767,
"learning_rate": 1.0656753407682777e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.53125,
"logps/chosen": -596.0,
"logps/rejected": -460.0,
"loss": 0.1178,
"rewards/accuracies": 0.965624988079071,
"rewards/chosen": 1.9375,
"rewards/margins": 4.3125,
"rewards/rejected": -2.359375,
"step": 680
},
{
"epoch": 1.6209025429957373,
"grad_norm": 1.029616355895996,
"learning_rate": 1.0037174721189593e-06,
"logits/chosen": -4.5625,
"logits/rejected": -4.59375,
"logps/chosen": -552.0,
"logps/rejected": -450.0,
"loss": 0.1087,
"rewards/accuracies": 0.9593750238418579,
"rewards/chosen": 1.8515625,
"rewards/margins": 4.375,
"rewards/rejected": -2.515625,
"step": 690
},
{
"epoch": 1.6444215787152727,
"grad_norm": 1.2294584512710571,
"learning_rate": 9.417596034696406e-07,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -576.0,
"logps/rejected": -444.0,
"loss": 0.1049,
"rewards/accuracies": 0.984375,
"rewards/chosen": 1.8671875,
"rewards/margins": 4.21875,
"rewards/rejected": -2.34375,
"step": 700
},
{
"epoch": 1.6679406144348081,
"grad_norm": 1.0543959140777588,
"learning_rate": 8.798017348203223e-07,
"logits/chosen": -4.4375,
"logits/rejected": -4.5,
"logps/chosen": -580.0,
"logps/rejected": -458.0,
"loss": 0.1063,
"rewards/accuracies": 0.965624988079071,
"rewards/chosen": 1.9609375,
"rewards/margins": 4.40625,
"rewards/rejected": -2.46875,
"step": 710
},
{
"epoch": 1.6914596501543437,
"grad_norm": 1.0166019201278687,
"learning_rate": 8.178438661710038e-07,
"logits/chosen": -4.4375,
"logits/rejected": -4.5,
"logps/chosen": -560.0,
"logps/rejected": -448.0,
"loss": 0.1014,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 1.8828125,
"rewards/margins": 4.4375,
"rewards/rejected": -2.5625,
"step": 720
},
{
"epoch": 1.7149786858738791,
"grad_norm": 1.0257729291915894,
"learning_rate": 7.558859975216853e-07,
"logits/chosen": -4.40625,
"logits/rejected": -4.375,
"logps/chosen": -604.0,
"logps/rejected": -524.0,
"loss": 0.1245,
"rewards/accuracies": 0.9593750238418579,
"rewards/chosen": 1.921875,
"rewards/margins": 4.46875,
"rewards/rejected": -2.546875,
"step": 730
},
{
"epoch": 1.7384977215934145,
"grad_norm": 1.058556318283081,
"learning_rate": 6.939281288723669e-07,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -588.0,
"logps/rejected": -468.0,
"loss": 0.1131,
"rewards/accuracies": 0.953125,
"rewards/chosen": 1.90625,
"rewards/margins": 4.09375,
"rewards/rejected": -2.1875,
"step": 740
},
{
"epoch": 1.7620167573129502,
"grad_norm": 1.1007747650146484,
"learning_rate": 6.319702602230483e-07,
"logits/chosen": -4.40625,
"logits/rejected": -4.375,
"logps/chosen": -620.0,
"logps/rejected": -510.0,
"loss": 0.0978,
"rewards/accuracies": 0.971875011920929,
"rewards/chosen": 1.9453125,
"rewards/margins": 4.71875,
"rewards/rejected": -2.765625,
"step": 750
},
{
"epoch": 1.7855357930324858,
"grad_norm": 1.0114929676055908,
"learning_rate": 5.700123915737299e-07,
"logits/chosen": -4.46875,
"logits/rejected": -4.4375,
"logps/chosen": -584.0,
"logps/rejected": -472.0,
"loss": 0.1013,
"rewards/accuracies": 0.981249988079071,
"rewards/chosen": 2.015625,
"rewards/margins": 4.53125,
"rewards/rejected": -2.53125,
"step": 760
},
{
"epoch": 1.8090548287520212,
"grad_norm": 0.9178210496902466,
"learning_rate": 5.080545229244115e-07,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -600.0,
"logps/rejected": -470.0,
"loss": 0.1082,
"rewards/accuracies": 0.965624988079071,
"rewards/chosen": 1.9375,
"rewards/margins": 4.5,
"rewards/rejected": -2.5625,
"step": 770
},
{
"epoch": 1.8325738644715566,
"grad_norm": 0.7771146893501282,
"learning_rate": 4.4609665427509294e-07,
"logits/chosen": -4.375,
"logits/rejected": -4.4375,
"logps/chosen": -600.0,
"logps/rejected": -456.0,
"loss": 0.1043,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 1.765625,
"rewards/margins": 4.5625,
"rewards/rejected": -2.796875,
"step": 780
},
{
"epoch": 1.8560929001910922,
"grad_norm": 0.9903047680854797,
"learning_rate": 3.8413878562577453e-07,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -560.0,
"logps/rejected": -462.0,
"loss": 0.1029,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 1.875,
"rewards/margins": 4.4375,
"rewards/rejected": -2.5625,
"step": 790
},
{
"epoch": 1.8796119359106278,
"grad_norm": 1.1646795272827148,
"learning_rate": 3.22180916976456e-07,
"logits/chosen": -4.5,
"logits/rejected": -4.46875,
"logps/chosen": -572.0,
"logps/rejected": -444.0,
"loss": 0.1103,
"rewards/accuracies": 0.965624988079071,
"rewards/chosen": 2.078125,
"rewards/margins": 4.53125,
"rewards/rejected": -2.4375,
"step": 800
},
{
"epoch": 1.9031309716301632,
"grad_norm": 0.7632185220718384,
"learning_rate": 2.6022304832713755e-07,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -584.0,
"logps/rejected": -438.0,
"loss": 0.0989,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 1.9140625,
"rewards/margins": 4.53125,
"rewards/rejected": -2.625,
"step": 810
},
{
"epoch": 1.9266500073496986,
"grad_norm": 0.9221560955047607,
"learning_rate": 1.982651796778191e-07,
"logits/chosen": -4.53125,
"logits/rejected": -4.53125,
"logps/chosen": -592.0,
"logps/rejected": -462.0,
"loss": 0.1141,
"rewards/accuracies": 0.9593750238418579,
"rewards/chosen": 1.921875,
"rewards/margins": 4.40625,
"rewards/rejected": -2.484375,
"step": 820
},
{
"epoch": 1.9501690430692342,
"grad_norm": 1.339996576309204,
"learning_rate": 1.3630731102850064e-07,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -596.0,
"logps/rejected": -454.0,
"loss": 0.1099,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 1.8203125,
"rewards/margins": 4.46875,
"rewards/rejected": -2.640625,
"step": 830
},
{
"epoch": 1.9736880787887696,
"grad_norm": 1.047699213027954,
"learning_rate": 7.434944237918216e-08,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -592.0,
"logps/rejected": -446.0,
"loss": 0.1054,
"rewards/accuracies": 0.96875,
"rewards/chosen": 1.890625,
"rewards/margins": 4.75,
"rewards/rejected": -2.875,
"step": 840
},
{
"epoch": 1.997207114508305,
"grad_norm": 0.8556466698646545,
"learning_rate": 1.2391573729863694e-08,
"logits/chosen": -4.46875,
"logits/rejected": -4.4375,
"logps/chosen": -600.0,
"logps/rejected": -516.0,
"loss": 0.1072,
"rewards/accuracies": 0.965624988079071,
"rewards/chosen": 1.90625,
"rewards/margins": 4.53125,
"rewards/rejected": -2.625,
"step": 850
}
],
"logging_steps": 10,
"max_steps": 850,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 250,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}