nash_dpo_merge_iter_6 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9990817263544536,
"eval_steps": 100,
"global_step": 204,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 2.3809523809523811e-07,
"logits/chosen": -2.476858377456665,
"logits/rejected": -2.312589406967163,
"logps/chosen": -256.081787109375,
"logps/rejected": -237.47195434570312,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.05,
"learning_rate": 2.380952380952381e-06,
"logits/chosen": -2.2722971439361572,
"logits/rejected": -2.247359275817871,
"logps/chosen": -218.2034454345703,
"logps/rejected": -239.2631072998047,
"loss": 0.6892,
"rewards/accuracies": 0.5277777910232544,
"rewards/chosen": 0.0009012501686811447,
"rewards/margins": 0.007593905553221703,
"rewards/rejected": -0.0066926549188792706,
"step": 10
},
{
"epoch": 0.1,
"learning_rate": 4.761904761904762e-06,
"logits/chosen": -1.9851748943328857,
"logits/rejected": -1.7117283344268799,
"logps/chosen": -327.2151794433594,
"logps/rejected": -315.92840576171875,
"loss": 0.6161,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.29775306582450867,
"rewards/margins": 0.21288084983825684,
"rewards/rejected": -0.5106339454650879,
"step": 20
},
{
"epoch": 0.15,
"learning_rate": 4.970219740227693e-06,
"logits/chosen": -1.6041641235351562,
"logits/rejected": -1.3207042217254639,
"logps/chosen": -298.0397644042969,
"logps/rejected": -315.13543701171875,
"loss": 0.5858,
"rewards/accuracies": 0.703125,
"rewards/chosen": -0.27262887358665466,
"rewards/margins": 0.3364975154399872,
"rewards/rejected": -0.6091263294219971,
"step": 30
},
{
"epoch": 0.2,
"learning_rate": 4.868186180746792e-06,
"logits/chosen": -1.2411524057388306,
"logits/rejected": -0.8121637105941772,
"logps/chosen": -354.04315185546875,
"logps/rejected": -402.13787841796875,
"loss": 0.5475,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.5969960689544678,
"rewards/margins": 0.565431535243988,
"rewards/rejected": -1.1624276638031006,
"step": 40
},
{
"epoch": 0.24,
"learning_rate": 4.696530612642871e-06,
"logits/chosen": -1.148498296737671,
"logits/rejected": -0.7622831463813782,
"logps/chosen": -342.1610107421875,
"logps/rejected": -401.2055969238281,
"loss": 0.5384,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.5624964833259583,
"rewards/margins": 0.587742030620575,
"rewards/rejected": -1.1502383947372437,
"step": 50
},
{
"epoch": 0.29,
"learning_rate": 4.460299516441777e-06,
"logits/chosen": -1.0859754085540771,
"logits/rejected": -0.6681124567985535,
"logps/chosen": -328.865234375,
"logps/rejected": -390.8894348144531,
"loss": 0.5481,
"rewards/accuracies": 0.734375,
"rewards/chosen": -0.5670114159584045,
"rewards/margins": 0.5836409330368042,
"rewards/rejected": -1.150652289390564,
"step": 60
},
{
"epoch": 0.34,
"learning_rate": 4.1664378205239085e-06,
"logits/chosen": -0.855512797832489,
"logits/rejected": -0.3595692217350006,
"logps/chosen": -353.48675537109375,
"logps/rejected": -402.8636779785156,
"loss": 0.5327,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.8263834714889526,
"rewards/margins": 0.5548129081726074,
"rewards/rejected": -1.38119637966156,
"step": 70
},
{
"epoch": 0.39,
"learning_rate": 3.8235847280454626e-06,
"logits/chosen": -1.166483998298645,
"logits/rejected": -0.6420685052871704,
"logps/chosen": -332.43975830078125,
"logps/rejected": -391.0145263671875,
"loss": 0.4927,
"rewards/accuracies": 0.753125011920929,
"rewards/chosen": -0.5521113276481628,
"rewards/margins": 0.6991725564002991,
"rewards/rejected": -1.251283884048462,
"step": 80
},
{
"epoch": 0.44,
"learning_rate": 3.441819734087963e-06,
"logits/chosen": -1.4997479915618896,
"logits/rejected": -0.9499446153640747,
"logps/chosen": -348.1226806640625,
"logps/rejected": -402.1637268066406,
"loss": 0.5287,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.5159438848495483,
"rewards/margins": 0.7751549482345581,
"rewards/rejected": -1.2910988330841064,
"step": 90
},
{
"epoch": 0.49,
"learning_rate": 3.0323662998460396e-06,
"logits/chosen": -1.3590983152389526,
"logits/rejected": -0.7285458445549011,
"logps/chosen": -362.32666015625,
"logps/rejected": -408.63885498046875,
"loss": 0.515,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -0.7177928686141968,
"rewards/margins": 0.6953709125518799,
"rewards/rejected": -1.4131637811660767,
"step": 100
},
{
"epoch": 0.49,
"eval_logits/chosen": -1.2092429399490356,
"eval_logits/rejected": -0.5798258185386658,
"eval_logps/chosen": -365.3294677734375,
"eval_logps/rejected": -413.1092834472656,
"eval_loss": 0.544935405254364,
"eval_rewards/accuracies": 0.7160000205039978,
"eval_rewards/chosen": -0.7798795700073242,
"eval_rewards/margins": 0.681567370891571,
"eval_rewards/rejected": -1.4614468812942505,
"eval_runtime": 384.0193,
"eval_samples_per_second": 5.208,
"eval_steps_per_second": 0.651,
"step": 100
},
{
"epoch": 0.54,
"learning_rate": 2.6072618954988867e-06,
"logits/chosen": -1.1533472537994385,
"logits/rejected": -0.3894789218902588,
"logps/chosen": -369.21466064453125,
"logps/rejected": -427.78204345703125,
"loss": 0.5476,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.81017005443573,
"rewards/margins": 0.6706447601318359,
"rewards/rejected": -1.4808146953582764,
"step": 110
},
{
"epoch": 0.59,
"learning_rate": 2.1790041121336223e-06,
"logits/chosen": -1.2282952070236206,
"logits/rejected": -0.6969107985496521,
"logps/chosen": -362.3310852050781,
"logps/rejected": -415.934814453125,
"loss": 0.5101,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": -0.7746846079826355,
"rewards/margins": 0.6679913401603699,
"rewards/rejected": -1.4426759481430054,
"step": 120
},
{
"epoch": 0.64,
"learning_rate": 1.760183246631777e-06,
"logits/chosen": -1.172527551651001,
"logits/rejected": -0.597676694393158,
"logps/chosen": -354.2320556640625,
"logps/rejected": -422.12841796875,
"loss": 0.5083,
"rewards/accuracies": 0.7406250238418579,
"rewards/chosen": -0.6290267109870911,
"rewards/margins": 0.8170250654220581,
"rewards/rejected": -1.446051836013794,
"step": 130
},
{
"epoch": 0.69,
"learning_rate": 1.3631121611097364e-06,
"logits/chosen": -1.188578486442566,
"logits/rejected": -0.816562294960022,
"logps/chosen": -359.2029113769531,
"logps/rejected": -407.4598693847656,
"loss": 0.5267,
"rewards/accuracies": 0.715624988079071,
"rewards/chosen": -0.699051558971405,
"rewards/margins": 0.6616160273551941,
"rewards/rejected": -1.3606675863265991,
"step": 140
},
{
"epoch": 0.73,
"learning_rate": 9.994642986290797e-07,
"logits/chosen": -1.323190450668335,
"logits/rejected": -0.7437389492988586,
"logps/chosen": -343.61151123046875,
"logps/rejected": -402.93597412109375,
"loss": 0.5295,
"rewards/accuracies": 0.746874988079071,
"rewards/chosen": -0.6734719276428223,
"rewards/margins": 0.7382540702819824,
"rewards/rejected": -1.4117259979248047,
"step": 150
},
{
"epoch": 0.78,
"learning_rate": 6.799304971075383e-07,
"logits/chosen": -1.250248670578003,
"logits/rejected": -0.6498929262161255,
"logps/chosen": -375.78753662109375,
"logps/rejected": -424.0221252441406,
"loss": 0.5198,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": -0.7862330675125122,
"rewards/margins": 0.6770610213279724,
"rewards/rejected": -1.463294267654419,
"step": 160
},
{
"epoch": 0.83,
"learning_rate": 4.1390469071538183e-07,
"logits/chosen": -1.101661205291748,
"logits/rejected": -0.5704807043075562,
"logps/chosen": -360.23687744140625,
"logps/rejected": -435.20220947265625,
"loss": 0.5199,
"rewards/accuracies": 0.765625,
"rewards/chosen": -0.7304506897926331,
"rewards/margins": 0.9151493310928345,
"rewards/rejected": -1.6455999612808228,
"step": 170
},
{
"epoch": 0.88,
"learning_rate": 2.092077387824884e-07,
"logits/chosen": -1.2315480709075928,
"logits/rejected": -0.714310348033905,
"logps/chosen": -371.4505310058594,
"logps/rejected": -436.5375061035156,
"loss": 0.5269,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.7898632287979126,
"rewards/margins": 0.7547814249992371,
"rewards/rejected": -1.5446445941925049,
"step": 180
},
{
"epoch": 0.93,
"learning_rate": 7.185750133542168e-08,
"logits/chosen": -1.1423877477645874,
"logits/rejected": -0.7209599018096924,
"logps/chosen": -351.42633056640625,
"logps/rejected": -417.2574768066406,
"loss": 0.5275,
"rewards/accuracies": 0.703125,
"rewards/chosen": -0.8558732867240906,
"rewards/margins": 0.733085572719574,
"rewards/rejected": -1.588958978652954,
"step": 190
},
{
"epoch": 0.98,
"learning_rate": 5.891920784984184e-09,
"logits/chosen": -1.2168794870376587,
"logits/rejected": -0.6400166749954224,
"logps/chosen": -360.48065185546875,
"logps/rejected": -428.3067932128906,
"loss": 0.4925,
"rewards/accuracies": 0.778124988079071,
"rewards/chosen": -0.7727445960044861,
"rewards/margins": 0.8154066205024719,
"rewards/rejected": -1.5881513357162476,
"step": 200
},
{
"epoch": 0.98,
"eval_logits/chosen": -1.180118203163147,
"eval_logits/rejected": -0.6068284511566162,
"eval_logps/chosen": -367.6444396972656,
"eval_logps/rejected": -415.7151794433594,
"eval_loss": 0.5366359949111938,
"eval_rewards/accuracies": 0.7319999933242798,
"eval_rewards/chosen": -0.8030293583869934,
"eval_rewards/margins": 0.6844774484634399,
"eval_rewards/rejected": -1.4875068664550781,
"eval_runtime": 384.016,
"eval_samples_per_second": 5.208,
"eval_steps_per_second": 0.651,
"step": 200
},
{
"epoch": 1.0,
"step": 204,
"total_flos": 0.0,
"train_loss": 0.54004861034599,
"train_runtime": 9618.5725,
"train_samples_per_second": 2.717,
"train_steps_per_second": 0.021
}
],
"logging_steps": 10,
"max_steps": 204,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
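
The log above is plain JSON, so it can be inspected directly with the standard library. Below is a minimal sketch that summarizes the training entries (step, loss, reward accuracy, reward margin) and the two evaluation entries; the local path "trainer_state.json" is an assumption, adjust it to wherever the file is saved.

import json

# Assumed local path to this file; change as needed.
STATE_PATH = "trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training entries in log_history carry "loss" and "rewards/*" keys;
# eval entries use "eval_"-prefixed keys; the final entry holds run totals.
print(f"{'step':>6} {'loss':>8} {'acc':>7} {'margin':>8}")
for entry in state["log_history"]:
    if "loss" in entry:  # training log lines only
        print(
            f"{entry['step']:>6} "
            f"{entry['loss']:>8.4f} "
            f"{entry['rewards/accuracies']:>7.3f} "
            f"{entry['rewards/margins']:>8.4f}"
        )

for e in state["log_history"]:
    if "eval_loss" in e:
        print(
            f"eval @ step {e['step']}: loss={e['eval_loss']:.4f}, "
            f"accuracy={e['eval_rewards/accuracies']:.3f}"
        )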