{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.998691442030882,
  "eval_steps": 500,
  "global_step": 477,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010468463752944255,
      "grad_norm": 30.248635446936238,
      "learning_rate": 5.208333333333333e-08,
      "logits/chosen": -0.4978736937046051,
      "logits/rejected": -0.5136879682540894,
      "logps/chosen": -1.1741688251495361,
      "logps/rejected": -1.359588384628296,
      "loss": 2.1736,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -1.1741688251495361,
      "rewards/margins": 0.18541957437992096,
      "rewards/rejected": -1.359588384628296,
      "step": 5
    },
    {
      "epoch": 0.02093692750588851,
      "grad_norm": 16.7263304813632,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": -0.5214197039604187,
      "logits/rejected": -0.49839648604393005,
      "logps/chosen": -1.1591269969940186,
      "logps/rejected": -1.2628146409988403,
      "loss": 2.1404,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -1.1591269969940186,
      "rewards/margins": 0.10368760675191879,
      "rewards/rejected": -1.2628146409988403,
      "step": 10
    },
    {
      "epoch": 0.031405391258832765,
      "grad_norm": 31.370650570864015,
      "learning_rate": 1.5624999999999999e-07,
      "logits/chosen": -0.4619904160499573,
      "logits/rejected": -0.4506860673427582,
      "logps/chosen": -1.107530951499939,
      "logps/rejected": -1.362442970275879,
      "loss": 2.1079,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.107530951499939,
      "rewards/margins": 0.25491195917129517,
      "rewards/rejected": -1.362442970275879,
      "step": 15
    },
    {
      "epoch": 0.04187385501177702,
      "grad_norm": 39.77312899360463,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -0.44072794914245605,
      "logits/rejected": -0.45300206542015076,
      "logps/chosen": -1.1610082387924194,
      "logps/rejected": -1.263232707977295,
      "loss": 2.1672,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -1.1610082387924194,
      "rewards/margins": 0.10222446918487549,
      "rewards/rejected": -1.263232707977295,
      "step": 20
    },
    {
      "epoch": 0.05234231876472128,
      "grad_norm": 13.297272362399367,
      "learning_rate": 2.604166666666667e-07,
      "logits/chosen": -0.5022198557853699,
      "logits/rejected": -0.47837525606155396,
      "logps/chosen": -1.1782314777374268,
      "logps/rejected": -1.241213083267212,
      "loss": 2.1428,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": -1.1782314777374268,
      "rewards/margins": 0.06298139691352844,
      "rewards/rejected": -1.241213083267212,
      "step": 25
    },
    {
      "epoch": 0.06281078251766553,
      "grad_norm": 37.00312129408131,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -0.4893794059753418,
      "logits/rejected": -0.4941066801548004,
      "logps/chosen": -1.1618492603302002,
      "logps/rejected": -1.2827363014221191,
      "loss": 2.1704,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -1.1618492603302002,
      "rewards/margins": 0.12088712304830551,
      "rewards/rejected": -1.2827363014221191,
      "step": 30
    },
    {
      "epoch": 0.07327924627060979,
      "grad_norm": 19.474111043847227,
      "learning_rate": 3.645833333333333e-07,
      "logits/chosen": -0.4861086308956146,
      "logits/rejected": -0.44397059082984924,
      "logps/chosen": -1.146590232849121,
      "logps/rejected": -1.2768195867538452,
      "loss": 2.1372,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -1.146590232849121,
      "rewards/margins": 0.13022944331169128,
      "rewards/rejected": -1.2768195867538452,
      "step": 35
    },
    {
      "epoch": 0.08374771002355404,
      "grad_norm": 27.757551759863073,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -0.49622225761413574,
      "logits/rejected": -0.502428412437439,
      "logps/chosen": -1.0711848735809326,
      "logps/rejected": -1.3877671957015991,
      "loss": 2.0943,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -1.0711848735809326,
      "rewards/margins": 0.31658223271369934,
      "rewards/rejected": -1.3877671957015991,
      "step": 40
    },
    {
      "epoch": 0.0942161737764983,
      "grad_norm": 29.522164186661577,
      "learning_rate": 4.6874999999999996e-07,
      "logits/chosen": -0.4612571597099304,
      "logits/rejected": -0.473367303609848,
      "logps/chosen": -1.0810363292694092,
      "logps/rejected": -1.2934114933013916,
      "loss": 2.1151,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -1.0810363292694092,
      "rewards/margins": 0.21237528324127197,
      "rewards/rejected": -1.2934114933013916,
      "step": 45
    },
    {
      "epoch": 0.10468463752944256,
      "grad_norm": 29.341931035274772,
      "learning_rate": 4.999731868769026e-07,
      "logits/chosen": -0.48391613364219666,
      "logits/rejected": -0.5020588040351868,
      "logps/chosen": -1.1583433151245117,
      "logps/rejected": -1.4378485679626465,
      "loss": 2.1398,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -1.1583433151245117,
      "rewards/margins": 0.2795052230358124,
      "rewards/rejected": -1.4378485679626465,
      "step": 50
    },
    {
      "epoch": 0.11515310128238682,
      "grad_norm": 123.21015696544787,
      "learning_rate": 4.996716052911017e-07,
      "logits/chosen": -0.4529895782470703,
      "logits/rejected": -0.44476404786109924,
      "logps/chosen": -1.1183184385299683,
      "logps/rejected": -1.3346388339996338,
      "loss": 2.1114,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -1.1183184385299683,
      "rewards/margins": 0.21632030606269836,
      "rewards/rejected": -1.3346388339996338,
      "step": 55
    },
    {
      "epoch": 0.12562156503533106,
      "grad_norm": 21.079325990888805,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -0.4931353032588959,
      "logits/rejected": -0.49371495842933655,
      "logps/chosen": -1.1979756355285645,
      "logps/rejected": -1.3100030422210693,
      "loss": 2.096,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -1.1979756355285645,
      "rewards/margins": 0.11202754080295563,
      "rewards/rejected": -1.3100030422210693,
      "step": 60
    },
    {
      "epoch": 0.1360900287882753,
      "grad_norm": 18.626119398460727,
      "learning_rate": 4.980652179769217e-07,
      "logits/chosen": -0.45450514554977417,
      "logits/rejected": -0.4266931414604187,
      "logps/chosen": -1.0912764072418213,
      "logps/rejected": -1.5846312046051025,
      "loss": 2.0791,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.0912764072418213,
      "rewards/margins": 0.4933546483516693,
      "rewards/rejected": -1.5846312046051025,
      "step": 65
    },
    {
      "epoch": 0.14655849254121958,
      "grad_norm": 52.35457634472626,
      "learning_rate": 4.967625656594781e-07,
      "logits/chosen": -0.3869231343269348,
      "logits/rejected": -0.3839837908744812,
      "logps/chosen": -1.165590524673462,
      "logps/rejected": -1.4081659317016602,
      "loss": 2.0592,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -1.165590524673462,
      "rewards/margins": 0.2425754815340042,
      "rewards/rejected": -1.4081659317016602,
      "step": 70
    },
    {
      "epoch": 0.15702695629416383,
      "grad_norm": 45.522892507761505,
      "learning_rate": 4.951291206355559e-07,
      "logits/chosen": -0.38980668783187866,
      "logits/rejected": -0.3762575685977936,
      "logps/chosen": -1.1448724269866943,
      "logps/rejected": -1.4742355346679688,
      "loss": 2.0746,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -1.1448724269866943,
      "rewards/margins": 0.3293631970882416,
      "rewards/rejected": -1.4742355346679688,
      "step": 75
    },
    {
      "epoch": 0.16749542004710807,
      "grad_norm": 31.87949007278887,
      "learning_rate": 4.93167072587771e-07,
      "logits/chosen": -0.40593957901000977,
      "logits/rejected": -0.3936491012573242,
      "logps/chosen": -1.1574352979660034,
      "logps/rejected": -1.5279510021209717,
      "loss": 2.1051,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.1574352979660034,
      "rewards/margins": 0.3705156445503235,
      "rewards/rejected": -1.5279510021209717,
      "step": 80
    },
    {
      "epoch": 0.17796388380005235,
      "grad_norm": 21.704707598130327,
      "learning_rate": 4.908790517010636e-07,
      "logits/chosen": -0.4470198154449463,
      "logits/rejected": -0.4167211055755615,
      "logps/chosen": -1.1367292404174805,
      "logps/rejected": -1.450998306274414,
      "loss": 2.1069,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.1367292404174805,
      "rewards/margins": 0.3142690062522888,
      "rewards/rejected": -1.450998306274414,
      "step": 85
    },
    {
      "epoch": 0.1884323475529966,
      "grad_norm": 8.653417747444738,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -0.4369986951351166,
      "logits/rejected": -0.4109964370727539,
      "logps/chosen": -1.1568020582199097,
      "logps/rejected": -1.4440120458602905,
      "loss": 2.0549,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -1.1568020582199097,
      "rewards/margins": 0.2872098982334137,
      "rewards/rejected": -1.4440120458602905,
      "step": 90
    },
    {
      "epoch": 0.19890081130594087,
      "grad_norm": 32.43410976766361,
      "learning_rate": 4.853377929214243e-07,
      "logits/chosen": -0.4856724739074707,
      "logits/rejected": -0.409660667181015,
      "logps/chosen": -1.1660270690917969,
      "logps/rejected": -1.429152488708496,
      "loss": 2.0952,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -1.1660270690917969,
      "rewards/margins": 0.26312538981437683,
      "rewards/rejected": -1.429152488708496,
      "step": 95
    },
    {
      "epoch": 0.2093692750588851,
      "grad_norm": 20.24510226455269,
      "learning_rate": 4.820919832540181e-07,
      "logits/chosen": -0.43944352865219116,
      "logits/rejected": -0.4186181128025055,
      "logps/chosen": -1.1845619678497314,
      "logps/rejected": -1.6824769973754883,
      "loss": 2.0785,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -1.1845619678497314,
      "rewards/margins": 0.4979149401187897,
      "rewards/rejected": -1.6824769973754883,
      "step": 100
    },
    {
      "epoch": 0.21983773881182936,
      "grad_norm": 21.93099294182866,
      "learning_rate": 4.785350472409791e-07,
      "logits/chosen": -0.4231189787387848,
      "logits/rejected": -0.41250792145729065,
      "logps/chosen": -1.103410005569458,
      "logps/rejected": -1.5199437141418457,
      "loss": 2.0523,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -1.103410005569458,
      "rewards/margins": 0.4165337085723877,
      "rewards/rejected": -1.5199437141418457,
      "step": 105
    },
    {
      "epoch": 0.23030620256477363,
      "grad_norm": 192.6716268565829,
      "learning_rate": 4.7467175306295647e-07,
      "logits/chosen": -0.4392550587654114,
      "logits/rejected": -0.4161394238471985,
      "logps/chosen": -1.1035048961639404,
      "logps/rejected": -1.5209665298461914,
      "loss": 2.0465,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.1035048961639404,
      "rewards/margins": 0.41746172308921814,
      "rewards/rejected": -1.5209665298461914,
      "step": 110
    },
    {
      "epoch": 0.24077466631771788,
      "grad_norm": 40.84875259149394,
      "learning_rate": 4.70507279583015e-07,
      "logits/chosen": -0.336051881313324,
      "logits/rejected": -0.3062174916267395,
      "logps/chosen": -1.0912038087844849,
      "logps/rejected": -1.6076161861419678,
      "loss": 2.0318,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.0912038087844849,
      "rewards/margins": 0.5164125561714172,
      "rewards/rejected": -1.6076161861419678,
      "step": 115
    },
    {
      "epoch": 0.2512431300706621,
      "grad_norm": 1121.915313367022,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -0.31270137429237366,
      "logits/rejected": -0.29621389508247375,
      "logps/chosen": -1.258256435394287,
      "logps/rejected": -1.5046377182006836,
      "loss": 2.1046,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -1.258256435394287,
      "rewards/margins": 0.24638131260871887,
      "rewards/rejected": -1.5046377182006836,
      "step": 120
    },
    {
      "epoch": 0.26171159382360637,
      "grad_norm": 20.937334753207477,
      "learning_rate": 4.612975213859487e-07,
      "logits/chosen": -0.32645902037620544,
      "logits/rejected": -0.2999876141548157,
      "logps/chosen": -1.124294400215149,
      "logps/rejected": -1.4352140426635742,
      "loss": 2.0543,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.124294400215149,
      "rewards/margins": 0.3109196722507477,
      "rewards/rejected": -1.4352140426635742,
      "step": 125
    },
    {
      "epoch": 0.2721800575765506,
      "grad_norm": 123.08016970705063,
      "learning_rate": 4.5626458262912735e-07,
      "logits/chosen": -0.2738082706928253,
      "logits/rejected": -0.26782605051994324,
      "logps/chosen": -1.09318208694458,
      "logps/rejected": -1.3113747835159302,
      "loss": 2.08,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -1.09318208694458,
      "rewards/margins": 0.2181926667690277,
      "rewards/rejected": -1.3113747835159302,
      "step": 130
    },
    {
      "epoch": 0.2826485213294949,
      "grad_norm": 38.90891382468301,
      "learning_rate": 4.5095513994085974e-07,
      "logits/chosen": -0.33431217074394226,
      "logits/rejected": -0.25280579924583435,
      "logps/chosen": -1.1433501243591309,
      "logps/rejected": -1.7623112201690674,
      "loss": 2.0285,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.1433501243591309,
      "rewards/margins": 0.618960976600647,
      "rewards/rejected": -1.7623112201690674,
      "step": 135
    },
    {
      "epoch": 0.29311698508243916,
      "grad_norm": 90.38260744944395,
      "learning_rate": 4.453763107901675e-07,
      "logits/chosen": -0.2669193148612976,
      "logits/rejected": -0.20890673995018005,
      "logps/chosen": -1.1904810667037964,
      "logps/rejected": -1.6921430826187134,
      "loss": 2.0201,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.1904810667037964,
      "rewards/margins": 0.5016618967056274,
      "rewards/rejected": -1.6921430826187134,
      "step": 140
    },
    {
      "epoch": 0.3035854488353834,
      "grad_norm": 23.623204151291763,
      "learning_rate": 4.395355737667985e-07,
      "logits/chosen": -0.3123199939727783,
      "logits/rejected": -0.29313474893569946,
      "logps/chosen": -1.1307309865951538,
      "logps/rejected": -1.4845192432403564,
      "loss": 2.0527,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -1.1307309865951538,
      "rewards/margins": 0.3537880778312683,
      "rewards/rejected": -1.4845192432403564,
      "step": 145
    },
    {
      "epoch": 0.31405391258832765,
      "grad_norm": 20.156387548307503,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -0.28625836968421936,
      "logits/rejected": -0.2564564049243927,
      "logps/chosen": -1.0314805507659912,
      "logps/rejected": -1.4487305879592896,
      "loss": 2.0217,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -1.0314805507659912,
      "rewards/margins": 0.41724997758865356,
      "rewards/rejected": -1.4487305879592896,
      "step": 150
    },
    {
      "epoch": 0.3245223763412719,
      "grad_norm": 51.259884987761694,
      "learning_rate": 4.271000354423425e-07,
      "logits/chosen": -0.3215218186378479,
      "logits/rejected": -0.26840001344680786,
      "logps/chosen": -1.142395257949829,
      "logps/rejected": -1.4793251752853394,
      "loss": 2.013,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.142395257949829,
      "rewards/margins": 0.33692988753318787,
      "rewards/rejected": -1.4793251752853394,
      "step": 155
    },
    {
      "epoch": 0.33499084009421615,
      "grad_norm": 36.42093295445508,
      "learning_rate": 4.2052190435769554e-07,
      "logits/chosen": -0.24544882774353027,
      "logits/rejected": -0.19806651771068573,
      "logps/chosen": -1.2047197818756104,
      "logps/rejected": -1.631598711013794,
      "loss": 2.0508,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.2047197818756104,
      "rewards/margins": 0.42687878012657166,
      "rewards/rejected": -1.631598711013794,
      "step": 160
    },
    {
      "epoch": 0.34545930384716045,
      "grad_norm": 39.07058204343973,
      "learning_rate": 4.137151834863213e-07,
      "logits/chosen": -0.3026384711265564,
      "logits/rejected": -0.25133180618286133,
      "logps/chosen": -1.1269896030426025,
      "logps/rejected": -1.6380598545074463,
      "loss": 2.0307,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -1.1269896030426025,
      "rewards/margins": 0.5110700130462646,
      "rewards/rejected": -1.6380598545074463,
      "step": 165
    },
    {
      "epoch": 0.3559277676001047,
      "grad_norm": 30.659300325934307,
      "learning_rate": 4.0668899744407567e-07,
      "logits/chosen": -0.3210826516151428,
      "logits/rejected": -0.24679234623908997,
      "logps/chosen": -1.2077548503875732,
      "logps/rejected": -1.6277275085449219,
      "loss": 2.0405,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.2077548503875732,
      "rewards/margins": 0.4199727475643158,
      "rewards/rejected": -1.6277275085449219,
      "step": 170
    },
    {
      "epoch": 0.36639623135304894,
      "grad_norm": 29.84733882043046,
      "learning_rate": 3.994527650465352e-07,
      "logits/chosen": -0.3458993434906006,
      "logits/rejected": -0.25427910685539246,
      "logps/chosen": -1.1780593395233154,
      "logps/rejected": -1.5799932479858398,
      "loss": 2.0373,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -1.1780593395233154,
      "rewards/margins": 0.4019339084625244,
      "rewards/rejected": -1.5799932479858398,
      "step": 175
    },
    {
      "epoch": 0.3768646951059932,
      "grad_norm": 18.975102811277186,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -0.3150302767753601,
      "logits/rejected": -0.28204405307769775,
      "logps/chosen": -1.1404414176940918,
      "logps/rejected": -1.519669532775879,
      "loss": 2.0334,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.1404414176940918,
      "rewards/margins": 0.3792281746864319,
      "rewards/rejected": -1.519669532775879,
      "step": 180
    },
    {
      "epoch": 0.38733315885893743,
      "grad_norm": 14.475409524552186,
      "learning_rate": 3.8438923131177237e-07,
      "logits/chosen": -0.282858669757843,
      "logits/rejected": -0.19747613370418549,
      "logps/chosen": -1.165867805480957,
      "logps/rejected": -1.813596487045288,
      "loss": 1.965,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.165867805480957,
      "rewards/margins": 0.6477286219596863,
      "rewards/rejected": -1.813596487045288,
      "step": 185
    },
    {
      "epoch": 0.39780162261188173,
      "grad_norm": 59.63064263707996,
      "learning_rate": 3.765821230985757e-07,
      "logits/chosen": -0.30526572465896606,
      "logits/rejected": -0.23906362056732178,
      "logps/chosen": -1.2474441528320312,
      "logps/rejected": -1.6622447967529297,
      "loss": 2.107,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -1.2474441528320312,
      "rewards/margins": 0.4148005545139313,
      "rewards/rejected": -1.6622447967529297,
      "step": 190
    },
    {
      "epoch": 0.408270086364826,
      "grad_norm": 23.989688460552507,
      "learning_rate": 3.6860532770864005e-07,
      "logits/chosen": -0.26398172974586487,
      "logits/rejected": -0.21283094584941864,
      "logps/chosen": -1.2256332635879517,
      "logps/rejected": -1.5658466815948486,
      "loss": 2.0384,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.2256332635879517,
      "rewards/margins": 0.3402135968208313,
      "rewards/rejected": -1.5658466815948486,
      "step": 195
    },
    {
      "epoch": 0.4187385501177702,
      "grad_norm": 18.678426330461928,
      "learning_rate": 3.604695382782159e-07,
      "logits/chosen": -0.29794207215309143,
      "logits/rejected": -0.26024094223976135,
      "logps/chosen": -1.197186827659607,
      "logps/rejected": -1.5625463724136353,
      "loss": 2.0478,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -1.197186827659607,
      "rewards/margins": 0.36535948514938354,
      "rewards/rejected": -1.5625463724136353,
      "step": 200
    },
    {
      "epoch": 0.42920701387071447,
      "grad_norm": 18.836564429743568,
      "learning_rate": 3.5218566107988867e-07,
      "logits/chosen": -0.21744203567504883,
      "logits/rejected": -0.16317346692085266,
      "logps/chosen": -1.038894772529602,
      "logps/rejected": -1.5853629112243652,
      "loss": 1.9918,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.038894772529602,
      "rewards/margins": 0.5464681386947632,
      "rewards/rejected": -1.5853629112243652,
      "step": 205
    },
    {
      "epoch": 0.4396754776236587,
      "grad_norm": 72.20345838571406,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -0.2059970200061798,
      "logits/rejected": -0.1603209674358368,
      "logps/chosen": -1.2739497423171997,
      "logps/rejected": -1.5597654581069946,
      "loss": 2.0481,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -1.2739497423171997,
      "rewards/margins": 0.28581586480140686,
      "rewards/rejected": -1.5597654581069946,
      "step": 210
    },
    {
      "epoch": 0.45014394137660296,
      "grad_norm": 27.301032186734012,
      "learning_rate": 3.3521824616429284e-07,
      "logits/chosen": -0.25597435235977173,
      "logits/rejected": -0.20625348389148712,
      "logps/chosen": -1.148979663848877,
      "logps/rejected": -1.677459478378296,
      "loss": 2.0204,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -1.148979663848877,
      "rewards/margins": 0.5284797549247742,
      "rewards/rejected": -1.677459478378296,
      "step": 215
    },
    {
      "epoch": 0.46061240512954726,
      "grad_norm": 29.629980256927464,
      "learning_rate": 3.265574537815398e-07,
      "logits/chosen": -0.3048572540283203,
      "logits/rejected": -0.1881858855485916,
      "logps/chosen": -1.165297269821167,
      "logps/rejected": -1.6496442556381226,
      "loss": 1.9967,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -1.165297269821167,
      "rewards/margins": 0.4843469262123108,
      "rewards/rejected": -1.6496442556381226,
      "step": 220
    },
    {
      "epoch": 0.4710808688824915,
      "grad_norm": 37.80136795892406,
      "learning_rate": 3.1779403380910425e-07,
      "logits/chosen": -0.2867579162120819,
      "logits/rejected": -0.19062136113643646,
      "logps/chosen": -1.084784746170044,
      "logps/rejected": -1.6818145513534546,
      "loss": 1.9613,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.084784746170044,
      "rewards/margins": 0.5970298647880554,
      "rewards/rejected": -1.6818145513534546,
      "step": 225
    },
    {
      "epoch": 0.48154933263543576,
      "grad_norm": 35.96385036849735,
      "learning_rate": 3.0893973387735683e-07,
      "logits/chosen": -0.3329482972621918,
      "logits/rejected": -0.2059972733259201,
      "logps/chosen": -1.1524263620376587,
      "logps/rejected": -1.833388328552246,
      "loss": 1.9819,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -1.1524263620376587,
      "rewards/margins": 0.6809619665145874,
      "rewards/rejected": -1.833388328552246,
      "step": 230
    },
    {
      "epoch": 0.49201779638838,
      "grad_norm": 22.085252727016336,
      "learning_rate": 3.000064234440111e-07,
      "logits/chosen": -0.24676939845085144,
      "logits/rejected": -0.17916962504386902,
      "logps/chosen": -1.1332640647888184,
      "logps/rejected": -1.6203981637954712,
      "loss": 2.0097,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.1332640647888184,
      "rewards/margins": 0.4871342182159424,
      "rewards/rejected": -1.6203981637954712,
      "step": 235
    },
    {
      "epoch": 0.5024862601413242,
      "grad_norm": 59.80174206160742,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -0.27282705903053284,
      "logits/rejected": -0.2174268662929535,
      "logps/chosen": -1.1373220682144165,
      "logps/rejected": -1.6652929782867432,
      "loss": 2.0531,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -1.1373220682144165,
      "rewards/margins": 0.5279708504676819,
      "rewards/rejected": -1.6652929782867432,
      "step": 240
    },
    {
      "epoch": 0.5129547238942685,
      "grad_norm": 28.05175376412517,
      "learning_rate": 2.8195076242990116e-07,
      "logits/chosen": -0.3008952736854553,
      "logits/rejected": -0.22454960644245148,
      "logps/chosen": -1.229134202003479,
      "logps/rejected": -1.6271793842315674,
      "loss": 2.0109,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.229134202003479,
      "rewards/margins": 0.3980451822280884,
      "rewards/rejected": -1.6271793842315674,
      "step": 245
    },
    {
      "epoch": 0.5234231876472127,
      "grad_norm": 55.23162384622342,
      "learning_rate": 2.7285261601056697e-07,
      "logits/chosen": -0.23525886237621307,
      "logits/rejected": -0.1734374463558197,
      "logps/chosen": -1.1406811475753784,
      "logps/rejected": -1.7381279468536377,
      "loss": 2.0332,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.1406811475753784,
      "rewards/margins": 0.5974463224411011,
      "rewards/rejected": -1.7381279468536377,
      "step": 250
    },
    {
      "epoch": 0.533891651400157,
      "grad_norm": 42.787865372213936,
      "learning_rate": 2.6372383496608186e-07,
      "logits/chosen": -0.19053250551223755,
      "logits/rejected": -0.0688900500535965,
      "logps/chosen": -1.2063381671905518,
      "logps/rejected": -1.7581781148910522,
      "loss": 1.9736,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.2063381671905518,
      "rewards/margins": 0.5518399477005005,
      "rewards/rejected": -1.7581781148910522,
      "step": 255
    },
    {
      "epoch": 0.5443601151531012,
      "grad_norm": 66.05775717329243,
      "learning_rate": 2.5457665670441937e-07,
      "logits/chosen": -0.17143914103507996,
      "logits/rejected": -0.0884476974606514,
      "logps/chosen": -1.1227834224700928,
      "logps/rejected": -1.7763004302978516,
      "loss": 2.0199,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -1.1227834224700928,
      "rewards/margins": 0.6535168886184692,
      "rewards/rejected": -1.7763004302978516,
      "step": 260
    },
    {
      "epoch": 0.5548285789060455,
      "grad_norm": 24.089434105436485,
      "learning_rate": 2.454233432955807e-07,
      "logits/chosen": -0.17798355221748352,
      "logits/rejected": -0.11273761838674545,
      "logps/chosen": -1.1982738971710205,
      "logps/rejected": -1.5514423847198486,
      "loss": 1.9891,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -1.1982738971710205,
      "rewards/margins": 0.35316845774650574,
      "rewards/rejected": -1.5514423847198486,
      "step": 265
    },
    {
      "epoch": 0.5652970426589898,
      "grad_norm": 38.85091217421057,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -0.15943729877471924,
      "logits/rejected": -0.07409695535898209,
      "logps/chosen": -1.2726773023605347,
      "logps/rejected": -1.6484864950180054,
      "loss": 2.028,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.2726773023605347,
      "rewards/margins": 0.3758092522621155,
      "rewards/rejected": -1.6484864950180054,
      "step": 270
    },
    {
      "epoch": 0.575765506411934,
      "grad_norm": 16.063870661090185,
      "learning_rate": 2.2714738398943308e-07,
      "logits/chosen": -0.15996511280536652,
      "logits/rejected": -0.01324576698243618,
      "logps/chosen": -1.1747169494628906,
      "logps/rejected": -1.8413015604019165,
      "loss": 1.9966,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.1747169494628906,
      "rewards/margins": 0.6665846705436707,
      "rewards/rejected": -1.8413015604019165,
      "step": 275
    },
    {
      "epoch": 0.5862339701648783,
      "grad_norm": 83.57376579636319,
      "learning_rate": 2.1804923757009882e-07,
      "logits/chosen": -0.16753597557544708,
      "logits/rejected": -0.05278133228421211,
      "logps/chosen": -1.0996477603912354,
      "logps/rejected": -1.5048593282699585,
      "loss": 1.9938,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.0996477603912354,
      "rewards/margins": 0.40521159768104553,
      "rewards/rejected": -1.5048593282699585,
      "step": 280
    },
    {
      "epoch": 0.5967024339178225,
      "grad_norm": 33.49859165217226,
      "learning_rate": 2.089939221172446e-07,
      "logits/chosen": -0.10943803936243057,
      "logits/rejected": -0.07321613281965256,
      "logps/chosen": -1.190861463546753,
      "logps/rejected": -1.7705156803131104,
      "loss": 1.9823,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.190861463546753,
      "rewards/margins": 0.5796541571617126,
      "rewards/rejected": -1.7705156803131104,
      "step": 285
    },
    {
      "epoch": 0.6071708976707668,
      "grad_norm": 30.87334049891428,
      "learning_rate": 1.9999357655598891e-07,
      "logits/chosen": -0.1590811163187027,
      "logits/rejected": -0.07361742109060287,
      "logps/chosen": -1.181041955947876,
      "logps/rejected": -1.800238013267517,
      "loss": 1.9789,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.181041955947876,
      "rewards/margins": 0.6191961765289307,
      "rewards/rejected": -1.800238013267517,
      "step": 290
    },
    {
      "epoch": 0.6176393614237111,
      "grad_norm": 23.02159874411763,
      "learning_rate": 1.9106026612264315e-07,
      "logits/chosen": -0.16086161136627197,
      "logits/rejected": -0.08389837294816971,
      "logps/chosen": -1.208166480064392,
      "logps/rejected": -1.826172113418579,
      "loss": 2.0188,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.208166480064392,
      "rewards/margins": 0.6180055737495422,
      "rewards/rejected": -1.826172113418579,
      "step": 295
    },
    {
      "epoch": 0.6281078251766553,
      "grad_norm": 31.936289365137483,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -0.21206030249595642,
      "logits/rejected": -0.15204045176506042,
      "logps/chosen": -1.2225364446640015,
      "logps/rejected": -1.6840988397598267,
      "loss": 2.0085,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.2225364446640015,
      "rewards/margins": 0.46156245470046997,
      "rewards/rejected": -1.6840988397598267,
      "step": 300
    },
    {
      "epoch": 0.6385762889295996,
      "grad_norm": 40.76245078380065,
      "learning_rate": 1.7344254621846017e-07,
      "logits/chosen": -0.11568818241357803,
      "logits/rejected": -0.054085873067379,
      "logps/chosen": -1.2138392925262451,
      "logps/rejected": -1.8302034139633179,
      "loss": 1.9638,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -1.2138392925262451,
      "rewards/margins": 0.6163640022277832,
      "rewards/rejected": -1.8302034139633179,
      "step": 305
    },
    {
      "epoch": 0.6490447526825438,
      "grad_norm": 49.19939562684821,
      "learning_rate": 1.647817538357072e-07,
      "logits/chosen": -0.12115196883678436,
      "logits/rejected": -0.1311618536710739,
      "logps/chosen": -1.0947141647338867,
      "logps/rejected": -1.5839706659317017,
      "loss": 1.9698,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.0947141647338867,
      "rewards/margins": 0.48925653100013733,
      "rewards/rejected": -1.5839706659317017,
      "step": 310
    },
    {
      "epoch": 0.6595132164354881,
      "grad_norm": 27.360907272103443,
      "learning_rate": 1.562351990976095e-07,
      "logits/chosen": -0.1562352329492569,
      "logits/rejected": -0.04307347536087036,
      "logps/chosen": -1.0912151336669922,
      "logps/rejected": -1.8711601495742798,
      "loss": 1.9701,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.0912151336669922,
      "rewards/margins": 0.7799449563026428,
      "rewards/rejected": -1.8711601495742798,
      "step": 315
    },
    {
      "epoch": 0.6699816801884323,
      "grad_norm": 37.06696857881054,
      "learning_rate": 1.478143389201113e-07,
      "logits/chosen": -0.11155574023723602,
      "logits/rejected": -0.07031629979610443,
      "logps/chosen": -1.225824236869812,
      "logps/rejected": -1.715296745300293,
      "loss": 1.9507,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.225824236869812,
      "rewards/margins": 0.48947247862815857,
      "rewards/rejected": -1.715296745300293,
      "step": 320
    },
    {
      "epoch": 0.6804501439413766,
      "grad_norm": 47.67894323163762,
      "learning_rate": 1.3953046172178413e-07,
      "logits/chosen": -0.14251194894313812,
      "logits/rejected": -0.046834878623485565,
      "logps/chosen": -1.1644965410232544,
      "logps/rejected": -1.8360859155654907,
      "loss": 1.9483,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.1644965410232544,
      "rewards/margins": 0.6715894341468811,
      "rewards/rejected": -1.8360859155654907,
      "step": 325
    },
    {
      "epoch": 0.6909186076943209,
      "grad_norm": 15.072577205919428,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -0.11516846716403961,
      "logits/rejected": -0.03220799192786217,
      "logps/chosen": -1.144539713859558,
      "logps/rejected": -1.6052381992340088,
      "loss": 1.9932,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.144539713859558,
      "rewards/margins": 0.46069830656051636,
      "rewards/rejected": -1.6052381992340088,
      "step": 330
    },
    {
      "epoch": 0.7013870714472651,
      "grad_norm": 34.23220245935474,
      "learning_rate": 1.2341787690142435e-07,
      "logits/chosen": -0.1980099380016327,
      "logits/rejected": -0.10996635258197784,
      "logps/chosen": -1.110797643661499,
      "logps/rejected": -1.5681132078170776,
      "loss": 1.9936,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.110797643661499,
      "rewards/margins": 0.4573155343532562,
      "rewards/rejected": -1.5681132078170776,
      "step": 335
    },
    {
      "epoch": 0.7118555352002094,
      "grad_norm": 28.671371339841,
      "learning_rate": 1.1561076868822755e-07,
      "logits/chosen": -0.18400368094444275,
      "logits/rejected": -0.09802932292222977,
      "logps/chosen": -1.275649070739746,
      "logps/rejected": -1.726248025894165,
      "loss": 2.023,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.275649070739746,
      "rewards/margins": 0.45059889554977417,
      "rewards/rejected": -1.726248025894165,
      "step": 340
    },
    {
      "epoch": 0.7223239989531536,
      "grad_norm": 33.94958056858452,
      "learning_rate": 1.0798381331721107e-07,
      "logits/chosen": -0.20783087611198425,
      "logits/rejected": -0.0951487272977829,
      "logps/chosen": -1.164470911026001,
      "logps/rejected": -1.7271054983139038,
      "loss": 2.001,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.164470911026001,
      "rewards/margins": 0.5626345872879028,
      "rewards/rejected": -1.7271054983139038,
      "step": 345
    },
    {
      "epoch": 0.7327924627060979,
      "grad_norm": 21.893648949832478,
      "learning_rate": 1.0054723495346482e-07,
      "logits/chosen": -0.07672537118196487,
      "logits/rejected": -0.03784745931625366,
      "logps/chosen": -1.1689984798431396,
      "logps/rejected": -1.6384124755859375,
      "loss": 2.0337,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -1.1689984798431396,
      "rewards/margins": 0.469414085149765,
      "rewards/rejected": -1.6384124755859375,
      "step": 350
    },
    {
      "epoch": 0.7432609264590422,
      "grad_norm": 32.78382225954148,
      "learning_rate": 9.331100255592436e-08,
      "logits/chosen": -0.07636372745037079,
      "logits/rejected": 0.007526999805122614,
      "logps/chosen": -1.0614162683486938,
      "logps/rejected": -1.7131534814834595,
      "loss": 1.9457,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.0614162683486938,
      "rewards/margins": 0.6517372727394104,
      "rewards/rejected": -1.7131534814834595,
      "step": 355
    },
    {
      "epoch": 0.7537293902119864,
      "grad_norm": 18.581615763356663,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -0.165523499250412,
      "logits/rejected": -0.06427756696939468,
      "logps/chosen": -1.054750680923462,
      "logps/rejected": -1.56973397731781,
      "loss": 1.9192,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.054750680923462,
      "rewards/margins": 0.5149833559989929,
      "rewards/rejected": -1.56973397731781,
      "step": 360
    },
    {
      "epoch": 0.7641978539649307,
      "grad_norm": 33.79193132717166,
      "learning_rate": 7.947809564230445e-08,
      "logits/chosen": -0.15916807949543,
      "logits/rejected": -0.04110134392976761,
      "logps/chosen": -1.2637778520584106,
      "logps/rejected": -1.8154550790786743,
      "loss": 2.0161,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -1.2637778520584106,
      "rewards/margins": 0.5516773462295532,
      "rewards/rejected": -1.8154550790786743,
      "step": 365
    },
    {
      "epoch": 0.7746663177178749,
      "grad_norm": 30.598444258335533,
      "learning_rate": 7.289996455765748e-08,
      "logits/chosen": -0.09070632606744766,
      "logits/rejected": -0.016309842467308044,
      "logps/chosen": -1.233588695526123,
      "logps/rejected": -1.8405319452285767,
      "loss": 1.9721,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.233588695526123,
      "rewards/margins": 0.6069431900978088,
      "rewards/rejected": -1.8405319452285767,
      "step": 370
    },
    {
      "epoch": 0.7851347814708192,
      "grad_norm": 47.76183473164956,
      "learning_rate": 6.655924144404906e-08,
      "logits/chosen": -0.12095741182565689,
      "logits/rejected": -0.15326176583766937,
      "logps/chosen": -1.2032678127288818,
      "logps/rejected": -1.698957085609436,
      "loss": 1.9989,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -1.2032678127288818,
      "rewards/margins": 0.49568939208984375,
      "rewards/rejected": -1.698957085609436,
      "step": 375
    },
    {
      "epoch": 0.7956032452237635,
      "grad_norm": 26.82763374923621,
      "learning_rate": 6.046442623320145e-08,
      "logits/chosen": -0.16168245673179626,
      "logits/rejected": -0.10053972899913788,
      "logps/chosen": -1.2546281814575195,
      "logps/rejected": -1.6799224615097046,
      "loss": 1.9554,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -1.2546281814575195,
      "rewards/margins": 0.42529433965682983,
      "rewards/rejected": -1.6799224615097046,
      "step": 380
    },
    {
      "epoch": 0.8060717089767077,
      "grad_norm": 22.887488172337097,
      "learning_rate": 5.4623689209832484e-08,
      "logits/chosen": -0.1701394021511078,
      "logits/rejected": -0.10942293703556061,
      "logps/chosen": -1.1635555028915405,
      "logps/rejected": -1.6771953105926514,
      "loss": 1.9821,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -1.1635555028915405,
      "rewards/margins": 0.5136396884918213,
      "rewards/rejected": -1.6771953105926514,
      "step": 385
    },
    {
      "epoch": 0.816540172729652,
      "grad_norm": 17.93102818717875,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -0.16732355952262878,
      "logits/rejected": -0.0791902020573616,
      "logps/chosen": -1.1596777439117432,
      "logps/rejected": -1.8085283041000366,
      "loss": 1.9712,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.1596777439117432,
      "rewards/margins": 0.6488505005836487,
      "rewards/rejected": -1.8085283041000366,
      "step": 390
    },
    {
      "epoch": 0.8270086364825961,
      "grad_norm": 45.13149823297093,
      "learning_rate": 4.373541737087263e-08,
      "logits/chosen": -0.17222358286380768,
      "logits/rejected": -0.02697933092713356,
      "logps/chosen": -1.18060302734375,
      "logps/rejected": -1.7580945491790771,
      "loss": 1.9717,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.18060302734375,
      "rewards/margins": 0.5774916410446167,
      "rewards/rejected": -1.7580945491790771,
      "step": 395
    },
    {
      "epoch": 0.8374771002355405,
      "grad_norm": 17.576421623633806,
      "learning_rate": 3.8702478614051345e-08,
      "logits/chosen": -0.10352808237075806,
      "logits/rejected": -0.04908682033419609,
      "logps/chosen": -1.10788893699646,
      "logps/rejected": -1.7277934551239014,
      "loss": 1.9503,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -1.10788893699646,
      "rewards/margins": 0.619904637336731,
      "rewards/rejected": -1.7277934551239014,
      "step": 400
    },
    {
      "epoch": 0.8479455639884846,
      "grad_norm": 26.16217732840323,
      "learning_rate": 3.3952790595787986e-08,
      "logits/chosen": -0.197185680270195,
      "logits/rejected": -0.12642063200473785,
      "logps/chosen": -1.2650173902511597,
      "logps/rejected": -1.8639274835586548,
      "loss": 2.0076,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.2650173902511597,
      "rewards/margins": 0.5989099740982056,
      "rewards/rejected": -1.8639274835586548,
      "step": 405
    },
    {
      "epoch": 0.8584140277414289,
      "grad_norm": 24.116020361120558,
      "learning_rate": 2.9492720416985e-08,
      "logits/chosen": -0.1570913940668106,
      "logits/rejected": -0.08443228900432587,
      "logps/chosen": -1.1111314296722412,
      "logps/rejected": -1.5838778018951416,
      "loss": 1.9708,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -1.1111314296722412,
      "rewards/margins": 0.47274643182754517,
      "rewards/rejected": -1.5838778018951416,
      "step": 410
    },
    {
      "epoch": 0.8688824914943732,
      "grad_norm": 23.795655447251885,
      "learning_rate": 2.5328246937043525e-08,
      "logits/chosen": -0.1644868105649948,
      "logits/rejected": -0.05437507480382919,
      "logps/chosen": -1.1470693349838257,
      "logps/rejected": -1.7496206760406494,
      "loss": 1.9309,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.1470693349838257,
      "rewards/margins": 0.6025511622428894,
      "rewards/rejected": -1.7496206760406494,
      "step": 415
    },
    {
      "epoch": 0.8793509552473174,
      "grad_norm": 17.858809164573977,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -0.15058764815330505,
      "logits/rejected": -0.0765046551823616,
      "logps/chosen": -1.1509023904800415,
      "logps/rejected": -1.7408424615859985,
      "loss": 1.9941,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.1509023904800415,
      "rewards/margins": 0.5899400115013123,
      "rewards/rejected": -1.7408424615859985,
      "step": 420
    },
    {
      "epoch": 0.8898194190002617,
      "grad_norm": 100.72607171931102,
      "learning_rate": 1.7908016745981856e-08,
      "logits/chosen": -0.11002065241336823,
      "logits/rejected": -0.048098642379045486,
      "logps/chosen": -1.2740198373794556,
      "logps/rejected": -1.741000771522522,
      "loss": 1.9691,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.2740198373794556,
      "rewards/margins": 0.46698078513145447,
      "rewards/rejected": -1.741000771522522,
      "step": 425
    },
    {
      "epoch": 0.9002878827532059,
      "grad_norm": 22.399845767434137,
      "learning_rate": 1.4662207078575684e-08,
      "logits/chosen": -0.14560401439666748,
      "logits/rejected": -0.08761245012283325,
      "logps/chosen": -1.198464035987854,
      "logps/rejected": -1.7667903900146484,
      "loss": 1.9534,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.198464035987854,
      "rewards/margins": 0.568326473236084,
      "rewards/rejected": -1.7667903900146484,
      "step": 430
    },
    {
      "epoch": 0.9107563465061502,
      "grad_norm": 26.789977249333305,
      "learning_rate": 1.1731874863145142e-08,
      "logits/chosen": -0.1761171966791153,
      "logits/rejected": -0.11556446552276611,
      "logps/chosen": -1.1400909423828125,
      "logps/rejected": -1.924628496170044,
      "loss": 1.9424,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.1400909423828125,
      "rewards/margins": 0.7845373153686523,
      "rewards/rejected": -1.924628496170044,
      "step": 435
    },
    {
      "epoch": 0.9212248102590945,
      "grad_norm": 29.975236886779925,
      "learning_rate": 9.12094829893642e-09,
      "logits/chosen": -0.023710379377007484,
      "logits/rejected": -0.008227316662669182,
      "logps/chosen": -1.2032636404037476,
      "logps/rejected": -1.6965421438217163,
      "loss": 2.0197,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.2032636404037476,
      "rewards/margins": 0.4932785630226135,
      "rewards/rejected": -1.6965421438217163,
      "step": 440
    },
    {
      "epoch": 0.9316932740120387,
      "grad_norm": 44.53364841982202,
      "learning_rate": 6.832927412229017e-09,
      "logits/chosen": -0.1587805449962616,
      "logits/rejected": -0.09741976112127304,
      "logps/chosen": -1.186359167098999,
      "logps/rejected": -1.6756283044815063,
      "loss": 2.0179,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.186359167098999,
      "rewards/margins": 0.4892690181732178,
      "rewards/rejected": -1.6756283044815063,
      "step": 445
    },
    {
      "epoch": 0.942161737764983,
      "grad_norm": 37.66309860666725,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -0.07072281092405319,
      "logits/rejected": -0.03446631878614426,
      "logps/chosen": -1.2110041379928589,
      "logps/rejected": -1.6703050136566162,
      "loss": 1.9597,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.2110041379928589,
      "rewards/margins": 0.4593009054660797,
      "rewards/rejected": -1.6703050136566162,
      "step": 450
    },
    {
      "epoch": 0.9526302015179272,
      "grad_norm": 59.24673385214879,
      "learning_rate": 3.2374343405217884e-09,
      "logits/chosen": -0.12624625861644745,
      "logits/rejected": -0.06765826046466827,
      "logps/chosen": -1.3134384155273438,
      "logps/rejected": -1.6258878707885742,
      "loss": 2.0031,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.3134384155273438,
      "rewards/margins": 0.31244954466819763,
      "rewards/rejected": -1.6258878707885742,
      "step": 455
    },
    {
      "epoch": 0.9630986652708715,
      "grad_norm": 31.809388609630044,
      "learning_rate": 1.9347820230782295e-09,
      "logits/chosen": -0.09453988820314407,
      "logits/rejected": -0.033892810344696045,
      "logps/chosen": -1.093733549118042,
      "logps/rejected": -1.6776697635650635,
      "loss": 1.9354,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.093733549118042,
      "rewards/margins": 0.5839362144470215,
      "rewards/rejected": -1.6776697635650635,
      "step": 460
    },
    {
      "epoch": 0.9735671290238157,
      "grad_norm": 48.638368475214634,
      "learning_rate": 9.64668657069706e-10,
      "logits/chosen": -0.19474738836288452,
      "logits/rejected": -0.08652818948030472,
      "logps/chosen": -1.2114652395248413,
      "logps/rejected": -1.7437559366226196,
      "loss": 1.9666,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.2114652395248413,
      "rewards/margins": 0.5322908163070679,
      "rewards/rejected": -1.7437559366226196,
      "step": 465
    },
    {
      "epoch": 0.98403559277676,
      "grad_norm": 20.81325024609551,
      "learning_rate": 3.2839470889836627e-10,
      "logits/chosen": -0.17209358513355255,
      "logits/rejected": -0.08596277236938477,
      "logps/chosen": -1.142816185951233,
      "logps/rejected": -1.610129952430725,
      "loss": 2.0171,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.142816185951233,
      "rewards/margins": 0.46731385588645935,
      "rewards/rejected": -1.610129952430725,
      "step": 470
    },
    {
      "epoch": 0.9945040565297043,
      "grad_norm": 22.432046053055338,
      "learning_rate": 2.6813123097352287e-11,
      "logits/chosen": -0.20779597759246826,
      "logits/rejected": -0.07040806114673615,
      "logps/chosen": -1.2208012342453003,
      "logps/rejected": -1.6825767755508423,
      "loss": 1.9899,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -1.2208012342453003,
      "rewards/margins": 0.46177545189857483,
      "rewards/rejected": -1.6825767755508423,
      "step": 475
    },
    {
      "epoch": 0.998691442030882,
      "step": 477,
      "total_flos": 0.0,
      "train_loss": 0.0,
      "train_runtime": 4.2983,
      "train_samples_per_second": 14223.022,
      "train_steps_per_second": 110.974
    }
  ],
  "logging_steps": 5,
  "max_steps": 477,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}