{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.998691442030882,
  "eval_steps": 500,
  "global_step": 477,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010468463752944255,
      "grad_norm": 38.22182319115289,
      "learning_rate": 6.25e-08,
      "logits/chosen": -0.7837417721748352,
      "logits/rejected": -0.8419772386550903,
      "logps/chosen": -1.1746927499771118,
      "logps/rejected": -1.3594377040863037,
      "loss": 2.1738,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -1.1746927499771118,
      "rewards/margins": 0.1847449541091919,
      "rewards/rejected": -1.3594377040863037,
      "step": 5
    },
    {
      "epoch": 0.02093692750588851,
      "grad_norm": 15.829940538804763,
      "learning_rate": 1.25e-07,
      "logits/chosen": -0.8227180242538452,
      "logits/rejected": -0.8315876722335815,
      "logps/chosen": -1.1591246128082275,
      "logps/rejected": -1.2620265483856201,
      "loss": 2.1411,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -1.1591246128082275,
      "rewards/margins": 0.10290191322565079,
      "rewards/rejected": -1.2620265483856201,
      "step": 10
    },
    {
      "epoch": 0.031405391258832765,
      "grad_norm": 22.77524777376306,
      "learning_rate": 1.875e-07,
      "logits/chosen": -0.7929477691650391,
      "logits/rejected": -0.8053458333015442,
      "logps/chosen": -1.1068744659423828,
      "logps/rejected": -1.3614892959594727,
      "loss": 2.1083,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.1068744659423828,
      "rewards/margins": 0.2546148896217346,
      "rewards/rejected": -1.3614892959594727,
      "step": 15
    },
    {
      "epoch": 0.04187385501177702,
      "grad_norm": 37.8845459916446,
      "learning_rate": 2.5e-07,
      "logits/chosen": -0.717773973941803,
      "logits/rejected": -0.80096834897995,
      "logps/chosen": -1.1600226163864136,
      "logps/rejected": -1.2640360593795776,
      "loss": 2.167,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -1.1600226163864136,
      "rewards/margins": 0.10401340574026108,
      "rewards/rejected": -1.2640360593795776,
      "step": 20
    },
    {
      "epoch": 0.05234231876472128,
      "grad_norm": 13.533858086681068,
      "learning_rate": 3.125e-07,
      "logits/chosen": -0.8047081232070923,
      "logits/rejected": -0.8094943165779114,
      "logps/chosen": -1.1760809421539307,
      "logps/rejected": -1.2413945198059082,
      "loss": 2.142,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": -1.1760809421539307,
      "rewards/margins": 0.06531357765197754,
      "rewards/rejected": -1.2413945198059082,
      "step": 25
    },
    {
      "epoch": 0.06281078251766553,
      "grad_norm": 46.12526967375131,
      "learning_rate": 3.75e-07,
      "logits/chosen": -0.7824481725692749,
      "logits/rejected": -0.8084238767623901,
      "logps/chosen": -1.1583902835845947,
      "logps/rejected": -1.2807166576385498,
      "loss": 2.1687,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -1.1583902835845947,
      "rewards/margins": 0.12232650816440582,
      "rewards/rejected": -1.2807166576385498,
      "step": 30
    },
    {
      "epoch": 0.07327924627060979,
      "grad_norm": 20.51948865888897,
      "learning_rate": 4.3749999999999994e-07,
      "logits/chosen": -0.8100900650024414,
      "logits/rejected": -0.789565920829773,
      "logps/chosen": -1.142910361289978,
      "logps/rejected": -1.279740571975708,
      "loss": 2.1333,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -1.142910361289978,
      "rewards/margins": 0.136830136179924,
      "rewards/rejected": -1.279740571975708,
      "step": 35
    },
    {
      "epoch": 0.08374771002355404,
      "grad_norm": 26.130472787451367,
      "learning_rate": 5e-07,
      "logits/chosen": -0.7920509576797485,
      "logits/rejected": -0.9067994356155396,
      "logps/chosen": -1.0735507011413574,
      "logps/rejected": -1.3959448337554932,
      "loss": 2.089,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.0735507011413574,
      "rewards/margins": 0.3223941922187805,
      "rewards/rejected": -1.3959448337554932,
      "step": 40
    },
    {
      "epoch": 0.0942161737764983,
      "grad_norm": 33.25286433207774,
      "learning_rate": 5.625e-07,
      "logits/chosen": -0.7628027200698853,
      "logits/rejected": -0.8258091807365417,
      "logps/chosen": -1.0904920101165771,
      "logps/rejected": -1.3169877529144287,
      "loss": 2.1077,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -1.0904920101165771,
      "rewards/margins": 0.2264958918094635,
      "rewards/rejected": -1.3169877529144287,
      "step": 45
    },
    {
      "epoch": 0.10468463752944256,
      "grad_norm": 26.427906121657426,
      "learning_rate": 5.999678242522831e-07,
      "logits/chosen": -0.8172017931938171,
      "logits/rejected": -0.8944879770278931,
      "logps/chosen": -1.1694625616073608,
      "logps/rejected": -1.4630098342895508,
      "loss": 2.1326,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -1.1694625616073608,
      "rewards/margins": 0.29354724287986755,
      "rewards/rejected": -1.4630098342895508,
      "step": 50
    },
    {
      "epoch": 0.11515310128238682,
      "grad_norm": 83.98012535654864,
      "learning_rate": 5.996059263493219e-07,
      "logits/chosen": -0.8303399085998535,
      "logits/rejected": -0.8691905736923218,
      "logps/chosen": -1.1166013479232788,
      "logps/rejected": -1.3432626724243164,
      "loss": 2.1064,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -1.1166013479232788,
      "rewards/margins": 0.22666127979755402,
      "rewards/rejected": -1.3432626724243164,
      "step": 55
    },
    {
      "epoch": 0.12562156503533106,
      "grad_norm": 20.73349404149956,
      "learning_rate": 5.988423976115163e-07,
      "logits/chosen": -0.8669672012329102,
      "logits/rejected": -0.9171522855758667,
      "logps/chosen": -1.2086117267608643,
      "logps/rejected": -1.3324425220489502,
      "loss": 2.0869,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -1.2086117267608643,
      "rewards/margins": 0.1238306537270546,
      "rewards/rejected": -1.3324425220489502,
      "step": 60
    },
    {
      "epoch": 0.1360900287882753,
      "grad_norm": 21.610299966098676,
      "learning_rate": 5.976782615723061e-07,
      "logits/chosen": -0.8252432942390442,
      "logits/rejected": -0.900849461555481,
      "logps/chosen": -1.1040380001068115,
      "logps/rejected": -1.6288048028945923,
      "loss": 2.0719,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.1040380001068115,
      "rewards/margins": 0.524766743183136,
      "rewards/rejected": -1.6288048028945923,
      "step": 65
    },
    {
      "epoch": 0.14655849254121958,
      "grad_norm": 53.60916157723815,
      "learning_rate": 5.961150787913738e-07,
      "logits/chosen": -0.8189405202865601,
      "logits/rejected": -0.8585717082023621,
      "logps/chosen": -1.1598970890045166,
      "logps/rejected": -1.423801302909851,
      "loss": 2.051,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -1.1598970890045166,
      "rewards/margins": 0.26390427350997925,
      "rewards/rejected": -1.423801302909851,
      "step": 70
    },
    {
      "epoch": 0.15702695629416383,
      "grad_norm": 55.548769154194964,
      "learning_rate": 5.941549447626671e-07,
      "logits/chosen": -0.8151289224624634,
      "logits/rejected": -0.8495903015136719,
      "logps/chosen": -1.1472132205963135,
      "logps/rejected": -1.4964182376861572,
      "loss": 2.0657,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -1.1472132205963135,
      "rewards/margins": 0.349204957485199,
      "rewards/rejected": -1.4964182376861572,
      "step": 75
    },
    {
      "epoch": 0.16749542004710807,
      "grad_norm": 24.74010593915972,
      "learning_rate": 5.918004871053251e-07,
      "logits/chosen": -0.8290842175483704,
      "logits/rejected": -0.9109519720077515,
      "logps/chosen": -1.1479018926620483,
      "logps/rejected": -1.5490809679031372,
      "loss": 2.0988,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -1.1479018926620483,
      "rewards/margins": 0.40117907524108887,
      "rewards/rejected": -1.5490809679031372,
      "step": 80
    },
    {
      "epoch": 0.17796388380005235,
      "grad_norm": 19.216616658463852,
      "learning_rate": 5.890548620412763e-07,
      "logits/chosen": -0.9178426861763,
      "logits/rejected": -0.9389097094535828,
      "logps/chosen": -1.1228134632110596,
      "logps/rejected": -1.4461188316345215,
      "loss": 2.0983,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.1228134632110596,
      "rewards/margins": 0.3233054280281067,
      "rewards/rejected": -1.4461188316345215,
      "step": 85
    },
    {
      "epoch": 0.1884323475529966,
      "grad_norm": 9.550523787824096,
      "learning_rate": 5.859217501642258e-07,
      "logits/chosen": -0.8629040718078613,
      "logits/rejected": -0.9038488268852234,
      "logps/chosen": -1.1353037357330322,
      "logps/rejected": -1.4276666641235352,
      "loss": 2.0462,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.1353037357330322,
      "rewards/margins": 0.29236286878585815,
      "rewards/rejected": -1.4276666641235352,
      "step": 90
    },
    {
      "epoch": 0.19890081130594087,
      "grad_norm": 16.55214910624781,
      "learning_rate": 5.824053515057091e-07,
      "logits/chosen": -0.879852294921875,
      "logits/rejected": -0.8908422589302063,
      "logps/chosen": -1.157950520515442,
      "logps/rejected": -1.457081913948059,
      "loss": 2.0807,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -1.157950520515442,
      "rewards/margins": 0.2991313636302948,
      "rewards/rejected": -1.457081913948059,
      "step": 95
    },
    {
      "epoch": 0.2093692750588851,
      "grad_norm": 32.38005097808463,
      "learning_rate": 5.785103799048218e-07,
      "logits/chosen": -0.8940750360488892,
      "logits/rejected": -0.9557636380195618,
      "logps/chosen": -1.1859995126724243,
      "logps/rejected": -1.7212893962860107,
      "loss": 2.0735,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -1.1859995126724243,
      "rewards/margins": 0.5352898240089417,
      "rewards/rejected": -1.7212893962860107,
      "step": 100
    },
    {
      "epoch": 0.21983773881182936,
      "grad_norm": 13.467954278767516,
      "learning_rate": 5.742420566891749e-07,
      "logits/chosen": -0.8789809942245483,
      "logits/rejected": -0.9158897399902344,
      "logps/chosen": -1.0777571201324463,
      "logps/rejected": -1.4952013492584229,
      "loss": 2.0438,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -1.0777571201324463,
      "rewards/margins": 0.4174441397190094,
      "rewards/rejected": -1.4952013492584229,
      "step": 105
    },
    {
      "epoch": 0.23030620256477363,
      "grad_norm": 48.371034794873005,
      "learning_rate": 5.696061036755478e-07,
      "logits/chosen": -0.915793240070343,
      "logits/rejected": -0.9631668925285339,
      "logps/chosen": -1.081206202507019,
      "logps/rejected": -1.546509861946106,
      "loss": 2.0298,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.081206202507019,
      "rewards/margins": 0.4653037190437317,
      "rewards/rejected": -1.546509861946106,
      "step": 110
    },
    {
      "epoch": 0.24077466631771788,
      "grad_norm": 33.44917537983854,
      "learning_rate": 5.64608735499618e-07,
      "logits/chosen": -0.8344041109085083,
      "logits/rejected": -0.9158002138137817,
      "logps/chosen": -1.1163157224655151,
      "logps/rejected": -1.6953636407852173,
      "loss": 2.0198,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -1.1163157224655151,
      "rewards/margins": 0.5790477991104126,
      "rewards/rejected": -1.6953636407852173,
      "step": 115
    },
    {
      "epoch": 0.2512431300706621,
      "grad_norm": 41.36153494880983,
      "learning_rate": 5.592566512850545e-07,
      "logits/chosen": -0.8542771339416504,
      "logits/rejected": -0.8888319134712219,
      "logps/chosen": -1.2859517335891724,
      "logps/rejected": -1.5670356750488281,
      "loss": 2.1007,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -1.2859517335891724,
      "rewards/margins": 0.28108420968055725,
      "rewards/rejected": -1.5670356750488281,
      "step": 120
    },
    {
      "epoch": 0.26171159382360637,
      "grad_norm": 29.2467879102086,
      "learning_rate": 5.535570256631384e-07,
      "logits/chosen": -0.9180340766906738,
      "logits/rejected": -0.8835474252700806,
      "logps/chosen": -1.1059439182281494,
      "logps/rejected": -1.4397169351577759,
      "loss": 2.0475,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.1059439182281494,
      "rewards/margins": 0.33377301692962646,
      "rewards/rejected": -1.4397169351577759,
      "step": 125
    },
    {
      "epoch": 0.2721800575765506,
      "grad_norm": 47.965843324490926,
      "learning_rate": 5.475174991549528e-07,
      "logits/chosen": -0.8605352640151978,
      "logits/rejected": -0.9204422235488892,
      "logps/chosen": -1.076321005821228,
      "logps/rejected": -1.291337251663208,
      "loss": 2.0689,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -1.076321005821228,
      "rewards/margins": 0.21501627564430237,
      "rewards/rejected": -1.291337251663208,
      "step": 130
    },
    {
      "epoch": 0.2826485213294949,
      "grad_norm": 25.643927984267705,
      "learning_rate": 5.411461679290317e-07,
      "logits/chosen": -0.8433247804641724,
      "logits/rejected": -0.8802833557128906,
      "logps/chosen": -1.1742990016937256,
      "logps/rejected": -1.8538631200790405,
      "loss": 2.0085,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.1742990016937256,
      "rewards/margins": 0.6795639395713806,
      "rewards/rejected": -1.8538631200790405,
      "step": 135
    },
    {
      "epoch": 0.29311698508243916,
      "grad_norm": 24.64563141309484,
      "learning_rate": 5.34451572948201e-07,
      "logits/chosen": -0.8815560340881348,
      "logits/rejected": -0.9196213483810425,
      "logps/chosen": -1.1883355379104614,
      "logps/rejected": -1.751808524131775,
      "loss": 1.9914,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.1883355379104614,
      "rewards/margins": 0.5634733438491821,
      "rewards/rejected": -1.751808524131775,
      "step": 140
    },
    {
      "epoch": 0.3035854488353834,
      "grad_norm": 16.805987111465818,
      "learning_rate": 5.274426885201582e-07,
      "logits/chosen": -0.8858097195625305,
      "logits/rejected": -0.9691437482833862,
      "logps/chosen": -1.1200135946273804,
      "logps/rejected": -1.5147252082824707,
      "loss": 2.0408,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.1200135946273804,
      "rewards/margins": 0.3947116434574127,
      "rewards/rejected": -1.5147252082824707,
      "step": 145
    },
    {
      "epoch": 0.31405391258832765,
      "grad_norm": 18.868227485230445,
      "learning_rate": 5.201289102671411e-07,
      "logits/chosen": -0.8802644610404968,
      "logits/rejected": -0.8881145715713501,
      "logps/chosen": -1.0411317348480225,
      "logps/rejected": -1.462885856628418,
      "loss": 2.002,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.0411317348480225,
      "rewards/margins": 0.42175430059432983,
      "rewards/rejected": -1.462885856628418,
      "step": 150
    },
    {
      "epoch": 0.3245223763412719,
      "grad_norm": 23.821140021157394,
      "learning_rate": 5.12520042530811e-07,
      "logits/chosen": -0.8837698101997375,
      "logits/rejected": -0.903447151184082,
      "logps/chosen": -1.1391239166259766,
      "logps/rejected": -1.514580249786377,
      "loss": 2.0007,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -1.1391239166259766,
      "rewards/margins": 0.3754563331604004,
      "rewards/rejected": -1.514580249786377,
      "step": 155
    },
    {
      "epoch": 0.33499084009421615,
      "grad_norm": 25.07335170173153,
      "learning_rate": 5.046262852292346e-07,
      "logits/chosen": -0.8691936731338501,
      "logits/rejected": -0.8966501951217651,
      "logps/chosen": -1.1949414014816284,
      "logps/rejected": -1.6663414239883423,
      "loss": 2.0357,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.1949414014816284,
      "rewards/margins": 0.4713999629020691,
      "rewards/rejected": -1.6663414239883423,
      "step": 160
    },
    {
      "epoch": 0.34545930384716045,
      "grad_norm": 15.930498315332532,
      "learning_rate": 4.964582201835856e-07,
      "logits/chosen": -0.9137632250785828,
      "logits/rejected": -0.9423500895500183,
      "logps/chosen": -1.1093122959136963,
      "logps/rejected": -1.6846201419830322,
      "loss": 2.0194,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.1093122959136963,
      "rewards/margins": 0.5753079652786255,
      "rewards/rejected": -1.6846201419830322,
      "step": 165
    },
    {
      "epoch": 0.3559277676001047,
      "grad_norm": 14.533074422786573,
      "learning_rate": 4.880267969328908e-07,
      "logits/chosen": -0.8995744585990906,
      "logits/rejected": -0.9443628191947937,
      "logps/chosen": -1.1981232166290283,
      "logps/rejected": -1.643414855003357,
      "loss": 2.0262,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.1981232166290283,
      "rewards/margins": 0.4452916085720062,
      "rewards/rejected": -1.643414855003357,
      "step": 170
    },
    {
      "epoch": 0.36639623135304894,
      "grad_norm": 21.913922705783488,
      "learning_rate": 4.793433180558423e-07,
      "logits/chosen": -0.8912171125411987,
      "logits/rejected": -0.9618469476699829,
      "logps/chosen": -1.182425856590271,
      "logps/rejected": -1.684942603111267,
      "loss": 2.0155,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.182425856590271,
      "rewards/margins": 0.5025165677070618,
      "rewards/rejected": -1.684942603111267,
      "step": 175
    },
    {
      "epoch": 0.3768646951059932,
      "grad_norm": 27.73808093722701,
      "learning_rate": 4.704194240193467e-07,
      "logits/chosen": -0.8934853672981262,
      "logits/rejected": -0.9609943628311157,
      "logps/chosen": -1.1971694231033325,
      "logps/rejected": -1.6838546991348267,
      "loss": 2.0284,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -1.1971694231033325,
      "rewards/margins": 0.4866850972175598,
      "rewards/rejected": -1.6838546991348267,
      "step": 180
    },
    {
      "epoch": 0.38733315885893743,
      "grad_norm": 30.41505287850658,
      "learning_rate": 4.6126707757412686e-07,
      "logits/chosen": -0.8480084538459778,
      "logits/rejected": -0.9141961336135864,
      "logps/chosen": -1.1976919174194336,
      "logps/rejected": -1.9417314529418945,
      "loss": 1.9467,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -1.1976919174194336,
      "rewards/margins": 0.7440396547317505,
      "rewards/rejected": -1.9417314529418945,
      "step": 185
    },
    {
      "epoch": 0.39780162261188173,
      "grad_norm": 34.17710511802847,
      "learning_rate": 4.5189854771829086e-07,
      "logits/chosen": -0.963171124458313,
      "logits/rejected": -0.9760686755180359,
      "logps/chosen": -1.2360587120056152,
      "logps/rejected": -1.665792465209961,
      "loss": 2.0894,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.2360587120056152,
      "rewards/margins": 0.42973384261131287,
      "rewards/rejected": -1.665792465209961,
      "step": 190
    },
    {
      "epoch": 0.408270086364826,
      "grad_norm": 39.854588709056486,
      "learning_rate": 4.4232639325036807e-07,
      "logits/chosen": -0.9235826730728149,
      "logits/rejected": -0.9407492876052856,
      "logps/chosen": -1.2383768558502197,
      "logps/rejected": -1.6111949682235718,
      "loss": 2.0333,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -1.2383768558502197,
      "rewards/margins": 0.3728182315826416,
      "rewards/rejected": -1.6111949682235718,
      "step": 195
    },
    {
      "epoch": 0.4187385501177702,
      "grad_norm": 46.073511129529436,
      "learning_rate": 4.32563445933859e-07,
      "logits/chosen": -0.9877565503120422,
      "logits/rejected": -0.9876266717910767,
      "logps/chosen": -1.2531481981277466,
      "logps/rejected": -1.6756317615509033,
      "loss": 2.0456,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.2531481981277466,
      "rewards/margins": 0.4224833846092224,
      "rewards/rejected": -1.6756317615509033,
      "step": 200
    },
    {
      "epoch": 0.42920701387071447,
      "grad_norm": 46.11695411093734,
      "learning_rate": 4.226227932958664e-07,
      "logits/chosen": -0.9007834196090698,
      "logits/rejected": -0.9248853921890259,
      "logps/chosen": -1.0488381385803223,
      "logps/rejected": -1.6470975875854492,
      "loss": 1.9781,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.0488381385803223,
      "rewards/margins": 0.5982593297958374,
      "rewards/rejected": -1.6470975875854492,
      "step": 205
    },
    {
      "epoch": 0.4396754776236587,
      "grad_norm": 39.48440428645483,
      "learning_rate": 4.1251776108286854e-07,
      "logits/chosen": -0.9045936465263367,
      "logits/rejected": -0.9374397397041321,
      "logps/chosen": -1.2596890926361084,
      "logps/rejected": -1.6047645807266235,
      "loss": 2.0371,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.2596890926361084,
      "rewards/margins": 0.345075398683548,
      "rewards/rejected": -1.6047645807266235,
      "step": 210
    },
    {
      "epoch": 0.45014394137660296,
      "grad_norm": 39.165174769151264,
      "learning_rate": 4.022618953971514e-07,
      "logits/chosen": -0.8771007657051086,
      "logits/rejected": -0.9414412379264832,
      "logps/chosen": -1.1611162424087524,
      "logps/rejected": -1.7325302362442017,
      "loss": 2.0219,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -1.1611162424087524,
      "rewards/margins": 0.5714138746261597,
      "rewards/rejected": -1.7325302362442017,
      "step": 215
    },
    {
      "epoch": 0.46061240512954726,
      "grad_norm": 40.26532103065248,
      "learning_rate": 3.918689445378477e-07,
      "logits/chosen": -0.9068889617919922,
      "logits/rejected": -0.9496709108352661,
      "logps/chosen": -1.1995134353637695,
      "logps/rejected": -1.7156946659088135,
      "loss": 1.987,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -1.1995134353637695,
      "rewards/margins": 0.5161812901496887,
      "rewards/rejected": -1.7156946659088135,
      "step": 220
    },
    {
      "epoch": 0.4710808688824915,
      "grad_norm": 17.4001686828919,
      "learning_rate": 3.813528405709251e-07,
      "logits/chosen": -0.9098321795463562,
      "logits/rejected": -0.9786120653152466,
      "logps/chosen": -1.088384985923767,
      "logps/rejected": -1.7232511043548584,
      "loss": 1.9565,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.088384985923767,
      "rewards/margins": 0.6348661780357361,
      "rewards/rejected": -1.7232511043548584,
      "step": 225
    },
    {
      "epoch": 0.48154933263543576,
      "grad_norm": 32.68221233140228,
      "learning_rate": 3.707276806528282e-07,
      "logits/chosen": -0.9034180641174316,
      "logits/rejected": -0.963924765586853,
      "logps/chosen": -1.1403090953826904,
      "logps/rejected": -1.8087886571884155,
      "loss": 1.9732,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.1403090953826904,
      "rewards/margins": 0.6684795618057251,
      "rewards/rejected": -1.8087886571884155,
      "step": 230
    },
    {
      "epoch": 0.49201779638838,
      "grad_norm": 33.604375971044966,
      "learning_rate": 3.6000770813281334e-07,
      "logits/chosen": -0.9114705920219421,
      "logits/rejected": -0.9957945942878723,
      "logps/chosen": -1.124693512916565,
      "logps/rejected": -1.6200597286224365,
      "loss": 2.0076,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -1.124693512916565,
      "rewards/margins": 0.4953663945198059,
      "rewards/rejected": -1.6200597286224365,
      "step": 235
    },
    {
      "epoch": 0.5024862601413242,
      "grad_norm": 33.58172238687118,
      "learning_rate": 3.4920729345930654e-07,
      "logits/chosen": -0.8984516263008118,
      "logits/rejected": -0.9524177312850952,
      "logps/chosen": -1.136643409729004,
      "logps/rejected": -1.6992219686508179,
      "loss": 2.0428,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.136643409729004,
      "rewards/margins": 0.562578558921814,
      "rewards/rejected": -1.6992219686508179,
      "step": 240
    },
    {
      "epoch": 0.5129547238942685,
      "grad_norm": 36.50989911591013,
      "learning_rate": 3.383409149158814e-07,
      "logits/chosen": -0.9273117184638977,
      "logits/rejected": -0.9691912531852722,
      "logps/chosen": -1.255492091178894,
      "logps/rejected": -1.654199242591858,
      "loss": 2.0089,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.255492091178894,
      "rewards/margins": 0.39870715141296387,
      "rewards/rejected": -1.654199242591858,
      "step": 245
    },
    {
      "epoch": 0.5234231876472127,
      "grad_norm": 56.18660552674704,
      "learning_rate": 3.2742313921268035e-07,
      "logits/chosen": -0.8531250953674316,
      "logits/rejected": -0.9333058595657349,
      "logps/chosen": -1.1693034172058105,
      "logps/rejected": -1.7800544500350952,
      "loss": 2.0302,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.1693034172058105,
      "rewards/margins": 0.6107511520385742,
      "rewards/rejected": -1.7800544500350952,
      "step": 250
    },
    {
      "epoch": 0.533891651400157,
      "grad_norm": 32.73686669905504,
      "learning_rate": 3.1646860195929825e-07,
      "logits/chosen": -0.8804293870925903,
      "logits/rejected": -0.9355670809745789,
      "logps/chosen": -1.2591540813446045,
      "logps/rejected": -1.830005407333374,
      "loss": 1.9653,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.2591540813446045,
      "rewards/margins": 0.5708513855934143,
      "rewards/rejected": -1.830005407333374,
      "step": 255
    },
    {
      "epoch": 0.5443601151531012,
      "grad_norm": 48.758378863327145,
      "learning_rate": 3.054919880453032e-07,
      "logits/chosen": -0.8192955255508423,
      "logits/rejected": -0.8804057240486145,
      "logps/chosen": -1.1422827243804932,
      "logps/rejected": -1.8388431072235107,
      "loss": 2.0149,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.1422827243804932,
      "rewards/margins": 0.6965604424476624,
      "rewards/rejected": -1.8388431072235107,
      "step": 260
    },
    {
      "epoch": 0.5548285789060455,
      "grad_norm": 57.73699922463872,
      "learning_rate": 2.9450801195469686e-07,
      "logits/chosen": -0.8590672612190247,
      "logits/rejected": -0.929406464099884,
      "logps/chosen": -1.2068861722946167,
      "logps/rejected": -1.5913540124893188,
      "loss": 1.9765,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.2068861722946167,
      "rewards/margins": 0.3844676911830902,
      "rewards/rejected": -1.5913540124893188,
      "step": 265
    },
    {
      "epoch": 0.5652970426589898,
      "grad_norm": 39.05494180222134,
      "learning_rate": 2.835313980407017e-07,
      "logits/chosen": -0.8784425854682922,
      "logits/rejected": -0.8780919313430786,
      "logps/chosen": -1.257610559463501,
      "logps/rejected": -1.6434276103973389,
      "loss": 2.012,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.257610559463501,
      "rewards/margins": 0.3858169615268707,
      "rewards/rejected": -1.6434276103973389,
      "step": 270
    },
    {
      "epoch": 0.575765506411934,
      "grad_norm": 12.369775833139514,
      "learning_rate": 2.7257686078731973e-07,
      "logits/chosen": -0.8967874646186829,
      "logits/rejected": -0.9520025253295898,
      "logps/chosen": -1.1508904695510864,
      "logps/rejected": -1.829810380935669,
      "loss": 1.99,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.1508904695510864,
      "rewards/margins": 0.678919792175293,
      "rewards/rejected": -1.829810380935669,
      "step": 275
    },
    {
      "epoch": 0.5862339701648783,
      "grad_norm": 28.730474706137716,
      "learning_rate": 2.6165908508411857e-07,
      "logits/chosen": -0.9400002360343933,
      "logits/rejected": -1.015257477760315,
      "logps/chosen": -1.0943539142608643,
      "logps/rejected": -1.5329124927520752,
      "loss": 1.9819,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.0943539142608643,
      "rewards/margins": 0.4385586678981781,
      "rewards/rejected": -1.5329124927520752,
      "step": 280
    },
    {
      "epoch": 0.5967024339178225,
      "grad_norm": 45.97759565415205,
      "learning_rate": 2.5079270654069354e-07,
      "logits/chosen": -0.8827983736991882,
      "logits/rejected": -0.9212683439254761,
      "logps/chosen": -1.213191270828247,
      "logps/rejected": -1.7406049966812134,
      "loss": 1.9873,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.213191270828247,
      "rewards/margins": 0.5274137258529663,
      "rewards/rejected": -1.7406049966812134,
      "step": 285
    },
    {
      "epoch": 0.6071708976707668,
      "grad_norm": 23.809737226091617,
      "learning_rate": 2.399922918671867e-07,
      "logits/chosen": -0.8965229988098145,
      "logits/rejected": -1.0004279613494873,
      "logps/chosen": -1.1851826906204224,
      "logps/rejected": -1.8194730281829834,
      "loss": 1.9564,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.1851826906204224,
      "rewards/margins": 0.6342904567718506,
      "rewards/rejected": -1.8194730281829834,
      "step": 290
    },
    {
      "epoch": 0.6176393614237111,
      "grad_norm": 48.93055509881971,
      "learning_rate": 2.2927231934717176e-07,
      "logits/chosen": -0.8713434338569641,
      "logits/rejected": -0.9567992091178894,
      "logps/chosen": -1.2080659866333008,
      "logps/rejected": -1.8678998947143555,
      "loss": 2.0124,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.2080659866333008,
      "rewards/margins": 0.6598338484764099,
      "rewards/rejected": -1.8678998947143555,
      "step": 295
    },
    {
      "epoch": 0.6281078251766553,
      "grad_norm": 16.728041846610992,
      "learning_rate": 2.1864715942907487e-07,
      "logits/chosen": -0.8935406804084778,
      "logits/rejected": -0.9455572366714478,
      "logps/chosen": -1.2053089141845703,
      "logps/rejected": -1.6934458017349243,
      "loss": 1.9953,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.2053089141845703,
      "rewards/margins": 0.4881366789340973,
      "rewards/rejected": -1.6934458017349243,
      "step": 300
    },
    {
      "epoch": 0.6385762889295996,
      "grad_norm": 64.72385101970457,
      "learning_rate": 2.081310554621522e-07,
      "logits/chosen": -0.8762944936752319,
      "logits/rejected": -0.9710057973861694,
      "logps/chosen": -1.21980881690979,
      "logps/rejected": -1.7974154949188232,
      "loss": 1.9564,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.21980881690979,
      "rewards/margins": 0.5776065587997437,
      "rewards/rejected": -1.7974154949188232,
      "step": 305
    },
    {
      "epoch": 0.6490447526825438,
      "grad_norm": 30.32440992803243,
      "learning_rate": 1.9773810460284862e-07,
      "logits/chosen": -0.896551251411438,
      "logits/rejected": -0.925498366355896,
      "logps/chosen": -1.1150288581848145,
      "logps/rejected": -1.6414196491241455,
      "loss": 1.9559,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.1150288581848145,
      "rewards/margins": 0.5263907313346863,
      "rewards/rejected": -1.6414196491241455,
      "step": 310
    },
    {
      "epoch": 0.6595132164354881,
      "grad_norm": 43.26797640360581,
      "learning_rate": 1.874822389171314e-07,
      "logits/chosen": -0.8692461848258972,
      "logits/rejected": -0.9539564251899719,
      "logps/chosen": -1.1112568378448486,
      "logps/rejected": -1.8892109394073486,
      "loss": 1.9671,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.1112568378448486,
      "rewards/margins": 0.7779541015625,
      "rewards/rejected": -1.8892109394073486,
      "step": 315
    },
    {
      "epoch": 0.6699816801884323,
      "grad_norm": 27.417692518273306,
      "learning_rate": 1.7737720670413356e-07,
      "logits/chosen": -0.9024019241333008,
      "logits/rejected": -0.8824315071105957,
      "logps/chosen": -1.209397315979004,
      "logps/rejected": -1.7458854913711548,
      "loss": 1.9292,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -1.209397315979004,
      "rewards/margins": 0.5364881753921509,
      "rewards/rejected": -1.7458854913711548,
      "step": 320
    },
    {
      "epoch": 0.6804501439413766,
      "grad_norm": 29.061168159916782,
      "learning_rate": 1.6743655406614095e-07,
      "logits/chosen": -0.8877676725387573,
      "logits/rejected": -0.9611382484436035,
      "logps/chosen": -1.1630074977874756,
      "logps/rejected": -1.8193352222442627,
      "loss": 1.9418,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.1630074977874756,
      "rewards/margins": 0.6563276052474976,
      "rewards/rejected": -1.8193352222442627,
      "step": 325
    },
    {
      "epoch": 0.6909186076943209,
      "grad_norm": 21.820711826590742,
      "learning_rate": 1.5767360674963198e-07,
      "logits/chosen": -0.8649999499320984,
      "logits/rejected": -0.9277146458625793,
      "logps/chosen": -1.1468570232391357,
      "logps/rejected": -1.590583086013794,
      "loss": 1.9873,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -1.1468570232391357,
      "rewards/margins": 0.4437260031700134,
      "rewards/rejected": -1.590583086013794,
      "step": 330
    },
    {
      "epoch": 0.7013870714472651,
      "grad_norm": 26.357123343274285,
      "learning_rate": 1.4810145228170922e-07,
      "logits/chosen": -0.8826435804367065,
      "logits/rejected": -0.9004185795783997,
      "logps/chosen": -1.1120072603225708,
      "logps/rejected": -1.5835678577423096,
      "loss": 1.9829,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.1120072603225708,
      "rewards/margins": 0.4715605676174164,
      "rewards/rejected": -1.5835678577423096,
      "step": 335
    },
    {
      "epoch": 0.7118555352002094,
      "grad_norm": 16.25494913759618,
      "learning_rate": 1.3873292242587306e-07,
      "logits/chosen": -0.8764508962631226,
      "logits/rejected": -0.9635313153266907,
      "logps/chosen": -1.2850573062896729,
      "logps/rejected": -1.7332632541656494,
      "loss": 2.0161,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.2850573062896729,
      "rewards/margins": 0.44820576906204224,
      "rewards/rejected": -1.7332632541656494,
      "step": 340
    },
    {
      "epoch": 0.7223239989531536,
      "grad_norm": 46.740346613290164,
      "learning_rate": 1.295805759806533e-07,
      "logits/chosen": -0.8967689275741577,
      "logits/rejected": -0.9520794153213501,
      "logps/chosen": -1.177972435951233,
      "logps/rejected": -1.7682632207870483,
      "loss": 1.9908,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -1.177972435951233,
      "rewards/margins": 0.5902906656265259,
      "rewards/rejected": -1.7682632207870483,
      "step": 345
    },
    {
      "epoch": 0.7327924627060979,
      "grad_norm": 24.33920868401554,
      "learning_rate": 1.2065668194415777e-07,
      "logits/chosen": -0.9185832738876343,
      "logits/rejected": -0.9162608981132507,
      "logps/chosen": -1.1997076272964478,
      "logps/rejected": -1.6665418148040771,
      "loss": 2.0251,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -1.1997076272964478,
      "rewards/margins": 0.4668343663215637,
      "rewards/rejected": -1.6665418148040771,
      "step": 350
    },
    {
      "epoch": 0.7432609264590422,
      "grad_norm": 17.97599943277886,
      "learning_rate": 1.1197320306710923e-07,
      "logits/chosen": -0.9116529226303101,
      "logits/rejected": -0.951018214225769,
      "logps/chosen": -1.0667304992675781,
      "logps/rejected": -1.7582483291625977,
      "loss": 1.93,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.0667304992675781,
      "rewards/margins": 0.6915179491043091,
      "rewards/rejected": -1.7582483291625977,
      "step": 355
    },
    {
      "epoch": 0.7537293902119864,
      "grad_norm": 24.4350634478838,
      "learning_rate": 1.035417798164145e-07,
      "logits/chosen": -0.8609287142753601,
      "logits/rejected": -0.9163888096809387,
      "logps/chosen": -1.0682001113891602,
      "logps/rejected": -1.5872647762298584,
      "loss": 1.9091,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.0682001113891602,
      "rewards/margins": 0.5190645456314087,
      "rewards/rejected": -1.5872647762298584,
      "step": 360
    },
    {
      "epoch": 0.7641978539649307,
      "grad_norm": 20.39985667399638,
      "learning_rate": 9.537371477076535e-08,
      "logits/chosen": -0.8938928842544556,
      "logits/rejected": -0.9040834307670593,
      "logps/chosen": -1.2582809925079346,
      "logps/rejected": -1.840842843055725,
      "loss": 2.0073,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.2582809925079346,
      "rewards/margins": 0.5825616121292114,
      "rewards/rejected": -1.840842843055725,
      "step": 365
    },
    {
      "epoch": 0.7746663177178749,
      "grad_norm": 34.173481784767304,
      "learning_rate": 8.747995746918898e-08,
      "logits/chosen": -0.8829482197761536,
      "logits/rejected": -0.907356858253479,
      "logps/chosen": -1.230694055557251,
      "logps/rejected": -1.8831084966659546,
      "loss": 1.9603,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.230694055557251,
      "rewards/margins": 0.6524146199226379,
      "rewards/rejected": -1.8831084966659546,
      "step": 370
    },
    {
      "epoch": 0.7851347814708192,
      "grad_norm": 47.37722808156103,
      "learning_rate": 7.987108973285888e-08,
      "logits/chosen": -0.9488266706466675,
      "logits/rejected": -0.9228233098983765,
      "logps/chosen": -1.203776240348816,
      "logps/rejected": -1.7508113384246826,
      "loss": 1.9796,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -1.203776240348816,
      "rewards/margins": 0.5470350980758667,
      "rewards/rejected": -1.7508113384246826,
      "step": 375
    },
    {
      "epoch": 0.7956032452237635,
      "grad_norm": 30.92892535857735,
      "learning_rate": 7.255731147984174e-08,
      "logits/chosen": -0.9628175497055054,
      "logits/rejected": -0.8987346887588501,
      "logps/chosen": -1.2509288787841797,
      "logps/rejected": -1.6936590671539307,
      "loss": 1.9474,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.2509288787841797,
      "rewards/margins": 0.4427300989627838,
      "rewards/rejected": -1.6936590671539307,
      "step": 380
    },
    {
      "epoch": 0.8060717089767077,
      "grad_norm": 14.25191792086823,
      "learning_rate": 6.554842705179898e-08,
      "logits/chosen": -0.8933061361312866,
      "logits/rejected": -0.9177101254463196,
      "logps/chosen": -1.1689434051513672,
      "logps/rejected": -1.6953961849212646,
      "loss": 1.9767,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -1.1689434051513672,
      "rewards/margins": 0.5264527797698975,
      "rewards/rejected": -1.6953961849212646,
      "step": 385
    },
    {
      "epoch": 0.816540172729652,
      "grad_norm": 24.50848840983097,
      "learning_rate": 5.885383207096832e-08,
      "logits/chosen": -0.8765498995780945,
      "logits/rejected": -0.9643741846084595,
      "logps/chosen": -1.1704505681991577,
      "logps/rejected": -1.8245474100112915,
      "loss": 1.9596,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.1704505681991577,
      "rewards/margins": 0.654096782207489,
      "rewards/rejected": -1.8245474100112915,
      "step": 390
    },
    {
      "epoch": 0.8270086364825961,
      "grad_norm": 49.84454621889519,
      "learning_rate": 5.2482500845047165e-08,
      "logits/chosen": -0.876386821269989,
      "logits/rejected": -0.9504436254501343,
      "logps/chosen": -1.1685442924499512,
      "logps/rejected": -1.7621757984161377,
      "loss": 1.9645,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.1685442924499512,
      "rewards/margins": 0.5936316251754761,
      "rewards/rejected": -1.7621757984161377,
      "step": 395
    },
    {
      "epoch": 0.8374771002355405,
      "grad_norm": 24.717108157273426,
      "learning_rate": 4.644297433686162e-08,
      "logits/chosen": -0.8654069900512695,
      "logits/rejected": -0.8900250196456909,
      "logps/chosen": -1.1046303510665894,
      "logps/rejected": -1.7381641864776611,
      "loss": 1.9371,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -1.1046303510665894,
      "rewards/margins": 0.6335335969924927,
      "rewards/rejected": -1.7381641864776611,
      "step": 400
    },
    {
      "epoch": 0.8479455639884846,
      "grad_norm": 129.2852712689259,
      "learning_rate": 4.074334871494558e-08,
      "logits/chosen": -0.9184934496879578,
      "logits/rejected": -0.9239559173583984,
      "logps/chosen": -1.269236445426941,
      "logps/rejected": -1.8909661769866943,
      "loss": 1.9919,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.269236445426941,
      "rewards/margins": 0.6217294931411743,
      "rewards/rejected": -1.8909661769866943,
      "step": 405
    },
    {
      "epoch": 0.8584140277414289,
      "grad_norm": 35.460771935934545,
      "learning_rate": 3.5391264500382e-08,
      "logits/chosen": -0.8921190500259399,
      "logits/rejected": -0.9240585565567017,
      "logps/chosen": -1.1124510765075684,
      "logps/rejected": -1.6030313968658447,
      "loss": 1.9615,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.1124510765075684,
      "rewards/margins": 0.49058032035827637,
      "rewards/rejected": -1.6030313968658447,
      "step": 410
    },
    {
      "epoch": 0.8688824914943732,
      "grad_norm": 30.93762572614218,
      "learning_rate": 3.0393896324452226e-08,
      "logits/chosen": -0.9354831576347351,
      "logits/rejected": -1.0134633779525757,
      "logps/chosen": -1.14380943775177,
      "logps/rejected": -1.7913806438446045,
      "loss": 1.9275,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.14380943775177,
      "rewards/margins": 0.6475714445114136,
      "rewards/rejected": -1.7913806438446045,
      "step": 415
    },
    {
      "epoch": 0.8793509552473174,
      "grad_norm": 23.73127172846191,
      "learning_rate": 2.5757943310825026e-08,
      "logits/chosen": -0.8869434595108032,
      "logits/rejected": -0.8954530954360962,
      "logps/chosen": -1.1472705602645874,
      "logps/rejected": -1.7492564916610718,
      "loss": 1.9917,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -1.1472705602645874,
      "rewards/margins": 0.6019860506057739,
      "rewards/rejected": -1.7492564916610718,
      "step": 420
    },
    {
      "epoch": 0.8898194190002617,
      "grad_norm": 39.148819343565215,
      "learning_rate": 2.148962009517823e-08,
      "logits/chosen": -0.8858118057250977,
      "logits/rejected": -0.9047883152961731,
      "logps/chosen": -1.2842466831207275,
      "logps/rejected": -1.7744776010513306,
      "loss": 1.9499,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -1.2842466831207275,
      "rewards/margins": 0.49023112654685974,
      "rewards/rejected": -1.7744776010513306,
      "step": 425
    },
    {
      "epoch": 0.9002878827532059,
      "grad_norm": 26.336696705653907,
      "learning_rate": 1.759464849429082e-08,
      "logits/chosen": -0.8909379243850708,
      "logits/rejected": -0.9339486360549927,
      "logps/chosen": -1.2071397304534912,
      "logps/rejected": -1.798255205154419,
      "loss": 1.9358,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -1.2071397304534912,
      "rewards/margins": 0.5911155939102173,
      "rewards/rejected": -1.798255205154419,
      "step": 430
    },
    {
      "epoch": 0.9107563465061502,
      "grad_norm": 25.46620061376669,
      "learning_rate": 1.4078249835774169e-08,
      "logits/chosen": -0.9604193568229675,
      "logits/rejected": -0.983184814453125,
      "logps/chosen": -1.148258924484253,
      "logps/rejected": -1.949751615524292,
      "loss": 1.9302,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.148258924484253,
      "rewards/margins": 0.8014928102493286,
      "rewards/rejected": -1.949751615524292,
      "step": 435
    },
    {
      "epoch": 0.9212248102590945,
      "grad_norm": 21.043141257775048,
      "learning_rate": 1.0945137958723705e-08,
      "logits/chosen": -0.894646167755127,
      "logits/rejected": -0.9200958013534546,
      "logps/chosen": -1.208785891532898,
      "logps/rejected": -1.7293192148208618,
      "loss": 2.0013,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.208785891532898,
      "rewards/margins": 0.5205334424972534,
      "rewards/rejected": -1.7293192148208618,
      "step": 440
    },
    {
      "epoch": 0.9316932740120387,
      "grad_norm": 80.48128617194537,
      "learning_rate": 8.19951289467482e-09,
      "logits/chosen": -0.8895615339279175,
      "logits/rejected": -0.8950116038322449,
      "logps/chosen": -1.185006856918335,
      "logps/rejected": -1.7131178379058838,
      "loss": 2.011,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.185006856918335,
      "rewards/margins": 0.5281107425689697,
      "rewards/rejected": -1.7131178379058838,
      "step": 445
    },
    {
      "epoch": 0.942161737764983,
      "grad_norm": 35.00393348206555,
      "learning_rate": 5.84505523733293e-09,
      "logits/chosen": -0.8778541684150696,
      "logits/rejected": -0.8582246899604797,
      "logps/chosen": -1.2027300596237183,
      "logps/rejected": -1.719333291053772,
      "loss": 1.9411,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.2027300596237183,
      "rewards/margins": 0.5166033506393433,
      "rewards/rejected": -1.719333291053772,
      "step": 450
    },
    {
      "epoch": 0.9526302015179272,
      "grad_norm": 45.63097391162078,
      "learning_rate": 3.8849212086261466e-09,
      "logits/chosen": -0.8674596548080444,
      "logits/rejected": -0.8951441645622253,
      "logps/chosen": -1.3365813493728638,
      "logps/rejected": -1.6624677181243896,
      "loss": 1.9961,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.3365813493728638,
      "rewards/margins": 0.325886607170105,
      "rewards/rejected": -1.6624677181243896,
      "step": 455
    },
    {
      "epoch": 0.9630986652708715,
      "grad_norm": 25.00020706198223,
      "learning_rate": 2.3217384276938756e-09,
      "logits/chosen": -0.9242098927497864,
      "logits/rejected": -0.9427459836006165,
      "logps/chosen": -1.098924160003662,
      "logps/rejected": -1.7310899496078491,
      "loss": 1.9237,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.098924160003662,
      "rewards/margins": 0.632165789604187,
      "rewards/rejected": -1.7310899496078491,
      "step": 460
    },
    {
      "epoch": 0.9735671290238157,
      "grad_norm": 47.88185009368047,
      "learning_rate": 1.1576023884836472e-09,
      "logits/chosen": -0.9520083665847778,
      "logits/rejected": -0.9862750172615051,
      "logps/chosen": -1.2228929996490479,
      "logps/rejected": -1.7746740579605103,
      "loss": 1.9683,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.2228929996490479,
      "rewards/margins": 0.5517812967300415,
      "rewards/rejected": -1.7746740579605103,
      "step": 465
    },
    {
      "epoch": 0.98403559277676,
      "grad_norm": 49.47414271366926,
      "learning_rate": 3.940736506780395e-10,
      "logits/chosen": -0.8756373524665833,
      "logits/rejected": -0.9029091000556946,
      "logps/chosen": -1.1716878414154053,
      "logps/rejected": -1.6480519771575928,
      "loss": 2.017,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -1.1716878414154053,
      "rewards/margins": 0.4763643741607666,
      "rewards/rejected": -1.6480519771575928,
      "step": 470
    },
    {
      "epoch": 0.9945040565297043,
      "grad_norm": 20.978298819504356,
      "learning_rate": 3.2175747716822744e-11,
      "logits/chosen": -0.9227266311645508,
      "logits/rejected": -0.9477631449699402,
      "logps/chosen": -1.2197462320327759,
      "logps/rejected": -1.715574026107788,
      "loss": 1.9795,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.2197462320327759,
      "rewards/margins": 0.495827853679657,
      "rewards/rejected": -1.715574026107788,
      "step": 475
    },
    {
      "epoch": 0.998691442030882,
      "step": 477,
      "total_flos": 0.0,
      "train_loss": 2.0152689970764226,
      "train_runtime": 14277.4481,
      "train_samples_per_second": 4.282,
      "train_steps_per_second": 0.033
    }
  ],
  "logging_steps": 5,
  "max_steps": 477,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000000,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}