clm7b0129-wds-0.8-kendall-onof-ofif-corr-max-2-simpo-max1500-default/checkpoint-800/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6565449322938038,
  "eval_steps": 50,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008206811653672548,
      "grad_norm": 0.06318386644124985,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": -2.1367907524108887,
      "logits/rejected": -2.4948182106018066,
      "logps/chosen": -0.291498601436615,
      "logps/rejected": -0.3196522295475006,
      "loss": 7.5728,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4372479021549225,
      "rewards/margins": 0.04223042353987694,
      "rewards/rejected": -0.47947829961776733,
      "step": 10
    },
    {
      "epoch": 0.016413623307345096,
      "grad_norm": 0.07310314476490021,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": -2.1456007957458496,
      "logits/rejected": -2.4455342292785645,
      "logps/chosen": -0.26213544607162476,
      "logps/rejected": -0.32332050800323486,
      "loss": 7.5298,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3932031989097595,
      "rewards/margins": 0.09177760779857635,
      "rewards/rejected": -0.4849807620048523,
      "step": 20
    },
    {
      "epoch": 0.024620434961017644,
      "grad_norm": 0.05936102196574211,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.0765950679779053,
      "logits/rejected": -2.485799789428711,
      "logps/chosen": -0.26631081104278564,
      "logps/rejected": -0.32647624611854553,
      "loss": 7.5208,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.39946624636650085,
      "rewards/margins": 0.09024813771247864,
      "rewards/rejected": -0.4897143840789795,
      "step": 30
    },
    {
      "epoch": 0.03282724661469019,
      "grad_norm": 0.08499134331941605,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": -2.0753884315490723,
      "logits/rejected": -2.441580295562744,
      "logps/chosen": -0.2749950885772705,
      "logps/rejected": -0.30180150270462036,
      "loss": 7.4229,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41249266266822815,
      "rewards/margins": 0.04020959883928299,
      "rewards/rejected": -0.45270222425460815,
      "step": 40
    },
    {
      "epoch": 0.04103405826836274,
      "grad_norm": 0.07681389898061752,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": -2.145660877227783,
      "logits/rejected": -2.465946912765503,
      "logps/chosen": -0.24909739196300507,
      "logps/rejected": -0.2796121835708618,
      "loss": 7.4811,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.373646080493927,
      "rewards/margins": 0.045772187411785126,
      "rewards/rejected": -0.41941824555397034,
      "step": 50
    },
    {
      "epoch": 0.04103405826836274,
      "eval_logits/chosen": -2.012000799179077,
      "eval_logits/rejected": -2.5381252765655518,
      "eval_logps/chosen": -0.24157460033893585,
      "eval_logps/rejected": -0.2957758605480194,
      "eval_loss": 0.9317650198936462,
      "eval_rewards/accuracies": 0.5252525210380554,
      "eval_rewards/chosen": -0.3623619079589844,
      "eval_rewards/margins": 0.08130191266536713,
      "eval_rewards/rejected": -0.4436637759208679,
      "eval_runtime": 26.0809,
      "eval_samples_per_second": 30.214,
      "eval_steps_per_second": 3.796,
      "step": 50
    },
    {
      "epoch": 0.04924086992203529,
      "grad_norm": 0.06638535112142563,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": -2.145846128463745,
      "logits/rejected": -2.4077115058898926,
      "logps/chosen": -0.22265203297138214,
      "logps/rejected": -0.30774614214897156,
      "loss": 7.4605,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3339780271053314,
      "rewards/margins": 0.1276412308216095,
      "rewards/rejected": -0.4616192877292633,
      "step": 60
    },
    {
      "epoch": 0.057447681575707836,
      "grad_norm": 0.057281140238046646,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": -2.0021350383758545,
      "logits/rejected": -2.4299912452697754,
      "logps/chosen": -0.23488977551460266,
      "logps/rejected": -0.33270469307899475,
      "loss": 7.4257,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.3523346781730652,
      "rewards/margins": 0.14672236144542694,
      "rewards/rejected": -0.4990570545196533,
      "step": 70
    },
    {
      "epoch": 0.06565449322938038,
      "grad_norm": 0.07725922018289566,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": -2.117995023727417,
      "logits/rejected": -2.359265089035034,
      "logps/chosen": -0.21598832309246063,
      "logps/rejected": -0.300583153963089,
      "loss": 7.4384,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.32398244738578796,
      "rewards/margins": 0.12689228355884552,
      "rewards/rejected": -0.4508747458457947,
      "step": 80
    },
    {
      "epoch": 0.07386130488305294,
      "grad_norm": 0.0598183274269104,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": -2.282627582550049,
      "logits/rejected": -2.441333532333374,
      "logps/chosen": -0.23655852675437927,
      "logps/rejected": -0.3246815800666809,
      "loss": 7.4584,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3548378050327301,
      "rewards/margins": 0.13218457996845245,
      "rewards/rejected": -0.48702239990234375,
      "step": 90
    },
    {
      "epoch": 0.08206811653672548,
      "grad_norm": 0.058213479816913605,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": -2.1114468574523926,
      "logits/rejected": -2.5035691261291504,
      "logps/chosen": -0.23073866963386536,
      "logps/rejected": -0.29445192217826843,
      "loss": 7.4116,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.34610801935195923,
      "rewards/margins": 0.09556989371776581,
      "rewards/rejected": -0.44167789816856384,
      "step": 100
    },
    {
      "epoch": 0.08206811653672548,
      "eval_logits/chosen": -2.0183491706848145,
      "eval_logits/rejected": -2.5400593280792236,
      "eval_logps/chosen": -0.20393377542495728,
      "eval_logps/rejected": -0.2818409502506256,
      "eval_loss": 0.9129964113235474,
      "eval_rewards/accuracies": 0.5656565427780151,
      "eval_rewards/chosen": -0.3059006631374359,
      "eval_rewards/margins": 0.11686072498559952,
      "eval_rewards/rejected": -0.4227614104747772,
      "eval_runtime": 26.0825,
      "eval_samples_per_second": 30.212,
      "eval_steps_per_second": 3.796,
      "step": 100
    },
    {
      "epoch": 0.09027492819039803,
      "grad_norm": 0.06249881908297539,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": -2.1324548721313477,
      "logits/rejected": -2.434319019317627,
      "logps/chosen": -0.22180762887001038,
      "logps/rejected": -0.28862181305885315,
      "loss": 7.3604,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.332711398601532,
      "rewards/margins": 0.10022131353616714,
      "rewards/rejected": -0.43293270468711853,
      "step": 110
    },
    {
      "epoch": 0.09848173984407058,
      "grad_norm": 0.061758093535900116,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": -2.0597169399261475,
      "logits/rejected": -2.4386391639709473,
      "logps/chosen": -0.22720107436180115,
      "logps/rejected": -0.303659051656723,
      "loss": 7.3624,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3408016264438629,
      "rewards/margins": 0.1146869882941246,
      "rewards/rejected": -0.4554885923862457,
      "step": 120
    },
    {
      "epoch": 0.10668855149774313,
      "grad_norm": 0.08368540555238724,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": -2.0944437980651855,
      "logits/rejected": -2.4157254695892334,
      "logps/chosen": -0.19590887427330017,
      "logps/rejected": -0.3365771770477295,
      "loss": 7.3464,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.29386329650878906,
      "rewards/margins": 0.2110024392604828,
      "rewards/rejected": -0.5048657655715942,
      "step": 130
    },
    {
      "epoch": 0.11489536315141567,
      "grad_norm": 0.060954928398132324,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": -2.1551766395568848,
      "logits/rejected": -2.5695576667785645,
      "logps/chosen": -0.19875812530517578,
      "logps/rejected": -0.2967599928379059,
      "loss": 7.3179,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.29813718795776367,
      "rewards/margins": 0.14700281620025635,
      "rewards/rejected": -0.4451400339603424,
      "step": 140
    },
    {
      "epoch": 0.12310217480508823,
      "grad_norm": 0.05665091797709465,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.1410365104675293,
      "logits/rejected": -2.4798667430877686,
      "logps/chosen": -0.19316771626472473,
      "logps/rejected": -0.2972142696380615,
      "loss": 7.2384,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.2897515296936035,
      "rewards/margins": 0.15606984496116638,
      "rewards/rejected": -0.4458213746547699,
      "step": 150
    },
    {
      "epoch": 0.12310217480508823,
      "eval_logits/chosen": -2.0654072761535645,
      "eval_logits/rejected": -2.596571207046509,
      "eval_logps/chosen": -0.17970335483551025,
      "eval_logps/rejected": -0.2767573893070221,
      "eval_loss": 0.8982937335968018,
      "eval_rewards/accuracies": 0.5858585834503174,
      "eval_rewards/chosen": -0.269555002450943,
      "eval_rewards/margins": 0.14558106660842896,
      "eval_rewards/rejected": -0.41513609886169434,
      "eval_runtime": 26.0741,
      "eval_samples_per_second": 30.222,
      "eval_steps_per_second": 3.797,
      "step": 150
    },
    {
      "epoch": 0.13130898645876077,
      "grad_norm": 0.07328196614980698,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": -2.202148914337158,
      "logits/rejected": -2.5385117530822754,
      "logps/chosen": -0.19814102351665497,
      "logps/rejected": -0.3139093518257141,
      "loss": 7.1309,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.29721152782440186,
      "rewards/margins": 0.1736525148153305,
      "rewards/rejected": -0.4708639979362488,
      "step": 160
    },
    {
      "epoch": 0.1395157981124333,
      "grad_norm": 0.09789691120386124,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": -2.153545618057251,
      "logits/rejected": -2.532336950302124,
      "logps/chosen": -0.1861150860786438,
      "logps/rejected": -0.2787100672721863,
      "loss": 7.2498,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.2791725993156433,
      "rewards/margins": 0.13889247179031372,
      "rewards/rejected": -0.4180651605129242,
      "step": 170
    },
    {
      "epoch": 0.14772260976610588,
      "grad_norm": 0.0829203873872757,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": -2.254868268966675,
      "logits/rejected": -2.5931999683380127,
      "logps/chosen": -0.19771653413772583,
      "logps/rejected": -0.2859548032283783,
      "loss": 7.1449,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.29657480120658875,
      "rewards/margins": 0.1323573738336563,
      "rewards/rejected": -0.42893218994140625,
      "step": 180
    },
    {
      "epoch": 0.15592942141977842,
      "grad_norm": 0.10499900579452515,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": -2.2066543102264404,
      "logits/rejected": -2.594515323638916,
      "logps/chosen": -0.19944152235984802,
      "logps/rejected": -0.26530706882476807,
      "loss": 7.1717,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.29916223883628845,
      "rewards/margins": 0.09879834204912186,
      "rewards/rejected": -0.3979606032371521,
      "step": 190
    },
    {
      "epoch": 0.16413623307345096,
      "grad_norm": 0.10522742569446564,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": -2.282865047454834,
      "logits/rejected": -2.651233196258545,
      "logps/chosen": -0.1877971738576889,
      "logps/rejected": -0.28373947739601135,
      "loss": 7.1001,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.28169578313827515,
      "rewards/margins": 0.14391344785690308,
      "rewards/rejected": -0.4256092607975006,
      "step": 200
    },
    {
      "epoch": 0.16413623307345096,
      "eval_logits/chosen": -2.1934518814086914,
      "eval_logits/rejected": -2.772143840789795,
      "eval_logps/chosen": -0.1790025532245636,
      "eval_logps/rejected": -0.3032245934009552,
      "eval_loss": 0.8807509541511536,
      "eval_rewards/accuracies": 0.5858585834503174,
      "eval_rewards/chosen": -0.2685038149356842,
      "eval_rewards/margins": 0.1863330751657486,
      "eval_rewards/rejected": -0.454836905002594,
      "eval_runtime": 26.0786,
      "eval_samples_per_second": 30.216,
      "eval_steps_per_second": 3.796,
      "step": 200
    },
    {
      "epoch": 0.1723430447271235,
      "grad_norm": 0.13399213552474976,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": -2.3500285148620605,
      "logits/rejected": -2.6827149391174316,
      "logps/chosen": -0.17780962586402893,
      "logps/rejected": -0.2749634087085724,
      "loss": 7.0722,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.266714483499527,
      "rewards/margins": 0.145730659365654,
      "rewards/rejected": -0.41244515776634216,
      "step": 210
    },
    {
      "epoch": 0.18054985638079607,
      "grad_norm": 0.1727023422718048,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": -2.356797695159912,
      "logits/rejected": -2.7276604175567627,
      "logps/chosen": -0.20826168358325958,
      "logps/rejected": -0.3760753273963928,
      "loss": 7.0205,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.31239253282546997,
      "rewards/margins": 0.2517204284667969,
      "rewards/rejected": -0.5641129016876221,
      "step": 220
    },
    {
      "epoch": 0.1887566680344686,
      "grad_norm": 0.14360135793685913,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": -2.3526904582977295,
      "logits/rejected": -2.785292148590088,
      "logps/chosen": -0.22017621994018555,
      "logps/rejected": -0.35291892290115356,
      "loss": 6.9773,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.3302643299102783,
      "rewards/margins": 0.1991141140460968,
      "rewards/rejected": -0.5293784737586975,
      "step": 230
    },
    {
      "epoch": 0.19696347968814115,
      "grad_norm": 0.18886974453926086,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": -2.3618314266204834,
      "logits/rejected": -2.850059747695923,
      "logps/chosen": -0.22531962394714355,
      "logps/rejected": -0.38331112265586853,
      "loss": 6.9879,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.3379794657230377,
      "rewards/margins": 0.23698726296424866,
      "rewards/rejected": -0.5749667286872864,
      "step": 240
    },
    {
      "epoch": 0.2051702913418137,
      "grad_norm": 0.2599099278450012,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": -2.4776885509490967,
      "logits/rejected": -2.8583390712738037,
      "logps/chosen": -0.2538486123085022,
      "logps/rejected": -0.42415136098861694,
      "loss": 6.7357,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3807729184627533,
      "rewards/margins": 0.2554541230201721,
      "rewards/rejected": -0.636227011680603,
      "step": 250
    },
    {
      "epoch": 0.2051702913418137,
      "eval_logits/chosen": -2.3788387775421143,
      "eval_logits/rejected": -2.958366632461548,
      "eval_logps/chosen": -0.2162775695323944,
      "eval_logps/rejected": -0.41066110134124756,
      "eval_loss": 0.8405817747116089,
      "eval_rewards/accuracies": 0.6060606241226196,
      "eval_rewards/chosen": -0.3244163393974304,
      "eval_rewards/margins": 0.2915753722190857,
      "eval_rewards/rejected": -0.6159917116165161,
      "eval_runtime": 26.0752,
      "eval_samples_per_second": 30.22,
      "eval_steps_per_second": 3.797,
      "step": 250
    },
    {
      "epoch": 0.21337710299548626,
      "grad_norm": 0.25840890407562256,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": -2.5892271995544434,
      "logits/rejected": -2.819650888442993,
      "logps/chosen": -0.2257525473833084,
      "logps/rejected": -0.46280306577682495,
      "loss": 6.6048,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.3386288285255432,
      "rewards/margins": 0.3555757403373718,
      "rewards/rejected": -0.6942045092582703,
      "step": 260
    },
    {
      "epoch": 0.2215839146491588,
      "grad_norm": 0.2838613986968994,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": -2.536020040512085,
      "logits/rejected": -2.843383312225342,
      "logps/chosen": -0.2739175856113434,
      "logps/rejected": -0.5088076591491699,
      "loss": 6.6403,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.41087642312049866,
      "rewards/margins": 0.3523350656032562,
      "rewards/rejected": -0.7632113695144653,
      "step": 270
    },
    {
      "epoch": 0.22979072630283134,
      "grad_norm": 0.3575810194015503,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": -2.4525866508483887,
      "logits/rejected": -2.7756829261779785,
      "logps/chosen": -0.27604570984840393,
      "logps/rejected": -0.6104786992073059,
      "loss": 6.4767,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.4140685498714447,
      "rewards/margins": 0.5016494989395142,
      "rewards/rejected": -0.9157179594039917,
      "step": 280
    },
    {
      "epoch": 0.23799753795650389,
      "grad_norm": 0.36226338148117065,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": -2.4639816284179688,
      "logits/rejected": -2.865053415298462,
      "logps/chosen": -0.35306140780448914,
      "logps/rejected": -0.5840066075325012,
      "loss": 6.2071,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.5295921564102173,
      "rewards/margins": 0.3464178144931793,
      "rewards/rejected": -0.8760099411010742,
      "step": 290
    },
    {
      "epoch": 0.24620434961017645,
      "grad_norm": 0.38896313309669495,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": -2.6630337238311768,
      "logits/rejected": -2.7479195594787598,
      "logps/chosen": -0.3706950545310974,
      "logps/rejected": -0.7957242131233215,
      "loss": 6.1801,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.5560425519943237,
      "rewards/margins": 0.6375436782836914,
      "rewards/rejected": -1.1935861110687256,
      "step": 300
    },
    {
      "epoch": 0.24620434961017645,
      "eval_logits/chosen": -2.411334991455078,
      "eval_logits/rejected": -2.820974588394165,
      "eval_logps/chosen": -0.3725183308124542,
      "eval_logps/rejected": -0.8138000965118408,
      "eval_loss": 0.738965630531311,
      "eval_rewards/accuracies": 0.6060606241226196,
      "eval_rewards/chosen": -0.5587774515151978,
      "eval_rewards/margins": 0.6619227528572083,
      "eval_rewards/rejected": -1.2207001447677612,
      "eval_runtime": 26.0764,
      "eval_samples_per_second": 30.219,
      "eval_steps_per_second": 3.797,
      "step": 300
    },
    {
      "epoch": 0.254411161263849,
      "grad_norm": 0.5848517417907715,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": -2.5622355937957764,
      "logits/rejected": -2.7415950298309326,
      "logps/chosen": -0.38525494933128357,
      "logps/rejected": -0.8741232752799988,
      "loss": 5.98,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.5778824687004089,
      "rewards/margins": 0.7333025336265564,
      "rewards/rejected": -1.3111850023269653,
      "step": 310
    },
    {
      "epoch": 0.26261797291752154,
      "grad_norm": 0.38972222805023193,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": -2.6293787956237793,
      "logits/rejected": -2.7180721759796143,
      "logps/chosen": -0.5181100964546204,
      "logps/rejected": -0.97294682264328,
      "loss": 5.608,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.7771651744842529,
      "rewards/margins": 0.6822551488876343,
      "rewards/rejected": -1.4594202041625977,
      "step": 320
    },
    {
      "epoch": 0.2708247845711941,
      "grad_norm": 0.5381959080696106,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": -2.477749824523926,
      "logits/rejected": -2.7682888507843018,
      "logps/chosen": -0.47721824049949646,
      "logps/rejected": -1.0577385425567627,
      "loss": 5.5189,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.7158273458480835,
      "rewards/margins": 0.8707805871963501,
      "rewards/rejected": -1.5866079330444336,
      "step": 330
    },
    {
      "epoch": 0.2790315962248666,
      "grad_norm": 0.5332415699958801,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": -2.5470075607299805,
      "logits/rejected": -2.8264012336730957,
      "logps/chosen": -0.5053269267082214,
      "logps/rejected": -1.412097454071045,
      "loss": 5.3992,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.7579904198646545,
      "rewards/margins": 1.360155701637268,
      "rewards/rejected": -2.1181461811065674,
      "step": 340
    },
    {
      "epoch": 0.2872384078785392,
      "grad_norm": 0.5841536521911621,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": -2.5552780628204346,
      "logits/rejected": -2.7644314765930176,
      "logps/chosen": -0.6264504790306091,
      "logps/rejected": -1.4451416730880737,
      "loss": 5.0093,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.9396758079528809,
      "rewards/margins": 1.228036880493164,
      "rewards/rejected": -2.167712450027466,
      "step": 350
    },
    {
      "epoch": 0.2872384078785392,
      "eval_logits/chosen": -2.486525774002075,
      "eval_logits/rejected": -2.809356451034546,
      "eval_logps/chosen": -0.6259626746177673,
      "eval_logps/rejected": -1.6826657056808472,
      "eval_loss": 0.610858678817749,
      "eval_rewards/accuracies": 0.6464646458625793,
      "eval_rewards/chosen": -0.9389441013336182,
      "eval_rewards/margins": 1.585054636001587,
      "eval_rewards/rejected": -2.523998737335205,
      "eval_runtime": 26.0792,
      "eval_samples_per_second": 30.216,
      "eval_steps_per_second": 3.796,
      "step": 350
    },
    {
      "epoch": 0.29544521953221176,
      "grad_norm": 0.6259649395942688,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": -2.5630745887756348,
      "logits/rejected": -2.80169939994812,
      "logps/chosen": -0.6148477792739868,
      "logps/rejected": -1.7640241384506226,
      "loss": 5.0832,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.9222715497016907,
      "rewards/margins": 1.7237647771835327,
      "rewards/rejected": -2.646036148071289,
      "step": 360
    },
    {
      "epoch": 0.3036520311858843,
      "grad_norm": 0.5134413838386536,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": -2.5688040256500244,
      "logits/rejected": -2.823493242263794,
      "logps/chosen": -0.7328687906265259,
      "logps/rejected": -2.135953187942505,
      "loss": 4.484,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.0993033647537231,
      "rewards/margins": 2.104626178741455,
      "rewards/rejected": -3.203929901123047,
      "step": 370
    },
    {
      "epoch": 0.31185884283955684,
      "grad_norm": 0.5029065608978271,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": -2.507948160171509,
      "logits/rejected": -2.797893524169922,
      "logps/chosen": -0.8517419695854187,
      "logps/rejected": -2.6004090309143066,
      "loss": 4.3033,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.2776129245758057,
      "rewards/margins": 2.6230006217956543,
      "rewards/rejected": -3.900613307952881,
      "step": 380
    },
    {
      "epoch": 0.3200656544932294,
      "grad_norm": 0.6171831488609314,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": -2.709599018096924,
      "logits/rejected": -2.8980605602264404,
      "logps/chosen": -0.9357224702835083,
      "logps/rejected": -2.464841842651367,
      "loss": 4.6813,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.4035838842391968,
      "rewards/margins": 2.2936789989471436,
      "rewards/rejected": -3.6972625255584717,
      "step": 390
    },
    {
      "epoch": 0.3282724661469019,
      "grad_norm": 1.2649667263031006,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": -2.3837532997131348,
      "logits/rejected": -2.6686861515045166,
      "logps/chosen": -0.9314821362495422,
      "logps/rejected": -2.562440872192383,
      "loss": 4.3764,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.3972232341766357,
      "rewards/margins": 2.4464378356933594,
      "rewards/rejected": -3.843661069869995,
      "step": 400
    },
    {
      "epoch": 0.3282724661469019,
      "eval_logits/chosen": -2.3613698482513428,
      "eval_logits/rejected": -2.7536535263061523,
      "eval_logps/chosen": -0.8348632454872131,
      "eval_logps/rejected": -2.6590662002563477,
      "eval_loss": 0.5019229650497437,
      "eval_rewards/accuracies": 0.6767676472663879,
      "eval_rewards/chosen": -1.2522947788238525,
      "eval_rewards/margins": 2.736304759979248,
      "eval_rewards/rejected": -3.9885993003845215,
      "eval_runtime": 26.0816,
      "eval_samples_per_second": 30.213,
      "eval_steps_per_second": 3.796,
      "step": 400
    },
    {
      "epoch": 0.33647927780057446,
      "grad_norm": 0.509773313999176,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": -2.4433794021606445,
      "logits/rejected": -2.732313871383667,
      "logps/chosen": -1.0649070739746094,
      "logps/rejected": -3.0055129528045654,
      "loss": 4.0575,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.597360372543335,
      "rewards/margins": 2.9109084606170654,
      "rewards/rejected": -4.508269309997559,
      "step": 410
    },
    {
      "epoch": 0.344686089454247,
      "grad_norm": 1.3467975854873657,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": -2.296419858932495,
      "logits/rejected": -2.650160551071167,
      "logps/chosen": -1.1074317693710327,
      "logps/rejected": -3.8539538383483887,
      "loss": 3.9082,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.6611478328704834,
      "rewards/margins": 4.119783401489258,
      "rewards/rejected": -5.780930519104004,
      "step": 420
    },
    {
      "epoch": 0.3528929011079196,
      "grad_norm": 0.576802134513855,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": -2.4122748374938965,
      "logits/rejected": -2.657745122909546,
      "logps/chosen": -1.4457646608352661,
      "logps/rejected": -3.8541057109832764,
      "loss": 3.6948,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -2.168646812438965,
      "rewards/margins": 3.612511396408081,
      "rewards/rejected": -5.781157970428467,
      "step": 430
    },
    {
      "epoch": 0.36109971276159214,
      "grad_norm": 1.8899520635604858,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": -2.380706310272217,
      "logits/rejected": -2.628760576248169,
      "logps/chosen": -1.5100795030593872,
      "logps/rejected": -4.016777515411377,
      "loss": 3.7709,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -2.2651190757751465,
      "rewards/margins": 3.760047197341919,
      "rewards/rejected": -6.0251665115356445,
      "step": 440
    },
    {
      "epoch": 0.3693065244152647,
      "grad_norm": 1.3820478916168213,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": -2.3203773498535156,
      "logits/rejected": -2.7199389934539795,
      "logps/chosen": -1.7879537343978882,
      "logps/rejected": -4.4142656326293945,
      "loss": 3.6878,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -2.6819303035736084,
      "rewards/margins": 3.9394683837890625,
      "rewards/rejected": -6.621399879455566,
      "step": 450
    },
    {
      "epoch": 0.3693065244152647,
      "eval_logits/chosen": -2.3454010486602783,
      "eval_logits/rejected": -2.7509803771972656,
      "eval_logps/chosen": -2.039729595184326,
      "eval_logps/rejected": -4.705678462982178,
      "eval_loss": 0.43350929021835327,
      "eval_rewards/accuracies": 0.8383838534355164,
      "eval_rewards/chosen": -3.05959415435791,
      "eval_rewards/margins": 3.9989237785339355,
      "eval_rewards/rejected": -7.058517932891846,
      "eval_runtime": 26.0827,
      "eval_samples_per_second": 30.212,
      "eval_steps_per_second": 3.796,
      "step": 450
    },
    {
      "epoch": 0.3775133360689372,
      "grad_norm": 1.2694813013076782,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": -2.419553279876709,
      "logits/rejected": -2.722438335418701,
      "logps/chosen": -2.116955280303955,
      "logps/rejected": -4.574510097503662,
      "loss": 3.5762,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -3.1754324436187744,
      "rewards/margins": 3.6863322257995605,
      "rewards/rejected": -6.861765384674072,
      "step": 460
    },
    {
      "epoch": 0.38572014772260976,
      "grad_norm": 2.1713900566101074,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": -2.509795904159546,
      "logits/rejected": -2.8286545276641846,
      "logps/chosen": -3.043957233428955,
      "logps/rejected": -5.701972007751465,
      "loss": 3.3953,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -4.565936088562012,
      "rewards/margins": 3.9870212078094482,
      "rewards/rejected": -8.552957534790039,
      "step": 470
    },
    {
      "epoch": 0.3939269593762823,
      "grad_norm": 1.7294141054153442,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": -2.5797224044799805,
      "logits/rejected": -2.8154430389404297,
      "logps/chosen": -3.6535918712615967,
      "logps/rejected": -6.4350786209106445,
      "loss": 2.8405,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -5.4803876876831055,
      "rewards/margins": 4.172229290008545,
      "rewards/rejected": -9.652616500854492,
      "step": 480
    },
    {
      "epoch": 0.40213377102995485,
      "grad_norm": 2.604421615600586,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": -2.658399820327759,
      "logits/rejected": -2.8255085945129395,
      "logps/chosen": -5.536412239074707,
      "logps/rejected": -7.725207328796387,
      "loss": 2.5705,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -8.304617881774902,
      "rewards/margins": 3.2831923961639404,
      "rewards/rejected": -11.587809562683105,
      "step": 490
    },
    {
      "epoch": 0.4103405826836274,
      "grad_norm": 3.06144118309021,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": -2.4845776557922363,
      "logits/rejected": -2.7703354358673096,
      "logps/chosen": -5.598423004150391,
      "logps/rejected": -8.852571487426758,
      "loss": 2.6946,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -8.397635459899902,
      "rewards/margins": 4.881222724914551,
      "rewards/rejected": -13.27885913848877,
      "step": 500
    },
    {
      "epoch": 0.4103405826836274,
      "eval_logits/chosen": -2.3113462924957275,
      "eval_logits/rejected": -2.7164077758789062,
      "eval_logps/chosen": -5.1130757331848145,
      "eval_logps/rejected": -9.016916275024414,
      "eval_loss": 0.28886228799819946,
      "eval_rewards/accuracies": 0.9090909361839294,
      "eval_rewards/chosen": -7.669614791870117,
      "eval_rewards/margins": 5.8557610511779785,
      "eval_rewards/rejected": -13.525375366210938,
      "eval_runtime": 26.082,
      "eval_samples_per_second": 30.212,
      "eval_steps_per_second": 3.796,
      "step": 500
    },
    {
      "epoch": 0.4185473943373,
      "grad_norm": 6.627122402191162,
      "learning_rate": 3.7043841852542884e-06,
      "logits/chosen": -2.3907535076141357,
      "logits/rejected": -2.667914390563965,
      "logps/chosen": -5.433152198791504,
      "logps/rejected": -8.439302444458008,
      "loss": 2.0307,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -8.149726867675781,
      "rewards/margins": 4.5092267990112305,
      "rewards/rejected": -12.658953666687012,
      "step": 510
    },
    {
      "epoch": 0.4267542059909725,
      "grad_norm": 4.345485687255859,
      "learning_rate": 3.658240087799655e-06,
      "logits/chosen": -2.4811301231384277,
      "logits/rejected": -2.8047549724578857,
      "logps/chosen": -6.806387424468994,
      "logps/rejected": -11.425605773925781,
      "loss": 1.9227,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -10.209580421447754,
      "rewards/margins": 6.92882776260376,
      "rewards/rejected": -17.13840675354004,
      "step": 520
    },
    {
      "epoch": 0.43496101764464506,
      "grad_norm": 3.9401891231536865,
      "learning_rate": 3.611587947962319e-06,
      "logits/chosen": -2.5136146545410156,
      "logits/rejected": -2.815864086151123,
      "logps/chosen": -8.350787162780762,
      "logps/rejected": -13.539543151855469,
      "loss": 2.1664,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -12.526180267333984,
      "rewards/margins": 7.783134460449219,
      "rewards/rejected": -20.30931282043457,
      "step": 530
    },
    {
      "epoch": 0.4431678292983176,
      "grad_norm": 8.136115074157715,
      "learning_rate": 3.564448228912682e-06,
      "logits/chosen": -2.3747506141662598,
      "logits/rejected": -2.767707586288452,
      "logps/chosen": -7.152952671051025,
      "logps/rejected": -12.549365043640137,
      "loss": 1.9159,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -10.729429244995117,
      "rewards/margins": 8.094616889953613,
      "rewards/rejected": -18.824045181274414,
      "step": 540
    },
    {
      "epoch": 0.45137464095199015,
      "grad_norm": 4.128848552703857,
      "learning_rate": 3.516841607689501e-06,
      "logits/chosen": -2.4747514724731445,
      "logits/rejected": -2.753988265991211,
      "logps/chosen": -7.145654201507568,
      "logps/rejected": -11.649099349975586,
      "loss": 1.7838,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -10.718483924865723,
      "rewards/margins": 6.755165100097656,
      "rewards/rejected": -17.473648071289062,
      "step": 550
    },
    {
      "epoch": 0.45137464095199015,
      "eval_logits/chosen": -2.358137845993042,
      "eval_logits/rejected": -2.7661235332489014,
      "eval_logps/chosen": -6.704189300537109,
      "eval_logps/rejected": -12.318349838256836,
      "eval_loss": 0.2348441481590271,
      "eval_rewards/accuracies": 0.9292929172515869,
      "eval_rewards/chosen": -10.056282997131348,
      "eval_rewards/margins": 8.42124080657959,
      "eval_rewards/rejected": -18.477523803710938,
      "eval_runtime": 26.0731,
      "eval_samples_per_second": 30.223,
      "eval_steps_per_second": 3.797,
      "step": 550
    },
    {
      "epoch": 0.4595814526056627,
      "grad_norm": 6.159413814544678,
      "learning_rate": 3.4687889661302577e-06,
      "logits/chosen": -2.4763951301574707,
      "logits/rejected": -2.7941336631774902,
      "logps/chosen": -7.845038414001465,
      "logps/rejected": -14.3870210647583,
      "loss": 1.834,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -11.767557144165039,
      "rewards/margins": 9.81297492980957,
      "rewards/rejected": -21.58053207397461,
      "step": 560
    },
    {
      "epoch": 0.46778826425933523,
      "grad_norm": 3.0514960289001465,
      "learning_rate": 3.4203113817116955e-06,
      "logits/chosen": -2.4743447303771973,
      "logits/rejected": -2.7862396240234375,
      "logps/chosen": -8.275420188903809,
      "logps/rejected": -16.52743911743164,
      "loss": 1.4036,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -12.413130760192871,
      "rewards/margins": 12.378029823303223,
      "rewards/rejected": -24.791160583496094,
      "step": 570
    },
    {
      "epoch": 0.47599507591300777,
      "grad_norm": 7.868257999420166,
      "learning_rate": 3.3714301183045382e-06,
      "logits/chosen": -2.4062681198120117,
      "logits/rejected": -2.616164445877075,
      "logps/chosen": -8.249846458435059,
      "logps/rejected": -14.887449264526367,
      "loss": 1.7889,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -12.37476921081543,
      "rewards/margins": 9.956401824951172,
      "rewards/rejected": -22.3311710357666,
      "step": 580
    },
    {
      "epoch": 0.48420188756668037,
      "grad_norm": 2.8784306049346924,
      "learning_rate": 3.3221666168464584e-06,
      "logits/chosen": -2.447603702545166,
      "logits/rejected": -2.6810977458953857,
      "logps/chosen": -7.720141410827637,
      "logps/rejected": -14.280428886413574,
      "loss": 1.4502,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -11.580211639404297,
      "rewards/margins": 9.840431213378906,
      "rewards/rejected": -21.420642852783203,
      "step": 590
    },
    {
      "epoch": 0.4924086992203529,
      "grad_norm": 3.972952127456665,
      "learning_rate": 3.272542485937369e-06,
      "logits/chosen": -2.3675425052642822,
      "logits/rejected": -2.7083656787872314,
      "logps/chosen": -7.870957851409912,
      "logps/rejected": -15.028215408325195,
      "loss": 1.338,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -11.806436538696289,
      "rewards/margins": 10.73588752746582,
      "rewards/rejected": -22.542322158813477,
      "step": 600
    },
    {
      "epoch": 0.4924086992203529,
      "eval_logits/chosen": -2.2910001277923584,
      "eval_logits/rejected": -2.656534194946289,
      "eval_logps/chosen": -8.230629920959473,
      "eval_logps/rejected": -15.021241188049316,
      "eval_loss": 0.21987247467041016,
      "eval_rewards/accuracies": 0.939393937587738,
      "eval_rewards/chosen": -12.345946311950684,
      "eval_rewards/margins": 10.185916900634766,
      "eval_rewards/rejected": -22.531862258911133,
      "eval_runtime": 26.0717,
      "eval_samples_per_second": 30.224,
      "eval_steps_per_second": 3.797,
      "step": 600
    },
    {
      "epoch": 0.5006155108740254,
      "grad_norm": 5.846790313720703,
      "learning_rate": 3.222579492361179e-06,
      "logits/chosen": -2.3496642112731934,
      "logits/rejected": -2.651423692703247,
      "logps/chosen": -9.013282775878906,
      "logps/rejected": -16.293872833251953,
      "loss": 1.6971,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -13.519923210144043,
      "rewards/margins": 10.920884132385254,
      "rewards/rejected": -24.440807342529297,
      "step": 610
    },
    {
      "epoch": 0.508822322527698,
      "grad_norm": 5.161929607391357,
      "learning_rate": 3.1722995515381644e-06,
      "logits/chosen": -2.323763132095337,
      "logits/rejected": -2.656510591506958,
      "logps/chosen": -8.339229583740234,
      "logps/rejected": -16.096925735473633,
      "loss": 0.9451,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -12.508844375610352,
      "rewards/margins": 11.636543273925781,
      "rewards/rejected": -24.145389556884766,
      "step": 620
    },
    {
      "epoch": 0.5170291341813705,
      "grad_norm": 6.016699314117432,
      "learning_rate": 3.121724717912138e-06,
      "logits/chosen": -2.457834482192993,
      "logits/rejected": -2.661825180053711,
      "logps/chosen": -9.72178840637207,
      "logps/rejected": -16.58643341064453,
      "loss": 1.3102,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -14.582681655883789,
      "rewards/margins": 10.296967506408691,
      "rewards/rejected": -24.879650115966797,
      "step": 630
    },
    {
      "epoch": 0.5252359458350431,
      "grad_norm": 4.966028213500977,
      "learning_rate": 3.0708771752766397e-06,
      "logits/chosen": -2.3414790630340576,
      "logits/rejected": -2.6370954513549805,
      "logps/chosen": -9.21303939819336,
      "logps/rejected": -16.45307731628418,
      "loss": 1.5846,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -13.819559097290039,
      "rewards/margins": 10.860057830810547,
      "rewards/rejected": -24.679615020751953,
      "step": 640
    },
    {
      "epoch": 0.5334427574887156,
      "grad_norm": 2.0964155197143555,
      "learning_rate": 3.019779227044398e-06,
      "logits/chosen": -2.370847225189209,
      "logits/rejected": -2.6393492221832275,
      "logps/chosen": -9.985071182250977,
      "logps/rejected": -16.840097427368164,
      "loss": 1.3448,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -14.977605819702148,
      "rewards/margins": 10.282541275024414,
      "rewards/rejected": -25.260149002075195,
      "step": 650
    },
    {
      "epoch": 0.5334427574887156,
      "eval_logits/chosen": -2.256930351257324,
      "eval_logits/rejected": -2.6202380657196045,
      "eval_logps/chosen": -8.08243465423584,
      "eval_logps/rejected": -15.596598625183105,
      "eval_loss": 0.19149567186832428,
      "eval_rewards/accuracies": 0.9292929172515869,
      "eval_rewards/chosen": -12.123653411865234,
      "eval_rewards/margins": 11.271244049072266,
      "eval_rewards/rejected": -23.394899368286133,
      "eval_runtime": 26.0758,
      "eval_samples_per_second": 30.22,
      "eval_steps_per_second": 3.797,
      "step": 650
    },
    {
      "epoch": 0.5416495691423882,
      "grad_norm": 24.455801010131836,
      "learning_rate": 2.9684532864643123e-06,
      "logits/chosen": -2.367436647415161,
      "logits/rejected": -2.587658405303955,
      "logps/chosen": -9.489423751831055,
      "logps/rejected": -17.4456844329834,
      "loss": 1.0519,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -14.234136581420898,
      "rewards/margins": 11.9343900680542,
      "rewards/rejected": -26.168527603149414,
      "step": 660
    },
    {
      "epoch": 0.5498563807960607,
      "grad_norm": 5.811470985412598,
      "learning_rate": 2.9169218667902562e-06,
      "logits/chosen": -2.348598003387451,
      "logits/rejected": -2.6428780555725098,
      "logps/chosen": -8.679718971252441,
      "logps/rejected": -17.370378494262695,
      "loss": 1.4414,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -13.01957893371582,
      "rewards/margins": 13.035990715026855,
      "rewards/rejected": -26.05556869506836,
      "step": 670
    },
    {
      "epoch": 0.5580631924497332,
      "grad_norm": 3.920671224594116,
      "learning_rate": 2.8652075714060296e-06,
      "logits/chosen": -2.2460615634918213,
      "logits/rejected": -2.688439130783081,
      "logps/chosen": -8.605902671813965,
      "logps/rejected": -17.995298385620117,
      "loss": 1.2588,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -12.908853530883789,
      "rewards/margins": 14.084096908569336,
      "rewards/rejected": -26.992950439453125,
      "step": 680
    },
    {
      "epoch": 0.5662700041034058,
      "grad_norm": 6.326926231384277,
      "learning_rate": 2.813333083910761e-06,
      "logits/chosen": -2.225262403488159,
      "logits/rejected": -2.5890913009643555,
      "logps/chosen": -8.476136207580566,
      "logps/rejected": -17.39042854309082,
      "loss": 1.0845,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -12.714203834533691,
      "rewards/margins": 13.371438980102539,
      "rewards/rejected": -26.085641860961914,
      "step": 690
    },
    {
      "epoch": 0.5744768157570784,
      "grad_norm": 40.18976593017578,
      "learning_rate": 2.761321158169134e-06,
      "logits/chosen": -2.3147075176239014,
      "logits/rejected": -2.6593971252441406,
      "logps/chosen": -9.569334983825684,
      "logps/rejected": -17.609041213989258,
      "loss": 1.112,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -14.35400104522705,
      "rewards/margins": 12.059560775756836,
      "rewards/rejected": -26.413562774658203,
      "step": 700
    },
    {
      "epoch": 0.5744768157570784,
      "eval_logits/chosen": -2.2287755012512207,
      "eval_logits/rejected": -2.61726450920105,
      "eval_logps/chosen": -8.035316467285156,
      "eval_logps/rejected": -16.38852310180664,
      "eval_loss": 0.15530110895633698,
      "eval_rewards/accuracies": 0.9494949579238892,
      "eval_rewards/chosen": -12.052973747253418,
      "eval_rewards/margins": 12.529810905456543,
      "eval_rewards/rejected": -24.582786560058594,
      "eval_runtime": 26.0758,
      "eval_samples_per_second": 30.22,
      "eval_steps_per_second": 3.797,
      "step": 700
    },
    {
      "epoch": 0.582683627410751,
      "grad_norm": 3.3525519371032715,
      "learning_rate": 2.70919460833079e-06,
      "logits/chosen": -2.268187999725342,
      "logits/rejected": -2.6029911041259766,
      "logps/chosen": -8.399455070495605,
      "logps/rejected": -16.665691375732422,
      "loss": 0.9868,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -12.599184036254883,
      "rewards/margins": 12.399351119995117,
      "rewards/rejected": -24.998537063598633,
      "step": 710
    },
    {
      "epoch": 0.5908904390644235,
      "grad_norm": 3.8941779136657715,
      "learning_rate": 2.6569762988232838e-06,
      "logits/chosen": -2.244795322418213,
      "logits/rejected": -2.582077741622925,
      "logps/chosen": -8.654252052307129,
      "logps/rejected": -17.85637855529785,
      "loss": 1.141,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -12.981379508972168,
      "rewards/margins": 13.803189277648926,
      "rewards/rejected": -26.78456687927246,
      "step": 720
    },
    {
      "epoch": 0.599097250718096,
      "grad_norm": 2.1029350757598877,
      "learning_rate": 2.604689134322999e-06,
      "logits/chosen": -2.3193981647491455,
      "logits/rejected": -2.5794734954833984,
      "logps/chosen": -9.116949081420898,
      "logps/rejected": -17.48544692993164,
      "loss": 0.9563,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -13.675424575805664,
      "rewards/margins": 12.55274772644043,
      "rewards/rejected": -26.22817039489746,
      "step": 730
    },
    {
      "epoch": 0.6073040623717686,
      "grad_norm": 1.702139139175415,
      "learning_rate": 2.5523560497083927e-06,
      "logits/chosen": -2.3507955074310303,
      "logits/rejected": -2.5690979957580566,
      "logps/chosen": -10.525144577026367,
      "logps/rejected": -19.390769958496094,
      "loss": 0.8511,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -15.78771686553955,
      "rewards/margins": 13.298437118530273,
      "rewards/rejected": -29.086156845092773,
      "step": 740
    },
    {
      "epoch": 0.6155108740254411,
      "grad_norm": 1.7904704809188843,
      "learning_rate": 2.5e-06,
      "logits/chosen": -2.2960116863250732,
      "logits/rejected": -2.6188182830810547,
      "logps/chosen": -10.946245193481445,
      "logps/rejected": -19.992496490478516,
      "loss": 0.8016,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -16.419368743896484,
      "rewards/margins": 13.569379806518555,
      "rewards/rejected": -29.98874855041504,
      "step": 750
    },
    {
      "epoch": 0.6155108740254411,
      "eval_logits/chosen": -2.196777105331421,
      "eval_logits/rejected": -2.5854194164276123,
      "eval_logps/chosen": -8.476363182067871,
      "eval_logps/rejected": -17.717283248901367,
      "eval_loss": 0.13788874447345734,
      "eval_rewards/accuracies": 0.9494949579238892,
      "eval_rewards/chosen": -12.714544296264648,
      "eval_rewards/margins": 13.861381530761719,
      "eval_rewards/rejected": -26.575925827026367,
      "eval_runtime": 26.0729,
      "eval_samples_per_second": 30.223,
      "eval_steps_per_second": 3.797,
      "step": 750
    },
    {
      "epoch": 0.6237176856791137,
      "grad_norm": 8.473343849182129,
      "learning_rate": 2.447643950291608e-06,
      "logits/chosen": -2.224431037902832,
      "logits/rejected": -2.5573363304138184,
      "logps/chosen": -9.210199356079102,
      "logps/rejected": -18.3690242767334,
      "loss": 0.8284,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -13.815298080444336,
      "rewards/margins": 13.738235473632812,
      "rewards/rejected": -27.553537368774414,
      "step": 760
    },
    {
      "epoch": 0.6319244973327862,
      "grad_norm": 4.644931793212891,
      "learning_rate": 2.3953108656770018e-06,
      "logits/chosen": -2.349294424057007,
      "logits/rejected": -2.57686710357666,
      "logps/chosen": -10.199867248535156,
      "logps/rejected": -18.49524688720703,
      "loss": 0.8514,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -15.299799919128418,
      "rewards/margins": 12.44306755065918,
      "rewards/rejected": -27.742868423461914,
      "step": 770
    },
    {
      "epoch": 0.6401313089864588,
      "grad_norm": 3.135159730911255,
      "learning_rate": 2.3430237011767166e-06,
      "logits/chosen": -2.229192018508911,
      "logits/rejected": -2.58925724029541,
      "logps/chosen": -8.433959007263184,
      "logps/rejected": -18.41027069091797,
      "loss": 0.8814,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -12.650938034057617,
      "rewards/margins": 14.96446704864502,
      "rewards/rejected": -27.615406036376953,
      "step": 780
    },
    {
      "epoch": 0.6483381206401313,
      "grad_norm": 2.2181289196014404,
      "learning_rate": 2.290805391669212e-06,
      "logits/chosen": -2.2662854194641113,
      "logits/rejected": -2.5892975330352783,
      "logps/chosen": -10.09723949432373,
      "logps/rejected": -19.54763412475586,
      "loss": 0.6926,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -15.145858764648438,
      "rewards/margins": 14.175588607788086,
      "rewards/rejected": -29.32145118713379,
      "step": 790
    },
    {
      "epoch": 0.6565449322938038,
      "grad_norm": 17.57781410217285,
      "learning_rate": 2.238678841830867e-06,
      "logits/chosen": -2.2988882064819336,
      "logits/rejected": -2.5589470863342285,
      "logps/chosen": -10.06776237487793,
      "logps/rejected": -19.59263801574707,
      "loss": 0.9249,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -15.101643562316895,
      "rewards/margins": 14.287312507629395,
      "rewards/rejected": -29.38895606994629,
      "step": 800
    },
    {
      "epoch": 0.6565449322938038,
      "eval_logits/chosen": -2.1742825508117676,
      "eval_logits/rejected": -2.5563910007476807,
      "eval_logps/chosen": -9.052408218383789,
      "eval_logps/rejected": -19.134737014770508,
      "eval_loss": 0.12732766568660736,
      "eval_rewards/accuracies": 0.9494949579238892,
      "eval_rewards/chosen": -13.57861328125,
      "eval_rewards/margins": 15.123494148254395,
      "eval_rewards/rejected": -28.702106475830078,
      "eval_runtime": 26.072,
      "eval_samples_per_second": 30.224,
      "eval_steps_per_second": 3.797,
      "step": 800
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.000088674059682e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}