clm7b0129-wds-0.8-kendall-onof-ofif-corr-max-2-simpo-max1500-default/checkpoint-400/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3282724661469019,
  "eval_steps": 50,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008206811653672548,
      "grad_norm": 0.06318386644124985,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": -2.1367907524108887,
      "logits/rejected": -2.4948182106018066,
      "logps/chosen": -0.291498601436615,
      "logps/rejected": -0.3196522295475006,
      "loss": 7.5728,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4372479021549225,
      "rewards/margins": 0.04223042353987694,
      "rewards/rejected": -0.47947829961776733,
      "step": 10
    },
    {
      "epoch": 0.016413623307345096,
      "grad_norm": 0.07310314476490021,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": -2.1456007957458496,
      "logits/rejected": -2.4455342292785645,
      "logps/chosen": -0.26213544607162476,
      "logps/rejected": -0.32332050800323486,
      "loss": 7.5298,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3932031989097595,
      "rewards/margins": 0.09177760779857635,
      "rewards/rejected": -0.4849807620048523,
      "step": 20
    },
    {
      "epoch": 0.024620434961017644,
      "grad_norm": 0.05936102196574211,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.0765950679779053,
      "logits/rejected": -2.485799789428711,
      "logps/chosen": -0.26631081104278564,
      "logps/rejected": -0.32647624611854553,
      "loss": 7.5208,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.39946624636650085,
      "rewards/margins": 0.09024813771247864,
      "rewards/rejected": -0.4897143840789795,
      "step": 30
    },
    {
      "epoch": 0.03282724661469019,
      "grad_norm": 0.08499134331941605,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": -2.0753884315490723,
      "logits/rejected": -2.441580295562744,
      "logps/chosen": -0.2749950885772705,
      "logps/rejected": -0.30180150270462036,
      "loss": 7.4229,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41249266266822815,
      "rewards/margins": 0.04020959883928299,
      "rewards/rejected": -0.45270222425460815,
      "step": 40
    },
    {
      "epoch": 0.04103405826836274,
      "grad_norm": 0.07681389898061752,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": -2.145660877227783,
      "logits/rejected": -2.465946912765503,
      "logps/chosen": -0.24909739196300507,
      "logps/rejected": -0.2796121835708618,
      "loss": 7.4811,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.373646080493927,
      "rewards/margins": 0.045772187411785126,
      "rewards/rejected": -0.41941824555397034,
      "step": 50
    },
    {
      "epoch": 0.04103405826836274,
      "eval_logits/chosen": -2.012000799179077,
      "eval_logits/rejected": -2.5381252765655518,
      "eval_logps/chosen": -0.24157460033893585,
      "eval_logps/rejected": -0.2957758605480194,
      "eval_loss": 0.9317650198936462,
      "eval_rewards/accuracies": 0.5252525210380554,
      "eval_rewards/chosen": -0.3623619079589844,
      "eval_rewards/margins": 0.08130191266536713,
      "eval_rewards/rejected": -0.4436637759208679,
      "eval_runtime": 26.0809,
      "eval_samples_per_second": 30.214,
      "eval_steps_per_second": 3.796,
      "step": 50
    },
    {
      "epoch": 0.04924086992203529,
      "grad_norm": 0.06638535112142563,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": -2.145846128463745,
      "logits/rejected": -2.4077115058898926,
      "logps/chosen": -0.22265203297138214,
      "logps/rejected": -0.30774614214897156,
      "loss": 7.4605,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3339780271053314,
      "rewards/margins": 0.1276412308216095,
      "rewards/rejected": -0.4616192877292633,
      "step": 60
    },
    {
      "epoch": 0.057447681575707836,
      "grad_norm": 0.057281140238046646,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": -2.0021350383758545,
      "logits/rejected": -2.4299912452697754,
      "logps/chosen": -0.23488977551460266,
      "logps/rejected": -0.33270469307899475,
      "loss": 7.4257,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.3523346781730652,
      "rewards/margins": 0.14672236144542694,
      "rewards/rejected": -0.4990570545196533,
      "step": 70
    },
    {
      "epoch": 0.06565449322938038,
      "grad_norm": 0.07725922018289566,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": -2.117995023727417,
      "logits/rejected": -2.359265089035034,
      "logps/chosen": -0.21598832309246063,
      "logps/rejected": -0.300583153963089,
      "loss": 7.4384,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.32398244738578796,
      "rewards/margins": 0.12689228355884552,
      "rewards/rejected": -0.4508747458457947,
      "step": 80
    },
    {
      "epoch": 0.07386130488305294,
      "grad_norm": 0.0598183274269104,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": -2.282627582550049,
      "logits/rejected": -2.441333532333374,
      "logps/chosen": -0.23655852675437927,
      "logps/rejected": -0.3246815800666809,
      "loss": 7.4584,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3548378050327301,
      "rewards/margins": 0.13218457996845245,
      "rewards/rejected": -0.48702239990234375,
      "step": 90
    },
    {
      "epoch": 0.08206811653672548,
      "grad_norm": 0.058213479816913605,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": -2.1114468574523926,
      "logits/rejected": -2.5035691261291504,
      "logps/chosen": -0.23073866963386536,
      "logps/rejected": -0.29445192217826843,
      "loss": 7.4116,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.34610801935195923,
      "rewards/margins": 0.09556989371776581,
      "rewards/rejected": -0.44167789816856384,
      "step": 100
    },
    {
      "epoch": 0.08206811653672548,
      "eval_logits/chosen": -2.0183491706848145,
      "eval_logits/rejected": -2.5400593280792236,
      "eval_logps/chosen": -0.20393377542495728,
      "eval_logps/rejected": -0.2818409502506256,
      "eval_loss": 0.9129964113235474,
      "eval_rewards/accuracies": 0.5656565427780151,
      "eval_rewards/chosen": -0.3059006631374359,
      "eval_rewards/margins": 0.11686072498559952,
      "eval_rewards/rejected": -0.4227614104747772,
      "eval_runtime": 26.0825,
      "eval_samples_per_second": 30.212,
      "eval_steps_per_second": 3.796,
      "step": 100
    },
    {
      "epoch": 0.09027492819039803,
      "grad_norm": 0.06249881908297539,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": -2.1324548721313477,
      "logits/rejected": -2.434319019317627,
      "logps/chosen": -0.22180762887001038,
      "logps/rejected": -0.28862181305885315,
      "loss": 7.3604,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.332711398601532,
      "rewards/margins": 0.10022131353616714,
      "rewards/rejected": -0.43293270468711853,
      "step": 110
    },
    {
      "epoch": 0.09848173984407058,
      "grad_norm": 0.061758093535900116,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": -2.0597169399261475,
      "logits/rejected": -2.4386391639709473,
      "logps/chosen": -0.22720107436180115,
      "logps/rejected": -0.303659051656723,
      "loss": 7.3624,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3408016264438629,
      "rewards/margins": 0.1146869882941246,
      "rewards/rejected": -0.4554885923862457,
      "step": 120
    },
    {
      "epoch": 0.10668855149774313,
      "grad_norm": 0.08368540555238724,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": -2.0944437980651855,
      "logits/rejected": -2.4157254695892334,
      "logps/chosen": -0.19590887427330017,
      "logps/rejected": -0.3365771770477295,
      "loss": 7.3464,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.29386329650878906,
      "rewards/margins": 0.2110024392604828,
      "rewards/rejected": -0.5048657655715942,
      "step": 130
    },
    {
      "epoch": 0.11489536315141567,
      "grad_norm": 0.060954928398132324,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": -2.1551766395568848,
      "logits/rejected": -2.5695576667785645,
      "logps/chosen": -0.19875812530517578,
      "logps/rejected": -0.2967599928379059,
      "loss": 7.3179,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.29813718795776367,
      "rewards/margins": 0.14700281620025635,
      "rewards/rejected": -0.4451400339603424,
      "step": 140
    },
    {
      "epoch": 0.12310217480508823,
      "grad_norm": 0.05665091797709465,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.1410365104675293,
      "logits/rejected": -2.4798667430877686,
      "logps/chosen": -0.19316771626472473,
      "logps/rejected": -0.2972142696380615,
      "loss": 7.2384,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.2897515296936035,
      "rewards/margins": 0.15606984496116638,
      "rewards/rejected": -0.4458213746547699,
      "step": 150
    },
    {
      "epoch": 0.12310217480508823,
      "eval_logits/chosen": -2.0654072761535645,
      "eval_logits/rejected": -2.596571207046509,
      "eval_logps/chosen": -0.17970335483551025,
      "eval_logps/rejected": -0.2767573893070221,
      "eval_loss": 0.8982937335968018,
      "eval_rewards/accuracies": 0.5858585834503174,
      "eval_rewards/chosen": -0.269555002450943,
      "eval_rewards/margins": 0.14558106660842896,
      "eval_rewards/rejected": -0.41513609886169434,
      "eval_runtime": 26.0741,
      "eval_samples_per_second": 30.222,
      "eval_steps_per_second": 3.797,
      "step": 150
    },
    {
      "epoch": 0.13130898645876077,
      "grad_norm": 0.07328196614980698,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": -2.202148914337158,
      "logits/rejected": -2.5385117530822754,
      "logps/chosen": -0.19814102351665497,
      "logps/rejected": -0.3139093518257141,
      "loss": 7.1309,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.29721152782440186,
      "rewards/margins": 0.1736525148153305,
      "rewards/rejected": -0.4708639979362488,
      "step": 160
    },
    {
      "epoch": 0.1395157981124333,
      "grad_norm": 0.09789691120386124,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": -2.153545618057251,
      "logits/rejected": -2.532336950302124,
      "logps/chosen": -0.1861150860786438,
      "logps/rejected": -0.2787100672721863,
      "loss": 7.2498,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.2791725993156433,
      "rewards/margins": 0.13889247179031372,
      "rewards/rejected": -0.4180651605129242,
      "step": 170
    },
    {
      "epoch": 0.14772260976610588,
      "grad_norm": 0.0829203873872757,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": -2.254868268966675,
      "logits/rejected": -2.5931999683380127,
      "logps/chosen": -0.19771653413772583,
      "logps/rejected": -0.2859548032283783,
      "loss": 7.1449,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.29657480120658875,
      "rewards/margins": 0.1323573738336563,
      "rewards/rejected": -0.42893218994140625,
      "step": 180
    },
    {
      "epoch": 0.15592942141977842,
      "grad_norm": 0.10499900579452515,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": -2.2066543102264404,
      "logits/rejected": -2.594515323638916,
      "logps/chosen": -0.19944152235984802,
      "logps/rejected": -0.26530706882476807,
      "loss": 7.1717,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.29916223883628845,
      "rewards/margins": 0.09879834204912186,
      "rewards/rejected": -0.3979606032371521,
      "step": 190
    },
    {
      "epoch": 0.16413623307345096,
      "grad_norm": 0.10522742569446564,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": -2.282865047454834,
      "logits/rejected": -2.651233196258545,
      "logps/chosen": -0.1877971738576889,
      "logps/rejected": -0.28373947739601135,
      "loss": 7.1001,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.28169578313827515,
      "rewards/margins": 0.14391344785690308,
      "rewards/rejected": -0.4256092607975006,
      "step": 200
    },
    {
      "epoch": 0.16413623307345096,
      "eval_logits/chosen": -2.1934518814086914,
      "eval_logits/rejected": -2.772143840789795,
      "eval_logps/chosen": -0.1790025532245636,
      "eval_logps/rejected": -0.3032245934009552,
      "eval_loss": 0.8807509541511536,
      "eval_rewards/accuracies": 0.5858585834503174,
      "eval_rewards/chosen": -0.2685038149356842,
      "eval_rewards/margins": 0.1863330751657486,
      "eval_rewards/rejected": -0.454836905002594,
      "eval_runtime": 26.0786,
      "eval_samples_per_second": 30.216,
      "eval_steps_per_second": 3.796,
      "step": 200
    },
    {
      "epoch": 0.1723430447271235,
      "grad_norm": 0.13399213552474976,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": -2.3500285148620605,
      "logits/rejected": -2.6827149391174316,
      "logps/chosen": -0.17780962586402893,
      "logps/rejected": -0.2749634087085724,
      "loss": 7.0722,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.266714483499527,
      "rewards/margins": 0.145730659365654,
      "rewards/rejected": -0.41244515776634216,
      "step": 210
    },
    {
      "epoch": 0.18054985638079607,
      "grad_norm": 0.1727023422718048,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": -2.356797695159912,
      "logits/rejected": -2.7276604175567627,
      "logps/chosen": -0.20826168358325958,
      "logps/rejected": -0.3760753273963928,
      "loss": 7.0205,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.31239253282546997,
      "rewards/margins": 0.2517204284667969,
      "rewards/rejected": -0.5641129016876221,
      "step": 220
    },
    {
      "epoch": 0.1887566680344686,
      "grad_norm": 0.14360135793685913,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": -2.3526904582977295,
      "logits/rejected": -2.785292148590088,
      "logps/chosen": -0.22017621994018555,
      "logps/rejected": -0.35291892290115356,
      "loss": 6.9773,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.3302643299102783,
      "rewards/margins": 0.1991141140460968,
      "rewards/rejected": -0.5293784737586975,
      "step": 230
    },
    {
      "epoch": 0.19696347968814115,
      "grad_norm": 0.18886974453926086,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": -2.3618314266204834,
      "logits/rejected": -2.850059747695923,
      "logps/chosen": -0.22531962394714355,
      "logps/rejected": -0.38331112265586853,
      "loss": 6.9879,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.3379794657230377,
      "rewards/margins": 0.23698726296424866,
      "rewards/rejected": -0.5749667286872864,
      "step": 240
    },
    {
      "epoch": 0.2051702913418137,
      "grad_norm": 0.2599099278450012,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": -2.4776885509490967,
      "logits/rejected": -2.8583390712738037,
      "logps/chosen": -0.2538486123085022,
      "logps/rejected": -0.42415136098861694,
      "loss": 6.7357,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3807729184627533,
      "rewards/margins": 0.2554541230201721,
      "rewards/rejected": -0.636227011680603,
      "step": 250
    },
    {
      "epoch": 0.2051702913418137,
      "eval_logits/chosen": -2.3788387775421143,
      "eval_logits/rejected": -2.958366632461548,
      "eval_logps/chosen": -0.2162775695323944,
      "eval_logps/rejected": -0.41066110134124756,
      "eval_loss": 0.8405817747116089,
      "eval_rewards/accuracies": 0.6060606241226196,
      "eval_rewards/chosen": -0.3244163393974304,
      "eval_rewards/margins": 0.2915753722190857,
      "eval_rewards/rejected": -0.6159917116165161,
      "eval_runtime": 26.0752,
      "eval_samples_per_second": 30.22,
      "eval_steps_per_second": 3.797,
      "step": 250
    },
    {
      "epoch": 0.21337710299548626,
      "grad_norm": 0.25840890407562256,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": -2.5892271995544434,
      "logits/rejected": -2.819650888442993,
      "logps/chosen": -0.2257525473833084,
      "logps/rejected": -0.46280306577682495,
      "loss": 6.6048,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.3386288285255432,
      "rewards/margins": 0.3555757403373718,
      "rewards/rejected": -0.6942045092582703,
      "step": 260
    },
    {
      "epoch": 0.2215839146491588,
      "grad_norm": 0.2838613986968994,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": -2.536020040512085,
      "logits/rejected": -2.843383312225342,
      "logps/chosen": -0.2739175856113434,
      "logps/rejected": -0.5088076591491699,
      "loss": 6.6403,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.41087642312049866,
      "rewards/margins": 0.3523350656032562,
      "rewards/rejected": -0.7632113695144653,
      "step": 270
    },
    {
      "epoch": 0.22979072630283134,
      "grad_norm": 0.3575810194015503,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": -2.4525866508483887,
      "logits/rejected": -2.7756829261779785,
      "logps/chosen": -0.27604570984840393,
      "logps/rejected": -0.6104786992073059,
      "loss": 6.4767,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.4140685498714447,
      "rewards/margins": 0.5016494989395142,
      "rewards/rejected": -0.9157179594039917,
      "step": 280
    },
    {
      "epoch": 0.23799753795650389,
      "grad_norm": 0.36226338148117065,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": -2.4639816284179688,
      "logits/rejected": -2.865053415298462,
      "logps/chosen": -0.35306140780448914,
      "logps/rejected": -0.5840066075325012,
      "loss": 6.2071,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.5295921564102173,
      "rewards/margins": 0.3464178144931793,
      "rewards/rejected": -0.8760099411010742,
      "step": 290
    },
    {
      "epoch": 0.24620434961017645,
      "grad_norm": 0.38896313309669495,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": -2.6630337238311768,
      "logits/rejected": -2.7479195594787598,
      "logps/chosen": -0.3706950545310974,
      "logps/rejected": -0.7957242131233215,
      "loss": 6.1801,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.5560425519943237,
      "rewards/margins": 0.6375436782836914,
      "rewards/rejected": -1.1935861110687256,
      "step": 300
    },
    {
      "epoch": 0.24620434961017645,
      "eval_logits/chosen": -2.411334991455078,
      "eval_logits/rejected": -2.820974588394165,
      "eval_logps/chosen": -0.3725183308124542,
      "eval_logps/rejected": -0.8138000965118408,
      "eval_loss": 0.738965630531311,
      "eval_rewards/accuracies": 0.6060606241226196,
      "eval_rewards/chosen": -0.5587774515151978,
      "eval_rewards/margins": 0.6619227528572083,
      "eval_rewards/rejected": -1.2207001447677612,
      "eval_runtime": 26.0764,
      "eval_samples_per_second": 30.219,
      "eval_steps_per_second": 3.797,
      "step": 300
    },
    {
      "epoch": 0.254411161263849,
      "grad_norm": 0.5848517417907715,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": -2.5622355937957764,
      "logits/rejected": -2.7415950298309326,
      "logps/chosen": -0.38525494933128357,
      "logps/rejected": -0.8741232752799988,
      "loss": 5.98,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.5778824687004089,
      "rewards/margins": 0.7333025336265564,
      "rewards/rejected": -1.3111850023269653,
      "step": 310
    },
    {
      "epoch": 0.26261797291752154,
      "grad_norm": 0.38972222805023193,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": -2.6293787956237793,
      "logits/rejected": -2.7180721759796143,
      "logps/chosen": -0.5181100964546204,
      "logps/rejected": -0.97294682264328,
      "loss": 5.608,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.7771651744842529,
      "rewards/margins": 0.6822551488876343,
      "rewards/rejected": -1.4594202041625977,
      "step": 320
    },
    {
      "epoch": 0.2708247845711941,
      "grad_norm": 0.5381959080696106,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": -2.477749824523926,
      "logits/rejected": -2.7682888507843018,
      "logps/chosen": -0.47721824049949646,
      "logps/rejected": -1.0577385425567627,
      "loss": 5.5189,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.7158273458480835,
      "rewards/margins": 0.8707805871963501,
      "rewards/rejected": -1.5866079330444336,
      "step": 330
    },
    {
      "epoch": 0.2790315962248666,
      "grad_norm": 0.5332415699958801,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": -2.5470075607299805,
      "logits/rejected": -2.8264012336730957,
      "logps/chosen": -0.5053269267082214,
      "logps/rejected": -1.412097454071045,
      "loss": 5.3992,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.7579904198646545,
      "rewards/margins": 1.360155701637268,
      "rewards/rejected": -2.1181461811065674,
      "step": 340
    },
    {
      "epoch": 0.2872384078785392,
      "grad_norm": 0.5841536521911621,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": -2.5552780628204346,
      "logits/rejected": -2.7644314765930176,
      "logps/chosen": -0.6264504790306091,
      "logps/rejected": -1.4451416730880737,
      "loss": 5.0093,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.9396758079528809,
      "rewards/margins": 1.228036880493164,
      "rewards/rejected": -2.167712450027466,
      "step": 350
    },
    {
      "epoch": 0.2872384078785392,
      "eval_logits/chosen": -2.486525774002075,
      "eval_logits/rejected": -2.809356451034546,
      "eval_logps/chosen": -0.6259626746177673,
      "eval_logps/rejected": -1.6826657056808472,
      "eval_loss": 0.610858678817749,
      "eval_rewards/accuracies": 0.6464646458625793,
      "eval_rewards/chosen": -0.9389441013336182,
      "eval_rewards/margins": 1.585054636001587,
      "eval_rewards/rejected": -2.523998737335205,
      "eval_runtime": 26.0792,
      "eval_samples_per_second": 30.216,
      "eval_steps_per_second": 3.796,
      "step": 350
    },
    {
      "epoch": 0.29544521953221176,
      "grad_norm": 0.6259649395942688,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": -2.5630745887756348,
      "logits/rejected": -2.80169939994812,
      "logps/chosen": -0.6148477792739868,
      "logps/rejected": -1.7640241384506226,
      "loss": 5.0832,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.9222715497016907,
      "rewards/margins": 1.7237647771835327,
      "rewards/rejected": -2.646036148071289,
      "step": 360
    },
    {
      "epoch": 0.3036520311858843,
      "grad_norm": 0.5134413838386536,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": -2.5688040256500244,
      "logits/rejected": -2.823493242263794,
      "logps/chosen": -0.7328687906265259,
      "logps/rejected": -2.135953187942505,
      "loss": 4.484,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.0993033647537231,
      "rewards/margins": 2.104626178741455,
      "rewards/rejected": -3.203929901123047,
      "step": 370
    },
    {
      "epoch": 0.31185884283955684,
      "grad_norm": 0.5029065608978271,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": -2.507948160171509,
      "logits/rejected": -2.797893524169922,
      "logps/chosen": -0.8517419695854187,
      "logps/rejected": -2.6004090309143066,
      "loss": 4.3033,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.2776129245758057,
      "rewards/margins": 2.6230006217956543,
      "rewards/rejected": -3.900613307952881,
      "step": 380
    },
    {
      "epoch": 0.3200656544932294,
      "grad_norm": 0.6171831488609314,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": -2.709599018096924,
      "logits/rejected": -2.8980605602264404,
      "logps/chosen": -0.9357224702835083,
      "logps/rejected": -2.464841842651367,
      "loss": 4.6813,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.4035838842391968,
      "rewards/margins": 2.2936789989471436,
      "rewards/rejected": -3.6972625255584717,
      "step": 390
    },
    {
      "epoch": 0.3282724661469019,
      "grad_norm": 1.2649667263031006,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": -2.3837532997131348,
      "logits/rejected": -2.6686861515045166,
      "logps/chosen": -0.9314821362495422,
      "logps/rejected": -2.562440872192383,
      "loss": 4.3764,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.3972232341766357,
      "rewards/margins": 2.4464378356933594,
      "rewards/rejected": -3.843661069869995,
      "step": 400
    },
    {
      "epoch": 0.3282724661469019,
      "eval_logits/chosen": -2.3613698482513428,
      "eval_logits/rejected": -2.7536535263061523,
      "eval_logps/chosen": -0.8348632454872131,
      "eval_logps/rejected": -2.6590662002563477,
      "eval_loss": 0.5019229650497437,
      "eval_rewards/accuracies": 0.6767676472663879,
      "eval_rewards/chosen": -1.2522947788238525,
      "eval_rewards/margins": 2.736304759979248,
      "eval_rewards/rejected": -3.9885993003845215,
      "eval_runtime": 26.0816,
      "eval_samples_per_second": 30.213,
      "eval_steps_per_second": 3.796,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4997440393589555e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
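
A minimal Python sketch for inspecting this state file, assuming matplotlib is installed and the file sits at checkpoint-400/trainer_state.json (the path shown above; adjust as needed). It splits "log_history" into training and evaluation records and plots the reward margins against the global step.

# Sketch: load trainer_state.json and plot train/eval reward margins per step.
# Assumes the file lives at checkpoint-400/trainer_state.json and matplotlib
# is available; both are assumptions, not part of the checkpoint itself.
import json

import matplotlib.pyplot as plt

with open("checkpoint-400/trainer_state.json") as f:
    state = json.load(f)

# Training records carry "loss"; evaluation records carry "eval_loss".
train = [e for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train],
         [e["rewards/margins"] for e in train],
         label="train rewards/margins")
plt.plot([e["step"] for e in evals],
         [e["eval_rewards/margins"] for e in evals],
         marker="o", label="eval rewards/margins")
plt.xlabel("global step")
plt.ylabel("reward margin")
plt.title("checkpoint-400 trainer_state.json")
plt.legend()
plt.tight_layout()
plt.show()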