|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 3.9936, |
|
"eval_steps": 100, |
|
"global_step": 1248, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4e-08, |
|
"logits/chosen": 0.7221256494522095, |
|
"logits/rejected": 0.8745549917221069, |
|
"logps/chosen": -277.6833801269531, |
|
"logps/rejected": -189.9869384765625, |
|
"loss": 2500.0, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 4.0000000000000003e-07, |
|
"logits/chosen": 0.6776503324508667, |
|
"logits/rejected": 0.7872164845466614, |
|
"logps/chosen": -273.4142761230469, |
|
"logps/rejected": -216.78836059570312, |
|
"loss": 2505.8785, |
|
"rewards/accuracies": 0.3888888955116272, |
|
"rewards/chosen": 0.00015669333515688777, |
|
"rewards/margins": -0.0005680065951310098, |
|
"rewards/rejected": 0.0007246998138725758, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 8.000000000000001e-07, |
|
"logits/chosen": 0.6071363687515259, |
|
"logits/rejected": 0.8961409330368042, |
|
"logps/chosen": -252.6411590576172, |
|
"logps/rejected": -188.6440887451172, |
|
"loss": 2498.5875, |
|
"rewards/accuracies": 0.48750001192092896, |
|
"rewards/chosen": 0.0007678332040086389, |
|
"rewards/margins": 0.00017280881002079695, |
|
"rewards/rejected": 0.0005950243212282658, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.2000000000000002e-06, |
|
"logits/chosen": 0.5971012711524963, |
|
"logits/rejected": 0.7454315423965454, |
|
"logps/chosen": -272.0901184082031, |
|
"logps/rejected": -205.0573272705078, |
|
"loss": 2504.0305, |
|
"rewards/accuracies": 0.44999998807907104, |
|
"rewards/chosen": -0.00030031849746592343, |
|
"rewards/margins": -0.0003763613640330732, |
|
"rewards/rejected": 7.604288111906499e-05, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.6000000000000001e-06, |
|
"logits/chosen": 0.5639680027961731, |
|
"logits/rejected": 0.8156954646110535, |
|
"logps/chosen": -254.08987426757812, |
|
"logps/rejected": -199.48912048339844, |
|
"loss": 2499.0203, |
|
"rewards/accuracies": 0.5062500238418579, |
|
"rewards/chosen": -0.00016066078387666494, |
|
"rewards/margins": 0.00013786301133222878, |
|
"rewards/rejected": -0.00029852380976080894, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.0000000000000003e-06, |
|
"logits/chosen": 0.6404772996902466, |
|
"logits/rejected": 0.8446690440177917, |
|
"logps/chosen": -264.58636474609375, |
|
"logps/rejected": -211.51986694335938, |
|
"loss": 2499.9363, |
|
"rewards/accuracies": 0.46875, |
|
"rewards/chosen": 0.0005515815573744476, |
|
"rewards/margins": 3.0463817893178202e-05, |
|
"rewards/rejected": 0.0005211178213357925, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 2.4000000000000003e-06, |
|
"logits/chosen": 0.693733274936676, |
|
"logits/rejected": 0.8246955871582031, |
|
"logps/chosen": -266.50872802734375, |
|
"logps/rejected": -209.1096649169922, |
|
"loss": 2498.2361, |
|
"rewards/accuracies": 0.4375, |
|
"rewards/chosen": 0.0005181363085284829, |
|
"rewards/margins": 0.00020206482440698892, |
|
"rewards/rejected": 0.0003160713822580874, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 2.8000000000000003e-06, |
|
"logits/chosen": 0.6724094152450562, |
|
"logits/rejected": 0.8244352340698242, |
|
"logps/chosen": -256.1933898925781, |
|
"logps/rejected": -198.73048400878906, |
|
"loss": 2492.3055, |
|
"rewards/accuracies": 0.518750011920929, |
|
"rewards/chosen": 0.00043690926395356655, |
|
"rewards/margins": 0.0007959330687299371, |
|
"rewards/rejected": -0.0003590236883610487, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 3.2000000000000003e-06, |
|
"logits/chosen": 0.6599764227867126, |
|
"logits/rejected": 0.8170899152755737, |
|
"logps/chosen": -273.2989807128906, |
|
"logps/rejected": -216.25827026367188, |
|
"loss": 2488.535, |
|
"rewards/accuracies": 0.4937500059604645, |
|
"rewards/chosen": 0.0022243678104132414, |
|
"rewards/margins": 0.0011816672049462795, |
|
"rewards/rejected": 0.0010427006054669619, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 3.6000000000000003e-06, |
|
"logits/chosen": 0.6013220548629761, |
|
"logits/rejected": 0.8273889422416687, |
|
"logps/chosen": -268.2584533691406, |
|
"logps/rejected": -213.9058380126953, |
|
"loss": 2482.6732, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": 0.003236656542867422, |
|
"rewards/margins": 0.0017823975067585707, |
|
"rewards/rejected": 0.001454259268939495, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 4.000000000000001e-06, |
|
"logits/chosen": 0.6121121644973755, |
|
"logits/rejected": 0.8102381825447083, |
|
"logps/chosen": -258.34576416015625, |
|
"logps/rejected": -212.81478881835938, |
|
"loss": 2477.3281, |
|
"rewards/accuracies": 0.543749988079071, |
|
"rewards/chosen": 0.0035489716101437807, |
|
"rewards/margins": 0.0023237697314471006, |
|
"rewards/rejected": 0.001225201180204749, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"eval_logits/chosen": 0.8220780491828918, |
|
"eval_logits/rejected": 0.8795672655105591, |
|
"eval_logps/chosen": -256.7978210449219, |
|
"eval_logps/rejected": -233.720703125, |
|
"eval_loss": 2500.715576171875, |
|
"eval_rewards/accuracies": 0.49300000071525574, |
|
"eval_rewards/chosen": -0.0018364518182352185, |
|
"eval_rewards/margins": -1.2206258361402433e-05, |
|
"eval_rewards/rejected": -0.0018242454389110208, |
|
"eval_runtime": 442.8545, |
|
"eval_samples_per_second": 4.516, |
|
"eval_steps_per_second": 1.129, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 4.4e-06, |
|
"logits/chosen": 0.6344770193099976, |
|
"logits/rejected": 0.8470233082771301, |
|
"logps/chosen": -257.6260986328125, |
|
"logps/rejected": -205.66622924804688, |
|
"loss": 2466.1889, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.005115890875458717, |
|
"rewards/margins": 0.003451352473348379, |
|
"rewards/rejected": 0.0016645386349409819, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 4.800000000000001e-06, |
|
"logits/chosen": 0.6225894689559937, |
|
"logits/rejected": 0.7519047856330872, |
|
"logps/chosen": -273.39776611328125, |
|
"logps/rejected": -211.7797088623047, |
|
"loss": 2446.4322, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": 0.007990755140781403, |
|
"rewards/margins": 0.005494705401360989, |
|
"rewards/rejected": 0.0024960506707429886, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 4.999755441268144e-06, |
|
"logits/chosen": 0.5374079942703247, |
|
"logits/rejected": 0.8167151212692261, |
|
"logps/chosen": -283.385498046875, |
|
"logps/rejected": -228.09716796875, |
|
"loss": 2434.1764, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.010817321017384529, |
|
"rewards/margins": 0.006822638213634491, |
|
"rewards/rejected": 0.003994682338088751, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 4.997799258487003e-06, |
|
"logits/chosen": 0.6246897578239441, |
|
"logits/rejected": 0.8092406988143921, |
|
"logps/chosen": -273.5729064941406, |
|
"logps/rejected": -220.0849609375, |
|
"loss": 2406.7561, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.013732816092669964, |
|
"rewards/margins": 0.009700324386358261, |
|
"rewards/rejected": 0.004032492637634277, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 4.993888423734898e-06, |
|
"logits/chosen": 0.6373761892318726, |
|
"logits/rejected": 0.7924760580062866, |
|
"logps/chosen": -259.81890869140625, |
|
"logps/rejected": -198.0768585205078, |
|
"loss": 2378.9869, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 0.017864007502794266, |
|
"rewards/margins": 0.01282494980841875, |
|
"rewards/rejected": 0.00503905676305294, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 4.988025997434253e-06, |
|
"logits/chosen": 0.6211603283882141, |
|
"logits/rejected": 0.7996471524238586, |
|
"logps/chosen": -262.85723876953125, |
|
"logps/rejected": -209.2670135498047, |
|
"loss": 2347.3615, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": 0.02218160405755043, |
|
"rewards/margins": 0.016304031014442444, |
|
"rewards/rejected": 0.005877572111785412, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 4.980216567224801e-06, |
|
"logits/chosen": 0.5590766668319702, |
|
"logits/rejected": 0.8358405232429504, |
|
"logps/chosen": -268.93450927734375, |
|
"logps/rejected": -217.07223510742188, |
|
"loss": 2314.8727, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 0.027453165501356125, |
|
"rewards/margins": 0.02034853585064411, |
|
"rewards/rejected": 0.0071046315133571625, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 4.970466244373527e-06, |
|
"logits/chosen": 0.5857344269752502, |
|
"logits/rejected": 0.8025603294372559, |
|
"logps/chosen": -269.49420166015625, |
|
"logps/rejected": -224.668701171875, |
|
"loss": 2323.1209, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.026119906455278397, |
|
"rewards/margins": 0.019484227523207664, |
|
"rewards/rejected": 0.006635676138103008, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 4.958782658992307e-06, |
|
"logits/chosen": 0.6378930807113647, |
|
"logits/rejected": 0.7625473737716675, |
|
"logps/chosen": -245.82064819335938, |
|
"logps/rejected": -212.6953582763672, |
|
"loss": 2353.0727, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": 0.02194877900183201, |
|
"rewards/margins": 0.01612677052617073, |
|
"rewards/rejected": 0.0058220066130161285, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 4.945174954066957e-06, |
|
"logits/chosen": 0.5794962048530579, |
|
"logits/rejected": 0.7216407656669617, |
|
"logps/chosen": -262.74481201171875, |
|
"logps/rejected": -205.56753540039062, |
|
"loss": 2224.3488, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 0.040737103670835495, |
|
"rewards/margins": 0.03179619461297989, |
|
"rewards/rejected": 0.008940907195210457, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"eval_logits/chosen": 0.7462335824966431, |
|
"eval_logits/rejected": 0.8050766587257385, |
|
"eval_logps/chosen": -258.5672912597656, |
|
"eval_logps/rejected": -235.52040100097656, |
|
"eval_loss": 2499.890380859375, |
|
"eval_rewards/accuracies": 0.5015000104904175, |
|
"eval_rewards/chosen": -0.019531190395355225, |
|
"eval_rewards/margins": 0.0002903530257754028, |
|
"eval_rewards/rejected": -0.019821541383862495, |
|
"eval_runtime": 412.3797, |
|
"eval_samples_per_second": 4.85, |
|
"eval_steps_per_second": 1.212, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 4.929653778302397e-06, |
|
"logits/chosen": 0.6170880198478699, |
|
"logits/rejected": 0.7665790915489197, |
|
"logps/chosen": -260.6783447265625, |
|
"logps/rejected": -205.8716278076172, |
|
"loss": 2165.3709, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.04661153629422188, |
|
"rewards/margins": 0.039808522909879684, |
|
"rewards/rejected": 0.006803011987358332, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.912231277789509e-06, |
|
"logits/chosen": 0.5444914102554321, |
|
"logits/rejected": 0.7560594081878662, |
|
"logps/chosen": -277.1501770019531, |
|
"logps/rejected": -227.8493194580078, |
|
"loss": 2199.4646, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.05624958872795105, |
|
"rewards/margins": 0.037255048751831055, |
|
"rewards/rejected": 0.018994538113474846, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 4.892921086500219e-06, |
|
"logits/chosen": 0.5499123930931091, |
|
"logits/rejected": 0.6719511151313782, |
|
"logps/chosen": -243.00390625, |
|
"logps/rejected": -205.0264434814453, |
|
"loss": 2218.1998, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": 0.04481152817606926, |
|
"rewards/margins": 0.03403898701071739, |
|
"rewards/rejected": 0.010772541165351868, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 4.871738315618236e-06, |
|
"logits/chosen": 0.5359088182449341, |
|
"logits/rejected": 0.7013985514640808, |
|
"logps/chosen": -263.36322021484375, |
|
"logps/rejected": -217.5428924560547, |
|
"loss": 2121.8713, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": 0.06463086605072021, |
|
"rewards/margins": 0.04686326906085014, |
|
"rewards/rejected": 0.017767589539289474, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 4.848699541713801e-06, |
|
"logits/chosen": 0.5078493356704712, |
|
"logits/rejected": 0.6534050703048706, |
|
"logps/chosen": -255.99612426757812, |
|
"logps/rejected": -209.0691680908203, |
|
"loss": 2179.4311, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.05505412817001343, |
|
"rewards/margins": 0.04023104906082153, |
|
"rewards/rejected": 0.014823079109191895, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 4.823822793771696e-06, |
|
"logits/chosen": 0.5251200795173645, |
|
"logits/rejected": 0.6907744407653809, |
|
"logps/chosen": -244.97616577148438, |
|
"logps/rejected": -193.0167694091797, |
|
"loss": 2039.2832, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.06581679731607437, |
|
"rewards/margins": 0.05999577045440674, |
|
"rewards/rejected": 0.005821021273732185, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 4.797127539082669e-06, |
|
"logits/chosen": 0.5102941393852234, |
|
"logits/rejected": 0.6884879469871521, |
|
"logps/chosen": -267.79193115234375, |
|
"logps/rejected": -216.37808227539062, |
|
"loss": 2084.2711, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": 0.06373900920152664, |
|
"rewards/margins": 0.05440465360879898, |
|
"rewards/rejected": 0.009334356524050236, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 4.7686346680093135e-06, |
|
"logits/chosen": 0.46306952834129333, |
|
"logits/rejected": 0.6997040510177612, |
|
"logps/chosen": -248.92501831054688, |
|
"logps/rejected": -192.95266723632812, |
|
"loss": 1956.5348, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.08462236821651459, |
|
"rewards/margins": 0.07454703748226166, |
|
"rewards/rejected": 0.01007532887160778, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 4.738366477638319e-06, |
|
"logits/chosen": 0.49755996465682983, |
|
"logits/rejected": 0.6823209524154663, |
|
"logps/chosen": -252.31265258789062, |
|
"logps/rejected": -203.94357299804688, |
|
"loss": 2082.6836, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": 0.060582734644412994, |
|
"rewards/margins": 0.0560123547911644, |
|
"rewards/rejected": 0.004570374730974436, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 4.7063466543318965e-06, |
|
"logits/chosen": 0.40442362427711487, |
|
"logits/rejected": 0.643377423286438, |
|
"logps/chosen": -253.7049102783203, |
|
"logps/rejected": -207.00146484375, |
|
"loss": 1898.0719, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.09280969202518463, |
|
"rewards/margins": 0.08169680833816528, |
|
"rewards/rejected": 0.011112888343632221, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"eval_logits/chosen": 0.6232779622077942, |
|
"eval_logits/rejected": 0.6844168305397034, |
|
"eval_logps/chosen": -262.24908447265625, |
|
"eval_logps/rejected": -239.25296020507812, |
|
"eval_loss": 2505.691162109375, |
|
"eval_rewards/accuracies": 0.5139999985694885, |
|
"eval_rewards/chosen": -0.05634931102395058, |
|
"eval_rewards/margins": 0.0007975505432114005, |
|
"eval_rewards/rejected": -0.057146865874528885, |
|
"eval_runtime": 411.7351, |
|
"eval_samples_per_second": 4.857, |
|
"eval_steps_per_second": 1.214, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 4.672600255192022e-06, |
|
"logits/chosen": 0.4478415846824646, |
|
"logits/rejected": 0.6094005703926086, |
|
"logps/chosen": -280.0593566894531, |
|
"logps/rejected": -230.06820678710938, |
|
"loss": 1902.6814, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.08015590906143188, |
|
"rewards/margins": 0.08636869490146637, |
|
"rewards/rejected": -0.006212792359292507, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 4.6371536884520115e-06, |
|
"logits/chosen": 0.4466761648654938, |
|
"logits/rejected": 0.5766214728355408, |
|
"logps/chosen": -260.2146911621094, |
|
"logps/rejected": -202.6782989501953, |
|
"loss": 1965.6586, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 0.08317091315984726, |
|
"rewards/margins": 0.07663293927907944, |
|
"rewards/rejected": 0.006537970155477524, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 4.600034692810764e-06, |
|
"logits/chosen": 0.4064536690711975, |
|
"logits/rejected": 0.6527116894721985, |
|
"logps/chosen": -285.71063232421875, |
|
"logps/rejected": -232.9168243408203, |
|
"loss": 1807.0604, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.1063646450638771, |
|
"rewards/margins": 0.10833654552698135, |
|
"rewards/rejected": -0.001971897669136524, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 4.561272315725852e-06, |
|
"logits/chosen": 0.39116889238357544, |
|
"logits/rejected": 0.5679996609687805, |
|
"logps/chosen": -269.14727783203125, |
|
"logps/rejected": -226.08987426757812, |
|
"loss": 1983.8736, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.09996851533651352, |
|
"rewards/margins": 0.08038529753684998, |
|
"rewards/rejected": 0.019583214074373245, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 4.520896890682449e-06, |
|
"logits/chosen": 0.3980609178543091, |
|
"logits/rejected": 0.5835639834403992, |
|
"logps/chosen": -261.222412109375, |
|
"logps/rejected": -211.2742156982422, |
|
"loss": 1903.2615, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": 0.11096767336130142, |
|
"rewards/margins": 0.10369744151830673, |
|
"rewards/rejected": 0.007270221598446369, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 4.478940013455864e-06, |
|
"logits/chosen": 0.32668036222457886, |
|
"logits/rejected": 0.642693042755127, |
|
"logps/chosen": -248.02194213867188, |
|
"logps/rejected": -195.903564453125, |
|
"loss": 1819.0176, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.11030755937099457, |
|
"rewards/margins": 0.10598069429397583, |
|
"rewards/rejected": 0.004326865542680025, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 4.435434517386281e-06, |
|
"logits/chosen": 0.40880435705184937, |
|
"logits/rejected": 0.6121729612350464, |
|
"logps/chosen": -256.9469909667969, |
|
"logps/rejected": -210.01284790039062, |
|
"loss": 1875.7564, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": 0.10248272120952606, |
|
"rewards/margins": 0.09737477451562881, |
|
"rewards/rejected": 0.005107959732413292, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 4.39041444768504e-06, |
|
"logits/chosen": 0.35375842452049255, |
|
"logits/rejected": 0.5443102121353149, |
|
"logps/chosen": -271.12091064453125, |
|
"logps/rejected": -223.96395874023438, |
|
"loss": 1840.3432, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.10606244951486588, |
|
"rewards/margins": 0.11150339990854263, |
|
"rewards/rejected": -0.005440945271402597, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 4.343915034792569e-06, |
|
"logits/chosen": 0.33456122875213623, |
|
"logits/rejected": 0.6237267255783081, |
|
"logps/chosen": -248.20458984375, |
|
"logps/rejected": -200.05455017089844, |
|
"loss": 1773.2205, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.11527778208255768, |
|
"rewards/margins": 0.11671481281518936, |
|
"rewards/rejected": -0.0014370165299624205, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 4.295972666808811e-06, |
|
"logits/chosen": 0.3166826665401459, |
|
"logits/rejected": 0.5174378156661987, |
|
"logps/chosen": -285.5495910644531, |
|
"logps/rejected": -236.46630859375, |
|
"loss": 1879.8852, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": 0.10617715120315552, |
|
"rewards/margins": 0.1061965599656105, |
|
"rewards/rejected": -1.941286063811276e-05, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"eval_logits/chosen": 0.5215258598327637, |
|
"eval_logits/rejected": 0.5839402079582214, |
|
"eval_logps/chosen": -266.0533447265625, |
|
"eval_logps/rejected": -243.10528564453125, |
|
"eval_loss": 2516.08349609375, |
|
"eval_rewards/accuracies": 0.5199999809265137, |
|
"eval_rewards/chosen": -0.09439140558242798, |
|
"eval_rewards/margins": 0.001278651412576437, |
|
"eval_rewards/rejected": -0.09567005187273026, |
|
"eval_runtime": 412.4154, |
|
"eval_samples_per_second": 4.849, |
|
"eval_steps_per_second": 1.212, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 4.246624861017732e-06, |
|
"logits/chosen": 0.31735214591026306, |
|
"logits/rejected": 0.45617708563804626, |
|
"logps/chosen": -242.1129608154297, |
|
"logps/rejected": -200.98440551757812, |
|
"loss": 1814.2686, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.13390564918518066, |
|
"rewards/margins": 0.11606212705373764, |
|
"rewards/rejected": 0.01784351095557213, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 4.195910234528186e-06, |
|
"logits/chosen": 0.34283334016799927, |
|
"logits/rejected": 0.532746434211731, |
|
"logps/chosen": -250.3861541748047, |
|
"logps/rejected": -209.1632843017578, |
|
"loss": 1950.3166, |
|
"rewards/accuracies": 0.550000011920929, |
|
"rewards/chosen": 0.08673225343227386, |
|
"rewards/margins": 0.09020966291427612, |
|
"rewards/rejected": -0.003477415069937706, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 4.143868474054098e-06, |
|
"logits/chosen": 0.2972554564476013, |
|
"logits/rejected": 0.4938802123069763, |
|
"logps/chosen": -245.431640625, |
|
"logps/rejected": -195.8451385498047, |
|
"loss": 1783.8299, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.1252884864807129, |
|
"rewards/margins": 0.11878180503845215, |
|
"rewards/rejected": 0.006506689824163914, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 4.0905403048576545e-06, |
|
"logits/chosen": 0.3040269911289215, |
|
"logits/rejected": 0.5599544048309326, |
|
"logps/chosen": -254.08749389648438, |
|
"logps/rejected": -213.42941284179688, |
|
"loss": 1878.9361, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.1011713370680809, |
|
"rewards/margins": 0.10365436226129532, |
|
"rewards/rejected": -0.002483018906787038, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 4.035967458879751e-06, |
|
"logits/chosen": 0.29073578119277954, |
|
"logits/rejected": 0.4535519480705261, |
|
"logps/chosen": -226.4980926513672, |
|
"logps/rejected": -191.19338989257812, |
|
"loss": 1882.7074, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": 0.11003535985946655, |
|
"rewards/margins": 0.10973072052001953, |
|
"rewards/rejected": 0.000304625544231385, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 3.980192642082682e-06, |
|
"logits/chosen": 0.27826622128486633, |
|
"logits/rejected": 0.4509051740169525, |
|
"logps/chosen": -246.6437225341797, |
|
"logps/rejected": -199.22531127929688, |
|
"loss": 1963.9541, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": 0.10869649797677994, |
|
"rewards/margins": 0.10766948759555817, |
|
"rewards/rejected": 0.0010270171333104372, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 3.923259501030604e-06, |
|
"logits/chosen": 0.24556589126586914, |
|
"logits/rejected": 0.4926172196865082, |
|
"logps/chosen": -247.1698455810547, |
|
"logps/rejected": -208.8766326904297, |
|
"loss": 1971.4818, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": 0.0876002386212349, |
|
"rewards/margins": 0.08931747823953629, |
|
"rewards/rejected": -0.0017172384541481733, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 3.865212588733927e-06, |
|
"logits/chosen": 0.3068960905075073, |
|
"logits/rejected": 0.515642523765564, |
|
"logps/chosen": -253.49880981445312, |
|
"logps/rejected": -201.22650146484375, |
|
"loss": 1692.4139, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.12761953473091125, |
|
"rewards/margins": 0.15515872836112976, |
|
"rewards/rejected": -0.027539223432540894, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 3.8060973297843773e-06, |
|
"logits/chosen": 0.2675209045410156, |
|
"logits/rejected": 0.4657977521419525, |
|
"logps/chosen": -260.93353271484375, |
|
"logps/rejected": -221.60214233398438, |
|
"loss": 1822.4361, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.10896619409322739, |
|
"rewards/margins": 0.10793409496545792, |
|
"rewards/rejected": 0.0010320901637896895, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 3.7459599848079965e-06, |
|
"logits/chosen": 0.32330405712127686, |
|
"logits/rejected": 0.49660053849220276, |
|
"logps/chosen": -270.75567626953125, |
|
"logps/rejected": -223.2246856689453, |
|
"loss": 1917.2811, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.1301083266735077, |
|
"rewards/margins": 0.10672901570796967, |
|
"rewards/rejected": 0.023379310965538025, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"eval_logits/chosen": 0.4610958397388458, |
|
"eval_logits/rejected": 0.5243986248970032, |
|
"eval_logps/chosen": -268.1746826171875, |
|
"eval_logps/rejected": -245.2342987060547, |
|
"eval_loss": 2527.199462890625, |
|
"eval_rewards/accuracies": 0.5115000009536743, |
|
"eval_rewards/chosen": -0.11560481786727905, |
|
"eval_rewards/margins": 0.0013554621255025268, |
|
"eval_rewards/rejected": -0.11696028709411621, |
|
"eval_runtime": 411.7771, |
|
"eval_samples_per_second": 4.857, |
|
"eval_steps_per_second": 1.214, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 3.684847614263898e-06, |
|
"logits/chosen": 0.2653389871120453, |
|
"logits/rejected": 0.5690582990646362, |
|
"logps/chosen": -266.63861083984375, |
|
"logps/rejected": -214.3343963623047, |
|
"loss": 1769.2539, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": 0.1135379895567894, |
|
"rewards/margins": 0.13335202634334564, |
|
"rewards/rejected": -0.01981404609978199, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 3.622808041617133e-06, |
|
"logits/chosen": 0.23678083717823029, |
|
"logits/rejected": 0.3923517167568207, |
|
"logps/chosen": -251.62295532226562, |
|
"logps/rejected": -213.0308074951172, |
|
"loss": 1775.6238, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.12413735687732697, |
|
"rewards/margins": 0.11967913061380386, |
|
"rewards/rejected": 0.0044582299888134, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 3.559889815914441e-06, |
|
"logits/chosen": 0.28451576828956604, |
|
"logits/rejected": 0.5491675138473511, |
|
"logps/chosen": -242.1103057861328, |
|
"logps/rejected": -196.47967529296875, |
|
"loss": 1708.515, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.13650254905223846, |
|
"rewards/margins": 0.1416713297367096, |
|
"rewards/rejected": -0.005168789066374302, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 3.496142173792219e-06, |
|
"logits/chosen": 0.23486502468585968, |
|
"logits/rejected": 0.4870077073574066, |
|
"logps/chosen": -244.1999969482422, |
|
"logps/rejected": -209.3772430419922, |
|
"loss": 1659.4633, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": 0.13094991445541382, |
|
"rewards/margins": 0.14943310618400574, |
|
"rewards/rejected": -0.018483208492398262, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 3.4316150009464023e-06, |
|
"logits/chosen": 0.2991487979888916, |
|
"logits/rejected": 0.5229448080062866, |
|
"logps/chosen": -264.29534912109375, |
|
"logps/rejected": -222.90609741210938, |
|
"loss": 1999.6244, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.1066334992647171, |
|
"rewards/margins": 0.0911371111869812, |
|
"rewards/rejected": 0.015496388077735901, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 3.366358793094433e-06, |
|
"logits/chosen": 0.18731743097305298, |
|
"logits/rejected": 0.40507373213768005, |
|
"logps/chosen": -245.1287078857422, |
|
"logps/rejected": -194.5263214111328, |
|
"loss": 1803.6146, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": 0.15052266418933868, |
|
"rewards/margins": 0.13785871863365173, |
|
"rewards/rejected": 0.012663939967751503, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 3.3004246164598535e-06, |
|
"logits/chosen": 0.20718684792518616, |
|
"logits/rejected": 0.46155864000320435, |
|
"logps/chosen": -270.30792236328125, |
|
"logps/rejected": -233.5759735107422, |
|
"loss": 1710.7586, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 0.14311200380325317, |
|
"rewards/margins": 0.14155760407447815, |
|
"rewards/rejected": 0.0015544015914201736, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 3.233864067810446e-06, |
|
"logits/chosen": 0.3184027075767517, |
|
"logits/rejected": 0.4623110890388489, |
|
"logps/chosen": -252.27920532226562, |
|
"logps/rejected": -208.2698974609375, |
|
"loss": 1681.7773, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.137790247797966, |
|
"rewards/margins": 0.15522876381874084, |
|
"rewards/rejected": -0.017438489943742752, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 3.1667292340812077e-06, |
|
"logits/chosen": 0.1437700092792511, |
|
"logits/rejected": 0.3409765958786011, |
|
"logps/chosen": -247.97341918945312, |
|
"logps/rejected": -218.58163452148438, |
|
"loss": 1856.1729, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 0.13051538169384003, |
|
"rewards/margins": 0.12071319669485092, |
|
"rewards/rejected": 0.009802192449569702, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 3.099072651613728e-06, |
|
"logits/chosen": 0.24014055728912354, |
|
"logits/rejected": 0.3376144766807556, |
|
"logps/chosen": -236.4192352294922, |
|
"logps/rejected": -202.04861450195312, |
|
"loss": 1799.3824, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": 0.11552337557077408, |
|
"rewards/margins": 0.12494631111621857, |
|
"rewards/rejected": -0.009422937408089638, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"eval_logits/chosen": 0.40748730301856995, |
|
"eval_logits/rejected": 0.4713863432407379, |
|
"eval_logps/chosen": -270.2481994628906, |
|
"eval_logps/rejected": -247.35043334960938, |
|
"eval_loss": 2534.42919921875, |
|
"eval_rewards/accuracies": 0.5210000276565552, |
|
"eval_rewards/chosen": -0.13634006679058075, |
|
"eval_rewards/margins": 0.001781497965566814, |
|
"eval_rewards/rejected": -0.13812156021595, |
|
"eval_runtime": 412.3385, |
|
"eval_samples_per_second": 4.85, |
|
"eval_steps_per_second": 1.213, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 3.0309472650438982e-06, |
|
"logits/chosen": 0.2844320833683014, |
|
"logits/rejected": 0.44563204050064087, |
|
"logps/chosen": -256.5829772949219, |
|
"logps/rejected": -221.15725708007812, |
|
"loss": 1905.6564, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 0.10678652673959732, |
|
"rewards/margins": 0.10849614441394806, |
|
"rewards/rejected": -0.001709613250568509, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 2.9624063858701006e-06, |
|
"logits/chosen": 0.2729710638523102, |
|
"logits/rejected": 0.4474371075630188, |
|
"logps/chosen": -252.75082397460938, |
|
"logps/rejected": -210.4240264892578, |
|
"loss": 1794.8119, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.11081516742706299, |
|
"rewards/margins": 0.1245477944612503, |
|
"rewards/rejected": -0.013732627034187317, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 2.8935036507343185e-06, |
|
"logits/chosen": 0.20590758323669434, |
|
"logits/rejected": 0.4346593916416168, |
|
"logps/chosen": -262.78900146484375, |
|
"logps/rejected": -220.1951141357422, |
|
"loss": 1844.0162, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.11512579768896103, |
|
"rewards/margins": 0.12753179669380188, |
|
"rewards/rejected": -0.012406004592776299, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 2.8242929794487926e-06, |
|
"logits/chosen": 0.18648579716682434, |
|
"logits/rejected": 0.4544282555580139, |
|
"logps/chosen": -268.1901550292969, |
|
"logps/rejected": -227.70193481445312, |
|
"loss": 1771.6072, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.1294766366481781, |
|
"rewards/margins": 0.14083310961723328, |
|
"rewards/rejected": -0.011356466449797153, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 2.7548285328010984e-06, |
|
"logits/chosen": 0.26485538482666016, |
|
"logits/rejected": 0.36643001437187195, |
|
"logps/chosen": -250.9102783203125, |
|
"logps/rejected": -210.93991088867188, |
|
"loss": 1854.1963, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.12583324313163757, |
|
"rewards/margins": 0.11704309284687042, |
|
"rewards/rejected": 0.008790150284767151, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 2.6851646701706306e-06, |
|
"logits/chosen": 0.1529732346534729, |
|
"logits/rejected": 0.33808109164237976, |
|
"logps/chosen": -280.79803466796875, |
|
"logps/rejected": -242.85238647460938, |
|
"loss": 1942.4141, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": 0.11561117321252823, |
|
"rewards/margins": 0.10545969009399414, |
|
"rewards/rejected": 0.010151493363082409, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 2.6153559069897007e-06, |
|
"logits/chosen": 0.24622070789337158, |
|
"logits/rejected": 0.3663380444049835, |
|
"logps/chosen": -240.98788452148438, |
|
"logps/rejected": -203.41390991210938, |
|
"loss": 1846.866, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.10691628605127335, |
|
"rewards/margins": 0.11943434178829193, |
|
"rewards/rejected": -0.012518051080405712, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 2.5454568720824937e-06, |
|
"logits/chosen": 0.14400359988212585, |
|
"logits/rejected": 0.4130190908908844, |
|
"logps/chosen": -238.1641082763672, |
|
"logps/rejected": -194.319580078125, |
|
"loss": 1685.1135, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": 0.1472087949514389, |
|
"rewards/margins": 0.141302227973938, |
|
"rewards/rejected": 0.005906577687710524, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 2.4755222649153014e-06, |
|
"logits/chosen": 0.19388817250728607, |
|
"logits/rejected": 0.38251611590385437, |
|
"logps/chosen": -235.1546173095703, |
|
"logps/rejected": -187.75552368164062, |
|
"loss": 1779.3379, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.1266268938779831, |
|
"rewards/margins": 0.13590212166309357, |
|
"rewards/rejected": -0.009275219403207302, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 2.4056068127914803e-06, |
|
"logits/chosen": 0.21440652012825012, |
|
"logits/rejected": 0.46250271797180176, |
|
"logps/chosen": -270.1151123046875, |
|
"logps/rejected": -226.77603149414062, |
|
"loss": 1751.5762, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 0.12278898805379868, |
|
"rewards/margins": 0.15427103638648987, |
|
"rewards/rejected": -0.03148204833269119, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"eval_logits/chosen": 0.39058271050453186, |
|
"eval_logits/rejected": 0.45453789830207825, |
|
"eval_logps/chosen": -271.09881591796875, |
|
"eval_logps/rejected": -248.2779541015625, |
|
"eval_loss": 2531.35498046875, |
|
"eval_rewards/accuracies": 0.5180000066757202, |
|
"eval_rewards/chosen": -0.14484626054763794, |
|
"eval_rewards/margins": 0.0025505598168820143, |
|
"eval_rewards/rejected": -0.14739681780338287, |
|
"eval_runtime": 411.7503, |
|
"eval_samples_per_second": 4.857, |
|
"eval_steps_per_second": 1.214, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 2.3357652280246125e-06, |
|
"logits/chosen": 0.22305497527122498, |
|
"logits/rejected": 0.4246363043785095, |
|
"logps/chosen": -244.64987182617188, |
|
"logps/rejected": -204.69613647460938, |
|
"loss": 1997.1527, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": 0.10293789952993393, |
|
"rewards/margins": 0.09320969879627228, |
|
"rewards/rejected": 0.009728200733661652, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 2.2660521651234036e-06, |
|
"logits/chosen": 0.25884026288986206, |
|
"logits/rejected": 0.44062429666519165, |
|
"logps/chosen": -264.1467590332031, |
|
"logps/rejected": -212.3171844482422, |
|
"loss": 1703.6529, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.14418098330497742, |
|
"rewards/margins": 0.1545061618089676, |
|
"rewards/rejected": -0.0103251738473773, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 2.1965221780218173e-06, |
|
"logits/chosen": 0.16370469331741333, |
|
"logits/rejected": 0.34310778975486755, |
|
"logps/chosen": -260.98602294921875, |
|
"logps/rejected": -223.19790649414062, |
|
"loss": 1666.0893, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": 0.14241552352905273, |
|
"rewards/margins": 0.14847032725811005, |
|
"rewards/rejected": -0.006054792553186417, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 2.1272296773879107e-06, |
|
"logits/chosen": 0.14960137009620667, |
|
"logits/rejected": 0.40142565965652466, |
|
"logps/chosen": -237.9215087890625, |
|
"logps/rejected": -199.8916015625, |
|
"loss": 1738.3658, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": 0.13602004945278168, |
|
"rewards/margins": 0.14607930183410645, |
|
"rewards/rejected": -0.010059243068099022, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 2.058228888044788e-06, |
|
"logits/chosen": 0.2741914689540863, |
|
"logits/rejected": 0.3872790038585663, |
|
"logps/chosen": -223.29269409179688, |
|
"logps/rejected": -191.80650329589844, |
|
"loss": 1910.0223, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.10628987848758698, |
|
"rewards/margins": 0.11196577548980713, |
|
"rewards/rejected": -0.00567590119317174, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 1.989573806536978e-06, |
|
"logits/chosen": 0.18802039325237274, |
|
"logits/rejected": 0.4464651048183441, |
|
"logps/chosen": -256.7940979003906, |
|
"logps/rejected": -208.06430053710938, |
|
"loss": 1710.393, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": 0.15197846293449402, |
|
"rewards/margins": 0.1547323763370514, |
|
"rewards/rejected": -0.002753905486315489, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 1.921318158875459e-06, |
|
"logits/chosen": 0.2419842779636383, |
|
"logits/rejected": 0.4348203241825104, |
|
"logps/chosen": -249.1728973388672, |
|
"logps/rejected": -207.82229614257812, |
|
"loss": 1884.0729, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": 0.12094120681285858, |
|
"rewards/margins": 0.12061796337366104, |
|
"rewards/rejected": 0.0003232499584555626, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 1.8535153584943915e-06, |
|
"logits/chosen": 0.2110445499420166, |
|
"logits/rejected": 0.4180677533149719, |
|
"logps/chosen": -257.97967529296875, |
|
"logps/rejected": -223.7866668701172, |
|
"loss": 1903.2082, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": 0.12379207462072372, |
|
"rewards/margins": 0.1165727972984314, |
|
"rewards/rejected": 0.007219274528324604, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 1.7862184644524422e-06, |
|
"logits/chosen": 0.2049965113401413, |
|
"logits/rejected": 0.4525831639766693, |
|
"logps/chosen": -256.8290710449219, |
|
"logps/rejected": -224.7571563720703, |
|
"loss": 1728.5514, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": 0.14060300588607788, |
|
"rewards/margins": 0.1399126946926117, |
|
"rewards/rejected": 0.0006903231260366738, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 1.7194801399114471e-06, |
|
"logits/chosen": 0.1295299082994461, |
|
"logits/rejected": 0.38342705368995667, |
|
"logps/chosen": -265.95697021484375, |
|
"logps/rejected": -224.35922241210938, |
|
"loss": 1711.1711, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.1655883491039276, |
|
"rewards/margins": 0.14990000426769257, |
|
"rewards/rejected": 0.015688356012105942, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"eval_logits/chosen": 0.37592813372612, |
|
"eval_logits/rejected": 0.44024261832237244, |
|
"eval_logps/chosen": -271.4833984375, |
|
"eval_logps/rejected": -248.64402770996094, |
|
"eval_loss": 2536.2451171875, |
|
"eval_rewards/accuracies": 0.5145000219345093, |
|
"eval_rewards/chosen": -0.14869214594364166, |
|
"eval_rewards/margins": 0.0023655793629586697, |
|
"eval_rewards/rejected": -0.15105770528316498, |
|
"eval_runtime": 412.1594, |
|
"eval_samples_per_second": 4.852, |
|
"eval_steps_per_second": 1.213, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 1.6533526109248632e-06, |
|
"logits/chosen": 0.22814953327178955, |
|
"logits/rejected": 0.44856709241867065, |
|
"logps/chosen": -264.6068115234375, |
|
"logps/rejected": -218.6339874267578, |
|
"loss": 1694.8418, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": 0.1343715339899063, |
|
"rewards/margins": 0.15012042224407196, |
|
"rewards/rejected": -0.015748897567391396, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 1.5878876255682951e-06, |
|
"logits/chosen": 0.25933316349983215, |
|
"logits/rejected": 0.428168922662735, |
|
"logps/chosen": -271.8929748535156, |
|
"logps/rejected": -222.19677734375, |
|
"loss": 1799.3105, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.13050588965415955, |
|
"rewards/margins": 0.12607057392597198, |
|
"rewards/rejected": 0.00443531759083271, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 1.5231364134440485e-06, |
|
"logits/chosen": 0.16305533051490784, |
|
"logits/rejected": 0.37009209394454956, |
|
"logps/chosen": -239.7517547607422, |
|
"logps/rejected": -201.59359741210938, |
|
"loss": 1877.6279, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 0.13841071724891663, |
|
"rewards/margins": 0.12821929156780243, |
|
"rewards/rejected": 0.0101914182305336, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 1.4591496455914292e-06, |
|
"logits/chosen": 0.23846232891082764, |
|
"logits/rejected": 0.44133177399635315, |
|
"logps/chosen": -269.0438232421875, |
|
"logps/rejected": -235.73202514648438, |
|
"loss": 1826.1201, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.12339715659618378, |
|
"rewards/margins": 0.12225867807865143, |
|
"rewards/rejected": 0.0011384893441572785, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 1.395977394834132e-06, |
|
"logits/chosen": 0.2057628184556961, |
|
"logits/rejected": 0.457078218460083, |
|
"logps/chosen": -222.45474243164062, |
|
"logps/rejected": -178.09011840820312, |
|
"loss": 1674.3545, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": 0.14612336456775665, |
|
"rewards/margins": 0.1515127718448639, |
|
"rewards/rejected": -0.005389401223510504, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 1.3336690965957733e-06, |
|
"logits/chosen": 0.23303177952766418, |
|
"logits/rejected": 0.38948389887809753, |
|
"logps/chosen": -276.028564453125, |
|
"logps/rejected": -242.33120727539062, |
|
"loss": 1734.6137, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.13224345445632935, |
|
"rewards/margins": 0.1367109715938568, |
|
"rewards/rejected": -0.004467502236366272, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 1.2722735102142192e-06, |
|
"logits/chosen": 0.18385744094848633, |
|
"logits/rejected": 0.483973890542984, |
|
"logps/chosen": -234.6072540283203, |
|
"logps/rejected": -188.79147338867188, |
|
"loss": 1524.1409, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": 0.1483391523361206, |
|
"rewards/margins": 0.16734978556632996, |
|
"rewards/rejected": -0.019010629504919052, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 1.2118386807849733e-06, |
|
"logits/chosen": 0.23927922546863556, |
|
"logits/rejected": 0.39777225255966187, |
|
"logps/chosen": -247.7241668701172, |
|
"logps/rejected": -209.2562255859375, |
|
"loss": 1804.3422, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": 0.11453523486852646, |
|
"rewards/margins": 0.13239163160324097, |
|
"rewards/rejected": -0.0178564190864563, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 1.1524119015635116e-06, |
|
"logits/chosen": 0.14219129085540771, |
|
"logits/rejected": 0.3456658124923706, |
|
"logps/chosen": -244.8931121826172, |
|
"logps/rejected": -194.36337280273438, |
|
"loss": 1655.0236, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": 0.17296788096427917, |
|
"rewards/margins": 0.17738190293312073, |
|
"rewards/rejected": -0.004414013121277094, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 1.0940396769559584e-06, |
|
"logits/chosen": 0.23592603206634521, |
|
"logits/rejected": 0.3958457410335541, |
|
"logps/chosen": -242.33761596679688, |
|
"logps/rejected": -191.34255981445312, |
|
"loss": 1894.4447, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": 0.11816737800836563, |
|
"rewards/margins": 0.1154218316078186, |
|
"rewards/rejected": 0.002745547564700246, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"eval_logits/chosen": 0.3617529273033142, |
|
"eval_logits/rejected": 0.42615193128585815, |
|
"eval_logps/chosen": -272.0999755859375, |
|
"eval_logps/rejected": -249.2417449951172, |
|
"eval_loss": 2542.6298828125, |
|
"eval_rewards/accuracies": 0.5235000252723694, |
|
"eval_rewards/chosen": -0.15485772490501404, |
|
"eval_rewards/margins": 0.002177263842895627, |
|
"eval_rewards/rejected": -0.1570349782705307, |
|
"eval_runtime": 411.8184, |
|
"eval_samples_per_second": 4.857, |
|
"eval_steps_per_second": 1.214, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 1.036767686127079e-06, |
|
"logits/chosen": 0.26092058420181274, |
|
"logits/rejected": 0.4202706217765808, |
|
"logps/chosen": -271.7019958496094, |
|
"logps/rejected": -229.44772338867188, |
|
"loss": 1612.1519, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": 0.1469217836856842, |
|
"rewards/margins": 0.1562378704547882, |
|
"rewards/rejected": -0.009316088631749153, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 9.806407472540644e-07, |
|
"logits/chosen": 0.16799505054950714, |
|
"logits/rejected": 0.41259631514549255, |
|
"logps/chosen": -245.1134033203125, |
|
"logps/rejected": -193.49716186523438, |
|
"loss": 1752.974, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.1420082449913025, |
|
"rewards/margins": 0.16994443535804749, |
|
"rewards/rejected": -0.027936194092035294, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 9.257027824540823e-07, |
|
"logits/chosen": 0.15648850798606873, |
|
"logits/rejected": 0.3334013819694519, |
|
"logps/chosen": -237.5020294189453, |
|
"logps/rejected": -190.00733947753906, |
|
"loss": 1678.6684, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.1168346032500267, |
|
"rewards/margins": 0.14195187389850616, |
|
"rewards/rejected": -0.02511727437376976, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 8.719967834130385e-07, |
|
"logits/chosen": 0.17559798061847687, |
|
"logits/rejected": 0.38583293557167053, |
|
"logps/chosen": -226.3419647216797, |
|
"logps/rejected": -192.8260498046875, |
|
"loss": 1782.9469, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.13087774813175201, |
|
"rewards/margins": 0.14290814101696014, |
|
"rewards/rejected": -0.012030376121401787, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 8.195647777424479e-07, |
|
"logits/chosen": 0.19569836556911469, |
|
"logits/rejected": 0.420011043548584, |
|
"logps/chosen": -243.2490692138672, |
|
"logps/rejected": -200.2803497314453, |
|
"loss": 1728.6527, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.1271076500415802, |
|
"rewards/margins": 0.14936277270317078, |
|
"rewards/rejected": -0.02225511521100998, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 7.684477960907422e-07, |
|
"logits/chosen": 0.2223655879497528, |
|
"logits/rejected": 0.36084768176078796, |
|
"logps/chosen": -259.83001708984375, |
|
"logps/rejected": -217.5656280517578, |
|
"loss": 1787.5324, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.13919194042682648, |
|
"rewards/margins": 0.12921075522899628, |
|
"rewards/rejected": 0.009981190785765648, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 7.186858400347455e-07, |
|
"logits/chosen": 0.15693505108356476, |
|
"logits/rejected": 0.35678160190582275, |
|
"logps/chosen": -266.93768310546875, |
|
"logps/rejected": -233.291259765625, |
|
"loss": 1748.1148, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.12252594530582428, |
|
"rewards/margins": 0.13253001868724823, |
|
"rewards/rejected": -0.010004105977714062, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 6.703178507764618e-07, |
|
"logits/chosen": 0.21335263550281525, |
|
"logits/rejected": 0.3721942901611328, |
|
"logps/chosen": -250.0565948486328, |
|
"logps/rejected": -213.24624633789062, |
|
"loss": 1792.84, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": 0.1454544961452484, |
|
"rewards/margins": 0.12651817500591278, |
|
"rewards/rejected": 0.018936317414045334, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 6.233816786696414e-07, |
|
"logits/chosen": 0.17327558994293213, |
|
"logits/rejected": 0.41259893774986267, |
|
"logps/chosen": -242.4098358154297, |
|
"logps/rejected": -191.53460693359375, |
|
"loss": 1636.6745, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.16031518578529358, |
|
"rewards/margins": 0.1668807417154312, |
|
"rewards/rejected": -0.006565576884895563, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 5.77914053600005e-07, |
|
"logits/chosen": 0.15771836042404175, |
|
"logits/rejected": 0.384365975856781, |
|
"logps/chosen": -247.66213989257812, |
|
"logps/rejected": -207.638427734375, |
|
"loss": 1798.5389, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.13780926167964935, |
|
"rewards/margins": 0.1279788464307785, |
|
"rewards/rejected": 0.00983042549341917, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"eval_logits/chosen": 0.3559388518333435, |
|
"eval_logits/rejected": 0.42022594809532166, |
|
"eval_logps/chosen": -272.4200439453125, |
|
"eval_logps/rejected": -249.57797241210938, |
|
"eval_loss": 2542.728759765625, |
|
"eval_rewards/accuracies": 0.5205000042915344, |
|
"eval_rewards/chosen": -0.15805859863758087, |
|
"eval_rewards/margins": 0.0023383116349577904, |
|
"eval_rewards/rejected": -0.16039690375328064, |
|
"eval_runtime": 411.7713, |
|
"eval_samples_per_second": 4.857, |
|
"eval_steps_per_second": 1.214, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 5.339505562422851e-07, |
|
"logits/chosen": 0.18551324307918549, |
|
"logits/rejected": 0.3874700367450714, |
|
"logps/chosen": -258.8070983886719, |
|
"logps/rejected": -208.31655883789062, |
|
"loss": 1684.7502, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.13547667860984802, |
|
"rewards/margins": 0.14734682440757751, |
|
"rewards/rejected": -0.011870155110955238, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 4.915255902165734e-07, |
|
"logits/chosen": 0.16962969303131104, |
|
"logits/rejected": 0.3440442383289337, |
|
"logps/chosen": -257.40386962890625, |
|
"logps/rejected": -213.15682983398438, |
|
"loss": 1727.4756, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.1502576470375061, |
|
"rewards/margins": 0.15001319348812103, |
|
"rewards/rejected": 0.000244450056925416, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 4.506723551657879e-07, |
|
"logits/chosen": 0.08985213190317154, |
|
"logits/rejected": 0.3018781542778015, |
|
"logps/chosen": -238.0452117919922, |
|
"logps/rejected": -200.73129272460938, |
|
"loss": 1795.9609, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.1125258356332779, |
|
"rewards/margins": 0.11527595669031143, |
|
"rewards/rejected": -0.0027501187287271023, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 4.11422820775299e-07, |
|
"logits/chosen": 0.18122617900371552, |
|
"logits/rejected": 0.3776589035987854, |
|
"logps/chosen": -273.4202575683594, |
|
"logps/rejected": -214.76803588867188, |
|
"loss": 1575.2917, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.14816126227378845, |
|
"rewards/margins": 0.1801377683877945, |
|
"rewards/rejected": -0.03197649493813515, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 3.7380770175506397e-07, |
|
"logits/chosen": 0.18917641043663025, |
|
"logits/rejected": 0.3701549172401428, |
|
"logps/chosen": -236.4595184326172, |
|
"logps/rejected": -196.47378540039062, |
|
"loss": 1757.2316, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": 0.13911323249340057, |
|
"rewards/margins": 0.14288835227489471, |
|
"rewards/rejected": -0.0037751286290585995, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 3.3785643380384063e-07, |
|
"logits/chosen": 0.22518393397331238, |
|
"logits/rejected": 0.4300554394721985, |
|
"logps/chosen": -264.11102294921875, |
|
"logps/rejected": -221.0089569091797, |
|
"loss": 1780.259, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.12094493955373764, |
|
"rewards/margins": 0.13927613198757172, |
|
"rewards/rejected": -0.018331199884414673, |
|
"step": 1060 |
|
}, |
|
    {
      "epoch": 3.42,
      "learning_rate": 3.0359715057429186e-07,
      "logits/chosen": 0.15539857745170593,
      "logits/rejected": 0.330243319272995,
      "logps/chosen": -249.2795867919922,
      "logps/rejected": -213.9594268798828,
      "loss": 1857.3367,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.11874745041131973,
      "rewards/margins": 0.1254003345966339,
      "rewards/rejected": -0.0066528706811368465,
      "step": 1070
    },
    {
      "epoch": 3.46,
      "learning_rate": 2.710566616570048e-07,
      "logits/chosen": 0.22669236361980438,
      "logits/rejected": 0.3851965069770813,
      "logps/chosen": -243.06094360351562,
      "logps/rejected": -207.26522827148438,
      "loss": 1802.6373,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.11891194432973862,
      "rewards/margins": 0.1296943724155426,
      "rewards/rejected": -0.010782415047287941,
      "step": 1080
    },
    {
      "epoch": 3.49,
      "learning_rate": 2.40260431600654e-07,
      "logits/chosen": 0.11191525310277939,
      "logits/rejected": 0.3566216826438904,
      "logps/chosen": -258.7408142089844,
      "logps/rejected": -233.2893524169922,
      "loss": 1846.4627,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.13531699776649475,
      "rewards/margins": 0.1306256651878357,
      "rewards/rejected": 0.004691324662417173,
      "step": 1090
    },
    {
      "epoch": 3.52,
      "learning_rate": 2.1123255998472952e-07,
      "logits/chosen": 0.13580968976020813,
      "logits/rejected": 0.3724610209465027,
      "logps/chosen": -238.85391235351562,
      "logps/rejected": -202.35397338867188,
      "loss": 1834.9711,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.1293712556362152,
      "rewards/margins": 0.12213204801082611,
      "rewards/rejected": 0.007239216007292271,
      "step": 1100
    },
|
    {
      "epoch": 3.52,
      "eval_logits/chosen": 0.3531816899776459,
      "eval_logits/rejected": 0.417733371257782,
      "eval_logps/chosen": -272.4703369140625,
      "eval_logps/rejected": -249.63449096679688,
      "eval_loss": 2542.2373046875,
      "eval_rewards/accuracies": 0.5205000042915344,
      "eval_rewards/chosen": -0.15856170654296875,
      "eval_rewards/margins": 0.0024005102459341288,
      "eval_rewards/rejected": -0.16096222400665283,
      "eval_runtime": 411.8208,
      "eval_samples_per_second": 4.856,
      "eval_steps_per_second": 1.214,
      "step": 1100
    },
    {
      "epoch": 3.55,
      "learning_rate": 1.8399576256041525e-07,
      "logits/chosen": 0.27907487750053406,
      "logits/rejected": 0.39952999353408813,
      "logps/chosen": -293.59210205078125,
      "logps/rejected": -251.298583984375,
      "loss": 1979.5523,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.12343426048755646,
      "rewards/margins": 0.1068718284368515,
      "rewards/rejected": 0.016562417149543762,
      "step": 1110
    },
    {
      "epoch": 3.58,
      "learning_rate": 1.58571353474391e-07,
      "logits/chosen": 0.22743897140026093,
      "logits/rejected": 0.40824824571609497,
      "logps/chosen": -248.27365112304688,
      "logps/rejected": -217.0345916748047,
      "loss": 1971.9447,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.10497580468654633,
      "rewards/margins": 0.10696852207183838,
      "rewards/rejected": -0.0019927166868001223,
      "step": 1120
    },
    {
      "epoch": 3.62,
      "learning_rate": 1.3497922858944857e-07,
      "logits/chosen": 0.21788544952869415,
      "logits/rejected": 0.3408287763595581,
      "logps/chosen": -261.25634765625,
      "logps/rejected": -226.1531982421875,
      "loss": 1813.0029,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.12613138556480408,
      "rewards/margins": 0.13940462470054626,
      "rewards/rejected": -0.013273234479129314,
      "step": 1130
    },
|
    {
      "epoch": 3.65,
      "learning_rate": 1.1323784991499471e-07,
      "logits/chosen": 0.17835107445716858,
      "logits/rejected": 0.41546598076820374,
      "logps/chosen": -254.427978515625,
      "logps/rejected": -208.0858917236328,
      "loss": 1688.9838,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.13813026249408722,
      "rewards/margins": 0.1528724581003189,
      "rewards/rejected": -0.014742200262844563,
      "step": 1140
    },
    {
      "epoch": 3.68,
      "learning_rate": 9.336423115961002e-08,
      "logits/chosen": 0.13116911053657532,
      "logits/rejected": 0.34874704480171204,
      "logps/chosen": -251.4066162109375,
      "logps/rejected": -205.0511016845703,
      "loss": 1693.0176,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.14480547606945038,
      "rewards/margins": 0.1530844271183014,
      "rewards/rejected": -0.008278947323560715,
      "step": 1150
    },
    {
      "epoch": 3.71,
      "learning_rate": 7.537392441697793e-08,
      "logits/chosen": 0.1982724368572235,
      "logits/rejected": 0.3957718014717102,
      "logps/chosen": -265.66033935546875,
      "logps/rejected": -223.394775390625,
      "loss": 1694.5328,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": 0.12068979442119598,
      "rewards/margins": 0.15094828605651855,
      "rewards/rejected": -0.030258500948548317,
      "step": 1160
    },
    {
      "epoch": 3.74,
      "learning_rate": 5.928100799559938e-08,
      "logits/chosen": 0.2445194274187088,
      "logits/rejected": 0.34973251819610596,
      "logps/chosen": -266.8522033691406,
      "logps/rejected": -214.7125701904297,
      "loss": 1810.5473,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.12299786508083344,
      "rewards/margins": 0.13123488426208496,
      "rewards/rejected": -0.008237012661993504,
      "step": 1170
    },
|
    {
      "epoch": 3.78,
      "learning_rate": 4.5098075401815435e-08,
      "logits/chosen": 0.17370513081550598,
      "logits/rejected": 0.44302305579185486,
      "logps/chosen": -251.4030303955078,
      "logps/rejected": -217.8542938232422,
      "loss": 1799.0785,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.14102791249752045,
      "rewards/margins": 0.14266334474086761,
      "rewards/rejected": -0.0016354346880689263,
      "step": 1180
    },
    {
      "epoch": 3.81,
      "learning_rate": 3.283622548476445e-08,
      "logits/chosen": 0.2375222146511078,
      "logits/rejected": 0.45388326048851013,
      "logps/chosen": -243.04116821289062,
      "logps/rejected": -203.55050659179688,
      "loss": 1673.1176,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": 0.13139726221561432,
      "rewards/margins": 0.14136573672294617,
      "rewards/rejected": -0.009968474507331848,
      "step": 1190
    },
    {
      "epoch": 3.84,
      "learning_rate": 2.250505375098161e-08,
      "logits/chosen": 0.28876617550849915,
      "logits/rejected": 0.42698416113853455,
      "logps/chosen": -248.3870086669922,
      "logps/rejected": -211.6997833251953,
      "loss": 1765.5148,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 0.1342637687921524,
      "rewards/margins": 0.1372515708208084,
      "rewards/rejected": -0.002987801330164075,
      "step": 1200
    },
    {
      "epoch": 3.84,
      "eval_logits/chosen": 0.35145023465156555,
      "eval_logits/rejected": 0.41600194573402405,
      "eval_logps/chosen": -272.50103759765625,
      "eval_logps/rejected": -249.63572692871094,
      "eval_loss": 2546.17138671875,
      "eval_rewards/accuracies": 0.5220000147819519,
      "eval_rewards/chosen": -0.15886859595775604,
      "eval_rewards/margins": 0.00210593082010746,
      "eval_rewards/rejected": -0.16097451746463776,
      "eval_runtime": 411.6701,
      "eval_samples_per_second": 4.858,
      "eval_steps_per_second": 1.215,
      "step": 1200
    },
|
    {
      "epoch": 3.87,
      "learning_rate": 1.4112644855438228e-08,
      "logits/chosen": 0.18522273004055023,
      "logits/rejected": 0.3528333306312561,
      "logps/chosen": -252.374755859375,
      "logps/rejected": -213.50112915039062,
      "loss": 1695.0207,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.1518661379814148,
      "rewards/margins": 0.1503283679485321,
      "rewards/rejected": 0.001537766307592392,
      "step": 1210
    },
    {
      "epoch": 3.9,
      "learning_rate": 7.665566274897007e-09,
      "logits/chosen": 0.14011794328689575,
      "logits/rejected": 0.45459121465682983,
      "logps/chosen": -269.1119079589844,
      "logps/rejected": -213.98684692382812,
      "loss": 1599.6027,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": 0.1741984784603119,
      "rewards/margins": 0.17417994141578674,
      "rewards/rejected": 1.8526614439906552e-05,
      "step": 1220
    },
    {
      "epoch": 3.94,
      "learning_rate": 3.1688631685364292e-09,
      "logits/chosen": 0.11093461513519287,
      "logits/rejected": 0.3354471027851105,
      "logps/chosen": -240.45419311523438,
      "logps/rejected": -200.0811004638672,
      "loss": 1762.0594,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.14106154441833496,
      "rewards/margins": 0.14005112648010254,
      "rewards/rejected": 0.0010104203829541802,
      "step": 1230
    },
    {
      "epoch": 3.97,
      "learning_rate": 6.260544298619664e-10,
      "logits/chosen": 0.1383497416973114,
      "logits/rejected": 0.3753938674926758,
      "logps/chosen": -235.23385620117188,
      "logps/rejected": -191.31582641601562,
      "loss": 1685.3523,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.15916982293128967,
      "rewards/margins": 0.1575343906879425,
      "rewards/rejected": 0.0016354434192180634,
      "step": 1240
    },
|
    {
      "epoch": 3.99,
      "step": 1248,
      "total_flos": 0.0,
      "train_loss": 1918.4646684695513,
      "train_runtime": 14252.7073,
      "train_samples_per_second": 1.403,
      "train_steps_per_second": 0.088
    }
  ],
  "logging_steps": 10,
  "max_steps": 1248,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}