{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 478,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.0416666666666666e-08,
      "logits/chosen": -2.5017967224121094,
      "logits/rejected": -2.3871021270751953,
      "logps/chosen": -332.3011474609375,
      "logps/rejected": -277.1512756347656,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": -2.540015459060669,
      "logits/rejected": -2.500929832458496,
      "logps/chosen": -247.11024475097656,
      "logps/rejected": -213.2850341796875,
      "loss": 0.6932,
      "rewards/accuracies": 0.4097222089767456,
      "rewards/chosen": -0.000517037755344063,
      "rewards/margins": -0.00017402732919435948,
      "rewards/rejected": -0.0003430104407016188,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -2.5355143547058105,
      "logits/rejected": -2.484562873840332,
      "logps/chosen": -272.35711669921875,
      "logps/rejected": -249.6931915283203,
      "loss": 0.6924,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": 0.0008343200897797942,
      "rewards/margins": 0.001956597436219454,
      "rewards/rejected": -0.001122277113609016,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.6150834560394287,
      "logits/rejected": -2.542232036590576,
      "logps/chosen": -283.331298828125,
      "logps/rejected": -276.0316467285156,
      "loss": 0.6885,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.008794735185801983,
      "rewards/margins": 0.006988237611949444,
      "rewards/rejected": 0.0018064973410218954,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.519700527191162,
      "logits/rejected": -2.4544477462768555,
      "logps/chosen": -288.609130859375,
      "logps/rejected": -265.47576904296875,
      "loss": 0.6756,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.04671400785446167,
      "rewards/margins": 0.03756200894713402,
      "rewards/rejected": 0.009151997044682503,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999733114418725e-07,
      "logits/chosen": -2.4968419075012207,
      "logits/rejected": -2.4539003372192383,
      "logps/chosen": -312.46380615234375,
      "logps/rejected": -309.41632080078125,
      "loss": 0.6601,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.006021331064403057,
      "rewards/margins": 0.07272790372371674,
      "rewards/rejected": -0.07874923944473267,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990398100856366e-07,
      "logits/chosen": -2.461306095123291,
      "logits/rejected": -2.4000632762908936,
      "logps/chosen": -267.9833679199219,
      "logps/rejected": -251.8109130859375,
      "loss": 0.6381,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.10297150909900665,
      "rewards/margins": 0.12468205392360687,
      "rewards/rejected": -0.22765357792377472,
      "step": 60
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.967775735898179e-07,
      "logits/chosen": -2.427084445953369,
      "logits/rejected": -2.366863250732422,
      "logps/chosen": -298.81085205078125,
      "logps/rejected": -287.7050476074219,
      "loss": 0.6177,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.1814979612827301,
      "rewards/margins": 0.2026226818561554,
      "rewards/rejected": -0.3841206133365631,
      "step": 70
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.931986719649298e-07,
      "logits/chosen": -1.6477082967758179,
      "logits/rejected": -1.5141746997833252,
      "logps/chosen": -328.52178955078125,
      "logps/rejected": -357.9725646972656,
      "loss": 0.609,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.5934440493583679,
      "rewards/margins": 0.24087591469287872,
      "rewards/rejected": -0.8343199491500854,
      "step": 80
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.883222001996351e-07,
      "logits/chosen": -1.456400990486145,
      "logits/rejected": -1.1816024780273438,
      "logps/chosen": -324.58734130859375,
      "logps/rejected": -347.7338562011719,
      "loss": 0.5717,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.5771389603614807,
      "rewards/margins": 0.38918066024780273,
      "rewards/rejected": -0.966319739818573,
      "step": 90
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.821741763807186e-07,
      "logits/chosen": -1.2872960567474365,
      "logits/rejected": -0.9513088464736938,
      "logps/chosen": -339.1795959472656,
      "logps/rejected": -366.32769775390625,
      "loss": 0.5545,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.5583249926567078,
      "rewards/margins": 0.47431764006614685,
      "rewards/rejected": -1.0326426029205322,
      "step": 100
    },
    {
      "epoch": 0.21,
      "eval_logits/chosen": -1.3212240934371948,
      "eval_logits/rejected": -1.0286760330200195,
      "eval_logps/chosen": -312.0798645019531,
      "eval_logps/rejected": -374.31585693359375,
      "eval_loss": 0.5658453106880188,
      "eval_rewards/accuracies": 0.71875,
      "eval_rewards/chosen": -0.49532508850097656,
      "eval_rewards/margins": 0.6263692378997803,
      "eval_rewards/rejected": -1.1216944456100464,
      "eval_runtime": 88.7193,
      "eval_samples_per_second": 22.543,
      "eval_steps_per_second": 0.361,
      "step": 100
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.747874028753375e-07,
      "logits/chosen": -1.1495229005813599,
      "logits/rejected": -0.6862327456474304,
      "logps/chosen": -370.4473571777344,
      "logps/rejected": -381.8430480957031,
      "loss": 0.5534,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.503128170967102,
      "rewards/margins": 0.6164419054985046,
      "rewards/rejected": -1.1195701360702515,
      "step": 110
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.662012913161997e-07,
      "logits/chosen": -0.21524420380592346,
      "logits/rejected": 0.5560011863708496,
      "logps/chosen": -363.287353515625,
      "logps/rejected": -385.227783203125,
      "loss": 0.5308,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.8372129201889038,
      "rewards/margins": 0.710771918296814,
      "rewards/rejected": -1.5479847192764282,
      "step": 120
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.5646165232345103e-07,
      "logits/chosen": -0.9693559408187866,
      "logits/rejected": -0.585214376449585,
      "logps/chosen": -310.8514404296875,
      "logps/rejected": -407.11956787109375,
      "loss": 0.5387,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.4881950318813324,
      "rewards/margins": 0.6856316328048706,
      "rewards/rejected": -1.1738265752792358,
      "step": 130
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.456204510851956e-07,
      "logits/chosen": -1.0192829370498657,
      "logits/rejected": -0.2164110690355301,
      "logps/chosen": -359.496826171875,
      "logps/rejected": -383.21826171875,
      "loss": 0.5381,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.6586028337478638,
      "rewards/margins": 0.6349435448646545,
      "rewards/rejected": -1.2935463190078735,
      "step": 140
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.337355301007335e-07,
      "logits/chosen": -0.4600129723548889,
      "logits/rejected": 0.2966030240058899,
      "logps/chosen": -379.4418640136719,
      "logps/rejected": -396.3006286621094,
      "loss": 0.5288,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.9255654215812683,
      "rewards/margins": 0.6836373805999756,
      "rewards/rejected": -1.6092027425765991,
      "step": 150
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.2087030056579986e-07,
      "logits/chosen": -0.17871518433094025,
      "logits/rejected": 0.4601938724517822,
      "logps/chosen": -382.32659912109375,
      "logps/rejected": -447.52716064453125,
      "loss": 0.5063,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.9520798921585083,
      "rewards/margins": 0.810064435005188,
      "rewards/rejected": -1.7621443271636963,
      "step": 160
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.070934040463998e-07,
      "logits/chosen": -0.7312272191047668,
      "logits/rejected": -0.21112406253814697,
      "logps/chosen": -320.51849365234375,
      "logps/rejected": -360.88427734375,
      "loss": 0.5618,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.7297524213790894,
      "rewards/margins": 0.565701425075531,
      "rewards/rejected": -1.2954537868499756,
      "step": 170
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.9247834624635404e-07,
      "logits/chosen": -0.5727821588516235,
      "logits/rejected": -0.19889096915721893,
      "logps/chosen": -369.8377990722656,
      "logps/rejected": -436.2527770996094,
      "loss": 0.5238,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.7431085705757141,
      "rewards/margins": 0.8097877502441406,
      "rewards/rejected": -1.55289626121521,
      "step": 180
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.7710310482256523e-07,
      "logits/chosen": -0.8184123039245605,
      "logits/rejected": -0.09880775213241577,
      "logps/chosen": -360.60174560546875,
      "logps/rejected": -434.35540771484375,
      "loss": 0.5039,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.7761167287826538,
      "rewards/margins": 0.8615070581436157,
      "rewards/rejected": -1.6376237869262695,
      "step": 190
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.610497133404795e-07,
      "logits/chosen": -0.597638726234436,
      "logits/rejected": 0.14091506600379944,
      "logps/chosen": -373.0635681152344,
      "logps/rejected": -402.6120910644531,
      "loss": 0.5026,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.908000648021698,
      "rewards/margins": 0.6652692556381226,
      "rewards/rejected": -1.5732697248458862,
      "step": 200
    },
    {
      "epoch": 0.42,
      "eval_logits/chosen": -0.17727772891521454,
      "eval_logits/rejected": 0.5189895629882812,
      "eval_logps/chosen": -352.49847412109375,
      "eval_logps/rejected": -439.3264465332031,
      "eval_loss": 0.5201631188392639,
      "eval_rewards/accuracies": 0.74609375,
      "eval_rewards/chosen": -0.899510383605957,
      "eval_rewards/margins": 0.8722902536392212,
      "eval_rewards/rejected": -1.7718006372451782,
      "eval_runtime": 88.5484,
      "eval_samples_per_second": 22.587,
      "eval_steps_per_second": 0.361,
      "step": 200
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4440382358952115e-07,
      "logits/chosen": -0.04346243292093277,
      "logits/rejected": 0.8316682577133179,
      "logps/chosen": -377.2864990234375,
      "logps/rejected": -403.12933349609375,
      "loss": 0.5357,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.005774736404419,
      "rewards/margins": 0.6370893716812134,
      "rewards/rejected": -1.6428638696670532,
      "step": 210
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.272542485937368e-07,
      "logits/chosen": 0.27445563673973083,
      "logits/rejected": 0.8739471435546875,
      "logps/chosen": -359.21343994140625,
      "logps/rejected": -410.8561096191406,
      "loss": 0.5269,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.8274116516113281,
      "rewards/margins": 0.8387683033943176,
      "rewards/rejected": -1.6661800146102905,
      "step": 220
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.096924887558854e-07,
      "logits/chosen": 0.12390404939651489,
      "logits/rejected": 0.7874934673309326,
      "logps/chosen": -343.6993713378906,
      "logps/rejected": -443.02716064453125,
      "loss": 0.5192,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.7115140557289124,
      "rewards/margins": 0.9087392091751099,
      "rewards/rejected": -1.6202532052993774,
      "step": 230
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.9181224366319943e-07,
      "logits/chosen": 0.47393637895584106,
      "logits/rejected": 1.2820873260498047,
      "logps/chosen": -341.52032470703125,
      "logps/rejected": -402.3468322753906,
      "loss": 0.5251,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.9017995595932007,
      "rewards/margins": 0.7208075523376465,
      "rewards/rejected": -1.6226072311401367,
      "step": 240
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.7370891215954565e-07,
      "logits/chosen": 0.6556827425956726,
      "logits/rejected": 1.5371648073196411,
      "logps/chosen": -363.3061218261719,
      "logps/rejected": -423.22216796875,
      "loss": 0.5168,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.9660114049911499,
      "rewards/margins": 0.6768711805343628,
      "rewards/rejected": -1.6428825855255127,
      "step": 250
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.55479083351317e-07,
      "logits/chosen": 1.074449062347412,
      "logits/rejected": 2.2258691787719727,
      "logps/chosen": -393.25042724609375,
      "logps/rejected": -435.5625915527344,
      "loss": 0.5007,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -1.1183383464813232,
      "rewards/margins": 0.817892849445343,
      "rewards/rejected": -1.936231255531311,
      "step": 260
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.3722002126275822e-07,
      "logits/chosen": 1.035612940788269,
      "logits/rejected": 1.9490232467651367,
      "logps/chosen": -391.06488037109375,
      "logps/rejected": -470.64599609375,
      "loss": 0.5064,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.1720463037490845,
      "rewards/margins": 0.9149090051651001,
      "rewards/rejected": -2.0869553089141846,
      "step": 270
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.19029145890313e-07,
      "logits/chosen": 0.5210274457931519,
      "logits/rejected": 1.496964693069458,
      "logps/chosen": -369.4839782714844,
      "logps/rejected": -415.17974853515625,
      "loss": 0.5189,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.9482043981552124,
      "rewards/margins": 0.8109124898910522,
      "rewards/rejected": -1.759116768836975,
      "step": 280
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.0100351342479216e-07,
      "logits/chosen": 0.4483126103878021,
      "logits/rejected": 1.3368585109710693,
      "logps/chosen": -373.81475830078125,
      "logps/rejected": -435.36151123046875,
      "loss": 0.519,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.8730584979057312,
      "rewards/margins": 0.7535277009010315,
      "rewards/rejected": -1.6265861988067627,
      "step": 290
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8323929841460178e-07,
      "logits/chosen": 0.27887973189353943,
      "logits/rejected": 1.0783421993255615,
      "logps/chosen": -381.5480651855469,
      "logps/rejected": -453.80242919921875,
      "loss": 0.5106,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.8568315505981445,
      "rewards/margins": 0.6526933312416077,
      "rewards/rejected": -1.5095248222351074,
      "step": 300
    },
    {
      "epoch": 0.63,
      "eval_logits/chosen": 0.08615332096815109,
      "eval_logits/rejected": 0.9099248647689819,
      "eval_logps/chosen": -342.00433349609375,
      "eval_logps/rejected": -424.99755859375,
      "eval_loss": 0.5103623270988464,
      "eval_rewards/accuracies": 0.765625,
      "eval_rewards/chosen": -0.7945692539215088,
      "eval_rewards/margins": 0.8339425921440125,
      "eval_rewards/rejected": -1.628511905670166,
      "eval_runtime": 88.6653,
      "eval_samples_per_second": 22.557,
      "eval_steps_per_second": 0.361,
      "step": 300
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.6583128063291573e-07,
      "logits/chosen": 0.13822266459465027,
      "logits/rejected": 1.3017876148223877,
      "logps/chosen": -365.63958740234375,
      "logps/rejected": -384.9111328125,
      "loss": 0.4988,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.9199568629264832,
      "rewards/margins": 0.7403375506401062,
      "rewards/rejected": -1.6602942943572998,
      "step": 310
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.488723393865766e-07,
      "logits/chosen": 0.672313392162323,
      "logits/rejected": 1.8797632455825806,
      "logps/chosen": -326.7810974121094,
      "logps/rejected": -421.84765625,
      "loss": 0.5073,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.7888548374176025,
      "rewards/margins": 1.0594618320465088,
      "rewards/rejected": -1.8483164310455322,
      "step": 320
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3245295796480788e-07,
      "logits/chosen": 0.46894121170043945,
      "logits/rejected": 1.3748019933700562,
      "logps/chosen": -373.7689208984375,
      "logps/rejected": -419.54364013671875,
      "loss": 0.5143,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.8813614845275879,
      "rewards/margins": 0.7780431509017944,
      "rewards/rejected": -1.6594045162200928,
      "step": 330
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.1666074087171627e-07,
      "logits/chosen": 0.8704012036323547,
      "logits/rejected": 1.6405874490737915,
      "logps/chosen": -351.2308654785156,
      "logps/rejected": -407.41064453125,
      "loss": 0.5135,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.8790988922119141,
      "rewards/margins": 0.7077994346618652,
      "rewards/rejected": -1.5868984460830688,
      "step": 340
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0157994641835734e-07,
      "logits/chosen": 0.8922025561332703,
      "logits/rejected": 2.06630277633667,
      "logps/chosen": -348.2925720214844,
      "logps/rejected": -419.91363525390625,
      "loss": 0.4771,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.9759310483932495,
      "rewards/margins": 0.898769736289978,
      "rewards/rejected": -1.8747007846832275,
      "step": 350
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.729103716819111e-08,
      "logits/chosen": 0.996601402759552,
      "logits/rejected": 2.303062915802002,
      "logps/chosen": -422.4617614746094,
      "logps/rejected": -457.77691650390625,
      "loss": 0.4833,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.044933557510376,
      "rewards/margins": 0.9208766222000122,
      "rewards/rejected": -1.9658104181289673,
      "step": 360
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.387025063449081e-08,
      "logits/chosen": 1.0122365951538086,
      "logits/rejected": 2.279644727706909,
      "logps/chosen": -411.8594665527344,
      "logps/rejected": -457.96807861328125,
      "loss": 0.4969,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -1.0128066539764404,
      "rewards/margins": 1.0020781755447388,
      "rewards/rejected": -2.0148847103118896,
      "step": 370
    },
    {
      "epoch": 0.79,
      "learning_rate": 6.138919252022435e-08,
      "logits/chosen": 0.8450608253479004,
      "logits/rejected": 1.820166826248169,
      "logps/chosen": -383.67559814453125,
      "logps/rejected": -475.4779357910156,
      "loss": 0.4768,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.0418307781219482,
      "rewards/margins": 0.9196658134460449,
      "rewards/rejected": -1.9614967107772827,
      "step": 380
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.991445467064689e-08,
      "logits/chosen": 1.0426132678985596,
      "logits/rejected": 1.9015953540802002,
      "logps/chosen": -384.1719970703125,
      "logps/rejected": -459.9111328125,
      "loss": 0.5073,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.1213358640670776,
      "rewards/margins": 0.7478152513504028,
      "rewards/rejected": -1.8691511154174805,
      "step": 390
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.9507259776993954e-08,
      "logits/chosen": 0.8149229288101196,
      "logits/rejected": 1.6555734872817993,
      "logps/chosen": -430.68231201171875,
      "logps/rejected": -496.66900634765625,
      "loss": 0.4859,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.098642110824585,
      "rewards/margins": 0.9415690302848816,
      "rewards/rejected": -2.0402112007141113,
      "step": 400
    },
    {
      "epoch": 0.84,
      "eval_logits/chosen": 0.7817753553390503,
      "eval_logits/rejected": 1.7438044548034668,
      "eval_logps/chosen": -360.3138732910156,
      "eval_logps/rejected": -457.9451599121094,
      "eval_loss": 0.5031262040138245,
      "eval_rewards/accuracies": 0.7578125,
      "eval_rewards/chosen": -0.9776647090911865,
      "eval_rewards/margins": 0.9803228974342346,
      "eval_rewards/rejected": -1.9579875469207764,
      "eval_runtime": 88.4827,
      "eval_samples_per_second": 22.603,
      "eval_steps_per_second": 0.362,
      "step": 400
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.022313472693447e-08,
      "logits/chosen": 1.0873724222183228,
      "logits/rejected": 2.1809914112091064,
      "logps/chosen": -358.1825256347656,
      "logps/rejected": -440.631591796875,
      "loss": 0.5114,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.9868854284286499,
      "rewards/margins": 0.9311205744743347,
      "rewards/rejected": -1.918006181716919,
      "step": 410
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.2111614344599684e-08,
      "logits/chosen": 0.8678689002990723,
      "logits/rejected": 1.581181526184082,
      "logps/chosen": -370.5611877441406,
      "logps/rejected": -477.7791442871094,
      "loss": 0.5041,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.9568517804145813,
      "rewards/margins": 0.967370331287384,
      "rewards/rejected": -1.9242219924926758,
      "step": 420
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.521597710086439e-08,
      "logits/chosen": 0.4731677174568176,
      "logits/rejected": 1.4058442115783691,
      "logps/chosen": -396.15325927734375,
      "logps/rejected": -442.45404052734375,
      "loss": 0.5063,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.994260311126709,
      "rewards/margins": 0.8869916796684265,
      "rewards/rejected": -1.8812516927719116,
      "step": 430
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.57301420397924e-09,
      "logits/chosen": 0.8329499959945679,
      "logits/rejected": 1.7063286304473877,
      "logps/chosen": -360.3570861816406,
      "logps/rejected": -442.45574951171875,
      "loss": 0.4884,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.0168806314468384,
      "rewards/margins": 0.8515257835388184,
      "rewards/rejected": -1.8684062957763672,
      "step": 440
    },
    {
      "epoch": 0.94,
      "learning_rate": 5.212833302556258e-09,
      "logits/chosen": 0.8873745799064636,
      "logits/rejected": 1.8809913396835327,
      "logps/chosen": -344.71197509765625,
      "logps/rejected": -413.39471435546875,
      "loss": 0.5068,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.99580317735672,
      "rewards/margins": 0.8256435394287109,
      "rewards/rejected": -1.8214466571807861,
      "step": 450
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.158697848236607e-09,
      "logits/chosen": 1.0402039289474487,
      "logits/rejected": 2.0626494884490967,
      "logps/chosen": -350.25115966796875,
      "logps/rejected": -416.1396484375,
      "loss": 0.4859,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.070770502090454,
      "rewards/margins": 0.807015597820282,
      "rewards/rejected": -1.8777860403060913,
      "step": 460
    },
    {
      "epoch": 0.98,
      "learning_rate": 4.269029751107489e-10,
      "logits/chosen": 0.4914283752441406,
      "logits/rejected": 1.8748416900634766,
      "logps/chosen": -398.6361083984375,
      "logps/rejected": -465.1482849121094,
      "loss": 0.4872,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -1.004058599472046,
      "rewards/margins": 1.027204155921936,
      "rewards/rejected": -2.0312628746032715,
      "step": 470
    },
    {
      "epoch": 1.0,
      "step": 478,
      "total_flos": 0.0,
      "train_loss": 0.07998354963677698,
      "train_runtime": 779.5821,
      "train_samples_per_second": 78.42,
      "train_steps_per_second": 0.613
    }
  ],
  "logging_steps": 10,
  "max_steps": 478,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}