{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 100,
  "global_step": 574,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003484320557491289,
      "grad_norm": 289.1508698329696,
      "learning_rate": 3.4482758620689654e-09,
      "logits/chosen": -2.5345611572265625,
      "logits/rejected": -2.581700563430786,
      "logps/chosen": -60.002105712890625,
      "logps/rejected": -99.98374938964844,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03484320557491289,
      "grad_norm": 277.24379710378327,
      "learning_rate": 3.448275862068966e-08,
      "logits/chosen": -2.5635900497436523,
      "logits/rejected": -2.562225818634033,
      "logps/chosen": -59.68327331542969,
      "logps/rejected": -73.39383697509766,
      "loss": 0.6974,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.01181174349039793,
      "rewards/margins": -0.0053515201434493065,
      "rewards/rejected": -0.006460222881287336,
      "step": 10
    },
    {
      "epoch": 0.06968641114982578,
      "grad_norm": 357.5434709312531,
      "learning_rate": 6.896551724137931e-08,
      "logits/chosen": -2.6049184799194336,
      "logits/rejected": -2.5635530948638916,
      "logps/chosen": -104.09075927734375,
      "logps/rejected": -94.9400634765625,
      "loss": 0.6899,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": 0.01815590262413025,
      "rewards/margins": 0.04113120958209038,
      "rewards/rejected": -0.02297530695796013,
      "step": 20
    },
    {
      "epoch": 0.10452961672473868,
      "grad_norm": 337.4139606266084,
      "learning_rate": 1.0344827586206897e-07,
      "logits/chosen": -2.5913662910461426,
      "logits/rejected": -2.5713112354278564,
      "logps/chosen": -82.475341796875,
      "logps/rejected": -91.52882385253906,
      "loss": 0.6923,
      "rewards/accuracies": 0.26875001192092896,
      "rewards/chosen": 0.01061153132468462,
      "rewards/margins": 0.004934412427246571,
      "rewards/rejected": 0.005677118897438049,
      "step": 30
    },
    {
      "epoch": 0.13937282229965156,
      "grad_norm": 282.9755513914394,
      "learning_rate": 1.3793103448275863e-07,
      "logits/chosen": -2.4979004859924316,
      "logits/rejected": -2.495636463165283,
      "logps/chosen": -77.93832397460938,
      "logps/rejected": -73.0629653930664,
      "loss": 0.6791,
      "rewards/accuracies": 0.20624999701976776,
      "rewards/chosen": -0.00995340384542942,
      "rewards/margins": 0.020863836631178856,
      "rewards/rejected": -0.030817240476608276,
      "step": 40
    },
    {
      "epoch": 0.17421602787456447,
      "grad_norm": 245.53657846887563,
      "learning_rate": 1.7241379310344825e-07,
      "logits/chosen": -2.533763885498047,
      "logits/rejected": -2.537358522415161,
      "logps/chosen": -63.84333419799805,
      "logps/rejected": -76.09790802001953,
      "loss": 0.6717,
      "rewards/accuracies": 0.2562499940395355,
      "rewards/chosen": 0.047906167805194855,
      "rewards/margins": 0.04513678327202797,
      "rewards/rejected": 0.002769380807876587,
      "step": 50
    },
    {
      "epoch": 0.20905923344947736,
      "grad_norm": 247.7041408944673,
      "learning_rate": 1.9922480620155037e-07,
      "logits/chosen": -2.506993532180786,
      "logits/rejected": -2.5004165172576904,
      "logps/chosen": -72.5146255493164,
      "logps/rejected": -67.91732025146484,
      "loss": 0.6647,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": 0.34594517946243286,
      "rewards/margins": 0.1003229171037674,
      "rewards/rejected": 0.24562230706214905,
      "step": 60
    },
    {
      "epoch": 0.24390243902439024,
      "grad_norm": 249.16248968780815,
      "learning_rate": 1.9534883720930232e-07,
      "logits/chosen": -2.528493881225586,
      "logits/rejected": -2.5241379737854004,
      "logps/chosen": -62.91923904418945,
      "logps/rejected": -67.45994567871094,
      "loss": 0.6534,
      "rewards/accuracies": 0.2874999940395355,
      "rewards/chosen": 0.6584776043891907,
      "rewards/margins": 0.17514197528362274,
      "rewards/rejected": 0.4833356738090515,
      "step": 70
    },
    {
      "epoch": 0.2787456445993031,
      "grad_norm": 307.9296174602441,
      "learning_rate": 1.9147286821705426e-07,
      "logits/chosen": -2.4762024879455566,
      "logits/rejected": -2.4663474559783936,
      "logps/chosen": -74.05433654785156,
      "logps/rejected": -76.48423767089844,
      "loss": 0.6613,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": 0.805270791053772,
      "rewards/margins": 0.2899132966995239,
      "rewards/rejected": 0.5153574347496033,
      "step": 80
    },
    {
      "epoch": 0.313588850174216,
      "grad_norm": 236.73851878615983,
      "learning_rate": 1.8759689922480618e-07,
      "logits/chosen": -2.4865849018096924,
      "logits/rejected": -2.5006980895996094,
      "logps/chosen": -64.770263671875,
      "logps/rejected": -69.27301788330078,
      "loss": 0.6551,
      "rewards/accuracies": 0.3062500059604645,
      "rewards/chosen": 0.6060104966163635,
      "rewards/margins": 0.18549713492393494,
      "rewards/rejected": 0.420513391494751,
      "step": 90
    },
    {
      "epoch": 0.34843205574912894,
      "grad_norm": 302.9071215603059,
      "learning_rate": 1.8372093023255813e-07,
      "logits/chosen": -2.4684031009674072,
      "logits/rejected": -2.4694905281066895,
      "logps/chosen": -73.97297668457031,
      "logps/rejected": -80.28447723388672,
      "loss": 0.6562,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": 0.649095356464386,
      "rewards/margins": 0.26550906896591187,
      "rewards/rejected": 0.3835863173007965,
      "step": 100
    },
    {
      "epoch": 0.34843205574912894,
      "eval_logits/chosen": -2.5565242767333984,
      "eval_logits/rejected": -2.540536880493164,
      "eval_logps/chosen": -73.13079833984375,
      "eval_logps/rejected": -80.36016845703125,
      "eval_loss": 0.6328476667404175,
      "eval_rewards/accuracies": 0.3551587164402008,
      "eval_rewards/chosen": 0.6802141070365906,
      "eval_rewards/margins": 0.296122670173645,
      "eval_rewards/rejected": 0.3840915858745575,
      "eval_runtime": 113.5393,
      "eval_samples_per_second": 17.615,
      "eval_steps_per_second": 0.555,
      "step": 100
    },
    {
      "epoch": 0.3832752613240418,
      "grad_norm": 321.32489311642775,
      "learning_rate": 1.7984496124031007e-07,
      "logits/chosen": -2.497744560241699,
      "logits/rejected": -2.4591681957244873,
      "logps/chosen": -72.22078704833984,
      "logps/rejected": -62.5744514465332,
      "loss": 0.6334,
      "rewards/accuracies": 0.3062500059604645,
      "rewards/chosen": 0.474620521068573,
      "rewards/margins": 0.256552129983902,
      "rewards/rejected": 0.21806840598583221,
      "step": 110
    },
    {
      "epoch": 0.4181184668989547,
      "grad_norm": 224.1076153559292,
      "learning_rate": 1.7596899224806202e-07,
      "logits/chosen": -2.520296573638916,
      "logits/rejected": -2.488671064376831,
      "logps/chosen": -76.48248291015625,
      "logps/rejected": -66.29643249511719,
      "loss": 0.619,
      "rewards/accuracies": 0.29374998807907104,
      "rewards/chosen": 0.48954588174819946,
      "rewards/margins": 0.30197009444236755,
      "rewards/rejected": 0.1875758022069931,
      "step": 120
    },
    {
      "epoch": 0.4529616724738676,
      "grad_norm": 339.42643355240523,
      "learning_rate": 1.7209302325581394e-07,
      "logits/chosen": -2.5603480339050293,
      "logits/rejected": -2.5410096645355225,
      "logps/chosen": -83.0821533203125,
      "logps/rejected": -87.61650085449219,
      "loss": 0.6341,
      "rewards/accuracies": 0.3812499940395355,
      "rewards/chosen": 0.3993155062198639,
      "rewards/margins": 0.4533071517944336,
      "rewards/rejected": -0.053991664201021194,
      "step": 130
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 307.50496227541765,
      "learning_rate": 1.6821705426356588e-07,
      "logits/chosen": -2.459794282913208,
      "logits/rejected": -2.447605609893799,
      "logps/chosen": -80.36363220214844,
      "logps/rejected": -70.87696838378906,
      "loss": 0.616,
      "rewards/accuracies": 0.35624998807907104,
      "rewards/chosen": 0.3909004330635071,
      "rewards/margins": 0.4472619891166687,
      "rewards/rejected": -0.05636156350374222,
      "step": 140
    },
    {
      "epoch": 0.5226480836236934,
      "grad_norm": 305.3753178775468,
      "learning_rate": 1.6434108527131783e-07,
      "logits/chosen": -2.520991086959839,
      "logits/rejected": -2.47597074508667,
      "logps/chosen": -78.84916687011719,
      "logps/rejected": -79.61804962158203,
      "loss": 0.6534,
      "rewards/accuracies": 0.29374998807907104,
      "rewards/chosen": 0.31758052110671997,
      "rewards/margins": 0.35121217370033264,
      "rewards/rejected": -0.03363170847296715,
      "step": 150
    },
    {
      "epoch": 0.5574912891986062,
      "grad_norm": 267.54053358304463,
      "learning_rate": 1.6046511627906975e-07,
      "logits/chosen": -2.4771218299865723,
      "logits/rejected": -2.498429298400879,
      "logps/chosen": -63.93762969970703,
      "logps/rejected": -71.9593734741211,
      "loss": 0.6535,
      "rewards/accuracies": 0.28125,
      "rewards/chosen": 0.29360949993133545,
      "rewards/margins": 0.2868410050868988,
      "rewards/rejected": 0.0067685008980333805,
      "step": 160
    },
    {
      "epoch": 0.5923344947735192,
      "grad_norm": 274.1930461210022,
      "learning_rate": 1.565891472868217e-07,
      "logits/chosen": -2.4762015342712402,
      "logits/rejected": -2.461243152618408,
      "logps/chosen": -68.60774230957031,
      "logps/rejected": -76.81765747070312,
      "loss": 0.6071,
      "rewards/accuracies": 0.3375000059604645,
      "rewards/chosen": 0.17982278764247894,
      "rewards/margins": 0.43163204193115234,
      "rewards/rejected": -0.251809298992157,
      "step": 170
    },
    {
      "epoch": 0.627177700348432,
      "grad_norm": 274.26901021006046,
      "learning_rate": 1.5271317829457364e-07,
      "logits/chosen": -2.5262434482574463,
      "logits/rejected": -2.5153231620788574,
      "logps/chosen": -91.26216125488281,
      "logps/rejected": -86.61434936523438,
      "loss": 0.6537,
      "rewards/accuracies": 0.32499998807907104,
      "rewards/chosen": 0.29750674962997437,
      "rewards/margins": 0.3494582176208496,
      "rewards/rejected": -0.05195152759552002,
      "step": 180
    },
    {
      "epoch": 0.662020905923345,
      "grad_norm": 207.64618032691078,
      "learning_rate": 1.4883720930232558e-07,
      "logits/chosen": -2.523658037185669,
      "logits/rejected": -2.513747453689575,
      "logps/chosen": -70.37347412109375,
      "logps/rejected": -81.337646484375,
      "loss": 0.6272,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": 0.33900195360183716,
      "rewards/margins": 0.24373552203178406,
      "rewards/rejected": 0.0952664315700531,
      "step": 190
    },
    {
      "epoch": 0.6968641114982579,
      "grad_norm": 364.4684926175276,
      "learning_rate": 1.449612403100775e-07,
      "logits/chosen": -2.5569560527801514,
      "logits/rejected": -2.564101219177246,
      "logps/chosen": -89.63420104980469,
      "logps/rejected": -91.90037536621094,
      "loss": 0.6601,
      "rewards/accuracies": 0.3375000059604645,
      "rewards/chosen": 0.5110756754875183,
      "rewards/margins": 0.4359167516231537,
      "rewards/rejected": 0.07515887171030045,
      "step": 200
    },
    {
      "epoch": 0.6968641114982579,
      "eval_logits/chosen": -2.5332281589508057,
      "eval_logits/rejected": -2.516913414001465,
      "eval_logps/chosen": -73.89338684082031,
      "eval_logps/rejected": -81.30782318115234,
      "eval_loss": 0.6409674882888794,
      "eval_rewards/accuracies": 0.3452380895614624,
      "eval_rewards/chosen": 0.29892128705978394,
      "eval_rewards/margins": 0.38865822553634644,
      "eval_rewards/rejected": -0.08973691612482071,
      "eval_runtime": 113.5083,
      "eval_samples_per_second": 17.62,
      "eval_steps_per_second": 0.555,
      "step": 200
    },
    {
      "epoch": 0.7317073170731707,
      "grad_norm": 418.8344130807906,
      "learning_rate": 1.4108527131782945e-07,
      "logits/chosen": -2.5373802185058594,
      "logits/rejected": -2.510119915008545,
      "logps/chosen": -68.75162506103516,
      "logps/rejected": -63.84955978393555,
      "loss": 0.6439,
      "rewards/accuracies": 0.39375001192092896,
      "rewards/chosen": 0.2837230861186981,
      "rewards/margins": 0.5062949657440186,
      "rewards/rejected": -0.22257189452648163,
      "step": 210
    },
    {
      "epoch": 0.7665505226480837,
      "grad_norm": 227.78900163940662,
      "learning_rate": 1.372093023255814e-07,
      "logits/chosen": -2.5648019313812256,
      "logits/rejected": -2.5423765182495117,
      "logps/chosen": -72.84169006347656,
      "logps/rejected": -71.51380920410156,
      "loss": 0.644,
      "rewards/accuracies": 0.26875001192092896,
      "rewards/chosen": 0.30824023485183716,
      "rewards/margins": 0.23355619609355927,
      "rewards/rejected": 0.0746840387582779,
      "step": 220
    },
    {
      "epoch": 0.8013937282229965,
      "grad_norm": 280.8923204773476,
      "learning_rate": 1.333333333333333e-07,
      "logits/chosen": -2.56278920173645,
      "logits/rejected": -2.5390899181365967,
      "logps/chosen": -89.15130615234375,
      "logps/rejected": -88.63081359863281,
      "loss": 0.6323,
      "rewards/accuracies": 0.40625,
      "rewards/chosen": 0.4246680736541748,
      "rewards/margins": 0.6656498908996582,
      "rewards/rejected": -0.24098177254199982,
      "step": 230
    },
    {
      "epoch": 0.8362369337979094,
      "grad_norm": 250.23928935872348,
      "learning_rate": 1.2945736434108528e-07,
      "logits/chosen": -2.5548999309539795,
      "logits/rejected": -2.5180442333221436,
      "logps/chosen": -86.33263397216797,
      "logps/rejected": -80.53160095214844,
      "loss": 0.6558,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": 0.20448681712150574,
      "rewards/margins": 0.4022372364997864,
      "rewards/rejected": -0.19775035977363586,
      "step": 240
    },
    {
      "epoch": 0.8710801393728222,
      "grad_norm": 281.88116685137084,
      "learning_rate": 1.255813953488372e-07,
      "logits/chosen": -2.5691909790039062,
      "logits/rejected": -2.53218412399292,
      "logps/chosen": -95.07816314697266,
      "logps/rejected": -90.45535278320312,
      "loss": 0.5834,
      "rewards/accuracies": 0.40625,
      "rewards/chosen": 0.1855524331331253,
      "rewards/margins": 0.44013509154319763,
      "rewards/rejected": -0.25458264350891113,
      "step": 250
    },
    {
      "epoch": 0.9059233449477352,
      "grad_norm": 217.05487038029386,
      "learning_rate": 1.2170542635658915e-07,
      "logits/chosen": -2.4733526706695557,
      "logits/rejected": -2.4898457527160645,
      "logps/chosen": -58.55949783325195,
      "logps/rejected": -65.79776763916016,
      "loss": 0.6288,
      "rewards/accuracies": 0.3062500059604645,
      "rewards/chosen": 0.24493035674095154,
      "rewards/margins": 0.329669326543808,
      "rewards/rejected": -0.08473895490169525,
      "step": 260
    },
    {
      "epoch": 0.9407665505226481,
      "grad_norm": 271.80312819553575,
      "learning_rate": 1.1782945736434109e-07,
      "logits/chosen": -2.5729527473449707,
      "logits/rejected": -2.5736825466156006,
      "logps/chosen": -68.29542541503906,
      "logps/rejected": -83.07144927978516,
      "loss": 0.6039,
      "rewards/accuracies": 0.34375,
      "rewards/chosen": 0.2923750579357147,
      "rewards/margins": 0.562214732170105,
      "rewards/rejected": -0.26983970403671265,
      "step": 270
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 238.59512203733584,
      "learning_rate": 1.1395348837209302e-07,
      "logits/chosen": -2.4834656715393066,
      "logits/rejected": -2.458590030670166,
      "logps/chosen": -67.38432312011719,
      "logps/rejected": -71.17386627197266,
      "loss": 0.6017,
      "rewards/accuracies": 0.32499998807907104,
      "rewards/chosen": 0.22502359747886658,
      "rewards/margins": 0.39475032687187195,
      "rewards/rejected": -0.16972672939300537,
      "step": 280
    },
    {
      "epoch": 1.0104529616724738,
      "grad_norm": 76.34654611961486,
      "learning_rate": 1.1007751937984495e-07,
      "logits/chosen": -2.4974358081817627,
      "logits/rejected": -2.4653944969177246,
      "logps/chosen": -70.81477355957031,
      "logps/rejected": -65.4640121459961,
      "loss": 0.5461,
      "rewards/accuracies": 0.3812499940395355,
      "rewards/chosen": 0.6264529228210449,
      "rewards/margins": 0.9015191197395325,
      "rewards/rejected": -0.27506622672080994,
      "step": 290
    },
    {
      "epoch": 1.0452961672473868,
      "grad_norm": 39.056755757958314,
      "learning_rate": 1.062015503875969e-07,
      "logits/chosen": -2.51816987991333,
      "logits/rejected": -2.5042262077331543,
      "logps/chosen": -64.20988464355469,
      "logps/rejected": -70.89491271972656,
      "loss": 0.4195,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": 1.1695148944854736,
      "rewards/margins": 2.031907320022583,
      "rewards/rejected": -0.862392246723175,
      "step": 300
    },
    {
      "epoch": 1.0452961672473868,
      "eval_logits/chosen": -2.5354163646698,
      "eval_logits/rejected": -2.519293785095215,
      "eval_logps/chosen": -73.24293518066406,
      "eval_logps/rejected": -80.80972290039062,
      "eval_loss": 0.6371246576309204,
      "eval_rewards/accuracies": 0.3531745970249176,
      "eval_rewards/chosen": 0.6241527199745178,
      "eval_rewards/margins": 0.4648365378379822,
      "eval_rewards/rejected": 0.15931615233421326,
      "eval_runtime": 113.4957,
      "eval_samples_per_second": 17.622,
      "eval_steps_per_second": 0.555,
      "step": 300
    },
    {
      "epoch": 1.0801393728222997,
      "grad_norm": 33.225277343615126,
      "learning_rate": 1.0232558139534883e-07,
      "logits/chosen": -2.4929442405700684,
      "logits/rejected": -2.498056650161743,
      "logps/chosen": -65.01827239990234,
      "logps/rejected": -77.01332092285156,
      "loss": 0.4196,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 1.309188723564148,
      "rewards/margins": 2.192389965057373,
      "rewards/rejected": -0.8832012414932251,
      "step": 310
    },
    {
      "epoch": 1.1149825783972125,
      "grad_norm": 73.59283724981043,
      "learning_rate": 9.844961240310076e-08,
      "logits/chosen": -2.544821262359619,
      "logits/rejected": -2.5316672325134277,
      "logps/chosen": -71.5367431640625,
      "logps/rejected": -79.99516296386719,
      "loss": 0.4155,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 1.2115765810012817,
      "rewards/margins": 2.1014885902404785,
      "rewards/rejected": -0.8899120092391968,
      "step": 320
    },
    {
      "epoch": 1.1498257839721253,
      "grad_norm": 61.862973963070324,
      "learning_rate": 9.457364341085271e-08,
      "logits/chosen": -2.5284900665283203,
      "logits/rejected": -2.502081871032715,
      "logps/chosen": -81.13185119628906,
      "logps/rejected": -80.43305969238281,
      "loss": 0.3845,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 1.4948151111602783,
      "rewards/margins": 2.3979744911193848,
      "rewards/rejected": -0.9031593203544617,
      "step": 330
    },
    {
      "epoch": 1.1846689895470384,
      "grad_norm": 38.944715898756485,
      "learning_rate": 9.069767441860464e-08,
      "logits/chosen": -2.5041089057922363,
      "logits/rejected": -2.510469436645508,
      "logps/chosen": -80.71260833740234,
      "logps/rejected": -97.7466049194336,
      "loss": 0.4026,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 1.5175840854644775,
      "rewards/margins": 2.749465227127075,
      "rewards/rejected": -1.2318809032440186,
      "step": 340
    },
    {
      "epoch": 1.2195121951219512,
      "grad_norm": 64.94626084684288,
      "learning_rate": 8.682170542635659e-08,
      "logits/chosen": -2.5372118949890137,
      "logits/rejected": -2.503645896911621,
      "logps/chosen": -66.34137725830078,
      "logps/rejected": -67.35131072998047,
      "loss": 0.4149,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 1.1278231143951416,
      "rewards/margins": 1.9639180898666382,
      "rewards/rejected": -0.8360951542854309,
      "step": 350
    },
    {
      "epoch": 1.254355400696864,
      "grad_norm": 72.87191418772136,
      "learning_rate": 8.294573643410853e-08,
      "logits/chosen": -2.529754877090454,
      "logits/rejected": -2.4995744228363037,
      "logps/chosen": -69.45609283447266,
      "logps/rejected": -67.36495971679688,
      "loss": 0.3984,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 1.1970014572143555,
      "rewards/margins": 2.0473990440368652,
      "rewards/rejected": -0.8503974080085754,
      "step": 360
    },
    {
      "epoch": 1.289198606271777,
      "grad_norm": 78.06787405031129,
      "learning_rate": 7.906976744186046e-08,
      "logits/chosen": -2.498607635498047,
      "logits/rejected": -2.5195744037628174,
      "logps/chosen": -68.30110168457031,
      "logps/rejected": -75.86735534667969,
      "loss": 0.418,
      "rewards/accuracies": 0.45625001192092896,
      "rewards/chosen": 1.382476568222046,
      "rewards/margins": 2.1491870880126953,
      "rewards/rejected": -0.7667104005813599,
      "step": 370
    },
    {
      "epoch": 1.32404181184669,
      "grad_norm": 35.524129566350084,
      "learning_rate": 7.51937984496124e-08,
      "logits/chosen": -2.507997989654541,
      "logits/rejected": -2.5067758560180664,
      "logps/chosen": -82.98981475830078,
      "logps/rejected": -88.70475769042969,
      "loss": 0.3909,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 1.942350149154663,
      "rewards/margins": 3.0615932941436768,
      "rewards/rejected": -1.1192431449890137,
      "step": 380
    },
    {
      "epoch": 1.3588850174216027,
      "grad_norm": 71.79408635042864,
      "learning_rate": 7.131782945736434e-08,
      "logits/chosen": -2.5821242332458496,
      "logits/rejected": -2.5696043968200684,
      "logps/chosen": -64.58931732177734,
      "logps/rejected": -73.18416595458984,
      "loss": 0.3949,
      "rewards/accuracies": 0.4312500059604645,
      "rewards/chosen": 1.203021764755249,
      "rewards/margins": 2.1647653579711914,
      "rewards/rejected": -0.9617435336112976,
      "step": 390
    },
    {
      "epoch": 1.3937282229965158,
      "grad_norm": 40.565767402769495,
      "learning_rate": 6.744186046511627e-08,
      "logits/chosen": -2.553471088409424,
      "logits/rejected": -2.5233709812164307,
      "logps/chosen": -83.41809844970703,
      "logps/rejected": -100.60682678222656,
      "loss": 0.3956,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": 1.3644764423370361,
      "rewards/margins": 2.466003179550171,
      "rewards/rejected": -1.1015267372131348,
      "step": 400
    },
    {
      "epoch": 1.3937282229965158,
      "eval_logits/chosen": -2.5541367530822754,
      "eval_logits/rejected": -2.5378055572509766,
      "eval_logps/chosen": -73.62639617919922,
      "eval_logps/rejected": -81.42265319824219,
      "eval_loss": 0.645966649055481,
      "eval_rewards/accuracies": 0.363095223903656,
      "eval_rewards/chosen": 0.43241986632347107,
      "eval_rewards/margins": 0.5795699954032898,
      "eval_rewards/rejected": -0.14715011417865753,
      "eval_runtime": 113.5638,
      "eval_samples_per_second": 17.611,
      "eval_steps_per_second": 0.555,
      "step": 400
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 26.444115538892063,
      "learning_rate": 6.356589147286822e-08,
      "logits/chosen": -2.534757614135742,
      "logits/rejected": -2.5250518321990967,
      "logps/chosen": -79.89534759521484,
      "logps/rejected": -81.98127746582031,
      "loss": 0.3911,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 1.35897958278656,
      "rewards/margins": 2.1614632606506348,
      "rewards/rejected": -0.8024836778640747,
      "step": 410
    },
    {
      "epoch": 1.4634146341463414,
      "grad_norm": 50.792504489879086,
      "learning_rate": 5.968992248062015e-08,
      "logits/chosen": -2.5849547386169434,
      "logits/rejected": -2.5869174003601074,
      "logps/chosen": -73.73815155029297,
      "logps/rejected": -87.13761901855469,
      "loss": 0.4085,
      "rewards/accuracies": 0.4312500059604645,
      "rewards/chosen": 1.1775071620941162,
      "rewards/margins": 2.3105506896972656,
      "rewards/rejected": -1.1330437660217285,
      "step": 420
    },
    {
      "epoch": 1.4982578397212545,
      "grad_norm": 62.104061513696834,
      "learning_rate": 5.5813953488372087e-08,
      "logits/chosen": -2.5392727851867676,
      "logits/rejected": -2.528538465499878,
      "logps/chosen": -66.37205505371094,
      "logps/rejected": -74.73827362060547,
      "loss": 0.4034,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": 1.1451423168182373,
      "rewards/margins": 2.2697441577911377,
      "rewards/rejected": -1.12460196018219,
      "step": 430
    },
    {
      "epoch": 1.533101045296167,
      "grad_norm": 99.60660196952468,
      "learning_rate": 5.193798449612403e-08,
      "logits/chosen": -2.525665283203125,
      "logits/rejected": -2.5361111164093018,
      "logps/chosen": -67.78907775878906,
      "logps/rejected": -75.18824005126953,
      "loss": 0.4097,
      "rewards/accuracies": 0.4312500059604645,
      "rewards/chosen": 1.094794511795044,
      "rewards/margins": 1.9390010833740234,
      "rewards/rejected": -0.8442065119743347,
      "step": 440
    },
    {
      "epoch": 1.5679442508710801,
      "grad_norm": 52.21837049460921,
      "learning_rate": 4.806201550387597e-08,
      "logits/chosen": -2.573503255844116,
      "logits/rejected": -2.538872718811035,
      "logps/chosen": -88.26780700683594,
      "logps/rejected": -89.10135650634766,
      "loss": 0.4059,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": 1.2870919704437256,
      "rewards/margins": 2.856309413909912,
      "rewards/rejected": -1.5692174434661865,
      "step": 450
    },
    {
      "epoch": 1.6027874564459932,
      "grad_norm": 32.28937168577243,
      "learning_rate": 4.418604651162791e-08,
      "logits/chosen": -2.5357863903045654,
      "logits/rejected": -2.5155162811279297,
      "logps/chosen": -73.77734375,
      "logps/rejected": -79.86143493652344,
      "loss": 0.42,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 1.2010835409164429,
      "rewards/margins": 2.6294467449188232,
      "rewards/rejected": -1.4283630847930908,
      "step": 460
    },
    {
      "epoch": 1.6376306620209058,
      "grad_norm": 38.87268593820624,
      "learning_rate": 4.031007751937984e-08,
      "logits/chosen": -2.5288286209106445,
      "logits/rejected": -2.5216517448425293,
      "logps/chosen": -59.2172966003418,
      "logps/rejected": -73.46331024169922,
      "loss": 0.422,
      "rewards/accuracies": 0.39375001192092896,
      "rewards/chosen": 1.042550802230835,
      "rewards/margins": 1.938773512840271,
      "rewards/rejected": -0.8962229490280151,
      "step": 470
    },
    {
      "epoch": 1.6724738675958188,
      "grad_norm": 86.29055693743794,
      "learning_rate": 3.643410852713178e-08,
      "logits/chosen": -2.5448553562164307,
      "logits/rejected": -2.5282416343688965,
      "logps/chosen": -52.4884147644043,
      "logps/rejected": -53.635406494140625,
      "loss": 0.4111,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": 0.7865492105484009,
      "rewards/margins": 1.6136754751205444,
      "rewards/rejected": -0.8271263837814331,
      "step": 480
    },
    {
      "epoch": 1.7073170731707317,
      "grad_norm": 101.66995149882506,
      "learning_rate": 3.2558139534883724e-08,
      "logits/chosen": -2.5356173515319824,
      "logits/rejected": -2.5221338272094727,
      "logps/chosen": -71.38683319091797,
      "logps/rejected": -72.329345703125,
      "loss": 0.4539,
      "rewards/accuracies": 0.39375001192092896,
      "rewards/chosen": 0.9174805879592896,
      "rewards/margins": 2.05107045173645,
      "rewards/rejected": -1.1335899829864502,
      "step": 490
    },
    {
      "epoch": 1.7421602787456445,
      "grad_norm": 62.46391948863256,
      "learning_rate": 2.868217054263566e-08,
      "logits/chosen": -2.4681618213653564,
      "logits/rejected": -2.4625699520111084,
      "logps/chosen": -73.69934844970703,
      "logps/rejected": -86.02604675292969,
      "loss": 0.3945,
      "rewards/accuracies": 0.45625001192092896,
      "rewards/chosen": 1.2942867279052734,
      "rewards/margins": 2.4456610679626465,
      "rewards/rejected": -1.1513745784759521,
      "step": 500
    },
    {
      "epoch": 1.7421602787456445,
      "eval_logits/chosen": -2.5708680152893066,
      "eval_logits/rejected": -2.5542969703674316,
      "eval_logps/chosen": -73.87686920166016,
      "eval_logps/rejected": -81.72207641601562,
      "eval_loss": 0.6465447545051575,
      "eval_rewards/accuracies": 0.3710317313671112,
      "eval_rewards/chosen": 0.3071807324886322,
      "eval_rewards/margins": 0.6040430665016174,
      "eval_rewards/rejected": -0.29686233401298523,
      "eval_runtime": 114.1539,
      "eval_samples_per_second": 17.52,
      "eval_steps_per_second": 0.552,
      "step": 500
    },
    {
      "epoch": 1.7770034843205575,
      "grad_norm": 45.13747528668081,
      "learning_rate": 2.4806201550387595e-08,
      "logits/chosen": -2.5347788333892822,
      "logits/rejected": -2.5145461559295654,
      "logps/chosen": -68.51091003417969,
      "logps/rejected": -71.65690612792969,
      "loss": 0.3946,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": 1.1227554082870483,
      "rewards/margins": 2.1591665744781494,
      "rewards/rejected": -1.036410927772522,
      "step": 510
    },
    {
      "epoch": 1.8118466898954704,
      "grad_norm": 69.84605729348092,
      "learning_rate": 2.0930232558139537e-08,
      "logits/chosen": -2.549290418624878,
      "logits/rejected": -2.5436315536499023,
      "logps/chosen": -72.00182342529297,
      "logps/rejected": -79.60043334960938,
      "loss": 0.4047,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": 1.1346954107284546,
      "rewards/margins": 2.3285088539123535,
      "rewards/rejected": -1.1938132047653198,
      "step": 520
    },
    {
      "epoch": 1.8466898954703832,
      "grad_norm": 47.800583904519996,
      "learning_rate": 1.7054263565891472e-08,
      "logits/chosen": -2.526092767715454,
      "logits/rejected": -2.5203917026519775,
      "logps/chosen": -68.9186782836914,
      "logps/rejected": -73.74215698242188,
      "loss": 0.4153,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 1.208732008934021,
      "rewards/margins": 2.3980166912078857,
      "rewards/rejected": -1.1892845630645752,
      "step": 530
    },
    {
      "epoch": 1.8815331010452963,
      "grad_norm": 503.5623137913587,
      "learning_rate": 1.3178294573643412e-08,
      "logits/chosen": -2.5318820476531982,
      "logits/rejected": -2.5522282123565674,
      "logps/chosen": -61.30474090576172,
      "logps/rejected": -75.95423889160156,
      "loss": 0.426,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 1.213020920753479,
      "rewards/margins": 2.5194857120513916,
      "rewards/rejected": -1.3064649105072021,
      "step": 540
    },
    {
      "epoch": 1.916376306620209,
      "grad_norm": 39.90012660671112,
      "learning_rate": 9.302325581395349e-09,
      "logits/chosen": -2.523984909057617,
      "logits/rejected": -2.5038905143737793,
      "logps/chosen": -87.86249542236328,
      "logps/rejected": -89.35391235351562,
      "loss": 0.4004,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": 1.4861438274383545,
      "rewards/margins": 2.720036506652832,
      "rewards/rejected": -1.233892560005188,
      "step": 550
    },
    {
      "epoch": 1.951219512195122,
      "grad_norm": 18.05863362962586,
      "learning_rate": 5.4263565891472866e-09,
      "logits/chosen": -2.5144786834716797,
      "logits/rejected": -2.5275046825408936,
      "logps/chosen": -61.50837326049805,
      "logps/rejected": -72.43628692626953,
      "loss": 0.424,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 1.105821132659912,
      "rewards/margins": 2.175888776779175,
      "rewards/rejected": -1.0700676441192627,
      "step": 560
    },
    {
      "epoch": 1.986062717770035,
      "grad_norm": 49.2882801078698,
      "learning_rate": 1.5503875968992247e-09,
      "logits/chosen": -2.589893102645874,
      "logits/rejected": -2.5597004890441895,
      "logps/chosen": -63.0964241027832,
      "logps/rejected": -64.02296447753906,
      "loss": 0.4131,
      "rewards/accuracies": 0.4312500059604645,
      "rewards/chosen": 1.058401346206665,
      "rewards/margins": 1.7885780334472656,
      "rewards/rejected": -0.7301766872406006,
      "step": 570
    },
    {
      "epoch": 2.0,
      "step": 574,
      "total_flos": 0.0,
      "train_loss": 0.5264799162901237,
      "train_runtime": 6499.6728,
      "train_samples_per_second": 5.643,
      "train_steps_per_second": 0.088
    }
  ],
  "logging_steps": 10,
  "max_steps": 574,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|