{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 100,
  "global_step": 288,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010416666666666666,
      "grad_norm": 7.676418798413947,
      "learning_rate": 3.375140204349925e-08,
      "logits/chosen": -2.590585231781006,
      "logits/rejected": -2.5664222240448,
      "logps/chosen": -80.29847717285156,
      "logps/rejected": -53.10200881958008,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.10416666666666667,
      "grad_norm": 6.867859377504262,
      "learning_rate": 3.375140204349925e-07,
      "logits/chosen": -2.5563881397247314,
      "logits/rejected": -2.5385148525238037,
      "logps/chosen": -87.91146850585938,
      "logps/rejected": -81.01969909667969,
      "loss": 0.6932,
      "rewards/accuracies": 0.2291666716337204,
      "rewards/chosen": 4.3005715269828215e-05,
      "rewards/margins": -0.0001762014871928841,
      "rewards/rejected": 0.0002192071988247335,
      "step": 10
    },
    {
      "epoch": 0.20833333333333334,
      "grad_norm": 6.140252183490494,
      "learning_rate": 6.75028040869985e-07,
      "logits/chosen": -2.6080267429351807,
      "logits/rejected": -2.559004783630371,
      "logps/chosen": -102.70601654052734,
      "logps/rejected": -89.34085083007812,
      "loss": 0.6915,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": 0.007517552934587002,
      "rewards/margins": 0.0011824130779132247,
      "rewards/rejected": 0.006335140205919743,
      "step": 20
    },
    {
      "epoch": 0.3125,
      "grad_norm": 8.51916101581021,
      "learning_rate": 9.753976451330556e-07,
      "logits/chosen": -2.479109287261963,
      "logits/rejected": -2.495014190673828,
      "logps/chosen": -65.36149597167969,
      "logps/rejected": -74.12493896484375,
      "loss": 0.6863,
      "rewards/accuracies": 0.32499998807907104,
      "rewards/chosen": 0.024195391684770584,
      "rewards/margins": 0.014859529212117195,
      "rewards/rejected": 0.009335865266621113,
      "step": 30
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 7.712483677212969,
      "learning_rate": 9.414675038488285e-07,
      "logits/chosen": -2.49943470954895,
      "logits/rejected": -2.4853296279907227,
      "logps/chosen": -69.53825378417969,
      "logps/rejected": -71.04619598388672,
      "loss": 0.6772,
      "rewards/accuracies": 0.2874999940395355,
      "rewards/chosen": 0.030402392148971558,
      "rewards/margins": 0.03428281098604202,
      "rewards/rejected": -0.0038804211653769016,
      "step": 40
    },
    {
      "epoch": 0.5208333333333334,
      "grad_norm": 8.998261203801041,
      "learning_rate": 9.075373625646016e-07,
      "logits/chosen": -2.406968593597412,
      "logits/rejected": -2.4230380058288574,
      "logps/chosen": -54.66428756713867,
      "logps/rejected": -64.8364486694336,
      "loss": 0.6712,
      "rewards/accuracies": 0.20624999701976776,
      "rewards/chosen": -0.04386414587497711,
      "rewards/margins": 0.02522563934326172,
      "rewards/rejected": -0.06908978521823883,
      "step": 50
    },
    {
      "epoch": 0.625,
      "grad_norm": 8.947771345110965,
      "learning_rate": 8.736072212803746e-07,
      "logits/chosen": -2.486833095550537,
      "logits/rejected": -2.46416974067688,
      "logps/chosen": -103.86640930175781,
      "logps/rejected": -110.64974212646484,
      "loss": 0.6728,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": -0.2890399098396301,
      "rewards/margins": 0.07593990862369537,
      "rewards/rejected": -0.3649798035621643,
      "step": 60
    },
    {
      "epoch": 0.7291666666666666,
      "grad_norm": 9.915857724019418,
      "learning_rate": 8.396770799961476e-07,
      "logits/chosen": -2.4623074531555176,
      "logits/rejected": -2.4422433376312256,
      "logps/chosen": -121.77342224121094,
      "logps/rejected": -119.22220611572266,
      "loss": 0.6581,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": -0.2963990569114685,
      "rewards/margins": 0.09725265949964523,
      "rewards/rejected": -0.39365169405937195,
      "step": 70
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 25.03318196876874,
      "learning_rate": 8.057469387119207e-07,
      "logits/chosen": -2.3540024757385254,
      "logits/rejected": -2.2817678451538086,
      "logps/chosen": -105.01051330566406,
      "logps/rejected": -110.41041564941406,
      "loss": 0.6459,
      "rewards/accuracies": 0.39375001192092896,
      "rewards/chosen": -0.22572524845600128,
      "rewards/margins": 0.13594061136245728,
      "rewards/rejected": -0.36166587471961975,
      "step": 80
    },
    {
      "epoch": 0.9375,
      "grad_norm": 16.29058709199533,
      "learning_rate": 7.718167974276937e-07,
      "logits/chosen": -2.132964611053467,
      "logits/rejected": -2.117180824279785,
      "logps/chosen": -66.82723236083984,
      "logps/rejected": -93.7275161743164,
      "loss": 0.6446,
      "rewards/accuracies": 0.26875001192092896,
      "rewards/chosen": -0.14975103735923767,
      "rewards/margins": 0.202955961227417,
      "rewards/rejected": -0.3527069687843323,
      "step": 90
    },
    {
      "epoch": 1.0416666666666667,
      "grad_norm": 16.284386286452357,
      "learning_rate": 7.378866561434668e-07,
      "logits/chosen": -2.041792392730713,
      "logits/rejected": -2.0138609409332275,
      "logps/chosen": -89.52456665039062,
      "logps/rejected": -122.956298828125,
      "loss": 0.5825,
      "rewards/accuracies": 0.40625,
      "rewards/chosen": -0.18652945756912231,
      "rewards/margins": 0.4071316123008728,
      "rewards/rejected": -0.5936610698699951,
      "step": 100
    },
    {
      "epoch": 1.0416666666666667,
      "eval_logits/chosen": -2.0902979373931885,
      "eval_logits/rejected": -2.0643553733825684,
      "eval_logps/chosen": -96.38362121582031,
      "eval_logps/rejected": -123.11580657958984,
      "eval_loss": 0.6482308506965637,
      "eval_rewards/accuracies": 0.32341268658638,
      "eval_rewards/chosen": -0.25518056750297546,
      "eval_rewards/margins": 0.2342308610677719,
      "eval_rewards/rejected": -0.48941144347190857,
      "eval_runtime": 113.6473,
      "eval_samples_per_second": 17.598,
      "eval_steps_per_second": 0.554,
      "step": 100
    },
    {
      "epoch": 1.1458333333333333,
      "grad_norm": 14.36714525578378,
      "learning_rate": 7.039565148592398e-07,
      "logits/chosen": -2.079672336578369,
      "logits/rejected": -2.1257057189941406,
      "logps/chosen": -75.16223907470703,
      "logps/rejected": -157.490234375,
      "loss": 0.4839,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.11415556818246841,
      "rewards/margins": 0.7717818021774292,
      "rewards/rejected": -0.8859373331069946,
      "step": 110
    },
    {
      "epoch": 1.25,
      "grad_norm": 10.05291833999338,
      "learning_rate": 6.700263735750129e-07,
      "logits/chosen": -2.1770777702331543,
      "logits/rejected": -2.12736439704895,
      "logps/chosen": -124.73258972167969,
      "logps/rejected": -207.2230224609375,
      "loss": 0.4901,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.1949707567691803,
      "rewards/margins": 1.1325080394744873,
      "rewards/rejected": -1.3274791240692139,
      "step": 120
    },
    {
      "epoch": 1.3541666666666667,
      "grad_norm": 10.238396987504712,
      "learning_rate": 6.360962322907859e-07,
      "logits/chosen": -1.985735297203064,
      "logits/rejected": -1.9928312301635742,
      "logps/chosen": -108.13710021972656,
      "logps/rejected": -195.22592163085938,
      "loss": 0.4606,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": -0.2514357268810272,
      "rewards/margins": 0.9828246831893921,
      "rewards/rejected": -1.2342605590820312,
      "step": 130
    },
    {
      "epoch": 1.4583333333333333,
      "grad_norm": 13.992502570332631,
      "learning_rate": 6.02166091006559e-07,
      "logits/chosen": -1.9039230346679688,
      "logits/rejected": -1.9281995296478271,
      "logps/chosen": -81.67506408691406,
      "logps/rejected": -182.8755340576172,
      "loss": 0.4713,
      "rewards/accuracies": 0.40625,
      "rewards/chosen": -0.3775108754634857,
      "rewards/margins": 0.9950442314147949,
      "rewards/rejected": -1.372555136680603,
      "step": 140
    },
    {
      "epoch": 1.5625,
      "grad_norm": 16.317117170378367,
      "learning_rate": 5.682359497223321e-07,
      "logits/chosen": -1.9301204681396484,
      "logits/rejected": -1.8779830932617188,
      "logps/chosen": -101.25444030761719,
      "logps/rejected": -199.9362030029297,
      "loss": 0.4567,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": -0.3392702639102936,
      "rewards/margins": 1.133601427078247,
      "rewards/rejected": -1.4728715419769287,
      "step": 150
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 13.942547798372193,
      "learning_rate": 5.34305808438105e-07,
      "logits/chosen": -1.8111422061920166,
      "logits/rejected": -1.8152790069580078,
      "logps/chosen": -125.3135757446289,
      "logps/rejected": -260.8206787109375,
      "loss": 0.4454,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.5173971056938171,
      "rewards/margins": 1.497753381729126,
      "rewards/rejected": -2.015150547027588,
      "step": 160
    },
    {
      "epoch": 1.7708333333333335,
      "grad_norm": 20.552273461683416,
      "learning_rate": 5.003756671538781e-07,
      "logits/chosen": -1.6733791828155518,
      "logits/rejected": -1.6446590423583984,
      "logps/chosen": -139.14492797851562,
      "logps/rejected": -261.77056884765625,
      "loss": 0.4492,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -0.7451165318489075,
      "rewards/margins": 1.3388173580169678,
      "rewards/rejected": -2.0839340686798096,
      "step": 170
    },
    {
      "epoch": 1.875,
      "grad_norm": 16.343999863313055,
      "learning_rate": 4.664455258696512e-07,
      "logits/chosen": -1.697675108909607,
      "logits/rejected": -1.65886652469635,
      "logps/chosen": -117.02315521240234,
      "logps/rejected": -225.981201171875,
      "loss": 0.4335,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.52647864818573,
      "rewards/margins": 1.2495534420013428,
      "rewards/rejected": -1.7760320901870728,
      "step": 180
    },
    {
      "epoch": 1.9791666666666665,
      "grad_norm": 17.543702138725678,
      "learning_rate": 4.325153845854242e-07,
      "logits/chosen": -1.708845853805542,
      "logits/rejected": -1.6851694583892822,
      "logps/chosen": -132.83035278320312,
      "logps/rejected": -266.851318359375,
      "loss": 0.4324,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -0.5171667337417603,
      "rewards/margins": 1.5369913578033447,
      "rewards/rejected": -2.0541584491729736,
      "step": 190
    },
    {
      "epoch": 2.0833333333333335,
      "grad_norm": 4.109061869011508,
      "learning_rate": 3.985852433011972e-07,
      "logits/chosen": -1.4759489297866821,
      "logits/rejected": -1.4932861328125,
      "logps/chosen": -147.7212677001953,
      "logps/rejected": -366.11163330078125,
      "loss": 0.3884,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -0.6844128370285034,
      "rewards/margins": 2.3995113372802734,
      "rewards/rejected": -3.0839240550994873,
      "step": 200
    },
    {
      "epoch": 2.0833333333333335,
      "eval_logits/chosen": -1.449843168258667,
      "eval_logits/rejected": -1.4185280799865723,
      "eval_logps/chosen": -184.884765625,
      "eval_logps/rejected": -233.7708740234375,
      "eval_loss": 0.6749430298805237,
      "eval_rewards/accuracies": 0.3313491940498352,
      "eval_rewards/chosen": -1.2867618799209595,
      "eval_rewards/margins": 0.49245980381965637,
      "eval_rewards/rejected": -1.7792216539382935,
      "eval_runtime": 113.4025,
      "eval_samples_per_second": 17.636,
      "eval_steps_per_second": 0.556,
      "step": 200
    },
    {
      "epoch": 2.1875,
      "grad_norm": 3.8226987442037843,
      "learning_rate": 3.646551020169702e-07,
      "logits/chosen": -1.3593485355377197,
      "logits/rejected": -1.32414972782135,
      "logps/chosen": -109.7019271850586,
      "logps/rejected": -280.071044921875,
      "loss": 0.4007,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.5374400019645691,
      "rewards/margins": 1.9264366626739502,
      "rewards/rejected": -2.463876485824585,
      "step": 210
    },
    {
      "epoch": 2.2916666666666665,
      "grad_norm": 9.430823798801429,
      "learning_rate": 3.307249607327433e-07,
      "logits/chosen": -0.4775095582008362,
      "logits/rejected": -0.3128586709499359,
      "logps/chosen": -199.72067260742188,
      "logps/rejected": -454.793212890625,
      "loss": 0.3453,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -1.1384669542312622,
      "rewards/margins": 3.0831480026245117,
      "rewards/rejected": -4.221614837646484,
      "step": 220
    },
    {
      "epoch": 2.3958333333333335,
      "grad_norm": 9.78181725384326,
      "learning_rate": 2.9679481944851634e-07,
      "logits/chosen": 0.34354180097579956,
      "logits/rejected": 0.4988431930541992,
      "logps/chosen": -136.43673706054688,
      "logps/rejected": -317.75787353515625,
      "loss": 0.396,
      "rewards/accuracies": 0.4312500059604645,
      "rewards/chosen": -0.8929149508476257,
      "rewards/margins": 2.1588006019592285,
      "rewards/rejected": -3.05171537399292,
      "step": 230
    },
    {
      "epoch": 2.5,
      "grad_norm": 7.751369494008921,
      "learning_rate": 2.628646781642894e-07,
      "logits/chosen": 0.8226224184036255,
      "logits/rejected": 1.1203088760375977,
      "logps/chosen": -195.84498596191406,
      "logps/rejected": -419.219482421875,
      "loss": 0.3849,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -1.258272647857666,
      "rewards/margins": 2.7024192810058594,
      "rewards/rejected": -3.9606919288635254,
      "step": 240
    },
    {
      "epoch": 2.6041666666666665,
      "grad_norm": 8.853348347253043,
      "learning_rate": 2.2893453688006246e-07,
      "logits/chosen": 1.439245581626892,
      "logits/rejected": 1.607928991317749,
      "logps/chosen": -183.87660217285156,
      "logps/rejected": -415.3749084472656,
      "loss": 0.3591,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -1.2962507009506226,
      "rewards/margins": 2.7243804931640625,
      "rewards/rejected": -4.020631313323975,
      "step": 250
    },
    {
      "epoch": 2.7083333333333335,
      "grad_norm": 6.5857498778744805,
      "learning_rate": 1.9500439559583547e-07,
      "logits/chosen": 1.5265406370162964,
      "logits/rejected": 1.5034195184707642,
      "logps/chosen": -151.31393432617188,
      "logps/rejected": -378.006103515625,
      "loss": 0.3878,
      "rewards/accuracies": 0.45625001192092896,
      "rewards/chosen": -1.0861542224884033,
      "rewards/margins": 2.4694628715515137,
      "rewards/rejected": -3.555617094039917,
      "step": 260
    },
    {
      "epoch": 2.8125,
      "grad_norm": 8.328330751046998,
      "learning_rate": 1.610742543116085e-07,
      "logits/chosen": 1.2100938558578491,
      "logits/rejected": 1.192025065422058,
      "logps/chosen": -182.81759643554688,
      "logps/rejected": -417.20623779296875,
      "loss": 0.365,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -1.2372219562530518,
      "rewards/margins": 2.7132503986358643,
      "rewards/rejected": -3.950472354888916,
      "step": 270
    },
    {
      "epoch": 2.9166666666666665,
      "grad_norm": 9.271318342539349,
      "learning_rate": 1.2714411302738161e-07,
      "logits/chosen": 1.5337226390838623,
      "logits/rejected": 1.8394181728363037,
      "logps/chosen": -204.70779418945312,
      "logps/rejected": -450.3301696777344,
      "loss": 0.3737,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -1.3911634683609009,
      "rewards/margins": 2.897785186767578,
      "rewards/rejected": -4.288949012756348,
      "step": 280
    },
    {
      "epoch": 3.0,
      "step": 288,
      "total_flos": 0.0,
      "train_loss": 0.5020060315728188,
      "train_runtime": 3747.4353,
      "train_samples_per_second": 4.894,
      "train_steps_per_second": 0.077
    }
  ],
  "logging_steps": 10,
  "max_steps": 288,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}