{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5493427506376299,
  "eval_steps": 50,
  "global_step": 350,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015695507161075144,
      "grad_norm": 0.04355761408805847,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.845781326293945,
      "logits/rejected": 14.576438903808594,
      "logps/chosen": -0.31864267587661743,
      "logps/rejected": -0.24545662105083466,
      "loss": 1.0492,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.47796401381492615,
      "rewards/margins": -0.10977902263402939,
      "rewards/rejected": -0.3681849539279938,
      "step": 10
    },
    {
      "epoch": 0.03139101432215029,
      "grad_norm": 0.04919258877635002,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 15.27595043182373,
      "logits/rejected": 14.872761726379395,
      "logps/chosen": -0.3344747722148895,
      "logps/rejected": -0.24258682131767273,
      "loss": 1.0487,
      "rewards/accuracies": 0.16249999403953552,
      "rewards/chosen": -0.5017121434211731,
      "rewards/margins": -0.1378319263458252,
      "rewards/rejected": -0.3638802468776703,
      "step": 20
    },
    {
      "epoch": 0.047086521483225424,
      "grad_norm": 0.049933061003685,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 15.913165092468262,
      "logits/rejected": 15.607622146606445,
      "logps/chosen": -0.3440183997154236,
      "logps/rejected": -0.2831566333770752,
      "loss": 1.0405,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.516027569770813,
      "rewards/margins": -0.09129264950752258,
      "rewards/rejected": -0.4247349202632904,
      "step": 30
    },
    {
      "epoch": 0.06278202864430057,
      "grad_norm": 0.05693503096699715,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 15.402900695800781,
      "logits/rejected": 14.99272632598877,
      "logps/chosen": -0.3297731578350067,
      "logps/rejected": -0.2746916711330414,
      "loss": 1.0369,
      "rewards/accuracies": 0.21250000596046448,
      "rewards/chosen": -0.49465981125831604,
      "rewards/margins": -0.08262218534946442,
      "rewards/rejected": -0.41203755140304565,
      "step": 40
    },
    {
      "epoch": 0.07847753580537571,
      "grad_norm": 0.05467928573489189,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 15.64543342590332,
      "logits/rejected": 15.632547378540039,
      "logps/chosen": -0.30952686071395874,
      "logps/rejected": -0.24847058951854706,
      "loss": 1.0367,
      "rewards/accuracies": 0.21250000596046448,
      "rewards/chosen": -0.46429023146629333,
      "rewards/margins": -0.09158438444137573,
      "rewards/rejected": -0.37270587682724,
      "step": 50
    },
    {
      "epoch": 0.07847753580537571,
      "eval_logits/chosen": 15.850138664245605,
      "eval_logits/rejected": 15.368529319763184,
      "eval_logps/chosen": -0.3222965598106384,
      "eval_logps/rejected": -0.26877468824386597,
      "eval_loss": 1.0326261520385742,
      "eval_rewards/accuracies": 0.26923078298568726,
      "eval_rewards/chosen": -0.4834447503089905,
      "eval_rewards/margins": -0.08028276264667511,
      "eval_rewards/rejected": -0.40316200256347656,
      "eval_runtime": 14.5044,
      "eval_samples_per_second": 28.405,
      "eval_steps_per_second": 3.585,
      "step": 50
    },
    {
      "epoch": 0.09417304296645085,
      "grad_norm": 0.06174452602863312,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 15.443066596984863,
      "logits/rejected": 15.192205429077148,
      "logps/chosen": -0.31090402603149414,
      "logps/rejected": -0.26281923055648804,
      "loss": 1.04,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.466356098651886,
      "rewards/margins": -0.07212716341018677,
      "rewards/rejected": -0.39422887563705444,
      "step": 60
    },
    {
      "epoch": 0.109868550127526,
      "grad_norm": 0.06952528655529022,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 16.024200439453125,
      "logits/rejected": 15.82934284210205,
      "logps/chosen": -0.348227322101593,
      "logps/rejected": -0.26220566034317017,
      "loss": 1.043,
      "rewards/accuracies": 0.21250000596046448,
      "rewards/chosen": -0.5223408937454224,
      "rewards/margins": -0.1290324479341507,
      "rewards/rejected": -0.39330852031707764,
      "step": 70
    },
    {
      "epoch": 0.12556405728860115,
      "grad_norm": 0.07572082430124283,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 15.884915351867676,
      "logits/rejected": 15.603845596313477,
      "logps/chosen": -0.34849274158477783,
      "logps/rejected": -0.26585355401039124,
      "loss": 1.0285,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.5227391719818115,
      "rewards/margins": -0.12395882606506348,
      "rewards/rejected": -0.39878037571907043,
      "step": 80
    },
    {
      "epoch": 0.14125956444967627,
      "grad_norm": 0.2423778474330902,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 15.978216171264648,
      "logits/rejected": 15.76471996307373,
      "logps/chosen": -0.327436238527298,
      "logps/rejected": -0.25457051396369934,
      "loss": 1.03,
      "rewards/accuracies": 0.22499999403953552,
      "rewards/chosen": -0.49115434288978577,
      "rewards/margins": -0.10929858684539795,
      "rewards/rejected": -0.3818557560443878,
      "step": 90
    },
    {
      "epoch": 0.15695507161075142,
      "grad_norm": 0.1594536453485489,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 16.307537078857422,
      "logits/rejected": 16.138330459594727,
      "logps/chosen": -0.3342314660549164,
      "logps/rejected": -0.27582648396492004,
      "loss": 1.0309,
      "rewards/accuracies": 0.26249998807907104,
      "rewards/chosen": -0.5013472437858582,
      "rewards/margins": -0.0876075029373169,
      "rewards/rejected": -0.41373974084854126,
      "step": 100
    },
    {
      "epoch": 0.15695507161075142,
      "eval_logits/chosen": 16.4310245513916,
      "eval_logits/rejected": 15.98912525177002,
      "eval_logps/chosen": -0.3239763677120209,
      "eval_logps/rejected": -0.28784558176994324,
      "eval_loss": 1.020836353302002,
      "eval_rewards/accuracies": 0.3076923191547394,
      "eval_rewards/chosen": -0.4859645664691925,
      "eval_rewards/margins": -0.054196178913116455,
      "eval_rewards/rejected": -0.43176835775375366,
      "eval_runtime": 14.5049,
      "eval_samples_per_second": 28.404,
      "eval_steps_per_second": 3.585,
      "step": 100
    },
    {
      "epoch": 0.17265057877182657,
      "grad_norm": 0.07431349903345108,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 16.56686782836914,
      "logits/rejected": 16.093189239501953,
      "logps/chosen": -0.34455060958862305,
      "logps/rejected": -0.2834388315677643,
      "loss": 1.0388,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": -0.5168259739875793,
      "rewards/margins": -0.09166768193244934,
      "rewards/rejected": -0.4251582622528076,
      "step": 110
    },
    {
      "epoch": 0.1883460859329017,
      "grad_norm": 0.08802352845668793,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 16.50200843811035,
      "logits/rejected": 16.286388397216797,
      "logps/chosen": -0.30845317244529724,
      "logps/rejected": -0.2677682936191559,
      "loss": 1.0247,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.4626797139644623,
      "rewards/margins": -0.06102731078863144,
      "rewards/rejected": -0.40165242552757263,
      "step": 120
    },
    {
      "epoch": 0.20404159309397685,
      "grad_norm": 0.10464702546596527,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 16.163082122802734,
      "logits/rejected": 16.158031463623047,
      "logps/chosen": -0.3138599991798401,
      "logps/rejected": -0.28097471594810486,
      "loss": 1.0169,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": -0.47078999876976013,
      "rewards/margins": -0.04932791367173195,
      "rewards/rejected": -0.4214620590209961,
      "step": 130
    },
    {
      "epoch": 0.219737100255052,
      "grad_norm": 0.16971275210380554,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 16.28864860534668,
      "logits/rejected": 16.151805877685547,
      "logps/chosen": -0.3283368945121765,
      "logps/rejected": -0.2850198745727539,
      "loss": 0.9964,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": -0.49250537157058716,
      "rewards/margins": -0.06497551500797272,
      "rewards/rejected": -0.42752987146377563,
      "step": 140
    },
    {
      "epoch": 0.23543260741612712,
      "grad_norm": 0.18377964198589325,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 16.890087127685547,
      "logits/rejected": 16.42388153076172,
      "logps/chosen": -0.33256903290748596,
      "logps/rejected": -0.2939595878124237,
      "loss": 1.0073,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": -0.49885353446006775,
      "rewards/margins": -0.057914119213819504,
      "rewards/rejected": -0.44093936681747437,
      "step": 150
    },
    {
      "epoch": 0.23543260741612712,
      "eval_logits/chosen": 16.833438873291016,
      "eval_logits/rejected": 16.328977584838867,
      "eval_logps/chosen": -0.32567569613456726,
      "eval_logps/rejected": -0.35700783133506775,
      "eval_loss": 0.9802881479263306,
      "eval_rewards/accuracies": 0.42307692766189575,
      "eval_rewards/chosen": -0.4885135293006897,
      "eval_rewards/margins": 0.04699822515249252,
      "eval_rewards/rejected": -0.5355117321014404,
      "eval_runtime": 14.5005,
      "eval_samples_per_second": 28.413,
      "eval_steps_per_second": 3.586,
      "step": 150
    },
    {
      "epoch": 0.2511281145772023,
      "grad_norm": 0.12049826234579086,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 16.505878448486328,
      "logits/rejected": 16.178979873657227,
      "logps/chosen": -0.3397011458873749,
      "logps/rejected": -0.35640352964401245,
      "loss": 0.9795,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.5095517039299011,
      "rewards/margins": 0.0250535998493433,
      "rewards/rejected": -0.5346053242683411,
      "step": 160
    },
    {
      "epoch": 0.2668236217382774,
      "grad_norm": 0.09485407918691635,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 16.245588302612305,
      "logits/rejected": 15.922958374023438,
      "logps/chosen": -0.29733315110206604,
      "logps/rejected": -0.3461209237575531,
      "loss": 0.9694,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.44599977135658264,
      "rewards/margins": 0.0731816366314888,
      "rewards/rejected": -0.5191814303398132,
      "step": 170
    },
    {
      "epoch": 0.28251912889935255,
      "grad_norm": 0.155483216047287,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 16.339645385742188,
      "logits/rejected": 16.115110397338867,
      "logps/chosen": -0.3076801002025604,
      "logps/rejected": -0.3655286729335785,
      "loss": 0.9488,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.4615201950073242,
      "rewards/margins": 0.0867728441953659,
      "rewards/rejected": -0.5482929944992065,
      "step": 180
    },
    {
      "epoch": 0.2982146360604277,
      "grad_norm": 0.21345795691013336,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 16.491886138916016,
      "logits/rejected": 16.376684188842773,
      "logps/chosen": -0.32437095046043396,
      "logps/rejected": -0.39715105295181274,
      "loss": 0.9423,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.4865564703941345,
      "rewards/margins": 0.10917013883590698,
      "rewards/rejected": -0.5957266092300415,
      "step": 190
    },
    {
      "epoch": 0.31391014322150285,
      "grad_norm": 0.17633090913295746,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 16.6339168548584,
      "logits/rejected": 16.79404640197754,
      "logps/chosen": -0.33018192648887634,
      "logps/rejected": -0.384196400642395,
      "loss": 0.939,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.4952728748321533,
      "rewards/margins": 0.0810217633843422,
      "rewards/rejected": -0.5762946009635925,
      "step": 200
    },
    {
      "epoch": 0.31391014322150285,
      "eval_logits/chosen": 17.17803192138672,
      "eval_logits/rejected": 16.59328269958496,
      "eval_logps/chosen": -0.33600664138793945,
      "eval_logps/rejected": -0.47861453890800476,
      "eval_loss": 0.9303967356681824,
      "eval_rewards/accuracies": 0.4615384638309479,
      "eval_rewards/chosen": -0.5040098428726196,
      "eval_rewards/margins": 0.21391186118125916,
      "eval_rewards/rejected": -0.717921793460846,
      "eval_runtime": 14.5,
      "eval_samples_per_second": 28.414,
      "eval_steps_per_second": 3.586,
      "step": 200
    },
    {
      "epoch": 0.329605650382578,
      "grad_norm": 0.1562221795320511,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 16.597665786743164,
      "logits/rejected": 16.3507022857666,
      "logps/chosen": -0.33709320425987244,
      "logps/rejected": -0.43131333589553833,
      "loss": 0.9107,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.5056397914886475,
      "rewards/margins": 0.14133022725582123,
      "rewards/rejected": -0.6469700932502747,
      "step": 210
    },
    {
      "epoch": 0.34530115754365315,
      "grad_norm": 0.17680124938488007,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 16.873676300048828,
      "logits/rejected": 16.61945152282715,
      "logps/chosen": -0.35083168745040894,
      "logps/rejected": -0.537697434425354,
      "loss": 0.9094,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.526247501373291,
      "rewards/margins": 0.2802986204624176,
      "rewards/rejected": -0.8065462112426758,
      "step": 220
    },
    {
      "epoch": 0.3609966647047283,
      "grad_norm": 0.2662070393562317,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 16.803974151611328,
      "logits/rejected": 16.698320388793945,
      "logps/chosen": -0.3500753343105316,
      "logps/rejected": -0.5399882793426514,
      "loss": 0.8871,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.5251129865646362,
      "rewards/margins": 0.2848694324493408,
      "rewards/rejected": -0.8099824786186218,
      "step": 230
    },
    {
      "epoch": 0.3766921718658034,
      "grad_norm": 0.17161667346954346,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 16.67904281616211,
      "logits/rejected": 16.410579681396484,
      "logps/chosen": -0.33549198508262634,
      "logps/rejected": -0.4875665605068207,
      "loss": 0.9037,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.5032380223274231,
      "rewards/margins": 0.22811183333396912,
      "rewards/rejected": -0.7313498258590698,
      "step": 240
    },
    {
      "epoch": 0.39238767902687854,
      "grad_norm": 1.106021761894226,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 17.208127975463867,
      "logits/rejected": 16.654085159301758,
      "logps/chosen": -0.3752726912498474,
      "logps/rejected": -0.4947708249092102,
      "loss": 0.8606,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.5629090070724487,
      "rewards/margins": 0.1792471706867218,
      "rewards/rejected": -0.7421562671661377,
      "step": 250
    },
    {
      "epoch": 0.39238767902687854,
      "eval_logits/chosen": 17.32963752746582,
      "eval_logits/rejected": 16.589412689208984,
      "eval_logps/chosen": -0.37825876474380493,
      "eval_logps/rejected": -0.9001243114471436,
      "eval_loss": 0.8168494701385498,
      "eval_rewards/accuracies": 0.5192307829856873,
      "eval_rewards/chosen": -0.5673881769180298,
      "eval_rewards/margins": 0.7827982306480408,
      "eval_rewards/rejected": -1.3501865863800049,
      "eval_runtime": 14.5053,
      "eval_samples_per_second": 28.403,
      "eval_steps_per_second": 3.585,
      "step": 250
    },
    {
      "epoch": 0.4080831861879537,
      "grad_norm": 0.29772138595581055,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 16.713775634765625,
      "logits/rejected": 16.35211944580078,
      "logps/chosen": -0.3877524733543396,
      "logps/rejected": -0.8511163592338562,
      "loss": 0.8138,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.581628680229187,
      "rewards/margins": 0.6950457692146301,
      "rewards/rejected": -1.276674509048462,
      "step": 260
    },
    {
      "epoch": 0.42377869334902885,
      "grad_norm": 0.31860050559043884,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 17.09469985961914,
      "logits/rejected": 16.5472412109375,
      "logps/chosen": -0.47504258155822754,
      "logps/rejected": -1.3266533613204956,
      "loss": 0.7318,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.7125638723373413,
      "rewards/margins": 1.2774161100387573,
      "rewards/rejected": -1.9899799823760986,
      "step": 270
    },
    {
      "epoch": 0.439474200510104,
      "grad_norm": 0.6508163809776306,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 17.11003303527832,
      "logits/rejected": 16.6564998626709,
      "logps/chosen": -0.49357643723487854,
      "logps/rejected": -1.4481580257415771,
      "loss": 0.7599,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.7403645515441895,
      "rewards/margins": 1.4318726062774658,
      "rewards/rejected": -2.1722371578216553,
      "step": 280
    },
    {
      "epoch": 0.45516970767117915,
      "grad_norm": 0.32430580258369446,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 16.830989837646484,
      "logits/rejected": 16.384944915771484,
      "logps/chosen": -0.593712329864502,
      "logps/rejected": -1.7630856037139893,
      "loss": 0.7336,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.8905684351921082,
      "rewards/margins": 1.7540600299835205,
      "rewards/rejected": -2.6446282863616943,
      "step": 290
    },
    {
      "epoch": 0.47086521483225424,
      "grad_norm": 0.8555932641029358,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 16.72231674194336,
      "logits/rejected": 16.28726577758789,
      "logps/chosen": -0.5670709609985352,
      "logps/rejected": -2.0420775413513184,
      "loss": 0.6861,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.8506065607070923,
      "rewards/margins": 2.2125096321105957,
      "rewards/rejected": -3.0631160736083984,
      "step": 300
    },
    {
      "epoch": 0.47086521483225424,
      "eval_logits/chosen": 17.13121795654297,
      "eval_logits/rejected": 16.268341064453125,
      "eval_logps/chosen": -0.6842947602272034,
      "eval_logps/rejected": -2.119321584701538,
      "eval_loss": 0.7583853602409363,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": -1.026442289352417,
      "eval_rewards/margins": 2.1525399684906006,
      "eval_rewards/rejected": -3.1789822578430176,
      "eval_runtime": 14.5007,
      "eval_samples_per_second": 28.412,
      "eval_steps_per_second": 3.586,
      "step": 300
    },
    {
      "epoch": 0.4865607219933294,
      "grad_norm": 0.6059070825576782,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 17.147808074951172,
      "logits/rejected": 16.194652557373047,
      "logps/chosen": -0.8036400079727173,
      "logps/rejected": -2.289825201034546,
      "loss": 0.7163,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.2054599523544312,
      "rewards/margins": 2.229278087615967,
      "rewards/rejected": -3.4347376823425293,
      "step": 310
    },
    {
      "epoch": 0.5022562291544046,
      "grad_norm": 1.8073927164077759,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 16.94902992248535,
      "logits/rejected": 16.066068649291992,
      "logps/chosen": -1.198162317276001,
      "logps/rejected": -2.2922632694244385,
      "loss": 0.6894,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.7972434759140015,
      "rewards/margins": 1.6411516666412354,
      "rewards/rejected": -3.4383950233459473,
      "step": 320
    },
    {
      "epoch": 0.5179517363154797,
      "grad_norm": 3.746042490005493,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 16.31036376953125,
      "logits/rejected": 15.991762161254883,
      "logps/chosen": -1.6245781183242798,
      "logps/rejected": -2.553597927093506,
      "loss": 0.6607,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -2.4368672370910645,
      "rewards/margins": 1.3935294151306152,
      "rewards/rejected": -3.830397129058838,
      "step": 330
    },
    {
      "epoch": 0.5336472434765548,
      "grad_norm": 2.098111867904663,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 16.59554672241211,
      "logits/rejected": 15.915553092956543,
      "logps/chosen": -2.183227062225342,
      "logps/rejected": -3.4911434650421143,
      "loss": 0.6381,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.274840831756592,
      "rewards/margins": 1.9618743658065796,
      "rewards/rejected": -5.236715316772461,
      "step": 340
    },
    {
      "epoch": 0.5493427506376299,
      "grad_norm": 2.153958320617676,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 16.332544326782227,
      "logits/rejected": 15.691922187805176,
      "logps/chosen": -2.673710346221924,
      "logps/rejected": -3.8687057495117188,
      "loss": 0.5752,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -4.010565757751465,
      "rewards/margins": 1.792493224143982,
      "rewards/rejected": -5.8030595779418945,
      "step": 350
    },
    {
      "epoch": 0.5493427506376299,
      "eval_logits/chosen": 16.272428512573242,
      "eval_logits/rejected": 15.381678581237793,
      "eval_logps/chosen": -3.0390572547912598,
      "eval_logps/rejected": -4.695068836212158,
      "eval_loss": 0.5928590893745422,
      "eval_rewards/accuracies": 0.9230769276618958,
      "eval_rewards/chosen": -4.5585856437683105,
      "eval_rewards/margins": 2.4840168952941895,
      "eval_rewards/rejected": -7.0426025390625,
      "eval_runtime": 14.5008,
      "eval_samples_per_second": 28.412,
      "eval_steps_per_second": 3.586,
      "step": 350
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.48648609560789e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}