{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984301412872841,
  "eval_steps": 100,
  "global_step": 477,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": 0.8531318306922913,
      "logits/rejected": 0.7327959537506104,
      "logps/chosen": -133.83494567871094,
      "logps/rejected": -129.6299285888672,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.0416666666666667e-06,
      "logits/chosen": 0.8151362538337708,
      "logits/rejected": 0.7788704037666321,
      "logps/chosen": -90.5947036743164,
      "logps/rejected": -92.77328491210938,
      "loss": 0.6931,
      "rewards/accuracies": 0.2361111044883728,
      "rewards/chosen": 0.00018473717500455678,
      "rewards/margins": 6.259369547478855e-05,
      "rewards/rejected": 0.00012214347952976823,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.0833333333333334e-06,
      "logits/chosen": 0.8342723846435547,
      "logits/rejected": 0.8797974586486816,
      "logps/chosen": -81.16204071044922,
      "logps/rejected": -81.1548080444336,
      "loss": 0.6932,
      "rewards/accuracies": 0.17499999701976776,
      "rewards/chosen": -0.00041853776201605797,
      "rewards/margins": -0.0003428882628213614,
      "rewards/rejected": -7.564939733128995e-05,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.125e-06,
      "logits/chosen": 0.8050435185432434,
      "logits/rejected": 0.8031445741653442,
      "logps/chosen": -101.43102264404297,
      "logps/rejected": -98.3962631225586,
      "loss": 0.6932,
      "rewards/accuracies": 0.23749999701976776,
      "rewards/chosen": 0.00019087354303337634,
      "rewards/margins": -0.00028236288926564157,
      "rewards/rejected": 0.000473236374091357,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.166666666666667e-06,
      "logits/chosen": 0.8352361917495728,
      "logits/rejected": 0.8269574046134949,
      "logps/chosen": -111.55462646484375,
      "logps/rejected": -99.02640533447266,
      "loss": 0.6931,
      "rewards/accuracies": 0.23749999701976776,
      "rewards/chosen": -5.0148169975727797e-05,
      "rewards/margins": 7.251273200381547e-05,
      "rewards/rejected": -0.00012266085832379758,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999731868769027e-06,
      "logits/chosen": 0.8326011896133423,
      "logits/rejected": 0.8801982998847961,
      "logps/chosen": -98.504638671875,
      "logps/rejected": -83.65934753417969,
      "loss": 0.6932,
      "rewards/accuracies": 0.19374999403953552,
      "rewards/chosen": -0.0006545094074681401,
      "rewards/margins": -0.0007917654584161937,
      "rewards/rejected": 0.00013725618191529065,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.9903533134293035e-06,
      "logits/chosen": 0.8338711857795715,
      "logits/rejected": 0.8361239433288574,
      "logps/chosen": -105.47569274902344,
      "logps/rejected": -103.5544662475586,
      "loss": 0.6931,
      "rewards/accuracies": 0.26249998807907104,
      "rewards/chosen": -6.698282231809571e-05,
      "rewards/margins": -0.0007280272548086941,
      "rewards/rejected": 0.0006610444979742169,
      "step": 60
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.967625656594782e-06,
      "logits/chosen": 0.8040585517883301,
      "logits/rejected": 0.8523794412612915,
      "logps/chosen": -83.11436462402344,
      "logps/rejected": -77.95774841308594,
      "loss": 0.6931,
      "rewards/accuracies": 0.26875001192092896,
      "rewards/chosen": -0.0002090830384986475,
      "rewards/margins": -0.0001823374768719077,
      "rewards/rejected": -2.6745503419078887e-05,
      "step": 70
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.93167072587771e-06,
      "logits/chosen": 0.8447190523147583,
      "logits/rejected": 0.9011589288711548,
      "logps/chosen": -101.15557098388672,
      "logps/rejected": -84.94987487792969,
      "loss": 0.693,
      "rewards/accuracies": 0.3375000059604645,
      "rewards/chosen": 0.00010431509872432798,
      "rewards/margins": 0.000598998973146081,
      "rewards/rejected": -0.0004946838016621768,
      "step": 80
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368549e-06,
      "logits/chosen": 0.8417055010795593,
      "logits/rejected": 0.8667267560958862,
      "logps/chosen": -88.30390930175781,
      "logps/rejected": -110.51224517822266,
      "loss": 0.6931,
      "rewards/accuracies": 0.24375000596046448,
      "rewards/chosen": -0.0014625315088778734,
      "rewards/margins": -0.0006530345417559147,
      "rewards/rejected": -0.0008094970253296196,
      "step": 90
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.8209198325401815e-06,
      "logits/chosen": 0.8801371455192566,
      "logits/rejected": 0.9127315282821655,
      "logps/chosen": -86.3653793334961,
      "logps/rejected": -69.10803985595703,
      "loss": 0.693,
      "rewards/accuracies": 0.22499999403953552,
      "rewards/chosen": -0.0005041404510848224,
      "rewards/margins": 0.0003193179436493665,
      "rewards/rejected": -0.0008234584820456803,
      "step": 100
    },
    {
      "epoch": 0.21,
      "eval_logits/chosen": 0.7997792959213257,
      "eval_logits/rejected": 0.8176011443138123,
      "eval_logps/chosen": -91.77481842041016,
      "eval_logps/rejected": -94.28044891357422,
      "eval_loss": 0.6930604577064514,
      "eval_rewards/accuracies": 0.2680000066757202,
      "eval_rewards/chosen": -0.00047452302533201873,
      "eval_rewards/margins": 0.00035985803697258234,
      "eval_rewards/rejected": -0.0008343810332007706,
      "eval_runtime": 273.126,
      "eval_samples_per_second": 7.323,
      "eval_steps_per_second": 0.458,
      "step": 100
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.746717530629565e-06,
      "logits/chosen": 0.8060985803604126,
      "logits/rejected": 0.8291690945625305,
      "logps/chosen": -110.20963287353516,
      "logps/rejected": -96.49766540527344,
      "loss": 0.6929,
      "rewards/accuracies": 0.26249998807907104,
      "rewards/chosen": -0.0005915694055147469,
      "rewards/margins": 0.00022233165509533137,
      "rewards/rejected": -0.0008139010751619935,
      "step": 110
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.660472094042121e-06,
      "logits/chosen": 0.8455309867858887,
      "logits/rejected": 0.9219174385070801,
      "logps/chosen": -84.42987060546875,
      "logps/rejected": -80.36955261230469,
      "loss": 0.6931,
      "rewards/accuracies": 0.28125,
      "rewards/chosen": -0.0005250257672742009,
      "rewards/margins": 0.00011121686839032918,
      "rewards/rejected": -0.0006362426793202758,
      "step": 120
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.5626458262912745e-06,
      "logits/chosen": 0.7771774530410767,
      "logits/rejected": 0.8348148465156555,
      "logps/chosen": -91.55365753173828,
      "logps/rejected": -80.16468811035156,
      "loss": 0.693,
      "rewards/accuracies": 0.26249998807907104,
      "rewards/chosen": -0.0003579836920835078,
      "rewards/margins": 0.0003911118838004768,
      "rewards/rejected": -0.0007490954594686627,
      "step": 130
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.453763107901676e-06,
      "logits/chosen": 0.8080952763557434,
      "logits/rejected": 0.8902841806411743,
      "logps/chosen": -102.3683853149414,
      "logps/rejected": -121.3236312866211,
      "loss": 0.6929,
      "rewards/accuracies": 0.34375,
      "rewards/chosen": -0.0011437158100306988,
      "rewards/margins": 0.00023913508630357683,
      "rewards/rejected": -0.001382850925438106,
      "step": 140
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.33440758555951e-06,
      "logits/chosen": 0.8286693692207336,
      "logits/rejected": 0.8885159492492676,
      "logps/chosen": -98.10747528076172,
      "logps/rejected": -75.61839294433594,
      "loss": 0.6929,
      "rewards/accuracies": 0.26875001192092896,
      "rewards/chosen": -0.001014741021208465,
      "rewards/margins": 0.00025303030270151794,
      "rewards/rejected": -0.0012677714694291353,
      "step": 150
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.205219043576955e-06,
      "logits/chosen": 0.7178612947463989,
      "logits/rejected": 0.7653802633285522,
      "logps/chosen": -80.03684997558594,
      "logps/rejected": -73.78407287597656,
      "loss": 0.6928,
      "rewards/accuracies": 0.2562499940395355,
      "rewards/chosen": -0.001032956875860691,
      "rewards/margins": 0.0007252781069837511,
      "rewards/rejected": -0.0017582349246367812,
      "step": 160
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.066889974440757e-06,
      "logits/chosen": 0.8685981631278992,
      "logits/rejected": 0.9337660670280457,
      "logps/chosen": -72.55715942382812,
      "logps/rejected": -81.37494659423828,
      "loss": 0.6929,
      "rewards/accuracies": 0.26875001192092896,
      "rewards/chosen": -0.0013817875878885388,
      "rewards/margins": 0.0006504356861114502,
      "rewards/rejected": -0.002032223390415311,
      "step": 170
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.92016186682789e-06,
      "logits/chosen": 0.7872442007064819,
      "logits/rejected": 0.8030352592468262,
      "logps/chosen": -98.71012878417969,
      "logps/rejected": -94.06107330322266,
      "loss": 0.6928,
      "rewards/accuracies": 0.26249998807907104,
      "rewards/chosen": -0.0005101398564875126,
      "rewards/margins": 0.0011010088492184877,
      "rewards/rejected": -0.0016111487057060003,
      "step": 180
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.7658212309857576e-06,
      "logits/chosen": 0.7398073077201843,
      "logits/rejected": 0.8337036967277527,
      "logps/chosen": -97.44853973388672,
      "logps/rejected": -107.1574478149414,
      "loss": 0.6926,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": -0.0017274795100092888,
      "rewards/margins": 0.0012662711087614298,
      "rewards/rejected": -0.0029937506187707186,
      "step": 190
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.604695382782159e-06,
      "logits/chosen": 0.7769767642021179,
      "logits/rejected": 0.8381963968276978,
      "logps/chosen": -77.53779602050781,
      "logps/rejected": -86.9189224243164,
      "loss": 0.6922,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.0016526943072676659,
      "rewards/margins": 0.0023483093827962875,
      "rewards/rejected": -0.004001003224402666,
      "step": 200
    },
    {
      "epoch": 0.42,
      "eval_logits/chosen": 0.7941381931304932,
      "eval_logits/rejected": 0.8121381998062134,
      "eval_logps/chosen": -91.90680694580078,
      "eval_logps/rejected": -94.51410675048828,
      "eval_loss": 0.692441999912262,
      "eval_rewards/accuracies": 0.3019999861717224,
      "eval_rewards/chosen": -0.0017943703569471836,
      "eval_rewards/margins": 0.001376640284433961,
      "eval_rewards/rejected": -0.0031710106413811445,
      "eval_runtime": 273.1548,
      "eval_samples_per_second": 7.322,
      "eval_steps_per_second": 0.458,
      "step": 200
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.437648009023905e-06,
      "logits/chosen": 0.837793231010437,
      "logits/rejected": 0.834112286567688,
      "logps/chosen": -93.53672790527344,
      "logps/rejected": -86.74687194824219,
      "loss": 0.6926,
      "rewards/accuracies": 0.2874999940395355,
      "rewards/chosen": -0.0022572961170226336,
      "rewards/margins": 0.0016887232195585966,
      "rewards/rejected": -0.00394601933658123,
      "step": 210
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.265574537815398e-06,
      "logits/chosen": 0.7990659475326538,
      "logits/rejected": 0.8434357643127441,
      "logps/chosen": -101.04875946044922,
      "logps/rejected": -95.13258361816406,
      "loss": 0.6927,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": -0.002797973807901144,
      "rewards/margins": 0.0009291036985814571,
      "rewards/rejected": -0.003727077739313245,
      "step": 220
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.089397338773569e-06,
      "logits/chosen": 0.8693816065788269,
      "logits/rejected": 0.8417167663574219,
      "logps/chosen": -88.75157165527344,
      "logps/rejected": -93.32563018798828,
      "loss": 0.6923,
      "rewards/accuracies": 0.35624998807907104,
      "rewards/chosen": -0.0035379533655941486,
      "rewards/margins": 0.001493574702180922,
      "rewards/rejected": -0.005031527951359749,
      "step": 230
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.9100607788275547e-06,
      "logits/chosen": 0.7980669736862183,
      "logits/rejected": 0.8121516108512878,
      "logps/chosen": -115.3040771484375,
      "logps/rejected": -101.95747375488281,
      "loss": 0.6917,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": -0.0025563673116266727,
      "rewards/margins": 0.0030108934734016657,
      "rewards/rejected": -0.005567261017858982,
      "step": 240
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.72852616010567e-06,
      "logits/chosen": 0.7765026688575745,
      "logits/rejected": 0.8406999707221985,
      "logps/chosen": -116.62477111816406,
      "logps/rejected": -107.66845703125,
      "loss": 0.692,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": -0.0028718970715999603,
      "rewards/margins": 0.0028597547207027674,
      "rewards/rejected": -0.005731652025133371,
      "step": 250
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.5457665670441937e-06,
      "logits/chosen": 0.7957627177238464,
      "logits/rejected": 0.8751128911972046,
      "logps/chosen": -99.27378845214844,
      "logps/rejected": -88.92988586425781,
      "loss": 0.6915,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": -0.0027769659645855427,
      "rewards/margins": 0.003127423347905278,
      "rewards/rejected": -0.005904389079660177,
      "step": 260
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.3627616503391813e-06,
      "logits/chosen": 0.8911062479019165,
      "logits/rejected": 0.9384480714797974,
      "logps/chosen": -114.53426361083984,
      "logps/rejected": -83.79386901855469,
      "loss": 0.6917,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.0030242924112826586,
      "rewards/margins": 0.0038346436340361834,
      "rewards/rejected": -0.006858936045318842,
      "step": 270
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.1804923757009885e-06,
      "logits/chosen": 0.8408538699150085,
      "logits/rejected": 0.839997410774231,
      "logps/chosen": -121.15773010253906,
      "logps/rejected": -109.73968505859375,
      "loss": 0.6916,
      "rewards/accuracies": 0.38749998807907104,
      "rewards/chosen": -0.0034516516607254744,
      "rewards/margins": 0.0034923548810184,
      "rewards/rejected": -0.006944006774574518,
      "step": 280
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.9999357655598894e-06,
      "logits/chosen": 0.825127124786377,
      "logits/rejected": 0.8463200330734253,
      "logps/chosen": -94.99199676513672,
      "logps/rejected": -92.08692932128906,
      "loss": 0.6919,
      "rewards/accuracies": 0.2750000059604645,
      "rewards/chosen": -0.005209151655435562,
      "rewards/margins": 0.0012942428002133965,
      "rewards/rejected": -0.006503394804894924,
      "step": 290
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089576e-06,
      "logits/chosen": 0.7896022796630859,
      "logits/rejected": 0.8404830694198608,
      "logps/chosen": -79.6166763305664,
      "logps/rejected": -87.97811126708984,
      "loss": 0.6917,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.002977000316604972,
      "rewards/margins": 0.004023983143270016,
      "rewards/rejected": -0.007000982761383057,
      "step": 300
    },
    {
      "epoch": 0.63,
      "eval_logits/chosen": 0.787030041217804,
      "eval_logits/rejected": 0.8057210445404053,
      "eval_logps/chosen": -92.21893310546875,
      "eval_logps/rejected": -94.9659194946289,
      "eval_loss": 0.6917064189910889,
      "eval_rewards/accuracies": 0.3100000023841858,
      "eval_rewards/chosen": -0.004915657918900251,
      "eval_rewards/margins": 0.0027734539471566677,
      "eval_rewards/rejected": -0.0076891109347343445,
      "eval_runtime": 273.1314,
      "eval_samples_per_second": 7.322,
      "eval_steps_per_second": 0.458,
      "step": 300
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.647817538357072e-06,
      "logits/chosen": 0.797222912311554,
      "logits/rejected": 0.8298647999763489,
      "logps/chosen": -85.97596740722656,
      "logps/rejected": -87.59102630615234,
      "loss": 0.6915,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": -0.005664575379341841,
      "rewards/margins": 0.002779053058475256,
      "rewards/rejected": -0.008443629369139671,
      "step": 310
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.4781433892011132e-06,
      "logits/chosen": 0.7955067753791809,
      "logits/rejected": 0.8460105657577515,
      "logps/chosen": -88.00541687011719,
      "logps/rejected": -91.3482437133789,
      "loss": 0.6916,
      "rewards/accuracies": 0.3687500059604645,
      "rewards/chosen": -0.005240852013230324,
      "rewards/margins": 0.003471267642453313,
      "rewards/rejected": -0.008712120354175568,
      "step": 320
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135999e-06,
      "logits/chosen": 0.7453655004501343,
      "logits/rejected": 0.7835742235183716,
      "logps/chosen": -101.49515533447266,
      "logps/rejected": -102.06614685058594,
      "loss": 0.6914,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": -0.00533133652061224,
      "rewards/margins": 0.004351903218775988,
      "rewards/rejected": -0.00968323927372694,
      "step": 330
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.1561076868822756e-06,
      "logits/chosen": 0.7904757857322693,
      "logits/rejected": 0.8207653760910034,
      "logps/chosen": -83.08015441894531,
      "logps/rejected": -73.13031768798828,
      "loss": 0.6908,
      "rewards/accuracies": 0.29374998807907104,
      "rewards/chosen": -0.002843918278813362,
      "rewards/margins": 0.005075609777122736,
      "rewards/rejected": -0.007919528521597385,
      "step": 340
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0054723495346484e-06,
      "logits/chosen": 0.8648473620414734,
      "logits/rejected": 0.88224858045578,
      "logps/chosen": -88.79496002197266,
      "logps/rejected": -82.97408294677734,
      "loss": 0.6911,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": -0.0072964876890182495,
      "rewards/margins": 0.003556302282959223,
      "rewards/rejected": -0.01085279043763876,
      "step": 350
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367876e-07,
      "logits/chosen": 0.7940279841423035,
      "logits/rejected": 0.808300793170929,
      "logps/chosen": -95.09690856933594,
      "logps/rejected": -98.6402816772461,
      "loss": 0.6911,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.007194930221885443,
      "rewards/margins": 0.004204220604151487,
      "rewards/rejected": -0.01139915082603693,
      "step": 360
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.289996455765749e-07,
      "logits/chosen": 0.8749688863754272,
      "logits/rejected": 0.8549054861068726,
      "logps/chosen": -107.79057312011719,
      "logps/rejected": -104.4919662475586,
      "loss": 0.6913,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": -0.005484831985086203,
      "rewards/margins": 0.0049090199172496796,
      "rewards/rejected": -0.01039385236799717,
      "step": 370
    },
    {
      "epoch": 0.8,
      "learning_rate": 6.046442623320145e-07,
      "logits/chosen": 0.8186184167861938,
      "logits/rejected": 0.8605905771255493,
      "logps/chosen": -102.05201721191406,
      "logps/rejected": -88.45350646972656,
      "loss": 0.6905,
      "rewards/accuracies": 0.35624998807907104,
      "rewards/chosen": -0.006084176246076822,
      "rewards/margins": 0.004532010294497013,
      "rewards/rejected": -0.010616186074912548,
      "step": 380
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-07,
      "logits/chosen": 0.8054735064506531,
      "logits/rejected": 0.8569159507751465,
      "logps/chosen": -110.53129577636719,
      "logps/rejected": -99.7970199584961,
      "loss": 0.6905,
      "rewards/accuracies": 0.35624998807907104,
      "rewards/chosen": -0.0049868240021169186,
      "rewards/margins": 0.005900658201426268,
      "rewards/rejected": -0.01088748313486576,
      "step": 390
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.8702478614051353e-07,
      "logits/chosen": 0.8212454915046692,
      "logits/rejected": 0.8515514135360718,
      "logps/chosen": -100.35879516601562,
      "logps/rejected": -103.1037368774414,
      "loss": 0.6905,
      "rewards/accuracies": 0.3375000059604645,
      "rewards/chosen": -0.006732915993779898,
      "rewards/margins": 0.004675927106291056,
      "rewards/rejected": -0.011408843100070953,
      "step": 400
    },
    {
      "epoch": 0.84,
      "eval_logits/chosen": 0.7826990485191345,
      "eval_logits/rejected": 0.8012421727180481,
      "eval_logps/chosen": -92.42469787597656,
      "eval_logps/rejected": -95.25089263916016,
      "eval_loss": 0.6913294792175293,
      "eval_rewards/accuracies": 0.328000009059906,
      "eval_rewards/chosen": -0.00697335647419095,
      "eval_rewards/margins": 0.0035653903614729643,
      "eval_rewards/rejected": -0.010538745671510696,
      "eval_runtime": 273.6535,
      "eval_samples_per_second": 7.309,
      "eval_steps_per_second": 0.457,
      "step": 400
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.9492720416985004e-07,
      "logits/chosen": 0.8186850547790527,
      "logits/rejected": 0.8151271939277649,
      "logps/chosen": -95.82102966308594,
      "logps/rejected": -80.586669921875,
      "loss": 0.6916,
      "rewards/accuracies": 0.3375000059604645,
      "rewards/chosen": -0.00794359389692545,
      "rewards/margins": 0.003056485904380679,
      "rewards/rejected": -0.011000080034136772,
      "step": 410
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020857e-07,
      "logits/chosen": 0.7571959495544434,
      "logits/rejected": 0.8163139224052429,
      "logps/chosen": -95.65677642822266,
      "logps/rejected": -97.86299896240234,
      "loss": 0.6915,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.00859010498970747,
      "rewards/margins": 0.0028384437318891287,
      "rewards/rejected": -0.011428548023104668,
      "step": 420
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.4662207078575685e-07,
      "logits/chosen": 0.8645572662353516,
      "logits/rejected": 0.887597918510437,
      "logps/chosen": -83.41182708740234,
      "logps/rejected": -90.17640686035156,
      "loss": 0.6913,
      "rewards/accuracies": 0.39375001192092896,
      "rewards/chosen": -0.006618577986955643,
      "rewards/margins": 0.004637080244719982,
      "rewards/rejected": -0.011255658231675625,
      "step": 430
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.120948298936422e-08,
      "logits/chosen": 0.7825466394424438,
      "logits/rejected": 0.8328782916069031,
      "logps/chosen": -109.79942321777344,
      "logps/rejected": -82.07637023925781,
      "loss": 0.6911,
      "rewards/accuracies": 0.34375,
      "rewards/chosen": -0.005336672533303499,
      "rewards/margins": 0.005987245589494705,
      "rewards/rejected": -0.011323917657136917,
      "step": 440
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.870879364444109e-08,
      "logits/chosen": 0.774901270866394,
      "logits/rejected": 0.8499285578727722,
      "logps/chosen": -81.24304962158203,
      "logps/rejected": -71.88755798339844,
      "loss": 0.6911,
      "rewards/accuracies": 0.29374998807907104,
      "rewards/chosen": -0.005921828560531139,
      "rewards/margins": 0.0035197760444134474,
      "rewards/rejected": -0.00944160483777523,
      "step": 450
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.93478202307823e-08,
      "logits/chosen": 0.8049672842025757,
      "logits/rejected": 0.8570533990859985,
      "logps/chosen": -105.7918930053711,
      "logps/rejected": -87.53938293457031,
      "loss": 0.6907,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": -0.005085950251668692,
      "rewards/margins": 0.0068057505413889885,
      "rewards/rejected": -0.011891700327396393,
      "step": 460
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.283947088983663e-09,
      "logits/chosen": 0.8561135530471802,
      "logits/rejected": 0.8520036935806274,
      "logps/chosen": -115.6650390625,
      "logps/rejected": -110.88753509521484,
      "loss": 0.6903,
      "rewards/accuracies": 0.39375001192092896,
      "rewards/chosen": -0.006030657794326544,
      "rewards/margins": 0.00688832625746727,
      "rewards/rejected": -0.012918984517455101,
      "step": 470
    },
    {
      "epoch": 1.0,
      "step": 477,
      "total_flos": 0.0,
      "train_loss": 0.6920521804121805,
      "train_runtime": 6813.2628,
      "train_samples_per_second": 4.486,
      "train_steps_per_second": 0.07
    }
  ],
  "logging_steps": 10,
  "max_steps": 477,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}