|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9988571428571429, |
|
"eval_steps": 50, |
|
"global_step": 437, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.022857142857142857, |
|
"grad_norm": 3.2870738344316774, |
|
"learning_rate": 1.1363636363636363e-07, |
|
"logits/chosen": -2.700951099395752, |
|
"logits/rejected": -2.6252384185791016, |
|
"logps/chosen": -301.28485107421875, |
|
"logps/rejected": -281.7684020996094, |
|
"loss": 0.0556, |
|
"rewards/accuracies": 0.41874998807907104, |
|
"rewards/chosen": 1.3337824384507257e-05, |
|
"rewards/margins": 0.00020079524256289005, |
|
"rewards/rejected": -0.0001874573645181954, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.045714285714285714, |
|
"grad_norm": 2.5214503350239097, |
|
"learning_rate": 2.2727272727272726e-07, |
|
"logits/chosen": -2.641275405883789, |
|
"logits/rejected": -2.606044292449951, |
|
"logps/chosen": -278.9186706542969, |
|
"logps/rejected": -254.6912384033203, |
|
"loss": 0.0552, |
|
"rewards/accuracies": 0.550000011920929, |
|
"rewards/chosen": 0.002525148680433631, |
|
"rewards/margins": 0.0017468780279159546, |
|
"rewards/rejected": 0.0007782711763866246, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.06857142857142857, |
|
"grad_norm": 2.7331110185448306, |
|
"learning_rate": 3.4090909090909085e-07, |
|
"logits/chosen": -2.6379804611206055, |
|
"logits/rejected": -2.617119550704956, |
|
"logps/chosen": -263.29449462890625, |
|
"logps/rejected": -263.40509033203125, |
|
"loss": 0.0535, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": 0.01388559676706791, |
|
"rewards/margins": 0.008006090298295021, |
|
"rewards/rejected": 0.00587950786575675, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.09142857142857143, |
|
"grad_norm": 2.830903095609413, |
|
"learning_rate": 4.545454545454545e-07, |
|
"logits/chosen": -2.6505517959594727, |
|
"logits/rejected": -2.5873801708221436, |
|
"logps/chosen": -290.2440185546875, |
|
"logps/rejected": -268.03863525390625, |
|
"loss": 0.0502, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.037532415241003036, |
|
"rewards/margins": 0.04160536080598831, |
|
"rewards/rejected": -0.004072942305356264, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.11428571428571428, |
|
"grad_norm": 6.7770519006865815, |
|
"learning_rate": 4.997124959943201e-07, |
|
"logits/chosen": -2.6873106956481934, |
|
"logits/rejected": -2.6081948280334473, |
|
"logps/chosen": -290.84735107421875, |
|
"logps/rejected": -249.636474609375, |
|
"loss": 0.048, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.05462931841611862, |
|
"rewards/margins": 0.08235044032335281, |
|
"rewards/rejected": -0.027721121907234192, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.11428571428571428, |
|
"eval_logits/chosen": -2.552030086517334, |
|
"eval_logits/rejected": -2.4545059204101562, |
|
"eval_logps/chosen": -269.4535217285156, |
|
"eval_logps/rejected": -224.7078094482422, |
|
"eval_loss": 0.04283829778432846, |
|
"eval_rewards/accuracies": 0.7025862336158752, |
|
"eval_rewards/chosen": 0.06150422990322113, |
|
"eval_rewards/margins": 0.11782074719667435, |
|
"eval_rewards/rejected": -0.05631651729345322, |
|
"eval_runtime": 91.0985, |
|
"eval_samples_per_second": 20.099, |
|
"eval_steps_per_second": 0.318, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.13714285714285715, |
|
"grad_norm": 3.063883877478527, |
|
"learning_rate": 4.979579212164186e-07, |
|
"logits/chosen": -2.5849452018737793, |
|
"logits/rejected": -2.476766347885132, |
|
"logps/chosen": -280.53155517578125, |
|
"logps/rejected": -259.91339111328125, |
|
"loss": 0.0438, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.00025545060634613037, |
|
"rewards/margins": 0.11335714906454086, |
|
"rewards/rejected": -0.1136125922203064, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"grad_norm": 3.212195825091894, |
|
"learning_rate": 4.946196886175515e-07, |
|
"logits/chosen": -2.527914524078369, |
|
"logits/rejected": -2.468449354171753, |
|
"logps/chosen": -273.3289489746094, |
|
"logps/rejected": -270.74847412109375, |
|
"loss": 0.0414, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 0.024492865428328514, |
|
"rewards/margins": 0.1357099711894989, |
|
"rewards/rejected": -0.11121710389852524, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.18285714285714286, |
|
"grad_norm": 2.9871578323865613, |
|
"learning_rate": 4.897191188239667e-07, |
|
"logits/chosen": -2.435612678527832, |
|
"logits/rejected": -2.3714098930358887, |
|
"logps/chosen": -269.2018127441406, |
|
"logps/rejected": -276.71612548828125, |
|
"loss": 0.0424, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.02172762155532837, |
|
"rewards/margins": 0.19234482944011688, |
|
"rewards/rejected": -0.21407246589660645, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.2057142857142857, |
|
"grad_norm": 7.2824339333866, |
|
"learning_rate": 4.832875107981763e-07, |
|
"logits/chosen": -2.3872673511505127, |
|
"logits/rejected": -2.300445318222046, |
|
"logps/chosen": -275.88641357421875, |
|
"logps/rejected": -275.3648681640625, |
|
"loss": 0.0423, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": 0.003988614305853844, |
|
"rewards/margins": 0.20215804874897003, |
|
"rewards/rejected": -0.19816945493221283, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.22857142857142856, |
|
"grad_norm": 3.336291542294826, |
|
"learning_rate": 4.753659419387223e-07, |
|
"logits/chosen": -2.3031492233276367, |
|
"logits/rejected": -2.170569658279419, |
|
"logps/chosen": -289.19390869140625, |
|
"logps/rejected": -258.89019775390625, |
|
"loss": 0.0399, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.028075385838747025, |
|
"rewards/margins": 0.2263123095035553, |
|
"rewards/rejected": -0.25438767671585083, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.22857142857142856, |
|
"eval_logits/chosen": -2.1601011753082275, |
|
"eval_logits/rejected": -1.989274263381958, |
|
"eval_logps/chosen": -283.0706481933594, |
|
"eval_logps/rejected": -250.25137329101562, |
|
"eval_loss": 0.03850938379764557, |
|
"eval_rewards/accuracies": 0.75, |
|
"eval_rewards/chosen": -0.07466702908277512, |
|
"eval_rewards/margins": 0.23708537220954895, |
|
"eval_rewards/rejected": -0.3117523789405823, |
|
"eval_runtime": 91.3269, |
|
"eval_samples_per_second": 20.049, |
|
"eval_steps_per_second": 0.318, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.25142857142857145, |
|
"grad_norm": 3.177801599242846, |
|
"learning_rate": 4.660050057270191e-07, |
|
"logits/chosen": -2.190119504928589, |
|
"logits/rejected": -2.104891061782837, |
|
"logps/chosen": -322.0883483886719, |
|
"logps/rejected": -316.6219482421875, |
|
"loss": 0.0407, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.05784715339541435, |
|
"rewards/margins": 0.12927614152431488, |
|
"rewards/rejected": -0.18712328374385834, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.2742857142857143, |
|
"grad_norm": 3.162850908555599, |
|
"learning_rate": 4.5526448859687144e-07, |
|
"logits/chosen": -2.1028637886047363, |
|
"logits/rejected": -1.9295556545257568, |
|
"logps/chosen": -316.31634521484375, |
|
"logps/rejected": -253.72195434570312, |
|
"loss": 0.0376, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.05411834642291069, |
|
"rewards/margins": 0.16595926880836487, |
|
"rewards/rejected": -0.22007760405540466, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.29714285714285715, |
|
"grad_norm": 3.4487456432507195, |
|
"learning_rate": 4.432129880904388e-07, |
|
"logits/chosen": -2.1982297897338867, |
|
"logits/rejected": -2.1048200130462646, |
|
"logps/chosen": -314.8988037109375, |
|
"logps/rejected": -276.97674560546875, |
|
"loss": 0.0373, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.09742867946624756, |
|
"rewards/margins": 0.16763603687286377, |
|
"rewards/rejected": -0.26506468653678894, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"grad_norm": 3.9041569099317925, |
|
"learning_rate": 4.299274747394055e-07, |
|
"logits/chosen": -2.3467020988464355, |
|
"logits/rejected": -2.3200185298919678, |
|
"logps/chosen": -315.0827941894531, |
|
"logps/rejected": -290.0022277832031, |
|
"loss": 0.0375, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": 0.0036486200988292694, |
|
"rewards/margins": 0.18455657362937927, |
|
"rewards/rejected": -0.1809079647064209, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.34285714285714286, |
|
"grad_norm": 3.1721506985515817, |
|
"learning_rate": 4.1549280046953653e-07, |
|
"logits/chosen": -2.452073097229004, |
|
"logits/rejected": -2.371002674102783, |
|
"logps/chosen": -296.94329833984375, |
|
"logps/rejected": -307.01837158203125, |
|
"loss": 0.0367, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.09957350790500641, |
|
"rewards/margins": 0.2076205015182495, |
|
"rewards/rejected": -0.3071940243244171, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.34285714285714286, |
|
"eval_logits/chosen": -2.4217660427093506, |
|
"eval_logits/rejected": -2.3057165145874023, |
|
"eval_logps/chosen": -294.94464111328125, |
|
"eval_logps/rejected": -263.3892822265625, |
|
"eval_loss": 0.037056975066661835, |
|
"eval_rewards/accuracies": 0.767241358757019, |
|
"eval_rewards/chosen": -0.19340714812278748, |
|
"eval_rewards/margins": 0.24972420930862427, |
|
"eval_rewards/rejected": -0.44313138723373413, |
|
"eval_runtime": 91.4385, |
|
"eval_samples_per_second": 20.024, |
|
"eval_steps_per_second": 0.317, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.3657142857142857, |
|
"grad_norm": 3.2106332008237772, |
|
"learning_rate": 4.000011566683401e-07, |
|
"logits/chosen": -2.493586540222168, |
|
"logits/rejected": -2.4255142211914062, |
|
"logps/chosen": -316.421630859375, |
|
"logps/rejected": -294.204345703125, |
|
"loss": 0.0372, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.17081338167190552, |
|
"rewards/margins": 0.21480269730091095, |
|
"rewards/rejected": -0.38561612367630005, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.38857142857142857, |
|
"grad_norm": 3.305163626494215, |
|
"learning_rate": 3.8355148537705047e-07, |
|
"logits/chosen": -2.473874568939209, |
|
"logits/rejected": -2.4223687648773193, |
|
"logps/chosen": -309.713623046875, |
|
"logps/rejected": -291.6881103515625, |
|
"loss": 0.0373, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.056086935102939606, |
|
"rewards/margins": 0.2034837305545807, |
|
"rewards/rejected": -0.2595706582069397, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.4114285714285714, |
|
"grad_norm": 3.211306956770945, |
|
"learning_rate": 3.662488473675315e-07, |
|
"logits/chosen": -2.486910104751587, |
|
"logits/rejected": -2.3967292308807373, |
|
"logps/chosen": -334.092041015625, |
|
"logps/rejected": -292.355712890625, |
|
"loss": 0.0373, |
|
"rewards/accuracies": 0.793749988079071, |
|
"rewards/chosen": -0.03912293165922165, |
|
"rewards/margins": 0.2849285304546356, |
|
"rewards/rejected": -0.3240514397621155, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.4342857142857143, |
|
"grad_norm": 3.4199998150652564, |
|
"learning_rate": 3.48203751140067e-07, |
|
"logits/chosen": -2.507291078567505, |
|
"logits/rejected": -2.4220941066741943, |
|
"logps/chosen": -285.0407409667969, |
|
"logps/rejected": -261.77392578125, |
|
"loss": 0.0365, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.08783163130283356, |
|
"rewards/margins": 0.1849258691072464, |
|
"rewards/rejected": -0.27275753021240234, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.45714285714285713, |
|
"grad_norm": 2.966792213983748, |
|
"learning_rate": 3.2953144712759537e-07, |
|
"logits/chosen": -2.508540630340576, |
|
"logits/rejected": -2.376711130142212, |
|
"logps/chosen": -271.0993347167969, |
|
"logps/rejected": -254.6019744873047, |
|
"loss": 0.0375, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.02756611444056034, |
|
"rewards/margins": 0.2296477109193802, |
|
"rewards/rejected": -0.257213830947876, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.45714285714285713, |
|
"eval_logits/chosen": -2.290728807449341, |
|
"eval_logits/rejected": -2.1435999870300293, |
|
"eval_logps/chosen": -281.01300048828125, |
|
"eval_logps/rejected": -252.2786407470703, |
|
"eval_loss": 0.03527820482850075, |
|
"eval_rewards/accuracies": 0.767241358757019, |
|
"eval_rewards/chosen": -0.054090581834316254, |
|
"eval_rewards/margins": 0.2779344916343689, |
|
"eval_rewards/rejected": -0.33202511072158813, |
|
"eval_runtime": 91.8763, |
|
"eval_samples_per_second": 19.929, |
|
"eval_steps_per_second": 0.316, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"grad_norm": 3.030911456207702, |
|
"learning_rate": 3.103511916141658e-07, |
|
"logits/chosen": -2.2013955116271973, |
|
"logits/rejected": -2.0953996181488037, |
|
"logps/chosen": -276.37432861328125, |
|
"logps/rejected": -280.0777893066406, |
|
"loss": 0.0362, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.12920817732810974, |
|
"rewards/margins": 0.2187701165676117, |
|
"rewards/rejected": -0.3479783535003662, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.5028571428571429, |
|
"grad_norm": 3.521691949347102, |
|
"learning_rate": 2.9078548506882117e-07, |
|
"logits/chosen": -2.1590688228607178, |
|
"logits/rejected": -2.0675222873687744, |
|
"logps/chosen": -301.6694030761719, |
|
"logps/rejected": -281.8822326660156, |
|
"loss": 0.0387, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.16603410243988037, |
|
"rewards/margins": 0.1909538209438324, |
|
"rewards/rejected": -0.35698795318603516, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.5257142857142857, |
|
"grad_norm": 3.271109178578225, |
|
"learning_rate": 2.709592897595191e-07, |
|
"logits/chosen": -2.195312023162842, |
|
"logits/rejected": -2.095759391784668, |
|
"logps/chosen": -293.96038818359375, |
|
"logps/rejected": -271.3498229980469, |
|
"loss": 0.0366, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.1290125697851181, |
|
"rewards/margins": 0.19161902368068695, |
|
"rewards/rejected": -0.32063156366348267, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.5485714285714286, |
|
"grad_norm": 3.788405528579881, |
|
"learning_rate": 2.509992316440332e-07, |
|
"logits/chosen": -2.3450160026550293, |
|
"logits/rejected": -2.233682870864868, |
|
"logps/chosen": -311.3532409667969, |
|
"logps/rejected": -312.9554138183594, |
|
"loss": 0.0359, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.12643404304981232, |
|
"rewards/margins": 0.2889019250869751, |
|
"rewards/rejected": -0.4153359532356262, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.5714285714285714, |
|
"grad_norm": 2.975794895949038, |
|
"learning_rate": 2.3103279163519918e-07, |
|
"logits/chosen": -2.361924648284912, |
|
"logits/rejected": -2.305227041244507, |
|
"logps/chosen": -292.22430419921875, |
|
"logps/rejected": -304.44610595703125, |
|
"loss": 0.0371, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.11598072946071625, |
|
"rewards/margins": 0.22647562623023987, |
|
"rewards/rejected": -0.3424563705921173, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.5714285714285714, |
|
"eval_logits/chosen": -2.378539800643921, |
|
"eval_logits/rejected": -2.2614917755126953, |
|
"eval_logps/chosen": -283.72186279296875, |
|
"eval_logps/rejected": -254.03253173828125, |
|
"eval_loss": 0.0344076044857502, |
|
"eval_rewards/accuracies": 0.7629310488700867, |
|
"eval_rewards/chosen": -0.08117903023958206, |
|
"eval_rewards/margins": 0.2683848738670349, |
|
"eval_rewards/rejected": -0.3495638966560364, |
|
"eval_runtime": 90.3763, |
|
"eval_samples_per_second": 20.26, |
|
"eval_steps_per_second": 0.321, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.5942857142857143, |
|
"grad_norm": 3.4496793515012127, |
|
"learning_rate": 2.1118749140573358e-07, |
|
"logits/chosen": -2.378929615020752, |
|
"logits/rejected": -2.367784023284912, |
|
"logps/chosen": -292.9862365722656, |
|
"logps/rejected": -303.8517150878906, |
|
"loss": 0.0378, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.1319950819015503, |
|
"rewards/margins": 0.17309217154979706, |
|
"rewards/rejected": -0.30508726835250854, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.6171428571428571, |
|
"grad_norm": 3.309695226000979, |
|
"learning_rate": 1.9159007893272703e-07, |
|
"logits/chosen": -2.4140243530273438, |
|
"logits/rejected": -2.2664575576782227, |
|
"logps/chosen": -284.2572326660156, |
|
"logps/rejected": -267.2565612792969, |
|
"loss": 0.0344, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.11731357872486115, |
|
"rewards/margins": 0.2501775622367859, |
|
"rewards/rejected": -0.3674911856651306, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 3.6035910497695895, |
|
"learning_rate": 1.7236571898357766e-07, |
|
"logits/chosen": -2.3951687812805176, |
|
"logits/rejected": -2.2973971366882324, |
|
"logps/chosen": -286.1737365722656, |
|
"logps/rejected": -301.73150634765625, |
|
"loss": 0.0359, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.13135617971420288, |
|
"rewards/margins": 0.25489118695259094, |
|
"rewards/rejected": -0.38624733686447144, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.6628571428571428, |
|
"grad_norm": 4.1650025414210665, |
|
"learning_rate": 1.5363719371356882e-07, |
|
"logits/chosen": -2.3873696327209473, |
|
"logits/rejected": -2.2862212657928467, |
|
"logps/chosen": -316.511962890625, |
|
"logps/rejected": -304.41741943359375, |
|
"loss": 0.0349, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -0.15606102347373962, |
|
"rewards/margins": 0.2326282560825348, |
|
"rewards/rejected": -0.3886892795562744, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.6857142857142857, |
|
"grad_norm": 2.987221364071687, |
|
"learning_rate": 1.3552411848071565e-07, |
|
"logits/chosen": -2.387683391571045, |
|
"logits/rejected": -2.2113590240478516, |
|
"logps/chosen": -317.22491455078125, |
|
"logps/rejected": -293.2046813964844, |
|
"loss": 0.0345, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.1531573385000229, |
|
"rewards/margins": 0.25246819853782654, |
|
"rewards/rejected": -0.40562552213668823, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.6857142857142857, |
|
"eval_logits/chosen": -2.3475093841552734, |
|
"eval_logits/rejected": -2.2130320072174072, |
|
"eval_logps/chosen": -282.4234313964844, |
|
"eval_logps/rejected": -254.02647399902344, |
|
"eval_loss": 0.03413059934973717, |
|
"eval_rewards/accuracies": 0.7456896305084229, |
|
"eval_rewards/chosen": -0.06819500029087067, |
|
"eval_rewards/margins": 0.28130805492401123, |
|
"eval_rewards/rejected": -0.3495030105113983, |
|
"eval_runtime": 90.5949, |
|
"eval_samples_per_second": 20.211, |
|
"eval_steps_per_second": 0.32, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.7085714285714285, |
|
"grad_norm": 2.83762479667315, |
|
"learning_rate": 1.1814217788631473e-07, |
|
"logits/chosen": -2.3416621685028076, |
|
"logits/rejected": -2.2815122604370117, |
|
"logps/chosen": -267.0755310058594, |
|
"logps/rejected": -267.9870300292969, |
|
"loss": 0.0367, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.10771362483501434, |
|
"rewards/margins": 0.20020349323749542, |
|
"rewards/rejected": -0.30791711807250977, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.7314285714285714, |
|
"grad_norm": 3.3425045367761492, |
|
"learning_rate": 1.0160238692045331e-07, |
|
"logits/chosen": -2.3024659156799316, |
|
"logits/rejected": -2.2111096382141113, |
|
"logps/chosen": -257.63531494140625, |
|
"logps/rejected": -272.8826904296875, |
|
"loss": 0.0364, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.13352924585342407, |
|
"rewards/margins": 0.18541577458381653, |
|
"rewards/rejected": -0.3189450204372406, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.7542857142857143, |
|
"grad_norm": 2.769882287509293, |
|
"learning_rate": 8.601038193139438e-08, |
|
"logits/chosen": -2.4251229763031006, |
|
"logits/rejected": -2.29219388961792, |
|
"logps/chosen": -307.23333740234375, |
|
"logps/rejected": -280.0276794433594, |
|
"loss": 0.0348, |
|
"rewards/accuracies": 0.78125, |
|
"rewards/chosen": -0.08015141636133194, |
|
"rewards/margins": 0.24224629998207092, |
|
"rewards/rejected": -0.3223976790904999, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.7771428571428571, |
|
"grad_norm": 3.0959592695193763, |
|
"learning_rate": 7.146574594727572e-08, |
|
"logits/chosen": -2.3518755435943604, |
|
"logits/rejected": -2.3008193969726562, |
|
"logps/chosen": -277.1438293457031, |
|
"logps/rejected": -279.57427978515625, |
|
"loss": 0.0353, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -0.11192858219146729, |
|
"rewards/margins": 0.2635765075683594, |
|
"rewards/rejected": -0.3755050301551819, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 3.411772564497867, |
|
"learning_rate": 5.8061372659157306e-08, |
|
"logits/chosen": -2.4039082527160645, |
|
"logits/rejected": -2.2675061225891113, |
|
"logps/chosen": -300.5389099121094, |
|
"logps/rejected": -285.20794677734375, |
|
"loss": 0.0373, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -0.10831709951162338, |
|
"rewards/margins": 0.20078572630882263, |
|
"rewards/rejected": -0.3091028332710266, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"eval_logits/chosen": -2.3320775032043457, |
|
"eval_logits/rejected": -2.178847312927246, |
|
"eval_logps/chosen": -284.68194580078125, |
|
"eval_logps/rejected": -257.5619201660156, |
|
"eval_loss": 0.03409451246261597, |
|
"eval_rewards/accuracies": 0.7413793206214905, |
|
"eval_rewards/chosen": -0.09078007191419601, |
|
"eval_rewards/margins": 0.2940778136253357, |
|
"eval_rewards/rejected": -0.38485783338546753, |
|
"eval_runtime": 90.2939, |
|
"eval_samples_per_second": 20.278, |
|
"eval_steps_per_second": 0.321, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.8228571428571428, |
|
"grad_norm": 2.905033636374422, |
|
"learning_rate": 4.5882873127531614e-08, |
|
"logits/chosen": -2.4123928546905518, |
|
"logits/rejected": -2.2830798625946045, |
|
"logps/chosen": -295.31842041015625, |
|
"logps/rejected": -286.4782409667969, |
|
"loss": 0.0342, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -0.09913380444049835, |
|
"rewards/margins": 0.25478315353393555, |
|
"rewards/rejected": -0.3539169430732727, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.8457142857142858, |
|
"grad_norm": 3.0252642177231617, |
|
"learning_rate": 3.500802900154412e-08, |
|
"logits/chosen": -2.3916738033294678, |
|
"logits/rejected": -2.244600296020508, |
|
"logps/chosen": -278.41217041015625, |
|
"logps/rejected": -273.110595703125, |
|
"loss": 0.034, |
|
"rewards/accuracies": 0.78125, |
|
"rewards/chosen": -0.07350718975067139, |
|
"rewards/margins": 0.273443341255188, |
|
"rewards/rejected": -0.346950501203537, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.8685714285714285, |
|
"grad_norm": 3.1477774029837864, |
|
"learning_rate": 2.550629574310309e-08, |
|
"logits/chosen": -2.4462573528289795, |
|
"logits/rejected": -2.309199094772339, |
|
"logps/chosen": -329.63348388671875, |
|
"logps/rejected": -289.4853515625, |
|
"loss": 0.0352, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.11577162891626358, |
|
"rewards/margins": 0.20508745312690735, |
|
"rewards/rejected": -0.32085904479026794, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.8914285714285715, |
|
"grad_norm": 3.5873354511870312, |
|
"learning_rate": 1.7438359028687983e-08, |
|
"logits/chosen": -2.403637170791626, |
|
"logits/rejected": -2.321964740753174, |
|
"logps/chosen": -318.7025146484375, |
|
"logps/rejected": -324.0418395996094, |
|
"loss": 0.0358, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.07329531759023666, |
|
"rewards/margins": 0.21939659118652344, |
|
"rewards/rejected": -0.2926918864250183, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.9142857142857143, |
|
"grad_norm": 5.030788992854401, |
|
"learning_rate": 1.0855747162029361e-08, |
|
"logits/chosen": -2.3374335765838623, |
|
"logits/rejected": -2.3087658882141113, |
|
"logps/chosen": -292.0116271972656, |
|
"logps/rejected": -298.6130065917969, |
|
"loss": 0.0367, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.13278374075889587, |
|
"rewards/margins": 0.18760989606380463, |
|
"rewards/rejected": -0.3203936815261841, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.9142857142857143, |
|
"eval_logits/chosen": -2.3139522075653076, |
|
"eval_logits/rejected": -2.1562979221343994, |
|
"eval_logps/chosen": -285.25201416015625, |
|
"eval_logps/rejected": -258.45458984375, |
|
"eval_loss": 0.034024760127067566, |
|
"eval_rewards/accuracies": 0.7413793206214905, |
|
"eval_rewards/chosen": -0.09648066759109497, |
|
"eval_rewards/margins": 0.2973036468029022, |
|
"eval_rewards/rejected": -0.3937843143939972, |
|
"eval_runtime": 90.5725, |
|
"eval_samples_per_second": 20.216, |
|
"eval_steps_per_second": 0.32, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.9371428571428572, |
|
"grad_norm": 3.2590392659418335, |
|
"learning_rate": 5.8005019731033615e-09, |
|
"logits/chosen": -2.3900809288024902, |
|
"logits/rejected": -2.28753924369812, |
|
"logps/chosen": -301.38275146484375, |
|
"logps/rejected": -294.66497802734375, |
|
"loss": 0.0352, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.13202829658985138, |
|
"rewards/margins": 0.21732142567634583, |
|
"rewards/rejected": -0.349349707365036, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 3.0742600085581127, |
|
"learning_rate": 2.3049103053431886e-09, |
|
"logits/chosen": -2.326852798461914, |
|
"logits/rejected": -2.221895694732666, |
|
"logps/chosen": -288.41644287109375, |
|
"logps/rejected": -271.6980895996094, |
|
"loss": 0.0345, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.056683529168367386, |
|
"rewards/margins": 0.32042860984802246, |
|
"rewards/rejected": -0.37711212038993835, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.9828571428571429, |
|
"grad_norm": 3.069144533981935, |
|
"learning_rate": 3.9129780600541397e-10, |
|
"logits/chosen": -2.312039613723755, |
|
"logits/rejected": -2.213510751724243, |
|
"logps/chosen": -293.47271728515625, |
|
"logps/rejected": -299.9048156738281, |
|
"loss": 0.0344, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.11240877956151962, |
|
"rewards/margins": 0.24994345009326935, |
|
"rewards/rejected": -0.3623522222042084, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.9988571428571429, |
|
"step": 437, |
|
"total_flos": 0.0, |
|
"train_loss": 0.038760803656938146, |
|
"train_runtime": 11420.4923, |
|
"train_samples_per_second": 4.903, |
|
"train_steps_per_second": 0.038 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 437, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 100, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |