{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9988571428571429, |
|
"eval_steps": 50, |
|
"global_step": 437, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.022857142857142857, |
|
"grad_norm": 3.2784174727745485, |
|
"learning_rate": 1.1363636363636363e-07, |
|
"logits/chosen": -2.700894832611084, |
|
"logits/rejected": -2.625084400177002, |
|
"logps/chosen": -301.3052062988281, |
|
"logps/rejected": -281.75274658203125, |
|
"loss": 0.0557, |
|
"rewards/accuracies": 0.4312500059604645, |
|
"rewards/chosen": -0.00019026221707463264, |
|
"rewards/margins": -0.00015981406613718718, |
|
"rewards/rejected": -3.0448136385530233e-05, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.045714285714285714, |
|
"grad_norm": 2.5255601771436815, |
|
"learning_rate": 2.2727272727272726e-07, |
|
"logits/chosen": -2.641096830368042, |
|
"logits/rejected": -2.6057686805725098, |
|
"logps/chosen": -278.879638671875, |
|
"logps/rejected": -254.6970672607422, |
|
"loss": 0.0553, |
|
"rewards/accuracies": 0.5625, |
|
"rewards/chosen": 0.002915444318205118, |
|
"rewards/margins": 0.002195452805608511, |
|
"rewards/rejected": 0.0007199913961812854, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.06857142857142857, |
|
"grad_norm": 2.762527455327235, |
|
"learning_rate": 3.4090909090909085e-07, |
|
"logits/chosen": -2.6387953758239746, |
|
"logits/rejected": -2.6177260875701904, |
|
"logps/chosen": -263.30303955078125, |
|
"logps/rejected": -263.4232177734375, |
|
"loss": 0.0535, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": 0.013800328597426414, |
|
"rewards/margins": 0.008101832121610641, |
|
"rewards/rejected": 0.005698497407138348, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.09142857142857143, |
|
"grad_norm": 2.8469620893370813, |
|
"learning_rate": 4.545454545454545e-07, |
|
"logits/chosen": -2.6508777141571045, |
|
"logits/rejected": -2.5878684520721436, |
|
"logps/chosen": -290.2543640136719, |
|
"logps/rejected": -268.0738830566406, |
|
"loss": 0.0502, |
|
"rewards/accuracies": 0.581250011920929, |
|
"rewards/chosen": 0.03742903843522072, |
|
"rewards/margins": 0.04185490682721138, |
|
"rewards/rejected": -0.004425862338393927, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.11428571428571428, |
|
"grad_norm": 7.044347610102239, |
|
"learning_rate": 4.997124959943201e-07, |
|
"logits/chosen": -2.68748140335083, |
|
"logits/rejected": -2.6086513996124268, |
|
"logps/chosen": -290.8190612792969, |
|
"logps/rejected": -249.57925415039062, |
|
"loss": 0.048, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 0.05491219088435173, |
|
"rewards/margins": 0.08206149935722351, |
|
"rewards/rejected": -0.02714930847287178, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.11428571428571428, |
|
"eval_logits/chosen": -2.55232834815979, |
|
"eval_logits/rejected": -2.4544785022735596, |
|
"eval_logps/chosen": -269.3974304199219, |
|
"eval_logps/rejected": -224.57150268554688, |
|
"eval_loss": 0.04283786565065384, |
|
"eval_rewards/accuracies": 0.7025862336158752, |
|
"eval_rewards/chosen": 0.06206508353352547, |
|
"eval_rewards/margins": 0.11701909452676773, |
|
"eval_rewards/rejected": -0.054954010993242264, |
|
"eval_runtime": 91.1445, |
|
"eval_samples_per_second": 20.089, |
|
"eval_steps_per_second": 0.318, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.13714285714285715, |
|
"grad_norm": 3.0003693671132106, |
|
"learning_rate": 4.979579212164186e-07, |
|
"logits/chosen": -2.5842273235321045, |
|
"logits/rejected": -2.476365804672241, |
|
"logps/chosen": -280.49664306640625, |
|
"logps/rejected": -259.8401794433594, |
|
"loss": 0.0438, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 9.337197843706235e-05, |
|
"rewards/margins": 0.11297394335269928, |
|
"rewards/rejected": -0.11288057267665863, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"grad_norm": 3.2128849852724057, |
|
"learning_rate": 4.946196886175515e-07, |
|
"logits/chosen": -2.5273566246032715, |
|
"logits/rejected": -2.468844175338745, |
|
"logps/chosen": -273.30010986328125, |
|
"logps/rejected": -270.6748962402344, |
|
"loss": 0.0415, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.024781424552202225, |
|
"rewards/margins": 0.13526295125484467, |
|
"rewards/rejected": -0.11048151552677155, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.18285714285714286, |
|
"grad_norm": 2.993790876710757, |
|
"learning_rate": 4.897191188239667e-07, |
|
"logits/chosen": -2.4337241649627686, |
|
"logits/rejected": -2.369915008544922, |
|
"logps/chosen": -269.41571044921875, |
|
"logps/rejected": -276.94500732421875, |
|
"loss": 0.0424, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.023866862058639526, |
|
"rewards/margins": 0.1924942582845688, |
|
"rewards/rejected": -0.2163611203432083, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.2057142857142857, |
|
"grad_norm": 7.3103992020217285, |
|
"learning_rate": 4.832875107981763e-07, |
|
"logits/chosen": -2.3889706134796143, |
|
"logits/rejected": -2.3026340007781982, |
|
"logps/chosen": -275.86724853515625, |
|
"logps/rejected": -275.403076171875, |
|
"loss": 0.0422, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": 0.004180185496807098, |
|
"rewards/margins": 0.20273157954216003, |
|
"rewards/rejected": -0.19855138659477234, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.22857142857142856, |
|
"grad_norm": 3.5295110150949056, |
|
"learning_rate": 4.753659419387223e-07, |
|
"logits/chosen": -2.3000099658966064, |
|
"logits/rejected": -2.165864944458008, |
|
"logps/chosen": -289.35467529296875, |
|
"logps/rejected": -259.2148742675781, |
|
"loss": 0.04, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.029682844877243042, |
|
"rewards/margins": 0.22795169055461884, |
|
"rewards/rejected": -0.25763455033302307, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.22857142857142856, |
|
"eval_logits/chosen": -2.1305601596832275, |
|
"eval_logits/rejected": -1.9564552307128906, |
|
"eval_logps/chosen": -283.3714904785156, |
|
"eval_logps/rejected": -250.93673706054688, |
|
"eval_loss": 0.03854820877313614, |
|
"eval_rewards/accuracies": 0.75, |
|
"eval_rewards/chosen": -0.07767525315284729, |
|
"eval_rewards/margins": 0.2409307211637497, |
|
"eval_rewards/rejected": -0.31860601902008057, |
|
"eval_runtime": 91.3911, |
|
"eval_samples_per_second": 20.035, |
|
"eval_steps_per_second": 0.317, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.25142857142857145, |
|
"grad_norm": 3.082269192831021, |
|
"learning_rate": 4.660050057270191e-07, |
|
"logits/chosen": -2.183840036392212, |
|
"logits/rejected": -2.098686456680298, |
|
"logps/chosen": -322.04498291015625, |
|
"logps/rejected": -316.5348205566406, |
|
"loss": 0.0406, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.05741389840841293, |
|
"rewards/margins": 0.1288377195596695, |
|
"rewards/rejected": -0.18625161051750183, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.2742857142857143, |
|
"grad_norm": 3.1619647572029415, |
|
"learning_rate": 4.5526448859687144e-07, |
|
"logits/chosen": -2.122063159942627, |
|
"logits/rejected": -1.943372130393982, |
|
"logps/chosen": -315.7963562011719, |
|
"logps/rejected": -253.3599395751953, |
|
"loss": 0.0377, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.048919014632701874, |
|
"rewards/margins": 0.1675383597612381, |
|
"rewards/rejected": -0.21645736694335938, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.29714285714285715, |
|
"grad_norm": 3.421686175984915, |
|
"learning_rate": 4.432129880904388e-07, |
|
"logits/chosen": -2.2705729007720947, |
|
"logits/rejected": -2.1812658309936523, |
|
"logps/chosen": -313.17596435546875, |
|
"logps/rejected": -274.95806884765625, |
|
"loss": 0.0371, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.08020049333572388, |
|
"rewards/margins": 0.16467759013175964, |
|
"rewards/rejected": -0.24487809836864471, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"grad_norm": 3.9283597913931922, |
|
"learning_rate": 4.299274747394055e-07, |
|
"logits/chosen": -2.398083448410034, |
|
"logits/rejected": -2.3765132427215576, |
|
"logps/chosen": -314.16827392578125, |
|
"logps/rejected": -288.8768310546875, |
|
"loss": 0.0375, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.012793675065040588, |
|
"rewards/margins": 0.18244774639606476, |
|
"rewards/rejected": -0.16965410113334656, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.34285714285714286, |
|
"grad_norm": 3.018006960325513, |
|
"learning_rate": 4.1549280046953653e-07, |
|
"logits/chosen": -2.4656407833099365, |
|
"logits/rejected": -2.367140531539917, |
|
"logps/chosen": -295.70556640625, |
|
"logps/rejected": -305.4766845703125, |
|
"loss": 0.0363, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.08719640970230103, |
|
"rewards/margins": 0.204580619931221, |
|
"rewards/rejected": -0.2917770445346832, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.34285714285714286, |
|
"eval_logits/chosen": -2.3440675735473633, |
|
"eval_logits/rejected": -2.195537805557251, |
|
"eval_logps/chosen": -296.12109375, |
|
"eval_logps/rejected": -265.0228271484375, |
|
"eval_loss": 0.03708173334598541, |
|
"eval_rewards/accuracies": 0.7543103694915771, |
|
"eval_rewards/chosen": -0.20517148077487946, |
|
"eval_rewards/margins": 0.25429537892341614, |
|
"eval_rewards/rejected": -0.4594668447971344, |
|
"eval_runtime": 91.4169, |
|
"eval_samples_per_second": 20.029, |
|
"eval_steps_per_second": 0.317, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.3657142857142857, |
|
"grad_norm": 3.319581864694512, |
|
"learning_rate": 4.000011566683401e-07, |
|
"logits/chosen": -2.376082181930542, |
|
"logits/rejected": -2.2733752727508545, |
|
"logps/chosen": -320.1603088378906, |
|
"logps/rejected": -300.1729431152344, |
|
"loss": 0.0377, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.20820029079914093, |
|
"rewards/margins": 0.23710182309150696, |
|
"rewards/rejected": -0.4453020989894867, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.38857142857142857, |
|
"grad_norm": 3.33265699836665, |
|
"learning_rate": 3.8355148537705047e-07, |
|
"logits/chosen": -2.5347208976745605, |
|
"logits/rejected": -2.48330020904541, |
|
"logps/chosen": -312.109619140625, |
|
"logps/rejected": -294.4687805175781, |
|
"loss": 0.0377, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.08004748821258545, |
|
"rewards/margins": 0.2073298990726471, |
|
"rewards/rejected": -0.28737738728523254, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.4114285714285714, |
|
"grad_norm": 2.9907171954669534, |
|
"learning_rate": 3.662488473675315e-07, |
|
"logits/chosen": -2.622641086578369, |
|
"logits/rejected": -2.556631565093994, |
|
"logps/chosen": -332.7588806152344, |
|
"logps/rejected": -290.630859375, |
|
"loss": 0.0373, |
|
"rewards/accuracies": 0.8062499761581421, |
|
"rewards/chosen": -0.02579095959663391, |
|
"rewards/margins": 0.2810118794441223, |
|
"rewards/rejected": -0.3068028390407562, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.4342857142857143, |
|
"grad_norm": 3.4010102454112205, |
|
"learning_rate": 3.48203751140067e-07, |
|
"logits/chosen": -2.642368793487549, |
|
"logits/rejected": -2.5668444633483887, |
|
"logps/chosen": -286.96087646484375, |
|
"logps/rejected": -263.45306396484375, |
|
"loss": 0.0366, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.10703293979167938, |
|
"rewards/margins": 0.1825161725282669, |
|
"rewards/rejected": -0.2895490825176239, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.45714285714285713, |
|
"grad_norm": 2.9611644003699062, |
|
"learning_rate": 3.2953144712759537e-07, |
|
"logits/chosen": -2.651844024658203, |
|
"logits/rejected": -2.5362985134124756, |
|
"logps/chosen": -272.91973876953125, |
|
"logps/rejected": -257.00128173828125, |
|
"loss": 0.0373, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.04576968401670456, |
|
"rewards/margins": 0.23543760180473328, |
|
"rewards/rejected": -0.28120729327201843, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.45714285714285713, |
|
"eval_logits/chosen": -2.4903368949890137, |
|
"eval_logits/rejected": -2.384784460067749, |
|
"eval_logps/chosen": -280.1239318847656, |
|
"eval_logps/rejected": -251.76303100585938, |
|
"eval_loss": 0.035302262753248215, |
|
"eval_rewards/accuracies": 0.7715517282485962, |
|
"eval_rewards/chosen": -0.04520006850361824, |
|
"eval_rewards/margins": 0.28166908025741577, |
|
"eval_rewards/rejected": -0.3268691301345825, |
|
"eval_runtime": 91.7563, |
|
"eval_samples_per_second": 19.955, |
|
"eval_steps_per_second": 0.316, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"grad_norm": 2.9976100938955375, |
|
"learning_rate": 3.103511916141658e-07, |
|
"logits/chosen": -2.4521584510803223, |
|
"logits/rejected": -2.38016939163208, |
|
"logps/chosen": -272.8417053222656, |
|
"logps/rejected": -276.30755615234375, |
|
"loss": 0.0362, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.09388158470392227, |
|
"rewards/margins": 0.21639461815357208, |
|
"rewards/rejected": -0.31027621030807495, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.5028571428571429, |
|
"grad_norm": 3.3697037289842515, |
|
"learning_rate": 2.9078548506882117e-07, |
|
"logits/chosen": -2.486403226852417, |
|
"logits/rejected": -2.43949556350708, |
|
"logps/chosen": -299.24713134765625, |
|
"logps/rejected": -279.8057556152344, |
|
"loss": 0.0389, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.14181171357631683, |
|
"rewards/margins": 0.19441165030002594, |
|
"rewards/rejected": -0.3362233638763428, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.5257142857142857, |
|
"grad_norm": 3.1432532704884797, |
|
"learning_rate": 2.709592897595191e-07, |
|
"logits/chosen": -2.507004499435425, |
|
"logits/rejected": -2.437407970428467, |
|
"logps/chosen": -295.362060546875, |
|
"logps/rejected": -273.03448486328125, |
|
"loss": 0.0364, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.1430293470621109, |
|
"rewards/margins": 0.19444888830184937, |
|
"rewards/rejected": -0.3374782204627991, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.5485714285714286, |
|
"grad_norm": 3.601368929942566, |
|
"learning_rate": 2.509992316440332e-07, |
|
"logits/chosen": -2.5563902854919434, |
|
"logits/rejected": -2.469142436981201, |
|
"logps/chosen": -310.55108642578125, |
|
"logps/rejected": -312.4164733886719, |
|
"loss": 0.0359, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.1184125691652298, |
|
"rewards/margins": 0.2915334701538086, |
|
"rewards/rejected": -0.4099460542201996, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.5714285714285714, |
|
"grad_norm": 2.959775188259923, |
|
"learning_rate": 2.3103279163519918e-07, |
|
"logits/chosen": -2.5154330730438232, |
|
"logits/rejected": -2.477905750274658, |
|
"logps/chosen": -291.4244689941406, |
|
"logps/rejected": -302.8068542480469, |
|
"loss": 0.0374, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.1079820767045021, |
|
"rewards/margins": 0.21808166801929474, |
|
"rewards/rejected": -0.32606372237205505, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.5714285714285714, |
|
"eval_logits/chosen": -2.5244851112365723, |
|
"eval_logits/rejected": -2.4306986331939697, |
|
"eval_logps/chosen": -283.6198425292969, |
|
"eval_logps/rejected": -253.70823669433594, |
|
"eval_loss": 0.034400373697280884, |
|
"eval_rewards/accuracies": 0.75, |
|
"eval_rewards/chosen": -0.08015903830528259, |
|
"eval_rewards/margins": 0.26616212725639343, |
|
"eval_rewards/rejected": -0.346321165561676, |
|
"eval_runtime": 91.0071, |
|
"eval_samples_per_second": 20.119, |
|
"eval_steps_per_second": 0.319, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.5942857142857143, |
|
"grad_norm": 3.439430942929606, |
|
"learning_rate": 2.1118749140573358e-07, |
|
"logits/chosen": -2.5266551971435547, |
|
"logits/rejected": -2.515558958053589, |
|
"logps/chosen": -293.00262451171875, |
|
"logps/rejected": -303.7609558105469, |
|
"loss": 0.038, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.13215936720371246, |
|
"rewards/margins": 0.17202024161815643, |
|
"rewards/rejected": -0.3041796088218689, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.6171428571428571, |
|
"grad_norm": 3.3473957187400707, |
|
"learning_rate": 1.9159007893272703e-07, |
|
"logits/chosen": -2.563744068145752, |
|
"logits/rejected": -2.4393467903137207, |
|
"logps/chosen": -282.9759216308594, |
|
"logps/rejected": -266.39312744140625, |
|
"loss": 0.0343, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.10450073331594467, |
|
"rewards/margins": 0.2543557584285736, |
|
"rewards/rejected": -0.3588564991950989, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 3.487492966730279, |
|
"learning_rate": 1.7236571898357766e-07, |
|
"logits/chosen": -2.576411724090576, |
|
"logits/rejected": -2.5091185569763184, |
|
"logps/chosen": -281.6929931640625, |
|
"logps/rejected": -297.8419189453125, |
|
"loss": 0.036, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.08654884248971939, |
|
"rewards/margins": 0.2608022391796112, |
|
"rewards/rejected": -0.34735107421875, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.6628571428571428, |
|
"grad_norm": 3.951421197104747, |
|
"learning_rate": 1.5363719371356882e-07, |
|
"logits/chosen": -2.622135877609253, |
|
"logits/rejected": -2.544238328933716, |
|
"logps/chosen": -311.6895751953125, |
|
"logps/rejected": -299.3029479980469, |
|
"loss": 0.0349, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -0.10783736407756805, |
|
"rewards/margins": 0.2297072857618332, |
|
"rewards/rejected": -0.33754467964172363, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.6857142857142857, |
|
"grad_norm": 3.035652974254887, |
|
"learning_rate": 1.3552411848071565e-07, |
|
"logits/chosen": -2.646151304244995, |
|
"logits/rejected": -2.5031981468200684, |
|
"logps/chosen": -313.28375244140625, |
|
"logps/rejected": -289.15740966796875, |
|
"loss": 0.0346, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -0.11374591290950775, |
|
"rewards/margins": 0.25140708684921265, |
|
"rewards/rejected": -0.3651530146598816, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.6857142857142857, |
|
"eval_logits/chosen": -2.5812017917633057, |
|
"eval_logits/rejected": -2.479748487472534, |
|
"eval_logps/chosen": -279.3269958496094, |
|
"eval_logps/rejected": -251.0284881591797, |
|
"eval_loss": 0.03423487767577171, |
|
"eval_rewards/accuracies": 0.7456896305084229, |
|
"eval_rewards/chosen": -0.03723056614398956, |
|
"eval_rewards/margins": 0.2822931110858917, |
|
"eval_rewards/rejected": -0.3195236325263977, |
|
"eval_runtime": 90.8627, |
|
"eval_samples_per_second": 20.151, |
|
"eval_steps_per_second": 0.319, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.7085714285714285, |
|
"grad_norm": 2.850744846118877, |
|
"learning_rate": 1.1814217788631473e-07, |
|
"logits/chosen": -2.6023502349853516, |
|
"logits/rejected": -2.559641122817993, |
|
"logps/chosen": -263.7620849609375, |
|
"logps/rejected": -264.8592224121094, |
|
"loss": 0.0369, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.07457949221134186, |
|
"rewards/margins": 0.20205926895141602, |
|
"rewards/rejected": -0.27663877606391907, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.7314285714285714, |
|
"grad_norm": 3.4445308568499575, |
|
"learning_rate": 1.0160238692045331e-07, |
|
"logits/chosen": -2.5764858722686768, |
|
"logits/rejected": -2.4977431297302246, |
|
"logps/chosen": -255.32528686523438, |
|
"logps/rejected": -270.0458984375, |
|
"loss": 0.0364, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.11042918264865875, |
|
"rewards/margins": 0.18014779686927795, |
|
"rewards/rejected": -0.2905769944190979, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.7542857142857143, |
|
"grad_norm": 2.7891304584846908, |
|
"learning_rate": 8.601038193139438e-08, |
|
"logits/chosen": -2.669182300567627, |
|
"logits/rejected": -2.559919595718384, |
|
"logps/chosen": -304.69049072265625, |
|
"logps/rejected": -277.16827392578125, |
|
"loss": 0.0348, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.05472307652235031, |
|
"rewards/margins": 0.23908019065856934, |
|
"rewards/rejected": -0.29380327463150024, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.7771428571428571, |
|
"grad_norm": 3.0804434627972372, |
|
"learning_rate": 7.146574594727572e-08, |
|
"logits/chosen": -2.5831470489501953, |
|
"logits/rejected": -2.54848051071167, |
|
"logps/chosen": -275.44757080078125, |
|
"logps/rejected": -277.88311767578125, |
|
"loss": 0.0352, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.09496594220399857, |
|
"rewards/margins": 0.2636275291442871, |
|
"rewards/rejected": -0.3585934638977051, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 3.3935330484921606, |
|
"learning_rate": 5.8061372659157306e-08, |
|
"logits/chosen": -2.6102333068847656, |
|
"logits/rejected": -2.5170912742614746, |
|
"logps/chosen": -299.60601806640625, |
|
"logps/rejected": -284.296630859375, |
|
"loss": 0.0375, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.09898806363344193, |
|
"rewards/margins": 0.20100173354148865, |
|
"rewards/rejected": -0.29998978972435, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"eval_logits/chosen": -2.5560529232025146, |
|
"eval_logits/rejected": -2.447376012802124, |
|
"eval_logps/chosen": -283.43243408203125, |
|
"eval_logps/rejected": -256.5389099121094, |
|
"eval_loss": 0.03421488776803017, |
|
"eval_rewards/accuracies": 0.7413793206214905, |
|
"eval_rewards/chosen": -0.07828506827354431, |
|
"eval_rewards/margins": 0.29634276032447815, |
|
"eval_rewards/rejected": -0.37462782859802246, |
|
"eval_runtime": 90.9316, |
|
"eval_samples_per_second": 20.136, |
|
"eval_steps_per_second": 0.319, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.8228571428571428, |
|
"grad_norm": 2.9199488201620456, |
|
"learning_rate": 4.5882873127531614e-08, |
|
"logits/chosen": -2.6238226890563965, |
|
"logits/rejected": -2.5243430137634277, |
|
"logps/chosen": -294.04046630859375, |
|
"logps/rejected": -285.36041259765625, |
|
"loss": 0.0342, |
|
"rewards/accuracies": 0.8062499761581421, |
|
"rewards/chosen": -0.08635450899600983, |
|
"rewards/margins": 0.2563842833042145, |
|
"rewards/rejected": -0.3427388072013855, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.8457142857142858, |
|
"grad_norm": 3.0306421043965925, |
|
"learning_rate": 3.500802900154412e-08, |
|
"logits/chosen": -2.61911940574646, |
|
"logits/rejected": -2.5165693759918213, |
|
"logps/chosen": -277.0403137207031, |
|
"logps/rejected": -271.5109558105469, |
|
"loss": 0.034, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.05978885293006897, |
|
"rewards/margins": 0.2711651027202606, |
|
"rewards/rejected": -0.330953985452652, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.8685714285714285, |
|
"grad_norm": 3.142763842399202, |
|
"learning_rate": 2.550629574310309e-08, |
|
"logits/chosen": -2.68263578414917, |
|
"logits/rejected": -2.584908962249756, |
|
"logps/chosen": -327.3045959472656, |
|
"logps/rejected": -287.1323547363281, |
|
"loss": 0.0352, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.09248252213001251, |
|
"rewards/margins": 0.20484676957130432, |
|
"rewards/rejected": -0.29732924699783325, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.8914285714285715, |
|
"grad_norm": 3.6385746368012355, |
|
"learning_rate": 1.7438359028687983e-08, |
|
"logits/chosen": -2.6608521938323975, |
|
"logits/rejected": -2.607428789138794, |
|
"logps/chosen": -316.05438232421875, |
|
"logps/rejected": -321.3371887207031, |
|
"loss": 0.0359, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.04681408405303955, |
|
"rewards/margins": 0.2188311070203781, |
|
"rewards/rejected": -0.26564517617225647, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.9142857142857143, |
|
"grad_norm": 5.02358291913839, |
|
"learning_rate": 1.0855747162029361e-08, |
|
"logits/chosen": -2.590088367462158, |
|
"logits/rejected": -2.570838451385498, |
|
"logps/chosen": -289.6832580566406, |
|
"logps/rejected": -296.08538818359375, |
|
"loss": 0.0367, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.10950014740228653, |
|
"rewards/margins": 0.18561770021915436, |
|
"rewards/rejected": -0.2951178550720215, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.9142857142857143, |
|
"eval_logits/chosen": -2.561089277267456, |
|
"eval_logits/rejected": -2.4522511959075928, |
|
"eval_logps/chosen": -282.986083984375, |
|
"eval_logps/rejected": -256.24615478515625, |
|
"eval_loss": 0.03414054214954376, |
|
"eval_rewards/accuracies": 0.7413793206214905, |
|
"eval_rewards/chosen": -0.07382136583328247, |
|
"eval_rewards/margins": 0.2978789508342743, |
|
"eval_rewards/rejected": -0.3717002868652344, |
|
"eval_runtime": 90.9735, |
|
"eval_samples_per_second": 20.127, |
|
"eval_steps_per_second": 0.319, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.9371428571428572, |
|
"grad_norm": 3.2423722180060786, |
|
"learning_rate": 5.8005019731033615e-09, |
|
"logits/chosen": -2.6259548664093018, |
|
"logits/rejected": -2.5545239448547363, |
|
"logps/chosen": -299.16107177734375, |
|
"logps/rejected": -292.75714111328125, |
|
"loss": 0.0352, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.10981108993291855, |
|
"rewards/margins": 0.2204599827528, |
|
"rewards/rejected": -0.3302710950374603, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 3.0950678320925222, |
|
"learning_rate": 2.3049103053431886e-09, |
|
"logits/chosen": -2.5695414543151855, |
|
"logits/rejected": -2.504147529602051, |
|
"logps/chosen": -286.32177734375, |
|
"logps/rejected": -269.1705627441406, |
|
"loss": 0.0345, |
|
"rewards/accuracies": 0.8062499761581421, |
|
"rewards/chosen": -0.03573674336075783, |
|
"rewards/margins": 0.31610041856765747, |
|
"rewards/rejected": -0.351837158203125, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.9828571428571429, |
|
"grad_norm": 2.978033708452065, |
|
"learning_rate": 3.9129780600541397e-10, |
|
"logits/chosen": -2.5734434127807617, |
|
"logits/rejected": -2.4971184730529785, |
|
"logps/chosen": -290.81646728515625, |
|
"logps/rejected": -297.24932861328125, |
|
"loss": 0.0344, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.08584621548652649, |
|
"rewards/margins": 0.24995127320289612, |
|
"rewards/rejected": -0.33579742908477783, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.9988571428571429, |
|
"step": 437, |
|
"total_flos": 0.0, |
|
"train_loss": 0.03879852061004333, |
|
"train_runtime": 11351.3146, |
|
"train_samples_per_second": 4.933, |
|
"train_steps_per_second": 0.038 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 437, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 100, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |