{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 930,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0032258064516129032,
"grad_norm": 115.22052507274148,
"learning_rate": 5.3763440860215056e-09,
"logits/chosen": -4.125,
"logits/rejected": -4.0625,
"logps/chosen": -185.0,
"logps/rejected": -196.0,
"loss": 0.6914,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.03225806451612903,
"grad_norm": 94.47396535248316,
"learning_rate": 5.3763440860215054e-08,
"logits/chosen": -4.125,
"logits/rejected": -4.15625,
"logps/chosen": -186.0,
"logps/rejected": -195.0,
"loss": 0.6886,
"rewards/accuracies": 0.2222222238779068,
"rewards/chosen": 1.3589859008789062e-05,
"rewards/margins": 0.009765625,
"rewards/rejected": -0.009765625,
"step": 10
},
{
"epoch": 0.06451612903225806,
"grad_norm": 94.33148772179948,
"learning_rate": 1.0752688172043011e-07,
"logits/chosen": -4.15625,
"logits/rejected": -4.0625,
"logps/chosen": -188.0,
"logps/rejected": -200.0,
"loss": 0.6872,
"rewards/accuracies": 0.2874999940395355,
"rewards/chosen": -0.005645751953125,
"rewards/margins": 0.01312255859375,
"rewards/rejected": -0.018798828125,
"step": 20
},
{
"epoch": 0.0967741935483871,
"grad_norm": 92.49633755921806,
"learning_rate": 1.6129032258064515e-07,
"logits/chosen": -4.0625,
"logits/rejected": -4.03125,
"logps/chosen": -185.0,
"logps/rejected": -191.0,
"loss": 0.669,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.016845703125,
"rewards/margins": 0.054443359375,
"rewards/rejected": -0.03759765625,
"step": 30
},
{
"epoch": 0.12903225806451613,
"grad_norm": 82.30619665044928,
"learning_rate": 2.1505376344086022e-07,
"logits/chosen": -4.03125,
"logits/rejected": -4.03125,
"logps/chosen": -189.0,
"logps/rejected": -200.0,
"loss": 0.6341,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": 0.038818359375,
"rewards/margins": 0.13671875,
"rewards/rejected": -0.09765625,
"step": 40
},
{
"epoch": 0.16129032258064516,
"grad_norm": 73.60890561659406,
"learning_rate": 2.6881720430107523e-07,
"logits/chosen": -4.03125,
"logits/rejected": -3.890625,
"logps/chosen": -189.0,
"logps/rejected": -201.0,
"loss": 0.5644,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.029296875,
"rewards/margins": 0.328125,
"rewards/rejected": -0.357421875,
"step": 50
},
{
"epoch": 0.1935483870967742,
"grad_norm": 83.24582572373353,
"learning_rate": 3.225806451612903e-07,
"logits/chosen": -3.890625,
"logits/rejected": -3.828125,
"logps/chosen": -191.0,
"logps/rejected": -204.0,
"loss": 0.5283,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -0.25390625,
"rewards/margins": 0.484375,
"rewards/rejected": -0.73828125,
"step": 60
},
{
"epoch": 0.22580645161290322,
"grad_norm": 70.93826710868849,
"learning_rate": 3.7634408602150537e-07,
"logits/chosen": -3.9375,
"logits/rejected": -3.890625,
"logps/chosen": -187.0,
"logps/rejected": -206.0,
"loss": 0.4758,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -0.1494140625,
"rewards/margins": 0.83984375,
"rewards/rejected": -0.98828125,
"step": 70
},
{
"epoch": 0.25806451612903225,
"grad_norm": 67.89432749815619,
"learning_rate": 4.3010752688172043e-07,
"logits/chosen": -3.875,
"logits/rejected": -3.84375,
"logps/chosen": -188.0,
"logps/rejected": -215.0,
"loss": 0.4225,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -0.06494140625,
"rewards/margins": 1.3359375,
"rewards/rejected": -1.3984375,
"step": 80
},
{
"epoch": 0.2903225806451613,
"grad_norm": 56.57247809818219,
"learning_rate": 4.838709677419355e-07,
"logits/chosen": -3.9375,
"logits/rejected": -3.921875,
"logps/chosen": -195.0,
"logps/rejected": -220.0,
"loss": 0.3769,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -0.8125,
"rewards/margins": 1.390625,
"rewards/rejected": -2.203125,
"step": 90
},
{
"epoch": 0.3225806451612903,
"grad_norm": 54.52791585727774,
"learning_rate": 4.958183990442055e-07,
"logits/chosen": -3.90625,
"logits/rejected": -3.890625,
"logps/chosen": -198.0,
"logps/rejected": -225.0,
"loss": 0.3733,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -1.03125,
"rewards/margins": 1.6015625,
"rewards/rejected": -2.625,
"step": 100
},
{
"epoch": 0.3548387096774194,
"grad_norm": 70.14660020921133,
"learning_rate": 4.898446833930704e-07,
"logits/chosen": -3.96875,
"logits/rejected": -3.90625,
"logps/chosen": -195.0,
"logps/rejected": -225.0,
"loss": 0.3651,
"rewards/accuracies": 0.875,
"rewards/chosen": -1.0390625,
"rewards/margins": 1.7578125,
"rewards/rejected": -2.796875,
"step": 110
},
{
"epoch": 0.3870967741935484,
"grad_norm": 70.71484436398696,
"learning_rate": 4.838709677419355e-07,
"logits/chosen": -3.9375,
"logits/rejected": -3.90625,
"logps/chosen": -196.0,
"logps/rejected": -224.0,
"loss": 0.4252,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -0.9609375,
"rewards/margins": 1.7421875,
"rewards/rejected": -2.703125,
"step": 120
},
{
"epoch": 0.41935483870967744,
"grad_norm": 60.790615448301835,
"learning_rate": 4.778972520908004e-07,
"logits/chosen": -3.9375,
"logits/rejected": -3.9375,
"logps/chosen": -197.0,
"logps/rejected": -231.0,
"loss": 0.3266,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -0.95703125,
"rewards/margins": 2.21875,
"rewards/rejected": -3.1875,
"step": 130
},
{
"epoch": 0.45161290322580644,
"grad_norm": 69.89033127044969,
"learning_rate": 4.7192353643966544e-07,
"logits/chosen": -3.9375,
"logits/rejected": -3.890625,
"logps/chosen": -201.0,
"logps/rejected": -230.0,
"loss": 0.3487,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -1.171875,
"rewards/margins": 1.8359375,
"rewards/rejected": -3.0,
"step": 140
},
{
"epoch": 0.4838709677419355,
"grad_norm": 70.95003315189264,
"learning_rate": 4.6594982078853044e-07,
"logits/chosen": -3.90625,
"logits/rejected": -3.828125,
"logps/chosen": -196.0,
"logps/rejected": -229.0,
"loss": 0.3011,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -1.140625,
"rewards/margins": 2.078125,
"rewards/rejected": -3.21875,
"step": 150
},
{
"epoch": 0.5161290322580645,
"grad_norm": 78.12294251323976,
"learning_rate": 4.5997610513739544e-07,
"logits/chosen": -3.90625,
"logits/rejected": -3.859375,
"logps/chosen": -194.0,
"logps/rejected": -229.0,
"loss": 0.2904,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.80859375,
"rewards/margins": 2.453125,
"rewards/rejected": -3.265625,
"step": 160
},
{
"epoch": 0.5483870967741935,
"grad_norm": 58.17040697560013,
"learning_rate": 4.540023894862604e-07,
"logits/chosen": -3.984375,
"logits/rejected": -3.890625,
"logps/chosen": -202.0,
"logps/rejected": -232.0,
"loss": 0.286,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -1.53125,
"rewards/margins": 2.484375,
"rewards/rejected": -4.0,
"step": 170
},
{
"epoch": 0.5806451612903226,
"grad_norm": 75.00483410106833,
"learning_rate": 4.4802867383512544e-07,
"logits/chosen": -3.8125,
"logits/rejected": -3.765625,
"logps/chosen": -200.0,
"logps/rejected": -239.0,
"loss": 0.3374,
"rewards/accuracies": 0.8125,
"rewards/chosen": -1.5625,
"rewards/margins": 2.484375,
"rewards/rejected": -4.03125,
"step": 180
},
{
"epoch": 0.6129032258064516,
"grad_norm": 72.167130272584,
"learning_rate": 4.4205495818399044e-07,
"logits/chosen": -3.90625,
"logits/rejected": -3.875,
"logps/chosen": -200.0,
"logps/rejected": -239.0,
"loss": 0.3004,
"rewards/accuracies": 0.875,
"rewards/chosen": -1.0703125,
"rewards/margins": 2.890625,
"rewards/rejected": -3.96875,
"step": 190
},
{
"epoch": 0.6451612903225806,
"grad_norm": 61.76892460773258,
"learning_rate": 4.3608124253285543e-07,
"logits/chosen": -3.90625,
"logits/rejected": -3.90625,
"logps/chosen": -199.0,
"logps/rejected": -230.0,
"loss": 0.301,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -1.375,
"rewards/margins": 2.015625,
"rewards/rejected": -3.390625,
"step": 200
},
{
"epoch": 0.6774193548387096,
"grad_norm": 52.49765267963524,
"learning_rate": 4.3010752688172043e-07,
"logits/chosen": -3.890625,
"logits/rejected": -3.78125,
"logps/chosen": -196.0,
"logps/rejected": -233.0,
"loss": 0.258,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -0.8828125,
"rewards/margins": 2.84375,
"rewards/rejected": -3.734375,
"step": 210
},
{
"epoch": 0.7096774193548387,
"grad_norm": 58.816815163007185,
"learning_rate": 4.241338112305854e-07,
"logits/chosen": -3.859375,
"logits/rejected": -3.84375,
"logps/chosen": -194.0,
"logps/rejected": -237.0,
"loss": 0.278,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -1.0390625,
"rewards/margins": 2.6875,
"rewards/rejected": -3.734375,
"step": 220
},
{
"epoch": 0.7419354838709677,
"grad_norm": 77.22842288731327,
"learning_rate": 4.1816009557945043e-07,
"logits/chosen": -3.859375,
"logits/rejected": -3.734375,
"logps/chosen": -192.0,
"logps/rejected": -234.0,
"loss": 0.2537,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -0.7890625,
"rewards/margins": 3.046875,
"rewards/rejected": -3.84375,
"step": 230
},
{
"epoch": 0.7741935483870968,
"grad_norm": 55.73484353697734,
"learning_rate": 4.121863799283154e-07,
"logits/chosen": -3.875,
"logits/rejected": -3.796875,
"logps/chosen": -199.0,
"logps/rejected": -235.0,
"loss": 0.3267,
"rewards/accuracies": 0.875,
"rewards/chosen": -1.5078125,
"rewards/margins": 2.8125,
"rewards/rejected": -4.3125,
"step": 240
},
{
"epoch": 0.8064516129032258,
"grad_norm": 73.3120879364035,
"learning_rate": 4.0621266427718037e-07,
"logits/chosen": -3.953125,
"logits/rejected": -3.890625,
"logps/chosen": -194.0,
"logps/rejected": -233.0,
"loss": 0.2759,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -0.85546875,
"rewards/margins": 2.96875,
"rewards/rejected": -3.8125,
"step": 250
},
{
"epoch": 0.8387096774193549,
"grad_norm": 53.82592061402359,
"learning_rate": 4.002389486260454e-07,
"logits/chosen": -3.953125,
"logits/rejected": -3.890625,
"logps/chosen": -195.0,
"logps/rejected": -245.0,
"loss": 0.2255,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.59765625,
"rewards/margins": 3.875,
"rewards/rejected": -4.46875,
"step": 260
},
{
"epoch": 0.8709677419354839,
"grad_norm": 73.21099669158846,
"learning_rate": 3.9426523297491037e-07,
"logits/chosen": -3.953125,
"logits/rejected": -3.921875,
"logps/chosen": -195.0,
"logps/rejected": -244.0,
"loss": 0.2336,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.0703125,
"rewards/margins": 3.53125,
"rewards/rejected": -4.59375,
"step": 270
},
{
"epoch": 0.9032258064516129,
"grad_norm": 61.8854330576029,
"learning_rate": 3.8829151732377537e-07,
"logits/chosen": -3.9375,
"logits/rejected": -3.90625,
"logps/chosen": -199.0,
"logps/rejected": -232.0,
"loss": 0.2834,
"rewards/accuracies": 0.875,
"rewards/chosen": -1.15625,
"rewards/margins": 2.890625,
"rewards/rejected": -4.03125,
"step": 280
},
{
"epoch": 0.9354838709677419,
"grad_norm": 58.02390241120245,
"learning_rate": 3.8231780167264037e-07,
"logits/chosen": -4.03125,
"logits/rejected": -3.9375,
"logps/chosen": -196.0,
"logps/rejected": -230.0,
"loss": 0.2844,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -0.7734375,
"rewards/margins": 2.9375,
"rewards/rejected": -3.71875,
"step": 290
},
{
"epoch": 0.967741935483871,
"grad_norm": 78.57587657653356,
"learning_rate": 3.7634408602150537e-07,
"logits/chosen": -3.984375,
"logits/rejected": -3.875,
"logps/chosen": -189.0,
"logps/rejected": -236.0,
"loss": 0.2828,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -0.6171875,
"rewards/margins": 3.234375,
"rewards/rejected": -3.859375,
"step": 300
},
{
"epoch": 1.0,
"grad_norm": 67.82380071078845,
"learning_rate": 3.703703703703703e-07,
"logits/chosen": -3.890625,
"logits/rejected": -3.859375,
"logps/chosen": -196.0,
"logps/rejected": -230.0,
"loss": 0.271,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -0.9921875,
"rewards/margins": 3.0625,
"rewards/rejected": -4.0625,
"step": 310
},
{
"epoch": 1.0,
"eval_logits/chosen": -3.953125,
"eval_logits/rejected": -3.890625,
"eval_logps/chosen": -217.0,
"eval_logps/rejected": -255.0,
"eval_loss": 0.20095214247703552,
"eval_rewards/accuracies": 0.9285714030265808,
"eval_rewards/chosen": -1.7890625,
"eval_rewards/margins": 3.21875,
"eval_rewards/rejected": -5.0,
"eval_runtime": 13.4206,
"eval_samples_per_second": 14.902,
"eval_steps_per_second": 0.522,
"step": 310
},
{
"epoch": 1.032258064516129,
"grad_norm": 14.7763712358255,
"learning_rate": 3.6439665471923536e-07,
"logits/chosen": -4.03125,
"logits/rejected": -3.90625,
"logps/chosen": -197.0,
"logps/rejected": -268.0,
"loss": 0.032,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -1.0234375,
"rewards/margins": 5.96875,
"rewards/rejected": -7.0,
"step": 320
},
{
"epoch": 1.064516129032258,
"grad_norm": 6.092608827300573,
"learning_rate": 3.5842293906810036e-07,
"logits/chosen": -3.9375,
"logits/rejected": -3.859375,
"logps/chosen": -193.0,
"logps/rejected": -268.0,
"loss": 0.0204,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -0.349609375,
"rewards/margins": 6.0,
"rewards/rejected": -6.34375,
"step": 330
},
{
"epoch": 1.096774193548387,
"grad_norm": 13.413597095885587,
"learning_rate": 3.524492234169653e-07,
"logits/chosen": -3.859375,
"logits/rejected": -3.75,
"logps/chosen": -192.0,
"logps/rejected": -270.0,
"loss": 0.0397,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -0.123046875,
"rewards/margins": 6.40625,
"rewards/rejected": -6.53125,
"step": 340
},
{
"epoch": 1.129032258064516,
"grad_norm": 23.482097218429256,
"learning_rate": 3.4647550776583036e-07,
"logits/chosen": -3.875,
"logits/rejected": -3.84375,
"logps/chosen": -193.0,
"logps/rejected": -272.0,
"loss": 0.0236,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -0.376953125,
"rewards/margins": 7.0,
"rewards/rejected": -7.375,
"step": 350
},
{
"epoch": 1.1612903225806452,
"grad_norm": 23.358340694800074,
"learning_rate": 3.405017921146953e-07,
"logits/chosen": -3.828125,
"logits/rejected": -3.8125,
"logps/chosen": -183.0,
"logps/rejected": -260.0,
"loss": 0.0311,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": 0.154296875,
"rewards/margins": 6.3125,
"rewards/rejected": -6.1875,
"step": 360
},
{
"epoch": 1.1935483870967742,
"grad_norm": 3.914331549525913,
"learning_rate": 3.345280764635603e-07,
"logits/chosen": -3.890625,
"logits/rejected": -3.8125,
"logps/chosen": -193.0,
"logps/rejected": -274.0,
"loss": 0.0253,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.0299072265625,
"rewards/margins": 7.03125,
"rewards/rejected": -7.0625,
"step": 370
},
{
"epoch": 1.2258064516129032,
"grad_norm": 7.274490128378863,
"learning_rate": 3.285543608124253e-07,
"logits/chosen": -3.96875,
"logits/rejected": -3.84375,
"logps/chosen": -190.0,
"logps/rejected": -270.0,
"loss": 0.0083,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.08349609375,
"rewards/margins": 7.3125,
"rewards/rejected": -7.21875,
"step": 380
},
{
"epoch": 1.2580645161290323,
"grad_norm": 3.5384348394162375,
"learning_rate": 3.225806451612903e-07,
"logits/chosen": -3.8125,
"logits/rejected": -3.828125,
"logps/chosen": -189.0,
"logps/rejected": -276.0,
"loss": 0.0157,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -0.119140625,
"rewards/margins": 7.09375,
"rewards/rejected": -7.1875,
"step": 390
},
{
"epoch": 1.2903225806451613,
"grad_norm": 27.375764382001474,
"learning_rate": 3.1660692951015535e-07,
"logits/chosen": -3.75,
"logits/rejected": -3.78125,
"logps/chosen": -194.0,
"logps/rejected": -274.0,
"loss": 0.0192,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.42578125,
"rewards/margins": 7.4375,
"rewards/rejected": -7.875,
"step": 400
},
{
"epoch": 1.3225806451612903,
"grad_norm": 21.54322951685049,
"learning_rate": 3.106332138590203e-07,
"logits/chosen": -3.859375,
"logits/rejected": -3.859375,
"logps/chosen": -186.0,
"logps/rejected": -274.0,
"loss": 0.0143,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.150390625,
"rewards/margins": 8.0,
"rewards/rejected": -8.125,
"step": 410
},
{
"epoch": 1.3548387096774195,
"grad_norm": 25.577249022826813,
"learning_rate": 3.046594982078853e-07,
"logits/chosen": -3.78125,
"logits/rejected": -3.78125,
"logps/chosen": -193.0,
"logps/rejected": -276.0,
"loss": 0.0173,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.37890625,
"rewards/margins": 7.28125,
"rewards/rejected": -7.65625,
"step": 420
},
{
"epoch": 1.3870967741935485,
"grad_norm": 0.8297796576977869,
"learning_rate": 2.986857825567503e-07,
"logits/chosen": -3.828125,
"logits/rejected": -3.765625,
"logps/chosen": -190.0,
"logps/rejected": -274.0,
"loss": 0.0179,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.2333984375,
"rewards/margins": 7.53125,
"rewards/rejected": -7.75,
"step": 430
},
{
"epoch": 1.4193548387096775,
"grad_norm": 37.60882100528414,
"learning_rate": 2.927120669056153e-07,
"logits/chosen": -3.71875,
"logits/rejected": -3.765625,
"logps/chosen": -189.0,
"logps/rejected": -278.0,
"loss": 0.0169,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.3671875,
"rewards/margins": 7.84375,
"rewards/rejected": -8.1875,
"step": 440
},
{
"epoch": 1.4516129032258065,
"grad_norm": 9.165654729466173,
"learning_rate": 2.8673835125448024e-07,
"logits/chosen": -3.75,
"logits/rejected": -3.6875,
"logps/chosen": -196.0,
"logps/rejected": -284.0,
"loss": 0.0261,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -0.1904296875,
"rewards/margins": 7.90625,
"rewards/rejected": -8.0625,
"step": 450
},
{
"epoch": 1.4838709677419355,
"grad_norm": 2.112943941209443,
"learning_rate": 2.807646356033453e-07,
"logits/chosen": -3.640625,
"logits/rejected": -3.65625,
"logps/chosen": -190.0,
"logps/rejected": -280.0,
"loss": 0.0271,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -0.66796875,
"rewards/margins": 7.8125,
"rewards/rejected": -8.5,
"step": 460
},
{
"epoch": 1.5161290322580645,
"grad_norm": 53.66978666939312,
"learning_rate": 2.747909199522103e-07,
"logits/chosen": -3.6875,
"logits/rejected": -3.6875,
"logps/chosen": -198.0,
"logps/rejected": -276.0,
"loss": 0.024,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -0.890625,
"rewards/margins": 7.5,
"rewards/rejected": -8.375,
"step": 470
},
{
"epoch": 1.5483870967741935,
"grad_norm": 13.967124404944409,
"learning_rate": 2.6881720430107523e-07,
"logits/chosen": -3.671875,
"logits/rejected": -3.671875,
"logps/chosen": -198.0,
"logps/rejected": -284.0,
"loss": 0.0212,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.66796875,
"rewards/margins": 7.8125,
"rewards/rejected": -8.5,
"step": 480
},
{
"epoch": 1.5806451612903225,
"grad_norm": 1.505490222650231,
"learning_rate": 2.628434886499403e-07,
"logits/chosen": -3.59375,
"logits/rejected": -3.625,
"logps/chosen": -180.0,
"logps/rejected": -266.0,
"loss": 0.0126,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.1103515625,
"rewards/margins": 7.96875,
"rewards/rejected": -7.875,
"step": 490
},
{
"epoch": 1.6129032258064515,
"grad_norm": 4.552495049762715,
"learning_rate": 2.5686977299880523e-07,
"logits/chosen": -3.65625,
"logits/rejected": -3.703125,
"logps/chosen": -190.0,
"logps/rejected": -282.0,
"loss": 0.0185,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.5234375,
"rewards/margins": 8.125,
"rewards/rejected": -8.6875,
"step": 500
},
{
"epoch": 1.6451612903225805,
"grad_norm": 7.1849195987738925,
"learning_rate": 2.508960573476702e-07,
"logits/chosen": -3.625,
"logits/rejected": -3.6875,
"logps/chosen": -196.0,
"logps/rejected": -280.0,
"loss": 0.0084,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.63671875,
"rewards/margins": 7.53125,
"rewards/rejected": -8.1875,
"step": 510
},
{
"epoch": 1.6774193548387095,
"grad_norm": 1.599699862303645,
"learning_rate": 2.449223416965352e-07,
"logits/chosen": -3.53125,
"logits/rejected": -3.578125,
"logps/chosen": -188.0,
"logps/rejected": -282.0,
"loss": 0.0167,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -0.388671875,
"rewards/margins": 8.1875,
"rewards/rejected": -8.625,
"step": 520
},
{
"epoch": 1.7096774193548387,
"grad_norm": 2.623858535249452,
"learning_rate": 2.389486260454002e-07,
"logits/chosen": -3.734375,
"logits/rejected": -3.6875,
"logps/chosen": -212.0,
"logps/rejected": -302.0,
"loss": 0.0408,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.8046875,
"rewards/margins": 8.1875,
"rewards/rejected": -10.0,
"step": 530
},
{
"epoch": 1.7419354838709677,
"grad_norm": 72.63368145693447,
"learning_rate": 2.3297491039426522e-07,
"logits/chosen": -3.65625,
"logits/rejected": -3.640625,
"logps/chosen": -202.0,
"logps/rejected": -296.0,
"loss": 0.0512,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -1.2421875,
"rewards/margins": 8.4375,
"rewards/rejected": -9.6875,
"step": 540
},
{
"epoch": 1.7741935483870968,
"grad_norm": 3.1206520900533667,
"learning_rate": 2.270011947431302e-07,
"logits/chosen": -3.640625,
"logits/rejected": -3.6875,
"logps/chosen": -180.0,
"logps/rejected": -266.0,
"loss": 0.04,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.1953125,
"rewards/margins": 7.46875,
"rewards/rejected": -7.28125,
"step": 550
},
{
"epoch": 1.8064516129032258,
"grad_norm": 41.020674776975646,
"learning_rate": 2.2102747909199522e-07,
"logits/chosen": -3.6875,
"logits/rejected": -3.6875,
"logps/chosen": -194.0,
"logps/rejected": -290.0,
"loss": 0.0124,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.03125,
"rewards/margins": 8.0625,
"rewards/rejected": -9.125,
"step": 560
},
{
"epoch": 1.838709677419355,
"grad_norm": 4.010133968357106,
"learning_rate": 2.1505376344086022e-07,
"logits/chosen": -3.59375,
"logits/rejected": -3.546875,
"logps/chosen": -212.0,
"logps/rejected": -316.0,
"loss": 0.0198,
"rewards/accuracies": 1.0,
"rewards/chosen": -2.328125,
"rewards/margins": 9.25,
"rewards/rejected": -11.625,
"step": 570
},
{
"epoch": 1.870967741935484,
"grad_norm": 7.069397017435533,
"learning_rate": 2.0908004778972521e-07,
"logits/chosen": -3.65625,
"logits/rejected": -3.6875,
"logps/chosen": -188.0,
"logps/rejected": -282.0,
"loss": 0.0133,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.1337890625,
"rewards/margins": 8.375,
"rewards/rejected": -8.5625,
"step": 580
},
{
"epoch": 1.903225806451613,
"grad_norm": 8.936652736646373,
"learning_rate": 2.0310633213859019e-07,
"logits/chosen": -3.625,
"logits/rejected": -3.546875,
"logps/chosen": -196.0,
"logps/rejected": -300.0,
"loss": 0.0118,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -1.171875,
"rewards/margins": 8.875,
"rewards/rejected": -10.0625,
"step": 590
},
{
"epoch": 1.935483870967742,
"grad_norm": 7.8141795170212465,
"learning_rate": 1.9713261648745518e-07,
"logits/chosen": -3.546875,
"logits/rejected": -3.640625,
"logps/chosen": -190.0,
"logps/rejected": -280.0,
"loss": 0.0089,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.8515625,
"rewards/margins": 8.125,
"rewards/rejected": -9.0,
"step": 600
},
{
"epoch": 1.967741935483871,
"grad_norm": 5.395939675702438,
"learning_rate": 1.9115890083632018e-07,
"logits/chosen": -3.6875,
"logits/rejected": -3.703125,
"logps/chosen": -199.0,
"logps/rejected": -292.0,
"loss": 0.0136,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.86328125,
"rewards/margins": 8.75,
"rewards/rejected": -9.625,
"step": 610
},
{
"epoch": 2.0,
"grad_norm": 17.221158878981882,
"learning_rate": 1.8518518518518516e-07,
"logits/chosen": -3.6875,
"logits/rejected": -3.6875,
"logps/chosen": -197.0,
"logps/rejected": -286.0,
"loss": 0.0135,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.76953125,
"rewards/margins": 8.3125,
"rewards/rejected": -9.125,
"step": 620
},
{
"epoch": 2.0,
"eval_logits/chosen": -3.796875,
"eval_logits/rejected": -3.765625,
"eval_logps/chosen": -222.0,
"eval_logps/rejected": -280.0,
"eval_loss": 0.191436767578125,
"eval_rewards/accuracies": 0.8928571343421936,
"eval_rewards/chosen": -2.21875,
"eval_rewards/margins": 5.40625,
"eval_rewards/rejected": -7.625,
"eval_runtime": 13.2451,
"eval_samples_per_second": 15.1,
"eval_steps_per_second": 0.528,
"step": 620
},
{
"epoch": 2.032258064516129,
"grad_norm": 0.45384363049400467,
"learning_rate": 1.7921146953405018e-07,
"logits/chosen": -3.5625,
"logits/rejected": -3.578125,
"logps/chosen": -191.0,
"logps/rejected": -300.0,
"loss": 0.0012,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.28125,
"rewards/margins": 10.1875,
"rewards/rejected": -10.4375,
"step": 630
},
{
"epoch": 2.064516129032258,
"grad_norm": 2.0283023878654722,
"learning_rate": 1.7323775388291518e-07,
"logits/chosen": -3.609375,
"logits/rejected": -3.640625,
"logps/chosen": -191.0,
"logps/rejected": -298.0,
"loss": 0.0019,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.302734375,
"rewards/margins": 10.25,
"rewards/rejected": -10.5625,
"step": 640
},
{
"epoch": 2.096774193548387,
"grad_norm": 0.9217664266560652,
"learning_rate": 1.6726403823178015e-07,
"logits/chosen": -3.6875,
"logits/rejected": -3.75,
"logps/chosen": -193.0,
"logps/rejected": -298.0,
"loss": 0.0013,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.93359375,
"rewards/margins": 9.625,
"rewards/rejected": -10.5625,
"step": 650
},
{
"epoch": 2.129032258064516,
"grad_norm": 0.44858819876518125,
"learning_rate": 1.6129032258064515e-07,
"logits/chosen": -3.6875,
"logits/rejected": -3.703125,
"logps/chosen": -207.0,
"logps/rejected": -316.0,
"loss": 0.0012,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.3046875,
"rewards/margins": 10.125,
"rewards/rejected": -11.4375,
"step": 660
},
{
"epoch": 2.161290322580645,
"grad_norm": 9.792046659236782,
"learning_rate": 1.5531660692951015e-07,
"logits/chosen": -3.578125,
"logits/rejected": -3.59375,
"logps/chosen": -199.0,
"logps/rejected": -310.0,
"loss": 0.0019,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.1875,
"rewards/margins": 9.875,
"rewards/rejected": -11.0625,
"step": 670
},
{
"epoch": 2.193548387096774,
"grad_norm": 0.3983602269153926,
"learning_rate": 1.4934289127837515e-07,
"logits/chosen": -3.5,
"logits/rejected": -3.703125,
"logps/chosen": -188.0,
"logps/rejected": -302.0,
"loss": 0.0024,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.59375,
"rewards/margins": 10.25,
"rewards/rejected": -10.875,
"step": 680
},
{
"epoch": 2.225806451612903,
"grad_norm": 1.2472683997191045,
"learning_rate": 1.4336917562724012e-07,
"logits/chosen": -3.59375,
"logits/rejected": -3.65625,
"logps/chosen": -202.0,
"logps/rejected": -312.0,
"loss": 0.0011,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.0546875,
"rewards/margins": 10.25,
"rewards/rejected": -11.3125,
"step": 690
},
{
"epoch": 2.258064516129032,
"grad_norm": 0.9104056031581937,
"learning_rate": 1.3739545997610514e-07,
"logits/chosen": -3.453125,
"logits/rejected": -3.53125,
"logps/chosen": -192.0,
"logps/rejected": -308.0,
"loss": 0.0008,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.90625,
"rewards/margins": 10.625,
"rewards/rejected": -11.5625,
"step": 700
},
{
"epoch": 2.2903225806451615,
"grad_norm": 0.5005896742747854,
"learning_rate": 1.3142174432497014e-07,
"logits/chosen": -3.453125,
"logits/rejected": -3.5625,
"logps/chosen": -196.0,
"logps/rejected": -316.0,
"loss": 0.0015,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.51953125,
"rewards/margins": 11.4375,
"rewards/rejected": -11.9375,
"step": 710
},
{
"epoch": 2.3225806451612905,
"grad_norm": 2.0961727669442247,
"learning_rate": 1.254480286738351e-07,
"logits/chosen": -3.53125,
"logits/rejected": -3.546875,
"logps/chosen": -206.0,
"logps/rejected": -318.0,
"loss": 0.0011,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.515625,
"rewards/margins": 10.5,
"rewards/rejected": -12.0,
"step": 720
},
{
"epoch": 2.3548387096774195,
"grad_norm": 0.33710671811391674,
"learning_rate": 1.194743130227001e-07,
"logits/chosen": -3.53125,
"logits/rejected": -3.578125,
"logps/chosen": -201.0,
"logps/rejected": -318.0,
"loss": 0.0008,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.625,
"rewards/margins": 10.375,
"rewards/rejected": -12.0,
"step": 730
},
{
"epoch": 2.3870967741935485,
"grad_norm": 3.7387698089394634,
"learning_rate": 1.135005973715651e-07,
"logits/chosen": -3.59375,
"logits/rejected": -3.625,
"logps/chosen": -202.0,
"logps/rejected": -314.0,
"loss": 0.0014,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.3671875,
"rewards/margins": 10.375,
"rewards/rejected": -11.75,
"step": 740
},
{
"epoch": 2.4193548387096775,
"grad_norm": 3.5076436110564426,
"learning_rate": 1.0752688172043011e-07,
"logits/chosen": -3.5625,
"logits/rejected": -3.6875,
"logps/chosen": -192.0,
"logps/rejected": -306.0,
"loss": 0.0019,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.53125,
"rewards/margins": 10.4375,
"rewards/rejected": -11.0,
"step": 750
},
{
"epoch": 2.4516129032258065,
"grad_norm": 0.3051005963902528,
"learning_rate": 1.0155316606929509e-07,
"logits/chosen": -3.46875,
"logits/rejected": -3.53125,
"logps/chosen": -194.0,
"logps/rejected": -312.0,
"loss": 0.0009,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.94140625,
"rewards/margins": 10.625,
"rewards/rejected": -11.5625,
"step": 760
},
{
"epoch": 2.4838709677419355,
"grad_norm": 0.4055555200925197,
"learning_rate": 9.557945041816009e-08,
"logits/chosen": -3.40625,
"logits/rejected": -3.546875,
"logps/chosen": -190.0,
"logps/rejected": -314.0,
"loss": 0.0014,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.76953125,
"rewards/margins": 11.0,
"rewards/rejected": -11.75,
"step": 770
},
{
"epoch": 2.5161290322580645,
"grad_norm": 0.4423005388531537,
"learning_rate": 8.960573476702509e-08,
"logits/chosen": -3.5625,
"logits/rejected": -3.5625,
"logps/chosen": -211.0,
"logps/rejected": -336.0,
"loss": 0.0013,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.640625,
"rewards/margins": 11.5,
"rewards/rejected": -13.1875,
"step": 780
},
{
"epoch": 2.5483870967741935,
"grad_norm": 0.34201712686499286,
"learning_rate": 8.363201911589008e-08,
"logits/chosen": -3.4375,
"logits/rejected": -3.515625,
"logps/chosen": -197.0,
"logps/rejected": -312.0,
"loss": 0.001,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.81640625,
"rewards/margins": 10.9375,
"rewards/rejected": -11.75,
"step": 790
},
{
"epoch": 2.5806451612903225,
"grad_norm": 0.08324339497209432,
"learning_rate": 7.765830346475507e-08,
"logits/chosen": -3.421875,
"logits/rejected": -3.59375,
"logps/chosen": -207.0,
"logps/rejected": -330.0,
"loss": 0.0005,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.609375,
"rewards/margins": 11.5,
"rewards/rejected": -13.125,
"step": 800
},
{
"epoch": 2.6129032258064515,
"grad_norm": 0.35882151597975387,
"learning_rate": 7.168458781362006e-08,
"logits/chosen": -3.59375,
"logits/rejected": -3.6875,
"logps/chosen": -202.0,
"logps/rejected": -328.0,
"loss": 0.0045,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.03125,
"rewards/margins": 11.625,
"rewards/rejected": -12.6875,
"step": 810
},
{
"epoch": 2.6451612903225805,
"grad_norm": 0.3006723416131319,
"learning_rate": 6.571087216248507e-08,
"logits/chosen": -3.4375,
"logits/rejected": -3.609375,
"logps/chosen": -197.0,
"logps/rejected": -322.0,
"loss": 0.0026,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.40625,
"rewards/margins": 11.0,
"rewards/rejected": -12.4375,
"step": 820
},
{
"epoch": 2.6774193548387095,
"grad_norm": 0.9135709315414355,
"learning_rate": 5.973715651135006e-08,
"logits/chosen": -3.40625,
"logits/rejected": -3.546875,
"logps/chosen": -194.0,
"logps/rejected": -312.0,
"loss": 0.0026,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.046875,
"rewards/margins": 11.3125,
"rewards/rejected": -12.3125,
"step": 830
},
{
"epoch": 2.709677419354839,
"grad_norm": 0.11799710974470022,
"learning_rate": 5.3763440860215054e-08,
"logits/chosen": -3.453125,
"logits/rejected": -3.609375,
"logps/chosen": -193.0,
"logps/rejected": -330.0,
"loss": 0.001,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.79296875,
"rewards/margins": 12.8125,
"rewards/rejected": -13.625,
"step": 840
},
{
"epoch": 2.741935483870968,
"grad_norm": 0.24029168708895726,
"learning_rate": 4.7789725209080046e-08,
"logits/chosen": -3.484375,
"logits/rejected": -3.5625,
"logps/chosen": -200.0,
"logps/rejected": -320.0,
"loss": 0.0006,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.2734375,
"rewards/margins": 10.875,
"rewards/rejected": -12.1875,
"step": 850
},
{
"epoch": 2.774193548387097,
"grad_norm": 1.5601788534872705,
"learning_rate": 4.181600955794504e-08,
"logits/chosen": -3.484375,
"logits/rejected": -3.625,
"logps/chosen": -206.0,
"logps/rejected": -326.0,
"loss": 0.0009,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.265625,
"rewards/margins": 11.1875,
"rewards/rejected": -12.4375,
"step": 860
},
{
"epoch": 2.806451612903226,
"grad_norm": 2.7780970090627104,
"learning_rate": 3.584229390681003e-08,
"logits/chosen": -3.53125,
"logits/rejected": -3.65625,
"logps/chosen": -201.0,
"logps/rejected": -320.0,
"loss": 0.0025,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.0546875,
"rewards/margins": 10.8125,
"rewards/rejected": -11.875,
"step": 870
},
{
"epoch": 2.838709677419355,
"grad_norm": 0.36034720795454384,
"learning_rate": 2.986857825567503e-08,
"logits/chosen": -3.546875,
"logits/rejected": -3.578125,
"logps/chosen": -198.0,
"logps/rejected": -318.0,
"loss": 0.0008,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.86328125,
"rewards/margins": 11.25,
"rewards/rejected": -12.125,
"step": 880
},
{
"epoch": 2.870967741935484,
"grad_norm": 0.2764304820275906,
"learning_rate": 2.3894862604540023e-08,
"logits/chosen": -3.59375,
"logits/rejected": -3.671875,
"logps/chosen": -203.0,
"logps/rejected": -316.0,
"loss": 0.0037,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.03125,
"rewards/margins": 10.6875,
"rewards/rejected": -11.75,
"step": 890
},
{
"epoch": 2.903225806451613,
"grad_norm": 0.4293826404732342,
"learning_rate": 1.7921146953405015e-08,
"logits/chosen": -3.4375,
"logits/rejected": -3.546875,
"logps/chosen": -208.0,
"logps/rejected": -322.0,
"loss": 0.0011,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.859375,
"rewards/margins": 10.875,
"rewards/rejected": -12.6875,
"step": 900
},
{
"epoch": 2.935483870967742,
"grad_norm": 0.34603080271508346,
"learning_rate": 1.1947431302270011e-08,
"logits/chosen": -3.546875,
"logits/rejected": -3.703125,
"logps/chosen": -207.0,
"logps/rejected": -320.0,
"loss": 0.0007,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.265625,
"rewards/margins": 11.0,
"rewards/rejected": -12.25,
"step": 910
},
{
"epoch": 2.967741935483871,
"grad_norm": 2.3868143014919885,
"learning_rate": 5.973715651135006e-09,
"logits/chosen": -3.4375,
"logits/rejected": -3.65625,
"logps/chosen": -188.0,
"logps/rejected": -304.0,
"loss": 0.002,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.71484375,
"rewards/margins": 10.375,
"rewards/rejected": -11.125,
"step": 920
},
{
"epoch": 3.0,
"grad_norm": 0.40772402916251804,
"learning_rate": 0.0,
"logits/chosen": -3.5,
"logits/rejected": -3.609375,
"logps/chosen": -194.0,
"logps/rejected": -320.0,
"loss": 0.0006,
"rewards/accuracies": 1.0,
"rewards/chosen": -1.1796875,
"rewards/margins": 10.9375,
"rewards/rejected": -12.125,
"step": 930
},
{
"epoch": 3.0,
"eval_logits/chosen": -3.734375,
"eval_logits/rejected": -3.75,
"eval_logps/chosen": -226.0,
"eval_logps/rejected": -294.0,
"eval_loss": 0.19125854969024658,
"eval_rewards/accuracies": 0.8928571343421936,
"eval_rewards/chosen": -2.703125,
"eval_rewards/margins": 6.3125,
"eval_rewards/rejected": -9.0,
"eval_runtime": 16.0795,
"eval_samples_per_second": 12.438,
"eval_steps_per_second": 0.435,
"step": 930
},
{
"epoch": 3.0,
"step": 930,
"total_flos": 0.0,
"train_loss": 0.13316144410480735,
"train_runtime": 5554.5038,
"train_samples_per_second": 5.355,
"train_steps_per_second": 0.167
}
],
"logging_steps": 10,
"max_steps": 930,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}