Training in progress, step 600, checkpoint
{
"best_metric": 0.017838381230831146,
"best_model_checkpoint": "./Zephyr/27-03-24-Weni-WeniGPT-QA-Zephyr-7B-4.0.0-KTO_WeniGPT Experiment using KTO trainer with no collator-2_max_steps-786_batch_16_2024-03-27_ppid_9/checkpoint-600",
"epoch": 2.2652194431335535,
"eval_steps": 50,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"grad_norm": 2.2820355892181396,
"kl": 0.7391773462295532,
"learning_rate": 0.0001666666666666667,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 785.5091,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 20
},
{
"epoch": 0.15,
"grad_norm": NaN,
"kl": 1.364301085472107,
"learning_rate": 0.00019606299212598428,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 124.9389,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 40
},
{
"epoch": 0.19,
"eval_kl": 3.282975673675537,
"eval_logps/chosen": -132.00738525390625,
"eval_logps/rejected": -237.69239807128906,
"eval_loss": 0.09803511947393417,
"eval_rewards/chosen": 4.3711838722229,
"eval_rewards/margins": 8.833364486694336,
"eval_rewards/rejected": -4.462180137634277,
"eval_runtime": 474.8108,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 50
},
{
"epoch": 0.23,
"grad_norm": 1.1073336601257324,
"kl": 0.8054396510124207,
"learning_rate": 0.00019107611548556432,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 91.2134,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 60
},
{
"epoch": 0.3,
"grad_norm": 0.4274377226829529,
"kl": 0.38704636693000793,
"learning_rate": 0.00018582677165354333,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 26.9569,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 80
},
{
"epoch": 0.38,
"grad_norm": 0.5270018577575684,
"kl": 0.47709473967552185,
"learning_rate": 0.0001805774278215223,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 10.8269,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 100
},
{
"epoch": 0.38,
"eval_kl": 0.762260377407074,
"eval_logps/chosen": -135.4526824951172,
"eval_logps/rejected": -541.3764038085938,
"eval_loss": 0.039932698011398315,
"eval_rewards/chosen": 4.026655197143555,
"eval_rewards/margins": 38.857234954833984,
"eval_rewards/rejected": -34.83057403564453,
"eval_runtime": 474.2877,
"eval_samples_per_second": 1.071,
"eval_steps_per_second": 0.536,
"step": 100
},
{
"epoch": 0.45,
"grad_norm": 0.19858050346374512,
"kl": 1.8940637111663818,
"learning_rate": 0.00017532808398950132,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 34.2088,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 120
},
{
"epoch": 0.53,
"grad_norm": 2.615041971206665,
"kl": 1.40102219581604,
"learning_rate": 0.00017007874015748033,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 276.3512,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 140
},
{
"epoch": 0.57,
"eval_kl": 1.686139702796936,
"eval_logps/chosen": -127.73213195800781,
"eval_logps/rejected": -397.8934631347656,
"eval_loss": 0.02803400717675686,
"eval_rewards/chosen": 4.798709869384766,
"eval_rewards/margins": 25.2810001373291,
"eval_rewards/rejected": -20.482288360595703,
"eval_runtime": 474.6717,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 150
},
{
"epoch": 0.6,
"grad_norm": 0.31603240966796875,
"kl": 0.7387570738792419,
"learning_rate": 0.00016482939632545934,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 216.0894,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 160
},
{
"epoch": 0.68,
"grad_norm": 0.3982734978199005,
"kl": 1.4841536283493042,
"learning_rate": 0.00015958005249343832,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 8.0994,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 180
},
{
"epoch": 0.76,
"grad_norm": 2.1777164936065674,
"kl": 0.3081069588661194,
"learning_rate": 0.00015433070866141733,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 5.7214,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 200
},
{
"epoch": 0.76,
"eval_kl": 1.5452415943145752,
"eval_logps/chosen": -125.70950317382812,
"eval_logps/rejected": -412.7599182128906,
"eval_loss": 0.029857149347662926,
"eval_rewards/chosen": 5.000972270965576,
"eval_rewards/margins": 26.969900131225586,
"eval_rewards/rejected": -21.96892738342285,
"eval_runtime": 474.8305,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 200
},
{
"epoch": 0.83,
"grad_norm": 0.08603761345148087,
"kl": 1.142593264579773,
"learning_rate": 0.00014908136482939634,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 22.8407,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 220
},
{
"epoch": 0.91,
"grad_norm": 5.5862650871276855,
"kl": 0.40408754348754883,
"learning_rate": 0.00014383202099737535,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 207.9747,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 240
},
{
"epoch": 0.94,
"eval_kl": 1.182385802268982,
"eval_logps/chosen": -127.54723358154297,
"eval_logps/rejected": -806.224853515625,
"eval_loss": 0.026166189461946487,
"eval_rewards/chosen": 4.817198276519775,
"eval_rewards/margins": 66.13262176513672,
"eval_rewards/rejected": -61.315425872802734,
"eval_runtime": 474.7526,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 250
},
{
"epoch": 0.98,
"grad_norm": 0.39546045660972595,
"kl": 0.7778356671333313,
"learning_rate": 0.00013858267716535433,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 720.8411,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 260
},
{
"epoch": 1.06,
"grad_norm": 0.1680106222629547,
"kl": 1.798954963684082,
"learning_rate": 0.00013333333333333334,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 756.123,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 280
},
{
"epoch": 1.13,
"grad_norm": 0.13731369376182556,
"kl": 1.9178543090820312,
"learning_rate": 0.00012808398950131235,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 25.0348,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 300
},
{
"epoch": 1.13,
"eval_kl": 1.4845337867736816,
"eval_logps/chosen": -125.86082458496094,
"eval_logps/rejected": -901.4517211914062,
"eval_loss": 0.020615577697753906,
"eval_rewards/chosen": 4.985838890075684,
"eval_rewards/margins": 75.82396697998047,
"eval_rewards/rejected": -70.83811950683594,
"eval_runtime": 474.4234,
"eval_samples_per_second": 1.071,
"eval_steps_per_second": 0.535,
"step": 300
},
{
"epoch": 1.21,
"grad_norm": 0.35939568281173706,
"kl": 0.7613407373428345,
"learning_rate": 0.00012283464566929136,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 10.9288,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 320
},
{
"epoch": 1.28,
"grad_norm": 0.0034766767639666796,
"kl": 0.3800249993801117,
"learning_rate": 0.00011758530183727034,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 3.1951,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 340
},
{
"epoch": 1.32,
"eval_kl": 0.6363816261291504,
"eval_logps/chosen": -128.82322692871094,
"eval_logps/rejected": -1020.8375244140625,
"eval_loss": 0.026503143832087517,
"eval_rewards/chosen": 4.689598560333252,
"eval_rewards/margins": 87.4662857055664,
"eval_rewards/rejected": -82.77667999267578,
"eval_runtime": 474.6086,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 350
},
{
"epoch": 1.36,
"grad_norm": 1.1018553972244263,
"kl": 0.1121591106057167,
"learning_rate": 0.00011233595800524934,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 3.5301,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 360
},
{
"epoch": 1.43,
"grad_norm": 0.6937451958656311,
"kl": 1.0412858724594116,
"learning_rate": 0.00010708661417322836,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 2.2977,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 380
},
{
"epoch": 1.51,
"grad_norm": 0.25449952483177185,
"kl": 0.832425594329834,
"learning_rate": 0.00010183727034120735,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 68.7248,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 400
},
{
"epoch": 1.51,
"eval_kl": 1.2176250219345093,
"eval_logps/chosen": -125.15269470214844,
"eval_logps/rejected": -730.7761840820312,
"eval_loss": 0.020107893273234367,
"eval_rewards/chosen": 5.056652545928955,
"eval_rewards/margins": 58.827205657958984,
"eval_rewards/rejected": -53.77055358886719,
"eval_runtime": 474.7959,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 400
},
{
"epoch": 1.59,
"grad_norm": 0.14162199199199677,
"kl": 0.6998993158340454,
"learning_rate": 9.658792650918635e-05,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 4.9788,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 420
},
{
"epoch": 1.66,
"grad_norm": 0.043907281011343,
"kl": 0.17792066931724548,
"learning_rate": 9.133858267716536e-05,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 10.659,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 440
},
{
"epoch": 1.7,
"eval_kl": 0.8826082348823547,
"eval_logps/chosen": -126.6418685913086,
"eval_logps/rejected": -955.7069702148438,
"eval_loss": 0.026313357055187225,
"eval_rewards/chosen": 4.9077348709106445,
"eval_rewards/margins": 81.17137908935547,
"eval_rewards/rejected": -76.26364135742188,
"eval_runtime": 474.6199,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 450
},
{
"epoch": 1.74,
"grad_norm": 0.03523483872413635,
"kl": 0.21202507615089417,
"learning_rate": 8.608923884514435e-05,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 9.5266,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 460
},
{
"epoch": 1.81,
"grad_norm": 0.31107035279273987,
"kl": 0.18568357825279236,
"learning_rate": 8.083989501312336e-05,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 4.6345,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 480
},
{
"epoch": 1.89,
"grad_norm": 0.05238344892859459,
"kl": 0.0,
"learning_rate": 7.559055118110236e-05,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 177.5836,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 500
},
{
"epoch": 1.89,
"eval_kl": 0.47942328453063965,
"eval_logps/chosen": -123.88298034667969,
"eval_logps/rejected": -1018.103515625,
"eval_loss": 0.01867891475558281,
"eval_rewards/chosen": 5.183623790740967,
"eval_rewards/margins": 87.6869125366211,
"eval_rewards/rejected": -82.5032958984375,
"eval_runtime": 474.5715,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 500
},
{
"epoch": 1.96,
"grad_norm": 0.01653907261788845,
"kl": 0.29649871587753296,
"learning_rate": 7.034120734908137e-05,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 1.9292,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 520
},
{
"epoch": 2.04,
"grad_norm": 0.0456332191824913,
"kl": 1.480500340461731,
"learning_rate": 6.509186351706036e-05,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 15.4933,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 540
},
{
"epoch": 2.08,
"eval_kl": 0.9202241897583008,
"eval_logps/chosen": -127.73924255371094,
"eval_logps/rejected": -1145.0382080078125,
"eval_loss": 0.02805669605731964,
"eval_rewards/chosen": 4.797997951507568,
"eval_rewards/margins": 99.9947738647461,
"eval_rewards/rejected": -95.19676971435547,
"eval_runtime": 474.6365,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 550
},
{
"epoch": 2.11,
"grad_norm": 0.03160392865538597,
"kl": 1.869972825050354,
"learning_rate": 5.984251968503938e-05,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 2.8522,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 560
},
{
"epoch": 2.19,
"grad_norm": 0.02318180911242962,
"kl": 1.334816575050354,
"learning_rate": 5.4593175853018376e-05,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 23.0498,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 580
},
{
"epoch": 2.27,
"grad_norm": 0.015581952407956123,
"kl": 0.0,
"learning_rate": 4.934383202099738e-05,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 3.827,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 600
},
{
"epoch": 2.27,
"eval_kl": 0.4924721121788025,
"eval_logps/chosen": -125.38406372070312,
"eval_logps/rejected": -1163.0284423828125,
"eval_loss": 0.017838381230831146,
"eval_rewards/chosen": 5.033515930175781,
"eval_rewards/margins": 102.029296875,
"eval_rewards/rejected": -96.99578094482422,
"eval_runtime": 474.851,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 600
}
],
"logging_steps": 20,
"max_steps": 786,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
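
For readers who want to work with this log programmatically, the snippet below is a minimal sketch that parses the trainer state above and recovers the evaluation-loss curve and the best checkpoint. The filename is illustrative (inside a Transformers checkpoint directory this state is conventionally saved as trainer_state.json, but adjust the path to wherever you stored the file), and note that Python's json module tolerates the NaN literals that appear in the training rows.

import json

# Illustrative path: point this at your local copy of the state file above.
with open("trainer_state.json") as f:
    state = json.load(f)  # the stdlib decoder accepts the NaN literals in the log

# log_history mixes training rows (have "loss") and evaluation rows (have "eval_loss").
eval_rows = [row for row in state["log_history"] if "eval_loss" in row]

# Trace the evaluation loss and reward margin over steps.
for row in eval_rows:
    print(f"step {row['step']:>4}: eval_loss={row['eval_loss']:.6f} "
          f"reward_margin={row['eval_rewards/margins']:.2f}")

# Locate the best evaluation point and compare it with the reported best_metric.
best = min(eval_rows, key=lambda row: row["eval_loss"])
print("best eval_loss:", best["eval_loss"], "at step", best["step"])
print("reported best_metric:", state["best_metric"])
print("reported best_model_checkpoint:", state["best_model_checkpoint"])

Running this against the state above should show the minimum eval_loss at step 600, matching the best_metric (0.017838...) and best_model_checkpoint recorded at the top of the file.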