Training in progress, step 400, checkpoint
c4c436b verified
{
"best_metric": 0.020107893273234367,
"best_model_checkpoint": "./Zephyr/27-03-24-Weni-WeniGPT-QA-Zephyr-7B-4.0.0-KTO_WeniGPT Experiment using KTO trainer with no collator-2_max_steps-786_batch_16_2024-03-27_ppid_9/checkpoint-400",
"epoch": 1.510146295422369,
"eval_steps": 50,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"grad_norm": 2.2820355892181396,
"kl": 0.7391773462295532,
"learning_rate": 0.0001666666666666667,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 785.5091,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 20
},
{
"epoch": 0.15,
"grad_norm": NaN,
"kl": 1.364301085472107,
"learning_rate": 0.00019606299212598428,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 124.9389,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 40
},
{
"epoch": 0.19,
"eval_kl": 3.282975673675537,
"eval_logps/chosen": -132.00738525390625,
"eval_logps/rejected": -237.69239807128906,
"eval_loss": 0.09803511947393417,
"eval_rewards/chosen": 4.3711838722229,
"eval_rewards/margins": 8.833364486694336,
"eval_rewards/rejected": -4.462180137634277,
"eval_runtime": 474.8108,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 50
},
{
"epoch": 0.23,
"grad_norm": 1.1073336601257324,
"kl": 0.8054396510124207,
"learning_rate": 0.00019107611548556432,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 91.2134,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 60
},
{
"epoch": 0.3,
"grad_norm": 0.4274377226829529,
"kl": 0.38704636693000793,
"learning_rate": 0.00018582677165354333,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 26.9569,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 80
},
{
"epoch": 0.38,
"grad_norm": 0.5270018577575684,
"kl": 0.47709473967552185,
"learning_rate": 0.0001805774278215223,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 10.8269,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 100
},
{
"epoch": 0.38,
"eval_kl": 0.762260377407074,
"eval_logps/chosen": -135.4526824951172,
"eval_logps/rejected": -541.3764038085938,
"eval_loss": 0.039932698011398315,
"eval_rewards/chosen": 4.026655197143555,
"eval_rewards/margins": 38.857234954833984,
"eval_rewards/rejected": -34.83057403564453,
"eval_runtime": 474.2877,
"eval_samples_per_second": 1.071,
"eval_steps_per_second": 0.536,
"step": 100
},
{
"epoch": 0.45,
"grad_norm": 0.19858050346374512,
"kl": 1.8940637111663818,
"learning_rate": 0.00017532808398950132,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 34.2088,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 120
},
{
"epoch": 0.53,
"grad_norm": 2.615041971206665,
"kl": 1.40102219581604,
"learning_rate": 0.00017007874015748033,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 276.3512,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 140
},
{
"epoch": 0.57,
"eval_kl": 1.686139702796936,
"eval_logps/chosen": -127.73213195800781,
"eval_logps/rejected": -397.8934631347656,
"eval_loss": 0.02803400717675686,
"eval_rewards/chosen": 4.798709869384766,
"eval_rewards/margins": 25.2810001373291,
"eval_rewards/rejected": -20.482288360595703,
"eval_runtime": 474.6717,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 150
},
{
"epoch": 0.6,
"grad_norm": 0.31603240966796875,
"kl": 0.7387570738792419,
"learning_rate": 0.00016482939632545934,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 216.0894,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 160
},
{
"epoch": 0.68,
"grad_norm": 0.3982734978199005,
"kl": 1.4841536283493042,
"learning_rate": 0.00015958005249343832,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 8.0994,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 180
},
{
"epoch": 0.76,
"grad_norm": 2.1777164936065674,
"kl": 0.3081069588661194,
"learning_rate": 0.00015433070866141733,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 5.7214,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 200
},
{
"epoch": 0.76,
"eval_kl": 1.5452415943145752,
"eval_logps/chosen": -125.70950317382812,
"eval_logps/rejected": -412.7599182128906,
"eval_loss": 0.029857149347662926,
"eval_rewards/chosen": 5.000972270965576,
"eval_rewards/margins": 26.969900131225586,
"eval_rewards/rejected": -21.96892738342285,
"eval_runtime": 474.8305,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 200
},
{
"epoch": 0.83,
"grad_norm": 0.08603761345148087,
"kl": 1.142593264579773,
"learning_rate": 0.00014908136482939634,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 22.8407,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 220
},
{
"epoch": 0.91,
"grad_norm": 5.5862650871276855,
"kl": 0.40408754348754883,
"learning_rate": 0.00014383202099737535,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 207.9747,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 240
},
{
"epoch": 0.94,
"eval_kl": 1.182385802268982,
"eval_logps/chosen": -127.54723358154297,
"eval_logps/rejected": -806.224853515625,
"eval_loss": 0.026166189461946487,
"eval_rewards/chosen": 4.817198276519775,
"eval_rewards/margins": 66.13262176513672,
"eval_rewards/rejected": -61.315425872802734,
"eval_runtime": 474.7526,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 250
},
{
"epoch": 0.98,
"grad_norm": 0.39546045660972595,
"kl": 0.7778356671333313,
"learning_rate": 0.00013858267716535433,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 720.8411,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 260
},
{
"epoch": 1.06,
"grad_norm": 0.1680106222629547,
"kl": 1.798954963684082,
"learning_rate": 0.00013333333333333334,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 756.123,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 280
},
{
"epoch": 1.13,
"grad_norm": 0.13731369376182556,
"kl": 1.9178543090820312,
"learning_rate": 0.00012808398950131235,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 25.0348,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 300
},
{
"epoch": 1.13,
"eval_kl": 1.4845337867736816,
"eval_logps/chosen": -125.86082458496094,
"eval_logps/rejected": -901.4517211914062,
"eval_loss": 0.020615577697753906,
"eval_rewards/chosen": 4.985838890075684,
"eval_rewards/margins": 75.82396697998047,
"eval_rewards/rejected": -70.83811950683594,
"eval_runtime": 474.4234,
"eval_samples_per_second": 1.071,
"eval_steps_per_second": 0.535,
"step": 300
},
{
"epoch": 1.21,
"grad_norm": 0.35939568281173706,
"kl": 0.7613407373428345,
"learning_rate": 0.00012283464566929136,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 10.9288,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 320
},
{
"epoch": 1.28,
"grad_norm": 0.0034766767639666796,
"kl": 0.3800249993801117,
"learning_rate": 0.00011758530183727034,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 3.1951,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 340
},
{
"epoch": 1.32,
"eval_kl": 0.6363816261291504,
"eval_logps/chosen": -128.82322692871094,
"eval_logps/rejected": -1020.8375244140625,
"eval_loss": 0.026503143832087517,
"eval_rewards/chosen": 4.689598560333252,
"eval_rewards/margins": 87.4662857055664,
"eval_rewards/rejected": -82.77667999267578,
"eval_runtime": 474.6086,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 350
},
{
"epoch": 1.36,
"grad_norm": 1.1018553972244263,
"kl": 0.1121591106057167,
"learning_rate": 0.00011233595800524934,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 3.5301,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 360
},
{
"epoch": 1.43,
"grad_norm": 0.6937451958656311,
"kl": 1.0412858724594116,
"learning_rate": 0.00010708661417322836,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 2.2977,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 380
},
{
"epoch": 1.51,
"grad_norm": 0.25449952483177185,
"kl": 0.832425594329834,
"learning_rate": 0.00010183727034120735,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 68.7248,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 400
},
{
"epoch": 1.51,
"eval_kl": 1.2176250219345093,
"eval_logps/chosen": -125.15269470214844,
"eval_logps/rejected": -730.7761840820312,
"eval_loss": 0.020107893273234367,
"eval_rewards/chosen": 5.056652545928955,
"eval_rewards/margins": 58.827205657958984,
"eval_rewards/rejected": -53.77055358886719,
"eval_runtime": 474.7959,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.535,
"step": 400
}
],
"logging_steps": 20,
"max_steps": 786,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
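
For reference, a minimal sketch (not part of the checkpoint itself) of how this trainer_state.json could be read back to trace the recorded eval_loss trajectory. The local path is an assumption; Python's json module parses the NaN literals that the trainer wrote for the per-step reward statistics.

# Minimal sketch, assuming the checkpoint was downloaded to ./checkpoint-400/
import json

with open("checkpoint-400/trainer_state.json") as f:  # hypothetical local path
    state = json.load(f)  # json.load accepts the NaN literals present above

print("best eval_loss:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Only the evaluation entries (logged every eval_steps=50) carry eval_* keys;
# print the eval_loss and reward margin recorded at each evaluation step.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>3}: "
              f"eval_loss={entry['eval_loss']:.4f} "
              f"margin={entry['eval_rewards/margins']:.2f}")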