{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984,
  "eval_steps": 500,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "eta": 0.0010000000474974513,
      "grad_norm": 14.26872732684494,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.0110533237457275,
      "logits/rejected": -2.249422550201416,
      "logps/chosen": -141.09288024902344,
      "logps/pi_response": -121.86016845703125,
      "logps/ref_response": -121.86016845703125,
      "logps/rejected": -141.46664428710938,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "eta": 0.0010000000474974513,
      "grad_norm": 17.697699996792654,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.2928249835968018,
      "logits/rejected": -2.312570810317993,
      "logps/chosen": -172.20089721679688,
      "logps/pi_response": -145.86224365234375,
      "logps/ref_response": -144.9134979248047,
      "logps/rejected": -177.94036865234375,
      "loss": 0.6924,
      "rewards/accuracies": 0.40625,
      "rewards/chosen": -0.007550257723778486,
      "rewards/margins": 0.0005382309318520129,
      "rewards/rejected": -0.008088488131761551,
      "step": 10
    },
    {
      "epoch": 0.13,
      "eta": 0.0010000000474974513,
      "grad_norm": 19.324417294835,
      "learning_rate": 4.989935734988097e-07,
      "logits/chosen": -2.3221993446350098,
      "logits/rejected": -2.2393600940704346,
      "logps/chosen": -194.87570190429688,
      "logps/pi_response": -169.71463012695312,
      "logps/ref_response": -142.37953186035156,
      "logps/rejected": -202.0398712158203,
      "loss": 0.6957,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.2509865462779999,
      "rewards/margins": 0.005367345176637173,
      "rewards/rejected": -0.25635388493537903,
      "step": 20
    },
    {
      "epoch": 0.19,
      "eta": 0.0010000000474974513,
      "grad_norm": 22.196329662655977,
      "learning_rate": 4.877641290737883e-07,
      "logits/chosen": -2.3096923828125,
      "logits/rejected": -2.27471923828125,
      "logps/chosen": -206.6990966796875,
      "logps/pi_response": -181.54678344726562,
      "logps/ref_response": -146.62493896484375,
      "logps/rejected": -208.9296112060547,
      "loss": 0.6937,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.3044261336326599,
      "rewards/margins": 0.00788620300590992,
      "rewards/rejected": -0.3123123049736023,
      "step": 30
    },
    {
      "epoch": 0.26,
      "eta": 0.0010000000474974513,
      "grad_norm": 17.53383943563289,
      "learning_rate": 4.646121984004665e-07,
      "logits/chosen": -2.53867244720459,
      "logits/rejected": -2.4202938079833984,
      "logps/chosen": -176.26608276367188,
      "logps/pi_response": -150.63552856445312,
      "logps/ref_response": -143.65682983398438,
      "logps/rejected": -177.16404724121094,
      "loss": 0.6915,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -0.06383548676967621,
      "rewards/margins": 0.021249257028102875,
      "rewards/rejected": -0.08508473634719849,
      "step": 40
    },
    {
      "epoch": 0.32,
      "eta": 0.0010000000474974513,
      "grad_norm": 22.495476546149213,
      "learning_rate": 4.3069871595684787e-07,
      "logits/chosen": -2.456916570663452,
      "logits/rejected": -2.314687728881836,
      "logps/chosen": -193.93612670898438,
      "logps/pi_response": -164.81993103027344,
      "logps/ref_response": -150.46957397460938,
      "logps/rejected": -194.20321655273438,
      "loss": 0.6832,
      "rewards/accuracies": 0.5093749761581421,
      "rewards/chosen": -0.10493893921375275,
      "rewards/margins": 0.04571269080042839,
      "rewards/rejected": -0.15065164864063263,
      "step": 50
    },
    {
      "epoch": 0.38,
      "eta": 0.0010000000474974513,
      "grad_norm": 16.374382979297494,
      "learning_rate": 3.877242453630256e-07,
      "logits/chosen": -2.306607484817505,
      "logits/rejected": -2.2804460525512695,
      "logps/chosen": -239.197998046875,
      "logps/pi_response": -211.57894897460938,
      "logps/ref_response": -151.56642150878906,
      "logps/rejected": -241.3532257080078,
      "loss": 0.699,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -0.5582221150398254,
      "rewards/margins": 0.025322098284959793,
      "rewards/rejected": -0.583544135093689,
      "step": 60
    },
    {
      "epoch": 0.45,
      "eta": 0.0010000000474974513,
      "grad_norm": 14.529411357126248,
      "learning_rate": 3.378437060203357e-07,
      "logits/chosen": -2.3658206462860107,
      "logits/rejected": -2.25939679145813,
      "logps/chosen": -212.8553009033203,
      "logps/pi_response": -183.07717895507812,
      "logps/ref_response": -146.43084716796875,
      "logps/rejected": -210.58859252929688,
      "loss": 0.689,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -0.3529232144355774,
      "rewards/margins": 0.01966114342212677,
      "rewards/rejected": -0.37258434295654297,
      "step": 70
    },
    {
      "epoch": 0.51,
      "eta": 0.0010000000474974513,
      "grad_norm": 16.20000732293231,
      "learning_rate": 2.8355831645441387e-07,
      "logits/chosen": -2.3814728260040283,
      "logits/rejected": -2.416779041290283,
      "logps/chosen": -198.688232421875,
      "logps/pi_response": -169.83807373046875,
      "logps/ref_response": -141.36282348632812,
      "logps/rejected": -201.0040740966797,
      "loss": 0.6799,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -0.26767629384994507,
      "rewards/margins": 0.015592445619404316,
      "rewards/rejected": -0.2832687199115753,
      "step": 80
    },
    {
      "epoch": 0.58,
      "eta": 0.0010000000474974513,
      "grad_norm": 27.20849020997422,
      "learning_rate": 2.2759017277414164e-07,
      "logits/chosen": -2.282968044281006,
      "logits/rejected": -2.3503777980804443,
      "logps/chosen": -211.1111297607422,
      "logps/pi_response": -184.20970153808594,
      "logps/ref_response": -149.5955352783203,
      "logps/rejected": -220.11575317382812,
      "loss": 0.6789,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.34969550371170044,
      "rewards/margins": 0.02153094671666622,
      "rewards/rejected": -0.3712264895439148,
      "step": 90
    },
    {
      "epoch": 0.64,
      "eta": 0.0010000000474974513,
      "grad_norm": 16.29698307520508,
      "learning_rate": 1.7274575140626315e-07,
      "logits/chosen": -2.320495367050171,
      "logits/rejected": -2.224569797515869,
      "logps/chosen": -212.6381378173828,
      "logps/pi_response": -183.83807373046875,
      "logps/ref_response": -140.60658264160156,
      "logps/rejected": -214.096435546875,
      "loss": 0.6853,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.41716060042381287,
      "rewards/margins": 0.0342952124774456,
      "rewards/rejected": -0.45145583152770996,
      "step": 100
    },
    {
      "epoch": 0.7,
      "eta": 0.0010000000474974513,
      "grad_norm": 19.009812577905546,
      "learning_rate": 1.2177518064852348e-07,
      "logits/chosen": -2.3668313026428223,
      "logits/rejected": -2.2519187927246094,
      "logps/chosen": -211.2959442138672,
      "logps/pi_response": -180.33094787597656,
      "logps/ref_response": -139.77435302734375,
      "logps/rejected": -211.1301727294922,
      "loss": 0.6808,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.36967089772224426,
      "rewards/margins": 0.052646201103925705,
      "rewards/rejected": -0.42231711745262146,
      "step": 110
    },
    {
      "epoch": 0.77,
      "eta": 0.0010000000474974513,
      "grad_norm": 19.269930008011357,
      "learning_rate": 7.723433775328384e-08,
      "logits/chosen": -2.306391716003418,
      "logits/rejected": -2.3454627990722656,
      "logps/chosen": -211.0600128173828,
      "logps/pi_response": -182.8990936279297,
      "logps/ref_response": -149.42483520507812,
      "logps/rejected": -218.762939453125,
      "loss": 0.6813,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.2972378134727478,
      "rewards/margins": 0.06230615824460983,
      "rewards/rejected": -0.35954394936561584,
      "step": 120
    },
    {
      "epoch": 0.83,
      "eta": 0.0010000000474974513,
      "grad_norm": 21.30435634566315,
      "learning_rate": 4.1356686569674335e-08,
      "logits/chosen": -2.2520415782928467,
      "logits/rejected": -2.2255797386169434,
      "logps/chosen": -204.5604248046875,
      "logps/pi_response": -179.6775665283203,
      "logps/ref_response": -142.9306640625,
      "logps/rejected": -207.15036010742188,
      "loss": 0.6781,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -0.336225688457489,
      "rewards/margins": 0.039131637662649155,
      "rewards/rejected": -0.37535732984542847,
      "step": 130
    },
    {
      "epoch": 0.9,
      "eta": 0.0010000000474974513,
      "grad_norm": 17.88461135981544,
      "learning_rate": 1.5941282340065697e-08,
      "logits/chosen": -2.3840537071228027,
      "logits/rejected": -2.356271982192993,
      "logps/chosen": -196.31777954101562,
      "logps/pi_response": -173.85084533691406,
      "logps/ref_response": -141.44508361816406,
      "logps/rejected": -209.9248046875,
      "loss": 0.6732,
      "rewards/accuracies": 0.596875011920929,
      "rewards/chosen": -0.3001561164855957,
      "rewards/margins": 0.08288339525461197,
      "rewards/rejected": -0.38303953409194946,
      "step": 140
    },
    {
      "epoch": 0.96,
      "eta": 0.0010000000474974513,
      "grad_norm": 21.25233665353108,
      "learning_rate": 2.2625595580163247e-09,
      "logits/chosen": -2.219001293182373,
      "logits/rejected": -2.2412266731262207,
      "logps/chosen": -209.44009399414062,
      "logps/pi_response": -184.37173461914062,
      "logps/ref_response": -143.34500122070312,
      "logps/rejected": -216.7500457763672,
      "loss": 0.6747,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.3745076060295105,
      "rewards/margins": 0.06318662315607071,
      "rewards/rejected": -0.4376942217350006,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 156,
      "total_flos": 0.0,
      "train_loss": 0.6841066911434516,
      "train_runtime": 31793.1415,
      "train_samples_per_second": 0.629,
      "train_steps_per_second": 0.005
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}