{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9990817263544536,
  "eval_steps": 100,
  "global_step": 204,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.3809523809523811e-07,
      "logits/chosen": -2.530543327331543,
      "logits/rejected": -2.3806393146514893,
      "logps/chosen": -277.43206787109375,
      "logps/rejected": -259.5929870605469,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.380952380952381e-06,
      "logits/chosen": -2.3367958068847656,
      "logits/rejected": -2.3201417922973633,
      "logps/chosen": -237.50930786132812,
      "logps/rejected": -259.6923828125,
      "loss": 0.6911,
      "rewards/accuracies": 0.5138888955116272,
      "rewards/chosen": -0.0002860191452782601,
      "rewards/margins": 0.004583699628710747,
      "rewards/rejected": -0.004869719035923481,
      "step": 10
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.761904761904762e-06,
      "logits/chosen": -2.2300033569335938,
      "logits/rejected": -2.062291383743286,
      "logps/chosen": -322.0083312988281,
      "logps/rejected": -298.00164794921875,
      "loss": 0.6623,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.05355127900838852,
      "rewards/margins": 0.07823377847671509,
      "rewards/rejected": -0.1317850649356842,
      "step": 20
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.970219740227693e-06,
      "logits/chosen": -1.7139427661895752,
      "logits/rejected": -1.5317604541778564,
      "logps/chosen": -302.0789489746094,
      "logps/rejected": -304.89666748046875,
      "loss": 0.6183,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.13164304196834564,
      "rewards/margins": 0.1831807643175125,
      "rewards/rejected": -0.31482380628585815,
      "step": 30
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.868186180746792e-06,
      "logits/chosen": -1.342703104019165,
      "logits/rejected": -1.0153623819351196,
      "logps/chosen": -317.6343688964844,
      "logps/rejected": -340.515625,
      "loss": 0.6019,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.03232262283563614,
      "rewards/margins": 0.30836719274520874,
      "rewards/rejected": -0.34068983793258667,
      "step": 40
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.696530612642871e-06,
      "logits/chosen": -0.4007461667060852,
      "logits/rejected": -0.062369205057621,
      "logps/chosen": -344.5245056152344,
      "logps/rejected": -381.02655029296875,
      "loss": 0.5788,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.4034351408481598,
      "rewards/margins": 0.35783329606056213,
      "rewards/rejected": -0.7612683773040771,
      "step": 50
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.460299516441777e-06,
      "logits/chosen": -0.4702862799167633,
      "logits/rejected": -0.10355714708566666,
      "logps/chosen": -309.2970886230469,
      "logps/rejected": -356.2215881347656,
      "loss": 0.5714,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": -0.17937391996383667,
      "rewards/margins": 0.42016029357910156,
      "rewards/rejected": -0.5995342135429382,
      "step": 60
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.1664378205239085e-06,
      "logits/chosen": -0.14899344742298126,
      "logits/rejected": 0.4177340567111969,
      "logps/chosen": -340.29766845703125,
      "logps/rejected": -379.6929626464844,
      "loss": 0.5634,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.4960289001464844,
      "rewards/margins": 0.4510241448879242,
      "rewards/rejected": -0.9470531344413757,
      "step": 70
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.8235847280454626e-06,
      "logits/chosen": -0.14341413974761963,
      "logits/rejected": 0.4753722548484802,
      "logps/chosen": -334.1756286621094,
      "logps/rejected": -374.01556396484375,
      "loss": 0.527,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": -0.3731788992881775,
      "rewards/margins": 0.5048251152038574,
      "rewards/rejected": -0.8780039548873901,
      "step": 80
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.441819734087963e-06,
      "logits/chosen": -0.20099422335624695,
      "logits/rejected": 0.4678555130958557,
      "logps/chosen": -348.76397705078125,
      "logps/rejected": -384.9765319824219,
      "loss": 0.5453,
      "rewards/accuracies": 0.7406250238418579,
      "rewards/chosen": -0.3222987651824951,
      "rewards/margins": 0.5874095559120178,
      "rewards/rejected": -0.9097083210945129,
      "step": 90
    },
    {
      "epoch": 0.49,
      "learning_rate": 3.0323662998460396e-06,
      "logits/chosen": -0.17653772234916687,
      "logits/rejected": 0.5543693900108337,
      "logps/chosen": -353.01361083984375,
      "logps/rejected": -381.8619689941406,
      "loss": 0.5359,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.41476544737815857,
      "rewards/margins": 0.5261854529380798,
      "rewards/rejected": -0.940950870513916,
      "step": 100
    },
    {
      "epoch": 0.49,
      "eval_logits/chosen": -0.04770730063319206,
      "eval_logits/rejected": 0.5653438568115234,
      "eval_logps/chosen": -348.3134765625,
      "eval_logps/rejected": -383.2900695800781,
      "eval_loss": 0.5605703592300415,
      "eval_rewards/accuracies": 0.699999988079071,
      "eval_rewards/chosen": -0.4171307384967804,
      "eval_rewards/margins": 0.5390676259994507,
      "eval_rewards/rejected": -0.9561984539031982,
      "eval_runtime": 383.9702,
      "eval_samples_per_second": 5.209,
      "eval_steps_per_second": 0.651,
      "step": 100
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.6072618954988867e-06,
      "logits/chosen": -0.024105533957481384,
      "logits/rejected": 0.6808269619941711,
      "logps/chosen": -357.918701171875,
      "logps/rejected": -408.1763610839844,
      "loss": 0.5632,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.5132647752761841,
      "rewards/margins": 0.5779321193695068,
      "rewards/rejected": -1.0911967754364014,
      "step": 110
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.1790041121336223e-06,
      "logits/chosen": -0.00899410154670477,
      "logits/rejected": 0.5235549211502075,
      "logps/chosen": -369.63299560546875,
      "logps/rejected": -408.86859130859375,
      "loss": 0.5253,
      "rewards/accuracies": 0.7281249761581421,
      "rewards/chosen": -0.640425980091095,
      "rewards/margins": 0.5291002988815308,
      "rewards/rejected": -1.1695263385772705,
      "step": 120
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.760183246631777e-06,
      "logits/chosen": 0.018746722489595413,
      "logits/rejected": 0.6442402005195618,
      "logps/chosen": -351.2228088378906,
      "logps/rejected": -409.70623779296875,
      "loss": 0.5308,
      "rewards/accuracies": 0.7281249761581421,
      "rewards/chosen": -0.4084150195121765,
      "rewards/margins": 0.7148237824440002,
      "rewards/rejected": -1.1232386827468872,
      "step": 130
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3631121611097364e-06,
      "logits/chosen": 0.052795905619859695,
      "logits/rejected": 0.5363299250602722,
      "logps/chosen": -350.0050354003906,
      "logps/rejected": -389.28875732421875,
      "loss": 0.5368,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.411121129989624,
      "rewards/margins": 0.5764644742012024,
      "rewards/rejected": -0.9875855445861816,
      "step": 140
    },
    {
      "epoch": 0.73,
      "learning_rate": 9.994642986290797e-07,
      "logits/chosen": -0.08179731667041779,
      "logits/rejected": 0.6078404784202576,
      "logps/chosen": -340.8910217285156,
      "logps/rejected": -395.11309814453125,
      "loss": 0.5396,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.4558540880680084,
      "rewards/margins": 0.681885302066803,
      "rewards/rejected": -1.1377394199371338,
      "step": 150
    },
    {
      "epoch": 0.78,
      "learning_rate": 6.799304971075383e-07,
      "logits/chosen": -0.10812912881374359,
      "logits/rejected": 0.7408884763717651,
      "logps/chosen": -367.9963684082031,
      "logps/rejected": -409.3976135253906,
      "loss": 0.5303,
      "rewards/accuracies": 0.7406250238418579,
      "rewards/chosen": -0.5040684938430786,
      "rewards/margins": 0.6103622317314148,
      "rewards/rejected": -1.1144306659698486,
      "step": 160
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.1390469071538183e-07,
      "logits/chosen": 0.11164654791355133,
      "logits/rejected": 0.6015251278877258,
      "logps/chosen": -354.7975158691406,
      "logps/rejected": -422.2373046875,
      "loss": 0.5308,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.47427788376808167,
      "rewards/margins": 0.8326796293258667,
      "rewards/rejected": -1.306957483291626,
      "step": 170
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.092077387824884e-07,
      "logits/chosen": -0.10529808700084686,
      "logits/rejected": 0.4767494201660156,
      "logps/chosen": -361.30084228515625,
      "logps/rejected": -419.2337951660156,
      "loss": 0.5424,
      "rewards/accuracies": 0.7093750238418579,
      "rewards/chosen": -0.48817354440689087,
      "rewards/margins": 0.6789232492446899,
      "rewards/rejected": -1.167096734046936,
      "step": 180
    },
    {
      "epoch": 0.93,
      "learning_rate": 7.185750133542168e-08,
      "logits/chosen": 0.029346108436584473,
      "logits/rejected": 0.39755165576934814,
      "logps/chosen": -337.2845764160156,
      "logps/rejected": -398.9493713378906,
      "loss": 0.54,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.5243724584579468,
      "rewards/margins": 0.6791526675224304,
      "rewards/rejected": -1.203525185585022,
      "step": 190
    },
    {
      "epoch": 0.98,
      "learning_rate": 5.891920784984184e-09,
      "logits/chosen": -0.1364443600177765,
      "logits/rejected": 0.5197805166244507,
      "logps/chosen": -351.525390625,
      "logps/rejected": -410.6580505371094,
      "loss": 0.5121,
      "rewards/accuracies": 0.778124988079071,
      "rewards/chosen": -0.4877961575984955,
      "rewards/margins": 0.7241436839103699,
      "rewards/rejected": -1.211940050125122,
      "step": 200
    },
    {
      "epoch": 0.98,
      "eval_logits/chosen": -0.05626865103840828,
      "eval_logits/rejected": 0.5949223637580872,
      "eval_logps/chosen": -360.6636657714844,
      "eval_logps/rejected": -399.93438720703125,
      "eval_loss": 0.5482509732246399,
      "eval_rewards/accuracies": 0.6959999799728394,
      "eval_rewards/chosen": -0.5406328439712524,
      "eval_rewards/margins": 0.5820090770721436,
      "eval_rewards/rejected": -1.122641921043396,
      "eval_runtime": 384.3787,
      "eval_samples_per_second": 5.203,
      "eval_steps_per_second": 0.65,
      "step": 200
    },
    {
      "epoch": 1.0,
      "step": 204,
      "total_flos": 0.0,
      "train_loss": 0.5619814822486803,
      "train_runtime": 9621.729,
      "train_samples_per_second": 2.716,
      "train_steps_per_second": 0.021
    }
  ],
  "logging_steps": 10,
  "max_steps": 204,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}