|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9992429977289932,
  "eval_steps": 100,
  "global_step": 165,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 2.9411764705882356e-07,
      "logits/chosen": -2.4206371307373047,
      "logits/rejected": -2.4444966316223145,
      "logps/chosen": -285.239013671875,
      "logps/rejected": -269.48089599609375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 2.9411764705882355e-06,
      "logits/chosen": -2.4849026203155518,
      "logits/rejected": -2.361117362976074,
      "logps/chosen": -271.2438659667969,
      "logps/rejected": -257.04443359375,
      "loss": 0.689,
      "rewards/accuracies": 0.5208333134651184,
      "rewards/chosen": -0.010696162469685078,
      "rewards/margins": 0.008694707415997982,
      "rewards/rejected": -0.01939087174832821,
      "step": 10
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.994932636402032e-06,
      "logits/chosen": -2.162980318069458,
      "logits/rejected": -2.0703816413879395,
      "logps/chosen": -297.11322021484375,
      "logps/rejected": -317.59942626953125,
      "loss": 0.6434,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": -0.3237840533256531,
      "rewards/margins": 0.11028959602117538,
      "rewards/rejected": -0.43407368659973145,
      "step": 20
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.905416503522124e-06,
      "logits/chosen": -1.9069904088974,
      "logits/rejected": -1.5060116052627563,
      "logps/chosen": -316.3459777832031,
      "logps/rejected": -310.5841064453125,
      "loss": 0.594,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.2567785978317261,
      "rewards/margins": 0.2907828688621521,
      "rewards/rejected": -0.5475614666938782,
      "step": 30
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.707922373336524e-06,
      "logits/chosen": -1.2782038450241089,
      "logits/rejected": -1.1451029777526855,
      "logps/chosen": -334.9066467285156,
      "logps/rejected": -359.5328674316406,
      "loss": 0.6188,
      "rewards/accuracies": 0.6968749761581421,
      "rewards/chosen": -0.6329762935638428,
      "rewards/margins": 0.26158708333969116,
      "rewards/rejected": -0.8945633769035339,
      "step": 40
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.411315662967732e-06,
      "logits/chosen": -1.3727542161941528,
      "logits/rejected": -1.0713155269622803,
      "logps/chosen": -302.04876708984375,
      "logps/rejected": -323.1622009277344,
      "loss": 0.5896,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.33613428473472595,
      "rewards/margins": 0.32212409377098083,
      "rewards/rejected": -0.658258318901062,
      "step": 50
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.028910905897229e-06,
      "logits/chosen": -1.2992875576019287,
      "logits/rejected": -0.787635862827301,
      "logps/chosen": -339.11273193359375,
      "logps/rejected": -345.2218017578125,
      "loss": 0.5665,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": -0.41970473527908325,
      "rewards/margins": 0.42981767654418945,
      "rewards/rejected": -0.8495222926139832,
      "step": 60
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.577874068920446e-06,
      "logits/chosen": -1.1777417659759521,
      "logits/rejected": -0.7731892466545105,
      "logps/chosen": -331.8875427246094,
      "logps/rejected": -348.7061767578125,
      "loss": 0.5574,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.45568642020225525,
      "rewards/margins": 0.4189928472042084,
      "rewards/rejected": -0.8746792674064636,
      "step": 70
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.0784519801008546e-06,
      "logits/chosen": -1.0813881158828735,
      "logits/rejected": -0.695389986038208,
      "logps/chosen": -328.33599853515625,
      "logps/rejected": -378.7125549316406,
      "loss": 0.5757,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.41106706857681274,
      "rewards/margins": 0.49909764528274536,
      "rewards/rejected": -0.9101647138595581,
      "step": 80
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.553063458334059e-06,
      "logits/chosen": -1.1919687986373901,
      "logits/rejected": -0.7594654560089111,
      "logps/chosen": -311.298095703125,
      "logps/rejected": -356.6019592285156,
      "loss": 0.5572,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.4122340679168701,
      "rewards/margins": 0.47736555337905884,
      "rewards/rejected": -0.889599621295929,
      "step": 90
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.025292943281429e-06,
      "logits/chosen": -1.1734464168548584,
      "logits/rejected": -0.8028995394706726,
      "logps/chosen": -314.7601013183594,
      "logps/rejected": -354.06524658203125,
      "loss": 0.5669,
      "rewards/accuracies": 0.7281249761581421,
      "rewards/chosen": -0.37006908655166626,
      "rewards/margins": 0.4851018786430359,
      "rewards/rejected": -0.8551709055900574,
      "step": 100
    },
    {
      "epoch": 0.61,
      "eval_logits/chosen": -1.1060620546340942,
      "eval_logits/rejected": -0.6833169460296631,
      "eval_logps/chosen": -329.36163330078125,
      "eval_logps/rejected": -354.41693115234375,
      "eval_loss": 0.5602970123291016,
      "eval_rewards/accuracies": 0.6980000138282776,
      "eval_rewards/chosen": -0.4443123936653137,
      "eval_rewards/margins": 0.428010493516922,
      "eval_rewards/rejected": -0.8723229169845581,
      "eval_runtime": 384.7387,
      "eval_samples_per_second": 5.198,
      "eval_steps_per_second": 0.65,
      "step": 100
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.5188318011445907e-06,
      "logits/chosen": -1.08719801902771,
      "logits/rejected": -0.7771456241607666,
      "logps/chosen": -318.55706787109375,
      "logps/rejected": -360.88641357421875,
      "loss": 0.5582,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.4783196449279785,
      "rewards/margins": 0.4534730315208435,
      "rewards/rejected": -0.9317927360534668,
      "step": 110
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0564148305586296e-06,
      "logits/chosen": -1.264317274093628,
      "logits/rejected": -0.6377102136611938,
      "logps/chosen": -338.55364990234375,
      "logps/rejected": -344.6549072265625,
      "loss": 0.552,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.4587250351905823,
      "rewards/margins": 0.43041476607322693,
      "rewards/rejected": -0.8891397714614868,
      "step": 120
    },
    {
      "epoch": 0.79,
      "learning_rate": 6.587997083462197e-07,
      "logits/chosen": -1.205304503440857,
      "logits/rejected": -0.7666274905204773,
      "logps/chosen": -337.1952209472656,
      "logps/rejected": -378.85015869140625,
      "loss": 0.5546,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.481029212474823,
      "rewards/margins": 0.47466927766799927,
      "rewards/rejected": -0.9556983709335327,
      "step": 130
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.438351873250492e-07,
      "logits/chosen": -1.018686056137085,
      "logits/rejected": -0.5739496946334839,
      "logps/chosen": -331.6724853515625,
      "logps/rejected": -385.0194396972656,
      "loss": 0.5522,
      "rewards/accuracies": 0.7593749761581421,
      "rewards/chosen": -0.5046229958534241,
      "rewards/margins": 0.5355193018913269,
      "rewards/rejected": -1.040142297744751,
      "step": 140
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.2565987432367032e-07,
      "logits/chosen": -1.0615447759628296,
      "logits/rejected": -0.6152147650718689,
      "logps/chosen": -330.9660949707031,
      "logps/rejected": -370.95867919921875,
      "loss": 0.5513,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.4836733937263489,
      "rewards/margins": 0.5048274397850037,
      "rewards/rejected": -0.9885008931159973,
      "step": 150
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.4067554877743861e-08,
      "logits/chosen": -1.0293254852294922,
      "logits/rejected": -0.6487714052200317,
      "logps/chosen": -317.6270751953125,
      "logps/rejected": -361.36053466796875,
      "loss": 0.5338,
      "rewards/accuracies": 0.734375,
      "rewards/chosen": -0.4425296187400818,
      "rewards/margins": 0.528509259223938,
      "rewards/rejected": -0.9710389375686646,
      "step": 160
    },
    {
      "epoch": 1.0,
      "step": 165,
      "total_flos": 0.0,
      "train_loss": 0.5778744011214285,
      "train_runtime": 7573.0657,
      "train_samples_per_second": 2.791,
      "train_steps_per_second": 0.022
    }
  ],
  "logging_steps": 10,
  "max_steps": 165,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|