{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968652037617555,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006269592476489028,
      "grad_norm": 14.92016418615593,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.845900058746338,
      "logits/rejected": -2.8750576972961426,
      "logps/chosen": -339.33447265625,
      "logps/pi_response": -84.25662994384766,
      "logps/ref_response": -84.25662994384766,
      "logps/rejected": -136.8517303466797,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06269592476489028,
      "grad_norm": 10.888267743448523,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.778717041015625,
      "logits/rejected": -2.757580280303955,
      "logps/chosen": -226.78994750976562,
      "logps/pi_response": -69.23085021972656,
      "logps/ref_response": -68.98346710205078,
      "logps/rejected": -125.45468139648438,
      "loss": 0.692,
      "rewards/accuracies": 0.4930555522441864,
      "rewards/chosen": 0.0016165798297151923,
      "rewards/margins": 0.003002046374604106,
      "rewards/rejected": -0.0013854664284735918,
      "step": 10
    },
    {
      "epoch": 0.12539184952978055,
      "grad_norm": 9.519679472408939,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.7556426525115967,
      "logits/rejected": -2.740095615386963,
      "logps/chosen": -211.00942993164062,
      "logps/pi_response": -76.92619323730469,
      "logps/ref_response": -70.64810943603516,
      "logps/rejected": -106.86106872558594,
      "loss": 0.6637,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.006867959164083004,
      "rewards/margins": 0.04612569510936737,
      "rewards/rejected": -0.03925773501396179,
      "step": 20
    },
    {
      "epoch": 0.18808777429467086,
      "grad_norm": 7.098301149383853,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.6341042518615723,
      "logits/rejected": -2.6018269062042236,
      "logps/chosen": -237.74984741210938,
      "logps/pi_response": -98.3823013305664,
      "logps/ref_response": -65.06692504882812,
      "logps/rejected": -122.42607116699219,
      "loss": 0.618,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.08040433377027512,
      "rewards/margins": 0.18929488956928253,
      "rewards/rejected": -0.26969921588897705,
      "step": 30
    },
    {
      "epoch": 0.2507836990595611,
      "grad_norm": 9.130640915503477,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.528998851776123,
      "logits/rejected": -2.5069429874420166,
      "logps/chosen": -276.9578857421875,
      "logps/pi_response": -141.6453857421875,
      "logps/ref_response": -69.38419342041016,
      "logps/rejected": -177.18133544921875,
      "loss": 0.5824,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.23818793892860413,
      "rewards/margins": 0.40282946825027466,
      "rewards/rejected": -0.6410173177719116,
      "step": 40
    },
    {
      "epoch": 0.31347962382445144,
      "grad_norm": 8.866163012880673,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.5575942993164062,
      "logits/rejected": -2.532439708709717,
      "logps/chosen": -289.70233154296875,
      "logps/pi_response": -166.5010223388672,
      "logps/ref_response": -67.87925720214844,
      "logps/rejected": -180.68893432617188,
      "loss": 0.5712,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.36984091997146606,
      "rewards/margins": 0.5151764154434204,
      "rewards/rejected": -0.8850172758102417,
      "step": 50
    },
    {
      "epoch": 0.3761755485893417,
      "grad_norm": 13.516613178785937,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.553635835647583,
      "logits/rejected": -2.5055148601531982,
      "logps/chosen": -218.26220703125,
      "logps/pi_response": -163.2755584716797,
      "logps/ref_response": -64.52303314208984,
      "logps/rejected": -200.37921142578125,
      "loss": 0.5567,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.3870890140533447,
      "rewards/margins": 0.43599042296409607,
      "rewards/rejected": -0.8230794668197632,
      "step": 60
    },
    {
      "epoch": 0.438871473354232,
      "grad_norm": 11.23129211624929,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.585047721862793,
      "logits/rejected": -2.546762228012085,
      "logps/chosen": -258.9521789550781,
      "logps/pi_response": -190.20260620117188,
      "logps/ref_response": -68.9866714477539,
      "logps/rejected": -210.7947235107422,
      "loss": 0.5237,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.46116265654563904,
      "rewards/margins": 0.6260020136833191,
      "rewards/rejected": -1.0871646404266357,
      "step": 70
    },
    {
      "epoch": 0.5015673981191222,
      "grad_norm": 14.241762559238571,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.6193995475769043,
      "logits/rejected": -2.5861072540283203,
      "logps/chosen": -253.4519500732422,
      "logps/pi_response": -183.41494750976562,
      "logps/ref_response": -72.47793579101562,
      "logps/rejected": -203.38510131835938,
      "loss": 0.5301,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.41766557097435,
      "rewards/margins": 0.5782700777053833,
      "rewards/rejected": -0.9959356188774109,
      "step": 80
    },
    {
      "epoch": 0.5642633228840125,
      "grad_norm": 12.061582086504794,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -2.5625717639923096,
      "logits/rejected": -2.510937213897705,
      "logps/chosen": -296.7856140136719,
      "logps/pi_response": -197.53363037109375,
      "logps/ref_response": -69.97444152832031,
      "logps/rejected": -216.2652130126953,
      "loss": 0.5138,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.5185791850090027,
      "rewards/margins": 0.6481128931045532,
      "rewards/rejected": -1.1666921377182007,
      "step": 90
    },
    {
      "epoch": 0.6269592476489029,
      "grad_norm": 12.761867748586539,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -2.5809009075164795,
      "logits/rejected": -2.5340776443481445,
      "logps/chosen": -302.91339111328125,
      "logps/pi_response": -206.8216552734375,
      "logps/ref_response": -67.3431625366211,
      "logps/rejected": -229.6073455810547,
      "loss": 0.5151,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.5213590860366821,
      "rewards/margins": 0.7751049399375916,
      "rewards/rejected": -1.2964640855789185,
      "step": 100
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 14.424999759406585,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -2.5221171379089355,
      "logits/rejected": -2.4842729568481445,
      "logps/chosen": -236.73046875,
      "logps/pi_response": -175.51455688476562,
      "logps/ref_response": -60.56214141845703,
      "logps/rejected": -212.31582641601562,
      "loss": 0.5032,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.48990482091903687,
      "rewards/margins": 0.5828049778938293,
      "rewards/rejected": -1.0727096796035767,
      "step": 110
    },
    {
      "epoch": 0.7523510971786834,
      "grad_norm": 45.50922725986238,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -2.5453531742095947,
      "logits/rejected": -2.499166488647461,
      "logps/chosen": -265.3163146972656,
      "logps/pi_response": -199.43765258789062,
      "logps/ref_response": -63.73808670043945,
      "logps/rejected": -221.1977996826172,
      "loss": 0.4952,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.47311264276504517,
      "rewards/margins": 0.8587131500244141,
      "rewards/rejected": -1.331825852394104,
      "step": 120
    },
    {
      "epoch": 0.8150470219435737,
      "grad_norm": 14.647279184181341,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -2.524919033050537,
      "logits/rejected": -2.497668743133545,
      "logps/chosen": -285.549072265625,
      "logps/pi_response": -207.68014526367188,
      "logps/ref_response": -70.44617462158203,
      "logps/rejected": -234.6571807861328,
      "loss": 0.4974,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.4377771019935608,
      "rewards/margins": 0.8497930765151978,
      "rewards/rejected": -1.2875702381134033,
      "step": 130
    },
    {
      "epoch": 0.877742946708464,
      "grad_norm": 18.06269511779979,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -2.533571481704712,
      "logits/rejected": -2.4829599857330322,
      "logps/chosen": -296.2190246582031,
      "logps/pi_response": -209.37374877929688,
      "logps/ref_response": -70.29788208007812,
      "logps/rejected": -232.8479461669922,
      "loss": 0.5001,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.581600546836853,
      "rewards/margins": 0.6965141296386719,
      "rewards/rejected": -1.2781145572662354,
      "step": 140
    },
    {
      "epoch": 0.9404388714733543,
      "grad_norm": 15.171255483252263,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -2.5554707050323486,
      "logits/rejected": -2.50273060798645,
      "logps/chosen": -289.8814392089844,
      "logps/pi_response": -220.58633422851562,
      "logps/ref_response": -73.47826385498047,
      "logps/rejected": -257.1234130859375,
      "loss": 0.5001,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.5721909403800964,
      "rewards/margins": 0.8123014569282532,
      "rewards/rejected": -1.3844925165176392,
      "step": 150
    },
    {
      "epoch": 0.9968652037617555,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.5464447999150498,
      "train_runtime": 3432.1011,
      "train_samples_per_second": 5.937,
      "train_steps_per_second": 0.046
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}