{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 40,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.025,
"grad_norm": 6.698416653714476,
"learning_rate": 1.25e-07,
"logits/chosen": -2.8582587242126465,
"logits/rejected": -2.842068910598755,
"logps/chosen": -261.8958435058594,
"logps/rejected": -226.5897216796875,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.25,
"grad_norm": 6.883361102730669,
"learning_rate": 4.6650635094610966e-07,
"logits/chosen": -2.7813661098480225,
"logits/rejected": -2.772981643676758,
"logps/chosen": -254.5003204345703,
"logps/rejected": -251.1380157470703,
"loss": 0.6908,
"rewards/accuracies": 0.5381944179534912,
"rewards/chosen": 0.005406244192272425,
"rewards/margins": 0.0035838009789586067,
"rewards/rejected": 0.001822443911805749,
"step": 10
},
{
"epoch": 0.5,
"grad_norm": 6.43798743744834,
"learning_rate": 2.934120444167326e-07,
"logits/chosen": -2.7719171047210693,
"logits/rejected": -2.7447400093078613,
"logps/chosen": -265.3804931640625,
"logps/rejected": -252.05618286132812,
"loss": 0.6742,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": 0.03401705250144005,
"rewards/margins": 0.04408254474401474,
"rewards/rejected": -0.01006548572331667,
"step": 20
},
{
"epoch": 0.75,
"grad_norm": 7.502439531203193,
"learning_rate": 8.930309757836516e-08,
"logits/chosen": -2.761791467666626,
"logits/rejected": -2.740384578704834,
"logps/chosen": -258.3692626953125,
"logps/rejected": -249.21566772460938,
"loss": 0.6515,
"rewards/accuracies": 0.6781250238418579,
"rewards/chosen": -0.02331426367163658,
"rewards/margins": 0.07361713796854019,
"rewards/rejected": -0.09693139791488647,
"step": 30
},
{
"epoch": 1.0,
"grad_norm": 7.122574066847829,
"learning_rate": 0.0,
"logits/chosen": -2.7583651542663574,
"logits/rejected": -2.733116626739502,
"logps/chosen": -266.1463317871094,
"logps/rejected": -273.2423095703125,
"loss": 0.6468,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -0.05725393444299698,
"rewards/margins": 0.06221733242273331,
"rewards/rejected": -0.11947127431631088,
"step": 40
},
{
"epoch": 1.0,
"step": 40,
"total_flos": 0.0,
"train_loss": 0.6658878326416016,
"train_runtime": 1111.8376,
"train_samples_per_second": 9.164,
"train_steps_per_second": 0.036
}
],
"logging_steps": 10,
"max_steps": 40,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}