{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 100,
"global_step": 192,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010416666666666666,
"grad_norm": 24.66960228608589,
"learning_rate": 2.0229548209232687e-08,
"logits/chosen": -2.590585231781006,
"logits/rejected": -2.5664222240448,
"logps/chosen": -80.29847717285156,
"logps/rejected": -53.10200881958008,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.10416666666666667,
"grad_norm": 22.00841805745166,
"learning_rate": 2.0229548209232686e-07,
"logits/chosen": -2.5565450191497803,
"logits/rejected": -2.5385091304779053,
"logps/chosen": -87.94749450683594,
"logps/rejected": -81.03849792480469,
"loss": 0.6933,
"rewards/accuracies": 0.1666666716337204,
"rewards/chosen": -0.0012115496210753918,
"rewards/margins": -0.001211885130032897,
"rewards/rejected": 3.3554130141055794e-07,
"step": 10
},
{
"epoch": 0.20833333333333334,
"grad_norm": 19.440282490720026,
"learning_rate": 4.045909641846537e-07,
"logits/chosen": -2.612945318222046,
"logits/rejected": -2.5642082691192627,
"logps/chosen": -103.17449951171875,
"logps/rejected": -89.7829360961914,
"loss": 0.6911,
"rewards/accuracies": 0.3187499940395355,
"rewards/chosen": 0.006612158380448818,
"rewards/margins": 0.0028112882282584906,
"rewards/rejected": 0.0038008708506822586,
"step": 20
},
{
"epoch": 0.3125,
"grad_norm": 25.805649105098475,
"learning_rate": 3.8688218719717384e-07,
"logits/chosen": -2.508690357208252,
"logits/rejected": -2.5234837532043457,
"logps/chosen": -66.37418365478516,
"logps/rejected": -74.30455017089844,
"loss": 0.6851,
"rewards/accuracies": 0.3187499940395355,
"rewards/chosen": 0.03983448073267937,
"rewards/margins": 0.016553020104765892,
"rewards/rejected": 0.023281460627913475,
"step": 30
},
{
"epoch": 0.4166666666666667,
"grad_norm": 17.133204904463383,
"learning_rate": 3.6917341020969396e-07,
"logits/chosen": -2.555058479309082,
"logits/rejected": -2.5424702167510986,
"logps/chosen": -69.98268127441406,
"logps/rejected": -69.5423583984375,
"loss": 0.6777,
"rewards/accuracies": 0.28125,
"rewards/chosen": 0.08108103275299072,
"rewards/margins": 0.03720525652170181,
"rewards/rejected": 0.04387578368186951,
"step": 40
},
{
"epoch": 0.5208333333333334,
"grad_norm": 22.625402130027865,
"learning_rate": 3.5146463322221414e-07,
"logits/chosen": -2.4543604850769043,
"logits/rejected": -2.467179775238037,
"logps/chosen": -49.026615142822266,
"logps/rejected": -58.0882568359375,
"loss": 0.6703,
"rewards/accuracies": 0.23125000298023224,
"rewards/chosen": 0.07023879140615463,
"rewards/margins": 0.03948055952787399,
"rewards/rejected": 0.030758222565054893,
"step": 50
},
{
"epoch": 0.625,
"grad_norm": 23.61323531343505,
"learning_rate": 3.3375585623473426e-07,
"logits/chosen": -2.530721664428711,
"logits/rejected": -2.5115602016448975,
"logps/chosen": -78.50758361816406,
"logps/rejected": -81.587158203125,
"loss": 0.6625,
"rewards/accuracies": 0.32499998807907104,
"rewards/chosen": 0.02104257419705391,
"rewards/margins": 0.1053396612405777,
"rewards/rejected": -0.08429709076881409,
"step": 60
},
{
"epoch": 0.7291666666666666,
"grad_norm": 21.448302962597314,
"learning_rate": 3.1604707924725434e-07,
"logits/chosen": -2.4924867153167725,
"logits/rejected": -2.4797446727752686,
"logps/chosen": -92.79881286621094,
"logps/rejected": -86.05406188964844,
"loss": 0.6497,
"rewards/accuracies": 0.35624998807907104,
"rewards/chosen": 0.13287237286567688,
"rewards/margins": 0.15550200641155243,
"rewards/rejected": -0.02262965776026249,
"step": 70
},
{
"epoch": 0.8333333333333334,
"grad_norm": 34.07385900953939,
"learning_rate": 2.983383022597745e-07,
"logits/chosen": -2.470043182373047,
"logits/rejected": -2.412937879562378,
"logps/chosen": -82.65133666992188,
"logps/rejected": -82.9260482788086,
"loss": 0.636,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": 0.11218090355396271,
"rewards/margins": 0.24496188759803772,
"rewards/rejected": -0.13278096914291382,
"step": 80
},
{
"epoch": 0.9375,
"grad_norm": 29.698805422230546,
"learning_rate": 2.806295252722946e-07,
"logits/chosen": -2.402101755142212,
"logits/rejected": -2.3976664543151855,
"logps/chosen": -55.17719650268555,
"logps/rejected": -69.18394470214844,
"loss": 0.6461,
"rewards/accuracies": 0.26875001192092896,
"rewards/chosen": -0.04486609995365143,
"rewards/margins": 0.1693090796470642,
"rewards/rejected": -0.21417517960071564,
"step": 90
},
{
"epoch": 1.0416666666666667,
"grad_norm": 22.229206377048136,
"learning_rate": 2.6292074828481476e-07,
"logits/chosen": -2.388129711151123,
"logits/rejected": -2.3694980144500732,
"logps/chosen": -71.2069320678711,
"logps/rejected": -80.87518310546875,
"loss": 0.5735,
"rewards/accuracies": 0.4124999940395355,
"rewards/chosen": 0.08674298971891403,
"rewards/margins": 0.4183635711669922,
"rewards/rejected": -0.33162060379981995,
"step": 100
},
{
"epoch": 1.0416666666666667,
"eval_logits/chosen": -2.4165902137756348,
"eval_logits/rejected": -2.3982186317443848,
"eval_logps/chosen": -79.08678436279297,
"eval_logps/rejected": -91.30322265625,
"eval_loss": 0.6444148421287537,
"eval_rewards/accuracies": 0.32341268658638,
"eval_rewards/chosen": -0.1721993237733841,
"eval_rewards/margins": 0.2090616375207901,
"eval_rewards/rejected": -0.3812609612941742,
"eval_runtime": 113.7367,
"eval_samples_per_second": 17.584,
"eval_steps_per_second": 0.554,
"step": 100
},
{
"epoch": 1.1458333333333333,
"grad_norm": 18.970483409354923,
"learning_rate": 2.452119712973349e-07,
"logits/chosen": -2.379981279373169,
"logits/rejected": -2.420714855194092,
"logps/chosen": -61.6137809753418,
"logps/rejected": -94.87492370605469,
"loss": 0.5017,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": 0.1406974196434021,
"rewards/margins": 0.6424610018730164,
"rewards/rejected": -0.5017635822296143,
"step": 110
},
{
"epoch": 1.25,
"grad_norm": 25.179155759555773,
"learning_rate": 2.27503194309855e-07,
"logits/chosen": -2.4509730339050293,
"logits/rejected": -2.412595510482788,
"logps/chosen": -103.92436218261719,
"logps/rejected": -112.41646575927734,
"loss": 0.5125,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": 0.1529318243265152,
"rewards/margins": 0.8678812980651855,
"rewards/rejected": -0.7149494886398315,
"step": 120
},
{
"epoch": 1.3541666666666667,
"grad_norm": 24.005615665444957,
"learning_rate": 2.0979441732237514e-07,
"logits/chosen": -2.365518093109131,
"logits/rejected": -2.3705790042877197,
"logps/chosen": -82.37178802490234,
"logps/rejected": -104.2152328491211,
"loss": 0.4926,
"rewards/accuracies": 0.4312500059604645,
"rewards/chosen": 0.15715977549552917,
"rewards/margins": 0.7146759033203125,
"rewards/rejected": -0.5575161576271057,
"step": 130
},
{
"epoch": 1.4583333333333333,
"grad_norm": 30.460528822298023,
"learning_rate": 1.9208564033489532e-07,
"logits/chosen": -2.3874213695526123,
"logits/rejected": -2.410029888153076,
"logps/chosen": -52.55329513549805,
"logps/rejected": -84.75013732910156,
"loss": 0.5088,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -0.12236322462558746,
"rewards/margins": 0.6131333708763123,
"rewards/rejected": -0.7354966402053833,
"step": 140
},
{
"epoch": 1.5625,
"grad_norm": 23.593513513479692,
"learning_rate": 1.7437686334741544e-07,
"logits/chosen": -2.447330951690674,
"logits/rejected": -2.4216654300689697,
"logps/chosen": -73.29743957519531,
"logps/rejected": -96.33416748046875,
"loss": 0.4942,
"rewards/accuracies": 0.4312500059604645,
"rewards/chosen": -0.04307596758008003,
"rewards/margins": 0.809691309928894,
"rewards/rejected": -0.852767288684845,
"step": 150
},
{
"epoch": 1.6666666666666665,
"grad_norm": 30.463548320289735,
"learning_rate": 1.5666808635993556e-07,
"logits/chosen": -2.4006917476654053,
"logits/rejected": -2.410930871963501,
"logps/chosen": -77.80570983886719,
"logps/rejected": -112.13191223144531,
"loss": 0.4833,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.116888128221035,
"rewards/margins": 1.0234750509262085,
"rewards/rejected": -0.9065868258476257,
"step": 160
},
{
"epoch": 1.7708333333333335,
"grad_norm": 21.813576335226717,
"learning_rate": 1.389593093724557e-07,
"logits/chosen": -2.349498748779297,
"logits/rejected": -2.345773458480835,
"logps/chosen": -75.15486145019531,
"logps/rejected": -106.83766174316406,
"loss": 0.4879,
"rewards/accuracies": 0.5062500238418579,
"rewards/chosen": 0.00244431896135211,
"rewards/margins": 0.8961756825447083,
"rewards/rejected": -0.893731415271759,
"step": 170
},
{
"epoch": 1.875,
"grad_norm": 24.30833424266402,
"learning_rate": 1.2125053238497584e-07,
"logits/chosen": -2.3641536235809326,
"logits/rejected": -2.3408045768737793,
"logps/chosen": -69.39147186279297,
"logps/rejected": -93.45851135253906,
"loss": 0.467,
"rewards/accuracies": 0.45625001192092896,
"rewards/chosen": 0.09233461320400238,
"rewards/margins": 0.8359875679016113,
"rewards/rejected": -0.7436529397964478,
"step": 180
},
{
"epoch": 1.9791666666666665,
"grad_norm": 22.898056429808634,
"learning_rate": 1.0354175539749597e-07,
"logits/chosen": -2.3930718898773193,
"logits/rejected": -2.3902525901794434,
"logps/chosen": -86.51902770996094,
"logps/rejected": -118.46821594238281,
"loss": 0.4742,
"rewards/accuracies": 0.4937500059604645,
"rewards/chosen": 0.07279382646083832,
"rewards/margins": 1.116233229637146,
"rewards/rejected": -1.0434393882751465,
"step": 190
},
{
"epoch": 2.0,
"step": 192,
"total_flos": 0.0,
"train_loss": 0.0,
"train_runtime": 0.0564,
"train_samples_per_second": 216706.859,
"train_steps_per_second": 3403.216
}
],
"logging_steps": 10,
"max_steps": 192,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}