{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9972451790633609,
"eval_steps": 100,
"global_step": 181,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 4.4602671481986516,
"learning_rate": 2.6315789473684208e-08,
"logits/chosen": -0.7853389978408813,
"logits/rejected": -0.651086151599884,
"logps/chosen": -1158.1322021484375,
"logps/rejected": -1143.569091796875,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.06,
"grad_norm": 4.676508624329794,
"learning_rate": 2.631578947368421e-07,
"logits/chosen": -0.709392786026001,
"logits/rejected": -0.8101767897605896,
"logps/chosen": -995.121826171875,
"logps/rejected": -1316.8297119140625,
"loss": 0.693,
"rewards/accuracies": 0.4305555522441864,
"rewards/chosen": -0.0007655912195332348,
"rewards/margins": -0.00027954205870628357,
"rewards/rejected": -0.0004860491317231208,
"step": 10
},
{
"epoch": 0.11,
"grad_norm": 4.611380190345714,
"learning_rate": 4.999529926121253e-07,
"logits/chosen": -0.7076992988586426,
"logits/rejected": -0.826177716255188,
"logps/chosen": -942.4779052734375,
"logps/rejected": -1303.2080078125,
"loss": 0.6922,
"rewards/accuracies": 0.581250011920929,
"rewards/chosen": 0.0009432749939151108,
"rewards/margins": 0.002406059531494975,
"rewards/rejected": -0.0014627845957875252,
"step": 20
},
{
"epoch": 0.17,
"grad_norm": 4.545137388238839,
"learning_rate": 4.943334645626589e-07,
"logits/chosen": -0.7218513488769531,
"logits/rejected": -0.8190325498580933,
"logps/chosen": -986.5565185546875,
"logps/rejected": -1347.8033447265625,
"loss": 0.6883,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 0.004364156164228916,
"rewards/margins": 0.008785699494183064,
"rewards/rejected": -0.004421542398631573,
"step": 30
},
{
"epoch": 0.22,
"grad_norm": 4.401192871872299,
"learning_rate": 4.795540267200686e-07,
"logits/chosen": -0.7325557470321655,
"logits/rejected": -0.7795466184616089,
"logps/chosen": -1022.5738525390625,
"logps/rejected": -1272.192626953125,
"loss": 0.6826,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 0.01184375025331974,
"rewards/margins": 0.019737884402275085,
"rewards/rejected": -0.007894134148955345,
"step": 40
},
{
"epoch": 0.28,
"grad_norm": 4.67072465163404,
"learning_rate": 4.561687510272767e-07,
"logits/chosen": -0.7071916460990906,
"logits/rejected": -0.7952053546905518,
"logps/chosen": -973.41259765625,
"logps/rejected": -1254.411376953125,
"loss": 0.6759,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.010998593643307686,
"rewards/margins": 0.031063910573720932,
"rewards/rejected": -0.020065316930413246,
"step": 50
},
{
"epoch": 0.33,
"grad_norm": 4.340731797997725,
"learning_rate": 4.2505433694179213e-07,
"logits/chosen": -0.7227746248245239,
"logits/rejected": -0.8140878677368164,
"logps/chosen": -1035.677734375,
"logps/rejected": -1306.7808837890625,
"loss": 0.6642,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 0.017496155574917793,
"rewards/margins": 0.06553898006677628,
"rewards/rejected": -0.04804282635450363,
"step": 60
},
{
"epoch": 0.39,
"grad_norm": 4.496628183792233,
"learning_rate": 3.873772445177015e-07,
"logits/chosen": -0.7832368612289429,
"logits/rejected": -0.7676926851272583,
"logps/chosen": -1028.62890625,
"logps/rejected": -1254.6610107421875,
"loss": 0.6538,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.01262822188436985,
"rewards/margins": 0.07675839215517044,
"rewards/rejected": -0.06413016468286514,
"step": 70
},
{
"epoch": 0.44,
"grad_norm": 4.939508062028092,
"learning_rate": 3.445499645429106e-07,
"logits/chosen": -0.7500189542770386,
"logits/rejected": -0.8156210780143738,
"logps/chosen": -1028.065185546875,
"logps/rejected": -1316.736328125,
"loss": 0.6433,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": 0.0009801089763641357,
"rewards/margins": 0.0968375876545906,
"rewards/rejected": -0.09585747867822647,
"step": 80
},
{
"epoch": 0.5,
"grad_norm": 4.7399950008550205,
"learning_rate": 2.981780651370224e-07,
"logits/chosen": -0.7468028664588928,
"logits/rejected": -0.7884877920150757,
"logps/chosen": -996.8697509765625,
"logps/rejected": -1192.45654296875,
"loss": 0.6079,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": 0.004719099495559931,
"rewards/margins": 0.21119482815265656,
"rewards/rejected": -0.20647573471069336,
"step": 90
},
{
"epoch": 0.55,
"grad_norm": 5.466151335512206,
"learning_rate": 2.5e-07,
"logits/chosen": -0.7582017183303833,
"logits/rejected": -0.8466933369636536,
"logps/chosen": -979.6242065429688,
"logps/rejected": -1357.511962890625,
"loss": 0.592,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": -0.00921861082315445,
"rewards/margins": 0.2683759033679962,
"rewards/rejected": -0.27759450674057007,
"step": 100
},
{
"epoch": 0.55,
"eval_logits/chosen": -0.7069126963615417,
"eval_logits/rejected": -0.8721536993980408,
"eval_logps/chosen": -955.1961059570312,
"eval_logps/rejected": -1615.9989013671875,
"eval_loss": 0.6292956471443176,
"eval_rewards/accuracies": 0.7796609997749329,
"eval_rewards/chosen": -0.0145129868760705,
"eval_rewards/margins": 0.14818498492240906,
"eval_rewards/rejected": -0.16269797086715698,
"eval_runtime": 214.9002,
"eval_samples_per_second": 8.674,
"eval_steps_per_second": 0.275,
"step": 100
},
{
"epoch": 0.61,
"grad_norm": 4.779970412172738,
"learning_rate": 2.018219348629775e-07,
"logits/chosen": -0.779070258140564,
"logits/rejected": -0.8311734199523926,
"logps/chosen": -992.7100830078125,
"logps/rejected": -1354.688720703125,
"loss": 0.5575,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -0.0526651069521904,
"rewards/margins": 0.4234324097633362,
"rewards/rejected": -0.4760975241661072,
"step": 110
},
{
"epoch": 0.66,
"grad_norm": 5.9180549707398695,
"learning_rate": 1.554500354570894e-07,
"logits/chosen": -0.7683907747268677,
"logits/rejected": -0.8335941433906555,
"logps/chosen": -1003.9620971679688,
"logps/rejected": -1312.5740966796875,
"loss": 0.5428,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -0.06494404375553131,
"rewards/margins": 0.5937026739120483,
"rewards/rejected": -0.6586467623710632,
"step": 120
},
{
"epoch": 0.72,
"grad_norm": 5.7132927229572354,
"learning_rate": 1.126227554822985e-07,
"logits/chosen": -0.798209011554718,
"logits/rejected": -0.8723245859146118,
"logps/chosen": -1054.9263916015625,
"logps/rejected": -1349.2750244140625,
"loss": 0.5312,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.15924784541130066,
"rewards/margins": 0.5923423767089844,
"rewards/rejected": -0.7515901923179626,
"step": 130
},
{
"epoch": 0.77,
"grad_norm": 10.497632384133095,
"learning_rate": 7.494566305820788e-08,
"logits/chosen": -0.8022674322128296,
"logits/rejected": -0.882843017578125,
"logps/chosen": -1055.48974609375,
"logps/rejected": -1417.170654296875,
"loss": 0.4988,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -0.17785976827144623,
"rewards/margins": 1.1309340000152588,
"rewards/rejected": -1.3087936639785767,
"step": 140
},
{
"epoch": 0.83,
"grad_norm": 6.590111680094942,
"learning_rate": 4.38312489727233e-08,
"logits/chosen": -0.7941451668739319,
"logits/rejected": -0.8360635042190552,
"logps/chosen": -994.5494384765625,
"logps/rejected": -1374.3560791015625,
"loss": 0.4917,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": -0.2062647044658661,
"rewards/margins": 0.8967850804328918,
"rewards/rejected": -1.1030497550964355,
"step": 150
},
{
"epoch": 0.88,
"grad_norm": 8.361359149599993,
"learning_rate": 2.044597327993153e-08,
"logits/chosen": -0.7812503576278687,
"logits/rejected": -0.8671078681945801,
"logps/chosen": -1042.375244140625,
"logps/rejected": -1443.9189453125,
"loss": 0.4813,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -0.2346261441707611,
"rewards/margins": 0.8702405095100403,
"rewards/rejected": -1.104866623878479,
"step": 160
},
{
"epoch": 0.94,
"grad_norm": 7.649602408833162,
"learning_rate": 5.666535437341108e-09,
"logits/chosen": -0.792350172996521,
"logits/rejected": -0.8578357696533203,
"logps/chosen": -1057.263427734375,
"logps/rejected": -1363.985107421875,
"loss": 0.4843,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": -0.292695015668869,
"rewards/margins": 1.1091253757476807,
"rewards/rejected": -1.401820421218872,
"step": 170
},
{
"epoch": 0.99,
"grad_norm": 8.116244988839705,
"learning_rate": 4.700738787466463e-11,
"logits/chosen": -0.7855504751205444,
"logits/rejected": -0.8505135774612427,
"logps/chosen": -952.7326049804688,
"logps/rejected": -1447.79541015625,
"loss": 0.4771,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -0.23469653725624084,
"rewards/margins": 0.9054014086723328,
"rewards/rejected": -1.140097975730896,
"step": 180
},
{
"epoch": 1.0,
"step": 181,
"total_flos": 0.0,
"train_loss": 0.5914445311983646,
"train_runtime": 2922.1625,
"train_samples_per_second": 3.971,
"train_steps_per_second": 0.062
}
],
"logging_steps": 10,
"max_steps": 181,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}