{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 162,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 2.9411764705882356e-07,
"logits/chosen": -2.640578269958496,
"logits/rejected": -2.6619861125946045,
"logps/chosen": -410.817138671875,
"logps/rejected": -784.9041137695312,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.06,
"learning_rate": 2.9411764705882355e-06,
"logits/chosen": -2.580946683883667,
"logits/rejected": -2.562483549118042,
"logps/chosen": -348.2420959472656,
"logps/rejected": -529.5997924804688,
"loss": 0.6857,
"rewards/accuracies": 0.5416666865348816,
"rewards/chosen": 0.047004420310258865,
"rewards/margins": 0.018747717142105103,
"rewards/rejected": 0.028256705030798912,
"step": 10
},
{
"epoch": 0.12,
"learning_rate": 4.994720857837211e-06,
"logits/chosen": -2.563664197921753,
"logits/rejected": -2.5549397468566895,
"logps/chosen": -303.5776062011719,
"logps/rejected": -509.2774353027344,
"loss": 0.6578,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": 0.23815405368804932,
"rewards/margins": 0.07588066160678864,
"rewards/rejected": 0.1622733771800995,
"step": 20
},
{
"epoch": 0.19,
"learning_rate": 4.901488388458247e-06,
"logits/chosen": -2.526702404022217,
"logits/rejected": -2.5158963203430176,
"logps/chosen": -335.10333251953125,
"logps/rejected": -552.9954223632812,
"loss": 0.6058,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": 0.2050032615661621,
"rewards/margins": 0.20939965546131134,
"rewards/rejected": -0.004396387841552496,
"step": 30
},
{
"epoch": 0.25,
"learning_rate": 4.6959649910976165e-06,
"logits/chosen": -2.532031536102295,
"logits/rejected": -2.4838037490844727,
"logps/chosen": -300.42132568359375,
"logps/rejected": -509.403564453125,
"loss": 0.5705,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": 0.23074543476104736,
"rewards/margins": 0.2837901711463928,
"rewards/rejected": -0.053044695407152176,
"step": 40
},
{
"epoch": 0.31,
"learning_rate": 4.387760711393052e-06,
"logits/chosen": -2.5283217430114746,
"logits/rejected": -2.46999192237854,
"logps/chosen": -327.3263244628906,
"logps/rejected": -512.6666870117188,
"loss": 0.5487,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.2256808578968048,
"rewards/margins": 0.36225491762161255,
"rewards/rejected": -0.13657405972480774,
"step": 50
},
{
"epoch": 0.37,
"learning_rate": 3.991286838919086e-06,
"logits/chosen": -2.508136034011841,
"logits/rejected": -2.471802234649658,
"logps/chosen": -333.85711669921875,
"logps/rejected": -523.9845581054688,
"loss": 0.5393,
"rewards/accuracies": 0.784375011920929,
"rewards/chosen": 0.1835666298866272,
"rewards/margins": 0.38937950134277344,
"rewards/rejected": -0.20581285655498505,
"step": 60
},
{
"epoch": 0.43,
"learning_rate": 3.5250820513035403e-06,
"logits/chosen": -2.492502450942993,
"logits/rejected": -2.484013080596924,
"logps/chosen": -314.8898620605469,
"logps/rejected": -589.8179931640625,
"loss": 0.5165,
"rewards/accuracies": 0.84375,
"rewards/chosen": 0.19387896358966827,
"rewards/margins": 0.5265167951583862,
"rewards/rejected": -0.33263787627220154,
"step": 70
},
{
"epoch": 0.49,
"learning_rate": 3.0109455662659126e-06,
"logits/chosen": -2.527844190597534,
"logits/rejected": -2.502004623413086,
"logps/chosen": -320.5412292480469,
"logps/rejected": -579.2618408203125,
"loss": 0.5083,
"rewards/accuracies": 0.815625011920929,
"rewards/chosen": 0.12712647020816803,
"rewards/margins": 0.5316451787948608,
"rewards/rejected": -0.4045187532901764,
"step": 80
},
{
"epoch": 0.56,
"learning_rate": 2.4729178344249007e-06,
"logits/chosen": -2.5523924827575684,
"logits/rejected": -2.5245110988616943,
"logps/chosen": -341.2242431640625,
"logps/rejected": -596.2203369140625,
"loss": 0.496,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.06856798380613327,
"rewards/margins": 0.590112030506134,
"rewards/rejected": -0.5215439796447754,
"step": 90
},
{
"epoch": 0.62,
"learning_rate": 1.936156434546515e-06,
"logits/chosen": -2.5596001148223877,
"logits/rejected": -2.568671703338623,
"logps/chosen": -333.087158203125,
"logps/rejected": -611.3602905273438,
"loss": 0.4768,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": 0.12124671787023544,
"rewards/margins": 0.6403753757476807,
"rewards/rejected": -0.5191286206245422,
"step": 100
},
{
"epoch": 0.62,
"eval_logits/chosen": -2.585242509841919,
"eval_logits/rejected": -2.479508399963379,
"eval_logps/chosen": -293.3752136230469,
"eval_logps/rejected": -292.1757507324219,
"eval_loss": 0.6442223191261292,
"eval_rewards/accuracies": 0.5659999847412109,
"eval_rewards/chosen": 0.18966493010520935,
"eval_rewards/margins": 0.15400375425815582,
"eval_rewards/rejected": 0.035661179572343826,
"eval_runtime": 398.0482,
"eval_samples_per_second": 5.025,
"eval_steps_per_second": 0.628,
"step": 100
},
{
"epoch": 0.68,
"learning_rate": 1.4257597331216211e-06,
"logits/chosen": -2.5242629051208496,
"logits/rejected": -2.484691619873047,
"logps/chosen": -325.24761962890625,
"logps/rejected": -541.4208984375,
"loss": 0.4856,
"rewards/accuracies": 0.8031250238418579,
"rewards/chosen": 0.11120424419641495,
"rewards/margins": 0.6161486506462097,
"rewards/rejected": -0.5049443244934082,
"step": 110
},
{
"epoch": 0.74,
"learning_rate": 9.655933126436565e-07,
"logits/chosen": -2.5429892539978027,
"logits/rejected": -2.520012855529785,
"logps/chosen": -329.37347412109375,
"logps/rejected": -596.545654296875,
"loss": 0.4791,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.06060720607638359,
"rewards/margins": 0.6603037714958191,
"rewards/rejected": -0.5996966361999512,
"step": 120
},
{
"epoch": 0.8,
"learning_rate": 5.771740434959278e-07,
"logits/chosen": -2.5939841270446777,
"logits/rejected": -2.543733835220337,
"logps/chosen": -326.51824951171875,
"logps/rejected": -585.2608032226562,
"loss": 0.4705,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 0.0589924156665802,
"rewards/margins": 0.7284678220748901,
"rewards/rejected": -0.6694754362106323,
"step": 130
},
{
"epoch": 0.86,
"learning_rate": 2.786639790067719e-07,
"logits/chosen": -2.538792848587036,
"logits/rejected": -2.572946548461914,
"logps/chosen": -327.40155029296875,
"logps/rejected": -593.7877197265625,
"loss": 0.4828,
"rewards/accuracies": 0.828125,
"rewards/chosen": 0.006606454961001873,
"rewards/margins": 0.6702617406845093,
"rewards/rejected": -0.6636553406715393,
"step": 140
},
{
"epoch": 0.93,
"learning_rate": 8.402111802159413e-08,
"logits/chosen": -2.60109281539917,
"logits/rejected": -2.500326633453369,
"logps/chosen": -350.4434814453125,
"logps/rejected": -545.3392333984375,
"loss": 0.4729,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.03271277993917465,
"rewards/margins": 0.6479501128196716,
"rewards/rejected": -0.615237295627594,
"step": 150
},
{
"epoch": 0.99,
"learning_rate": 2.34674439005822e-09,
"logits/chosen": -2.6234354972839355,
"logits/rejected": -2.563753366470337,
"logps/chosen": -370.4894104003906,
"logps/rejected": -656.5492553710938,
"loss": 0.4684,
"rewards/accuracies": 0.784375011920929,
"rewards/chosen": 0.009518811479210854,
"rewards/margins": 0.7224863171577454,
"rewards/rejected": -0.7129674553871155,
"step": 160
},
{
"epoch": 1.0,
"step": 162,
"total_flos": 0.0,
"train_loss": 0.5280830058050744,
"train_runtime": 8024.3189,
"train_samples_per_second": 2.584,
"train_steps_per_second": 0.02
}
],
"logging_steps": 10,
"max_steps": 162,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}