Jan Majkutewicz committed
Commit bf1e30f (verified)
1 Parent(s): 69c0547

Model save

README.md CHANGED
@@ -1,11 +1,8 @@
  ---
  base_model: alignment-handbook/zephyr-7b-sft-full
- datasets:
- - SeniorKabanocci/oasst1-binarized
  library_name: peft
  license: apache-2.0
  tags:
- - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
@@ -17,20 +14,10 @@ model-index:
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->
 
- [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/jan-majkutewicz/huggingface/runs/uistaj1t)
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/jan-majkutewicz/huggingface/runs/vp4emkhb)
  # zephyr-7b-oasst1
 
- This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the SeniorKabanocci/oasst1-binarized dataset.
- It achieves the following results on the evaluation set:
- - Loss: 0.6059
- - Rewards/chosen: -0.9430
- - Rewards/rejected: -1.3459
- - Rewards/accuracies: 0.6348
- - Rewards/margins: 0.4030
- - Logps/rejected: -239.4801
- - Logps/chosen: -256.8972
- - Logits/rejected: -2.8973
- - Logits/chosen: -2.9273
+ This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on an unknown dataset.
 
  ## Model description
 
@@ -49,7 +36,7 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - learning_rate: 5e-06
+ - learning_rate: 1e-06
  - train_batch_size: 8
  - eval_batch_size: 8
  - seed: 42
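The card above still describes a PEFT adapter trained with TRL's DPO trainer on top of alignment-handbook/zephyr-7b-sft-full. As a rough, untested sketch of how the saved adapter_model.safetensors could be loaded for inference with `peft` and `transformers` — the adapter repo id below is an assumption inferred from the model name, not something stated in this commit:

```python
# Minimal sketch (untested): attach the DPO-trained PEFT adapter to the SFT base model.
# ADAPTER_ID is a hypothetical Hub id; substitute the real repo id or a local checkout.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_ID = "alignment-handbook/zephyr-7b-sft-full"   # base model named in the card
ADAPTER_ID = "SeniorKabanocci/zephyr-7b-oasst1"     # assumption, not from this commit

tokenizer = AutoTokenizer.from_pretrained(BASE_ID)
base = AutoModelForCausalLM.from_pretrained(BASE_ID, device_map="auto")
model = PeftModel.from_pretrained(base, ADAPTER_ID)  # loads adapter_model.safetensors

prompt = "Explain what DPO fine-tuning does in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

Loading this way leaves the base weights untouched; if the adapter is LoRA-based, `model.merge_and_unload()` can produce a standalone checkpoint afterwards.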
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8dcc4a12b5484215a28edec6f6da157fbfbdf30f6ddd41967723590d187e05bf
+ oid sha256:73157727aa31d029e720b3348bbf61b47cd9d6ed4e229cd5281fd4749b57e865
  size 335605144
all_results.json CHANGED
@@ -1,22 +1,9 @@
  {
  "epoch": 0.9994353472614342,
- "eval_logits/chosen": -2.9273033142089844,
- "eval_logits/rejected": -2.8973140716552734,
- "eval_logps/chosen": -256.8972473144531,
- "eval_logps/rejected": -239.4800567626953,
- "eval_loss": 0.6058722734451294,
- "eval_rewards/accuracies": 0.6348314881324768,
- "eval_rewards/chosen": -0.9429696798324585,
- "eval_rewards/margins": 0.4029598534107208,
- "eval_rewards/rejected": -1.3459293842315674,
- "eval_runtime": 230.8014,
- "eval_samples": 712,
- "eval_samples_per_second": 3.085,
- "eval_steps_per_second": 0.386,
  "total_flos": 0.0,
- "train_loss": 0.6616038349388683,
- "train_runtime": 9964.5835,
+ "train_loss": 0.6718437324135991,
+ "train_runtime": 9969.8232,
  "train_samples": 14167,
- "train_samples_per_second": 1.422,
+ "train_samples_per_second": 1.421,
  "train_steps_per_second": 0.089
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 0.9994353472614342,
  "total_flos": 0.0,
- "train_loss": 0.6616038349388683,
- "train_runtime": 9964.5835,
+ "train_loss": 0.6718437324135991,
+ "train_runtime": 9969.8232,
  "train_samples": 14167,
- "train_samples_per_second": 1.422,
+ "train_samples_per_second": 1.421,
  "train_steps_per_second": 0.089
  }
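Taken together with the README hunk above, these diffs record a rerun of the DPO training with the peak learning rate lowered from 5e-06 to 1e-06; the per-step learning rates logged in trainer_state.json below shrink by the same factor of five. A minimal, hypothetical sketch of such a run with TRL follows; only learning_rate, the batch sizes, and the seed come from the card, while the dataset id, LoRA settings, and all other arguments are assumptions for illustration.

```python
# Hypothetical sketch, not the author's actual training script.
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base_id = "alignment-handbook/zephyr-7b-sft-full"
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id)

# Dataset id is an assumption: the previous card named SeniorKabanocci/oasst1-binarized,
# while this commit lists the dataset as unknown.
train_dataset = load_dataset("SeniorKabanocci/oasst1-binarized", split="train")

args = DPOConfig(
    output_dir="zephyr-7b-oasst1",
    learning_rate=1e-06,             # changed from 5e-06 in this commit
    per_device_train_batch_size=8,   # train_batch_size: 8
    per_device_eval_batch_size=8,    # eval_batch_size: 8
    seed=42,
    num_train_epochs=1,              # assumption; the log below stops near epoch 1.0
)

trainer = DPOTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    processing_class=tokenizer,      # recent TRL; older releases take tokenizer= instead
    peft_config=LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM"),  # illustrative LoRA settings
)
trainer.train()
```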
trainer_state.json CHANGED
@@ -11,7 +11,7 @@
11
  {
12
  "epoch": 0.001129305477131564,
13
  "grad_norm": 10.5625,
14
- "learning_rate": 5.617977528089888e-08,
15
  "logits/chosen": -2.8912107944488525,
16
  "logits/rejected": -2.9116690158843994,
17
  "logps/chosen": -205.43255615234375,
@@ -25,1331 +25,1331 @@
25
  },
26
  {
27
  "epoch": 0.01129305477131564,
28
- "grad_norm": 11.75,
29
- "learning_rate": 5.617977528089888e-07,
30
- "logits/chosen": -2.9238009452819824,
31
- "logits/rejected": -2.8953051567077637,
32
- "logps/chosen": -286.31060791015625,
33
- "logps/rejected": -234.09100341796875,
34
- "loss": 0.6921,
35
- "rewards/accuracies": 0.4375,
36
- "rewards/chosen": 0.0044541251845657825,
37
- "rewards/margins": 0.0024419513065367937,
38
- "rewards/rejected": 0.0020121734123677015,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.02258610954263128,
43
- "grad_norm": 8.0625,
44
- "learning_rate": 1.1235955056179777e-06,
45
- "logits/chosen": -2.911529064178467,
46
- "logits/rejected": -2.8939220905303955,
47
- "logps/chosen": -242.33349609375,
48
- "logps/rejected": -230.67916870117188,
49
- "loss": 0.6967,
50
- "rewards/accuracies": 0.375,
51
- "rewards/chosen": -0.00516792107373476,
52
- "rewards/margins": -0.006779191549867392,
53
- "rewards/rejected": 0.0016112711746245623,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.03387916431394692,
58
  "grad_norm": 10.4375,
59
- "learning_rate": 1.6853932584269663e-06,
60
- "logits/chosen": -2.933928966522217,
61
- "logits/rejected": -2.9029603004455566,
62
- "logps/chosen": -228.00210571289062,
63
- "logps/rejected": -206.8175506591797,
64
- "loss": 0.6928,
65
- "rewards/accuracies": 0.4749999940395355,
66
- "rewards/chosen": -0.00036367171560414135,
67
- "rewards/margins": 0.0011087121674790978,
68
- "rewards/rejected": -0.0014723839703947306,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.04517221908526256,
73
- "grad_norm": 10.5625,
74
- "learning_rate": 2.2471910112359554e-06,
75
- "logits/chosen": -2.9396324157714844,
76
- "logits/rejected": -2.9021177291870117,
77
- "logps/chosen": -239.8461456298828,
78
- "logps/rejected": -223.19216918945312,
79
- "loss": 0.693,
80
- "rewards/accuracies": 0.4937500059604645,
81
- "rewards/chosen": -0.01020533312112093,
82
- "rewards/margins": 0.0009328271262347698,
83
- "rewards/rejected": -0.011138159781694412,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.05646527385657821,
88
  "grad_norm": 11.25,
89
- "learning_rate": 2.8089887640449444e-06,
90
- "logits/chosen": -2.9262070655822754,
91
- "logits/rejected": -2.891216993331909,
92
- "logps/chosen": -252.71072387695312,
93
- "logps/rejected": -209.1496124267578,
94
- "loss": 0.6914,
95
- "rewards/accuracies": 0.512499988079071,
96
- "rewards/chosen": -0.02404838800430298,
97
- "rewards/margins": 0.004404561128467321,
98
- "rewards/rejected": -0.028452953323721886,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.06775832862789384,
103
- "grad_norm": 9.125,
104
- "learning_rate": 3.3707865168539327e-06,
105
- "logits/chosen": -2.95279860496521,
106
- "logits/rejected": -2.9229164123535156,
107
- "logps/chosen": -231.2828826904297,
108
- "logps/rejected": -223.4114227294922,
109
- "loss": 0.6893,
110
- "rewards/accuracies": 0.4937500059604645,
111
- "rewards/chosen": -0.043369874358177185,
112
- "rewards/margins": 0.008675651624798775,
113
- "rewards/rejected": -0.05204553157091141,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.07905138339920949,
118
- "grad_norm": 16.5,
119
- "learning_rate": 3.932584269662922e-06,
120
- "logits/chosen": -2.9178593158721924,
121
- "logits/rejected": -2.88283109664917,
122
- "logps/chosen": -249.9050750732422,
123
- "logps/rejected": -236.652099609375,
124
- "loss": 0.6814,
125
- "rewards/accuracies": 0.48750001192092896,
126
- "rewards/chosen": -0.06348244845867157,
127
- "rewards/margins": 0.026277879253029823,
128
- "rewards/rejected": -0.08976032584905624,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.09034443817052512,
133
- "grad_norm": 11.9375,
134
- "learning_rate": 4.494382022471911e-06,
135
- "logits/chosen": -2.893878221511841,
136
- "logits/rejected": -2.858973979949951,
137
- "logps/chosen": -244.9037322998047,
138
- "logps/rejected": -243.09042358398438,
139
- "loss": 0.674,
140
- "rewards/accuracies": 0.5687500238418579,
141
- "rewards/chosen": -0.09704697877168655,
142
- "rewards/margins": 0.04427679255604744,
143
- "rewards/rejected": -0.1413237750530243,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.10163749294184077,
148
- "grad_norm": 6.84375,
149
- "learning_rate": 4.999980529233026e-06,
150
- "logits/chosen": -2.9132912158966064,
151
- "logits/rejected": -2.8923027515411377,
152
- "logps/chosen": -232.01651000976562,
153
- "logps/rejected": -232.08056640625,
154
- "loss": 0.6793,
155
- "rewards/accuracies": 0.48750001192092896,
156
- "rewards/chosen": -0.16643305122852325,
157
- "rewards/margins": 0.03658531233668327,
158
- "rewards/rejected": -0.20301838219165802,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.11293054771315642,
163
- "grad_norm": 8.1875,
164
- "learning_rate": 4.997644404153021e-06,
165
- "logits/chosen": -2.958055019378662,
166
- "logits/rejected": -2.9053235054016113,
167
- "logps/chosen": -232.2222900390625,
168
- "logps/rejected": -244.8426513671875,
169
- "loss": 0.6677,
170
- "rewards/accuracies": 0.550000011920929,
171
- "rewards/chosen": -0.23467275500297546,
172
- "rewards/margins": 0.08466038852930069,
173
- "rewards/rejected": -0.31933316588401794,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.12422360248447205,
178
- "grad_norm": 9.3125,
179
- "learning_rate": 4.991418294828522e-06,
180
- "logits/chosen": -2.896815776824951,
181
- "logits/rejected": -2.8579037189483643,
182
- "logps/chosen": -256.8934020996094,
183
- "logps/rejected": -252.8817138671875,
184
- "loss": 0.6693,
185
- "rewards/accuracies": 0.543749988079071,
186
- "rewards/chosen": -0.33838751912117004,
187
- "rewards/margins": 0.08679252117872238,
188
- "rewards/rejected": -0.425180047750473,
189
  "step": 110
190
  },
191
  {
192
  "epoch": 0.13551665725578768,
193
- "grad_norm": 8.3125,
194
- "learning_rate": 4.981311898183214e-06,
195
- "logits/chosen": -2.9425575733184814,
196
- "logits/rejected": -2.927373170852661,
197
- "logps/chosen": -236.5902099609375,
198
- "logps/rejected": -218.78945922851562,
199
- "loss": 0.6899,
200
- "rewards/accuracies": 0.4625000059604645,
201
- "rewards/chosen": -0.39529091119766235,
202
- "rewards/margins": 0.03407430276274681,
203
- "rewards/rejected": -0.42936521768569946,
204
  "step": 120
205
  },
206
  {
207
  "epoch": 0.14680971202710333,
208
- "grad_norm": 9.5,
209
- "learning_rate": 4.967340954537717e-06,
210
- "logits/chosen": -2.9237935543060303,
211
- "logits/rejected": -2.896763324737549,
212
- "logps/chosen": -249.92367553710938,
213
- "logps/rejected": -257.2637023925781,
214
- "loss": 0.6441,
215
- "rewards/accuracies": 0.6625000238418579,
216
- "rewards/chosen": -0.41192659735679626,
217
- "rewards/margins": 0.14285169541835785,
218
- "rewards/rejected": -0.5547782778739929,
219
  "step": 130
220
  },
221
  {
222
  "epoch": 0.15810276679841898,
223
- "grad_norm": 7.09375,
224
- "learning_rate": 4.9495272230946525e-06,
225
- "logits/chosen": -2.9524216651916504,
226
- "logits/rejected": -2.915919303894043,
227
- "logps/chosen": -252.70565795898438,
228
- "logps/rejected": -242.8680419921875,
229
- "loss": 0.6634,
230
- "rewards/accuracies": 0.543749988079071,
231
- "rewards/chosen": -0.48054733872413635,
232
- "rewards/margins": 0.10070822387933731,
233
- "rewards/rejected": -0.5812555551528931,
234
  "step": 140
235
  },
236
  {
237
  "epoch": 0.16939582156973462,
238
- "grad_norm": 10.4375,
239
- "learning_rate": 4.927898448049522e-06,
240
- "logits/chosen": -2.9190261363983154,
241
- "logits/rejected": -2.878838062286377,
242
- "logps/chosen": -262.5068359375,
243
- "logps/rejected": -233.3218536376953,
244
- "loss": 0.6398,
245
- "rewards/accuracies": 0.606249988079071,
246
- "rewards/chosen": -0.5640592575073242,
247
- "rewards/margins": 0.16638214886188507,
248
- "rewards/rejected": -0.7304413914680481,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.18068887634105024,
253
- "grad_norm": 8.5,
254
- "learning_rate": 4.9024883153802095e-06,
255
- "logits/chosen": -2.9224305152893066,
256
- "logits/rejected": -2.8813347816467285,
257
- "logps/chosen": -225.17919921875,
258
- "logps/rejected": -219.84487915039062,
259
- "loss": 0.64,
260
- "rewards/accuracies": 0.581250011920929,
261
- "rewards/chosen": -0.6268054842948914,
262
- "rewards/margins": 0.16882720589637756,
263
- "rewards/rejected": -0.7956327199935913,
264
  "step": 160
265
  },
266
  {
267
  "epoch": 0.1919819311123659,
268
- "grad_norm": 90.5,
269
- "learning_rate": 4.873336400382367e-06,
270
- "logits/chosen": -2.952296733856201,
271
- "logits/rejected": -2.915030002593994,
272
- "logps/chosen": -253.6019744873047,
273
- "logps/rejected": -242.11801147460938,
274
- "loss": 0.6533,
275
  "rewards/accuracies": 0.581250011920929,
276
- "rewards/chosen": -0.6432451009750366,
277
- "rewards/margins": 0.15737922489643097,
278
- "rewards/rejected": -0.8006243705749512,
279
  "step": 170
280
  },
281
  {
282
  "epoch": 0.20327498588368154,
283
- "grad_norm": 8.5625,
284
- "learning_rate": 4.8404881060324375e-06,
285
- "logits/chosen": -2.9133620262145996,
286
- "logits/rejected": -2.8687403202056885,
287
- "logps/chosen": -250.570068359375,
288
- "logps/rejected": -209.63168334960938,
289
- "loss": 0.6597,
290
- "rewards/accuracies": 0.6187499761581421,
291
- "rewards/chosen": -0.7379518747329712,
292
- "rewards/margins": 0.15293964743614197,
293
- "rewards/rejected": -0.8908915519714355,
294
  "step": 180
295
  },
296
  {
297
  "epoch": 0.21456804065499718,
298
- "grad_norm": 10.1875,
299
- "learning_rate": 4.803994592274272e-06,
300
- "logits/chosen": -2.930187225341797,
301
- "logits/rejected": -2.9065961837768555,
302
- "logps/chosen": -255.08895874023438,
303
- "logps/rejected": -240.781982421875,
304
- "loss": 0.691,
305
- "rewards/accuracies": 0.5,
306
- "rewards/chosen": -0.6726095676422119,
307
- "rewards/margins": 0.09454379975795746,
308
- "rewards/rejected": -0.7671533226966858,
309
  "step": 190
310
  },
311
  {
312
  "epoch": 0.22586109542631283,
313
- "grad_norm": 8.625,
314
- "learning_rate": 4.763912696339506e-06,
315
- "logits/chosen": -2.908806800842285,
316
- "logits/rejected": -2.87739896774292,
317
- "logps/chosen": -244.31326293945312,
318
- "logps/rejected": -251.88284301757812,
319
- "loss": 0.6507,
320
- "rewards/accuracies": 0.574999988079071,
321
- "rewards/chosen": -0.6921225786209106,
322
- "rewards/margins": 0.16923566162586212,
323
- "rewards/rejected": -0.8613582849502563,
324
  "step": 200
325
  },
326
  {
327
  "epoch": 0.23715415019762845,
328
- "grad_norm": 11.5,
329
- "learning_rate": 4.720304844225781e-06,
330
- "logits/chosen": -2.9554803371429443,
331
- "logits/rejected": -2.9158945083618164,
332
- "logps/chosen": -259.2450866699219,
333
- "logps/rejected": -220.19070434570312,
334
- "loss": 0.6435,
335
- "rewards/accuracies": 0.5687500238418579,
336
- "rewards/chosen": -0.634341835975647,
337
- "rewards/margins": 0.1825425922870636,
338
- "rewards/rejected": -0.8168843388557434,
339
  "step": 210
340
  },
341
  {
342
  "epoch": 0.2484472049689441,
343
- "grad_norm": 13.125,
344
- "learning_rate": 4.6732389534706655e-06,
345
- "logits/chosen": -2.941028118133545,
346
- "logits/rejected": -2.9083430767059326,
347
- "logps/chosen": -279.50592041015625,
348
- "logps/rejected": -249.6971893310547,
349
- "loss": 0.6602,
350
- "rewards/accuracies": 0.5249999761581421,
351
- "rewards/chosen": -0.7295074462890625,
352
- "rewards/margins": 0.16120071709156036,
353
- "rewards/rejected": -0.8907082676887512,
354
  "step": 220
355
  },
356
  {
357
  "epoch": 0.2597402597402597,
358
- "grad_norm": 10.25,
359
- "learning_rate": 4.622788327372736e-06,
360
- "logits/chosen": -2.9610531330108643,
361
- "logits/rejected": -2.9274752140045166,
362
- "logps/chosen": -277.1114196777344,
363
- "logps/rejected": -249.110107421875,
364
- "loss": 0.6483,
365
- "rewards/accuracies": 0.543749988079071,
366
- "rewards/chosen": -0.7016745209693909,
367
- "rewards/margins": 0.18496869504451752,
368
- "rewards/rejected": -0.8866432309150696,
369
  "step": 230
370
  },
371
  {
372
  "epoch": 0.27103331451157536,
373
- "grad_norm": 12.8125,
374
- "learning_rate": 4.569031540824526e-06,
375
- "logits/chosen": -2.933371067047119,
376
- "logits/rejected": -2.890542984008789,
377
- "logps/chosen": -237.04415893554688,
378
- "logps/rejected": -225.23086547851562,
379
- "loss": 0.6613,
380
- "rewards/accuracies": 0.53125,
381
- "rewards/chosen": -0.6652556657791138,
382
- "rewards/margins": 0.1700500100851059,
383
- "rewards/rejected": -0.8353056907653809,
384
  "step": 240
385
  },
386
  {
387
  "epoch": 0.282326369282891,
388
- "grad_norm": 8.0625,
389
- "learning_rate": 4.512052317935184e-06,
390
- "logits/chosen": -2.958394765853882,
391
- "logits/rejected": -2.9169325828552246,
392
- "logps/chosen": -239.5146942138672,
393
- "logps/rejected": -229.6261749267578,
394
- "loss": 0.6111,
395
- "rewards/accuracies": 0.637499988079071,
396
- "rewards/chosen": -0.7014538049697876,
397
- "rewards/margins": 0.305302232503891,
398
- "rewards/rejected": -1.006756067276001,
399
  "step": 250
400
  },
401
  {
402
  "epoch": 0.29361942405420666,
403
- "grad_norm": 11.375,
404
- "learning_rate": 4.45193940163342e-06,
405
- "logits/chosen": -2.9349873065948486,
406
- "logits/rejected": -2.9028127193450928,
407
- "logps/chosen": -242.2169189453125,
408
- "logps/rejected": -246.52584838867188,
409
- "loss": 0.6804,
410
  "rewards/accuracies": 0.625,
411
- "rewards/chosen": -0.7285553812980652,
412
- "rewards/margins": 0.13187848031520844,
413
- "rewards/rejected": -0.8604338765144348,
414
  "step": 260
415
  },
416
  {
417
  "epoch": 0.3049124788255223,
418
- "grad_norm": 9.9375,
419
- "learning_rate": 4.3887864154538426e-06,
420
- "logits/chosen": -2.948988437652588,
421
- "logits/rejected": -2.934638500213623,
422
- "logps/chosen": -241.4674072265625,
423
- "logps/rejected": -244.40072631835938,
424
- "loss": 0.6575,
425
- "rewards/accuracies": 0.5625,
426
- "rewards/chosen": -0.7441710233688354,
427
- "rewards/margins": 0.2095833122730255,
428
- "rewards/rejected": -0.9537544250488281,
429
  "step": 270
430
  },
431
  {
432
  "epoch": 0.31620553359683795,
433
- "grad_norm": 10.0625,
434
- "learning_rate": 4.322691717721927e-06,
435
- "logits/chosen": -2.9385104179382324,
436
- "logits/rejected": -2.9133810997009277,
437
- "logps/chosen": -264.28955078125,
438
- "logps/rejected": -279.1156311035156,
439
- "loss": 0.6609,
440
- "rewards/accuracies": 0.5625,
441
- "rewards/chosen": -0.7867005467414856,
442
- "rewards/margins": 0.16506877541542053,
443
- "rewards/rejected": -0.9517693519592285,
444
  "step": 280
445
  },
446
  {
447
  "epoch": 0.3274985883681536,
448
- "grad_norm": 14.125,
449
- "learning_rate": 4.253758248364747e-06,
450
- "logits/chosen": -2.9210855960845947,
451
- "logits/rejected": -2.913820743560791,
452
- "logps/chosen": -258.10015869140625,
453
- "logps/rejected": -252.4558563232422,
454
- "loss": 0.7288,
455
- "rewards/accuracies": 0.5249999761581421,
456
- "rewards/chosen": -0.8864612579345703,
457
- "rewards/margins": 0.033281516283750534,
458
- "rewards/rejected": -0.9197427034378052,
459
  "step": 290
460
  },
461
  {
462
  "epoch": 0.33879164313946925,
463
- "grad_norm": 11.1875,
464
- "learning_rate": 4.182093368586034e-06,
465
- "logits/chosen": -2.935784101486206,
466
- "logits/rejected": -2.9176745414733887,
467
- "logps/chosen": -277.13153076171875,
468
- "logps/rejected": -258.02166748046875,
469
- "loss": 0.6835,
470
- "rewards/accuracies": 0.550000011920929,
471
- "rewards/chosen": -0.6932904124259949,
472
- "rewards/margins": 0.10739298164844513,
473
- "rewards/rejected": -0.8006833791732788,
474
  "step": 300
475
  },
476
  {
477
  "epoch": 0.3500846979107849,
478
- "grad_norm": 8.9375,
479
- "learning_rate": 4.107808693655262e-06,
480
- "logits/chosen": -2.9573862552642822,
481
- "logits/rejected": -2.9266855716705322,
482
- "logps/chosen": -260.0476989746094,
483
- "logps/rejected": -220.8186492919922,
484
- "loss": 0.656,
485
- "rewards/accuracies": 0.581250011920929,
486
- "rewards/chosen": -0.6015889644622803,
487
- "rewards/margins": 0.16243910789489746,
488
- "rewards/rejected": -0.7640281319618225,
489
  "step": 310
490
  },
491
  {
492
  "epoch": 0.3613777526821005,
493
- "grad_norm": 9.4375,
494
- "learning_rate": 4.0310199190712016e-06,
495
- "logits/chosen": -2.90745210647583,
496
- "logits/rejected": -2.877584934234619,
497
- "logps/chosen": -238.2447509765625,
498
- "logps/rejected": -260.91278076171875,
499
- "loss": 0.6433,
500
- "rewards/accuracies": 0.6312500238418579,
501
- "rewards/chosen": -0.6627223491668701,
502
- "rewards/margins": 0.18875077366828918,
503
- "rewards/rejected": -0.8514731526374817,
504
  "step": 320
505
  },
506
  {
507
  "epoch": 0.37267080745341613,
508
- "grad_norm": 9.375,
509
- "learning_rate": 3.951846640370666e-06,
510
- "logits/chosen": -2.9316892623901367,
511
- "logits/rejected": -2.8965978622436523,
512
- "logps/chosen": -249.91738891601562,
513
- "logps/rejected": -269.43255615234375,
514
- "loss": 0.6525,
515
- "rewards/accuracies": 0.512499988079071,
516
- "rewards/chosen": -0.5791417360305786,
517
- "rewards/margins": 0.18505547940731049,
518
- "rewards/rejected": -0.7641971707344055,
519
  "step": 330
520
  },
521
  {
522
  "epoch": 0.3839638622247318,
523
- "grad_norm": 10.0,
524
- "learning_rate": 3.870412166863107e-06,
525
- "logits/chosen": -2.9794411659240723,
526
- "logits/rejected": -2.947648525238037,
527
- "logps/chosen": -259.78289794921875,
528
- "logps/rejected": -249.4364776611328,
529
- "loss": 0.6399,
530
- "rewards/accuracies": 0.581250011920929,
531
- "rewards/chosen": -0.7674423456192017,
532
- "rewards/margins": 0.24263504147529602,
533
- "rewards/rejected": -1.0100773572921753,
534
  "step": 340
535
  },
536
  {
537
  "epoch": 0.3952569169960474,
538
- "grad_norm": 13.1875,
539
- "learning_rate": 3.786843329581147e-06,
540
- "logits/chosen": -2.9657294750213623,
541
- "logits/rejected": -2.944413900375366,
542
- "logps/chosen": -254.1212158203125,
543
- "logps/rejected": -251.4638214111328,
544
- "loss": 0.6688,
545
- "rewards/accuracies": 0.5625,
546
- "rewards/chosen": -0.9390102624893188,
547
- "rewards/margins": 0.23788292706012726,
548
- "rewards/rejected": -1.1768931150436401,
549
  "step": 350
550
  },
551
  {
552
  "epoch": 0.40654997176736307,
553
- "grad_norm": 10.25,
554
- "learning_rate": 3.701270283746168e-06,
555
- "logits/chosen": -2.997607707977295,
556
- "logits/rejected": -2.9588074684143066,
557
- "logps/chosen": -250.17568969726562,
558
- "logps/rejected": -234.97073364257812,
559
- "loss": 0.6788,
560
- "rewards/accuracies": 0.5062500238418579,
561
- "rewards/chosen": -0.9304245114326477,
562
- "rewards/margins": 0.18224455416202545,
563
- "rewards/rejected": -1.1126692295074463,
564
  "step": 360
565
  },
566
  {
567
  "epoch": 0.4178430265386787,
568
- "grad_norm": 10.875,
569
- "learning_rate": 3.613826306056607e-06,
570
- "logits/chosen": -2.8931360244750977,
571
- "logits/rejected": -2.8573451042175293,
572
- "logps/chosen": -274.30328369140625,
573
- "logps/rejected": -245.84512329101562,
574
- "loss": 0.6967,
575
- "rewards/accuracies": 0.543749988079071,
576
- "rewards/chosen": -1.037747859954834,
577
- "rewards/margins": 0.18897393345832825,
578
- "rewards/rejected": -1.2267218828201294,
579
  "step": 370
580
  },
581
  {
582
  "epoch": 0.42913608130999437,
583
- "grad_norm": 12.0625,
584
- "learning_rate": 3.5246475871146646e-06,
585
- "logits/chosen": -2.9629368782043457,
586
- "logits/rejected": -2.920045852661133,
587
- "logps/chosen": -286.22943115234375,
588
- "logps/rejected": -249.3096160888672,
589
- "loss": 0.6428,
590
  "rewards/accuracies": 0.59375,
591
- "rewards/chosen": -0.9461029767990112,
592
- "rewards/margins": 0.26600000262260437,
593
- "rewards/rejected": -1.2121028900146484,
594
  "step": 380
595
  },
596
  {
597
  "epoch": 0.44042913608131,
598
- "grad_norm": 8.5,
599
- "learning_rate": 3.433873019314731e-06,
600
- "logits/chosen": -2.905508279800415,
601
- "logits/rejected": -2.8779773712158203,
602
- "logps/chosen": -249.64663696289062,
603
- "logps/rejected": -215.81161499023438,
604
- "loss": 0.6645,
605
- "rewards/accuracies": 0.5562499761581421,
606
- "rewards/chosen": -0.9739023447036743,
607
- "rewards/margins": 0.22194626927375793,
608
- "rewards/rejected": -1.1958487033843994,
609
  "step": 390
610
  },
611
  {
612
  "epoch": 0.45172219085262566,
613
- "grad_norm": 8.5,
614
- "learning_rate": 3.341643980523871e-06,
615
- "logits/chosen": -2.905945062637329,
616
- "logits/rejected": -2.8710389137268066,
617
- "logps/chosen": -292.928955078125,
618
- "logps/rejected": -241.6216278076172,
619
- "loss": 0.6466,
620
- "rewards/accuracies": 0.5562499761581421,
621
- "rewards/chosen": -0.7866181135177612,
622
- "rewards/margins": 0.2684091627597809,
623
- "rewards/rejected": -1.0550272464752197,
624
  "step": 400
625
  },
626
  {
627
  "epoch": 0.46301524562394125,
628
- "grad_norm": 12.8125,
629
- "learning_rate": 3.2481041138912784e-06,
630
- "logits/chosen": -2.8903775215148926,
631
- "logits/rejected": -2.861661672592163,
632
- "logps/chosen": -260.47174072265625,
633
- "logps/rejected": -255.26931762695312,
634
- "loss": 0.6732,
635
- "rewards/accuracies": 0.543749988079071,
636
- "rewards/chosen": -1.0014512538909912,
637
- "rewards/margins": 0.2172473967075348,
638
- "rewards/rejected": -1.2186987400054932,
639
  "step": 410
640
  },
641
  {
642
  "epoch": 0.4743083003952569,
643
- "grad_norm": 9.75,
644
- "learning_rate": 3.1533991041296484e-06,
645
- "logits/chosen": -2.962191104888916,
646
- "logits/rejected": -2.932969331741333,
647
- "logps/chosen": -259.97283935546875,
648
- "logps/rejected": -248.1492919921875,
649
- "loss": 0.6506,
650
- "rewards/accuracies": 0.5625,
651
- "rewards/chosen": -0.9169241189956665,
652
- "rewards/margins": 0.20076242089271545,
653
- "rewards/rejected": -1.1176865100860596,
654
  "step": 420
655
  },
656
  {
657
  "epoch": 0.48560135516657255,
658
- "grad_norm": 11.375,
659
- "learning_rate": 3.0576764506168893e-06,
660
- "logits/chosen": -2.9573709964752197,
661
- "logits/rejected": -2.9248433113098145,
662
- "logps/chosen": -281.449462890625,
663
- "logps/rejected": -258.242431640625,
664
- "loss": 0.6653,
665
- "rewards/accuracies": 0.581250011920929,
666
- "rewards/chosen": -0.8910619616508484,
667
- "rewards/margins": 0.1914868950843811,
668
- "rewards/rejected": -1.0825488567352295,
669
  "step": 430
670
  },
671
  {
672
  "epoch": 0.4968944099378882,
673
- "grad_norm": 8.5,
674
- "learning_rate": 2.9610852376715625e-06,
675
- "logits/chosen": -2.9228568077087402,
676
- "logits/rejected": -2.8933017253875732,
677
- "logps/chosen": -258.58245849609375,
678
- "logps/rejected": -248.20846557617188,
679
- "loss": 0.6594,
680
- "rewards/accuracies": 0.574999988079071,
681
- "rewards/chosen": -0.9236515164375305,
682
- "rewards/margins": 0.2783660292625427,
683
- "rewards/rejected": -1.2020175457000732,
684
  "step": 440
685
  },
686
  {
687
  "epoch": 0.5081874647092038,
688
- "grad_norm": 7.84375,
689
- "learning_rate": 2.8637759023598467e-06,
690
- "logits/chosen": -2.9429585933685303,
691
- "logits/rejected": -2.9177327156066895,
692
- "logps/chosen": -246.7409210205078,
693
- "logps/rejected": -252.89218139648438,
694
- "loss": 0.6842,
695
- "rewards/accuracies": 0.4937500059604645,
696
- "rewards/chosen": -0.8663239479064941,
697
- "rewards/margins": 0.17053480446338654,
698
- "rewards/rejected": -1.0368586778640747,
699
  "step": 450
700
  },
701
  {
702
  "epoch": 0.5194805194805194,
703
- "grad_norm": 9.25,
704
- "learning_rate": 2.7659000001956376e-06,
705
- "logits/chosen": -2.9368577003479004,
706
- "logits/rejected": -2.899815559387207,
707
- "logps/chosen": -258.90765380859375,
708
- "logps/rejected": -234.8472137451172,
709
- "loss": 0.6159,
710
- "rewards/accuracies": 0.625,
711
- "rewards/chosen": -0.8144370317459106,
712
- "rewards/margins": 0.30742961168289185,
713
- "rewards/rejected": -1.1218667030334473,
714
  "step": 460
715
  },
716
  {
717
  "epoch": 0.5307735742518351,
718
- "grad_norm": 9.8125,
719
- "learning_rate": 2.6676099690987227e-06,
720
- "logits/chosen": -2.938596248626709,
721
- "logits/rejected": -2.9030938148498535,
722
- "logps/chosen": -251.0491485595703,
723
- "logps/rejected": -227.3994140625,
724
- "loss": 0.6612,
725
- "rewards/accuracies": 0.5625,
726
- "rewards/chosen": -0.7350276708602905,
727
- "rewards/margins": 0.21316580474376678,
728
- "rewards/rejected": -0.9481935501098633,
729
  "step": 470
730
  },
731
  {
732
  "epoch": 0.5420666290231507,
733
- "grad_norm": 8.625,
734
- "learning_rate": 2.5690588919786307e-06,
735
- "logits/chosen": -2.9533700942993164,
736
- "logits/rejected": -2.9271883964538574,
737
- "logps/chosen": -265.0854187011719,
738
- "logps/rejected": -239.2405242919922,
739
- "loss": 0.6447,
740
- "rewards/accuracies": 0.5562499761581421,
741
- "rewards/chosen": -0.8996723294258118,
742
- "rewards/margins": 0.29516300559043884,
743
- "rewards/rejected": -1.1948354244232178,
744
  "step": 480
745
  },
746
  {
747
  "epoch": 0.5533596837944664,
748
- "grad_norm": 12.6875,
749
- "learning_rate": 2.4704002583139426e-06,
750
- "logits/chosen": -2.9021379947662354,
751
- "logits/rejected": -2.8771567344665527,
752
- "logps/chosen": -264.7264709472656,
753
- "logps/rejected": -246.2521209716797,
754
- "loss": 0.6077,
755
- "rewards/accuracies": 0.606249988079071,
756
- "rewards/chosen": -0.8024009466171265,
757
- "rewards/margins": 0.2971910834312439,
758
- "rewards/rejected": -1.0995919704437256,
759
  "step": 490
760
  },
761
  {
762
  "epoch": 0.564652738565782,
763
- "grad_norm": 8.9375,
764
- "learning_rate": 2.3717877250983864e-06,
765
- "logits/chosen": -2.938613176345825,
766
- "logits/rejected": -2.904693841934204,
767
- "logps/chosen": -250.9031219482422,
768
- "logps/rejected": -249.9896240234375,
769
- "loss": 0.6574,
770
- "rewards/accuracies": 0.5249999761581421,
771
- "rewards/chosen": -0.845720648765564,
772
- "rewards/margins": 0.24266941845417023,
773
- "rewards/rejected": -1.0883901119232178,
774
  "step": 500
775
  },
776
  {
777
  "epoch": 0.5759457933370977,
778
- "grad_norm": 8.8125,
779
- "learning_rate": 2.2733748775260256e-06,
780
- "logits/chosen": -2.8921456336975098,
781
- "logits/rejected": -2.871410846710205,
782
- "logps/chosen": -226.85903930664062,
783
- "logps/rejected": -219.52587890625,
784
- "loss": 0.6478,
785
- "rewards/accuracies": 0.5687500238418579,
786
- "rewards/chosen": -0.7570312023162842,
787
- "rewards/margins": 0.20835983753204346,
788
- "rewards/rejected": -0.9653909802436829,
789
  "step": 510
790
  },
791
  {
792
  "epoch": 0.5872388481084133,
793
- "grad_norm": 10.25,
794
- "learning_rate": 2.1753149897882847e-06,
795
- "logits/chosen": -2.9432761669158936,
796
- "logits/rejected": -2.892080545425415,
797
- "logps/chosen": -238.2821044921875,
798
- "logps/rejected": -222.26644897460938,
799
- "loss": 0.5833,
800
- "rewards/accuracies": 0.6875,
801
- "rewards/chosen": -0.8168408274650574,
802
- "rewards/margins": 0.41468945145606995,
803
- "rewards/rejected": -1.2315301895141602,
804
  "step": 520
805
  },
806
  {
807
  "epoch": 0.598531902879729,
808
  "grad_norm": 10.0,
809
- "learning_rate": 2.0777607863553423e-06,
810
- "logits/chosen": -2.9561731815338135,
811
- "logits/rejected": -2.9202933311462402,
812
- "logps/chosen": -236.2799835205078,
813
- "logps/rejected": -216.89901733398438,
814
- "loss": 0.673,
815
  "rewards/accuracies": 0.518750011920929,
816
- "rewards/chosen": -0.8298661112785339,
817
- "rewards/margins": 0.18440715968608856,
818
- "rewards/rejected": -1.0142732858657837,
819
  "step": 530
820
  },
821
  {
822
  "epoch": 0.6098249576510446,
823
- "grad_norm": 12.5,
824
- "learning_rate": 1.9808642041136923e-06,
825
- "logits/chosen": -2.9524600505828857,
826
- "logits/rejected": -2.9057111740112305,
827
- "logps/chosen": -239.9007110595703,
828
- "logps/rejected": -232.8452606201172,
829
- "loss": 0.6733,
830
- "rewards/accuracies": 0.550000011920929,
831
- "rewards/chosen": -0.8775818943977356,
832
- "rewards/margins": 0.22822308540344238,
833
- "rewards/rejected": -1.1058050394058228,
834
  "step": 540
835
  },
836
  {
837
  "epoch": 0.6211180124223602,
838
- "grad_norm": 12.0625,
839
- "learning_rate": 1.884776155730342e-06,
840
- "logits/chosen": -2.935317277908325,
841
- "logits/rejected": -2.9047186374664307,
842
- "logps/chosen": -250.3077850341797,
843
- "logps/rejected": -248.65652465820312,
844
- "loss": 0.7337,
845
- "rewards/accuracies": 0.5062500238418579,
846
- "rewards/chosen": -0.9318073391914368,
847
- "rewards/margins": 0.11215513944625854,
848
- "rewards/rejected": -1.0439624786376953,
849
  "step": 550
850
  },
851
  {
852
  "epoch": 0.6324110671936759,
853
- "grad_norm": 11.5625,
854
- "learning_rate": 1.7896462946121873e-06,
855
- "logits/chosen": -2.951495885848999,
856
- "logits/rejected": -2.9132513999938965,
857
- "logps/chosen": -269.6780700683594,
858
- "logps/rejected": -256.81597900390625,
859
- "loss": 0.6249,
860
- "rewards/accuracies": 0.6000000238418579,
861
- "rewards/chosen": -0.8714116215705872,
862
- "rewards/margins": 0.29088258743286133,
863
- "rewards/rejected": -1.1622941493988037,
864
  "step": 560
865
  },
866
  {
867
  "epoch": 0.6437041219649915,
868
- "grad_norm": 9.75,
869
- "learning_rate": 1.695622781826638e-06,
870
- "logits/chosen": -2.954216718673706,
871
- "logits/rejected": -2.906550884246826,
872
- "logps/chosen": -257.61883544921875,
873
- "logps/rejected": -228.7353973388672,
874
- "loss": 0.6324,
875
- "rewards/accuracies": 0.5874999761581421,
876
- "rewards/chosen": -0.7798657417297363,
877
- "rewards/margins": 0.3046206533908844,
878
- "rewards/rejected": -1.0844862461090088,
879
  "step": 570
880
  },
881
  {
882
  "epoch": 0.6549971767363072,
883
- "grad_norm": 11.1875,
884
- "learning_rate": 1.6028520553465053e-06,
885
- "logits/chosen": -2.943326473236084,
886
- "logits/rejected": -2.907252788543701,
887
- "logps/chosen": -255.4546356201172,
888
- "logps/rejected": -229.12185668945312,
889
- "loss": 0.6212,
890
- "rewards/accuracies": 0.581250011920929,
891
- "rewards/chosen": -1.125480055809021,
892
- "rewards/margins": 0.27197134494781494,
893
- "rewards/rejected": -1.3974515199661255,
894
  "step": 580
895
  },
896
  {
897
  "epoch": 0.6662902315076228,
898
- "grad_norm": 10.8125,
899
- "learning_rate": 1.5114786019785416e-06,
900
- "logits/chosen": -2.9562203884124756,
901
- "logits/rejected": -2.9087963104248047,
902
- "logps/chosen": -244.3138427734375,
903
- "logps/rejected": -240.51870727539062,
904
- "loss": 0.6511,
905
- "rewards/accuracies": 0.5874999761581421,
906
- "rewards/chosen": -0.9466648101806641,
907
- "rewards/margins": 0.2708016335964203,
908
- "rewards/rejected": -1.2174664735794067,
909
  "step": 590
910
  },
911
  {
912
  "epoch": 0.6775832862789385,
913
- "grad_norm": 11.0,
914
- "learning_rate": 1.4216447323308444e-06,
915
- "logits/chosen": -2.9042696952819824,
916
- "logits/rejected": -2.883103609085083,
917
- "logps/chosen": -240.0981903076172,
918
- "logps/rejected": -246.4265899658203,
919
- "loss": 0.6582,
920
- "rewards/accuracies": 0.6000000238418579,
921
- "rewards/chosen": -1.1731393337249756,
922
- "rewards/margins": 0.262151300907135,
923
- "rewards/rejected": -1.4352905750274658,
924
  "step": 600
925
  },
926
  {
927
  "epoch": 0.6888763410502541,
928
- "grad_norm": 7.78125,
929
- "learning_rate": 1.3334903591696055e-06,
930
- "logits/chosen": -2.924773931503296,
931
- "logits/rejected": -2.8905885219573975,
932
- "logps/chosen": -243.14501953125,
933
- "logps/rejected": -215.37881469726562,
934
- "loss": 0.6368,
935
- "rewards/accuracies": 0.53125,
936
- "rewards/chosen": -0.9755071401596069,
937
- "rewards/margins": 0.27910715341567993,
938
- "rewards/rejected": -1.2546144723892212,
939
  "step": 610
940
  },
941
  {
942
  "epoch": 0.7001693958215698,
943
- "grad_norm": 13.875,
944
- "learning_rate": 1.247152779510411e-06,
945
- "logits/chosen": -2.941826820373535,
946
- "logits/rejected": -2.906125783920288,
947
- "logps/chosen": -248.43362426757812,
948
- "logps/rejected": -236.13525390625,
949
- "loss": 0.7073,
950
- "rewards/accuracies": 0.4625000059604645,
951
- "rewards/chosen": -1.108798623085022,
952
- "rewards/margins": 0.21407607197761536,
953
- "rewards/rejected": -1.3228747844696045,
954
  "step": 620
955
  },
956
  {
957
  "epoch": 0.7114624505928854,
958
- "grad_norm": 11.625,
959
- "learning_rate": 1.1627664607834593e-06,
960
- "logits/chosen": -2.946981191635132,
961
- "logits/rejected": -2.9171371459960938,
962
- "logps/chosen": -251.35324096679688,
963
- "logps/rejected": -261.75396728515625,
964
- "loss": 0.6583,
965
- "rewards/accuracies": 0.581250011920929,
966
- "rewards/chosen": -0.9292078018188477,
967
- "rewards/margins": 0.23983097076416016,
968
- "rewards/rejected": -1.1690387725830078,
969
  "step": 630
970
  },
971
  {
972
  "epoch": 0.722755505364201,
973
- "grad_norm": 14.5625,
974
- "learning_rate": 1.0804628314057659e-06,
975
- "logits/chosen": -2.920762538909912,
976
- "logits/rejected": -2.8985981941223145,
977
- "logps/chosen": -235.6101837158203,
978
- "logps/rejected": -207.3476104736328,
979
- "loss": 0.6607,
980
  "rewards/accuracies": 0.5687500238418579,
981
- "rewards/chosen": -1.0142499208450317,
982
- "rewards/margins": 0.2305765599012375,
983
- "rewards/rejected": -1.2448265552520752,
984
  "step": 640
985
  },
986
  {
987
  "epoch": 0.7340485601355167,
988
- "grad_norm": 16.0,
989
- "learning_rate": 1.0003700760864931e-06,
990
- "logits/chosen": -2.902686595916748,
991
- "logits/rejected": -2.8842315673828125,
992
- "logps/chosen": -252.89035034179688,
993
- "logps/rejected": -250.73623657226562,
994
- "loss": 0.6813,
995
  "rewards/accuracies": 0.5562499761581421,
996
- "rewards/chosen": -1.051636815071106,
997
- "rewards/margins": 0.21588905155658722,
998
- "rewards/rejected": -1.2675259113311768,
999
  "step": 650
1000
  },
1001
  {
1002
  "epoch": 0.7453416149068323,
1003
- "grad_norm": 12.3125,
1004
- "learning_rate": 9.226129361842498e-07,
1005
- "logits/chosen": -2.9301822185516357,
1006
- "logits/rejected": -2.881704807281494,
1007
- "logps/chosen": -236.1824951171875,
1008
- "logps/rejected": -207.68685913085938,
1009
- "loss": 0.6363,
1010
- "rewards/accuracies": 0.574999988079071,
1011
- "rewards/chosen": -1.060085415840149,
1012
- "rewards/margins": 0.3055773675441742,
1013
- "rewards/rejected": -1.365662693977356,
1014
  "step": 660
1015
  },
1016
  {
1017
  "epoch": 0.756634669678148,
1018
- "grad_norm": 11.3125,
1019
- "learning_rate": 8.473125154272563e-07,
1020
- "logits/chosen": -2.8978400230407715,
1021
- "logits/rejected": -2.845043659210205,
1022
- "logps/chosen": -245.31027221679688,
1023
- "logps/rejected": -251.3234100341797,
1024
- "loss": 0.6158,
1025
- "rewards/accuracies": 0.5874999761581421,
1026
- "rewards/chosen": -0.980219841003418,
1027
- "rewards/margins": 0.3760405480861664,
1028
- "rewards/rejected": -1.3562604188919067,
1029
  "step": 670
1030
  },
1031
  {
1032
  "epoch": 0.7679277244494636,
1033
- "grad_norm": 11.4375,
1034
- "learning_rate": 7.74586091298995e-07,
1035
- "logits/chosen": -2.9392664432525635,
1036
- "logits/rejected": -2.8935275077819824,
1037
- "logps/chosen": -263.9918518066406,
1038
- "logps/rejected": -226.14920043945312,
1039
- "loss": 0.6485,
1040
- "rewards/accuracies": 0.574999988079071,
1041
- "rewards/chosen": -1.0862810611724854,
1042
- "rewards/margins": 0.3031725287437439,
1043
- "rewards/rejected": -1.389453649520874,
1044
  "step": 680
1045
  },
1046
  {
1047
  "epoch": 0.7792207792207793,
1048
- "grad_norm": 10.4375,
1049
- "learning_rate": 7.045469323830731e-07,
1050
- "logits/chosen": -2.908051013946533,
1051
- "logits/rejected": -2.877911329269409,
1052
- "logps/chosen": -247.0584259033203,
1053
- "logps/rejected": -250.07669067382812,
1054
- "loss": 0.677,
1055
- "rewards/accuracies": 0.5249999761581421,
1056
- "rewards/chosen": -1.146324872970581,
1057
- "rewards/margins": 0.17685498297214508,
1058
- "rewards/rejected": -1.3231797218322754,
1059
  "step": 690
1060
  },
1061
  {
1062
  "epoch": 0.7905138339920948,
1063
- "grad_norm": 9.625,
1064
- "learning_rate": 6.373041219518059e-07,
1065
- "logits/chosen": -2.9572408199310303,
1066
- "logits/rejected": -2.937887191772461,
1067
- "logps/chosen": -252.33755493164062,
1068
- "logps/rejected": -229.02926635742188,
1069
- "loss": 0.6706,
1070
- "rewards/accuracies": 0.5375000238418579,
1071
- "rewards/chosen": -1.0239284038543701,
1072
- "rewards/margins": 0.20092003047466278,
1073
- "rewards/rejected": -1.2248482704162598,
1074
  "step": 700
1075
  },
1076
  {
1077
  "epoch": 0.8018068887634106,
1078
  "grad_norm": 9.5625,
1079
- "learning_rate": 5.729623880732455e-07,
1080
- "logits/chosen": -2.9367566108703613,
1081
- "logits/rejected": -2.9192299842834473,
1082
- "logps/chosen": -245.69668579101562,
1083
- "logps/rejected": -225.72848510742188,
1084
- "loss": 0.6588,
1085
- "rewards/accuracies": 0.612500011920929,
1086
- "rewards/chosen": -1.0508708953857422,
1087
- "rewards/margins": 0.26678308844566345,
1088
- "rewards/rejected": -1.3176538944244385,
1089
  "step": 710
1090
  },
1091
  {
1092
  "epoch": 0.8130999435347261,
1093
- "grad_norm": 8.5,
1094
- "learning_rate": 5.116219405012865e-07,
1095
- "logits/chosen": -2.9278311729431152,
1096
- "logits/rejected": -2.8987011909484863,
1097
- "logps/chosen": -246.24691772460938,
1098
- "logps/rejected": -206.0987548828125,
1099
- "loss": 0.6105,
1100
  "rewards/accuracies": 0.581250011920929,
1101
- "rewards/chosen": -1.0415557622909546,
1102
- "rewards/margins": 0.3364638686180115,
1103
- "rewards/rejected": -1.3780196905136108,
1104
  "step": 720
1105
  },
1106
  {
1107
  "epoch": 0.8243929983060417,
1108
- "grad_norm": 7.75,
1109
- "learning_rate": 4.533783146028542e-07,
1110
- "logits/chosen": -2.9237890243530273,
1111
- "logits/rejected": -2.8972859382629395,
1112
- "logps/chosen": -234.02517700195312,
1113
- "logps/rejected": -260.5685119628906,
1114
- "loss": 0.6314,
1115
- "rewards/accuracies": 0.6000000238418579,
1116
- "rewards/chosen": -1.0762670040130615,
1117
- "rewards/margins": 0.31192946434020996,
1118
- "rewards/rejected": -1.388196587562561,
1119
  "step": 730
1120
  },
1121
  {
1122
  "epoch": 0.8356860530773574,
1123
- "grad_norm": 7.71875,
1124
- "learning_rate": 3.9832222256528633e-07,
1125
- "logits/chosen": -2.9241185188293457,
1126
- "logits/rejected": -2.8836278915405273,
1127
- "logps/chosen": -238.7316131591797,
1128
- "logps/rejected": -258.7846984863281,
1129
- "loss": 0.625,
1130
- "rewards/accuracies": 0.581250011920929,
1131
- "rewards/chosen": -1.1147111654281616,
1132
- "rewards/margins": 0.31473425030708313,
1133
- "rewards/rejected": -1.4294453859329224,
1134
  "step": 740
1135
  },
1136
  {
1137
  "epoch": 0.846979107848673,
1138
- "grad_norm": 7.8125,
1139
- "learning_rate": 3.4653941211561265e-07,
1140
- "logits/chosen": -2.918570041656494,
1141
- "logits/rejected": -2.8599822521209717,
1142
- "logps/chosen": -228.1241912841797,
1143
- "logps/rejected": -200.6188507080078,
1144
- "loss": 0.591,
1145
- "rewards/accuracies": 0.581250011920929,
1146
- "rewards/chosen": -1.1377301216125488,
1147
- "rewards/margins": 0.45671454071998596,
1148
- "rewards/rejected": -1.5944445133209229,
1149
  "step": 750
1150
  },
1151
  {
1152
  "epoch": 0.8582721626199887,
1153
- "grad_norm": 11.9375,
1154
- "learning_rate": 2.9811053297180454e-07,
1155
- "logits/chosen": -2.9596495628356934,
1156
- "logits/rejected": -2.938786268234253,
1157
- "logps/chosen": -231.0944061279297,
1158
- "logps/rejected": -241.2841339111328,
1159
- "loss": 0.6626,
1160
- "rewards/accuracies": 0.5687500238418579,
1161
- "rewards/chosen": -1.0585596561431885,
1162
- "rewards/margins": 0.2590670883655548,
1163
- "rewards/rejected": -1.317626714706421,
1164
  "step": 760
1165
  },
1166
  {
1167
  "epoch": 0.8695652173913043,
1168
  "grad_norm": 9.6875,
1169
- "learning_rate": 2.531110112339638e-07,
1170
- "logits/chosen": -2.933401584625244,
1171
- "logits/rejected": -2.920301914215088,
1172
- "logps/chosen": -237.66879272460938,
1173
- "logps/rejected": -238.02297973632812,
1174
- "loss": 0.6987,
1175
- "rewards/accuracies": 0.543749988079071,
1176
- "rewards/chosen": -1.0770819187164307,
1177
- "rewards/margins": 0.14950065314769745,
1178
- "rewards/rejected": -1.2265825271606445,
1179
  "step": 770
1180
  },
1181
  {
1182
  "epoch": 0.88085827216262,
1183
- "grad_norm": 12.5,
1184
- "learning_rate": 2.116109319111015e-07,
1185
- "logits/chosen": -2.912548065185547,
1186
- "logits/rejected": -2.8936960697174072,
1187
- "logps/chosen": -235.7461395263672,
1188
- "logps/rejected": -246.876708984375,
1189
- "loss": 0.7254,
1190
- "rewards/accuracies": 0.518750011920929,
1191
- "rewards/chosen": -1.1536673307418823,
1192
- "rewards/margins": 0.14096426963806152,
1193
- "rewards/rejected": -1.2946317195892334,
1194
  "step": 780
1195
  },
1196
  {
1197
  "epoch": 0.8921513269339356,
1198
- "grad_norm": 7.34375,
1199
- "learning_rate": 1.736749297664539e-07,
1200
- "logits/chosen": -2.9254114627838135,
1201
- "logits/rejected": -2.89033842086792,
1202
- "logps/chosen": -263.29071044921875,
1203
- "logps/rejected": -274.1188659667969,
1204
- "loss": 0.6559,
1205
- "rewards/accuracies": 0.5625,
1206
- "rewards/chosen": -0.9891961216926575,
1207
- "rewards/margins": 0.2855607271194458,
1208
- "rewards/rejected": -1.2747570276260376,
1209
  "step": 790
1210
  },
1211
  {
1212
  "epoch": 0.9034443817052513,
1213
- "grad_norm": 9.4375,
1214
- "learning_rate": 1.3936208865134666e-07,
1215
- "logits/chosen": -2.930879831314087,
1216
- "logits/rejected": -2.8965582847595215,
1217
- "logps/chosen": -254.17044067382812,
1218
- "logps/rejected": -273.36724853515625,
1219
- "loss": 0.6567,
1220
- "rewards/accuracies": 0.5687500238418579,
1221
- "rewards/chosen": -1.0911872386932373,
1222
- "rewards/margins": 0.27449628710746765,
1223
- "rewards/rejected": -1.3656837940216064,
1224
  "step": 800
1225
  },
1226
  {
1227
  "epoch": 0.9147374364765669,
1228
- "grad_norm": 8.125,
1229
- "learning_rate": 1.0872584948439208e-07,
1230
- "logits/chosen": -2.9673569202423096,
1231
- "logits/rejected": -2.938894748687744,
1232
- "logps/chosen": -264.1458435058594,
1233
- "logps/rejected": -253.3695068359375,
1234
- "loss": 0.6348,
1235
- "rewards/accuracies": 0.574999988079071,
1236
- "rewards/chosen": -0.9376649856567383,
1237
- "rewards/margins": 0.28761720657348633,
1238
- "rewards/rejected": -1.225282073020935,
1239
  "step": 810
1240
  },
1241
  {
1242
  "epoch": 0.9260304912478825,
1243
- "grad_norm": 10.75,
1244
- "learning_rate": 8.181392701932745e-08,
1245
- "logits/chosen": -2.9556472301483154,
1246
- "logits/rejected": -2.930018663406372,
1247
- "logps/chosen": -244.6985321044922,
1248
- "logps/rejected": -241.3518524169922,
1249
- "loss": 0.6962,
1250
  "rewards/accuracies": 0.5375000238418579,
1251
- "rewards/chosen": -1.143431305885315,
1252
- "rewards/margins": 0.20309944450855255,
1253
- "rewards/rejected": -1.3465306758880615,
1254
  "step": 820
1255
  },
1256
  {
1257
  "epoch": 0.9373235460191982,
1258
- "grad_norm": 11.1875,
1259
- "learning_rate": 5.866823553114187e-08,
1260
- "logits/chosen": -2.9636921882629395,
1261
- "logits/rejected": -2.934422254562378,
1262
- "logps/chosen": -249.5702667236328,
1263
- "logps/rejected": -229.45053100585938,
1264
- "loss": 0.6749,
1265
- "rewards/accuracies": 0.5375000238418579,
1266
- "rewards/chosen": -1.1629018783569336,
1267
- "rewards/margins": 0.19255365431308746,
1268
- "rewards/rejected": -1.3554556369781494,
1269
  "step": 830
1270
  },
1271
  {
1272
  "epoch": 0.9486166007905138,
1273
- "grad_norm": 12.6875,
1274
- "learning_rate": 3.932482353621536e-08,
1275
- "logits/chosen": -2.9121766090393066,
1276
- "logits/rejected": -2.8778061866760254,
1277
- "logps/chosen": -252.2318115234375,
1278
- "logps/rejected": -242.0477294921875,
1279
- "loss": 0.7142,
1280
- "rewards/accuracies": 0.4375,
1281
- "rewards/chosen": -1.1486766338348389,
1282
- "rewards/margins": 0.10505317151546478,
1283
- "rewards/rejected": -1.253730058670044,
1284
  "step": 840
1285
  },
1286
  {
1287
  "epoch": 0.9599096555618295,
1288
- "grad_norm": 10.6875,
1289
- "learning_rate": 2.381381764815671e-08,
1290
- "logits/chosen": -2.9521381855010986,
1291
- "logits/rejected": -2.9332849979400635,
1292
- "logps/chosen": -248.31655883789062,
1293
- "logps/rejected": -244.4917449951172,
1294
- "loss": 0.7399,
1295
- "rewards/accuracies": 0.53125,
1296
- "rewards/chosen": -1.1000101566314697,
1297
- "rewards/margins": 0.10928668826818466,
1298
- "rewards/rejected": -1.2092968225479126,
1299
  "step": 850
1300
  },
1301
  {
1302
  "epoch": 0.9712027103331451,
1303
- "grad_norm": 11.125,
1304
- "learning_rate": 1.2159375656770056e-08,
1305
- "logits/chosen": -2.964635133743286,
1306
- "logits/rejected": -2.9422874450683594,
1307
- "logps/chosen": -266.625732421875,
1308
- "logps/rejected": -265.8684997558594,
1309
- "loss": 0.693,
1310
- "rewards/accuracies": 0.518750011920929,
1311
- "rewards/chosen": -1.1866648197174072,
1312
- "rewards/margins": 0.21592363715171814,
1313
- "rewards/rejected": -1.4025886058807373,
1314
  "step": 860
1315
  },
1316
  {
1317
  "epoch": 0.9824957651044608,
1318
- "grad_norm": 11.5,
1319
- "learning_rate": 4.379648903234668e-09,
1320
- "logits/chosen": -2.930752754211426,
1321
- "logits/rejected": -2.8959178924560547,
1322
- "logps/chosen": -268.1014404296875,
1323
- "logps/rejected": -252.4002227783203,
1324
- "loss": 0.6735,
1325
- "rewards/accuracies": 0.5375000238418579,
1326
- "rewards/chosen": -0.995762825012207,
1327
- "rewards/margins": 0.236598402261734,
1328
- "rewards/rejected": -1.2323614358901978,
1329
  "step": 870
1330
  },
1331
  {
1332
  "epoch": 0.9937888198757764,
1333
- "grad_norm": 9.75,
1334
- "learning_rate": 4.867540100961244e-10,
1335
- "logits/chosen": -2.928917646408081,
1336
- "logits/rejected": -2.881701946258545,
1337
- "logps/chosen": -294.4600524902344,
1338
- "logps/rejected": -246.2449493408203,
1339
- "loss": 0.6221,
1340
- "rewards/accuracies": 0.5687500238418579,
1341
- "rewards/chosen": -1.1883199214935303,
1342
- "rewards/margins": 0.35472264885902405,
1343
- "rewards/rejected": -1.543042540550232,
1344
  "step": 880
1345
  },
1346
  {
1347
  "epoch": 0.9994353472614342,
1348
  "step": 885,
1349
  "total_flos": 0.0,
1350
- "train_loss": 0.6616038349388683,
1351
- "train_runtime": 9964.5835,
1352
- "train_samples_per_second": 1.422,
1353
  "train_steps_per_second": 0.089
1354
  }
1355
  ],
 
11
  {
12
  "epoch": 0.001129305477131564,
13
  "grad_norm": 10.5625,
14
+ "learning_rate": 1.1235955056179774e-08,
15
  "logits/chosen": -2.8912107944488525,
16
  "logits/rejected": -2.9116690158843994,
17
  "logps/chosen": -205.43255615234375,
 
25
  },
26
  {
27
  "epoch": 0.01129305477131564,
28
+ "grad_norm": 11.5,
29
+ "learning_rate": 1.1235955056179774e-07,
30
+ "logits/chosen": -2.9240243434906006,
31
+ "logits/rejected": -2.895505666732788,
32
+ "logps/chosen": -286.3099365234375,
33
+ "logps/rejected": -234.07386779785156,
34
+ "loss": 0.6929,
35
+ "rewards/accuracies": 0.3888888955116272,
36
+ "rewards/chosen": 0.00452011963352561,
37
+ "rewards/margins": 0.0007920749485492706,
38
+ "rewards/rejected": 0.0037280453834682703,
39
  "step": 10
40
  },
41
  {
42
  "epoch": 0.02258610954263128,
43
+ "grad_norm": 8.125,
44
+ "learning_rate": 2.2471910112359549e-07,
45
+ "logits/chosen": -2.9115793704986572,
46
+ "logits/rejected": -2.8940234184265137,
47
+ "logps/chosen": -242.3488311767578,
48
+ "logps/rejected": -230.69912719726562,
49
+ "loss": 0.6965,
50
+ "rewards/accuracies": 0.40625,
51
+ "rewards/chosen": -0.006699255667626858,
52
+ "rewards/margins": -0.006314368452876806,
53
+ "rewards/rejected": -0.0003848885535262525,
54
  "step": 20
55
  },
56
  {
57
  "epoch": 0.03387916431394692,
58
  "grad_norm": 10.4375,
59
+ "learning_rate": 3.3707865168539325e-07,
60
+ "logits/chosen": -2.933945894241333,
61
+ "logits/rejected": -2.9031498432159424,
62
+ "logps/chosen": -227.9883270263672,
63
+ "logps/rejected": -206.83169555664062,
64
+ "loss": 0.6914,
65
+ "rewards/accuracies": 0.5,
66
+ "rewards/chosen": 0.0010180685203522444,
67
+ "rewards/margins": 0.003906534984707832,
68
+ "rewards/rejected": -0.0028884666971862316,
69
  "step": 30
70
  },
71
  {
72
  "epoch": 0.04517221908526256,
73
+ "grad_norm": 10.5,
74
+ "learning_rate": 4.4943820224719097e-07,
75
+ "logits/chosen": -2.940782308578491,
76
+ "logits/rejected": -2.903311252593994,
77
+ "logps/chosen": -239.7572021484375,
78
+ "logps/rejected": -223.0744171142578,
79
+ "loss": 0.6943,
80
+ "rewards/accuracies": 0.46875,
81
+ "rewards/chosen": -0.001310513005591929,
82
+ "rewards/margins": -0.0019447545055299997,
83
+ "rewards/rejected": 0.0006342411506921053,
84
  "step": 40
85
  },
86
  {
87
  "epoch": 0.05646527385657821,
88
  "grad_norm": 11.25,
89
+ "learning_rate": 5.617977528089887e-07,
90
+ "logits/chosen": -2.926548719406128,
91
+ "logits/rejected": -2.891540050506592,
92
+ "logps/chosen": -252.4969024658203,
93
+ "logps/rejected": -208.90573120117188,
94
+ "loss": 0.6926,
95
+ "rewards/accuracies": 0.48124998807907104,
96
+ "rewards/chosen": -0.002666835905984044,
97
+ "rewards/margins": 0.0014006331330165267,
98
+ "rewards/rejected": -0.004067468456923962,
99
  "step": 50
100
  },
101
  {
102
  "epoch": 0.06775832862789384,
103
+ "grad_norm": 9.0625,
104
+ "learning_rate": 6.741573033707865e-07,
105
+ "logits/chosen": -2.951221227645874,
106
+ "logits/rejected": -2.9214468002319336,
107
+ "logps/chosen": -230.9364013671875,
108
+ "logps/rejected": -222.9247283935547,
109
+ "loss": 0.696,
110
+ "rewards/accuracies": 0.42500001192092896,
111
+ "rewards/chosen": -0.008724420331418514,
112
+ "rewards/margins": -0.0053501129150390625,
113
+ "rewards/rejected": -0.003374306485056877,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.07905138339920949,
118
+ "grad_norm": 15.75,
119
+ "learning_rate": 7.865168539325843e-07,
120
+ "logits/chosen": -2.915444850921631,
121
+ "logits/rejected": -2.8806443214416504,
122
+ "logps/chosen": -249.3389434814453,
123
+ "logps/rejected": -235.85330200195312,
124
+ "loss": 0.6919,
125
+ "rewards/accuracies": 0.512499988079071,
126
+ "rewards/chosen": -0.006868250668048859,
127
+ "rewards/margins": 0.003010864369571209,
128
+ "rewards/rejected": -0.009879115037620068,
129
  "step": 70
130
  },
131
  {
132
  "epoch": 0.09034443817052512,
133
+ "grad_norm": 13.375,
134
+ "learning_rate": 8.988764044943819e-07,
135
+ "logits/chosen": -2.893014430999756,
136
+ "logits/rejected": -2.8584141731262207,
137
+ "logps/chosen": -244.0336456298828,
138
+ "logps/rejected": -241.87136840820312,
139
+ "loss": 0.6888,
140
+ "rewards/accuracies": 0.543749988079071,
141
+ "rewards/chosen": -0.01003711111843586,
142
+ "rewards/margins": 0.00937909446656704,
143
+ "rewards/rejected": -0.0194162018597126,
144
  "step": 80
145
  },
146
  {
147
  "epoch": 0.10163749294184077,
148
+ "grad_norm": 6.90625,
149
+ "learning_rate": 9.99996105846605e-07,
150
+ "logits/chosen": -2.912137031555176,
151
+ "logits/rejected": -2.8917088508605957,
152
+ "logps/chosen": -230.53872680664062,
153
+ "logps/rejected": -230.29702758789062,
154
+ "loss": 0.6905,
155
+ "rewards/accuracies": 0.5,
156
+ "rewards/chosen": -0.01865602657198906,
157
+ "rewards/margins": 0.006008770316839218,
158
+ "rewards/rejected": -0.024664796888828278,
159
  "step": 90
160
  },
161
  {
162
  "epoch": 0.11293054771315642,
163
+ "grad_norm": 8.0625,
164
+ "learning_rate": 9.99528880830604e-07,
165
+ "logits/chosen": -2.954880952835083,
166
+ "logits/rejected": -2.9029383659362793,
167
+ "logps/chosen": -230.21240234375,
168
+ "logps/rejected": -242.08786010742188,
169
+ "loss": 0.6888,
170
+ "rewards/accuracies": 0.5,
171
+ "rewards/chosen": -0.03368181362748146,
172
+ "rewards/margins": 0.010173236019909382,
173
+ "rewards/rejected": -0.04385504871606827,
174
  "step": 100
175
  },
176
  {
177
  "epoch": 0.12422360248447205,
178
+ "grad_norm": 10.4375,
179
+ "learning_rate": 9.982836589657042e-07,
180
+ "logits/chosen": -2.893187999725342,
181
+ "logits/rejected": -2.855039596557617,
182
+ "logps/chosen": -253.92001342773438,
183
+ "logps/rejected": -249.1540069580078,
184
+ "loss": 0.6881,
185
+ "rewards/accuracies": 0.53125,
186
+ "rewards/chosen": -0.04104708880186081,
187
+ "rewards/margins": 0.011361517943441868,
188
+ "rewards/rejected": -0.0524086132645607,
189
  "step": 110
190
  },
191
  {
192
  "epoch": 0.13551665725578768,
193
+ "grad_norm": 8.1875,
194
+ "learning_rate": 9.962623796366428e-07,
195
+ "logits/chosen": -2.9364559650421143,
196
+ "logits/rejected": -2.921806812286377,
197
+ "logps/chosen": -233.13034057617188,
198
+ "logps/rejected": -215.0299072265625,
199
+ "loss": 0.6917,
200
+ "rewards/accuracies": 0.5062500238418579,
201
+ "rewards/chosen": -0.0493025965988636,
202
+ "rewards/margins": 0.004110801964998245,
203
+ "rewards/rejected": -0.05341339856386185,
204
  "step": 120
205
  },
206
  {
207
  "epoch": 0.14680971202710333,
208
+ "grad_norm": 10.875,
209
+ "learning_rate": 9.934681909075434e-07,
210
+ "logits/chosen": -2.9207844734191895,
211
+ "logits/rejected": -2.894355297088623,
212
+ "logps/chosen": -246.37173461914062,
213
+ "logps/rejected": -252.4733428955078,
214
+ "loss": 0.6846,
215
+ "rewards/accuracies": 0.6187499761581421,
216
+ "rewards/chosen": -0.056733161211013794,
217
+ "rewards/margins": 0.019008487462997437,
218
+ "rewards/rejected": -0.07574164122343063,
219
  "step": 130
220
  },
221
  {
222
  "epoch": 0.15810276679841898,
223
+ "grad_norm": 7.75,
224
+ "learning_rate": 9.899054446189302e-07,
225
+ "logits/chosen": -2.9487192630767822,
226
+ "logits/rejected": -2.9129204750061035,
227
+ "logps/chosen": -248.5870361328125,
228
+ "logps/rejected": -237.9082489013672,
229
+ "loss": 0.6859,
230
+ "rewards/accuracies": 0.4937500059604645,
231
+ "rewards/chosen": -0.06868256628513336,
232
+ "rewards/margins": 0.016593391075730324,
233
+ "rewards/rejected": -0.08527596294879913,
234
  "step": 140
235
  },
236
  {
237
  "epoch": 0.16939582156973462,
238
+ "grad_norm": 10.75,
239
+ "learning_rate": 9.855796896099044e-07,
240
+ "logits/chosen": -2.915994167327881,
241
+ "logits/rejected": -2.8766446113586426,
242
+ "logps/chosen": -257.65740966796875,
243
+ "logps/rejected": -226.9933624267578,
244
+ "loss": 0.6854,
245
+ "rewards/accuracies": 0.550000011920929,
246
+ "rewards/chosen": -0.07911563664674759,
247
+ "rewards/margins": 0.018480664119124413,
248
+ "rewards/rejected": -0.09759628772735596,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.18068887634105024,
253
+ "grad_norm": 9.375,
254
+ "learning_rate": 9.804976630760418e-07,
255
+ "logits/chosen": -2.918802261352539,
256
+ "logits/rejected": -2.87815260887146,
257
+ "logps/chosen": -219.70858764648438,
258
+ "logps/rejected": -212.9764404296875,
259
+ "loss": 0.6801,
260
+ "rewards/accuracies": 0.5375000238418579,
261
+ "rewards/chosen": -0.07974254339933395,
262
+ "rewards/margins": 0.029045332223176956,
263
+ "rewards/rejected": -0.10878787934780121,
264
  "step": 160
265
  },
266
  {
267
  "epoch": 0.1919819311123659,
268
+ "grad_norm": 58.5,
269
+ "learning_rate": 9.746672800764734e-07,
270
+ "logits/chosen": -2.9482781887054443,
271
+ "logits/rejected": -2.911285400390625,
272
+ "logps/chosen": -248.09774780273438,
273
+ "logps/rejected": -235.35397338867188,
274
+ "loss": 0.6792,
275
  "rewards/accuracies": 0.581250011920929,
276
+ "rewards/chosen": -0.09282518178224564,
277
+ "rewards/margins": 0.03139440342783928,
278
+ "rewards/rejected": -0.12421958148479462,
279
  "step": 170
280
  },
281
  {
282
  "epoch": 0.20327498588368154,
283
+ "grad_norm": 9.8125,
284
+ "learning_rate": 9.680976212064874e-07,
285
+ "logits/chosen": -2.9101614952087402,
286
+ "logits/rejected": -2.865682363510132,
287
+ "logps/chosen": -244.28964233398438,
288
+ "logps/rejected": -202.06594848632812,
289
+ "loss": 0.6829,
290
+ "rewards/accuracies": 0.53125,
291
+ "rewards/chosen": -0.10991182178258896,
292
+ "rewards/margins": 0.024404151365160942,
293
+ "rewards/rejected": -0.13431595265865326,
294
  "step": 180
295
  },
296
  {
297
  "epoch": 0.21456804065499718,
298
+ "grad_norm": 9.5,
299
+ "learning_rate": 9.607989184548542e-07,
300
+ "logits/chosen": -2.928318738937378,
301
+ "logits/rejected": -2.9046478271484375,
302
+ "logps/chosen": -249.5603790283203,
303
+ "logps/rejected": -234.435791015625,
304
+ "loss": 0.6893,
305
+ "rewards/accuracies": 0.53125,
306
+ "rewards/chosen": -0.11975125968456268,
307
+ "rewards/margins": 0.012782303616404533,
308
+ "rewards/rejected": -0.13253356516361237,
309
  "step": 190
310
  },
311
  {
312
  "epoch": 0.22586109542631283,
313
+ "grad_norm": 10.0625,
314
+ "learning_rate": 9.52782539267901e-07,
315
+ "logits/chosen": -2.9089949131011963,
316
+ "logits/rejected": -2.8771417140960693,
317
+ "logps/chosen": -238.6053009033203,
318
+ "logps/rejected": -244.843994140625,
319
+ "loss": 0.6775,
320
+ "rewards/accuracies": 0.6000000238418579,
321
+ "rewards/chosen": -0.12132523953914642,
322
+ "rewards/margins": 0.03615022823214531,
323
+ "rewards/rejected": -0.15747547149658203,
324
  "step": 200
325
  },
326
  {
327
  "epoch": 0.23715415019762845,
328
+ "grad_norm": 10.75,
329
+ "learning_rate": 9.44060968845156e-07,
330
+ "logits/chosen": -2.957009792327881,
331
+ "logits/rejected": -2.917191982269287,
332
+ "logps/chosen": -254.15109252929688,
333
+ "logps/rejected": -213.61257934570312,
334
+ "loss": 0.6785,
335
+ "rewards/accuracies": 0.5562499761581421,
336
+ "rewards/chosen": -0.12494392693042755,
337
+ "rewards/margins": 0.034127604216337204,
338
+ "rewards/rejected": -0.15907153487205505,
339
  "step": 210
340
  },
341
  {
342
  "epoch": 0.2484472049689441,
343
+ "grad_norm": 11.6875,
344
+ "learning_rate": 9.346477906941331e-07,
345
+ "logits/chosen": -2.9415574073791504,
346
+ "logits/rejected": -2.908458709716797,
347
+ "logps/chosen": -273.68597412109375,
348
+ "logps/rejected": -242.5727996826172,
349
+ "loss": 0.6812,
350
+ "rewards/accuracies": 0.550000011920929,
351
+ "rewards/chosen": -0.14751294255256653,
352
+ "rewards/margins": 0.03075326606631279,
353
+ "rewards/rejected": -0.17826619744300842,
354
  "step": 220
355
  },
356
  {
357
  "epoch": 0.2597402597402597,
358
+ "grad_norm": 10.75,
359
+ "learning_rate": 9.245576654745471e-07,
360
+ "logits/chosen": -2.961373805999756,
361
+ "logits/rejected": -2.9270126819610596,
362
+ "logps/chosen": -271.6230163574219,
363
+ "logps/rejected": -242.1273956298828,
364
+ "loss": 0.6785,
365
+ "rewards/accuracies": 0.5375000238418579,
366
+ "rewards/chosen": -0.15283231437206268,
367
+ "rewards/margins": 0.035538919270038605,
368
+ "rewards/rejected": -0.18837125599384308,
369
  "step": 230
370
  },
371
  {
372
  "epoch": 0.27103331451157536,
373
+ "grad_norm": 11.4375,
374
+ "learning_rate": 9.13806308164905e-07,
375
+ "logits/chosen": -2.9348835945129395,
376
+ "logits/rejected": -2.8914520740509033,
377
+ "logps/chosen": -231.8206024169922,
378
+ "logps/rejected": -218.68106079101562,
379
+ "loss": 0.6784,
380
+ "rewards/accuracies": 0.5687500238418579,
381
+ "rewards/chosen": -0.1429007351398468,
382
+ "rewards/margins": 0.037423450499773026,
383
+ "rewards/rejected": -0.18032419681549072,
384
  "step": 240
385
  },
386
  {
387
  "epoch": 0.282326369282891,
388
+ "grad_norm": 8.9375,
389
+ "learning_rate": 9.024104635870367e-07,
390
+ "logits/chosen": -2.9596941471099854,
391
+ "logits/rejected": -2.9176220893859863,
392
+ "logps/chosen": -234.1117706298828,
393
+ "logps/rejected": -221.8756866455078,
394
+ "loss": 0.6625,
395
+ "rewards/accuracies": 0.6312500238418579,
396
+ "rewards/chosen": -0.16116580367088318,
397
+ "rewards/margins": 0.07054189592599869,
398
+ "rewards/rejected": -0.23170769214630127,
399
  "step": 250
400
  },
401
  {
402
  "epoch": 0.29361942405420666,
403
+ "grad_norm": 10.875,
404
+ "learning_rate": 8.90387880326684e-07,
405
+ "logits/chosen": -2.9382996559143066,
406
+ "logits/rejected": -2.9059150218963623,
407
+ "logps/chosen": -236.49020385742188,
408
+ "logps/rejected": -239.8851318359375,
409
+ "loss": 0.6765,
410
  "rewards/accuracies": 0.625,
411
+ "rewards/chosen": -0.15588422119617462,
412
+ "rewards/margins": 0.04047652706503868,
413
+ "rewards/rejected": -0.1963607370853424,
414
  "step": 260
415
  },
416
  {
417
  "epoch": 0.3049124788255223,
418
+ "grad_norm": 9.0,
419
+ "learning_rate": 8.777572830907684e-07,
420
+ "logits/chosen": -2.950742721557617,
421
+ "logits/rejected": -2.935871124267578,
422
+ "logps/chosen": -235.8411407470703,
423
+ "logps/rejected": -237.037109375,
424
+ "loss": 0.6797,
425
+ "rewards/accuracies": 0.5249999761581421,
426
+ "rewards/chosen": -0.18154451251029968,
427
+ "rewards/margins": 0.03585205227136612,
428
+ "rewards/rejected": -0.2173965722322464,
429
  "step": 270
430
  },
431
  {
432
  "epoch": 0.31620553359683795,
433
+ "grad_norm": 9.5,
434
+ "learning_rate": 8.645383435443851e-07,
435
+ "logits/chosen": -2.944791793823242,
436
+ "logits/rejected": -2.9193873405456543,
437
+ "logps/chosen": -258.33331298828125,
438
+ "logps/rejected": -272.0276184082031,
439
+ "loss": 0.6716,
440
+ "rewards/accuracies": 0.6000000238418579,
441
+ "rewards/chosen": -0.1910783350467682,
442
+ "rewards/margins": 0.051886141300201416,
443
+ "rewards/rejected": -0.242964506149292,
444
  "step": 280
445
  },
446
  {
447
  "epoch": 0.3274985883681536,
448
+ "grad_norm": 11.1875,
449
+ "learning_rate": 8.507516496729493e-07,
450
+ "logits/chosen": -2.9242568016052246,
451
+ "logits/rejected": -2.9168529510498047,
452
+ "logps/chosen": -251.5631103515625,
453
+ "logps/rejected": -245.46444702148438,
454
+ "loss": 0.703,
455
+ "rewards/accuracies": 0.4625000059604645,
456
+ "rewards/chosen": -0.23275713622570038,
457
+ "rewards/margins": -0.012155926786363125,
458
+ "rewards/rejected": -0.22060124576091766,
459
  "step": 290
460
  },
461
  {
462
  "epoch": 0.33879164313946925,
463
+ "grad_norm": 11.6875,
464
+ "learning_rate": 8.364186737172068e-07,
465
+ "logits/chosen": -2.9377264976501465,
466
+ "logits/rejected": -2.9192919731140137,
467
+ "logps/chosen": -272.1338806152344,
468
+ "logps/rejected": -252.2882537841797,
469
+ "loss": 0.6809,
470
+ "rewards/accuracies": 0.5249999761581421,
471
+ "rewards/chosen": -0.19352756440639496,
472
+ "rewards/margins": 0.03381720930337906,
473
+ "rewards/rejected": -0.2273447960615158,
474
  "step": 300
475
  },
476
  {
477
  "epoch": 0.3500846979107849,
478
+ "grad_norm": 9.4375,
479
+ "learning_rate": 8.215617387310522e-07,
480
+ "logits/chosen": -2.961489200592041,
481
+ "logits/rejected": -2.9303905963897705,
482
+ "logps/chosen": -255.98681640625,
483
+ "logps/rejected": -215.4890594482422,
484
+ "loss": 0.6801,
485
+ "rewards/accuracies": 0.518750011920929,
486
+ "rewards/chosen": -0.19549933075904846,
487
+ "rewards/margins": 0.035571686923503876,
488
+ "rewards/rejected": -0.23107102513313293,
489
  "step": 310
490
  },
491
  {
492
  "epoch": 0.3613777526821005,
493
+ "grad_norm": 9.6875,
494
+ "learning_rate": 8.062039838142401e-07,
495
+ "logits/chosen": -2.9097650051116943,
496
+ "logits/rejected": -2.879265546798706,
497
+ "logps/chosen": -233.59078979492188,
498
+ "logps/rejected": -255.03677368164062,
499
+ "loss": 0.6642,
500
+ "rewards/accuracies": 0.59375,
501
+ "rewards/chosen": -0.1973254680633545,
502
+ "rewards/margins": 0.06654740124940872,
503
+ "rewards/rejected": -0.2638728618621826,
504
  "step": 320
505
  },
506
  {
507
  "epoch": 0.37267080745341613,
508
+ "grad_norm": 9.5,
509
+ "learning_rate": 7.903693280741331e-07,
510
+ "logits/chosen": -2.932699203491211,
511
+ "logits/rejected": -2.8970255851745605,
512
+ "logps/chosen": -245.6687469482422,
513
+ "logps/rejected": -264.0479736328125,
514
+ "loss": 0.6635,
515
+ "rewards/accuracies": 0.59375,
516
+ "rewards/chosen": -0.1542760580778122,
517
+ "rewards/margins": 0.07145790755748749,
518
+ "rewards/rejected": -0.22573396563529968,
519
  "step": 330
520
  },
521
  {
522
  "epoch": 0.3839638622247318,
523
+ "grad_norm": 9.5625,
524
+ "learning_rate": 7.740824333726213e-07,
525
+ "logits/chosen": -2.974498748779297,
526
+ "logits/rejected": -2.9419915676116943,
527
+ "logps/chosen": -254.0312957763672,
528
+ "logps/rejected": -241.7189178466797,
529
+ "loss": 0.6755,
530
+ "rewards/accuracies": 0.5249999761581421,
531
+ "rewards/chosen": -0.19228370487689972,
532
+ "rewards/margins": 0.04604003205895424,
533
+ "rewards/rejected": -0.23832373321056366,
534
  "step": 340
535
  },
536
  {
537
  "epoch": 0.3952569169960474,
538
+ "grad_norm": 12.375,
539
+ "learning_rate": 7.573686659162293e-07,
540
+ "logits/chosen": -2.959407329559326,
541
+ "logits/rejected": -2.937516212463379,
542
+ "logps/chosen": -246.8042449951172,
543
+ "logps/rejected": -242.3812713623047,
544
+ "loss": 0.6706,
545
+ "rewards/accuracies": 0.550000011920929,
546
+ "rewards/chosen": -0.20731408894062042,
547
+ "rewards/margins": 0.061324410140514374,
548
+ "rewards/rejected": -0.2686384916305542,
549
  "step": 350
550
  },
551
  {
552
  "epoch": 0.40654997176736307,
553
+ "grad_norm": 9.5625,
554
+ "learning_rate": 7.402540567492336e-07,
555
+ "logits/chosen": -2.9905340671539307,
556
+ "logits/rejected": -2.95076060295105,
557
+ "logps/chosen": -243.1343536376953,
558
+ "logps/rejected": -226.71694946289062,
559
+ "loss": 0.6703,
560
+ "rewards/accuracies": 0.5249999761581421,
561
+ "rewards/chosen": -0.22629483044147491,
562
+ "rewards/margins": 0.06099146604537964,
563
+ "rewards/rejected": -0.28728628158569336,
564
  "step": 360
565
  },
566
  {
567
  "epoch": 0.4178430265386787,
568
+ "grad_norm": 10.125,
569
+ "learning_rate": 7.227652612113213e-07,
570
+ "logits/chosen": -2.8893442153930664,
571
+ "logits/rejected": -2.852905035018921,
572
+ "logps/chosen": -266.652099609375,
573
+ "logps/rejected": -236.76708984375,
574
+ "loss": 0.6789,
575
+ "rewards/accuracies": 0.5249999761581421,
576
+ "rewards/chosen": -0.2726292014122009,
577
+ "rewards/margins": 0.046290718019008636,
578
+ "rewards/rejected": -0.31891992688179016,
579
  "step": 370
580
  },
581
  {
582
  "epoch": 0.42913608130999437,
583
+ "grad_norm": 10.4375,
584
+ "learning_rate": 7.049295174229328e-07,
585
+ "logits/chosen": -2.9600396156311035,
586
+ "logits/rejected": -2.916116237640381,
587
+ "logps/chosen": -279.30035400390625,
588
+ "logps/rejected": -240.49032592773438,
589
+ "loss": 0.6625,
590
  "rewards/accuracies": 0.59375,
591
+ "rewards/chosen": -0.2531939148902893,
592
+ "rewards/margins": 0.0769796222448349,
593
+ "rewards/rejected": -0.3301735520362854,
594
  "step": 380
595
  },
596
  {
597
  "epoch": 0.44042913608131,
598
+ "grad_norm": 11.8125,
599
+ "learning_rate": 6.867746038629462e-07,
600
+ "logits/chosen": -2.9024429321289062,
601
+ "logits/rejected": -2.8742191791534424,
602
+ "logps/chosen": -242.5234375,
603
+ "logps/rejected": -207.02584838867188,
604
+ "loss": 0.6728,
605
+ "rewards/accuracies": 0.5687500238418579,
606
+ "rewards/chosen": -0.26158207654953003,
607
+ "rewards/margins": 0.05569303780794144,
608
+ "rewards/rejected": -0.31727513670921326,
609
  "step": 390
610
  },
611
  {
612
  "epoch": 0.45172219085262566,
613
+ "grad_norm": 9.0625,
614
+ "learning_rate": 6.683287961047741e-07,
615
+ "logits/chosen": -2.9038491249084473,
616
+ "logits/rejected": -2.8683886528015137,
617
+ "logps/chosen": -287.3917541503906,
618
+ "logps/rejected": -234.09072875976562,
619
+ "loss": 0.6692,
620
+ "rewards/accuracies": 0.5249999761581421,
621
+ "rewards/chosen": -0.23289862275123596,
622
+ "rewards/margins": 0.06903888285160065,
623
+ "rewards/rejected": -0.3019375205039978,
624
  "step": 400
625
  },
626
  {
627
  "epoch": 0.46301524562394125,
628
+ "grad_norm": 13.125,
629
+ "learning_rate": 6.496208227782556e-07,
630
+ "logits/chosen": -2.8886725902557373,
631
+ "logits/rejected": -2.8595938682556152,
632
+ "logps/chosen": -253.18283081054688,
633
+ "logps/rejected": -246.322509765625,
634
+ "loss": 0.6765,
635
+ "rewards/accuracies": 0.53125,
636
+ "rewards/chosen": -0.27256402373313904,
637
+ "rewards/margins": 0.05145202949643135,
638
+ "rewards/rejected": -0.3240160644054413,
639
  "step": 410
640
  },
641
  {
642
  "epoch": 0.4743083003952569,
643
+ "grad_norm": 10.375,
644
+ "learning_rate": 6.306798208259297e-07,
645
+ "logits/chosen": -2.9606752395629883,
646
+ "logits/rejected": -2.9306626319885254,
647
+ "logps/chosen": -253.44491577148438,
648
+ "logps/rejected": -240.0795135498047,
649
+ "loss": 0.6778,
650
+ "rewards/accuracies": 0.5375000238418579,
651
+ "rewards/chosen": -0.2641372084617615,
652
+ "rewards/margins": 0.0465705581009388,
653
+ "rewards/rejected": -0.31070777773857117,
654
  "step": 420
655
  },
656
  {
657
  "epoch": 0.48560135516657255,
658
+ "grad_norm": 10.8125,
659
+ "learning_rate": 6.115352901233778e-07,
660
+ "logits/chosen": -2.9563944339752197,
661
+ "logits/rejected": -2.9226722717285156,
662
+ "logps/chosen": -275.2793273925781,
663
+ "logps/rejected": -250.719482421875,
664
+ "loss": 0.6735,
665
+ "rewards/accuracies": 0.6000000238418579,
666
+ "rewards/chosen": -0.27404916286468506,
667
+ "rewards/margins": 0.056207090616226196,
668
+ "rewards/rejected": -0.33025628328323364,
669
  "step": 430
670
  },
671
  {
672
  "epoch": 0.4968944099378882,
673
+ "grad_norm": 8.9375,
674
+ "learning_rate": 5.922170475343124e-07,
675
+ "logits/chosen": -2.9221410751342773,
676
+ "logits/rejected": -2.891664505004883,
677
+ "logps/chosen": -252.1197509765625,
678
+ "logps/rejected": -239.79946899414062,
679
+ "loss": 0.6643,
680
+ "rewards/accuracies": 0.612500011920929,
681
+ "rewards/chosen": -0.2773807644844055,
682
+ "rewards/margins": 0.08373435586690903,
683
+ "rewards/rejected": -0.36111515760421753,
684
  "step": 440
685
  },
686
  {
687
  "epoch": 0.5081874647092038,
688
+ "grad_norm": 8.0,
689
+ "learning_rate": 5.727551804719693e-07,
690
+ "logits/chosen": -2.9440019130706787,
691
+ "logits/rejected": -2.9178504943847656,
692
+ "logps/chosen": -240.7253875732422,
693
+ "logps/rejected": -245.9892120361328,
694
+ "loss": 0.6604,
695
+ "rewards/accuracies": 0.5874999761581421,
696
+ "rewards/chosen": -0.2647743225097656,
697
+ "rewards/margins": 0.08178414404392242,
698
+ "rewards/rejected": -0.34655848145484924,
699
  "step": 450
700
  },
701
  {
702
  "epoch": 0.5194805194805194,
703
+ "grad_norm": 9.9375,
704
+ "learning_rate": 5.531800000391275e-07,
705
+ "logits/chosen": -2.9381301403045654,
706
+ "logits/rejected": -2.899775743484497,
707
+ "logps/chosen": -253.4866485595703,
708
+ "logps/rejected": -227.26168823242188,
709
+ "loss": 0.6578,
710
+ "rewards/accuracies": 0.612500011920929,
711
+ "rewards/chosen": -0.2723321318626404,
712
+ "rewards/margins": 0.09097979962825775,
713
+ "rewards/rejected": -0.3633119463920593,
714
  "step": 460
715
  },
716
  {
717
  "epoch": 0.5307735742518351,
718
+ "grad_norm": 9.625,
719
+ "learning_rate": 5.335219938197445e-07,
720
+ "logits/chosen": -2.9399991035461426,
721
+ "logits/rejected": -2.903505802154541,
722
+ "logps/chosen": -246.1412353515625,
723
+ "logps/rejected": -221.0483856201172,
724
+ "loss": 0.6679,
725
+ "rewards/accuracies": 0.53125,
726
+ "rewards/chosen": -0.24423566460609436,
727
+ "rewards/margins": 0.06885796785354614,
728
+ "rewards/rejected": -0.3130936622619629,
729
  "step": 470
730
  },
731
  {
732
  "epoch": 0.5420666290231507,
733
+ "grad_norm": 8.8125,
734
+ "learning_rate": 5.138117783957261e-07,
735
+ "logits/chosen": -2.9549431800842285,
736
+ "logits/rejected": -2.9279115200042725,
737
+ "logps/chosen": -258.9583435058594,
738
+ "logps/rejected": -230.93106079101562,
739
+ "loss": 0.6651,
740
+ "rewards/accuracies": 0.5625,
741
+ "rewards/chosen": -0.28696444630622864,
742
+ "rewards/margins": 0.07692141085863113,
743
+ "rewards/rejected": -0.3638857901096344,
744
  "step": 480
745
  },
746
  {
747
  "epoch": 0.5533596837944664,
748
+ "grad_norm": 11.25,
749
+ "learning_rate": 4.940800516627885e-07,
750
+ "logits/chosen": -2.9042859077453613,
751
+ "logits/rejected": -2.8782927989959717,
752
+ "logps/chosen": -259.2146911621094,
753
+ "logps/rejected": -238.78005981445312,
754
+ "loss": 0.6506,
755
+ "rewards/accuracies": 0.6499999761581421,
756
+ "rewards/chosen": -0.25122493505477905,
757
+ "rewards/margins": 0.10116108506917953,
758
+ "rewards/rejected": -0.3523860573768616,
759
  "step": 490
760
  },
761
  {
762
  "epoch": 0.564652738565782,
763
+ "grad_norm": 8.125,
764
+ "learning_rate": 4.743575450196772e-07,
765
+ "logits/chosen": -2.9411609172821045,
766
+ "logits/rejected": -2.9064881801605225,
767
+ "logps/chosen": -245.3742218017578,
768
+ "logps/rejected": -242.74740600585938,
769
+ "loss": 0.6671,
770
+ "rewards/accuracies": 0.53125,
771
+ "rewards/chosen": -0.29283052682876587,
772
+ "rewards/margins": 0.07133854180574417,
773
+ "rewards/rejected": -0.36416909098625183,
774
  "step": 500
775
  },
776
  {
777
  "epoch": 0.5759457933370977,
778
+ "grad_norm": 9.0,
779
+ "learning_rate": 4.5467497550520505e-07,
780
+ "logits/chosen": -2.894622325897217,
781
+ "logits/rejected": -2.8733580112457275,
782
+ "logps/chosen": -222.1239471435547,
783
+ "logps/rejected": -213.3123779296875,
784
+ "loss": 0.6705,
785
+ "rewards/accuracies": 0.5562499761581421,
786
+ "rewards/chosen": -0.2835238575935364,
787
+ "rewards/margins": 0.060517072677612305,
788
+ "rewards/rejected": -0.3440409302711487,
789
  "step": 510
790
  },
791
  {
792
  "epoch": 0.5872388481084133,
793
+ "grad_norm": 9.4375,
794
+ "learning_rate": 4.350629979576569e-07,
795
+ "logits/chosen": -2.9461731910705566,
796
+ "logits/rejected": -2.8937017917633057,
797
+ "logps/chosen": -232.8549346923828,
798
+ "logps/rejected": -213.99911499023438,
799
+ "loss": 0.6405,
800
+ "rewards/accuracies": 0.675000011920929,
801
+ "rewards/chosen": -0.27412381768226624,
802
+ "rewards/margins": 0.13067342340946198,
803
+ "rewards/rejected": -0.404797226190567,
804
  "step": 520
805
  },
806
  {
807
  "epoch": 0.598531902879729,
808
  "grad_norm": 10.0,
809
+ "learning_rate": 4.155521572710684e-07,
810
+ "logits/chosen": -2.9578232765197754,
811
+ "logits/rejected": -2.920867919921875,
812
+ "logps/chosen": -230.7373809814453,
813
+ "logps/rejected": -210.01296997070312,
814
+ "loss": 0.6765,
815
  "rewards/accuracies": 0.518750011920929,
816
+ "rewards/chosen": -0.27560561895370483,
817
+ "rewards/margins": 0.05005854368209839,
818
+ "rewards/rejected": -0.325664222240448,
819
  "step": 530
820
  },
821
  {
822
  "epoch": 0.6098249576510446,
823
+ "grad_norm": 10.375,
824
+ "learning_rate": 3.9617284082273836e-07,
825
+ "logits/chosen": -2.9541661739349365,
826
+ "logits/rejected": -2.905620574951172,
827
+ "logps/chosen": -233.76467895507812,
828
+ "logps/rejected": -225.522705078125,
829
+ "loss": 0.6498,
830
+ "rewards/accuracies": 0.625,
831
+ "rewards/chosen": -0.2639801800251007,
832
+ "rewards/margins": 0.10957106202840805,
833
+ "rewards/rejected": -0.37355121970176697,
834
  "step": 540
835
  },
836
  {
837
  "epoch": 0.6211180124223602,
838
+ "grad_norm": 10.6875,
839
+ "learning_rate": 3.7695523114606836e-07,
840
+ "logits/chosen": -2.936779499053955,
841
+ "logits/rejected": -2.9056625366210938,
842
+ "logps/chosen": -244.0745086669922,
843
+ "logps/rejected": -241.7302703857422,
844
+ "loss": 0.6858,
845
+ "rewards/accuracies": 0.5,
846
+ "rewards/chosen": -0.3084772527217865,
847
+ "rewards/margins": 0.04285974055528641,
848
+ "rewards/rejected": -0.3513370156288147,
849
  "step": 550
850
  },
851
  {
852
  "epoch": 0.6324110671936759,
853
+ "grad_norm": 10.75,
854
+ "learning_rate": 3.579292589224374e-07,
855
+ "logits/chosen": -2.9537434577941895,
856
+ "logits/rejected": -2.9143412113189697,
857
+ "logps/chosen": -263.7715759277344,
858
+ "logps/rejected": -249.03793334960938,
859
+ "loss": 0.6506,
860
+ "rewards/accuracies": 0.612500011920929,
861
+ "rewards/chosen": -0.28075623512268066,
862
+ "rewards/margins": 0.10373449325561523,
863
+ "rewards/rejected": -0.3844907283782959,
864
  "step": 560
865
  },
866
  {
867
  "epoch": 0.6437041219649915,
868
+ "grad_norm": 9.4375,
869
+ "learning_rate": 3.391245563653276e-07,
870
+ "logits/chosen": -2.957152843475342,
871
+ "logits/rejected": -2.9082839488983154,
872
+ "logps/chosen": -252.3236846923828,
873
+ "logps/rejected": -221.5681610107422,
874
+ "loss": 0.6473,
875
+ "rewards/accuracies": 0.6312500238418579,
876
+ "rewards/chosen": -0.2503497004508972,
877
+ "rewards/margins": 0.11741576343774796,
878
+ "rewards/rejected": -0.36776548624038696,
879
  "step": 570
880
  },
881
  {
882
  "epoch": 0.6549971767363072,
883
+ "grad_norm": 11.25,
884
+ "learning_rate": 3.20570411069301e-07,
885
+ "logits/chosen": -2.947129487991333,
886
+ "logits/rejected": -2.910438060760498,
887
+ "logps/chosen": -247.65579223632812,
888
+ "logps/rejected": -219.4207763671875,
889
+ "loss": 0.6602,
890
+ "rewards/accuracies": 0.574999988079071,
891
+ "rewards/chosen": -0.34559428691864014,
892
+ "rewards/margins": 0.08174686133861542,
893
+ "rewards/rejected": -0.4273412227630615,
894
  "step": 580
895
  },
896
  {
897
  "epoch": 0.6662902315076228,
898
+ "grad_norm": 9.9375,
899
+ "learning_rate": 3.0229572039570826e-07,
900
+ "logits/chosen": -2.9612464904785156,
901
+ "logits/rejected": -2.912680149078369,
902
+ "logps/chosen": -237.4712677001953,
903
+ "logps/rejected": -232.04373168945312,
904
+ "loss": 0.653,
905
+ "rewards/accuracies": 0.606249988079071,
906
+ "rewards/chosen": -0.2624071538448334,
907
+ "rewards/margins": 0.10756198316812515,
908
+ "rewards/rejected": -0.3699691891670227,
909
  "step": 590
910
  },
911
  {
912
  "epoch": 0.6775832862789385,
913
+ "grad_norm": 11.125,
914
+ "learning_rate": 2.8432894646616886e-07,
915
+ "logits/chosen": -2.9047579765319824,
916
+ "logits/rejected": -2.882847547531128,
917
+ "logps/chosen": -231.67147827148438,
918
+ "logps/rejected": -236.0974884033203,
919
+ "loss": 0.6688,
920
+ "rewards/accuracies": 0.574999988079071,
921
+ "rewards/chosen": -0.3304700553417206,
922
+ "rewards/margins": 0.07191035896539688,
923
+ "rewards/rejected": -0.40238040685653687,
924
  "step": 600
925
  },
926
  {
927
  "epoch": 0.6888763410502541,
928
+ "grad_norm": 7.4375,
929
+ "learning_rate": 2.6669807183392105e-07,
930
+ "logits/chosen": -2.92600679397583,
931
+ "logits/rejected": -2.8905367851257324,
932
+ "logps/chosen": -236.2364044189453,
933
+ "logps/rejected": -206.38174438476562,
934
+ "loss": 0.6684,
935
+ "rewards/accuracies": 0.550000011920929,
936
+ "rewards/chosen": -0.28464430570602417,
937
+ "rewards/margins": 0.07026463001966476,
938
+ "rewards/rejected": -0.35490894317626953,
939
  "step": 610
940
  },
941
  {
942
  "epoch": 0.7001693958215698,
943
+ "grad_norm": 10.8125,
944
+ "learning_rate": 2.494305559020822e-07,
945
+ "logits/chosen": -2.9433751106262207,
946
+ "logits/rejected": -2.907177686691284,
947
+ "logps/chosen": -240.6074676513672,
948
+ "logps/rejected": -226.85989379882812,
949
+ "loss": 0.6727,
950
+ "rewards/accuracies": 0.518750011920929,
951
+ "rewards/chosen": -0.3261820673942566,
952
+ "rewards/margins": 0.06915761530399323,
953
+ "rewards/rejected": -0.3953397274017334,
954
  "step": 620
955
  },
956
  {
957
  "epoch": 0.7114624505928854,
958
+ "grad_norm": 11.0,
959
+ "learning_rate": 2.3255329215669184e-07,
960
+ "logits/chosen": -2.950742244720459,
961
+ "logits/rejected": -2.920048952102661,
962
+ "logps/chosen": -244.8520965576172,
963
+ "logps/rejected": -253.5353546142578,
964
+ "loss": 0.6715,
965
+ "rewards/accuracies": 0.5625,
966
+ "rewards/chosen": -0.2790936529636383,
967
+ "rewards/margins": 0.06808016449213028,
968
+ "rewards/rejected": -0.3471737802028656,
969
  "step": 630
970
  },
971
  {
972
  "epoch": 0.722755505364201,
973
+ "grad_norm": 11.0625,
974
+ "learning_rate": 2.1609256628115312e-07,
975
+ "logits/chosen": -2.9226107597351074,
976
+ "logits/rejected": -2.8994510173797607,
977
+ "logps/chosen": -228.3145751953125,
978
+ "logps/rejected": -198.56753540039062,
979
+ "loss": 0.6619,
980
  "rewards/accuracies": 0.5687500238418579,
981
+ "rewards/chosen": -0.284686803817749,
982
+ "rewards/margins": 0.08213174343109131,
983
+ "rewards/rejected": -0.3668185770511627,
984
  "step": 640
985
  },
986
  {
987
  "epoch": 0.7340485601355167,
988
+ "grad_norm": 13.5625,
989
+ "learning_rate": 2.000740152172986e-07,
990
+ "logits/chosen": -2.9056789875030518,
991
+ "logits/rejected": -2.8870625495910645,
992
+ "logps/chosen": -245.57168579101562,
993
+ "logps/rejected": -241.86618041992188,
994
+ "loss": 0.6735,
995
  "rewards/accuracies": 0.5562499761581421,
996
+ "rewards/chosen": -0.31977060437202454,
997
+ "rewards/margins": 0.060752321034669876,
998
+ "rewards/rejected": -0.3805229663848877,
999
  "step": 650
1000
  },
1001
  {
1002
  "epoch": 0.7453416149068323,
1003
+ "grad_norm": 10.375,
1004
+ "learning_rate": 1.8452258723684995e-07,
1005
+ "logits/chosen": -2.9322152137756348,
1006
+ "logits/rejected": -2.8824894428253174,
1007
+ "logps/chosen": -228.6182098388672,
1008
+ "logps/rejected": -198.11158752441406,
1009
+ "loss": 0.6541,
1010
+ "rewards/accuracies": 0.5874999761581421,
1011
+ "rewards/chosen": -0.30365481972694397,
1012
+ "rewards/margins": 0.10447897762060165,
1013
+ "rewards/rejected": -0.4081338346004486,
1014
  "step": 660
1015
  },
1016
  {
1017
  "epoch": 0.756634669678148,
1018
+ "grad_norm": 9.8125,
1019
+ "learning_rate": 1.6946250308545124e-07,
1020
+ "logits/chosen": -2.9027180671691895,
1021
+ "logits/rejected": -2.8488707542419434,
1022
+ "logps/chosen": -238.1764678955078,
1023
+ "logps/rejected": -241.59326171875,
1024
+ "loss": 0.647,
1025
+ "rewards/accuracies": 0.637499988079071,
1026
+ "rewards/chosen": -0.2668387293815613,
1027
+ "rewards/margins": 0.11640632152557373,
1028
+ "rewards/rejected": -0.383245050907135,
1029
  "step": 670
1030
  },
1031
  {
1032
  "epoch": 0.7679277244494636,
1033
+ "grad_norm": 11.3125,
1034
+ "learning_rate": 1.54917218259799e-07,
1035
+ "logits/chosen": -2.941662549972534,
1036
+ "logits/rejected": -2.894787073135376,
1037
+ "logps/chosen": -256.3278503417969,
1038
+ "logps/rejected": -216.21945190429688,
1039
+ "loss": 0.667,
1040
+ "rewards/accuracies": 0.59375,
1041
+ "rewards/chosen": -0.3198787271976471,
1042
+ "rewards/margins": 0.07660157978534698,
1043
+ "rewards/rejected": -0.3964803218841553,
1044
  "step": 680
1045
  },
1046
  {
1047
  "epoch": 0.7792207792207793,
1048
+ "grad_norm": 10.375,
1049
+ "learning_rate": 1.409093864766146e-07,
1050
+ "logits/chosen": -2.9099855422973633,
1051
+ "logits/rejected": -2.8785247802734375,
1052
+ "logps/chosen": -238.84249877929688,
1053
+ "logps/rejected": -240.7554473876953,
1054
+ "loss": 0.6672,
1055
+ "rewards/accuracies": 0.5687500238418579,
1056
+ "rewards/chosen": -0.3247315585613251,
1057
+ "rewards/margins": 0.06632229685783386,
1058
+ "rewards/rejected": -0.3910538852214813,
1059
  "step": 690
1060
  },
1061
  {
1062
  "epoch": 0.7905138339920948,
1063
+ "grad_norm": 9.0625,
1064
+ "learning_rate": 1.2746082439036114e-07,
1065
+ "logits/chosen": -2.9594428539276123,
1066
+ "logits/rejected": -2.939582347869873,
1067
+ "logps/chosen": -244.93490600585938,
1068
+ "logps/rejected": -220.2227020263672,
1069
+ "loss": 0.6717,
1070
+ "rewards/accuracies": 0.518750011920929,
1071
+ "rewards/chosen": -0.2836626172065735,
1072
+ "rewards/margins": 0.060529135167598724,
1073
+ "rewards/rejected": -0.3441917896270752,
1074
  "step": 700
1075
  },
1076
  {
1077
  "epoch": 0.8018068887634106,
1078
  "grad_norm": 9.5625,
1079
+ "learning_rate": 1.1459247761464907e-07,
1080
+ "logits/chosen": -2.9392457008361816,
1081
+ "logits/rejected": -2.9211068153381348,
1082
+ "logps/chosen": -238.0613555908203,
1083
+ "logps/rejected": -216.3132781982422,
1084
+ "loss": 0.6608,
1085
+ "rewards/accuracies": 0.6000000238418579,
1086
+ "rewards/chosen": -0.28733736276626587,
1087
+ "rewards/margins": 0.0887908786535263,
1088
+ "rewards/rejected": -0.37612825632095337,
1089
  "step": 710
1090
  },
1091
  {
1092
  "epoch": 0.8130999435347261,
1093
+ "grad_norm": 8.125,
1094
+ "learning_rate": 1.0232438810025728e-07,
1095
+ "logits/chosen": -2.929105043411255,
1096
+ "logits/rejected": -2.8989386558532715,
1097
+ "logps/chosen": -238.7790985107422,
1098
+ "logps/rejected": -196.06771850585938,
1099
+ "loss": 0.6614,
1100
  "rewards/accuracies": 0.581250011920929,
1101
+ "rewards/chosen": -0.2947736084461212,
1102
+ "rewards/margins": 0.08014187216758728,
1103
+ "rewards/rejected": -0.3749154806137085,
1104
  "step": 720
1105
  },
1106
  {
1107
  "epoch": 0.8243929983060417,
1108
+ "grad_norm": 8.25,
1109
+ "learning_rate": 9.067566292057083e-08,
1110
+ "logits/chosen": -2.927602767944336,
1111
+ "logits/rejected": -2.899686098098755,
1112
+ "logps/chosen": -226.2371826171875,
1113
+ "logps/rejected": -250.6548309326172,
1114
+ "loss": 0.6538,
1115
+ "rewards/accuracies": 0.59375,
1116
+ "rewards/chosen": -0.2974669933319092,
1117
+ "rewards/margins": 0.09935982525348663,
1118
+ "rewards/rejected": -0.3968268036842346,
1119
  "step": 730
1120
  },
1121
  {
1122
  "epoch": 0.8356860530773574,
1123
+ "grad_norm": 8.5625,
1124
+ "learning_rate": 7.966444451305727e-08,
1125
+ "logits/chosen": -2.9271061420440674,
1126
+ "logits/rejected": -2.8855221271514893,
1127
+ "logps/chosen": -230.8705596923828,
1128
+ "logps/rejected": -248.7850341796875,
1129
+ "loss": 0.655,
1130
+ "rewards/accuracies": 0.574999988079071,
1131
+ "rewards/chosen": -0.32860639691352844,
1132
+ "rewards/margins": 0.10086989402770996,
1133
+ "rewards/rejected": -0.429476261138916,
1134
  "step": 740
1135
  },
1136
  {
1137
  "epoch": 0.846979107848673,
1138
+ "grad_norm": 7.84375,
1139
+ "learning_rate": 6.930788242312252e-08,
1140
+ "logits/chosen": -2.9216761589050293,
1141
+ "logits/rejected": -2.8618099689483643,
1142
+ "logps/chosen": -219.88839721679688,
1143
+ "logps/rejected": -189.08688354492188,
1144
+ "loss": 0.644,
1145
+ "rewards/accuracies": 0.6187499761581421,
1146
+ "rewards/chosen": -0.31415051221847534,
1147
+ "rewards/margins": 0.1270967572927475,
1148
+ "rewards/rejected": -0.44124728441238403,
1149
  "step": 750
1150
  },
1151
  {
1152
  "epoch": 0.8582721626199887,
1153
+ "grad_norm": 10.4375,
1154
+ "learning_rate": 5.96221065943609e-08,
1155
+ "logits/chosen": -2.9630675315856934,
1156
+ "logits/rejected": -2.941108226776123,
1157
+ "logps/chosen": -223.5160369873047,
1158
+ "logps/rejected": -232.03451538085938,
1159
+ "loss": 0.6592,
1160
+ "rewards/accuracies": 0.581250011920929,
1161
+ "rewards/chosen": -0.30072498321533203,
1162
+ "rewards/margins": 0.09193964302539825,
1163
+ "rewards/rejected": -0.3926646113395691,
1164
  "step": 760
1165
  },
1166
  {
1167
  "epoch": 0.8695652173913043,
1168
  "grad_norm": 9.6875,
1169
+ "learning_rate": 5.062220224679276e-08,
1170
+ "logits/chosen": -2.936577320098877,
1171
+ "logits/rejected": -2.9232354164123535,
1172
+ "logps/chosen": -229.6407012939453,
1173
+ "logps/rejected": -229.1260223388672,
1174
+ "loss": 0.6702,
1175
+ "rewards/accuracies": 0.6000000238418579,
1176
+ "rewards/chosen": -0.27426886558532715,
1177
+ "rewards/margins": 0.06261973083019257,
1178
+ "rewards/rejected": -0.3368886113166809,
1179
  "step": 770
1180
  },
1181
  {
1182
  "epoch": 0.88085827216262,
1183
+ "grad_norm": 10.375,
1184
+ "learning_rate": 4.2322186382220295e-08,
1185
+ "logits/chosen": -2.913611650466919,
1186
+ "logits/rejected": -2.8943240642547607,
1187
+ "logps/chosen": -227.52316284179688,
1188
+ "logps/rejected": -237.62997436523438,
1189
+ "loss": 0.6853,
1190
+ "rewards/accuracies": 0.53125,
1191
+ "rewards/chosen": -0.3313713073730469,
1192
+ "rewards/margins": 0.03858897089958191,
1193
+ "rewards/rejected": -0.3699602782726288,
1194
  "step": 780
1195
  },
1196
  {
1197
  "epoch": 0.8921513269339356,
1198
+ "grad_norm": 8.1875,
1199
+ "learning_rate": 3.4734985953290774e-08,
1200
+ "logits/chosen": -2.9326188564300537,
1201
+ "logits/rejected": -2.8966469764709473,
1202
+ "logps/chosen": -255.98684692382812,
1203
+ "logps/rejected": -265.226806640625,
1204
+ "loss": 0.644,
1205
+ "rewards/accuracies": 0.606249988079071,
1206
+ "rewards/chosen": -0.2588121294975281,
1207
+ "rewards/margins": 0.12674114108085632,
1208
+ "rewards/rejected": -0.3855533003807068,
1209
  "step": 790
1210
  },
1211
  {
1212
  "epoch": 0.9034443817052513,
1213
+ "grad_norm": 9.5625,
1214
+ "learning_rate": 2.7872417730269327e-08,
1215
+ "logits/chosen": -2.933467388153076,
1216
+ "logits/rejected": -2.898324728012085,
1217
+ "logps/chosen": -246.2470245361328,
1218
+ "logps/rejected": -263.4474792480469,
1219
+ "loss": 0.6672,
1220
+ "rewards/accuracies": 0.543749988079071,
1221
+ "rewards/chosen": -0.29884645342826843,
1222
+ "rewards/margins": 0.07485932111740112,
1223
+ "rewards/rejected": -0.37370574474334717,
1224
  "step": 800
1225
  },
1226
  {
1227
  "epoch": 0.9147374364765669,
1228
+ "grad_norm": 8.375,
1229
+ "learning_rate": 2.1745169896878412e-08,
1230
+ "logits/chosen": -2.971459150314331,
1231
+ "logits/rejected": -2.9422390460968018,
1232
+ "logps/chosen": -257.4516296386719,
1233
+ "logps/rejected": -244.69961547851562,
1234
+ "loss": 0.6585,
1235
+ "rewards/accuracies": 0.5687500238418579,
1236
+ "rewards/chosen": -0.2682439386844635,
1237
+ "rewards/margins": 0.09004910290241241,
1238
+ "rewards/rejected": -0.3582930564880371,
1239
  "step": 810
1240
  },
1241
  {
1242
  "epoch": 0.9260304912478825,
1243
+ "grad_norm": 10.5625,
1244
+ "learning_rate": 1.6362785403865488e-08,
1245
+ "logits/chosen": -2.9577174186706543,
1246
+ "logits/rejected": -2.9316866397857666,
1247
+ "logps/chosen": -236.38302612304688,
1248
+ "logps/rejected": -231.70822143554688,
1249
+ "loss": 0.6696,
1250
  "rewards/accuracies": 0.5375000238418579,
1251
+ "rewards/chosen": -0.3118794858455658,
1252
+ "rewards/margins": 0.07028525322675705,
1253
+ "rewards/rejected": -0.38216471672058105,
1254
  "step": 820
1255
  },
1256
  {
1257
  "epoch": 0.9373235460191982,
1258
+ "grad_norm": 12.0,
1259
+ "learning_rate": 1.1733647106228373e-08,
1260
+ "logits/chosen": -2.965700149536133,
1261
+ "logits/rejected": -2.935534715652466,
1262
+ "logps/chosen": -241.2149658203125,
1263
+ "logps/rejected": -219.77786254882812,
1264
+ "loss": 0.6709,
1265
+ "rewards/accuracies": 0.6187499761581421,
1266
+ "rewards/chosen": -0.3273731470108032,
1267
+ "rewards/margins": 0.060812730342149734,
1268
+ "rewards/rejected": -0.38818588852882385,
1269
  "step": 830
1270
  },
1271
  {
1272
  "epoch": 0.9486166007905138,
1273
+ "grad_norm": 11.625,
1274
+ "learning_rate": 7.864964707243071e-09,
1275
+ "logits/chosen": -2.9139742851257324,
1276
+ "logits/rejected": -2.8788352012634277,
1277
+ "logps/chosen": -244.0455322265625,
1278
+ "logps/rejected": -233.3373565673828,
1279
+ "loss": 0.6784,
1280
+ "rewards/accuracies": 0.53125,
1281
+ "rewards/chosen": -0.3300473690032959,
1282
+ "rewards/margins": 0.05264363810420036,
1283
+ "rewards/rejected": -0.382690966129303,
1284
  "step": 840
1285
  },
1286
  {
1287
  "epoch": 0.9599096555618295,
1288
+ "grad_norm": 9.75,
1289
+ "learning_rate": 4.762763529631342e-09,
1290
+ "logits/chosen": -2.956047773361206,
1291
+ "logits/rejected": -2.936790943145752,
1292
+ "logps/chosen": -240.4857177734375,
1293
+ "logps/rejected": -235.99301147460938,
1294
+ "loss": 0.6877,
1295
+ "rewards/accuracies": 0.550000011920929,
1296
+ "rewards/chosen": -0.31692594289779663,
1297
+ "rewards/margins": 0.042497653514146805,
1298
+ "rewards/rejected": -0.35942360758781433,
1299
  "step": 850
1300
  },
1301
  {
1302
  "epoch": 0.9712027103331451,
1303
+ "grad_norm": 10.1875,
1304
+ "learning_rate": 2.431875131354011e-09,
1305
+ "logits/chosen": -2.970977544784546,
1306
+ "logits/rejected": -2.9481375217437744,
1307
+ "logps/chosen": -258.04437255859375,
1308
+ "logps/rejected": -255.94552612304688,
1309
+ "loss": 0.664,
1310
+ "rewards/accuracies": 0.5375000238418579,
1311
+ "rewards/chosen": -0.3285280168056488,
1312
+ "rewards/margins": 0.08176346123218536,
1313
+ "rewards/rejected": -0.41029149293899536,
1314
  "step": 860
1315
  },
1316
  {
1317
  "epoch": 0.9824957651044608,
1318
+ "grad_norm": 9.9375,
1319
+ "learning_rate": 8.759297806469335e-10,
1320
+ "logits/chosen": -2.9342379570007324,
1321
+ "logits/rejected": -2.8985533714294434,
1322
+ "logps/chosen": -260.99530029296875,
1323
+ "logps/rejected": -243.7850799560547,
1324
+ "loss": 0.6639,
1325
+ "rewards/accuracies": 0.581250011920929,
1326
+ "rewards/chosen": -0.28514906764030457,
1327
+ "rewards/margins": 0.0856957957148552,
1328
+ "rewards/rejected": -0.37084484100341797,
1329
  "step": 870
1330
  },
1331
  {
1332
  "epoch": 0.9937888198757764,
1333
+ "grad_norm": 9.25,
1334
+ "learning_rate": 9.735080201922486e-11,
1335
+ "logits/chosen": -2.933637857437134,
1336
+ "logits/rejected": -2.88521146774292,
1337
+ "logps/chosen": -285.87249755859375,
1338
+ "logps/rejected": -235.1495819091797,
1339
+ "loss": 0.6537,
1340
+ "rewards/accuracies": 0.574999988079071,
1341
+ "rewards/chosen": -0.32956260442733765,
1342
+ "rewards/margins": 0.10394410043954849,
1343
+ "rewards/rejected": -0.43350666761398315,
1344
  "step": 880
1345
  },
1346
  {
1347
  "epoch": 0.9994353472614342,
1348
  "step": 885,
1349
  "total_flos": 0.0,
1350
+ "train_loss": 0.6718437324135991,
1351
+ "train_runtime": 9969.8232,
1352
+ "train_samples_per_second": 1.421,
1353
  "train_steps_per_second": 0.089
1354
  }
1355
  ],