{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.988593155893536,
  "eval_steps": 50,
  "global_step": 393,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07604562737642585,
      "grad_norm": 10.680026826860546,
      "learning_rate": 1.25e-07,
      "logits/chosen": -2.563544511795044,
      "logits/rejected": -2.4952266216278076,
      "logps/chosen": -273.53424072265625,
      "logps/rejected": -239.1501922607422,
      "loss": 0.6932,
      "rewards/accuracies": 0.38749998807907104,
      "rewards/chosen": 0.00019869348034262657,
      "rewards/margins": -1.28610986394051e-06,
      "rewards/rejected": 0.00019997954950667918,
      "step": 10
    },
    {
      "epoch": 0.1520912547528517,
      "grad_norm": 8.993807887778436,
      "learning_rate": 2.5e-07,
      "logits/chosen": -2.580817222595215,
      "logits/rejected": -2.462310314178467,
      "logps/chosen": -269.5621337890625,
      "logps/rejected": -258.239990234375,
      "loss": 0.6892,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.00650571146979928,
      "rewards/margins": 0.008498361334204674,
      "rewards/rejected": -0.0019926498644053936,
      "step": 20
    },
    {
      "epoch": 0.22813688212927757,
      "grad_norm": 8.92132470459759,
      "learning_rate": 3.75e-07,
      "logits/chosen": -2.6156678199768066,
      "logits/rejected": -2.4716460704803467,
      "logps/chosen": -295.3561096191406,
      "logps/rejected": -241.26644897460938,
      "loss": 0.6709,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": 0.040296901017427444,
      "rewards/margins": 0.05595223233103752,
      "rewards/rejected": -0.015655336901545525,
      "step": 30
    },
    {
      "epoch": 0.3041825095057034,
      "grad_norm": 9.266525353467978,
      "learning_rate": 5e-07,
      "logits/chosen": -2.6184380054473877,
      "logits/rejected": -2.513917922973633,
      "logps/chosen": -254.7106475830078,
      "logps/rejected": -234.47219848632812,
      "loss": 0.6239,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.028813377022743225,
      "rewards/margins": 0.14417138695716858,
      "rewards/rejected": -0.11535801738500595,
      "step": 40
    },
    {
      "epoch": 0.38022813688212925,
      "grad_norm": 12.173416216409002,
      "learning_rate": 4.990105959637203e-07,
      "logits/chosen": -2.546933650970459,
      "logits/rejected": -2.4483542442321777,
      "logps/chosen": -243.9954376220703,
      "logps/rejected": -256.17242431640625,
      "loss": 0.5554,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.08808934688568115,
      "rewards/margins": 0.2857440114021301,
      "rewards/rejected": -0.3738333582878113,
      "step": 50
    },
    {
      "epoch": 0.38022813688212925,
      "eval_logits/chosen": -2.62440824508667,
      "eval_logits/rejected": -2.5786266326904297,
      "eval_logps/chosen": -276.6687316894531,
      "eval_logps/rejected": -290.07366943359375,
      "eval_loss": 0.6140323281288147,
      "eval_rewards/accuracies": 0.6896551847457886,
      "eval_rewards/chosen": -0.16707415878772736,
      "eval_rewards/margins": 0.2508907914161682,
      "eval_rewards/rejected": -0.4179649353027344,
      "eval_runtime": 94.0274,
      "eval_samples_per_second": 19.473,
      "eval_steps_per_second": 0.308,
      "step": 50
    },
    {
      "epoch": 0.45627376425855515,
      "grad_norm": 24.13668429607915,
      "learning_rate": 4.960502152176573e-07,
      "logits/chosen": -2.1383702754974365,
      "logits/rejected": -2.0003161430358887,
      "logps/chosen": -272.81854248046875,
      "logps/rejected": -323.1360168457031,
      "loss": 0.4773,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -0.23167654871940613,
      "rewards/margins": 0.8607891798019409,
      "rewards/rejected": -1.0924659967422485,
      "step": 60
    },
    {
      "epoch": 0.532319391634981,
      "grad_norm": 40.013780890248455,
      "learning_rate": 4.911422898630837e-07,
      "logits/chosen": -1.2432479858398438,
      "logits/rejected": -0.6645946502685547,
      "logps/chosen": -334.7701721191406,
      "logps/rejected": -392.15496826171875,
      "loss": 0.3889,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": -0.5091000199317932,
      "rewards/margins": 1.100501298904419,
      "rewards/rejected": -1.6096012592315674,
      "step": 70
    },
    {
      "epoch": 0.6083650190114068,
      "grad_norm": 31.64851560561826,
      "learning_rate": 4.84325667269244e-07,
      "logits/chosen": 0.15650656819343567,
      "logits/rejected": 0.751736044883728,
      "logps/chosen": -311.51922607421875,
      "logps/rejected": -406.87860107421875,
      "loss": 0.3708,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.7787342667579651,
      "rewards/margins": 1.0774469375610352,
      "rewards/rejected": -1.8561809062957764,
      "step": 80
    },
    {
      "epoch": 0.6844106463878327,
      "grad_norm": 39.73186343074897,
      "learning_rate": 4.7565430258740336e-07,
      "logits/chosen": 0.31803542375564575,
      "logits/rejected": 1.7618499994277954,
      "logps/chosen": -330.3303527832031,
      "logps/rejected": -454.4091796875,
      "loss": 0.3342,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -0.8064614534378052,
      "rewards/margins": 1.6557552814483643,
      "rewards/rejected": -2.46221661567688,
      "step": 90
    },
    {
      "epoch": 0.7604562737642585,
      "grad_norm": 26.592175518902643,
      "learning_rate": 4.6519683168329195e-07,
      "logits/chosen": 0.48759278655052185,
      "logits/rejected": 2.2304329872131348,
      "logps/chosen": -379.5733947753906,
      "logps/rejected": -496.21636962890625,
      "loss": 0.3147,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -0.8934251070022583,
      "rewards/margins": 1.828790307044983,
      "rewards/rejected": -2.722215414047241,
      "step": 100
    },
    {
      "epoch": 0.7604562737642585,
      "eval_logits/chosen": 0.7404455542564392,
      "eval_logits/rejected": 1.5949417352676392,
      "eval_logps/chosen": -375.37860107421875,
      "eval_logps/rejected": -459.43609619140625,
      "eval_loss": 0.5617097020149231,
      "eval_rewards/accuracies": 0.732758641242981,
      "eval_rewards/chosen": -1.1541731357574463,
      "eval_rewards/margins": 0.957415759563446,
      "eval_rewards/rejected": -2.111588716506958,
      "eval_runtime": 93.8291,
      "eval_samples_per_second": 19.514,
      "eval_steps_per_second": 0.309,
      "step": 100
    },
    {
      "epoch": 0.8365019011406845,
      "grad_norm": 33.75286446177569,
      "learning_rate": 4.530360278682841e-07,
      "logits/chosen": 0.4949573576450348,
      "logits/rejected": 1.976785659790039,
      "logps/chosen": -353.00445556640625,
      "logps/rejected": -476.89678955078125,
      "loss": 0.3642,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.8960109949111938,
      "rewards/margins": 1.611445665359497,
      "rewards/rejected": -2.5074567794799805,
      "step": 110
    },
    {
      "epoch": 0.9125475285171103,
      "grad_norm": 25.522512035563206,
      "learning_rate": 4.3926814672941166e-07,
      "logits/chosen": 0.5252203941345215,
      "logits/rejected": 2.202810525894165,
      "logps/chosen": -359.41876220703125,
      "logps/rejected": -494.12969970703125,
      "loss": 0.318,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -1.0113165378570557,
      "rewards/margins": 1.6037089824676514,
      "rewards/rejected": -2.615025520324707,
      "step": 120
    },
    {
      "epoch": 0.9885931558935361,
      "grad_norm": 29.14776533666164,
      "learning_rate": 4.240021642440332e-07,
      "logits/chosen": -0.04642736166715622,
      "logits/rejected": 1.8715919256210327,
      "logps/chosen": -352.2441101074219,
      "logps/rejected": -486.38970947265625,
      "loss": 0.3174,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.9902883768081665,
      "rewards/margins": 1.7977688312530518,
      "rewards/rejected": -2.7880570888519287,
      "step": 130
    },
    {
      "epoch": 1.064638783269962,
      "grad_norm": 32.26612817404267,
      "learning_rate": 4.073589142096592e-07,
      "logits/chosen": 0.5508675575256348,
      "logits/rejected": 2.800567150115967,
      "logps/chosen": -352.32440185546875,
      "logps/rejected": -517.09228515625,
      "loss": 0.2352,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": -0.9236076474189758,
      "rewards/margins": 2.010570764541626,
      "rewards/rejected": -2.934178590774536,
      "step": 140
    },
    {
      "epoch": 1.1406844106463878,
      "grad_norm": 24.555758805773927,
      "learning_rate": 3.8947013181637624e-07,
      "logits/chosen": 0.8321820497512817,
      "logits/rejected": 3.650494337081909,
      "logps/chosen": -331.7710266113281,
      "logps/rejected": -498.3985900878906,
      "loss": 0.214,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.9579178094863892,
      "rewards/margins": 2.1326422691345215,
      "rewards/rejected": -3.0905601978302,
      "step": 150
    },
    {
      "epoch": 1.1406844106463878,
      "eval_logits/chosen": 0.8076741695404053,
      "eval_logits/rejected": 2.0487349033355713,
      "eval_logps/chosen": -389.57183837890625,
      "eval_logps/rejected": -491.0475158691406,
      "eval_loss": 0.5559520125389099,
      "eval_rewards/accuracies": 0.7456896305084229,
      "eval_rewards/chosen": -1.2961053848266602,
      "eval_rewards/margins": 1.1315982341766357,
      "eval_rewards/rejected": -2.427703619003296,
      "eval_runtime": 93.901,
      "eval_samples_per_second": 19.499,
      "eval_steps_per_second": 0.309,
      "step": 150
    },
    {
      "epoch": 1.2167300380228137,
      "grad_norm": 23.812525631547885,
      "learning_rate": 3.7047741093221656e-07,
      "logits/chosen": 0.6415907740592957,
      "logits/rejected": 3.418233871459961,
      "logps/chosen": -337.47711181640625,
      "logps/rejected": -565.9837036132812,
      "loss": 0.1915,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": -0.7956029176712036,
      "rewards/margins": 2.5107922554016113,
      "rewards/rejected": -3.306394577026367,
      "step": 160
    },
    {
      "epoch": 1.2927756653992395,
      "grad_norm": 22.027445926013243,
      "learning_rate": 3.5053108335480205e-07,
      "logits/chosen": 0.43230652809143066,
      "logits/rejected": 3.4372222423553467,
      "logps/chosen": -404.5794982910156,
      "logps/rejected": -585.620849609375,
      "loss": 0.2138,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.1555755138397217,
      "rewards/margins": 2.351931571960449,
      "rewards/rejected": -3.50750732421875,
      "step": 170
    },
    {
      "epoch": 1.3688212927756653,
      "grad_norm": 21.8184530010132,
      "learning_rate": 3.29789028900245e-07,
      "logits/chosen": 0.6227339506149292,
      "logits/rejected": 3.597677707672119,
      "logps/chosen": -414.3572692871094,
      "logps/rejected": -602.4873657226562,
      "loss": 0.2101,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -1.1512380838394165,
      "rewards/margins": 2.4040679931640625,
      "rewards/rejected": -3.5553059577941895,
      "step": 180
    },
    {
      "epoch": 1.4448669201520912,
      "grad_norm": 30.492941117704806,
      "learning_rate": 3.084154257477301e-07,
      "logits/chosen": 1.2600494623184204,
      "logits/rejected": 3.9732604026794434,
      "logps/chosen": -392.4749450683594,
      "logps/rejected": -593.7326049804688,
      "loss": 0.1835,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -1.3306286334991455,
      "rewards/margins": 2.4590394496917725,
      "rewards/rejected": -3.7896676063537598,
      "step": 190
    },
    {
      "epoch": 1.5209125475285172,
      "grad_norm": 18.187832229568688,
      "learning_rate": 2.865794509310888e-07,
      "logits/chosen": 1.334993839263916,
      "logits/rejected": 4.3924736976623535,
      "logps/chosen": -390.69781494140625,
      "logps/rejected": -616.49951171875,
      "loss": 0.1866,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.2437045574188232,
      "rewards/margins": 2.7270522117614746,
      "rewards/rejected": -3.9707565307617188,
      "step": 200
    },
    {
      "epoch": 1.5209125475285172,
      "eval_logits/chosen": 0.7928547859191895,
      "eval_logits/rejected": 2.0297083854675293,
      "eval_logps/chosen": -404.8973388671875,
      "eval_logps/rejected": -510.6939697265625,
      "eval_loss": 0.5364186763763428,
      "eval_rewards/accuracies": 0.7413793206214905,
      "eval_rewards/chosen": -1.4493602514266968,
      "eval_rewards/margins": 1.1748069524765015,
      "eval_rewards/rejected": -2.624166965484619,
      "eval_runtime": 93.5108,
      "eval_samples_per_second": 19.581,
      "eval_steps_per_second": 0.31,
      "step": 200
    },
    {
      "epoch": 1.5969581749049429,
      "grad_norm": 30.203312935912965,
      "learning_rate": 2.644539412632786e-07,
      "logits/chosen": 0.7376717329025269,
      "logits/rejected": 3.46760630607605,
      "logps/chosen": -381.1366882324219,
      "logps/rejected": -576.8548583984375,
      "loss": 0.2112,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": -1.2630369663238525,
      "rewards/margins": 2.4375810623168945,
      "rewards/rejected": -3.700617551803589,
      "step": 210
    },
    {
      "epoch": 1.673003802281369,
      "grad_norm": 34.60056674030066,
      "learning_rate": 2.422140252928601e-07,
      "logits/chosen": 1.3862824440002441,
      "logits/rejected": 3.9012253284454346,
      "logps/chosen": -363.8896484375,
      "logps/rejected": -638.3570556640625,
      "loss": 0.2026,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -1.3880947828292847,
      "rewards/margins": 2.823024272918701,
      "rewards/rejected": -4.211119174957275,
      "step": 220
    },
    {
      "epoch": 1.7490494296577945,
      "grad_norm": 17.75951807278943,
      "learning_rate": 2.2003573712085455e-07,
      "logits/chosen": 0.32914024591445923,
      "logits/rejected": 3.113135576248169,
      "logps/chosen": -353.28631591796875,
      "logps/rejected": -597.5838012695312,
      "loss": 0.1802,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -1.1312899589538574,
      "rewards/margins": 2.7755885124206543,
      "rewards/rejected": -3.906878709793091,
      "step": 230
    },
    {
      "epoch": 1.8250950570342206,
      "grad_norm": 21.2237904198368,
      "learning_rate": 1.980946230499431e-07,
      "logits/chosen": 0.5761924982070923,
      "logits/rejected": 3.6894748210906982,
      "logps/chosen": -369.281982421875,
      "logps/rejected": -639.9312133789062,
      "loss": 0.1863,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.4418867826461792,
      "rewards/margins": 2.867917776107788,
      "rewards/rejected": -4.309804439544678,
      "step": 240
    },
    {
      "epoch": 1.9011406844106464,
      "grad_norm": 25.460606683183755,
      "learning_rate": 1.7656435209470376e-07,
      "logits/chosen": 1.0737273693084717,
      "logits/rejected": 3.9221127033233643,
      "logps/chosen": -389.84417724609375,
      "logps/rejected": -633.5592041015625,
      "loss": 0.1899,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.4279615879058838,
      "rewards/margins": 2.770998954772949,
      "rewards/rejected": -4.198960304260254,
      "step": 250
    },
    {
      "epoch": 1.9011406844106464,
      "eval_logits/chosen": 1.2524254322052002,
      "eval_logits/rejected": 2.571428060531616,
      "eval_logps/chosen": -438.79302978515625,
      "eval_logps/rejected": -556.13232421875,
      "eval_loss": 0.5391203165054321,
      "eval_rewards/accuracies": 0.7456896305084229,
      "eval_rewards/chosen": -1.7883172035217285,
      "eval_rewards/margins": 1.2902345657348633,
      "eval_rewards/rejected": -3.078551769256592,
      "eval_runtime": 96.6165,
      "eval_samples_per_second": 18.951,
      "eval_steps_per_second": 0.3,
      "step": 250
    },
    {
      "epoch": 1.9771863117870723,
      "grad_norm": 28.509116337681686,
      "learning_rate": 1.5561534135101884e-07,
      "logits/chosen": 1.822516679763794,
      "logits/rejected": 4.293866157531738,
      "logps/chosen": -417.22503662109375,
      "logps/rejected": -664.3339233398438,
      "loss": 0.1979,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.6485140323638916,
      "rewards/margins": 2.746192455291748,
      "rewards/rejected": -4.394707202911377,
      "step": 260
    },
    {
      "epoch": 2.053231939163498,
      "grad_norm": 9.761431912302397,
      "learning_rate": 1.3541340710517546e-07,
      "logits/chosen": 1.401188611984253,
      "logits/rejected": 4.360350608825684,
      "logps/chosen": -416.37481689453125,
      "logps/rejected": -680.0263671875,
      "loss": 0.1217,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.4166698455810547,
      "rewards/margins": 2.9713897705078125,
      "rewards/rejected": -4.388059616088867,
      "step": 270
    },
    {
      "epoch": 2.129277566539924,
      "grad_norm": 19.479793936953538,
      "learning_rate": 1.1611845235944143e-07,
      "logits/chosen": 2.3020176887512207,
      "logits/rejected": 5.1446309089660645,
      "logps/chosen": -407.40936279296875,
      "logps/rejected": -718.0748901367188,
      "loss": 0.1082,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -1.5115373134613037,
      "rewards/margins": 3.4099037647247314,
      "rewards/rejected": -4.921441555023193,
      "step": 280
    },
    {
      "epoch": 2.20532319391635,
      "grad_norm": 12.421604139145975,
      "learning_rate": 9.788320116265892e-08,
      "logits/chosen": 1.9953467845916748,
      "logits/rejected": 5.533808708190918,
      "logps/chosen": -459.0770568847656,
      "logps/rejected": -714.7064208984375,
      "loss": 0.112,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.560991883277893,
      "rewards/margins": 3.3094687461853027,
      "rewards/rejected": -4.8704609870910645,
      "step": 290
    },
    {
      "epoch": 2.2813688212927756,
      "grad_norm": 17.541719244774725,
      "learning_rate": 8.085198976392124e-08,
      "logits/chosen": 2.661414384841919,
      "logits/rejected": 5.309275150299072,
      "logps/chosen": -396.86163330078125,
      "logps/rejected": -682.7183837890625,
      "loss": 0.1083,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -1.661924958229065,
      "rewards/margins": 3.159823179244995,
      "rewards/rejected": -4.821747779846191,
      "step": 300
    },
    {
      "epoch": 2.2813688212927756,
      "eval_logits/chosen": 2.2903313636779785,
      "eval_logits/rejected": 3.6920409202575684,
      "eval_logps/chosen": -470.052734375,
      "eval_logps/rejected": -601.4120483398438,
      "eval_loss": 0.558447539806366,
      "eval_rewards/accuracies": 0.7629310488700867,
      "eval_rewards/chosen": -2.100914239883423,
      "eval_rewards/margins": 1.4304344654083252,
      "eval_rewards/rejected": -3.531348466873169,
      "eval_runtime": 94.198,
      "eval_samples_per_second": 19.438,
      "eval_steps_per_second": 0.308,
      "step": 300
    },
    {
      "epoch": 2.3574144486692017,
      "grad_norm": 11.598664016084259,
      "learning_rate": 6.515962415763369e-08,
      "logits/chosen": 2.566627025604248,
      "logits/rejected": 5.178008079528809,
      "logps/chosen": -364.78521728515625,
      "logps/rejected": -709.1622314453125,
      "loss": 0.0941,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.6015697717666626,
      "rewards/margins": 3.526782989501953,
      "rewards/rejected": -5.128352165222168,
      "step": 310
    },
    {
      "epoch": 2.4334600760456273,
      "grad_norm": 22.496239920192775,
      "learning_rate": 5.093031306275308e-08,
      "logits/chosen": 2.157808303833008,
      "logits/rejected": 5.751224040985107,
      "logps/chosen": -454.247314453125,
      "logps/rejected": -807.82275390625,
      "loss": 0.1048,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.797298789024353,
      "rewards/margins": 3.7231411933898926,
      "rewards/rejected": -5.520440101623535,
      "step": 320
    },
    {
      "epoch": 2.5095057034220534,
      "grad_norm": 13.016382898289478,
      "learning_rate": 3.827668478192578e-08,
      "logits/chosen": 2.29127836227417,
      "logits/rejected": 5.964070796966553,
      "logps/chosen": -475.0154724121094,
      "logps/rejected": -776.1507568359375,
      "loss": 0.0961,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -1.8038787841796875,
      "rewards/margins": 3.5154175758361816,
      "rewards/rejected": -5.319295883178711,
      "step": 330
    },
    {
      "epoch": 2.585551330798479,
      "grad_norm": 18.59300730528273,
      "learning_rate": 2.729889572230856e-08,
      "logits/chosen": 2.6335761547088623,
      "logits/rejected": 6.27906608581543,
      "logps/chosen": -456.11676025390625,
      "logps/rejected": -846.2113037109375,
      "loss": 0.0807,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.0658538341522217,
      "rewards/margins": 4.058461666107178,
      "rewards/rejected": -6.1243157386779785,
      "step": 340
    },
    {
      "epoch": 2.661596958174905,
      "grad_norm": 13.300072255332687,
      "learning_rate": 1.8083837634341766e-08,
      "logits/chosen": 2.266738176345825,
      "logits/rejected": 6.4141387939453125,
      "logps/chosen": -500.92901611328125,
      "logps/rejected": -874.1981201171875,
      "loss": 0.0955,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.200644016265869,
      "rewards/margins": 4.188169956207275,
      "rewards/rejected": -6.3888139724731445,
      "step": 350
    },
    {
      "epoch": 2.661596958174905,
      "eval_logits/chosen": 2.532045602798462,
      "eval_logits/rejected": 4.256929397583008,
      "eval_logps/chosen": -544.331787109375,
      "eval_logps/rejected": -697.9909057617188,
      "eval_loss": 0.5829063653945923,
      "eval_rewards/accuracies": 0.7629310488700867,
      "eval_rewards/chosen": -2.8437047004699707,
      "eval_rewards/margins": 1.6534322500228882,
      "eval_rewards/rejected": -4.49713659286499,
      "eval_runtime": 93.5502,
      "eval_samples_per_second": 19.572,
      "eval_steps_per_second": 0.31,
      "step": 350
    },
    {
      "epoch": 2.7376425855513307,
      "grad_norm": 14.902459964324052,
      "learning_rate": 1.0704449843359498e-08,
      "logits/chosen": 2.454040050506592,
      "logits/rejected": 6.299553871154785,
      "logps/chosen": -483.86669921875,
      "logps/rejected": -877.189453125,
      "loss": 0.0847,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.1329433917999268,
      "rewards/margins": 4.206452369689941,
      "rewards/rejected": -6.3393964767456055,
      "step": 360
    },
    {
      "epoch": 2.8136882129277567,
      "grad_norm": 15.13819700442962,
      "learning_rate": 5.2191419178871935e-09,
      "logits/chosen": 2.5856270790100098,
      "logits/rejected": 6.600296974182129,
      "logps/chosen": -461.5941467285156,
      "logps/rejected": -831.21240234375,
      "loss": 0.0833,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -2.348266363143921,
      "rewards/margins": 4.047614574432373,
      "rewards/rejected": -6.395880699157715,
      "step": 370
    },
    {
      "epoch": 2.8897338403041823,
      "grad_norm": 12.919733942557965,
      "learning_rate": 1.6713313443384724e-09,
      "logits/chosen": 2.7355175018310547,
      "logits/rejected": 6.480588436126709,
      "logps/chosen": -487.62274169921875,
      "logps/rejected": -859.5900268554688,
      "loss": 0.0859,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -2.41930890083313,
      "rewards/margins": 3.938915967941284,
      "rewards/rejected": -6.358224391937256,
      "step": 380
    },
    {
      "epoch": 2.9657794676806084,
      "grad_norm": 10.73491870552627,
      "learning_rate": 8.909986752470012e-11,
      "logits/chosen": 2.8181276321411133,
      "logits/rejected": 6.899574279785156,
      "logps/chosen": -519.549560546875,
      "logps/rejected": -837.3341674804688,
      "loss": 0.0796,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -2.500697612762451,
      "rewards/margins": 3.8103880882263184,
      "rewards/rejected": -6.3110857009887695,
      "step": 390
    },
    {
      "epoch": 2.988593155893536,
      "step": 393,
      "total_flos": 0.0,
      "train_loss": 0.2546291965564699,
      "train_runtime": 10353.4149,
      "train_samples_per_second": 4.868,
      "train_steps_per_second": 0.038
    }
  ],
  "logging_steps": 10,
  "max_steps": 393,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}