{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9988571428571429,
  "eval_steps": 50,
  "global_step": 437,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022857142857142857,
      "grad_norm": 5.484237277918217,
      "learning_rate": 1.1363636363636363e-07,
      "logits/chosen": -2.6541342735290527,
      "logits/rejected": -2.6346869468688965,
      "logps/chosen": -287.93011474609375,
      "logps/rejected": -266.5004577636719,
      "loss": 0.6931,
      "rewards/accuracies": 0.40625,
      "rewards/chosen": -0.00015118246665224433,
      "rewards/margins": -3.311875479994342e-05,
      "rewards/rejected": -0.00011806372640421614,
      "step": 10
    },
    {
      "epoch": 0.045714285714285714,
      "grad_norm": 5.141117524550972,
      "learning_rate": 2.2727272727272726e-07,
      "logits/chosen": -2.6553006172180176,
      "logits/rejected": -2.623598575592041,
      "logps/chosen": -271.3798828125,
      "logps/rejected": -260.2137145996094,
      "loss": 0.6925,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.003302179742604494,
      "rewards/margins": 0.0022685881704092026,
      "rewards/rejected": 0.0010335915721952915,
      "step": 20
    },
    {
      "epoch": 0.06857142857142857,
      "grad_norm": 4.867644605403788,
      "learning_rate": 3.4090909090909085e-07,
      "logits/chosen": -2.6496236324310303,
      "logits/rejected": -2.558840751647949,
      "logps/chosen": -271.86370849609375,
      "logps/rejected": -254.6623077392578,
      "loss": 0.6896,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.020251339301466942,
      "rewards/margins": 0.011756613850593567,
      "rewards/rejected": 0.008494723588228226,
      "step": 30
    },
    {
      "epoch": 0.09142857142857143,
      "grad_norm": 4.600987966838872,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": -2.6127796173095703,
      "logits/rejected": -2.5982162952423096,
      "logps/chosen": -261.87078857421875,
      "logps/rejected": -255.64682006835938,
      "loss": 0.6811,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.039037298411130905,
      "rewards/margins": 0.04095984250307083,
      "rewards/rejected": -0.0019225452560931444,
      "step": 40
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 6.01325732118956,
      "learning_rate": 4.997124959943201e-07,
      "logits/chosen": -2.59413743019104,
      "logits/rejected": -2.525812864303589,
      "logps/chosen": -288.6096496582031,
      "logps/rejected": -264.43359375,
      "loss": 0.6661,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.008406409062445164,
      "rewards/margins": 0.11031482368707657,
      "rewards/rejected": -0.10190840810537338,
      "step": 50
    },
    {
      "epoch": 0.11428571428571428,
      "eval_logits/chosen": -2.619272232055664,
      "eval_logits/rejected": -2.573228359222412,
      "eval_logps/chosen": -263.4992370605469,
      "eval_logps/rejected": -264.2141418457031,
      "eval_loss": 0.6540683507919312,
      "eval_rewards/accuracies": 0.6551724076271057,
      "eval_rewards/chosen": -0.0353790745139122,
      "eval_rewards/margins": 0.12399081885814667,
      "eval_rewards/rejected": -0.15936988592147827,
      "eval_runtime": 93.0218,
      "eval_samples_per_second": 19.684,
      "eval_steps_per_second": 0.312,
      "step": 50
    },
    {
      "epoch": 0.13714285714285715,
      "grad_norm": 8.643694975506126,
      "learning_rate": 4.979579212164186e-07,
      "logits/chosen": -2.6212775707244873,
      "logits/rejected": -2.5676815509796143,
      "logps/chosen": -305.444091796875,
      "logps/rejected": -272.07867431640625,
      "loss": 0.6429,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.05958025902509689,
      "rewards/margins": 0.1966249793767929,
      "rewards/rejected": -0.2562052309513092,
      "step": 60
    },
    {
      "epoch": 0.16,
      "grad_norm": 12.272483900335105,
      "learning_rate": 4.946196886175515e-07,
      "logits/chosen": -2.59287166595459,
      "logits/rejected": -2.5041251182556152,
      "logps/chosen": -321.8868103027344,
      "logps/rejected": -300.4162292480469,
      "loss": 0.629,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.18531829118728638,
      "rewards/margins": 0.3314962685108185,
      "rewards/rejected": -0.5168145895004272,
      "step": 70
    },
    {
      "epoch": 0.18285714285714286,
      "grad_norm": 13.506330348124033,
      "learning_rate": 4.897191188239667e-07,
      "logits/chosen": -2.069486141204834,
      "logits/rejected": -1.9483461380004883,
      "logps/chosen": -318.3644714355469,
      "logps/rejected": -329.19659423828125,
      "loss": 0.6107,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.5116854906082153,
      "rewards/margins": 0.31293898820877075,
      "rewards/rejected": -0.8246244192123413,
      "step": 80
    },
    {
      "epoch": 0.2057142857142857,
      "grad_norm": 12.553950954592443,
      "learning_rate": 4.832875107981763e-07,
      "logits/chosen": -1.1866068840026855,
      "logits/rejected": -0.9298005104064941,
      "logps/chosen": -316.0557556152344,
      "logps/rejected": -380.7778625488281,
      "loss": 0.5915,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.7446193695068359,
      "rewards/margins": 0.5451987981796265,
      "rewards/rejected": -1.2898180484771729,
      "step": 90
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 30.03309840012713,
      "learning_rate": 4.753659419387223e-07,
      "logits/chosen": -0.9629250764846802,
      "logits/rejected": -0.5570683479309082,
      "logps/chosen": -331.9052429199219,
      "logps/rejected": -379.1312561035156,
      "loss": 0.561,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.7065539360046387,
      "rewards/margins": 0.7784632444381714,
      "rewards/rejected": -1.4850170612335205,
      "step": 100
    },
    {
      "epoch": 0.22857142857142856,
      "eval_logits/chosen": -0.4111216068267822,
      "eval_logits/rejected": -0.007617468014359474,
      "eval_logps/chosen": -365.1130676269531,
      "eval_logps/rejected": -431.9446716308594,
      "eval_loss": 0.5662747621536255,
      "eval_rewards/accuracies": 0.7068965435028076,
      "eval_rewards/chosen": -1.0515177249908447,
      "eval_rewards/margins": 0.7851568460464478,
      "eval_rewards/rejected": -1.8366745710372925,
      "eval_runtime": 93.7201,
      "eval_samples_per_second": 19.537,
      "eval_steps_per_second": 0.309,
      "step": 100
    },
    {
      "epoch": 0.25142857142857145,
      "grad_norm": 27.312780453008052,
      "learning_rate": 4.660050057270191e-07,
      "logits/chosen": -0.45199713110923767,
      "logits/rejected": 0.33052048087120056,
      "logps/chosen": -373.90545654296875,
      "logps/rejected": -399.00909423828125,
      "loss": 0.5415,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.6764419674873352,
      "rewards/margins": 0.9693374633789062,
      "rewards/rejected": -1.6457793712615967,
      "step": 110
    },
    {
      "epoch": 0.2742857142857143,
      "grad_norm": 29.282942520454633,
      "learning_rate": 4.5526448859687144e-07,
      "logits/chosen": 0.3397678732872009,
      "logits/rejected": 0.8190152049064636,
      "logps/chosen": -406.60565185546875,
      "logps/rejected": -463.072021484375,
      "loss": 0.5753,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.3377411365509033,
      "rewards/margins": 0.7959097623825073,
      "rewards/rejected": -2.133650779724121,
      "step": 120
    },
    {
      "epoch": 0.29714285714285715,
      "grad_norm": 24.36403749878471,
      "learning_rate": 4.432129880904388e-07,
      "logits/chosen": 0.4417182505130768,
      "logits/rejected": 1.1421695947647095,
      "logps/chosen": -371.94744873046875,
      "logps/rejected": -401.11962890625,
      "loss": 0.5483,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.9846571683883667,
      "rewards/margins": 0.6376141309738159,
      "rewards/rejected": -1.6222712993621826,
      "step": 130
    },
    {
      "epoch": 0.32,
      "grad_norm": 22.333886590434165,
      "learning_rate": 4.299274747394055e-07,
      "logits/chosen": 0.7880607843399048,
      "logits/rejected": 1.3198670148849487,
      "logps/chosen": -435.3951110839844,
      "logps/rejected": -469.02911376953125,
      "loss": 0.549,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.175621747970581,
      "rewards/margins": 0.9118834733963013,
      "rewards/rejected": -2.0875051021575928,
      "step": 140
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 21.900935378631306,
      "learning_rate": 4.1549280046953653e-07,
      "logits/chosen": 1.040090560913086,
      "logits/rejected": 1.5702952146530151,
      "logps/chosen": -405.8429870605469,
      "logps/rejected": -457.5497131347656,
      "loss": 0.5324,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -1.0978893041610718,
      "rewards/margins": 0.8218156099319458,
      "rewards/rejected": -1.9197051525115967,
      "step": 150
    },
    {
      "epoch": 0.34285714285714286,
      "eval_logits/chosen": 1.325810432434082,
      "eval_logits/rejected": 2.0029168128967285,
      "eval_logps/chosen": -405.1368103027344,
      "eval_logps/rejected": -492.1299743652344,
      "eval_loss": 0.5436938405036926,
      "eval_rewards/accuracies": 0.6853448152542114,
      "eval_rewards/chosen": -1.451755166053772,
      "eval_rewards/margins": 0.986772894859314,
      "eval_rewards/rejected": -2.438527822494507,
      "eval_runtime": 94.6665,
      "eval_samples_per_second": 19.342,
      "eval_steps_per_second": 0.306,
      "step": 150
    },
    {
      "epoch": 0.3657142857142857,
      "grad_norm": 20.99267450356195,
      "learning_rate": 4.000011566683401e-07,
      "logits/chosen": 0.8327977061271667,
      "logits/rejected": 1.764451265335083,
      "logps/chosen": -447.289794921875,
      "logps/rejected": -494.8617248535156,
      "loss": 0.5254,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.381363034248352,
      "rewards/margins": 0.8699455261230469,
      "rewards/rejected": -2.2513084411621094,
      "step": 160
    },
    {
      "epoch": 0.38857142857142857,
      "grad_norm": 19.625600234650467,
      "learning_rate": 3.8355148537705047e-07,
      "logits/chosen": 0.17654064297676086,
      "logits/rejected": 1.0507433414459229,
      "logps/chosen": -395.59124755859375,
      "logps/rejected": -480.58038330078125,
      "loss": 0.5315,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.0189447402954102,
      "rewards/margins": 1.0480358600616455,
      "rewards/rejected": -2.0669806003570557,
      "step": 170
    },
    {
      "epoch": 0.4114285714285714,
      "grad_norm": 22.131134389700858,
      "learning_rate": 3.662488473675315e-07,
      "logits/chosen": 1.4423513412475586,
      "logits/rejected": 2.64449143409729,
      "logps/chosen": -448.270263671875,
      "logps/rejected": -503.065185546875,
      "loss": 0.5355,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.4582993984222412,
      "rewards/margins": 1.1077758073806763,
      "rewards/rejected": -2.566075563430786,
      "step": 180
    },
    {
      "epoch": 0.4342857142857143,
      "grad_norm": 22.169020143018557,
      "learning_rate": 3.48203751140067e-07,
      "logits/chosen": 0.9658209085464478,
      "logits/rejected": 2.3707427978515625,
      "logps/chosen": -405.305419921875,
      "logps/rejected": -484.43499755859375,
      "loss": 0.538,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -1.4446977376937866,
      "rewards/margins": 1.050331711769104,
      "rewards/rejected": -2.4950294494628906,
      "step": 190
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 18.01149891090628,
      "learning_rate": 3.2953144712759537e-07,
      "logits/chosen": 1.7644532918930054,
      "logits/rejected": 2.6767263412475586,
      "logps/chosen": -395.5130310058594,
      "logps/rejected": -487.91192626953125,
      "loss": 0.5261,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.451765537261963,
      "rewards/margins": 1.0395876169204712,
      "rewards/rejected": -2.4913532733917236,
      "step": 200
    },
    {
      "epoch": 0.45714285714285713,
      "eval_logits/chosen": 1.7313151359558105,
      "eval_logits/rejected": 2.7388670444488525,
      "eval_logps/chosen": -416.20770263671875,
      "eval_logps/rejected": -507.4054870605469,
      "eval_loss": 0.5247488021850586,
      "eval_rewards/accuracies": 0.6853448152542114,
      "eval_rewards/chosen": -1.5624639987945557,
      "eval_rewards/margins": 1.0288186073303223,
      "eval_rewards/rejected": -2.591282606124878,
      "eval_runtime": 94.5093,
      "eval_samples_per_second": 19.374,
      "eval_steps_per_second": 0.307,
      "step": 200
    },
    {
      "epoch": 0.48,
      "grad_norm": 19.57404608223085,
      "learning_rate": 3.103511916141658e-07,
      "logits/chosen": 1.504326581954956,
      "logits/rejected": 2.792241334915161,
      "logps/chosen": -433.87969970703125,
      "logps/rejected": -526.0897216796875,
      "loss": 0.517,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.4803601503372192,
      "rewards/margins": 1.1599363088607788,
      "rewards/rejected": -2.640296459197998,
      "step": 210
    },
    {
      "epoch": 0.5028571428571429,
      "grad_norm": 21.692320362176854,
      "learning_rate": 2.9078548506882117e-07,
      "logits/chosen": 1.4797990322113037,
      "logits/rejected": 2.450709581375122,
      "logps/chosen": -449.7752990722656,
      "logps/rejected": -549.132080078125,
      "loss": 0.5296,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.4539282321929932,
      "rewards/margins": 1.1360087394714355,
      "rewards/rejected": -2.5899369716644287,
      "step": 220
    },
    {
      "epoch": 0.5257142857142857,
      "grad_norm": 19.33385563287397,
      "learning_rate": 2.709592897595191e-07,
      "logits/chosen": 1.3153431415557861,
      "logits/rejected": 2.2327983379364014,
      "logps/chosen": -424.7271423339844,
      "logps/rejected": -506.617919921875,
      "loss": 0.5439,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.3993257284164429,
      "rewards/margins": 0.8849741816520691,
      "rewards/rejected": -2.2843000888824463,
      "step": 230
    },
    {
      "epoch": 0.5485714285714286,
      "grad_norm": 19.794275801154928,
      "learning_rate": 2.509992316440332e-07,
      "logits/chosen": 2.2576842308044434,
      "logits/rejected": 3.4659297466278076,
      "logps/chosen": -488.4747619628906,
      "logps/rejected": -558.2715454101562,
      "loss": 0.5332,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -1.8078649044036865,
      "rewards/margins": 1.1585441827774048,
      "rewards/rejected": -2.966409206390381,
      "step": 240
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 19.209773850229293,
      "learning_rate": 2.3103279163519918e-07,
      "logits/chosen": 1.9105958938598633,
      "logits/rejected": 2.3923182487487793,
      "logps/chosen": -476.888671875,
      "logps/rejected": -540.67724609375,
      "loss": 0.5274,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -1.9962562322616577,
      "rewards/margins": 0.6005629301071167,
      "rewards/rejected": -2.5968191623687744,
      "step": 250
    },
    {
      "epoch": 0.5714285714285714,
      "eval_logits/chosen": 1.0144059658050537,
      "eval_logits/rejected": 2.126599073410034,
      "eval_logps/chosen": -428.11065673828125,
      "eval_logps/rejected": -528.8191528320312,
      "eval_loss": 0.514776349067688,
      "eval_rewards/accuracies": 0.7155172228813171,
      "eval_rewards/chosen": -1.6814932823181152,
      "eval_rewards/margins": 1.123926043510437,
      "eval_rewards/rejected": -2.8054192066192627,
      "eval_runtime": 95.0873,
      "eval_samples_per_second": 19.256,
      "eval_steps_per_second": 0.305,
      "step": 250
    },
    {
      "epoch": 0.5942857142857143,
      "grad_norm": 21.990080244291757,
      "learning_rate": 2.1118749140573358e-07,
      "logits/chosen": 1.3521721363067627,
      "logits/rejected": 2.6136393547058105,
      "logps/chosen": -439.0127868652344,
      "logps/rejected": -476.6556701660156,
      "loss": 0.5238,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.6318817138671875,
      "rewards/margins": 0.9099860191345215,
      "rewards/rejected": -2.54186749458313,
      "step": 260
    },
    {
      "epoch": 0.6171428571428571,
      "grad_norm": 21.20271695558916,
      "learning_rate": 1.9159007893272703e-07,
      "logits/chosen": 1.4235649108886719,
      "logits/rejected": 2.510680913925171,
      "logps/chosen": -468.30975341796875,
      "logps/rejected": -496.22406005859375,
      "loss": 0.5192,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.8018286228179932,
      "rewards/margins": 0.6999757885932922,
      "rewards/rejected": -2.5018038749694824,
      "step": 270
    },
    {
      "epoch": 0.64,
      "grad_norm": 18.377021107490634,
      "learning_rate": 1.7236571898357766e-07,
      "logits/chosen": 1.396390676498413,
      "logits/rejected": 2.4561963081359863,
      "logps/chosen": -388.86273193359375,
      "logps/rejected": -491.08612060546875,
      "loss": 0.5273,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.3913507461547852,
      "rewards/margins": 1.1841250658035278,
      "rewards/rejected": -2.5754759311676025,
      "step": 280
    },
    {
      "epoch": 0.6628571428571428,
      "grad_norm": 23.869092599907706,
      "learning_rate": 1.5363719371356882e-07,
      "logits/chosen": 1.4888983964920044,
      "logits/rejected": 2.697493076324463,
      "logps/chosen": -438.34613037109375,
      "logps/rejected": -505.64398193359375,
      "loss": 0.5086,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.5241912603378296,
      "rewards/margins": 1.000713586807251,
      "rewards/rejected": -2.524904727935791,
      "step": 290
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 25.027316619015263,
      "learning_rate": 1.3552411848071565e-07,
      "logits/chosen": 1.6033051013946533,
      "logits/rejected": 3.075709581375122,
      "logps/chosen": -483.4491271972656,
      "logps/rejected": -563.5081787109375,
      "loss": 0.5,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.700990080833435,
      "rewards/margins": 1.2899569272994995,
      "rewards/rejected": -2.9909472465515137,
      "step": 300
    },
    {
      "epoch": 0.6857142857142857,
      "eval_logits/chosen": 1.5160114765167236,
      "eval_logits/rejected": 2.702754259109497,
      "eval_logps/chosen": -428.75518798828125,
      "eval_logps/rejected": -535.8169555664062,
      "eval_loss": 0.5077716112136841,
      "eval_rewards/accuracies": 0.7198275923728943,
      "eval_rewards/chosen": -1.6879388093948364,
      "eval_rewards/margins": 1.187458872795105,
      "eval_rewards/rejected": -2.8753974437713623,
      "eval_runtime": 93.8182,
      "eval_samples_per_second": 19.516,
      "eval_steps_per_second": 0.309,
      "step": 300
    },
    {
      "epoch": 0.7085714285714285,
      "grad_norm": 20.06935454117094,
      "learning_rate": 1.1814217788631473e-07,
      "logits/chosen": 1.097944974899292,
      "logits/rejected": 2.714242458343506,
      "logps/chosen": -464.17645263671875,
      "logps/rejected": -550.5721435546875,
      "loss": 0.512,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.509181261062622,
      "rewards/margins": 1.3204662799835205,
      "rewards/rejected": -2.8296473026275635,
      "step": 310
    },
    {
      "epoch": 0.7314285714285714,
      "grad_norm": 20.37966135449331,
      "learning_rate": 1.0160238692045331e-07,
      "logits/chosen": 1.1582145690917969,
      "logits/rejected": 2.5812854766845703,
      "logps/chosen": -450.8458557128906,
      "logps/rejected": -533.5846557617188,
      "loss": 0.5203,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.5372800827026367,
      "rewards/margins": 1.0487934350967407,
      "rewards/rejected": -2.586073637008667,
      "step": 320
    },
    {
      "epoch": 0.7542857142857143,
      "grad_norm": 20.9552841041291,
      "learning_rate": 8.601038193139438e-08,
      "logits/chosen": 1.9143211841583252,
      "logits/rejected": 2.735372304916382,
      "logps/chosen": -400.63372802734375,
      "logps/rejected": -489.5486755371094,
      "loss": 0.52,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.5494529008865356,
      "rewards/margins": 0.9419981241226196,
      "rewards/rejected": -2.491450786590576,
      "step": 330
    },
    {
      "epoch": 0.7771428571428571,
      "grad_norm": 20.770737913391446,
      "learning_rate": 7.146574594727572e-08,
      "logits/chosen": 1.9478594064712524,
      "logits/rejected": 3.201336622238159,
      "logps/chosen": -425.11126708984375,
      "logps/rejected": -508.321533203125,
      "loss": 0.5043,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.7910737991333008,
      "rewards/margins": 1.1562799215316772,
      "rewards/rejected": -2.947354316711426,
      "step": 340
    },
    {
      "epoch": 0.8,
      "grad_norm": 23.43196809715842,
      "learning_rate": 5.8061372659157306e-08,
      "logits/chosen": 2.0667195320129395,
      "logits/rejected": 3.518019914627075,
      "logps/chosen": -500.10174560546875,
      "logps/rejected": -575.167236328125,
      "loss": 0.4879,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.8992862701416016,
      "rewards/margins": 1.168575406074524,
      "rewards/rejected": -3.067861557006836,
      "step": 350
    },
    {
      "epoch": 0.8,
      "eval_logits/chosen": 2.206516742706299,
      "eval_logits/rejected": 3.2477219104766846,
      "eval_logps/chosen": -448.67852783203125,
      "eval_logps/rejected": -555.7252197265625,
      "eval_loss": 0.5050091743469238,
      "eval_rewards/accuracies": 0.7198275923728943,
      "eval_rewards/chosen": -1.8871723413467407,
      "eval_rewards/margins": 1.187308430671692,
      "eval_rewards/rejected": -3.0744810104370117,
      "eval_runtime": 93.1425,
      "eval_samples_per_second": 19.658,
      "eval_steps_per_second": 0.311,
      "step": 350
    },
    {
      "epoch": 0.8228571428571428,
      "grad_norm": 21.572101434555105,
      "learning_rate": 4.5882873127531614e-08,
      "logits/chosen": 2.518174648284912,
      "logits/rejected": 3.5241615772247314,
      "logps/chosen": -472.5306701660156,
      "logps/rejected": -603.8782958984375,
      "loss": 0.5056,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -1.8001983165740967,
      "rewards/margins": 1.5649583339691162,
      "rewards/rejected": -3.365156650543213,
      "step": 360
    },
    {
      "epoch": 0.8457142857142858,
      "grad_norm": 24.32119336663692,
      "learning_rate": 3.500802900154412e-08,
      "logits/chosen": 2.2133302688598633,
      "logits/rejected": 2.970715284347534,
      "logps/chosen": -420.18707275390625,
      "logps/rejected": -547.6619873046875,
      "loss": 0.5034,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.6418088674545288,
      "rewards/margins": 1.213688611984253,
      "rewards/rejected": -2.855497360229492,
      "step": 370
    },
    {
      "epoch": 0.8685714285714285,
      "grad_norm": 25.458784716444757,
      "learning_rate": 2.550629574310309e-08,
      "logits/chosen": 2.7819252014160156,
      "logits/rejected": 3.5777878761291504,
      "logps/chosen": -435.19268798828125,
      "logps/rejected": -533.6688842773438,
      "loss": 0.4987,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -1.7933107614517212,
      "rewards/margins": 0.9658328890800476,
      "rewards/rejected": -2.759143352508545,
      "step": 380
    },
    {
      "epoch": 0.8914285714285715,
      "grad_norm": 20.416342139746472,
      "learning_rate": 1.7438359028687983e-08,
      "logits/chosen": 2.2803592681884766,
      "logits/rejected": 3.287682056427002,
      "logps/chosen": -436.3902282714844,
      "logps/rejected": -546.454833984375,
      "loss": 0.5096,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.6981942653656006,
      "rewards/margins": 1.2526156902313232,
      "rewards/rejected": -2.950809955596924,
      "step": 390
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 23.31604206083898,
      "learning_rate": 1.0855747162029361e-08,
      "logits/chosen": 1.8559446334838867,
      "logits/rejected": 2.6898691654205322,
      "logps/chosen": -443.5389709472656,
      "logps/rejected": -552.5650634765625,
      "loss": 0.5082,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.6940586566925049,
      "rewards/margins": 1.158569574356079,
      "rewards/rejected": -2.852627992630005,
      "step": 400
    },
    {
      "epoch": 0.9142857142857143,
      "eval_logits/chosen": 1.9658149480819702,
      "eval_logits/rejected": 3.059574842453003,
      "eval_logps/chosen": -435.48773193359375,
      "eval_logps/rejected": -548.0841064453125,
      "eval_loss": 0.5012447834014893,
      "eval_rewards/accuracies": 0.7198275923728943,
      "eval_rewards/chosen": -1.7552645206451416,
      "eval_rewards/margins": 1.2428052425384521,
      "eval_rewards/rejected": -2.9980695247650146,
      "eval_runtime": 94.0643,
      "eval_samples_per_second": 19.465,
      "eval_steps_per_second": 0.308,
      "step": 400
    },
    {
      "epoch": 0.9371428571428572,
      "grad_norm": 19.227803698254093,
      "learning_rate": 5.8005019731033615e-09,
      "logits/chosen": 2.1140646934509277,
      "logits/rejected": 3.0444817543029785,
      "logps/chosen": -424.0716247558594,
      "logps/rejected": -529.0479736328125,
      "loss": 0.4899,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.6376755237579346,
      "rewards/margins": 1.076155662536621,
      "rewards/rejected": -2.7138311862945557,
      "step": 410
    },
    {
      "epoch": 0.96,
      "grad_norm": 19.178542640219938,
      "learning_rate": 2.3049103053431886e-09,
      "logits/chosen": 1.7973829507827759,
      "logits/rejected": 2.994089365005493,
      "logps/chosen": -480.82342529296875,
      "logps/rejected": -606.6632690429688,
      "loss": 0.5082,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.7015644311904907,
      "rewards/margins": 1.422523856163025,
      "rewards/rejected": -3.1240882873535156,
      "step": 420
    },
    {
      "epoch": 0.9828571428571429,
      "grad_norm": 23.036308488754496,
      "learning_rate": 3.9129780600541397e-10,
      "logits/chosen": 2.1120381355285645,
      "logits/rejected": 3.065523862838745,
      "logps/chosen": -451.97515869140625,
      "logps/rejected": -554.6312866210938,
      "loss": 0.4855,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -1.6638119220733643,
      "rewards/margins": 1.303351640701294,
      "rewards/rejected": -2.967163324356079,
      "step": 430
    },
    {
      "epoch": 0.9988571428571429,
      "step": 437,
      "total_flos": 0.0,
      "train_loss": 0.5499054316252265,
      "train_runtime": 11738.6737,
      "train_samples_per_second": 4.77,
      "train_steps_per_second": 0.037
    }
  ],
  "logging_steps": 10,
  "max_steps": 437,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}