{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8632528938591328,
  "eval_steps": 50,
  "global_step": 550,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015695507161075144,
      "grad_norm": 0.04355761408805847,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.845781326293945,
      "logits/rejected": 14.576438903808594,
      "logps/chosen": -0.31864267587661743,
      "logps/rejected": -0.24545662105083466,
      "loss": 1.0492,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.47796401381492615,
      "rewards/margins": -0.10977902263402939,
      "rewards/rejected": -0.3681849539279938,
      "step": 10
    },
    {
      "epoch": 0.03139101432215029,
      "grad_norm": 0.04919258877635002,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 15.27595043182373,
      "logits/rejected": 14.872761726379395,
      "logps/chosen": -0.3344747722148895,
      "logps/rejected": -0.24258682131767273,
      "loss": 1.0487,
      "rewards/accuracies": 0.16249999403953552,
      "rewards/chosen": -0.5017121434211731,
      "rewards/margins": -0.1378319263458252,
      "rewards/rejected": -0.3638802468776703,
      "step": 20
    },
    {
      "epoch": 0.047086521483225424,
      "grad_norm": 0.049933061003685,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 15.913165092468262,
      "logits/rejected": 15.607622146606445,
      "logps/chosen": -0.3440183997154236,
      "logps/rejected": -0.2831566333770752,
      "loss": 1.0405,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.516027569770813,
      "rewards/margins": -0.09129264950752258,
      "rewards/rejected": -0.4247349202632904,
      "step": 30
    },
    {
      "epoch": 0.06278202864430057,
      "grad_norm": 0.05693503096699715,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 15.402900695800781,
      "logits/rejected": 14.99272632598877,
      "logps/chosen": -0.3297731578350067,
      "logps/rejected": -0.2746916711330414,
      "loss": 1.0369,
      "rewards/accuracies": 0.21250000596046448,
      "rewards/chosen": -0.49465981125831604,
      "rewards/margins": -0.08262218534946442,
      "rewards/rejected": -0.41203755140304565,
      "step": 40
    },
    {
      "epoch": 0.07847753580537571,
      "grad_norm": 0.05467928573489189,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 15.64543342590332,
      "logits/rejected": 15.632547378540039,
      "logps/chosen": -0.30952686071395874,
      "logps/rejected": -0.24847058951854706,
      "loss": 1.0367,
      "rewards/accuracies": 0.21250000596046448,
      "rewards/chosen": -0.46429023146629333,
      "rewards/margins": -0.09158438444137573,
      "rewards/rejected": -0.37270587682724,
      "step": 50
    },
    {
      "epoch": 0.07847753580537571,
      "eval_logits/chosen": 15.850138664245605,
      "eval_logits/rejected": 15.368529319763184,
      "eval_logps/chosen": -0.3222965598106384,
      "eval_logps/rejected": -0.26877468824386597,
      "eval_loss": 1.0326261520385742,
      "eval_rewards/accuracies": 0.26923078298568726,
      "eval_rewards/chosen": -0.4834447503089905,
      "eval_rewards/margins": -0.08028276264667511,
      "eval_rewards/rejected": -0.40316200256347656,
      "eval_runtime": 14.5044,
      "eval_samples_per_second": 28.405,
      "eval_steps_per_second": 3.585,
      "step": 50
    },
    {
      "epoch": 0.09417304296645085,
      "grad_norm": 0.06174452602863312,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 15.443066596984863,
      "logits/rejected": 15.192205429077148,
      "logps/chosen": -0.31090402603149414,
      "logps/rejected": -0.26281923055648804,
      "loss": 1.04,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.466356098651886,
      "rewards/margins": -0.07212716341018677,
      "rewards/rejected": -0.39422887563705444,
      "step": 60
    },
    {
      "epoch": 0.109868550127526,
      "grad_norm": 0.06952528655529022,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 16.024200439453125,
      "logits/rejected": 15.82934284210205,
      "logps/chosen": -0.348227322101593,
      "logps/rejected": -0.26220566034317017,
      "loss": 1.043,
      "rewards/accuracies": 0.21250000596046448,
      "rewards/chosen": -0.5223408937454224,
      "rewards/margins": -0.1290324479341507,
      "rewards/rejected": -0.39330852031707764,
      "step": 70
    },
    {
      "epoch": 0.12556405728860115,
      "grad_norm": 0.07572082430124283,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 15.884915351867676,
      "logits/rejected": 15.603845596313477,
      "logps/chosen": -0.34849274158477783,
      "logps/rejected": -0.26585355401039124,
      "loss": 1.0285,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.5227391719818115,
      "rewards/margins": -0.12395882606506348,
      "rewards/rejected": -0.39878037571907043,
      "step": 80
    },
    {
      "epoch": 0.14125956444967627,
      "grad_norm": 0.2423778474330902,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 15.978216171264648,
      "logits/rejected": 15.76471996307373,
      "logps/chosen": -0.327436238527298,
      "logps/rejected": -0.25457051396369934,
      "loss": 1.03,
      "rewards/accuracies": 0.22499999403953552,
      "rewards/chosen": -0.49115434288978577,
      "rewards/margins": -0.10929858684539795,
      "rewards/rejected": -0.3818557560443878,
      "step": 90
    },
    {
      "epoch": 0.15695507161075142,
      "grad_norm": 0.1594536453485489,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 16.307537078857422,
      "logits/rejected": 16.138330459594727,
      "logps/chosen": -0.3342314660549164,
      "logps/rejected": -0.27582648396492004,
      "loss": 1.0309,
      "rewards/accuracies": 0.26249998807907104,
      "rewards/chosen": -0.5013472437858582,
      "rewards/margins": -0.0876075029373169,
      "rewards/rejected": -0.41373974084854126,
      "step": 100
    },
    {
      "epoch": 0.15695507161075142,
      "eval_logits/chosen": 16.4310245513916,
      "eval_logits/rejected": 15.98912525177002,
      "eval_logps/chosen": -0.3239763677120209,
      "eval_logps/rejected": -0.28784558176994324,
      "eval_loss": 1.020836353302002,
      "eval_rewards/accuracies": 0.3076923191547394,
      "eval_rewards/chosen": -0.4859645664691925,
      "eval_rewards/margins": -0.054196178913116455,
      "eval_rewards/rejected": -0.43176835775375366,
      "eval_runtime": 14.5049,
      "eval_samples_per_second": 28.404,
      "eval_steps_per_second": 3.585,
      "step": 100
    },
    {
      "epoch": 0.17265057877182657,
      "grad_norm": 0.07431349903345108,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 16.56686782836914,
      "logits/rejected": 16.093189239501953,
      "logps/chosen": -0.34455060958862305,
      "logps/rejected": -0.2834388315677643,
      "loss": 1.0388,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": -0.5168259739875793,
      "rewards/margins": -0.09166768193244934,
      "rewards/rejected": -0.4251582622528076,
      "step": 110
    },
    {
      "epoch": 0.1883460859329017,
      "grad_norm": 0.08802352845668793,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 16.50200843811035,
      "logits/rejected": 16.286388397216797,
      "logps/chosen": -0.30845317244529724,
      "logps/rejected": -0.2677682936191559,
      "loss": 1.0247,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.4626797139644623,
      "rewards/margins": -0.06102731078863144,
      "rewards/rejected": -0.40165242552757263,
      "step": 120
    },
    {
      "epoch": 0.20404159309397685,
      "grad_norm": 0.10464702546596527,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 16.163082122802734,
      "logits/rejected": 16.158031463623047,
      "logps/chosen": -0.3138599991798401,
      "logps/rejected": -0.28097471594810486,
      "loss": 1.0169,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": -0.47078999876976013,
      "rewards/margins": -0.04932791367173195,
      "rewards/rejected": -0.4214620590209961,
      "step": 130
    },
    {
      "epoch": 0.219737100255052,
      "grad_norm": 0.16971275210380554,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 16.28864860534668,
      "logits/rejected": 16.151805877685547,
      "logps/chosen": -0.3283368945121765,
      "logps/rejected": -0.2850198745727539,
      "loss": 0.9964,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": -0.49250537157058716,
      "rewards/margins": -0.06497551500797272,
      "rewards/rejected": -0.42752987146377563,
      "step": 140
    },
    {
      "epoch": 0.23543260741612712,
      "grad_norm": 0.18377964198589325,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 16.890087127685547,
      "logits/rejected": 16.42388153076172,
      "logps/chosen": -0.33256903290748596,
      "logps/rejected": -0.2939595878124237,
      "loss": 1.0073,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": -0.49885353446006775,
      "rewards/margins": -0.057914119213819504,
      "rewards/rejected": -0.44093936681747437,
      "step": 150
    },
    {
      "epoch": 0.23543260741612712,
      "eval_logits/chosen": 16.833438873291016,
      "eval_logits/rejected": 16.328977584838867,
      "eval_logps/chosen": -0.32567569613456726,
      "eval_logps/rejected": -0.35700783133506775,
      "eval_loss": 0.9802881479263306,
      "eval_rewards/accuracies": 0.42307692766189575,
      "eval_rewards/chosen": -0.4885135293006897,
      "eval_rewards/margins": 0.04699822515249252,
      "eval_rewards/rejected": -0.5355117321014404,
      "eval_runtime": 14.5005,
      "eval_samples_per_second": 28.413,
      "eval_steps_per_second": 3.586,
      "step": 150
    },
    {
      "epoch": 0.2511281145772023,
      "grad_norm": 0.12049826234579086,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 16.505878448486328,
      "logits/rejected": 16.178979873657227,
      "logps/chosen": -0.3397011458873749,
      "logps/rejected": -0.35640352964401245,
      "loss": 0.9795,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.5095517039299011,
      "rewards/margins": 0.0250535998493433,
      "rewards/rejected": -0.5346053242683411,
      "step": 160
    },
    {
      "epoch": 0.2668236217382774,
      "grad_norm": 0.09485407918691635,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 16.245588302612305,
      "logits/rejected": 15.922958374023438,
      "logps/chosen": -0.29733315110206604,
      "logps/rejected": -0.3461209237575531,
      "loss": 0.9694,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.44599977135658264,
      "rewards/margins": 0.0731816366314888,
      "rewards/rejected": -0.5191814303398132,
      "step": 170
    },
    {
      "epoch": 0.28251912889935255,
      "grad_norm": 0.155483216047287,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 16.339645385742188,
      "logits/rejected": 16.115110397338867,
      "logps/chosen": -0.3076801002025604,
      "logps/rejected": -0.3655286729335785,
      "loss": 0.9488,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.4615201950073242,
      "rewards/margins": 0.0867728441953659,
      "rewards/rejected": -0.5482929944992065,
      "step": 180
    },
    {
      "epoch": 0.2982146360604277,
      "grad_norm": 0.21345795691013336,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 16.491886138916016,
      "logits/rejected": 16.376684188842773,
      "logps/chosen": -0.32437095046043396,
      "logps/rejected": -0.39715105295181274,
      "loss": 0.9423,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.4865564703941345,
      "rewards/margins": 0.10917013883590698,
      "rewards/rejected": -0.5957266092300415,
      "step": 190
    },
    {
      "epoch": 0.31391014322150285,
      "grad_norm": 0.17633090913295746,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 16.6339168548584,
      "logits/rejected": 16.79404640197754,
      "logps/chosen": -0.33018192648887634,
      "logps/rejected": -0.384196400642395,
      "loss": 0.939,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.4952728748321533,
      "rewards/margins": 0.0810217633843422,
      "rewards/rejected": -0.5762946009635925,
      "step": 200
    },
    {
      "epoch": 0.31391014322150285,
      "eval_logits/chosen": 17.17803192138672,
      "eval_logits/rejected": 16.59328269958496,
      "eval_logps/chosen": -0.33600664138793945,
      "eval_logps/rejected": -0.47861453890800476,
      "eval_loss": 0.9303967356681824,
      "eval_rewards/accuracies": 0.4615384638309479,
      "eval_rewards/chosen": -0.5040098428726196,
      "eval_rewards/margins": 0.21391186118125916,
      "eval_rewards/rejected": -0.717921793460846,
      "eval_runtime": 14.5,
      "eval_samples_per_second": 28.414,
      "eval_steps_per_second": 3.586,
      "step": 200
    },
    {
      "epoch": 0.329605650382578,
      "grad_norm": 0.1562221795320511,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 16.597665786743164,
      "logits/rejected": 16.3507022857666,
      "logps/chosen": -0.33709320425987244,
      "logps/rejected": -0.43131333589553833,
      "loss": 0.9107,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.5056397914886475,
      "rewards/margins": 0.14133022725582123,
      "rewards/rejected": -0.6469700932502747,
      "step": 210
    },
    {
      "epoch": 0.34530115754365315,
      "grad_norm": 0.17680124938488007,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 16.873676300048828,
      "logits/rejected": 16.61945152282715,
      "logps/chosen": -0.35083168745040894,
      "logps/rejected": -0.537697434425354,
      "loss": 0.9094,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.526247501373291,
      "rewards/margins": 0.2802986204624176,
      "rewards/rejected": -0.8065462112426758,
      "step": 220
    },
    {
      "epoch": 0.3609966647047283,
      "grad_norm": 0.2662070393562317,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 16.803974151611328,
      "logits/rejected": 16.698320388793945,
      "logps/chosen": -0.3500753343105316,
      "logps/rejected": -0.5399882793426514,
      "loss": 0.8871,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.5251129865646362,
      "rewards/margins": 0.2848694324493408,
      "rewards/rejected": -0.8099824786186218,
      "step": 230
    },
    {
      "epoch": 0.3766921718658034,
      "grad_norm": 0.17161667346954346,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 16.67904281616211,
      "logits/rejected": 16.410579681396484,
      "logps/chosen": -0.33549198508262634,
      "logps/rejected": -0.4875665605068207,
      "loss": 0.9037,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.5032380223274231,
      "rewards/margins": 0.22811183333396912,
      "rewards/rejected": -0.7313498258590698,
      "step": 240
    },
    {
      "epoch": 0.39238767902687854,
      "grad_norm": 1.106021761894226,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 17.208127975463867,
      "logits/rejected": 16.654085159301758,
      "logps/chosen": -0.3752726912498474,
      "logps/rejected": -0.4947708249092102,
      "loss": 0.8606,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.5629090070724487,
      "rewards/margins": 0.1792471706867218,
      "rewards/rejected": -0.7421562671661377,
      "step": 250
    },
    {
      "epoch": 0.39238767902687854,
      "eval_logits/chosen": 17.32963752746582,
      "eval_logits/rejected": 16.589412689208984,
      "eval_logps/chosen": -0.37825876474380493,
      "eval_logps/rejected": -0.9001243114471436,
      "eval_loss": 0.8168494701385498,
      "eval_rewards/accuracies": 0.5192307829856873,
      "eval_rewards/chosen": -0.5673881769180298,
      "eval_rewards/margins": 0.7827982306480408,
      "eval_rewards/rejected": -1.3501865863800049,
      "eval_runtime": 14.5053,
      "eval_samples_per_second": 28.403,
      "eval_steps_per_second": 3.585,
      "step": 250
    },
    {
      "epoch": 0.4080831861879537,
      "grad_norm": 0.29772138595581055,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 16.713775634765625,
      "logits/rejected": 16.35211944580078,
      "logps/chosen": -0.3877524733543396,
      "logps/rejected": -0.8511163592338562,
      "loss": 0.8138,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.581628680229187,
      "rewards/margins": 0.6950457692146301,
      "rewards/rejected": -1.276674509048462,
      "step": 260
    },
    {
      "epoch": 0.42377869334902885,
      "grad_norm": 0.31860050559043884,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 17.09469985961914,
      "logits/rejected": 16.5472412109375,
      "logps/chosen": -0.47504258155822754,
      "logps/rejected": -1.3266533613204956,
      "loss": 0.7318,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.7125638723373413,
      "rewards/margins": 1.2774161100387573,
      "rewards/rejected": -1.9899799823760986,
      "step": 270
    },
    {
      "epoch": 0.439474200510104,
      "grad_norm": 0.6508163809776306,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 17.11003303527832,
      "logits/rejected": 16.6564998626709,
      "logps/chosen": -0.49357643723487854,
      "logps/rejected": -1.4481580257415771,
      "loss": 0.7599,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.7403645515441895,
      "rewards/margins": 1.4318726062774658,
      "rewards/rejected": -2.1722371578216553,
      "step": 280
    },
    {
      "epoch": 0.45516970767117915,
      "grad_norm": 0.32430580258369446,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 16.830989837646484,
      "logits/rejected": 16.384944915771484,
      "logps/chosen": -0.593712329864502,
      "logps/rejected": -1.7630856037139893,
      "loss": 0.7336,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.8905684351921082,
      "rewards/margins": 1.7540600299835205,
      "rewards/rejected": -2.6446282863616943,
      "step": 290
    },
    {
      "epoch": 0.47086521483225424,
      "grad_norm": 0.8555932641029358,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 16.72231674194336,
      "logits/rejected": 16.28726577758789,
      "logps/chosen": -0.5670709609985352,
      "logps/rejected": -2.0420775413513184,
      "loss": 0.6861,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.8506065607070923,
      "rewards/margins": 2.2125096321105957,
      "rewards/rejected": -3.0631160736083984,
      "step": 300
    },
    {
      "epoch": 0.47086521483225424,
      "eval_logits/chosen": 17.13121795654297,
      "eval_logits/rejected": 16.268341064453125,
      "eval_logps/chosen": -0.6842947602272034,
      "eval_logps/rejected": -2.119321584701538,
      "eval_loss": 0.7583853602409363,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": -1.026442289352417,
      "eval_rewards/margins": 2.1525399684906006,
      "eval_rewards/rejected": -3.1789822578430176,
      "eval_runtime": 14.5007,
      "eval_samples_per_second": 28.412,
      "eval_steps_per_second": 3.586,
      "step": 300
    },
    {
      "epoch": 0.4865607219933294,
      "grad_norm": 0.6059070825576782,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 17.147808074951172,
      "logits/rejected": 16.194652557373047,
      "logps/chosen": -0.8036400079727173,
      "logps/rejected": -2.289825201034546,
      "loss": 0.7163,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.2054599523544312,
      "rewards/margins": 2.229278087615967,
      "rewards/rejected": -3.4347376823425293,
      "step": 310
    },
    {
      "epoch": 0.5022562291544046,
      "grad_norm": 1.8073927164077759,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 16.94902992248535,
      "logits/rejected": 16.066068649291992,
      "logps/chosen": -1.198162317276001,
      "logps/rejected": -2.2922632694244385,
      "loss": 0.6894,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.7972434759140015,
      "rewards/margins": 1.6411516666412354,
      "rewards/rejected": -3.4383950233459473,
      "step": 320
    },
    {
      "epoch": 0.5179517363154797,
      "grad_norm": 3.746042490005493,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 16.31036376953125,
      "logits/rejected": 15.991762161254883,
      "logps/chosen": -1.6245781183242798,
      "logps/rejected": -2.553597927093506,
      "loss": 0.6607,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -2.4368672370910645,
      "rewards/margins": 1.3935294151306152,
      "rewards/rejected": -3.830397129058838,
      "step": 330
    },
    {
      "epoch": 0.5336472434765548,
      "grad_norm": 2.098111867904663,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 16.59554672241211,
      "logits/rejected": 15.915553092956543,
      "logps/chosen": -2.183227062225342,
      "logps/rejected": -3.4911434650421143,
      "loss": 0.6381,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.274840831756592,
      "rewards/margins": 1.9618743658065796,
      "rewards/rejected": -5.236715316772461,
      "step": 340
    },
    {
      "epoch": 0.5493427506376299,
      "grad_norm": 2.153958320617676,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 16.332544326782227,
      "logits/rejected": 15.691922187805176,
      "logps/chosen": -2.673710346221924,
      "logps/rejected": -3.8687057495117188,
      "loss": 0.5752,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -4.010565757751465,
      "rewards/margins": 1.792493224143982,
      "rewards/rejected": -5.8030595779418945,
      "step": 350
    },
    {
      "epoch": 0.5493427506376299,
      "eval_logits/chosen": 16.272428512573242,
      "eval_logits/rejected": 15.381678581237793,
      "eval_logps/chosen": -3.0390572547912598,
      "eval_logps/rejected": -4.695068836212158,
      "eval_loss": 0.5928590893745422,
      "eval_rewards/accuracies": 0.9230769276618958,
      "eval_rewards/chosen": -4.5585856437683105,
      "eval_rewards/margins": 2.4840168952941895,
      "eval_rewards/rejected": -7.0426025390625,
      "eval_runtime": 14.5008,
      "eval_samples_per_second": 28.412,
      "eval_steps_per_second": 3.586,
      "step": 350
    },
    {
      "epoch": 0.5650382577987051,
      "grad_norm": 1.5755672454833984,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 15.589811325073242,
      "logits/rejected": 14.780921936035156,
      "logps/chosen": -3.069565534591675,
      "logps/rejected": -4.581957817077637,
      "loss": 0.5275,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -4.604348182678223,
      "rewards/margins": 2.268587827682495,
      "rewards/rejected": -6.872936248779297,
      "step": 360
    },
    {
      "epoch": 0.5807337649597802,
      "grad_norm": 1.8776415586471558,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 15.172673225402832,
      "logits/rejected": 14.400335311889648,
      "logps/chosen": -3.9551639556884766,
      "logps/rejected": -5.95252799987793,
      "loss": 0.5113,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -5.932745933532715,
      "rewards/margins": 2.996046543121338,
      "rewards/rejected": -8.928792953491211,
      "step": 370
    },
    {
      "epoch": 0.5964292721208554,
      "grad_norm": 1.5507289171218872,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 14.8423433303833,
      "logits/rejected": 14.592633247375488,
      "logps/chosen": -3.7946255207061768,
      "logps/rejected": -5.3750481605529785,
      "loss": 0.4862,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -5.691938877105713,
      "rewards/margins": 2.370633125305176,
      "rewards/rejected": -8.062570571899414,
      "step": 380
    },
    {
      "epoch": 0.6121247792819305,
      "grad_norm": 3.4510324001312256,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 14.586801528930664,
      "logits/rejected": 14.031987190246582,
      "logps/chosen": -4.391470432281494,
      "logps/rejected": -6.2107672691345215,
      "loss": 0.5117,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -6.587205410003662,
      "rewards/margins": 2.72894549369812,
      "rewards/rejected": -9.316150665283203,
      "step": 390
    },
    {
      "epoch": 0.6278202864430057,
      "grad_norm": 3.3816750049591064,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 14.523185729980469,
      "logits/rejected": 14.266815185546875,
      "logps/chosen": -4.090173244476318,
      "logps/rejected": -5.582955837249756,
      "loss": 0.5182,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -6.135260581970215,
      "rewards/margins": 2.2391738891601562,
      "rewards/rejected": -8.374434471130371,
      "step": 400
    },
    {
      "epoch": 0.6278202864430057,
      "eval_logits/chosen": 15.142684936523438,
      "eval_logits/rejected": 14.101082801818848,
      "eval_logps/chosen": -3.866588830947876,
      "eval_logps/rejected": -6.100707530975342,
      "eval_loss": 0.48699691891670227,
      "eval_rewards/accuracies": 0.9615384340286255,
      "eval_rewards/chosen": -5.7998833656311035,
      "eval_rewards/margins": 3.351177215576172,
      "eval_rewards/rejected": -9.15106201171875,
      "eval_runtime": 14.5024,
      "eval_samples_per_second": 28.409,
      "eval_steps_per_second": 3.586,
      "step": 400
    },
    {
      "epoch": 0.6435157936040808,
      "grad_norm": 1.845595359802246,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": 14.54790210723877,
      "logits/rejected": 13.707855224609375,
      "logps/chosen": -4.112462043762207,
      "logps/rejected": -6.583975315093994,
      "loss": 0.5107,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -6.168692111968994,
      "rewards/margins": 3.707270383834839,
      "rewards/rejected": -9.875962257385254,
      "step": 410
    },
    {
      "epoch": 0.659211300765156,
      "grad_norm": 2.152916193008423,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": 14.153576850891113,
      "logits/rejected": 13.553201675415039,
      "logps/chosen": -4.3408918380737305,
      "logps/rejected": -6.828585147857666,
      "loss": 0.41,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -6.511338233947754,
      "rewards/margins": 3.7315402030944824,
      "rewards/rejected": -10.242877960205078,
      "step": 420
    },
    {
      "epoch": 0.6749068079262311,
      "grad_norm": 2.305433750152588,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": 13.992170333862305,
      "logits/rejected": 13.264738082885742,
      "logps/chosen": -4.259932518005371,
      "logps/rejected": -6.089646339416504,
      "loss": 0.4122,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -6.389898777008057,
      "rewards/margins": 2.7445709705352783,
      "rewards/rejected": -9.134469985961914,
      "step": 430
    },
    {
      "epoch": 0.6906023150873063,
      "grad_norm": 2.397674322128296,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": 13.975759506225586,
      "logits/rejected": 13.173799514770508,
      "logps/chosen": -4.2483391761779785,
      "logps/rejected": -6.487355709075928,
      "loss": 0.4397,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -6.3725080490112305,
      "rewards/margins": 3.3585267066955566,
      "rewards/rejected": -9.731034278869629,
      "step": 440
    },
    {
      "epoch": 0.7062978222483814,
      "grad_norm": 2.3987627029418945,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": 13.777560234069824,
      "logits/rejected": 12.936296463012695,
      "logps/chosen": -3.86059832572937,
      "logps/rejected": -6.495786190032959,
      "loss": 0.444,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -5.790897846221924,
      "rewards/margins": 3.952782392501831,
      "rewards/rejected": -9.743680000305176,
      "step": 450
    },
    {
      "epoch": 0.7062978222483814,
      "eval_logits/chosen": 14.397418975830078,
      "eval_logits/rejected": 13.271268844604492,
      "eval_logps/chosen": -3.9544241428375244,
      "eval_logps/rejected": -6.69989013671875,
      "eval_loss": 0.435256689786911,
      "eval_rewards/accuracies": 0.9615384340286255,
      "eval_rewards/chosen": -5.931635856628418,
      "eval_rewards/margins": 4.118198871612549,
      "eval_rewards/rejected": -10.049835205078125,
      "eval_runtime": 14.5029,
      "eval_samples_per_second": 28.408,
      "eval_steps_per_second": 3.585,
      "step": 450
    },
    {
      "epoch": 0.7219933294094566,
      "grad_norm": 2.3673465251922607,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": 13.67004680633545,
      "logits/rejected": 12.689542770385742,
      "logps/chosen": -4.037893295288086,
      "logps/rejected": -6.727287292480469,
      "loss": 0.3858,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -6.056839942932129,
      "rewards/margins": 4.034090995788574,
      "rewards/rejected": -10.09093189239502,
      "step": 460
    },
    {
      "epoch": 0.7376888365705316,
      "grad_norm": 2.9868297576904297,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": 13.124593734741211,
      "logits/rejected": 12.576837539672852,
      "logps/chosen": -4.324252605438232,
      "logps/rejected": -6.761715888977051,
      "loss": 0.363,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -6.486379146575928,
      "rewards/margins": 3.6561942100524902,
      "rewards/rejected": -10.142572402954102,
      "step": 470
    },
    {
      "epoch": 0.7533843437316068,
      "grad_norm": 2.613318920135498,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": 13.514410018920898,
      "logits/rejected": 12.838116645812988,
      "logps/chosen": -4.333093166351318,
      "logps/rejected": -6.704646110534668,
      "loss": 0.3274,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -6.499639987945557,
      "rewards/margins": 3.5573298931121826,
      "rewards/rejected": -10.056970596313477,
      "step": 480
    },
    {
      "epoch": 0.7690798508926819,
      "grad_norm": 2.1568610668182373,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": 13.456674575805664,
      "logits/rejected": 12.37132453918457,
      "logps/chosen": -4.630190849304199,
      "logps/rejected": -7.871635437011719,
      "loss": 0.3822,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -6.945285797119141,
      "rewards/margins": 4.862167835235596,
      "rewards/rejected": -11.807454109191895,
      "step": 490
    },
    {
      "epoch": 0.7847753580537571,
      "grad_norm": 2.516496419906616,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": 13.195734024047852,
      "logits/rejected": 12.557401657104492,
      "logps/chosen": -3.9327914714813232,
      "logps/rejected": -6.561183929443359,
      "loss": 0.352,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -5.899188041687012,
      "rewards/margins": 3.9425880908966064,
      "rewards/rejected": -9.841775894165039,
      "step": 500
    },
    {
      "epoch": 0.7847753580537571,
      "eval_logits/chosen": 13.891414642333984,
      "eval_logits/rejected": 12.734375,
      "eval_logps/chosen": -3.924809455871582,
      "eval_logps/rejected": -7.003909587860107,
      "eval_loss": 0.3888731002807617,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -5.887214183807373,
      "eval_rewards/margins": 4.618649482727051,
      "eval_rewards/rejected": -10.505864143371582,
      "eval_runtime": 14.5029,
      "eval_samples_per_second": 28.408,
      "eval_steps_per_second": 3.585,
      "step": 500
    },
    {
      "epoch": 0.8004708652148322,
      "grad_norm": 5.4663896560668945,
      "learning_rate": 3.7043841852542884e-06,
      "logits/chosen": 12.861404418945312,
      "logits/rejected": 11.9797945022583,
      "logps/chosen": -4.245055198669434,
      "logps/rejected": -7.063906669616699,
      "loss": 0.4009,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -6.36758279800415,
      "rewards/margins": 4.228276252746582,
      "rewards/rejected": -10.59585952758789,
      "step": 510
    },
    {
      "epoch": 0.8161663723759074,
      "grad_norm": 2.4403271675109863,
      "learning_rate": 3.658240087799655e-06,
      "logits/chosen": 12.84996223449707,
      "logits/rejected": 12.256246566772461,
      "logps/chosen": -4.2785773277282715,
      "logps/rejected": -7.003395080566406,
      "loss": 0.3327,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -6.417865753173828,
      "rewards/margins": 4.0872273445129395,
      "rewards/rejected": -10.505093574523926,
      "step": 520
    },
    {
      "epoch": 0.8318618795369825,
      "grad_norm": 3.596749782562256,
      "learning_rate": 3.611587947962319e-06,
      "logits/chosen": 13.3360595703125,
      "logits/rejected": 12.449459075927734,
      "logps/chosen": -4.8148322105407715,
      "logps/rejected": -7.221930503845215,
      "loss": 0.3255,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -7.222248077392578,
      "rewards/margins": 3.610647201538086,
      "rewards/rejected": -10.832895278930664,
      "step": 530
    },
    {
      "epoch": 0.8475573866980577,
      "grad_norm": 4.537969589233398,
      "learning_rate": 3.564448228912682e-06,
      "logits/chosen": 12.63983154296875,
      "logits/rejected": 12.042104721069336,
      "logps/chosen": -4.933573246002197,
      "logps/rejected": -7.656388282775879,
      "loss": 0.2941,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -7.400360107421875,
      "rewards/margins": 4.084224224090576,
      "rewards/rejected": -11.484583854675293,
      "step": 540
    },
    {
      "epoch": 0.8632528938591328,
      "grad_norm": 2.4124038219451904,
      "learning_rate": 3.516841607689501e-06,
      "logits/chosen": 13.053291320800781,
      "logits/rejected": 12.351530075073242,
      "logps/chosen": -5.196786403656006,
      "logps/rejected": -7.8461012840271,
      "loss": 0.3906,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -7.795179843902588,
      "rewards/margins": 3.9739716053009033,
      "rewards/rejected": -11.76915168762207,
      "step": 550
    },
    {
      "epoch": 0.8632528938591328,
      "eval_logits/chosen": 13.596405982971191,
      "eval_logits/rejected": 12.482269287109375,
      "eval_logps/chosen": -4.328949928283691,
      "eval_logps/rejected": -7.602427959442139,
      "eval_loss": 0.3577499985694885,
      "eval_rewards/accuracies": 0.9807692170143127,
      "eval_rewards/chosen": -6.493425369262695,
      "eval_rewards/margins": 4.910217761993408,
      "eval_rewards/rejected": -11.403642654418945,
      "eval_runtime": 14.5032,
      "eval_samples_per_second": 28.408,
      "eval_steps_per_second": 3.585,
      "step": 550
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3318133628534784e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}