{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5744768157570784,
  "eval_steps": 50,
  "global_step": 700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008206811653672548,
      "grad_norm": 0.06318386644124985,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": -2.1367907524108887,
      "logits/rejected": -2.4948182106018066,
      "logps/chosen": -0.291498601436615,
      "logps/rejected": -0.3196522295475006,
      "loss": 7.5728,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4372479021549225,
      "rewards/margins": 0.04223042353987694,
      "rewards/rejected": -0.47947829961776733,
      "step": 10
    },
    {
      "epoch": 0.016413623307345096,
      "grad_norm": 0.07310314476490021,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": -2.1456007957458496,
      "logits/rejected": -2.4455342292785645,
      "logps/chosen": -0.26213544607162476,
      "logps/rejected": -0.32332050800323486,
      "loss": 7.5298,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3932031989097595,
      "rewards/margins": 0.09177760779857635,
      "rewards/rejected": -0.4849807620048523,
      "step": 20
    },
    {
      "epoch": 0.024620434961017644,
      "grad_norm": 0.05936102196574211,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.0765950679779053,
      "logits/rejected": -2.485799789428711,
      "logps/chosen": -0.26631081104278564,
      "logps/rejected": -0.32647624611854553,
      "loss": 7.5208,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.39946624636650085,
      "rewards/margins": 0.09024813771247864,
      "rewards/rejected": -0.4897143840789795,
      "step": 30
    },
    {
      "epoch": 0.03282724661469019,
      "grad_norm": 0.08499134331941605,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": -2.0753884315490723,
      "logits/rejected": -2.441580295562744,
      "logps/chosen": -0.2749950885772705,
      "logps/rejected": -0.30180150270462036,
      "loss": 7.4229,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41249266266822815,
      "rewards/margins": 0.04020959883928299,
      "rewards/rejected": -0.45270222425460815,
      "step": 40
    },
    {
      "epoch": 0.04103405826836274,
      "grad_norm": 0.07681389898061752,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": -2.145660877227783,
      "logits/rejected": -2.465946912765503,
      "logps/chosen": -0.24909739196300507,
      "logps/rejected": -0.2796121835708618,
      "loss": 7.4811,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.373646080493927,
      "rewards/margins": 0.045772187411785126,
      "rewards/rejected": -0.41941824555397034,
      "step": 50
    },
    {
      "epoch": 0.04103405826836274,
      "eval_logits/chosen": -2.012000799179077,
      "eval_logits/rejected": -2.5381252765655518,
      "eval_logps/chosen": -0.24157460033893585,
      "eval_logps/rejected": -0.2957758605480194,
      "eval_loss": 0.9317650198936462,
      "eval_rewards/accuracies": 0.5252525210380554,
      "eval_rewards/chosen": -0.3623619079589844,
      "eval_rewards/margins": 0.08130191266536713,
      "eval_rewards/rejected": -0.4436637759208679,
      "eval_runtime": 26.0809,
      "eval_samples_per_second": 30.214,
      "eval_steps_per_second": 3.796,
      "step": 50
    },
    {
      "epoch": 0.04924086992203529,
      "grad_norm": 0.06638535112142563,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": -2.145846128463745,
      "logits/rejected": -2.4077115058898926,
      "logps/chosen": -0.22265203297138214,
      "logps/rejected": -0.30774614214897156,
      "loss": 7.4605,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.3339780271053314,
      "rewards/margins": 0.1276412308216095,
      "rewards/rejected": -0.4616192877292633,
      "step": 60
    },
    {
      "epoch": 0.057447681575707836,
      "grad_norm": 0.057281140238046646,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": -2.0021350383758545,
      "logits/rejected": -2.4299912452697754,
      "logps/chosen": -0.23488977551460266,
      "logps/rejected": -0.33270469307899475,
      "loss": 7.4257,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.3523346781730652,
      "rewards/margins": 0.14672236144542694,
      "rewards/rejected": -0.4990570545196533,
      "step": 70
    },
    {
      "epoch": 0.06565449322938038,
      "grad_norm": 0.07725922018289566,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": -2.117995023727417,
      "logits/rejected": -2.359265089035034,
      "logps/chosen": -0.21598832309246063,
      "logps/rejected": -0.300583153963089,
      "loss": 7.4384,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.32398244738578796,
      "rewards/margins": 0.12689228355884552,
      "rewards/rejected": -0.4508747458457947,
      "step": 80
    },
    {
      "epoch": 0.07386130488305294,
      "grad_norm": 0.0598183274269104,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": -2.282627582550049,
      "logits/rejected": -2.441333532333374,
      "logps/chosen": -0.23655852675437927,
      "logps/rejected": -0.3246815800666809,
      "loss": 7.4584,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3548378050327301,
      "rewards/margins": 0.13218457996845245,
      "rewards/rejected": -0.48702239990234375,
      "step": 90
    },
    {
      "epoch": 0.08206811653672548,
      "grad_norm": 0.058213479816913605,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": -2.1114468574523926,
      "logits/rejected": -2.5035691261291504,
      "logps/chosen": -0.23073866963386536,
      "logps/rejected": -0.29445192217826843,
      "loss": 7.4116,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.34610801935195923,
      "rewards/margins": 0.09556989371776581,
      "rewards/rejected": -0.44167789816856384,
      "step": 100
    },
    {
      "epoch": 0.08206811653672548,
      "eval_logits/chosen": -2.0183491706848145,
      "eval_logits/rejected": -2.5400593280792236,
      "eval_logps/chosen": -0.20393377542495728,
      "eval_logps/rejected": -0.2818409502506256,
      "eval_loss": 0.9129964113235474,
      "eval_rewards/accuracies": 0.5656565427780151,
      "eval_rewards/chosen": -0.3059006631374359,
      "eval_rewards/margins": 0.11686072498559952,
      "eval_rewards/rejected": -0.4227614104747772,
      "eval_runtime": 26.0825,
      "eval_samples_per_second": 30.212,
      "eval_steps_per_second": 3.796,
      "step": 100
    },
    {
      "epoch": 0.09027492819039803,
      "grad_norm": 0.06249881908297539,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": -2.1324548721313477,
      "logits/rejected": -2.434319019317627,
      "logps/chosen": -0.22180762887001038,
      "logps/rejected": -0.28862181305885315,
      "loss": 7.3604,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.332711398601532,
      "rewards/margins": 0.10022131353616714,
      "rewards/rejected": -0.43293270468711853,
      "step": 110
    },
    {
      "epoch": 0.09848173984407058,
      "grad_norm": 0.061758093535900116,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": -2.0597169399261475,
      "logits/rejected": -2.4386391639709473,
      "logps/chosen": -0.22720107436180115,
      "logps/rejected": -0.303659051656723,
      "loss": 7.3624,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3408016264438629,
      "rewards/margins": 0.1146869882941246,
      "rewards/rejected": -0.4554885923862457,
      "step": 120
    },
    {
      "epoch": 0.10668855149774313,
      "grad_norm": 0.08368540555238724,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": -2.0944437980651855,
      "logits/rejected": -2.4157254695892334,
      "logps/chosen": -0.19590887427330017,
      "logps/rejected": -0.3365771770477295,
      "loss": 7.3464,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.29386329650878906,
      "rewards/margins": 0.2110024392604828,
      "rewards/rejected": -0.5048657655715942,
      "step": 130
    },
    {
      "epoch": 0.11489536315141567,
      "grad_norm": 0.060954928398132324,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": -2.1551766395568848,
      "logits/rejected": -2.5695576667785645,
      "logps/chosen": -0.19875812530517578,
      "logps/rejected": -0.2967599928379059,
      "loss": 7.3179,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.29813718795776367,
      "rewards/margins": 0.14700281620025635,
      "rewards/rejected": -0.4451400339603424,
      "step": 140
    },
    {
      "epoch": 0.12310217480508823,
      "grad_norm": 0.05665091797709465,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.1410365104675293,
      "logits/rejected": -2.4798667430877686,
      "logps/chosen": -0.19316771626472473,
      "logps/rejected": -0.2972142696380615,
      "loss": 7.2384,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.2897515296936035,
      "rewards/margins": 0.15606984496116638,
      "rewards/rejected": -0.4458213746547699,
      "step": 150
    },
    {
      "epoch": 0.12310217480508823,
      "eval_logits/chosen": -2.0654072761535645,
      "eval_logits/rejected": -2.596571207046509,
      "eval_logps/chosen": -0.17970335483551025,
      "eval_logps/rejected": -0.2767573893070221,
      "eval_loss": 0.8982937335968018,
      "eval_rewards/accuracies": 0.5858585834503174,
      "eval_rewards/chosen": -0.269555002450943,
      "eval_rewards/margins": 0.14558106660842896,
      "eval_rewards/rejected": -0.41513609886169434,
      "eval_runtime": 26.0741,
      "eval_samples_per_second": 30.222,
      "eval_steps_per_second": 3.797,
      "step": 150
    },
    {
      "epoch": 0.13130898645876077,
      "grad_norm": 0.07328196614980698,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": -2.202148914337158,
      "logits/rejected": -2.5385117530822754,
      "logps/chosen": -0.19814102351665497,
      "logps/rejected": -0.3139093518257141,
      "loss": 7.1309,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.29721152782440186,
      "rewards/margins": 0.1736525148153305,
      "rewards/rejected": -0.4708639979362488,
      "step": 160
    },
    {
      "epoch": 0.1395157981124333,
      "grad_norm": 0.09789691120386124,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": -2.153545618057251,
      "logits/rejected": -2.532336950302124,
      "logps/chosen": -0.1861150860786438,
      "logps/rejected": -0.2787100672721863,
      "loss": 7.2498,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.2791725993156433,
      "rewards/margins": 0.13889247179031372,
      "rewards/rejected": -0.4180651605129242,
      "step": 170
    },
    {
      "epoch": 0.14772260976610588,
      "grad_norm": 0.0829203873872757,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": -2.254868268966675,
      "logits/rejected": -2.5931999683380127,
      "logps/chosen": -0.19771653413772583,
      "logps/rejected": -0.2859548032283783,
      "loss": 7.1449,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.29657480120658875,
      "rewards/margins": 0.1323573738336563,
      "rewards/rejected": -0.42893218994140625,
      "step": 180
    },
    {
      "epoch": 0.15592942141977842,
      "grad_norm": 0.10499900579452515,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": -2.2066543102264404,
      "logits/rejected": -2.594515323638916,
      "logps/chosen": -0.19944152235984802,
      "logps/rejected": -0.26530706882476807,
      "loss": 7.1717,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.29916223883628845,
      "rewards/margins": 0.09879834204912186,
      "rewards/rejected": -0.3979606032371521,
      "step": 190
    },
    {
      "epoch": 0.16413623307345096,
      "grad_norm": 0.10522742569446564,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": -2.282865047454834,
      "logits/rejected": -2.651233196258545,
      "logps/chosen": -0.1877971738576889,
      "logps/rejected": -0.28373947739601135,
      "loss": 7.1001,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.28169578313827515,
      "rewards/margins": 0.14391344785690308,
      "rewards/rejected": -0.4256092607975006,
      "step": 200
    },
    {
      "epoch": 0.16413623307345096,
      "eval_logits/chosen": -2.1934518814086914,
      "eval_logits/rejected": -2.772143840789795,
      "eval_logps/chosen": -0.1790025532245636,
      "eval_logps/rejected": -0.3032245934009552,
      "eval_loss": 0.8807509541511536,
      "eval_rewards/accuracies": 0.5858585834503174,
      "eval_rewards/chosen": -0.2685038149356842,
      "eval_rewards/margins": 0.1863330751657486,
      "eval_rewards/rejected": -0.454836905002594,
      "eval_runtime": 26.0786,
      "eval_samples_per_second": 30.216,
      "eval_steps_per_second": 3.796,
      "step": 200
    },
    {
      "epoch": 0.1723430447271235,
      "grad_norm": 0.13399213552474976,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": -2.3500285148620605,
      "logits/rejected": -2.6827149391174316,
      "logps/chosen": -0.17780962586402893,
      "logps/rejected": -0.2749634087085724,
      "loss": 7.0722,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.266714483499527,
      "rewards/margins": 0.145730659365654,
      "rewards/rejected": -0.41244515776634216,
      "step": 210
    },
    {
      "epoch": 0.18054985638079607,
      "grad_norm": 0.1727023422718048,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": -2.356797695159912,
      "logits/rejected": -2.7276604175567627,
      "logps/chosen": -0.20826168358325958,
      "logps/rejected": -0.3760753273963928,
      "loss": 7.0205,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.31239253282546997,
      "rewards/margins": 0.2517204284667969,
      "rewards/rejected": -0.5641129016876221,
      "step": 220
    },
    {
      "epoch": 0.1887566680344686,
      "grad_norm": 0.14360135793685913,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": -2.3526904582977295,
      "logits/rejected": -2.785292148590088,
      "logps/chosen": -0.22017621994018555,
      "logps/rejected": -0.35291892290115356,
      "loss": 6.9773,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.3302643299102783,
      "rewards/margins": 0.1991141140460968,
      "rewards/rejected": -0.5293784737586975,
      "step": 230
    },
    {
      "epoch": 0.19696347968814115,
      "grad_norm": 0.18886974453926086,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": -2.3618314266204834,
      "logits/rejected": -2.850059747695923,
      "logps/chosen": -0.22531962394714355,
      "logps/rejected": -0.38331112265586853,
      "loss": 6.9879,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.3379794657230377,
      "rewards/margins": 0.23698726296424866,
      "rewards/rejected": -0.5749667286872864,
      "step": 240
    },
    {
      "epoch": 0.2051702913418137,
      "grad_norm": 0.2599099278450012,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": -2.4776885509490967,
      "logits/rejected": -2.8583390712738037,
      "logps/chosen": -0.2538486123085022,
      "logps/rejected": -0.42415136098861694,
      "loss": 6.7357,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3807729184627533,
      "rewards/margins": 0.2554541230201721,
      "rewards/rejected": -0.636227011680603,
      "step": 250
    },
    {
      "epoch": 0.2051702913418137,
      "eval_logits/chosen": -2.3788387775421143,
      "eval_logits/rejected": -2.958366632461548,
      "eval_logps/chosen": -0.2162775695323944,
      "eval_logps/rejected": -0.41066110134124756,
      "eval_loss": 0.8405817747116089,
      "eval_rewards/accuracies": 0.6060606241226196,
      "eval_rewards/chosen": -0.3244163393974304,
      "eval_rewards/margins": 0.2915753722190857,
      "eval_rewards/rejected": -0.6159917116165161,
      "eval_runtime": 26.0752,
      "eval_samples_per_second": 30.22,
      "eval_steps_per_second": 3.797,
      "step": 250
    },
    {
      "epoch": 0.21337710299548626,
      "grad_norm": 0.25840890407562256,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": -2.5892271995544434,
      "logits/rejected": -2.819650888442993,
      "logps/chosen": -0.2257525473833084,
      "logps/rejected": -0.46280306577682495,
      "loss": 6.6048,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.3386288285255432,
      "rewards/margins": 0.3555757403373718,
      "rewards/rejected": -0.6942045092582703,
      "step": 260
    },
    {
      "epoch": 0.2215839146491588,
      "grad_norm": 0.2838613986968994,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": -2.536020040512085,
      "logits/rejected": -2.843383312225342,
      "logps/chosen": -0.2739175856113434,
      "logps/rejected": -0.5088076591491699,
      "loss": 6.6403,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.41087642312049866,
      "rewards/margins": 0.3523350656032562,
      "rewards/rejected": -0.7632113695144653,
      "step": 270
    },
    {
      "epoch": 0.22979072630283134,
      "grad_norm": 0.3575810194015503,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": -2.4525866508483887,
      "logits/rejected": -2.7756829261779785,
      "logps/chosen": -0.27604570984840393,
      "logps/rejected": -0.6104786992073059,
      "loss": 6.4767,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.4140685498714447,
      "rewards/margins": 0.5016494989395142,
      "rewards/rejected": -0.9157179594039917,
      "step": 280
    },
    {
      "epoch": 0.23799753795650389,
      "grad_norm": 0.36226338148117065,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": -2.4639816284179688,
      "logits/rejected": -2.865053415298462,
      "logps/chosen": -0.35306140780448914,
      "logps/rejected": -0.5840066075325012,
      "loss": 6.2071,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.5295921564102173,
      "rewards/margins": 0.3464178144931793,
      "rewards/rejected": -0.8760099411010742,
      "step": 290
    },
    {
      "epoch": 0.24620434961017645,
      "grad_norm": 0.38896313309669495,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": -2.6630337238311768,
      "logits/rejected": -2.7479195594787598,
      "logps/chosen": -0.3706950545310974,
      "logps/rejected": -0.7957242131233215,
      "loss": 6.1801,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.5560425519943237,
      "rewards/margins": 0.6375436782836914,
      "rewards/rejected": -1.1935861110687256,
      "step": 300
    },
    {
      "epoch": 0.24620434961017645,
      "eval_logits/chosen": -2.411334991455078,
      "eval_logits/rejected": -2.820974588394165,
      "eval_logps/chosen": -0.3725183308124542,
      "eval_logps/rejected": -0.8138000965118408,
      "eval_loss": 0.738965630531311,
      "eval_rewards/accuracies": 0.6060606241226196,
      "eval_rewards/chosen": -0.5587774515151978,
      "eval_rewards/margins": 0.6619227528572083,
      "eval_rewards/rejected": -1.2207001447677612,
      "eval_runtime": 26.0764,
      "eval_samples_per_second": 30.219,
      "eval_steps_per_second": 3.797,
      "step": 300
    },
    {
      "epoch": 0.254411161263849,
      "grad_norm": 0.5848517417907715,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": -2.5622355937957764,
      "logits/rejected": -2.7415950298309326,
      "logps/chosen": -0.38525494933128357,
      "logps/rejected": -0.8741232752799988,
      "loss": 5.98,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.5778824687004089,
      "rewards/margins": 0.7333025336265564,
      "rewards/rejected": -1.3111850023269653,
      "step": 310
    },
    {
      "epoch": 0.26261797291752154,
      "grad_norm": 0.38972222805023193,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": -2.6293787956237793,
      "logits/rejected": -2.7180721759796143,
      "logps/chosen": -0.5181100964546204,
      "logps/rejected": -0.97294682264328,
      "loss": 5.608,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.7771651744842529,
      "rewards/margins": 0.6822551488876343,
      "rewards/rejected": -1.4594202041625977,
      "step": 320
    },
    {
      "epoch": 0.2708247845711941,
      "grad_norm": 0.5381959080696106,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": -2.477749824523926,
      "logits/rejected": -2.7682888507843018,
      "logps/chosen": -0.47721824049949646,
      "logps/rejected": -1.0577385425567627,
      "loss": 5.5189,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.7158273458480835,
      "rewards/margins": 0.8707805871963501,
      "rewards/rejected": -1.5866079330444336,
      "step": 330
    },
    {
      "epoch": 0.2790315962248666,
      "grad_norm": 0.5332415699958801,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": -2.5470075607299805,
      "logits/rejected": -2.8264012336730957,
      "logps/chosen": -0.5053269267082214,
      "logps/rejected": -1.412097454071045,
      "loss": 5.3992,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.7579904198646545,
      "rewards/margins": 1.360155701637268,
      "rewards/rejected": -2.1181461811065674,
      "step": 340
    },
    {
      "epoch": 0.2872384078785392,
      "grad_norm": 0.5841536521911621,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": -2.5552780628204346,
      "logits/rejected": -2.7644314765930176,
      "logps/chosen": -0.6264504790306091,
      "logps/rejected": -1.4451416730880737,
      "loss": 5.0093,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.9396758079528809,
      "rewards/margins": 1.228036880493164,
      "rewards/rejected": -2.167712450027466,
      "step": 350
    },
    {
      "epoch": 0.2872384078785392,
      "eval_logits/chosen": -2.486525774002075,
      "eval_logits/rejected": -2.809356451034546,
      "eval_logps/chosen": -0.6259626746177673,
      "eval_logps/rejected": -1.6826657056808472,
      "eval_loss": 0.610858678817749,
      "eval_rewards/accuracies": 0.6464646458625793,
      "eval_rewards/chosen": -0.9389441013336182,
      "eval_rewards/margins": 1.585054636001587,
      "eval_rewards/rejected": -2.523998737335205,
      "eval_runtime": 26.0792,
      "eval_samples_per_second": 30.216,
      "eval_steps_per_second": 3.796,
      "step": 350
    },
    {
      "epoch": 0.29544521953221176,
      "grad_norm": 0.6259649395942688,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": -2.5630745887756348,
      "logits/rejected": -2.80169939994812,
      "logps/chosen": -0.6148477792739868,
      "logps/rejected": -1.7640241384506226,
      "loss": 5.0832,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.9222715497016907,
      "rewards/margins": 1.7237647771835327,
      "rewards/rejected": -2.646036148071289,
      "step": 360
    },
    {
      "epoch": 0.3036520311858843,
      "grad_norm": 0.5134413838386536,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": -2.5688040256500244,
      "logits/rejected": -2.823493242263794,
      "logps/chosen": -0.7328687906265259,
      "logps/rejected": -2.135953187942505,
      "loss": 4.484,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.0993033647537231,
      "rewards/margins": 2.104626178741455,
      "rewards/rejected": -3.203929901123047,
      "step": 370
    },
    {
      "epoch": 0.31185884283955684,
      "grad_norm": 0.5029065608978271,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": -2.507948160171509,
      "logits/rejected": -2.797893524169922,
      "logps/chosen": -0.8517419695854187,
      "logps/rejected": -2.6004090309143066,
      "loss": 4.3033,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.2776129245758057,
      "rewards/margins": 2.6230006217956543,
      "rewards/rejected": -3.900613307952881,
      "step": 380
    },
    {
      "epoch": 0.3200656544932294,
      "grad_norm": 0.6171831488609314,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": -2.709599018096924,
      "logits/rejected": -2.8980605602264404,
      "logps/chosen": -0.9357224702835083,
      "logps/rejected": -2.464841842651367,
      "loss": 4.6813,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.4035838842391968,
      "rewards/margins": 2.2936789989471436,
      "rewards/rejected": -3.6972625255584717,
      "step": 390
    },
    {
      "epoch": 0.3282724661469019,
      "grad_norm": 1.2649667263031006,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": -2.3837532997131348,
      "logits/rejected": -2.6686861515045166,
      "logps/chosen": -0.9314821362495422,
      "logps/rejected": -2.562440872192383,
      "loss": 4.3764,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.3972232341766357,
      "rewards/margins": 2.4464378356933594,
      "rewards/rejected": -3.843661069869995,
      "step": 400
    },
    {
      "epoch": 0.3282724661469019,
      "eval_logits/chosen": -2.3613698482513428,
      "eval_logits/rejected": -2.7536535263061523,
      "eval_logps/chosen": -0.8348632454872131,
      "eval_logps/rejected": -2.6590662002563477,
      "eval_loss": 0.5019229650497437,
      "eval_rewards/accuracies": 0.6767676472663879,
      "eval_rewards/chosen": -1.2522947788238525,
      "eval_rewards/margins": 2.736304759979248,
      "eval_rewards/rejected": -3.9885993003845215,
      "eval_runtime": 26.0816,
      "eval_samples_per_second": 30.213,
      "eval_steps_per_second": 3.796,
      "step": 400
    },
    {
      "epoch": 0.33647927780057446,
      "grad_norm": 0.509773313999176,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": -2.4433794021606445,
      "logits/rejected": -2.732313871383667,
      "logps/chosen": -1.0649070739746094,
      "logps/rejected": -3.0055129528045654,
      "loss": 4.0575,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.597360372543335,
      "rewards/margins": 2.9109084606170654,
      "rewards/rejected": -4.508269309997559,
      "step": 410
    },
    {
      "epoch": 0.344686089454247,
      "grad_norm": 1.3467975854873657,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": -2.296419858932495,
      "logits/rejected": -2.650160551071167,
      "logps/chosen": -1.1074317693710327,
      "logps/rejected": -3.8539538383483887,
      "loss": 3.9082,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.6611478328704834,
      "rewards/margins": 4.119783401489258,
      "rewards/rejected": -5.780930519104004,
      "step": 420
    },
    {
      "epoch": 0.3528929011079196,
      "grad_norm": 0.576802134513855,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": -2.4122748374938965,
      "logits/rejected": -2.657745122909546,
      "logps/chosen": -1.4457646608352661,
      "logps/rejected": -3.8541057109832764,
      "loss": 3.6948,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -2.168646812438965,
      "rewards/margins": 3.612511396408081,
      "rewards/rejected": -5.781157970428467,
      "step": 430
    },
    {
      "epoch": 0.36109971276159214,
      "grad_norm": 1.8899520635604858,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": -2.380706310272217,
      "logits/rejected": -2.628760576248169,
      "logps/chosen": -1.5100795030593872,
      "logps/rejected": -4.016777515411377,
      "loss": 3.7709,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -2.2651190757751465,
      "rewards/margins": 3.760047197341919,
      "rewards/rejected": -6.0251665115356445,
      "step": 440
    },
    {
      "epoch": 0.3693065244152647,
      "grad_norm": 1.3820478916168213,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": -2.3203773498535156,
      "logits/rejected": -2.7199389934539795,
      "logps/chosen": -1.7879537343978882,
      "logps/rejected": -4.4142656326293945,
      "loss": 3.6878,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -2.6819303035736084,
      "rewards/margins": 3.9394683837890625,
      "rewards/rejected": -6.621399879455566,
      "step": 450
    },
    {
      "epoch": 0.3693065244152647,
      "eval_logits/chosen": -2.3454010486602783,
      "eval_logits/rejected": -2.7509803771972656,
      "eval_logps/chosen": -2.039729595184326,
      "eval_logps/rejected": -4.705678462982178,
      "eval_loss": 0.43350929021835327,
      "eval_rewards/accuracies": 0.8383838534355164,
      "eval_rewards/chosen": -3.05959415435791,
      "eval_rewards/margins": 3.9989237785339355,
      "eval_rewards/rejected": -7.058517932891846,
      "eval_runtime": 26.0827,
      "eval_samples_per_second": 30.212,
      "eval_steps_per_second": 3.796,
      "step": 450
    },
    {
      "epoch": 0.3775133360689372,
      "grad_norm": 1.2694813013076782,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": -2.419553279876709,
      "logits/rejected": -2.722438335418701,
      "logps/chosen": -2.116955280303955,
      "logps/rejected": -4.574510097503662,
      "loss": 3.5762,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -3.1754324436187744,
      "rewards/margins": 3.6863322257995605,
      "rewards/rejected": -6.861765384674072,
      "step": 460
    },
    {
      "epoch": 0.38572014772260976,
      "grad_norm": 2.1713900566101074,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": -2.509795904159546,
      "logits/rejected": -2.8286545276641846,
      "logps/chosen": -3.043957233428955,
      "logps/rejected": -5.701972007751465,
      "loss": 3.3953,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -4.565936088562012,
      "rewards/margins": 3.9870212078094482,
      "rewards/rejected": -8.552957534790039,
      "step": 470
    },
    {
      "epoch": 0.3939269593762823,
      "grad_norm": 1.7294141054153442,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": -2.5797224044799805,
      "logits/rejected": -2.8154430389404297,
      "logps/chosen": -3.6535918712615967,
      "logps/rejected": -6.4350786209106445,
      "loss": 2.8405,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -5.4803876876831055,
      "rewards/margins": 4.172229290008545,
      "rewards/rejected": -9.652616500854492,
      "step": 480
    },
    {
      "epoch": 0.40213377102995485,
      "grad_norm": 2.604421615600586,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": -2.658399820327759,
      "logits/rejected": -2.8255085945129395,
      "logps/chosen": -5.536412239074707,
      "logps/rejected": -7.725207328796387,
      "loss": 2.5705,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -8.304617881774902,
      "rewards/margins": 3.2831923961639404,
      "rewards/rejected": -11.587809562683105,
      "step": 490
    },
    {
      "epoch": 0.4103405826836274,
      "grad_norm": 3.06144118309021,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": -2.4845776557922363,
      "logits/rejected": -2.7703354358673096,
      "logps/chosen": -5.598423004150391,
      "logps/rejected": -8.852571487426758,
      "loss": 2.6946,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -8.397635459899902,
      "rewards/margins": 4.881222724914551,
      "rewards/rejected": -13.27885913848877,
      "step": 500
    },
    {
      "epoch": 0.4103405826836274,
      "eval_logits/chosen": -2.3113462924957275,
      "eval_logits/rejected": -2.7164077758789062,
      "eval_logps/chosen": -5.1130757331848145,
      "eval_logps/rejected": -9.016916275024414,
      "eval_loss": 0.28886228799819946,
      "eval_rewards/accuracies": 0.9090909361839294,
      "eval_rewards/chosen": -7.669614791870117,
      "eval_rewards/margins": 5.8557610511779785,
      "eval_rewards/rejected": -13.525375366210938,
      "eval_runtime": 26.082,
      "eval_samples_per_second": 30.212,
      "eval_steps_per_second": 3.796,
      "step": 500
    },
    {
      "epoch": 0.4185473943373,
      "grad_norm": 6.627122402191162,
      "learning_rate": 3.7043841852542884e-06,
      "logits/chosen": -2.3907535076141357,
      "logits/rejected": -2.667914390563965,
      "logps/chosen": -5.433152198791504,
      "logps/rejected": -8.439302444458008,
      "loss": 2.0307,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -8.149726867675781,
      "rewards/margins": 4.5092267990112305,
      "rewards/rejected": -12.658953666687012,
      "step": 510
    },
    {
      "epoch": 0.4267542059909725,
      "grad_norm": 4.345485687255859,
      "learning_rate": 3.658240087799655e-06,
      "logits/chosen": -2.4811301231384277,
      "logits/rejected": -2.8047549724578857,
      "logps/chosen": -6.806387424468994,
      "logps/rejected": -11.425605773925781,
      "loss": 1.9227,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -10.209580421447754,
      "rewards/margins": 6.92882776260376,
      "rewards/rejected": -17.13840675354004,
      "step": 520
    },
    {
      "epoch": 0.43496101764464506,
      "grad_norm": 3.9401891231536865,
      "learning_rate": 3.611587947962319e-06,
      "logits/chosen": -2.5136146545410156,
      "logits/rejected": -2.815864086151123,
      "logps/chosen": -8.350787162780762,
      "logps/rejected": -13.539543151855469,
      "loss": 2.1664,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -12.526180267333984,
      "rewards/margins": 7.783134460449219,
      "rewards/rejected": -20.30931282043457,
      "step": 530
    },
    {
      "epoch": 0.4431678292983176,
      "grad_norm": 8.136115074157715,
      "learning_rate": 3.564448228912682e-06,
      "logits/chosen": -2.3747506141662598,
      "logits/rejected": -2.767707586288452,
      "logps/chosen": -7.152952671051025,
      "logps/rejected": -12.549365043640137,
      "loss": 1.9159,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -10.729429244995117,
      "rewards/margins": 8.094616889953613,
      "rewards/rejected": -18.824045181274414,
      "step": 540
    },
    {
      "epoch": 0.45137464095199015,
      "grad_norm": 4.128848552703857,
      "learning_rate": 3.516841607689501e-06,
      "logits/chosen": -2.4747514724731445,
      "logits/rejected": -2.753988265991211,
      "logps/chosen": -7.145654201507568,
      "logps/rejected": -11.649099349975586,
      "loss": 1.7838,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -10.718483924865723,
      "rewards/margins": 6.755165100097656,
      "rewards/rejected": -17.473648071289062,
      "step": 550
    },
    {
      "epoch": 0.45137464095199015,
      "eval_logits/chosen": -2.358137845993042,
      "eval_logits/rejected": -2.7661235332489014,
      "eval_logps/chosen": -6.704189300537109,
      "eval_logps/rejected": -12.318349838256836,
      "eval_loss": 0.2348441481590271,
      "eval_rewards/accuracies": 0.9292929172515869,
      "eval_rewards/chosen": -10.056282997131348,
      "eval_rewards/margins": 8.42124080657959,
      "eval_rewards/rejected": -18.477523803710938,
      "eval_runtime": 26.0731,
      "eval_samples_per_second": 30.223,
      "eval_steps_per_second": 3.797,
      "step": 550
    },
    {
      "epoch": 0.4595814526056627,
      "grad_norm": 6.159413814544678,
      "learning_rate": 3.4687889661302577e-06,
      "logits/chosen": -2.4763951301574707,
      "logits/rejected": -2.7941336631774902,
      "logps/chosen": -7.845038414001465,
      "logps/rejected": -14.3870210647583,
      "loss": 1.834,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -11.767557144165039,
      "rewards/margins": 9.81297492980957,
      "rewards/rejected": -21.58053207397461,
      "step": 560
    },
    {
      "epoch": 0.46778826425933523,
      "grad_norm": 3.0514960289001465,
      "learning_rate": 3.4203113817116955e-06,
      "logits/chosen": -2.4743447303771973,
      "logits/rejected": -2.7862396240234375,
      "logps/chosen": -8.275420188903809,
      "logps/rejected": -16.52743911743164,
      "loss": 1.4036,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -12.413130760192871,
      "rewards/margins": 12.378029823303223,
      "rewards/rejected": -24.791160583496094,
      "step": 570
    },
    {
      "epoch": 0.47599507591300777,
      "grad_norm": 7.868257999420166,
      "learning_rate": 3.3714301183045382e-06,
      "logits/chosen": -2.4062681198120117,
      "logits/rejected": -2.616164445877075,
      "logps/chosen": -8.249846458435059,
      "logps/rejected": -14.887449264526367,
      "loss": 1.7889,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -12.37476921081543,
      "rewards/margins": 9.956401824951172,
      "rewards/rejected": -22.3311710357666,
      "step": 580
    },
    {
      "epoch": 0.48420188756668037,
      "grad_norm": 2.8784306049346924,
      "learning_rate": 3.3221666168464584e-06,
      "logits/chosen": -2.447603702545166,
      "logits/rejected": -2.6810977458953857,
      "logps/chosen": -7.720141410827637,
      "logps/rejected": -14.280428886413574,
      "loss": 1.4502,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -11.580211639404297,
      "rewards/margins": 9.840431213378906,
      "rewards/rejected": -21.420642852783203,
      "step": 590
    },
    {
      "epoch": 0.4924086992203529,
      "grad_norm": 3.972952127456665,
      "learning_rate": 3.272542485937369e-06,
      "logits/chosen": -2.3675425052642822,
      "logits/rejected": -2.7083656787872314,
      "logps/chosen": -7.870957851409912,
      "logps/rejected": -15.028215408325195,
      "loss": 1.338,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -11.806436538696289,
      "rewards/margins": 10.73588752746582,
      "rewards/rejected": -22.542322158813477,
      "step": 600
    },
    {
      "epoch": 0.4924086992203529,
      "eval_logits/chosen": -2.2910001277923584,
      "eval_logits/rejected": -2.656534194946289,
      "eval_logps/chosen": -8.230629920959473,
      "eval_logps/rejected": -15.021241188049316,
      "eval_loss": 0.21987247467041016,
      "eval_rewards/accuracies": 0.939393937587738,
      "eval_rewards/chosen": -12.345946311950684,
      "eval_rewards/margins": 10.185916900634766,
      "eval_rewards/rejected": -22.531862258911133,
      "eval_runtime": 26.0717,
      "eval_samples_per_second": 30.224,
      "eval_steps_per_second": 3.797,
      "step": 600
    },
    {
      "epoch": 0.5006155108740254,
      "grad_norm": 5.846790313720703,
      "learning_rate": 3.222579492361179e-06,
      "logits/chosen": -2.3496642112731934,
      "logits/rejected": -2.651423692703247,
      "logps/chosen": -9.013282775878906,
      "logps/rejected": -16.293872833251953,
      "loss": 1.6971,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -13.519923210144043,
      "rewards/margins": 10.920884132385254,
      "rewards/rejected": -24.440807342529297,
      "step": 610
    },
    {
      "epoch": 0.508822322527698,
      "grad_norm": 5.161929607391357,
      "learning_rate": 3.1722995515381644e-06,
      "logits/chosen": -2.323763132095337,
      "logits/rejected": -2.656510591506958,
      "logps/chosen": -8.339229583740234,
      "logps/rejected": -16.096925735473633,
      "loss": 0.9451,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -12.508844375610352,
      "rewards/margins": 11.636543273925781,
      "rewards/rejected": -24.145389556884766,
      "step": 620
    },
    {
      "epoch": 0.5170291341813705,
      "grad_norm": 6.016699314117432,
      "learning_rate": 3.121724717912138e-06,
      "logits/chosen": -2.457834482192993,
      "logits/rejected": -2.661825180053711,
      "logps/chosen": -9.72178840637207,
      "logps/rejected": -16.58643341064453,
      "loss": 1.3102,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -14.582681655883789,
      "rewards/margins": 10.296967506408691,
      "rewards/rejected": -24.879650115966797,
      "step": 630
    },
    {
      "epoch": 0.5252359458350431,
      "grad_norm": 4.966028213500977,
      "learning_rate": 3.0708771752766397e-06,
      "logits/chosen": -2.3414790630340576,
      "logits/rejected": -2.6370954513549805,
      "logps/chosen": -9.21303939819336,
      "logps/rejected": -16.45307731628418,
      "loss": 1.5846,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -13.819559097290039,
      "rewards/margins": 10.860057830810547,
      "rewards/rejected": -24.679615020751953,
      "step": 640
    },
    {
      "epoch": 0.5334427574887156,
      "grad_norm": 2.0964155197143555,
      "learning_rate": 3.019779227044398e-06,
      "logits/chosen": -2.370847225189209,
      "logits/rejected": -2.6393492221832275,
      "logps/chosen": -9.985071182250977,
      "logps/rejected": -16.840097427368164,
      "loss": 1.3448,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -14.977605819702148,
      "rewards/margins": 10.282541275024414,
      "rewards/rejected": -25.260149002075195,
      "step": 650
    },
    {
      "epoch": 0.5334427574887156,
      "eval_logits/chosen": -2.256930351257324,
      "eval_logits/rejected": -2.6202380657196045,
      "eval_logps/chosen": -8.08243465423584,
      "eval_logps/rejected": -15.596598625183105,
      "eval_loss": 0.19149567186832428,
      "eval_rewards/accuracies": 0.9292929172515869,
      "eval_rewards/chosen": -12.123653411865234,
      "eval_rewards/margins": 11.271244049072266,
      "eval_rewards/rejected": -23.394899368286133,
      "eval_runtime": 26.0758,
      "eval_samples_per_second": 30.22,
      "eval_steps_per_second": 3.797,
      "step": 650
    },
    {
      "epoch": 0.5416495691423882,
      "grad_norm": 24.455801010131836,
      "learning_rate": 2.9684532864643123e-06,
      "logits/chosen": -2.367436647415161,
      "logits/rejected": -2.587658405303955,
      "logps/chosen": -9.489423751831055,
      "logps/rejected": -17.4456844329834,
      "loss": 1.0519,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -14.234136581420898,
      "rewards/margins": 11.9343900680542,
      "rewards/rejected": -26.168527603149414,
      "step": 660
    },
    {
      "epoch": 0.5498563807960607,
      "grad_norm": 5.811470985412598,
      "learning_rate": 2.9169218667902562e-06,
      "logits/chosen": -2.348598003387451,
      "logits/rejected": -2.6428780555725098,
      "logps/chosen": -8.679718971252441,
      "logps/rejected": -17.370378494262695,
      "loss": 1.4414,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -13.01957893371582,
      "rewards/margins": 13.035990715026855,
      "rewards/rejected": -26.05556869506836,
      "step": 670
    },
    {
      "epoch": 0.5580631924497332,
      "grad_norm": 3.920671224594116,
      "learning_rate": 2.8652075714060296e-06,
      "logits/chosen": -2.2460615634918213,
      "logits/rejected": -2.688439130783081,
      "logps/chosen": -8.605902671813965,
      "logps/rejected": -17.995298385620117,
      "loss": 1.2588,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -12.908853530883789,
      "rewards/margins": 14.084096908569336,
      "rewards/rejected": -26.992950439453125,
      "step": 680
    },
    {
      "epoch": 0.5662700041034058,
      "grad_norm": 6.326926231384277,
      "learning_rate": 2.813333083910761e-06,
      "logits/chosen": -2.225262403488159,
      "logits/rejected": -2.5890913009643555,
      "logps/chosen": -8.476136207580566,
      "logps/rejected": -17.39042854309082,
      "loss": 1.0845,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -12.714203834533691,
      "rewards/margins": 13.371438980102539,
      "rewards/rejected": -26.085641860961914,
      "step": 690
    },
    {
      "epoch": 0.5744768157570784,
      "grad_norm": 40.18976593017578,
      "learning_rate": 2.761321158169134e-06,
      "logits/chosen": -2.3147075176239014,
      "logits/rejected": -2.6593971252441406,
      "logps/chosen": -9.569334983825684,
      "logps/rejected": -17.609041213989258,
      "loss": 1.112,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -14.35400104522705,
      "rewards/margins": 12.059560775756836,
      "rewards/rejected": -26.413562774658203,
      "step": 700
    },
    {
      "epoch": 0.5744768157570784,
      "eval_logits/chosen": -2.2287755012512207,
      "eval_logits/rejected": -2.61726450920105,
      "eval_logps/chosen": -8.035316467285156,
      "eval_logps/rejected": -16.38852310180664,
      "eval_loss": 0.15530110895633698,
      "eval_rewards/accuracies": 0.9494949579238892,
      "eval_rewards/chosen": -12.052973747253418,
      "eval_rewards/margins": 12.529810905456543,
      "eval_rewards/rejected": -24.582786560058594,
      "eval_runtime": 26.0758,
      "eval_samples_per_second": 30.22,
      "eval_steps_per_second": 3.797,
      "step": 700
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.62667310197008e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}