{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9989071038251366,
  "eval_steps": 400,
  "global_step": 457,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01092896174863388,
      "grad_norm": 358.5620562461214,
      "learning_rate": 1.0869565217391303e-07,
      "logits/chosen": -1.0113575458526611,
      "logits/rejected": -1.0064939260482788,
      "logps/chosen": -0.2803983986377716,
      "logps/rejected": -0.2860395908355713,
      "loss": 5.2315,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -2.8039839267730713,
      "rewards/margins": 0.056411754339933395,
      "rewards/rejected": -2.860395669937134,
      "semantic_entropy": 0.7518940567970276,
      "step": 5
    },
    {
      "epoch": 0.02185792349726776,
      "grad_norm": 233.11198507849488,
      "learning_rate": 2.1739130434782607e-07,
      "logits/chosen": -1.056563138961792,
      "logits/rejected": -1.0053507089614868,
      "logps/chosen": -0.2568749487400055,
      "logps/rejected": -0.27021342515945435,
      "loss": 5.206,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -2.568748950958252,
      "rewards/margins": 0.13338527083396912,
      "rewards/rejected": -2.702134370803833,
      "semantic_entropy": 0.7094504237174988,
      "step": 10
    },
    {
      "epoch": 0.03278688524590164,
      "grad_norm": 210.79380420075688,
      "learning_rate": 3.260869565217391e-07,
      "logits/chosen": -1.0091139078140259,
      "logits/rejected": -0.9631060361862183,
      "logps/chosen": -0.2674282491207123,
      "logps/rejected": -0.27336788177490234,
      "loss": 5.1278,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -2.6742825508117676,
      "rewards/margins": 0.05939612537622452,
      "rewards/rejected": -2.7336785793304443,
      "semantic_entropy": 0.7274739742279053,
      "step": 15
    },
    {
      "epoch": 0.04371584699453552,
      "grad_norm": 375.0299222500816,
      "learning_rate": 4.3478260869565214e-07,
      "logits/chosen": -0.9487798810005188,
      "logits/rejected": -0.8998070955276489,
      "logps/chosen": -0.2723819613456726,
      "logps/rejected": -0.28497135639190674,
      "loss": 5.222,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -2.7238194942474365,
      "rewards/margins": 0.1258939504623413,
      "rewards/rejected": -2.8497138023376465,
      "semantic_entropy": 0.7452942132949829,
      "step": 20
    },
    {
      "epoch": 0.0546448087431694,
      "grad_norm": 299.8105605992341,
      "learning_rate": 5.434782608695652e-07,
      "logits/chosen": -0.9505411386489868,
      "logits/rejected": -0.8759678602218628,
      "logps/chosen": -0.27557066082954407,
      "logps/rejected": -0.29382389783859253,
      "loss": 5.1223,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -2.755706548690796,
      "rewards/margins": 0.1825324147939682,
      "rewards/rejected": -2.9382388591766357,
      "semantic_entropy": 0.7546485662460327,
      "step": 25
    },
    {
      "epoch": 0.06557377049180328,
      "grad_norm": 300.511189676041,
      "learning_rate": 6.521739130434782e-07,
      "logits/chosen": -1.0557540655136108,
      "logits/rejected": -0.990186333656311,
      "logps/chosen": -0.267598032951355,
      "logps/rejected": -0.28400346636772156,
      "loss": 5.1993,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": -2.6759800910949707,
      "rewards/margins": 0.1640542596578598,
      "rewards/rejected": -2.8400347232818604,
      "semantic_entropy": 0.7248786091804504,
      "step": 30
    },
    {
      "epoch": 0.07650273224043716,
      "grad_norm": 186.14457687250982,
      "learning_rate": 7.608695652173913e-07,
      "logits/chosen": -1.0076847076416016,
      "logits/rejected": -0.9405019879341125,
      "logps/chosen": -0.2580474615097046,
      "logps/rejected": -0.279694139957428,
      "loss": 5.0812,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -2.580474615097046,
      "rewards/margins": 0.2164669930934906,
      "rewards/rejected": -2.7969415187835693,
      "semantic_entropy": 0.7201561331748962,
      "step": 35
    },
    {
      "epoch": 0.08743169398907104,
      "grad_norm": 223.67562662995005,
      "learning_rate": 8.695652173913043e-07,
      "logits/chosen": -0.9655061960220337,
      "logits/rejected": -0.9045132398605347,
      "logps/chosen": -0.2820321023464203,
      "logps/rejected": -0.2990434765815735,
      "loss": 5.3116,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": -2.820321559906006,
      "rewards/margins": 0.17011362314224243,
      "rewards/rejected": -2.9904348850250244,
      "semantic_entropy": 0.7597802877426147,
      "step": 40
    },
    {
      "epoch": 0.09836065573770492,
      "grad_norm": 131.00541589477967,
      "learning_rate": 9.782608695652173e-07,
      "logits/chosen": -1.0179851055145264,
      "logits/rejected": -0.9359539151191711,
      "logps/chosen": -0.2855134606361389,
      "logps/rejected": -0.3078162968158722,
      "loss": 4.919,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -2.8551342487335205,
      "rewards/margins": 0.22302868962287903,
      "rewards/rejected": -3.078162908554077,
      "semantic_entropy": 0.7599790096282959,
      "step": 45
    },
    {
      "epoch": 0.1092896174863388,
      "grad_norm": 360.5211336376954,
      "learning_rate": 9.997663088532014e-07,
      "logits/chosen": -0.9701619148254395,
      "logits/rejected": -0.887865424156189,
      "logps/chosen": -0.28138467669487,
      "logps/rejected": -0.28913217782974243,
      "loss": 5.1014,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -2.813847064971924,
      "rewards/margins": 0.07747481018304825,
      "rewards/rejected": -2.891321897506714,
      "semantic_entropy": 0.7510823607444763,
      "step": 50
    },
    {
      "epoch": 0.12021857923497267,
      "grad_norm": 162.42445549849919,
      "learning_rate": 9.98817312944725e-07,
      "logits/chosen": -1.0028311014175415,
      "logits/rejected": -0.8850187063217163,
      "logps/chosen": -0.27895885705947876,
      "logps/rejected": -0.3143305778503418,
      "loss": 4.8519,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -2.7895889282226562,
      "rewards/margins": 0.3537173271179199,
      "rewards/rejected": -3.143306255340576,
      "semantic_entropy": 0.7608937621116638,
      "step": 55
    },
    {
      "epoch": 0.13114754098360656,
      "grad_norm": 120.127890644715,
      "learning_rate": 9.971397915250336e-07,
      "logits/chosen": -1.0246554613113403,
      "logits/rejected": -0.9786936640739441,
      "logps/chosen": -0.26926669478416443,
      "logps/rejected": -0.3113505244255066,
      "loss": 4.6493,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -2.692667245864868,
      "rewards/margins": 0.42083826661109924,
      "rewards/rejected": -3.1135053634643555,
      "semantic_entropy": 0.7576217651367188,
      "step": 60
    },
    {
      "epoch": 0.14207650273224043,
      "grad_norm": 214.34275301709462,
      "learning_rate": 9.94736194623663e-07,
      "logits/chosen": -0.9859918355941772,
      "logits/rejected": -0.9181090593338013,
      "logps/chosen": -0.3084973096847534,
      "logps/rejected": -0.3399081826210022,
      "loss": 4.9942,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -3.084973096847534,
      "rewards/margins": 0.31410855054855347,
      "rewards/rejected": -3.3990814685821533,
      "semantic_entropy": 0.8060176968574524,
      "step": 65
    },
    {
      "epoch": 0.15300546448087432,
      "grad_norm": 544.9393207205336,
      "learning_rate": 9.916100327075037e-07,
      "logits/chosen": -0.9521551132202148,
      "logits/rejected": -0.9334642291069031,
      "logps/chosen": -0.29637694358825684,
      "logps/rejected": -0.3240143656730652,
      "loss": 4.7857,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -2.9637694358825684,
      "rewards/margins": 0.27637460827827454,
      "rewards/rejected": -3.2401435375213623,
      "semantic_entropy": 0.7712014317512512,
      "step": 70
    },
    {
      "epoch": 0.16393442622950818,
      "grad_norm": 123.2584567983735,
      "learning_rate": 9.877658715537428e-07,
      "logits/chosen": -0.9457874298095703,
      "logits/rejected": -0.9289388656616211,
      "logps/chosen": -0.31344500184059143,
      "logps/rejected": -0.3473803997039795,
      "loss": 4.8544,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -3.1344501972198486,
      "rewards/margins": 0.33935409784317017,
      "rewards/rejected": -3.473804473876953,
      "semantic_entropy": 0.8020299077033997,
      "step": 75
    },
    {
      "epoch": 0.17486338797814208,
      "grad_norm": 136.55956527115663,
      "learning_rate": 9.832093255815216e-07,
      "logits/chosen": -0.9409273266792297,
      "logits/rejected": -0.8797443509101868,
      "logps/chosen": -0.3109641373157501,
      "logps/rejected": -0.3346250057220459,
      "loss": 4.7853,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -3.1096413135528564,
      "rewards/margins": 0.23660895228385925,
      "rewards/rejected": -3.346250534057617,
      "semantic_entropy": 0.7786868810653687,
      "step": 80
    },
    {
      "epoch": 0.18579234972677597,
      "grad_norm": 202.99615488786125,
      "learning_rate": 9.779470496520441e-07,
      "logits/chosen": -0.9390329122543335,
      "logits/rejected": -0.89063560962677,
      "logps/chosen": -0.31133827567100525,
      "logps/rejected": -0.3710102438926697,
      "loss": 4.6394,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -3.1133828163146973,
      "rewards/margins": 0.5967196226119995,
      "rewards/rejected": -3.7101027965545654,
      "semantic_entropy": 0.8020746111869812,
      "step": 85
    },
    {
      "epoch": 0.19672131147540983,
      "grad_norm": 147.8274369377715,
      "learning_rate": 9.719867293491144e-07,
      "logits/chosen": -0.9997242093086243,
      "logits/rejected": -0.9218411445617676,
      "logps/chosen": -0.33917468786239624,
      "logps/rejected": -0.3771332800388336,
      "loss": 4.7466,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -3.391746997833252,
      "rewards/margins": 0.37958595156669617,
      "rewards/rejected": -3.7713329792022705,
      "semantic_entropy": 0.8518031239509583,
      "step": 90
    },
    {
      "epoch": 0.20765027322404372,
      "grad_norm": 150.5965714109422,
      "learning_rate": 9.653370697542987e-07,
      "logits/chosen": -0.949033260345459,
      "logits/rejected": -0.9504354596138,
      "logps/chosen": -0.34006524085998535,
      "logps/rejected": -0.3670746684074402,
      "loss": 4.5023,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -3.4006524085998535,
      "rewards/margins": 0.27009397745132446,
      "rewards/rejected": -3.6707465648651123,
      "semantic_entropy": 0.8378095626831055,
      "step": 95
    },
    {
      "epoch": 0.2185792349726776,
      "grad_norm": 205.55603229224332,
      "learning_rate": 9.580077827331037e-07,
      "logits/chosen": -0.9659525752067566,
      "logits/rejected": -0.9214147329330444,
      "logps/chosen": -0.37815189361572266,
      "logps/rejected": -0.4408392012119293,
      "loss": 4.4615,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -3.7815189361572266,
      "rewards/margins": 0.6268730163574219,
      "rewards/rejected": -4.408391952514648,
      "semantic_entropy": 0.8833344578742981,
      "step": 100
    },
    {
      "epoch": 0.22950819672131148,
      "grad_norm": 168.46156689250674,
      "learning_rate": 9.500095727507419e-07,
      "logits/chosen": -1.0187479257583618,
      "logits/rejected": -0.9876689910888672,
      "logps/chosen": -0.3568420708179474,
      "logps/rejected": -0.4102093577384949,
      "loss": 4.4882,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -3.568420886993408,
      "rewards/margins": 0.5336726903915405,
      "rewards/rejected": -4.102093696594238,
      "semantic_entropy": 0.8596333265304565,
      "step": 105
    },
    {
      "epoch": 0.24043715846994534,
      "grad_norm": 163.05795657074705,
      "learning_rate": 9.413541212382004e-07,
      "logits/chosen": -1.013091802597046,
      "logits/rejected": -0.9950464367866516,
      "logps/chosen": -0.37098902463912964,
      "logps/rejected": -0.45781344175338745,
      "loss": 4.3876,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -3.709890842437744,
      "rewards/margins": 0.8682438135147095,
      "rewards/rejected": -4.578134059906006,
      "semantic_entropy": 0.9028980135917664,
      "step": 110
    },
    {
      "epoch": 0.25136612021857924,
      "grad_norm": 159.92608074418774,
      "learning_rate": 9.320540695314438e-07,
      "logits/chosen": -1.0206435918807983,
      "logits/rejected": -0.9817934036254883,
      "logps/chosen": -0.37152332067489624,
      "logps/rejected": -0.4835759103298187,
      "loss": 4.1862,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -3.715233325958252,
      "rewards/margins": 1.1205257177352905,
      "rewards/rejected": -4.835759162902832,
      "semantic_entropy": 0.8893026113510132,
      "step": 115
    },
    {
      "epoch": 0.26229508196721313,
      "grad_norm": 126.53869344949423,
      "learning_rate": 9.221230004086721e-07,
      "logits/chosen": -1.0430892705917358,
      "logits/rejected": -0.9731811285018921,
      "logps/chosen": -0.38018742203712463,
      "logps/rejected": -0.44267600774765015,
      "loss": 4.3042,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -3.8018736839294434,
      "rewards/margins": 0.6248863935470581,
      "rewards/rejected": -4.426759719848633,
      "semantic_entropy": 0.9051868319511414,
      "step": 120
    },
    {
      "epoch": 0.273224043715847,
      "grad_norm": 330.21792860177044,
      "learning_rate": 9.11575418252596e-07,
      "logits/chosen": -0.9472485780715942,
      "logits/rejected": -0.9132539629936218,
      "logps/chosen": -0.396454393863678,
      "logps/rejected": -0.4733741283416748,
      "loss": 4.0912,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -3.9645442962646484,
      "rewards/margins": 0.7691973447799683,
      "rewards/rejected": -4.733741283416748,
      "semantic_entropy": 0.9098461866378784,
      "step": 125
    },
    {
      "epoch": 0.28415300546448086,
      "grad_norm": 136.7218535491149,
      "learning_rate": 9.004267278667031e-07,
      "logits/chosen": -0.9847833514213562,
      "logits/rejected": -0.9780336618423462,
      "logps/chosen": -0.4162030816078186,
      "logps/rejected": -0.5500742793083191,
      "loss": 4.1012,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -4.162030220031738,
      "rewards/margins": 1.3387129306793213,
      "rewards/rejected": -5.500743389129639,
      "semantic_entropy": 0.9023580551147461,
      "step": 130
    },
    {
      "epoch": 0.29508196721311475,
      "grad_norm": 114.59260330847327,
      "learning_rate": 8.886932119764565e-07,
      "logits/chosen": -1.018243670463562,
      "logits/rejected": -0.934001624584198,
      "logps/chosen": -0.40604203939437866,
      "logps/rejected": -0.521537184715271,
      "loss": 3.933,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -4.060420036315918,
      "rewards/margins": 1.154951810836792,
      "rewards/rejected": -5.215372562408447,
      "semantic_entropy": 0.9238536953926086,
      "step": 135
    },
    {
      "epoch": 0.30601092896174864,
      "grad_norm": 151.22961611030988,
      "learning_rate": 8.763920074482809e-07,
      "logits/chosen": -1.02057683467865,
      "logits/rejected": -0.9660438299179077,
      "logps/chosen": -0.43255481123924255,
      "logps/rejected": -0.5811036229133606,
      "loss": 3.5202,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -4.32554817199707,
      "rewards/margins": 1.485487699508667,
      "rewards/rejected": -5.811036109924316,
      "semantic_entropy": 0.9533751606941223,
      "step": 140
    },
    {
      "epoch": 0.31693989071038253,
      "grad_norm": 171.9485188405326,
      "learning_rate": 8.635410802610723e-07,
      "logits/chosen": -1.0028008222579956,
      "logits/rejected": -0.9838630557060242,
      "logps/chosen": -0.4235480725765228,
      "logps/rejected": -0.49707216024398804,
      "loss": 3.7809,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -4.235480785369873,
      "rewards/margins": 0.7352409958839417,
      "rewards/rejected": -4.970722198486328,
      "semantic_entropy": 0.9417489767074585,
      "step": 145
    },
    {
      "epoch": 0.32786885245901637,
      "grad_norm": 135.1994551251087,
      "learning_rate": 8.501591992667849e-07,
      "logits/chosen": -1.0660603046417236,
      "logits/rejected": -1.0330188274383545,
      "logps/chosen": -0.458204448223114,
      "logps/rejected": -0.6369711756706238,
      "loss": 3.6343,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -4.58204460144043,
      "rewards/margins": 1.7876676321029663,
      "rewards/rejected": -6.369711875915527,
      "semantic_entropy": 0.9522278904914856,
      "step": 150
    },
    {
      "epoch": 0.33879781420765026,
      "grad_norm": 123.13149743090504,
      "learning_rate": 8.362659087784152e-07,
      "logits/chosen": -0.9992470741271973,
      "logits/rejected": -0.946983814239502,
      "logps/chosen": -0.45955339074134827,
      "logps/rejected": -0.5732384324073792,
      "loss": 3.6764,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -4.5955328941345215,
      "rewards/margins": 1.1368510723114014,
      "rewards/rejected": -5.732384204864502,
      "semantic_entropy": 0.9510468244552612,
      "step": 155
    },
    {
      "epoch": 0.34972677595628415,
      "grad_norm": 245.75242739092204,
      "learning_rate": 8.218815000254231e-07,
      "logits/chosen": -1.0509494543075562,
      "logits/rejected": -0.9942834973335266,
      "logps/chosen": -0.5233258008956909,
      "logps/rejected": -0.6250703930854797,
      "loss": 3.7281,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -5.233258247375488,
      "rewards/margins": 1.0174460411071777,
      "rewards/rejected": -6.250703811645508,
      "semantic_entropy": 0.9671458005905151,
      "step": 160
    },
    {
      "epoch": 0.36065573770491804,
      "grad_norm": 158.28325663577223,
      "learning_rate": 8.07026981518276e-07,
      "logits/chosen": -1.0312683582305908,
      "logits/rejected": -0.9780334234237671,
      "logps/chosen": -0.5141728520393372,
      "logps/rejected": -0.6252259016036987,
      "loss": 3.6091,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -5.141728401184082,
      "rewards/margins": 1.1105303764343262,
      "rewards/rejected": -6.252258777618408,
      "semantic_entropy": 0.9860242605209351,
      "step": 165
    },
    {
      "epoch": 0.37158469945355194,
      "grad_norm": 136.70967385020978,
      "learning_rate": 7.917240483654e-07,
      "logits/chosen": -1.0271486043930054,
      "logits/rejected": -0.9650506973266602,
      "logps/chosen": -0.5105966329574585,
      "logps/rejected": -0.6147508025169373,
      "loss": 3.7493,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -5.105965614318848,
      "rewards/margins": 1.0415422916412354,
      "rewards/rejected": -6.147508144378662,
      "semantic_entropy": 0.9853399991989136,
      "step": 170
    },
    {
      "epoch": 0.3825136612021858,
      "grad_norm": 192.89943041726585,
      "learning_rate": 7.759950505873521e-07,
      "logits/chosen": -1.0819661617279053,
      "logits/rejected": -1.050621747970581,
      "logps/chosen": -0.5413715839385986,
      "logps/rejected": -0.6315657496452332,
      "loss": 3.5161,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -5.413715362548828,
      "rewards/margins": 0.9019424319267273,
      "rewards/rejected": -6.315657615661621,
      "semantic_entropy": 0.9645811319351196,
      "step": 175
    },
    {
      "epoch": 0.39344262295081966,
      "grad_norm": 152.89832261081025,
      "learning_rate": 7.598629604744872e-07,
      "logits/chosen": -1.0869873762130737,
      "logits/rejected": -1.0785080194473267,
      "logps/chosen": -0.5242325067520142,
      "logps/rejected": -0.7012667655944824,
      "loss": 3.3521,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -5.242325305938721,
      "rewards/margins": 1.7703425884246826,
      "rewards/rejected": -7.012667655944824,
      "semantic_entropy": 1.0011693239212036,
      "step": 180
    },
    {
      "epoch": 0.40437158469945356,
      "grad_norm": 150.16456326171243,
      "learning_rate": 7.433513390357989e-07,
      "logits/chosen": -1.1106479167938232,
      "logits/rejected": -1.1203057765960693,
      "logps/chosen": -0.5613775253295898,
      "logps/rejected": -0.7369329333305359,
      "loss": 3.3325,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -5.61377477645874,
      "rewards/margins": 1.755553960800171,
      "rewards/rejected": -7.36932897567749,
      "semantic_entropy": 1.0098798274993896,
      "step": 185
    },
    {
      "epoch": 0.41530054644808745,
      "grad_norm": 177.03981164325452,
      "learning_rate": 7.264843015879321e-07,
      "logits/chosen": -1.118817925453186,
      "logits/rejected": -1.075656771659851,
      "logps/chosen": -0.5620681047439575,
      "logps/rejected": -0.7573049664497375,
      "loss": 3.3889,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -5.620680332183838,
      "rewards/margins": 1.9523694515228271,
      "rewards/rejected": -7.573049068450928,
      "semantic_entropy": 0.998263955116272,
      "step": 190
    },
    {
      "epoch": 0.4262295081967213,
      "grad_norm": 126.76170022708793,
      "learning_rate": 7.092864825346266e-07,
      "logits/chosen": -1.1385692358016968,
      "logits/rejected": -1.1158863306045532,
      "logps/chosen": -0.6738488078117371,
      "logps/rejected": -0.8919061422348022,
      "loss": 3.431,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -6.73848819732666,
      "rewards/margins": 2.180572986602783,
      "rewards/rejected": -8.919061660766602,
      "semantic_entropy": 0.9914792776107788,
      "step": 195
    },
    {
      "epoch": 0.4371584699453552,
      "grad_norm": 146.12776815167663,
      "learning_rate": 6.917829993880302e-07,
      "logits/chosen": -1.1107165813446045,
      "logits/rejected": -1.0282893180847168,
      "logps/chosen": -0.6398170590400696,
      "logps/rejected": -0.8206952810287476,
      "loss": 3.2848,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -6.398170471191406,
      "rewards/margins": 1.8087825775146484,
      "rewards/rejected": -8.206953048706055,
      "semantic_entropy": 1.0069334506988525,
      "step": 200
    },
    {
      "epoch": 0.44808743169398907,
      "grad_norm": 114.70543290599623,
      "learning_rate": 6.739994160844309e-07,
      "logits/chosen": -1.0968120098114014,
      "logits/rejected": -1.1100647449493408,
      "logps/chosen": -0.6191304326057434,
      "logps/rejected": -0.8075233697891235,
      "loss": 3.113,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -6.1913042068481445,
      "rewards/margins": 1.8839294910430908,
      "rewards/rejected": -8.075233459472656,
      "semantic_entropy": 1.0141515731811523,
      "step": 205
    },
    {
      "epoch": 0.45901639344262296,
      "grad_norm": 207.23320066468338,
      "learning_rate": 6.559617056479827e-07,
      "logits/chosen": -1.1229215860366821,
      "logits/rejected": -1.1227028369903564,
      "logps/chosen": -0.6795850396156311,
      "logps/rejected": -0.9186260104179382,
      "loss": 3.1442,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -6.7958502769470215,
      "rewards/margins": 2.3904099464416504,
      "rewards/rejected": -9.186259269714355,
      "semantic_entropy": 0.9720403552055359,
      "step": 210
    },
    {
      "epoch": 0.46994535519125685,
      "grad_norm": 154.50502979258272,
      "learning_rate": 6.376962122569567e-07,
      "logits/chosen": -1.1284302473068237,
      "logits/rejected": -1.0769437551498413,
      "logps/chosen": -0.6974600553512573,
      "logps/rejected": -0.9393995404243469,
      "loss": 3.4054,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -6.974600315093994,
      "rewards/margins": 2.4193947315216064,
      "rewards/rejected": -9.39399528503418,
      "semantic_entropy": 1.0159441232681274,
      "step": 215
    },
    {
      "epoch": 0.4808743169398907,
      "grad_norm": 138.6943703552703,
      "learning_rate": 6.192296127679192e-07,
      "logits/chosen": -1.1995110511779785,
      "logits/rejected": -1.1735769510269165,
      "logps/chosen": -0.7303147912025452,
      "logps/rejected": -0.9362783432006836,
      "loss": 3.1146,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -7.303147792816162,
      "rewards/margins": 2.0596346855163574,
      "rewards/rejected": -9.362783432006836,
      "semantic_entropy": 0.9887905120849609,
      "step": 220
    },
    {
      "epoch": 0.4918032786885246,
      "grad_norm": 120.75185480754473,
      "learning_rate": 6.005888777540319e-07,
      "logits/chosen": -1.2185839414596558,
      "logits/rejected": -1.1783679723739624,
      "logps/chosen": -0.7471610307693481,
      "logps/rejected": -0.9852075576782227,
      "loss": 3.1221,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -7.471610069274902,
      "rewards/margins": 2.380465030670166,
      "rewards/rejected": -9.852075576782227,
      "semantic_entropy": 0.9979068040847778,
      "step": 225
    },
    {
      "epoch": 0.5027322404371585,
      "grad_norm": 122.09354415954165,
      "learning_rate": 5.818012321143773e-07,
      "logits/chosen": -1.1167972087860107,
      "logits/rejected": -1.1158543825149536,
      "logps/chosen": -0.7506524920463562,
      "logps/rejected": -1.0082188844680786,
      "loss": 3.1675,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -7.50652551651001,
      "rewards/margins": 2.5756633281707764,
      "rewards/rejected": -10.082188606262207,
      "semantic_entropy": 0.993320643901825,
      "step": 230
    },
    {
      "epoch": 0.5136612021857924,
      "grad_norm": 147.5670346142303,
      "learning_rate": 5.628941153118388e-07,
      "logits/chosen": -1.1291579008102417,
      "logits/rejected": -1.0918009281158447,
      "logps/chosen": -0.775924563407898,
      "logps/rejected": -1.0093990564346313,
      "loss": 2.9612,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -7.759246826171875,
      "rewards/margins": 2.3347439765930176,
      "rewards/rejected": -10.093989372253418,
      "semantic_entropy": 0.981053352355957,
      "step": 235
    },
    {
      "epoch": 0.5245901639344263,
      "grad_norm": 133.14046441252583,
      "learning_rate": 5.438951412976098e-07,
      "logits/chosen": -1.1797640323638916,
      "logits/rejected": -1.1900447607040405,
      "logps/chosen": -0.7702494859695435,
      "logps/rejected": -1.056715488433838,
      "loss": 2.7098,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -7.702495574951172,
      "rewards/margins": 2.864659547805786,
      "rewards/rejected": -10.567155838012695,
      "semantic_entropy": 0.991602897644043,
      "step": 240
    },
    {
      "epoch": 0.5355191256830601,
      "grad_norm": 133.0444183672,
      "learning_rate": 5.248320581808619e-07,
      "logits/chosen": -1.1087061166763306,
      "logits/rejected": -1.0653207302093506,
      "logps/chosen": -0.792129397392273,
      "logps/rejected": -1.075703501701355,
      "loss": 2.9413,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -7.921294212341309,
      "rewards/margins": 2.835740566253662,
      "rewards/rejected": -10.757036209106445,
      "semantic_entropy": 0.9732475280761719,
      "step": 245
    },
    {
      "epoch": 0.546448087431694,
      "grad_norm": 122.1994564086186,
      "learning_rate": 5.057327077024744e-07,
      "logits/chosen": -1.1738090515136719,
      "logits/rejected": -1.1409178972244263,
      "logps/chosen": -0.806613564491272,
      "logps/rejected": -1.0335161685943604,
      "loss": 3.0864,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -8.06613540649414,
      "rewards/margins": 2.2690269947052,
      "rewards/rejected": -10.335162162780762,
      "semantic_entropy": 0.9622586965560913,
      "step": 250
    },
    {
      "epoch": 0.5573770491803278,
      "grad_norm": 232.10642636329976,
      "learning_rate": 4.866249845720132e-07,
      "logits/chosen": -1.1730918884277344,
      "logits/rejected": -1.1448460817337036,
      "logps/chosen": -0.8790150880813599,
      "logps/rejected": -1.1891063451766968,
      "loss": 2.8134,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -8.790151596069336,
      "rewards/margins": 3.1009116172790527,
      "rewards/rejected": -11.891061782836914,
      "semantic_entropy": 0.9595390558242798,
      "step": 255
    },
    {
      "epoch": 0.5683060109289617,
      "grad_norm": 131.2755348822229,
      "learning_rate": 4.675367957273505e-07,
      "logits/chosen": -1.141157865524292,
      "logits/rejected": -1.1335737705230713,
      "logps/chosen": -0.8586977124214172,
      "logps/rejected": -1.1454424858093262,
      "loss": 2.8892,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -8.586977005004883,
      "rewards/margins": 2.8674476146698,
      "rewards/rejected": -11.454424858093262,
      "semantic_entropy": 0.9425627589225769,
      "step": 260
    },
    {
      "epoch": 0.5792349726775956,
      "grad_norm": 140.64129624293926,
      "learning_rate": 4.4849601957642285e-07,
      "logits/chosen": -1.1671048402786255,
      "logits/rejected": -1.1342850923538208,
      "logps/chosen": -0.884295642375946,
      "logps/rejected": -1.1824935674667358,
      "loss": 2.8754,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -8.84295654296875,
      "rewards/margins": 2.9819793701171875,
      "rewards/rejected": -11.824935913085938,
      "semantic_entropy": 0.9442464709281921,
      "step": 265
    },
    {
      "epoch": 0.5901639344262295,
      "grad_norm": 147.4714622434341,
      "learning_rate": 4.295304652806592e-07,
      "logits/chosen": -1.161628246307373,
      "logits/rejected": -1.1403006315231323,
      "logps/chosen": -0.9028175473213196,
      "logps/rejected": -1.2475910186767578,
      "loss": 2.6617,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -9.028175354003906,
      "rewards/margins": 3.447734832763672,
      "rewards/rejected": -12.475909233093262,
      "semantic_entropy": 0.9366561770439148,
      "step": 270
    },
    {
      "epoch": 0.6010928961748634,
      "grad_norm": 103.487924065158,
      "learning_rate": 4.106678321395433e-07,
      "logits/chosen": -1.149213194847107,
      "logits/rejected": -1.091584324836731,
      "logps/chosen": -0.944200873374939,
      "logps/rejected": -1.1400898694992065,
      "loss": 2.9059,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -9.442008972167969,
      "rewards/margins": 1.9588892459869385,
      "rewards/rejected": -11.400897979736328,
      "semantic_entropy": 0.9353906512260437,
      "step": 275
    },
    {
      "epoch": 0.6120218579234973,
      "grad_norm": 134.66475933979152,
      "learning_rate": 3.9193566913562915e-07,
      "logits/chosen": -1.112555742263794,
      "logits/rejected": -1.1183173656463623,
      "logps/chosen": -0.9673782587051392,
      "logps/rejected": -1.330810308456421,
      "loss": 2.8642,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -9.673782348632812,
      "rewards/margins": 3.6343204975128174,
      "rewards/rejected": -13.30810260772705,
      "semantic_entropy": 0.8982528448104858,
      "step": 280
    },
    {
      "epoch": 0.6229508196721312,
      "grad_norm": 122.90153644042246,
      "learning_rate": 3.7336133469909623e-07,
      "logits/chosen": -1.2511584758758545,
      "logits/rejected": -1.2238370180130005,
      "logps/chosen": -0.9596298933029175,
      "logps/rejected": -1.3390361070632935,
      "loss": 2.5195,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -9.59630012512207,
      "rewards/margins": 3.794060468673706,
      "rewards/rejected": -13.390359878540039,
      "semantic_entropy": 0.9061362147331238,
      "step": 285
    },
    {
      "epoch": 0.6338797814207651,
      "grad_norm": 155.0095981855771,
      "learning_rate": 3.549719567506076e-07,
      "logits/chosen": -1.190308690071106,
      "logits/rejected": -1.1568708419799805,
      "logps/chosen": -1.0390634536743164,
      "logps/rejected": -1.3806968927383423,
      "loss": 2.8348,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -10.390633583068848,
      "rewards/margins": 3.4163336753845215,
      "rewards/rejected": -13.806968688964844,
      "semantic_entropy": 0.8884953260421753,
      "step": 290
    },
    {
      "epoch": 0.644808743169399,
      "grad_norm": 128.49285257186182,
      "learning_rate": 3.3679439308082774e-07,
      "logits/chosen": -1.1692029237747192,
      "logits/rejected": -1.172719120979309,
      "logps/chosen": -1.0511713027954102,
      "logps/rejected": -1.4550367593765259,
      "loss": 2.2826,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": -10.511713027954102,
      "rewards/margins": 4.0386552810668945,
      "rewards/rejected": -14.55036735534668,
      "semantic_entropy": 0.878921389579773,
      "step": 295
    },
    {
      "epoch": 0.6557377049180327,
      "grad_norm": 113.97800775884723,
      "learning_rate": 3.1885519212446716e-07,
      "logits/chosen": -1.2200191020965576,
      "logits/rejected": -1.209221601486206,
      "logps/chosen": -1.0968992710113525,
      "logps/rejected": -1.4734210968017578,
      "loss": 2.5534,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -10.968993186950684,
      "rewards/margins": 3.7652194499969482,
      "rewards/rejected": -14.734212875366211,
      "semantic_entropy": 0.8491713404655457,
      "step": 300
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 153.40901841678263,
      "learning_rate": 3.0118055418614295e-07,
      "logits/chosen": -1.210296392440796,
      "logits/rejected": -1.166100263595581,
      "logps/chosen": -1.0915873050689697,
      "logps/rejected": -1.5003201961517334,
      "loss": 2.7077,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -10.915873527526855,
      "rewards/margins": 4.087328910827637,
      "rewards/rejected": -15.003199577331543,
      "semantic_entropy": 0.851865291595459,
      "step": 305
    },
    {
      "epoch": 0.6775956284153005,
      "grad_norm": 188.92294277142176,
      "learning_rate": 2.83796293174686e-07,
      "logits/chosen": -1.144047498703003,
      "logits/rejected": -1.1552207469940186,
      "logps/chosen": -1.1141725778579712,
      "logps/rejected": -1.5596559047698975,
      "loss": 2.9239,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -11.141725540161133,
      "rewards/margins": 4.454832077026367,
      "rewards/rejected": -15.5965576171875,
      "semantic_entropy": 0.8461610078811646,
      "step": 310
    },
    {
      "epoch": 0.6885245901639344,
      "grad_norm": 158.1416950642293,
      "learning_rate": 2.6672779890178046e-07,
      "logits/chosen": -1.1891381740570068,
      "logits/rejected": -1.1934657096862793,
      "logps/chosen": -1.1909846067428589,
      "logps/rejected": -1.4780324697494507,
      "loss": 2.6254,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -11.9098482131958,
      "rewards/margins": 2.8704779148101807,
      "rewards/rejected": -14.780324935913086,
      "semantic_entropy": 0.8265172243118286,
      "step": 315
    },
    {
      "epoch": 0.6994535519125683,
      "grad_norm": 117.90491663822478,
      "learning_rate": 2.500000000000001e-07,
      "logits/chosen": -1.2632883787155151,
      "logits/rejected": -1.2213891744613647,
      "logps/chosen": -1.2055654525756836,
      "logps/rejected": -1.6077735424041748,
      "loss": 2.6783,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -12.05565357208252,
      "rewards/margins": 4.022082805633545,
      "rewards/rejected": -16.077735900878906,
      "semantic_entropy": 0.8192012906074524,
      "step": 320
    },
    {
      "epoch": 0.7103825136612022,
      "grad_norm": 164.40962706267163,
      "learning_rate": 2.3363732751439923e-07,
      "logits/chosen": -1.2160775661468506,
      "logits/rejected": -1.205322027206421,
      "logps/chosen": -1.1661893129348755,
      "logps/rejected": -1.5277663469314575,
      "loss": 2.6624,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -11.661892890930176,
      "rewards/margins": 3.615769863128662,
      "rewards/rejected": -15.27766227722168,
      "semantic_entropy": 0.823623538017273,
      "step": 325
    },
    {
      "epoch": 0.7213114754098361,
      "grad_norm": 107.16070244814924,
      "learning_rate": 2.1766367922083283e-07,
      "logits/chosen": -1.1548130512237549,
      "logits/rejected": -1.1359449625015259,
      "logps/chosen": -1.1202858686447144,
      "logps/rejected": -1.626604676246643,
      "loss": 2.5554,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -11.20285701751709,
      "rewards/margins": 5.063187599182129,
      "rewards/rejected": -16.26604461669922,
      "semantic_entropy": 0.8284898996353149,
      "step": 330
    },
    {
      "epoch": 0.73224043715847,
      "grad_norm": 143.54805923440733,
      "learning_rate": 2.021023847231202e-07,
      "logits/chosen": -1.1354625225067139,
      "logits/rejected": -1.1055997610092163,
      "logps/chosen": -1.2420397996902466,
      "logps/rejected": -1.6342121362686157,
      "loss": 2.5194,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -12.42039680480957,
      "rewards/margins": 3.9217231273651123,
      "rewards/rejected": -16.342121124267578,
      "semantic_entropy": 0.8055655360221863,
      "step": 335
    },
    {
      "epoch": 0.7431693989071039,
      "grad_norm": 143.90601843179704,
      "learning_rate": 1.869761713800254e-07,
      "logits/chosen": -1.159055471420288,
      "logits/rejected": -1.1217933893203735,
      "logps/chosen": -1.216088056564331,
      "logps/rejected": -1.6224826574325562,
      "loss": 2.6871,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -12.160881996154785,
      "rewards/margins": 4.0639448165893555,
      "rewards/rejected": -16.22482681274414,
      "semantic_entropy": 0.7940818667411804,
      "step": 340
    },
    {
      "epoch": 0.7540983606557377,
      "grad_norm": 146.77126754239146,
      "learning_rate": 1.7230713111182164e-07,
      "logits/chosen": -1.2234665155410767,
      "logits/rejected": -1.2274234294891357,
      "logps/chosen": -1.2689043283462524,
      "logps/rejected": -1.6999661922454834,
      "loss": 2.6683,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -12.689043998718262,
      "rewards/margins": 4.310617923736572,
      "rewards/rejected": -16.99966049194336,
      "semantic_entropy": 0.7804916501045227,
      "step": 345
    },
    {
      "epoch": 0.7650273224043715,
      "grad_norm": 156.59132030322806,
      "learning_rate": 1.5811668813491696e-07,
      "logits/chosen": -1.2142775058746338,
      "logits/rejected": -1.1987954378128052,
      "logps/chosen": -1.1977766752243042,
      "logps/rejected": -1.5605593919754028,
      "loss": 2.6148,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -11.977766036987305,
      "rewards/margins": 3.62782621383667,
      "rewards/rejected": -15.605592727661133,
      "semantic_entropy": 0.8086638450622559,
      "step": 350
    },
    {
      "epoch": 0.7759562841530054,
      "grad_norm": 144.3372503884412,
      "learning_rate": 1.4442556767166369e-07,
      "logits/chosen": -1.1762679815292358,
      "logits/rejected": -1.1479089260101318,
      "logps/chosen": -1.193447470664978,
      "logps/rejected": -1.6105226278305054,
      "loss": 2.5697,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -11.934475898742676,
      "rewards/margins": 4.170751094818115,
      "rewards/rejected": -16.105228424072266,
      "semantic_entropy": 0.8080552220344543,
      "step": 355
    },
    {
      "epoch": 0.7868852459016393,
      "grad_norm": 148.5272314733488,
      "learning_rate": 1.312537656810549e-07,
      "logits/chosen": -1.135874629020691,
      "logits/rejected": -1.1405253410339355,
      "logps/chosen": -1.242232322692871,
      "logps/rejected": -1.6316314935684204,
      "loss": 2.629,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -12.422323226928711,
      "rewards/margins": 3.8939926624298096,
      "rewards/rejected": -16.316316604614258,
      "semantic_entropy": 0.8038153648376465,
      "step": 360
    },
    {
      "epoch": 0.7978142076502732,
      "grad_norm": 189.12080572010774,
      "learning_rate": 1.1862051965451214e-07,
      "logits/chosen": -1.216966152191162,
      "logits/rejected": -1.2227869033813477,
      "logps/chosen": -1.2868871688842773,
      "logps/rejected": -1.718698263168335,
      "loss": 2.5421,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -12.868871688842773,
      "rewards/margins": 4.318110466003418,
      "rewards/rejected": -17.18698501586914,
      "semantic_entropy": 0.7714166045188904,
      "step": 365
    },
    {
      "epoch": 0.8087431693989071,
      "grad_norm": 130.34992194933218,
      "learning_rate": 1.0654428051942138e-07,
      "logits/chosen": -1.2347556352615356,
      "logits/rejected": -1.2042248249053955,
      "logps/chosen": -1.3064343929290771,
      "logps/rejected": -1.777515172958374,
      "loss": 2.6716,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -13.06434440612793,
      "rewards/margins": 4.710807800292969,
      "rewards/rejected": -17.7751522064209,
      "semantic_entropy": 0.7663524150848389,
      "step": 370
    },
    {
      "epoch": 0.819672131147541,
      "grad_norm": 120.52547478396558,
      "learning_rate": 9.504268569144763e-08,
      "logits/chosen": -1.2310011386871338,
      "logits/rejected": -1.179221749305725,
      "logps/chosen": -1.2675514221191406,
      "logps/rejected": -1.7146999835968018,
      "loss": 2.5273,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -12.675516128540039,
      "rewards/margins": 4.4714837074279785,
      "rewards/rejected": -17.14699935913086,
      "semantic_entropy": 0.7719418406486511,
      "step": 375
    },
    {
      "epoch": 0.8306010928961749,
      "grad_norm": 134.85584452412903,
      "learning_rate": 8.413253331499049e-08,
      "logits/chosen": -1.1171958446502686,
      "logits/rejected": -1.1349446773529053,
      "logps/chosen": -1.2904092073440552,
      "logps/rejected": -1.6963565349578857,
      "loss": 2.5116,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -12.904090881347656,
      "rewards/margins": 4.059475898742676,
      "rewards/rejected": -16.96356773376465,
      "semantic_entropy": 0.7810186147689819,
      "step": 380
    },
    {
      "epoch": 0.8415300546448088,
      "grad_norm": 137.9880749898867,
      "learning_rate": 7.382975772939865e-08,
      "logits/chosen": -1.219100832939148,
      "logits/rejected": -1.2109915018081665,
      "logps/chosen": -1.3701751232147217,
      "logps/rejected": -1.809171438217163,
      "loss": 2.8231,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": -13.701749801635742,
      "rewards/margins": 4.389963626861572,
      "rewards/rejected": -18.09171485900879,
      "semantic_entropy": 0.7575483918190002,
      "step": 385
    },
    {
      "epoch": 0.8524590163934426,
      "grad_norm": 192.7458723286622,
      "learning_rate": 6.414940619677734e-08,
      "logits/chosen": -1.2024834156036377,
      "logits/rejected": -1.1897705793380737,
      "logps/chosen": -1.2859004735946655,
      "logps/rejected": -1.8171262741088867,
      "loss": 2.4732,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -12.85900592803955,
      "rewards/margins": 5.312258243560791,
      "rewards/rejected": -18.171262741088867,
      "semantic_entropy": 0.77564936876297,
      "step": 390
    },
    {
      "epoch": 0.8633879781420765,
      "grad_norm": 182.36527261020933,
      "learning_rate": 5.5105616925376296e-08,
      "logits/chosen": -1.1923558712005615,
      "logits/rejected": -1.1765515804290771,
      "logps/chosen": -1.3574202060699463,
      "logps/rejected": -1.7072795629501343,
      "loss": 2.4702,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -13.574200630187988,
      "rewards/margins": 3.4985928535461426,
      "rewards/rejected": -17.07279396057129,
      "semantic_entropy": 0.7606626749038696,
      "step": 395
    },
    {
      "epoch": 0.8743169398907104,
      "grad_norm": 156.32689835666156,
      "learning_rate": 4.6711598420656976e-08,
      "logits/chosen": -1.144141435623169,
      "logits/rejected": -1.1195942163467407,
      "logps/chosen": -1.3408299684524536,
      "logps/rejected": -1.8310232162475586,
      "loss": 2.3262,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -13.408299446105957,
      "rewards/margins": 4.901931285858154,
      "rewards/rejected": -18.310230255126953,
      "semantic_entropy": 0.7442251443862915,
      "step": 400
    },
    {
      "epoch": 0.8743169398907104,
      "eval_logits/chosen": -1.3813101053237915,
      "eval_logits/rejected": -1.3473328351974487,
      "eval_logps/chosen": -1.2879669666290283,
      "eval_logps/rejected": -1.739721655845642,
      "eval_loss": 2.5607941150665283,
      "eval_rewards/accuracies": 0.8072289228439331,
      "eval_rewards/chosen": -12.879671096801758,
      "eval_rewards/margins": 4.517546653747559,
      "eval_rewards/rejected": -17.397218704223633,
      "eval_runtime": 36.6792,
      "eval_samples_per_second": 35.933,
      "eval_semantic_entropy": 0.7719007730484009,
      "eval_steps_per_second": 2.263,
      "step": 400
    },
    {
      "epoch": 0.8852459016393442,
      "grad_norm": 148.89570610138972,
      "learning_rate": 3.897961019419516e-08,
      "logits/chosen": -1.1572606563568115,
      "logits/rejected": -1.1004865169525146,
      "logps/chosen": -1.2273566722869873,
      "logps/rejected": -1.640681266784668,
      "loss": 2.4505,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -12.273566246032715,
      "rewards/margins": 4.13324499130249,
      "rewards/rejected": -16.406810760498047,
      "semantic_entropy": 0.7908967137336731,
      "step": 405
    },
    {
      "epoch": 0.8961748633879781,
      "grad_norm": 135.13513698899922,
      "learning_rate": 3.192094485859526e-08,
      "logits/chosen": -1.164189100265503,
      "logits/rejected": -1.1957439184188843,
      "logps/chosen": -1.340441346168518,
      "logps/rejected": -1.8204329013824463,
      "loss": 2.5453,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -13.404413223266602,
      "rewards/margins": 4.799916744232178,
      "rewards/rejected": -18.204330444335938,
      "semantic_entropy": 0.7424927949905396,
      "step": 410
    },
    {
      "epoch": 0.907103825136612,
      "grad_norm": 138.1252447502212,
      "learning_rate": 2.5545911634565265e-08,
      "logits/chosen": -1.2133328914642334,
      "logits/rejected": -1.217905879020691,
      "logps/chosen": -1.3356597423553467,
      "logps/rejected": -1.8359012603759766,
      "loss": 2.719,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -13.356595993041992,
      "rewards/margins": 5.002416133880615,
      "rewards/rejected": -18.359012603759766,
      "semantic_entropy": 0.7578593492507935,
      "step": 415
    },
    {
      "epoch": 0.9180327868852459,
      "grad_norm": 133.36843556957237,
      "learning_rate": 1.9863821294241522e-08,
      "logits/chosen": -1.2045748233795166,
      "logits/rejected": -1.188718557357788,
      "logps/chosen": -1.3029712438583374,
      "logps/rejected": -1.7860324382781982,
      "loss": 2.3706,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -13.029711723327637,
      "rewards/margins": 4.830612659454346,
      "rewards/rejected": -17.86032485961914,
      "semantic_entropy": 0.7500916123390198,
      "step": 420
    },
    {
      "epoch": 0.9289617486338798,
      "grad_norm": 139.42311056578998,
      "learning_rate": 1.4882972562753615e-08,
      "logits/chosen": -1.2088757753372192,
      "logits/rejected": -1.2028796672821045,
      "logps/chosen": -1.4182978868484497,
      "logps/rejected": -1.899735450744629,
      "loss": 2.6805,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -14.182978630065918,
      "rewards/margins": 4.814376354217529,
      "rewards/rejected": -18.997356414794922,
      "semantic_entropy": 0.7167907953262329,
      "step": 425
    },
    {
      "epoch": 0.9398907103825137,
      "grad_norm": 138.05690419269808,
      "learning_rate": 1.0610639997888915e-08,
      "logits/chosen": -1.1360652446746826,
      "logits/rejected": -1.1394312381744385,
      "logps/chosen": -1.265080451965332,
      "logps/rejected": -1.7739555835723877,
      "loss": 2.2297,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -12.65080451965332,
      "rewards/margins": 5.0887532234191895,
      "rewards/rejected": -17.73955535888672,
      "semantic_entropy": 0.7776240110397339,
      "step": 430
    },
    {
      "epoch": 0.9508196721311475,
      "grad_norm": 156.31606318677163,
      "learning_rate": 7.053063365559997e-09,
      "logits/chosen": -1.1962147951126099,
      "logits/rejected": -1.2261369228363037,
      "logps/chosen": -1.330643892288208,
      "logps/rejected": -1.8400895595550537,
      "loss": 2.397,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -13.306437492370605,
      "rewards/margins": 5.094459056854248,
      "rewards/rejected": -18.400897979736328,
      "semantic_entropy": 0.7367173433303833,
      "step": 435
    },
    {
      "epoch": 0.9617486338797814,
      "grad_norm": 137.08276067025474,
      "learning_rate": 4.215438526591064e-09,
      "logits/chosen": -1.1730583906173706,
      "logits/rejected": -1.140211582183838,
      "logps/chosen": -1.3757076263427734,
      "logps/rejected": -1.7452484369277954,
      "loss": 2.5885,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -13.757074356079102,
      "rewards/margins": 3.6954073905944824,
      "rewards/rejected": -17.452484130859375,
      "semantic_entropy": 0.7384070754051208,
      "step": 440
    },
    {
      "epoch": 0.9726775956284153,
      "grad_norm": 153.84420514847153,
      "learning_rate": 2.1019098481337426e-09,
      "logits/chosen": -1.2059695720672607,
      "logits/rejected": -1.1932657957077026,
      "logps/chosen": -1.2917059659957886,
      "logps/rejected": -1.7843118906021118,
      "loss": 2.4083,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -12.917058944702148,
      "rewards/margins": 4.926059246063232,
      "rewards/rejected": -17.843120574951172,
      "semantic_entropy": 0.7641780376434326,
      "step": 445
    },
    {
      "epoch": 0.9836065573770492,
      "grad_norm": 169.02746393168292,
      "learning_rate": 7.155641507955445e-10,
      "logits/chosen": -1.122040033340454,
      "logits/rejected": -1.1226763725280762,
      "logps/chosen": -1.3791415691375732,
      "logps/rejected": -1.798413872718811,
      "loss": 2.6799,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -13.791415214538574,
      "rewards/margins": 4.192723274230957,
      "rewards/rejected": -17.98413848876953,
      "semantic_entropy": 0.7384847402572632,
      "step": 450
    },
    {
      "epoch": 0.994535519125683,
      "grad_norm": 160.91531446057363,
      "learning_rate": 5.842620032053824e-11,
      "logits/chosen": -1.1409598588943481,
      "logits/rejected": -1.139664649963379,
      "logps/chosen": -1.3967525959014893,
      "logps/rejected": -1.73647940158844,
      "loss": 2.866,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -13.96752643585205,
      "rewards/margins": 3.3972675800323486,
      "rewards/rejected": -17.364791870117188,
      "semantic_entropy": 0.7418167591094971,
      "step": 455
    },
    {
      "epoch": 0.9989071038251366,
      "step": 457,
      "total_flos": 0.0,
      "train_loss": 3.4314852449513107,
      "train_runtime": 5934.8281,
      "train_samples_per_second": 9.867,
      "train_steps_per_second": 0.077
    }
  ],
  "logging_steps": 5,
  "max_steps": 457,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}