{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 50,
  "global_step": 436,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022935779816513763,
      "grad_norm": 6.666838198680302,
      "learning_rate": 1.1363636363636363e-07,
      "logits/chosen": -2.6193127632141113,
      "logits/rejected": -2.5524516105651855,
      "logps/chosen": -265.4483337402344,
      "logps/rejected": -236.09951782226562,
      "loss": 0.6931,
      "rewards/accuracies": 0.35624998807907104,
      "rewards/chosen": -2.520665293559432e-05,
      "rewards/margins": -0.0003298574883956462,
      "rewards/rejected": 0.00030465080635622144,
      "step": 10
    },
    {
      "epoch": 0.045871559633027525,
      "grad_norm": 6.256434066153107,
      "learning_rate": 2.2727272727272726e-07,
      "logits/chosen": -2.6579253673553467,
      "logits/rejected": -2.5760178565979004,
      "logps/chosen": -298.82305908203125,
      "logps/rejected": -274.31829833984375,
      "loss": 0.6922,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.0003779557882808149,
      "rewards/margins": 0.0017056630458682775,
      "rewards/rejected": -0.0020836188923567533,
      "step": 20
    },
    {
      "epoch": 0.06880733944954129,
      "grad_norm": 6.014004575787888,
      "learning_rate": 3.4090909090909085e-07,
      "logits/chosen": -2.675771713256836,
      "logits/rejected": -2.6020450592041016,
      "logps/chosen": -290.2829895019531,
      "logps/rejected": -234.431396484375,
      "loss": 0.6883,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.01049796212464571,
      "rewards/margins": 0.01499337237328291,
      "rewards/rejected": -0.004495412111282349,
      "step": 30
    },
    {
      "epoch": 0.09174311926605505,
      "grad_norm": 6.545633202821474,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": -2.6612305641174316,
      "logits/rejected": -2.6114158630371094,
      "logps/chosen": -280.93109130859375,
      "logps/rejected": -267.76605224609375,
      "loss": 0.6776,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.04238061234354973,
      "rewards/margins": 0.04211103171110153,
      "rewards/rejected": 0.00026957987574860454,
      "step": 40
    },
    {
      "epoch": 0.11467889908256881,
      "grad_norm": 7.0518705334996215,
      "learning_rate": 4.997110275491701e-07,
      "logits/chosen": -2.6194958686828613,
      "logits/rejected": -2.6111502647399902,
      "logps/chosen": -294.24957275390625,
      "logps/rejected": -305.2260437011719,
      "loss": 0.6607,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.020016254857182503,
      "rewards/margins": 0.08043506741523743,
      "rewards/rejected": -0.060418806970119476,
      "step": 50
    },
    {
      "epoch": 0.11467889908256881,
      "eval_logits/chosen": -2.5735373497009277,
      "eval_logits/rejected": -2.4922757148742676,
      "eval_logps/chosen": -285.52752685546875,
      "eval_logps/rejected": -261.0398254394531,
      "eval_loss": 0.6447442770004272,
      "eval_rewards/accuracies": 0.6896551847457886,
      "eval_rewards/chosen": -0.004374094773083925,
      "eval_rewards/margins": 0.140799880027771,
      "eval_rewards/rejected": -0.14517399668693542,
      "eval_runtime": 94.492,
      "eval_samples_per_second": 19.24,
      "eval_steps_per_second": 0.307,
      "step": 50
    },
    {
      "epoch": 0.13761467889908258,
      "grad_norm": 10.342302388199876,
      "learning_rate": 4.979475034558115e-07,
      "logits/chosen": -2.559204578399658,
      "logits/rejected": -2.5000882148742676,
      "logps/chosen": -296.8924255371094,
      "logps/rejected": -279.6778869628906,
      "loss": 0.6362,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.07407740503549576,
      "rewards/margins": 0.14713598787784576,
      "rewards/rejected": -0.22121338546276093,
      "step": 60
    },
    {
      "epoch": 0.16055045871559634,
      "grad_norm": 11.57375336560156,
      "learning_rate": 4.945923025551788e-07,
      "logits/chosen": -2.439746141433716,
      "logits/rejected": -2.39266037940979,
      "logps/chosen": -341.58990478515625,
      "logps/rejected": -305.513427734375,
      "loss": 0.614,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.14018920063972473,
      "rewards/margins": 0.3008686304092407,
      "rewards/rejected": -0.44105786085128784,
      "step": 70
    },
    {
      "epoch": 0.1834862385321101,
      "grad_norm": 13.101676053286978,
      "learning_rate": 4.896669632591651e-07,
      "logits/chosen": -2.333707332611084,
      "logits/rejected": -2.2206273078918457,
      "logps/chosen": -311.94189453125,
      "logps/rejected": -312.6253662109375,
      "loss": 0.5993,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.2609206438064575,
      "rewards/margins": 0.33877214789390564,
      "rewards/rejected": -0.5996928215026855,
      "step": 80
    },
    {
      "epoch": 0.20642201834862386,
      "grad_norm": 18.464805323764754,
      "learning_rate": 4.832031033425662e-07,
      "logits/chosen": -1.2291042804718018,
      "logits/rejected": -1.0986605882644653,
      "logps/chosen": -349.7447814941406,
      "logps/rejected": -355.8329162597656,
      "loss": 0.5841,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.6215134859085083,
      "rewards/margins": 0.4688030779361725,
      "rewards/rejected": -1.0903165340423584,
      "step": 90
    },
    {
      "epoch": 0.22935779816513763,
      "grad_norm": 18.44194365148455,
      "learning_rate": 4.752422169756047e-07,
      "logits/chosen": -0.5803453326225281,
      "logits/rejected": -0.4585650861263275,
      "logps/chosen": -330.8556823730469,
      "logps/rejected": -383.45281982421875,
      "loss": 0.5616,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.5843100547790527,
      "rewards/margins": 0.5309332609176636,
      "rewards/rejected": -1.1152434349060059,
      "step": 100
    },
    {
      "epoch": 0.22935779816513763,
      "eval_logits/chosen": -0.241001695394516,
      "eval_logits/rejected": 0.20754042267799377,
      "eval_logps/chosen": -370.36248779296875,
      "eval_logps/rejected": -404.2408142089844,
      "eval_loss": 0.5463992357254028,
      "eval_rewards/accuracies": 0.6853448152542114,
      "eval_rewards/chosen": -0.8527240753173828,
      "eval_rewards/margins": 0.7244595289230347,
      "eval_rewards/rejected": -1.5771836042404175,
      "eval_runtime": 94.4096,
      "eval_samples_per_second": 19.257,
      "eval_steps_per_second": 0.307,
      "step": 100
    },
    {
      "epoch": 0.25229357798165136,
      "grad_norm": 20.029871948259476,
      "learning_rate": 4.658354083558188e-07,
      "logits/chosen": -0.10621030628681183,
      "logits/rejected": 0.371669203042984,
      "logps/chosen": -369.4322204589844,
      "logps/rejected": -397.5771179199219,
      "loss": 0.5451,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.0424408912658691,
      "rewards/margins": 0.5959984660148621,
      "rewards/rejected": -1.638439416885376,
      "step": 110
    },
    {
      "epoch": 0.27522935779816515,
      "grad_norm": 25.325536766276052,
      "learning_rate": 4.550430636492389e-07,
      "logits/chosen": 0.43674445152282715,
      "logits/rejected": 0.7346484065055847,
      "logps/chosen": -387.1654968261719,
      "logps/rejected": -426.51898193359375,
      "loss": 0.5604,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.1134614944458008,
      "rewards/margins": 0.6370891332626343,
      "rewards/rejected": -1.7505505084991455,
      "step": 120
    },
    {
      "epoch": 0.2981651376146789,
      "grad_norm": 25.1538794496433,
      "learning_rate": 4.429344633468004e-07,
      "logits/chosen": 0.7902742624282837,
      "logits/rejected": 1.2213919162750244,
      "logps/chosen": -351.0482482910156,
      "logps/rejected": -406.3229675292969,
      "loss": 0.5509,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.9486812353134155,
      "rewards/margins": 0.6884225606918335,
      "rewards/rejected": -1.6371036767959595,
      "step": 130
    },
    {
      "epoch": 0.3211009174311927,
      "grad_norm": 21.483029077159443,
      "learning_rate": 4.2958733752443187e-07,
      "logits/chosen": 0.7886204123497009,
      "logits/rejected": 1.4716787338256836,
      "logps/chosen": -352.66986083984375,
      "logps/rejected": -371.2418212890625,
      "loss": 0.5467,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.764846920967102,
      "rewards/margins": 0.704605758190155,
      "rewards/rejected": -1.4694526195526123,
      "step": 140
    },
    {
      "epoch": 0.3440366972477064,
      "grad_norm": 21.063627942952767,
      "learning_rate": 4.150873668617898e-07,
      "logits/chosen": 1.314632534980774,
      "logits/rejected": 2.00125789642334,
      "logps/chosen": -385.40447998046875,
      "logps/rejected": -437.3175354003906,
      "loss": 0.5333,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.1886088848114014,
      "rewards/margins": 0.8526908755302429,
      "rewards/rejected": -2.04129958152771,
      "step": 150
    },
    {
      "epoch": 0.3440366972477064,
      "eval_logits/chosen": 0.5890395045280457,
      "eval_logits/rejected": 1.4808454513549805,
      "eval_logps/chosen": -385.3254699707031,
      "eval_logps/rejected": -434.7274169921875,
      "eval_loss": 0.5195103883743286,
      "eval_rewards/accuracies": 0.7112069129943848,
      "eval_rewards/chosen": -1.0023537874221802,
      "eval_rewards/margins": 0.8796959519386292,
      "eval_rewards/rejected": -1.8820499181747437,
      "eval_runtime": 94.3575,
      "eval_samples_per_second": 19.267,
      "eval_steps_per_second": 0.307,
      "step": 150
    },
    {
      "epoch": 0.3669724770642202,
      "grad_norm": 27.196294375049977,
      "learning_rate": 3.9952763262280397e-07,
      "logits/chosen": 0.8305681943893433,
      "logits/rejected": 1.4138782024383545,
      "logps/chosen": -414.48675537109375,
      "logps/rejected": -488.97027587890625,
      "loss": 0.5169,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.179356336593628,
      "rewards/margins": 0.8506600260734558,
      "rewards/rejected": -2.0300164222717285,
      "step": 160
    },
    {
      "epoch": 0.38990825688073394,
      "grad_norm": 20.448285631169824,
      "learning_rate": 3.8300801912883414e-07,
      "logits/chosen": 1.4580880403518677,
      "logits/rejected": 1.959507703781128,
      "logps/chosen": -411.11505126953125,
      "logps/rejected": -499.7334899902344,
      "loss": 0.5017,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.395141363143921,
      "rewards/margins": 0.9015046954154968,
      "rewards/rejected": -2.2966461181640625,
      "step": 170
    },
    {
      "epoch": 0.41284403669724773,
      "grad_norm": 19.388105918914484,
      "learning_rate": 3.6563457256020884e-07,
      "logits/chosen": 1.2716890573501587,
      "logits/rejected": 2.1560723781585693,
      "logps/chosen": -418.70068359375,
      "logps/rejected": -435.9356994628906,
      "loss": 0.5267,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.1534276008605957,
      "rewards/margins": 0.8209800720214844,
      "rewards/rejected": -1.9744075536727905,
      "step": 180
    },
    {
      "epoch": 0.43577981651376146,
      "grad_norm": 24.752350050203567,
      "learning_rate": 3.475188202022617e-07,
      "logits/chosen": 1.6485626697540283,
      "logits/rejected": 2.1269421577453613,
      "logps/chosen": -365.346435546875,
      "logps/rejected": -476.385009765625,
      "loss": 0.5115,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.0723192691802979,
      "rewards/margins": 1.048086404800415,
      "rewards/rejected": -2.120405673980713,
      "step": 190
    },
    {
      "epoch": 0.45871559633027525,
      "grad_norm": 20.591094290068703,
      "learning_rate": 3.287770545059052e-07,
      "logits/chosen": 1.790414810180664,
      "logits/rejected": 2.4998342990875244,
      "logps/chosen": -400.47991943359375,
      "logps/rejected": -456.0809020996094,
      "loss": 0.5219,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.221982717514038,
      "rewards/margins": 0.900266170501709,
      "rewards/rejected": -2.122248888015747,
      "step": 200
    },
    {
      "epoch": 0.45871559633027525,
      "eval_logits/chosen": 1.4256442785263062,
      "eval_logits/rejected": 2.4260053634643555,
      "eval_logps/chosen": -392.2837829589844,
      "eval_logps/rejected": -451.93536376953125,
      "eval_loss": 0.5009589195251465,
      "eval_rewards/accuracies": 0.732758641242981,
      "eval_rewards/chosen": -1.0719367265701294,
      "eval_rewards/margins": 0.9821925759315491,
      "eval_rewards/rejected": -2.0541296005249023,
      "eval_runtime": 96.4986,
      "eval_samples_per_second": 18.84,
      "eval_steps_per_second": 0.301,
      "step": 200
    },
    {
      "epoch": 0.481651376146789,
      "grad_norm": 21.25864225686741,
      "learning_rate": 3.0952958655864954e-07,
      "logits/chosen": 1.83803391456604,
      "logits/rejected": 2.399480104446411,
      "logps/chosen": -394.57037353515625,
      "logps/rejected": -468.97021484375,
      "loss": 0.5006,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.212659478187561,
      "rewards/margins": 0.9336206316947937,
      "rewards/rejected": -2.14628005027771,
      "step": 210
    },
    {
      "epoch": 0.5045871559633027,
      "grad_norm": 24.95309197907959,
      "learning_rate": 2.898999737583448e-07,
      "logits/chosen": 1.8371950387954712,
      "logits/rejected": 2.8440494537353516,
      "logps/chosen": -448.06109619140625,
      "logps/rejected": -526.2056884765625,
      "loss": 0.491,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -1.2538080215454102,
      "rewards/margins": 1.158546805381775,
      "rewards/rejected": -2.4123549461364746,
      "step": 220
    },
    {
      "epoch": 0.5275229357798165,
      "grad_norm": 24.874258979226397,
      "learning_rate": 2.7001422664752333e-07,
      "logits/chosen": 1.71869695186615,
      "logits/rejected": 2.2633819580078125,
      "logps/chosen": -376.9873046875,
      "logps/rejected": -471.9263610839844,
      "loss": 0.5124,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.0904380083084106,
      "rewards/margins": 0.9726268649101257,
      "rewards/rejected": -2.0630650520324707,
      "step": 230
    },
    {
      "epoch": 0.5504587155963303,
      "grad_norm": 18.86072207414666,
      "learning_rate": 2.5e-07,
      "logits/chosen": 1.5722936391830444,
      "logits/rejected": 2.684537410736084,
      "logps/chosen": -418.7516174316406,
      "logps/rejected": -480.7166442871094,
      "loss": 0.5197,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.3121980428695679,
      "rewards/margins": 0.8814684152603149,
      "rewards/rejected": -2.193666696548462,
      "step": 240
    },
    {
      "epoch": 0.573394495412844,
      "grad_norm": 19.046011375519186,
      "learning_rate": 2.2998577335247667e-07,
      "logits/chosen": 1.6455481052398682,
      "logits/rejected": 2.8547589778900146,
      "logps/chosen": -438.38238525390625,
      "logps/rejected": -492.9747619628906,
      "loss": 0.5007,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.3458855152130127,
      "rewards/margins": 0.9794231653213501,
      "rewards/rejected": -2.3253085613250732,
      "step": 250
    },
    {
      "epoch": 0.573394495412844,
      "eval_logits/chosen": 1.4527463912963867,
      "eval_logits/rejected": 2.673773765563965,
      "eval_logps/chosen": -408.2994384765625,
      "eval_logps/rejected": -479.4298095703125,
      "eval_loss": 0.4917016923427582,
      "eval_rewards/accuracies": 0.7241379022598267,
      "eval_rewards/chosen": -1.232093095779419,
      "eval_rewards/margins": 1.096980333328247,
      "eval_rewards/rejected": -2.329073429107666,
      "eval_runtime": 95.5197,
      "eval_samples_per_second": 19.033,
      "eval_steps_per_second": 0.304,
      "step": 250
    },
    {
      "epoch": 0.5963302752293578,
      "grad_norm": 19.810153184401507,
      "learning_rate": 2.1010002624165524e-07,
      "logits/chosen": 1.6456598043441772,
      "logits/rejected": 2.469041347503662,
      "logps/chosen": -406.33642578125,
      "logps/rejected": -532.65869140625,
      "loss": 0.4916,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -1.356218934059143,
      "rewards/margins": 1.226430892944336,
      "rewards/rejected": -2.5826497077941895,
      "step": 260
    },
    {
      "epoch": 0.6192660550458715,
      "grad_norm": 24.810334002988427,
      "learning_rate": 1.9047041344135043e-07,
      "logits/chosen": 2.157437801361084,
      "logits/rejected": 2.676581621170044,
      "logps/chosen": -414.63427734375,
      "logps/rejected": -517.6594848632812,
      "loss": 0.4963,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.4857197999954224,
      "rewards/margins": 1.172404408454895,
      "rewards/rejected": -2.6581242084503174,
      "step": 270
    },
    {
      "epoch": 0.6422018348623854,
      "grad_norm": 22.618579944183196,
      "learning_rate": 1.7122294549409482e-07,
      "logits/chosen": 1.9851901531219482,
      "logits/rejected": 2.9032249450683594,
      "logps/chosen": -424.8306579589844,
      "logps/rejected": -531.9037475585938,
      "loss": 0.5145,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -1.5183179378509521,
      "rewards/margins": 1.1627616882324219,
      "rewards/rejected": -2.681079387664795,
      "step": 280
    },
    {
      "epoch": 0.6651376146788991,
      "grad_norm": 29.728057206179926,
      "learning_rate": 1.524811797977383e-07,
      "logits/chosen": 1.9606460332870483,
      "logits/rejected": 3.0971603393554688,
      "logps/chosen": -424.40155029296875,
      "logps/rejected": -510.2555236816406,
      "loss": 0.4931,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -1.3667072057724,
      "rewards/margins": 1.1966798305511475,
      "rewards/rejected": -2.563387155532837,
      "step": 290
    },
    {
      "epoch": 0.6880733944954128,
      "grad_norm": 32.56443619783417,
      "learning_rate": 1.3436542743979125e-07,
      "logits/chosen": 1.548921823501587,
      "logits/rejected": 2.2454886436462402,
      "logps/chosen": -435.3851623535156,
      "logps/rejected": -480.7173767089844,
      "loss": 0.5109,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.2943581342697144,
      "rewards/margins": 0.9242169260978699,
      "rewards/rejected": -2.2185752391815186,
      "step": 300
    },
    {
      "epoch": 0.6880733944954128,
      "eval_logits/chosen": 1.5762183666229248,
      "eval_logits/rejected": 2.8883702754974365,
      "eval_logps/chosen": -418.6533508300781,
      "eval_logps/rejected": -496.99908447265625,
      "eval_loss": 0.48780226707458496,
      "eval_rewards/accuracies": 0.7284482717514038,
      "eval_rewards/chosen": -1.3356326818466187,
      "eval_rewards/margins": 1.1691336631774902,
      "eval_rewards/rejected": -2.5047662258148193,
      "eval_runtime": 94.5476,
      "eval_samples_per_second": 19.228,
      "eval_steps_per_second": 0.307,
      "step": 300
    },
    {
      "epoch": 0.7110091743119266,
      "grad_norm": 28.8657602827159,
      "learning_rate": 1.1699198087116588e-07,
      "logits/chosen": 1.6565430164337158,
      "logits/rejected": 2.6408467292785645,
      "logps/chosen": -422.91943359375,
      "logps/rejected": -511.9986267089844,
      "loss": 0.5225,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.449542760848999,
      "rewards/margins": 1.0059467554092407,
      "rewards/rejected": -2.4554896354675293,
      "step": 310
    },
    {
      "epoch": 0.7339449541284404,
      "grad_norm": 28.01306501480046,
      "learning_rate": 1.00472367377196e-07,
      "logits/chosen": 2.107029914855957,
      "logits/rejected": 3.2613253593444824,
      "logps/chosen": -415.86041259765625,
      "logps/rejected": -515.1480712890625,
      "loss": 0.4977,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.4162843227386475,
      "rewards/margins": 1.390293836593628,
      "rewards/rejected": -2.8065781593322754,
      "step": 320
    },
    {
      "epoch": 0.7568807339449541,
      "grad_norm": 23.410627824071433,
      "learning_rate": 8.49126331382102e-08,
      "logits/chosen": 2.485326051712036,
      "logits/rejected": 3.1795496940612793,
      "logps/chosen": -442.5235290527344,
      "logps/rejected": -500.9856872558594,
      "loss": 0.4944,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.68001389503479,
      "rewards/margins": 0.8588422536849976,
      "rewards/rejected": -2.538856029510498,
      "step": 330
    },
    {
      "epoch": 0.7798165137614679,
      "grad_norm": 36.6413379461555,
      "learning_rate": 7.041266247556812e-08,
      "logits/chosen": 2.2517406940460205,
      "logits/rejected": 2.871661901473999,
      "logps/chosen": -440.378662109375,
      "logps/rejected": -506.90625,
      "loss": 0.4867,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -1.5238606929779053,
      "rewards/margins": 0.9453619718551636,
      "rewards/rejected": -2.4692225456237793,
      "step": 340
    },
    {
      "epoch": 0.8027522935779816,
      "grad_norm": 32.4776553295204,
      "learning_rate": 5.706553665319955e-08,
      "logits/chosen": 2.2310845851898193,
      "logits/rejected": 3.158881187438965,
      "logps/chosen": -426.0604553222656,
      "logps/rejected": -490.872314453125,
      "loss": 0.5063,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -1.5156935453414917,
      "rewards/margins": 1.016414761543274,
      "rewards/rejected": -2.5321083068847656,
      "step": 350
    },
    {
      "epoch": 0.8027522935779816,
      "eval_logits/chosen": 2.1698544025421143,
      "eval_logits/rejected": 3.3468618392944336,
      "eval_logps/chosen": -433.7904052734375,
      "eval_logps/rejected": -514.8548583984375,
      "eval_loss": 0.4814243018627167,
      "eval_rewards/accuracies": 0.7370689511299133,
      "eval_rewards/chosen": -1.487002968788147,
      "eval_rewards/margins": 1.1963213682174683,
      "eval_rewards/rejected": -2.683324098587036,
      "eval_runtime": 95.0017,
      "eval_samples_per_second": 19.136,
      "eval_steps_per_second": 0.305,
      "step": 350
    },
    {
      "epoch": 0.8256880733944955,
      "grad_norm": 22.48584624269231,
      "learning_rate": 4.4956936350761005e-08,
      "logits/chosen": 2.1727540493011475,
      "logits/rejected": 2.8084352016448975,
      "logps/chosen": -391.9521179199219,
      "logps/rejected": -506.19482421875,
      "loss": 0.4802,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.3963208198547363,
      "rewards/margins": 1.0802565813064575,
      "rewards/rejected": -2.4765772819519043,
      "step": 360
    },
    {
      "epoch": 0.8486238532110092,
      "grad_norm": 26.981108661605717,
      "learning_rate": 3.416459164418123e-08,
      "logits/chosen": 1.7840194702148438,
      "logits/rejected": 2.861525535583496,
      "logps/chosen": -447.0101013183594,
      "logps/rejected": -527.9534912109375,
      "loss": 0.4861,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.4742560386657715,
      "rewards/margins": 1.179532527923584,
      "rewards/rejected": -2.6537883281707764,
      "step": 370
    },
    {
      "epoch": 0.8715596330275229,
      "grad_norm": 32.47929591425802,
      "learning_rate": 2.475778302439524e-08,
      "logits/chosen": 2.0354526042938232,
      "logits/rejected": 3.16379976272583,
      "logps/chosen": -440.89593505859375,
      "logps/rejected": -530.92919921875,
      "loss": 0.4928,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.4451792240142822,
      "rewards/margins": 1.287550687789917,
      "rewards/rejected": -2.732729911804199,
      "step": 380
    },
    {
      "epoch": 0.8944954128440367,
      "grad_norm": 33.85969586257753,
      "learning_rate": 1.6796896657433805e-08,
      "logits/chosen": 2.4787354469299316,
      "logits/rejected": 3.341116428375244,
      "logps/chosen": -407.4842834472656,
      "logps/rejected": -490.75555419921875,
      "loss": 0.503,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.5552680492401123,
      "rewards/margins": 1.0592255592346191,
      "rewards/rejected": -2.6144936084747314,
      "step": 390
    },
    {
      "epoch": 0.9174311926605505,
      "grad_norm": 23.099468591696,
      "learning_rate": 1.0333036740834855e-08,
      "logits/chosen": 2.7448534965515137,
      "logits/rejected": 3.38175630569458,
      "logps/chosen": -370.7669372558594,
      "logps/rejected": -485.293701171875,
      "loss": 0.4936,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.4620729684829712,
      "rewards/margins": 1.098707914352417,
      "rewards/rejected": -2.5607810020446777,
      "step": 400
    },
    {
      "epoch": 0.9174311926605505,
      "eval_logits/chosen": 2.333301305770874,
      "eval_logits/rejected": 3.4838266372680664,
      "eval_logps/chosen": -443.5758361816406,
      "eval_logps/rejected": -526.9686279296875,
      "eval_loss": 0.4815457761287689,
      "eval_rewards/accuracies": 0.732758641242981,
      "eval_rewards/chosen": -1.5848578214645386,
      "eval_rewards/margins": 1.2196037769317627,
      "eval_rewards/rejected": -2.8044614791870117,
      "eval_runtime": 95.9268,
      "eval_samples_per_second": 18.952,
      "eval_steps_per_second": 0.302,
      "step": 400
    },
    {
      "epoch": 0.9403669724770642,
      "grad_norm": 24.65582545776771,
      "learning_rate": 5.4076974448211685e-09,
      "logits/chosen": 2.678440809249878,
      "logits/rejected": 3.7608656883239746,
      "logps/chosen": -434.31890869140625,
      "logps/rejected": -525.4384765625,
      "loss": 0.4901,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.633670449256897,
      "rewards/margins": 1.2402476072311401,
      "rewards/rejected": -2.873918056488037,
      "step": 410
    },
    {
      "epoch": 0.963302752293578,
      "grad_norm": 24.35589696145217,
      "learning_rate": 2.052496544188487e-09,
      "logits/chosen": 2.744503974914551,
      "logits/rejected": 3.5964274406433105,
      "logps/chosen": -424.82373046875,
      "logps/rejected": -535.3244018554688,
      "loss": 0.4891,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.6538474559783936,
      "rewards/margins": 1.2406232357025146,
      "rewards/rejected": -2.894470691680908,
      "step": 420
    },
    {
      "epoch": 0.9862385321100917,
      "grad_norm": 24.120958058828865,
      "learning_rate": 2.889724508297886e-10,
      "logits/chosen": 2.3434717655181885,
      "logits/rejected": 3.6963977813720703,
      "logps/chosen": -459.7005310058594,
      "logps/rejected": -510.67413330078125,
      "loss": 0.4816,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.5652509927749634,
      "rewards/margins": 1.1011250019073486,
      "rewards/rejected": -2.6663756370544434,
      "step": 430
    },
    {
      "epoch": 1.0,
      "step": 436,
      "total_flos": 0.0,
      "train_loss": 0.5380998532706445,
      "train_runtime": 11735.5354,
      "train_samples_per_second": 4.751,
      "train_steps_per_second": 0.037
    }
  ],
  "logging_steps": 10,
  "max_steps": 436,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}