{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9989462592202318,
  "eval_steps": 100,
  "global_step": 474,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002107481559536354,
      "grad_norm": 6.614315138552694,
      "learning_rate": 1.0416666666666666e-08,
      "logits/chosen": -2.9142751693725586,
      "logits/rejected": -2.8893983364105225,
      "logps/chosen": -285.5545654296875,
      "logps/rejected": -301.5740966796875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02107481559536354,
      "grad_norm": 6.815845823783859,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": -2.927753448486328,
      "logits/rejected": -2.894536018371582,
      "logps/chosen": -286.5064392089844,
      "logps/rejected": -292.89776611328125,
      "loss": 0.693,
      "rewards/accuracies": 0.4548611044883728,
      "rewards/chosen": -8.25391907710582e-05,
      "rewards/margins": 0.0003568828688003123,
      "rewards/rejected": -0.00043942214688286185,
      "step": 10
    },
    {
      "epoch": 0.04214963119072708,
      "grad_norm": 7.858560008275229,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -2.9354426860809326,
      "logits/rejected": -2.9020581245422363,
      "logps/chosen": -306.3351135253906,
      "logps/rejected": -318.660888671875,
      "loss": 0.6894,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.005721195600926876,
      "rewards/margins": 0.0084818284958601,
      "rewards/rejected": -0.0027606328949332237,
      "step": 20
    },
    {
      "epoch": 0.06322444678609063,
      "grad_norm": 7.846241639500232,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.920705795288086,
      "logits/rejected": -2.904344081878662,
      "logps/chosen": -298.88543701171875,
      "logps/rejected": -309.48614501953125,
      "loss": 0.6698,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.026271456852555275,
      "rewards/margins": 0.04990110918879509,
      "rewards/rejected": -0.023629654198884964,
      "step": 30
    },
    {
      "epoch": 0.08429926238145416,
      "grad_norm": 8.807848819773,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.92798113822937,
      "logits/rejected": -2.9023869037628174,
      "logps/chosen": -287.96044921875,
      "logps/rejected": -320.7830505371094,
      "loss": 0.6116,
      "rewards/accuracies": 0.8218749761581421,
      "rewards/chosen": -0.0031547886319458485,
      "rewards/margins": 0.1678742617368698,
      "rewards/rejected": -0.17102906107902527,
      "step": 40
    },
    {
      "epoch": 0.1053740779768177,
      "grad_norm": 9.9820742658112,
      "learning_rate": 4.999728079043996e-07,
      "logits/chosen": -3.0243992805480957,
      "logits/rejected": -2.9876396656036377,
      "logps/chosen": -386.5646057128906,
      "logps/rejected": -462.30694580078125,
      "loss": 0.4878,
      "rewards/accuracies": 0.828125,
      "rewards/chosen": -0.7237198948860168,
      "rewards/margins": 0.6255429983139038,
      "rewards/rejected": -1.3492629528045654,
      "step": 50
    },
    {
      "epoch": 0.12644889357218125,
      "grad_norm": 13.622612239694844,
      "learning_rate": 4.990217055187362e-07,
      "logits/chosen": -3.105769395828247,
      "logits/rejected": -3.083045482635498,
      "logps/chosen": -568.8998413085938,
      "logps/rejected": -771.6845703125,
      "loss": 0.3721,
      "rewards/accuracies": 0.828125,
      "rewards/chosen": -2.756446361541748,
      "rewards/margins": 1.8887875080108643,
      "rewards/rejected": -4.645233631134033,
      "step": 60
    },
    {
      "epoch": 0.1475237091675448,
      "grad_norm": 13.789853181411452,
      "learning_rate": 4.967169078520476e-07,
      "logits/chosen": -3.179182291030884,
      "logits/rejected": -3.1583375930786133,
      "logps/chosen": -573.2684326171875,
      "logps/rejected": -837.7855224609375,
      "loss": 0.3064,
      "rewards/accuracies": 0.828125,
      "rewards/chosen": -2.7872049808502197,
      "rewards/margins": 2.5140316486358643,
      "rewards/rejected": -5.301236629486084,
      "step": 70
    },
    {
      "epoch": 0.16859852476290832,
      "grad_norm": 22.075616248232635,
      "learning_rate": 4.930709439074527e-07,
      "logits/chosen": -3.175481081008911,
      "logits/rejected": -3.1578330993652344,
      "logps/chosen": -679.1819458007812,
      "logps/rejected": -1066.6025390625,
      "loss": 0.2837,
      "rewards/accuracies": 0.840624988079071,
      "rewards/chosen": -3.896761417388916,
      "rewards/margins": 3.724388599395752,
      "rewards/rejected": -7.62114953994751,
      "step": 80
    },
    {
      "epoch": 0.18967334035827185,
      "grad_norm": 15.685694072094627,
      "learning_rate": 4.881036333395328e-07,
      "logits/chosen": -3.170954942703247,
      "logits/rejected": -3.155890941619873,
      "logps/chosen": -609.4580688476562,
      "logps/rejected": -974.7374267578125,
      "loss": 0.268,
      "rewards/accuracies": 0.840624988079071,
      "rewards/chosen": -3.0708985328674316,
      "rewards/margins": 3.6035964488983154,
      "rewards/rejected": -6.674495697021484,
      "step": 90
    },
    {
      "epoch": 0.2107481559536354,
      "grad_norm": 18.90111640126689,
      "learning_rate": 4.818419787136311e-07,
      "logits/chosen": -3.083106279373169,
      "logits/rejected": -3.054839611053467,
      "logps/chosen": -578.0242919921875,
      "logps/rejected": -1001.0924682617188,
      "loss": 0.2558,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -2.942237138748169,
      "rewards/margins": 4.090949058532715,
      "rewards/rejected": -7.033186912536621,
      "step": 100
    },
    {
      "epoch": 0.2107481559536354,
      "eval_logits/chosen": -3.124534845352173,
      "eval_logits/rejected": -3.112900733947754,
      "eval_logps/chosen": -694.1878051757812,
      "eval_logps/rejected": -1132.10791015625,
      "eval_loss": 0.25145235657691956,
      "eval_rewards/accuracies": 0.8362500071525574,
      "eval_rewards/chosen": -3.3401803970336914,
      "eval_rewards/margins": 4.293397426605225,
      "eval_rewards/rejected": -7.633577823638916,
      "eval_runtime": 294.2421,
      "eval_samples_per_second": 21.724,
      "eval_steps_per_second": 0.34,
      "step": 100
    },
    {
      "epoch": 0.23182297154899895,
      "grad_norm": 16.556417816253123,
      "learning_rate": 4.7432001871846694e-07,
      "logits/chosen": -3.0214481353759766,
      "logits/rejected": -2.9939751625061035,
      "logps/chosen": -565.4187622070312,
      "logps/rejected": -990.65478515625,
      "loss": 0.2415,
      "rewards/accuracies": 0.846875011920929,
      "rewards/chosen": -2.8635013103485107,
      "rewards/margins": 4.140561103820801,
      "rewards/rejected": -7.004061698913574,
      "step": 110
    },
    {
      "epoch": 0.2528977871443625,
      "grad_norm": 36.18904544734523,
      "learning_rate": 4.655786431300069e-07,
      "logits/chosen": -3.0037097930908203,
      "logits/rejected": -2.979717493057251,
      "logps/chosen": -620.5673828125,
      "logps/rejected": -1151.0147705078125,
      "loss": 0.2443,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -3.3321540355682373,
      "rewards/margins": 5.147377967834473,
      "rewards/rejected": -8.479532241821289,
      "step": 120
    },
    {
      "epoch": 0.273972602739726,
      "grad_norm": 18.70901795335142,
      "learning_rate": 4.55665370532461e-07,
      "logits/chosen": -2.97050404548645,
      "logits/rejected": -2.9448835849761963,
      "logps/chosen": -572.4600830078125,
      "logps/rejected": -1059.3128662109375,
      "loss": 0.2432,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -2.720818519592285,
      "rewards/margins": 4.68039608001709,
      "rewards/rejected": -7.401214599609375,
      "step": 130
    },
    {
      "epoch": 0.2950474183350896,
      "grad_norm": 18.2112429436409,
      "learning_rate": 4.446340900047223e-07,
      "logits/chosen": -2.942176342010498,
      "logits/rejected": -2.9192707538604736,
      "logps/chosen": -543.0445556640625,
      "logps/rejected": -1057.8526611328125,
      "loss": 0.2351,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": -2.618699789047241,
      "rewards/margins": 4.893606185913086,
      "rewards/rejected": -7.512306213378906,
      "step": 140
    },
    {
      "epoch": 0.31612223393045313,
      "grad_norm": 19.519419751801294,
      "learning_rate": 4.325447681764586e-07,
      "logits/chosen": -2.861685276031494,
      "logits/rejected": -2.836148738861084,
      "logps/chosen": -572.3717041015625,
      "logps/rejected": -1162.0958251953125,
      "loss": 0.2207,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -3.064368486404419,
      "rewards/margins": 5.642943382263184,
      "rewards/rejected": -8.707311630249023,
      "step": 150
    },
    {
      "epoch": 0.33719704952581664,
      "grad_norm": 21.374254957458422,
      "learning_rate": 4.1946312324631276e-07,
      "logits/chosen": -2.8398401737213135,
      "logits/rejected": -2.79673433303833,
      "logps/chosen": -608.8257446289062,
      "logps/rejected": -1237.2921142578125,
      "loss": 0.2219,
      "rewards/accuracies": 0.878125011920929,
      "rewards/chosen": -3.0954906940460205,
      "rewards/margins": 6.1136980056762695,
      "rewards/rejected": -9.209188461303711,
      "step": 160
    },
    {
      "epoch": 0.3582718651211802,
      "grad_norm": 22.91268457823563,
      "learning_rate": 4.0546026773426835e-07,
      "logits/chosen": -2.7913198471069336,
      "logits/rejected": -2.751511335372925,
      "logps/chosen": -636.1942749023438,
      "logps/rejected": -1279.91357421875,
      "loss": 0.2176,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": -3.3635153770446777,
      "rewards/margins": 6.284361839294434,
      "rewards/rejected": -9.647878646850586,
      "step": 170
    },
    {
      "epoch": 0.3793466807165437,
      "grad_norm": 17.21799076325381,
      "learning_rate": 3.9061232191019517e-07,
      "logits/chosen": -2.7712769508361816,
      "logits/rejected": -2.7276077270507812,
      "logps/chosen": -575.2244873046875,
      "logps/rejected": -1119.7806396484375,
      "loss": 0.2257,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -2.81962251663208,
      "rewards/margins": 5.422026634216309,
      "rewards/rejected": -8.241649627685547,
      "step": 180
    },
    {
      "epoch": 0.40042149631190727,
      "grad_norm": 20.672214976979642,
      "learning_rate": 3.75e-07,
      "logits/chosen": -2.659935712814331,
      "logits/rejected": -2.618821144104004,
      "logps/chosen": -634.2330322265625,
      "logps/rejected": -1279.434326171875,
      "loss": 0.225,
      "rewards/accuracies": 0.871874988079071,
      "rewards/chosen": -3.622291088104248,
      "rewards/margins": 6.227897644042969,
      "rewards/rejected": -9.850188255310059,
      "step": 190
    },
    {
      "epoch": 0.4214963119072708,
      "grad_norm": 21.322162159621154,
      "learning_rate": 3.5870817141878733e-07,
      "logits/chosen": -2.649911880493164,
      "logits/rejected": -2.598778247833252,
      "logps/chosen": -625.1468505859375,
      "logps/rejected": -1208.257080078125,
      "loss": 0.2204,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -3.3302555084228516,
      "rewards/margins": 5.740489959716797,
      "rewards/rejected": -9.070745468139648,
      "step": 200
    },
    {
      "epoch": 0.4214963119072708,
      "eval_logits/chosen": -2.7329447269439697,
      "eval_logits/rejected": -2.6947696208953857,
      "eval_logps/chosen": -736.4188842773438,
      "eval_logps/rejected": -1306.89501953125,
      "eval_loss": 0.22599634528160095,
      "eval_rewards/accuracies": 0.8587499856948853,
      "eval_rewards/chosen": -3.7624917030334473,
      "eval_rewards/margins": 5.618957042694092,
      "eval_rewards/rejected": -9.381448745727539,
      "eval_runtime": 297.1596,
      "eval_samples_per_second": 21.51,
      "eval_steps_per_second": 0.337,
      "step": 200
    },
    {
      "epoch": 0.44257112750263433,
      "grad_norm": 13.31035102593267,
      "learning_rate": 3.418253994161892e-07,
      "logits/chosen": -2.6757235527038574,
      "logits/rejected": -2.6246249675750732,
      "logps/chosen": -629.2353515625,
      "logps/rejected": -1236.3388671875,
      "loss": 0.2229,
      "rewards/accuracies": 0.8531249761581421,
      "rewards/chosen": -3.4933292865753174,
      "rewards/margins": 5.829395294189453,
      "rewards/rejected": -9.322725296020508,
      "step": 210
    },
    {
      "epoch": 0.4636459430979979,
      "grad_norm": 16.25315803809995,
      "learning_rate": 3.244434596418139e-07,
      "logits/chosen": -2.7054009437561035,
      "logits/rejected": -2.658482313156128,
      "logps/chosen": -635.1846313476562,
      "logps/rejected": -1274.047119140625,
      "loss": 0.2029,
      "rewards/accuracies": 0.8531249761581421,
      "rewards/chosen": -3.43379282951355,
      "rewards/margins": 6.226017951965332,
      "rewards/rejected": -9.659811019897461,
      "step": 220
    },
    {
      "epoch": 0.48472075869336145,
      "grad_norm": 18.979777410538098,
      "learning_rate": 3.066568412479167e-07,
      "logits/chosen": -2.7195122241973877,
      "logits/rejected": -2.6623740196228027,
      "logps/chosen": -645.3162841796875,
      "logps/rejected": -1333.099365234375,
      "loss": 0.209,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.6151633262634277,
      "rewards/margins": 6.788934230804443,
      "rewards/rejected": -10.404096603393555,
      "step": 230
    },
    {
      "epoch": 0.505795574288725,
      "grad_norm": 14.572953436795377,
      "learning_rate": 2.8856223324132555e-07,
      "logits/chosen": -2.73162579536438,
      "logits/rejected": -2.68361234664917,
      "logps/chosen": -623.8787231445312,
      "logps/rejected": -1263.7047119140625,
      "loss": 0.2131,
      "rewards/accuracies": 0.8656250238418579,
      "rewards/chosen": -3.288330554962158,
      "rewards/margins": 6.264595985412598,
      "rewards/rejected": -9.552926063537598,
      "step": 240
    },
    {
      "epoch": 0.5268703898840885,
      "grad_norm": 15.727626316245573,
      "learning_rate": 2.7025799887683996e-07,
      "logits/chosen": -2.652764081954956,
      "logits/rejected": -2.596436023712158,
      "logps/chosen": -676.56494140625,
      "logps/rejected": -1391.48095703125,
      "loss": 0.2028,
      "rewards/accuracies": 0.8531249761581421,
      "rewards/chosen": -3.7927825450897217,
      "rewards/margins": 7.004532814025879,
      "rewards/rejected": -10.797314643859863,
      "step": 250
    },
    {
      "epoch": 0.547945205479452,
      "grad_norm": 14.772632800998503,
      "learning_rate": 2.518436409493281e-07,
      "logits/chosen": -2.593949794769287,
      "logits/rejected": -2.5094432830810547,
      "logps/chosen": -597.7491455078125,
      "logps/rejected": -1275.842529296875,
      "loss": 0.2136,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -3.1343584060668945,
      "rewards/margins": 6.584381103515625,
      "rewards/rejected": -9.718740463256836,
      "step": 260
    },
    {
      "epoch": 0.5690200210748156,
      "grad_norm": 14.402069920182464,
      "learning_rate": 2.3341926089122408e-07,
      "logits/chosen": -2.5253357887268066,
      "logits/rejected": -2.403183698654175,
      "logps/chosen": -579.7105712890625,
      "logps/rejected": -1186.1314697265625,
      "loss": 0.2185,
      "rewards/accuracies": 0.871874988079071,
      "rewards/chosen": -2.8341760635375977,
      "rewards/margins": 5.931792259216309,
      "rewards/rejected": -8.76596736907959,
      "step": 270
    },
    {
      "epoch": 0.5900948366701791,
      "grad_norm": 20.389043372443385,
      "learning_rate": 2.1508501461579848e-07,
      "logits/chosen": -2.323107957839966,
      "logits/rejected": -2.1116223335266113,
      "logps/chosen": -674.8060913085938,
      "logps/rejected": -1314.333740234375,
      "loss": 0.1983,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -3.736111879348755,
      "rewards/margins": 6.262539386749268,
      "rewards/rejected": -9.998651504516602,
      "step": 280
    },
    {
      "epoch": 0.6111696522655427,
      "grad_norm": 11.479399018053,
      "learning_rate": 1.9694056806426927e-07,
      "logits/chosen": -2.2525992393493652,
      "logits/rejected": -1.9792003631591797,
      "logps/chosen": -641.6122436523438,
      "logps/rejected": -1334.318115234375,
      "loss": 0.2017,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -3.6301467418670654,
      "rewards/margins": 6.797834873199463,
      "rewards/rejected": -10.427982330322266,
      "step": 290
    },
    {
      "epoch": 0.6322444678609063,
      "grad_norm": 18.35346885487197,
      "learning_rate": 1.7908455541642582e-07,
      "logits/chosen": -2.305216073989868,
      "logits/rejected": -1.9890754222869873,
      "logps/chosen": -631.3724365234375,
      "logps/rejected": -1385.84765625,
      "loss": 0.204,
      "rewards/accuracies": 0.8843749761581421,
      "rewards/chosen": -3.2906665802001953,
      "rewards/margins": 7.4196929931640625,
      "rewards/rejected": -10.710360527038574,
      "step": 300
    },
    {
      "epoch": 0.6322444678609063,
      "eval_logits/chosen": -2.410874128341675,
      "eval_logits/rejected": -2.1722898483276367,
      "eval_logps/chosen": -699.75537109375,
      "eval_logps/rejected": -1343.685791015625,
      "eval_loss": 0.2095702439546585,
      "eval_rewards/accuracies": 0.8650000095367432,
      "eval_rewards/chosen": -3.3958559036254883,
      "eval_rewards/margins": 6.353498935699463,
      "eval_rewards/rejected": -9.749356269836426,
      "eval_runtime": 296.8451,
      "eval_samples_per_second": 21.533,
      "eval_steps_per_second": 0.337,
      "step": 300
    },
    {
      "epoch": 0.6533192834562698,
      "grad_norm": 15.446119970622084,
      "learning_rate": 1.616140429099641e-07,
      "logits/chosen": -2.237611770629883,
      "logits/rejected": -1.8744182586669922,
      "logps/chosen": -615.1163330078125,
      "logps/rejected": -1335.3438720703125,
      "loss": 0.1899,
      "rewards/accuracies": 0.871874988079071,
      "rewards/chosen": -3.29571533203125,
      "rewards/margins": 7.18838357925415,
      "rewards/rejected": -10.484098434448242,
      "step": 310
    },
    {
      "epoch": 0.6743940990516333,
      "grad_norm": 14.390167758723463,
      "learning_rate": 1.4462400118323796e-07,
      "logits/chosen": -2.0499463081359863,
      "logits/rejected": -1.637145757675171,
      "logps/chosen": -694.0391235351562,
      "logps/rejected": -1580.717041015625,
      "loss": 0.1836,
      "rewards/accuracies": 0.8968750238418579,
      "rewards/chosen": -4.01133394241333,
      "rewards/margins": 8.607083320617676,
      "rewards/rejected": -12.618416786193848,
      "step": 320
    },
    {
      "epoch": 0.6954689146469969,
      "grad_norm": 24.67534044273584,
      "learning_rate": 1.2820678900980092e-07,
      "logits/chosen": -2.1196751594543457,
      "logits/rejected": -1.6566823720932007,
      "logps/chosen": -678.884033203125,
      "logps/rejected": -1541.774169921875,
      "loss": 0.215,
      "rewards/accuracies": 0.8656250238418579,
      "rewards/chosen": -3.847275495529175,
      "rewards/margins": 8.554319381713867,
      "rewards/rejected": -12.401594161987305,
      "step": 330
    },
    {
      "epoch": 0.7165437302423604,
      "grad_norm": 12.333676271606883,
      "learning_rate": 1.1245165123118359e-07,
      "logits/chosen": -2.0177483558654785,
      "logits/rejected": -1.5283257961273193,
      "logps/chosen": -682.48486328125,
      "logps/rejected": -1445.4771728515625,
      "loss": 0.1979,
      "rewards/accuracies": 0.878125011920929,
      "rewards/chosen": -3.8393378257751465,
      "rewards/margins": 7.487583160400391,
      "rewards/rejected": -11.326921463012695,
      "step": 340
    },
    {
      "epoch": 0.7376185458377239,
      "grad_norm": 19.10139426185179,
      "learning_rate": 9.744423361717321e-08,
      "logits/chosen": -1.8916494846343994,
      "logits/rejected": -1.4087257385253906,
      "logps/chosen": -687.3905639648438,
      "logps/rejected": -1476.8948974609375,
      "loss": 0.1986,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -3.84537935256958,
      "rewards/margins": 7.789330959320068,
      "rewards/rejected": -11.634710311889648,
      "step": 350
    },
    {
      "epoch": 0.7586933614330874,
      "grad_norm": 18.80792334093226,
      "learning_rate": 8.32661172908373e-08,
      "logits/chosen": -1.90652334690094,
      "logits/rejected": -1.4300750494003296,
      "logps/chosen": -688.5945434570312,
      "logps/rejected": -1452.357177734375,
      "loss": 0.195,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -4.015127182006836,
      "rewards/margins": 7.490170955657959,
      "rewards/rejected": -11.505297660827637,
      "step": 360
    },
    {
      "epoch": 0.779768177028451,
      "grad_norm": 16.997599298870874,
      "learning_rate": 6.999437524918569e-08,
      "logits/chosen": -1.9973640441894531,
      "logits/rejected": -1.5239393711090088,
      "logps/chosen": -659.0184936523438,
      "logps/rejected": -1305.682861328125,
      "loss": 0.1912,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -3.621332883834839,
      "rewards/margins": 6.495469570159912,
      "rewards/rejected": -10.116803169250488,
      "step": 370
    },
    {
      "epoch": 0.8008429926238145,
      "grad_norm": 14.084078614585282,
      "learning_rate": 5.770115339024484e-08,
      "logits/chosen": -1.9838542938232422,
      "logits/rejected": -1.5452864170074463,
      "logps/chosen": -661.1948852539062,
      "logps/rejected": -1432.19140625,
      "loss": 0.1824,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -3.7382915019989014,
      "rewards/margins": 7.500711917877197,
      "rewards/rejected": -11.239004135131836,
      "step": 380
    },
    {
      "epoch": 0.821917808219178,
      "grad_norm": 30.61074285987269,
      "learning_rate": 4.645327832410648e-08,
      "logits/chosen": -1.9398887157440186,
      "logits/rejected": -1.4298299551010132,
      "logps/chosen": -683.1571655273438,
      "logps/rejected": -1461.3238525390625,
      "loss": 0.1854,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -4.023918628692627,
      "rewards/margins": 7.673205375671387,
      "rewards/rejected": -11.697123527526855,
      "step": 390
    },
    {
      "epoch": 0.8429926238145417,
      "grad_norm": 22.843164623474657,
      "learning_rate": 3.6311894099908145e-08,
      "logits/chosen": -1.9181022644042969,
      "logits/rejected": -1.380041480064392,
      "logps/chosen": -674.0886840820312,
      "logps/rejected": -1500.5560302734375,
      "loss": 0.1992,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -3.9246249198913574,
      "rewards/margins": 8.195623397827148,
      "rewards/rejected": -12.120248794555664,
      "step": 400
    },
    {
      "epoch": 0.8429926238145417,
      "eval_logits/chosen": -2.0689847469329834,
      "eval_logits/rejected": -1.6459288597106934,
      "eval_logps/chosen": -781.930419921875,
      "eval_logps/rejected": -1544.589111328125,
      "eval_loss": 0.20072707533836365,
      "eval_rewards/accuracies": 0.8650000095367432,
      "eval_rewards/chosen": -4.217607021331787,
      "eval_rewards/margins": 7.540782928466797,
      "eval_rewards/rejected": -11.75838851928711,
      "eval_runtime": 293.2964,
      "eval_samples_per_second": 21.794,
      "eval_steps_per_second": 0.341,
      "step": 400
    },
    {
      "epoch": 0.8640674394099052,
      "grad_norm": 19.85885717669904,
      "learning_rate": 2.7332129823519568e-08,
      "logits/chosen": -1.881338357925415,
      "logits/rejected": -1.340071439743042,
      "logps/chosen": -689.0841064453125,
      "logps/rejected": -1438.508544921875,
      "loss": 0.1926,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -3.9749252796173096,
      "rewards/margins": 7.4057297706604,
      "rewards/rejected": -11.380653381347656,
      "step": 410
    },
    {
      "epoch": 0.8851422550052687,
      "grad_norm": 17.487805159196274,
      "learning_rate": 1.956279997278043e-08,
      "logits/chosen": -1.9210704565048218,
      "logits/rejected": -1.4240577220916748,
      "logps/chosen": -656.7258911132812,
      "logps/rejected": -1409.5032958984375,
      "loss": 0.1887,
      "rewards/accuracies": 0.909375011920929,
      "rewards/chosen": -3.5839180946350098,
      "rewards/margins": 7.417031288146973,
      "rewards/rejected": -11.00094985961914,
      "step": 420
    },
    {
      "epoch": 0.9062170706006323,
      "grad_norm": 20.395649107934336,
      "learning_rate": 1.3046139039394e-08,
      "logits/chosen": -1.998051643371582,
      "logits/rejected": -1.4957393407821655,
      "logps/chosen": -653.7247314453125,
      "logps/rejected": -1424.0172119140625,
      "loss": 0.1918,
      "rewards/accuracies": 0.840624988079071,
      "rewards/chosen": -3.6267688274383545,
      "rewards/margins": 7.642977714538574,
      "rewards/rejected": -11.269746780395508,
      "step": 430
    },
    {
      "epoch": 0.9272918861959958,
      "grad_norm": 29.88901784574838,
      "learning_rate": 7.817571939976286e-09,
      "logits/chosen": -1.935821294784546,
      "logits/rejected": -1.4820038080215454,
      "logps/chosen": -660.3615112304688,
      "logps/rejected": -1442.939697265625,
      "loss": 0.1977,
      "rewards/accuracies": 0.890625,
      "rewards/chosen": -3.6211764812469482,
      "rewards/margins": 7.658024787902832,
      "rewards/rejected": -11.279200553894043,
      "step": 440
    },
    {
      "epoch": 0.9483667017913593,
      "grad_norm": 24.434459876362702,
      "learning_rate": 3.905521444318604e-09,
      "logits/chosen": -1.974572777748108,
      "logits/rejected": -1.43704092502594,
      "logps/chosen": -653.2598266601562,
      "logps/rejected": -1464.2042236328125,
      "loss": 0.1991,
      "rewards/accuracies": 0.846875011920929,
      "rewards/chosen": -3.468592405319214,
      "rewards/margins": 8.024352073669434,
      "rewards/rejected": -11.492944717407227,
      "step": 450
    },
    {
      "epoch": 0.9694415173867229,
      "grad_norm": 15.522136682704172,
      "learning_rate": 1.3312536676942377e-09,
      "logits/chosen": -1.9699881076812744,
      "logits/rejected": -1.501794457435608,
      "logps/chosen": -654.7131958007812,
      "logps/rejected": -1468.2657470703125,
      "loss": 0.1974,
      "rewards/accuracies": 0.878125011920929,
      "rewards/chosen": -3.683525800704956,
      "rewards/margins": 7.84524393081665,
      "rewards/rejected": -11.528770446777344,
      "step": 460
    },
    {
      "epoch": 0.9905163329820864,
      "grad_norm": 13.129882834290527,
      "learning_rate": 1.0876246712074322e-10,
      "logits/chosen": -2.0095367431640625,
      "logits/rejected": -1.5517585277557373,
      "logps/chosen": -640.3642578125,
      "logps/rejected": -1447.8231201171875,
      "loss": 0.1857,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -3.563875675201416,
      "rewards/margins": 7.8468523025512695,
      "rewards/rejected": -11.410726547241211,
      "step": 470
    },
    {
      "epoch": 0.9989462592202318,
      "step": 474,
      "total_flos": 0.0,
      "train_loss": 0.26123517437323235,
      "train_runtime": 15193.8056,
      "train_samples_per_second": 7.992,
      "train_steps_per_second": 0.031
    }
  ],
  "logging_steps": 10,
  "max_steps": 474,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}