{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 329,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "dpo_losses": 0.6931471824645996,
      "epoch": 0.0,
      "grad_norm": 2.094792348544482,
      "learning_rate": 1.5151515151515152e-07,
      "logits/chosen": -2.6820077896118164,
      "logits/rejected": -2.6930205821990967,
      "logps/chosen": -281.2528381347656,
      "logps/rejected": -258.0622253417969,
      "loss": 0.6931,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/margins_max": 0.0,
      "rewards/margins_min": 0.0,
      "rewards/margins_std": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "dpo_losses": 0.6931204795837402,
      "epoch": 0.03,
      "grad_norm": 7.907274986440123,
      "learning_rate": 1.5151515151515152e-06,
      "logits/chosen": -2.7687692642211914,
      "logits/rejected": -2.7543087005615234,
      "logps/chosen": -284.4098815917969,
      "logps/rejected": -249.6673126220703,
      "loss": 0.6967,
      "positive_losses": 0.03447924554347992,
      "rewards/accuracies": 0.4583333432674408,
      "rewards/chosen": 0.0019425028003752232,
      "rewards/margins": 5.710232289857231e-05,
      "rewards/margins_max": 0.003333130618557334,
      "rewards/margins_min": -0.0030696813482791185,
      "rewards/margins_std": 0.002927461639046669,
      "rewards/rejected": 0.0018854006193578243,
      "step": 10
    },
    {
      "dpo_losses": 0.6927222609519958,
      "epoch": 0.06,
      "grad_norm": 1.9785585238747578,
      "learning_rate": 3.0303030303030305e-06,
      "logits/chosen": -2.8341968059539795,
      "logits/rejected": -2.7815167903900146,
      "logps/chosen": -289.8644714355469,
      "logps/rejected": -268.81475830078125,
      "loss": 0.6935,
      "positive_losses": 0.00822601281106472,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.016672641038894653,
      "rewards/margins": 0.0008590976940467954,
      "rewards/margins_max": 0.007572791539132595,
      "rewards/margins_min": -0.004768169019371271,
      "rewards/margins_std": 0.0055806804448366165,
      "rewards/rejected": 0.015813542529940605,
      "step": 20
    },
    {
      "dpo_losses": 0.6912546157836914,
      "epoch": 0.09,
      "grad_norm": 9.993422824831308,
      "learning_rate": 4.5454545454545455e-06,
      "logits/chosen": -2.86173152923584,
      "logits/rejected": -2.814188241958618,
      "logps/chosen": -255.656494140625,
      "logps/rejected": -224.1510467529297,
      "loss": 0.692,
      "positive_losses": 0.00018234252638649195,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.03588816151022911,
      "rewards/margins": 0.00383453955873847,
      "rewards/margins_max": 0.017018133774399757,
      "rewards/margins_min": -0.008637982420623302,
      "rewards/margins_std": 0.011648830957710743,
      "rewards/rejected": 0.0320536233484745,
      "step": 30
    },
    {
      "dpo_losses": 0.6852566003799438,
      "epoch": 0.12,
      "grad_norm": 7.551494566668885,
      "learning_rate": 4.993103596812269e-06,
      "logits/chosen": -2.8286097049713135,
      "logits/rejected": -2.7643656730651855,
      "logps/chosen": -311.00653076171875,
      "logps/rejected": -219.75357055664062,
      "loss": 0.6928,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.06535681337118149,
      "rewards/margins": 0.016044817864894867,
      "rewards/margins_max": 0.04139421135187149,
      "rewards/margins_min": -0.006711454130709171,
      "rewards/margins_std": 0.021322522312402725,
      "rewards/rejected": 0.049311988055706024,
      "step": 40
    },
    {
      "dpo_losses": 0.6857399940490723,
      "epoch": 0.15,
      "grad_norm": 7.613114395779342,
      "learning_rate": 4.95941685833271e-06,
      "logits/chosen": -2.7872977256774902,
      "logits/rejected": -2.8009393215179443,
      "logps/chosen": -235.7885284423828,
      "logps/rejected": -274.2627258300781,
      "loss": 0.6875,
      "positive_losses": 0.015755081549286842,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.07146342098712921,
      "rewards/margins": 0.0152506772428751,
      "rewards/margins_max": 0.058862246572971344,
      "rewards/margins_min": -0.022146200761198997,
      "rewards/margins_std": 0.03581809252500534,
      "rewards/rejected": 0.056212734431028366,
      "step": 50
    },
    {
      "dpo_losses": 0.680141806602478,
      "epoch": 0.18,
      "grad_norm": 7.505695469277918,
      "learning_rate": 4.898051734555676e-06,
      "logits/chosen": -2.8290228843688965,
      "logits/rejected": -2.839033603668213,
      "logps/chosen": -312.344482421875,
      "logps/rejected": -276.32183837890625,
      "loss": 0.6846,
      "positive_losses": 0.05971794202923775,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.0957661122083664,
      "rewards/margins": 0.027223944664001465,
      "rewards/margins_max": 0.09758423268795013,
      "rewards/margins_min": -0.04277171194553375,
      "rewards/margins_std": 0.061675649136304855,
      "rewards/rejected": 0.06854216754436493,
      "step": 60
    },
    {
      "dpo_losses": 0.6815682649612427,
      "epoch": 0.21,
      "grad_norm": 1.5360756176524522,
      "learning_rate": 4.809698831278217e-06,
      "logits/chosen": -2.736055374145508,
      "logits/rejected": -2.7235770225524902,
      "logps/chosen": -257.50408935546875,
      "logps/rejected": -239.8359375,
      "loss": 0.694,
      "positive_losses": 0.1467689573764801,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.08973346650600433,
      "rewards/margins": 0.024347994476556778,
      "rewards/margins_max": 0.0899825394153595,
      "rewards/margins_min": -0.03823970630764961,
      "rewards/margins_std": 0.05780817195773125,
      "rewards/rejected": 0.06538547575473785,
      "step": 70
    },
    {
      "dpo_losses": 0.6777443885803223,
      "epoch": 0.24,
      "grad_norm": 7.6098783040686255,
      "learning_rate": 4.695352475952706e-06,
      "logits/chosen": -2.825540542602539,
      "logits/rejected": -2.8001182079315186,
      "logps/chosen": -272.2143859863281,
      "logps/rejected": -268.1064147949219,
      "loss": 0.6843,
      "positive_losses": 0.019883345812559128,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.10059104114770889,
      "rewards/margins": 0.03249071165919304,
      "rewards/margins_max": 0.10248656570911407,
      "rewards/margins_min": -0.04477204009890556,
      "rewards/margins_std": 0.06633640825748444,
      "rewards/rejected": 0.06810032576322556,
      "step": 80
    },
    {
      "dpo_losses": 0.6827337741851807,
      "epoch": 0.27,
      "grad_norm": 13.032468304298272,
      "learning_rate": 4.556299527482029e-06,
      "logits/chosen": -2.7769341468811035,
      "logits/rejected": -2.7250094413757324,
      "logps/chosen": -285.997314453125,
      "logps/rejected": -284.3042297363281,
      "loss": 0.6924,
      "positive_losses": 0.0755772590637207,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.09554663300514221,
      "rewards/margins": 0.022927606478333473,
      "rewards/margins_max": 0.11027027666568756,
      "rewards/margins_min": -0.06920486688613892,
      "rewards/margins_std": 0.0804494172334671,
      "rewards/rejected": 0.07261903584003448,
      "step": 90
    },
    {
      "dpo_losses": 0.6703774333000183,
      "epoch": 0.3,
      "grad_norm": 1.6704382025698337,
      "learning_rate": 4.394104893853007e-06,
      "logits/chosen": -2.8705239295959473,
      "logits/rejected": -2.832496166229248,
      "logps/chosen": -262.93939208984375,
      "logps/rejected": -251.376953125,
      "loss": 0.6896,
      "positive_losses": 0.10124854743480682,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.10522875934839249,
      "rewards/margins": 0.04796325042843819,
      "rewards/margins_max": 0.13638350367546082,
      "rewards/margins_min": -0.02367212623357773,
      "rewards/margins_std": 0.07211659848690033,
      "rewards/rejected": 0.0572655126452446,
      "step": 100
    },
    {
      "epoch": 0.3,
      "eval_dpo_losses": 0.6766990423202515,
      "eval_logits/chosen": -2.782886028289795,
      "eval_logits/rejected": -2.7451796531677246,
      "eval_logps/chosen": -271.81536865234375,
      "eval_logps/rejected": -249.28070068359375,
      "eval_loss": 0.692669689655304,
      "eval_positive_losses": 0.05891977623105049,
      "eval_rewards/accuracies": 0.6710000038146973,
      "eval_rewards/chosen": 0.12778052687644958,
      "eval_rewards/margins": 0.03479834645986557,
      "eval_rewards/margins_max": 0.15719929337501526,
      "eval_rewards/margins_min": -0.0766773670911789,
      "eval_rewards/margins_std": 0.07707200944423676,
      "eval_rewards/rejected": 0.09298218786716461,
      "eval_runtime": 429.6713,
      "eval_samples_per_second": 4.655,
      "eval_steps_per_second": 0.291,
      "step": 100
    },
    {
      "dpo_losses": 0.669114887714386,
      "epoch": 0.33,
      "grad_norm": 2.0588703005951716,
      "learning_rate": 4.210593920593201e-06,
      "logits/chosen": -2.732736110687256,
      "logits/rejected": -2.717144727706909,
      "logps/chosen": -297.5173645019531,
      "logps/rejected": -227.2909698486328,
      "loss": 0.6716,
      "positive_losses": 0.005372238345444202,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.14238855242729187,
      "rewards/margins": 0.06108074635267258,
      "rewards/margins_max": 0.21817930042743683,
      "rewards/margins_min": -0.03649354726076126,
      "rewards/margins_std": 0.11923160403966904,
      "rewards/rejected": 0.08130781352519989,
      "step": 110
    },
    {
      "dpo_losses": 0.6759502291679382,
      "epoch": 0.36,
      "grad_norm": 2.114750885236866,
      "learning_rate": 4.007831848252212e-06,
      "logits/chosen": -2.71893572807312,
      "logits/rejected": -2.7199361324310303,
      "logps/chosen": -312.4023132324219,
      "logps/rejected": -266.9503479003906,
      "loss": 0.6841,
      "positive_losses": 0.1982906311750412,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.11162187904119492,
      "rewards/margins": 0.03713056445121765,
      "rewards/margins_max": 0.13481460511684418,
      "rewards/margins_min": -0.06108905002474785,
      "rewards/margins_std": 0.08896367251873016,
      "rewards/rejected": 0.07449130713939667,
      "step": 120
    },
    {
      "dpo_losses": 0.6827476024627686,
      "epoch": 0.4,
      "grad_norm": 1.6378871780311928,
      "learning_rate": 3.7881005700938635e-06,
      "logits/chosen": -2.7933266162872314,
      "logits/rejected": -2.803476333618164,
      "logps/chosen": -254.1282196044922,
      "logps/rejected": -224.1819610595703,
      "loss": 0.6872,
      "positive_losses": 0.06588058173656464,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.12265358120203018,
      "rewards/margins": 0.023296961560845375,
      "rewards/margins_max": 0.1208866611123085,
      "rewards/margins_min": -0.07595702260732651,
      "rewards/margins_std": 0.08657418191432953,
      "rewards/rejected": 0.09935661405324936,
      "step": 130
    },
    {
      "dpo_losses": 0.6661409735679626,
      "epoch": 0.43,
      "grad_norm": 1.861594662390886,
      "learning_rate": 3.553872951569236e-06,
      "logits/chosen": -2.7491869926452637,
      "logits/rejected": -2.7360832691192627,
      "logps/chosen": -280.62664794921875,
      "logps/rejected": -262.3188781738281,
      "loss": 0.6745,
      "positive_losses": 0.11735143512487411,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.13766564428806305,
      "rewards/margins": 0.05706212669610977,
      "rewards/margins_max": 0.1553352326154709,
      "rewards/margins_min": -0.027615731582045555,
      "rewards/margins_std": 0.08170460909605026,
      "rewards/rejected": 0.08060351014137268,
      "step": 140
    },
    {
      "dpo_losses": 0.6667992472648621,
      "epoch": 0.46,
      "grad_norm": 1.3867667176452936,
      "learning_rate": 3.307785000580313e-06,
      "logits/chosen": -2.8069396018981934,
      "logits/rejected": -2.788170099258423,
      "logps/chosen": -256.98443603515625,
      "logps/rejected": -237.07907104492188,
      "loss": 0.6927,
      "positive_losses": 0.10638203471899033,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.1365698128938675,
      "rewards/margins": 0.05615685135126114,
      "rewards/margins_max": 0.1702376753091812,
      "rewards/margins_min": -0.057074159383773804,
      "rewards/margins_std": 0.099973663687706,
      "rewards/rejected": 0.08041295409202576,
      "step": 150
    },
    {
      "dpo_losses": 0.678627610206604,
      "epoch": 0.49,
      "grad_norm": 2.064113448248422,
      "learning_rate": 3.052606201731325e-06,
      "logits/chosen": -2.7706875801086426,
      "logits/rejected": -2.7537920475006104,
      "logps/chosen": -240.9077606201172,
      "logps/rejected": -229.16552734375,
      "loss": 0.6928,
      "positive_losses": 0.03613028675317764,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.14596374332904816,
      "rewards/margins": 0.03164920210838318,
      "rewards/margins_max": 0.13097994029521942,
      "rewards/margins_min": -0.0587020106613636,
      "rewards/margins_std": 0.08477158099412918,
      "rewards/rejected": 0.11431453377008438,
      "step": 160
    },
    {
      "dpo_losses": 0.6667446494102478,
      "epoch": 0.52,
      "grad_norm": 1.6001303619683518,
      "learning_rate": 2.7912083484274266e-06,
      "logits/chosen": -2.7891182899475098,
      "logits/rejected": -2.7103054523468018,
      "logps/chosen": -287.3363342285156,
      "logps/rejected": -262.26116943359375,
      "loss": 0.67,
      "positive_losses": 0.011825943365693092,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.16382232308387756,
      "rewards/margins": 0.05722727254033089,
      "rewards/margins_max": 0.15611840784549713,
      "rewards/margins_min": -0.05285083130002022,
      "rewards/margins_std": 0.09372057765722275,
      "rewards/rejected": 0.10659502446651459,
      "step": 170
    },
    {
      "dpo_losses": 0.674170196056366,
      "epoch": 0.55,
      "grad_norm": 9.426032570856917,
      "learning_rate": 2.526533223585641e-06,
      "logits/chosen": -2.815913438796997,
      "logits/rejected": -2.7526583671569824,
      "logps/chosen": -241.7661590576172,
      "logps/rejected": -218.5923614501953,
      "loss": 0.6845,
      "positive_losses": 0.0364038459956646,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.1436757743358612,
      "rewards/margins": 0.040739208459854126,
      "rewards/margins_max": 0.14018966257572174,
      "rewards/margins_min": -0.03748547285795212,
      "rewards/margins_std": 0.07952649146318436,
      "rewards/rejected": 0.10293656587600708,
      "step": 180
    },
    {
      "dpo_losses": 0.6727156639099121,
      "epoch": 0.58,
      "grad_norm": 9.828820911992421,
      "learning_rate": 2.2615594926807554e-06,
      "logits/chosen": -2.754612922668457,
      "logits/rejected": -2.739302158355713,
      "logps/chosen": -284.7493896484375,
      "logps/rejected": -259.38885498046875,
      "loss": 0.7021,
      "positive_losses": 0.08002634346485138,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.1583152711391449,
      "rewards/margins": 0.04616940766572952,
      "rewards/margins_max": 0.19369181990623474,
      "rewards/margins_min": -0.09055180847644806,
      "rewards/margins_std": 0.12469442188739777,
      "rewards/rejected": 0.11214585602283478,
      "step": 190
    },
    {
      "dpo_losses": 0.6642587184906006,
      "epoch": 0.61,
      "grad_norm": 12.372285795702703,
      "learning_rate": 1.9992691817133025e-06,
      "logits/chosen": -2.760648727416992,
      "logits/rejected": -2.7311196327209473,
      "logps/chosen": -267.6399230957031,
      "logps/rejected": -280.528076171875,
      "loss": 0.6919,
      "positive_losses": 0.2932780385017395,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.13960692286491394,
      "rewards/margins": 0.06209836155176163,
      "rewards/margins_max": 0.1877867430448532,
      "rewards/margins_min": -0.06410057097673416,
      "rewards/margins_std": 0.11066991090774536,
      "rewards/rejected": 0.07750856131315231,
      "step": 200
    },
    {
      "epoch": 0.61,
      "eval_dpo_losses": 0.6688500046730042,
      "eval_logits/chosen": -2.785562753677368,
      "eval_logits/rejected": -2.7480554580688477,
      "eval_logps/chosen": -270.2085876464844,
      "eval_logps/rejected": -249.3992919921875,
      "eval_loss": 0.6864475011825562,
      "eval_positive_losses": 0.09515316039323807,
      "eval_rewards/accuracies": 0.6830000281333923,
      "eval_rewards/chosen": 0.1438482403755188,
      "eval_rewards/margins": 0.052051883190870285,
      "eval_rewards/margins_max": 0.21146319806575775,
      "eval_rewards/margins_min": -0.09425032883882523,
      "eval_rewards/margins_std": 0.10122820734977722,
      "eval_rewards/rejected": 0.09179634600877762,
      "eval_runtime": 428.7542,
      "eval_samples_per_second": 4.665,
      "eval_steps_per_second": 0.292,
      "step": 200
    },
    {
      "dpo_losses": 0.6639125943183899,
      "epoch": 0.64,
      "grad_norm": 5.938345116150278,
      "learning_rate": 1.742614117358029e-06,
      "logits/chosen": -2.7786309719085693,
      "logits/rejected": -2.737117290496826,
      "logps/chosen": -291.00030517578125,
      "logps/rejected": -280.262451171875,
      "loss": 0.6787,
      "positive_losses": 0.15116862952709198,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.14316870272159576,
      "rewards/margins": 0.06285777688026428,
      "rewards/margins_max": 0.1971181035041809,
      "rewards/margins_min": -0.05821852758526802,
      "rewards/margins_std": 0.11372953653335571,
      "rewards/rejected": 0.08031092584133148,
      "step": 210
    },
    {
      "dpo_losses": 0.6715448498725891,
      "epoch": 0.67,
      "grad_norm": 1.9113410130667947,
      "learning_rate": 1.4944827069769125e-06,
      "logits/chosen": -2.8290793895721436,
      "logits/rejected": -2.8052849769592285,
      "logps/chosen": -296.83233642578125,
      "logps/rejected": -254.8493194580078,
      "loss": 0.6738,
      "positive_losses": 0.0,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.16140839457511902,
      "rewards/margins": 0.04584597796201706,
      "rewards/margins_max": 0.14889702200889587,
      "rewards/margins_min": -0.04799387976527214,
      "rewards/margins_std": 0.08770614862442017,
      "rewards/rejected": 0.11556243896484375,
      "step": 220
    },
    {
      "dpo_losses": 0.6808522939682007,
      "epoch": 0.7,
      "grad_norm": 14.916786804283728,
      "learning_rate": 1.257667432355893e-06,
      "logits/chosen": -2.79703950881958,
      "logits/rejected": -2.8196463584899902,
      "logps/chosen": -274.06671142578125,
      "logps/rejected": -251.3921661376953,
      "loss": 0.6919,
      "positive_losses": 0.1536567509174347,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.1457880586385727,
      "rewards/margins": 0.032118141651153564,
      "rewards/margins_max": 0.19181016087532043,
      "rewards/margins_min": -0.09802166372537613,
      "rewards/margins_std": 0.13066089153289795,
      "rewards/rejected": 0.11366990953683853,
      "step": 230
    },
    {
      "dpo_losses": 0.6678847074508667,
      "epoch": 0.73,
      "grad_norm": 2.0373215532852718,
      "learning_rate": 1.0348334229922677e-06,
      "logits/chosen": -2.854497194290161,
      "logits/rejected": -2.8086347579956055,
      "logps/chosen": -276.0729064941406,
      "logps/rejected": -265.6085510253906,
      "loss": 0.689,
      "positive_losses": 0.08907928317785263,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.14944827556610107,
      "rewards/margins": 0.05373426154255867,
      "rewards/margins_max": 0.16546599566936493,
      "rewards/margins_min": -0.03850485011935234,
      "rewards/margins_std": 0.093926340341568,
      "rewards/rejected": 0.0957140102982521,
      "step": 240
    },
    {
      "dpo_losses": 0.6652002334594727,
      "epoch": 0.76,
      "grad_norm": 2.0611778414132793,
      "learning_rate": 8.284884626103165e-07,
      "logits/chosen": -2.7938308715820312,
      "logits/rejected": -2.762302875518799,
      "logps/chosen": -285.65093994140625,
      "logps/rejected": -295.174560546875,
      "loss": 0.7577,
      "positive_losses": 0.03922748565673828,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.15438929200172424,
      "rewards/margins": 0.06061772257089615,
      "rewards/margins_max": 0.1899864226579666,
      "rewards/margins_min": -0.060655929148197174,
      "rewards/margins_std": 0.11546076834201813,
      "rewards/rejected": 0.09377159178256989,
      "step": 250
    },
    {
      "dpo_losses": 0.6641849279403687,
      "epoch": 0.79,
      "grad_norm": 5.534968963232783,
      "learning_rate": 6.409547664531734e-07,
      "logits/chosen": -2.817701816558838,
      "logits/rejected": -2.7872586250305176,
      "logps/chosen": -316.77471923828125,
      "logps/rejected": -301.3828125,
      "loss": 0.6791,
      "positive_losses": 0.059366799890995026,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.17207232117652893,
      "rewards/margins": 0.060843367129564285,
      "rewards/margins_max": 0.1557174026966095,
      "rewards/margins_min": -0.024733999744057655,
      "rewards/margins_std": 0.0783897191286087,
      "rewards/rejected": 0.11122896522283554,
      "step": 260
    },
    {
      "dpo_losses": 0.6656020879745483,
      "epoch": 0.82,
      "grad_norm": 2.074871528049425,
      "learning_rate": 4.743428469705336e-07,
      "logits/chosen": -2.7688679695129395,
      "logits/rejected": -2.767603874206543,
      "logps/chosen": -289.3965759277344,
      "logps/rejected": -299.40924072265625,
      "loss": 0.6782,
      "positive_losses": 0.13356761634349823,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.14411255717277527,
      "rewards/margins": 0.058389417827129364,
      "rewards/margins_max": 0.1643931269645691,
      "rewards/margins_min": -0.037889931350946426,
      "rewards/margins_std": 0.09290634840726852,
      "rewards/rejected": 0.0857231393456459,
      "step": 270
    },
    {
      "dpo_losses": 0.6564372181892395,
      "epoch": 0.85,
      "grad_norm": 2.142298246340979,
      "learning_rate": 3.3052776201888266e-07,
      "logits/chosen": -2.818432569503784,
      "logits/rejected": -2.8038887977600098,
      "logps/chosen": -308.4718933105469,
      "logps/rejected": -260.4856262207031,
      "loss": 0.6756,
      "positive_losses": 0.07436485588550568,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.17093774676322937,
      "rewards/margins": 0.07805446535348892,
      "rewards/margins_max": 0.18430791795253754,
      "rewards/margins_min": -0.037783779203891754,
      "rewards/margins_std": 0.09794415533542633,
      "rewards/rejected": 0.09288326650857925,
      "step": 280
    },
    {
      "dpo_losses": 0.668868899345398,
      "epoch": 0.88,
      "grad_norm": 8.396243354371068,
      "learning_rate": 2.111280128780638e-07,
      "logits/chosen": -2.756545305252075,
      "logits/rejected": -2.7231669425964355,
      "logps/chosen": -259.9716491699219,
      "logps/rejected": -236.48770141601562,
      "loss": 0.684,
      "positive_losses": 0.07783536612987518,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.1421826183795929,
      "rewards/margins": 0.051858507096767426,
      "rewards/margins_max": 0.16274937987327576,
      "rewards/margins_min": -0.04277097061276436,
      "rewards/margins_std": 0.09085315465927124,
      "rewards/rejected": 0.09032410383224487,
      "step": 290
    },
    {
      "dpo_losses": 0.6606593132019043,
      "epoch": 0.91,
      "grad_norm": 7.1470269484352835,
      "learning_rate": 1.1748732956682023e-07,
      "logits/chosen": -2.8557376861572266,
      "logits/rejected": -2.791503667831421,
      "logps/chosen": -309.07489013671875,
      "logps/rejected": -277.8647155761719,
      "loss": 0.6774,
      "positive_losses": 0.09160251915454865,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.14638249576091766,
      "rewards/margins": 0.06913793087005615,
      "rewards/margins_max": 0.1924934983253479,
      "rewards/margins_min": -0.04603328928351402,
      "rewards/margins_std": 0.10587652772665024,
      "rewards/rejected": 0.07724456489086151,
      "step": 300
    },
    {
      "epoch": 0.91,
      "eval_dpo_losses": 0.6679198145866394,
      "eval_logits/chosen": -2.7836546897888184,
      "eval_logits/rejected": -2.746371030807495,
      "eval_logps/chosen": -269.767822265625,
      "eval_logps/rejected": -249.15957641601562,
      "eval_loss": 0.687869131565094,
      "eval_positive_losses": 0.09410960972309113,
      "eval_rewards/accuracies": 0.6899999976158142,
      "eval_rewards/chosen": 0.1482561081647873,
      "eval_rewards/margins": 0.05406271293759346,
      "eval_rewards/margins_max": 0.2146386057138443,
      "eval_rewards/margins_min": -0.0945044532418251,
      "eval_rewards/margins_std": 0.10281499475240707,
      "eval_rewards/rejected": 0.09419341385364532,
      "eval_runtime": 428.9213,
      "eval_samples_per_second": 4.663,
      "eval_steps_per_second": 0.291,
      "step": 300
    },
    {
      "dpo_losses": 0.6655529141426086,
      "epoch": 0.94,
      "grad_norm": 1.6083474443571015,
      "learning_rate": 5.065954844616722e-08,
      "logits/chosen": -2.7985780239105225,
      "logits/rejected": -2.756269931793213,
      "logps/chosen": -261.9189758300781,
      "logps/rejected": -271.715576171875,
      "loss": 0.6852,
      "positive_losses": 0.16939087212085724,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.1520276516675949,
      "rewards/margins": 0.05855011194944382,
      "rewards/margins_max": 0.17010439932346344,
      "rewards/margins_min": -0.046878743916749954,
      "rewards/margins_std": 0.09575958549976349,
      "rewards/rejected": 0.0934775322675705,
      "step": 310
    },
    {
      "dpo_losses": 0.6655773520469666,
      "epoch": 0.97,
      "grad_norm": 1.8914240259960515,
      "learning_rate": 1.1396752298723501e-08,
      "logits/chosen": -2.8393120765686035,
      "logits/rejected": -2.7888612747192383,
      "logps/chosen": -235.32876586914062,
      "logps/rejected": -249.7720184326172,
      "loss": 0.666,
      "positive_losses": 0.027046585455536842,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.1361643224954605,
      "rewards/margins": 0.05848071724176407,
      "rewards/margins_max": 0.16818095743656158,
      "rewards/margins_min": -0.04760957881808281,
      "rewards/margins_std": 0.09690945595502853,
      "rewards/rejected": 0.07768361270427704,
      "step": 320
    },
    {
      "epoch": 1.0,
      "step": 329,
      "total_flos": 0.0,
      "train_loss": 0.6874516553791823,
      "train_runtime": 3895.4317,
      "train_samples_per_second": 1.351,
      "train_steps_per_second": 0.084
    }
  ],
  "logging_steps": 10,
  "max_steps": 329,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}