sfulay committed on
Commit 0d3f7c6
1 Parent(s): 64c9089

Model save

README.md CHANGED
@@ -18,15 +18,15 @@ should probably proofread and complete it, then remove this comment. -->
18
 
19
  This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on an unknown dataset.
20
  It achieves the following results on the evaluation set:
21
- - Loss: 0.0007
22
- - Rewards/chosen: -2.2108
23
- - Rewards/rejected: -84.9647
24
  - Rewards/accuracies: 1.0
25
- - Rewards/margins: 82.7539
26
- - Logps/rejected: -9137.2617
27
- - Logps/chosen: -588.0638
28
- - Logits/rejected: 4.7555
29
- - Logits/chosen: -0.0442
30
 
31
  ## Model description
32
 
@@ -63,13 +63,13 @@ The following hyperparameters were used during training:
63
 
64
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
65
  |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
66
- | 0.0158 | 0.1420 | 50 | 0.0104 | -1.4246 | -52.2276 | 0.9960 | 50.8030 | -5863.5493 | -509.4384 | -2.3895 | -2.8735 |
67
- | 0.0111 | 0.2841 | 100 | 0.0024 | -2.2596 | -93.3357 | 1.0 | 91.0762 | -9974.3623 | -592.9393 | 0.1786 | -2.9450 |
68
- | 0.0039 | 0.4261 | 150 | 0.0016 | -2.3351 | -100.4879 | 1.0 | 98.1528 | -10689.5820 | -600.4949 | 2.3549 | -1.8831 |
69
- | 0.0022 | 0.5682 | 200 | 0.0012 | -2.2027 | -86.1756 | 1.0 | 83.9729 | -9258.3438 | -587.2476 | 2.5108 | -1.6726 |
70
- | 0.0022 | 0.7102 | 250 | 0.0008 | -2.2903 | -83.1896 | 1.0 | 80.8993 | -8959.7471 | -596.0095 | 3.7585 | -1.0150 |
71
- | 0.001 | 0.8523 | 300 | 0.0007 | -2.1936 | -83.9541 | 1.0 | 81.7606 | -9036.2012 | -586.3376 | 4.7089 | -0.1221 |
72
- | 0.008 | 0.9943 | 350 | 0.0007 | -2.2108 | -84.9647 | 1.0 | 82.7539 | -9137.2617 | -588.0638 | 4.7555 | -0.0442 |
73
 
74
 
75
  ### Framework versions
 
18
 
19
  This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on an unknown dataset.
20
  It achieves the following results on the evaluation set:
21
+ - Loss: 0.0010
22
+ - Rewards/chosen: -2.2051
23
+ - Rewards/rejected: -74.7846
24
  - Rewards/accuracies: 1.0
25
+ - Rewards/margins: 72.5795
26
+ - Logps/rejected: -8119.2456
27
+ - Logps/chosen: -587.4921
28
+ - Logits/rejected: 2.7746
29
+ - Logits/chosen: -0.3615
30
 
31
  ## Model description
32
 
 
63
 
64
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
65
  |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
66
+ | 0.0135 | 0.1420 | 50 | 0.0099 | -1.3212 | -47.4111 | 0.9960 | 46.0899 | -5381.9028 | -499.1044 | -2.4386 | -2.9291 |
67
+ | 0.0109 | 0.2841 | 100 | 0.0025 | -2.5970 | -80.9884 | 1.0 | 78.3913 | -8739.6260 | -626.6862 | -0.0340 | -2.5428 |
68
+ | 0.0017 | 0.4261 | 150 | 0.0011 | -1.9943 | -77.1591 | 1.0 | 75.1648 | -8356.6973 | -566.4090 | 2.8304 | -1.6476 |
69
+ | 0.002 | 0.5682 | 200 | 0.0008 | -2.1292 | -82.8472 | 1.0 | 80.7180 | -8925.5107 | -579.9021 | 2.8840 | -1.3436 |
70
+ | 0.0018 | 0.7102 | 250 | 0.0009 | -2.1417 | -72.3491 | 1.0 | 70.2074 | -7875.6992 | -581.1540 | 2.5447 | -1.1532 |
71
+ | 0.0013 | 0.8523 | 300 | 0.0010 | -2.2050 | -73.9724 | 1.0 | 71.7674 | -8038.0322 | -587.4813 | 2.7313 | -0.4348 |
72
+ | 0.0058 | 0.9943 | 350 | 0.0010 | -2.2051 | -74.7846 | 1.0 | 72.5795 | -8119.2456 | -587.4921 | 2.7746 | -0.3615 |
73
 
74
 
75
  ### Framework versions
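
The Rewards/* metrics above follow the DPO-style reporting convention in which Rewards/margins is the gap between the reward of the chosen and the rejected response. Below is a minimal sanity-check sketch using the final evaluation numbers from the updated card; the identity chosen − rejected = margins is an assumption about how the trainer reports the margin, but it matches the figures here.

```python
# Sanity check on the reported eval metrics (values copied from the card above).
# Assumption: rewards/margins is reported as rewards/chosen - rewards/rejected.
rewards_chosen = -2.2051
rewards_rejected = -74.7846
rewards_margins = 72.5795

derived_margin = rewards_chosen - rewards_rejected
assert abs(derived_margin - rewards_margins) < 1e-3
print(f"derived margin {derived_margin:.4f} matches reported {rewards_margins:.4f}")
```

The same relation holds row by row in the training table above, e.g. at step 50: −1.3212 − (−47.4111) ≈ 46.0899.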
all_results.json CHANGED
@@ -1,22 +1,22 @@
1
  {
2
  "epoch": 1.0,
3
- "eval_logits/chosen": -0.5261995196342468,
4
- "eval_logits/rejected": 2.7145843505859375,
5
- "eval_logps/chosen": -577.6981201171875,
6
- "eval_logps/rejected": -7961.19677734375,
7
- "eval_loss": 0.000621323473751545,
8
  "eval_rewards/accuracies": 1.0,
9
- "eval_rewards/chosen": -2.1071643829345703,
10
- "eval_rewards/margins": 71.09691619873047,
11
- "eval_rewards/rejected": -73.20408630371094,
12
- "eval_runtime": 196.4219,
13
  "eval_samples": 3905,
14
- "eval_samples_per_second": 19.881,
15
- "eval_steps_per_second": 0.316,
16
  "total_flos": 0.0,
17
- "train_loss": 0.0941242430602539,
18
- "train_runtime": 10036.5915,
19
  "train_samples": 45000,
20
- "train_samples_per_second": 4.484,
21
  "train_steps_per_second": 0.035
22
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "eval_logits/chosen": -0.043021149933338165,
4
+ "eval_logits/rejected": 4.756043434143066,
5
+ "eval_logps/chosen": -587.93701171875,
6
+ "eval_logps/rejected": -9136.2529296875,
7
+ "eval_loss": 0.0007141114911064506,
8
  "eval_rewards/accuracies": 1.0,
9
+ "eval_rewards/chosen": -2.2095537185668945,
10
+ "eval_rewards/margins": 82.74508666992188,
11
+ "eval_rewards/rejected": -84.95464324951172,
12
+ "eval_runtime": 192.6276,
13
  "eval_samples": 3905,
14
+ "eval_samples_per_second": 20.272,
15
+ "eval_steps_per_second": 0.322,
16
  "total_flos": 0.0,
17
+ "train_loss": 0.08915322791869006,
18
+ "train_runtime": 10109.537,
19
  "train_samples": 45000,
20
+ "train_samples_per_second": 4.451,
21
  "train_steps_per_second": 0.035
22
  }
eval_results.json CHANGED
@@ -1,16 +1,16 @@
1
  {
2
  "epoch": 1.0,
3
- "eval_logits/chosen": -0.5261995196342468,
4
- "eval_logits/rejected": 2.7145843505859375,
5
- "eval_logps/chosen": -577.6981201171875,
6
- "eval_logps/rejected": -7961.19677734375,
7
- "eval_loss": 0.000621323473751545,
8
  "eval_rewards/accuracies": 1.0,
9
- "eval_rewards/chosen": -2.1071643829345703,
10
- "eval_rewards/margins": 71.09691619873047,
11
- "eval_rewards/rejected": -73.20408630371094,
12
- "eval_runtime": 196.4219,
13
  "eval_samples": 3905,
14
- "eval_samples_per_second": 19.881,
15
- "eval_steps_per_second": 0.316
16
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "eval_logits/chosen": -0.043021149933338165,
4
+ "eval_logits/rejected": 4.756043434143066,
5
+ "eval_logps/chosen": -587.93701171875,
6
+ "eval_logps/rejected": -9136.2529296875,
7
+ "eval_loss": 0.0007141114911064506,
8
  "eval_rewards/accuracies": 1.0,
9
+ "eval_rewards/chosen": -2.2095537185668945,
10
+ "eval_rewards/margins": 82.74508666992188,
11
+ "eval_rewards/rejected": -84.95464324951172,
12
+ "eval_runtime": 192.6276,
13
  "eval_samples": 3905,
14
+ "eval_samples_per_second": 20.272,
15
+ "eval_steps_per_second": 0.322
16
  }
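
In all_results.json and eval_results.json the throughput figures are derived quantities: eval_samples_per_second is, up to rounding, eval_samples divided by eval_runtime. A short sketch, assuming eval_results.json as updated in this commit is read from the working directory:

```python
import json

# Assumes eval_results.json (as updated in this commit) is in the working directory.
with open("eval_results.json") as f:
    results = json.load(f)

derived = results["eval_samples"] / results["eval_runtime"]
print(f"reported: {results['eval_samples_per_second']:.3f}  derived: {derived:.3f}")
# With the updated values: 3905 / 192.6276 ≈ 20.272, matching the stored figure.
```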
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f45f4617ade43cbd1c152d467a79efb24b1005ab4dc580a4984340acf70f4d9a
3
  size 4943162336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea062bb686c221314613acef65146caf3a37869e4cc120273b6a23c064b4f51e
3
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:66198156d42f0ed0b2654c2fc5301e9c15d14f00a11804852a1f4d8ae40c6f8d
3
  size 4999819336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8589631baa5d782c5f19f5553cede0e0dc4c913ec78556038707b221c4223125
3
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:aab1ddfcd9ff288d3d156b06d15d150fceb54220ae64fab783bbcb76386844e8
3
  size 4540516344
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf137591cd2b521550761718081f24b02337f26e652ec7f14ddaeb325d2334c7
3
  size 4540516344
train_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "epoch": 1.0,
3
  "total_flos": 0.0,
4
- "train_loss": 0.0941242430602539,
5
- "train_runtime": 10036.5915,
6
  "train_samples": 45000,
7
- "train_samples_per_second": 4.484,
8
  "train_steps_per_second": 0.035
9
  }
 
1
  {
2
  "epoch": 1.0,
3
  "total_flos": 0.0,
4
+ "train_loss": 0.08915322791869006,
5
+ "train_runtime": 10109.537,
6
  "train_samples": 45000,
7
+ "train_samples_per_second": 4.451,
8
  "train_steps_per_second": 0.035
9
  }
trainer_state.json CHANGED
@@ -10,638 +10,638 @@
10
  "log_history": [
11
  {
12
  "epoch": 0.028409090909090908,
13
- "grad_norm": 63.70768739794084,
14
  "learning_rate": 1.3888888888888888e-07,
15
- "logits/chosen": -2.859254837036133,
16
- "logits/rejected": -2.642847776412964,
17
- "logps/chosen": -390.5564880371094,
18
- "logps/rejected": -607.8642578125,
19
- "loss": 1.3793,
20
- "rewards/accuracies": 0.6000000238418579,
21
- "rewards/chosen": 0.0020330864936113358,
22
- "rewards/margins": 0.013599475845694542,
23
- "rewards/rejected": -0.01156639028340578,
24
  "step": 10
25
  },
26
  {
27
  "epoch": 0.056818181818181816,
28
- "grad_norm": 37.53559671175899,
29
  "learning_rate": 2.7777777777777776e-07,
30
- "logits/chosen": -2.842411518096924,
31
- "logits/rejected": -2.694638967514038,
32
- "logps/chosen": -328.69854736328125,
33
- "logps/rejected": -775.2159423828125,
34
- "loss": 1.1119,
35
- "rewards/accuracies": 0.9937499761581421,
36
- "rewards/chosen": 0.08446629345417023,
37
- "rewards/margins": 0.6622289419174194,
38
- "rewards/rejected": -0.5777625441551208,
39
  "step": 20
40
  },
41
  {
42
  "epoch": 0.08522727272727272,
43
- "grad_norm": 11.233762764198163,
44
  "learning_rate": 4.1666666666666667e-07,
45
- "logits/chosen": -2.8567159175872803,
46
- "logits/rejected": -2.6723504066467285,
47
- "logps/chosen": -296.57916259765625,
48
- "logps/rejected": -1129.5751953125,
49
- "loss": 0.4749,
50
  "rewards/accuracies": 1.0,
51
- "rewards/chosen": 0.35702842473983765,
52
- "rewards/margins": 4.873236179351807,
53
- "rewards/rejected": -4.516207218170166,
54
  "step": 30
55
  },
56
  {
57
  "epoch": 0.11363636363636363,
58
- "grad_norm": 3.6495758175953084,
59
  "learning_rate": 4.998023493068254e-07,
60
- "logits/chosen": -2.814579725265503,
61
- "logits/rejected": -2.6064682006835938,
62
- "logps/chosen": -323.31414794921875,
63
- "logps/rejected": -2251.45361328125,
64
- "loss": 0.1291,
65
  "rewards/accuracies": 1.0,
66
- "rewards/chosen": 0.23441331088542938,
67
- "rewards/margins": 15.669992446899414,
68
- "rewards/rejected": -15.435577392578125,
69
  "step": 40
70
  },
71
  {
72
  "epoch": 0.14204545454545456,
73
- "grad_norm": 0.3660230434321398,
74
  "learning_rate": 4.975823666181255e-07,
75
- "logits/chosen": -2.7671985626220703,
76
- "logits/rejected": -2.447579860687256,
77
- "logps/chosen": -483.5135803222656,
78
- "logps/rejected": -4943.55126953125,
79
- "loss": 0.0158,
80
  "rewards/accuracies": 1.0,
81
- "rewards/chosen": -1.2087831497192383,
82
- "rewards/margins": 41.68732452392578,
83
- "rewards/rejected": -42.8961067199707,
84
  "step": 50
85
  },
86
  {
87
  "epoch": 0.14204545454545456,
88
- "eval_logits/chosen": -2.873523473739624,
89
- "eval_logits/rejected": -2.389453411102295,
90
- "eval_logps/chosen": -509.4383850097656,
91
- "eval_logps/rejected": -5863.54931640625,
92
- "eval_loss": 0.010370529256761074,
93
  "eval_rewards/accuracies": 0.9959677457809448,
94
- "eval_rewards/chosen": -1.4245665073394775,
95
- "eval_rewards/margins": 50.803043365478516,
96
- "eval_rewards/rejected": -52.22761154174805,
97
- "eval_runtime": 195.8611,
98
- "eval_samples_per_second": 19.938,
99
- "eval_steps_per_second": 0.317,
100
  "step": 50
101
  },
102
  {
103
  "epoch": 0.17045454545454544,
104
- "grad_norm": 0.9752123540317181,
105
  "learning_rate": 4.929173350101024e-07,
106
- "logits/chosen": -2.865919351577759,
107
- "logits/rejected": -2.1330180168151855,
108
- "logps/chosen": -531.2206420898438,
109
- "logps/rejected": -6913.0869140625,
110
- "loss": 0.0245,
111
  "rewards/accuracies": 0.9937499761581421,
112
- "rewards/chosen": -1.7132558822631836,
113
- "rewards/margins": 60.98906707763672,
114
- "rewards/rejected": -62.70232009887695,
115
  "step": 60
116
  },
117
  {
118
  "epoch": 0.19886363636363635,
119
- "grad_norm": 0.478800645918279,
120
  "learning_rate": 4.858533249305336e-07,
121
- "logits/chosen": -2.895341396331787,
122
- "logits/rejected": -1.7176485061645508,
123
- "logps/chosen": -551.4154052734375,
124
- "logps/rejected": -7227.5439453125,
125
- "loss": 0.0068,
126
  "rewards/accuracies": 1.0,
127
- "rewards/chosen": -1.7225472927093506,
128
- "rewards/margins": 64.42552185058594,
129
- "rewards/rejected": -66.14808654785156,
130
  "step": 70
131
  },
132
  {
133
  "epoch": 0.22727272727272727,
134
- "grad_norm": 10.076839489234553,
135
  "learning_rate": 4.764600984163808e-07,
136
- "logits/chosen": -2.921323537826538,
137
- "logits/rejected": -1.122173547744751,
138
- "logps/chosen": -585.6353149414062,
139
- "logps/rejected": -8391.6171875,
140
- "loss": 0.0053,
141
  "rewards/accuracies": 1.0,
142
- "rewards/chosen": -1.7931442260742188,
143
- "rewards/margins": 75.38569641113281,
144
- "rewards/rejected": -77.17884826660156,
145
  "step": 80
146
  },
147
  {
148
  "epoch": 0.2556818181818182,
149
- "grad_norm": 0.06316207964609313,
150
  "learning_rate": 4.6483042014491527e-07,
151
- "logits/chosen": -2.8797919750213623,
152
- "logits/rejected": -1.447209119796753,
153
- "logps/chosen": -596.3788452148438,
154
- "logps/rejected": -8167.92724609375,
155
- "loss": 0.0137,
156
  "rewards/accuracies": 0.9937499761581421,
157
- "rewards/chosen": -2.3426554203033447,
158
- "rewards/margins": 72.96277618408203,
159
- "rewards/rejected": -75.305419921875,
160
  "step": 90
161
  },
162
  {
163
  "epoch": 0.2840909090909091,
164
- "grad_norm": 1.4636731976721238,
165
  "learning_rate": 4.510791413176912e-07,
166
- "logits/chosen": -2.9729702472686768,
167
- "logits/rejected": -0.3022725284099579,
168
- "logps/chosen": -607.1732177734375,
169
- "logps/rejected": -10554.962890625,
170
- "loss": 0.0111,
171
  "rewards/accuracies": 1.0,
172
- "rewards/chosen": -2.3770313262939453,
173
- "rewards/margins": 96.11477661132812,
174
- "rewards/rejected": -98.49180603027344,
175
  "step": 100
176
  },
177
  {
178
  "epoch": 0.2840909090909091,
179
- "eval_logits/chosen": -2.945009231567383,
180
- "eval_logits/rejected": 0.17861562967300415,
181
- "eval_logps/chosen": -592.9392700195312,
182
- "eval_logps/rejected": -9974.3623046875,
183
- "eval_loss": 0.0023908319417387247,
184
  "eval_rewards/accuracies": 1.0,
185
- "eval_rewards/chosen": -2.259575366973877,
186
- "eval_rewards/margins": 91.07615661621094,
187
- "eval_rewards/rejected": -93.33573150634766,
188
- "eval_runtime": 193.9785,
189
- "eval_samples_per_second": 20.131,
190
- "eval_steps_per_second": 0.32,
191
  "step": 100
192
  },
193
  {
194
  "epoch": 0.3125,
195
- "grad_norm": 0.06640741403344876,
196
  "learning_rate": 4.353420654246546e-07,
197
- "logits/chosen": -2.9682483673095703,
198
- "logits/rejected": -0.10797711461782455,
199
- "logps/chosen": -584.2022094726562,
200
- "logps/rejected": -9037.5048828125,
201
- "loss": 0.0062,
202
  "rewards/accuracies": 1.0,
203
- "rewards/chosen": -2.087810754776001,
204
- "rewards/margins": 82.29454803466797,
205
- "rewards/rejected": -84.3823471069336,
206
  "step": 110
207
  },
208
  {
209
  "epoch": 0.3409090909090909,
210
- "grad_norm": 0.11961136514165396,
211
  "learning_rate": 4.177746070897592e-07,
212
- "logits/chosen": -2.854703187942505,
213
- "logits/rejected": 0.2248195856809616,
214
- "logps/chosen": -577.8111572265625,
215
- "logps/rejected": -8910.1435546875,
216
- "loss": 0.0301,
217
  "rewards/accuracies": 1.0,
218
- "rewards/chosen": -1.9629430770874023,
219
- "rewards/margins": 80.76622009277344,
220
- "rewards/rejected": -82.72917175292969,
221
  "step": 120
222
  },
223
  {
224
  "epoch": 0.3693181818181818,
225
- "grad_norm": 0.44055468356671706,
226
  "learning_rate": 3.9855025724292763e-07,
227
- "logits/chosen": -2.659201145172119,
228
- "logits/rejected": 1.2345632314682007,
229
- "logps/chosen": -610.489990234375,
230
- "logps/rejected": -9278.4609375,
231
- "loss": 0.0023,
232
  "rewards/accuracies": 1.0,
233
- "rewards/chosen": -2.4003846645355225,
234
- "rewards/margins": 83.97884368896484,
235
- "rewards/rejected": -86.37923431396484,
236
  "step": 130
237
  },
238
  {
239
  "epoch": 0.3977272727272727,
240
- "grad_norm": 2.2725494852752717,
241
  "learning_rate": 3.7785886977585555e-07,
242
- "logits/chosen": -2.1955535411834717,
243
- "logits/rejected": 1.7099689245224,
244
- "logps/chosen": -569.4786376953125,
245
- "logps/rejected": -11541.3544921875,
246
- "loss": 0.003,
247
  "rewards/accuracies": 1.0,
248
- "rewards/chosen": -2.034423589706421,
249
- "rewards/margins": 106.959228515625,
250
- "rewards/rejected": -108.99365234375,
251
  "step": 140
252
  },
253
  {
254
  "epoch": 0.42613636363636365,
255
- "grad_norm": 13.991084938762679,
256
  "learning_rate": 3.5590478660213206e-07,
257
- "logits/chosen": -1.8308794498443604,
258
- "logits/rejected": 2.1075541973114014,
259
- "logps/chosen": -663.3027954101562,
260
- "logps/rejected": -11290.779296875,
261
- "loss": 0.0039,
262
- "rewards/accuracies": 0.9937499761581421,
263
- "rewards/chosen": -2.852395534515381,
264
- "rewards/margins": 102.93562316894531,
265
- "rewards/rejected": -105.78802490234375,
266
  "step": 150
267
  },
268
  {
269
  "epoch": 0.42613636363636365,
270
- "eval_logits/chosen": -1.883123755455017,
271
- "eval_logits/rejected": 2.354902982711792,
272
- "eval_logps/chosen": -600.4949340820312,
273
- "eval_logps/rejected": -10689.58203125,
274
- "eval_loss": 0.0015989234670996666,
275
  "eval_rewards/accuracies": 1.0,
276
- "eval_rewards/chosen": -2.3351328372955322,
277
- "eval_rewards/margins": 98.15279388427734,
278
- "eval_rewards/rejected": -100.48792266845703,
279
- "eval_runtime": 194.1289,
280
- "eval_samples_per_second": 20.116,
281
  "eval_steps_per_second": 0.319,
282
  "step": 150
283
  },
284
  {
285
  "epoch": 0.45454545454545453,
286
- "grad_norm": 15.634259463426035,
287
  "learning_rate": 3.3290481963801696e-07,
288
- "logits/chosen": -2.0796897411346436,
289
- "logits/rejected": 2.3097519874572754,
290
- "logps/chosen": -567.3259887695312,
291
- "logps/rejected": -9335.7529296875,
292
- "loss": 0.0018,
293
  "rewards/accuracies": 1.0,
294
- "rewards/chosen": -1.9718801975250244,
295
- "rewards/margins": 85.22074890136719,
296
- "rewards/rejected": -87.19263458251953,
297
  "step": 160
298
  },
299
  {
300
  "epoch": 0.48295454545454547,
301
- "grad_norm": 0.04468151503862641,
302
  "learning_rate": 3.0908610963322626e-07,
303
- "logits/chosen": -2.098179340362549,
304
- "logits/rejected": 1.9576606750488281,
305
- "logps/chosen": -608.5695190429688,
306
- "logps/rejected": -9132.7138671875,
307
- "loss": 0.0059,
308
  "rewards/accuracies": 1.0,
309
- "rewards/chosen": -2.2095131874084473,
310
- "rewards/margins": 81.83451843261719,
311
- "rewards/rejected": -84.04402160644531,
312
  "step": 170
313
  },
314
  {
315
  "epoch": 0.5113636363636364,
316
- "grad_norm": 0.046664236017310436,
317
  "learning_rate": 2.846838829972671e-07,
318
- "logits/chosen": -2.053903102874756,
319
- "logits/rejected": 2.1386537551879883,
320
- "logps/chosen": -602.9306030273438,
321
- "logps/rejected": -8629.5322265625,
322
- "loss": 0.0205,
323
  "rewards/accuracies": 1.0,
324
- "rewards/chosen": -2.4202630519866943,
325
- "rewards/margins": 77.7452163696289,
326
- "rewards/rejected": -80.16548156738281,
327
  "step": 180
328
  },
329
  {
330
  "epoch": 0.5397727272727273,
331
- "grad_norm": 0.2531340520912063,
332
  "learning_rate": 2.5993912877423147e-07,
333
- "logits/chosen": -1.8579347133636475,
334
- "logits/rejected": 1.7126737833023071,
335
- "logps/chosen": -551.639404296875,
336
- "logps/rejected": -9325.890625,
337
- "loss": 0.0012,
338
  "rewards/accuracies": 1.0,
339
- "rewards/chosen": -2.060236692428589,
340
- "rewards/margins": 84.2825698852539,
341
- "rewards/rejected": -86.34281158447266,
342
  "step": 190
343
  },
344
  {
345
  "epoch": 0.5681818181818182,
346
- "grad_norm": 1.2128193169362667,
347
  "learning_rate": 2.3509621870754504e-07,
348
- "logits/chosen": -1.699134111404419,
349
- "logits/rejected": 2.7847816944122314,
350
- "logps/chosen": -572.9049072265625,
351
- "logps/rejected": -8628.181640625,
352
- "loss": 0.0022,
353
  "rewards/accuracies": 1.0,
354
- "rewards/chosen": -2.0777506828308105,
355
- "rewards/margins": 77.8956527709961,
356
- "rewards/rejected": -79.97340393066406,
357
  "step": 200
358
  },
359
  {
360
  "epoch": 0.5681818181818182,
361
- "eval_logits/chosen": -1.6725599765777588,
362
- "eval_logits/rejected": 2.5107614994049072,
363
- "eval_logps/chosen": -587.24755859375,
364
- "eval_logps/rejected": -9258.34375,
365
- "eval_loss": 0.0011689095990732312,
366
  "eval_rewards/accuracies": 1.0,
367
- "eval_rewards/chosen": -2.202658176422119,
368
- "eval_rewards/margins": 83.97289276123047,
369
- "eval_rewards/rejected": -86.1755599975586,
370
- "eval_runtime": 193.9485,
371
- "eval_samples_per_second": 20.134,
372
- "eval_steps_per_second": 0.32,
373
  "step": 200
374
  },
375
  {
376
  "epoch": 0.5965909090909091,
377
- "grad_norm": 1.5143241412178876,
378
  "learning_rate": 2.1040049389819624e-07,
379
- "logits/chosen": -1.7269471883773804,
380
- "logits/rejected": 2.757033586502075,
381
- "logps/chosen": -597.5856323242188,
382
- "logps/rejected": -8867.4111328125,
383
- "loss": 0.0051,
384
  "rewards/accuracies": 1.0,
385
- "rewards/chosen": -2.0694241523742676,
386
- "rewards/margins": 80.28562927246094,
387
- "rewards/rejected": -82.35506439208984,
388
  "step": 210
389
  },
390
  {
391
  "epoch": 0.625,
392
- "grad_norm": 3.3477814516315405,
393
  "learning_rate": 1.8609584188988133e-07,
394
- "logits/chosen": -1.5605463981628418,
395
- "logits/rejected": 2.3806936740875244,
396
- "logps/chosen": -627.6144409179688,
397
- "logps/rejected": -7452.0048828125,
398
- "loss": 0.0066,
399
- "rewards/accuracies": 1.0,
400
- "rewards/chosen": -2.6301381587982178,
401
- "rewards/margins": 65.39354705810547,
402
- "rewards/rejected": -68.02368927001953,
403
  "step": 220
404
  },
405
  {
406
  "epoch": 0.6534090909090909,
407
- "grad_norm": 86.10043361080564,
408
  "learning_rate": 1.624222881090439e-07,
409
- "logits/chosen": -1.8570022583007812,
410
- "logits/rejected": 2.0753836631774902,
411
- "logps/chosen": -650.1534423828125,
412
- "logps/rejected": -8688.783203125,
413
- "loss": 0.0216,
414
  "rewards/accuracies": 0.9937499761581421,
415
- "rewards/chosen": -2.5548887252807617,
416
- "rewards/margins": 77.47712707519531,
417
- "rewards/rejected": -80.03202056884766,
418
  "step": 230
419
  },
420
  {
421
  "epoch": 0.6818181818181818,
422
- "grad_norm": 0.6247509460136759,
423
  "learning_rate": 1.3961362544602212e-07,
424
- "logits/chosen": -1.4957786798477173,
425
- "logits/rejected": 2.78035306930542,
426
- "logps/chosen": -612.0357666015625,
427
- "logps/rejected": -7844.0234375,
428
- "loss": 0.0027,
429
  "rewards/accuracies": 1.0,
430
- "rewards/chosen": -2.360233783721924,
431
- "rewards/margins": 70.1392822265625,
432
- "rewards/rejected": -72.49951171875,
433
  "step": 240
434
  },
435
  {
436
  "epoch": 0.7102272727272727,
437
- "grad_norm": 0.8355271945684738,
438
  "learning_rate": 1.1789510538684522e-07,
439
- "logits/chosen": -1.3467962741851807,
440
- "logits/rejected": 3.0541701316833496,
441
- "logps/chosen": -582.3834838867188,
442
- "logps/rejected": -10293.1650390625,
443
- "loss": 0.0022,
444
  "rewards/accuracies": 1.0,
445
- "rewards/chosen": -2.3034989833831787,
446
- "rewards/margins": 93.33232116699219,
447
- "rewards/rejected": -95.63582611083984,
448
  "step": 250
449
  },
450
  {
451
  "epoch": 0.7102272727272727,
452
- "eval_logits/chosen": -1.0149785280227661,
453
- "eval_logits/rejected": 3.758545160293579,
454
- "eval_logps/chosen": -596.009521484375,
455
- "eval_logps/rejected": -8959.7470703125,
456
- "eval_loss": 0.0008426356362178922,
457
  "eval_rewards/accuracies": 1.0,
458
- "eval_rewards/chosen": -2.290278196334839,
459
- "eval_rewards/margins": 80.89930725097656,
460
- "eval_rewards/rejected": -83.18959045410156,
461
- "eval_runtime": 193.2341,
462
- "eval_samples_per_second": 20.209,
463
- "eval_steps_per_second": 0.321,
464
  "step": 250
465
  },
466
  {
467
  "epoch": 0.7386363636363636,
468
- "grad_norm": 0.3552207920354382,
469
  "learning_rate": 9.748121349736891e-08,
470
- "logits/chosen": -0.6737051010131836,
471
- "logits/rejected": 3.9108054637908936,
472
- "logps/chosen": -637.5172119140625,
473
- "logps/rejected": -9320.220703125,
474
- "loss": 0.0011,
475
  "rewards/accuracies": 1.0,
476
- "rewards/chosen": -2.5368902683258057,
477
- "rewards/margins": 83.88404083251953,
478
- "rewards/rejected": -86.4209213256836,
479
  "step": 260
480
  },
481
  {
482
  "epoch": 0.7670454545454546,
483
- "grad_norm": 0.09771710883654211,
484
  "learning_rate": 7.857355122839673e-08,
485
- "logits/chosen": -0.7171241044998169,
486
- "logits/rejected": 3.9581894874572754,
487
- "logps/chosen": -603.0701293945312,
488
- "logps/rejected": -8722.890625,
489
- "loss": 0.0013,
490
  "rewards/accuracies": 1.0,
491
- "rewards/chosen": -2.222205638885498,
492
- "rewards/margins": 78.31710052490234,
493
- "rewards/rejected": -80.539306640625,
494
  "step": 270
495
  },
496
  {
497
  "epoch": 0.7954545454545454,
498
- "grad_norm": 0.20312109356571723,
499
  "learning_rate": 6.135884496044244e-08,
500
- "logits/chosen": -0.48570650815963745,
501
- "logits/rejected": 4.524535655975342,
502
- "logps/chosen": -585.2769775390625,
503
- "logps/rejected": -8691.55859375,
504
- "loss": 0.0015,
505
  "rewards/accuracies": 1.0,
506
- "rewards/chosen": -2.0610411167144775,
507
- "rewards/margins": 78.40565490722656,
508
- "rewards/rejected": -80.46669006347656,
509
  "step": 280
510
  },
511
  {
512
  "epoch": 0.8238636363636364,
513
- "grad_norm": 0.0562319233114269,
514
  "learning_rate": 4.600710195020982e-08,
515
- "logits/chosen": -0.22825559973716736,
516
- "logits/rejected": 4.695797443389893,
517
- "logps/chosen": -607.1776123046875,
518
- "logps/rejected": -8615.1923828125,
519
- "loss": 0.0052,
520
  "rewards/accuracies": 1.0,
521
- "rewards/chosen": -2.1803441047668457,
522
- "rewards/margins": 77.55899810791016,
523
- "rewards/rejected": -79.73933410644531,
524
  "step": 290
525
  },
526
  {
527
  "epoch": 0.8522727272727273,
528
- "grad_norm": 1.3409016161535439,
529
  "learning_rate": 3.2669931390104374e-08,
530
- "logits/chosen": -0.32437822222709656,
531
- "logits/rejected": 4.543366432189941,
532
- "logps/chosen": -559.5152587890625,
533
- "logps/rejected": -9425.8134765625,
534
- "loss": 0.001,
535
  "rewards/accuracies": 1.0,
536
- "rewards/chosen": -2.032606601715088,
537
- "rewards/margins": 86.01726531982422,
538
- "rewards/rejected": -88.04988098144531,
539
  "step": 300
540
  },
541
  {
542
  "epoch": 0.8522727272727273,
543
- "eval_logits/chosen": -0.12213863432407379,
544
- "eval_logits/rejected": 4.708926677703857,
545
- "eval_logps/chosen": -586.3375854492188,
546
- "eval_logps/rejected": -9036.201171875,
547
- "eval_loss": 0.0007051606080494821,
548
  "eval_rewards/accuracies": 1.0,
549
- "eval_rewards/chosen": -2.1935579776763916,
550
- "eval_rewards/margins": 81.76056671142578,
551
- "eval_rewards/rejected": -83.95413208007812,
552
- "eval_runtime": 194.0614,
553
- "eval_samples_per_second": 20.122,
554
- "eval_steps_per_second": 0.319,
555
  "step": 300
556
  },
557
  {
558
  "epoch": 0.8806818181818182,
559
- "grad_norm": 0.1502360317259883,
560
  "learning_rate": 2.147904716149135e-08,
561
- "logits/chosen": -0.03174494951963425,
562
- "logits/rejected": 4.578262805938721,
563
- "logps/chosen": -568.2017822265625,
564
- "logps/rejected": -9189.7119140625,
565
- "loss": 0.0037,
566
  "rewards/accuracies": 1.0,
567
- "rewards/chosen": -2.061352014541626,
568
- "rewards/margins": 82.83074188232422,
569
- "rewards/rejected": -84.89209747314453,
570
  "step": 310
571
  },
572
  {
573
  "epoch": 0.9090909090909091,
574
- "grad_norm": 0.12997178207019558,
575
  "learning_rate": 1.254496706805433e-08,
576
- "logits/chosen": -0.2779918313026428,
577
- "logits/rejected": 4.609086036682129,
578
- "logps/chosen": -596.7515869140625,
579
- "logps/rejected": -9248.6181640625,
580
- "loss": 0.0005,
581
  "rewards/accuracies": 1.0,
582
- "rewards/chosen": -2.140784978866577,
583
- "rewards/margins": 83.82556915283203,
584
- "rewards/rejected": -85.96635437011719,
585
  "step": 320
586
  },
587
  {
588
  "epoch": 0.9375,
589
- "grad_norm": 0.20243054667676932,
590
  "learning_rate": 5.955921395237318e-09,
591
- "logits/chosen": -0.13022509217262268,
592
- "logits/rejected": 4.544893741607666,
593
- "logps/chosen": -555.28271484375,
594
- "logps/rejected": -9316.8115234375,
595
- "loss": 0.0006,
596
  "rewards/accuracies": 1.0,
597
- "rewards/chosen": -2.0001461505889893,
598
- "rewards/margins": 84.69043731689453,
599
- "rewards/rejected": -86.69058990478516,
600
  "step": 330
601
  },
602
  {
603
  "epoch": 0.9659090909090909,
604
- "grad_norm": 0.25686140623112713,
605
  "learning_rate": 1.7769815745066474e-09,
606
- "logits/chosen": -0.500297486782074,
607
- "logits/rejected": 4.298487186431885,
608
- "logps/chosen": -568.9089965820312,
609
- "logps/rejected": -8478.435546875,
610
- "loss": 0.0026,
611
  "rewards/accuracies": 1.0,
612
- "rewards/chosen": -1.9992825984954834,
613
- "rewards/margins": 76.69742584228516,
614
- "rewards/rejected": -78.69671630859375,
615
  "step": 340
616
  },
617
  {
618
  "epoch": 0.9943181818181818,
619
- "grad_norm": 2.1488990981394114,
620
  "learning_rate": 4.9417557483610875e-11,
621
- "logits/chosen": -0.16308510303497314,
622
- "logits/rejected": 4.496396064758301,
623
- "logps/chosen": -605.3216552734375,
624
- "logps/rejected": -9169.884765625,
625
- "loss": 0.008,
626
  "rewards/accuracies": 0.9937499761581421,
627
- "rewards/chosen": -2.310539484024048,
628
- "rewards/margins": 82.91272735595703,
629
- "rewards/rejected": -85.2232666015625,
630
  "step": 350
631
  },
632
  {
633
  "epoch": 0.9943181818181818,
634
- "eval_logits/chosen": -0.04419805109500885,
635
- "eval_logits/rejected": 4.755471229553223,
636
- "eval_logps/chosen": -588.0638427734375,
637
- "eval_logps/rejected": -9137.26171875,
638
- "eval_loss": 0.0007233908982016146,
639
  "eval_rewards/accuracies": 1.0,
640
- "eval_rewards/chosen": -2.2108209133148193,
641
- "eval_rewards/margins": 82.75391387939453,
642
- "eval_rewards/rejected": -84.96473693847656,
643
- "eval_runtime": 194.5802,
644
- "eval_samples_per_second": 20.069,
645
  "eval_steps_per_second": 0.319,
646
  "step": 350
647
  },
@@ -649,9 +649,9 @@
649
  "epoch": 1.0,
650
  "step": 352,
651
  "total_flos": 0.0,
652
- "train_loss": 0.0941242430602539,
653
- "train_runtime": 10036.5915,
654
- "train_samples_per_second": 4.484,
655
  "train_steps_per_second": 0.035
656
  }
657
  ],
 
10
  "log_history": [
11
  {
12
  "epoch": 0.028409090909090908,
13
+ "grad_norm": 77.31810180796927,
14
  "learning_rate": 1.3888888888888888e-07,
15
+ "logits/chosen": -2.8591513633728027,
16
+ "logits/rejected": -2.6428322792053223,
17
+ "logps/chosen": -390.54095458984375,
18
+ "logps/rejected": -607.8343505859375,
19
+ "loss": 1.3778,
20
+ "rewards/accuracies": 0.612500011920929,
21
+ "rewards/chosen": 0.002188617829233408,
22
+ "rewards/margins": 0.013455559499561787,
23
+ "rewards/rejected": -0.011266940273344517,
24
  "step": 10
25
  },
26
  {
27
  "epoch": 0.056818181818181816,
28
+ "grad_norm": 38.318260170576636,
29
  "learning_rate": 2.7777777777777776e-07,
30
+ "logits/chosen": -2.842257261276245,
31
+ "logits/rejected": -2.6946189403533936,
32
+ "logps/chosen": -328.62799072265625,
33
+ "logps/rejected": -775.0516357421875,
34
+ "loss": 1.0648,
35
+ "rewards/accuracies": 0.987500011920929,
36
+ "rewards/chosen": 0.08517134189605713,
37
+ "rewards/margins": 0.6612905859947205,
38
+ "rewards/rejected": -0.5761191844940186,
39
  "step": 20
40
  },
41
  {
42
  "epoch": 0.08522727272727272,
43
+ "grad_norm": 10.337672358301827,
44
  "learning_rate": 4.1666666666666667e-07,
45
+ "logits/chosen": -2.856661319732666,
46
+ "logits/rejected": -2.6734800338745117,
47
+ "logps/chosen": -295.8897399902344,
48
+ "logps/rejected": -1121.2652587890625,
49
+ "loss": 0.4072,
50
  "rewards/accuracies": 1.0,
51
+ "rewards/chosen": 0.36392277479171753,
52
+ "rewards/margins": 4.797031402587891,
53
+ "rewards/rejected": -4.433108329772949,
54
  "step": 30
55
  },
56
  {
57
  "epoch": 0.11363636363636363,
58
+ "grad_norm": 3.166840866383729,
59
  "learning_rate": 4.998023493068254e-07,
60
+ "logits/chosen": -2.8248767852783203,
61
+ "logits/rejected": -2.627469539642334,
62
+ "logps/chosen": -318.3464050292969,
63
+ "logps/rejected": -2185.42578125,
64
+ "loss": 0.1013,
65
  "rewards/accuracies": 1.0,
66
+ "rewards/chosen": 0.28409063816070557,
67
+ "rewards/margins": 15.0593900680542,
68
+ "rewards/rejected": -14.775299072265625,
69
  "step": 40
70
  },
71
  {
72
  "epoch": 0.14204545454545456,
73
+ "grad_norm": 1.0472115502346335,
74
  "learning_rate": 4.975823666181255e-07,
75
+ "logits/chosen": -2.8039865493774414,
76
+ "logits/rejected": -2.5030531883239746,
77
+ "logps/chosen": -466.212890625,
78
+ "logps/rejected": -4635.23095703125,
79
+ "loss": 0.0135,
80
  "rewards/accuracies": 1.0,
81
+ "rewards/chosen": -1.0357766151428223,
82
+ "rewards/margins": 38.77712631225586,
83
+ "rewards/rejected": -39.81290054321289,
84
  "step": 50
85
  },
86
  {
87
  "epoch": 0.14204545454545456,
88
+ "eval_logits/chosen": -2.9291088581085205,
89
+ "eval_logits/rejected": -2.438577175140381,
90
+ "eval_logps/chosen": -499.10443115234375,
91
+ "eval_logps/rejected": -5381.90283203125,
92
+ "eval_loss": 0.00992405042052269,
93
  "eval_rewards/accuracies": 0.9959677457809448,
94
+ "eval_rewards/chosen": -1.321226954460144,
95
+ "eval_rewards/margins": 46.089908599853516,
96
+ "eval_rewards/rejected": -47.41114044189453,
97
+ "eval_runtime": 194.7089,
98
+ "eval_samples_per_second": 20.056,
99
+ "eval_steps_per_second": 0.318,
100
  "step": 50
101
  },
102
  {
103
  "epoch": 0.17045454545454544,
104
+ "grad_norm": 0.9673005612652019,
105
  "learning_rate": 4.929173350101024e-07,
106
+ "logits/chosen": -2.9523978233337402,
107
+ "logits/rejected": -2.3117947578430176,
108
+ "logps/chosen": -503.43365478515625,
109
+ "logps/rejected": -5926.720703125,
110
+ "loss": 0.0114,
111
  "rewards/accuracies": 0.9937499761581421,
112
+ "rewards/chosen": -1.4353859424591064,
113
+ "rewards/margins": 51.40327835083008,
114
+ "rewards/rejected": -52.83866500854492,
115
  "step": 60
116
  },
117
  {
118
  "epoch": 0.19886363636363635,
119
+ "grad_norm": 0.3988175169368107,
120
  "learning_rate": 4.858533249305336e-07,
121
+ "logits/chosen": -2.878004550933838,
122
+ "logits/rejected": -1.9669125080108643,
123
+ "logps/chosen": -545.6673583984375,
124
+ "logps/rejected": -6498.20263671875,
125
+ "loss": 0.0065,
126
  "rewards/accuracies": 1.0,
127
+ "rewards/chosen": -1.6650670766830444,
128
+ "rewards/margins": 57.189605712890625,
129
+ "rewards/rejected": -58.85467529296875,
130
  "step": 70
131
  },
132
  {
133
  "epoch": 0.22727272727272727,
134
+ "grad_norm": 13.315517602902379,
135
  "learning_rate": 4.764600984163808e-07,
136
+ "logits/chosen": -2.780555248260498,
137
+ "logits/rejected": -1.0796103477478027,
138
+ "logps/chosen": -584.189697265625,
139
+ "logps/rejected": -7447.61572265625,
140
+ "loss": 0.0056,
141
  "rewards/accuracies": 1.0,
142
+ "rewards/chosen": -1.7786884307861328,
143
+ "rewards/margins": 65.96015167236328,
144
+ "rewards/rejected": -67.73884582519531,
145
  "step": 80
146
  },
147
  {
148
  "epoch": 0.2556818181818182,
149
+ "grad_norm": 0.060351047390217884,
150
  "learning_rate": 4.6483042014491527e-07,
151
+ "logits/chosen": -2.8976826667785645,
152
+ "logits/rejected": -1.835821509361267,
153
+ "logps/chosen": -552.1006469726562,
154
+ "logps/rejected": -6640.58984375,
155
+ "loss": 0.0148,
156
  "rewards/accuracies": 0.9937499761581421,
157
+ "rewards/chosen": -1.8998725414276123,
158
+ "rewards/margins": 58.132171630859375,
159
+ "rewards/rejected": -60.03204345703125,
160
  "step": 90
161
  },
162
  {
163
  "epoch": 0.2840909090909091,
164
+ "grad_norm": 1.627678131457394,
165
  "learning_rate": 4.510791413176912e-07,
166
+ "logits/chosen": -2.7810730934143066,
167
+ "logits/rejected": -0.7030321359634399,
168
+ "logps/chosen": -615.0423583984375,
169
+ "logps/rejected": -8701.7958984375,
170
+ "loss": 0.0109,
171
  "rewards/accuracies": 1.0,
172
+ "rewards/chosen": -2.455723285675049,
173
+ "rewards/margins": 77.5044174194336,
174
+ "rewards/rejected": -79.96015167236328,
175
  "step": 100
176
  },
177
  {
178
  "epoch": 0.2840909090909091,
179
+ "eval_logits/chosen": -2.5427753925323486,
180
+ "eval_logits/rejected": -0.03400120511651039,
181
+ "eval_logps/chosen": -626.6861572265625,
182
+ "eval_logps/rejected": -8739.6259765625,
183
+ "eval_loss": 0.0025400689337402582,
184
  "eval_rewards/accuracies": 1.0,
185
+ "eval_rewards/chosen": -2.5970447063446045,
186
+ "eval_rewards/margins": 78.39134216308594,
187
+ "eval_rewards/rejected": -80.9883804321289,
188
+ "eval_runtime": 194.2924,
189
+ "eval_samples_per_second": 20.099,
190
+ "eval_steps_per_second": 0.319,
191
  "step": 100
192
  },
193
  {
194
  "epoch": 0.3125,
195
+ "grad_norm": 0.04122796046055686,
196
  "learning_rate": 4.353420654246546e-07,
197
+ "logits/chosen": -2.4864227771759033,
198
+ "logits/rejected": -0.10299022495746613,
199
+ "logps/chosen": -605.4061279296875,
200
+ "logps/rejected": -7677.7255859375,
201
+ "loss": 0.0083,
202
  "rewards/accuracies": 1.0,
203
+ "rewards/chosen": -2.29984974861145,
204
+ "rewards/margins": 68.48472595214844,
205
+ "rewards/rejected": -70.78457641601562,
206
  "step": 110
207
  },
208
  {
209
  "epoch": 0.3409090909090909,
210
+ "grad_norm": 0.09070738570484652,
211
  "learning_rate": 4.177746070897592e-07,
212
+ "logits/chosen": -2.3363277912139893,
213
+ "logits/rejected": 0.5452026724815369,
214
+ "logps/chosen": -594.1629638671875,
215
+ "logps/rejected": -7349.20068359375,
216
+ "loss": 0.0063,
217
  "rewards/accuracies": 1.0,
218
+ "rewards/chosen": -2.1264612674713135,
219
+ "rewards/margins": 64.9932632446289,
220
+ "rewards/rejected": -67.1197280883789,
221
  "step": 120
222
  },
223
  {
224
  "epoch": 0.3693181818181818,
225
+ "grad_norm": 0.4807602533089145,
226
  "learning_rate": 3.9855025724292763e-07,
227
+ "logits/chosen": -2.243814468383789,
228
+ "logits/rejected": 1.6017320156097412,
229
+ "logps/chosen": -609.6871337890625,
230
+ "logps/rejected": -7587.91552734375,
231
+ "loss": 0.0019,
232
  "rewards/accuracies": 1.0,
233
+ "rewards/chosen": -2.392354965209961,
234
+ "rewards/margins": 67.08141326904297,
235
+ "rewards/rejected": -69.47377014160156,
236
  "step": 130
237
  },
238
  {
239
  "epoch": 0.3977272727272727,
240
+ "grad_norm": 1.856213293772278,
241
  "learning_rate": 3.7785886977585555e-07,
242
+ "logits/chosen": -1.8541723489761353,
243
+ "logits/rejected": 2.44315505027771,
244
+ "logps/chosen": -582.9074096679688,
245
+ "logps/rejected": -9157.189453125,
246
+ "loss": 0.0035,
247
  "rewards/accuracies": 1.0,
248
+ "rewards/chosen": -2.168710231781006,
249
+ "rewards/margins": 82.98329162597656,
250
+ "rewards/rejected": -85.1520004272461,
251
  "step": 140
252
  },
253
  {
254
  "epoch": 0.42613636363636365,
255
+ "grad_norm": 1.4955330936107978,
256
  "learning_rate": 3.5590478660213206e-07,
257
+ "logits/chosen": -1.5623462200164795,
258
+ "logits/rejected": 2.7078518867492676,
259
+ "logps/chosen": -628.0105590820312,
260
+ "logps/rejected": -8812.60546875,
261
+ "loss": 0.0017,
262
+ "rewards/accuracies": 1.0,
263
+ "rewards/chosen": -2.499473810195923,
264
+ "rewards/margins": 78.50679779052734,
265
+ "rewards/rejected": -81.00627136230469,
266
  "step": 150
267
  },
268
  {
269
  "epoch": 0.42613636363636365,
270
+ "eval_logits/chosen": -1.6475961208343506,
271
+ "eval_logits/rejected": 2.8304052352905273,
272
+ "eval_logps/chosen": -566.4089965820312,
273
+ "eval_logps/rejected": -8356.697265625,
274
+ "eval_loss": 0.0010887953685596585,
275
  "eval_rewards/accuracies": 1.0,
276
+ "eval_rewards/chosen": -1.9942734241485596,
277
+ "eval_rewards/margins": 75.1648178100586,
278
+ "eval_rewards/rejected": -77.15908813476562,
279
+ "eval_runtime": 194.5753,
280
+ "eval_samples_per_second": 20.069,
281
  "eval_steps_per_second": 0.319,
282
  "step": 150
283
  },
284
  {
285
  "epoch": 0.45454545454545453,
286
+ "grad_norm": 8.762005226870338,
287
  "learning_rate": 3.3290481963801696e-07,
288
+ "logits/chosen": -2.1097240447998047,
289
+ "logits/rejected": 2.4051706790924072,
290
+ "logps/chosen": -540.6964111328125,
291
+ "logps/rejected": -7950.2998046875,
292
+ "loss": 0.001,
293
  "rewards/accuracies": 1.0,
294
+ "rewards/chosen": -1.705583930015564,
295
+ "rewards/margins": 71.63252258300781,
296
+ "rewards/rejected": -73.33810424804688,
297
  "step": 160
298
  },
299
  {
300
  "epoch": 0.48295454545454547,
301
+ "grad_norm": 0.030626366308689413,
302
  "learning_rate": 3.0908610963322626e-07,
303
+ "logits/chosen": -2.4956324100494385,
304
+ "logits/rejected": 1.2054059505462646,
305
+ "logps/chosen": -597.5648803710938,
306
+ "logps/rejected": -8411.56640625,
307
+ "loss": 0.006,
308
  "rewards/accuracies": 1.0,
309
+ "rewards/chosen": -2.099465847015381,
310
+ "rewards/margins": 74.73308563232422,
311
+ "rewards/rejected": -76.83256530761719,
312
  "step": 170
313
  },
314
  {
315
  "epoch": 0.5113636363636364,
316
+ "grad_norm": 0.014685643153554214,
317
  "learning_rate": 2.846838829972671e-07,
318
+ "logits/chosen": -2.5342214107513428,
319
+ "logits/rejected": 1.157962441444397,
320
+ "logps/chosen": -590.8978271484375,
321
+ "logps/rejected": -7980.2666015625,
322
+ "loss": 0.0116,
323
  "rewards/accuracies": 1.0,
324
+ "rewards/chosen": -2.2999348640441895,
325
+ "rewards/margins": 71.37288665771484,
326
+ "rewards/rejected": -73.67282104492188,
327
  "step": 180
328
  },
329
  {
330
  "epoch": 0.5397727272727273,
331
+ "grad_norm": 0.1745056386100847,
332
  "learning_rate": 2.5993912877423147e-07,
333
+ "logits/chosen": -2.2881453037261963,
334
+ "logits/rejected": 1.435770869255066,
335
+ "logps/chosen": -543.35205078125,
336
+ "logps/rejected": -8919.642578125,
337
+ "loss": 0.0011,
338
  "rewards/accuracies": 1.0,
339
+ "rewards/chosen": -1.9773623943328857,
340
+ "rewards/margins": 80.30296325683594,
341
+ "rewards/rejected": -82.28031921386719,
342
  "step": 190
343
  },
344
  {
345
  "epoch": 0.5681818181818182,
346
+ "grad_norm": 0.7368778028750317,
347
  "learning_rate": 2.3509621870754504e-07,
348
+ "logits/chosen": -1.6877973079681396,
349
+ "logits/rejected": 3.008781671524048,
350
+ "logps/chosen": -565.8089599609375,
351
+ "logps/rejected": -8412.712890625,
352
+ "loss": 0.002,
353
  "rewards/accuracies": 1.0,
354
+ "rewards/chosen": -2.0067920684814453,
355
+ "rewards/margins": 75.81190490722656,
356
+ "rewards/rejected": -77.8187026977539,
357
  "step": 200
358
  },
359
  {
360
  "epoch": 0.5681818181818182,
361
+ "eval_logits/chosen": -1.3436212539672852,
362
+ "eval_logits/rejected": 2.8839986324310303,
363
+ "eval_logps/chosen": -579.902099609375,
364
+ "eval_logps/rejected": -8925.5107421875,
365
+ "eval_loss": 0.000815804407466203,
366
  "eval_rewards/accuracies": 1.0,
367
+ "eval_rewards/chosen": -2.1292035579681396,
368
+ "eval_rewards/margins": 80.71800994873047,
369
+ "eval_rewards/rejected": -82.84722137451172,
370
+ "eval_runtime": 194.6381,
371
+ "eval_samples_per_second": 20.063,
372
+ "eval_steps_per_second": 0.319,
373
  "step": 200
374
  },
375
  {
376
  "epoch": 0.5965909090909091,
377
+ "grad_norm": 0.9148796537082613,
378
  "learning_rate": 2.1040049389819624e-07,
379
+ "logits/chosen": -1.2771052122116089,
380
+ "logits/rejected": 3.0268094539642334,
381
+ "logps/chosen": -591.0138549804688,
382
+ "logps/rejected": -8609.7705078125,
383
+ "loss": 0.0067,
384
  "rewards/accuracies": 1.0,
385
+ "rewards/chosen": -2.0037057399749756,
386
+ "rewards/margins": 77.77495574951172,
387
+ "rewards/rejected": -79.77867126464844,
388
  "step": 210
389
  },
390
  {
391
  "epoch": 0.625,
392
+ "grad_norm": 4.610253822471946,
393
  "learning_rate": 1.8609584188988133e-07,
394
+ "logits/chosen": -0.9021345376968384,
395
+ "logits/rejected": 2.644864559173584,
396
+ "logps/chosen": -637.8895263671875,
397
+ "logps/rejected": -7473.07666015625,
398
+ "loss": 0.016,
399
+ "rewards/accuracies": 0.9937499761581421,
400
+ "rewards/chosen": -2.732889175415039,
401
+ "rewards/margins": 65.50151824951172,
402
+ "rewards/rejected": -68.2343978881836,
403
  "step": 220
404
  },
405
  {
406
  "epoch": 0.6534090909090909,
407
+ "grad_norm": 80.2393605437428,
408
  "learning_rate": 1.624222881090439e-07,
409
+ "logits/chosen": -1.08247971534729,
410
+ "logits/rejected": 2.224297046661377,
411
+ "logps/chosen": -652.9135131835938,
412
+ "logps/rejected": -8397.490234375,
413
+ "loss": 0.0294,
414
  "rewards/accuracies": 0.9937499761581421,
415
+ "rewards/chosen": -2.5824902057647705,
416
+ "rewards/margins": 74.53660583496094,
417
+ "rewards/rejected": -77.11909484863281,
418
  "step": 230
419
  },
420
  {
421
  "epoch": 0.6818181818181818,
422
+ "grad_norm": 0.967663894443262,
423
  "learning_rate": 1.3961362544602212e-07,
424
+ "logits/chosen": -1.0073425769805908,
425
+ "logits/rejected": 2.469111442565918,
426
+ "logps/chosen": -597.539794921875,
427
+ "logps/rejected": -7158.27490234375,
428
+ "loss": 0.0022,
429
  "rewards/accuracies": 1.0,
430
+ "rewards/chosen": -2.2152743339538574,
431
+ "rewards/margins": 63.4267463684082,
432
+ "rewards/rejected": -65.64202117919922,
433
  "step": 240
434
  },
435
  {
436
  "epoch": 0.7102272727272727,
437
+ "grad_norm": 0.5670683312609751,
438
  "learning_rate": 1.1789510538684522e-07,
439
+ "logits/chosen": -1.1133817434310913,
440
+ "logits/rejected": 2.4663312435150146,
441
+ "logps/chosen": -563.9259643554688,
442
+ "logps/rejected": -9071.0546875,
443
+ "loss": 0.0018,
444
  "rewards/accuracies": 1.0,
445
+ "rewards/chosen": -2.1189236640930176,
446
+ "rewards/margins": 81.29580688476562,
447
+ "rewards/rejected": -83.41471862792969,
448
  "step": 250
449
  },
450
  {
451
  "epoch": 0.7102272727272727,
452
+ "eval_logits/chosen": -1.153204321861267,
453
+ "eval_logits/rejected": 2.5446887016296387,
454
+ "eval_logps/chosen": -581.1539916992188,
455
+ "eval_logps/rejected": -7875.69921875,
456
+ "eval_loss": 0.0009098726441152394,
457
  "eval_rewards/accuracies": 1.0,
458
+ "eval_rewards/chosen": -2.1417224407196045,
459
+ "eval_rewards/margins": 70.20738220214844,
460
+ "eval_rewards/rejected": -72.34910583496094,
461
+ "eval_runtime": 194.9128,
462
+ "eval_samples_per_second": 20.035,
463
+ "eval_steps_per_second": 0.318,
464
  "step": 250
465
  },
466
  {
467
  "epoch": 0.7386363636363636,
468
+ "grad_norm": 1.0164128695484267,
469
  "learning_rate": 9.748121349736891e-08,
470
+ "logits/chosen": -1.0999664068222046,
471
+ "logits/rejected": 2.403217315673828,
472
+ "logps/chosen": -622.03173828125,
473
+ "logps/rejected": -8243.462890625,
474
+ "loss": 0.0014,
475
  "rewards/accuracies": 1.0,
476
+ "rewards/chosen": -2.3820364475250244,
477
+ "rewards/margins": 73.27131652832031,
478
+ "rewards/rejected": -75.65335845947266,
479
  "step": 260
480
  },
481
  {
482
  "epoch": 0.7670454545454546,
483
+ "grad_norm": 0.08769848802929946,
484
  "learning_rate": 7.857355122839673e-08,
485
+ "logits/chosen": -1.1708943843841553,
486
+ "logits/rejected": 2.2550511360168457,
487
+ "logps/chosen": -594.3570556640625,
488
+ "logps/rejected": -7704.54541015625,
489
+ "loss": 0.0016,
490
  "rewards/accuracies": 1.0,
491
+ "rewards/chosen": -2.135075092315674,
492
+ "rewards/margins": 68.22078704833984,
493
+ "rewards/rejected": -70.3558578491211,
494
  "step": 270
495
  },
496
  {
497
  "epoch": 0.7954545454545454,
498
+ "grad_norm": 0.21922033699532475,
499
  "learning_rate": 6.135884496044244e-08,
500
+ "logits/chosen": -0.8851491212844849,
501
+ "logits/rejected": 2.587214708328247,
502
+ "logps/chosen": -582.6546630859375,
503
+ "logps/rejected": -7667.8701171875,
504
+ "loss": 0.0019,
505
  "rewards/accuracies": 1.0,
506
+ "rewards/chosen": -2.034817934036255,
507
+ "rewards/margins": 68.19500732421875,
508
+ "rewards/rejected": -70.22981262207031,
509
  "step": 280
510
  },
511
  {
512
  "epoch": 0.8238636363636364,
513
+ "grad_norm": 0.1479090204803531,
514
  "learning_rate": 4.600710195020982e-08,
515
+ "logits/chosen": -0.6345096826553345,
516
+ "logits/rejected": 2.7067768573760986,
517
+ "logps/chosen": -606.1185913085938,
518
+ "logps/rejected": -7632.70068359375,
519
+ "loss": 0.006,
520
  "rewards/accuracies": 1.0,
521
+ "rewards/chosen": -2.1697540283203125,
522
+ "rewards/margins": 67.74466705322266,
523
+ "rewards/rejected": -69.91442108154297,
524
  "step": 290
525
  },
526
  {
527
  "epoch": 0.8522727272727273,
528
+ "grad_norm": 1.431809898038739,
529
  "learning_rate": 3.2669931390104374e-08,
530
+ "logits/chosen": -0.6104838252067566,
531
+ "logits/rejected": 2.62327241897583,
532
+ "logps/chosen": -560.2278442382812,
533
+ "logps/rejected": -8326.173828125,
534
+ "loss": 0.0013,
535
  "rewards/accuracies": 1.0,
536
+ "rewards/chosen": -2.0397324562072754,
537
+ "rewards/margins": 75.01374816894531,
538
+ "rewards/rejected": -77.05347442626953,
539
  "step": 300
540
  },
541
  {
542
  "epoch": 0.8522727272727273,
543
+ "eval_logits/chosen": -0.43478134274482727,
544
+ "eval_logits/rejected": 2.7312941551208496,
545
+ "eval_logps/chosen": -587.4812622070312,
546
+ "eval_logps/rejected": -8038.0322265625,
547
+ "eval_loss": 0.0010067835683003068,
548
  "eval_rewards/accuracies": 1.0,
549
+ "eval_rewards/chosen": -2.2049953937530518,
550
+ "eval_rewards/margins": 71.7674331665039,
551
+ "eval_rewards/rejected": -73.9724349975586,
552
+ "eval_runtime": 193.3426,
553
+ "eval_samples_per_second": 20.197,
554
+ "eval_steps_per_second": 0.321,
555
  "step": 300
556
  },
557
  {
558
  "epoch": 0.8806818181818182,
559
+ "grad_norm": 0.1560528226318461,
560
  "learning_rate": 2.147904716149135e-08,
561
+ "logits/chosen": -0.37615886330604553,
562
+ "logits/rejected": 2.69954252243042,
563
+ "logps/chosen": -567.6695556640625,
564
+ "logps/rejected": -8216.876953125,
565
+ "loss": 0.0046,
566
  "rewards/accuracies": 1.0,
567
+ "rewards/chosen": -2.0560302734375,
568
+ "rewards/margins": 73.10771942138672,
569
+ "rewards/rejected": -75.16374206542969,
570
  "step": 310
571
  },
572
  {
573
  "epoch": 0.9090909090909091,
574
+ "grad_norm": 0.024456003641593862,
575
  "learning_rate": 1.254496706805433e-08,
576
+ "logits/chosen": -0.5904131531715393,
577
+ "logits/rejected": 2.5954599380493164,
578
+ "logps/chosen": -595.1561279296875,
579
+ "logps/rejected": -8192.4501953125,
580
+ "loss": 0.0006,
581
  "rewards/accuracies": 1.0,
582
+ "rewards/chosen": -2.124830722808838,
583
+ "rewards/margins": 73.27985382080078,
584
+ "rewards/rejected": -75.40467834472656,
585
  "step": 320
586
  },
587
  {
588
  "epoch": 0.9375,
589
+ "grad_norm": 0.24568296389199945,
590
  "learning_rate": 5.955921395237318e-09,
591
+ "logits/chosen": -0.48061466217041016,
592
+ "logits/rejected": 2.652150869369507,
593
+ "logps/chosen": -554.1439208984375,
594
+ "logps/rejected": -8231.5478515625,
595
+ "loss": 0.0007,
596
  "rewards/accuracies": 1.0,
597
+ "rewards/chosen": -1.9887583255767822,
598
+ "rewards/margins": 73.84918975830078,
599
+ "rewards/rejected": -75.83795166015625,
600
  "step": 330
601
  },
602
  {
603
  "epoch": 0.9659090909090909,
604
+ "grad_norm": 0.22303732669521928,
605
  "learning_rate": 1.7769815745066474e-09,
606
+ "logits/chosen": -0.7278428673744202,
607
+ "logits/rejected": 2.4799537658691406,
608
+ "logps/chosen": -569.5009765625,
609
+ "logps/rejected": -7504.6611328125,
610
+ "loss": 0.0013,
611
  "rewards/accuracies": 1.0,
612
+ "rewards/chosen": -2.005201816558838,
613
+ "rewards/margins": 66.95375061035156,
614
+ "rewards/rejected": -68.9589614868164,
615
  "step": 340
616
  },
617
  {
618
  "epoch": 0.9943181818181818,
619
+ "grad_norm": 1.783067144450784,
620
  "learning_rate": 4.9417557483610875e-11,
621
+ "logits/chosen": -0.47829318046569824,
622
+ "logits/rejected": 2.6286473274230957,
623
+ "logps/chosen": -601.5467529296875,
624
+ "logps/rejected": -8092.9609375,
625
+ "loss": 0.0058,
626
  "rewards/accuracies": 0.9937499761581421,
627
+ "rewards/chosen": -2.272789716720581,
628
+ "rewards/margins": 72.18123626708984,
629
+ "rewards/rejected": -74.45402526855469,
630
  "step": 350
631
  },
632
  {
633
  "epoch": 0.9943181818181818,
634
+ "eval_logits/chosen": -0.3615128695964813,
635
+ "eval_logits/rejected": 2.7746219635009766,
636
+ "eval_logps/chosen": -587.4920654296875,
637
+ "eval_logps/rejected": -8119.24560546875,
638
+ "eval_loss": 0.0010170801542699337,
639
  "eval_rewards/accuracies": 1.0,
640
+ "eval_rewards/chosen": -2.205103874206543,
641
+ "eval_rewards/margins": 72.57946014404297,
642
+ "eval_rewards/rejected": -74.78457641601562,
643
+ "eval_runtime": 194.3785,
644
+ "eval_samples_per_second": 20.09,
645
  "eval_steps_per_second": 0.319,
646
  "step": 350
647
  },
 
649
  "epoch": 1.0,
650
  "step": 352,
651
  "total_flos": 0.0,
652
+ "train_loss": 0.08915322791869006,
653
+ "train_runtime": 10109.537,
654
+ "train_samples_per_second": 4.451,
655
  "train_steps_per_second": 0.035
656
  }
657
  ],
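
trainer_state.json keeps the full training log under log_history: training entries carry loss, grad_norm, learning_rate and the rewards/* fields, while evaluation entries carry the eval_* counterparts. A minimal sketch (file path assumed) for pulling the evaluation-loss curve out of the updated state:

```python
import json

# Assumes trainer_state.json from this commit is in the working directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the log_history items that include an "eval_loss" key.
eval_curve = [(entry["step"], entry["eval_loss"])
              for entry in state["log_history"]
              if "eval_loss" in entry]

for step, loss in eval_curve:
    print(f"step {step:4d}  eval_loss {loss:.6f}")
# For the updated run this prints ~0.0099 at step 50 down to ~0.0010 at step 350.
```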
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7478770c4681452fb8a226c8859be376547be9d21f5c658595e7281ecf7c0650
3
  size 7608
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d4409e813389e25fd4bcad04907a450b0a34c4d2dbaa6266b5991ef8c160b92
3
  size 7608