ydeng9 commited on
Commit
363f333
1 Parent(s): fae5ca3

Model save

Browse files
README.md ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: cognitivecomputations/dolphin-2.9.4-llama3.1-8b
3
+ library_name: transformers
4
+ model_name: datagen_round_0_rpo
5
+ tags:
6
+ - generated_from_trainer
7
+ - trl
8
+ - dpo
9
+ license: license
10
+ ---
11
+
12
+ # Model Card for datagen_round_0_rpo
13
+
14
+ This model is a fine-tuned version of [cognitivecomputations/dolphin-2.9.4-llama3.1-8b](https://huggingface.co/cognitivecomputations/dolphin-2.9.4-llama3.1-8b).
15
+ It has been trained using [TRL](https://github.com/huggingface/trl).
16
+
17
+ ## Quick start
18
+
19
+ ```python
20
+ from transformers import pipeline
21
+
22
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
23
+ generator = pipeline("text-generation", model="ydeng9/datagen_round_0_rpo", device="cuda")
24
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
25
+ print(output["generated_text"])
26
+ ```
27
+
28
+ ## Training procedure
29
+
30
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/yihedeng9/huggingface/runs/az7oyxn2)
31
+
32
+ This model was trained with DPO, a method introduced in [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://huggingface.co/papers/2305.18290).
33
+
34
+ ### Framework versions
35
+
36
+ - TRL: 0.12.0
37
+ - Transformers: 4.46.2
38
+ - Pytorch: 2.4.0
39
+ - Datasets: 3.0.0
40
+ - Tokenizers: 0.20.3
41
+
42
+ ## Citations
43
+
44
+ Cite DPO as:
45
+
46
+ ```bibtex
47
+ @inproceedings{rafailov2023direct,
48
+ title = {{Direct Preference Optimization: Your Language Model is Secretly a Reward Model}},
49
+ author = {Rafael Rafailov and Archit Sharma and Eric Mitchell and Christopher D. Manning and Stefano Ermon and Chelsea Finn},
50
+ year = 2023,
51
+ booktitle = {Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023},
52
+ url = {http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-Abstract-Conference.html},
53
+ editor = {Alice Oh and Tristan Naumann and Amir Globerson and Kate Saenko and Moritz Hardt and Sergey Levine},
54
+ }
55
+ ```
56
+
57
+ Cite TRL as:
58
+
59
+ ```bibtex
60
+ @misc{vonwerra2022trl,
61
+ title = {{TRL: Transformer Reinforcement Learning}},
62
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
63
+ year = 2020,
64
+ journal = {GitHub repository},
65
+ publisher = {GitHub},
66
+ howpublished = {\url{https://github.com/huggingface/trl}}
67
+ }
68
+ ```
all_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9990141307919816,
3
+ "total_flos": 0.0,
4
+ "train_loss": 1.0731394717567846,
5
+ "train_runtime": 3128.4011,
6
+ "train_samples": 24339,
7
+ "train_samples_per_second": 7.78,
8
+ "train_steps_per_second": 0.121
9
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 128000,
4
+ "do_sample": true,
5
+ "eos_token_id": [
6
+ 128001,
7
+ 128008,
8
+ 128009,
9
+ 128256
10
+ ],
11
+ "temperature": 0.6,
12
+ "top_p": 0.9,
13
+ "transformers_version": "4.46.2"
14
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9990141307919816,
3
+ "total_flos": 0.0,
4
+ "train_loss": 1.0731394717567846,
5
+ "train_runtime": 3128.4011,
6
+ "train_samples": 24339,
7
+ "train_samples_per_second": 7.78,
8
+ "train_steps_per_second": 0.121
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,717 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9990141307919816,
5
+ "eval_steps": 100,
6
+ "global_step": 380,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0026289845547157412,
13
+ "grad_norm": 430.5451731957118,
14
+ "learning_rate": 1.3157894736842104e-08,
15
+ "logits/chosen": -1.2421875,
16
+ "logits/rejected": -1.21875,
17
+ "logps/chosen": -189.0,
18
+ "logps/rejected": -255.0,
19
+ "loss": 2.0842,
20
+ "nll_loss": 2.96875,
21
+ "rewards/accuracies": 0.0,
22
+ "rewards/chosen": 0.0,
23
+ "rewards/margins": 0.0,
24
+ "rewards/rejected": 0.0,
25
+ "step": 1
26
+ },
27
+ {
28
+ "epoch": 0.02628984554715741,
29
+ "grad_norm": 380.94348914412785,
30
+ "learning_rate": 1.3157894736842104e-07,
31
+ "logits/chosen": -1.2578125,
32
+ "logits/rejected": -1.25,
33
+ "logps/chosen": -209.0,
34
+ "logps/rejected": -338.0,
35
+ "loss": 2.0892,
36
+ "nll_loss": 2.953125,
37
+ "rewards/accuracies": 0.2916666567325592,
38
+ "rewards/chosen": 0.00933837890625,
39
+ "rewards/margins": -0.00396728515625,
40
+ "rewards/rejected": 0.0133056640625,
41
+ "step": 10
42
+ },
43
+ {
44
+ "epoch": 0.05257969109431482,
45
+ "grad_norm": 192.52773167988067,
46
+ "learning_rate": 2.631578947368421e-07,
47
+ "logits/chosen": -1.21875,
48
+ "logits/rejected": -1.203125,
49
+ "logps/chosen": -206.0,
50
+ "logps/rejected": -288.0,
51
+ "loss": 2.0101,
52
+ "nll_loss": 2.546875,
53
+ "rewards/accuracies": 0.33125001192092896,
54
+ "rewards/chosen": 0.3125,
55
+ "rewards/margins": -0.1083984375,
56
+ "rewards/rejected": 0.419921875,
57
+ "step": 20
58
+ },
59
+ {
60
+ "epoch": 0.07886953664147223,
61
+ "grad_norm": 29.353212473973965,
62
+ "learning_rate": 3.9473684210526315e-07,
63
+ "logits/chosen": -1.265625,
64
+ "logits/rejected": -1.2578125,
65
+ "logps/chosen": -134.0,
66
+ "logps/rejected": -182.0,
67
+ "loss": 1.7046,
68
+ "nll_loss": 1.625,
69
+ "rewards/accuracies": 0.33125001192092896,
70
+ "rewards/chosen": 1.1171875,
71
+ "rewards/margins": -0.353515625,
72
+ "rewards/rejected": 1.4765625,
73
+ "step": 30
74
+ },
75
+ {
76
+ "epoch": 0.10515938218862964,
77
+ "grad_norm": 13.110128400909407,
78
+ "learning_rate": 4.999578104083306e-07,
79
+ "logits/chosen": -1.3359375,
80
+ "logits/rejected": -1.3359375,
81
+ "logps/chosen": -87.0,
82
+ "logps/rejected": -123.5,
83
+ "loss": 1.4689,
84
+ "nll_loss": 1.015625,
85
+ "rewards/accuracies": 0.4312500059604645,
86
+ "rewards/chosen": 1.4921875,
87
+ "rewards/margins": -0.296875,
88
+ "rewards/rejected": 1.7890625,
89
+ "step": 40
90
+ },
91
+ {
92
+ "epoch": 0.13144922773578704,
93
+ "grad_norm": 12.991424819775641,
94
+ "learning_rate": 4.984826693294873e-07,
95
+ "logits/chosen": -1.203125,
96
+ "logits/rejected": -1.2265625,
97
+ "logps/chosen": -69.0,
98
+ "logps/rejected": -121.0,
99
+ "loss": 1.4155,
100
+ "nll_loss": 0.87890625,
101
+ "rewards/accuracies": 0.4124999940395355,
102
+ "rewards/chosen": 1.5703125,
103
+ "rewards/margins": -0.3984375,
104
+ "rewards/rejected": 1.96875,
105
+ "step": 50
106
+ },
107
+ {
108
+ "epoch": 0.15773907328294445,
109
+ "grad_norm": 48.0340809183726,
110
+ "learning_rate": 4.949122667718934e-07,
111
+ "logits/chosen": -1.125,
112
+ "logits/rejected": -1.1640625,
113
+ "logps/chosen": -83.0,
114
+ "logps/rejected": -189.0,
115
+ "loss": 1.3282,
116
+ "nll_loss": 1.0546875,
117
+ "rewards/accuracies": 0.4625000059604645,
118
+ "rewards/chosen": 1.3828125,
119
+ "rewards/margins": -0.09326171875,
120
+ "rewards/rejected": 1.4765625,
121
+ "step": 60
122
+ },
123
+ {
124
+ "epoch": 0.18402891883010186,
125
+ "grad_norm": 18.079466671824335,
126
+ "learning_rate": 4.892767091689785e-07,
127
+ "logits/chosen": -1.0234375,
128
+ "logits/rejected": -1.1015625,
129
+ "logps/chosen": -93.0,
130
+ "logps/rejected": -253.0,
131
+ "loss": 1.0846,
132
+ "nll_loss": 1.0,
133
+ "rewards/accuracies": 0.5625,
134
+ "rewards/chosen": 1.5390625,
135
+ "rewards/margins": 0.76171875,
136
+ "rewards/rejected": 0.7734375,
137
+ "step": 70
138
+ },
139
+ {
140
+ "epoch": 0.21031876437725927,
141
+ "grad_norm": 22.52030988227434,
142
+ "learning_rate": 4.816235168037004e-07,
143
+ "logits/chosen": -1.0078125,
144
+ "logits/rejected": -1.1015625,
145
+ "logps/chosen": -89.5,
146
+ "logps/rejected": -324.0,
147
+ "loss": 1.0849,
148
+ "nll_loss": 0.92578125,
149
+ "rewards/accuracies": 0.6937500238418579,
150
+ "rewards/chosen": 1.5546875,
151
+ "rewards/margins": 1.703125,
152
+ "rewards/rejected": -0.1484375,
153
+ "step": 80
154
+ },
155
+ {
156
+ "epoch": 0.2366086099244167,
157
+ "grad_norm": 23.44831636466729,
158
+ "learning_rate": 4.720172231068844e-07,
159
+ "logits/chosen": -1.0390625,
160
+ "logits/rejected": -1.1171875,
161
+ "logps/chosen": -88.0,
162
+ "logps/rejected": -318.0,
163
+ "loss": 1.0445,
164
+ "nll_loss": 0.99609375,
165
+ "rewards/accuracies": 0.625,
166
+ "rewards/chosen": 1.6015625,
167
+ "rewards/margins": 1.6015625,
168
+ "rewards/rejected": -0.002410888671875,
169
+ "step": 90
170
+ },
171
+ {
172
+ "epoch": 0.2628984554715741,
173
+ "grad_norm": 15.553732102036754,
174
+ "learning_rate": 4.605388304968914e-07,
175
+ "logits/chosen": -1.0625,
176
+ "logits/rejected": -1.1796875,
177
+ "logps/chosen": -85.0,
178
+ "logps/rejected": -492.0,
179
+ "loss": 1.0258,
180
+ "nll_loss": 1.0625,
181
+ "rewards/accuracies": 0.706250011920929,
182
+ "rewards/chosen": 1.4765625,
183
+ "rewards/margins": 3.203125,
184
+ "rewards/rejected": -1.734375,
185
+ "step": 100
186
+ },
187
+ {
188
+ "epoch": 0.2628984554715741,
189
+ "eval_logits/chosen": -1.1171875,
190
+ "eval_logits/rejected": -1.2265625,
191
+ "eval_logps/chosen": -84.0,
192
+ "eval_logps/rejected": -420.0,
193
+ "eval_loss": 1.0434179306030273,
194
+ "eval_nll_loss": 1.203125,
195
+ "eval_rewards/accuracies": 0.7200000286102295,
196
+ "eval_rewards/chosen": 1.3828125,
197
+ "eval_rewards/margins": 2.375,
198
+ "eval_rewards/rejected": -0.99609375,
199
+ "eval_runtime": 9.2898,
200
+ "eval_samples_per_second": 21.529,
201
+ "eval_steps_per_second": 2.691,
202
+ "step": 100
203
+ },
204
+ {
205
+ "epoch": 0.2891883010187315,
206
+ "grad_norm": 13.573028085768115,
207
+ "learning_rate": 4.472851273490984e-07,
208
+ "logits/chosen": -1.046875,
209
+ "logits/rejected": -1.1171875,
210
+ "logps/chosen": -82.5,
211
+ "logps/rejected": -246.0,
212
+ "loss": 1.0418,
213
+ "nll_loss": 0.921875,
214
+ "rewards/accuracies": 0.612500011920929,
215
+ "rewards/chosen": 1.6796875,
216
+ "rewards/margins": 1.1640625,
217
+ "rewards/rejected": 0.515625,
218
+ "step": 110
219
+ },
220
+ {
221
+ "epoch": 0.3154781465658889,
222
+ "grad_norm": 24.07297014456965,
223
+ "learning_rate": 4.323678718546552e-07,
224
+ "logits/chosen": -1.0625,
225
+ "logits/rejected": -1.125,
226
+ "logps/chosen": -79.0,
227
+ "logps/rejected": -380.0,
228
+ "loss": 0.9516,
229
+ "nll_loss": 0.921875,
230
+ "rewards/accuracies": 0.7124999761581421,
231
+ "rewards/chosen": 1.625,
232
+ "rewards/margins": 2.203125,
233
+ "rewards/rejected": -0.5859375,
234
+ "step": 120
235
+ },
236
+ {
237
+ "epoch": 0.3417679921130463,
238
+ "grad_norm": 13.788627232860541,
239
+ "learning_rate": 4.159128496504053e-07,
240
+ "logits/chosen": -1.0234375,
241
+ "logits/rejected": -1.0859375,
242
+ "logps/chosen": -82.5,
243
+ "logps/rejected": -402.0,
244
+ "loss": 0.9785,
245
+ "nll_loss": 0.96484375,
246
+ "rewards/accuracies": 0.7124999761581421,
247
+ "rewards/chosen": 1.5703125,
248
+ "rewards/margins": 2.515625,
249
+ "rewards/rejected": -0.9453125,
250
+ "step": 130
251
+ },
252
+ {
253
+ "epoch": 0.3680578376602037,
254
+ "grad_norm": 56.02055058840433,
255
+ "learning_rate": 3.9805881316624503e-07,
256
+ "logits/chosen": -0.98046875,
257
+ "logits/rejected": -1.0703125,
258
+ "logps/chosen": -94.0,
259
+ "logps/rejected": -364.0,
260
+ "loss": 0.9971,
261
+ "nll_loss": 1.015625,
262
+ "rewards/accuracies": 0.65625,
263
+ "rewards/chosen": 1.5546875,
264
+ "rewards/margins": 2.078125,
265
+ "rewards/rejected": -0.515625,
266
+ "step": 140
267
+ },
268
+ {
269
+ "epoch": 0.39434768320736113,
270
+ "grad_norm": 13.013669525516791,
271
+ "learning_rate": 3.78956311633581e-07,
272
+ "logits/chosen": -0.984375,
273
+ "logits/rejected": -1.1015625,
274
+ "logps/chosen": -80.5,
275
+ "logps/rejected": -478.0,
276
+ "loss": 0.9852,
277
+ "nll_loss": 0.93359375,
278
+ "rewards/accuracies": 0.7749999761581421,
279
+ "rewards/chosen": 1.609375,
280
+ "rewards/margins": 2.84375,
281
+ "rewards/rejected": -1.2421875,
282
+ "step": 150
283
+ },
284
+ {
285
+ "epoch": 0.42063752875451854,
286
+ "grad_norm": 40.15490393114984,
287
+ "learning_rate": 3.587664216205183e-07,
288
+ "logits/chosen": -1.0078125,
289
+ "logits/rejected": -1.078125,
290
+ "logps/chosen": -66.0,
291
+ "logps/rejected": -388.0,
292
+ "loss": 0.9339,
293
+ "nll_loss": 0.80859375,
294
+ "rewards/accuracies": 0.637499988079071,
295
+ "rewards/chosen": 1.5859375,
296
+ "rewards/margins": 2.46875,
297
+ "rewards/rejected": -0.87890625,
298
+ "step": 160
299
+ },
300
+ {
301
+ "epoch": 0.44692737430167595,
302
+ "grad_norm": 17.30322447545255,
303
+ "learning_rate": 3.376593887981886e-07,
304
+ "logits/chosen": -1.015625,
305
+ "logits/rejected": -1.0859375,
306
+ "logps/chosen": -93.5,
307
+ "logps/rejected": -464.0,
308
+ "loss": 0.9452,
309
+ "nll_loss": 0.94921875,
310
+ "rewards/accuracies": 0.6625000238418579,
311
+ "rewards/chosen": 1.6015625,
312
+ "rewards/margins": 3.0,
313
+ "rewards/rejected": -1.40625,
314
+ "step": 170
315
+ },
316
+ {
317
+ "epoch": 0.4732172198488334,
318
+ "grad_norm": 27.318196890495926,
319
+ "learning_rate": 3.1581319239114976e-07,
320
+ "logits/chosen": -0.984375,
321
+ "logits/rejected": -1.0390625,
322
+ "logps/chosen": -85.0,
323
+ "logps/rejected": -416.0,
324
+ "loss": 0.9577,
325
+ "nll_loss": 0.94140625,
326
+ "rewards/accuracies": 0.6875,
327
+ "rewards/chosen": 1.6953125,
328
+ "rewards/margins": 2.578125,
329
+ "rewards/rejected": -0.88671875,
330
+ "step": 180
331
+ },
332
+ {
333
+ "epoch": 0.4995070653959908,
334
+ "grad_norm": 19.71975113316725,
335
+ "learning_rate": 2.934120444167326e-07,
336
+ "logits/chosen": -0.9609375,
337
+ "logits/rejected": -1.0390625,
338
+ "logps/chosen": -71.5,
339
+ "logps/rejected": -456.0,
340
+ "loss": 0.9496,
341
+ "nll_loss": 0.8203125,
342
+ "rewards/accuracies": 0.706250011920929,
343
+ "rewards/chosen": 1.6484375,
344
+ "rewards/margins": 3.125,
345
+ "rewards/rejected": -1.484375,
346
+ "step": 190
347
+ },
348
+ {
349
+ "epoch": 0.5257969109431482,
350
+ "grad_norm": 14.910685294534735,
351
+ "learning_rate": 2.706448363680831e-07,
352
+ "logits/chosen": -0.98046875,
353
+ "logits/rejected": -1.046875,
354
+ "logps/chosen": -90.0,
355
+ "logps/rejected": -402.0,
356
+ "loss": 0.9286,
357
+ "nll_loss": 0.9140625,
358
+ "rewards/accuracies": 0.6312500238418579,
359
+ "rewards/chosen": 1.7421875,
360
+ "rewards/margins": 2.78125,
361
+ "rewards/rejected": -1.0390625,
362
+ "step": 200
363
+ },
364
+ {
365
+ "epoch": 0.5257969109431482,
366
+ "eval_logits/chosen": -1.046875,
367
+ "eval_logits/rejected": -1.140625,
368
+ "eval_logps/chosen": -83.0,
369
+ "eval_logps/rejected": -648.0,
370
+ "eval_loss": 0.9766796827316284,
371
+ "eval_nll_loss": 1.1640625,
372
+ "eval_rewards/accuracies": 0.7200000286102295,
373
+ "eval_rewards/chosen": 1.390625,
374
+ "eval_rewards/margins": 4.6875,
375
+ "eval_rewards/rejected": -3.28125,
376
+ "eval_runtime": 8.9047,
377
+ "eval_samples_per_second": 22.46,
378
+ "eval_steps_per_second": 2.808,
379
+ "step": 200
380
+ },
381
+ {
382
+ "epoch": 0.5520867564903056,
383
+ "grad_norm": 12.064748996822903,
384
+ "learning_rate": 2.477035464388184e-07,
385
+ "logits/chosen": -0.96875,
386
+ "logits/rejected": -1.078125,
387
+ "logps/chosen": -84.0,
388
+ "logps/rejected": -636.0,
389
+ "loss": 0.9051,
390
+ "nll_loss": 0.85546875,
391
+ "rewards/accuracies": 0.6937500238418579,
392
+ "rewards/chosen": 1.7734375,
393
+ "rewards/margins": 4.59375,
394
+ "rewards/rejected": -2.8125,
395
+ "step": 210
396
+ },
397
+ {
398
+ "epoch": 0.578376602037463,
399
+ "grad_norm": 123.10630313220432,
400
+ "learning_rate": 2.2478162071993296e-07,
401
+ "logits/chosen": -0.98828125,
402
+ "logits/rejected": -1.1171875,
403
+ "logps/chosen": -61.75,
404
+ "logps/rejected": -764.0,
405
+ "loss": 0.8841,
406
+ "nll_loss": 0.85546875,
407
+ "rewards/accuracies": 0.762499988079071,
408
+ "rewards/chosen": 1.484375,
409
+ "rewards/margins": 6.0625,
410
+ "rewards/rejected": -4.5625,
411
+ "step": 220
412
+ },
413
+ {
414
+ "epoch": 0.6046664475846204,
415
+ "grad_norm": 26.672092273760438,
416
+ "learning_rate": 2.0207234201906545e-07,
417
+ "logits/chosen": -0.97265625,
418
+ "logits/rejected": -1.09375,
419
+ "logps/chosen": -67.0,
420
+ "logps/rejected": -592.0,
421
+ "loss": 0.9282,
422
+ "nll_loss": 0.8203125,
423
+ "rewards/accuracies": 0.65625,
424
+ "rewards/chosen": 1.5,
425
+ "rewards/margins": 4.46875,
426
+ "rewards/rejected": -2.953125,
427
+ "step": 230
428
+ },
429
+ {
430
+ "epoch": 0.6309562931317778,
431
+ "grad_norm": 61.7684539616632,
432
+ "learning_rate": 1.7976720005660767e-07,
433
+ "logits/chosen": -0.96875,
434
+ "logits/rejected": -1.0859375,
435
+ "logps/chosen": -80.0,
436
+ "logps/rejected": -632.0,
437
+ "loss": 0.9369,
438
+ "nll_loss": 0.828125,
439
+ "rewards/accuracies": 0.668749988079071,
440
+ "rewards/chosen": 1.6015625,
441
+ "rewards/margins": 4.5625,
442
+ "rewards/rejected": -2.96875,
443
+ "step": 240
444
+ },
445
+ {
446
+ "epoch": 0.6572461386789352,
447
+ "grad_norm": 11.575062050232066,
448
+ "learning_rate": 1.5805427678152674e-07,
449
+ "logits/chosen": -0.96484375,
450
+ "logits/rejected": -1.078125,
451
+ "logps/chosen": -74.0,
452
+ "logps/rejected": -576.0,
453
+ "loss": 0.9185,
454
+ "nll_loss": 0.90234375,
455
+ "rewards/accuracies": 0.7437499761581421,
456
+ "rewards/chosen": 1.6484375,
457
+ "rewards/margins": 4.21875,
458
+ "rewards/rejected": -2.578125,
459
+ "step": 250
460
+ },
461
+ {
462
+ "epoch": 0.6835359842260926,
463
+ "grad_norm": 18.395086537639692,
464
+ "learning_rate": 1.371166604222777e-07,
465
+ "logits/chosen": -0.9296875,
466
+ "logits/rejected": -1.046875,
467
+ "logps/chosen": -72.5,
468
+ "logps/rejected": -510.0,
469
+ "loss": 0.9474,
470
+ "nll_loss": 0.8359375,
471
+ "rewards/accuracies": 0.675000011920929,
472
+ "rewards/chosen": 1.5625,
473
+ "rewards/margins": 3.65625,
474
+ "rewards/rejected": -2.09375,
475
+ "step": 260
476
+ },
477
+ {
478
+ "epoch": 0.70982582977325,
479
+ "grad_norm": 66.67533840049661,
480
+ "learning_rate": 1.1713090164588606e-07,
481
+ "logits/chosen": -0.9609375,
482
+ "logits/rejected": -1.046875,
483
+ "logps/chosen": -85.5,
484
+ "logps/rejected": -568.0,
485
+ "loss": 0.9435,
486
+ "nll_loss": 0.93359375,
487
+ "rewards/accuracies": 0.731249988079071,
488
+ "rewards/chosen": 1.6484375,
489
+ "rewards/margins": 3.984375,
490
+ "rewards/rejected": -2.328125,
491
+ "step": 270
492
+ },
493
+ {
494
+ "epoch": 0.7361156753204074,
495
+ "grad_norm": 13.452197354246106,
496
+ "learning_rate": 9.826552484321085e-08,
497
+ "logits/chosen": -0.92578125,
498
+ "logits/rejected": -1.0,
499
+ "logps/chosen": -71.0,
500
+ "logps/rejected": -380.0,
501
+ "loss": 0.9612,
502
+ "nll_loss": 0.83203125,
503
+ "rewards/accuracies": 0.637499988079071,
504
+ "rewards/chosen": 1.6640625,
505
+ "rewards/margins": 2.46875,
506
+ "rewards/rejected": -0.8046875,
507
+ "step": 280
508
+ },
509
+ {
510
+ "epoch": 0.7624055208675649,
511
+ "grad_norm": 14.445709675738982,
512
+ "learning_rate": 8.067960709356478e-08,
513
+ "logits/chosen": -0.9296875,
514
+ "logits/rejected": -1.0,
515
+ "logps/chosen": -78.5,
516
+ "logps/rejected": -382.0,
517
+ "loss": 0.9563,
518
+ "nll_loss": 0.890625,
519
+ "rewards/accuracies": 0.6937500238418579,
520
+ "rewards/chosen": 1.6484375,
521
+ "rewards/margins": 2.46875,
522
+ "rewards/rejected": -0.82421875,
523
+ "step": 290
524
+ },
525
+ {
526
+ "epoch": 0.7886953664147223,
527
+ "grad_norm": 12.432822027887278,
528
+ "learning_rate": 6.452143679117964e-08,
529
+ "logits/chosen": -0.921875,
530
+ "logits/rejected": -1.0,
531
+ "logps/chosen": -74.0,
532
+ "logps/rejected": -414.0,
533
+ "loss": 0.9449,
534
+ "nll_loss": 0.796875,
535
+ "rewards/accuracies": 0.6937500238418579,
536
+ "rewards/chosen": 1.609375,
537
+ "rewards/margins": 2.46875,
538
+ "rewards/rejected": -0.85546875,
539
+ "step": 300
540
+ },
541
+ {
542
+ "epoch": 0.7886953664147223,
543
+ "eval_logits/chosen": -0.99609375,
544
+ "eval_logits/rejected": -1.078125,
545
+ "eval_logps/chosen": -76.5,
546
+ "eval_logps/rejected": -572.0,
547
+ "eval_loss": 0.9423046708106995,
548
+ "eval_nll_loss": 1.09375,
549
+ "eval_rewards/accuracies": 0.7599999904632568,
550
+ "eval_rewards/chosen": 1.453125,
551
+ "eval_rewards/margins": 3.984375,
552
+ "eval_rewards/rejected": -2.515625,
553
+ "eval_runtime": 9.1636,
554
+ "eval_samples_per_second": 21.825,
555
+ "eval_steps_per_second": 2.728,
556
+ "step": 300
557
+ },
558
+ {
559
+ "epoch": 0.8149852119618797,
560
+ "grad_norm": 13.49396528945543,
561
+ "learning_rate": 4.992726324427901e-08,
562
+ "logits/chosen": -0.92578125,
563
+ "logits/rejected": -1.046875,
564
+ "logps/chosen": -72.5,
565
+ "logps/rejected": -612.0,
566
+ "loss": 0.9326,
567
+ "nll_loss": 0.859375,
568
+ "rewards/accuracies": 0.7250000238418579,
569
+ "rewards/chosen": 1.7265625,
570
+ "rewards/margins": 4.25,
571
+ "rewards/rejected": -2.53125,
572
+ "step": 310
573
+ },
574
+ {
575
+ "epoch": 0.8412750575090371,
576
+ "grad_norm": 85.18507320569196,
577
+ "learning_rate": 3.702014779041826e-08,
578
+ "logits/chosen": -0.94140625,
579
+ "logits/rejected": -1.046875,
580
+ "logps/chosen": -86.5,
581
+ "logps/rejected": -644.0,
582
+ "loss": 0.9377,
583
+ "nll_loss": 0.9609375,
584
+ "rewards/accuracies": 0.7437499761581421,
585
+ "rewards/chosen": 1.6640625,
586
+ "rewards/margins": 4.6875,
587
+ "rewards/rejected": -3.046875,
588
+ "step": 320
589
+ },
590
+ {
591
+ "epoch": 0.8675649030561945,
592
+ "grad_norm": 9.69506288075215,
593
+ "learning_rate": 2.5908926115744994e-08,
594
+ "logits/chosen": -0.921875,
595
+ "logits/rejected": -1.0078125,
596
+ "logps/chosen": -80.5,
597
+ "logps/rejected": -428.0,
598
+ "loss": 0.9131,
599
+ "nll_loss": 0.90234375,
600
+ "rewards/accuracies": 0.6812499761581421,
601
+ "rewards/chosen": 1.7109375,
602
+ "rewards/margins": 2.78125,
603
+ "rewards/rejected": -1.0703125,
604
+ "step": 330
605
+ },
606
+ {
607
+ "epoch": 0.8938547486033519,
608
+ "grad_norm": 17.413302922847254,
609
+ "learning_rate": 1.6687290528135722e-08,
610
+ "logits/chosen": -0.9453125,
611
+ "logits/rejected": -1.0390625,
612
+ "logps/chosen": -90.5,
613
+ "logps/rejected": -540.0,
614
+ "loss": 0.9705,
615
+ "nll_loss": 0.921875,
616
+ "rewards/accuracies": 0.7250000238418579,
617
+ "rewards/chosen": 1.6953125,
618
+ "rewards/margins": 3.828125,
619
+ "rewards/rejected": -2.140625,
620
+ "step": 340
621
+ },
622
+ {
623
+ "epoch": 0.9201445941505094,
624
+ "grad_norm": 25.357904792782886,
625
+ "learning_rate": 9.432999922687396e-09,
626
+ "logits/chosen": -0.93359375,
627
+ "logits/rejected": -1.015625,
628
+ "logps/chosen": -77.0,
629
+ "logps/rejected": -536.0,
630
+ "loss": 0.9608,
631
+ "nll_loss": 0.859375,
632
+ "rewards/accuracies": 0.706250011920929,
633
+ "rewards/chosen": 1.625,
634
+ "rewards/margins": 3.890625,
635
+ "rewards/rejected": -2.265625,
636
+ "step": 350
637
+ },
638
+ {
639
+ "epoch": 0.9464344396976668,
640
+ "grad_norm": 17.381689281866176,
641
+ "learning_rate": 4.207224101311246e-09,
642
+ "logits/chosen": -0.93359375,
643
+ "logits/rejected": -1.015625,
644
+ "logps/chosen": -100.0,
645
+ "logps/rejected": -580.0,
646
+ "loss": 0.895,
647
+ "nll_loss": 0.9296875,
648
+ "rewards/accuracies": 0.7124999761581421,
649
+ "rewards/chosen": 1.53125,
650
+ "rewards/margins": 4.0625,
651
+ "rewards/rejected": -2.53125,
652
+ "step": 360
653
+ },
654
+ {
655
+ "epoch": 0.9727242852448242,
656
+ "grad_norm": 62.410423904563885,
657
+ "learning_rate": 1.0540279752731252e-09,
658
+ "logits/chosen": -0.95703125,
659
+ "logits/rejected": -1.03125,
660
+ "logps/chosen": -79.0,
661
+ "logps/rejected": -524.0,
662
+ "loss": 0.9448,
663
+ "nll_loss": 0.92578125,
664
+ "rewards/accuracies": 0.731249988079071,
665
+ "rewards/chosen": 1.6171875,
666
+ "rewards/margins": 3.578125,
667
+ "rewards/rejected": -1.9609375,
668
+ "step": 370
669
+ },
670
+ {
671
+ "epoch": 0.9990141307919816,
672
+ "grad_norm": 14.434777424546557,
673
+ "learning_rate": 0.0,
674
+ "logits/chosen": -0.93359375,
675
+ "logits/rejected": -1.03125,
676
+ "logps/chosen": -93.0,
677
+ "logps/rejected": -624.0,
678
+ "loss": 0.974,
679
+ "nll_loss": 0.953125,
680
+ "rewards/accuracies": 0.65625,
681
+ "rewards/chosen": 1.6484375,
682
+ "rewards/margins": 4.3125,
683
+ "rewards/rejected": -2.671875,
684
+ "step": 380
685
+ },
686
+ {
687
+ "epoch": 0.9990141307919816,
688
+ "step": 380,
689
+ "total_flos": 0.0,
690
+ "train_loss": 1.0731394717567846,
691
+ "train_runtime": 3128.4011,
692
+ "train_samples_per_second": 7.78,
693
+ "train_steps_per_second": 0.121
694
+ }
695
+ ],
696
+ "logging_steps": 10,
697
+ "max_steps": 380,
698
+ "num_input_tokens_seen": 0,
699
+ "num_train_epochs": 1,
700
+ "save_steps": 100,
701
+ "stateful_callbacks": {
702
+ "TrainerControl": {
703
+ "args": {
704
+ "should_epoch_stop": false,
705
+ "should_evaluate": false,
706
+ "should_log": false,
707
+ "should_save": true,
708
+ "should_training_stop": true
709
+ },
710
+ "attributes": {}
711
+ }
712
+ },
713
+ "total_flos": 0.0,
714
+ "train_batch_size": 2,
715
+ "trial_name": null,
716
+ "trial_params": null
717
+ }