YYYYYYibo committed on
Commit 8d6f77f
1 Parent(s): 922e896

Model save

Files changed (4):
  1. README.md +75 -0
  2. all_results.json +8 -0
  3. train_results.json +8 -0
  4. trainer_state.json +284 -0
README.md ADDED
@@ -0,0 +1,75 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: alignment-handbook/zephyr-7b-sft-full
+ model-index:
+ - name: nash_dpo_doff_no_golden_iter_3
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # nash_dpo_doff_no_golden_iter_3
+
+ This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6442
+ - Rewards/chosen: 0.1897
+ - Rewards/rejected: 0.0357
+ - Rewards/accuracies: 0.5660
+ - Rewards/margins: 0.1540
+ - Logps/rejected: -292.1758
+ - Logps/chosen: -293.3752
+ - Logits/rejected: -2.4795
+ - Logits/chosen: -2.5852
+
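+ (The reported margin is just the gap between the two reward terms: 0.1897 - 0.0357 = 0.1540.)
+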
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a reproduction sketch follows the list):
+ - learning_rate: 5e-06
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
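+ These settings map onto a trl `DPOTrainer` run. Below is a minimal reproduction sketch, not the actual training script (which is not part of this commit): the dataset id `my_org/preference_data` and the LoRA ranks are placeholders, mixed precision is an assumption, and the API shown is the trl 0.7-era one matching the framework versions below.
+
+ ```python
+ from datasets import load_dataset
+ from peft import LoraConfig
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
+ from trl import DPOTrainer
+
+ base = "alignment-handbook/zephyr-7b-sft-full"
+ model = AutoModelForCausalLM.from_pretrained(base)
+ tokenizer = AutoTokenizer.from_pretrained(base)
+
+ # Placeholder id: any dataset with "prompt", "chosen" and "rejected" columns.
+ train_dataset = load_dataset("my_org/preference_data", split="train")
+
+ args = TrainingArguments(
+     output_dir="nash_dpo_doff_no_golden_iter_3",
+     per_device_train_batch_size=2,  # 2 per device x 4 GPUs x 16 accumulation = 128
+     gradient_accumulation_steps=16,
+     learning_rate=5e-6,
+     lr_scheduler_type="cosine",
+     warmup_ratio=0.1,
+     num_train_epochs=1,
+     seed=42,
+     bf16=True,  # assumption; the card does not record the precision used
+ )
+
+ trainer = DPOTrainer(
+     model,
+     ref_model=None,  # with a peft_config, the frozen base weights serve as the reference
+     args=args,
+     train_dataset=train_dataset,
+     tokenizer=tokenizer,
+     peft_config=LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM"),  # placeholder ranks
+ )
+ trainer.train()
+ ```
+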
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.4768        | 0.62  | 100  | 0.6442          | 0.1897         | 0.0357           | 0.5660             | 0.1540          | -292.1758      | -293.3752    | -2.4795         | -2.5852       |
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
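+
+ ### Usage (sketch)
+
+ This repository holds a PEFT (LoRA) adapter rather than full model weights. Assuming the adapter is published as `YYYYYYibo/nash_dpo_doff_no_golden_iter_3` (inferred from the model name above, not confirmed by this commit), it can be loaded roughly as follows:
+
+ ```python
+ from peft import AutoPeftModelForCausalLM  # resolves the base model from the adapter config
+ from transformers import AutoTokenizer
+
+ # The repo id is an assumption inferred from this card's model name.
+ model = AutoPeftModelForCausalLM.from_pretrained("YYYYYYibo/nash_dpo_doff_no_golden_iter_3")
+ tokenizer = AutoTokenizer.from_pretrained("alignment-handbook/zephyr-7b-sft-full")
+
+ inputs = tokenizer("Hello, how are you?", return_tensors="pt")
+ print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))
+ ```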
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.0,
+   "train_loss": 0.5280830058050744,
+   "train_runtime": 8024.3189,
+   "train_samples": 20735,
+   "train_samples_per_second": 2.584,
+   "train_steps_per_second": 0.02
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.0,
+   "train_loss": 0.5280830058050744,
+   "train_runtime": 8024.3189,
+   "train_samples": 20735,
+   "train_samples_per_second": 2.584,
+   "train_steps_per_second": 0.02
+ }
trainer_state.json ADDED
@@ -0,0 +1,284 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 100,
+   "global_step": 162,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.01,
+       "learning_rate": 2.9411764705882356e-07,
+       "logits/chosen": -2.640578269958496,
+       "logits/rejected": -2.6619861125946045,
+       "logps/chosen": -410.817138671875,
+       "logps/rejected": -784.9041137695312,
+       "loss": 0.6931,
+       "rewards/accuracies": 0.0,
+       "rewards/chosen": 0.0,
+       "rewards/margins": 0.0,
+       "rewards/rejected": 0.0,
+       "step": 1
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 2.9411764705882355e-06,
+       "logits/chosen": -2.580946683883667,
+       "logits/rejected": -2.562483549118042,
+       "logps/chosen": -348.2420959472656,
+       "logps/rejected": -529.5997924804688,
+       "loss": 0.6857,
+       "rewards/accuracies": 0.5416666865348816,
+       "rewards/chosen": 0.047004420310258865,
+       "rewards/margins": 0.018747717142105103,
+       "rewards/rejected": 0.028256705030798912,
+       "step": 10
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 4.994720857837211e-06,
+       "logits/chosen": -2.563664197921753,
+       "logits/rejected": -2.5549397468566895,
+       "logps/chosen": -303.5776062011719,
+       "logps/rejected": -509.2774353027344,
+       "loss": 0.6578,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": 0.23815405368804932,
+       "rewards/margins": 0.07588066160678864,
+       "rewards/rejected": 0.1622733771800995,
+       "step": 20
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 4.901488388458247e-06,
+       "logits/chosen": -2.526702404022217,
+       "logits/rejected": -2.5158963203430176,
+       "logps/chosen": -335.10333251953125,
+       "logps/rejected": -552.9954223632812,
+       "loss": 0.6058,
+       "rewards/accuracies": 0.7875000238418579,
+       "rewards/chosen": 0.2050032615661621,
+       "rewards/margins": 0.20939965546131134,
+       "rewards/rejected": -0.004396387841552496,
+       "step": 30
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 4.6959649910976165e-06,
+       "logits/chosen": -2.532031536102295,
+       "logits/rejected": -2.4838037490844727,
+       "logps/chosen": -300.42132568359375,
+       "logps/rejected": -509.403564453125,
+       "loss": 0.5705,
+       "rewards/accuracies": 0.7875000238418579,
+       "rewards/chosen": 0.23074543476104736,
+       "rewards/margins": 0.2837901711463928,
+       "rewards/rejected": -0.053044695407152176,
+       "step": 40
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 4.387760711393052e-06,
+       "logits/chosen": -2.5283217430114746,
+       "logits/rejected": -2.46999192237854,
+       "logps/chosen": -327.3263244628906,
+       "logps/rejected": -512.6666870117188,
+       "loss": 0.5487,
+       "rewards/accuracies": 0.800000011920929,
+       "rewards/chosen": 0.2256808578968048,
+       "rewards/margins": 0.36225491762161255,
+       "rewards/rejected": -0.13657405972480774,
+       "step": 50
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 3.991286838919086e-06,
+       "logits/chosen": -2.508136034011841,
+       "logits/rejected": -2.471802234649658,
+       "logps/chosen": -333.85711669921875,
+       "logps/rejected": -523.9845581054688,
+       "loss": 0.5393,
+       "rewards/accuracies": 0.784375011920929,
+       "rewards/chosen": 0.1835666298866272,
+       "rewards/margins": 0.38937950134277344,
+       "rewards/rejected": -0.20581285655498505,
+       "step": 60
+     },
+     {
+       "epoch": 0.43,
+       "learning_rate": 3.5250820513035403e-06,
+       "logits/chosen": -2.492502450942993,
+       "logits/rejected": -2.484013080596924,
+       "logps/chosen": -314.8898620605469,
+       "logps/rejected": -589.8179931640625,
+       "loss": 0.5165,
+       "rewards/accuracies": 0.84375,
+       "rewards/chosen": 0.19387896358966827,
+       "rewards/margins": 0.5265167951583862,
+       "rewards/rejected": -0.33263787627220154,
+       "step": 70
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 3.0109455662659126e-06,
+       "logits/chosen": -2.527844190597534,
+       "logits/rejected": -2.502004623413086,
+       "logps/chosen": -320.5412292480469,
+       "logps/rejected": -579.2618408203125,
+       "loss": 0.5083,
+       "rewards/accuracies": 0.815625011920929,
+       "rewards/chosen": 0.12712647020816803,
+       "rewards/margins": 0.5316451787948608,
+       "rewards/rejected": -0.4045187532901764,
+       "step": 80
+     },
+     {
+       "epoch": 0.56,
+       "learning_rate": 2.4729178344249007e-06,
+       "logits/chosen": -2.5523924827575684,
+       "logits/rejected": -2.5245110988616943,
+       "logps/chosen": -341.2242431640625,
+       "logps/rejected": -596.2203369140625,
+       "loss": 0.496,
+       "rewards/accuracies": 0.8125,
+       "rewards/chosen": 0.06856798380613327,
+       "rewards/margins": 0.590112030506134,
+       "rewards/rejected": -0.5215439796447754,
+       "step": 90
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 1.936156434546515e-06,
+       "logits/chosen": -2.5596001148223877,
+       "logits/rejected": -2.568671703338623,
+       "logps/chosen": -333.087158203125,
+       "logps/rejected": -611.3602905273438,
+       "loss": 0.4768,
+       "rewards/accuracies": 0.8187500238418579,
+       "rewards/chosen": 0.12124671787023544,
+       "rewards/margins": 0.6403753757476807,
+       "rewards/rejected": -0.5191286206245422,
+       "step": 100
+     },
+     {
+       "epoch": 0.62,
+       "eval_logits/chosen": -2.585242509841919,
+       "eval_logits/rejected": -2.479508399963379,
+       "eval_logps/chosen": -293.3752136230469,
+       "eval_logps/rejected": -292.1757507324219,
+       "eval_loss": 0.6442223191261292,
+       "eval_rewards/accuracies": 0.5659999847412109,
+       "eval_rewards/chosen": 0.18966493010520935,
+       "eval_rewards/margins": 0.15400375425815582,
+       "eval_rewards/rejected": 0.035661179572343826,
+       "eval_runtime": 398.0482,
+       "eval_samples_per_second": 5.025,
+       "eval_steps_per_second": 0.628,
+       "step": 100
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 1.4257597331216211e-06,
+       "logits/chosen": -2.5242629051208496,
+       "logits/rejected": -2.484691619873047,
+       "logps/chosen": -325.24761962890625,
+       "logps/rejected": -541.4208984375,
+       "loss": 0.4856,
+       "rewards/accuracies": 0.8031250238418579,
+       "rewards/chosen": 0.11120424419641495,
+       "rewards/margins": 0.6161486506462097,
+       "rewards/rejected": -0.5049443244934082,
+       "step": 110
+     },
+     {
+       "epoch": 0.74,
+       "learning_rate": 9.655933126436565e-07,
+       "logits/chosen": -2.5429892539978027,
+       "logits/rejected": -2.520012855529785,
+       "logps/chosen": -329.37347412109375,
+       "logps/rejected": -596.545654296875,
+       "loss": 0.4791,
+       "rewards/accuracies": 0.800000011920929,
+       "rewards/chosen": 0.06060720607638359,
+       "rewards/margins": 0.6603037714958191,
+       "rewards/rejected": -0.5996966361999512,
+       "step": 120
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 5.771740434959278e-07,
+       "logits/chosen": -2.5939841270446777,
+       "logits/rejected": -2.543733835220337,
+       "logps/chosen": -326.51824951171875,
+       "logps/rejected": -585.2608032226562,
+       "loss": 0.4705,
+       "rewards/accuracies": 0.831250011920929,
+       "rewards/chosen": 0.0589924156665802,
+       "rewards/margins": 0.7284678220748901,
+       "rewards/rejected": -0.6694754362106323,
+       "step": 130
+     },
+     {
+       "epoch": 0.86,
+       "learning_rate": 2.786639790067719e-07,
+       "logits/chosen": -2.538792848587036,
+       "logits/rejected": -2.572946548461914,
+       "logps/chosen": -327.40155029296875,
+       "logps/rejected": -593.7877197265625,
+       "loss": 0.4828,
+       "rewards/accuracies": 0.828125,
+       "rewards/chosen": 0.006606454961001873,
+       "rewards/margins": 0.6702617406845093,
+       "rewards/rejected": -0.6636553406715393,
+       "step": 140
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 8.402111802159413e-08,
+       "logits/chosen": -2.60109281539917,
+       "logits/rejected": -2.500326633453369,
+       "logps/chosen": -350.4434814453125,
+       "logps/rejected": -545.3392333984375,
+       "loss": 0.4729,
+       "rewards/accuracies": 0.800000011920929,
+       "rewards/chosen": 0.03271277993917465,
+       "rewards/margins": 0.6479501128196716,
+       "rewards/rejected": -0.615237295627594,
+       "step": 150
+     },
+     {
+       "epoch": 0.99,
+       "learning_rate": 2.34674439005822e-09,
+       "logits/chosen": -2.6234354972839355,
+       "logits/rejected": -2.563753366470337,
+       "logps/chosen": -370.4894104003906,
+       "logps/rejected": -656.5492553710938,
+       "loss": 0.4684,
+       "rewards/accuracies": 0.784375011920929,
+       "rewards/chosen": 0.009518811479210854,
+       "rewards/margins": 0.7224863171577454,
+       "rewards/rejected": -0.7129674553871155,
+       "step": 160
+     },
+     {
+       "epoch": 1.0,
+       "step": 162,
+       "total_flos": 0.0,
+       "train_loss": 0.5280830058050744,
+       "train_runtime": 8024.3189,
+       "train_samples_per_second": 2.584,
+       "train_steps_per_second": 0.02
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 162,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "total_flos": 0.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
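
The `log_history` array above is plain JSON, so the reward-margin trend is easy to pull out with the standard library; a minimal sketch, assuming trainer_state.json sits in the working directory:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss" and "rewards/margins"; the eval entry uses eval_* keys.
for entry in state["log_history"]:
    if "rewards/margins" in entry:
        print(f"step {entry['step']:>3}  loss {entry['loss']:.4f}  "
              f"margin {entry['rewards/margins']:.4f}")
```

The margin climbs from 0.0 at step 1 to roughly 0.72 by step 160, tracking the fall in training loss.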