pbevan11 commited on
Commit
cd45551
·
verified ·
1 Parent(s): 5b5bc5a

Model save

Browse files
README.md ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ license: apache-2.0
4
+ base_model: pbevan11/Mistral-Nemo-Instruct-MCAI-SFT-revision-only
5
+ tags:
6
+ - trl
7
+ - dpo
8
+ - generated_from_trainer
9
+ model-index:
10
+ - name: Mistral-Nemo-Instruct-MCAI-SFT-DPO-revision-only
11
+ results: []
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # Mistral-Nemo-Instruct-MCAI-SFT-DPO-revision-only
18
+
19
+ This model is a fine-tuned version of [pbevan11/Mistral-Nemo-Instruct-MCAI-SFT-revision-only](https://huggingface.co/pbevan11/Mistral-Nemo-Instruct-MCAI-SFT-revision-only) on an unspecified dataset.
20
+
21
+ ## Model description
22
+
23
+ More information needed
24
+
25
+ ## Intended uses & limitations
26
+
27
+ More information needed
28
+
29
+ ## Training and evaluation data
30
+
31
+ More information needed
32
+
33
+ ## Training procedure
34
+
35
+ ### Training hyperparameters
36
+
37
+ The following hyperparameters were used during training:
38
+ - learning_rate: 5e-07
39
+ - train_batch_size: 6
40
+ - eval_batch_size: 8
41
+ - seed: 42
42
+ - distributed_type: multi-GPU
43
+ - num_devices: 8
44
+ - total_train_batch_size: 48
45
+ - total_eval_batch_size: 64
46
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
47
+ - lr_scheduler_type: linear
48
+ - lr_scheduler_warmup_ratio: 0.1
49
+ - num_epochs: 1
50
+
51
+ ### Training results
52
+
53
+
54
+
55
+ ### Framework versions
56
+
57
+ - Transformers 4.45.1
58
+ - Pytorch 2.4.1+cu121
59
+ - Datasets 3.0.1
60
+ - Tokenizers 0.20.0
all_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.0,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.5670995542988975,
5
+ "train_runtime": 1780.4141,
6
+ "train_samples": 8069,
7
+ "train_samples_per_second": 4.532,
8
+ "train_steps_per_second": 0.095
9
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "transformers_version": "4.45.1"
6
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.0,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.5670995542988975,
5
+ "train_runtime": 1780.4141,
6
+ "train_samples": 8069,
7
+ "train_samples_per_second": 4.532,
8
+ "train_steps_per_second": 0.095
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,297 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
+ "eval_steps": 1000,
6
+ "global_step": 169,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.005917159763313609,
13
+ "grad_norm": 86.11190361804692,
14
+ "learning_rate": 2.941176470588235e-08,
15
+ "logits/chosen": -0.1997361034154892,
16
+ "logits/rejected": -0.19101263582706451,
17
+ "logps/chosen": -210.513671875,
18
+ "logps/rejected": -157.554931640625,
19
+ "loss": 0.6931,
20
+ "rewards/accuracies": 0.0,
21
+ "rewards/chosen": 0.0,
22
+ "rewards/margins": 0.0,
23
+ "rewards/rejected": 0.0,
24
+ "step": 1
25
+ },
26
+ {
27
+ "epoch": 0.05917159763313609,
28
+ "grad_norm": 78.94990602073459,
29
+ "learning_rate": 2.941176470588235e-07,
30
+ "logits/chosen": -0.13639792799949646,
31
+ "logits/rejected": -0.1734662801027298,
32
+ "logps/chosen": -218.1241912841797,
33
+ "logps/rejected": -238.053466796875,
34
+ "loss": 0.6903,
35
+ "rewards/accuracies": 0.37037035822868347,
36
+ "rewards/chosen": -0.008327378891408443,
37
+ "rewards/margins": -0.011080991476774216,
38
+ "rewards/rejected": 0.002753612119704485,
39
+ "step": 10
40
+ },
41
+ {
42
+ "epoch": 0.11834319526627218,
43
+ "grad_norm": 76.16607821763226,
44
+ "learning_rate": 4.901315789473684e-07,
45
+ "logits/chosen": -0.1947246491909027,
46
+ "logits/rejected": -0.24646611511707306,
47
+ "logps/chosen": -177.68190002441406,
48
+ "logps/rejected": -174.81893920898438,
49
+ "loss": 0.6672,
50
+ "rewards/accuracies": 0.5833333730697632,
51
+ "rewards/chosen": -0.04245181754231453,
52
+ "rewards/margins": 0.05021858215332031,
53
+ "rewards/rejected": -0.09267039597034454,
54
+ "step": 20
55
+ },
56
+ {
57
+ "epoch": 0.17751479289940827,
58
+ "grad_norm": 109.70241370606963,
59
+ "learning_rate": 4.5723684210526313e-07,
60
+ "logits/chosen": -0.06654810905456543,
61
+ "logits/rejected": -0.10471577942371368,
62
+ "logps/chosen": -205.10757446289062,
63
+ "logps/rejected": -221.33865356445312,
64
+ "loss": 0.6287,
65
+ "rewards/accuracies": 0.6333333253860474,
66
+ "rewards/chosen": -0.1406942903995514,
67
+ "rewards/margins": 0.19435003399848938,
68
+ "rewards/rejected": -0.33504432439804077,
69
+ "step": 30
70
+ },
71
+ {
72
+ "epoch": 0.23668639053254437,
73
+ "grad_norm": 104.51396601202018,
74
+ "learning_rate": 4.243421052631579e-07,
75
+ "logits/chosen": -0.28040310740470886,
76
+ "logits/rejected": -0.22528812289237976,
77
+ "logps/chosen": -218.5230712890625,
78
+ "logps/rejected": -223.2051239013672,
79
+ "loss": 0.5973,
80
+ "rewards/accuracies": 0.699999988079071,
81
+ "rewards/chosen": -0.35167446732521057,
82
+ "rewards/margins": 0.22981619834899902,
83
+ "rewards/rejected": -0.5814906358718872,
84
+ "step": 40
85
+ },
86
+ {
87
+ "epoch": 0.2958579881656805,
88
+ "grad_norm": 77.4636654508096,
89
+ "learning_rate": 3.914473684210526e-07,
90
+ "logits/chosen": -0.09673222154378891,
91
+ "logits/rejected": -0.046957988291978836,
92
+ "logps/chosen": -151.72000122070312,
93
+ "logps/rejected": -199.17935180664062,
94
+ "loss": 0.5671,
95
+ "rewards/accuracies": 0.7833333611488342,
96
+ "rewards/chosen": -0.3736603260040283,
97
+ "rewards/margins": 0.3569501042366028,
98
+ "rewards/rejected": -0.7306104898452759,
99
+ "step": 50
100
+ },
101
+ {
102
+ "epoch": 0.35502958579881655,
103
+ "grad_norm": 64.26194560632132,
104
+ "learning_rate": 3.5855263157894734e-07,
105
+ "logits/chosen": -0.08753165602684021,
106
+ "logits/rejected": -0.07491908967494965,
107
+ "logps/chosen": -178.92002868652344,
108
+ "logps/rejected": -237.271240234375,
109
+ "loss": 0.5653,
110
+ "rewards/accuracies": 0.6666666865348816,
111
+ "rewards/chosen": -0.44847816228866577,
112
+ "rewards/margins": 0.366828054189682,
113
+ "rewards/rejected": -0.8153061866760254,
114
+ "step": 60
115
+ },
116
+ {
117
+ "epoch": 0.41420118343195267,
118
+ "grad_norm": 75.47610499926685,
119
+ "learning_rate": 3.2565789473684206e-07,
120
+ "logits/chosen": -0.25761300325393677,
121
+ "logits/rejected": -0.25849616527557373,
122
+ "logps/chosen": -191.11752319335938,
123
+ "logps/rejected": -177.6903839111328,
124
+ "loss": 0.5494,
125
+ "rewards/accuracies": 0.6500000357627869,
126
+ "rewards/chosen": -0.5434780120849609,
127
+ "rewards/margins": 0.3972131609916687,
128
+ "rewards/rejected": -0.9406911730766296,
129
+ "step": 70
130
+ },
131
+ {
132
+ "epoch": 0.47337278106508873,
133
+ "grad_norm": 68.31555898880357,
134
+ "learning_rate": 2.9276315789473684e-07,
135
+ "logits/chosen": -0.06761662662029266,
136
+ "logits/rejected": -0.08591805398464203,
137
+ "logps/chosen": -151.31748962402344,
138
+ "logps/rejected": -214.83871459960938,
139
+ "loss": 0.5368,
140
+ "rewards/accuracies": 0.7666666507720947,
141
+ "rewards/chosen": -0.34307175874710083,
142
+ "rewards/margins": 0.5734245181083679,
143
+ "rewards/rejected": -0.9164963960647583,
144
+ "step": 80
145
+ },
146
+ {
147
+ "epoch": 0.5325443786982249,
148
+ "grad_norm": 89.70054927345474,
149
+ "learning_rate": 2.5986842105263156e-07,
150
+ "logits/chosen": 0.28517499566078186,
151
+ "logits/rejected": 0.20042014122009277,
152
+ "logps/chosen": -219.8415985107422,
153
+ "logps/rejected": -200.08901977539062,
154
+ "loss": 0.5263,
155
+ "rewards/accuracies": 0.7166666388511658,
156
+ "rewards/chosen": -0.5583446621894836,
157
+ "rewards/margins": 0.48568302392959595,
158
+ "rewards/rejected": -1.0440276861190796,
159
+ "step": 90
160
+ },
161
+ {
162
+ "epoch": 0.591715976331361,
163
+ "grad_norm": 70.5257802097204,
164
+ "learning_rate": 2.2697368421052633e-07,
165
+ "logits/chosen": -0.08118040859699249,
166
+ "logits/rejected": -0.05347698926925659,
167
+ "logps/chosen": -132.26165771484375,
168
+ "logps/rejected": -201.5222625732422,
169
+ "loss": 0.5441,
170
+ "rewards/accuracies": 0.6833332777023315,
171
+ "rewards/chosen": -0.5124568939208984,
172
+ "rewards/margins": 0.4847620129585266,
173
+ "rewards/rejected": -0.997218906879425,
174
+ "step": 100
175
+ },
176
+ {
177
+ "epoch": 0.650887573964497,
178
+ "grad_norm": 88.85554517295625,
179
+ "learning_rate": 1.9407894736842102e-07,
180
+ "logits/chosen": 0.028139472007751465,
181
+ "logits/rejected": -0.018909335136413574,
182
+ "logps/chosen": -192.33306884765625,
183
+ "logps/rejected": -235.2200469970703,
184
+ "loss": 0.5436,
185
+ "rewards/accuracies": 0.7666667103767395,
186
+ "rewards/chosen": -0.45699724555015564,
187
+ "rewards/margins": 0.7322259545326233,
188
+ "rewards/rejected": -1.1892231702804565,
189
+ "step": 110
190
+ },
191
+ {
192
+ "epoch": 0.7100591715976331,
193
+ "grad_norm": 64.03669024190756,
194
+ "learning_rate": 1.611842105263158e-07,
195
+ "logits/chosen": 0.00658189645037055,
196
+ "logits/rejected": -0.047699280083179474,
197
+ "logps/chosen": -223.26205444335938,
198
+ "logps/rejected": -258.1011657714844,
199
+ "loss": 0.547,
200
+ "rewards/accuracies": 0.7333333492279053,
201
+ "rewards/chosen": -0.7406572103500366,
202
+ "rewards/margins": 0.8165454864501953,
203
+ "rewards/rejected": -1.557202935218811,
204
+ "step": 120
205
+ },
206
+ {
207
+ "epoch": 0.7692307692307693,
208
+ "grad_norm": 120.82289691922682,
209
+ "learning_rate": 1.2828947368421054e-07,
210
+ "logits/chosen": -0.1693594604730606,
211
+ "logits/rejected": -0.184026300907135,
212
+ "logps/chosen": -212.06900024414062,
213
+ "logps/rejected": -218.6104278564453,
214
+ "loss": 0.5153,
215
+ "rewards/accuracies": 0.7500000596046448,
216
+ "rewards/chosen": -0.5004691481590271,
217
+ "rewards/margins": 0.5659549832344055,
218
+ "rewards/rejected": -1.0664241313934326,
219
+ "step": 130
220
+ },
221
+ {
222
+ "epoch": 0.8284023668639053,
223
+ "grad_norm": 75.4403830508224,
224
+ "learning_rate": 9.539473684210526e-08,
225
+ "logits/chosen": -0.0047453404404222965,
226
+ "logits/rejected": 0.06134886294603348,
227
+ "logps/chosen": -215.7347412109375,
228
+ "logps/rejected": -216.7386016845703,
229
+ "loss": 0.5365,
230
+ "rewards/accuracies": 0.7500000596046448,
231
+ "rewards/chosen": -0.47300204634666443,
232
+ "rewards/margins": 0.6861640214920044,
233
+ "rewards/rejected": -1.1591660976409912,
234
+ "step": 140
235
+ },
236
+ {
237
+ "epoch": 0.8875739644970414,
238
+ "grad_norm": 403.81954432259465,
239
+ "learning_rate": 6.25e-08,
240
+ "logits/chosen": 0.07171961665153503,
241
+ "logits/rejected": 0.06573580205440521,
242
+ "logps/chosen": -168.96322631835938,
243
+ "logps/rejected": -237.2980194091797,
244
+ "loss": 0.5128,
245
+ "rewards/accuracies": 0.783333420753479,
246
+ "rewards/chosen": -0.5706448554992676,
247
+ "rewards/margins": 0.8216232061386108,
248
+ "rewards/rejected": -1.3922679424285889,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.9467455621301775,
253
+ "grad_norm": 77.59593509813354,
254
+ "learning_rate": 2.9605263157894734e-08,
255
+ "logits/chosen": 0.030731897801160812,
256
+ "logits/rejected": 0.0011664718622341752,
257
+ "logps/chosen": -192.11680603027344,
258
+ "logps/rejected": -223.5975799560547,
259
+ "loss": 0.5015,
260
+ "rewards/accuracies": 0.7833333611488342,
261
+ "rewards/chosen": -0.5610911250114441,
262
+ "rewards/margins": 0.7299409508705139,
263
+ "rewards/rejected": -1.291032075881958,
264
+ "step": 160
265
+ },
266
+ {
267
+ "epoch": 1.0,
268
+ "step": 169,
269
+ "total_flos": 0.0,
270
+ "train_loss": 0.5670995542988975,
271
+ "train_runtime": 1780.4141,
272
+ "train_samples_per_second": 4.532,
273
+ "train_steps_per_second": 0.095
274
+ }
275
+ ],
276
+ "logging_steps": 10,
277
+ "max_steps": 169,
278
+ "num_input_tokens_seen": 0,
279
+ "num_train_epochs": 1,
280
+ "save_steps": 100,
281
+ "stateful_callbacks": {
282
+ "TrainerControl": {
283
+ "args": {
284
+ "should_epoch_stop": false,
285
+ "should_evaluate": false,
286
+ "should_log": false,
287
+ "should_save": true,
288
+ "should_training_stop": true
289
+ },
290
+ "attributes": {}
291
+ }
292
+ },
293
+ "total_flos": 0.0,
294
+ "train_batch_size": 6,
295
+ "trial_name": null,
296
+ "trial_params": null
297
+ }