aadityap committed
Commit 59b857f · verified · 1 Parent(s): 896c11d

Model save

Files changed (4)
  1. README.md +61 -0
  2. all_results.json +9 -0
  3. train_results.json +9 -0
  4. trainer_state.json +378 -0
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: peft
+ license: mit
+ base_model: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ model-index:
+ - name: models-ttt-bigestrun-021225-night-step2
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # models-ttt-bigestrun-021225-night-step2
+
+ This model is a fine-tuned version of [deepseek-ai/DeepSeek-R1-Distill-Qwen-32B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 2
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - total_train_batch_size: 16
+ - total_eval_batch_size: 8
+ - optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 3
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.13.2
+ - Transformers 4.47.0.dev0
+ - Pytorch 2.4.0+cu121
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
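
Since this repository stores a PEFT adapter rather than full model weights, inference requires loading the base model first and attaching the adapter. A minimal sketch, assuming the adapter is published under the model-index name from the card (the actual repo id may differ):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
ADAPTER = "models-ttt-bigestrun-021225-night-step2"  # assumed repo id, taken from the card's model-index

tokenizer = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(
    BASE,
    torch_dtype=torch.bfloat16,  # assumption: the training dtype is not stated in the card
    device_map="auto",
)
model = PeftModel.from_pretrained(model, ADAPTER)  # attach the fine-tuned adapter
model.eval()
```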
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 3.0,
+ "total_flos": 882865236606976.0,
+ "train_loss": 0.0980342753076305,
+ "train_runtime": 1550.4315,
+ "train_samples": 250,
+ "train_samples_per_second": 0.484,
+ "train_steps_per_second": 0.031
+ }
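
These summary numbers are internally consistent with the README hyperparameters: with total_train_batch_size 16, one epoch over 250 samples takes ceil(250 / 16) = 16 optimizer steps, so 3 epochs give the 48 global steps recorded in trainer_state.json below. A quick check in plain Python:

```python
import math

train_samples, num_epochs, runtime_s = 250, 3, 1550.4315
total_train_batch_size = 16  # from the README hyperparameters

steps = math.ceil(train_samples / total_train_batch_size) * num_epochs
print(steps)                                             # 48
print(round(train_samples * num_epochs / runtime_s, 3))  # 0.484 -> train_samples_per_second
print(round(steps / runtime_s, 3))                       # 0.031 -> train_steps_per_second
```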
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 3.0,
+ "total_flos": 882865236606976.0,
+ "train_loss": 0.0980342753076305,
+ "train_runtime": 1550.4315,
+ "train_samples": 250,
+ "train_samples_per_second": 0.484,
+ "train_steps_per_second": 0.031
+ }
trainer_state.json ADDED
@@ -0,0 +1,378 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.0,
+ "eval_steps": 500,
+ "global_step": 48,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0625,
+ "grad_norm": 0.04137394207755112,
+ "learning_rate": 1e-05,
+ "loss": 0.1671,
+ "step": 1
+ },
+ {
+ "epoch": 0.125,
+ "grad_norm": 0.037803193435615265,
+ "learning_rate": 2e-05,
+ "loss": 0.167,
+ "step": 2
+ },
+ {
+ "epoch": 0.1875,
+ "grad_norm": 0.04445740088277594,
+ "learning_rate": 3e-05,
+ "loss": 0.1607,
+ "step": 3
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.0380231527607073,
+ "learning_rate": 4e-05,
+ "loss": 0.1591,
+ "step": 4
+ },
+ {
+ "epoch": 0.3125,
+ "grad_norm": 0.046362646963448,
+ "learning_rate": 5e-05,
+ "loss": 0.1597,
+ "step": 5
+ },
+ {
+ "epoch": 0.375,
+ "grad_norm": 0.05235320825077946,
+ "learning_rate": 4.9933307091588796e-05,
+ "loss": 0.1616,
+ "step": 6
+ },
+ {
+ "epoch": 0.4375,
+ "grad_norm": 0.05740591760676448,
+ "learning_rate": 4.973358420187776e-05,
+ "loss": 0.1459,
+ "step": 7
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.04283964782544257,
+ "learning_rate": 4.9401896938898185e-05,
+ "loss": 0.1332,
+ "step": 8
+ },
+ {
+ "epoch": 0.5625,
+ "grad_norm": 0.036679242954440874,
+ "learning_rate": 4.894001499771015e-05,
+ "loss": 0.1293,
+ "step": 9
+ },
+ {
+ "epoch": 0.625,
+ "grad_norm": 0.033723367633845105,
+ "learning_rate": 4.83504027183137e-05,
+ "loss": 0.1342,
+ "step": 10
+ },
+ {
+ "epoch": 0.6875,
+ "grad_norm": 0.037227526954279944,
+ "learning_rate": 4.763620593732867e-05,
+ "loss": 0.128,
+ "step": 11
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.044207038446114774,
+ "learning_rate": 4.6801235203595195e-05,
+ "loss": 0.1275,
+ "step": 12
+ },
+ {
+ "epoch": 0.8125,
+ "grad_norm": 0.04394682449442954,
+ "learning_rate": 4.584994544724695e-05,
+ "loss": 0.1142,
+ "step": 13
+ },
+ {
+ "epoch": 0.875,
+ "grad_norm": 0.03406265181060845,
+ "learning_rate": 4.478741221073136e-05,
+ "loss": 0.1161,
+ "step": 14
+ },
+ {
+ "epoch": 0.9375,
+ "grad_norm": 0.030054446617056498,
+ "learning_rate": 4.361930456859455e-05,
+ "loss": 0.1087,
+ "step": 15
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.028409394434840918,
+ "learning_rate": 4.235185488051585e-05,
+ "loss": 0.1126,
+ "step": 16
+ },
+ {
+ "epoch": 1.0625,
+ "grad_norm": 0.023153227915814832,
+ "learning_rate": 4.099182553897229e-05,
+ "loss": 0.1054,
+ "step": 17
+ },
+ {
+ "epoch": 1.125,
+ "grad_norm": 0.021598055829915037,
+ "learning_rate": 3.954647288894883e-05,
+ "loss": 0.1007,
+ "step": 18
+ },
+ {
+ "epoch": 1.1875,
+ "grad_norm": 0.02123540151753295,
+ "learning_rate": 3.8023508512198256e-05,
+ "loss": 0.1001,
+ "step": 19
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.02137306513970009,
+ "learning_rate": 3.6431058082615964e-05,
+ "loss": 0.0968,
+ "step": 20
+ },
+ {
+ "epoch": 1.3125,
+ "grad_norm": 0.02433397130782817,
+ "learning_rate": 3.47776180122539e-05,
+ "loss": 0.0983,
+ "step": 21
+ },
+ {
+ "epoch": 1.375,
+ "grad_norm": 0.021942525621062504,
+ "learning_rate": 3.307201011928616e-05,
+ "loss": 0.0935,
+ "step": 22
+ },
+ {
+ "epoch": 1.4375,
+ "grad_norm": 0.021261141593508576,
+ "learning_rate": 3.132333455979202e-05,
+ "loss": 0.0887,
+ "step": 23
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.02142792591828939,
+ "learning_rate": 2.954092127448591e-05,
+ "loss": 0.0888,
+ "step": 24
+ },
+ {
+ "epoch": 1.5625,
+ "grad_norm": 0.020750630946201602,
+ "learning_rate": 2.7734280209446865e-05,
+ "loss": 0.0891,
+ "step": 25
+ },
+ {
+ "epoch": 1.625,
+ "grad_norm": 0.019872557078390127,
+ "learning_rate": 2.5913050576441477e-05,
+ "loss": 0.0872,
+ "step": 26
+ },
+ {
+ "epoch": 1.6875,
+ "grad_norm": 0.019131650653635524,
+ "learning_rate": 2.4086949423558526e-05,
+ "loss": 0.0835,
+ "step": 27
+ },
+ {
+ "epoch": 1.75,
+ "grad_norm": 0.019027351901529727,
+ "learning_rate": 2.2265719790553147e-05,
+ "loss": 0.0827,
+ "step": 28
+ },
+ {
+ "epoch": 1.8125,
+ "grad_norm": 0.019517019388363612,
+ "learning_rate": 2.0459078725514092e-05,
+ "loss": 0.0769,
+ "step": 29
+ },
+ {
+ "epoch": 1.875,
+ "grad_norm": 0.019073963774100196,
+ "learning_rate": 1.867666544020798e-05,
+ "loss": 0.0768,
+ "step": 30
+ },
+ {
+ "epoch": 1.9375,
+ "grad_norm": 0.020535779704439937,
+ "learning_rate": 1.692798988071385e-05,
+ "loss": 0.0703,
+ "step": 31
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.01735083491599702,
+ "learning_rate": 1.5222381987746104e-05,
+ "loss": 0.0721,
+ "step": 32
+ },
+ {
+ "epoch": 2.0625,
+ "grad_norm": 0.016996572618955864,
+ "learning_rate": 1.3568941917384036e-05,
+ "loss": 0.0729,
+ "step": 33
+ },
+ {
+ "epoch": 2.125,
+ "grad_norm": 0.016947999154191592,
+ "learning_rate": 1.1976491487801748e-05,
+ "loss": 0.0683,
+ "step": 34
+ },
+ {
+ "epoch": 2.1875,
+ "grad_norm": 0.01759855889883996,
+ "learning_rate": 1.0453527111051184e-05,
+ "loss": 0.0682,
+ "step": 35
+ },
+ {
+ "epoch": 2.25,
+ "grad_norm": 0.016563435997106427,
+ "learning_rate": 9.008174461027724e-06,
+ "loss": 0.0701,
+ "step": 36
+ },
+ {
+ "epoch": 2.3125,
+ "grad_norm": 0.0154902825546126,
+ "learning_rate": 7.648145119484152e-06,
+ "loss": 0.0673,
+ "step": 37
+ },
+ {
+ "epoch": 2.375,
+ "grad_norm": 0.01691672911978778,
+ "learning_rate": 6.380695431405453e-06,
+ "loss": 0.0694,
+ "step": 38
+ },
+ {
+ "epoch": 2.4375,
+ "grad_norm": 0.01530029785885113,
+ "learning_rate": 5.2125877892686496e-06,
+ "loss": 0.0681,
+ "step": 39
+ },
+ {
+ "epoch": 2.5,
+ "grad_norm": 0.017885100797614902,
+ "learning_rate": 4.150054552753055e-06,
+ "loss": 0.0627,
+ "step": 40
+ },
+ {
+ "epoch": 2.5625,
+ "grad_norm": 0.016638045960609936,
+ "learning_rate": 3.198764796404807e-06,
+ "loss": 0.0669,
+ "step": 41
+ },
+ {
+ "epoch": 2.625,
+ "grad_norm": 0.016208072151038286,
+ "learning_rate": 2.3637940626713346e-06,
+ "loss": 0.0678,
+ "step": 42
+ },
+ {
+ "epoch": 2.6875,
+ "grad_norm": 0.016401199651084344,
+ "learning_rate": 1.649597281686302e-06,
+ "loss": 0.0649,
+ "step": 43
+ },
+ {
+ "epoch": 2.75,
+ "grad_norm": 0.01598951472231698,
+ "learning_rate": 1.0599850022898539e-06,
+ "loss": 0.0674,
+ "step": 44
+ },
+ {
+ "epoch": 2.8125,
+ "grad_norm": 0.015277414785260311,
+ "learning_rate": 5.981030611018234e-07,
+ "loss": 0.0634,
+ "step": 45
+ },
+ {
+ "epoch": 2.875,
+ "grad_norm": 0.01537912920538106,
+ "learning_rate": 2.664157981222437e-07,
+ "loss": 0.0637,
+ "step": 46
+ },
+ {
+ "epoch": 2.9375,
+ "grad_norm": 0.015282877713047758,
+ "learning_rate": 6.66929084112089e-08,
+ "loss": 0.0667,
+ "step": 47
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.01629157768683682,
+ "learning_rate": 0.0,
+ "loss": 0.0622,
+ "step": 48
+ },
+ {
+ "epoch": 3.0,
+ "step": 48,
+ "total_flos": 882865236606976.0,
+ "train_loss": 0.0980342753076305,
+ "train_runtime": 1550.4315,
+ "train_samples_per_second": 0.484,
+ "train_steps_per_second": 0.031
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 48,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 882865236606976.0,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
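
The learning rates in log_history trace a cosine schedule with linear warmup over the first five steps (ceil(0.1 * 48), per the README's warmup_ratio), peaking at the configured 5e-05 and decaying to zero at step 48. A minimal sketch that reloads the state file and reproduces the logged schedule; the formula mirrors transformers' get_cosine_schedule_with_warmup:

```python
import json
import math

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step records carry a "loss" key; the final summary entry does not.
logs = [e for e in state["log_history"] if "loss" in e]

max_steps, warmup_steps, peak_lr = 48, 5, 5e-05  # warmup_steps = ceil(0.1 * 48)

def expected_lr(step: int) -> float:
    """Linear warmup to peak_lr, then cosine decay to zero."""
    if step <= warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / (max_steps - warmup_steps)
    return 0.5 * peak_lr * (1.0 + math.cos(math.pi * progress))

for e in logs:
    assert math.isclose(e["learning_rate"], expected_lr(e["step"]), abs_tol=1e-10)
    print(f'step {e["step"]:2d}  lr {e["learning_rate"]:.3e}  loss {e["loss"]:.4f}')
```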