Namronaldo2004 committed
Commit bb58c6e · 1 Parent(s): 51c515c

Update fine-tuned model

README.md CHANGED
@@ -199,4 +199,4 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
  [More Information Needed]
  ### Framework versions
 
- - PEFT 0.13.2
+ - PEFT 0.14.0
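
The only substantive README change is the framework-version bump from PEFT 0.13.2 to 0.14.0, which matches the new config keys that appear in adapter_config.json below. As a purely illustrative sanity check (not part of this repository), something like the following could confirm the local environment is new enough before loading the adapter:

```python
from importlib.metadata import version

from packaging.version import Version

# The model card now records PEFT 0.14.0; adapters saved with it carry config keys
# (e.g. "lora_bias") that older PEFT releases do not recognize.
installed = Version(version("peft"))
if installed < Version("0.14.0"):
    raise RuntimeError(f"peft {installed} installed; this adapter was saved with PEFT 0.14.0")
```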
adapter_config.json CHANGED
@@ -3,6 +3,8 @@
  "auto_mapping": null,
  "base_model_name_or_path": "vilm/vinallama-7b-chat",
  "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
@@ -11,6 +13,7 @@
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32,
+ "lora_bias": false,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
@@ -20,15 +23,15 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "v_proj",
- "q_proj",
- "down_proj",
+ "o_proj",
  "up_proj",
+ "q_proj",
  "k_proj",
+ "v_proj",
  "gate_proj",
- "o_proj"
+ "down_proj"
  ],
- "task_type": " CAUSAL_LM",
+ "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": false
  }
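
Beyond the PEFT-0.14-era keys added above (eva_config, exclude_modules, lora_bias), these hunks reorder target_modules and drop the stray leading space from task_type (" CAUSAL_LM" becomes "CAUSAL_LM"). A rough Python sketch of the equivalent LoraConfig follows; every value except r is copied from the diff, and r=16 is only a placeholder because the rank key sits outside the changed hunks:

```python
from peft import LoraConfig

# Sketch of a LoraConfig mirroring the updated adapter_config.json (PEFT 0.14.x).
lora_config = LoraConfig(
    base_model_name_or_path="vilm/vinallama-7b-chat",
    r=16,  # placeholder: the "r" key is not part of the changed hunks
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    lora_bias=False,  # field introduced around PEFT 0.14
    use_dora=False,
    use_rslora=False,
    target_modules=["o_proj", "up_proj", "q_proj", "k_proj", "v_proj", "gate_proj", "down_proj"],
    task_type="CAUSAL_LM",
)
```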
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5dc9e67cf667ac86a0f824787134893aafd3f3f0b3e135c44a0f864106826f6f
+ oid sha256:6e8effcf0f9d1573444b2ba1b1c1aa7c47adcdbd5fa28babbd4f06b9a051608c
  size 159967880
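
The adapter weights get a new sha256 while keeping the same ~160 MB size, i.e. only the LoRA deltas were retrained. A hypothetical loading sketch follows; the Hub repository id is a placeholder, since this diff never spells it out:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Attach the updated LoRA adapter to its base model. "Namronaldo2004/<adapter-repo>"
# is a placeholder; substitute the actual Hub id of this repository.
base = AutoModelForCausalLM.from_pretrained(
    "vilm/vinallama-7b-chat", torch_dtype=torch.float16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("vilm/vinallama-7b-chat")
model = PeftModel.from_pretrained(base, "Namronaldo2004/<adapter-repo>")
model.eval()
```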
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:16fe861659b0c63a38c21766b3b40e8c41307f475e9f9bef71147b28d9a61498
+ oid sha256:1b07eea0e1c7747d7a9f46f8cc3858399f3e24e6c90638a08821fc78a7f94549
  size 852876198
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:174c9505441c14b31f5ce63d06158dc7eb3a747979db40c6b24a3ffaa7cc1878
+ oid sha256:c7eb67e1e2db21e202d28ef6dfe201b68b5de1aa4f215c25120ecdbb7474af84
  size 14244
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e69e2b49ea642509f0c688c16fb190b7cf27dac0a18903a5e2d1467d0343d8b8
+ oid sha256:25c0243da09e051ffbb600413baac079966a139cd6939cbef7eb321283bd0767
  size 1064
trainer_state.json CHANGED
@@ -1,368 +1,893 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 1.0,
5
  "eval_steps": 500,
6
- "global_step": 50,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.02,
13
- "grad_norm": 0.6174958944320679,
14
- "learning_rate": 6.666666666666667e-05,
15
- "loss": 0.5017,
16
  "step": 1
17
  },
18
  {
19
- "epoch": 0.04,
20
- "grad_norm": 0.6429479122161865,
21
- "learning_rate": 0.00013333333333333334,
22
- "loss": 0.4915,
23
  "step": 2
24
  },
25
  {
26
- "epoch": 0.06,
27
- "grad_norm": 0.49867865443229675,
28
- "learning_rate": 0.0002,
29
- "loss": 0.428,
30
  "step": 3
31
  },
32
  {
33
- "epoch": 0.08,
34
- "grad_norm": 0.5035229921340942,
35
- "learning_rate": 0.00019977668786231534,
36
- "loss": 0.4537,
37
  "step": 4
38
  },
39
  {
40
- "epoch": 0.1,
41
- "grad_norm": 0.45080244541168213,
42
- "learning_rate": 0.000199107748815478,
43
- "loss": 0.4549,
44
  "step": 5
45
  },
46
  {
47
- "epoch": 0.12,
48
- "grad_norm": 0.4055653214454651,
49
- "learning_rate": 0.0001979961705036587,
50
- "loss": 0.4596,
51
  "step": 6
52
  },
53
  {
54
- "epoch": 0.14,
55
- "grad_norm": 0.41731277108192444,
56
- "learning_rate": 0.00019644691750543767,
57
- "loss": 0.4274,
58
  "step": 7
59
  },
60
  {
61
- "epoch": 0.16,
62
- "grad_norm": 0.3783501982688904,
63
- "learning_rate": 0.0001944669091607919,
64
- "loss": 0.4789,
65
  "step": 8
66
  },
67
  {
68
- "epoch": 0.18,
69
- "grad_norm": 0.37090209126472473,
70
- "learning_rate": 0.00019206498866764288,
71
- "loss": 0.4522,
72
  "step": 9
73
  },
74
  {
75
- "epoch": 0.2,
76
- "grad_norm": 0.3544338345527649,
77
- "learning_rate": 0.00018925188358598813,
78
- "loss": 0.4677,
79
  "step": 10
80
  },
81
  {
82
- "epoch": 0.22,
83
- "grad_norm": 0.3952384889125824,
84
- "learning_rate": 0.00018604015792601396,
85
- "loss": 0.4454,
86
  "step": 11
87
  },
88
  {
89
- "epoch": 0.24,
90
- "grad_norm": 0.3702380657196045,
91
- "learning_rate": 0.00018244415603417603,
92
- "loss": 0.4508,
93
  "step": 12
94
  },
95
  {
96
- "epoch": 0.26,
97
- "grad_norm": 0.36494874954223633,
98
- "learning_rate": 0.0001784799385278661,
99
- "loss": 0.4466,
100
  "step": 13
101
  },
102
  {
103
- "epoch": 0.28,
104
- "grad_norm": 0.34794506430625916,
105
- "learning_rate": 0.00017416521056479577,
106
- "loss": 0.4429,
107
  "step": 14
108
  },
109
  {
110
- "epoch": 0.3,
111
- "grad_norm": 0.3768218159675598,
112
- "learning_rate": 0.00016951924276746425,
113
- "loss": 0.451,
114
  "step": 15
115
  },
116
  {
117
- "epoch": 0.32,
118
- "grad_norm": 0.37294796109199524,
119
- "learning_rate": 0.00016456278515588024,
120
- "loss": 0.4359,
121
  "step": 16
122
  },
123
  {
124
- "epoch": 0.34,
125
- "grad_norm": 0.3664343059062958,
126
- "learning_rate": 0.00015931797447293552,
127
- "loss": 0.4223,
128
  "step": 17
129
  },
130
  {
131
- "epoch": 0.36,
132
- "grad_norm": 0.38887134194374084,
133
- "learning_rate": 0.00015380823531633729,
134
- "loss": 0.4317,
135
  "step": 18
136
  },
137
  {
138
- "epoch": 0.38,
139
- "grad_norm": 0.3960750699043274,
140
- "learning_rate": 0.00014805817551866838,
141
- "loss": 0.4302,
142
  "step": 19
143
  },
144
  {
145
- "epoch": 0.4,
146
- "grad_norm": 0.3703005611896515,
147
- "learning_rate": 0.0001420934762428335,
148
- "loss": 0.4238,
149
  "step": 20
150
  },
151
  {
152
- "epoch": 0.42,
153
- "grad_norm": 0.3697049617767334,
154
- "learning_rate": 0.00013594077728375128,
155
- "loss": 0.4275,
156
  "step": 21
157
  },
158
  {
159
- "epoch": 0.44,
160
- "grad_norm": 0.3797580897808075,
161
- "learning_rate": 0.00012962755808856342,
162
- "loss": 0.4295,
163
  "step": 22
164
  },
165
  {
166
- "epoch": 0.46,
167
- "grad_norm": 0.37148579955101013,
168
- "learning_rate": 0.00012318201502675285,
169
- "loss": 0.413,
170
  "step": 23
171
  },
172
  {
173
- "epoch": 0.48,
174
- "grad_norm": 0.36919766664505005,
175
- "learning_rate": 0.00011663293545831302,
176
- "loss": 0.418,
177
  "step": 24
178
  },
179
  {
180
- "epoch": 0.5,
181
- "grad_norm": 0.36413446068763733,
182
- "learning_rate": 0.00011000956916240985,
183
- "loss": 0.3946,
184
  "step": 25
185
  },
186
  {
187
- "epoch": 0.52,
188
- "grad_norm": 0.37875810265541077,
189
- "learning_rate": 0.00010334149770076747,
190
- "loss": 0.4423,
191
  "step": 26
192
  },
193
  {
194
- "epoch": 0.54,
195
- "grad_norm": 0.3651341497898102,
196
- "learning_rate": 9.665850229923258e-05,
197
- "loss": 0.4193,
198
  "step": 27
199
  },
200
  {
201
- "epoch": 0.56,
202
- "grad_norm": 0.36425620317459106,
203
- "learning_rate": 8.999043083759017e-05,
204
- "loss": 0.4126,
205
  "step": 28
206
  },
207
  {
208
- "epoch": 0.58,
209
- "grad_norm": 0.3773588538169861,
210
- "learning_rate": 8.336706454168701e-05,
211
- "loss": 0.3913,
212
  "step": 29
213
  },
214
  {
215
- "epoch": 0.6,
216
- "grad_norm": 0.3708311915397644,
217
- "learning_rate": 7.681798497324716e-05,
218
- "loss": 0.4078,
219
  "step": 30
220
  },
221
  {
222
- "epoch": 0.62,
223
- "grad_norm": 0.38130757212638855,
224
- "learning_rate": 7.037244191143661e-05,
225
- "loss": 0.4229,
226
  "step": 31
227
  },
228
  {
229
- "epoch": 0.64,
230
- "grad_norm": 0.3573770821094513,
231
- "learning_rate": 6.405922271624874e-05,
232
- "loss": 0.4117,
233
  "step": 32
234
  },
235
  {
236
- "epoch": 0.66,
237
- "grad_norm": 0.3715918958187103,
238
- "learning_rate": 5.790652375716652e-05,
239
- "loss": 0.3888,
240
  "step": 33
241
  },
242
  {
243
- "epoch": 0.68,
244
- "grad_norm": 0.371981143951416,
245
- "learning_rate": 5.1941824481331626e-05,
246
- "loss": 0.3819,
247
  "step": 34
248
  },
249
  {
250
- "epoch": 0.7,
251
- "grad_norm": 0.3743939995765686,
252
- "learning_rate": 4.6191764683662744e-05,
253
- "loss": 0.4123,
254
  "step": 35
255
  },
256
  {
257
- "epoch": 0.72,
258
- "grad_norm": 0.3548343777656555,
259
- "learning_rate": 4.0682025527064486e-05,
260
- "loss": 0.3768,
261
  "step": 36
262
  },
263
  {
264
- "epoch": 0.74,
265
- "grad_norm": 0.34915444254875183,
266
- "learning_rate": 3.543721484411976e-05,
267
- "loss": 0.3836,
268
  "step": 37
269
  },
270
  {
271
- "epoch": 0.76,
272
- "grad_norm": 0.36232128739356995,
273
- "learning_rate": 3.0480757232535772e-05,
274
- "loss": 0.3905,
275
  "step": 38
276
  },
277
  {
278
- "epoch": 0.78,
279
- "grad_norm": 0.3516843020915985,
280
- "learning_rate": 2.5834789435204243e-05,
281
- "loss": 0.4018,
282
  "step": 39
283
  },
284
  {
285
- "epoch": 0.8,
286
- "grad_norm": 0.35692131519317627,
287
- "learning_rate": 2.1520061472133902e-05,
288
- "loss": 0.4125,
289
  "step": 40
290
  },
291
  {
292
- "epoch": 0.82,
293
- "grad_norm": 0.366886168718338,
294
- "learning_rate": 1.7555843965823992e-05,
295
- "loss": 0.3963,
296
  "step": 41
297
  },
298
  {
299
- "epoch": 0.84,
300
- "grad_norm": 0.3520941436290741,
301
- "learning_rate": 1.3959842073986085e-05,
302
- "loss": 0.3923,
303
  "step": 42
304
  },
305
  {
306
- "epoch": 0.86,
307
- "grad_norm": 0.3794335424900055,
308
- "learning_rate": 1.0748116414011888e-05,
309
- "loss": 0.4048,
310
  "step": 43
311
  },
312
  {
313
- "epoch": 0.88,
314
- "grad_norm": 0.34629887342453003,
315
- "learning_rate": 7.935011332357112e-06,
316
- "loss": 0.3738,
317
  "step": 44
318
  },
319
  {
320
- "epoch": 0.9,
321
- "grad_norm": 0.3593948185443878,
322
- "learning_rate": 5.533090839208133e-06,
323
- "loss": 0.3967,
324
  "step": 45
325
  },
326
  {
327
- "epoch": 0.92,
328
- "grad_norm": 0.33537453413009644,
329
- "learning_rate": 3.5530824945623542e-06,
330
- "loss": 0.3729,
331
  "step": 46
332
  },
333
  {
334
- "epoch": 0.94,
335
- "grad_norm": 0.3365356922149658,
336
- "learning_rate": 2.003829496341325e-06,
337
- "loss": 0.3799,
338
  "step": 47
339
  },
340
  {
341
- "epoch": 0.96,
342
- "grad_norm": 0.3586476147174835,
343
- "learning_rate": 8.922511845219971e-07,
344
- "loss": 0.4037,
345
  "step": 48
346
  },
347
  {
348
- "epoch": 0.98,
349
- "grad_norm": 0.3276454508304596,
350
- "learning_rate": 2.2331213768468363e-07,
351
- "loss": 0.3982,
352
  "step": 49
353
  },
354
  {
355
- "epoch": 1.0,
356
- "grad_norm": 0.3641741871833801,
357
- "learning_rate": 0.0,
358
- "loss": 0.388,
359
  "step": 50
360
  }
361
  ],
362
  "logging_steps": 1,
363
- "max_steps": 50,
364
  "num_input_tokens_seen": 0,
365
- "num_train_epochs": 1,
366
  "save_steps": 500,
367
  "stateful_callbacks": {
368
  "TrainerControl": {
@@ -376,7 +901,7 @@
376
  "attributes": {}
377
  }
378
  },
379
- "total_flos": 7.469785844519731e+16,
380
  "train_batch_size": 1,
381
  "trial_name": null,
382
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 5.0,
5
  "eval_steps": 500,
6
+ "global_step": 125,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.04,
13
+ "grad_norm": 4.460970878601074,
14
+ "learning_rate": 2.857142857142857e-05,
15
+ "loss": 2.7426,
16
  "step": 1
17
  },
18
  {
19
+ "epoch": 0.08,
20
+ "grad_norm": 4.278836727142334,
21
+ "learning_rate": 5.714285714285714e-05,
22
+ "loss": 2.712,
23
  "step": 2
24
  },
25
  {
26
+ "epoch": 0.12,
27
+ "grad_norm": 3.84088134765625,
28
+ "learning_rate": 8.571428571428571e-05,
29
+ "loss": 2.546,
30
  "step": 3
31
  },
32
  {
33
+ "epoch": 0.16,
34
+ "grad_norm": 3.0376698970794678,
35
+ "learning_rate": 0.00011428571428571428,
36
+ "loss": 2.2798,
37
  "step": 4
38
  },
39
  {
40
+ "epoch": 0.2,
41
+ "grad_norm": 2.979853391647339,
42
+ "learning_rate": 0.00014285714285714287,
43
+ "loss": 2.0483,
44
  "step": 5
45
  },
46
  {
47
+ "epoch": 0.24,
48
+ "grad_norm": 2.246155261993408,
49
+ "learning_rate": 0.00017142857142857143,
50
+ "loss": 1.7379,
51
  "step": 6
52
  },
53
  {
54
+ "epoch": 0.28,
55
+ "grad_norm": 1.8112757205963135,
56
+ "learning_rate": 0.0002,
57
+ "loss": 1.4445,
58
  "step": 7
59
  },
60
  {
61
+ "epoch": 0.32,
62
+ "grad_norm": 1.477661371231079,
63
+ "learning_rate": 0.00019996456111234527,
64
+ "loss": 1.2269,
65
  "step": 8
66
  },
67
  {
68
+ "epoch": 0.36,
69
+ "grad_norm": 1.2332955598831177,
70
+ "learning_rate": 0.0001998582695676762,
71
+ "loss": 1.0502,
72
  "step": 9
73
  },
74
  {
75
+ "epoch": 0.4,
76
+ "grad_norm": 1.0987950563430786,
77
+ "learning_rate": 0.000199681200703075,
78
+ "loss": 0.9511,
79
  "step": 10
80
  },
81
  {
82
+ "epoch": 0.44,
83
+ "grad_norm": 1.4148105382919312,
84
+ "learning_rate": 0.00019943348002101371,
85
+ "loss": 0.8553,
86
  "step": 11
87
  },
88
  {
89
+ "epoch": 0.48,
90
+ "grad_norm": 0.6310116052627563,
91
+ "learning_rate": 0.00019911528310040074,
92
+ "loss": 0.8427,
93
  "step": 12
94
  },
95
  {
96
+ "epoch": 0.52,
97
+ "grad_norm": 0.5813988447189331,
98
+ "learning_rate": 0.00019872683547213446,
99
+ "loss": 0.8571,
100
  "step": 13
101
  },
102
  {
103
+ "epoch": 0.56,
104
+ "grad_norm": 0.5922828912734985,
105
+ "learning_rate": 0.00019826841245925212,
106
+ "loss": 0.7995,
107
  "step": 14
108
  },
109
  {
110
+ "epoch": 0.6,
111
+ "grad_norm": 0.5135756731033325,
112
+ "learning_rate": 0.00019774033898178667,
113
+ "loss": 0.7967,
114
  "step": 15
115
  },
116
  {
117
+ "epoch": 0.64,
118
+ "grad_norm": 0.4386445879936218,
119
+ "learning_rate": 0.00019714298932647098,
120
+ "loss": 0.7655,
121
  "step": 16
122
  },
123
  {
124
+ "epoch": 0.68,
125
+ "grad_norm": 0.35705333948135376,
126
+ "learning_rate": 0.0001964767868814516,
127
+ "loss": 0.7255,
128
  "step": 17
129
  },
130
  {
131
+ "epoch": 0.72,
132
+ "grad_norm": 0.2756179869174957,
133
+ "learning_rate": 0.00019574220383620055,
134
+ "loss": 0.7296,
135
  "step": 18
136
  },
137
  {
138
+ "epoch": 0.76,
139
+ "grad_norm": 0.27240365743637085,
140
+ "learning_rate": 0.00019493976084683813,
141
+ "loss": 0.7523,
142
  "step": 19
143
  },
144
  {
145
+ "epoch": 0.8,
146
+ "grad_norm": 0.2601045072078705,
147
+ "learning_rate": 0.00019407002666710336,
148
+ "loss": 0.7477,
149
  "step": 20
150
  },
151
  {
152
+ "epoch": 0.84,
153
+ "grad_norm": 0.24948769807815552,
154
+ "learning_rate": 0.00019313361774523385,
155
+ "loss": 0.6847,
156
  "step": 21
157
  },
158
  {
159
+ "epoch": 0.88,
160
+ "grad_norm": 0.2666375935077667,
161
+ "learning_rate": 0.00019213119778704128,
162
+ "loss": 0.7327,
163
  "step": 22
164
  },
165
  {
166
+ "epoch": 0.92,
167
+ "grad_norm": 0.24206262826919556,
168
+ "learning_rate": 0.00019106347728549135,
169
+ "loss": 0.704,
170
  "step": 23
171
  },
172
  {
173
+ "epoch": 0.96,
174
+ "grad_norm": 0.2375992089509964,
175
+ "learning_rate": 0.00018993121301712193,
176
+ "loss": 0.7215,
177
  "step": 24
178
  },
179
  {
180
+ "epoch": 1.0,
181
+ "grad_norm": 0.2436981201171875,
182
+ "learning_rate": 0.00018873520750565718,
183
+ "loss": 0.7342,
184
  "step": 25
185
  },
186
  {
187
+ "epoch": 1.04,
188
+ "grad_norm": 0.23295441269874573,
189
+ "learning_rate": 0.00018747630845319612,
190
+ "loss": 0.6412,
191
  "step": 26
192
  },
193
  {
194
+ "epoch": 1.08,
195
+ "grad_norm": 0.23264053463935852,
196
+ "learning_rate": 0.0001861554081393806,
197
+ "loss": 0.6375,
198
  "step": 27
199
  },
200
  {
201
+ "epoch": 1.12,
202
+ "grad_norm": 0.24330440163612366,
203
+ "learning_rate": 0.0001847734427889671,
204
+ "loss": 0.6281,
205
  "step": 28
206
  },
207
  {
208
+ "epoch": 1.16,
209
+ "grad_norm": 0.226120725274086,
210
+ "learning_rate": 0.0001833313919082515,
211
+ "loss": 0.6366,
212
  "step": 29
213
  },
214
  {
215
+ "epoch": 1.2,
216
+ "grad_norm": 0.23621249198913574,
217
+ "learning_rate": 0.0001818302775908169,
218
+ "loss": 0.6118,
219
  "step": 30
220
  },
221
  {
222
+ "epoch": 1.24,
223
+ "grad_norm": 0.2544279396533966,
224
+ "learning_rate": 0.00018027116379309638,
225
+ "loss": 0.6374,
226
  "step": 31
227
  },
228
  {
229
+ "epoch": 1.28,
230
+ "grad_norm": 0.2579609453678131,
231
+ "learning_rate": 0.00017865515558026428,
232
+ "loss": 0.6422,
233
  "step": 32
234
  },
235
  {
236
+ "epoch": 1.32,
237
+ "grad_norm": 0.23667974770069122,
238
+ "learning_rate": 0.00017698339834299061,
239
+ "loss": 0.6253,
240
  "step": 33
241
  },
242
  {
243
+ "epoch": 1.3599999999999999,
244
+ "grad_norm": 0.219462051987648,
245
+ "learning_rate": 0.00017525707698561385,
246
+ "loss": 0.6171,
247
  "step": 34
248
  },
249
  {
250
+ "epoch": 1.4,
251
+ "grad_norm": 0.22832028567790985,
252
+ "learning_rate": 0.00017347741508630672,
253
+ "loss": 0.6588,
254
  "step": 35
255
  },
256
  {
257
+ "epoch": 1.44,
258
+ "grad_norm": 0.2148832529783249,
259
+ "learning_rate": 0.00017164567402983152,
260
+ "loss": 0.6157,
261
  "step": 36
262
  },
263
  {
264
+ "epoch": 1.48,
265
+ "grad_norm": 0.22131554782390594,
266
+ "learning_rate": 0.0001697631521134985,
267
+ "loss": 0.6157,
268
  "step": 37
269
  },
270
  {
271
+ "epoch": 1.52,
272
+ "grad_norm": 0.21793827414512634,
273
+ "learning_rate": 0.00016783118362696163,
274
+ "loss": 0.5867,
275
  "step": 38
276
  },
277
  {
278
+ "epoch": 1.56,
279
+ "grad_norm": 0.2158416360616684,
280
+ "learning_rate": 0.00016585113790650388,
281
+ "loss": 0.6006,
282
  "step": 39
283
  },
284
  {
285
+ "epoch": 1.6,
286
+ "grad_norm": 0.2120426446199417,
287
+ "learning_rate": 0.00016382441836448202,
288
+ "loss": 0.593,
289
  "step": 40
290
  },
291
  {
292
+ "epoch": 1.6400000000000001,
293
+ "grad_norm": 0.22278062999248505,
294
+ "learning_rate": 0.0001617524614946192,
295
+ "loss": 0.5768,
296
  "step": 41
297
  },
298
  {
299
+ "epoch": 1.6800000000000002,
300
+ "grad_norm": 0.21742485463619232,
301
+ "learning_rate": 0.00015963673585385016,
302
+ "loss": 0.5981,
303
  "step": 42
304
  },
305
  {
306
+ "epoch": 1.72,
307
+ "grad_norm": 0.22864408791065216,
308
+ "learning_rate": 0.0001574787410214407,
309
+ "loss": 0.6262,
310
  "step": 43
311
  },
312
  {
313
+ "epoch": 1.76,
314
+ "grad_norm": 0.2160646617412567,
315
+ "learning_rate": 0.00015528000653611935,
316
+ "loss": 0.6195,
317
  "step": 44
318
  },
319
  {
320
+ "epoch": 1.8,
321
+ "grad_norm": 0.21609608829021454,
322
+ "learning_rate": 0.00015304209081197425,
323
+ "loss": 0.5855,
324
  "step": 45
325
  },
326
  {
327
+ "epoch": 1.8399999999999999,
328
+ "grad_norm": 0.21653196215629578,
329
+ "learning_rate": 0.000150766580033884,
330
+ "loss": 0.5852,
331
  "step": 46
332
  },
333
  {
334
+ "epoch": 1.88,
335
+ "grad_norm": 0.20484659075737,
336
+ "learning_rate": 0.00014845508703326504,
337
+ "loss": 0.5932,
338
  "step": 47
339
  },
340
  {
341
+ "epoch": 1.92,
342
+ "grad_norm": 0.214134082198143,
343
+ "learning_rate": 0.0001461092501449326,
344
+ "loss": 0.5787,
345
  "step": 48
346
  },
347
  {
348
+ "epoch": 1.96,
349
+ "grad_norm": 0.21110501885414124,
350
+ "learning_rate": 0.00014373073204588556,
351
+ "loss": 0.572,
352
  "step": 49
353
  },
354
  {
355
+ "epoch": 2.0,
356
+ "grad_norm": 0.21470192074775696,
357
+ "learning_rate": 0.00014132121857683783,
358
+ "loss": 0.5655,
359
  "step": 50
360
+ },
361
+ {
362
+ "epoch": 2.04,
363
+ "grad_norm": 0.2057720124721527,
364
+ "learning_rate": 0.00013888241754733208,
365
+ "loss": 0.5304,
366
+ "step": 51
367
+ },
368
+ {
369
+ "epoch": 2.08,
370
+ "grad_norm": 0.21427400410175323,
371
+ "learning_rate": 0.00013641605752528224,
372
+ "loss": 0.5319,
373
+ "step": 52
374
+ },
375
+ {
376
+ "epoch": 2.12,
377
+ "grad_norm": 0.20478470623493195,
378
+ "learning_rate": 0.00013392388661180303,
379
+ "loss": 0.5296,
380
+ "step": 53
381
+ },
382
+ {
383
+ "epoch": 2.16,
384
+ "grad_norm": 0.2072669267654419,
385
+ "learning_rate": 0.0001314076712021949,
386
+ "loss": 0.508,
387
+ "step": 54
388
+ },
389
+ {
390
+ "epoch": 2.2,
391
+ "grad_norm": 0.22309353947639465,
392
+ "learning_rate": 0.0001288691947339621,
393
+ "loss": 0.5454,
394
+ "step": 55
395
+ },
396
+ {
397
+ "epoch": 2.24,
398
+ "grad_norm": 0.22911317646503448,
399
+ "learning_rate": 0.00012631025642275212,
400
+ "loss": 0.5113,
401
+ "step": 56
402
+ },
403
+ {
404
+ "epoch": 2.2800000000000002,
405
+ "grad_norm": 0.22047486901283264,
406
+ "learning_rate": 0.0001237326699871115,
407
+ "loss": 0.5108,
408
+ "step": 57
409
+ },
410
+ {
411
+ "epoch": 2.32,
412
+ "grad_norm": 0.2026248276233673,
413
+ "learning_rate": 0.00012113826236296244,
414
+ "loss": 0.4972,
415
+ "step": 58
416
+ },
417
+ {
418
+ "epoch": 2.36,
419
+ "grad_norm": 0.22186334431171417,
420
+ "learning_rate": 0.00011852887240871145,
421
+ "loss": 0.473,
422
+ "step": 59
423
+ },
424
+ {
425
+ "epoch": 2.4,
426
+ "grad_norm": 0.22699356079101562,
427
+ "learning_rate": 0.00011590634960190721,
428
+ "loss": 0.5094,
429
+ "step": 60
430
+ },
431
+ {
432
+ "epoch": 2.44,
433
+ "grad_norm": 0.2264937162399292,
434
+ "learning_rate": 0.00011327255272837221,
435
+ "loss": 0.5157,
436
+ "step": 61
437
+ },
438
+ {
439
+ "epoch": 2.48,
440
+ "grad_norm": 0.21824264526367188,
441
+ "learning_rate": 0.00011062934856473655,
442
+ "loss": 0.4982,
443
+ "step": 62
444
+ },
445
+ {
446
+ "epoch": 2.52,
447
+ "grad_norm": 0.2194606065750122,
448
+ "learning_rate": 0.00010797861055530831,
449
+ "loss": 0.5322,
450
+ "step": 63
451
+ },
452
+ {
453
+ "epoch": 2.56,
454
+ "grad_norm": 0.24011456966400146,
455
+ "learning_rate": 0.00010532221748421787,
456
+ "loss": 0.5426,
457
+ "step": 64
458
+ },
459
+ {
460
+ "epoch": 2.6,
461
+ "grad_norm": 0.23064349591732025,
462
+ "learning_rate": 0.00010266205214377748,
463
+ "loss": 0.5237,
464
+ "step": 65
465
+ },
466
+ {
467
+ "epoch": 2.64,
468
+ "grad_norm": 0.23363706469535828,
469
+ "learning_rate": 0.0001,
470
+ "loss": 0.504,
471
+ "step": 66
472
+ },
473
+ {
474
+ "epoch": 2.68,
475
+ "grad_norm": 0.22657082974910736,
476
+ "learning_rate": 9.733794785622253e-05,
477
+ "loss": 0.4921,
478
+ "step": 67
479
+ },
480
+ {
481
+ "epoch": 2.7199999999999998,
482
+ "grad_norm": 0.2323610782623291,
483
+ "learning_rate": 9.467778251578217e-05,
484
+ "loss": 0.5026,
485
+ "step": 68
486
+ },
487
+ {
488
+ "epoch": 2.76,
489
+ "grad_norm": 0.22157961130142212,
490
+ "learning_rate": 9.202138944469168e-05,
491
+ "loss": 0.4814,
492
+ "step": 69
493
+ },
494
+ {
495
+ "epoch": 2.8,
496
+ "grad_norm": 0.2387864887714386,
497
+ "learning_rate": 8.937065143526347e-05,
498
+ "loss": 0.512,
499
+ "step": 70
500
+ },
501
+ {
502
+ "epoch": 2.84,
503
+ "grad_norm": 0.22291497886180878,
504
+ "learning_rate": 8.672744727162781e-05,
505
+ "loss": 0.5037,
506
+ "step": 71
507
+ },
508
+ {
509
+ "epoch": 2.88,
510
+ "grad_norm": 0.22832155227661133,
511
+ "learning_rate": 8.409365039809281e-05,
512
+ "loss": 0.4887,
513
+ "step": 72
514
+ },
515
+ {
516
+ "epoch": 2.92,
517
+ "grad_norm": 0.22989900410175323,
518
+ "learning_rate": 8.147112759128859e-05,
519
+ "loss": 0.5223,
520
+ "step": 73
521
+ },
522
+ {
523
+ "epoch": 2.96,
524
+ "grad_norm": 0.23885484039783478,
525
+ "learning_rate": 7.886173763703757e-05,
526
+ "loss": 0.5121,
527
+ "step": 74
528
+ },
529
+ {
530
+ "epoch": 3.0,
531
+ "grad_norm": 0.24740351736545563,
532
+ "learning_rate": 7.626733001288851e-05,
533
+ "loss": 0.5035,
534
+ "step": 75
535
+ },
536
+ {
537
+ "epoch": 3.04,
538
+ "grad_norm": 0.22852157056331635,
539
+ "learning_rate": 7.368974357724789e-05,
540
+ "loss": 0.4772,
541
+ "step": 76
542
+ },
543
+ {
544
+ "epoch": 3.08,
545
+ "grad_norm": 0.24161659181118011,
546
+ "learning_rate": 7.113080526603792e-05,
547
+ "loss": 0.4723,
548
+ "step": 77
549
+ },
550
+ {
551
+ "epoch": 3.12,
552
+ "grad_norm": 0.23222312331199646,
553
+ "learning_rate": 6.859232879780515e-05,
554
+ "loss": 0.4573,
555
+ "step": 78
556
+ },
557
+ {
558
+ "epoch": 3.16,
559
+ "grad_norm": 0.2266985923051834,
560
+ "learning_rate": 6.607611338819697e-05,
561
+ "loss": 0.4845,
562
+ "step": 79
563
+ },
564
+ {
565
+ "epoch": 3.2,
566
+ "grad_norm": 0.2399943470954895,
567
+ "learning_rate": 6.358394247471778e-05,
568
+ "loss": 0.4522,
569
+ "step": 80
570
+ },
571
+ {
572
+ "epoch": 3.24,
573
+ "grad_norm": 0.24266791343688965,
574
+ "learning_rate": 6.111758245266794e-05,
575
+ "loss": 0.4546,
576
+ "step": 81
577
+ },
578
+ {
579
+ "epoch": 3.2800000000000002,
580
+ "grad_norm": 0.2595340609550476,
581
+ "learning_rate": 5.867878142316221e-05,
582
+ "loss": 0.4648,
583
+ "step": 82
584
+ },
585
+ {
586
+ "epoch": 3.32,
587
+ "grad_norm": 0.23132656514644623,
588
+ "learning_rate": 5.626926795411447e-05,
589
+ "loss": 0.418,
590
+ "step": 83
591
+ },
592
+ {
593
+ "epoch": 3.36,
594
+ "grad_norm": 0.2526528239250183,
595
+ "learning_rate": 5.38907498550674e-05,
596
+ "loss": 0.4485,
597
+ "step": 84
598
+ },
599
+ {
600
+ "epoch": 3.4,
601
+ "grad_norm": 0.24199239909648895,
602
+ "learning_rate": 5.1544912966734994e-05,
603
+ "loss": 0.4466,
604
+ "step": 85
605
+ },
606
+ {
607
+ "epoch": 3.44,
608
+ "grad_norm": 0.24821186065673828,
609
+ "learning_rate": 4.9233419966116036e-05,
610
+ "loss": 0.4471,
611
+ "step": 86
612
+ },
613
+ {
614
+ "epoch": 3.48,
615
+ "grad_norm": 0.2661694586277008,
616
+ "learning_rate": 4.695790918802576e-05,
617
+ "loss": 0.4555,
618
+ "step": 87
619
+ },
620
+ {
621
+ "epoch": 3.52,
622
+ "grad_norm": 0.24729236960411072,
623
+ "learning_rate": 4.47199934638807e-05,
624
+ "loss": 0.4459,
625
+ "step": 88
626
+ },
627
+ {
628
+ "epoch": 3.56,
629
+ "grad_norm": 0.24867290258407593,
630
+ "learning_rate": 4.252125897855932e-05,
631
+ "loss": 0.436,
632
+ "step": 89
633
+ },
634
+ {
635
+ "epoch": 3.6,
636
+ "grad_norm": 0.24836455285549164,
637
+ "learning_rate": 4.036326414614985e-05,
638
+ "loss": 0.433,
639
+ "step": 90
640
+ },
641
+ {
642
+ "epoch": 3.64,
643
+ "grad_norm": 0.2629256844520569,
644
+ "learning_rate": 3.824753850538082e-05,
645
+ "loss": 0.4329,
646
+ "step": 91
647
+ },
648
+ {
649
+ "epoch": 3.68,
650
+ "grad_norm": 0.25363099575042725,
651
+ "learning_rate": 3.617558163551802e-05,
652
+ "loss": 0.4162,
653
+ "step": 92
654
+ },
655
+ {
656
+ "epoch": 3.7199999999999998,
657
+ "grad_norm": 0.256331205368042,
658
+ "learning_rate": 3.414886209349615e-05,
659
+ "loss": 0.4352,
660
+ "step": 93
661
+ },
662
+ {
663
+ "epoch": 3.76,
664
+ "grad_norm": 0.259033203125,
665
+ "learning_rate": 3.216881637303839e-05,
666
+ "loss": 0.4539,
667
+ "step": 94
668
+ },
669
+ {
670
+ "epoch": 3.8,
671
+ "grad_norm": 0.2493227869272232,
672
+ "learning_rate": 3.0236847886501542e-05,
673
+ "loss": 0.4232,
674
+ "step": 95
675
+ },
676
+ {
677
+ "epoch": 3.84,
678
+ "grad_norm": 0.2529692053794861,
679
+ "learning_rate": 2.8354325970168484e-05,
680
+ "loss": 0.4448,
681
+ "step": 96
682
+ },
683
+ {
684
+ "epoch": 3.88,
685
+ "grad_norm": 0.25735777616500854,
686
+ "learning_rate": 2.6522584913693294e-05,
687
+ "loss": 0.4428,
688
+ "step": 97
689
+ },
690
+ {
691
+ "epoch": 3.92,
692
+ "grad_norm": 0.25182002782821655,
693
+ "learning_rate": 2.4742923014386156e-05,
694
+ "loss": 0.4126,
695
+ "step": 98
696
+ },
697
+ {
698
+ "epoch": 3.96,
699
+ "grad_norm": 0.2627415657043457,
700
+ "learning_rate": 2.301660165700936e-05,
701
+ "loss": 0.4499,
702
+ "step": 99
703
+ },
704
+ {
705
+ "epoch": 4.0,
706
+ "grad_norm": 0.2550097703933716,
707
+ "learning_rate": 2.1344844419735755e-05,
708
+ "loss": 0.4174,
709
+ "step": 100
710
+ },
711
+ {
712
+ "epoch": 4.04,
713
+ "grad_norm": 0.2350243628025055,
714
+ "learning_rate": 1.9728836206903656e-05,
715
+ "loss": 0.4139,
716
+ "step": 101
717
+ },
718
+ {
719
+ "epoch": 4.08,
720
+ "grad_norm": 0.24459272623062134,
721
+ "learning_rate": 1.8169722409183097e-05,
722
+ "loss": 0.3973,
723
+ "step": 102
724
+ },
725
+ {
726
+ "epoch": 4.12,
727
+ "grad_norm": 0.2467670738697052,
728
+ "learning_rate": 1.6668608091748495e-05,
729
+ "loss": 0.4252,
730
+ "step": 103
731
+ },
732
+ {
733
+ "epoch": 4.16,
734
+ "grad_norm": 0.24727074801921844,
735
+ "learning_rate": 1.522655721103291e-05,
736
+ "loss": 0.3919,
737
+ "step": 104
738
+ },
739
+ {
740
+ "epoch": 4.2,
741
+ "grad_norm": 0.24538907408714294,
742
+ "learning_rate": 1.3844591860619383e-05,
743
+ "loss": 0.4517,
744
+ "step": 105
745
+ },
746
+ {
747
+ "epoch": 4.24,
748
+ "grad_norm": 0.2430519461631775,
749
+ "learning_rate": 1.2523691546803873e-05,
750
+ "loss": 0.4272,
751
+ "step": 106
752
+ },
753
+ {
754
+ "epoch": 4.28,
755
+ "grad_norm": 0.2513393759727478,
756
+ "learning_rate": 1.1264792494342857e-05,
757
+ "loss": 0.4022,
758
+ "step": 107
759
+ },
760
+ {
761
+ "epoch": 4.32,
762
+ "grad_norm": 0.24626919627189636,
763
+ "learning_rate": 1.0068786982878087e-05,
764
+ "loss": 0.405,
765
+ "step": 108
766
+ },
767
+ {
768
+ "epoch": 4.36,
769
+ "grad_norm": 0.25008630752563477,
770
+ "learning_rate": 8.936522714508678e-06,
771
+ "loss": 0.4123,
772
+ "step": 109
773
+ },
774
+ {
775
+ "epoch": 4.4,
776
+ "grad_norm": 0.2524281442165375,
777
+ "learning_rate": 7.868802212958703e-06,
778
+ "loss": 0.428,
779
+ "step": 110
780
+ },
781
+ {
782
+ "epoch": 4.44,
783
+ "grad_norm": 0.24142776429653168,
784
+ "learning_rate": 6.866382254766157e-06,
785
+ "loss": 0.4291,
786
+ "step": 111
787
+ },
788
+ {
789
+ "epoch": 4.48,
790
+ "grad_norm": 0.24990609288215637,
791
+ "learning_rate": 5.929973332896677e-06,
792
+ "loss": 0.3961,
793
+ "step": 112
794
+ },
795
+ {
796
+ "epoch": 4.52,
797
+ "grad_norm": 0.23870083689689636,
798
+ "learning_rate": 5.060239153161872e-06,
799
+ "loss": 0.4001,
800
+ "step": 113
801
+ },
802
+ {
803
+ "epoch": 4.5600000000000005,
804
+ "grad_norm": 0.2507989704608917,
805
+ "learning_rate": 4.257796163799455e-06,
806
+ "loss": 0.386,
807
+ "step": 114
808
+ },
809
+ {
810
+ "epoch": 4.6,
811
+ "grad_norm": 0.244070902466774,
812
+ "learning_rate": 3.5232131185484076e-06,
813
+ "loss": 0.4357,
814
+ "step": 115
815
+ },
816
+ {
817
+ "epoch": 4.64,
818
+ "grad_norm": 0.24638234078884125,
819
+ "learning_rate": 2.857010673529015e-06,
820
+ "loss": 0.3986,
821
+ "step": 116
822
+ },
823
+ {
824
+ "epoch": 4.68,
825
+ "grad_norm": 0.24885112047195435,
826
+ "learning_rate": 2.259661018213333e-06,
827
+ "loss": 0.3969,
828
+ "step": 117
829
+ },
830
+ {
831
+ "epoch": 4.72,
832
+ "grad_norm": 0.24754343926906586,
833
+ "learning_rate": 1.7315875407479032e-06,
834
+ "loss": 0.3918,
835
+ "step": 118
836
+ },
837
+ {
838
+ "epoch": 4.76,
839
+ "grad_norm": 0.2457382082939148,
840
+ "learning_rate": 1.2731645278655445e-06,
841
+ "loss": 0.3882,
842
+ "step": 119
843
+ },
844
+ {
845
+ "epoch": 4.8,
846
+ "grad_norm": 0.2584328353404999,
847
+ "learning_rate": 8.847168995992916e-07,
848
+ "loss": 0.419,
849
+ "step": 120
850
+ },
851
+ {
852
+ "epoch": 4.84,
853
+ "grad_norm": 0.24872322380542755,
854
+ "learning_rate": 5.665199789862907e-07,
855
+ "loss": 0.4084,
856
+ "step": 121
857
+ },
858
+ {
859
+ "epoch": 4.88,
860
+ "grad_norm": 0.26830849051475525,
861
+ "learning_rate": 3.1879929692498757e-07,
862
+ "loss": 0.4304,
863
+ "step": 122
864
+ },
865
+ {
866
+ "epoch": 4.92,
867
+ "grad_norm": 0.24320735037326813,
868
+ "learning_rate": 1.4173043232380557e-07,
869
+ "loss": 0.4183,
870
+ "step": 123
871
+ },
872
+ {
873
+ "epoch": 4.96,
874
+ "grad_norm": 0.272348552942276,
875
+ "learning_rate": 3.5438887654737355e-08,
876
+ "loss": 0.4105,
877
+ "step": 124
878
+ },
879
+ {
880
+ "epoch": 5.0,
881
+ "grad_norm": 0.24668046832084656,
882
+ "learning_rate": 0.0,
883
+ "loss": 0.4111,
884
+ "step": 125
885
  }
886
  ],
887
  "logging_steps": 1,
888
+ "max_steps": 125,
889
  "num_input_tokens_seen": 0,
890
+ "num_train_epochs": 5,
891
  "save_steps": 500,
892
  "stateful_callbacks": {
893
  "TrainerControl": {
 
901
  "attributes": {}
902
  }
903
  },
904
+ "total_flos": 1.5177777819849523e+17,
905
  "train_batch_size": 1,
906
  "trial_name": null,
907
  "trial_params": null
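
The trainer state carries the substantive change: the run now covers 5 epochs / 125 optimizer steps instead of 1 epoch / 50 steps, the peak learning rate stays at 2e-4, and the final logged loss is about 0.41 (versus ~0.39 at the end of the shorter run). The logged learning rates look like a short linear warmup followed by cosine decay; the sketch below reproduces that column under that assumption, with the 7 warmup steps and the 2e-4 peak inferred from the log rather than stated anywhere in the repository:

```python
import math

# Approximate reproduction of the learning-rate column in the new log_history.
# PEAK_LR is read off the log; WARMUP_STEPS and TOTAL_STEPS are inferred, not documented.
PEAK_LR, WARMUP_STEPS, TOTAL_STEPS = 2e-4, 7, 125

def lr_at(step: int) -> float:
    if step <= WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS  # linear warmup
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay to 0

print(lr_at(8))    # ~1.99965e-04, matching step 8 in the log
print(lr_at(124))  # ~3.5439e-08, matching step 124
```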
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8cfa4fec4e50cec40df623f571d3006caf05648d4ae42452a1182e9044e6e5b0
+ oid sha256:7a5c0b48f8e3321653a88d10a9bb20e8b8f39b171caaf986ab7c06add68636f7
  size 5240
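
training_args.bin is an opaque pickled transformers.TrainingArguments object, so only its hash changes here; the new run configuration (e.g. the epoch count) lives inside it. A generic inspection sketch, not shipped with this repository:

```python
import torch

# training_args.bin stores a pickled transformers.TrainingArguments; recent torch
# releases need weights_only=False to unpickle arbitrary Python objects.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.lr_scheduler_type, args.warmup_steps)
```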