prateeky2806 committed
Commit 633f651
1 Parent(s): 914da1c

Training in progress, step 600

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:53a22919ce13eb751f650cf407d214e4b1d6f25c3f314bc3c382ea1eedfebc47
+oid sha256:540d903ba74ff248bf733c34856663c644c35d1d59d8f5f41398897f7827dfa4
 size 319977229
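
The `.bin` entries in this commit are git-lfs pointer files, not the weights themselves: each records the LFS spec version, a `sha256` object id, and the byte size of the actual object. A minimal sketch of parsing a pointer and verifying a downloaded object against it (file paths are illustrative, not part of this repo):

```python
import hashlib

def read_lfs_pointer(pointer_path: str) -> dict:
    """Parse a git-lfs pointer file ('key value' per line) into a dict."""
    with open(pointer_path) as f:
        return dict(line.strip().split(" ", 1) for line in f if line.strip())

def verify_lfs_object(pointer_path: str, object_path: str) -> bool:
    """Check a downloaded object against its pointer's sha256 oid."""
    fields = read_lfs_pointer(pointer_path)
    expected = fields["oid"].split(":", 1)[1]  # strip the 'sha256:' prefix
    digest = hashlib.sha256()
    with open(object_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected

# e.g. verify_lfs_object("adapter_model.bin.pointer", "adapter_model.bin")
```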
checkpoint-400/adapter_model/adapter_model/README.md ADDED
@@ -0,0 +1,20 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.4.0
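
For reference, the settings listed in this README map onto `transformers`' `BitsAndBytesConfig` roughly as below. This is a reconstruction from the list above, not the original training script:

```python
import torch
from transformers import BitsAndBytesConfig

# 4-bit NF4 quantization with double quantization and bfloat16 compute,
# exactly as enumerated in the README above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```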
checkpoint-400/adapter_model/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "NousResearch/Nous-Hermes-llama-2-7b",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "revision": null,
+  "target_modules": [
+    "gate_proj",
+    "o_proj",
+    "k_proj",
+    "up_proj",
+    "v_proj",
+    "q_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
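
`r` and `target_modules` fix the adapter's parameter count: each targeted weight of shape (d_out, d_in) gains a LoRA pair A (r × d_in) and B (d_out × r), i.e. r·(d_in + d_out) parameters. A back-of-the-envelope check, assuming the standard LLaMA-2-7B shapes (hidden 4096, intermediate 11008, 32 layers; these dimensions come from the base model, not this config):

```python
hidden, intermediate, layers, r = 4096, 11008, 32, 64  # assumed LLaMA-2-7B shapes

module_shapes = {  # (d_in, d_out) per targeted projection
    "q_proj": (hidden, hidden), "k_proj": (hidden, hidden),
    "v_proj": (hidden, hidden), "o_proj": (hidden, hidden),
    "gate_proj": (hidden, intermediate), "up_proj": (hidden, intermediate),
    "down_proj": (intermediate, hidden),
}

# LoRA adds r * (d_in + d_out) parameters per module, per layer.
per_layer = sum(r * (d_in + d_out) for d_in, d_out in module_shapes.values())
total_params = per_layer * layers  # 159,907,840
fp16_bytes = total_params * 2      # 319,815,680
print(total_params, fp16_bytes)
```

That lands within about 0.1% of the 319977229-byte `adapter_model.bin` recorded below; the remainder is serialization overhead.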
checkpoint-400/adapter_model/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53a22919ce13eb751f650cf407d214e4b1d6f25c3f314bc3c382ea1eedfebc47
+size 319977229
checkpoint-600/README.md ADDED
@@ -0,0 +1,20 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.4.0
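
This README repeats the same quantization config as the checkpoint-400 copy above. To reload a checkpoint under that scheme for inference, the base model would be loaded roughly like so (a sketch assuming the stock `transformers` API; `bnb_config` is the `BitsAndBytesConfig` built earlier):

```python
from transformers import AutoModelForCausalLM

# Quantize the base model on the fly at load time with the config above.
base = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Nous-Hermes-llama-2-7b",
    quantization_config=bnb_config,
    device_map="auto",
)
```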
checkpoint-600/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "NousResearch/Nous-Hermes-llama-2-7b",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "revision": null,
+  "target_modules": [
+    "gate_proj",
+    "o_proj",
+    "k_proj",
+    "up_proj",
+    "v_proj",
+    "q_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
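
With the quantized base model from the previous sketch, the checkpoint-600 adapter described by this config attaches via `peft` (the path is relative to this repository):

```python
from peft import PeftModel

# Wrap the base model with the LoRA weights from this checkpoint.
model = PeftModel.from_pretrained(base, "checkpoint-600")
model.eval()
```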
checkpoint-600/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:540d903ba74ff248bf733c34856663c644c35d1d59d8f5f41398897f7827dfa4
+size 319977229
checkpoint-600/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<pad>": 32000
+}
checkpoint-600/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f42454e6d42c67f5dcb551c7ec34f272937f2b884768d40dfab803915fc24816
+size 1279539973
checkpoint-600/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e64dcf6725824583d36020f65f38bd5b063e5c0090025e8dff846816224fe4f8
+size 14511
checkpoint-600/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c1a2ac1c11599601eeac95feb1dbfd49ec5c625e61dcce18b3f094491f9cf2d
+size 627
checkpoint-600/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<unk>",
+  "unk_token": "<unk>"
+}
checkpoint-600/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
checkpoint-600/tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
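
Note the interplay of the three tokenizer files above: `added_tokens.json` extends the 32000-token LLaMA vocabulary with `<pad>` at id 32000, `special_tokens_map.json` routes the active `pad_token` to `<unk>`, and `tokenizer_config.json` itself leaves `pad_token` null (its huge `model_max_length` is the usual "no limit" sentinel). A quick inspection sketch; exactly how `pad_token` resolves can vary across `transformers` versions:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-600")
print(len(tok))                            # 32001: base vocab + added <pad>
print(tok.convert_tokens_to_ids("<pad>"))  # 32000, per added_tokens.json
print(tok.pad_token)                       # expected "<unk>", per special_tokens_map.json
```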
checkpoint-600/trainer_state.json ADDED
@@ -0,0 +1,400 @@
+{
+  "best_metric": 0.42340007424354553,
+  "best_model_checkpoint": "./output_v2/7b_cluster00_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_00/checkpoint-600",
+  "epoch": 0.6362672322375398,
+  "global_step": 600,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.01,
+      "learning_rate": 0.0002,
+      "loss": 0.5637,
+      "step": 10
+    },
+    {
+      "epoch": 0.02,
+      "learning_rate": 0.0002,
+      "loss": 0.4775,
+      "step": 20
+    },
+    {
+      "epoch": 0.03,
+      "learning_rate": 0.0002,
+      "loss": 0.5079,
+      "step": 30
+    },
+    {
+      "epoch": 0.04,
+      "learning_rate": 0.0002,
+      "loss": 0.4849,
+      "step": 40
+    },
+    {
+      "epoch": 0.05,
+      "learning_rate": 0.0002,
+      "loss": 0.4587,
+      "step": 50
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 0.0002,
+      "loss": 0.4867,
+      "step": 60
+    },
+    {
+      "epoch": 0.07,
+      "learning_rate": 0.0002,
+      "loss": 0.4578,
+      "step": 70
+    },
+    {
+      "epoch": 0.08,
+      "learning_rate": 0.0002,
+      "loss": 0.4665,
+      "step": 80
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 0.0002,
+      "loss": 0.5044,
+      "step": 90
+    },
+    {
+      "epoch": 0.11,
+      "learning_rate": 0.0002,
+      "loss": 0.4979,
+      "step": 100
+    },
+    {
+      "epoch": 0.12,
+      "learning_rate": 0.0002,
+      "loss": 0.4715,
+      "step": 110
+    },
+    {
+      "epoch": 0.13,
+      "learning_rate": 0.0002,
+      "loss": 0.4717,
+      "step": 120
+    },
+    {
+      "epoch": 0.14,
+      "learning_rate": 0.0002,
+      "loss": 0.4419,
+      "step": 130
+    },
+    {
+      "epoch": 0.15,
+      "learning_rate": 0.0002,
+      "loss": 0.4671,
+      "step": 140
+    },
+    {
+      "epoch": 0.16,
+      "learning_rate": 0.0002,
+      "loss": 0.4503,
+      "step": 150
+    },
+    {
+      "epoch": 0.17,
+      "learning_rate": 0.0002,
+      "loss": 0.4878,
+      "step": 160
+    },
+    {
+      "epoch": 0.18,
+      "learning_rate": 0.0002,
+      "loss": 0.4648,
+      "step": 170
+    },
+    {
+      "epoch": 0.19,
+      "learning_rate": 0.0002,
+      "loss": 0.4625,
+      "step": 180
+    },
+    {
+      "epoch": 0.2,
+      "learning_rate": 0.0002,
+      "loss": 0.4473,
+      "step": 190
+    },
+    {
+      "epoch": 0.21,
+      "learning_rate": 0.0002,
+      "loss": 0.4585,
+      "step": 200
+    },
+    {
+      "epoch": 0.21,
+      "eval_loss": 0.4498298466205597,
+      "eval_runtime": 253.5551,
+      "eval_samples_per_second": 3.944,
+      "eval_steps_per_second": 0.986,
+      "step": 200
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 0.0002,
+      "loss": 0.4396,
+      "step": 210
+    },
+    {
+      "epoch": 0.23,
+      "learning_rate": 0.0002,
+      "loss": 0.4545,
+      "step": 220
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 0.0002,
+      "loss": 0.4632,
+      "step": 230
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 0.0002,
+      "loss": 0.4458,
+      "step": 240
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 0.0002,
+      "loss": 0.4612,
+      "step": 250
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 0.0002,
+      "loss": 0.4493,
+      "step": 260
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 0.0002,
+      "loss": 0.4652,
+      "step": 270
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 0.0002,
+      "loss": 0.4596,
+      "step": 280
+    },
+    {
+      "epoch": 0.31,
+      "learning_rate": 0.0002,
+      "loss": 0.4483,
+      "step": 290
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 0.0002,
+      "loss": 0.4386,
+      "step": 300
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 0.0002,
+      "loss": 0.4746,
+      "step": 310
+    },
+    {
+      "epoch": 0.34,
+      "learning_rate": 0.0002,
+      "loss": 0.4299,
+      "step": 320
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 0.0002,
+      "loss": 0.4462,
+      "step": 330
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 0.0002,
+      "loss": 0.4346,
+      "step": 340
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 0.0002,
+      "loss": 0.4425,
+      "step": 350
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 0.0002,
+      "loss": 0.4356,
+      "step": 360
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 0.0002,
+      "loss": 0.4647,
+      "step": 370
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 0.0002,
+      "loss": 0.4448,
+      "step": 380
+    },
+    {
+      "epoch": 0.41,
+      "learning_rate": 0.0002,
+      "loss": 0.4208,
+      "step": 390
+    },
+    {
+      "epoch": 0.42,
+      "learning_rate": 0.0002,
+      "loss": 0.4095,
+      "step": 400
+    },
+    {
+      "epoch": 0.42,
+      "eval_loss": 0.43381235003471375,
+      "eval_runtime": 253.5754,
+      "eval_samples_per_second": 3.944,
+      "eval_steps_per_second": 0.986,
+      "step": 400
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 0.0002,
+      "loss": 0.4181,
+      "step": 410
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 0.0002,
+      "loss": 0.445,
+      "step": 420
+    },
+    {
+      "epoch": 0.46,
+      "learning_rate": 0.0002,
+      "loss": 0.417,
+      "step": 430
+    },
+    {
+      "epoch": 0.47,
+      "learning_rate": 0.0002,
+      "loss": 0.438,
+      "step": 440
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 0.0002,
+      "loss": 0.4376,
+      "step": 450
+    },
+    {
+      "epoch": 0.49,
+      "learning_rate": 0.0002,
+      "loss": 0.4249,
+      "step": 460
+    },
+    {
+      "epoch": 0.5,
+      "learning_rate": 0.0002,
+      "loss": 0.4407,
+      "step": 470
+    },
+    {
+      "epoch": 0.51,
+      "learning_rate": 0.0002,
+      "loss": 0.4506,
+      "step": 480
+    },
+    {
+      "epoch": 0.52,
+      "learning_rate": 0.0002,
+      "loss": 0.4289,
+      "step": 490
+    },
+    {
+      "epoch": 0.53,
+      "learning_rate": 0.0002,
+      "loss": 0.4596,
+      "step": 500
+    },
+    {
+      "epoch": 0.54,
+      "learning_rate": 0.0002,
+      "loss": 0.4174,
+      "step": 510
+    },
+    {
+      "epoch": 0.55,
+      "learning_rate": 0.0002,
+      "loss": 0.4373,
+      "step": 520
+    },
+    {
+      "epoch": 0.56,
+      "learning_rate": 0.0002,
+      "loss": 0.4253,
+      "step": 530
+    },
+    {
+      "epoch": 0.57,
+      "learning_rate": 0.0002,
+      "loss": 0.4243,
+      "step": 540
+    },
+    {
+      "epoch": 0.58,
+      "learning_rate": 0.0002,
+      "loss": 0.4455,
+      "step": 550
+    },
+    {
+      "epoch": 0.59,
+      "learning_rate": 0.0002,
+      "loss": 0.4362,
+      "step": 560
+    },
+    {
+      "epoch": 0.6,
+      "learning_rate": 0.0002,
+      "loss": 0.4364,
+      "step": 570
+    },
+    {
+      "epoch": 0.62,
+      "learning_rate": 0.0002,
+      "loss": 0.4257,
+      "step": 580
+    },
+    {
+      "epoch": 0.63,
+      "learning_rate": 0.0002,
+      "loss": 0.4193,
+      "step": 590
+    },
+    {
+      "epoch": 0.64,
+      "learning_rate": 0.0002,
+      "loss": 0.4233,
+      "step": 600
+    },
+    {
+      "epoch": 0.64,
+      "eval_loss": 0.42340007424354553,
+      "eval_runtime": 253.6896,
+      "eval_samples_per_second": 3.942,
+      "eval_steps_per_second": 0.985,
+      "step": 600
+    }
+  ],
+  "max_steps": 5000,
+  "num_train_epochs": 6,
+  "total_flos": 1.64911009544405e+17,
+  "trial_name": null,
+  "trial_params": null
+}
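
`log_history` interleaves a training-loss entry every 10 steps with an eval entry every 200, and `best_metric`/`best_model_checkpoint` track the lowest eval loss so far (0.4498 → 0.4338 → 0.4234, so step 600 is the running best). A small sketch that recovers the eval curve from this file:

```python
import json

with open("checkpoint-600/trainer_state.json") as f:
    state = json.load(f)

# Eval entries are the ones carrying an "eval_loss" key.
evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(f"step {e['step']:>3}: eval_loss {e['eval_loss']:.4f}")

best = min(evals, key=lambda e: e["eval_loss"])
assert best["eval_loss"] == state["best_metric"]  # 0.4234... at step 600
```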
checkpoint-600/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d43bb46b7e18307cd325d889344b3c5a6b0f12a2594434f5acb40117cbab761
+size 6011