erbacher committed
Commit 2e5b753
1 parent: 889ad60

Model save

README.md ADDED
@@ -0,0 +1,71 @@
+ ---
+ license: mit
+ base_model: HuggingFaceH4/zephyr-7b-beta
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: zephyr-7b-ikat
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # zephyr-7b-ikat
+
+ This model is a fine-tuned version of [HuggingFaceH4/zephyr-7b-beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.5166
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 8
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 2
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 256
+ - total_eval_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 0.7437 | 0.62 | 17 | 0.6867 |
+ | 0.6372 | 1.63 | 35 | 0.6215 |
+ | 0.6078 | 2.64 | 53 | 0.5859 |
+ | 0.5724 | 3.62 | 70 | 0.5625 |
+ | 0.5613 | 4.63 | 88 | 0.5448 |
+ | 0.5427 | 5.64 | 106 | 0.5337 |
+ | 0.5388 | 6.62 | 123 | 0.5274 |
+ | 0.5284 | 7.63 | 141 | 0.5229 |
+ | 0.5285 | 8.64 | 159 | 0.5188 |
+ | 0.5222 | 9.61 | 176 | 0.5165 |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.0
+ - Pytorch 2.1.1+cu118
+ - Datasets 2.14.6
+ - Tokenizers 0.14.1
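
The card's usage section is still a stub. As a hedged starting point, here is a minimal inference sketch; it assumes the adapter is published under the repo id `erbacher/zephyr-7b-ikat` (inferred from the committer and model name, not stated in the card) and that recent `transformers` and `peft` releases are installed. `AutoPeftModelForCausalLM` reads the `adapter_config.json` committed below, fetches the zephyr-7b-beta base weights, and attaches the LoRA adapter on top.

```python
# Minimal inference sketch. Assumptions: hypothetical repo id
# "erbacher/zephyr-7b-ikat"; a GPU with room for the 7B base model in bfloat16.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "erbacher/zephyr-7b-ikat"  # assumed, not confirmed by the card

# Loads the base model named in adapter_config.json and applies the adapter.
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_id, torch_dtype=torch.bfloat16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(adapter_id)

messages = [{"role": "user", "content": "Summarize what a LoRA adapter is."}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)
# Decode only the newly generated tokens, not the prompt.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:],
                       skip_special_tokens=True))
```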
adapter_config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "HuggingFaceH4/zephyr-7b-beta",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "v_proj",
+     "o_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
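
For readers reconstructing this run, the adapter config above corresponds roughly to the following `peft.LoraConfig` (a sketch; field names match the peft releases contemporary with Transformers 4.35): rank-64 LoRA with alpha 16 and dropout 0.1 on the four attention projections of the Mistral blocks.

```python
from peft import LoraConfig

# Mirrors adapter_config.json: rank-64 adapters on the q/k/v/o attention
# projections only; MLP layers and embeddings stay frozen.
lora_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    target_modules=["k_proj", "v_proj", "o_proj", "q_proj"],
    task_type="CAUSAL_LM",
)
```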
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f166319ea108369065349f0caf68c4d2e72c1d26dfe9e6b5c242a09e25ddb31d
+ size 109086672
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "epoch": 9.61,
+   "eval_loss": 0.5165773034095764,
+   "eval_runtime": 30.0245,
+   "eval_samples": 200,
+   "eval_samples_per_second": 6.661,
+   "eval_steps_per_second": 0.833,
+   "train_loss": 0.5895595401525497,
+   "train_runtime": 27383.9371,
+   "train_samples": 7016,
+   "train_samples_per_second": 2.562,
+   "train_steps_per_second": 0.01
+ }
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_name_or_path": "HuggingFaceH4/zephyr-7b-beta",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pad_token_id": 2,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.35.0",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
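
Nothing here deviates from the stock zephyr-7b-beta configuration (Mistral-7B with grouped-query attention: 32 query heads over 8 key/value heads, and a 4096-token sliding window). A quick sanity check, sketched under the assumption of Hub access:

```python
from transformers import AutoConfig

base = AutoConfig.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
# Grouped-query attention: 32 query heads share 8 key/value heads.
assert base.num_attention_heads == 32 and base.num_key_value_heads == 8
assert base.sliding_window == 4096 and base.max_position_embeddings == 32768
```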
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 9.61,
+   "eval_loss": 0.5165773034095764,
+   "eval_runtime": 30.0245,
+   "eval_samples": 200,
+   "eval_samples_per_second": 6.661,
+   "eval_steps_per_second": 0.833
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "truncation_side": "left",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
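
The `chat_template` above is the standard Zephyr format: each turn is wrapped in a `<|system|>`, `<|user|>`, or `<|assistant|>` header and closed with the EOS token, and `add_generation_prompt` appends a bare `<|assistant|>` header for the model to complete. A sketch of how a conversation renders (the repo id is assumed, as before):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("erbacher/zephyr-7b-ikat")  # assumed id

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(tokenizer.apply_chat_template(messages, tokenize=False,
                                    add_generation_prompt=True))
# Expected shape of the output:
# <|system|>
# You are a helpful assistant.</s>
# <|user|>
# Hello!</s>
# <|assistant|>
```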
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 9.61,
+   "train_loss": 0.5895595401525497,
+   "train_runtime": 27383.9371,
+   "train_samples": 7016,
+   "train_samples_per_second": 2.562,
+   "train_steps_per_second": 0.01
+ }
trainer_state.json ADDED
@@ -0,0 +1,324 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 9.612756264236902,
+   "eval_steps": 500,
+   "global_step": 176,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.04,
+       "learning_rate": 1.9999323080037623e-05,
+       "loss": 0.8679,
+       "step": 1
+     },
+     {
+       "epoch": 0.18,
+       "learning_rate": 1.9983081582712684e-05,
+       "loss": 0.8526,
+       "step": 5
+     },
+     {
+       "epoch": 0.36,
+       "learning_rate": 1.9932383577419432e-05,
+       "loss": 0.8014,
+       "step": 10
+     },
+     {
+       "epoch": 0.55,
+       "learning_rate": 1.9848077530122083e-05,
+       "loss": 0.7437,
+       "step": 15
+     },
+     {
+       "epoch": 0.62,
+       "eval_loss": 0.6867304444313049,
+       "eval_runtime": 30.7118,
+       "eval_samples_per_second": 6.512,
+       "eval_steps_per_second": 0.814,
+       "step": 17
+     },
+     {
+       "epoch": 1.08,
+       "learning_rate": 1.973044870579824e-05,
+       "loss": 0.6989,
+       "step": 20
+     },
+     {
+       "epoch": 1.27,
+       "learning_rate": 1.957989512315489e-05,
+       "loss": 0.6673,
+       "step": 25
+     },
+     {
+       "epoch": 1.45,
+       "learning_rate": 1.9396926207859085e-05,
+       "loss": 0.6539,
+       "step": 30
+     },
+     {
+       "epoch": 1.63,
+       "learning_rate": 1.9182161068802742e-05,
+       "loss": 0.6372,
+       "step": 35
+     },
+     {
+       "epoch": 1.63,
+       "eval_loss": 0.621515691280365,
+       "eval_runtime": 30.0651,
+       "eval_samples_per_second": 6.652,
+       "eval_steps_per_second": 0.832,
+       "step": 35
+     },
+     {
+       "epoch": 2.17,
+       "learning_rate": 1.8936326403234125e-05,
+       "loss": 0.6243,
+       "step": 40
+     },
+     {
+       "epoch": 2.35,
+       "learning_rate": 1.866025403784439e-05,
+       "loss": 0.6186,
+       "step": 45
+     },
+     {
+       "epoch": 2.53,
+       "learning_rate": 1.8354878114129368e-05,
+       "loss": 0.6078,
+       "step": 50
+     },
+     {
+       "epoch": 2.64,
+       "eval_loss": 0.5859256386756897,
+       "eval_runtime": 30.0404,
+       "eval_samples_per_second": 6.658,
+       "eval_steps_per_second": 0.832,
+       "step": 53
+     },
+     {
+       "epoch": 3.07,
+       "learning_rate": 1.802123192755044e-05,
+       "loss": 0.5942,
+       "step": 55
+     },
+     {
+       "epoch": 3.25,
+       "learning_rate": 1.766044443118978e-05,
+       "loss": 0.5896,
+       "step": 60
+     },
+     {
+       "epoch": 3.44,
+       "learning_rate": 1.7273736415730488e-05,
+       "loss": 0.5848,
+       "step": 65
+     },
+     {
+       "epoch": 3.62,
+       "learning_rate": 1.686241637868734e-05,
+       "loss": 0.5724,
+       "step": 70
+     },
+     {
+       "epoch": 3.62,
+       "eval_loss": 0.562507688999176,
+       "eval_runtime": 30.0633,
+       "eval_samples_per_second": 6.653,
+       "eval_steps_per_second": 0.832,
+       "step": 70
+     },
+     {
+       "epoch": 4.15,
+       "learning_rate": 1.6427876096865394e-05,
+       "loss": 0.5703,
+       "step": 75
+     },
+     {
+       "epoch": 4.34,
+       "learning_rate": 1.5971585917027864e-05,
+       "loss": 0.5612,
+       "step": 80
+     },
+     {
+       "epoch": 4.52,
+       "learning_rate": 1.5495089780708062e-05,
+       "loss": 0.5613,
+       "step": 85
+     },
+     {
+       "epoch": 4.63,
+       "eval_loss": 0.544775128364563,
+       "eval_runtime": 30.0298,
+       "eval_samples_per_second": 6.66,
+       "eval_steps_per_second": 0.833,
+       "step": 88
+     },
+     {
+       "epoch": 5.06,
+       "learning_rate": 1.5000000000000002e-05,
+       "loss": 0.5509,
+       "step": 90
+     },
+     {
+       "epoch": 5.24,
+       "learning_rate": 1.4487991802004625e-05,
+       "loss": 0.5486,
+       "step": 95
+     },
+     {
+       "epoch": 5.42,
+       "learning_rate": 1.396079766039157e-05,
+       "loss": 0.5485,
+       "step": 100
+     },
+     {
+       "epoch": 5.6,
+       "learning_rate": 1.342020143325669e-05,
+       "loss": 0.5427,
+       "step": 105
+     },
+     {
+       "epoch": 5.64,
+       "eval_loss": 0.5336735844612122,
+       "eval_runtime": 30.037,
+       "eval_samples_per_second": 6.658,
+       "eval_steps_per_second": 0.832,
+       "step": 106
+     },
+     {
+       "epoch": 6.14,
+       "learning_rate": 1.2868032327110904e-05,
+       "loss": 0.5399,
+       "step": 110
+     },
+     {
+       "epoch": 6.32,
+       "learning_rate": 1.2306158707424402e-05,
+       "loss": 0.5376,
+       "step": 115
+     },
+     {
+       "epoch": 6.51,
+       "learning_rate": 1.1736481776669307e-05,
+       "loss": 0.5388,
+       "step": 120
+     },
+     {
+       "epoch": 6.62,
+       "eval_loss": 0.5273823142051697,
+       "eval_runtime": 30.0618,
+       "eval_samples_per_second": 6.653,
+       "eval_steps_per_second": 0.832,
+       "step": 123
+     },
+     {
+       "epoch": 7.04,
+       "learning_rate": 1.1160929141252303e-05,
+       "loss": 0.5321,
+       "step": 125
+     },
+     {
+       "epoch": 7.23,
+       "learning_rate": 1.0581448289104759e-05,
+       "loss": 0.5323,
+       "step": 130
+     },
+     {
+       "epoch": 7.41,
+       "learning_rate": 1e-05,
+       "loss": 0.5326,
+       "step": 135
+     },
+     {
+       "epoch": 7.59,
+       "learning_rate": 9.418551710895243e-06,
+       "loss": 0.5284,
+       "step": 140
+     },
+     {
+       "epoch": 7.63,
+       "eval_loss": 0.5229138731956482,
+       "eval_runtime": 30.0491,
+       "eval_samples_per_second": 6.656,
+       "eval_steps_per_second": 0.832,
+       "step": 141
+     },
+     {
+       "epoch": 8.13,
+       "learning_rate": 8.839070858747697e-06,
+       "loss": 0.529,
+       "step": 145
+     },
+     {
+       "epoch": 8.31,
+       "learning_rate": 8.263518223330698e-06,
+       "loss": 0.5277,
+       "step": 150
+     },
+     {
+       "epoch": 8.49,
+       "learning_rate": 7.6938412925756e-06,
+       "loss": 0.5285,
+       "step": 155
+     },
+     {
+       "epoch": 8.64,
+       "eval_loss": 0.5188391208648682,
+       "eval_runtime": 30.0316,
+       "eval_samples_per_second": 6.66,
+       "eval_steps_per_second": 0.832,
+       "step": 159
+     },
+     {
+       "epoch": 9.03,
+       "learning_rate": 7.131967672889101e-06,
+       "loss": 0.5219,
+       "step": 160
+     },
+     {
+       "epoch": 9.21,
+       "learning_rate": 6.579798566743314e-06,
+       "loss": 0.5249,
+       "step": 165
+     },
+     {
+       "epoch": 9.39,
+       "learning_rate": 6.039202339608432e-06,
+       "loss": 0.5249,
+       "step": 170
+     },
+     {
+       "epoch": 9.58,
+       "learning_rate": 5.512008197995379e-06,
+       "loss": 0.5222,
+       "step": 175
+     },
+     {
+       "epoch": 9.61,
+       "eval_loss": 0.5165340304374695,
+       "eval_runtime": 30.0644,
+       "eval_samples_per_second": 6.652,
+       "eval_steps_per_second": 0.832,
+       "step": 176
+     },
+     {
+       "epoch": 9.61,
+       "step": 176,
+       "total_flos": 1.59704785354752e+16,
+       "train_loss": 0.5895595401525497,
+       "train_runtime": 27383.9371,
+       "train_samples_per_second": 2.562,
+       "train_steps_per_second": 0.01
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 270,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 1.59704785354752e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
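
`trainer_state.json` interleaves two kinds of records in `log_history`: training entries (every 5 steps, per `logging_steps`) carrying `loss` and `learning_rate`, and evaluation entries carrying `eval_loss`. The README's results table is just the eval entries; a sketch for recovering them from a local copy of the file:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Eval records are the log_history entries that carry an "eval_loss" key;
# this also skips the final summary entry (train_loss, runtime, etc.).
for e in (e for e in state["log_history"] if "eval_loss" in e):
    print(f'epoch {e["epoch"]:>5}  step {e["step"]:>3}  eval_loss {e["eval_loss"]:.4f}')
```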
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7afc8cf8129522d9383886b34de8fc50050e9d86f2a554c68ff58e4b0be77d7
3
+ size 5624