BraylonDash committed on
Commit
36e9f95
1 Parent(s): e86fc0c

Model save

README.md ADDED
@@ -0,0 +1,63 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ base_model: mistralai/Mistral-7B-Instruct-v0.1
+ datasets:
+ - generator
+ model-index:
+ - name: Mistral-7B-gsm8k
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Mistral-7B-gsm8k
+
+ This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) on the generator dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 2
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 3
+ - total_train_batch_size: 6
+ - total_eval_batch_size: 24
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.1.2
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
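
For readers who want to try the adapter, a minimal loading sketch follows. It assumes the adapter is published under a repo id of `BraylonDash/Mistral-7B-gsm8k` (inferred from the model name above, not confirmed) and uses only standard `peft`/`transformers` calls; the adapter attaches on top of the base Mistral-7B-Instruct-v0.1 weights.

```python
# Minimal sketch: load the LoRA adapter on top of the base model.
# The repo id "BraylonDash/Mistral-7B-gsm8k" is an assumption; substitute the real one.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "mistralai/Mistral-7B-Instruct-v0.1"
adapter_id = "BraylonDash/Mistral-7B-gsm8k"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(adapter_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # loads adapter_model.safetensors

prompt = " [INST] A farmer has 12 cows and buys 7 more. How many cows does he have now? [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(base.device)
out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```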
adapter_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.1",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 512,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 512,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "v_proj",
+     "down_proj",
+     "up_proj",
+     "o_proj",
+     "gate_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
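
Two things stand out in this adapter config: `r` and `lora_alpha` are both 512, far larger than the usual LoRA ranks of 8–64, and all seven attention and MLP projections are targeted. At 16-bit precision that works out to roughly 1.3B adapter parameters (~2.7 GB), which matches the size of the `adapter_model.safetensors` file added below. A sketch of building the same configuration with `peft` (reconstructed field-by-field from the JSON above, not taken from the author's training script):

```python
# Sketch: an equivalent LoraConfig, copied from adapter_config.json.
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

lora_config = LoraConfig(
    r=512,                 # unusually large rank; see note above
    lora_alpha=512,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["k_proj", "v_proj", "down_proj", "up_proj", "o_proj", "gate_proj", "q_proj"],
)

base = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # on Mistral-7B this reports roughly 1.3B trainable parameters
```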
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:062fe9d0c8f4c0d2bf2b831ac11a4edae6d44e9f7e01fb3f8d91f1e4e40969dc
+ size 2684416656
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.0,
+   "train_loss": 0.8479397050605333,
+   "train_runtime": 621.2868,
+   "train_samples": 7473,
+   "train_samples_per_second": 1.167,
+   "train_steps_per_second": 0.195
+ }
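
These throughput figures are internally consistent with the batch setup in the README: 1.167 samples/s over 621.3 s implies about 725 training samples, which presumably counts the packed sequences produced by the `generator` dataset (121 optimizer steps × total batch size 6), not the 7,473 raw examples. A quick arithmetic check:

```python
# Quick check of the reported throughput (values copied from all_results.json / README).
train_runtime = 621.2868            # seconds
samples_per_second = 1.167
steps_per_second = 0.195
total_train_batch_size = 6          # 2 per device x 3 GPUs

print(samples_per_second * train_runtime)   # ~725 packed sequences seen in one epoch
print(steps_per_second * train_runtime)     # ~121 optimizer steps, matching global_step below
print(121 * total_train_batch_size)         # 726 = steps x batch, within rounding of the above
```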
runs/Oct08_06-20-25_gpu4-119-5/events.out.tfevents.1728328885.gpu4-119-5.1531074.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a645f310e3cc81ee81ee49a81af788341325050c97cfbd5754eda34b95d93cf8
+ size 4445
runs/Oct08_06-30-18_gpu4-119-5/events.out.tfevents.1728329491.gpu4-119-5.1539054.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0509d4ce98a988e9424fc9f9b3c25e99c5477e4cf19afbb703482826c0a40ed6
+ size 8643
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
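
Note that `pad_token` is set to `"</s>"`, the EOS token: the Mistral tokenizer ships without a dedicated padding token, so reusing EOS for padding is the common workaround during supervised fine-tuning. A two-line sketch that mirrors this choice on a freshly loaded tokenizer:

```python
# Sketch: mirror the pad-token choice recorded in special_tokens_map.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
tok.pad_token = tok.eos_token  # "</s>"
```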
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "chat_template": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n\n{{- bos_token }}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.first and system_message is defined %}\n {{- ' [INST] ' + system_message + '\\n\\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + eos_token}}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n{%- endfor %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
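
The `chat_template` above encodes the Mistral instruct format: an optional leading system message is folded into the first user turn, user and assistant turns must alternate, user turns are wrapped in ` [INST] ... [/INST]`, and assistant turns are closed with `</s>`. A small sketch of rendering a conversation through it (the repo id is again an assumption; any tokenizer carrying this template behaves the same):

```python
# Sketch: render messages with the chat template from tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("BraylonDash/Mistral-7B-gsm8k")  # hypothetical repo id

messages = [
    {"role": "system", "content": "Solve the problem step by step."},
    {"role": "user", "content": "What is 12 * 7 + 5?"},
]
text = tokenizer.apply_chat_template(messages, tokenize=False)
print(text)
# Roughly: "<s> [INST] Solve the problem step by step.\n\nWhat is 12 * 7 + 5? [/INST]"
```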
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.0,
+   "train_loss": 0.8479397050605333,
+   "train_runtime": 621.2868,
+   "train_samples": 7473,
+   "train_samples_per_second": 1.167,
+   "train_steps_per_second": 0.195
+ }
trainer_state.json ADDED
@@ -0,0 +1,180 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 121,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.01,
+       "learning_rate": 1.5384615384615387e-06,
+       "loss": 1.1746,
+       "step": 1
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 7.692307692307694e-06,
+       "loss": 1.1302,
+       "step": 5
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 1.5384615384615387e-05,
+       "loss": 0.9861,
+       "step": 10
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 1.9983081582712684e-05,
+       "loss": 0.9154,
+       "step": 15
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 1.9793406217655516e-05,
+       "loss": 0.8791,
+       "step": 20
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 1.9396926207859085e-05,
+       "loss": 0.8825,
+       "step": 25
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 1.880201391180111e-05,
+       "loss": 0.8663,
+       "step": 30
+     },
+     {
+       "epoch": 0.29,
+       "learning_rate": 1.802123192755044e-05,
+       "loss": 0.8349,
+       "step": 35
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 1.7071067811865477e-05,
+       "loss": 0.8493,
+       "step": 40
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 1.5971585917027864e-05,
+       "loss": 0.8408,
+       "step": 45
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 1.4746003697476406e-05,
+       "loss": 0.8537,
+       "step": 50
+     },
+     {
+       "epoch": 0.45,
+       "learning_rate": 1.342020143325669e-05,
+       "loss": 0.8231,
+       "step": 55
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 1.2022175723320382e-05,
+       "loss": 0.8292,
+       "step": 60
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 1.0581448289104759e-05,
+       "loss": 0.81,
+       "step": 65
+     },
+     {
+       "epoch": 0.58,
+       "learning_rate": 9.128442572523418e-06,
+       "loss": 0.8043,
+       "step": 70
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 7.6938412925756e-06,
+       "loss": 0.8119,
+       "step": 75
+     },
+     {
+       "epoch": 0.66,
+       "learning_rate": 6.3079385268731575e-06,
+       "loss": 0.8052,
+       "step": 80
+     },
+     {
+       "epoch": 0.7,
+       "learning_rate": 5.000000000000003e-06,
+       "loss": 0.7873,
+       "step": 85
+     },
+     {
+       "epoch": 0.74,
+       "learning_rate": 3.797645087317401e-06,
+       "loss": 0.8023,
+       "step": 90
+     },
+     {
+       "epoch": 0.79,
+       "learning_rate": 2.726263584269513e-06,
+       "loss": 0.8073,
+       "step": 95
+     },
+     {
+       "epoch": 0.83,
+       "learning_rate": 1.808479557110081e-06,
+       "loss": 0.8061,
+       "step": 100
+     },
+     {
+       "epoch": 0.87,
+       "learning_rate": 1.0636735967658785e-06,
+       "loss": 0.7997,
+       "step": 105
+     },
+     {
+       "epoch": 0.91,
+       "learning_rate": 5.075735642696611e-07,
+       "loss": 0.7987,
+       "step": 110
+     },
+     {
+       "epoch": 0.95,
+       "learning_rate": 1.519224698779198e-07,
+       "loss": 0.7965,
+       "step": 115
+     },
+     {
+       "epoch": 0.99,
+       "learning_rate": 4.230499177994007e-09,
+       "loss": 0.8183,
+       "step": 120
+     },
+     {
+       "epoch": 1.0,
+       "step": 121,
+       "total_flos": 2375150469120.0,
+       "train_loss": 0.8479397050605333,
+       "train_runtime": 621.2868,
+       "train_samples_per_second": 1.167,
+       "train_steps_per_second": 0.195
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 121,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "total_flos": 2375150469120.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
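
The logged learning rates are consistent with the cosine schedule and `lr_scheduler_warmup_ratio: 0.1` from the README: with 121 total steps, warmup appears to span ceil(0.1 × 121) = 13 steps (the value at step 1 is 2e-5 / 13 ≈ 1.538e-6), after which the rate decays along a cosine curve to roughly zero. A sketch that reproduces the logged values; the exact warmup-step rounding is an inference from the log, not stated anywhere in the repo:

```python
# Sketch: recompute the logged learning rates under the README's scheduler settings.
import math

base_lr, total_steps, warmup_steps = 2e-5, 121, 13  # warmup_steps inferred from the log

def lr_at(step: int) -> float:
    if step < warmup_steps:
        return base_lr * step / warmup_steps  # linear warmup
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay

for step in (1, 10, 60, 120):
    print(step, lr_at(step))
# 1   -> ~1.538e-06  (logged 1.5384615384615387e-06)
# 10  -> ~1.538e-05  (logged 1.5384615384615387e-05)
# 60  -> ~1.202e-05  (logged 1.2022175723320382e-05)
# 120 -> ~4.23e-09   (logged 4.230499177994007e-09)
```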
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82f13e84b55cb0285e7494979cebe30b66616a43d816bbf5ca13854ed4954140
+ size 5688