Jiazheng Li committed on
Commit
7d35748
·
1 Parent(s): 9417bd0
README.md ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ library_name: peft
4
+ tags:
5
+ - llama-factory
6
+ - lora
7
+ - generated_from_trainer
8
+ base_model: mistralai/Mixtral-8x7B-Instruct-v0.1
9
+ model-index:
10
+ - name: sft_trained_woaqa_mixtral
11
+ results: []
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # sft_trained_woaqa_mixtral
18
+
19
+ This model is a fine-tuned version of [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) on the sft_wo_aqa_mistral dataset.
20
+ It achieves the following results on the evaluation set:
21
+ - Loss: 0.8062
22
+
23
+ ## Model description
24
+
25
+ More information needed
26
+
27
+ ## Intended uses & limitations
28
+
29
+ More information needed
30
+
31
+ ## Training and evaluation data
32
+
33
+ More information needed
34
+
35
+ ## Training procedure
36
+
37
+ ### Training hyperparameters
38
+
39
+ The following hyperparameters were used during training:
40
+ - learning_rate: 5e-05
41
+ - train_batch_size: 8
42
+ - eval_batch_size: 8
43
+ - seed: 42
44
+ - gradient_accumulation_steps: 8
45
+ - total_train_batch_size: 64
46
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
47
+ - lr_scheduler_type: cosine
48
+ - num_epochs: 4.0
49
+ - mixed_precision_training: Native AMP
50
+
51
+ ### Training results
52
+
53
+ | Training Loss | Epoch | Step | Validation Loss |
54
+ |:-------------:|:-----:|:----:|:---------------:|
55
+ | 0.8668 | 0.63 | 100 | 0.8571 |
56
+ | 0.7837 | 1.26 | 200 | 0.8230 |
57
+ | 0.7824 | 1.9 | 300 | 0.8058 |
58
+ | 0.7401 | 2.53 | 400 | 0.8059 |
59
+ | 0.7101 | 3.16 | 500 | 0.8072 |
60
+ | 0.7037 | 3.79 | 600 | 0.8062 |
61
+
62
+
63
+ ### Framework versions
64
+
65
+ - PEFT 0.10.0
66
+ - Transformers 4.38.2
67
+ - Pytorch 2.2.1+cu121
68
+ - Datasets 2.18.0
69
+ - Tokenizers 0.15.2
adapter_config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "mistralai/Mixtral-8x7B-Instruct-v0.1",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 16,
14
+ "lora_dropout": 0.0,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 8,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "w3",
24
+ "w1",
25
+ "w2",
26
+ "v_proj",
27
+ "o_proj",
28
+ "q_proj",
29
+ "gate",
30
+ "k_proj"
31
+ ],
32
+ "task_type": "CAUSAL_LM",
33
+ "use_dora": false,
34
+ "use_rslora": false
35
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95394e419230e4e32bc7a3243d358ebc852aaf01d6f99d31e9fe116b41372fe9
3
+ size 484722304
all_results.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.99,
3
+ "eval_loss": 0.8061766028404236,
4
+ "eval_runtime": 867.2475,
5
+ "eval_samples_per_second": 3.195,
6
+ "eval_steps_per_second": 0.4,
7
+ "train_loss": 0.7920433780815028,
8
+ "train_runtime": 47185.3474,
9
+ "train_samples_per_second": 0.858,
10
+ "train_steps_per_second": 0.013
11
+ }
eval_results.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.99,
3
+ "eval_loss": 0.8061766028404236,
4
+ "eval_runtime": 867.2475,
5
+ "eval_samples_per_second": 3.195,
6
+ "eval_steps_per_second": 0.4
7
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "</s>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
3
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ }
29
+ },
30
+ "additional_special_tokens": [],
31
+ "bos_token": "<s>",
32
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{{ '<s>' + system_message }}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ ' [INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' }}{% endif %}{% endfor %}",
33
+ "clean_up_tokenization_spaces": false,
34
+ "eos_token": "</s>",
35
+ "legacy": true,
36
+ "model_max_length": 1000000000000000019884624838656,
37
+ "pad_token": "</s>",
38
+ "padding_side": "right",
39
+ "sp_model_kwargs": {},
40
+ "spaces_between_special_tokens": false,
41
+ "split_special_tokens": false,
42
+ "tokenizer_class": "LlamaTokenizer",
43
+ "unk_token": "<unk>",
44
+ "use_default_system_prompt": false
45
+ }
train_results.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.99,
3
+ "train_loss": 0.7920433780815028,
4
+ "train_runtime": 47185.3474,
5
+ "train_samples_per_second": 0.858,
6
+ "train_steps_per_second": 0.013
7
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"current_steps": 20, "total_steps": 632, "loss": 1.3016, "learning_rate": 4.9876553763060684e-05, "epoch": 0.13, "percentage": 3.16, "elapsed_time": "0:22:10", "remaining_time": "11:18:44"}
2
+ {"current_steps": 40, "total_steps": 632, "loss": 0.9953, "learning_rate": 4.950743417011591e-05, "epoch": 0.25, "percentage": 6.33, "elapsed_time": "0:44:06", "remaining_time": "10:52:50"}
3
+ {"current_steps": 60, "total_steps": 632, "loss": 0.925, "learning_rate": 4.889628653514402e-05, "epoch": 0.38, "percentage": 9.49, "elapsed_time": "1:09:28", "remaining_time": "11:02:22"}
4
+ {"current_steps": 80, "total_steps": 632, "loss": 0.8788, "learning_rate": 4.804914636820517e-05, "epoch": 0.51, "percentage": 12.66, "elapsed_time": "1:31:29", "remaining_time": "10:31:14"}
5
+ {"current_steps": 100, "total_steps": 632, "loss": 0.8668, "learning_rate": 4.6974379770560846e-05, "epoch": 0.63, "percentage": 15.82, "elapsed_time": "1:53:23", "remaining_time": "10:03:12"}
6
+ {"current_steps": 100, "total_steps": 632, "eval_loss": 0.8571113348007202, "epoch": 0.63, "percentage": 15.82, "elapsed_time": "2:07:51", "remaining_time": "11:20:11"}
7
+ {"current_steps": 120, "total_steps": 632, "loss": 0.8488, "learning_rate": 4.5682600813576435e-05, "epoch": 0.76, "percentage": 18.99, "elapsed_time": "2:29:50", "remaining_time": "10:39:21"}
8
+ {"current_steps": 140, "total_steps": 632, "loss": 0.8462, "learning_rate": 4.41865667173477e-05, "epoch": 0.88, "percentage": 22.15, "elapsed_time": "2:51:49", "remaining_time": "10:03:51"}
9
+ {"current_steps": 160, "total_steps": 632, "loss": 0.827, "learning_rate": 4.2501051864235636e-05, "epoch": 1.01, "percentage": 25.32, "elapsed_time": "3:14:53", "remaining_time": "9:34:54"}
10
+ {"current_steps": 180, "total_steps": 632, "loss": 0.7885, "learning_rate": 4.0642701891514e-05, "epoch": 1.14, "percentage": 28.48, "elapsed_time": "3:37:46", "remaining_time": "9:06:50"}
11
+ {"current_steps": 200, "total_steps": 632, "loss": 0.7837, "learning_rate": 3.862986930406669e-05, "epoch": 1.26, "percentage": 31.65, "elapsed_time": "3:59:48", "remaining_time": "8:37:58"}
12
+ {"current_steps": 200, "total_steps": 632, "eval_loss": 0.8230095505714417, "epoch": 1.26, "percentage": 31.65, "elapsed_time": "4:14:17", "remaining_time": "9:09:16"}
13
+ {"current_steps": 220, "total_steps": 632, "loss": 0.7938, "learning_rate": 3.6482432230574446e-05, "epoch": 1.39, "percentage": 34.81, "elapsed_time": "4:36:16", "remaining_time": "8:37:23"}
14
+ {"current_steps": 240, "total_steps": 632, "loss": 0.7819, "learning_rate": 3.4221598113100195e-05, "epoch": 1.52, "percentage": 37.97, "elapsed_time": "4:58:13", "remaining_time": "8:07:06"}
15
+ {"current_steps": 260, "total_steps": 632, "loss": 0.7877, "learning_rate": 3.186969426877563e-05, "epoch": 1.64, "percentage": 41.14, "elapsed_time": "5:20:11", "remaining_time": "7:38:07"}
16
+ {"current_steps": 280, "total_steps": 632, "loss": 0.7918, "learning_rate": 2.9449947391938766e-05, "epoch": 1.77, "percentage": 44.3, "elapsed_time": "5:42:00", "remaining_time": "7:09:57"}
17
+ {"current_steps": 300, "total_steps": 632, "loss": 0.7824, "learning_rate": 2.6986254174292862e-05, "epoch": 1.9, "percentage": 47.47, "elapsed_time": "6:04:00", "remaining_time": "6:42:49"}
18
+ {"current_steps": 300, "total_steps": 632, "eval_loss": 0.8058096766471863, "epoch": 1.9, "percentage": 47.47, "elapsed_time": "6:18:27", "remaining_time": "6:58:50"}
19
+ {"current_steps": 320, "total_steps": 632, "loss": 0.7741, "learning_rate": 2.4502945308373246e-05, "epoch": 2.02, "percentage": 50.63, "elapsed_time": "6:40:24", "remaining_time": "6:30:24"}
20
+ {"current_steps": 340, "total_steps": 632, "loss": 0.7369, "learning_rate": 2.2024545204952383e-05, "epoch": 2.15, "percentage": 53.8, "elapsed_time": "7:02:25", "remaining_time": "6:02:47"}
21
+ {"current_steps": 360, "total_steps": 632, "loss": 0.7365, "learning_rate": 1.957552979734205e-05, "epoch": 2.27, "percentage": 56.96, "elapsed_time": "7:24:19", "remaining_time": "5:35:42"}
22
+ {"current_steps": 380, "total_steps": 632, "loss": 0.7463, "learning_rate": 1.7180084824444325e-05, "epoch": 2.4, "percentage": 60.13, "elapsed_time": "7:46:14", "remaining_time": "5:09:11"}
23
+ {"current_steps": 400, "total_steps": 632, "loss": 0.7401, "learning_rate": 1.4861866979675154e-05, "epoch": 2.53, "percentage": 63.29, "elapsed_time": "8:08:15", "remaining_time": "4:43:11"}
24
+ {"current_steps": 400, "total_steps": 632, "eval_loss": 0.8059037923812866, "epoch": 2.53, "percentage": 63.29, "elapsed_time": "8:22:49", "remaining_time": "4:51:38"}
25
+ {"current_steps": 420, "total_steps": 632, "loss": 0.7332, "learning_rate": 1.2643770284581929e-05, "epoch": 2.65, "percentage": 66.46, "elapsed_time": "8:44:49", "remaining_time": "4:24:54"}
26
+ {"current_steps": 440, "total_steps": 632, "loss": 0.7364, "learning_rate": 1.0547699994378787e-05, "epoch": 2.78, "percentage": 69.62, "elapsed_time": "9:06:48", "remaining_time": "3:58:36"}
27
+ {"current_steps": 460, "total_steps": 632, "loss": 0.7318, "learning_rate": 8.594356268240616e-06, "epoch": 2.91, "percentage": 72.78, "elapsed_time": "9:28:46", "remaining_time": "3:32:40"}
28
+ {"current_steps": 480, "total_steps": 632, "loss": 0.7222, "learning_rate": 6.803029740762648e-06, "epoch": 3.03, "percentage": 75.95, "elapsed_time": "9:50:39", "remaining_time": "3:07:02"}
29
+ {"current_steps": 500, "total_steps": 632, "loss": 0.7101, "learning_rate": 5.191411013460645e-06, "epoch": 3.16, "percentage": 79.11, "elapsed_time": "10:12:38", "remaining_time": "2:41:44"}
30
+ {"current_steps": 500, "total_steps": 632, "eval_loss": 0.807178795337677, "epoch": 3.16, "percentage": 79.11, "elapsed_time": "10:27:05", "remaining_time": "2:45:33"}
31
+ {"current_steps": 520, "total_steps": 632, "loss": 0.7135, "learning_rate": 3.775415947715899e-06, "epoch": 3.29, "percentage": 82.28, "elapsed_time": "10:49:03", "remaining_time": "2:19:47"}
32
+ {"current_steps": 540, "total_steps": 632, "loss": 0.7045, "learning_rate": 2.5690284845196923e-06, "epoch": 3.41, "percentage": 85.44, "elapsed_time": "11:10:58", "remaining_time": "1:54:18"}
33
+ {"current_steps": 560, "total_steps": 632, "loss": 0.7057, "learning_rate": 1.5841625432818057e-06, "epoch": 3.54, "percentage": 88.61, "elapsed_time": "11:32:56", "remaining_time": "1:29:05"}
34
+ {"current_steps": 580, "total_steps": 632, "loss": 0.7044, "learning_rate": 8.305443635490711e-07, "epoch": 3.67, "percentage": 91.77, "elapsed_time": "11:54:45", "remaining_time": "1:04:04"}
35
+ {"current_steps": 600, "total_steps": 632, "loss": 0.7037, "learning_rate": 3.1561645159166597e-07, "epoch": 3.79, "percentage": 94.94, "elapsed_time": "12:16:39", "remaining_time": "0:39:17"}
36
+ {"current_steps": 600, "total_steps": 632, "eval_loss": 0.8061766028404236, "epoch": 3.79, "percentage": 94.94, "elapsed_time": "12:31:07", "remaining_time": "0:40:03"}
37
+ {"current_steps": 620, "total_steps": 632, "loss": 0.7027, "learning_rate": 4.4464080451675494e-08, "epoch": 3.92, "percentage": 98.1, "elapsed_time": "12:53:10", "remaining_time": "0:14:57"}
38
+ {"current_steps": 632, "total_steps": 632, "epoch": 3.99, "percentage": 100.0, "elapsed_time": "13:06:25", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,295 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.993680884676145,
5
+ "eval_steps": 100,
6
+ "global_step": 632,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.13,
13
+ "grad_norm": 0.37133172154426575,
14
+ "learning_rate": 4.9876553763060684e-05,
15
+ "loss": 1.3016,
16
+ "step": 20
17
+ },
18
+ {
19
+ "epoch": 0.25,
20
+ "grad_norm": 0.3215538263320923,
21
+ "learning_rate": 4.950743417011591e-05,
22
+ "loss": 0.9953,
23
+ "step": 40
24
+ },
25
+ {
26
+ "epoch": 0.38,
27
+ "grad_norm": 0.33872345089912415,
28
+ "learning_rate": 4.889628653514402e-05,
29
+ "loss": 0.925,
30
+ "step": 60
31
+ },
32
+ {
33
+ "epoch": 0.51,
34
+ "grad_norm": 0.3175918459892273,
35
+ "learning_rate": 4.804914636820517e-05,
36
+ "loss": 0.8788,
37
+ "step": 80
38
+ },
39
+ {
40
+ "epoch": 0.63,
41
+ "grad_norm": 0.3202904164791107,
42
+ "learning_rate": 4.6974379770560846e-05,
43
+ "loss": 0.8668,
44
+ "step": 100
45
+ },
46
+ {
47
+ "epoch": 0.63,
48
+ "eval_loss": 0.8571113348007202,
49
+ "eval_runtime": 868.2347,
50
+ "eval_samples_per_second": 3.192,
51
+ "eval_steps_per_second": 0.4,
52
+ "step": 100
53
+ },
54
+ {
55
+ "epoch": 0.76,
56
+ "grad_norm": 0.33093100786209106,
57
+ "learning_rate": 4.5682600813576435e-05,
58
+ "loss": 0.8488,
59
+ "step": 120
60
+ },
61
+ {
62
+ "epoch": 0.88,
63
+ "grad_norm": 0.3297623097896576,
64
+ "learning_rate": 4.41865667173477e-05,
65
+ "loss": 0.8462,
66
+ "step": 140
67
+ },
68
+ {
69
+ "epoch": 1.01,
70
+ "grad_norm": 0.3524036705493927,
71
+ "learning_rate": 4.2501051864235636e-05,
72
+ "loss": 0.827,
73
+ "step": 160
74
+ },
75
+ {
76
+ "epoch": 1.14,
77
+ "grad_norm": 0.3592537045478821,
78
+ "learning_rate": 4.0642701891514e-05,
79
+ "loss": 0.7885,
80
+ "step": 180
81
+ },
82
+ {
83
+ "epoch": 1.26,
84
+ "grad_norm": 0.3888987600803375,
85
+ "learning_rate": 3.862986930406669e-05,
86
+ "loss": 0.7837,
87
+ "step": 200
88
+ },
89
+ {
90
+ "epoch": 1.26,
91
+ "eval_loss": 0.8230095505714417,
92
+ "eval_runtime": 869.2538,
93
+ "eval_samples_per_second": 3.188,
94
+ "eval_steps_per_second": 0.399,
95
+ "step": 200
96
+ },
97
+ {
98
+ "epoch": 1.39,
99
+ "grad_norm": 0.380818247795105,
100
+ "learning_rate": 3.6482432230574446e-05,
101
+ "loss": 0.7938,
102
+ "step": 220
103
+ },
104
+ {
105
+ "epoch": 1.52,
106
+ "grad_norm": 0.3564074635505676,
107
+ "learning_rate": 3.4221598113100195e-05,
108
+ "loss": 0.7819,
109
+ "step": 240
110
+ },
111
+ {
112
+ "epoch": 1.64,
113
+ "grad_norm": 0.3780010938644409,
114
+ "learning_rate": 3.186969426877563e-05,
115
+ "loss": 0.7877,
116
+ "step": 260
117
+ },
118
+ {
119
+ "epoch": 1.77,
120
+ "grad_norm": 0.36975908279418945,
121
+ "learning_rate": 2.9449947391938766e-05,
122
+ "loss": 0.7918,
123
+ "step": 280
124
+ },
125
+ {
126
+ "epoch": 1.9,
127
+ "grad_norm": 0.39148128032684326,
128
+ "learning_rate": 2.6986254174292862e-05,
129
+ "loss": 0.7824,
130
+ "step": 300
131
+ },
132
+ {
133
+ "epoch": 1.9,
134
+ "eval_loss": 0.8058096766471863,
135
+ "eval_runtime": 867.7611,
136
+ "eval_samples_per_second": 3.193,
137
+ "eval_steps_per_second": 0.4,
138
+ "step": 300
139
+ },
140
+ {
141
+ "epoch": 2.02,
142
+ "grad_norm": 0.36495909094810486,
143
+ "learning_rate": 2.4502945308373246e-05,
144
+ "loss": 0.7741,
145
+ "step": 320
146
+ },
147
+ {
148
+ "epoch": 2.15,
149
+ "grad_norm": 0.40916556119918823,
150
+ "learning_rate": 2.2024545204952383e-05,
151
+ "loss": 0.7369,
152
+ "step": 340
153
+ },
154
+ {
155
+ "epoch": 2.27,
156
+ "grad_norm": 0.42436033487319946,
157
+ "learning_rate": 1.957552979734205e-05,
158
+ "loss": 0.7365,
159
+ "step": 360
160
+ },
161
+ {
162
+ "epoch": 2.4,
163
+ "grad_norm": 0.43983975052833557,
164
+ "learning_rate": 1.7180084824444325e-05,
165
+ "loss": 0.7463,
166
+ "step": 380
167
+ },
168
+ {
169
+ "epoch": 2.53,
170
+ "grad_norm": 0.44617146253585815,
171
+ "learning_rate": 1.4861866979675154e-05,
172
+ "loss": 0.7401,
173
+ "step": 400
174
+ },
175
+ {
176
+ "epoch": 2.53,
177
+ "eval_loss": 0.8059037923812866,
178
+ "eval_runtime": 873.5556,
179
+ "eval_samples_per_second": 3.172,
180
+ "eval_steps_per_second": 0.397,
181
+ "step": 400
182
+ },
183
+ {
184
+ "epoch": 2.65,
185
+ "grad_norm": 0.4369719624519348,
186
+ "learning_rate": 1.2643770284581929e-05,
187
+ "loss": 0.7332,
188
+ "step": 420
189
+ },
190
+ {
191
+ "epoch": 2.78,
192
+ "grad_norm": 0.4235495328903198,
193
+ "learning_rate": 1.0547699994378787e-05,
194
+ "loss": 0.7364,
195
+ "step": 440
196
+ },
197
+ {
198
+ "epoch": 2.91,
199
+ "grad_norm": 0.4584214389324188,
200
+ "learning_rate": 8.594356268240616e-06,
201
+ "loss": 0.7318,
202
+ "step": 460
203
+ },
204
+ {
205
+ "epoch": 3.03,
206
+ "grad_norm": 0.4329874813556671,
207
+ "learning_rate": 6.803029740762648e-06,
208
+ "loss": 0.7222,
209
+ "step": 480
210
+ },
211
+ {
212
+ "epoch": 3.16,
213
+ "grad_norm": 0.5470691919326782,
214
+ "learning_rate": 5.191411013460645e-06,
215
+ "loss": 0.7101,
216
+ "step": 500
217
+ },
218
+ {
219
+ "epoch": 3.16,
220
+ "eval_loss": 0.807178795337677,
221
+ "eval_runtime": 867.2257,
222
+ "eval_samples_per_second": 3.195,
223
+ "eval_steps_per_second": 0.4,
224
+ "step": 500
225
+ },
226
+ {
227
+ "epoch": 3.29,
228
+ "grad_norm": 0.4557144343852997,
229
+ "learning_rate": 3.775415947715899e-06,
230
+ "loss": 0.7135,
231
+ "step": 520
232
+ },
233
+ {
234
+ "epoch": 3.41,
235
+ "grad_norm": 0.46527382731437683,
236
+ "learning_rate": 2.5690284845196923e-06,
237
+ "loss": 0.7045,
238
+ "step": 540
239
+ },
240
+ {
241
+ "epoch": 3.54,
242
+ "grad_norm": 0.4702458679676056,
243
+ "learning_rate": 1.5841625432818057e-06,
244
+ "loss": 0.7057,
245
+ "step": 560
246
+ },
247
+ {
248
+ "epoch": 3.67,
249
+ "grad_norm": 0.4915095865726471,
250
+ "learning_rate": 8.305443635490711e-07,
251
+ "loss": 0.7044,
252
+ "step": 580
253
+ },
254
+ {
255
+ "epoch": 3.79,
256
+ "grad_norm": 0.5043957233428955,
257
+ "learning_rate": 3.1561645159166597e-07,
258
+ "loss": 0.7037,
259
+ "step": 600
260
+ },
261
+ {
262
+ "epoch": 3.79,
263
+ "eval_loss": 0.8061766028404236,
264
+ "eval_runtime": 867.9324,
265
+ "eval_samples_per_second": 3.193,
266
+ "eval_steps_per_second": 0.4,
267
+ "step": 600
268
+ },
269
+ {
270
+ "epoch": 3.92,
271
+ "grad_norm": 0.4672119915485382,
272
+ "learning_rate": 4.4464080451675494e-08,
273
+ "loss": 0.7027,
274
+ "step": 620
275
+ },
276
+ {
277
+ "epoch": 3.99,
278
+ "step": 632,
279
+ "total_flos": 1.137224723506643e+19,
280
+ "train_loss": 0.7920433780815028,
281
+ "train_runtime": 47185.3474,
282
+ "train_samples_per_second": 0.858,
283
+ "train_steps_per_second": 0.013
284
+ }
285
+ ],
286
+ "logging_steps": 20,
287
+ "max_steps": 632,
288
+ "num_input_tokens_seen": 0,
289
+ "num_train_epochs": 4,
290
+ "save_steps": 100,
291
+ "total_flos": 1.137224723506643e+19,
292
+ "train_batch_size": 8,
293
+ "trial_name": null,
294
+ "trial_params": null
295
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b573d48fc5f9488e07fd9eb672eddaf1b96a03a8caa287045c162f31893e3fd
3
+ size 5112
training_eval_loss.png ADDED
training_loss.png ADDED