Kitajiang committed on
Commit 671c258
1 Parent(s): 5e156b2

Upload 16 files

README.md ADDED
---
base_model: Qwen/Qwen2-7B-Instruct
library_name: peft
license: other
tags:
- llama-factory
- lora
- generated_from_trainer
model-index:
- name: sft3
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# sft3

This model is a fine-tuned version of [Qwen/Qwen2-7B-Instruct](https://huggingface.co/Qwen/Qwen2-7B-Instruct) on the identity and the alpaca_en_demo datasets.
It achieves the following results on the evaluation set:
- Loss: 1.2586

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 8
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3.0
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- PEFT 0.11.1
- Transformers 4.41.2
- Pytorch 2.2.2+cu121
- Datasets 2.18.0
- Tokenizers 0.19.1
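A minimal usage sketch for this adapter with the framework versions listed above. The repo id `Kitajiang/sft3` is a placeholder assumption; substitute the actual repo id or a local path to the files in this commit.

```python
# Minimal inference sketch (assumption: the adapter is published as "Kitajiang/sft3").
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "Qwen/Qwen2-7B-Instruct"
adapter_id = "Kitajiang/sft3"  # placeholder; use the real repo id or a local path

# Load the tokenizer from the adapter repo so the uploaded chat template and
# special tokens (e.g. <|eot_id|>) are used.
tokenizer = AutoTokenizer.from_pretrained(adapter_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter_id)
model.eval()

messages = [{"role": "user", "content": "Who are you?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

with torch.no_grad():
    out = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```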
adapter_config.json ADDED
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "Qwen/Qwen2-7B-Instruct",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 16,
  "lora_dropout": 0.0,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "o_proj",
    "v_proj",
    "q_proj",
    "up_proj",
    "down_proj",
    "gate_proj",
    "k_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": false
}
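The adapter configuration above maps directly onto a PEFT `LoraConfig`. A sketch of the equivalent object, useful for reproducing a comparable setup; this is a reconstruction, not the original LLaMA-Factory training code.

```python
# Rough equivalent of adapter_config.json expressed as a PEFT LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                  # LoRA rank
    lora_alpha=16,        # scaling numerator (alpha / r = 2.0)
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[      # all attention and MLP projections of Qwen2
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```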
adapter_model.safetensors ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:50341d47ea497ad790c540bbc124dc9380c213741deef962611f526af3d571a3
size 80792096
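This entry is a Git LFS pointer rather than the weights themselves; after the real `adapter_model.safetensors` has been downloaded, the pointer's `oid` and `size` can be checked locally. A small sketch, assuming the file sits in the working directory:

```python
# Verify a downloaded LFS object against the pointer's sha256 oid and size.
import hashlib
import os

path = "adapter_model.safetensors"
expected_oid = "50341d47ea497ad790c540bbc124dc9380c213741deef962611f526af3d571a3"
expected_size = 80792096

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("adapter_model.safetensors matches the LFS pointer")
```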
added_tokens.json ADDED
{
  "<|endoftext|>": 151643,
  "<|eot_id|>": 151646,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644
}
all_results.json ADDED
{
  "epoch": 2.984709480122324,
  "eval_loss": 1.258605718612671,
  "eval_runtime": 8.3287,
  "eval_samples_per_second": 13.207,
  "eval_steps_per_second": 13.207,
  "total_flos": 2.6312102502948864e+16,
  "train_loss": 1.1005799705213537,
  "train_runtime": 684.9766,
  "train_samples_per_second": 4.296,
  "train_steps_per_second": 0.534
}
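These aggregates are mutually consistent with the README hyperparameters (effective batch size 1 × 8 = 8, 366 optimizer steps over roughly 2.98 epochs). A quick sanity-check sketch using only the numbers above:

```python
# Cross-check the aggregate training metrics against the README hyperparameters.
train_batch_size = 1
grad_accum = 8
effective_batch = train_batch_size * grad_accum            # 8, matches total_train_batch_size

train_runtime = 684.9766
train_steps_per_second = 0.534
train_samples_per_second = 4.296

approx_steps = train_runtime * train_steps_per_second      # ~365.8, matches 366 total steps
approx_samples = train_runtime * train_samples_per_second  # ~2943 samples seen in ~2.98 epochs
print(effective_batch, round(approx_steps), round(approx_samples))
```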
eval_results.json ADDED
{
  "epoch": 2.984709480122324,
  "eval_loss": 1.258605718612671,
  "eval_runtime": 8.3287,
  "eval_samples_per_second": 13.207,
  "eval_steps_per_second": 13.207
}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "eos_token": {
    "content": "<|eot_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|eot_id|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "bos_token": null,
  "chat_template": "{% set system_message = 'You are a helpful assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|>\n\n' + system_message + '<|eot_id|>' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|start_header_id|>user<|end_header_id|>\n\n' + content + '<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|eot_id|>' }}{% endif %}{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|eot_id|>",
  "errors": "replace",
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
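Note that the `chat_template` above emits Llama-3-style `<|start_header_id|>`/`<|eot_id|>` markers rather than Qwen2's usual ChatML `<|im_start|>`/`<|im_end|>` turns. A small sketch of rendering a conversation through it; the repo id is again the `Kitajiang/sft3` placeholder, and a local path to these tokenizer files works the same way.

```python
# Render a conversation through the uploaded chat template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Kitajiang/sft3")  # placeholder repo id

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "hi"},
]
# This template already appends the assistant header after each user turn,
# so no separate generation prompt is required.
text = tokenizer.apply_chat_template(messages, tokenize=False)
print(text)
```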
train_results.json ADDED
{
  "epoch": 2.984709480122324,
  "total_flos": 2.6312102502948864e+16,
  "train_loss": 1.1005799705213537,
  "train_runtime": 684.9766,
  "train_samples_per_second": 4.296,
  "train_steps_per_second": 0.534
}
trainer_log.jsonl ADDED
{"current_steps": 10, "total_steps": 366, "loss": 1.8972, "learning_rate": 2.702702702702703e-05, "epoch": 0.08154943934760449, "percentage": 2.73, "elapsed_time": "0:00:17", "remaining_time": "0:10:30", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 20, "total_steps": 366, "loss": 1.7883, "learning_rate": 5.405405405405406e-05, "epoch": 0.16309887869520898, "percentage": 5.46, "elapsed_time": "0:00:34", "remaining_time": "0:09:56", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 30, "total_steps": 366, "loss": 1.3865, "learning_rate": 8.108108108108109e-05, "epoch": 0.24464831804281345, "percentage": 8.2, "elapsed_time": "0:00:51", "remaining_time": "0:09:33", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 40, "total_steps": 366, "loss": 1.3647, "learning_rate": 9.999088210158001e-05, "epoch": 0.32619775739041795, "percentage": 10.93, "elapsed_time": "0:01:08", "remaining_time": "0:09:16", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 50, "total_steps": 366, "loss": 1.2114, "learning_rate": 9.967210469256656e-05, "epoch": 0.4077471967380224, "percentage": 13.66, "elapsed_time": "0:01:25", "remaining_time": "0:08:59", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 60, "total_steps": 366, "loss": 1.2255, "learning_rate": 9.890075235781779e-05, "epoch": 0.4892966360856269, "percentage": 16.39, "elapsed_time": "0:01:42", "remaining_time": "0:08:42", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 70, "total_steps": 366, "loss": 1.3242, "learning_rate": 9.768385308070138e-05, "epoch": 0.5708460754332314, "percentage": 19.13, "elapsed_time": "0:02:00", "remaining_time": "0:08:29", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 80, "total_steps": 366, "loss": 1.106, "learning_rate": 9.603249433382144e-05, "epoch": 0.6523955147808359, "percentage": 21.86, "elapsed_time": "0:02:18", "remaining_time": "0:08:16", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 90, "total_steps": 366, "loss": 1.1686, "learning_rate": 9.396172205829234e-05, "epoch": 0.7339449541284404, "percentage": 24.59, "elapsed_time": "0:02:36", "remaining_time": "0:08:00", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 100, "total_steps": 366, "loss": 1.148, "learning_rate": 9.149040357641929e-05, "epoch": 0.8154943934760448, "percentage": 27.32, "elapsed_time": "0:02:54", "remaining_time": "0:07:44", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 110, "total_steps": 366, "loss": 1.3969, "learning_rate": 8.864105568682244e-05, "epoch": 0.8970438328236493, "percentage": 30.05, "elapsed_time": "0:03:11", "remaining_time": "0:07:26", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 120, "total_steps": 366, "loss": 1.1595, "learning_rate": 8.543963950827279e-05, "epoch": 0.9785932721712538, "percentage": 32.79, "elapsed_time": "0:03:29", "remaining_time": "0:07:09", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 130, "total_steps": 366, "loss": 1.091, "learning_rate": 8.191532394146865e-05, "epoch": 1.0601427115188584, "percentage": 35.52, "elapsed_time": "0:03:47", "remaining_time": "0:06:52", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 140, "total_steps": 366, "loss": 1.0718, "learning_rate": 7.810021990391164e-05, "epoch": 1.1416921508664628, "percentage": 38.25, "elapsed_time": "0:04:05", "remaining_time": "0:06:36", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 150, "total_steps": 366, "loss": 1.0833, "learning_rate": 7.402908775933419e-05, "epoch": 1.2232415902140672, "percentage": 40.98, "elapsed_time": "0:04:23", "remaining_time": "0:06:19", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 160, "total_steps": 366, "loss": 1.073, "learning_rate": 6.973902060736226e-05, "epoch": 1.3047910295616718, "percentage": 43.72, "elapsed_time": "0:04:41", "remaining_time": "0:06:02", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 170, "total_steps": 366, "loss": 1.0823, "learning_rate": 6.526910631903973e-05, "epoch": 1.3863404689092762, "percentage": 46.45, "elapsed_time": "0:04:59", "remaining_time": "0:05:45", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 180, "total_steps": 366, "loss": 0.9893, "learning_rate": 6.0660071397493514e-05, "epoch": 1.4678899082568808, "percentage": 49.18, "elapsed_time": "0:05:16", "remaining_time": "0:05:27", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 190, "total_steps": 366, "loss": 0.9235, "learning_rate": 5.5953909908613114e-05, "epoch": 1.5494393476044852, "percentage": 51.91, "elapsed_time": "0:05:34", "remaining_time": "0:05:09", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 200, "total_steps": 366, "loss": 1.012, "learning_rate": 5.119350086265004e-05, "epoch": 1.6309887869520896, "percentage": 54.64, "elapsed_time": "0:05:51", "remaining_time": "0:04:51", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 210, "total_steps": 366, "loss": 1.0746, "learning_rate": 4.64222175328687e-05, "epoch": 1.7125382262996942, "percentage": 57.38, "elapsed_time": "0:06:09", "remaining_time": "0:04:34", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 220, "total_steps": 366, "loss": 1.0924, "learning_rate": 4.1683532270843504e-05, "epoch": 1.7940876656472988, "percentage": 60.11, "elapsed_time": "0:06:27", "remaining_time": "0:04:16", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 230, "total_steps": 366, "loss": 1.0545, "learning_rate": 3.7020620419029094e-05, "epoch": 1.8756371049949032, "percentage": 62.84, "elapsed_time": "0:06:44", "remaining_time": "0:03:59", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 240, "total_steps": 366, "loss": 1.0335, "learning_rate": 3.2475966929454504e-05, "epoch": 1.9571865443425076, "percentage": 65.57, "elapsed_time": "0:07:04", "remaining_time": "0:03:42", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 250, "total_steps": 366, "loss": 0.9929, "learning_rate": 2.8090979272736662e-05, "epoch": 2.038735983690112, "percentage": 68.31, "elapsed_time": "0:07:24", "remaining_time": "0:03:26", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 260, "total_steps": 366, "loss": 0.9195, "learning_rate": 2.3905610164295394e-05, "epoch": 2.120285423037717, "percentage": 71.04, "elapsed_time": "0:07:45", "remaining_time": "0:03:09", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 270, "total_steps": 366, "loss": 0.8906, "learning_rate": 1.995799354520598e-05, "epoch": 2.2018348623853212, "percentage": 73.77, "elapsed_time": "0:08:07", "remaining_time": "0:02:53", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 280, "total_steps": 366, "loss": 0.8852, "learning_rate": 1.6284097134357536e-05, "epoch": 2.2833843017329256, "percentage": 76.5, "elapsed_time": "0:08:26", "remaining_time": "0:02:35", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 290, "total_steps": 366, "loss": 0.8899, "learning_rate": 1.2917394717602121e-05, "epoch": 2.36493374108053, "percentage": 79.23, "elapsed_time": "0:08:48", "remaining_time": "0:02:18", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 300, "total_steps": 366, "loss": 0.9932, "learning_rate": 9.888561159748993e-06, "epoch": 2.4464831804281344, "percentage": 81.97, "elapsed_time": "0:09:08", "remaining_time": "0:02:00", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 310, "total_steps": 366, "loss": 0.8445, "learning_rate": 7.225192918226214e-06, "epoch": 2.528032619775739, "percentage": 84.7, "elapsed_time": "0:09:30", "remaining_time": "0:01:43", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 320, "total_steps": 366, "loss": 0.8961, "learning_rate": 4.951556604879048e-06, "epoch": 2.6095820591233436, "percentage": 87.43, "elapsed_time": "0:09:50", "remaining_time": "0:01:24", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 330, "total_steps": 366, "loss": 0.9571, "learning_rate": 3.0883678868214806e-06, "epoch": 2.691131498470948, "percentage": 90.16, "elapsed_time": "0:10:11", "remaining_time": "0:01:06", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 340, "total_steps": 366, "loss": 0.9048, "learning_rate": 1.6526027408301226e-06, "epoch": 2.7726809378185524, "percentage": 92.9, "elapsed_time": "0:10:32", "remaining_time": "0:00:48", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 350, "total_steps": 366, "loss": 0.8868, "learning_rate": 6.573427809888067e-07, "epoch": 2.8542303771661572, "percentage": 95.63, "elapsed_time": "0:10:52", "remaining_time": "0:00:29", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 360, "total_steps": 366, "loss": 0.9579, "learning_rate": 1.1165606884234181e-07, "epoch": 2.9357798165137616, "percentage": 98.36, "elapsed_time": "0:11:12", "remaining_time": "0:00:11", "throughput": "0.00", "total_tokens": 0}
{"current_steps": 366, "total_steps": 366, "epoch": 2.984709480122324, "percentage": 100.0, "elapsed_time": "0:11:24", "remaining_time": "0:00:00", "throughput": "0.00", "total_tokens": 0}
trainer_state.json ADDED
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.984709480122324,
  "eval_steps": 500,
  "global_step": 366,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08154943934760449,
      "grad_norm": 1.5832443237304688,
      "learning_rate": 2.702702702702703e-05,
      "loss": 1.8972,
      "step": 10
    },
    {
      "epoch": 0.16309887869520898,
      "grad_norm": 1.6229541301727295,
      "learning_rate": 5.405405405405406e-05,
      "loss": 1.7883,
      "step": 20
    },
    {
      "epoch": 0.24464831804281345,
      "grad_norm": 1.407547116279602,
      "learning_rate": 8.108108108108109e-05,
      "loss": 1.3865,
      "step": 30
    },
    {
      "epoch": 0.32619775739041795,
      "grad_norm": 2.573899745941162,
      "learning_rate": 9.999088210158001e-05,
      "loss": 1.3647,
      "step": 40
    },
    {
      "epoch": 0.4077471967380224,
      "grad_norm": 2.391404390335083,
      "learning_rate": 9.967210469256656e-05,
      "loss": 1.2114,
      "step": 50
    },
    {
      "epoch": 0.4892966360856269,
      "grad_norm": 1.054701805114746,
      "learning_rate": 9.890075235781779e-05,
      "loss": 1.2255,
      "step": 60
    },
    {
      "epoch": 0.5708460754332314,
      "grad_norm": 1.9387873411178589,
      "learning_rate": 9.768385308070138e-05,
      "loss": 1.3242,
      "step": 70
    },
    {
      "epoch": 0.6523955147808359,
      "grad_norm": 0.961560070514679,
      "learning_rate": 9.603249433382144e-05,
      "loss": 1.106,
      "step": 80
    },
    {
      "epoch": 0.7339449541284404,
      "grad_norm": 0.783678412437439,
      "learning_rate": 9.396172205829234e-05,
      "loss": 1.1686,
      "step": 90
    },
    {
      "epoch": 0.8154943934760448,
      "grad_norm": 1.3414908647537231,
      "learning_rate": 9.149040357641929e-05,
      "loss": 1.148,
      "step": 100
    },
    {
      "epoch": 0.8970438328236493,
      "grad_norm": 1.628290057182312,
      "learning_rate": 8.864105568682244e-05,
      "loss": 1.3969,
      "step": 110
    },
    {
      "epoch": 0.9785932721712538,
      "grad_norm": 1.0776128768920898,
      "learning_rate": 8.543963950827279e-05,
      "loss": 1.1595,
      "step": 120
    },
    {
      "epoch": 1.0601427115188584,
      "grad_norm": 0.6094745993614197,
      "learning_rate": 8.191532394146865e-05,
      "loss": 1.091,
      "step": 130
    },
    {
      "epoch": 1.1416921508664628,
      "grad_norm": 1.0875447988510132,
      "learning_rate": 7.810021990391164e-05,
      "loss": 1.0718,
      "step": 140
    },
    {
      "epoch": 1.2232415902140672,
      "grad_norm": 0.9140685200691223,
      "learning_rate": 7.402908775933419e-05,
      "loss": 1.0833,
      "step": 150
    },
    {
      "epoch": 1.3047910295616718,
      "grad_norm": 0.7726348638534546,
      "learning_rate": 6.973902060736226e-05,
      "loss": 1.073,
      "step": 160
    },
    {
      "epoch": 1.3863404689092762,
      "grad_norm": 1.4581207036972046,
      "learning_rate": 6.526910631903973e-05,
      "loss": 1.0823,
      "step": 170
    },
    {
      "epoch": 1.4678899082568808,
      "grad_norm": 0.8327229022979736,
      "learning_rate": 6.0660071397493514e-05,
      "loss": 0.9893,
      "step": 180
    },
    {
      "epoch": 1.5494393476044852,
      "grad_norm": 0.6057823300361633,
      "learning_rate": 5.5953909908613114e-05,
      "loss": 0.9235,
      "step": 190
    },
    {
      "epoch": 1.6309887869520896,
      "grad_norm": 0.7681270241737366,
      "learning_rate": 5.119350086265004e-05,
      "loss": 1.012,
      "step": 200
    },
    {
      "epoch": 1.7125382262996942,
      "grad_norm": 1.238531231880188,
      "learning_rate": 4.64222175328687e-05,
      "loss": 1.0746,
      "step": 210
    },
    {
      "epoch": 1.7940876656472988,
      "grad_norm": 1.5333278179168701,
      "learning_rate": 4.1683532270843504e-05,
      "loss": 1.0924,
      "step": 220
    },
    {
      "epoch": 1.8756371049949032,
      "grad_norm": 1.807810664176941,
      "learning_rate": 3.7020620419029094e-05,
      "loss": 1.0545,
      "step": 230
    },
    {
      "epoch": 1.9571865443425076,
      "grad_norm": 1.5522844791412354,
      "learning_rate": 3.2475966929454504e-05,
      "loss": 1.0335,
      "step": 240
    },
    {
      "epoch": 2.038735983690112,
      "grad_norm": 1.2546252012252808,
      "learning_rate": 2.8090979272736662e-05,
      "loss": 0.9929,
      "step": 250
    },
    {
      "epoch": 2.120285423037717,
      "grad_norm": 1.7230066061019897,
      "learning_rate": 2.3905610164295394e-05,
      "loss": 0.9195,
      "step": 260
    },
    {
      "epoch": 2.2018348623853212,
      "grad_norm": 1.0860559940338135,
      "learning_rate": 1.995799354520598e-05,
      "loss": 0.8906,
      "step": 270
    },
    {
      "epoch": 2.2833843017329256,
      "grad_norm": 1.6877241134643555,
      "learning_rate": 1.6284097134357536e-05,
      "loss": 0.8852,
      "step": 280
    },
    {
      "epoch": 2.36493374108053,
      "grad_norm": 0.8254657983779907,
      "learning_rate": 1.2917394717602121e-05,
      "loss": 0.8899,
      "step": 290
    },
    {
      "epoch": 2.4464831804281344,
      "grad_norm": 1.212756633758545,
      "learning_rate": 9.888561159748993e-06,
      "loss": 0.9932,
      "step": 300
    },
    {
      "epoch": 2.528032619775739,
      "grad_norm": 1.147220253944397,
      "learning_rate": 7.225192918226214e-06,
      "loss": 0.8445,
      "step": 310
    },
    {
      "epoch": 2.6095820591233436,
      "grad_norm": 1.090319275856018,
      "learning_rate": 4.951556604879048e-06,
      "loss": 0.8961,
      "step": 320
    },
    {
      "epoch": 2.691131498470948,
      "grad_norm": 0.7078003287315369,
      "learning_rate": 3.0883678868214806e-06,
      "loss": 0.9571,
      "step": 330
    },
    {
      "epoch": 2.7726809378185524,
      "grad_norm": 0.9578835964202881,
      "learning_rate": 1.6526027408301226e-06,
      "loss": 0.9048,
      "step": 340
    },
    {
      "epoch": 2.8542303771661572,
      "grad_norm": 2.023444652557373,
      "learning_rate": 6.573427809888067e-07,
      "loss": 0.8868,
      "step": 350
    },
    {
      "epoch": 2.9357798165137616,
      "grad_norm": 1.268535852432251,
      "learning_rate": 1.1165606884234181e-07,
      "loss": 0.9579,
      "step": 360
    },
    {
      "epoch": 2.984709480122324,
      "step": 366,
      "total_flos": 2.6312102502948864e+16,
      "train_loss": 1.1005799705213537,
      "train_runtime": 684.9766,
      "train_samples_per_second": 4.296,
      "train_steps_per_second": 0.534
    }
  ],
  "logging_steps": 10,
  "max_steps": 366,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.6312102502948864e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
training_args.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:8ca5be73f4c1b170f3ef4bfb2e02fb48d6078198ed7964910e8905da8bd2e411
size 5240
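`training_args.bin` is the pickled training-arguments object the Hugging Face Trainer saves alongside its outputs. With library versions matching those in the README it can be inspected directly; a hedged sketch:

```python
# Inspect the saved training arguments. This unpickles an object, so it should
# be run with transformers/torch versions compatible with the ones in the README.
import torch

training_args = torch.load("training_args.bin")
print(type(training_args).__name__)
print(training_args.learning_rate, training_args.num_train_epochs)
print(training_args.per_device_train_batch_size, training_args.gradient_accumulation_steps)
```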
training_loss.png ADDED
vocab.json ADDED
The diff for this file is too large to render. See raw diff