ryanmarten committed on
Commit 7012d00 · verified · 1 Parent(s): b70d2d6

Upload model
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: transformers
+ license: other
+ base_model: Qwen/Qwen2.5-7B-Instruct
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: original
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # original
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) on the Stratos-R1 dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 12
+ - total_train_batch_size: 96
+ - total_eval_batch_size: 64
+ - optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.46.1
+ - Pytorch 2.5.1+cu124
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
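
The card declares a standard `transformers` causal LM (`Qwen2ForCausalLM`, per `config.json` below), and the effective batch size of 96 follows from train_batch_size × gradient_accumulation_steps × num_devices = 1 × 12 × 8. A minimal loading sketch, assuming the files in this commit have been downloaded locally (the directory name `./original` is a placeholder, not part of the upload):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder path: point this at a local copy of the files in this commit.
model_dir = "./original"

tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    torch_dtype="auto",   # resolves to bfloat16 via config.json
    device_map="auto",    # requires accelerate; shards across available GPUs
)
```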
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
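
These ids sit at the top of the 152064-entry vocabulary declared in `config.json`. A quick consistency check, assuming the tokenizer files from this commit are available locally (directory name again a placeholder):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./original")

# Each mapping in added_tokens.json should round-trip through the tokenizer.
assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 151645
assert tokenizer.convert_ids_to_tokens(151657) == "<tool_call>"
```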
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.998563906175203,
+   "total_flos": 538113478688768.0,
+   "train_loss": 0.5088067185490525,
+   "train_runtime": 25669.0251,
+   "train_samples_per_second": 1.953,
+   "train_steps_per_second": 0.02
+ }
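
The run-level numbers hang together up to logging precision; a small sanity check, pure arithmetic on the values above:

```python
# Values copied from all_results.json.
train_runtime = 25669.0251   # seconds (~7.1 hours)
samples_per_second = 1.953
steps_per_second = 0.02      # heavily rounded in the log

print(round(samples_per_second * train_runtime))  # ~50132 sample passes over ~3 epochs

# Because steps_per_second is rounded to two decimals, this only loosely
# matches the 522 optimizer steps recorded in trainer_state.json below.
print(round(steps_per_second * train_runtime))    # ~513
```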
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 3584,
+   "initializer_range": 0.02,
+   "intermediate_size": 18944,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 28,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 4,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.46.1",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 152064
+ }
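
Since `num_key_value_heads` is smaller than `num_attention_heads`, the attention here is grouped-query attention. The derived shapes follow directly from the numbers above:

```python
hidden_size = 3584
num_attention_heads = 28
num_key_value_heads = 4

head_dim = hidden_size // num_attention_heads            # 128
kv_groups = num_attention_heads // num_key_value_heads   # 7 query heads share each KV head

# K/V projections are correspondingly narrow, which is consistent with the
# separate k_proj/v_proj entries in model.safetensors.index.json below.
kv_proj_out = num_key_value_heads * head_dim             # 512
print(head_dim, kv_groups, kv_proj_out)
```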
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.05,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.46.1"
+ }
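
`model.generate` picks these sampling defaults up automatically when this file sits next to the weights; any of them can be overridden per call. A minimal sketch, continuing from the loading example after the README above (the prompt text is illustrative only):

```python
messages = [{"role": "user", "content": "What is 12 * 8?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Defaults from generation_config.json (do_sample=True, temperature=0.7,
# top_p=0.8, top_k=20, repetition_penalty=1.05) apply unless overridden here.
output = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```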
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7562d655c52d8df7c6b454b0b9d658f7e775ee383d2e9e62fc36d9f346b00703
+ size 4877660776
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac727eb597410b187dd138bf2ace3d7f52a9fcd65d8316d346ecfc2cbf211ca2
+ size 4932751008
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75e257bfae1c4838b6ca6280288fe8e30e22a7b203854a956914db03fb844de4
+ size 4330865200
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36275af91d7e4d7e3c4e27b851e0891911cebdf12b38c5f486af5209167f3844
+ size 1089994880
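
What the diff shows for each shard is the Git LFS pointer file, not the weights themselves: three `key value` lines (`version`, `oid`, `size`) per the spec URL they reference. A small parser sketch for such pointers:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer file into its key/value fields."""
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:7562d655c52d8df7c6b454b0b9d658f7e775ee383d2e9e62fc36d9f346b00703\n"
    "size 4877660776\n"
)
print(int(pointer["size"]))  # 4877660776 bytes (~4.5 GiB) for shard 1 of 4
```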
model.safetensors.index.json ADDED
@@ -0,0 +1,346 @@
+ {
+   "metadata": {
+     "total_size": 15231233024
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00004-of-00004.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.norm.weight": "model-00003-of-00004.safetensors"
+   }
+ }
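
A sharded checkpoint is resolved through this index: look the tensor name up in `weight_map`, then read it from the named shard. A sketch using the `safetensors` library directly, assuming a local copy of the files in this commit (with the real shards pulled from LFS, not the pointer files shown above):

```python
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "lm_head.weight"
shard = index["weight_map"][name]   # -> "model-00004-of-00004.safetensors"

with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)  # expected (152064, 3584) given vocab_size and hidden_size
```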
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
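
The `chat_template` field above is the Jinja template that `tokenizer.apply_chat_template` renders: it wraps each turn in `<|im_start|>`/`<|im_end|>` and injects the default Qwen system prompt when none is given. A quick way to inspect exactly what the model sees:

```python
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# <|im_start|>system
# You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```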
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.998563906175203,
+   "total_flos": 538113478688768.0,
+   "train_loss": 0.5088067185490525,
+   "train_runtime": 25669.0251,
+   "train_samples_per_second": 1.953,
+   "train_steps_per_second": 0.02
+ }
trainer_log.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
trainer_state.json ADDED
@@ -0,0 +1,3696 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.998563906175203,
+   "eval_steps": 500,
+   "global_step": 522,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0057443752991862135,
+       "grad_norm": 5.37349271774292,
+       "learning_rate": 1.886792452830189e-07,
+       "loss": 0.8587,
+       "step": 1
+     },
+     {
+       "epoch": 0.011488750598372427,
+       "grad_norm": 5.775772571563721,
+       "learning_rate": 3.773584905660378e-07,
+       "loss": 0.8937,
+       "step": 2
+     },
+     {
+       "epoch": 0.01723312589755864,
+       "grad_norm": 5.701127529144287,
+       "learning_rate": 5.660377358490567e-07,
+       "loss": 0.9179,
+       "step": 3
+     },
+     {
+       "epoch": 0.022977501196744854,
+       "grad_norm": 5.48037576675415,
+       "learning_rate": 7.547169811320755e-07,
+       "loss": 0.8829,
+       "step": 4
+     },
+     {
+       "epoch": 0.028721876495931067,
+       "grad_norm": 5.497180938720703,
+       "learning_rate": 9.433962264150944e-07,
+       "loss": 0.8801,
+       "step": 5
+     },
+     {
+       "epoch": 0.03446625179511728,
+       "grad_norm": 5.722774028778076,
+       "learning_rate": 1.1320754716981133e-06,
+       "loss": 0.8665,
+       "step": 6
+     },
+     {
+       "epoch": 0.040210627094303494,
+       "grad_norm": 5.297784328460693,
+       "learning_rate": 1.3207547169811322e-06,
+       "loss": 0.8801,
+       "step": 7
+     },
+     {
+       "epoch": 0.04595500239348971,
+       "grad_norm": 4.2470808029174805,
+       "learning_rate": 1.509433962264151e-06,
+       "loss": 0.8368,
+       "step": 8
+     },
+     {
+       "epoch": 0.05169937769267592,
+       "grad_norm": 3.891289234161377,
+       "learning_rate": 1.6981132075471698e-06,
+       "loss": 0.8067,
+       "step": 9
+     },
+     {
+       "epoch": 0.057443752991862135,
+       "grad_norm": 3.872174024581909,
+       "learning_rate": 1.8867924528301889e-06,
+       "loss": 0.8609,
+       "step": 10
+     },
+     {
+       "epoch": 0.06318812829104835,
+       "grad_norm": 2.5940165519714355,
+       "learning_rate": 2.075471698113208e-06,
+       "loss": 0.8291,
+       "step": 11
+     },
+     {
+       "epoch": 0.06893250359023456,
+       "grad_norm": 2.293347120285034,
+       "learning_rate": 2.2641509433962266e-06,
+       "loss": 0.7539,
+       "step": 12
+     },
+     {
+       "epoch": 0.07467687888942078,
+       "grad_norm": 2.191865921020508,
+       "learning_rate": 2.4528301886792453e-06,
+       "loss": 0.8272,
+       "step": 13
+     },
+     {
+       "epoch": 0.08042125418860699,
+       "grad_norm": 2.2178380489349365,
+       "learning_rate": 2.6415094339622644e-06,
+       "loss": 0.8099,
+       "step": 14
+     },
+     {
+       "epoch": 0.0861656294877932,
+       "grad_norm": 3.483651638031006,
+       "learning_rate": 2.830188679245283e-06,
+       "loss": 0.806,
+       "step": 15
+     },
+     {
+       "epoch": 0.09191000478697942,
+       "grad_norm": 3.8942596912384033,
+       "learning_rate": 3.018867924528302e-06,
+       "loss": 0.8085,
+       "step": 16
+     },
+     {
+       "epoch": 0.09765438008616563,
+       "grad_norm": 3.8473079204559326,
+       "learning_rate": 3.207547169811321e-06,
+       "loss": 0.7816,
+       "step": 17
+     },
+     {
+       "epoch": 0.10339875538535184,
+       "grad_norm": 3.5139431953430176,
+       "learning_rate": 3.3962264150943395e-06,
+       "loss": 0.7845,
+       "step": 18
+     },
+     {
+       "epoch": 0.10914313068453806,
+       "grad_norm": 2.965437412261963,
+       "learning_rate": 3.5849056603773586e-06,
+       "loss": 0.7167,
+       "step": 19
+     },
+     {
+       "epoch": 0.11488750598372427,
+       "grad_norm": 2.0699801445007324,
+       "learning_rate": 3.7735849056603777e-06,
+       "loss": 0.7372,
+       "step": 20
+     },
+     {
+       "epoch": 0.12063188128291048,
+       "grad_norm": 1.5903819799423218,
+       "learning_rate": 3.962264150943396e-06,
+       "loss": 0.711,
+       "step": 21
+     },
+     {
+       "epoch": 0.1263762565820967,
+       "grad_norm": 1.460195779800415,
+       "learning_rate": 4.150943396226416e-06,
+       "loss": 0.7038,
+       "step": 22
+     },
+     {
+       "epoch": 0.13212063188128292,
+       "grad_norm": 1.229818344116211,
+       "learning_rate": 4.339622641509435e-06,
+       "loss": 0.661,
+       "step": 23
+     },
+     {
+       "epoch": 0.13786500718046912,
+       "grad_norm": 1.1705495119094849,
+       "learning_rate": 4.528301886792453e-06,
+       "loss": 0.6991,
+       "step": 24
+     },
+     {
+       "epoch": 0.14360938247965535,
+       "grad_norm": 1.2829539775848389,
+       "learning_rate": 4.716981132075472e-06,
+       "loss": 0.6844,
+       "step": 25
+     },
+     {
+       "epoch": 0.14935375777884155,
+       "grad_norm": 1.1270300149917603,
+       "learning_rate": 4.905660377358491e-06,
+       "loss": 0.6736,
+       "step": 26
+     },
+     {
+       "epoch": 0.15509813307802778,
+       "grad_norm": 1.018497109413147,
+       "learning_rate": 5.09433962264151e-06,
+       "loss": 0.7077,
+       "step": 27
+     },
+     {
+       "epoch": 0.16084250837721398,
+       "grad_norm": 0.8882578611373901,
+       "learning_rate": 5.283018867924529e-06,
+       "loss": 0.6665,
+       "step": 28
+     },
+     {
+       "epoch": 0.1665868836764002,
+       "grad_norm": 1.0214155912399292,
+       "learning_rate": 5.4716981132075475e-06,
+       "loss": 0.6512,
+       "step": 29
+     },
+     {
+       "epoch": 0.1723312589755864,
+       "grad_norm": 0.9866452813148499,
+       "learning_rate": 5.660377358490566e-06,
+       "loss": 0.6561,
+       "step": 30
+     },
+     {
+       "epoch": 0.17807563427477263,
+       "grad_norm": 0.822456955909729,
+       "learning_rate": 5.849056603773585e-06,
+       "loss": 0.6137,
+       "step": 31
+     },
+     {
+       "epoch": 0.18382000957395883,
+       "grad_norm": 0.7711695432662964,
+       "learning_rate": 6.037735849056604e-06,
+       "loss": 0.5908,
+       "step": 32
+     },
+     {
+       "epoch": 0.18956438487314506,
+       "grad_norm": 0.9906867146492004,
+       "learning_rate": 6.226415094339623e-06,
+       "loss": 0.6336,
+       "step": 33
+     },
+     {
+       "epoch": 0.19530876017233126,
+       "grad_norm": 0.9756529331207275,
+       "learning_rate": 6.415094339622642e-06,
+       "loss": 0.6528,
+       "step": 34
+     },
+     {
+       "epoch": 0.20105313547151749,
+       "grad_norm": 0.7538486123085022,
+       "learning_rate": 6.60377358490566e-06,
+       "loss": 0.6377,
+       "step": 35
+     },
+     {
+       "epoch": 0.20679751077070369,
+       "grad_norm": 0.7044028639793396,
+       "learning_rate": 6.792452830188679e-06,
+       "loss": 0.6312,
+       "step": 36
+     },
+     {
+       "epoch": 0.2125418860698899,
+       "grad_norm": 0.766700029373169,
+       "learning_rate": 6.981132075471699e-06,
+       "loss": 0.6142,
+       "step": 37
+     },
+     {
+       "epoch": 0.2182862613690761,
+       "grad_norm": 0.8737772107124329,
+       "learning_rate": 7.169811320754717e-06,
+       "loss": 0.6278,
+       "step": 38
+     },
+     {
+       "epoch": 0.22403063666826234,
+       "grad_norm": 0.7406350374221802,
+       "learning_rate": 7.358490566037736e-06,
+       "loss": 0.6274,
+       "step": 39
+     },
+     {
+       "epoch": 0.22977501196744854,
+       "grad_norm": 0.6247995495796204,
+       "learning_rate": 7.5471698113207555e-06,
+       "loss": 0.604,
+       "step": 40
+     },
+     {
+       "epoch": 0.23551938726663477,
+       "grad_norm": 0.7143349647521973,
+       "learning_rate": 7.735849056603775e-06,
+       "loss": 0.6461,
+       "step": 41
+     },
+     {
+       "epoch": 0.24126376256582097,
+       "grad_norm": 0.6662243604660034,
+       "learning_rate": 7.924528301886793e-06,
+       "loss": 0.5815,
+       "step": 42
+     },
+     {
+       "epoch": 0.2470081378650072,
+       "grad_norm": 0.7262428402900696,
+       "learning_rate": 8.113207547169812e-06,
+       "loss": 0.6194,
+       "step": 43
+     },
+     {
+       "epoch": 0.2527525131641934,
+       "grad_norm": 0.6320751905441284,
+       "learning_rate": 8.301886792452832e-06,
+       "loss": 0.6338,
+       "step": 44
+     },
+     {
+       "epoch": 0.2584968884633796,
+       "grad_norm": 0.5772255063056946,
+       "learning_rate": 8.49056603773585e-06,
+       "loss": 0.6176,
+       "step": 45
+     },
+     {
+       "epoch": 0.26424126376256585,
+       "grad_norm": 0.7007628679275513,
+       "learning_rate": 8.67924528301887e-06,
+       "loss": 0.5683,
+       "step": 46
+     },
+     {
+       "epoch": 0.26998563906175205,
+       "grad_norm": 0.7302536368370056,
+       "learning_rate": 8.867924528301887e-06,
+       "loss": 0.5975,
+       "step": 47
+     },
+     {
+       "epoch": 0.27573001436093825,
+       "grad_norm": 0.6005181670188904,
+       "learning_rate": 9.056603773584907e-06,
+       "loss": 0.5844,
+       "step": 48
+     },
+     {
+       "epoch": 0.28147438966012445,
+       "grad_norm": 0.8314005136489868,
+       "learning_rate": 9.245283018867926e-06,
+       "loss": 0.6181,
+       "step": 49
+     },
+     {
+       "epoch": 0.2872187649593107,
+       "grad_norm": 0.6696397662162781,
+       "learning_rate": 9.433962264150944e-06,
+       "loss": 0.5741,
+       "step": 50
+     },
+     {
+       "epoch": 0.2929631402584969,
+       "grad_norm": 0.6071884632110596,
+       "learning_rate": 9.622641509433963e-06,
+       "loss": 0.5599,
+       "step": 51
+     },
+     {
+       "epoch": 0.2987075155576831,
+       "grad_norm": 0.6364148259162903,
+       "learning_rate": 9.811320754716981e-06,
+       "loss": 0.6052,
+       "step": 52
+     },
+     {
+       "epoch": 0.3044518908568693,
+       "grad_norm": 0.6715644001960754,
+       "learning_rate": 1e-05,
379
+ "loss": 0.5774,
380
+ "step": 53
381
+ },
382
+ {
383
+ "epoch": 0.31019626615605556,
384
+ "grad_norm": 0.5949195623397827,
385
+ "learning_rate": 9.999887825938495e-06,
386
+ "loss": 0.5668,
387
+ "step": 54
388
+ },
389
+ {
390
+ "epoch": 0.31594064145524176,
391
+ "grad_norm": 0.5966293811798096,
392
+ "learning_rate": 9.999551308787183e-06,
393
+ "loss": 0.5708,
394
+ "step": 55
395
+ },
396
+ {
397
+ "epoch": 0.32168501675442795,
398
+ "grad_norm": 0.6295287013053894,
399
+ "learning_rate": 9.998990463645464e-06,
400
+ "loss": 0.5788,
401
+ "step": 56
402
+ },
403
+ {
404
+ "epoch": 0.32742939205361415,
405
+ "grad_norm": 0.623221755027771,
406
+ "learning_rate": 9.998205315678248e-06,
407
+ "loss": 0.5868,
408
+ "step": 57
409
+ },
410
+ {
411
+ "epoch": 0.3331737673528004,
412
+ "grad_norm": 0.6538676023483276,
413
+ "learning_rate": 9.997195900114833e-06,
414
+ "loss": 0.5802,
415
+ "step": 58
416
+ },
417
+ {
418
+ "epoch": 0.3389181426519866,
419
+ "grad_norm": 0.6484891772270203,
420
+ "learning_rate": 9.995962262247314e-06,
421
+ "loss": 0.5543,
422
+ "step": 59
423
+ },
424
+ {
425
+ "epoch": 0.3446625179511728,
426
+ "grad_norm": 0.6138676404953003,
427
+ "learning_rate": 9.994504457428557e-06,
428
+ "loss": 0.5633,
429
+ "step": 60
430
+ },
431
+ {
432
+ "epoch": 0.350406893250359,
433
+ "grad_norm": 0.5668913722038269,
434
+ "learning_rate": 9.99282255106972e-06,
435
+ "loss": 0.5792,
436
+ "step": 61
437
+ },
438
+ {
439
+ "epoch": 0.35615126854954526,
440
+ "grad_norm": 0.5608384013175964,
441
+ "learning_rate": 9.99091661863731e-06,
442
+ "loss": 0.5884,
443
+ "step": 62
444
+ },
445
+ {
446
+ "epoch": 0.36189564384873146,
447
+ "grad_norm": 0.5740435719490051,
448
+ "learning_rate": 9.988786745649798e-06,
449
+ "loss": 0.5722,
450
+ "step": 63
451
+ },
452
+ {
453
+ "epoch": 0.36764001914791766,
454
+ "grad_norm": 0.5896041393280029,
455
+ "learning_rate": 9.986433027673786e-06,
456
+ "loss": 0.5558,
457
+ "step": 64
458
+ },
459
+ {
460
+ "epoch": 0.37338439444710386,
461
+ "grad_norm": 0.5800390243530273,
462
+ "learning_rate": 9.983855570319716e-06,
463
+ "loss": 0.5773,
464
+ "step": 65
465
+ },
466
+ {
467
+ "epoch": 0.3791287697462901,
468
+ "grad_norm": 0.5293853282928467,
469
+ "learning_rate": 9.981054489237132e-06,
470
+ "loss": 0.5709,
471
+ "step": 66
472
+ },
473
+ {
474
+ "epoch": 0.3848731450454763,
475
+ "grad_norm": 0.5264029502868652,
476
+ "learning_rate": 9.978029910109491e-06,
477
+ "loss": 0.5314,
478
+ "step": 67
479
+ },
480
+ {
481
+ "epoch": 0.3906175203446625,
482
+ "grad_norm": 0.5725036859512329,
483
+ "learning_rate": 9.974781968648523e-06,
484
+ "loss": 0.5544,
485
+ "step": 68
486
+ },
487
+ {
488
+ "epoch": 0.3963618956438487,
489
+ "grad_norm": 0.5662187933921814,
490
+ "learning_rate": 9.971310810588141e-06,
491
+ "loss": 0.5816,
492
+ "step": 69
493
+ },
494
+ {
495
+ "epoch": 0.40210627094303497,
496
+ "grad_norm": 0.4821569323539734,
497
+ "learning_rate": 9.967616591677906e-06,
498
+ "loss": 0.5054,
499
+ "step": 70
500
+ },
501
+ {
502
+ "epoch": 0.40785064624222117,
503
+ "grad_norm": 0.6668592691421509,
504
+ "learning_rate": 9.963699477676031e-06,
505
+ "loss": 0.5594,
506
+ "step": 71
507
+ },
508
+ {
509
+ "epoch": 0.41359502154140737,
510
+ "grad_norm": 0.5324398279190063,
511
+ "learning_rate": 9.959559644341954e-06,
512
+ "loss": 0.5384,
513
+ "step": 72
514
+ },
515
+ {
516
+ "epoch": 0.41933939684059357,
517
+ "grad_norm": 0.5210046172142029,
518
+ "learning_rate": 9.95519727742844e-06,
519
+ "loss": 0.5588,
520
+ "step": 73
521
+ },
522
+ {
523
+ "epoch": 0.4250837721397798,
524
+ "grad_norm": 0.6099966764450073,
525
+ "learning_rate": 9.950612572673255e-06,
526
+ "loss": 0.562,
527
+ "step": 74
528
+ },
529
+ {
530
+ "epoch": 0.430828147438966,
531
+ "grad_norm": 0.5562371611595154,
532
+ "learning_rate": 9.945805735790383e-06,
533
+ "loss": 0.5726,
534
+ "step": 75
535
+ },
536
+ {
537
+ "epoch": 0.4365725227381522,
538
+ "grad_norm": 0.49663493037223816,
539
+ "learning_rate": 9.940776982460787e-06,
540
+ "loss": 0.5486,
541
+ "step": 76
542
+ },
543
+ {
544
+ "epoch": 0.4423168980373384,
545
+ "grad_norm": 0.5064303278923035,
546
+ "learning_rate": 9.935526538322744e-06,
547
+ "loss": 0.5751,
548
+ "step": 77
549
+ },
550
+ {
551
+ "epoch": 0.4480612733365247,
552
+ "grad_norm": 0.5378572344779968,
553
+ "learning_rate": 9.930054638961709e-06,
554
+ "loss": 0.552,
555
+ "step": 78
556
+ },
557
+ {
558
+ "epoch": 0.4538056486357109,
559
+ "grad_norm": 0.5384443402290344,
560
+ "learning_rate": 9.924361529899754e-06,
561
+ "loss": 0.5612,
562
+ "step": 79
563
+ },
564
+ {
565
+ "epoch": 0.4595500239348971,
566
+ "grad_norm": 0.49997466802597046,
567
+ "learning_rate": 9.918447466584545e-06,
568
+ "loss": 0.5432,
569
+ "step": 80
570
+ },
571
+ {
572
+ "epoch": 0.4652943992340833,
573
+ "grad_norm": 0.5184059143066406,
574
+ "learning_rate": 9.91231271437788e-06,
575
+ "loss": 0.5471,
576
+ "step": 81
577
+ },
578
+ {
579
+ "epoch": 0.47103877453326953,
580
+ "grad_norm": 0.5947031378746033,
581
+ "learning_rate": 9.905957548543794e-06,
582
+ "loss": 0.5658,
583
+ "step": 82
584
+ },
585
+ {
586
+ "epoch": 0.47678314983245573,
587
+ "grad_norm": 0.4928087592124939,
588
+ "learning_rate": 9.899382254236186e-06,
589
+ "loss": 0.5503,
590
+ "step": 83
591
+ },
592
+ {
593
+ "epoch": 0.48252752513164193,
594
+ "grad_norm": 0.6400409936904907,
595
+ "learning_rate": 9.892587126486046e-06,
596
+ "loss": 0.5788,
597
+ "step": 84
598
+ },
599
+ {
600
+ "epoch": 0.48827190043082813,
601
+ "grad_norm": 0.5572645664215088,
602
+ "learning_rate": 9.885572470188207e-06,
603
+ "loss": 0.5362,
604
+ "step": 85
605
+ },
606
+ {
607
+ "epoch": 0.4940162757300144,
608
+ "grad_norm": 0.49279481172561646,
609
+ "learning_rate": 9.878338600087658e-06,
610
+ "loss": 0.5539,
611
+ "step": 86
612
+ },
613
+ {
614
+ "epoch": 0.4997606510292006,
615
+ "grad_norm": 0.6104176640510559,
616
+ "learning_rate": 9.87088584076544e-06,
617
+ "loss": 0.5706,
618
+ "step": 87
619
+ },
620
+ {
621
+ "epoch": 0.5055050263283868,
622
+ "grad_norm": 0.49791330099105835,
623
+ "learning_rate": 9.863214526624065e-06,
624
+ "loss": 0.5543,
625
+ "step": 88
626
+ },
627
+ {
628
+ "epoch": 0.511249401627573,
629
+ "grad_norm": 0.4971017837524414,
630
+ "learning_rate": 9.85532500187252e-06,
631
+ "loss": 0.5503,
632
+ "step": 89
633
+ },
634
+ {
635
+ "epoch": 0.5169937769267592,
636
+ "grad_norm": 0.5559049844741821,
637
+ "learning_rate": 9.847217620510815e-06,
638
+ "loss": 0.56,
639
+ "step": 90
640
+ },
641
+ {
642
+ "epoch": 0.5227381522259454,
643
+ "grad_norm": 0.45909181237220764,
644
+ "learning_rate": 9.83889274631411e-06,
645
+ "loss": 0.5375,
646
+ "step": 91
647
+ },
648
+ {
649
+ "epoch": 0.5284825275251317,
650
+ "grad_norm": 0.6156418323516846,
651
+ "learning_rate": 9.830350752816386e-06,
652
+ "loss": 0.5436,
653
+ "step": 92
654
+ },
655
+ {
656
+ "epoch": 0.5342269028243178,
657
+ "grad_norm": 0.46865999698638916,
658
+ "learning_rate": 9.821592023293686e-06,
659
+ "loss": 0.5317,
660
+ "step": 93
661
+ },
662
+ {
663
+ "epoch": 0.5399712781235041,
664
+ "grad_norm": 0.4963522255420685,
665
+ "learning_rate": 9.81261695074691e-06,
666
+ "loss": 0.5609,
667
+ "step": 94
668
+ },
669
+ {
670
+ "epoch": 0.5457156534226902,
671
+ "grad_norm": 0.5644580721855164,
672
+ "learning_rate": 9.803425937884202e-06,
673
+ "loss": 0.5489,
674
+ "step": 95
675
+ },
676
+ {
677
+ "epoch": 0.5514600287218765,
678
+ "grad_norm": 0.5057684779167175,
679
+ "learning_rate": 9.794019397102852e-06,
680
+ "loss": 0.5628,
681
+ "step": 96
682
+ },
683
+ {
684
+ "epoch": 0.5572044040210627,
685
+ "grad_norm": 0.5877383947372437,
686
+ "learning_rate": 9.784397750470818e-06,
687
+ "loss": 0.5329,
688
+ "step": 97
689
+ },
690
+ {
691
+ "epoch": 0.5629487793202489,
692
+ "grad_norm": 0.5501551628112793,
693
+ "learning_rate": 9.774561429707769e-06,
694
+ "loss": 0.5756,
695
+ "step": 98
696
+ },
697
+ {
698
+ "epoch": 0.5686931546194351,
699
+ "grad_norm": 0.5017531514167786,
700
+ "learning_rate": 9.764510876165727e-06,
701
+ "loss": 0.5309,
702
+ "step": 99
703
+ },
704
+ {
705
+ "epoch": 0.5744375299186214,
706
+ "grad_norm": 0.5698550343513489,
707
+ "learning_rate": 9.754246540809257e-06,
708
+ "loss": 0.5344,
709
+ "step": 100
710
+ },
711
+ {
712
+ "epoch": 0.5801819052178075,
713
+ "grad_norm": 0.6889923810958862,
714
+ "learning_rate": 9.743768884195233e-06,
715
+ "loss": 0.566,
716
+ "step": 101
717
+ },
718
+ {
719
+ "epoch": 0.5859262805169938,
720
+ "grad_norm": 0.5755162835121155,
721
+ "learning_rate": 9.733078376452172e-06,
722
+ "loss": 0.5623,
723
+ "step": 102
724
+ },
725
+ {
726
+ "epoch": 0.59167065581618,
727
+ "grad_norm": 0.5690771341323853,
728
+ "learning_rate": 9.722175497259145e-06,
729
+ "loss": 0.5472,
730
+ "step": 103
731
+ },
732
+ {
733
+ "epoch": 0.5974150311153662,
734
+ "grad_norm": 0.48988884687423706,
735
+ "learning_rate": 9.71106073582425e-06,
736
+ "loss": 0.5306,
737
+ "step": 104
738
+ },
739
+ {
740
+ "epoch": 0.6031594064145525,
741
+ "grad_norm": 0.5045924782752991,
742
+ "learning_rate": 9.699734590862655e-06,
743
+ "loss": 0.5319,
744
+ "step": 105
745
+ },
746
+ {
747
+ "epoch": 0.6089037817137386,
748
+ "grad_norm": 0.6457961201667786,
749
+ "learning_rate": 9.688197570574238e-06,
750
+ "loss": 0.5546,
751
+ "step": 106
752
+ },
753
+ {
754
+ "epoch": 0.6146481570129249,
755
+ "grad_norm": 0.5357460379600525,
756
+ "learning_rate": 9.676450192620767e-06,
757
+ "loss": 0.5485,
758
+ "step": 107
759
+ },
760
+ {
761
+ "epoch": 0.6203925323121111,
762
+ "grad_norm": 0.6195516586303711,
763
+ "learning_rate": 9.66449298410268e-06,
764
+ "loss": 0.5651,
765
+ "step": 108
766
+ },
767
+ {
768
+ "epoch": 0.6261369076112973,
769
+ "grad_norm": 0.532525360584259,
770
+ "learning_rate": 9.652326481535434e-06,
771
+ "loss": 0.5119,
772
+ "step": 109
773
+ },
774
+ {
775
+ "epoch": 0.6318812829104835,
776
+ "grad_norm": 0.6295329332351685,
777
+ "learning_rate": 9.639951230825433e-06,
778
+ "loss": 0.5788,
779
+ "step": 110
780
+ },
781
+ {
782
+ "epoch": 0.6376256582096697,
783
+ "grad_norm": 0.6266581416130066,
784
+ "learning_rate": 9.62736778724553e-06,
785
+ "loss": 0.5582,
786
+ "step": 111
787
+ },
788
+ {
789
+ "epoch": 0.6433700335088559,
790
+ "grad_norm": 0.6455391645431519,
791
+ "learning_rate": 9.614576715410116e-06,
792
+ "loss": 0.5676,
793
+ "step": 112
794
+ },
795
+ {
796
+ "epoch": 0.6491144088080422,
797
+ "grad_norm": 0.6532648801803589,
798
+ "learning_rate": 9.60157858924978e-06,
799
+ "loss": 0.5322,
800
+ "step": 113
801
+ },
802
+ {
803
+ "epoch": 0.6548587841072283,
804
+ "grad_norm": 0.695582389831543,
805
+ "learning_rate": 9.588373991985566e-06,
806
+ "loss": 0.557,
807
+ "step": 114
808
+ },
809
+ {
810
+ "epoch": 0.6606031594064146,
811
+ "grad_norm": 0.6813830733299255,
812
+ "learning_rate": 9.574963516102795e-06,
813
+ "loss": 0.5639,
814
+ "step": 115
815
+ },
816
+ {
817
+ "epoch": 0.6663475347056008,
818
+ "grad_norm": 0.537449836730957,
819
+ "learning_rate": 9.561347763324484e-06,
820
+ "loss": 0.528,
821
+ "step": 116
822
+ },
823
+ {
824
+ "epoch": 0.672091910004787,
825
+ "grad_norm": 0.5756135582923889,
826
+ "learning_rate": 9.547527344584353e-06,
827
+ "loss": 0.5461,
828
+ "step": 117
829
+ },
830
+ {
831
+ "epoch": 0.6778362853039732,
832
+ "grad_norm": 0.664249837398529,
833
+ "learning_rate": 9.533502879999398e-06,
834
+ "loss": 0.5685,
835
+ "step": 118
836
+ },
837
+ {
838
+ "epoch": 0.6835806606031594,
839
+ "grad_norm": 0.5206965804100037,
840
+ "learning_rate": 9.519274998842084e-06,
841
+ "loss": 0.5717,
842
+ "step": 119
843
+ },
844
+ {
845
+ "epoch": 0.6893250359023456,
846
+ "grad_norm": 0.6174411773681641,
847
+ "learning_rate": 9.504844339512096e-06,
848
+ "loss": 0.5338,
849
+ "step": 120
850
+ },
851
+ {
852
+ "epoch": 0.6950694112015319,
853
+ "grad_norm": 0.4804738461971283,
854
+ "learning_rate": 9.490211549507701e-06,
855
+ "loss": 0.5557,
856
+ "step": 121
857
+ },
858
+ {
859
+ "epoch": 0.700813786500718,
860
+ "grad_norm": 0.49055036902427673,
861
+ "learning_rate": 9.475377285396692e-06,
862
+ "loss": 0.5324,
863
+ "step": 122
864
+ },
865
+ {
866
+ "epoch": 0.7065581617999043,
867
+ "grad_norm": 0.486482709646225,
868
+ "learning_rate": 9.460342212786933e-06,
869
+ "loss": 0.5242,
870
+ "step": 123
871
+ },
872
+ {
873
+ "epoch": 0.7123025370990905,
874
+ "grad_norm": 0.549220085144043,
875
+ "learning_rate": 9.445107006296488e-06,
876
+ "loss": 0.5692,
877
+ "step": 124
878
+ },
879
+ {
880
+ "epoch": 0.7180469123982767,
881
+ "grad_norm": 0.5209650993347168,
882
+ "learning_rate": 9.42967234952335e-06,
883
+ "loss": 0.5685,
884
+ "step": 125
885
+ },
886
+ {
887
+ "epoch": 0.7237912876974629,
888
+ "grad_norm": 0.5501351952552795,
889
+ "learning_rate": 9.414038935014777e-06,
890
+ "loss": 0.5288,
891
+ "step": 126
892
+ },
893
+ {
894
+ "epoch": 0.7295356629966491,
895
+ "grad_norm": 0.5890218615531921,
896
+ "learning_rate": 9.398207464236209e-06,
897
+ "loss": 0.5288,
898
+ "step": 127
899
+ },
900
+ {
901
+ "epoch": 0.7352800382958353,
902
+ "grad_norm": 0.5494324564933777,
903
+ "learning_rate": 9.382178647539794e-06,
904
+ "loss": 0.541,
905
+ "step": 128
906
+ },
907
+ {
908
+ "epoch": 0.7410244135950216,
909
+ "grad_norm": 0.5340805053710938,
910
+ "learning_rate": 9.365953204132526e-06,
911
+ "loss": 0.5494,
912
+ "step": 129
913
+ },
914
+ {
915
+ "epoch": 0.7467687888942077,
916
+ "grad_norm": 0.4657943844795227,
917
+ "learning_rate": 9.349531862043952e-06,
918
+ "loss": 0.5629,
919
+ "step": 130
920
+ },
921
+ {
922
+ "epoch": 0.752513164193394,
923
+ "grad_norm": 0.4700133502483368,
924
+ "learning_rate": 9.332915358093532e-06,
925
+ "loss": 0.5265,
926
+ "step": 131
927
+ },
928
+ {
929
+ "epoch": 0.7582575394925802,
930
+ "grad_norm": 0.5137308239936829,
931
+ "learning_rate": 9.316104437857561e-06,
932
+ "loss": 0.5315,
933
+ "step": 132
934
+ },
935
+ {
936
+ "epoch": 0.7640019147917664,
937
+ "grad_norm": 0.5512875914573669,
938
+ "learning_rate": 9.299099855635716e-06,
939
+ "loss": 0.5246,
940
+ "step": 133
941
+ },
942
+ {
943
+ "epoch": 0.7697462900909526,
944
+ "grad_norm": 0.5698496699333191,
945
+ "learning_rate": 9.28190237441722e-06,
946
+ "loss": 0.5161,
947
+ "step": 134
948
+ },
949
+ {
950
+ "epoch": 0.7754906653901388,
951
+ "grad_norm": 0.6161168217658997,
952
+ "learning_rate": 9.2645127658466e-06,
953
+ "loss": 0.5358,
954
+ "step": 135
955
+ },
956
+ {
957
+ "epoch": 0.781235040689325,
958
+ "grad_norm": 0.5661118626594543,
959
+ "learning_rate": 9.246931810189061e-06,
960
+ "loss": 0.546,
961
+ "step": 136
962
+ },
963
+ {
964
+ "epoch": 0.7869794159885113,
965
+ "grad_norm": 0.5279558897018433,
966
+ "learning_rate": 9.229160296295488e-06,
967
+ "loss": 0.5096,
968
+ "step": 137
969
+ },
970
+ {
971
+ "epoch": 0.7927237912876974,
972
+ "grad_norm": 0.5370686650276184,
973
+ "learning_rate": 9.211199021567034e-06,
974
+ "loss": 0.5523,
975
+ "step": 138
976
+ },
977
+ {
978
+ "epoch": 0.7984681665868837,
979
+ "grad_norm": 0.6411705613136292,
980
+ "learning_rate": 9.193048791919357e-06,
981
+ "loss": 0.5595,
982
+ "step": 139
983
+ },
984
+ {
985
+ "epoch": 0.8042125418860699,
986
+ "grad_norm": 0.5257410407066345,
987
+ "learning_rate": 9.174710421746445e-06,
988
+ "loss": 0.5686,
989
+ "step": 140
990
+ },
991
+ {
992
+ "epoch": 0.8099569171852561,
993
+ "grad_norm": 0.5763169527053833,
994
+ "learning_rate": 9.156184733884084e-06,
995
+ "loss": 0.5252,
996
+ "step": 141
997
+ },
998
+ {
999
+ "epoch": 0.8157012924844423,
1000
+ "grad_norm": 0.5250378847122192,
1001
+ "learning_rate": 9.137472559572935e-06,
1002
+ "loss": 0.561,
1003
+ "step": 142
1004
+ },
1005
+ {
1006
+ "epoch": 0.8214456677836285,
1007
+ "grad_norm": 0.6087478399276733,
1008
+ "learning_rate": 9.118574738421236e-06,
1009
+ "loss": 0.5177,
1010
+ "step": 143
1011
+ },
1012
+ {
1013
+ "epoch": 0.8271900430828147,
1014
+ "grad_norm": 0.6588859558105469,
1015
+ "learning_rate": 9.099492118367123e-06,
1016
+ "loss": 0.5578,
1017
+ "step": 144
1018
+ },
1019
+ {
1020
+ "epoch": 0.832934418382001,
1021
+ "grad_norm": 0.48864591121673584,
1022
+ "learning_rate": 9.080225555640601e-06,
1023
+ "loss": 0.5394,
1024
+ "step": 145
1025
+ },
1026
+ {
1027
+ "epoch": 0.8386787936811871,
1028
+ "grad_norm": 0.5315064191818237,
1029
+ "learning_rate": 9.0607759147251e-06,
1030
+ "loss": 0.5404,
1031
+ "step": 146
1032
+ },
1033
+ {
1034
+ "epoch": 0.8444231689803734,
1035
+ "grad_norm": 0.5749916434288025,
1036
+ "learning_rate": 9.04114406831871e-06,
1037
+ "loss": 0.5114,
1038
+ "step": 147
1039
+ },
1040
+ {
1041
+ "epoch": 0.8501675442795597,
1042
+ "grad_norm": 0.47096845507621765,
1043
+ "learning_rate": 9.021330897295011e-06,
1044
+ "loss": 0.5383,
1045
+ "step": 148
1046
+ },
1047
+ {
1048
+ "epoch": 0.8559119195787458,
1049
+ "grad_norm": 0.5951332449913025,
1050
+ "learning_rate": 9.001337290663548e-06,
1051
+ "loss": 0.5173,
1052
+ "step": 149
1053
+ },
1054
+ {
1055
+ "epoch": 0.861656294877932,
1056
+ "grad_norm": 0.5525330305099487,
1057
+ "learning_rate": 8.981164145529943e-06,
1058
+ "loss": 0.5428,
1059
+ "step": 150
1060
+ },
1061
+ {
1062
+ "epoch": 0.8674006701771182,
1063
+ "grad_norm": 0.47650671005249023,
1064
+ "learning_rate": 8.960812367055646e-06,
1065
+ "loss": 0.5582,
1066
+ "step": 151
1067
+ },
1068
+ {
1069
+ "epoch": 0.8731450454763044,
1070
+ "grad_norm": 0.542094349861145,
1071
+ "learning_rate": 8.940282868417321e-06,
1072
+ "loss": 0.4893,
1073
+ "step": 152
1074
+ },
1075
+ {
1076
+ "epoch": 0.8788894207754907,
1077
+ "grad_norm": 0.5436213612556458,
1078
+ "learning_rate": 8.91957657076586e-06,
1079
+ "loss": 0.5531,
1080
+ "step": 153
1081
+ },
1082
+ {
1083
+ "epoch": 0.8846337960746768,
1084
+ "grad_norm": 0.529384434223175,
1085
+ "learning_rate": 8.898694403185066e-06,
1086
+ "loss": 0.5151,
1087
+ "step": 154
1088
+ },
1089
+ {
1090
+ "epoch": 0.8903781713738631,
1091
+ "grad_norm": 0.5005679726600647,
1092
+ "learning_rate": 8.877637302649962e-06,
1093
+ "loss": 0.5259,
1094
+ "step": 155
1095
+ },
1096
+ {
1097
+ "epoch": 0.8961225466730494,
1098
+ "grad_norm": 0.5237105488777161,
1099
+ "learning_rate": 8.856406213984743e-06,
1100
+ "loss": 0.5442,
1101
+ "step": 156
1102
+ },
1103
+ {
1104
+ "epoch": 0.9018669219722355,
1105
+ "grad_norm": 0.45996320247650146,
1106
+ "learning_rate": 8.835002089820387e-06,
1107
+ "loss": 0.5279,
1108
+ "step": 157
1109
+ },
1110
+ {
1111
+ "epoch": 0.9076112972714218,
1112
+ "grad_norm": 0.5661066770553589,
1113
+ "learning_rate": 8.81342589055191e-06,
1114
+ "loss": 0.5578,
1115
+ "step": 158
1116
+ },
1117
+ {
1118
+ "epoch": 0.9133556725706079,
1119
+ "grad_norm": 0.46714460849761963,
1120
+ "learning_rate": 8.791678584295276e-06,
1121
+ "loss": 0.5223,
1122
+ "step": 159
1123
+ },
1124
+ {
1125
+ "epoch": 0.9191000478697942,
1126
+ "grad_norm": 0.5787307024002075,
1127
+ "learning_rate": 8.76976114684395e-06,
1128
+ "loss": 0.5285,
1129
+ "step": 160
1130
+ },
1131
+ {
1132
+ "epoch": 0.9248444231689804,
1133
+ "grad_norm": 0.5483257174491882,
1134
+ "learning_rate": 8.747674561625121e-06,
1135
+ "loss": 0.5488,
1136
+ "step": 161
1137
+ },
1138
+ {
1139
+ "epoch": 0.9305887984681666,
1140
+ "grad_norm": 0.4516684412956238,
1141
+ "learning_rate": 8.725419819655582e-06,
1142
+ "loss": 0.4819,
1143
+ "step": 162
1144
+ },
1145
+ {
1146
+ "epoch": 0.9363331737673528,
1147
+ "grad_norm": 0.5912066698074341,
1148
+ "learning_rate": 8.702997919497247e-06,
1149
+ "loss": 0.5398,
1150
+ "step": 163
1151
+ },
1152
+ {
1153
+ "epoch": 0.9420775490665391,
1154
+ "grad_norm": 0.544422447681427,
1155
+ "learning_rate": 8.680409867212359e-06,
1156
+ "loss": 0.5563,
1157
+ "step": 164
1158
+ },
1159
+ {
1160
+ "epoch": 0.9478219243657252,
1161
+ "grad_norm": 0.615469217300415,
1162
+ "learning_rate": 8.657656676318346e-06,
1163
+ "loss": 0.5527,
1164
+ "step": 165
1165
+ },
1166
+ {
1167
+ "epoch": 0.9535662996649115,
1168
+ "grad_norm": 0.48167160153388977,
1169
+ "learning_rate": 8.634739367742341e-06,
1170
+ "loss": 0.5031,
1171
+ "step": 166
1172
+ },
1173
+ {
1174
+ "epoch": 0.9593106749640976,
1175
+ "grad_norm": 0.659505307674408,
1176
+ "learning_rate": 8.611658969775378e-06,
1177
+ "loss": 0.5239,
1178
+ "step": 167
1179
+ },
1180
+ {
1181
+ "epoch": 0.9650550502632839,
1182
+ "grad_norm": 0.5576907396316528,
1183
+ "learning_rate": 8.588416518026248e-06,
1184
+ "loss": 0.5173,
1185
+ "step": 168
1186
+ },
1187
+ {
1188
+ "epoch": 0.9707994255624701,
1189
+ "grad_norm": 0.5543246269226074,
1190
+ "learning_rate": 8.565013055375035e-06,
1191
+ "loss": 0.5346,
1192
+ "step": 169
1193
+ },
1194
+ {
1195
+ "epoch": 0.9765438008616563,
1196
+ "grad_norm": 0.6100912690162659,
1197
+ "learning_rate": 8.541449631926325e-06,
1198
+ "loss": 0.5268,
1199
+ "step": 170
1200
+ },
1201
+ {
1202
+ "epoch": 0.9822881761608425,
1203
+ "grad_norm": 0.47118762135505676,
1204
+ "learning_rate": 8.51772730496208e-06,
1205
+ "loss": 0.5605,
1206
+ "step": 171
1207
+ },
1208
+ {
1209
+ "epoch": 0.9880325514600288,
1210
+ "grad_norm": 0.6019647121429443,
1211
+ "learning_rate": 8.49384713889421e-06,
1212
+ "loss": 0.5273,
1213
+ "step": 172
1214
+ },
1215
+ {
1216
+ "epoch": 0.9937769267592149,
1217
+ "grad_norm": 0.6136192083358765,
1218
+ "learning_rate": 8.469810205216795e-06,
1219
+ "loss": 0.5659,
1220
+ "step": 173
1221
+ },
1222
+ {
1223
+ "epoch": 0.9995213020584012,
1224
+ "grad_norm": 0.5436699986457825,
1225
+ "learning_rate": 8.445617582458033e-06,
1226
+ "loss": 0.5469,
1227
+ "step": 174
1228
+ },
1229
+ {
1230
+ "epoch": 1.0052656773575874,
1231
+ "grad_norm": 1.5260647535324097,
1232
+ "learning_rate": 8.42127035613182e-06,
1233
+ "loss": 0.9094,
1234
+ "step": 175
1235
+ },
1236
+ {
1237
+ "epoch": 1.0110100526567736,
1238
+ "grad_norm": 0.5587025880813599,
1239
+ "learning_rate": 8.396769618689064e-06,
1240
+ "loss": 0.4939,
1241
+ "step": 176
1242
+ },
1243
+ {
1244
+ "epoch": 1.0167544279559597,
1245
+ "grad_norm": 0.5418161153793335,
1246
+ "learning_rate": 8.372116469468654e-06,
1247
+ "loss": 0.5211,
1248
+ "step": 177
1249
+ },
1250
+ {
1251
+ "epoch": 1.022498803255146,
1252
+ "grad_norm": 0.6211748719215393,
1253
+ "learning_rate": 8.347312014648144e-06,
1254
+ "loss": 0.4619,
1255
+ "step": 178
1256
+ },
1257
+ {
1258
+ "epoch": 1.0282431785543322,
1259
+ "grad_norm": 0.5461438298225403,
1260
+ "learning_rate": 8.32235736719411e-06,
1261
+ "loss": 0.4424,
1262
+ "step": 179
1263
+ },
1264
+ {
1265
+ "epoch": 1.0339875538535184,
1266
+ "grad_norm": 0.7238538861274719,
1267
+ "learning_rate": 8.297253646812213e-06,
1268
+ "loss": 0.5283,
1269
+ "step": 180
1270
+ },
1271
+ {
1272
+ "epoch": 1.0397319291527047,
1273
+ "grad_norm": 0.5685709714889526,
1274
+ "learning_rate": 8.272001979896962e-06,
1275
+ "loss": 0.4777,
1276
+ "step": 181
1277
+ },
1278
+ {
1279
+ "epoch": 1.0454763044518909,
1280
+ "grad_norm": 0.6029736399650574,
1281
+ "learning_rate": 8.246603499481177e-06,
1282
+ "loss": 0.4754,
1283
+ "step": 182
1284
+ },
1285
+ {
1286
+ "epoch": 1.051220679751077,
1287
+ "grad_norm": 0.7201405167579651,
1288
+ "learning_rate": 8.221059345185136e-06,
1289
+ "loss": 0.4864,
1290
+ "step": 183
1291
+ },
1292
+ {
1293
+ "epoch": 1.0569650550502634,
1294
+ "grad_norm": 0.5729609131813049,
1295
+ "learning_rate": 8.195370663165455e-06,
1296
+ "loss": 0.4793,
1297
+ "step": 184
1298
+ },
1299
+ {
1300
+ "epoch": 1.0627094303494495,
1301
+ "grad_norm": 0.6719270944595337,
1302
+ "learning_rate": 8.169538606063647e-06,
1303
+ "loss": 0.4964,
1304
+ "step": 185
1305
+ },
1306
+ {
1307
+ "epoch": 1.0684538056486357,
1308
+ "grad_norm": 0.6572569608688354,
1309
+ "learning_rate": 8.143564332954426e-06,
1310
+ "loss": 0.5049,
1311
+ "step": 186
1312
+ },
1313
+ {
1314
+ "epoch": 1.0741981809478218,
1315
+ "grad_norm": 0.5375853180885315,
1316
+ "learning_rate": 8.117449009293668e-06,
1317
+ "loss": 0.5056,
1318
+ "step": 187
1319
+ },
1320
+ {
1321
+ "epoch": 1.0799425562470082,
1322
+ "grad_norm": 0.5356974005699158,
1323
+ "learning_rate": 8.091193806866147e-06,
1324
+ "loss": 0.5014,
1325
+ "step": 188
1326
+ },
1327
+ {
1328
+ "epoch": 1.0856869315461943,
1329
+ "grad_norm": 0.5870072245597839,
1330
+ "learning_rate": 8.064799903732936e-06,
1331
+ "loss": 0.4648,
1332
+ "step": 189
1333
+ },
1334
+ {
1335
+ "epoch": 1.0914313068453805,
1336
+ "grad_norm": 0.4977969825267792,
1337
+ "learning_rate": 8.038268484178566e-06,
1338
+ "loss": 0.4912,
1339
+ "step": 190
1340
+ },
1341
+ {
1342
+ "epoch": 1.0971756821445668,
1343
+ "grad_norm": 0.49062633514404297,
1344
+ "learning_rate": 8.011600738657865e-06,
1345
+ "loss": 0.493,
1346
+ "step": 191
1347
+ },
1348
+ {
1349
+ "epoch": 1.102920057443753,
1350
+ "grad_norm": 0.5364016890525818,
1351
+ "learning_rate": 7.98479786374257e-06,
1352
+ "loss": 0.4551,
1353
+ "step": 192
1354
+ },
1355
+ {
1356
+ "epoch": 1.1086644327429391,
1357
+ "grad_norm": 0.6891268491744995,
1358
+ "learning_rate": 7.957861062067614e-06,
1359
+ "loss": 0.4907,
1360
+ "step": 193
1361
+ },
1362
+ {
1363
+ "epoch": 1.1144088080421255,
1364
+ "grad_norm": 0.5725308060646057,
1365
+ "learning_rate": 7.930791542277175e-06,
1366
+ "loss": 0.4892,
1367
+ "step": 194
1368
+ },
1369
+ {
1370
+ "epoch": 1.1201531833413116,
1371
+ "grad_norm": 0.5667552947998047,
1372
+ "learning_rate": 7.903590518970445e-06,
1373
+ "loss": 0.5127,
1374
+ "step": 195
1375
+ },
1376
+ {
1377
+ "epoch": 1.1258975586404978,
1378
+ "grad_norm": 0.462298721075058,
1379
+ "learning_rate": 7.876259212647129e-06,
1380
+ "loss": 0.441,
1381
+ "step": 196
1382
+ },
1383
+ {
1384
+ "epoch": 1.1316419339396842,
1385
+ "grad_norm": 0.5504534244537354,
1386
+ "learning_rate": 7.848798849652684e-06,
1387
+ "loss": 0.5009,
1388
+ "step": 197
1389
+ },
1390
+ {
1391
+ "epoch": 1.1373863092388703,
1392
+ "grad_norm": 0.48440906405448914,
1393
+ "learning_rate": 7.821210662123284e-06,
1394
+ "loss": 0.4731,
1395
+ "step": 198
1396
+ },
1397
+ {
1398
+ "epoch": 1.1431306845380564,
1399
+ "grad_norm": 0.4688943922519684,
1400
+ "learning_rate": 7.793495887930551e-06,
1401
+ "loss": 0.4456,
1402
+ "step": 199
1403
+ },
1404
+ {
1405
+ "epoch": 1.1488750598372426,
1406
+ "grad_norm": 0.5395812392234802,
1407
+ "learning_rate": 7.765655770625997e-06,
1408
+ "loss": 0.466,
1409
+ "step": 200
1410
+ },
1411
+ {
1412
+ "epoch": 1.154619435136429,
1413
+ "grad_norm": 0.6602702140808105,
1414
+ "learning_rate": 7.737691559385237e-06,
1415
+ "loss": 0.4981,
1416
+ "step": 201
1417
+ },
1418
+ {
1419
+ "epoch": 1.160363810435615,
1420
+ "grad_norm": 0.48276349902153015,
1421
+ "learning_rate": 7.709604508951927e-06,
1422
+ "loss": 0.5092,
1423
+ "step": 202
1424
+ },
1425
+ {
1426
+ "epoch": 1.1661081857348012,
1427
+ "grad_norm": 0.5397800803184509,
1428
+ "learning_rate": 7.68139587958148e-06,
1429
+ "loss": 0.4675,
1430
+ "step": 203
1431
+ },
1432
+ {
1433
+ "epoch": 1.1718525610339876,
1434
+ "grad_norm": 0.5717769861221313,
1435
+ "learning_rate": 7.653066936984504e-06,
1436
+ "loss": 0.5035,
1437
+ "step": 204
1438
+ },
1439
+ {
1440
+ "epoch": 1.1775969363331737,
1441
+ "grad_norm": 0.5147746801376343,
1442
+ "learning_rate": 7.6246189522700205e-06,
1443
+ "loss": 0.5191,
1444
+ "step": 205
1445
+ },
1446
+ {
1447
+ "epoch": 1.18334131163236,
1448
+ "grad_norm": 0.4830034077167511,
1449
+ "learning_rate": 7.596053201888425e-06,
1450
+ "loss": 0.4659,
1451
+ "step": 206
1452
+ },
1453
+ {
1454
+ "epoch": 1.1890856869315463,
1455
+ "grad_norm": 0.5149533748626709,
1456
+ "learning_rate": 7.56737096757421e-06,
1457
+ "loss": 0.4666,
1458
+ "step": 207
1459
+ },
1460
+ {
1461
+ "epoch": 1.1948300622307324,
1462
+ "grad_norm": 0.47677546739578247,
1463
+ "learning_rate": 7.538573536288466e-06,
1464
+ "loss": 0.4842,
1465
+ "step": 208
1466
+ },
1467
+ {
1468
+ "epoch": 1.2005744375299185,
1469
+ "grad_norm": 0.42174142599105835,
1470
+ "learning_rate": 7.509662200161122e-06,
1471
+ "loss": 0.4519,
1472
+ "step": 209
1473
+ },
1474
+ {
1475
+ "epoch": 1.206318812829105,
1476
+ "grad_norm": 0.5489972829818726,
1477
+ "learning_rate": 7.480638256432977e-06,
1478
+ "loss": 0.5119,
1479
+ "step": 210
1480
+ },
1481
+ {
1482
+ "epoch": 1.212063188128291,
1483
+ "grad_norm": 0.4667467474937439,
1484
+ "learning_rate": 7.4515030073974915e-06,
1485
+ "loss": 0.4918,
1486
+ "step": 211
1487
+ },
1488
+ {
1489
+ "epoch": 1.2178075634274772,
1490
+ "grad_norm": 0.5094060897827148,
1491
+ "learning_rate": 7.422257760342351e-06,
1492
+ "loss": 0.4915,
1493
+ "step": 212
1494
+ },
1495
+ {
1496
+ "epoch": 1.2235519387266636,
1497
+ "grad_norm": 0.5071128606796265,
1498
+ "learning_rate": 7.392903827490814e-06,
1499
+ "loss": 0.4598,
1500
+ "step": 213
1501
+ },
1502
+ {
1503
+ "epoch": 1.2292963140258497,
1504
+ "grad_norm": 0.49673011898994446,
1505
+ "learning_rate": 7.363442525942827e-06,
1506
+ "loss": 0.4467,
1507
+ "step": 214
1508
+ },
1509
+ {
1510
+ "epoch": 1.2350406893250359,
1511
+ "grad_norm": 0.46016961336135864,
1512
+ "learning_rate": 7.333875177615931e-06,
1513
+ "loss": 0.4927,
1514
+ "step": 215
1515
+ },
1516
+ {
1517
+ "epoch": 1.2407850646242222,
1518
+ "grad_norm": 0.5728297829627991,
1519
+ "learning_rate": 7.304203109185947e-06,
1520
+ "loss": 0.5002,
1521
+ "step": 216
1522
+ },
1523
+ {
1524
+ "epoch": 1.2465294399234084,
1525
+ "grad_norm": 0.43362411856651306,
1526
+ "learning_rate": 7.274427652027444e-06,
1527
+ "loss": 0.4451,
1528
+ "step": 217
1529
+ },
1530
+ {
1531
+ "epoch": 1.2522738152225945,
1532
+ "grad_norm": 0.496272474527359,
1533
+ "learning_rate": 7.244550142154009e-06,
1534
+ "loss": 0.4794,
1535
+ "step": 218
1536
+ },
1537
+ {
1538
+ "epoch": 1.2580181905217809,
1539
+ "grad_norm": 0.5590081810951233,
1540
+ "learning_rate": 7.214571920158293e-06,
1541
+ "loss": 0.4845,
1542
+ "step": 219
1543
+ },
1544
+ {
1545
+ "epoch": 1.263762565820967,
1546
+ "grad_norm": 0.5159772038459778,
1547
+ "learning_rate": 7.1844943311518665e-06,
1548
+ "loss": 0.5251,
1549
+ "step": 220
1550
+ },
1551
+ {
1552
+ "epoch": 1.2695069411201532,
1553
+ "grad_norm": 0.4696570932865143,
1554
+ "learning_rate": 7.1543187247048525e-06,
1555
+ "loss": 0.5033,
1556
+ "step": 221
1557
+ },
1558
+ {
1559
+ "epoch": 1.2752513164193395,
1560
+ "grad_norm": 0.5043511390686035,
1561
+ "learning_rate": 7.124046454785387e-06,
1562
+ "loss": 0.4739,
1563
+ "step": 222
1564
+ },
1565
+ {
1566
+ "epoch": 1.2809956917185257,
1567
+ "grad_norm": 0.5755232572555542,
1568
+ "learning_rate": 7.093678879698858e-06,
1569
+ "loss": 0.5209,
1570
+ "step": 223
1571
+ },
1572
+ {
1573
+ "epoch": 1.2867400670177118,
1574
+ "grad_norm": 0.46288853883743286,
1575
+ "learning_rate": 7.063217362026957e-06,
1576
+ "loss": 0.4692,
1577
+ "step": 224
1578
+ },
1579
+ {
1580
+ "epoch": 1.292484442316898,
1581
+ "grad_norm": 0.4486297369003296,
1582
+ "learning_rate": 7.032663268566547e-06,
1583
+ "loss": 0.4581,
1584
+ "step": 225
1585
+ },
1586
+ {
1587
+ "epoch": 1.2982288176160843,
1588
+ "grad_norm": 0.5111509561538696,
1589
+ "learning_rate": 7.002017970268336e-06,
1590
+ "loss": 0.4648,
1591
+ "step": 226
1592
+ },
1593
+ {
1594
+ "epoch": 1.3039731929152705,
1595
+ "grad_norm": 0.5335001349449158,
1596
+ "learning_rate": 6.97128284217535e-06,
1597
+ "loss": 0.508,
1598
+ "step": 227
1599
+ },
1600
+ {
1601
+ "epoch": 1.3097175682144566,
1602
+ "grad_norm": 0.5523168444633484,
1603
+ "learning_rate": 6.9404592633612486e-06,
1604
+ "loss": 0.546,
1605
+ "step": 228
1606
+ },
1607
+ {
1608
+ "epoch": 1.3154619435136428,
1609
+ "grad_norm": 0.4908071458339691,
1610
+ "learning_rate": 6.909548616868444e-06,
1611
+ "loss": 0.4959,
1612
+ "step": 229
1613
+ },
1614
+ {
1615
+ "epoch": 1.3212063188128291,
1616
+ "grad_norm": 0.4557272791862488,
1617
+ "learning_rate": 6.878552289646041e-06,
1618
+ "loss": 0.4815,
1619
+ "step": 230
1620
+ },
1621
+ {
1622
+ "epoch": 1.3269506941120153,
1623
+ "grad_norm": 0.4479963779449463,
1624
+ "learning_rate": 6.847471672487607e-06,
1625
+ "loss": 0.4357,
1626
+ "step": 231
1627
+ },
1628
+ {
1629
+ "epoch": 1.3326950694112014,
1630
+ "grad_norm": 0.4967019855976105,
1631
+ "learning_rate": 6.816308159968761e-06,
1632
+ "loss": 0.539,
1633
+ "step": 232
1634
+ },
1635
+ {
1636
+ "epoch": 1.3384394447103878,
1637
+ "grad_norm": 0.4799559414386749,
1638
+ "learning_rate": 6.7850631503846165e-06,
1639
+ "loss": 0.4802,
1640
+ "step": 233
1641
+ },
1642
+ {
1643
+ "epoch": 1.344183820009574,
1644
+ "grad_norm": 0.485273540019989,
1645
+ "learning_rate": 6.753738045687021e-06,
1646
+ "loss": 0.4945,
1647
+ "step": 234
1648
+ },
1649
+ {
1650
+ "epoch": 1.34992819530876,
1651
+ "grad_norm": 0.4190736711025238,
1652
+ "learning_rate": 6.722334251421665e-06,
1653
+ "loss": 0.4703,
1654
+ "step": 235
1655
+ },
1656
+ {
1657
+ "epoch": 1.3556725706079464,
1658
+ "grad_norm": 0.4951339364051819,
1659
+ "learning_rate": 6.690853176665007e-06,
1660
+ "loss": 0.5339,
1661
+ "step": 236
1662
+ },
1663
+ {
1664
+ "epoch": 1.3614169459071326,
1665
+ "grad_norm": 0.4466400742530823,
1666
+ "learning_rate": 6.659296233961055e-06,
1667
+ "loss": 0.4521,
1668
+ "step": 237
1669
+ },
1670
+ {
1671
+ "epoch": 1.3671613212063187,
1672
+ "grad_norm": 0.4460606575012207,
1673
+ "learning_rate": 6.627664839257979e-06,
1674
+ "loss": 0.4827,
1675
+ "step": 238
1676
+ },
1677
+ {
1678
+ "epoch": 1.372905696505505,
1679
+ "grad_norm": 0.49249327182769775,
1680
+ "learning_rate": 6.595960411844589e-06,
1681
+ "loss": 0.5051,
1682
+ "step": 239
1683
+ },
1684
+ {
1685
+ "epoch": 1.3786500718046912,
1686
+ "grad_norm": 0.491413414478302,
1687
+ "learning_rate": 6.564184374286636e-06,
1688
+ "loss": 0.5031,
1689
+ "step": 240
1690
+ },
1691
+ {
1692
+ "epoch": 1.3843944471038774,
1693
+ "grad_norm": 0.42033085227012634,
1694
+ "learning_rate": 6.532338152363001e-06,
1695
+ "loss": 0.4646,
1696
+ "step": 241
1697
+ },
1698
+ {
1699
+ "epoch": 1.3901388224030637,
1700
+ "grad_norm": 0.4750801920890808,
1701
+ "learning_rate": 6.500423175001705e-06,
1702
+ "loss": 0.4337,
1703
+ "step": 242
1704
+ },
1705
+ {
1706
+ "epoch": 1.39588319770225,
1707
+ "grad_norm": 0.5122845768928528,
1708
+ "learning_rate": 6.468440874215801e-06,
1709
+ "loss": 0.5052,
1710
+ "step": 243
1711
+ },
1712
+ {
1713
+ "epoch": 1.401627573001436,
1714
+ "grad_norm": 0.4859732985496521,
1715
+ "learning_rate": 6.43639268503912e-06,
1716
+ "loss": 0.499,
1717
+ "step": 244
1718
+ },
1719
+ {
1720
+ "epoch": 1.4073719483006224,
1721
+ "grad_norm": 0.512253999710083,
1722
+ "learning_rate": 6.40428004546188e-06,
1723
+ "loss": 0.5064,
1724
+ "step": 245
1725
+ },
1726
+ {
1727
+ "epoch": 1.4131163235998085,
1728
+ "grad_norm": 0.46881169080734253,
1729
+ "learning_rate": 6.372104396366162e-06,
1730
+ "loss": 0.4765,
1731
+ "step": 246
1732
+ },
1733
+ {
1734
+ "epoch": 1.4188606988989947,
1735
+ "grad_norm": 0.4910268187522888,
1736
+ "learning_rate": 6.339867181461265e-06,
1737
+ "loss": 0.5059,
1738
+ "step": 247
1739
+ },
1740
+ {
1741
+ "epoch": 1.424605074198181,
1742
+ "grad_norm": 0.43128618597984314,
1743
+ "learning_rate": 6.307569847218917e-06,
1744
+ "loss": 0.4667,
1745
+ "step": 248
1746
+ },
1747
+ {
1748
+ "epoch": 1.4303494494973672,
1749
+ "grad_norm": 0.4424154460430145,
1750
+ "learning_rate": 6.275213842808383e-06,
1751
+ "loss": 0.4834,
1752
+ "step": 249
1753
+ },
1754
+ {
1755
+ "epoch": 1.4360938247965533,
1756
+ "grad_norm": 0.49621695280075073,
1757
+ "learning_rate": 6.242800620031434e-06,
1758
+ "loss": 0.4701,
1759
+ "step": 250
1760
+ },
1761
+ {
1762
+ "epoch": 1.4418382000957397,
1763
+ "grad_norm": 0.4383918344974518,
1764
+ "learning_rate": 6.2103316332572095e-06,
1765
+ "loss": 0.5017,
1766
+ "step": 251
1767
+ },
1768
+ {
1769
+ "epoch": 1.4475825753949259,
1770
+ "grad_norm": 0.4955291748046875,
1771
+ "learning_rate": 6.177808339356954e-06,
1772
+ "loss": 0.5253,
1773
+ "step": 252
1774
+ },
1775
+ {
1776
+ "epoch": 1.453326950694112,
1777
+ "grad_norm": 0.43212026357650757,
1778
+ "learning_rate": 6.14523219763866e-06,
1779
+ "loss": 0.455,
1780
+ "step": 253
1781
+ },
1782
+ {
1783
+ "epoch": 1.4590713259932984,
1784
+ "grad_norm": 0.44476839900016785,
1785
+ "learning_rate": 6.112604669781572e-06,
1786
+ "loss": 0.4844,
1787
+ "step": 254
1788
+ },
1789
+ {
1790
+ "epoch": 1.4648157012924845,
1791
+ "grad_norm": 0.46556708216667175,
1792
+ "learning_rate": 6.079927219770623e-06,
1793
+ "loss": 0.4957,
1794
+ "step": 255
1795
+ },
1796
+ {
1797
+ "epoch": 1.4705600765916707,
1798
+ "grad_norm": 0.4209665060043335,
1799
+ "learning_rate": 6.047201313830724e-06,
1800
+ "loss": 0.4859,
1801
+ "step": 256
1802
+ },
1803
+ {
1804
+ "epoch": 1.4763044518908568,
1805
+ "grad_norm": 0.4632583558559418,
1806
+ "learning_rate": 6.014428420360987e-06,
1807
+ "loss": 0.5136,
1808
+ "step": 257
1809
+ },
1810
+ {
1811
+ "epoch": 1.4820488271900432,
1812
+ "grad_norm": 0.4351138472557068,
1813
+ "learning_rate": 5.9816100098688456e-06,
1814
+ "loss": 0.4618,
1815
+ "step": 258
1816
+ },
1817
+ {
1818
+ "epoch": 1.4877932024892293,
1819
+ "grad_norm": 0.4440339207649231,
1820
+ "learning_rate": 5.948747554904054e-06,
1821
+ "loss": 0.5211,
1822
+ "step": 259
1823
+ },
1824
+ {
1825
+ "epoch": 1.4935375777884154,
1826
+ "grad_norm": 0.49033647775650024,
1827
+ "learning_rate": 5.915842529992632e-06,
1828
+ "loss": 0.4836,
1829
+ "step": 260
1830
+ },
1831
+ {
1832
+ "epoch": 1.4992819530876016,
1833
+ "grad_norm": 0.45823630690574646,
1834
+ "learning_rate": 5.8828964115706925e-06,
1835
+ "loss": 0.4288,
1836
+ "step": 261
1837
+ },
1838
+ {
1839
+ "epoch": 1.505026328386788,
1840
+ "grad_norm": 0.4555763304233551,
1841
+ "learning_rate": 5.849910677918205e-06,
1842
+ "loss": 0.4548,
1843
+ "step": 262
1844
+ },
1845
+ {
1846
+ "epoch": 1.510770703685974,
1847
+ "grad_norm": 0.46555453538894653,
1848
+ "learning_rate": 5.816886809092651e-06,
1849
+ "loss": 0.4688,
1850
+ "step": 263
1851
+ },
1852
+ {
1853
+ "epoch": 1.5165150789851602,
1854
+ "grad_norm": 0.5020288825035095,
1855
+ "learning_rate": 5.783826286862631e-06,
1856
+ "loss": 0.5108,
1857
+ "step": 264
1858
+ },
1859
+ {
1860
+ "epoch": 1.5222594542843466,
1861
+ "grad_norm": 0.410932719707489,
1862
+ "learning_rate": 5.750730594641367e-06,
1863
+ "loss": 0.4571,
1864
+ "step": 265
1865
+ },
1866
+ {
1867
+ "epoch": 1.5280038295835328,
1868
+ "grad_norm": 0.46809419989585876,
1869
+ "learning_rate": 5.717601217420143e-06,
1870
+ "loss": 0.4691,
1871
+ "step": 266
1872
+ },
1873
+ {
1874
+ "epoch": 1.533748204882719,
1875
+ "grad_norm": 0.44337236881256104,
1876
+ "learning_rate": 5.68443964170168e-06,
1877
+ "loss": 0.477,
1878
+ "step": 267
1879
+ },
1880
+ {
1881
+ "epoch": 1.5394925801819053,
1882
+ "grad_norm": 0.4952318072319031,
1883
+ "learning_rate": 5.6512473554334294e-06,
1884
+ "loss": 0.49,
1885
+ "step": 268
1886
+ },
1887
+ {
1888
+ "epoch": 1.5452369554810914,
1889
+ "grad_norm": 0.46753644943237305,
1890
+ "learning_rate": 5.618025847940817e-06,
1891
+ "loss": 0.4848,
1892
+ "step": 269
1893
+ },
1894
+ {
1895
+ "epoch": 1.5509813307802776,
1896
+ "grad_norm": 0.45212316513061523,
1897
+ "learning_rate": 5.584776609860414e-06,
1898
+ "loss": 0.4696,
1899
+ "step": 270
1900
+ },
1901
+ {
1902
+ "epoch": 1.556725706079464,
1903
+ "grad_norm": 0.5053378939628601,
1904
+ "learning_rate": 5.551501133073048e-06,
1905
+ "loss": 0.5447,
1906
+ "step": 271
1907
+ },
1908
+ {
1909
+ "epoch": 1.56247008137865,
1910
+ "grad_norm": 0.4040236473083496,
1911
+ "learning_rate": 5.518200910636875e-06,
1912
+ "loss": 0.4678,
1913
+ "step": 272
1914
+ },
1915
+ {
1916
+ "epoch": 1.5682144566778362,
1917
+ "grad_norm": 0.4419972002506256,
1918
+ "learning_rate": 5.4848774367203715e-06,
1919
+ "loss": 0.4994,
1920
+ "step": 273
1921
+ },
1922
+ {
1923
+ "epoch": 1.5739588319770226,
1924
+ "grad_norm": 0.43512895703315735,
1925
+ "learning_rate": 5.451532206535306e-06,
1926
+ "loss": 0.5056,
1927
+ "step": 274
1928
+ },
1929
+ {
1930
+ "epoch": 1.5797032072762087,
1931
+ "grad_norm": 0.492079496383667,
1932
+ "learning_rate": 5.418166716269636e-06,
1933
+ "loss": 0.4957,
1934
+ "step": 275
1935
+ },
1936
+ {
1937
+ "epoch": 1.5854475825753949,
1938
+ "grad_norm": 0.4276512861251831,
1939
+ "learning_rate": 5.384782463020385e-06,
1940
+ "loss": 0.4901,
1941
+ "step": 276
1942
+ },
1943
+ {
1944
+ "epoch": 1.5911919578745812,
1945
+ "grad_norm": 0.39996013045310974,
1946
+ "learning_rate": 5.351380944726465e-06,
1947
+ "loss": 0.4862,
1948
+ "step": 277
1949
+ },
1950
+ {
1951
+ "epoch": 1.5969363331737674,
1952
+ "grad_norm": 0.3694234788417816,
1953
+ "learning_rate": 5.317963660101464e-06,
1954
+ "loss": 0.4218,
1955
+ "step": 278
1956
+ },
1957
+ {
1958
+ "epoch": 1.6026807084729535,
1959
+ "grad_norm": 0.4611278772354126,
1960
+ "learning_rate": 5.284532108566396e-06,
1961
+ "loss": 0.4908,
1962
+ "step": 279
1963
+ },
1964
+ {
1965
+ "epoch": 1.6084250837721399,
1966
+ "grad_norm": 0.435573548078537,
1967
+ "learning_rate": 5.251087790182428e-06,
1968
+ "loss": 0.4905,
1969
+ "step": 280
1970
+ },
1971
+ {
1972
+ "epoch": 1.614169459071326,
1973
+ "grad_norm": 0.4718412458896637,
1974
+ "learning_rate": 5.217632205583574e-06,
1975
+ "loss": 0.5055,
1976
+ "step": 281
1977
+ },
1978
+ {
1979
+ "epoch": 1.6199138343705122,
1980
+ "grad_norm": 0.41079840064048767,
1981
+ "learning_rate": 5.184166855909355e-06,
1982
+ "loss": 0.4872,
1983
+ "step": 282
1984
+ },
1985
+ {
1986
+ "epoch": 1.6256582096696985,
1987
+ "grad_norm": 0.42057737708091736,
1988
+ "learning_rate": 5.150693242737444e-06,
1989
+ "loss": 0.5028,
1990
+ "step": 283
1991
+ },
1992
+ {
1993
+ "epoch": 1.6314025849688847,
1994
+ "grad_norm": 0.43592604994773865,
1995
+ "learning_rate": 5.117212868016303e-06,
1996
+ "loss": 0.5228,
1997
+ "step": 284
1998
+ },
1999
+ {
2000
+ "epoch": 1.6371469602680708,
2001
+ "grad_norm": 0.4700012803077698,
2002
+ "learning_rate": 5.083727233997775e-06,
2003
+ "loss": 0.4603,
2004
+ "step": 285
2005
+ },
2006
+ {
2007
+ "epoch": 1.6428913355672572,
2008
+ "grad_norm": 0.4413910210132599,
2009
+ "learning_rate": 5.05023784316969e-06,
2010
+ "loss": 0.4787,
2011
+ "step": 286
2012
+ },
2013
+ {
2014
+ "epoch": 1.6486357108664431,
2015
+ "grad_norm": 0.4498383700847626,
2016
+ "learning_rate": 5.016746198188439e-06,
2017
+ "loss": 0.5171,
2018
+ "step": 287
2019
+ },
2020
+ {
2021
+ "epoch": 1.6543800861656295,
2022
+ "grad_norm": 0.3725832998752594,
2023
+ "learning_rate": 4.983253801811562e-06,
2024
+ "loss": 0.447,
2025
+ "step": 288
2026
+ },
2027
+ {
2028
+ "epoch": 1.6601244614648158,
2029
+ "grad_norm": 0.41565757989883423,
2030
+ "learning_rate": 4.949762156830312e-06,
2031
+ "loss": 0.5029,
2032
+ "step": 289
2033
+ },
2034
+ {
2035
+ "epoch": 1.6658688367640018,
2036
+ "grad_norm": 0.4530212879180908,
2037
+ "learning_rate": 4.916272766002227e-06,
2038
+ "loss": 0.487,
2039
+ "step": 290
2040
+ },
2041
+ {
2042
+ "epoch": 1.6716132120631881,
2043
+ "grad_norm": 0.44627729058265686,
2044
+ "learning_rate": 4.882787131983698e-06,
2045
+ "loss": 0.4529,
2046
+ "step": 291
2047
+ },
2048
+ {
2049
+ "epoch": 1.6773575873623745,
2050
+ "grad_norm": 0.4549354612827301,
2051
+ "learning_rate": 4.849306757262558e-06,
2052
+ "loss": 0.4706,
2053
+ "step": 292
2054
+ },
2055
+ {
2056
+ "epoch": 1.6831019626615604,
2057
+ "grad_norm": 0.48366779088974,
2058
+ "learning_rate": 4.8158331440906466e-06,
2059
+ "loss": 0.5149,
2060
+ "step": 293
2061
+ },
2062
+ {
2063
+ "epoch": 1.6888463379607468,
2064
+ "grad_norm": 0.46364232897758484,
2065
+ "learning_rate": 4.7823677944164285e-06,
2066
+ "loss": 0.5396,
2067
+ "step": 294
2068
+ },
2069
+ {
2070
+ "epoch": 1.694590713259933,
2071
+ "grad_norm": 0.43879616260528564,
2072
+ "learning_rate": 4.748912209817572e-06,
2073
+ "loss": 0.4773,
2074
+ "step": 295
2075
+ },
2076
+ {
2077
+ "epoch": 1.700335088559119,
2078
+ "grad_norm": 0.3839658200740814,
2079
+ "learning_rate": 4.715467891433607e-06,
2080
+ "loss": 0.4477,
2081
+ "step": 296
2082
+ },
2083
+ {
2084
+ "epoch": 1.7060794638583054,
2085
+ "grad_norm": 0.4020622968673706,
2086
+ "learning_rate": 4.682036339898537e-06,
2087
+ "loss": 0.4777,
2088
+ "step": 297
2089
+ },
2090
+ {
2091
+ "epoch": 1.7118238391574916,
2092
+ "grad_norm": 0.42523112893104553,
2093
+ "learning_rate": 4.6486190552735375e-06,
2094
+ "loss": 0.5096,
2095
+ "step": 298
2096
+ },
2097
+ {
2098
+ "epoch": 1.7175682144566777,
2099
+ "grad_norm": 0.43856796622276306,
2100
+ "learning_rate": 4.615217536979616e-06,
2101
+ "loss": 0.4802,
2102
+ "step": 299
2103
+ },
2104
+ {
2105
+ "epoch": 1.723312589755864,
2106
+ "grad_norm": 0.3956157863140106,
2107
+ "learning_rate": 4.581833283730367e-06,
2108
+ "loss": 0.4479,
2109
+ "step": 300
2110
+ },
2111
+ {
2112
+ "epoch": 1.7290569650550502,
2113
+ "grad_norm": 0.4652330279350281,
2114
+ "learning_rate": 4.548467793464696e-06,
2115
+ "loss": 0.5231,
2116
+ "step": 301
2117
+ },
2118
+ {
2119
+ "epoch": 1.7348013403542364,
2120
+ "grad_norm": 0.3997798562049866,
2121
+ "learning_rate": 4.515122563279631e-06,
2122
+ "loss": 0.4558,
2123
+ "step": 302
2124
+ },
2125
+ {
2126
+ "epoch": 1.7405457156534228,
2127
+ "grad_norm": 0.4045880138874054,
2128
+ "learning_rate": 4.481799089363127e-06,
2129
+ "loss": 0.4589,
2130
+ "step": 303
2131
+ },
2132
+ {
2133
+ "epoch": 1.746290090952609,
2134
+ "grad_norm": 0.4493541717529297,
2135
+ "learning_rate": 4.448498866926952e-06,
2136
+ "loss": 0.4963,
2137
+ "step": 304
2138
+ },
2139
+ {
2140
+ "epoch": 1.752034466251795,
2141
+ "grad_norm": 0.3999355435371399,
2142
+ "learning_rate": 4.415223390139588e-06,
2143
+ "loss": 0.5129,
2144
+ "step": 305
2145
+ },
2146
+ {
2147
+ "epoch": 1.7577788415509814,
2148
+ "grad_norm": 0.40998125076293945,
2149
+ "learning_rate": 4.381974152059184e-06,
2150
+ "loss": 0.49,
2151
+ "step": 306
2152
+ },
2153
+ {
2154
+ "epoch": 1.7635232168501676,
2155
+ "grad_norm": 0.3941861391067505,
2156
+ "learning_rate": 4.348752644566573e-06,
2157
+ "loss": 0.4556,
2158
+ "step": 307
2159
+ },
2160
+ {
2161
+ "epoch": 1.7692675921493537,
2162
+ "grad_norm": 0.4115658402442932,
2163
+ "learning_rate": 4.315560358298321e-06,
2164
+ "loss": 0.4988,
2165
+ "step": 308
2166
+ },
2167
+ {
2168
+ "epoch": 1.77501196744854,
2169
+ "grad_norm": 0.4041730463504791,
2170
+ "learning_rate": 4.2823987825798575e-06,
2171
+ "loss": 0.4673,
2172
+ "step": 309
2173
+ },
2174
+ {
2175
+ "epoch": 1.7807563427477262,
2176
+ "grad_norm": 0.3939685821533203,
2177
+ "learning_rate": 4.249269405358634e-06,
2178
+ "loss": 0.4542,
2179
+ "step": 310
2180
+ },
2181
+ {
2182
+ "epoch": 1.7865007180469124,
2183
+ "grad_norm": 0.4312926232814789,
2184
+ "learning_rate": 4.2161737131373695e-06,
2185
+ "loss": 0.5143,
2186
+ "step": 311
2187
+ },
2188
+ {
2189
+ "epoch": 1.7922450933460987,
2190
+ "grad_norm": 0.4078906178474426,
2191
+ "learning_rate": 4.183113190907349e-06,
2192
+ "loss": 0.475,
2193
+ "step": 312
2194
+ },
2195
+ {
2196
+ "epoch": 1.7979894686452849,
2197
+ "grad_norm": 0.42950204014778137,
2198
+ "learning_rate": 4.150089322081797e-06,
2199
+ "loss": 0.4607,
2200
+ "step": 313
2201
+ },
2202
+ {
2203
+ "epoch": 1.803733843944471,
2204
+ "grad_norm": 0.37166374921798706,
2205
+ "learning_rate": 4.1171035884293075e-06,
2206
+ "loss": 0.4488,
2207
+ "step": 314
2208
+ },
2209
+ {
2210
+ "epoch": 1.8094782192436574,
2211
+ "grad_norm": 0.38504746556282043,
2212
+ "learning_rate": 4.084157470007371e-06,
2213
+ "loss": 0.4823,
2214
+ "step": 315
2215
+ },
2216
+ {
2217
+ "epoch": 1.8152225945428435,
2218
+ "grad_norm": 0.45277538895606995,
2219
+ "learning_rate": 4.051252445095946e-06,
2220
+ "loss": 0.5264,
2221
+ "step": 316
2222
+ },
2223
+ {
2224
+ "epoch": 1.8209669698420297,
2225
+ "grad_norm": 0.3826143443584442,
2226
+ "learning_rate": 4.018389990131156e-06,
2227
+ "loss": 0.4834,
2228
+ "step": 317
2229
+ },
2230
+ {
2231
+ "epoch": 1.826711345141216,
2232
+ "grad_norm": 0.36567309498786926,
2233
+ "learning_rate": 3.985571579639013e-06,
2234
+ "loss": 0.4675,
2235
+ "step": 318
2236
+ },
2237
+ {
2238
+ "epoch": 1.832455720440402,
2239
+ "grad_norm": 0.38398852944374084,
2240
+ "learning_rate": 3.952798686169279e-06,
2241
+ "loss": 0.468,
2242
+ "step": 319
2243
+ },
2244
+ {
2245
+ "epoch": 1.8382000957395883,
2246
+ "grad_norm": 0.46181172132492065,
2247
+ "learning_rate": 3.920072780229378e-06,
2248
+ "loss": 0.4756,
2249
+ "step": 320
2250
+ },
2251
+ {
2252
+ "epoch": 1.8439444710387747,
2253
+ "grad_norm": 0.4305135905742645,
2254
+ "learning_rate": 3.887395330218429e-06,
2255
+ "loss": 0.5031,
2256
+ "step": 321
2257
+ },
2258
+ {
2259
+ "epoch": 1.8496888463379606,
2260
+ "grad_norm": 0.36271166801452637,
2261
+ "learning_rate": 3.854767802361342e-06,
2262
+ "loss": 0.4411,
2263
+ "step": 322
2264
+ },
2265
+ {
2266
+ "epoch": 1.855433221637147,
2267
+ "grad_norm": 0.441859632730484,
2268
+ "learning_rate": 3.822191660643047e-06,
2269
+ "loss": 0.5007,
2270
+ "step": 323
2271
+ },
2272
+ {
2273
+ "epoch": 1.8611775969363333,
2274
+ "grad_norm": 0.382299542427063,
2275
+ "learning_rate": 3.789668366742792e-06,
2276
+ "loss": 0.5045,
2277
+ "step": 324
2278
+ },
2279
+ {
2280
+ "epoch": 1.8669219722355193,
2281
+ "grad_norm": 0.408326119184494,
2282
+ "learning_rate": 3.7571993799685675e-06,
2283
+ "loss": 0.4843,
2284
+ "step": 325
2285
+ },
2286
+ {
2287
+ "epoch": 1.8726663475347056,
2288
+ "grad_norm": 0.42178499698638916,
2289
+ "learning_rate": 3.7247861571916183e-06,
2290
+ "loss": 0.4641,
2291
+ "step": 326
2292
+ },
2293
+ {
2294
+ "epoch": 1.8784107228338918,
2295
+ "grad_norm": 0.4093269407749176,
2296
+ "learning_rate": 3.6924301527810856e-06,
2297
+ "loss": 0.4825,
2298
+ "step": 327
2299
+ },
2300
+ {
2301
+ "epoch": 1.884155098133078,
2302
+ "grad_norm": 0.41595694422721863,
2303
+ "learning_rate": 3.6601328185387364e-06,
2304
+ "loss": 0.4981,
2305
+ "step": 328
2306
+ },
2307
+ {
2308
+ "epoch": 1.8898994734322643,
2309
+ "grad_norm": 0.38833877444267273,
2310
+ "learning_rate": 3.6278956036338397e-06,
2311
+ "loss": 0.46,
2312
+ "step": 329
2313
+ },
2314
+ {
2315
+ "epoch": 1.8956438487314504,
2316
+ "grad_norm": 0.4010329842567444,
2317
+ "learning_rate": 3.5957199545381216e-06,
2318
+ "loss": 0.4538,
2319
+ "step": 330
2320
+ },
2321
+ {
2322
+ "epoch": 1.9013882240306366,
2323
+ "grad_norm": 0.42216378450393677,
2324
+ "learning_rate": 3.5636073149608824e-06,
2325
+ "loss": 0.477,
2326
+ "step": 331
2327
+ },
2328
+ {
2329
+ "epoch": 1.907132599329823,
2330
+ "grad_norm": 0.39908266067504883,
2331
+ "learning_rate": 3.5315591257842e-06,
2332
+ "loss": 0.4271,
2333
+ "step": 332
2334
+ },
2335
+ {
2336
+ "epoch": 1.912876974629009,
2337
+ "grad_norm": 0.41619524359703064,
2338
+ "learning_rate": 3.4995768249982975e-06,
2339
+ "loss": 0.4588,
2340
+ "step": 333
2341
+ },
2342
+ {
2343
+ "epoch": 1.9186213499281952,
2344
+ "grad_norm": 0.3965836763381958,
2345
+ "learning_rate": 3.467661847637001e-06,
2346
+ "loss": 0.501,
2347
+ "step": 334
2348
+ },
2349
+ {
2350
+ "epoch": 1.9243657252273816,
2351
+ "grad_norm": 0.3913739025592804,
2352
+ "learning_rate": 3.4358156257133644e-06,
2353
+ "loss": 0.5025,
2354
+ "step": 335
2355
+ },
2356
+ {
2357
+ "epoch": 1.9301101005265677,
2358
+ "grad_norm": 0.42657947540283203,
2359
+ "learning_rate": 3.404039588155413e-06,
2360
+ "loss": 0.4434,
2361
+ "step": 336
2362
+ },
2363
+ {
2364
+ "epoch": 1.9358544758257539,
2365
+ "grad_norm": 0.381879597902298,
2366
+ "learning_rate": 3.372335160742022e-06,
2367
+ "loss": 0.4315,
2368
+ "step": 337
2369
+ },
2370
+ {
2371
+ "epoch": 1.9415988511249402,
2372
+ "grad_norm": 0.3740031123161316,
2373
+ "learning_rate": 3.3407037660389474e-06,
2374
+ "loss": 0.4561,
2375
+ "step": 338
2376
+ },
2377
+ {
2378
+ "epoch": 1.9473432264241264,
2379
+ "grad_norm": 0.4027283489704132,
2380
+ "learning_rate": 3.3091468233349934e-06,
2381
+ "loss": 0.521,
2382
+ "step": 339
2383
+ },
2384
+ {
2385
+ "epoch": 1.9530876017233125,
2386
+ "grad_norm": 0.4218166172504425,
2387
+ "learning_rate": 3.2776657485783357e-06,
2388
+ "loss": 0.4838,
2389
+ "step": 340
2390
+ },
2391
+ {
2392
+ "epoch": 1.958831977022499,
2393
+ "grad_norm": 0.42057153582572937,
2394
+ "learning_rate": 3.246261954312979e-06,
2395
+ "loss": 0.4954,
2396
+ "step": 341
2397
+ },
2398
+ {
2399
+ "epoch": 1.964576352321685,
2400
+ "grad_norm": 0.4057151675224304,
2401
+ "learning_rate": 3.2149368496153856e-06,
2402
+ "loss": 0.5009,
2403
+ "step": 342
2404
+ },
2405
+ {
2406
+ "epoch": 1.9703207276208712,
2407
+ "grad_norm": 0.43289145827293396,
2408
+ "learning_rate": 3.1836918400312387e-06,
2409
+ "loss": 0.4539,
2410
+ "step": 343
2411
+ },
2412
+ {
2413
+ "epoch": 1.9760651029200575,
2414
+ "grad_norm": 0.41700872778892517,
2415
+ "learning_rate": 3.152528327512395e-06,
2416
+ "loss": 0.4734,
2417
+ "step": 344
2418
+ },
2419
+ {
2420
+ "epoch": 1.9818094782192437,
2421
+ "grad_norm": 0.4243963062763214,
2422
+ "learning_rate": 3.1214477103539585e-06,
2423
+ "loss": 0.4877,
2424
+ "step": 345
2425
+ },
2426
+ {
2427
+ "epoch": 1.9875538535184298,
2428
+ "grad_norm": 0.4166891276836395,
2429
+ "learning_rate": 3.0904513831315563e-06,
2430
+ "loss": 0.5031,
2431
+ "step": 346
2432
+ },
2433
+ {
2434
+ "epoch": 1.9932982288176162,
2435
+ "grad_norm": 0.41003891825675964,
2436
+ "learning_rate": 3.059540736638751e-06,
2437
+ "loss": 0.5046,
2438
+ "step": 347
2439
+ },
2440
+ {
2441
+ "epoch": 1.9990426041168023,
2442
+ "grad_norm": 0.3934224843978882,
2443
+ "learning_rate": 3.028717157824652e-06,
2444
+ "loss": 0.4309,
2445
+ "step": 348
2446
+ },
2447
+ {
2448
+ "epoch": 2.0047869794159885,
2449
+ "grad_norm": 1.5628243684768677,
2450
+ "learning_rate": 2.9979820297316652e-06,
2451
+ "loss": 0.7723,
2452
+ "step": 349
2453
+ },
2454
+ {
2455
+ "epoch": 2.010531354715175,
2456
+ "grad_norm": 0.5319270491600037,
2457
+ "learning_rate": 2.9673367314334533e-06,
2458
+ "loss": 0.5022,
2459
+ "step": 350
2460
+ },
2461
+ {
2462
+ "epoch": 2.016275730014361,
2463
+ "grad_norm": 0.5007623434066772,
2464
+ "learning_rate": 2.936782637973044e-06,
2465
+ "loss": 0.4484,
2466
+ "step": 351
2467
+ },
2468
+ {
2469
+ "epoch": 2.022020105313547,
2470
+ "grad_norm": 0.4784458577632904,
2471
+ "learning_rate": 2.9063211203011443e-06,
2472
+ "loss": 0.454,
2473
+ "step": 352
2474
+ },
2475
+ {
2476
+ "epoch": 2.0277644806127335,
2477
+ "grad_norm": 0.3986125886440277,
2478
+ "learning_rate": 2.8759535452146128e-06,
2479
+ "loss": 0.4522,
2480
+ "step": 353
2481
+ },
2482
+ {
2483
+ "epoch": 2.0335088559119194,
2484
+ "grad_norm": 0.4448578953742981,
2485
+ "learning_rate": 2.8456812752951483e-06,
2486
+ "loss": 0.4543,
2487
+ "step": 354
2488
+ },
2489
+ {
2490
+ "epoch": 2.039253231211106,
2491
+ "grad_norm": 0.46886104345321655,
2492
+ "learning_rate": 2.815505668848136e-06,
2493
+ "loss": 0.4183,
2494
+ "step": 355
2495
+ },
2496
+ {
2497
+ "epoch": 2.044997606510292,
2498
+ "grad_norm": 0.438160240650177,
2499
+ "learning_rate": 2.785428079841709e-06,
2500
+ "loss": 0.4507,
2501
+ "step": 356
2502
+ },
2503
+ {
2504
+ "epoch": 2.050741981809478,
2505
+ "grad_norm": 0.412517249584198,
2506
+ "learning_rate": 2.755449857845992e-06,
2507
+ "loss": 0.4479,
2508
+ "step": 357
2509
+ },
2510
+ {
2511
+ "epoch": 2.0564863571086645,
2512
+ "grad_norm": 0.4198094308376312,
2513
+ "learning_rate": 2.725572347972558e-06,
2514
+ "loss": 0.4193,
2515
+ "step": 358
2516
+ },
2517
+ {
2518
+ "epoch": 2.062230732407851,
2519
+ "grad_norm": 0.4086361825466156,
2520
+ "learning_rate": 2.6957968908140546e-06,
2521
+ "loss": 0.4264,
2522
+ "step": 359
2523
+ },
2524
+ {
2525
+ "epoch": 2.0679751077070367,
2526
+ "grad_norm": 0.4513247013092041,
2527
+ "learning_rate": 2.666124822384071e-06,
2528
+ "loss": 0.4473,
2529
+ "step": 360
2530
+ },
2531
+ {
2532
+ "epoch": 2.073719483006223,
2533
+ "grad_norm": 0.3780079483985901,
2534
+ "learning_rate": 2.636557474057173e-06,
2535
+ "loss": 0.4005,
2536
+ "step": 361
2537
+ },
2538
+ {
2539
+ "epoch": 2.0794638583054095,
2540
+ "grad_norm": 0.3676167130470276,
2541
+ "learning_rate": 2.607096172509187e-06,
2542
+ "loss": 0.4392,
2543
+ "step": 362
2544
+ },
2545
+ {
2546
+ "epoch": 2.0852082336045954,
2547
+ "grad_norm": 0.44044387340545654,
2548
+ "learning_rate": 2.5777422396576503e-06,
2549
+ "loss": 0.4911,
2550
+ "step": 363
2551
+ },
2552
+ {
2553
+ "epoch": 2.0909526089037818,
2554
+ "grad_norm": 0.3587888479232788,
2555
+ "learning_rate": 2.5484969926025114e-06,
2556
+ "loss": 0.4374,
2557
+ "step": 364
2558
+ },
2559
+ {
2560
+ "epoch": 2.096696984202968,
2561
+ "grad_norm": 0.41746485233306885,
2562
+ "learning_rate": 2.5193617435670244e-06,
2563
+ "loss": 0.4883,
2564
+ "step": 365
2565
+ },
2566
+ {
2567
+ "epoch": 2.102441359502154,
2568
+ "grad_norm": 0.38463401794433594,
2569
+ "learning_rate": 2.4903377998388783e-06,
2570
+ "loss": 0.4396,
2571
+ "step": 366
2572
+ },
2573
+ {
2574
+ "epoch": 2.1081857348013404,
2575
+ "grad_norm": 0.395751953125,
2576
+ "learning_rate": 2.461426463711535e-06,
2577
+ "loss": 0.4429,
2578
+ "step": 367
2579
+ },
2580
+ {
2581
+ "epoch": 2.113930110100527,
2582
+ "grad_norm": 0.3884614109992981,
2583
+ "learning_rate": 2.4326290324257896e-06,
2584
+ "loss": 0.4227,
2585
+ "step": 368
2586
+ },
2587
+ {
2588
+ "epoch": 2.1196744853997127,
2589
+ "grad_norm": 0.3690508306026459,
2590
+ "learning_rate": 2.403946798111576e-06,
2591
+ "loss": 0.4141,
2592
+ "step": 369
2593
+ },
2594
+ {
2595
+ "epoch": 2.125418860698899,
2596
+ "grad_norm": 0.4512092173099518,
2597
+ "learning_rate": 2.37538104772998e-06,
2598
+ "loss": 0.4846,
2599
+ "step": 370
2600
+ },
2601
+ {
2602
+ "epoch": 2.131163235998085,
2603
+ "grad_norm": 0.341816782951355,
2604
+ "learning_rate": 2.3469330630154974e-06,
2605
+ "loss": 0.383,
2606
+ "step": 371
2607
+ },
2608
+ {
2609
+ "epoch": 2.1369076112972714,
2610
+ "grad_norm": 0.39507344365119934,
2611
+ "learning_rate": 2.318604120418521e-06,
2612
+ "loss": 0.4662,
2613
+ "step": 372
2614
+ },
2615
+ {
2616
+ "epoch": 2.1426519865964577,
2617
+ "grad_norm": 0.36571794748306274,
2618
+ "learning_rate": 2.2903954910480746e-06,
2619
+ "loss": 0.4621,
2620
+ "step": 373
2621
+ },
2622
+ {
2623
+ "epoch": 2.1483963618956436,
2624
+ "grad_norm": 0.3868046700954437,
2625
+ "learning_rate": 2.2623084406147643e-06,
2626
+ "loss": 0.4306,
2627
+ "step": 374
2628
+ },
2629
+ {
2630
+ "epoch": 2.15414073719483,
2631
+ "grad_norm": 0.37293747067451477,
2632
+ "learning_rate": 2.234344229374003e-06,
2633
+ "loss": 0.441,
2634
+ "step": 375
2635
+ },
2636
+ {
2637
+ "epoch": 2.1598851124940164,
2638
+ "grad_norm": 0.38192519545555115,
2639
+ "learning_rate": 2.2065041120694487e-06,
2640
+ "loss": 0.442,
2641
+ "step": 376
2642
+ },
2643
+ {
2644
+ "epoch": 2.1656294877932023,
2645
+ "grad_norm": 0.373929888010025,
2646
+ "learning_rate": 2.178789337876716e-06,
2647
+ "loss": 0.4448,
2648
+ "step": 377
2649
+ },
2650
+ {
2651
+ "epoch": 2.1713738630923887,
2652
+ "grad_norm": 0.40156957507133484,
2653
+ "learning_rate": 2.151201150347318e-06,
2654
+ "loss": 0.4359,
2655
+ "step": 378
2656
+ },
2657
+ {
2658
+ "epoch": 2.177118238391575,
2659
+ "grad_norm": 0.3525901138782501,
2660
+ "learning_rate": 2.123740787352872e-06,
2661
+ "loss": 0.4045,
2662
+ "step": 379
2663
+ },
2664
+ {
2665
+ "epoch": 2.182862613690761,
2666
+ "grad_norm": 0.3916381001472473,
2667
+ "learning_rate": 2.096409481029556e-06,
2668
+ "loss": 0.4423,
2669
+ "step": 380
2670
+ },
2671
+ {
2672
+ "epoch": 2.1886069889899473,
2673
+ "grad_norm": 0.3852902948856354,
2674
+ "learning_rate": 2.069208457722828e-06,
2675
+ "loss": 0.421,
2676
+ "step": 381
2677
+ },
2678
+ {
2679
+ "epoch": 2.1943513642891337,
2680
+ "grad_norm": 0.40378549695014954,
2681
+ "learning_rate": 2.042138937932388e-06,
2682
+ "loss": 0.4254,
2683
+ "step": 382
2684
+ },
2685
+ {
2686
+ "epoch": 2.2000957395883196,
2687
+ "grad_norm": 0.3902316987514496,
2688
+ "learning_rate": 2.015202136257432e-06,
2689
+ "loss": 0.4891,
2690
+ "step": 383
2691
+ },
2692
+ {
2693
+ "epoch": 2.205840114887506,
2694
+ "grad_norm": 0.3631356954574585,
2695
+ "learning_rate": 1.988399261342135e-06,
2696
+ "loss": 0.4048,
2697
+ "step": 384
2698
+ },
2699
+ {
2700
+ "epoch": 2.2115844901866923,
2701
+ "grad_norm": 0.3928806483745575,
2702
+ "learning_rate": 1.9617315158214363e-06,
2703
+ "loss": 0.4654,
2704
+ "step": 385
2705
+ },
2706
+ {
2707
+ "epoch": 2.2173288654858783,
2708
+ "grad_norm": 0.3718385100364685,
2709
+ "learning_rate": 1.935200096267064e-06,
2710
+ "loss": 0.4473,
2711
+ "step": 386
2712
+ },
2713
+ {
2714
+ "epoch": 2.2230732407850646,
2715
+ "grad_norm": 0.3905206024646759,
2716
+ "learning_rate": 1.908806193133855e-06,
2717
+ "loss": 0.4315,
2718
+ "step": 387
2719
+ },
2720
+ {
2721
+ "epoch": 2.228817616084251,
2722
+ "grad_norm": 0.37699949741363525,
2723
+ "learning_rate": 1.8825509907063328e-06,
2724
+ "loss": 0.4605,
2725
+ "step": 388
2726
+ },
2727
+ {
2728
+ "epoch": 2.234561991383437,
2729
+ "grad_norm": 0.4052666425704956,
2730
+ "learning_rate": 1.856435667045577e-06,
2731
+ "loss": 0.4561,
2732
+ "step": 389
2733
+ },
2734
+ {
2735
+ "epoch": 2.2403063666826233,
2736
+ "grad_norm": 0.41954728960990906,
2737
+ "learning_rate": 1.8304613939363531e-06,
2738
+ "loss": 0.451,
2739
+ "step": 390
2740
+ },
2741
+ {
2742
+ "epoch": 2.2460507419818097,
2743
+ "grad_norm": 0.3681614398956299,
2744
+ "learning_rate": 1.8046293368345485e-06,
2745
+ "loss": 0.4332,
2746
+ "step": 391
2747
+ },
2748
+ {
2749
+ "epoch": 2.2517951172809956,
2750
+ "grad_norm": 0.3315896987915039,
2751
+ "learning_rate": 1.7789406548148647e-06,
2752
+ "loss": 0.4176,
2753
+ "step": 392
2754
+ },
2755
+ {
2756
+ "epoch": 2.257539492580182,
2757
+ "grad_norm": 0.4171721935272217,
2758
+ "learning_rate": 1.7533965005188242e-06,
2759
+ "loss": 0.5205,
2760
+ "step": 393
2761
+ },
2762
+ {
2763
+ "epoch": 2.2632838678793683,
2764
+ "grad_norm": 0.35167965292930603,
2765
+ "learning_rate": 1.7279980201030382e-06,
2766
+ "loss": 0.4135,
2767
+ "step": 394
2768
+ },
2769
+ {
2770
+ "epoch": 2.2690282431785542,
2771
+ "grad_norm": 0.38858693838119507,
2772
+ "learning_rate": 1.7027463531877897e-06,
2773
+ "loss": 0.4432,
2774
+ "step": 395
2775
+ },
2776
+ {
2777
+ "epoch": 2.2747726184777406,
2778
+ "grad_norm": 0.3728155791759491,
2779
+ "learning_rate": 1.677642632805892e-06,
2780
+ "loss": 0.4512,
2781
+ "step": 396
2782
+ },
2783
+ {
2784
+ "epoch": 2.280516993776927,
2785
+ "grad_norm": 0.395333856344223,
2786
+ "learning_rate": 1.6526879853518558e-06,
2787
+ "loss": 0.4461,
2788
+ "step": 397
2789
+ },
2790
+ {
2791
+ "epoch": 2.286261369076113,
2792
+ "grad_norm": 0.37841910123825073,
2793
+ "learning_rate": 1.6278835305313462e-06,
2794
+ "loss": 0.4539,
2795
+ "step": 398
2796
+ },
2797
+ {
2798
+ "epoch": 2.2920057443752992,
2799
+ "grad_norm": 0.3504939675331116,
2800
+ "learning_rate": 1.6032303813109368e-06,
2801
+ "loss": 0.4478,
2802
+ "step": 399
2803
+ },
2804
+ {
2805
+ "epoch": 2.297750119674485,
2806
+ "grad_norm": 0.383880615234375,
2807
+ "learning_rate": 1.578729643868181e-06,
2808
+ "loss": 0.4544,
2809
+ "step": 400
2810
+ },
2811
+ {
2812
+ "epoch": 2.3034944949736715,
2813
+ "grad_norm": 0.39890047907829285,
2814
+ "learning_rate": 1.5543824175419691e-06,
2815
+ "loss": 0.4616,
2816
+ "step": 401
2817
+ },
2818
+ {
2819
+ "epoch": 2.309238870272858,
2820
+ "grad_norm": 0.3388942778110504,
2821
+ "learning_rate": 1.5301897947832063e-06,
2822
+ "loss": 0.4472,
2823
+ "step": 402
2824
+ },
2825
+ {
2826
+ "epoch": 2.3149832455720443,
2827
+ "grad_norm": 0.3660285770893097,
2828
+ "learning_rate": 1.5061528611057917e-06,
2829
+ "loss": 0.4071,
2830
+ "step": 403
2831
+ },
2832
+ {
2833
+ "epoch": 2.32072762087123,
2834
+ "grad_norm": 0.38760945200920105,
2835
+ "learning_rate": 1.4822726950379207e-06,
2836
+ "loss": 0.4753,
2837
+ "step": 404
2838
+ },
2839
+ {
2840
+ "epoch": 2.3264719961704166,
2841
+ "grad_norm": 0.36534708738327026,
2842
+ "learning_rate": 1.4585503680736756e-06,
2843
+ "loss": 0.4351,
2844
+ "step": 405
2845
+ },
2846
+ {
2847
+ "epoch": 2.3322163714696025,
2848
+ "grad_norm": 0.3707476258277893,
2849
+ "learning_rate": 1.4349869446249664e-06,
2850
+ "loss": 0.43,
2851
+ "step": 406
2852
+ },
2853
+ {
2854
+ "epoch": 2.337960746768789,
2855
+ "grad_norm": 0.36531203985214233,
2856
+ "learning_rate": 1.4115834819737534e-06,
2857
+ "loss": 0.3951,
2858
+ "step": 407
2859
+ },
2860
+ {
2861
+ "epoch": 2.343705122067975,
2862
+ "grad_norm": 0.39617377519607544,
2863
+ "learning_rate": 1.3883410302246237e-06,
2864
+ "loss": 0.4387,
2865
+ "step": 408
2866
+ },
2867
+ {
2868
+ "epoch": 2.349449497367161,
2869
+ "grad_norm": 0.36693063378334045,
2870
+ "learning_rate": 1.3652606322576606e-06,
2871
+ "loss": 0.4014,
2872
+ "step": 409
2873
+ },
2874
+ {
2875
+ "epoch": 2.3551938726663475,
2876
+ "grad_norm": 0.3821011781692505,
2877
+ "learning_rate": 1.3423433236816563e-06,
2878
+ "loss": 0.4526,
2879
+ "step": 410
2880
+ },
2881
+ {
2882
+ "epoch": 2.360938247965534,
2883
+ "grad_norm": 0.3760710060596466,
2884
+ "learning_rate": 1.3195901327876426e-06,
2885
+ "loss": 0.4402,
2886
+ "step": 411
2887
+ },
2888
+ {
2889
+ "epoch": 2.36668262326472,
2890
+ "grad_norm": 0.3526499271392822,
2891
+ "learning_rate": 1.2970020805027555e-06,
2892
+ "loss": 0.4167,
2893
+ "step": 412
2894
+ },
2895
+ {
2896
+ "epoch": 2.372426998563906,
2897
+ "grad_norm": 0.3931331932544708,
2898
+ "learning_rate": 1.2745801803444192e-06,
2899
+ "loss": 0.4686,
2900
+ "step": 413
2901
+ },
2902
+ {
2903
+ "epoch": 2.3781713738630925,
2904
+ "grad_norm": 0.40180233120918274,
2905
+ "learning_rate": 1.25232543837488e-06,
2906
+ "loss": 0.4679,
2907
+ "step": 414
2908
+ },
2909
+ {
2910
+ "epoch": 2.3839157491622784,
2911
+ "grad_norm": 0.35506054759025574,
2912
+ "learning_rate": 1.2302388531560515e-06,
2913
+ "loss": 0.4335,
2914
+ "step": 415
2915
+ },
2916
+ {
2917
+ "epoch": 2.389660124461465,
2918
+ "grad_norm": 0.3469352126121521,
2919
+ "learning_rate": 1.2083214157047257e-06,
2920
+ "loss": 0.4631,
2921
+ "step": 416
2922
+ },
2923
+ {
2924
+ "epoch": 2.395404499760651,
2925
+ "grad_norm": 0.3637499213218689,
2926
+ "learning_rate": 1.186574109448091e-06,
2927
+ "loss": 0.4489,
2928
+ "step": 417
2929
+ },
2930
+ {
2931
+ "epoch": 2.401148875059837,
2932
+ "grad_norm": 0.3483814597129822,
2933
+ "learning_rate": 1.164997910179615e-06,
2934
+ "loss": 0.4659,
2935
+ "step": 418
2936
+ },
2937
+ {
2938
+ "epoch": 2.4068932503590235,
2939
+ "grad_norm": 0.38259443640708923,
2940
+ "learning_rate": 1.1435937860152579e-06,
2941
+ "loss": 0.453,
2942
+ "step": 419
2943
+ },
2944
+ {
2945
+ "epoch": 2.41263762565821,
2946
+ "grad_norm": 0.34772536158561707,
2947
+ "learning_rate": 1.1223626973500395e-06,
2948
+ "loss": 0.4076,
2949
+ "step": 420
2950
+ },
2951
+ {
2952
+ "epoch": 2.4183820009573957,
2953
+ "grad_norm": 0.36162224411964417,
2954
+ "learning_rate": 1.1013055968149343e-06,
2955
+ "loss": 0.4482,
2956
+ "step": 421
2957
+ },
2958
+ {
2959
+ "epoch": 2.424126376256582,
2960
+ "grad_norm": 0.3481464385986328,
2961
+ "learning_rate": 1.0804234292341426e-06,
2962
+ "loss": 0.4694,
2963
+ "step": 422
2964
+ },
2965
+ {
2966
+ "epoch": 2.4298707515557685,
2967
+ "grad_norm": 0.3406570553779602,
2968
+ "learning_rate": 1.0597171315826805e-06,
2969
+ "loss": 0.4026,
2970
+ "step": 423
2971
+ },
2972
+ {
2973
+ "epoch": 2.4356151268549544,
2974
+ "grad_norm": 0.359840452671051,
2975
+ "learning_rate": 1.0391876329443534e-06,
2976
+ "loss": 0.4212,
2977
+ "step": 424
2978
+ },
2979
+ {
2980
+ "epoch": 2.4413595021541408,
2981
+ "grad_norm": 0.41036349534988403,
2982
+ "learning_rate": 1.0188358544700583e-06,
2983
+ "loss": 0.4463,
2984
+ "step": 425
2985
+ },
2986
+ {
2987
+ "epoch": 2.447103877453327,
2988
+ "grad_norm": 0.3857036828994751,
2989
+ "learning_rate": 9.986627093364542e-07,
2990
+ "loss": 0.479,
2991
+ "step": 426
2992
+ },
2993
+ {
2994
+ "epoch": 2.452848252752513,
2995
+ "grad_norm": 0.3274599611759186,
2996
+ "learning_rate": 9.786691027049893e-07,
2997
+ "loss": 0.3942,
2998
+ "step": 427
2999
+ },
3000
+ {
3001
+ "epoch": 2.4585926280516994,
3002
+ "grad_norm": 0.41199931502342224,
3003
+ "learning_rate": 9.588559316812906e-07,
3004
+ "loss": 0.4954,
3005
+ "step": 428
3006
+ },
3007
+ {
3008
+ "epoch": 2.4643370033508853,
3009
+ "grad_norm": 0.36387866735458374,
3010
+ "learning_rate": 9.392240852749007e-07,
3011
+ "loss": 0.4519,
3012
+ "step": 429
3013
+ },
3014
+ {
3015
+ "epoch": 2.4700813786500717,
3016
+ "grad_norm": 0.3550173342227936,
3017
+ "learning_rate": 9.197744443594003e-07,
3018
+ "loss": 0.4146,
3019
+ "step": 430
3020
+ },
3021
+ {
3022
+ "epoch": 2.475825753949258,
3023
+ "grad_norm": 0.36893606185913086,
3024
+ "learning_rate": 9.005078816328772e-07,
3025
+ "loss": 0.4837,
3026
+ "step": 431
3027
+ },
3028
+ {
3029
+ "epoch": 2.4815701292484444,
3030
+ "grad_norm": 0.3411610424518585,
3031
+ "learning_rate": 8.814252615787661e-07,
3032
+ "loss": 0.4311,
3033
+ "step": 432
3034
+ },
3035
+ {
3036
+ "epoch": 2.4873145045476304,
3037
+ "grad_norm": 0.3465381860733032,
3038
+ "learning_rate": 8.625274404270662e-07,
3039
+ "loss": 0.4731,
3040
+ "step": 433
3041
+ },
3042
+ {
3043
+ "epoch": 2.4930588798468167,
3044
+ "grad_norm": 0.3729943633079529,
3045
+ "learning_rate": 8.438152661159165e-07,
3046
+ "loss": 0.4194,
3047
+ "step": 434
3048
+ },
3049
+ {
3050
+ "epoch": 2.4988032551460027,
3051
+ "grad_norm": 0.4155563414096832,
3052
+ "learning_rate": 8.252895782535569e-07,
3053
+ "loss": 0.4698,
3054
+ "step": 435
3055
+ },
3056
+ {
3057
+ "epoch": 2.504547630445189,
3058
+ "grad_norm": 0.3166390061378479,
3059
+ "learning_rate": 8.069512080806441e-07,
3060
+ "loss": 0.392,
3061
+ "step": 436
3062
+ },
3063
+ {
3064
+ "epoch": 2.5102920057443754,
3065
+ "grad_norm": 0.3697630763053894,
3066
+ "learning_rate": 7.88800978432967e-07,
3067
+ "loss": 0.4642,
3068
+ "step": 437
3069
+ },
3070
+ {
3071
+ "epoch": 2.5160363810435618,
3072
+ "grad_norm": 0.4319329559803009,
3073
+ "learning_rate": 7.708397037045129e-07,
3074
+ "loss": 0.4768,
3075
+ "step": 438
3076
+ },
3077
+ {
3078
+ "epoch": 2.5217807563427477,
3079
+ "grad_norm": 0.32879865169525146,
3080
+ "learning_rate": 7.530681898109393e-07,
3081
+ "loss": 0.4141,
3082
+ "step": 439
3083
+ },
3084
+ {
3085
+ "epoch": 2.527525131641934,
3086
+ "grad_norm": 0.3696901798248291,
3087
+ "learning_rate": 7.35487234153402e-07,
3088
+ "loss": 0.4802,
3089
+ "step": 440
3090
+ },
3091
+ {
3092
+ "epoch": 2.53326950694112,
3093
+ "grad_norm": 0.3675316274166107,
3094
+ "learning_rate": 7.180976255827809e-07,
3095
+ "loss": 0.4756,
3096
+ "step": 441
3097
+ },
3098
+ {
3099
+ "epoch": 2.5390138822403063,
3100
+ "grad_norm": 0.33102041482925415,
3101
+ "learning_rate": 7.009001443642843e-07,
3102
+ "loss": 0.3936,
3103
+ "step": 442
3104
+ },
3105
+ {
3106
+ "epoch": 2.5447582575394927,
3107
+ "grad_norm": 0.32732781767845154,
3108
+ "learning_rate": 6.838955621424404e-07,
3109
+ "loss": 0.3896,
3110
+ "step": 443
3111
+ },
3112
+ {
3113
+ "epoch": 2.550502632838679,
3114
+ "grad_norm": 0.3652030825614929,
3115
+ "learning_rate": 6.67084641906468e-07,
3116
+ "loss": 0.4773,
3117
+ "step": 444
3118
+ },
3119
+ {
3120
+ "epoch": 2.556247008137865,
3121
+ "grad_norm": 0.3629269003868103,
3122
+ "learning_rate": 6.50468137956049e-07,
3123
+ "loss": 0.4434,
3124
+ "step": 445
3125
+ },
3126
+ {
3127
+ "epoch": 2.5619913834370514,
3128
+ "grad_norm": 0.34360983967781067,
3129
+ "learning_rate": 6.340467958674762e-07,
3130
+ "loss": 0.4265,
3131
+ "step": 446
3132
+ },
3133
+ {
3134
+ "epoch": 2.5677357587362373,
3135
+ "grad_norm": 0.3844148814678192,
3136
+ "learning_rate": 6.178213524602061e-07,
3137
+ "loss": 0.4666,
3138
+ "step": 447
3139
+ },
3140
+ {
3141
+ "epoch": 2.5734801340354236,
3142
+ "grad_norm": 0.36176225543022156,
3143
+ "learning_rate": 6.017925357637932e-07,
3144
+ "loss": 0.4188,
3145
+ "step": 448
3146
+ },
3147
+ {
3148
+ "epoch": 2.57922450933461,
3149
+ "grad_norm": 0.35700544714927673,
3150
+ "learning_rate": 5.859610649852249e-07,
3151
+ "loss": 0.426,
3152
+ "step": 449
3153
+ },
3154
+ {
3155
+ "epoch": 2.584968884633796,
3156
+ "grad_norm": 0.3662126958370209,
3157
+ "learning_rate": 5.703276504766514e-07,
3158
+ "loss": 0.4135,
3159
+ "step": 450
3160
+ },
3161
+ {
3162
+ "epoch": 2.5907132599329823,
3163
+ "grad_norm": 0.33082348108291626,
3164
+ "learning_rate": 5.548929937035147e-07,
3165
+ "loss": 0.3923,
3166
+ "step": 451
3167
+ },
3168
+ {
3169
+ "epoch": 2.5964576352321687,
3170
+ "grad_norm": 0.351136714220047,
3171
+ "learning_rate": 5.396577872130676e-07,
3172
+ "loss": 0.4475,
3173
+ "step": 452
3174
+ },
3175
+ {
3176
+ "epoch": 2.6022020105313546,
3177
+ "grad_norm": 0.3499198853969574,
3178
+ "learning_rate": 5.246227146033089e-07,
3179
+ "loss": 0.4704,
3180
+ "step": 453
3181
+ },
3182
+ {
3183
+ "epoch": 2.607946385830541,
3184
+ "grad_norm": 0.3229888677597046,
3185
+ "learning_rate": 5.097884504922996e-07,
3186
+ "loss": 0.4099,
3187
+ "step": 454
3188
+ },
3189
+ {
3190
+ "epoch": 2.6136907611297273,
3191
+ "grad_norm": 0.37059321999549866,
3192
+ "learning_rate": 4.951556604879049e-07,
3193
+ "loss": 0.5134,
3194
+ "step": 455
3195
+ },
3196
+ {
3197
+ "epoch": 2.6194351364289132,
3198
+ "grad_norm": 0.35366615653038025,
3199
+ "learning_rate": 4.807250011579168e-07,
3200
+ "loss": 0.442,
3201
+ "step": 456
3202
+ },
3203
+ {
3204
+ "epoch": 2.6251795117280996,
3205
+ "grad_norm": 0.33588507771492004,
3206
+ "learning_rate": 4.6649712000060297e-07,
3207
+ "loss": 0.3842,
3208
+ "step": 457
3209
+ },
3210
+ {
3211
+ "epoch": 2.6309238870272855,
3212
+ "grad_norm": 0.33627060055732727,
3213
+ "learning_rate": 4.5247265541564836e-07,
3214
+ "loss": 0.4408,
3215
+ "step": 458
3216
+ },
3217
+ {
3218
+ "epoch": 2.636668262326472,
3219
+ "grad_norm": 0.3534417450428009,
3220
+ "learning_rate": 4.386522366755169e-07,
3221
+ "loss": 0.4385,
3222
+ "step": 459
3223
+ },
3224
+ {
3225
+ "epoch": 2.6424126376256583,
3226
+ "grad_norm": 0.3756772577762604,
3227
+ "learning_rate": 4.250364838972065e-07,
3228
+ "loss": 0.4684,
3229
+ "step": 460
3230
+ },
3231
+ {
3232
+ "epoch": 2.6481570129248446,
3233
+ "grad_norm": 0.33255767822265625,
3234
+ "learning_rate": 4.116260080144352e-07,
3235
+ "loss": 0.4493,
3236
+ "step": 461
3237
+ },
3238
+ {
3239
+ "epoch": 2.6539013882240305,
3240
+ "grad_norm": 0.3787069618701935,
3241
+ "learning_rate": 3.98421410750221e-07,
3242
+ "loss": 0.4556,
3243
+ "step": 462
3244
+ },
3245
+ {
3246
+ "epoch": 2.659645763523217,
3247
+ "grad_norm": 0.35434430837631226,
3248
+ "learning_rate": 3.854232845898859e-07,
3249
+ "loss": 0.441,
3250
+ "step": 463
3251
+ },
3252
+ {
3253
+ "epoch": 2.665390138822403,
3254
+ "grad_norm": 0.331163614988327,
3255
+ "learning_rate": 3.7263221275447125e-07,
3256
+ "loss": 0.4376,
3257
+ "step": 464
3258
+ },
3259
+ {
3260
+ "epoch": 2.671134514121589,
3261
+ "grad_norm": 0.3512961268424988,
3262
+ "learning_rate": 3.60048769174568e-07,
3263
+ "loss": 0.438,
3264
+ "step": 465
3265
+ },
3266
+ {
3267
+ "epoch": 2.6768788894207756,
3268
+ "grad_norm": 0.3708810806274414,
3269
+ "learning_rate": 3.4767351846456744e-07,
3270
+ "loss": 0.4711,
3271
+ "step": 466
3272
+ },
3273
+ {
3274
+ "epoch": 2.682623264719962,
3275
+ "grad_norm": 0.35665786266326904,
3276
+ "learning_rate": 3.355070158973212e-07,
3277
+ "loss": 0.4434,
3278
+ "step": 467
3279
+ },
3280
+ {
3281
+ "epoch": 2.688367640019148,
3282
+ "grad_norm": 0.3402157723903656,
3283
+ "learning_rate": 3.235498073792342e-07,
3284
+ "loss": 0.4436,
3285
+ "step": 468
3286
+ },
3287
+ {
3288
+ "epoch": 2.694112015318334,
3289
+ "grad_norm": 0.3638489842414856,
3290
+ "learning_rate": 3.118024294257621e-07,
3291
+ "loss": 0.4612,
3292
+ "step": 469
3293
+ },
3294
+ {
3295
+ "epoch": 2.69985639061752,
3296
+ "grad_norm": 0.3516465425491333,
3297
+ "learning_rate": 3.002654091373453e-07,
3298
+ "loss": 0.4177,
3299
+ "step": 470
3300
+ },
3301
+ {
3302
+ "epoch": 2.7056007659167065,
3303
+ "grad_norm": 0.3707068860530853,
3304
+ "learning_rate": 2.889392641757527e-07,
3305
+ "loss": 0.4764,
3306
+ "step": 471
3307
+ },
3308
+ {
3309
+ "epoch": 2.711345141215893,
3310
+ "grad_norm": 0.3246111571788788,
3311
+ "learning_rate": 2.778245027408566e-07,
3312
+ "loss": 0.4262,
3313
+ "step": 472
3314
+ },
3315
+ {
3316
+ "epoch": 2.7170895165150792,
3317
+ "grad_norm": 0.37666356563568115,
3318
+ "learning_rate": 2.669216235478295e-07,
3319
+ "loss": 0.4879,
3320
+ "step": 473
3321
+ },
3322
+ {
3323
+ "epoch": 2.722833891814265,
3324
+ "grad_norm": 0.3712378144264221,
3325
+ "learning_rate": 2.562311158047692e-07,
3326
+ "loss": 0.4568,
3327
+ "step": 474
3328
+ },
3329
+ {
3330
+ "epoch": 2.7285782671134515,
3331
+ "grad_norm": 0.35843536257743835,
3332
+ "learning_rate": 2.45753459190744e-07,
3333
+ "loss": 0.4473,
3334
+ "step": 475
3335
+ },
3336
+ {
3337
+ "epoch": 2.7343226424126374,
3338
+ "grad_norm": 0.36265286803245544,
3339
+ "learning_rate": 2.354891238342738e-07,
3340
+ "loss": 0.4247,
3341
+ "step": 476
3342
+ },
3343
+ {
3344
+ "epoch": 2.740067017711824,
3345
+ "grad_norm": 0.35885879397392273,
3346
+ "learning_rate": 2.254385702922318e-07,
3347
+ "loss": 0.4544,
3348
+ "step": 477
3349
+ },
3350
+ {
3351
+ "epoch": 2.74581139301101,
3352
+ "grad_norm": 0.3318557143211365,
3353
+ "learning_rate": 2.1560224952918373e-07,
3354
+ "loss": 0.3918,
3355
+ "step": 478
3356
+ },
3357
+ {
3358
+ "epoch": 2.751555768310196,
3359
+ "grad_norm": 0.3384019732475281,
3360
+ "learning_rate": 2.0598060289714893e-07,
3361
+ "loss": 0.3935,
3362
+ "step": 479
3363
+ },
3364
+ {
3365
+ "epoch": 2.7573001436093825,
3366
+ "grad_norm": 0.37354743480682373,
3367
+ "learning_rate": 1.9657406211579966e-07,
3368
+ "loss": 0.477,
3369
+ "step": 480
3370
+ },
3371
+ {
3372
+ "epoch": 2.763044518908569,
3373
+ "grad_norm": 0.3306199312210083,
3374
+ "learning_rate": 1.8738304925308926e-07,
3375
+ "loss": 0.4606,
3376
+ "step": 481
3377
+ },
3378
+ {
3379
+ "epoch": 2.7687888942077548,
3380
+ "grad_norm": 0.337941974401474,
3381
+ "learning_rate": 1.7840797670631572e-07,
3382
+ "loss": 0.4581,
3383
+ "step": 482
3384
+ },
3385
+ {
3386
+ "epoch": 2.774533269506941,
3387
+ "grad_norm": 0.34849002957344055,
3388
+ "learning_rate": 1.6964924718361364e-07,
3389
+ "loss": 0.4416,
3390
+ "step": 483
3391
+ },
3392
+ {
3393
+ "epoch": 2.7802776448061275,
3394
+ "grad_norm": 0.3213178515434265,
3395
+ "learning_rate": 1.6110725368589041e-07,
3396
+ "loss": 0.4177,
3397
+ "step": 484
3398
+ },
3399
+ {
3400
+ "epoch": 2.7860220201053134,
3401
+ "grad_norm": 0.3672527074813843,
3402
+ "learning_rate": 1.5278237948918585e-07,
3403
+ "loss": 0.4831,
3404
+ "step": 485
3405
+ },
3406
+ {
3407
+ "epoch": 2.7917663954045,
3408
+ "grad_norm": 0.3290295898914337,
3409
+ "learning_rate": 1.4467499812748143e-07,
3410
+ "loss": 0.4421,
3411
+ "step": 486
3412
+ },
3413
+ {
3414
+ "epoch": 2.7975107707036857,
3415
+ "grad_norm": 0.32079824805259705,
3416
+ "learning_rate": 1.3678547337593494e-07,
3417
+ "loss": 0.4536,
3418
+ "step": 487
3419
+ },
3420
+ {
3421
+ "epoch": 2.803255146002872,
3422
+ "grad_norm": 0.34473419189453125,
3423
+ "learning_rate": 1.2911415923456017e-07,
3424
+ "loss": 0.4474,
3425
+ "step": 488
3426
+ },
3427
+ {
3428
+ "epoch": 2.8089995213020584,
3429
+ "grad_norm": 0.34892538189888,
3430
+ "learning_rate": 1.2166139991234227e-07,
3431
+ "loss": 0.4153,
3432
+ "step": 489
3433
+ },
3434
+ {
3435
+ "epoch": 2.814743896601245,
3436
+ "grad_norm": 0.35367730259895325,
3437
+ "learning_rate": 1.1442752981179527e-07,
3438
+ "loss": 0.4633,
3439
+ "step": 490
3440
+ },
3441
+ {
3442
+ "epoch": 2.8204882719004307,
3443
+ "grad_norm": 0.34787774085998535,
3444
+ "learning_rate": 1.0741287351395402e-07,
3445
+ "loss": 0.4833,
3446
+ "step": 491
3447
+ },
3448
+ {
3449
+ "epoch": 2.826232647199617,
3450
+ "grad_norm": 0.34205448627471924,
3451
+ "learning_rate": 1.0061774576381411e-07,
3452
+ "loss": 0.443,
3453
+ "step": 492
3454
+ },
3455
+ {
3456
+ "epoch": 2.831977022498803,
3457
+ "grad_norm": 0.3662581741809845,
3458
+ "learning_rate": 9.404245145620717e-08,
3459
+ "loss": 0.4819,
3460
+ "step": 493
3461
+ },
3462
+ {
3463
+ "epoch": 2.8377213977979894,
3464
+ "grad_norm": 0.3398209512233734,
3465
+ "learning_rate": 8.768728562211948e-08,
3466
+ "loss": 0.3826,
3467
+ "step": 494
3468
+ },
3469
+ {
3470
+ "epoch": 2.8434657730971757,
3471
+ "grad_norm": 0.3441368341445923,
3472
+ "learning_rate": 8.155253341545655e-08,
3473
+ "loss": 0.4825,
3474
+ "step": 495
3475
+ },
3476
+ {
3477
+ "epoch": 2.849210148396362,
3478
+ "grad_norm": 0.36746805906295776,
3479
+ "learning_rate": 7.563847010024716e-08,
3480
+ "loss": 0.4402,
3481
+ "step": 496
3482
+ },
3483
+ {
3484
+ "epoch": 2.854954523695548,
3485
+ "grad_norm": 0.3405812978744507,
3486
+ "learning_rate": 6.994536103829164e-08,
3487
+ "loss": 0.4468,
3488
+ "step": 497
3489
+ },
3490
+ {
3491
+ "epoch": 2.8606988989947344,
3492
+ "grad_norm": 0.33339643478393555,
3493
+ "learning_rate": 6.447346167725688e-08,
3494
+ "loss": 0.4059,
3495
+ "step": 498
3496
+ },
3497
+ {
3498
+ "epoch": 2.8664432742939203,
3499
+ "grad_norm": 0.3409021496772766,
3500
+ "learning_rate": 5.9223017539213335e-08,
3501
+ "loss": 0.4844,
3502
+ "step": 499
3503
+ },
3504
+ {
3505
+ "epoch": 2.8721876495931067,
3506
+ "grad_norm": 0.30741554498672485,
3507
+ "learning_rate": 5.4194264209617705e-08,
3508
+ "loss": 0.4113,
3509
+ "step": 500
3510
+ },
3511
+ {
3512
+ "epoch": 2.877932024892293,
3513
+ "grad_norm": 0.34918904304504395,
3514
+ "learning_rate": 4.9387427326745287e-08,
3515
+ "loss": 0.4648,
3516
+ "step": 501
3517
+ },
3518
+ {
3519
+ "epoch": 2.8836764001914794,
3520
+ "grad_norm": 0.3559444844722748,
3521
+ "learning_rate": 4.4802722571561374e-08,
3522
+ "loss": 0.4712,
3523
+ "step": 502
3524
+ },
3525
+ {
3526
+ "epoch": 2.8894207754906653,
3527
+ "grad_norm": 0.3617500960826874,
3528
+ "learning_rate": 4.044035565804793e-08,
3529
+ "loss": 0.4212,
3530
+ "step": 503
3531
+ },
3532
+ {
3533
+ "epoch": 2.8951651507898517,
3534
+ "grad_norm": 0.3423933982849121,
3535
+ "learning_rate": 3.6300522323969855e-08,
3536
+ "loss": 0.3823,
3537
+ "step": 504
3538
+ },
3539
+ {
3540
+ "epoch": 2.9009095260890376,
3541
+ "grad_norm": 0.32970568537712097,
3542
+ "learning_rate": 3.2383408322095856e-08,
3543
+ "loss": 0.483,
3544
+ "step": 505
3545
+ },
3546
+ {
3547
+ "epoch": 2.906653901388224,
3548
+ "grad_norm": 0.32338789105415344,
3549
+ "learning_rate": 2.8689189411859607e-08,
3550
+ "loss": 0.4111,
3551
+ "step": 506
3552
+ },
3553
+ {
3554
+ "epoch": 2.9123982766874104,
3555
+ "grad_norm": 0.36587268114089966,
3556
+ "learning_rate": 2.5218031351478268e-08,
3557
+ "loss": 0.5196,
3558
+ "step": 507
3559
+ },
3560
+ {
3561
+ "epoch": 2.9181426519865967,
3562
+ "grad_norm": 0.372019499540329,
3563
+ "learning_rate": 2.1970089890509527e-08,
3564
+ "loss": 0.4153,
3565
+ "step": 508
3566
+ },
3567
+ {
3568
+ "epoch": 2.9238870272857826,
3569
+ "grad_norm": 0.3676818907260895,
3570
+ "learning_rate": 1.8945510762868325e-08,
3571
+ "loss": 0.445,
3572
+ "step": 509
3573
+ },
3574
+ {
3575
+ "epoch": 2.929631402584969,
3576
+ "grad_norm": 0.33755603432655334,
3577
+ "learning_rate": 1.614442968028429e-08,
3578
+ "loss": 0.4542,
3579
+ "step": 510
3580
+ },
3581
+ {
3582
+ "epoch": 2.935375777884155,
3583
+ "grad_norm": 0.3397558629512787,
3584
+ "learning_rate": 1.3566972326214956e-08,
3585
+ "loss": 0.4402,
3586
+ "step": 511
3587
+ },
3588
+ {
3589
+ "epoch": 2.9411201531833413,
3590
+ "grad_norm": 0.3535856604576111,
3591
+ "learning_rate": 1.1213254350202486e-08,
3592
+ "loss": 0.4235,
3593
+ "step": 512
3594
+ },
3595
+ {
3596
+ "epoch": 2.9468645284825277,
3597
+ "grad_norm": 0.324677973985672,
3598
+ "learning_rate": 9.083381362690603e-09,
3599
+ "loss": 0.4315,
3600
+ "step": 513
3601
+ },
3602
+ {
3603
+ "epoch": 2.9526089037817136,
3604
+ "grad_norm": 0.3455667495727539,
3605
+ "learning_rate": 7.177448930279496e-09,
3606
+ "loss": 0.4506,
3607
+ "step": 514
3608
+ },
3609
+ {
3610
+ "epoch": 2.9583532790809,
3611
+ "grad_norm": 0.35920631885528564,
3612
+ "learning_rate": 5.495542571443135e-09,
3613
+ "loss": 0.4154,
3614
+ "step": 515
3615
+ },
3616
+ {
3617
+ "epoch": 2.9640976543800863,
3618
+ "grad_norm": 0.3734237849712372,
3619
+ "learning_rate": 4.037737752686788e-09,
3620
+ "loss": 0.4393,
3621
+ "step": 516
3622
+ },
3623
+ {
3624
+ "epoch": 2.9698420296792722,
3625
+ "grad_norm": 0.34162041544914246,
3626
+ "learning_rate": 2.8040998851674996e-09,
3627
+ "loss": 0.4448,
3628
+ "step": 517
3629
+ },
3630
+ {
3631
+ "epoch": 2.9755864049784586,
3632
+ "grad_norm": 0.3412844240665436,
3633
+ "learning_rate": 1.7946843217514498e-09,
3634
+ "loss": 0.4195,
3635
+ "step": 518
3636
+ },
3637
+ {
3638
+ "epoch": 2.981330780277645,
3639
+ "grad_norm": 0.35345590114593506,
3640
+ "learning_rate": 1.009536354537044e-09,
3641
+ "loss": 0.3924,
3642
+ "step": 519
3643
+ },
3644
+ {
3645
+ "epoch": 2.987075155576831,
3646
+ "grad_norm": 0.36779072880744934,
3647
+ "learning_rate": 4.486912128182086e-10,
3648
+ "loss": 0.5045,
3649
+ "step": 520
3650
+ },
3651
+ {
3652
+ "epoch": 2.9928195308760173,
3653
+ "grad_norm": 0.3303414583206177,
3654
+ "learning_rate": 1.1217406150676457e-10,
3655
+ "loss": 0.4413,
3656
+ "step": 521
3657
+ },
3658
+ {
3659
+ "epoch": 2.998563906175203,
3660
+ "grad_norm": 0.33037903904914856,
3661
+ "learning_rate": 0.0,
3662
+ "loss": 0.4208,
3663
+ "step": 522
3664
+ },
3665
+ {
3666
+ "epoch": 2.998563906175203,
3667
+ "step": 522,
3668
+ "total_flos": 538113478688768.0,
3669
+ "train_loss": 0.5088067185490525,
3670
+ "train_runtime": 25669.0251,
3671
+ "train_samples_per_second": 1.953,
3672
+ "train_steps_per_second": 0.02
3673
+ }
3674
+ ],
3675
+ "logging_steps": 1,
3676
+ "max_steps": 522,
3677
+ "num_input_tokens_seen": 0,
3678
+ "num_train_epochs": 3,
3679
+ "save_steps": 100,
3680
+ "stateful_callbacks": {
3681
+ "TrainerControl": {
3682
+ "args": {
3683
+ "should_epoch_stop": false,
3684
+ "should_evaluate": false,
3685
+ "should_log": false,
3686
+ "should_save": true,
3687
+ "should_training_stop": true
3688
+ },
3689
+ "attributes": {}
3690
+ }
3691
+ },
3692
+ "total_flos": 538113478688768.0,
3693
+ "train_batch_size": 1,
3694
+ "trial_name": null,
3695
+ "trial_params": null
3696
+ }
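The entries above are the tail of the `log_history` list inside the uploaded `trainer_state.json` (steps 328–522 of 522, covering the end of the second epoch through the third), followed by the run summary. A minimal sketch of inspecting such a file once downloaded — the local file name and the standard `Trainer` `log_history` schema are assumptions here, not guaranteed by this diff:

```python
import json

# Load the trainer state written by the transformers Trainer
# (assumed to be saved locally as trainer_state.json).
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry epoch, grad_norm, learning_rate, loss, step;
# the final entry of log_history is the run summary (train_loss, ...).
steps = [e for e in state["log_history"] if "loss" in e]
print(f"logged steps: {len(steps)}")
print(f"final logged loss: {steps[-1]['loss']}")

# Rough cross-check against the reported train_loss; the two need not
# match exactly, since train_loss is averaged internally by the Trainer.
mean_loss = sum(e["loss"] for e in steps) / len(steps)
print(f"mean logged loss: {mean_loss:.4f}")

# To reproduce a curve like the committed training_loss.png:
# import matplotlib.pyplot as plt
# plt.plot([e["step"] for e in steps], [e["loss"] for e in steps])
# plt.xlabel("step"); plt.ylabel("loss"); plt.savefig("training_loss.png")
```

Read this way, the cosine schedule is visible in `learning_rate` decaying smoothly to `0.0` at step 522, and the one loss spike late in training (step 349: loss 0.7723, grad_norm 1.56) lands on the first step after the epoch counter crosses 2.0.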
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b2c7be9d6a854256321e1dcea5671272ffb62341c43bb590e825ffc34d907ee
3
+ size 7416
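`training_args.bin` is committed as a Git LFS pointer: the three lines above are the pointer file itself (spec version, sha256 object id, and size in bytes), not the binary. A small sketch of checking a downloaded copy against that pointer — the local path is an assumption:

```python
import hashlib
import os

# Values taken from the LFS pointer committed above.
EXPECTED_OID = "8b2c7be9d6a854256321e1dcea5671272ffb62341c43bb590e825ffc34d907ee"
EXPECTED_SIZE = 7416

path = "training_args.bin"  # assumed local download location

# LFS identifies an object by its size plus the sha256 of the raw content.
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 16), b""):
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("training_args.bin matches its LFS pointer")
```

The same check applies to any LFS-tracked file in this repo.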
training_loss.png ADDED
vocab.json ADDED
The diff for this file is too large to render.