zstanjj committed
Commit 3f61ae8
Parent: 2abc8b5

Upload 13 files
added_tokens.json ADDED
@@ -0,0 +1,13 @@
+{
+  "<|assistant|>": 32001,
+  "<|endoftext|>": 32000,
+  "<|end|>": 32007,
+  "<|placeholder1|>": 32002,
+  "<|placeholder2|>": 32003,
+  "<|placeholder3|>": 32004,
+  "<|placeholder4|>": 32005,
+  "<|placeholder5|>": 32008,
+  "<|placeholder6|>": 32009,
+  "<|system|>": 32006,
+  "<|user|>": 32010
+}
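These entries extend the base 32,000-token Phi-3 vocabulary with the chat-role and placeholder specials (ids 32000-32010). A minimal sketch of verifying the mapping after downloading the repo; the local directory name is a hypothetical placeholder, not part of this commit:

```python
from transformers import AutoTokenizer

# Hypothetical local path containing the uploaded files (added_tokens.json,
# tokenizer files, config.json, ...).
tokenizer = AutoTokenizer.from_pretrained("./phi3-html-pruner", trust_remote_code=True)

# Each special token should resolve to the id listed in added_tokens.json.
for token in ["<|endoftext|>", "<|system|>", "<|user|>", "<|assistant|>", "<|end|>"]:
    print(token, tokenizer.convert_tokens_to_ids(token))
```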
config.json ADDED
@@ -0,0 +1,140 @@
1
+ {
2
+ "_name_or_path": "Phi-3.5-mini-instruct",
3
+ "architectures": [
4
+ "Phi3ForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_phi3.Phi3Config",
9
+ "AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM",
10
+ "AutoModelForSeq2SeqLM": "modeling_phi3.PHI3ForHTMLTreeGeneration"
11
+ },
12
+ "bos_token_id": 1,
13
+ "embd_pdrop": 0.0,
14
+ "eos_token_id": 32000,
15
+ "hidden_act": "silu",
16
+ "hidden_size": 3072,
17
+ "initializer_range": 0.02,
18
+ "intermediate_size": 8192,
19
+ "max_position_embeddings": 131072,
20
+ "model_type": "phi3",
21
+ "num_attention_heads": 32,
22
+ "num_hidden_layers": 32,
23
+ "num_key_value_heads": 32,
24
+ "original_max_position_embeddings": 4096,
25
+ "pad_token_id": 32000,
26
+ "resid_pdrop": 0.0,
27
+ "rms_norm_eps": 1e-05,
28
+ "rope_scaling": {
29
+ "long_factor": [
30
+ 1.0800000429153442,
31
+ 1.1100000143051147,
32
+ 1.1399999856948853,
33
+ 1.340000033378601,
34
+ 1.5899999141693115,
35
+ 1.600000023841858,
36
+ 1.6200000047683716,
37
+ 2.620000123977661,
38
+ 3.2300000190734863,
39
+ 3.2300000190734863,
40
+ 4.789999961853027,
41
+ 7.400000095367432,
42
+ 7.700000286102295,
43
+ 9.09000015258789,
44
+ 12.199999809265137,
45
+ 17.670000076293945,
46
+ 24.46000099182129,
47
+ 28.57000160217285,
48
+ 30.420001983642578,
49
+ 30.840002059936523,
50
+ 32.590003967285156,
51
+ 32.93000411987305,
52
+ 42.320003509521484,
53
+ 44.96000289916992,
54
+ 50.340003967285156,
55
+ 50.45000457763672,
56
+ 57.55000305175781,
57
+ 57.93000411987305,
58
+ 58.21000289916992,
59
+ 60.1400032043457,
60
+ 62.61000442504883,
61
+ 62.62000274658203,
62
+ 62.71000289916992,
63
+ 63.1400032043457,
64
+ 63.1400032043457,
65
+ 63.77000427246094,
66
+ 63.93000411987305,
67
+ 63.96000289916992,
68
+ 63.970001220703125,
69
+ 64.02999877929688,
70
+ 64.06999969482422,
71
+ 64.08000183105469,
72
+ 64.12000274658203,
73
+ 64.41000366210938,
74
+ 64.4800033569336,
75
+ 64.51000213623047,
76
+ 64.52999877929688,
77
+ 64.83999633789062
78
+ ],
79
+ "short_factor": [
80
+ 1.0,
81
+ 1.0199999809265137,
82
+ 1.0299999713897705,
83
+ 1.0299999713897705,
84
+ 1.0499999523162842,
85
+ 1.0499999523162842,
86
+ 1.0499999523162842,
87
+ 1.0499999523162842,
88
+ 1.0499999523162842,
89
+ 1.0699999332427979,
90
+ 1.0999999046325684,
91
+ 1.1099998950958252,
92
+ 1.1599998474121094,
93
+ 1.1599998474121094,
94
+ 1.1699998378753662,
95
+ 1.2899998426437378,
96
+ 1.339999794960022,
97
+ 1.679999828338623,
98
+ 1.7899998426437378,
99
+ 1.8199998140335083,
100
+ 1.8499997854232788,
101
+ 1.8799997568130493,
102
+ 1.9099997282028198,
103
+ 1.9399996995925903,
104
+ 1.9899996519088745,
105
+ 2.0199997425079346,
106
+ 2.0199997425079346,
107
+ 2.0199997425079346,
108
+ 2.0199997425079346,
109
+ 2.0199997425079346,
110
+ 2.0199997425079346,
111
+ 2.0299997329711914,
112
+ 2.0299997329711914,
113
+ 2.0299997329711914,
114
+ 2.0299997329711914,
115
+ 2.0299997329711914,
116
+ 2.0299997329711914,
117
+ 2.0299997329711914,
118
+ 2.0299997329711914,
119
+ 2.0299997329711914,
120
+ 2.0799996852874756,
121
+ 2.0899996757507324,
122
+ 2.189999580383301,
123
+ 2.2199995517730713,
124
+ 2.5899994373321533,
125
+ 2.729999542236328,
126
+ 2.749999523162842,
127
+ 2.8399994373321533
128
+ ],
129
+ "type": "longrope"
130
+ },
131
+ "rope_theta": 10000.0,
132
+ "sliding_window": 262144,
133
+ "tie_word_embeddings": false,
134
+ "torch_dtype": "bfloat16",
135
+ "transformers_version": "4.43.3",
136
+ "use_cache": true,
137
+ "attention_bias": false,
138
+ "vocab_size": 32064,
139
+ "attn_implementation": "flash_attention_2"
140
+ }
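The `auto_map` entries route `AutoConfig` and `AutoModelForCausalLM` to the custom `configuration_phi3.py` / `modeling_phi3.py` uploaded in this commit, so loading needs `trust_remote_code=True`. A hedged loading sketch; the repo id and dtype choice are assumptions for illustration, not taken from this commit:

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo = "zstanjj/phi-3.5-html"  # hypothetical repo id, replace with the real one

config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
print(config.rope_scaling["type"], config.max_position_embeddings)  # longrope 131072

# config.json requests flash_attention_2; pass attn_implementation="eager"
# here instead if flash-attn is not installed.
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, trust_remote_code=True
)
```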
configuration_phi3.py ADDED
@@ -0,0 +1,227 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """ Phi-3 model configuration"""
17
+
18
+
19
+ from transformers.configuration_utils import PretrainedConfig
20
+ from transformers.utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+ PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
26
+ "microsoft/Phi-3-mini-4k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json",
27
+ "microsoft/Phi-3-mini-128k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json",
28
+ }
29
+
30
+
31
+ class Phi3Config(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
34
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
35
+ defaults will yield a similar configuration to that of the
36
+ [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 32064):
43
+ Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
44
+ `inputs_ids` passed when calling [`Phi3Model`].
45
+ hidden_size (`int`, *optional*, defaults to 3072):
46
+ Dimension of the hidden representations.
47
+ intermediate_size (`int`, *optional*, defaults to 8192):
48
+ Dimension of the MLP representations.
49
+ num_hidden_layers (`int`, *optional*, defaults to 32):
50
+ Number of hidden layers in the Transformer decoder.
51
+ num_attention_heads (`int`, *optional*, defaults to 32):
52
+ Number of attention heads for each attention layer in the Transformer decoder.
53
+ num_key_value_heads (`int`, *optional*):
54
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
55
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
56
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
57
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
58
+ by mean-pooling all the original heads within that group. For more details, check out [this
59
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
60
+ `num_attention_heads`.
61
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
62
+ Dropout probability for mlp outputs.
63
+ embd_pdrop (`int`, *optional*, defaults to 0.0):
64
+ The dropout ratio for the embeddings.
65
+ attention_dropout (`float`, *optional*, defaults to 0.0):
66
+ The dropout ratio after computing the attention scores.
67
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
68
+ The non-linear activation function (function or string) in the decoder.
69
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
70
+ The maximum sequence length that this model might ever be used with.
71
+ original_max_position_embeddings (`int`, *optional*, defaults to 4096):
72
+ The maximum sequence length that this model was trained with. This is used to determine the size of the
73
+ original RoPE embeddings when using long scaling.
74
+ initializer_range (`float`, *optional*, defaults to 0.02):
75
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
76
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
77
+ The epsilon value used for the RMSNorm.
78
+ use_cache (`bool`, *optional*, defaults to `True`):
79
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
80
+ relevant if `config.is_decoder=True`.
81
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
82
+ Whether to tie weight embeddings
83
+ rope_theta (`float`, *optional*, defaults to 10000.0):
84
+ The base period of the RoPE embeddings.
85
+ rope_scaling (`dict`, *optional*):
86
+ The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
87
+ contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be `longrope` and
88
+ the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
89
+ divided by the number of attention heads divided by 2.
90
+ bos_token_id (`int`, *optional*, defaults to 1):
91
+ The id of the "beginning-of-sequence" token.
92
+ eos_token_id (`int`, *optional*, defaults to 32000):
93
+ The id of the "end-of-sequence" token.
94
+ pad_token_id (`int`, *optional*, defaults to 32000):
95
+ The id of the padding token.
96
+ sliding_window (`int`, *optional*):
97
+ Sliding window attention window size. If `None`, no sliding window is applied.
98
+
99
+ Example:
100
+
101
+ ```python
102
+ >>> from transformers import Phi3Model, Phi3Config
103
+
104
+ >>> # Initializing a Phi-3 style configuration
105
+ >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
106
+
107
+ >>> # Initializing a model from the configuration
108
+ >>> model = Phi3Model(configuration)
109
+
110
+ >>> # Accessing the model configuration
111
+ >>> configuration = model.config
112
+ ```"""
113
+
114
+ model_type = "phi3"
115
+ keys_to_ignore_at_inference = ["past_key_values"]
116
+
117
+ def __init__(
118
+ self,
119
+ vocab_size=32064,
120
+ hidden_size=3072,
121
+ intermediate_size=8192,
122
+ num_hidden_layers=32,
123
+ num_attention_heads=32,
124
+ num_key_value_heads=None,
125
+ resid_pdrop=0.0,
126
+ embd_pdrop=0.0,
127
+ attention_dropout=0.0,
128
+ hidden_act="silu",
129
+ max_position_embeddings=4096,
130
+ original_max_position_embeddings=4096,
131
+ initializer_range=0.02,
132
+ rms_norm_eps=1e-5,
133
+ use_cache=True,
134
+ tie_word_embeddings=False,
135
+ rope_theta=10000.0,
136
+ rope_scaling=None,
137
+ bos_token_id=1,
138
+ eos_token_id=32000,
139
+ pad_token_id=32000,
140
+ sliding_window=None,
141
+ **kwargs,
142
+ ):
143
+ self.vocab_size = vocab_size
144
+ self.hidden_size = hidden_size
145
+ self.intermediate_size = intermediate_size
146
+ self.num_hidden_layers = num_hidden_layers
147
+ self.num_attention_heads = num_attention_heads
148
+
149
+ if num_key_value_heads is None:
150
+ num_key_value_heads = num_attention_heads
151
+
152
+ self.num_key_value_heads = num_key_value_heads
153
+ self.resid_pdrop = resid_pdrop
154
+ self.embd_pdrop = embd_pdrop
155
+ self.attention_dropout = attention_dropout
156
+ self.hidden_act = hidden_act
157
+ self.max_position_embeddings = max_position_embeddings
158
+ self.original_max_position_embeddings = original_max_position_embeddings
159
+ self.initializer_range = initializer_range
160
+ self.rms_norm_eps = rms_norm_eps
161
+ self.use_cache = use_cache
162
+ self.rope_theta = rope_theta
163
+ self.rope_scaling = rope_scaling
164
+ self._rope_scaling_adjustment()
165
+ self._rope_scaling_validation()
166
+ self.sliding_window = sliding_window
167
+
168
+ super().__init__(
169
+ bos_token_id=bos_token_id,
170
+ eos_token_id=eos_token_id,
171
+ pad_token_id=pad_token_id,
172
+ tie_word_embeddings=tie_word_embeddings,
173
+ **kwargs,
174
+ )
175
+
176
+ def _rope_scaling_adjustment(self):
177
+ """
178
+ Adjust the `type` of the `rope_scaling` configuration for backward compatibility.
179
+ """
180
+ if self.rope_scaling is None:
181
+ return
182
+
183
+ rope_scaling_type = self.rope_scaling.get("type", None)
184
+
185
+ # For backward compatibility if previous version used "su" or "yarn"
186
+ if rope_scaling_type is not None and rope_scaling_type in ["su", "yarn"]:
187
+ self.rope_scaling["type"] = "longrope"
188
+
189
+ def _rope_scaling_validation(self):
190
+ """
191
+ Validate the `rope_scaling` configuration.
192
+ """
193
+ if self.rope_scaling is None:
194
+ return
195
+
196
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
197
+ raise ValueError(
198
+ "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
199
+ f"got {self.rope_scaling}"
200
+ )
201
+ rope_scaling_type = self.rope_scaling.get("type", None)
202
+ rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
203
+ rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
204
+ if rope_scaling_type is None or rope_scaling_type not in ["longrope"]:
205
+ raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}")
206
+ if not (
207
+ isinstance(rope_scaling_short_factor, list)
208
+ and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
209
+ ):
210
+ raise ValueError(
211
+ f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
212
+ )
213
+ if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
214
+ raise ValueError(
215
+ f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
216
+ )
217
+ if not (
218
+ isinstance(rope_scaling_long_factor, list)
219
+ and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
220
+ ):
221
+ raise ValueError(
222
+ f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
223
+ )
224
+ if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
225
+ raise ValueError(
226
+ f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
227
+ )
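The validation above forces `short_factor` and `long_factor` to have length `hidden_size // num_attention_heads // 2`; with the defaults (3072 // 32 // 2) that is 48, matching the 48-entry lists in config.json. A small sketch of tripping the check on purpose, assuming the file above is importable as `configuration_phi3`:

```python
from configuration_phi3 import Phi3Config

# Deliberately wrong rope_scaling: 2 factors instead of the required
# hidden_size // num_attention_heads // 2 = 48.
try:
    Phi3Config(
        rope_scaling={"type": "longrope", "short_factor": [1.0, 1.0], "long_factor": [1.0, 1.0]}
    )
except ValueError as err:
    print(err)  # complains that short_factor must have length 48, got 2
```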
generation_config.json ADDED
@@ -0,0 +1,11 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": [
+    32007,
+    32001,
+    32000
+  ],
+  "pad_token_id": 32000,
+  "transformers_version": "4.43.4"
+}
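Because `eos_token_id` is a list, generation stops at whichever of `<|end|>` (32007), `<|assistant|>` (32001) or `<|endoftext|>` (32000) is produced first. A usage sketch; `model` and `tokenizer` are assumed to be loaded as in the earlier snippets, and the prompt follows the standard Phi-3 chat format:

```python
prompt = "<|user|>\nSummarize the page content.<|end|>\n<|assistant|>\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# The stop ids mirror generation_config.json; generate() halts on the first match.
output = model.generate(
    **inputs,
    max_new_tokens=128,
    eos_token_id=[32007, 32001, 32000],
    pad_token_id=32000,
)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```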
latest ADDED
@@ -0,0 +1 @@
+global_step164
model.safetensors.index.json ADDED
@@ -0,0 +1,202 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 7642159104
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00002-of-00002.safetensors",
7
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
8
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
9
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
10
+ "model.layers.0.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
11
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
12
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
13
+ "model.layers.0.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
14
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
15
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
16
+ "model.layers.1.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
17
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
18
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
19
+ "model.layers.1.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
20
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
21
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
22
+ "model.layers.10.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
23
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
24
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
25
+ "model.layers.10.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
26
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
27
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
28
+ "model.layers.11.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
29
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
30
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
31
+ "model.layers.11.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
32
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
33
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
34
+ "model.layers.12.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
35
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
36
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
37
+ "model.layers.12.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
38
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
39
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
40
+ "model.layers.13.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
41
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
42
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
43
+ "model.layers.13.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
44
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
45
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
46
+ "model.layers.14.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
47
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
48
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
49
+ "model.layers.14.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
50
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
51
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
52
+ "model.layers.15.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
53
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
54
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
55
+ "model.layers.15.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
56
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
57
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
58
+ "model.layers.16.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
59
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
60
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
61
+ "model.layers.16.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
62
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
63
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
64
+ "model.layers.17.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
65
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
66
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
67
+ "model.layers.17.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
68
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
69
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
70
+ "model.layers.18.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
71
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
72
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
73
+ "model.layers.18.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
74
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
75
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
76
+ "model.layers.19.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
77
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
78
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
79
+ "model.layers.19.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
80
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
81
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
82
+ "model.layers.2.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
83
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
84
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
85
+ "model.layers.2.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
86
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
87
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
88
+ "model.layers.20.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
89
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
90
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
91
+ "model.layers.20.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
92
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
93
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
94
+ "model.layers.21.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
95
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
96
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
97
+ "model.layers.21.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
98
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
99
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
100
+ "model.layers.22.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
101
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
102
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
103
+ "model.layers.22.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
104
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
105
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
106
+ "model.layers.23.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
107
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
108
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
109
+ "model.layers.23.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
110
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
111
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
112
+ "model.layers.24.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
113
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
114
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
115
+ "model.layers.24.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
116
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
117
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
118
+ "model.layers.25.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
119
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
120
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
121
+ "model.layers.25.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
122
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
123
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
124
+ "model.layers.26.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
125
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
126
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
127
+ "model.layers.26.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
128
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
129
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
130
+ "model.layers.27.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
131
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
132
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
133
+ "model.layers.27.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
134
+ "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
135
+ "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
136
+ "model.layers.28.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
137
+ "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
138
+ "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
139
+ "model.layers.28.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
140
+ "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
141
+ "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
142
+ "model.layers.29.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
143
+ "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
144
+ "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
145
+ "model.layers.29.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
146
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
147
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
148
+ "model.layers.3.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
149
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
150
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
151
+ "model.layers.3.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
152
+ "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
153
+ "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
154
+ "model.layers.30.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
155
+ "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
156
+ "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
157
+ "model.layers.30.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
158
+ "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
159
+ "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
160
+ "model.layers.31.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
161
+ "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
162
+ "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
163
+ "model.layers.31.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
164
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
165
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
166
+ "model.layers.4.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
167
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
168
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
169
+ "model.layers.4.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
170
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
171
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
172
+ "model.layers.5.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
173
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
174
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
175
+ "model.layers.5.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
176
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
177
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
178
+ "model.layers.6.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
179
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
180
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
181
+ "model.layers.6.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
182
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
183
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
184
+ "model.layers.7.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
185
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
186
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
187
+ "model.layers.7.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
188
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
189
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
190
+ "model.layers.8.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
191
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
192
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
193
+ "model.layers.8.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
194
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
195
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
196
+ "model.layers.9.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
197
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
198
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
199
+ "model.layers.9.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
200
+ "model.norm.weight": "model-00002-of-00002.safetensors"
201
+ }
202
+ }
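The index itself stores no weights: `metadata.total_size` is the combined byte count (about 7.6 GB) and `weight_map` records which of the two shards holds each tensor. A small sketch of pulling a single tensor without loading the whole model, assuming the shard files sit next to the index:

```python
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.qkv_proj.weight"
shard = index["weight_map"][name]  # "model-00001-of-00002.safetensors"

with safe_open(shard, framework="pt") as f:
    print(name, f.get_tensor(name).shape)
```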
modeling_phi3.py ADDED
@@ -0,0 +1,1888 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """ PyTorch Phi-3 model."""
17
+
18
+ import inspect
19
+
20
+ import bs4
21
+ import loguru
22
+ import math
23
+ import warnings
24
+ from typing import List, Optional, Tuple, Union
25
+
26
+ import numpy as np
27
+ import torch
28
+ import torch.nn.functional as F
29
+ import torch.utils.checkpoint
30
+ from torch import nn
31
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
32
+
33
+ from transformers.activations import ACT2FN
34
+ from transformers.cache_utils import Cache, DynamicCache
35
+ from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
36
+ from transformers.modeling_outputs import (
37
+ BaseModelOutputWithPast,
38
+ CausalLMOutputWithPast,
39
+ SequenceClassifierOutputWithPast,
40
+ TokenClassifierOutput,
41
+ )
42
+ from transformers.modeling_utils import PreTrainedModel
43
+ from transformers.utils import (
44
+ add_code_sample_docstrings,
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ is_flash_attn_2_available,
48
+ is_flash_attn_greater_or_equal_2_10,
49
+ logging,
50
+ replace_return_docstrings,
51
+ )
52
+ from .configuration_phi3 import Phi3Config
53
+ from .tree_gen_utils import split_tree, TokenIdNode, TokenDotExporter, nodenamefunc
54
+
55
+
56
+ logger = logging.get_logger(__name__)
57
+
58
+ # Transformers scans dependencies in the modeling file, which causes issues with conditional loading. The regex only ignores try/except blocks, not if statements.
59
+ # if is_flash_attn_2_available():
60
+ _flash_supports_window_size = False
61
+ try:
62
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
63
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
64
+
65
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
66
+ except ImportError as error:
67
+ logger.warning(
68
+ f"`flash-attention` package not found, consider installing for better performance: {error}."
69
+ )
70
+ if not _flash_supports_window_size:
71
+ logger.warning(
72
+ "Current `flash-attention` does not support `window_size`. Either upgrade or use `attn_implementation='eager'`."
73
+ )
74
+
75
+ _CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
76
+ _CONFIG_FOR_DOC = "Phi3Config"
77
+
78
+ PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = [
79
+ "microsoft/Phi-3-mini-4k-instruct",
80
+ "microsoft/Phi-3-mini-128k-instruct",
81
+ # See all Phi-3 models at https://huggingface.co/models?filter=Phi-3
82
+ ]
83
+
84
+
85
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Phi3
86
+ class Phi3RMSNorm(nn.Module):
87
+ def __init__(self, hidden_size, eps=1e-6):
88
+ """
89
+ Phi3RMSNorm is equivalent to T5LayerNorm
90
+ """
91
+ super().__init__()
92
+ self.weight = nn.Parameter(torch.ones(hidden_size))
93
+ self.variance_epsilon = eps
94
+
95
+ def forward(self, hidden_states):
96
+ input_dtype = hidden_states.dtype
97
+ hidden_states = hidden_states.to(torch.float32)
98
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
99
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
100
+ return self.weight * hidden_states.to(input_dtype)
101
+
102
+
103
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
104
+ def _get_unpad_data(attention_mask):
105
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
106
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
107
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
108
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
109
+ return (
110
+ indices,
111
+ cu_seqlens,
112
+ max_seqlen_in_batch,
113
+ )
114
+
115
+
116
+ # Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with gemma->phi3, Gemma->Phi3
117
+ class Phi3RotaryEmbedding(nn.Module):
118
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
119
+ super().__init__()
120
+
121
+ self.dim = dim
122
+ self.max_position_embeddings = max_position_embeddings
123
+ self.base = base
124
+ self.register_buffer("inv_freq", None, persistent=False)
125
+
126
+ @torch.no_grad()
127
+ def forward(self, x, position_ids, seq_len=None):
128
+ # x: [bs, num_attention_heads, seq_len, head_size]
129
+ if self.inv_freq is None:
130
+ self.inv_freq = 1.0 / (
131
+ self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
132
+ )
133
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
134
+ position_ids_expanded = position_ids[:, None, :].float()
135
+ # Force float32 since bfloat16 loses precision on long contexts
136
+ # See https://github.com/huggingface/transformers/pull/29285
137
+ device_type = x.device.type
138
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
139
+ with torch.autocast(device_type=device_type, enabled=False):
140
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
141
+ emb = torch.cat((freqs, freqs), dim=-1)
142
+ cos = emb.cos()
143
+ sin = emb.sin()
144
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
145
+
146
+
147
+ class Phi3LongRoPEScaledRotaryEmbedding(Phi3RotaryEmbedding):
148
+ def __init__(self, dim, config, device=None):
149
+ super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)
150
+
151
+ self.short_factor = config.rope_scaling["short_factor"]
152
+ self.long_factor = config.rope_scaling["long_factor"]
153
+ self.original_max_position_embeddings = config.original_max_position_embeddings
154
+
155
+ @torch.no_grad()
156
+ def forward(self, x, position_ids, seq_len=None):
157
+ seq_len = seq_len or torch.max(position_ids) + 1
158
+ if seq_len > self.original_max_position_embeddings:
159
+ ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
160
+ else:
161
+ ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)
162
+
163
+ inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
164
+ self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)
165
+
166
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
167
+ position_ids_expanded = position_ids[:, None, :].float()
168
+
169
+ # Force float32 since bfloat16 loses precision on long contexts
170
+ # See https://github.com/huggingface/transformers/pull/29285
171
+ device_type = x.device.type
172
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
173
+ with torch.autocast(device_type=device_type, enabled=False):
174
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
175
+ emb = torch.cat((freqs, freqs), dim=-1)
176
+
177
+ scale = self.max_position_embeddings / self.original_max_position_embeddings
178
+ if scale <= 1.0:
179
+ scaling_factor = 1.0
180
+ else:
181
+ scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))
182
+
183
+ cos = emb.cos() * scaling_factor
184
+ sin = emb.sin() * scaling_factor
185
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
186
+
187
+
188
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
189
+ def rotate_half(x):
190
+ """Rotates half the hidden dims of the input."""
191
+ x1 = x[..., : x.shape[-1] // 2]
192
+ x2 = x[..., x.shape[-1] // 2 :]
193
+ return torch.cat((-x2, x1), dim=-1)
194
+
195
+
196
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
197
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
198
+ """Applies Rotary Position Embedding to the query and key tensors.
199
+
200
+ Args:
201
+ q (`torch.Tensor`): The query tensor.
202
+ k (`torch.Tensor`): The key tensor.
203
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
204
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
205
+ position_ids (`torch.Tensor`, *optional*):
206
+ Deprecated and unused.
207
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
208
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
209
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
210
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
211
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
212
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
213
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
214
+ Returns:
215
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
216
+ """
217
+ cos = cos.unsqueeze(unsqueeze_dim)
218
+ sin = sin.unsqueeze(unsqueeze_dim)
219
+ q_embed = (q * cos) + (rotate_half(q) * sin)
220
+ k_embed = (k * cos) + (rotate_half(k) * sin)
221
+ return q_embed, k_embed
222
+
223
+
224
+ class Phi3MLP(nn.Module):
225
+ def __init__(self, config):
226
+ super().__init__()
227
+
228
+ self.config = config
229
+ self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
230
+ self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
231
+
232
+ self.activation_fn = ACT2FN[config.hidden_act]
233
+
234
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
235
+ up_states = self.gate_up_proj(hidden_states)
236
+
237
+ gate, up_states = up_states.chunk(2, dim=-1)
238
+ up_states = up_states * self.activation_fn(gate)
239
+
240
+ return self.down_proj(up_states)
241
+
242
+
243
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi
244
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
245
+ """
246
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
247
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
248
+ """
249
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
250
+ if n_rep == 1:
251
+ return hidden_states
252
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
253
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
254
+
255
+
256
+ class Phi3Attention(nn.Module):
257
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
258
+
259
+ def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
260
+ super().__init__()
261
+ self.config = config
262
+ self.layer_idx = layer_idx
263
+ if layer_idx is None:
264
+ logger.warning_once(
265
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
266
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
267
+ "when creating this class."
268
+ )
269
+
270
+ self.attention_dropout = config.attention_dropout
271
+ self.hidden_size = config.hidden_size
272
+ self.num_heads = config.num_attention_heads
273
+ self.head_dim = self.hidden_size // self.num_heads
274
+ self.num_key_value_heads = config.num_key_value_heads
275
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
276
+ self.max_position_embeddings = config.max_position_embeddings
277
+ self.original_max_position_embeddings = config.original_max_position_embeddings
278
+ self.rope_theta = config.rope_theta
279
+ self.rope_scaling = config.rope_scaling
280
+ self.is_causal = True
281
+
282
+ if (self.head_dim * self.num_heads) != self.hidden_size:
283
+ raise ValueError(
284
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
285
+ f" and `num_heads`: {self.num_heads})."
286
+ )
287
+
288
+ op_size = self.num_heads * self.head_dim + 2 * (self.num_key_value_heads * self.head_dim)
289
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
290
+ self.qkv_proj = nn.Linear(self.hidden_size, op_size, bias=False)
291
+ self._init_rope()
292
+
293
+ def _init_rope(self):
294
+ if self.rope_scaling is None:
295
+ self.rotary_emb = Phi3RotaryEmbedding(
296
+ self.head_dim,
297
+ max_position_embeddings=self.max_position_embeddings,
298
+ base=self.rope_theta,
299
+ )
300
+ else:
301
+ scaling_type = self.config.rope_scaling["type"]
302
+ if scaling_type == "longrope":
303
+ self.rotary_emb = Phi3LongRoPEScaledRotaryEmbedding(self.head_dim, self.config)
304
+ else:
305
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
306
+
307
+ def forward(
308
+ self,
309
+ hidden_states: torch.Tensor,
310
+ attention_mask: Optional[torch.Tensor] = None,
311
+ position_ids: Optional[torch.LongTensor] = None,
312
+ past_key_value: Optional[Cache] = None,
313
+ output_attentions: bool = False,
314
+ use_cache: bool = False,
315
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
316
+ logger.warning_once("You are not running the flash-attention implementation, expect numerical differences.")
317
+
318
+ bsz, q_len, _ = hidden_states.size()
319
+
320
+ qkv = self.qkv_proj(hidden_states)
321
+ query_pos = self.num_heads * self.head_dim
322
+ query_states = qkv[..., :query_pos]
323
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
324
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
325
+
326
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
327
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
328
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
329
+
330
+ kv_seq_len = key_states.shape[-2]
331
+ if past_key_value is not None:
332
+ if self.layer_idx is None:
333
+ raise ValueError(
334
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
335
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
336
+ "with a layer index."
337
+ )
338
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
339
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)
340
+
341
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
342
+
343
+ if past_key_value is not None:
344
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
345
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
346
+
347
+ # repeat k/v heads if n_kv_heads < n_heads
348
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
349
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
350
+
351
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
352
+
353
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
354
+ raise ValueError(
355
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
356
+ f" {attn_weights.size()}"
357
+ )
358
+
359
+ if attention_mask is not None:
360
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
361
+ raise ValueError(
362
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
363
+ )
364
+ attn_weights = attn_weights + attention_mask
365
+
366
+ # upcast attention to fp32
367
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
368
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
369
+
370
+ attn_output = torch.matmul(attn_weights, value_states)
371
+
372
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
373
+ raise ValueError(
374
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
375
+ f" {attn_output.size()}"
376
+ )
377
+
378
+ attn_output = attn_output.transpose(1, 2).contiguous()
379
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
380
+
381
+ attn_output = self.o_proj(attn_output)
382
+
383
+ if not output_attentions:
384
+ attn_weights = None
385
+
386
+ return attn_output, attn_weights, past_key_value
387
+
388
+
389
+ class Phi3FlashAttention2(Phi3Attention):
390
+ """
391
+ Phi-3 flash attention module. This module inherits from `Phi3Attention`, as the weights of the module stay
392
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
393
+ flash attention and deal with padding tokens in case the input contains any of them.
394
+ """
395
+
396
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
397
+ def __init__(self, *args, **kwargs):
398
+ super().__init__(*args, **kwargs)
399
+
400
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
401
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
402
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
403
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
404
+
405
+ def forward(
406
+ self,
407
+ hidden_states: torch.Tensor,
408
+ attention_mask: Optional[torch.LongTensor] = None,
409
+ position_ids: Optional[torch.LongTensor] = None,
410
+ past_key_value: Optional[Cache] = None,
411
+ output_attentions: bool = False,
412
+ use_cache: bool = False,
413
+ **kwargs,
414
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
415
+ # Phi3FlashAttention2 attention does not support output_attentions
416
+
417
+ if not _flash_supports_window_size:
418
+ logger.warning_once(
419
+ "The current flash attention version does not support sliding window attention. Please use `attn_implementation='eager'` or upgrade flash-attn library."
420
+ )
421
+ raise ValueError("The current flash attention version does not support sliding window attention.")
422
+
423
+ output_attentions = False
424
+
425
+ if "padding_mask" in kwargs:
426
+ warnings.warn(
427
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
428
+ )
429
+
430
+ # overwrite attention_mask with padding_mask
431
+ attention_mask = kwargs.pop("padding_mask")
432
+
433
+ bsz, q_len, _ = hidden_states.size()
434
+
435
+ qkv = self.qkv_proj(hidden_states)
436
+ query_pos = self.num_heads * self.head_dim
437
+ query_states = qkv[..., :query_pos]
438
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
439
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
440
+
441
+ # Flash attention requires the input to have the shape
442
+ # batch_size x seq_length x num_heads x head_dim
443
+ # therefore we just need to keep the original shape
444
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
445
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
446
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
447
+
448
+ kv_seq_len = key_states.shape[-2]
449
+ if past_key_value is not None:
450
+ if self.layer_idx is None:
451
+ raise ValueError(
452
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
453
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
454
+ "with a layer index."
455
+ )
456
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
457
+
458
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
459
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item() + 1)
460
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=rotary_seq_len)
461
+
462
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
463
+
464
+ use_sliding_windows = (
465
+ _flash_supports_window_size
466
+ and getattr(self.config, "sliding_window", None) is not None
467
+ and kv_seq_len > self.config.sliding_window
468
+ )
469
+
470
+ if past_key_value is not None:
471
+ # Activate cache slicing only if the config has a `sliding_window` attribute
472
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
473
+ if (
474
+ getattr(self.config, "sliding_window", None) is not None
475
+ and kv_seq_len > self.config.sliding_window
476
+ and cache_has_contents
477
+ ):
478
+ slicing_tokens = 1 - self.config.sliding_window
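+ # For illustration, assuming sliding_window=2048: slicing_tokens is -2047, so only the most
+ # recent 2047 cached positions are kept and the incoming token completes a full 2048-token window.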
479
+
480
+ past_key = past_key_value[self.layer_idx][0]
481
+ past_value = past_key_value[self.layer_idx][1]
482
+
483
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
484
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
485
+
486
+ if past_key.shape[-2] != self.config.sliding_window - 1:
487
+ raise ValueError(
488
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
489
+ f" {past_key.shape}"
490
+ )
491
+
492
+ if attention_mask is not None:
493
+ attention_mask = attention_mask[:, slicing_tokens:]
494
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
495
+
496
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
497
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
498
+
499
+ # repeat k/v heads if n_kv_heads < n_heads
500
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
501
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
502
+
503
+ attn_dropout = self.attention_dropout if self.training else 0.0
504
+
505
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
506
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
507
+ # cast them back to the correct dtype just to be sure everything works as expected.
508
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
509
+ # in fp32.
510
+
511
+ if query_states.dtype == torch.float32:
512
+ if torch.is_autocast_enabled():
513
+ target_dtype = torch.get_autocast_gpu_dtype()
514
+ # Handle the case where the model is quantized
515
+ elif hasattr(self.config, "_pre_quantization_dtype"):
516
+ target_dtype = self.config._pre_quantization_dtype
517
+ else:
518
+ target_dtype = self.qkv_proj.weight.dtype
519
+
520
+ logger.warning_once(
521
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
522
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
523
+ f" {target_dtype}."
524
+ )
525
+
526
+ query_states = query_states.to(target_dtype)
527
+ key_states = key_states.to(target_dtype)
528
+ value_states = value_states.to(target_dtype)
529
+
530
+ # Reshape to the expected shape for Flash Attention
531
+ query_states = query_states.transpose(1, 2)
532
+ key_states = key_states.transpose(1, 2)
533
+ value_states = value_states.transpose(1, 2)
534
+
535
+ attn_output = self._flash_attention_forward(
536
+ query_states,
537
+ key_states,
538
+ value_states,
539
+ attention_mask,
540
+ q_len,
541
+ dropout=attn_dropout,
542
+ use_sliding_windows=use_sliding_windows,
543
+ )
544
+
545
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
546
+ attn_output = self.o_proj(attn_output)
547
+
548
+ if not output_attentions:
549
+ attn_weights = None
550
+
551
+ return attn_output, attn_weights, past_key_value
552
+
553
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward
554
+ def _flash_attention_forward(
555
+ self,
556
+ query_states,
557
+ key_states,
558
+ value_states,
559
+ attention_mask,
560
+ query_length,
561
+ dropout=0.0,
562
+ softmax_scale=None,
563
+ use_sliding_windows=False,
564
+ ):
565
+ """
566
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
567
+ it first unpads the input, then computes the attention scores and pads the final attention scores.
568
+
569
+ Args:
570
+ query_states (`torch.Tensor`):
571
+ Input query states to be passed to Flash Attention API
572
+ key_states (`torch.Tensor`):
573
+ Input key states to be passed to Flash Attention API
574
+ value_states (`torch.Tensor`):
575
+ Input value states to be passed to Flash Attention API
576
+ attention_mask (`torch.Tensor`):
577
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
578
+ position of padding tokens and 1 for the position of non-padding tokens.
579
+ dropout (`float`):
580
+ Attention dropout
581
+ softmax_scale (`float`, *optional*):
582
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
583
+ use_sliding_windows (`bool`, *optional*):
584
+ Whether to activate sliding window attention.
585
+ """
586
+ if not self._flash_attn_uses_top_left_mask:
587
+ causal = self.is_causal
588
+ else:
589
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
590
+ causal = self.is_causal and query_length != 1
591
+
592
+ # The sequence contains at least one padding token
593
+ if attention_mask is not None:
594
+ batch_size = query_states.shape[0]
595
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
596
+ query_states, key_states, value_states, attention_mask, query_length
597
+ )
598
+
599
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
600
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
601
+
602
+ if not use_sliding_windows:
603
+ attn_output_unpad = flash_attn_varlen_func(
604
+ query_states,
605
+ key_states,
606
+ value_states,
607
+ cu_seqlens_q=cu_seqlens_q,
608
+ cu_seqlens_k=cu_seqlens_k,
609
+ max_seqlen_q=max_seqlen_in_batch_q,
610
+ max_seqlen_k=max_seqlen_in_batch_k,
611
+ dropout_p=dropout,
612
+ softmax_scale=softmax_scale,
613
+ causal=causal,
614
+ )
615
+ else:
616
+ attn_output_unpad = flash_attn_varlen_func(
617
+ query_states,
618
+ key_states,
619
+ value_states,
620
+ cu_seqlens_q=cu_seqlens_q,
621
+ cu_seqlens_k=cu_seqlens_k,
622
+ max_seqlen_q=max_seqlen_in_batch_q,
623
+ max_seqlen_k=max_seqlen_in_batch_k,
624
+ dropout_p=dropout,
625
+ softmax_scale=softmax_scale,
626
+ causal=causal,
627
+ window_size=(self.config.sliding_window, self.config.sliding_window),
628
+ )
629
+
630
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
631
+ else:
632
+ if not use_sliding_windows:
633
+ attn_output = flash_attn_func(
634
+ query_states,
635
+ key_states,
636
+ value_states,
637
+ dropout,
638
+ softmax_scale=softmax_scale,
639
+ causal=causal,
640
+ )
641
+ else:
642
+ attn_output = flash_attn_func(
643
+ query_states,
644
+ key_states,
645
+ value_states,
646
+ dropout,
647
+ softmax_scale=softmax_scale,
648
+ causal=causal,
649
+ window_size=(self.config.sliding_window, self.config.sliding_window),
650
+ )
651
+
652
+ return attn_output
653
+
654
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
655
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
656
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
657
+
658
+ # On the first iteration we need to properly re-create the padding mask
659
+ # by slicing it at the proper place
660
+ if kv_seq_len != attention_mask.shape[-1]:
661
+ attention_mask_num_tokens = attention_mask.shape[-1]
662
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
663
+
664
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
665
+
666
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
667
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
668
+
669
+ if query_length == kv_seq_len:
670
+ query_layer = index_first_axis(
671
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
672
+ )
673
+ cu_seqlens_q = cu_seqlens_k
674
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
675
+ indices_q = indices_k
676
+ elif query_length == 1:
677
+ max_seqlen_in_batch_q = 1
678
+ cu_seqlens_q = torch.arange(
679
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
680
+ ) # There is a memcpy here, that is very bad.
681
+ indices_q = cu_seqlens_q[:-1]
682
+ query_layer = query_layer.squeeze(1)
683
+ else:
684
+ # The -q_len: slice assumes left padding.
685
+ attention_mask = attention_mask[:, -query_length:]
686
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
687
+
688
+ return (
689
+ query_layer,
690
+ key_layer,
691
+ value_layer,
692
+ indices_q,
693
+ (cu_seqlens_q, cu_seqlens_k),
694
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
695
+ )
696
+
697
+
698
+ # copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Phi3
699
+ # TODO @Arthur no longer copied from LLama after static cache
700
+ class Phi3SdpaAttention(Phi3Attention):
701
+ """
702
+ Phi3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
703
+ `Phi3Attention`, as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
704
+ SDPA API.
705
+ """
706
+
707
+ # Adapted from Phi3Attention.forward
708
+ def forward(
709
+ self,
710
+ hidden_states: torch.Tensor,
711
+ attention_mask: Optional[torch.Tensor] = None,
712
+ position_ids: Optional[torch.LongTensor] = None,
713
+ past_key_value: Optional[Cache] = None,
714
+ output_attentions: bool = False,
715
+ use_cache: bool = False,
716
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
717
+ if output_attentions:
718
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
719
+ logger.warning_once(
720
+ "Phi3Model is using Phi3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
721
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
722
+ )
723
+ return super().forward(
724
+ hidden_states=hidden_states,
725
+ attention_mask=attention_mask,
726
+ position_ids=position_ids,
727
+ past_key_value=past_key_value,
728
+ output_attentions=output_attentions,
729
+ use_cache=use_cache,
730
+ )
731
+
732
+ bsz, q_len, _ = hidden_states.size()
733
+
734
+ qkv = self.qkv_proj(hidden_states)
735
+ query_pos = self.num_heads * self.head_dim
736
+ query_states = qkv[..., :query_pos]
737
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
738
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
739
+
740
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
741
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
742
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
743
+
744
+ kv_seq_len = key_states.shape[-2]
745
+ if past_key_value is not None:
746
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
747
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)
748
+
749
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
750
+
751
+ if past_key_value is not None:
752
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
753
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
754
+
755
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
756
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
757
+
758
+ if attention_mask is not None:
759
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
760
+ raise ValueError(
761
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
762
+ )
763
+
764
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
765
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
766
+ if query_states.device.type == "cuda" and attention_mask is not None:
767
+ query_states = query_states.contiguous()
768
+ key_states = key_states.contiguous()
769
+ value_states = value_states.contiguous()
770
+
771
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
772
+ query_states,
773
+ key_states,
774
+ value_states,
775
+ attn_mask=attention_mask,
776
+ dropout_p=self.attention_dropout if self.training else 0.0,
777
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
778
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
779
+ )
780
+
781
+ attn_output = attn_output.transpose(1, 2).contiguous()
782
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
783
+
784
+ attn_output = self.o_proj(attn_output)
785
+
786
+ return attn_output, None, past_key_value
787
+
788
+
789
+ PHI3_ATTENTION_CLASSES = {
790
+ "eager": Phi3Attention,
791
+ "flash_attention_2": Phi3FlashAttention2,
792
+ "sdpa": Phi3SdpaAttention,
793
+ }
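+ # The lookup key is config._attn_implementation, which Transformers derives from the
+ # `attn_implementation` argument of `from_pretrained` (e.g. attn_implementation="sdpa"); note that
+ # Phi3DecoderLayer below additionally forces "flash_attention_2" whenever flash-attn is available.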
794
+
795
+
796
+ class Phi3DecoderLayer(nn.Module):
797
+ def __init__(self, config: Phi3Config, layer_idx: int):
798
+ super().__init__()
799
+
800
+ self.config = config
801
+ if is_flash_attn_2_available():
802
+ config._attn_implementation = "flash_attention_2"
803
+ # loguru.logger.info(f"Using {config._attn_implementation} for attention in layer {layer_idx}")
804
+ self.self_attn = PHI3_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
805
+
806
+ self.mlp = Phi3MLP(config)
807
+ self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
808
+
809
+ self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
810
+ self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
811
+ self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
812
+
813
+ def forward(
814
+ self,
815
+ hidden_states: torch.Tensor,
816
+ attention_mask: Optional[torch.Tensor] = None,
817
+ position_ids: Optional[torch.LongTensor] = None,
818
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
819
+ output_attentions: Optional[bool] = False,
820
+ use_cache: Optional[bool] = False,
821
+ **kwargs,
822
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
823
+ if "padding_mask" in kwargs:
824
+ warnings.warn(
825
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
826
+ )
827
+ """
828
+ Args:
829
+ hidden_states (`torch.FloatTensor`):
830
+ input to the layer of shape `(batch, seq_len, embed_dim)`
831
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
832
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
833
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
834
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
835
+ `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
836
+ output_attentions (`bool`, *optional*):
837
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
838
+ returned tensors for more detail.
839
+ use_cache (`bool`, *optional*):
840
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
841
+ (see `past_key_values`).
842
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
843
+ """
844
+
845
+ residual = hidden_states
846
+
847
+ hidden_states = self.input_layernorm(hidden_states)
848
+
849
+ # Self Attention
850
+ attn_outputs, self_attn_weights, present_key_value = self.self_attn(
851
+ hidden_states=hidden_states,
852
+ attention_mask=attention_mask,
853
+ position_ids=position_ids,
854
+ past_key_value=past_key_value,
855
+ output_attentions=output_attentions,
856
+ use_cache=use_cache,
857
+ )
858
+
859
+ hidden_states = residual + self.resid_attn_dropout(attn_outputs)
860
+
861
+ residual = hidden_states
862
+ hidden_states = self.post_attention_layernorm(hidden_states)
863
+ hidden_states = self.mlp(hidden_states)
864
+ hidden_states = residual + self.resid_mlp_dropout(hidden_states)
865
+
866
+ outputs = (hidden_states,)
867
+
868
+ if output_attentions:
869
+ outputs += (self_attn_weights,)
870
+
871
+ if use_cache:
872
+ outputs += (present_key_value,)
873
+
874
+ return outputs
875
+
876
+
877
+ PHI3_START_DOCSTRING = r"""
878
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
879
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
880
+ etc.)
881
+
882
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
883
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
884
+ and behavior.
885
+
886
+ Parameters:
887
+ config ([`Phi3Config`]):
888
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
889
+ load the weights associated with the model, only the configuration. Check out the
890
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
891
+ """
892
+
893
+
894
+ @add_start_docstrings(
895
+ "The bare Phi-3 model outputting raw hidden-states without any specific head on top.",
896
+ PHI3_START_DOCSTRING,
897
+ )
898
+ class Phi3PreTrainedModel(PreTrainedModel):
899
+ config_class = Phi3Config
900
+ base_model_prefix = "model"
901
+ supports_gradient_checkpointing = True
902
+ _no_split_modules = ["Phi3DecoderLayer"]
903
+ _skip_keys_device_placement = "past_key_values"
904
+ _supports_flash_attn_2 = True
905
+ _supports_sdpa = False
906
+ _supports_cache_class = True
907
+
908
+ _version = "0.0.5"
909
+
910
+ def _init_weights(self, module):
911
+ std = self.config.initializer_range
912
+ if isinstance(module, nn.Linear):
913
+ module.weight.data.normal_(mean=0.0, std=std)
914
+ if module.bias is not None:
915
+ module.bias.data.zero_()
916
+ elif isinstance(module, nn.Embedding):
917
+ module.weight.data.normal_(mean=0.0, std=std)
918
+ if module.padding_idx is not None:
919
+ module.weight.data[module.padding_idx].zero_()
920
+
921
+
922
+ PHI3_INPUTS_DOCSTRING = r"""
923
+ Args:
924
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
925
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
926
+ it.
927
+
928
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
929
+ [`PreTrainedTokenizer.__call__`] for details.
930
+
931
+ [What are input IDs?](../glossary#input-ids)
932
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
933
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
934
+
935
+ - 1 for tokens that are **not masked**,
936
+ - 0 for tokens that are **masked**.
937
+
938
+ [What are attention masks?](../glossary#attention-mask)
939
+
940
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
941
+ [`PreTrainedTokenizer.__call__`] for details.
942
+
943
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
944
+ `past_key_values`).
945
+
946
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
947
+ and modify it to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
948
+ information on the default strategy.
949
+
950
+ - 1 indicates the head is **not masked**,
951
+ - 0 indicates the head is **masked**.
952
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
953
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
954
+ config.n_positions - 1]`.
955
+
956
+ [What are position IDs?](../glossary#position-ids)
957
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
958
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
959
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
960
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
961
+
962
+ Two formats are allowed:
963
+ - a [`~cache_utils.Cache`] instance;
964
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
965
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
966
+ cache format.
967
+
968
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
969
+ legacy cache format will be returned.
970
+
971
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
972
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
973
+ of shape `(batch_size, sequence_length)`.
974
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
975
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
976
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
977
+ model's internal embedding lookup matrix.
978
+ use_cache (`bool`, *optional*):
979
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
980
+ `past_key_values`).
981
+ output_attentions (`bool`, *optional*):
982
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
983
+ tensors for more detail.
984
+ output_hidden_states (`bool`, *optional*):
985
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
986
+ more detail.
987
+ return_dict (`bool`, *optional*):
988
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
989
+ """
990
+
991
+
992
+ @add_start_docstrings(
993
+ "The bare Phi-3 model outputting raw hidden-states without any specific head on top.",
994
+ PHI3_START_DOCSTRING,
995
+ )
996
+ class Phi3Model(Phi3PreTrainedModel):
997
+ """
998
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]
999
+
1000
+ Args:
1001
+ config: Phi3Config
1002
+ """
1003
+
1004
+ def __init__(self, config: Phi3Config):
1005
+ super().__init__(config)
1006
+ self.padding_idx = config.pad_token_id
1007
+ self.vocab_size = config.vocab_size
1008
+
1009
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1010
+ self.embed_dropout = nn.Dropout(config.embd_pdrop)
1011
+ self.layers = nn.ModuleList(
1012
+ [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
1013
+ )
1014
+ if is_flash_attn_2_available():
1015
+ config._attn_implementation = "flash_attention_2"
1016
+ self._attn_implementation = config._attn_implementation
1017
+ self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1018
+
1019
+ self.gradient_checkpointing = False
1020
+ # Initialize weights and apply final processing
1021
+ self.post_init()
1022
+
1023
+ def get_input_embeddings(self):
1024
+ return self.embed_tokens
1025
+
1026
+ def set_input_embeddings(self, value):
1027
+ self.embed_tokens = value
1028
+
1029
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1030
+ def forward(
1031
+ self,
1032
+ input_ids: torch.LongTensor = None,
1033
+ attention_mask: Optional[torch.Tensor] = None,
1034
+ position_ids: Optional[torch.LongTensor] = None,
1035
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1036
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1037
+ use_cache: Optional[bool] = None,
1038
+ output_attentions: Optional[bool] = None,
1039
+ output_hidden_states: Optional[bool] = None,
1040
+ return_dict: Optional[bool] = None,
1041
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
1042
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1043
+ output_hidden_states = (
1044
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1045
+ )
1046
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1047
+
1048
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1049
+
1050
+ # retrieve input_ids and inputs_embeds
1051
+ if input_ids is not None and inputs_embeds is not None:
1052
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1053
+ elif input_ids is not None:
1054
+ batch_size, seq_length = input_ids.shape[:2]
1055
+ elif inputs_embeds is not None:
1056
+ batch_size, seq_length = inputs_embeds.shape[:2]
1057
+ else:
1058
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1059
+
1060
+ past_key_values_length = 0
1061
+
1062
+ if self.gradient_checkpointing and self.training:
1063
+ if use_cache:
1064
+ logger.warning_once(
1065
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1066
+ )
1067
+ use_cache = False
1068
+
1069
+ if use_cache:
1070
+ use_legacy_cache = not isinstance(past_key_values, Cache)
1071
+ if use_legacy_cache:
1072
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1073
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
1074
+
1075
+ if position_ids is None:
1076
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1077
+ position_ids = torch.arange(
1078
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1079
+ )
1080
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
1081
+ else:
1082
+ position_ids = position_ids.view(-1, seq_length).long()
1083
+
1084
+ if inputs_embeds is None:
1085
+ inputs_embeds = self.embed_tokens(input_ids)
1086
+
1087
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
1088
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
1089
+ if is_padding_right:
1090
+ raise ValueError(
1091
+ "You are attempting to perform batched generation with padding_side='right'"
1092
+ " this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to "
1093
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
1094
+ )
1095
+
1096
+ if self._attn_implementation == "flash_attention_2":
1097
+ # 2d mask is passed through the layers
1098
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1099
+ else:
1100
+ # 4d mask is passed through the layers
1101
+ attention_mask = _prepare_4d_causal_attention_mask(
1102
+ attention_mask,
1103
+ (batch_size, seq_length),
1104
+ inputs_embeds,
1105
+ past_key_values_length,
1106
+ sliding_window=self.config.sliding_window,
1107
+ )
1108
+
1109
+ hidden_states = inputs_embeds
1110
+
1111
+ # decoder layers
1112
+ all_hidden_states = () if output_hidden_states else None
1113
+ all_self_attns = () if output_attentions else None
1114
+ next_decoder_cache = None
1115
+
1116
+ for decoder_layer in self.layers:
1117
+ if output_hidden_states:
1118
+ all_hidden_states += (hidden_states,)
1119
+
1120
+ if self.gradient_checkpointing and self.training:
1121
+ layer_outputs = self._gradient_checkpointing_func(
1122
+ decoder_layer.__call__,
1123
+ hidden_states,
1124
+ attention_mask,
1125
+ position_ids,
1126
+ past_key_values,
1127
+ output_attentions,
1128
+ use_cache,
1129
+ )
1130
+ else:
1131
+ layer_outputs = decoder_layer(
1132
+ hidden_states,
1133
+ attention_mask=attention_mask,
1134
+ position_ids=position_ids,
1135
+ past_key_value=past_key_values,
1136
+ output_attentions=output_attentions,
1137
+ use_cache=use_cache,
1138
+ )
1139
+
1140
+ hidden_states = layer_outputs[0]
1141
+
1142
+ if use_cache:
1143
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1144
+
1145
+ if output_attentions:
1146
+ all_self_attns += (layer_outputs[1],)
1147
+
1148
+ hidden_states = self.norm(hidden_states)
1149
+
1150
+ # add hidden states from the last decoder layer
1151
+ if output_hidden_states:
1152
+ all_hidden_states += (hidden_states,)
1153
+
1154
+ next_cache = None
1155
+ if use_cache:
1156
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1157
+ if not return_dict:
1158
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1159
+ return BaseModelOutputWithPast(
1160
+ last_hidden_state=hidden_states,
1161
+ past_key_values=next_cache,
1162
+ hidden_states=all_hidden_states,
1163
+ attentions=all_self_attns,
1164
+ )
1165
+
1166
+
1167
+ class Phi3ForCausalLM(Phi3PreTrainedModel):
1168
+ _tied_weights_keys = ["lm_head.weight"]
1169
+
1170
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3
1171
+ def __init__(self, config):
1172
+ super().__init__(config)
1173
+ self.model = Phi3Model(config)
1174
+ self.vocab_size = config.vocab_size
1175
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1176
+
1177
+ # Initialize weights and apply final processing
1178
+ self.post_init()
1179
+
1180
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
1181
+ def get_input_embeddings(self):
1182
+ return self.model.embed_tokens
1183
+
1184
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
1185
+ def set_input_embeddings(self, value):
1186
+ self.model.embed_tokens = value
1187
+
1188
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
1189
+ def get_output_embeddings(self):
1190
+ return self.lm_head
1191
+
1192
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
1193
+ def set_output_embeddings(self, new_embeddings):
1194
+ self.lm_head = new_embeddings
1195
+
1196
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
1197
+ def set_decoder(self, decoder):
1198
+ self.model = decoder
1199
+
1200
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
1201
+ def get_decoder(self):
1202
+ return self.model
1203
+
1204
+ # Ignore copy
1205
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1206
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1207
+ def forward(
1208
+ self,
1209
+ input_ids: torch.LongTensor = None,
1210
+ attention_mask: Optional[torch.Tensor] = None,
1211
+ position_ids: Optional[torch.LongTensor] = None,
1212
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1213
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1214
+ labels: Optional[torch.LongTensor] = None,
1215
+ use_cache: Optional[bool] = None,
1216
+ output_attentions: Optional[bool] = None,
1217
+ output_hidden_states: Optional[bool] = None,
1218
+ return_dict: Optional[bool] = None,
1219
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1220
+ r"""
1221
+ Args:
1222
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1223
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1224
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1225
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1226
+
1227
+ Returns:
1228
+
1229
+ Example:
1230
+
1231
+ ```python
1232
+ >>> from transformers import AutoTokenizer, Phi3ForCausalLM
1233
+
1234
+ >>> model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
1235
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
1236
+
1237
+ >>> prompt = "This is an example script ."
1238
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1239
+
1240
+ >>> # Generate
1241
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1242
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1243
+ 'This is an example script .\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'
1244
+ ```"""
1245
+
1246
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1247
+ output_hidden_states = (
1248
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1249
+ )
1250
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1251
+
1252
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1253
+ outputs = self.model(
1254
+ input_ids=input_ids,
1255
+ attention_mask=attention_mask,
1256
+ position_ids=position_ids,
1257
+ past_key_values=past_key_values,
1258
+ inputs_embeds=inputs_embeds,
1259
+ use_cache=use_cache,
1260
+ output_attentions=output_attentions,
1261
+ output_hidden_states=output_hidden_states,
1262
+ return_dict=return_dict,
1263
+ )
1264
+
1265
+ hidden_states = outputs[0]
1266
+ logits = self.lm_head(hidden_states)
1267
+ logits = logits.float()
1268
+
1269
+ loss = None
1270
+ if labels is not None:
1271
+ # Shift so that tokens < n predict n
1272
+ shift_logits = logits[..., :-1, :].contiguous()
1273
+ shift_labels = labels[..., 1:].contiguous()
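+ # i.e. the logits at position t are scored against the label at position t + 1, so a sequence of
+ # length L yields L - 1 next-token prediction targets.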
1274
+ # Flatten the tokens
1275
+ loss_fct = CrossEntropyLoss()
1276
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1277
+ shift_labels = shift_labels.view(-1)
1278
+ # Enable model parallelism
1279
+ shift_labels = shift_labels.to(shift_logits.device)
1280
+ loss = loss_fct(shift_logits, shift_labels)
1281
+
1282
+ if not return_dict:
1283
+ output = (logits,) + outputs[1:]
1284
+ return (loss,) + output if loss is not None else output
1285
+
1286
+ return CausalLMOutputWithPast(
1287
+ loss=loss,
1288
+ logits=logits,
1289
+ past_key_values=outputs.past_key_values,
1290
+ hidden_states=outputs.hidden_states,
1291
+ attentions=outputs.attentions,
1292
+ )
1293
+
1294
+ # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
1295
+ def prepare_inputs_for_generation(
1296
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1297
+ ):
1298
+ # When the input length first reaches the long/short RoPE factor switching point, force the cache to be re-computed.
1299
+ # This slows down generation at this single token position, but it is better than the current failure.
1300
+ if past_key_values and self.config.rope_scaling and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1:
1301
+ past_length = past_key_values.seen_tokens if isinstance(past_key_values, Cache) else past_key_values[0][0].shape[2]
1302
+ if past_length <= self.config.original_max_position_embeddings:
1303
+ past_key_values = None
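+ # For illustration, assuming original_max_position_embeddings=4096: the first time the total input
+ # length reaches 4097 while the cache still holds <= 4096 positions, the cache (built with the
+ # short RoPE factors) is dropped and re-built under the long factors.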
1304
+
1305
+ if past_key_values is not None:
1306
+ if isinstance(past_key_values, Cache):
1307
+ cache_length = past_key_values.get_seq_length()
1308
+ past_length = past_key_values.seen_tokens
1309
+ max_cache_length = past_key_values.get_max_length()
1310
+ else:
1311
+ cache_length = past_length = past_key_values[0][0].shape[2]
1312
+ max_cache_length = None
1313
+
1314
+ # Keep only the unprocessed tokens:
1315
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1316
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1317
+ # input)
1318
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1319
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1320
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1321
+ # input_ids based on the past_length.
1322
+ elif past_length < input_ids.shape[1]:
1323
+ input_ids = input_ids[:, past_length:]
1324
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
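+ # Example for case 1: an attention_mask of length 12 with past_length 8 and input_ids of length 5
+ # keeps input_ids[:, -4:], i.e. only the tokens that are not already covered by the cache.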
1325
+
1326
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1327
+ if (
1328
+ max_cache_length is not None
1329
+ and attention_mask is not None
1330
+ and cache_length + input_ids.shape[1] > max_cache_length
1331
+ ):
1332
+ attention_mask = attention_mask[:, -max_cache_length:]
1333
+
1334
+ position_ids = kwargs.get("position_ids", None)
1335
+ if attention_mask is not None and position_ids is None:
1336
+ # create position_ids on the fly for batch generation
1337
+ position_ids = attention_mask.long().cumsum(-1) - 1
1338
+ position_ids.masked_fill_(attention_mask == 0, 1)
1339
+ if past_key_values:
1340
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1341
+
1342
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1343
+ if inputs_embeds is not None and past_key_values is None:
1344
+ model_inputs = {"inputs_embeds": inputs_embeds}
1345
+ else:
1346
+ model_inputs = {"input_ids": input_ids}
1347
+
1348
+ model_inputs.update(
1349
+ {
1350
+ "position_ids": position_ids,
1351
+ "past_key_values": past_key_values,
1352
+ "use_cache": kwargs.get("use_cache"),
1353
+ "attention_mask": attention_mask,
1354
+ }
1355
+ )
1356
+ return model_inputs
1357
+
1358
+ @staticmethod
1359
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
1360
+ def _reorder_cache(past_key_values, beam_idx):
1361
+ reordered_past = ()
1362
+ for layer_past in past_key_values:
1363
+ reordered_past += (
1364
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1365
+ )
1366
+ return reordered_past
1367
+
1368
+
1369
+ @add_start_docstrings(
1370
+ """
1371
+ The [`Phi3Model`] with a sequence classification head on top (linear layer).
1372
+
1373
+ [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1374
+ (e.g. GPT-2) do.
1375
+
1376
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1377
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1378
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1379
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1380
+ each row of the batch).
1381
+ """,
1382
+ PHI3_START_DOCSTRING,
1383
+ )
1384
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Phi3, LLAMA->PHI3, self.transformer->self.model, transformer_outputs->model_outputs
1385
+ class Phi3ForSequenceClassification(Phi3PreTrainedModel):
1386
+ def __init__(self, config):
1387
+ super().__init__(config)
1388
+ self.num_labels = config.num_labels
1389
+ self.model = Phi3Model(config)
1390
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1391
+
1392
+ # Initialize weights and apply final processing
1393
+ self.post_init()
1394
+
1395
+ def get_input_embeddings(self):
1396
+ return self.model.embed_tokens
1397
+
1398
+ def set_input_embeddings(self, value):
1399
+ self.model.embed_tokens = value
1400
+
1401
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1402
+ def forward(
1403
+ self,
1404
+ input_ids: torch.LongTensor = None,
1405
+ attention_mask: Optional[torch.Tensor] = None,
1406
+ position_ids: Optional[torch.LongTensor] = None,
1407
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1408
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1409
+ labels: Optional[torch.LongTensor] = None,
1410
+ use_cache: Optional[bool] = None,
1411
+ output_attentions: Optional[bool] = None,
1412
+ output_hidden_states: Optional[bool] = None,
1413
+ return_dict: Optional[bool] = None,
1414
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1415
+ r"""
1416
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1417
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1418
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1419
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1420
+ """
1421
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1422
+
1423
+ model_outputs = self.model(
1424
+ input_ids,
1425
+ attention_mask=attention_mask,
1426
+ position_ids=position_ids,
1427
+ past_key_values=past_key_values,
1428
+ inputs_embeds=inputs_embeds,
1429
+ use_cache=use_cache,
1430
+ output_attentions=output_attentions,
1431
+ output_hidden_states=output_hidden_states,
1432
+ return_dict=return_dict,
1433
+ )
1434
+ hidden_states = model_outputs[0]
1435
+ logits = self.score(hidden_states)
1436
+
1437
+ if input_ids is not None:
1438
+ batch_size = input_ids.shape[0]
1439
+ else:
1440
+ batch_size = inputs_embeds.shape[0]
1441
+
1442
+ if self.config.pad_token_id is None and batch_size != 1:
1443
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1444
+ if self.config.pad_token_id is None:
1445
+ sequence_lengths = -1
1446
+ else:
1447
+ if input_ids is not None:
1448
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1449
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1450
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1451
+ sequence_lengths = sequence_lengths.to(logits.device)
1452
+ else:
1453
+ sequence_lengths = -1
1454
+
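+ # For illustration, with pad_token_id P and input_ids = [[5, 6, P, P]]: eq(P).argmax(-1) is 2,
+ # minus 1 gives 1, so the logits of the last non-padding token (index 1) are pooled below; if no
+ # padding is present, the modulo wraps -1 around to the final position.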
1455
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1456
+
1457
+ loss = None
1458
+ if labels is not None:
1459
+ labels = labels.to(logits.device)
1460
+ if self.config.problem_type is None:
1461
+ if self.num_labels == 1:
1462
+ self.config.problem_type = "regression"
1463
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1464
+ self.config.problem_type = "single_label_classification"
1465
+ else:
1466
+ self.config.problem_type = "multi_label_classification"
1467
+
1468
+ if self.config.problem_type == "regression":
1469
+ loss_fct = MSELoss()
1470
+ if self.num_labels == 1:
1471
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1472
+ else:
1473
+ loss = loss_fct(pooled_logits, labels)
1474
+ elif self.config.problem_type == "single_label_classification":
1475
+ loss_fct = CrossEntropyLoss()
1476
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1477
+ elif self.config.problem_type == "multi_label_classification":
1478
+ loss_fct = BCEWithLogitsLoss()
1479
+ loss = loss_fct(pooled_logits, labels)
1480
+ if not return_dict:
1481
+ output = (pooled_logits,) + model_outputs[1:]
1482
+ return ((loss,) + output) if loss is not None else output
1483
+
1484
+ return SequenceClassifierOutputWithPast(
1485
+ loss=loss,
1486
+ logits=pooled_logits,
1487
+ past_key_values=model_outputs.past_key_values,
1488
+ hidden_states=model_outputs.hidden_states,
1489
+ attentions=model_outputs.attentions,
1490
+ )
1491
+
1492
+
1493
+ @add_start_docstrings(
1494
+ """
1495
+ [`Phi3Model`] with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1496
+ Named-Entity-Recognition (NER) tasks.
1497
+ """,
1498
+ PHI3_START_DOCSTRING,
1499
+ )
1500
+ # Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with Mpt->Phi3,MPT->PHI3,self.transformer->self.model,transformer_outputs->model_outputs
1501
+ class Phi3ForTokenClassification(Phi3PreTrainedModel):
1502
+ def __init__(self, config: Phi3Config):
1503
+ super().__init__(config)
1504
+ self.num_labels = config.num_labels
1505
+
1506
+ self.model = Phi3Model(config)
1507
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
1508
+ classifier_dropout = config.classifier_dropout
1509
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
1510
+ classifier_dropout = config.hidden_dropout
1511
+ else:
1512
+ classifier_dropout = 0.1
1513
+ self.dropout = nn.Dropout(classifier_dropout)
1514
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1515
+
1516
+ # Initialize weights and apply final processing
1517
+ self.post_init()
1518
+
1519
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1520
+ @add_code_sample_docstrings(
1521
+ checkpoint=_CHECKPOINT_FOR_DOC,
1522
+ output_type=TokenClassifierOutput,
1523
+ config_class=_CONFIG_FOR_DOC,
1524
+ )
1525
+ def forward(
1526
+ self,
1527
+ input_ids: Optional[torch.LongTensor] = None,
1528
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1529
+ attention_mask: Optional[torch.Tensor] = None,
1530
+ inputs_embeds: Optional[torch.Tensor] = None,
1531
+ labels: Optional[torch.Tensor] = None,
1532
+ use_cache: Optional[bool] = None,
1533
+ output_attentions: Optional[bool] = None,
1534
+ output_hidden_states: Optional[bool] = None,
1535
+ return_dict: Optional[bool] = None,
1536
+ **deprecated_arguments,
1537
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1538
+ r"""
1539
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1540
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1541
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1542
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1543
+ """
1544
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1545
+
1546
+ model_outputs = self.model(
1547
+ input_ids,
1548
+ past_key_values=past_key_values,
1549
+ attention_mask=attention_mask,
1550
+ inputs_embeds=inputs_embeds,
1551
+ use_cache=use_cache,
1552
+ output_attentions=output_attentions,
1553
+ output_hidden_states=output_hidden_states,
1554
+ return_dict=return_dict,
1555
+ )
1556
+
1557
+ hidden_states = model_outputs[0]
1558
+ hidden_states = self.dropout(hidden_states)
1559
+ logits = self.classifier(hidden_states)
1560
+
1561
+ loss = None
1562
+ if labels is not None:
1563
+ # move labels to correct device to enable model parallelism
1564
+ labels = labels.to(logits.device)
1565
+ batch_size, seq_length = labels.shape
1566
+ loss_fct = CrossEntropyLoss()
1567
+ loss = loss_fct(
1568
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
1569
+ )
1570
+
1571
+ if not return_dict:
1572
+ output = (logits,) + model_outputs[2:]
1573
+ return ((loss,) + output) if loss is not None else output
1574
+
1575
+ return TokenClassifierOutput(
1576
+ loss=loss,
1577
+ logits=logits,
1578
+ hidden_states=model_outputs.hidden_states,
1579
+ attentions=model_outputs.attentions,
1580
+ )
1581
+
1582
+ class PHI3ForHTMLTreeGeneration(Phi3PreTrainedModel):
1583
+ # _tied_weights_keys = ["lm_head.weight"]
1584
+
1585
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3
1586
+ def __init__(self, config):
1587
+ super().__init__(config)
1588
+ self.model = Phi3Model(config)
1589
+ self.vocab_size = config.vocab_size
1590
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1591
+
1592
+ # Initialize weights and apply final processing
1593
+ self.post_init()
1594
+
1595
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
1596
+ def get_input_embeddings(self):
1597
+ return self.model.embed_tokens
1598
+
1599
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
1600
+ def set_input_embeddings(self, value):
1601
+ self.model.embed_tokens = value
1602
+
1603
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
1604
+ def get_output_embeddings(self):
1605
+ return self.lm_head
1606
+
1607
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
1608
+ def set_output_embeddings(self, new_embeddings):
1609
+ self.lm_head = new_embeddings
1610
+
1611
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
1612
+ def set_decoder(self, decoder):
1613
+ self.model = decoder
1614
+
1615
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
1616
+ def get_decoder(self):
1617
+ return self.model
1618
+
1619
+ # Ignore copy
1620
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1621
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1622
+ def forward(
1623
+ self,
1624
+ input_ids: torch.LongTensor = None,
1625
+ attention_mask: Optional[torch.Tensor] = None,
1626
+ position_ids: Optional[torch.LongTensor] = None,
1627
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1628
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1629
+ labels: Optional[torch.LongTensor] = None,
1630
+ use_cache: Optional[bool] = None,
1631
+ output_attentions: Optional[bool] = None,
1632
+ output_hidden_states: Optional[bool] = None,
1633
+ return_dict: Optional[bool] = None,
1634
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1635
+ r"""
1636
+ Args:
1637
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1638
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1639
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1640
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1641
+
1642
+ Returns:
1643
+
1644
+ Example:
1645
+
1646
+ ```python
1647
+ >>> from transformers import AutoTokenizer, Phi3ForCausalLM
1648
+
1649
+ >>> model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
1650
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
1651
+
1652
+ >>> prompt = "This is an example script ."
1653
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1654
+
1655
+ >>> # Generate
1656
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1657
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1658
+ 'This is an example script .\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'
1659
+ ```"""
1660
+
1661
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1662
+ output_hidden_states = (
1663
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1664
+ )
1665
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1666
+
1667
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1668
+ outputs = self.model(
1669
+ input_ids=input_ids,
1670
+ attention_mask=attention_mask,
1671
+ position_ids=position_ids,
1672
+ past_key_values=past_key_values,
1673
+ inputs_embeds=inputs_embeds,
1674
+ use_cache=use_cache,
1675
+ output_attentions=output_attentions,
1676
+ output_hidden_states=output_hidden_states,
1677
+ return_dict=return_dict,
1678
+ )
1679
+
1680
+ hidden_states = outputs[0]
1681
+ logits = self.lm_head(hidden_states)
1682
+ logits = logits.float()
1683
+
1684
+ loss = None
1685
+ if labels is not None:
1686
+ # Shift so that tokens < n predict n
1687
+ shift_logits = logits[..., :-1, :].contiguous()
1688
+ shift_labels = labels[..., 1:].contiguous()
1689
+ # Flatten the tokens
1690
+ loss_fct = CrossEntropyLoss()
1691
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1692
+ shift_labels = shift_labels.view(-1)
1693
+ # Enable model parallelism
1694
+ shift_labels = shift_labels.to(shift_logits.device)
1695
+ loss = loss_fct(shift_logits, shift_labels)
1696
+
1697
+ if not return_dict:
1698
+ output = (logits,) + outputs[1:]
1699
+ return (loss,) + output if loss is not None else output
1700
+
1701
+ return CausalLMOutputWithPast(
1702
+ loss=loss,
1703
+ logits=logits,
1704
+ past_key_values=outputs.past_key_values,
1705
+ hidden_states=outputs.hidden_states,
1706
+ attentions=outputs.attentions,
1707
+ )
1708
+
1709
+ # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
1710
+ def prepare_inputs_for_generation(
1711
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1712
+ ):
1713
+ # When the input length first reaches the long/short RoPE factor switching point, force the cache to be re-computed.
1714
+ # This slows down generation at this single token position, but it is better than the current failure.
1715
+ if past_key_values and self.config.rope_scaling and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1:
1716
+ past_length = past_key_values.seen_tokens if isinstance(past_key_values, Cache) else past_key_values[0][0].shape[2]
1717
+ if past_length <= self.config.original_max_position_embeddings:
1718
+ past_key_values = None
1719
+
1720
+ if past_key_values is not None:
1721
+ if isinstance(past_key_values, Cache):
1722
+ cache_length = past_key_values.get_seq_length()
1723
+ past_length = past_key_values.seen_tokens
1724
+ max_cache_length = past_key_values.get_max_length()
1725
+ else:
1726
+ cache_length = past_length = past_key_values[0][0].shape[2]
1727
+ max_cache_length = None
1728
+
1729
+ # Keep only the unprocessed tokens:
1730
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1731
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1732
+ # input)
1733
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1734
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1735
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1736
+ # input_ids based on the past_length.
1737
+ elif past_length < input_ids.shape[1]:
1738
+ input_ids = input_ids[:, past_length:]
1739
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1740
+
1741
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1742
+ if (
1743
+ max_cache_length is not None
1744
+ and attention_mask is not None
1745
+ and cache_length + input_ids.shape[1] > max_cache_length
1746
+ ):
1747
+ attention_mask = attention_mask[:, -max_cache_length:]
1748
+
1749
+ position_ids = kwargs.get("position_ids", None)
1750
+ if attention_mask is not None and position_ids is None:
1751
+ # create position_ids on the fly for batch generation
1752
+ position_ids = attention_mask.long().cumsum(-1) - 1
1753
+ position_ids.masked_fill_(attention_mask == 0, 1)
1754
+ if past_key_values:
1755
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1756
+
1757
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1758
+ if inputs_embeds is not None and past_key_values is None:
1759
+ model_inputs = {"inputs_embeds": inputs_embeds}
1760
+ else:
1761
+ model_inputs = {"input_ids": input_ids}
1762
+
1763
+ model_inputs.update(
1764
+ {
1765
+ "position_ids": position_ids,
1766
+ "past_key_values": past_key_values,
1767
+ "use_cache": kwargs.get("use_cache"),
1768
+ "attention_mask": attention_mask,
1769
+ }
1770
+ )
1771
+ return model_inputs
1772
+
1773
+ @staticmethod
1774
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
1775
+ def _reorder_cache(past_key_values, beam_idx):
1776
+ reordered_past = ()
1777
+ for layer_past in past_key_values:
1778
+ reordered_past += (
1779
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1780
+ )
1781
+ return reordered_past
1782
+
1783
+ @torch.inference_mode()
1784
+ def generate_html_tree(self,
1785
+ tokenizer,
1786
+ query: List[str],
1787
+ htmls: List[List[str]],
1788
+ **kwargs):
1789
+ max_seq_length = kwargs.pop("max_seq_length", 131072)
1790
+ def apply_html_tree_template(query, htmls):
1791
+ template = """**HTML**: ```{input_html}```\n**Question**: **{question}**\n Your task is to identify the most relevant text piece to the given question in the HTML document. This text piece could either be a direct paraphrase to the fact, or a supporting evidence that can be used to infer the fact. The overall length of the text piece should be more than 300 words and less than 500 words. You should provide the path to the text piece in the HTML document. An example for the output is: <html 1><body><div 2><p>Some key information..."""
1792
+ return template.format(input_html="\n".join(htmls), question=query)
1793
+
1794
+ res_html_refs = []
1795
+ # get the generation probability of tree nodes
1796
+ for idx, _htmls in enumerate(htmls):
1797
+ if isinstance(_htmls, str):
1798
+ _htmls = [_htmls]
1799
+ else:
1800
+ # drop htmls that are too long
1801
+ html_token_lens = [len(tokenizer.encode(html)) for html in _htmls]
1802
+ total_html_token_len = sum(html_token_lens)
1803
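+ # drop the longest HTML document repeatedly until the total fits within max_seq_length - 2048 tokens (or only one document is left)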
+ while total_html_token_len > max_seq_length - 2048:
1804
+ if len(_htmls) == 1:
1805
+ break
1806
+ max_length_idx = html_token_lens.index(max(html_token_lens))
1807
+ html_token_lens.pop(max_length_idx)
1808
+ _htmls.pop(max_length_idx)
1809
+ total_html_token_len = sum(html_token_lens)
1810
+
1811
+ model_input = apply_html_tree_template(query, _htmls)
1812
+
1813
+ inputs = tokenizer.apply_chat_template([{"role": "user", "content": model_input}], add_special_tokens=True,
1814
+ add_generation_prompt=True, tokenize=True, return_tensors="pt",
1815
+ return_dict=True)
1816
+
1817
+ # merge the htmls into a single document
1818
+ soup = bs4.BeautifulSoup("", 'html.parser')
1819
+ for html in _htmls:
1820
+ soup.append(bs4.BeautifulSoup(html, 'html.parser'))
1821
+
1822
+ token_id_paths = []
1823
+ html_chunk_paths = split_tree(soup, max_node_words=self.max_node_words)
1824
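+ # split_tree yields (tag, path, is_leaf) triples; keep the tag paths and the leaf flags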
+ is_leaf = [p[2] for p in html_chunk_paths]
1825
+ html_chunk_paths = [p[1] for p in html_chunk_paths]
1826
+
1827
+ for path in html_chunk_paths:
1828
+ path_str = "<" + "><".join(path) + ">"
1829
+ token_ids = tokenizer.encode(path_str, add_special_tokens=False)
1830
+ token_id_paths.append(token_ids)
1831
+
1832
+ # construct token_id_tree
1833
+ root = TokenIdNode(-1)
1834
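+ # -1 is a virtual root; each tokenized path is inserted token by token, sharing common prefixes with earlier paths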
+ for path in token_id_paths:
1835
+ parent = root
1836
+ # iterate through path
1837
+ for i, token_id in enumerate(path):
1838
+ has_child = False
1839
+ # find existing child
1840
+ for child in parent.children:
1841
+ if child.name == token_id:
1842
+ parent = child
1843
+ has_child = True
1844
+ break
1845
+ if not has_child:
1846
+ node = TokenIdNode(token_id, parent=parent, input_ids=path[:i + 1])
1847
+ parent = node
1848
+
1849
+ node_queue = [root]
1850
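+ # breadth-first walk over the trie: a single child inherits probability 1.0, while at branching points the model scores the competing next tokens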
+ while node_queue:
1851
+ cur_node = node_queue.pop(0)
1852
+ children = cur_node.children
1853
+ if len(children) == 1:
1854
+ cur_node.children[0].prob = str(np.float32(1.0))
1855
+ node_queue.append(children[0])
1856
+ continue
1857
+ elif len(children) == 0:
1858
+ continue
1859
+ # calculate transition probability for each child
1860
+ force_token_id = [c.name for c in children]
1861
+ child_input_ids = torch.tensor(cur_node.input_ids, dtype=torch.long).unsqueeze(0)
1862
+ # concatenate context input id with child input id
1863
+ child_input_ids = torch.cat([inputs["input_ids"][idx:idx + 1], child_input_ids], dim=1).to(self.device)
1864
+ model_inputs = self.prepare_inputs_for_generation(child_input_ids, **kwargs)
1865
+ outputs = self(
1866
+ **model_inputs,
1867
+ return_dict=True,
1868
+ )
1869
+ # get the probability of force_token_id
1870
+ force_token_id = torch.tensor(force_token_id, device=self.device)
1871
+ probs = torch.gather(outputs.logits[:, 0, :], -1, force_token_id.unsqueeze(0))
1872
+ # softmax
1873
+ probs = torch.nn.functional.softmax(probs, dim=-1)
1874
+ # alternative: linear normalization instead of softmax
1875
+ # probs = probs / probs.sum()
1876
+ probs = probs.squeeze(0).detach().to(torch.float32).cpu().numpy()
1877
+ for i, child in enumerate(children):
1878
+ child.prob = str(probs[i])
1879
+ node_queue.append(child)
1880
+
1881
+ res_html_refs.append({
1882
+ "html": str(soup),
1883
+ "paths": html_chunk_paths,
1884
+ "is_leaf": is_leaf,
1885
+ "path_token_ids": token_id_paths,
1886
+ "node_tree": list(TokenDotExporter(root, nodenamefunc=nodenamefunc))
1887
+ })
1888
+ return res_html_refs
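
A minimal sketch of how the block-tree generation above might be driven end to end. The checkpoint path is a placeholder, loading goes through trust_remote_code so that the custom class providing generate_html_tree (and its max_node_words attribute) is used, and the query/HTML inputs are illustrative only:

    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

    ckpt = "path/to/this/checkpoint"  # placeholder: local path or hub id of this upload
    tokenizer = AutoTokenizer.from_pretrained(ckpt, trust_remote_code=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(ckpt, trust_remote_code=True).eval()

    question = "When was the Bellagio in Las Vegas built?"
    htmls = ["<html><p>The Bellagio is a resort on the Las Vegas Strip. It opened in 1998.</p></html>"]

    # one query paired with one list of candidate HTML documents
    result = model.generate_html_tree(tokenizer, [question], [htmls])
    print(result[0]["paths"])      # tag paths of the candidate blocks
    print(result[0]["node_tree"])  # token-id tree annotated with transition probabilities
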
seq_para_utils.py ADDED
@@ -0,0 +1,164 @@
1
+ import os
2
+ import torch
3
+ import logging
4
+ import transformers
5
+ import torch.distributed as dist
6
+ import torch
7
+ import math
8
+
9
+ # global var
10
+ _SEQUENCE_PARALLEL_GROUP = None
11
+ _SEQUENCE_PARALLEL_SIZE = 1
12
+
13
+ def init_logger(fpath='', local_rank=0):
14
+ if transformers.trainer_utils.is_main_process(local_rank):
15
+ if fpath:
16
+ if os.path.dirname(fpath):
17
+ os.makedirs(os.path.dirname(fpath), exist_ok=True)
18
+ file_handler = logging.FileHandler(fpath, mode='a') # to file
19
+ transformers.logging.add_handler(file_handler)
20
+ transformers.logging.set_verbosity_info()
21
+ else:
22
+ transformers.logging.set_verbosity_error()  # reduce logging noise on non-main processes
23
+ transformers.logging.enable_explicit_format()
24
+ return transformers.logging.get_logger()
25
+
26
+ class DistributedSampler(torch.utils.data.distributed.DistributedSampler):
27
+ def set_epoch(self, epoch):
28
+ # Override set_epoch so the sampler is recomputed after the dataset is updated at each epoch
29
+ # If the dataset length is evenly divisible by # of replicas, then there
30
+ # is no need to drop any data, since the dataset will be split equally.
31
+ if self.drop_last and len(self.dataset) % self.num_replicas != 0: # type: ignore[arg-type]
32
+ # Split to nearest available length that is evenly divisible.
33
+ # This is to ensure each rank receives the same amount of data when
34
+ # using this Sampler.
35
+ self.num_samples = math.ceil(
36
+ (len(self.dataset) - self.num_replicas) / self.num_replicas # type: ignore[arg-type]
37
+ )
38
+ else:
39
+ self.num_samples = math.ceil(len(self.dataset) / self.num_replicas) # type: ignore[arg-type]
40
+ self.total_size = self.num_samples * self.num_replicas
41
+ super().set_epoch(epoch)
42
+
43
+ def add_custom_callback(trainer, logger):
44
+ if 'PrinterCallback' in trainer.callback_handler.callback_list:
45
+ trainer.pop_callback(transformers.PrinterCallback)
46
+ trainer.add_callback(LogCallback(logger))
47
+ logger.info('Add custom LogCallback')
48
+ trainer.add_callback(DatasetUpdateCallback(trainer))
49
+ logger.info('Add custom DatasetUpdateCallback')
50
+ trainer.add_callback(SaveDiskCallback())
51
+ logger.info('Add custom SaveDiskCallback')
52
+ logger.info(f"trainer's callbacks: {trainer.callback_handler.callback_list}")
53
+
54
+
55
+ class LogCallback(transformers.TrainerCallback):
56
+ """
57
+ A bare :class:`~transformers.TrainerCallback` that just logs training metrics with the provided logger.
58
+ """
59
+ def __init__(self, logger, exclude=('total_flos', 'epoch')):
60
+ self.logger = logger
61
+ self.exclude = exclude
62
+
63
+ def on_log(self, args, state, control, logs=None, **kwargs):
64
+ if state.is_world_process_zero:
65
+ self.logger.info(''.join([
66
+ f"[global_steps={state.global_step}]",
67
+ f"[epochs={logs['epoch']}]",
68
+ ','.join(f'{k}={v}' for k, v in logs.items()
69
+ if k not in self.exclude)
70
+ ]))
71
+
72
+
73
+ class DatasetUpdateCallback(transformers.TrainerCallback):
74
+ def __init__(self, trainer):
75
+ self.trainer = trainer
76
+
77
+ def on_epoch_begin(self, args, state, control, train_dataloader, **kwargs):
78
+ self.trainer.train_dataset.update(int(state.epoch))
79
+ train_dataloader.sampler.set_epoch(int(state.epoch))
80
+
81
+
82
+ class SaveDiskCallback(transformers.TrainerCallback):
83
+ def on_save(self, args, state, control, **kwargs):
84
+ if args.local_rank != 0:
85
+ return
86
+
87
+ for ckpt in os.listdir(args.output_dir):
88
+ # remove out-of-date deepspeed checkpoints
89
+ if ckpt.startswith('checkpoint-') and not ckpt.endswith(f'-{state.global_step}'):
90
+ for pattern in ['global_step*', '*.pth']:
91
+ os.system("rm -rf " + os.path.join(args.output_dir, ckpt, pattern))
92
+
93
+ def on_train_end(self, args, state, control, **kwargs):
94
+ if state.is_local_process_zero and False:
95
+ for pattern in ['global_step*', '*.pth']:
96
+ os.system("rm -rf " + os.path.join(args.output_dir, "checkpoint-*", pattern))
97
+
98
+
99
+ def register_nan_hook(model):
100
+ torch.autograd.set_detect_anomaly(True)
101
+
102
+ def add_module_name(module):
103
+ for name, sub_module in module.named_modules():
104
+ sub_module.name = name
105
+
106
+ def add_check_nan_hook(module):
107
+ def check_nan(module, inputs, outputs):
108
+ any_nan = False
109
+ for i, tensor in enumerate(inputs):
110
+ if isinstance(tensor, torch.Tensor) and tensor.isnan().any():
111
+ print(f'module {module.name} contains nan in its {i}th input.')
112
+ any_nan = True
113
+ for i, tensor in enumerate(outputs):
114
+ if isinstance(tensor, torch.Tensor) and tensor.isnan().any():
115
+ print(f'module {module.name} contains nan in its {i}th output.')
116
+ any_nan = True
117
+ if any_nan:
118
+ if torch.distributed.get_rank() == 0:
119
+ torch.save({
120
+ 'state_dict': module.state_dict(),
121
+ 'inputs': inputs,
122
+ 'outputs': outputs,
123
+ 'type': module.__class__.__name__
124
+ }, module.name + '.pth')
125
+ # from ipdb import set_trace; set_trace()
126
+ # else:
127
+ # import time; time.sleep(10000)
128
+
129
+ module.register_forward_hook(lambda module, inputs, outputs: check_nan(module, inputs, outputs))
131
+
132
+ model.apply(add_module_name)
133
+ model.apply(add_check_nan_hook)
134
+
135
+
136
+ def initialize_seq_parallel(
137
+ sequence_parallel_size,
138
+ ):
139
+ if sequence_parallel_size <= 1:
140
+ return None
141
+ num_sequence_parallel_groups: int = dist.get_world_size() // sequence_parallel_size
142
+ global _SEQUENCE_PARALLEL_GROUP
143
+ global _SEQUENCE_PARALLEL_SIZE
144
+ _SEQUENCE_PARALLEL_SIZE = sequence_parallel_size
145
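+ # consecutive ranks form one sequence-parallel group, e.g. world size 8 with size 2 gives groups (0,1), (2,3), (4,5), (6,7)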
+ for i in range(num_sequence_parallel_groups):
146
+ ranks = range(i * sequence_parallel_size,
147
+ (i + 1) * sequence_parallel_size)
148
+ group = torch.distributed.new_group(ranks)
149
+ if dist.get_rank() in ranks:
150
+ _SEQUENCE_PARALLEL_GROUP = group
151
+
152
+ def get_sequence_parallel_group():
153
+ """Get the sequence parallel group the caller rank belongs to."""
154
+ return _SEQUENCE_PARALLEL_GROUP
155
+
156
+ def get_sequence_parallel_size():
157
+ return _SEQUENCE_PARALLEL_SIZE
158
+
159
+ def get_sequence_parallel_rank():
160
+ return torch.distributed.get_rank(group=get_sequence_parallel_group())
161
+
162
+ # Set the sequence-parallel world size hook so that the DeepSpeed optimizer averages gradients correctly
163
+ from deepspeed.utils import groups
164
+ groups._get_sequence_parallel_world_size = get_sequence_parallel_size
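
A rough sketch of how these helpers might be wired into a distributed training entry point, assuming the job is launched with torchrun and the world size is divisible by the sequence-parallel size:

    import torch.distributed as dist
    from seq_para_utils import (initialize_seq_parallel, get_sequence_parallel_group,
                                get_sequence_parallel_rank)

    dist.init_process_group(backend="nccl")
    initialize_seq_parallel(sequence_parallel_size=2)  # e.g. 8 ranks -> 4 groups of 2
    sp_group = get_sequence_parallel_group()           # process group holding this rank's sequence slice
    sp_rank = get_sequence_parallel_rank()             # this rank's position within its group
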
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<|endoftext|>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,132 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": true,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<unk>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "</s>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": true,
27
+ "single_word": false,
28
+ "special": false
29
+ },
30
+ "32000": {
31
+ "content": "<|endoftext|>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false,
36
+ "special": true
37
+ },
38
+ "32001": {
39
+ "content": "<|assistant|>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": true,
43
+ "single_word": false,
44
+ "special": true
45
+ },
46
+ "32002": {
47
+ "content": "<|placeholder1|>",
48
+ "lstrip": false,
49
+ "normalized": false,
50
+ "rstrip": true,
51
+ "single_word": false,
52
+ "special": true
53
+ },
54
+ "32003": {
55
+ "content": "<|placeholder2|>",
56
+ "lstrip": false,
57
+ "normalized": false,
58
+ "rstrip": true,
59
+ "single_word": false,
60
+ "special": true
61
+ },
62
+ "32004": {
63
+ "content": "<|placeholder3|>",
64
+ "lstrip": false,
65
+ "normalized": false,
66
+ "rstrip": true,
67
+ "single_word": false,
68
+ "special": true
69
+ },
70
+ "32005": {
71
+ "content": "<|placeholder4|>",
72
+ "lstrip": false,
73
+ "normalized": false,
74
+ "rstrip": true,
75
+ "single_word": false,
76
+ "special": true
77
+ },
78
+ "32006": {
79
+ "content": "<|system|>",
80
+ "lstrip": false,
81
+ "normalized": false,
82
+ "rstrip": true,
83
+ "single_word": false,
84
+ "special": true
85
+ },
86
+ "32007": {
87
+ "content": "<|end|>",
88
+ "lstrip": false,
89
+ "normalized": false,
90
+ "rstrip": true,
91
+ "single_word": false,
92
+ "special": true
93
+ },
94
+ "32008": {
95
+ "content": "<|placeholder5|>",
96
+ "lstrip": false,
97
+ "normalized": false,
98
+ "rstrip": true,
99
+ "single_word": false,
100
+ "special": true
101
+ },
102
+ "32009": {
103
+ "content": "<|placeholder6|>",
104
+ "lstrip": false,
105
+ "normalized": false,
106
+ "rstrip": true,
107
+ "single_word": false,
108
+ "special": true
109
+ },
110
+ "32010": {
111
+ "content": "<|user|>",
112
+ "lstrip": false,
113
+ "normalized": false,
114
+ "rstrip": true,
115
+ "single_word": false,
116
+ "special": true
117
+ }
118
+ },
119
+ "bos_token": "<s>",
120
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
121
+ "clean_up_tokenization_spaces": false,
122
+ "eos_token": "<|endoftext|>",
123
+ "legacy": false,
124
+ "model_max_length": 35000,
125
+ "pad_token": "<|endoftext|>",
126
+ "padding_side": "left",
127
+ "sp_model_kwargs": {},
128
+ "spaces_between_special_tokens": false,
129
+ "tokenizer_class": "LlamaTokenizer",
130
+ "unk_token": "<unk>",
131
+ "use_default_system_prompt": false
132
+ }
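
As a quick sanity check of the chat template above, rendering a single user turn with the generation prompt enabled should produce the Phi-3 style markup shown in the comments below (the checkpoint path is a placeholder):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("path/to/this/checkpoint")
    messages = [{"role": "user", "content": "Hello"}]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    print(prompt)
    # <|user|>
    # Hello<|end|>
    # <|assistant|>
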
tree_gen_utils.py ADDED
@@ -0,0 +1,106 @@
1
+ from collections import defaultdict
2
+ from typing import List, Tuple
3
+
4
+ import numpy as np
5
+ from anytree import Node, RenderTree
6
+ import bs4
7
+ from anytree import PreOrderIter
8
+ from anytree.exporter import DotExporter
9
+
10
+
11
+ def nodenamefunc(node):
12
+ return f"{node.name}|{node.prob}|{node.input_ids}"
13
+
14
+
15
+ class TokenDotExporter(DotExporter):
16
+ def __init__(self, node, **kwargs):
17
+ super().__init__(node, **kwargs)
18
+
19
+ def __iter__(self):
20
+ # prepare
21
+ indent = " " * self.indent
22
+ nodenamefunc = self.nodenamefunc or self._default_nodenamefunc
23
+ nodeattrfunc = self.nodeattrfunc or self._default_nodeattrfunc
24
+ edgeattrfunc = self.edgeattrfunc or self._default_edgeattrfunc
25
+ edgetypefunc = self.edgetypefunc or self._default_edgetypefunc
26
+ filter_ = self.filter_ or self._default_filter
27
+ return self.__iter(indent, nodenamefunc, nodeattrfunc, edgeattrfunc, edgetypefunc, filter_)
28
+
29
+ def __iter_nodes(self, indent, nodenamefunc, nodeattrfunc, filter_):
30
+ for node in PreOrderIter(self.node, filter_=filter_, stop=self.stop, maxlevel=self.maxlevel):
31
+ nodename = nodenamefunc(node)
32
+ nodeattr = nodeattrfunc(node)
33
+ nodeattr = " {%s}" % nodeattr if nodeattr is not None else ""
34
+ yield '%s%s' % (DotExporter.esc(nodename), nodeattr)
35
+
36
+ def __iter(self, indent, nodenamefunc, nodeattrfunc, edgeattrfunc, edgetypefunc, filter_):
37
+ for node in self.__iter_nodes(indent, nodenamefunc, nodeattrfunc, filter_):
38
+ yield node
39
+
40
+
41
+ class TokenIdNode(Node):
42
+ def __init__(self, name, parent=None, children=None, **kwargs):
43
+ super().__init__(name, parent, children, **kwargs)
44
+ self.input_ids = kwargs.get('input_ids', [])
45
+ self.prob = kwargs.get('prob', np.float32(0.0))
46
+
47
+
48
+ def split_tree(soup: bs4.BeautifulSoup, max_node_words=0) -> List[Tuple[bs4.element.Tag, List[str], bool]]:
49
+ word_count = len(soup.get_text().split())
50
+ if word_count > max_node_words:
51
+ possible_trees = [(soup, [])]
52
+ target_trees = [] # [(tag, path, is_leaf)]
53
+ # split the entire DOM tree into subtrees until each subtree contains fewer than max_node_words words
54
+ # find all possible trees
55
+ while True:
56
+ if len(possible_trees) == 0:
57
+ break
58
+ tree = possible_trees.pop(0)
59
+ tag_children = defaultdict(int)
60
+ bare_word_count = 0
61
+ # count child tags
62
+ for child in tree[0].contents:
63
+ if isinstance(child, bs4.element.Tag):
64
+ tag_children[child.name] += 1
65
+ _tag_children = {k: 0 for k in tag_children.keys()}
66
+
67
+ # check if the tree can be split
68
+ for child in tree[0].contents:
69
+ if isinstance(child, bs4.element.Tag):
70
+ # change child tag with duplicate names
71
+ if tag_children[child.name] > 1:
72
+ new_name = f"{child.name}{_tag_children[child.name]}"
73
+ new_tree = (child, tree[1] + [new_name])
74
+ _tag_children[child.name] += 1
75
+ child.name = new_name
76
+ else:
77
+ new_tree = (child, tree[1] + [child.name])
78
+ word_count = len(child.get_text().split())
79
+ # keep splitting nodes with more than max_node_words words while the recursion depth stays below 64
80
+ if word_count > max_node_words and len(new_tree[1]) < 64:
81
+ possible_trees.append(new_tree)
82
+ else:
83
+ target_trees.append((new_tree[0], new_tree[1], True))
84
+ else:
85
+ bare_word_count += len(str(child).split())
86
+
87
+ # add leaf node
88
+ if len(tag_children) == 0:
89
+ target_trees.append((tree[0], tree[1], True))
90
+ # add node with more than max_node_words bare words
91
+ elif bare_word_count > max_node_words:
92
+ target_trees.append((tree[0], tree[1], False))
93
+ else:
94
+ soup_children = [c for c in soup.contents if isinstance(c, bs4.element.Tag)]
95
+ if len(soup_children) == 1:
96
+ target_trees = [(soup_children[0], [soup_children[0].name], True)]
97
+ else:
98
+ # add an html tag to wrap all children
99
+ new_soup = bs4.BeautifulSoup("", 'html.parser')
100
+ new_tag = new_soup.new_tag("html")
101
+ new_soup.append(new_tag)
102
+ for child in soup_children:
103
+ new_tag.append(child)
104
+ target_trees = [(new_tag, ["html"], True)]
105
+ return target_trees
106
+
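
A small sketch of split_tree on a toy document; the threshold is chosen only to force a split, and duplicate sibling tags are renamed with numeric suffixes as in the code above:

    import bs4
    from tree_gen_utils import split_tree

    soup = bs4.BeautifulSoup(
        "<html><body><p>first paragraph with several words</p>"
        "<p>second paragraph with several more words</p></body></html>",
        "html.parser",
    )
    for tag, path, is_leaf in split_tree(soup, max_node_words=5):
        print(path, is_leaf)
    # expected roughly: ['html', 'body', 'p0'] True and ['html', 'body', 'p1'] True
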
zero_to_fp32.py ADDED
@@ -0,0 +1,604 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example: python zero_to_fp32.py . pytorch_model.bin
14
+
15
+ import argparse
16
+ import torch
17
+ import glob
18
+ import math
19
+ import os
20
+ import re
21
+ from collections import OrderedDict
22
+ from dataclasses import dataclass
23
+
24
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
25
+ # DeepSpeed data structures it has to be available in the current python environment.
26
+ from deepspeed.utils import logger
27
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
28
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
29
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
30
+
31
+
32
+ @dataclass
33
+ class zero_model_state:
34
+ buffers: dict()
35
+ param_shapes: dict()
36
+ shared_params: list
37
+ ds_version: int
38
+ frozen_param_shapes: dict()
39
+ frozen_param_fragments: dict()
40
+
41
+
42
+ debug = 0
43
+
44
+ # load to cpu
45
+ device = torch.device('cpu')
46
+
47
+
48
+ def atoi(text):
49
+ return int(text) if text.isdigit() else text
50
+
51
+
52
+ def natural_keys(text):
53
+ '''
54
+ alist.sort(key=natural_keys) sorts in human order
55
+ http://nedbatchelder.com/blog/200712/human_sorting.html
56
+ (See Toothy's implementation in the comments)
57
+ '''
58
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
59
+
60
+
61
+ def get_model_state_file(checkpoint_dir, zero_stage):
62
+ if not os.path.isdir(checkpoint_dir):
63
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
64
+
65
+ # there should be only one file
66
+ if zero_stage <= 2:
67
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
68
+ elif zero_stage == 3:
69
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
70
+
71
+ if not os.path.exists(file):
72
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
73
+
74
+ return file
75
+
76
+
77
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
78
+ # XXX: need to test that this simple glob rule works for multi-node setup too
79
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
80
+
81
+ if len(ckpt_files) == 0:
82
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
83
+
84
+ return ckpt_files
85
+
86
+
87
+ def get_optim_files(checkpoint_dir):
88
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
89
+
90
+
91
+ def get_model_state_files(checkpoint_dir):
92
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
93
+
94
+
95
+ def parse_model_states(files):
96
+ zero_model_states = []
97
+ for file in files:
98
+ state_dict = torch.load(file, map_location=device)
99
+
100
+ if BUFFER_NAMES not in state_dict:
101
+ raise ValueError(f"{file} is not a model state checkpoint")
102
+ buffer_names = state_dict[BUFFER_NAMES]
103
+ if debug:
104
+ print("Found buffers:", buffer_names)
105
+
106
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
107
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
108
+ param_shapes = state_dict[PARAM_SHAPES]
109
+
110
+ # collect parameters that are included in param_shapes
111
+ param_names = []
112
+ for s in param_shapes:
113
+ for name in s.keys():
114
+ param_names.append(name)
115
+
116
+ # update with frozen parameters
117
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
118
+ if frozen_param_shapes is not None:
119
+ if debug:
120
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
121
+ param_names += list(frozen_param_shapes.keys())
122
+
123
+ # handle shared params
124
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
125
+
126
+ ds_version = state_dict.get(DS_VERSION, None)
127
+
128
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
129
+
130
+ z_model_state = zero_model_state(buffers=buffers,
131
+ param_shapes=param_shapes,
132
+ shared_params=shared_params,
133
+ ds_version=ds_version,
134
+ frozen_param_shapes=frozen_param_shapes,
135
+ frozen_param_fragments=frozen_param_fragments)
136
+ zero_model_states.append(z_model_state)
137
+
138
+ return zero_model_states
139
+
140
+
141
+ def parse_optim_states(files, ds_checkpoint_dir):
142
+
143
+ total_files = len(files)
144
+ state_dicts = []
145
+ for f in files:
146
+ state_dict = torch.load(f, map_location=device)
147
+ # immediately discard the two potentially huge optimizer states (e.g. the Adam moments), since we only care about the fp32 master weights
148
+ # and also handle the case where it was already removed by another helper script
149
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
150
+ state_dicts.append(state_dict)
151
+
152
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
153
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
154
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
155
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
156
+
157
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
158
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
159
+ # use the max of the partition_count to get the dp world_size.
160
+
161
+ if type(world_size) is list:
162
+ world_size = max(world_size)
163
+
164
+ if world_size != total_files:
165
+ raise ValueError(
166
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
167
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
168
+ )
169
+
170
+ # the groups are named differently in each stage
171
+ if zero_stage <= 2:
172
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
173
+ elif zero_stage == 3:
174
+ fp32_groups_key = FP32_FLAT_GROUPS
175
+ else:
176
+ raise ValueError(f"unknown zero stage {zero_stage}")
177
+
178
+ if zero_stage <= 2:
179
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
180
+ elif zero_stage == 3:
181
+ # if there is more than one param group, there will be multiple flattened tensors - one
182
+ # flattened tensor per group - for simplicity merge them into a single tensor
183
+ #
184
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
185
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
186
+
187
+ fp32_flat_groups = [
188
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
189
+ ]
190
+
191
+ return zero_stage, world_size, fp32_flat_groups
192
+
193
+
194
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
195
+ """
196
+ Returns fp32 state_dict reconstructed from ds checkpoint
197
+
198
+ Args:
199
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
200
+
201
+ """
202
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
203
+
204
+ optim_files = get_optim_files(ds_checkpoint_dir)
205
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
206
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
207
+
208
+ model_files = get_model_state_files(ds_checkpoint_dir)
209
+
210
+ zero_model_states = parse_model_states(model_files)
211
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
212
+
213
+ if zero_stage <= 2:
214
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
215
+ exclude_frozen_parameters)
216
+ elif zero_stage == 3:
217
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
218
+ exclude_frozen_parameters)
219
+
220
+
221
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
222
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
223
+ return
224
+
225
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
226
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
227
+
228
+ if debug:
229
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
230
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
231
+
232
+ wanted_params = len(frozen_param_shapes)
233
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
234
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
235
+ print(f'Frozen params: Have {avail_numel} numels to process.')
236
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
237
+
238
+ total_params = 0
239
+ total_numel = 0
240
+ for name, shape in frozen_param_shapes.items():
241
+ total_params += 1
242
+ unpartitioned_numel = shape.numel()
243
+ total_numel += unpartitioned_numel
244
+
245
+ state_dict[name] = frozen_param_fragments[name]
246
+
247
+ if debug:
248
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
249
+
250
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
251
+
252
+
253
+ def _has_callable(obj, fn):
254
+ attr = getattr(obj, fn, None)
255
+ return callable(attr)
256
+
257
+
258
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
259
+ param_shapes = zero_model_states[0].param_shapes
260
+
261
+ # Reconstruction protocol:
262
+ #
263
+ # XXX: document this
264
+
265
+ if debug:
266
+ for i in range(world_size):
267
+ for j in range(len(fp32_flat_groups[0])):
268
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
269
+
270
+ # XXX: memory usage doubles here (zero2)
271
+ num_param_groups = len(fp32_flat_groups[0])
272
+ merged_single_partition_of_fp32_groups = []
273
+ for i in range(num_param_groups):
274
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
275
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
276
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
277
+ avail_numel = sum(
278
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
279
+
280
+ if debug:
281
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
282
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
283
+ # not asserting if there is a mismatch due to possible padding
284
+ print(f"Have {avail_numel} numels to process.")
285
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
286
+
287
+ # params
288
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
289
+ # out-of-core computing solution
290
+ total_numel = 0
291
+ total_params = 0
292
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
293
+ offset = 0
294
+ avail_numel = full_single_fp32_vector.numel()
295
+ for name, shape in shapes.items():
296
+
297
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
298
+ total_numel += unpartitioned_numel
299
+ total_params += 1
300
+
301
+ if debug:
302
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
303
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
304
+ offset += unpartitioned_numel
305
+
306
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
307
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
308
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
309
+ # live optimizer object, so we are checking that the numbers are within the right range
310
+ align_to = 2 * world_size
311
+
312
+ def zero2_align(x):
313
+ return align_to * math.ceil(x / align_to)
314
+
315
+ if debug:
316
+ print(f"original offset={offset}, avail_numel={avail_numel}")
317
+
318
+ offset = zero2_align(offset)
319
+ avail_numel = zero2_align(avail_numel)
320
+
321
+ if debug:
322
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
323
+
324
+ # Sanity check
325
+ if offset != avail_numel:
326
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
327
+
328
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
329
+
330
+
331
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
332
+ exclude_frozen_parameters):
333
+ state_dict = OrderedDict()
334
+
335
+ # buffers
336
+ buffers = zero_model_states[0].buffers
337
+ state_dict.update(buffers)
338
+ if debug:
339
+ print(f"added {len(buffers)} buffers")
340
+
341
+ if not exclude_frozen_parameters:
342
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
343
+
344
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
345
+
346
+ # recover shared parameters
347
+ for pair in zero_model_states[0].shared_params:
348
+ if pair[1] in state_dict:
349
+ state_dict[pair[0]] = state_dict[pair[1]]
350
+
351
+ return state_dict
352
+
353
+
354
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
355
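+ # e.g. 10 elements across 4 ranks -> 3 elements stored per rank, with 2 padding elements overall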
+ remainder = unpartitioned_numel % world_size
356
+ padding_numel = (world_size - remainder) if remainder else 0
357
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
358
+ return partitioned_numel, padding_numel
359
+
360
+
361
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
362
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
363
+ return
364
+
365
+ if debug:
366
+ for i in range(world_size):
367
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
368
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
369
+
370
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
371
+ wanted_params = len(frozen_param_shapes)
372
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
373
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
374
+ print(f'Frozen params: Have {avail_numel} numels to process.')
375
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
376
+
377
+ total_params = 0
378
+ total_numel = 0
379
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
380
+ total_params += 1
381
+ unpartitioned_numel = shape.numel()
382
+ total_numel += unpartitioned_numel
383
+
384
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
385
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
386
+
387
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
388
+
389
+ if debug:
390
+ print(
391
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
392
+ )
393
+
394
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
395
+
396
+
397
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
398
+ param_shapes = zero_model_states[0].param_shapes
399
+ avail_numel = fp32_flat_groups[0].numel() * world_size
400
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
401
+ # param, re-consolidating each param, while dealing with padding if any
402
+
403
+ # merge list of dicts, preserving order
404
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
405
+
406
+ if debug:
407
+ for i in range(world_size):
408
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
409
+
410
+ wanted_params = len(param_shapes)
411
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
412
+ # not asserting if there is a mismatch due to possible padding
413
+ avail_numel = fp32_flat_groups[0].numel() * world_size
414
+ print(f"Trainable params: Have {avail_numel} numels to process.")
415
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
416
+
417
+ # params
418
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
419
+ # out-of-core computing solution
420
+ offset = 0
421
+ total_numel = 0
422
+ total_params = 0
423
+ for name, shape in param_shapes.items():
424
+
425
+ unpartitioned_numel = shape.numel()
426
+ total_numel += unpartitioned_numel
427
+ total_params += 1
428
+
429
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
430
+
431
+ if debug:
432
+ print(
433
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
434
+ )
435
+
436
+ # XXX: memory usage doubles here
437
+ state_dict[name] = torch.cat(
438
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
439
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
440
+ offset += partitioned_numel
441
+
442
+ offset *= world_size
443
+
444
+ # Sanity check
445
+ if offset != avail_numel:
446
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
447
+
448
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
449
+
450
+
451
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
452
+ exclude_frozen_parameters):
453
+ state_dict = OrderedDict()
454
+
455
+ # buffers
456
+ buffers = zero_model_states[0].buffers
457
+ state_dict.update(buffers)
458
+ if debug:
459
+ print(f"added {len(buffers)} buffers")
460
+
461
+ if not exclude_frozen_parameters:
462
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
463
+
464
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
465
+
466
+ # recover shared parameters
467
+ for pair in zero_model_states[0].shared_params:
468
+ if pair[1] in state_dict:
469
+ state_dict[pair[0]] = state_dict[pair[1]]
470
+
471
+ return state_dict
472
+
473
+
474
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
475
+ """
476
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
477
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
478
+ via a model hub.
479
+
480
+ Args:
481
+ - ``checkpoint_dir``: path to the desired checkpoint folder
482
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
483
+ - ``exclude_frozen_parameters``: exclude frozen parameters
484
+
485
+ Returns:
486
+ - pytorch ``state_dict``
487
+
488
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
489
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
490
+ the checkpoint.
491
+
492
+ A typical usage might be ::
493
+
494
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
495
+ # do the training and checkpoint saving
496
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
497
+ model = model.cpu() # move to cpu
498
+ model.load_state_dict(state_dict)
499
+ # submit to model hub or save the model to share with others
500
+
501
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
502
+ application. i.e. you will need to re-initialize the deepspeed engine, since
503
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
504
+
505
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
506
+
507
+ """
508
+ if tag is None:
509
+ latest_path = os.path.join(checkpoint_dir, 'latest')
510
+ if os.path.isfile(latest_path):
511
+ with open(latest_path, 'r') as fd:
512
+ tag = fd.read().strip()
513
+ else:
514
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
515
+
516
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
517
+
518
+ if not os.path.isdir(ds_checkpoint_dir):
519
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
520
+
521
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
522
+
523
+
524
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
525
+ """
526
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
527
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
528
+
529
+ Args:
530
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
531
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
532
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
533
+ - ``exclude_frozen_parameters``: exclude frozen parameters
534
+ """
535
+
536
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
537
+ print(f"Saving fp32 state dict to {output_file}")
538
+ torch.save(state_dict, output_file)
539
+
540
+
541
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
542
+ """
543
+ 1. Put the provided model to cpu
544
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
545
+ 3. Load it into the provided model
546
+
547
+ Args:
548
+ - ``model``: the model object to update
549
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
550
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
551
+
552
+ Returns:
553
+ - ``model``: the modified model
554
+
555
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
556
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
557
+ conveniently placed for you in the checkpoint folder.
558
+
559
+ A typical usage might be ::
560
+
561
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
562
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
563
+ # submit to model hub or save the model to share with others
564
+
565
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
566
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
567
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
568
+
569
+ """
570
+ logger.info(f"Extracting fp32 weights")
571
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
572
+
573
+ logger.info(f"Overwriting model with fp32 weights")
574
+ model = model.cpu()
575
+ model.load_state_dict(state_dict, strict=False)
576
+
577
+ return model
578
+
579
+
580
+ if __name__ == "__main__":
581
+
582
+ parser = argparse.ArgumentParser()
583
+ parser.add_argument("checkpoint_dir",
584
+ type=str,
585
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
586
+ parser.add_argument(
587
+ "output_file",
588
+ type=str,
589
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
590
+ parser.add_argument("-t",
591
+ "--tag",
592
+ type=str,
593
+ default=None,
594
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
595
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
596
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
597
+ args = parser.parse_args()
598
+
599
+ debug = args.debug
600
+
601
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
602
+ args.output_file,
603
+ tag=args.tag,
604
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
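
Besides the command line entry point above, the conversion can also be driven programmatically; a minimal sketch with placeholder paths:

    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    convert_zero_checkpoint_to_fp32_state_dict(
        "path/checkpoint-12",                    # folder containing the global_step* tag directory
        "path/checkpoint-12/pytorch_model.bin",  # consolidated fp32 output file
        tag=None,                                # None -> read the tag from the 'latest' file
        exclude_frozen_parameters=False,
    )
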