huwenxing committed v1.0.3 (commit 62477b8, 1 parent: d82fc55)

Changed files:
- config.json                        +6 -13
- configuration_internlm.py          +26 -8
- generation_config.json             +2 -1
- modeling_internlm.py               +90 -166
- pytorch_model-00001-of-00005.bin   +2 -2
- pytorch_model-00002-of-00005.bin   +2 -2
- pytorch_model-00003-of-00005.bin   +2 -2
- pytorch_model-00004-of-00005.bin   +2 -2
- pytorch_model-00005-of-00005.bin   +1 -1
- pytorch_model.bin.index.json       +2 -2
config.json
CHANGED
@@ -4,8 +4,8 @@
   ],
   "auto_map": {
     "AutoConfig": "configuration_internlm.InternLMConfig",
-    "…
-    "…
+    "AutoModelForCausalLM": "modeling_internlm.InternLMForCausalLM",
+    "AutoModel": "modeling_internlm.InternLMForCausalLM"
   },
   "bias": false,
   "bos_token_id": 1,

@@ -19,18 +19,11 @@
   "num_attention_heads": 40,
   "num_hidden_layers": 60,
   "num_key_value_heads": 40,
-  "pad_token_id": …
-  "pretraining_tp": 1,
+  "pad_token_id": 0,
   "rms_norm_eps": 1e-06,
-  "rope_scaling": null,
-  "rope_theta": 10000.0,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.33.…
+  "transformers_version": "4.33.2",
   "use_cache": true,
-  "vocab_size": 103168
-  "rotary": {
-    "base": 10000,
-    "type": "dynamic"
-  }
-}
+  "vocab_size": 103168
+}
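With "AutoModelForCausalLM" and "AutoModel" now mapped to modeling_internlm.InternLMForCausalLM in auto_map, the checkpoint can be loaded through the generic Auto classes once remote code is trusted. A minimal sketch, assuming a recent transformers release; the repository id below is a placeholder, not something stated in this commit:

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo = "internlm/internlm-20b"  # placeholder: substitute the id of this repository

# trust_remote_code=True lets transformers resolve the classes listed in auto_map
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)        # -> InternLMConfig
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    trust_remote_code=True,      # -> InternLMForCausalLM from modeling_internlm.py
)
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)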
configuration_internlm.py
CHANGED
@@ -19,8 +19,9 @@
 # limitations under the License.
 """ InternLM model configuration"""

-from transformers.configuration_utils import PretrainedConfig
 from transformers.utils import logging
+from transformers.configuration_utils import PretrainedConfig
+

 logger = logging.get_logger(__name__)

@@ -29,9 +30,9 @@ INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}

 class InternLMConfig(PretrainedConfig):
     r"""
-    This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
-    …
-    …
+    This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate an InternLM
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the InternLM-7B.

     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
     documentation from [`PretrainedConfig`] for more information.

@@ -49,6 +50,19 @@ class InternLMConfig(PretrainedConfig):
             Number of hidden layers in the Transformer encoder.
         num_attention_heads (`int`, *optional*, defaults to 32):
             Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details checkout [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+            `num_attention_heads`.
+        pretraining_tp (`int`, *optional*, defaults to `1`):
+            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+            issue](https://github.com/pytorch/pytorch/issues/76232).
         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
             The non-linear activation function (function or string) in the decoder.
         max_position_embeddings (`int`, *optional*, defaults to 2048):

@@ -80,13 +94,14 @@ class InternLMConfig(PretrainedConfig):
     model_type = "internlm"
     _auto_class = "AutoConfig"

     def __init__(
         self,
         vocab_size=103168,
         hidden_size=4096,
         intermediate_size=11008,
         num_hidden_layers=32,
         num_attention_heads=32,
+        num_key_value_heads=None,
         hidden_act="silu",
         max_position_embeddings=2048,
         initializer_range=0.02,

@@ -97,7 +112,6 @@ class InternLMConfig(PretrainedConfig):
         eos_token_id=2,
         tie_word_embeddings=False,
         bias=True,
-        rotary={"base": 10000, "type": "dynamic"},  # pylint: disable=W0102
         **kwargs,
     ):
         self.vocab_size = vocab_size

@@ -106,16 +120,20 @@ class InternLMConfig(PretrainedConfig):
         self.intermediate_size = intermediate_size
         self.num_hidden_layers = num_hidden_layers
         self.num_attention_heads = num_attention_heads
+
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+        self.num_key_value_heads = num_key_value_heads
+
         self.hidden_act = hidden_act
         self.initializer_range = initializer_range
         self.rms_norm_eps = rms_norm_eps
         self.use_cache = use_cache
         self.bias = bias
-        self.rotary = rotary
         super().__init__(
             pad_token_id=pad_token_id,
             bos_token_id=bos_token_id,
             eos_token_id=eos_token_id,
             tie_word_embeddings=tie_word_embeddings,
             **kwargs,
         )
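The new num_key_value_heads argument is what selects between MHA, GQA and MQA, exactly as the added docstring describes: it defaults to num_attention_heads, which is plain multi-head attention and is what this checkpoint's config.json uses (40 KV heads for 40 attention heads). A small sketch of the three regimes, assuming configuration_internlm.py from this repo is importable from the working directory:

from configuration_internlm import InternLMConfig  # local file from this repository

mha = InternLMConfig(num_attention_heads=40)                          # KV heads default to 40 -> MHA
gqa = InternLMConfig(num_attention_heads=40, num_key_value_heads=8)   # 5 query heads share each KV head
mqa = InternLMConfig(num_attention_heads=40, num_key_value_heads=1)   # one KV head shared by all query heads

for cfg in (mha, gqa, mqa):
    groups = cfg.num_attention_heads // cfg.num_key_value_heads
    print(cfg.num_key_value_heads, "KV heads,", groups, "query heads per group")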
generation_config.json
CHANGED
@@ -2,5 +2,6 @@
   "_from_model_config": true,
   "bos_token_id": 1,
   "eos_token_id": 2,
-  "…
+  "pad_token_id": 0,
+  "transformers_version": "4.33.2"
 }
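The generation defaults now carry the same pad_token_id as config.json, so generate() does not have to fall back to a guessed padding id. A quick check, again with the repository id as a placeholder:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("internlm/internlm-20b")  # placeholder repo id
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # expected: 1 2 0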
modeling_internlm.py
CHANGED
@@ -19,36 +19,26 @@
 # limitations under the License.
 """ PyTorch InternLM model."""
 import math
-import queue
-import threading
 from typing import List, Optional, Tuple, Union
+import threading, queue

 import torch
 import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
 from transformers.activations import ACT2FN
-from transformers.…
-from transformers.modeling_outputs import (
-    BaseModelOutputWithPast,
-    CausalLMOutputWithPast,
-    SequenceClassifierOutputWithPast,
-)
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
 from transformers.modeling_utils import PreTrainedModel
-from transformers.…
-    …
-    add_start_docstrings_to_model_forward,
-    logging,
-    replace_return_docstrings,
-)
-
+from transformers.generation.streamers import BaseStreamer
+from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
 from .configuration_internlm import InternLMConfig

+
 logger = logging.get_logger(__name__)

 _CONFIG_FOR_DOC = "InternLMConfig"

-
 # Copied from transformers.models.bart.modeling_bart._make_causal_mask
 def _make_causal_mask(
     input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0

@@ -81,10 +71,17 @@ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]

     return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)

+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+    """
+    (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+    """
+    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+    if n_rep == 1:
+        return hidden_states
+    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+

 class InternLMRMSNorm(nn.Module):
-    """RMSNorm implemention."""
-
     def __init__(self, hidden_size, eps=1e-6):
         """
         InternLMRMSNorm is equivalent to T5LayerNorm

@@ -105,14 +102,6 @@ class InternLMRMSNorm(nn.Module):


 class InternLMRotaryEmbedding(torch.nn.Module):
-    """Implement InternLM's rotary embedding.
-
-    Args:
-        dim (int): Characteristic dimension of each self-attentional head.
-        max_position_embeddings (int, optional): Model's training length. Defaults to 2048.
-        base (int, optional): The rotation position encodes the rotation Angle base number. Defaults to 10000.
-        device (Any, optional): Running device. Defaults to None.
-    """
     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
         super().__init__()
         inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))

@@ -144,66 +133,6 @@ class InternLMRotaryEmbedding(torch.nn.Module):
     )


-class InternLMDynamicNTKScalingRotaryEmbedding(torch.nn.Module):
-    """Implement InternLM's DyanmicNTK extrapolation method, thereby broadening the model support context to 16K.
-
-    Args:
-        dim (int): Characteristic dimension of each self-attentional head.
-        max_position_embeddings (int, optional): Model's training length. Defaults to 2048.
-        base (int, optional): The rotation position encodes the rotation Angle base number. Defaults to 10000.
-        device (Any, optional): Running device. Defaults to None.
-        scaling_factor (float, optional): NTK method extrapolation coefficient. Defaults to 1.0.
-    """
-
-    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
-        super().__init__()
-        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
-        self.register_buffer("inv_freq", inv_freq)
-        self.dim = dim
-        self.base = base
-        self.scaling_factor = scaling_factor
-
-        # Build here to make `torch.jit.trace` work.
-        self.max_position_embeddings = max_position_embeddings
-        self.max_seq_len_cached = max_position_embeddings
-        t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
-        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
-        # Different from paper, but it uses a different permutation in order to obtain the same calculation
-        emb = torch.cat((freqs, freqs), dim=-1)
-        self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
-        self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
-
-    def _update_cached(self, x, seq_len=None):
-        self.max_seq_len_cached = max(seq_len, self.max_position_embeddings)
-        if seq_len > self.max_position_embeddings:
-            base = self.base * (
-                (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
-            ) ** (self.dim / (self.dim - 2))
-            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(x.device) / self.dim))
-        else:
-            inv_freq = self.inv_freq
-        t = torch.arange(self.max_seq_len_cached, device=inv_freq.device, dtype=inv_freq.dtype)
-        freqs = torch.einsum("i,j->ij", t, inv_freq)
-        emb = torch.cat((freqs, freqs), dim=-1)
-        self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
-        self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
-
-    def forward(self, x, seq_len=None):
-        # x: [bs, num_attention_heads, seq_len, head_size]
-        # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
-        if seq_len <= self.max_position_embeddings:
-            # Reset the tables if the sequence length has changed,
-            if self.max_seq_len_cached > self.max_position_embeddings:
-                self._update_cached(x, seq_len)
-        else:
-            self._update_cached(x, seq_len)
-
-        return (
-            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
-            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
-        )
-
-
 def rotate_half(x):
     """Rotates half the hidden dims of the input."""
     x1 = x[..., : x.shape[-1] // 2]

@@ -215,18 +144,10 @@ def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
     # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
     cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
     sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
-    cos = cos.unsqueeze(…
-    sin = sin.unsqueeze(…
-    …
-    …
-    else:
-        q_embed = (q * cos) + (rotate_half(q) * sin)
-    …
-    if k.size(2) == 1:
-        k_embed = (k * cos[:, :, -1, :]) + (rotate_half(k) * sin[:, :, -1, :])
-    else:
-        k_embed = (k * cos) + (rotate_half(k) * sin)
-
+    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
+    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
+    q_embed = (q * cos) + (rotate_half(q) * sin)
+    k_embed = (k * cos) + (rotate_half(k) * sin)
     return q_embed, k_embed


@@ -256,6 +177,8 @@ class InternLMAttention(nn.Module):
         self.hidden_size = config.hidden_size
         self.num_heads = config.num_attention_heads
         self.head_dim = self.hidden_size // self.num_heads
+        self.num_key_value_heads = config.num_key_value_heads
+        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
         self.max_position_embeddings = config.max_position_embeddings

         if (self.head_dim * self.num_heads) != self.hidden_size:

@@ -264,28 +187,10 @@ class InternLMAttention(nn.Module):
             f" and `num_heads`: {self.num_heads})."
         )
         self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
-        self.k_proj = nn.Linear(self.hidden_size, self.…
-        self.v_proj = nn.Linear(self.hidden_size, self.…
+        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
         self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
-        self.rotary_emb = self.…
-
-    def _init_rope(self):
-        if self.config.rotary["type"] == "origin":
-            self.rotary_emb = InternLMRotaryEmbedding(
-                self.head_dim,
-                max_position_embeddings=self.max_position_embeddings,
-                base=self.config.rotary["base"],
-            )
-        elif self.config.rotary["type"] == "dynamic":
-            self.rotary_emb = InternLMDynamicNTKScalingRotaryEmbedding(
-                self.head_dim,
-                max_position_embeddings=self.max_position_embeddings,
-                base=self.config.rotary["base"],
-                scaling_factor=self.config.rotary.get("scaling_factor", 1.0),
-            )
-        else:
-            raise ValueError("Currently we only support rotary embedding's type being one of ('origin', 'dynamic').")
-        return self.rotary_emb
+        self.rotary_emb = InternLMRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)

     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
         return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

@@ -302,20 +207,25 @@ class InternLMAttention(nn.Module):
         bsz, q_len, _ = hidden_states.size()

         query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-        key_states = self.k_proj(hidden_states).view(bsz, q_len, self.…
-        value_states = self.v_proj(hidden_states).view(bsz, q_len, self.…
+        key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+        value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+        kv_seq_len = key_states.shape[-2]
+        if past_key_value is not None:
+            kv_seq_len += past_key_value[0].shape[-2]
+        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+        # [bsz, nh, t, hd]

         if past_key_value is not None:
             # reuse k, v, self_attention
             key_states = torch.cat([past_key_value[0], key_states], dim=2)
             value_states = torch.cat([past_key_value[1], value_states], dim=2)

-        # print(use_cache)
         past_key_value = (key_states, value_states) if use_cache else None

-        …
-        …
-        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+        key_states = repeat_kv(key_states, self.num_key_value_groups)
+        value_states = repeat_kv(value_states, self.num_key_value_groups)

         attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

@@ -426,9 +336,11 @@ INTERNLM_START_DOCSTRING = r"""
     This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
     library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)
+
    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.
+
    Parameters:
        config ([`InternLMConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not

@@ -469,34 +381,44 @@ INTERNLM_INPUTS_DOCSTRING = r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.
+
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
+
            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
+
            [What are attention masks?](../glossary#attention-mask)
+
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
+
            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
+
            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.
+
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.
+
            [What are position IDs?](../glossary#position-ids)
-        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
-            when `config.use_cache=True`):
+        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.

@@ -525,10 +447,10 @@ INTERNLM_INPUTS_DOCSTRING = r"""
 class InternLMModel(InternLMPreTrainedModel):
     """
     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLMDecoderLayer`]
+
     Args:
         config: InternLMConfig
     """
-
     _auto_class = "AutoModel"

     def __init__(self, config: InternLMConfig):

@@ -754,14 +676,20 @@ class InternLMForCausalLM(InternLMPreTrainedModel):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
        Returns:
+
        Example:
+
        ```python
        >>> from transformers import AutoTokenizer, InternLMForCausalLM
+
        >>> model = InternLMForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
        >>> prompt = "Hey, are you consciours? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")
+
        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

@@ -851,56 +779,50 @@ class InternLMForCausalLM(InternLMPreTrainedModel):
         for layer_past in past_key_values:
             reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
         return reordered_past

     def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = []):
         prompt = ""
         for record in history:
             prompt += f"""<|User|>:{record[0]}<eoh>\n<|Bot|>:{record[1]}<eoa>\n"""
         prompt += f"""<|User|>:{query}<eoh>\n<|Bot|>:"""
         return tokenizer([prompt], return_tensors="pt")

     @torch.no_grad()
-    def chat(
-        …
-        **kwargs,
-    ):
+    def chat(self,
+             tokenizer,
+             query: str,
+             history: List[Tuple[str, str]] = [],
+             streamer: Optional[BaseStreamer] = None,
+             max_new_tokens: int = 1024,
+             do_sample: bool = True,
+             temperature: float = 0.8,
+             top_p: float = 0.8,
+             **kwargs):
         inputs = self.build_inputs(tokenizer, query, history)
         inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
-        outputs = self.generate(
-            …
-        )
-        outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]) :]
+        outputs = self.generate(**inputs,
+                                streamer=streamer,
+                                max_new_tokens=max_new_tokens,
+                                do_sample=do_sample,
+                                temperature=temperature,
+                                top_p=top_p,
+                                **kwargs)
+        outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]):]
         response = tokenizer.decode(outputs, skip_special_tokens=True)
         response = response.split("<eoa>")[0]
         history = history + [(query, response)]
         return response, history

     @torch.no_grad()
-    def stream_chat(
-        …
-        **kwargs,
-    ):
+    def stream_chat(self,
+                    tokenizer,
+                    query: str,
+                    history: List[Tuple[str, str]] = [],
+                    max_new_tokens: int = 1024,
+                    do_sample: bool = True,
+                    temperature: float = 0.8,
+                    top_p: float = 0.8,
+                    **kwargs):
         """
         Return a generator in format: (response, history)
         Eg.

@@ -946,12 +868,12 @@ class InternLMForCausalLM(InternLMPreTrainedModel):
             tokenizer=tokenizer,
             query=query,
             streamer=ChatStreamer(tokenizer=tokenizer),
             history=history,
             max_new_tokens=max_new_tokens,
             do_sample=do_sample,
             temperature=temperature,
             top_p=top_p,
             **kwargs
         )

     def consumer():

@@ -969,8 +891,10 @@ class InternLMForCausalLM(InternLMPreTrainedModel):
 @add_start_docstrings(
     """
     The InternLM Model transformer with a sequence classification head on top (linear layer).
+
     [`InternLMForSequenceClassification`] uses the last token in order to do the classification, as other causal models
     (e.g. GPT-2) do.
+
     Since it does classification on the last token, it requires to know the position of the last token. If a
     `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
     no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
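The GQA path in the new attention forward reduces to the repeat_kv() helper shown above: keys and values are projected with num_key_value_heads heads and then broadcast back to num_attention_heads before the usual matmul. A standalone shape check of that transform (the head counts below are illustrative assumptions, not this checkpoint's, which uses 40 KV heads for 40 attention heads and therefore n_rep == 1):

import torch

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # same logic as the helper added in the diff: broadcast each KV head to n_rep query heads
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

kv = torch.randn(2, 8, 16, 128)              # (batch, num_key_value_heads=8, seq_len, head_dim)
full = repeat_kv(kv, n_rep=5)                # 8 KV heads expanded to 8 * 5 = 40 attention heads
print(full.shape)                            # torch.Size([2, 40, 16, 128])
assert torch.equal(full[:, 0], full[:, 4])   # heads 0..4 all read from KV head 0
assert torch.equal(full[:, 5], kv[:, 1])     # heads 5..9 all read from KV head 1

The chat() and stream_chat() helpers keep the same call pattern as before the reformat: model.chat(tokenizer, query, history=...) returns a (response, history) pair, while stream_chat() yields intermediate pairs through the internal ChatStreamer.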
pytorch_model-00001-of-00005.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
-size …
+oid sha256:3b4c026c01d46ffe745f63cb1a4c4f55f87df4431d903d4a05b91c9b9af0e022
+size 9990638603

pytorch_model-00002-of-00005.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
-size …
+oid sha256:71fbbe437a6925920a59c238888f8cbd3a49420b021bca12f4c7f58c6cb8b805
+size 9956584547

pytorch_model-00003-of-00005.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
-size …
+oid sha256:fb580e788d4cf1748bcfccdc2745081bac2fd3202fdd377a00d1d67430987de5
+size 9867476673

pytorch_model-00004-of-00005.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
-size …
+oid sha256:b7f4e71ea6f7f8fd23bd96d1a8c42a8b53dbff06f1be2e77b24282a84bfffde1
+size 9306474783

pytorch_model-00005-of-00005.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:8a13477240a0579f118187581d03e437cb30f434e949d1236f25c78bfaadcdb6
 size 1056441258
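The shard entries above are Git LFS pointers, so the listed oid sha256 values can be used to verify a downloaded shard. A small sketch, assuming the shard files sit in the current working directory:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # stream the file in 1 MiB chunks to avoid loading a ~10 GB shard into memory
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# expected value taken from the pointer file for shard 1 above
expected = "3b4c026c01d46ffe745f63cb1a4c4f55f87df4431d903d4a05b91c9b9af0e022"
print(sha256_of("pytorch_model-00001-of-00005.bin") == expected)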
pytorch_model.bin.index.json
CHANGED
@@ -204,9 +204,9 @@
   "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00005.bin",
   "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00005.bin",
   "model.layers.29.input_layernorm.weight": "pytorch_model-00003-of-00005.bin",
-  "model.layers.29.mlp.down_proj.weight": "pytorch_model-…
+  "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00005.bin",
   "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00005.bin",
-  "model.layers.29.mlp.up_proj.weight": "pytorch_model-…
+  "model.layers.29.mlp.up_proj.weight": "pytorch_model-00003-of-00005.bin",
   "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
   "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00005.bin",
   "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00005.bin",