genggui001 committed
Commit 8e067ec
Parent: 4fcc362

to PreTrainedTokenizerFast
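This commit swaps the repository's custom slow tokenizer (tokenization_internlm.py plus the SentencePiece tokenizer.model, wired in through auto_map) for a serialized tokenizer.json handled by the stock PreTrainedTokenizerFast class, so loading the tokenizer should no longer require trust_remote_code. A minimal loading sketch, assuming a local clone of this repository at this commit:

# Minimal sketch: load the converted tokenizer from a local clone of this repo (assumption).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./")            # path to the local clone
print(type(tok).__name__)                            # expected: PreTrainedTokenizerFast
print(tok.bos_token, tok.eos_token, tok.pad_token)   # <s> </s> <pad> per the updated configs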
added_tokens.json DELETED
@@ -1,5 +0,0 @@
-{
-  "<pad>": 103168,
-  "<|modelname|>": 103169,
-  "<|modelorg|>": 103170
-}

special_tokens_map.json CHANGED
@@ -1,8 +1,4 @@
 {
-  "additional_special_tokens": [
-    "<|modelname|>",
-    "<|modelorg|>"
-  ],
   "bos_token": "<s>",
   "eos_token": "</s>",
   "pad_token": "<pad>",
tokenization_internlm.py DELETED
@@ -1,242 +0,0 @@
-# coding=utf-8
-# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
-#
-# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
-# and OPT implementations in this library. It has been modified from its
-# original forms to accommodate minor architectural differences compared
-# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tokenization classes for IntermLM."""
-import os
-from shutil import copyfile
-from typing import Any, Dict, List, Optional, Tuple
-
-import sentencepiece as spm
-
-from transformers.tokenization_utils import PreTrainedTokenizer
-from transformers.utils import logging
-
-
-logger = logging.get_logger(__name__)
-
-VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
-
-PRETRAINED_VOCAB_FILES_MAP = {}
-
-
-class InternLMTokenizer(PreTrainedTokenizer):
-    """
-    Construct a InternLM tokenizer. Based on byte-level Byte-Pair-Encoding.
-
-    Args:
-        vocab_file (`str`):
-            Path to the vocabulary file.
-    """
-
-    vocab_files_names = VOCAB_FILES_NAMES
-    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
-    model_input_names = ["input_ids", "attention_mask"]
-    _auto_class = "AutoTokenizer"
-
-    def __init__(
-        self,
-        vocab_file,
-        unk_token="<unk>",
-        bos_token="<s>",
-        eos_token="</s>",
-        pad_token="</s>",
-        sp_model_kwargs: Optional[Dict[str, Any]] = None,
-        add_bos_token=True,
-        add_eos_token=False,
-        decode_with_prefix_space=False,
-        clean_up_tokenization_spaces=False,
-        **kwargs,
-    ):
-        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-        self.vocab_file = vocab_file
-        self.add_bos_token = add_bos_token
-        self.add_eos_token = add_eos_token
-        self.decode_with_prefix_space = decode_with_prefix_space
-        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-        self.sp_model.Load(vocab_file)
-        self._no_prefix_space_tokens = None
-        super().__init__(
-            bos_token=bos_token,
-            eos_token=eos_token,
-            unk_token=unk_token,
-            pad_token=pad_token,
-            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-            **kwargs,
-        )
-
-    """ Initialization"""
-
-    @property
-    def no_prefix_space_tokens(self):
-        if self._no_prefix_space_tokens is None:
-            vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
-            self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
-        return self._no_prefix_space_tokens
-
-    @property
-    def vocab_size(self):
-        """Returns vocab size"""
-        return self.sp_model.get_piece_size()
-
-    @property
-    def bos_token_id(self) -> Optional[int]:
-        return self.sp_model.bos_id()
-
-    @property
-    def eos_token_id(self) -> Optional[int]:
-        return self.sp_model.eos_id()
-
-    def get_vocab(self):
-        """Returns vocab as a dict"""
-        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
-        vocab.update(self.added_tokens_encoder)
-        return vocab
-
-    def _tokenize(self, text):
-        """Returns a tokenized string."""
-        return self.sp_model.encode(text, out_type=str)
-
-    def _convert_token_to_id(self, token):
-        """Converts a token (str) in an id using the vocab."""
-        return self.sp_model.piece_to_id(token)
-
-    def _convert_id_to_token(self, index):
-        """Converts an index (integer) in a token (str) using the vocab."""
-        token = self.sp_model.IdToPiece(index)
-        return token
-
-    def _maybe_add_prefix_space(self, tokens, decoded):
-        if tokens and tokens[0] not in self.no_prefix_space_tokens:
-            return " " + decoded
-        else:
-            return decoded
-
-    def convert_tokens_to_string(self, tokens):
-        """Converts a sequence of tokens (string) in a single string."""
-        current_sub_tokens = []
-        out_string = ""
-        prev_is_special = False
-        for token in tokens:
-            # make sure that special tokens are not decoded using sentencepiece model
-            if token in self.all_special_tokens:
-                if not prev_is_special:
-                    out_string += " "
-                out_string += self.sp_model.decode(current_sub_tokens) + token
-                prev_is_special = True
-                current_sub_tokens = []
-            else:
-                current_sub_tokens.append(token)
-                prev_is_special = False
-        out_string += self.sp_model.decode(current_sub_tokens)
-        out_string = self.clean_up_tokenization(out_string)
-        out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
-        return out_string[1:]
-
-    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
-        """
-        Save the vocabulary and special tokens file to a directory.
-
-        Args:
-            save_directory (`str`):
-                The directory in which to save the vocabulary.
-
-        Returns:
-            `Tuple(str)`: Paths to the files saved.
-        """
-        if not os.path.isdir(save_directory):
-            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
-            return
-        out_vocab_file = os.path.join(
-            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
-        )
-
-        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
-            copyfile(self.vocab_file, out_vocab_file)
-        elif not os.path.isfile(self.vocab_file):
-            with open(out_vocab_file, "wb") as fi:
-                content_spiece_model = self.sp_model.serialized_model_proto()
-                fi.write(content_spiece_model)
-
-        return (out_vocab_file,)
-
-    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
-        if self.add_bos_token:
-            bos_token_ids = [self.bos_token_id]
-        else:
-            bos_token_ids = []
-
-        output = bos_token_ids + token_ids_0
-
-        if token_ids_1 is not None:
-            output = output + token_ids_1
-
-        if self.add_eos_token:
-            output = output + [self.eos_token_id]
-
-        return output
-
-    def get_special_tokens_mask(
-        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
-    ) -> List[int]:
-        """
-        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
-        special tokens using the tokenizer `prepare_for_model` method.
-
-        Args:
-            token_ids_0 (`List[int]`):
-                List of IDs.
-            token_ids_1 (`List[int]`, *optional*):
-                Optional second list of IDs for sequence pairs.
-            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
-                Whether or not the token list is already formatted with special tokens for the model.
-
-        Returns:
-            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
-        """
-        if already_has_special_tokens:
-            return super().get_special_tokens_mask(
-                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
-            )
-
-        if token_ids_1 is None:
-            return [1] + ([0] * len(token_ids_0)) + [1]
-        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
-
-    def create_token_type_ids_from_sequences(
-        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
-    ) -> List[int]:
-        """
-        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
-        use of token type ids, therefore a list of zeros is returned.
-
-        Args:
-            token_ids_0 (`List[int]`):
-                List of IDs.
-            token_ids_1 (`List[int]`, *optional*):
-                Optional second list of IDs for sequence pairs.
-
-        Returns:
-            `List[int]`: List of zeros.
-        """
-        eos = [self.eos_token_id]
-
-        if token_ids_1 is None:
-            return len(token_ids_0 + eos) * [0]
-        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

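The removed slow tokenizer prepended <s> and, by default, appended no </s> (add_bos_token=True, add_eos_token=False), and patched SentencePiece prefix spaces back in during decoding; after this commit that behaviour has to be reproduced by the post-processor and decoder serialized inside tokenizer.json. A rough before/after comparison sketch, using the placeholder repo id "user/repo" and the commit hashes from the header (the full commit shas may be needed for revision=):

# Sketch: compare the old remote-code tokenizer with the new fast tokenizer (repo id is a placeholder).
from transformers import AutoTokenizer

slow = AutoTokenizer.from_pretrained("user/repo", revision="4fcc362", trust_remote_code=True)
fast = AutoTokenizer.from_pretrained("user/repo", revision="8e067ec")

text = "Hello world"
print(slow(text)["input_ids"])  # <s> prepended, no </s> appended by the old class
print(fast(text)["input_ids"])  # should match if tokenizer.json encodes the same post-processing
print(slow.decode(slow(text)["input_ids"]) == fast.decode(fast(text)["input_ids"]))
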
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
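Because the rendered diff is unavailable, the added tokenizer.json can be inspected directly with the tokenizers library; a short sketch, assuming a local clone of this repository:

# Sketch: inspect the newly added tokenizer.json without going through transformers.
from tokenizers import Tokenizer

tk = Tokenizer.from_file("tokenizer.json")
print(tk.get_vocab_size())        # should cover ids up to at least 103170 if the old ids were kept
print(tk.token_to_id("<pad>"))    # expected 103168 if the pad id was preserved
print(tk.encode("Hello world").tokens)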
 
tokenizer.model DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:aab622d98c98677a1a51f969e25765154487bf3e85c7819db105db2fcacba83f
-size 1658691

tokenizer_config.json CHANGED
@@ -1,20 +1,10 @@
 {
-  "additional_special_tokens": [
-    "<|modelname|>",
-    "<|modelorg|>"
-  ],
-  "auto_map": {
-    "AutoTokenizer": [
-      "tokenization_internlm.InternLMTokenizer",
-      null
-    ]
-  },
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<pad>",
-  "padding_side": "right",
-  "tokenizer_class": "InternLMTokenizer",
+  "padding_side": "left",
+  "tokenizer_class": "PreTrainedTokenizerFast",
   "unk_token": "<unk>"
 }
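
The config change also flips padding_side from "right" to "left", the usual setting for batched generation with a decoder-only model: left padding keeps each prompt's real tokens adjacent to the position where new tokens are appended. A small sketch, assuming a local clone of this repository:

# Sketch: with padding_side="left", pad ids land at the start of the shorter sequence.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./")  # local clone (assumption)
batch = tok(["short prompt", "a considerably longer prompt"], padding=True)
print(batch["input_ids"])       # shorter row is left-padded with the <pad> id
print(batch["attention_mask"])  # zeros mark the left padding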