import tiktoken
from tiktoken import Encoding
from utils.log_util import logger
# gpt-3.5-turbo uses the cl100k_base encoding
tokenizer = tiktoken.encoding_for_model('gpt-3.5-turbo')
tokenizer.vocab_size = tokenizer.n_vocab  # expose the vocab size under a common attribute name
tokenizer.comments = "tiktoken is a fast BPE tokeniser for use with OpenAI's models. 16 ids in this vocab are unassigned and raise KeyError on decode."
tokenizer.reversible = True # It's reversible and lossless, so you can convert tokens back into the original text
def decode(self, tokens, errors="replace"):
    # def decode(self, tokens: list[int], errors: str = "replace") -> str:
    """Decode token ids to a string; fall back to "null" on any failure."""
    try:
        decode_str = self._core_bpe.decode_bytes(tokens).decode("utf-8", errors=errors)
    except Exception:  # e.g. KeyError for ids that have no token assigned
        decode_str = "null"
    return decode_str
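# Illustration of the fallback (an assumption, not part of the original file;
# ids are examples and may differ across tiktoken versions):
#   tokenizer.decode([15339, 1917])  # -> "hello world"
#   tokenizer.decode([100256])       # -> "null"; 100256 is one of the 16
#                                    #    unassigned ids, so decode_bytes raises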
def convert_ids_to_tokens(self, tokens):
    """Return the raw bytes of each token id."""
    return self.decode_tokens_bytes(tokens)
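# Example (illustrative ids): convert_ids_to_tokens([15339, 1917]) -> [b'hello', b' world'].
# Raw bytes are returned rather than strings because BPE pieces need not be valid UTF-8.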
def get_vocab(self, token_type="str"):
    """Returns vocab as a dict mapping token -> id
    :param token_type: "str" to key tokens by decoded strings, "byte" to key them by raw bytes
    :return: dict of token -> id
    """
    vocab = {}
    key_error_list = []
    unicode_decode_error_list = []
    for i in range(self.vocab_size):
        try:
            token_byte = self.convert_ids_to_tokens([i])[0]
            token_str = token_byte.decode("utf-8")
            vocab[token_str if token_type == "str" else token_byte] = i
        except KeyError:  # 16 KeyError: 100256, 100261-100275 have no token assigned
            key_error_list.append(i)
            # vocab[f"[KeyError]-{i}"] = i
        except UnicodeDecodeError:  # 773 UnicodeDecodeError: bytes are not valid UTF-8, keep the raw bytes as key
            unicode_decode_error_list.append((i, str(token_byte)))
            vocab[token_byte] = i
    # vocab.update(self.added_tokens_encoder)
    logger.info(f"gpt_35_turbo {len(key_error_list)} KeyError: {key_error_list}")
    logger.info(f"gpt_35_turbo {len(unicode_decode_error_list)} UnicodeDecodeError: {unicode_decode_error_list[:5]}")
    return vocab
# tiktoken patch: attach the helpers above to every Encoding instance
Encoding.decode = decode
Encoding.convert_ids_to_tokens = convert_ids_to_tokens
Encoding.get_vocab = get_vocab
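# Minimal usage sketch (an illustration, not part of the original file; token
# ids shown are examples and may differ across tiktoken versions):
if __name__ == "__main__":
    ids = tokenizer.encode("hello world")
    print(ids)                                   # e.g. [15339, 1917]
    print(tokenizer.decode(ids))                 # -> "hello world"
    print(tokenizer.convert_ids_to_tokens(ids))  # -> [b'hello', b' world']
    vocab = tokenizer.get_vocab()
    print(len(vocab))                            # roughly n_vocab minus the 16 KeyError ids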