import tiktoken
from tiktoken import Encoding

from utils.log_util import logger

# gpt-4 uses the cl100k_base encoding (n_vocab == 100277).
tokenizer = tiktoken.encoding_for_model('gpt-4')
# Expose a transformers-style vocab_size attribute on this instance.
tokenizer.vocab_size = tokenizer.n_vocab


def decode(self, tokens, errors="replace"):
    # def decode(self, tokens: list[int], errors: str = "replace") -> str:
    """Decode token ids to a string, returning "null" if decoding fails."""
    try:
        decode_str = self._core_bpe.decode_bytes(tokens).decode("utf-8", errors=errors)
    except Exception:  # e.g. unassigned token ids raise from the Rust core
        decode_str = "null"
    return decode_str


def convert_ids_to_tokens(self, tokens):
    """Map token ids to their raw byte representations."""
    # Use the bound instance rather than the module-level `tokenizer`,
    # so the patched method works on any Encoding, not just gpt-4's.
    return self.decode_tokens_bytes(tokens)


def get_vocab(self):
    """Return the vocab as a dict mapping token string -> token id."""
    vocab = {}
    key_error_list = []
    unicode_decode_error_list = []
    for i in range(self.vocab_size):
        try:
            token_byte = self.convert_ids_to_tokens([i])[0]
            token_str = token_byte.decode("utf-8")
            vocab[token_str] = i
        except KeyError:  # unassigned ids: 100256 and 100261-100275
            key_error_list.append(i)
        except UnicodeDecodeError:  # very common: many byte-level tokens are not valid UTF-8 on their own
            unicode_decode_error_list.append((i, str(token_byte)))
    # vocab.update(self.added_tokens_encoder)
    logger.info(f"gpt-4 {len(key_error_list)} KeyError: {key_error_list}")
    logger.info(f"gpt-4 {len(unicode_decode_error_list)} UnicodeDecodeError: {unicode_decode_error_list[:5]}")
    return vocab


# Monkey-patch the tiktoken Encoding class so every instance exposes a
# transformers-style tokenizer interface.
Encoding.decode = decode
Encoding.convert_ids_to_tokens = convert_ids_to_tokens
Encoding.get_vocab = get_vocab
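

if __name__ == "__main__":
    # A minimal usage sketch of the patched interface (not part of the
    # original module). Token id 100256 is one of the unassigned cl100k_base
    # ids noted above, so decode() should hit its fallback and return "null".
    ids = tokenizer.encode("hello world")
    print(tokenizer.decode(ids))                 # "hello world"
    print(tokenizer.convert_ids_to_tokens(ids))  # raw bytes, e.g. [b'hello', b' world']
    print(tokenizer.decode([100256]))            # "null" (unassigned id)
    vocab = tokenizer.get_vocab()
    print(len(vocab))                            # counts UTF-8-decodable tokens only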