import tiktoken
from tiktoken import Encoding

tokenizer = tiktoken.encoding_for_model('gpt-3.5-turbo')
# Mirror the Hugging Face tokenizer attribute name so downstream code can read vocab_size.
tokenizer.vocab_size = tokenizer.n_vocab

def decode(self, tokens, errors="replace"):
    # def decode(self, tokens: list[int], errors: str = "replace") -> str:
    # Decode token ids to text; any failure falls back to "null" instead of raising.
    try:
        decode_str = self._core_bpe.decode_bytes(tokens).decode("utf-8", errors=errors)
    except Exception:
        decode_str = "null"
    return decode_str

def convert_ids_to_tokens(self, tokens):
    # Return the raw bytes of each token id; use self rather than the module-level
    # tokenizer so the patched method works on any Encoding instance.
    return self.decode_tokens_bytes(tokens)

def get_vocab(self):
    """Returns vocab as a dict"""
    vocab = {}
    for i in range(self.vocab_size):
        try:
            token_byte = self.convert_ids_to_tokens([i])[0]
            token_str = token_byte.decode("utf-8")
            vocab[token_str] = i
        except KeyError:
            # Token id is not assigned in this encoding (reserved/unused ids).
            print("gpt_35_turbo decode KeyError", i)
        except UnicodeDecodeError:
            # Token bytes are not valid UTF-8 on their own (e.g. partial multi-byte sequences).
            print("gpt_35_turbo decode UnicodeDecodeError", i, str(token_byte))
    # vocab.update(self.added_tokens_encoder)
    return vocab

# Monkey-patch the tiktoken Encoding class with the helpers defined above.
Encoding.decode = decode
Encoding.convert_ids_to_tokens = convert_ids_to_tokens
Encoding.get_vocab = get_vocab
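
As a quick sanity check, the patched interface could be exercised as sketched below; the sample string and the __main__ guard are illustrative additions, not part of the original module:

if __name__ == "__main__":
    ids = tokenizer.encode("hello world")        # sample text, illustrative only
    print(tokenizer.decode(ids))                 # -> "hello world"
    print(tokenizer.convert_ids_to_tokens(ids))  # raw bytes per token, e.g. [b'hello', b' world']
    vocab = tokenizer.get_vocab()                # also prints a line for each id it skips
    print(len(vocab))                            # count of UTF-8-decodable tokens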