import os

from tokenizers import Tokenizer

# Resolve the tokenizer file relative to this script so loading works
# regardless of the current working directory.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
TOKENIZER_PATH = os.path.join(CURRENT_DIR, "20B_tokenizer_chinese.json")

tokenizer = Tokenizer.from_file(TOKENIZER_PATH)

# Vocabulary size, counting any added special tokens.
vocab_size = tokenizer.get_vocab_size(with_added_tokens=True)
# Equivalent: vocab_size = len(tokenizer.get_vocab())
# Note: tokenizers.Tokenizer has no vocab_size attribute (unlike
# transformers tokenizers), so keep the size in a plain variable.
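For reference, a minimal usage sketch of the loaded tokenizer; the sample string and print statements are illustrative assumptions, not part of the original snippet:

# Encode a sample string; the returned Encoding exposes ids and token strings.
encoding = tokenizer.encode("你好，世界")
print(encoding.ids)      # integer token ids
print(encoding.tokens)   # the corresponding token strings
# Round-trip the ids back to text.
print(tokenizer.decode(encoding.ids))
print(vocab_size)        # vocabulary size computed above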