import os
import config
from transformers import AutoTokenizer
from vocab import TokenizerType

# Load the Baichuan-7B tokenizer, either from the Hugging Face Hub or from a local copy.
if config.USE_REMOTE:
    tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan-7B", trust_remote_code=True)
else:
    CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
    TOKENIZER_DIR = os.path.join(CURRENT_DIR, "Baichuan-7B")
    tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_DIR, trust_remote_code=True)

# Byte-level BPE implemented with SentencePiece
tokenizer.type = TokenizerType.ByteBPE
tokenizer.comments = "Uses Byte-Pair Encoding (BPE) from SentencePiece as the tokenization algorithm"
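
# A minimal usage sketch, assuming the tokenizer above loaded successfully; the sample
# sentence is illustrative and the printed ids/tokens depend on the Baichuan-7B vocab.
if __name__ == "__main__":
    sample = "hello world"
    token_ids = tokenizer.encode(sample)
    print(token_ids)
    print(tokenizer.convert_ids_to_tokens(token_ids))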