# Source: Hugging Face file viewer (commit 9495a4f, 330 bytes) — page chrome removed.
from transformers import AutoTokenizer
from vocab import TokenizerType

# Baichuan-7B ships its tokenizer implementation inside the model repository,
# so trust_remote_code=True is required for AutoTokenizer to load it.
tokenizer = AutoTokenizer.from_pretrained(
    "baichuan-inc/Baichuan-7B",
    trust_remote_code=True,
)

# Tag the tokenizer for the vocab registry: SentencePiece byte-level BPE.
tokenizer.comments = "使用 SentencePiece 中的 Byte-Pair Encoding (BPE) 作为分词算法"
tokenizer.type = TokenizerType.ByteBPE