# tokenizer-arena/vocab/llama/gpt_neox/load_by_gpt_neox.py
# Alternative: load tokenizer.json with the HuggingFace `tokenizers` library
# from tokenizers import Tokenizer
# tokenizer = Tokenizer.from_file("tokenizer/tokenizer.json")
import sentencepiece as spm
from transformers import LlamaTokenizer

text = "nice job 华为手机"  # mixed English/Chinese test string

# 1. Encode with the raw SentencePiece model: no special tokens are added.
tokenizer = spm.SentencePieceProcessor(model_file="tokenizer/tokenizer.model")
tokens = tokenizer.encode(text)  # [7575, 4982, 29871, 31266, 30573, 30880, 31429]
print(tokens)
# 2. Encode with the transformers LlamaTokenizer: it prepends the BOS token (<s>, id 1).
tokenizer = LlamaTokenizer.from_pretrained("tokenizer")
tokens = tokenizer.encode(text)  # [1, 7575, 4982, 29871, 31266, 30573, 30880, 31429]
print(tokens)
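
# Optional sanity check (a sketch, assuming the same "tokenizer" directory as above):
# decode() should round-trip the ids back to the original text, and
# convert_ids_to_tokens() makes the leading <s> (BOS, id 1) added by LlamaTokenizer visible.
print(tokenizer.decode(tokens, skip_special_tokens=True))
print(tokenizer.convert_ids_to_tokens(tokens))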