Both the raw SentencePiece model and the Hugging Face `LlamaTokenizer` load the same vocabulary, so they produce identical token IDs; the only difference is that `LlamaTokenizer` prepends the BOS token (id `1`):

```python
# Alternative: load via the HF tokenizers library
# from tokenizers import Tokenizer
# tokenizer = Tokenizer.from_file("tokenizer/tokenizer.json")

import sentencepiece as spm
from transformers import LlamaTokenizer

text = "nice job 华为手机"

# Raw SentencePiece: encode() returns plain token ids, no special tokens
sp_tokenizer = spm.SentencePieceProcessor(model_file="tokenizer/tokenizer.model")
tokens = sp_tokenizer.encode(text)
print(tokens)  # [7575, 4982, 29871, 31266, 30573, 30880, 31429]

# HF wrapper: encode() prepends the BOS token (id 1) by default
hf_tokenizer = LlamaTokenizer.from_pretrained("tokenizer")
tokens = hf_tokenizer.encode(text)
print(tokens)  # [1, 7575, 4982, 29871, 31266, 30573, 30880, 31429]
```
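To confirm that the leading `1` is just the BOS marker, you can map the ids back to their surface pieces and round-trip the text. A minimal sketch, assuming the same `tokenizer/` directory as above; the pieces printed depend on the vocabulary file:

```python
import sentencepiece as spm
from transformers import LlamaTokenizer

sp = spm.SentencePieceProcessor(model_file="tokenizer/tokenizer.model")
hf = LlamaTokenizer.from_pretrained("tokenizer")

ids = [1, 7575, 4982, 29871, 31266, 30573, 30880, 31429]

# id 1 is the BOS control token "<s>"; the rest are ordinary pieces
print([sp.id_to_piece(i) for i in ids])

# Disabling special tokens on the HF side should reproduce the raw
# SentencePiece ids exactly
print(hf.encode("nice job 华为手机", add_special_tokens=False) == ids[1:])

# decode() drops control tokens, so the ids round-trip to the input text
print(sp.decode(ids))  # nice job 华为手机
```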