"""
依赖 torch tiktoken
依赖 transformer 4.31.0 及以上,

https://huggingface.co/tangger/Qwen-7B-Chat  Qwen官方模型临时下架了,这个是备份
"""

import os
from transformers import AutoTokenizer
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
TOKENIZER_DIR = os.path.join(CURRENT_DIR, "Qwen-7B-Chat")

# Note: the tokenizer's default behavior has changed; protection against special-token injection is now disabled by default.
# tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_DIR, trust_remote_code=True)
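
# Qwen's remote tokenizer code wraps tiktoken, and per the upstream Qwen
# tokenization notes the handling of literal special-token strings
# (e.g. "<|endoftext|>") in user input is controlled with tiktoken-style
# `allowed_special` / `disallowed_special` keyword arguments, e.g.
#   tokenizer("<|endoftext|>", allowed_special={"<|endoftext|>"})
# This is an assumption based on those notes, not verified by this script;
# check the model repository before relying on it.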

def test():
    # Sample text mixing Chinese, digits, and a run of 8 consecutive spaces,
    # to inspect how the Qwen tokenizer segments each of them.
    encoding = tokenizer.encode("测试华为手机10086        8个空格")
    for token_id in encoding:
        # Qwen's tokenizer returns tokens as raw bytes, hence the UTF-8 decode;
        # errors="replace" guards against tokens that are partial multi-byte sequences.
        token = tokenizer.convert_ids_to_tokens([token_id])[0].decode("utf-8", errors="replace")
        print(token_id, ":", token)
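
# A minimal round-trip sketch using only the standard Hugging Face
# encode()/decode() API. A byte-level BPE tokenizer such as Qwen's is
# generally expected to reproduce the input exactly, whitespace included;
# this prints both strings rather than asserting it. It is not called from
# __main__ below; invoke it manually if useful.
def round_trip_demo():
    text = "测试华为手机10086        8个空格"
    ids = tokenizer.encode(text)
    decoded = tokenizer.decode(ids)
    print("original:", repr(text))
    print("decoded :", repr(decoded))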

if __name__ == "__main__":
    test()