|
""" |
|
1. jd_vocab_tokens的中文: |
|
|
|
|
|
|
|
2. 中文标点 |
|
|
|
|
|
3. 全中文(单字) unicode |
|
|
|
|
|
4. 全中文() |
|
词典大小:46145。其中 中文汉字数:{'total': 25359, '中文单字': 5089, '中文多字': 20270}, 中文标点数: 266 |
|
""" |
|
|
|
from collections import Counter

from transformers import AutoTokenizer
from zhon.hanzi import punctuation as zh_punc

from data_sample.oov_base import jd_vocab_tokens
from utils.text_util import is_chinese, has_chinese

tokenizer = AutoTokenizer.from_pretrained("tokenizer", trust_remote_code=True)
vocab = tokenizer.get_vocab()
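
# utils.text_util is project-local and not shown here. For reference, a
# minimal sketch of what this script assumes those predicates do (local
# stand-ins restricted to the CJK Unified Ideographs block; assumptions,
# not the project's actual implementations):
def _is_cjk_char(ch):
    """True if ch lies in the CJK Unified Ideographs block (is_chinese analogue)."""
    return '\u4e00' <= ch <= '\u9fa5'

def _has_cjk_char(text):
    """True if any character of text is a CJK ideograph (has_chinese analogue)."""
    return any(_is_cjk_char(ch) for ch in text)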
|
|
|
def zh_iterator():
    # Walk the CJK Unified Ideographs block; the +1 makes the range
    # inclusive of U+9FA5 (the original range stopped one character short).
    for idx in range(ord('\u4e00'), ord('\u9fa5') + 1):
        yield chr(idx)
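# Example: zh_iterator() walks U+4E00..U+9FA5 inclusive, so
# next(zh_iterator()) yields '一' (U+4E00).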
|
|
|
|
|
def test_coding_length(words, filter_fn=None):
    """Report how many token ids the tokenizer needs per single character."""
    all_length = []
    for word in words:
        if len(word) > 1:
            continue
        if filter_fn is not None and filter_fn(word):
            continue
        # encode() returns a plain list of token ids; exclude special
        # tokens so the count reflects the character itself.
        tokens = tokenizer.encode(word, add_special_tokens=False)
        all_length.append(len(tokens))
        if len(tokens) == 1:
            print(word, tokens)

    print("Token length distribution:", Counter(all_length))
    print("Average token length:", sum(all_length) / len(all_length))
|
|
|
|
|
def has_zh_char(text):
    """Return True if text contains at least one Chinese punctuation mark."""
    return any(ch in zh_punc for ch in text)
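# zhon.hanzi.punctuation is a plain string of full-width CJK punctuation
# (e.g. '。', '，', '、'), so per-character membership testing is sufficient.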
|
|
|
|
|
def iter_vocab():
    zh_token_count = {"total": 0, "single_char": 0, "multi_char": 0}
    zh_symbol_count = 0
    # Use a context manager so the output file is always closed.
    with open("vocab.zh.txt", "w", encoding="utf-8") as f_out:
        for idx in range(len(vocab)):
            decode_str = tokenizer.decode([idx])
            if has_chinese(decode_str):
                zh_token_count["total"] += 1
                if len(decode_str.strip()) > 1:
                    zh_token_count["multi_char"] += 1
                else:
                    zh_token_count["single_char"] += 1
                f_out.write("%d\t%s\tChinese character\n" % (idx, decode_str))
            elif has_zh_char(decode_str):
                zh_symbol_count += 1
                f_out.write("%d\t%s\tChinese punctuation\n" % (idx, decode_str))

    print("vocab size: %d, of which Chinese character tokens: %s, Chinese punctuation tokens: %d"
          % (len(vocab), str(zh_token_count), zh_symbol_count))
|
|
if __name__ == "__main__": |
|
|
|
|
|
|
|
|
|
iter_vocab() |