xu-song's picture
add more tokenizers
f4973d4
raw
history blame
625 Bytes
def is_chinese(uchar):
    """Return True if the single character *uchar* is a CJK Unified Ideograph.

    The accepted range here is U+4E00..U+9FA5. NOTE(review): jieba's regex
    (https://github.com/fxsjy/jieba/blob/master/jieba/__init__.py#L48,
    ``re.compile("([\u4E00-\u9FD5]+)", re.U)``) uses the slightly wider
    upper bound U+9FD5 — this function intentionally keeps the narrower one.
    """
    low, high = '\u4e00', '\u9fa5'
    return low <= uchar <= high
def has_chinese(text):
    """Return True if *text* contains at least one Chinese character."""
    for ch in text:
        if is_chinese(ch):
            return True
    return False
def get_zh_count(text):
    """Return the number of Chinese characters in *text*.

    Sums the booleans produced by ``is_chinese`` (True counts as 1).
    """
    # Generator expression avoids materializing an intermediate list
    # the way sum([...]) did (ruff C4xx / PERF smell).
    return sum(is_chinese(uchar) for uchar in text)
def is_all_chinese(text):
    """Return True if every character of *text* is Chinese.

    Note: an empty string yields True (vacuous truth, same as ``all``).
    """
    for ch in text:
        if not is_chinese(ch):
            return False
    return True
def get_digit_count(text):
    """Return the number of ASCII digit characters ('0'-'9') in *text*.

    Deliberately tests membership in "0123456789" rather than using
    ``str.isdigit()``: the latter also accepts non-ASCII digits (e.g.
    Arabic-Indic '٣'), which would change the original behavior.
    """
    # Idiomatic counting: booleans sum as 0/1, no manual accumulator needed.
    return sum(char in "0123456789" for char in text)