shibing624 committed
Commit 9954ba7 · 1 Parent(s): 2d0087e
Update README.md
README.md CHANGED
@@ -13,7 +13,7 @@ license: "apache-2.0"
 
 `macbert4csc-base-chinese` evaluated on SIGHAN2015 test data:
 
-- Char Level:
+- Char Level: precision:0.9372, recall:0.8640, f1:0.8991
 - Sentence Level: precision:0.8264, recall:0.7366, f1:0.7789
 
 Since the training data includes the SIGHAN2015 training set (to reproduce the paper), the model reaches SOTA performance on the SIGHAN2015 test set.
@@ -47,21 +47,26 @@ model = BertForMaskedLM.from_pretrained("shibing624/macbert4csc-base-chinese")
 model = model.to(device)
 
 texts = ["今天新情很好", "你找到你最喜欢的工作,我也很高心。"]
-
+with torch.no_grad():
+    outputs = model(**tokenizer(texts, padding=True, return_tensors='pt').to(device))
 
 def get_errors(corrected_text, origin_text):
-
+    sub_details = []
     for i, ori_char in enumerate(origin_text):
-        if ori_char in [' ', '“', '”', '‘', '’', '琊']:
-            # add
+        if ori_char in [' ', '“', '”', '‘', '’', '琊', '\n', '…', '—', '擤']:
+            # add unk word
             corrected_text = corrected_text[:i] + ori_char + corrected_text[i:]
             continue
         if i >= len(corrected_text):
             continue
         if ori_char != corrected_text[i]:
-
-
-
+            if ori_char.lower() == corrected_text[i]:
+                # pass english upper char
+                corrected_text = corrected_text[:i] + ori_char + corrected_text[i + 1:]
+                continue
+            sub_details.append((ori_char, corrected_text[i], i, i + 1))
+    sub_details = sorted(sub_details, key=operator.itemgetter(2))
+    return corrected_text, sub_details
 
 result = []
 for ids, text in zip(outputs.logits, texts):
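The character-level and sentence-level scores added in the first hunk read as standard precision/recall/F1. As a quick sanity check (assuming F1 here is the usual harmonic mean, which this commit does not state), the reported F1 values can be recomputed from the listed precision and recall:

```python
# Assumption: f1 is the harmonic mean of precision and recall.
# Numbers are copied from the README diff above.
def f1(p, r):
    return 2 * p * r / (p + r)

print(round(f1(0.9372, 0.8640), 4))  # char level     -> 0.8991
print(round(f1(0.8264, 0.7366), 4))  # sentence level -> 0.7789
```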
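The second hunk stops inside the decoding loop. For context, here is a minimal sketch of how that loop could continue: take the argmax token ids, decode them, trim to the input length, and diff the result against the original sentence with get_errors. The decode and trimming details are illustrative assumptions and are not part of this commit; texts, tokenizer, outputs and get_errors are taken from the snippet above.

```python
# Hedged sketch: continues the example in the diff; not taken from this commit.
result = []
for ids, text in zip(outputs.logits, texts):
    # greedy decode, drop special tokens and the spaces inserted by BERT decoding
    _text = tokenizer.decode(torch.argmax(ids, dim=-1), skip_special_tokens=True).replace(' ', '')
    corrected_text, details = get_errors(_text[:len(text)], text)  # assumed trim to input length
    print(text, '=>', corrected_text, details)
    result.append((corrected_text, details))
```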