PereLluis13 committed
Commit 765c512 • 1 Parent(s): a6051e6

add eval script
eval.py ADDED
@@ -0,0 +1,131 @@
#!/usr/bin/env python3
from datasets import load_dataset, load_metric, Audio, Dataset
from transformers import pipeline, AutoFeatureExtractor
import re
import argparse
import unicodedata
from typing import Dict
from text.numbers_ca import normalize_numbers_ca

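# NOTE: text.numbers_ca (imported above) is a local module assumed to live
# alongside this script; normalize_numbers_ca presumably expands digits into
# their written-out Catalan form before scoring.
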
def log_results(result: Dataset, args: Dict[str, str]):
    """ DO NOT CHANGE. This function computes and logs the result metrics. """

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
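    # (both wer and cer come back as plain floats; WER can exceed 1.0 on very bad output)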
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = (
        f"WER: {wer_result}\n"
        f"CER: {cer_result}"
    )
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:

            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

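            # run over every row purely for the side effect of writing the logs:
            # each example becomes an index line followed by its text line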
            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """ DO ADAPT FOR YOUR USE CASE. this function normalizes the target text. """
    text = normalize_numbers_ca(text)

    chars_to_ignore_regex = '[,?.!\;\:"“%”�—…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = text.lower()
    # normalize non-standard (stylized) unicode characters
    text = unicodedata.normalize('NFKC', text)
    # remove punctuation
    text = re.sub(chars_to_ignore_regex, "", text)
    text = re.sub("á", "a", text)
    text = re.sub("ñ", "ny", text)
    # Let's also make sure we split on all kinds of newlines, spaces, etc...
    text = " ".join(text.split())

    return text

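# Illustrative trace (assuming normalize_numbers_ca leaves digit-free text
# unchanged): normalize_text('Què hi ha, “Español”?') -> 'què hi ha espanyol'
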
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first few examples
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=0)
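    # note: device=0 assumes a CUDA GPU is available; use device=-1 to run on CPU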

    # map function to decode audio
    def map_to_pred(batch):
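        # when chunk_length_s/stride_length_s are given, the pipeline splits long
        # audio into overlapping chunks and stitches the transcripts back together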
        prediction = asr(batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch[args.text_column])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets"
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument(
        "--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`"
    )
    parser.add_argument(
        "--text_column", type=str, default="sentence", help="The name of the dataset column containing the text data. Defaults to 'sentence'"
    )
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to None. For long audio files a good value would be 5.0 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to None. For long audio files a good value would be 1.0 seconds."
    )
    parser.add_argument(
        "--log_outputs", action='store_true', help="If defined, write outputs to log file for analysis."
    )
    args = parser.parse_args()

    main(args)
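
Usage sketch: the identifiers below are illustrative placeholders, not values taken from this commit; any model/dataset pair loadable with 🤗 Transformers/Datasets works, and --chunk_length_s 5.0 --stride_length_s 1.0 can be added for long audio files.

python eval.py --model_id <model_id> --dataset mozilla-foundation/common_voice_8_0 --config ca --split test --log_outputs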