update training and eval code

Files changed:
- .ipynb_checkpoints/alphabet-checkpoint.json  +1 -0
- .ipynb_checkpoints/eval-checkpoint.py  +149 -0
- alphabet.json  +1 -0
- eval.py  +149 -0
- language_model/.ipynb_checkpoints/attrs-checkpoint.json  +1 -3
- language_model/attrs.json  +1 -3
- log_openslr_SLR53_train[95%:]_predictions.txt  +0 -0
- log_openslr_SLR53_train[95%:]_targets.txt  +0 -0
- openslr_SLR53_train[95%:]_eval_results.txt  +2 -0
- run.sh  +1 -0
- run_speech_recognition_ctc.py  +760 -0
.ipynb_checkpoints/alphabet-checkpoint.json
ADDED (+1)
Jupyter autosave copy; its single line is identical to alphabet.json below.
.ipynb_checkpoints/eval-checkpoint.py
ADDED (+149)
Jupyter autosave copy; its 149 lines are identical to eval.py below.
alphabet.json
ADDED
@@ -0,0 +1 @@
{"labels": [" ", "_", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "x", "y", "z", "।", "ঁ", "ং", "ঃ", "অ", "আ", "ই", "ঈ", "উ", "ঊ", "ঋ", "এ", "ঐ", "ও", "ঔ", "ক", "খ", "গ", "ঘ", "ঙ", "চ", "ছ", "জ", "ঝ", "ঞ", "ট", "ঠ", "ড", "ঢ", "ণ", "ত", "থ", "দ", "ধ", "ন", "প", "ফ", "ব", "ভ", "ম", "য", "র", "ল", "শ", "ষ", "স", "হ", "়", "া", "ি", "ী", "ু", "ূ", "ৃ", "ে", "ৈ", "ো", "ৌ", "্", "ৎ", "ৗ", "ড়", "ঢ়", "য়", "০", "১", "২", "৩", "৪", "৫", "৬", "৭", "৮", "৯", "ৰ", "\u200c", "\u200d", "\u200e", "[pad]", "<s>", "</s>"], "is_bpe": false}
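Together with the language_model/ files changed below, this alphabet.json follows the on-disk layout that pyctcdecode's BeamSearchDecoderCTC.save_to_dir() writes: it is the label list the CTC beam-search decoder uses to map output ids back to characters. A minimal sketch (not part of this commit) of wiring such an alphabet to a KenLM model by hand; the KenLM filename is an assumption, and alpha/beta mirror the attrs.json shown further down:

# Sketch only: build a CTC beam-search decoder from an alphabet like the one above.
# The KenLM path is an assumed name; alpha/beta match language_model/attrs.json below.
import json
from pyctcdecode import build_ctcdecoder

with open("alphabet.json") as f:
    labels = json.load(f)["labels"]

decoder = build_ctcdecoder(
    labels,
    kenlm_model_path="language_model/5gram.bin",  # assumed filename of the KenLM binary
    alpha=0.5,  # language-model weight
    beta=1.5,   # word-insertion bonus
)
# decoder.decode(logits) then turns a (time, vocab) log-probability matrix into text.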
eval.py
ADDED
@@ -0,0 +1,149 @@
#!/usr/bin/env python3
import argparse
import re
from typing import Dict
import os

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline
import transformers


def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\n" f"CER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:

            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. this function normalizes the target text."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    # asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device, use_auth=True)
    config = transformers.PretrainedConfig.from_pretrained(args.model_id)
    model = transformers.Wav2Vec2ForCTC.from_pretrained(args.model_id)
    processor = transformers.AutoProcessor.from_pretrained(args.model_id)
    vocab_dict = processor.tokenizer.get_vocab()
    sorted_vocab_dict = {k.lower(): v for k, v in sorted(vocab_dict.items(), key=lambda item: item[1])}
    print(list(sorted_vocab_dict))

    # with lm
    asr = pipeline(
        "automatic-speech-recognition",
        config=config,
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        decoder=processor.decoder,
        device=args.device,
    )
    # without lm
    # asr = pipeline("automatic-speech-recognition", config=config, model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
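Relative to the template this file is based on (the commented-out single-line pipeline call above), the substantive change is that the pipeline is built from an AutoProcessor and given decoder=processor.decoder, so transcription runs through the pyctcdecode/KenLM beam search; the second commented-out variant evaluates the same model without the language model. log_results then writes the aggregate WER/CER file and, when --log_outputs is set, the per-utterance prediction and target logs that appear further down in this commit.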
language_model/.ipynb_checkpoints/attrs-checkpoint.json
CHANGED (+1 -3)
Jupyter autosave copy; the change is identical to language_model/attrs.json below.
language_model/attrs.json
CHANGED
@@ -1,3 +1 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f5ffd02e1ceef6517476e72ebe7997ddef7e92d27cb5a23d6695d64c4317d6ad
-size 78
+{"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
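attrs.json holds the beam-search decoder's scoring parameters: alpha weighs the KenLM language-model score, beta is the word-insertion bonus, unk_score_offset penalizes unknown tokens, and score_boundary controls whether the LM scores sentence boundaries. The change above replaces a Git LFS pointer stub with the actual JSON so the decoder loads correctly. A small sketch (not part of this commit) of reloading a decoder saved in this layout and overriding the same parameters at run time; the load path assumes the repository root:

# Sketch only: reload the saved decoder (alphabet.json + language_model/) and tweak its parameters.
from pyctcdecode import BeamSearchDecoderCTC

decoder = BeamSearchDecoderCTC.load_from_dir(".")  # assumes the repo root is the saved-decoder directory
decoder.reset_params(alpha=0.5, beta=1.5, unk_score_offset=-10.0, lm_score_boundary=True)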
log_openslr_SLR53_train[95%:]_predictions.txt
ADDED
The diff for this file is too large to render; see the raw file.

log_openslr_SLR53_train[95%:]_targets.txt
ADDED
The diff for this file is too large to render; see the raw file.
openslr_SLR53_train[95%:]_eval_results.txt
ADDED
@@ -0,0 +1,2 @@
WER: 0.21726385291857586
CER: 0.04725010353701041
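The file names encode the eval.py arguments (dataset openslr, config SLR53, split train[95%:], with --log_outputs enabled), so the WER/CER above were measured with the language-model decoder on the held-out 5% of the OpenSLR SLR53 train split; the two log files above contain the matching per-utterance predictions and references.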
run.sh
ADDED
@@ -0,0 +1 @@
python run_speech_recognition_ctc.py --dataset_name="openslr" --model_name_or_path="facebook/wav2vec2-xls-r-300m" --dataset_config_name="SLR53" --output_dir="./wav2vec2-xls-r-300m-bengali" --overwrite_output_dir --num_train_epochs="50" --per_device_train_batch_size="32" --per_device_eval_batch_size="32" --gradient_accumulation_steps="1" --learning_rate="7.5e-5" --warmup_steps="2000" --length_column_name="input_length" --evaluation_strategy="steps" --text_column_name="sentence" --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – --save_steps="2000" --eval_steps="3000" --logging_steps="100" --layerdrop="0.0" --activation_dropout="0.1" --save_total_limit="3" --freeze_feature_encoder --feat_proj_dropout="0.0" --mask_time_prob="0.75" --mask_time_length="10" --mask_feature_prob="0.25" --mask_feature_length="64" --preprocessing_num_workers 32 --gradient_checkpointing --use_auth_token --fp16 --group_by_length --do_train --do_eval --report_to wandb --min_duration_in_seconds 0.5 --push_to_hub
run_speech_recognition_ctc.py
ADDED
@@ -0,0 +1,760 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""

import functools
import json
import logging
from optparse import Option
import os
import re
import sys
import warnings
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union

import datasets
import numpy as np
import torch
from datasets import DatasetDict, load_dataset, load_metric


import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForCTC,
    AutoProcessor,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Processor,
    set_seed,
)
from transformers.trainer_pt_utils import get_parameter_names
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


import wandb
from transformers import TrainingArguments, Trainer

wandb.init(project="wav2vec2_bn_xlsr", entity="arijitx")

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.16.0.dev0")

require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")


logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    tokenizer_name_or_path: Optional[str] = field(
        default=None,
        metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_dropout: float = field(
        default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: float = field(
        default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
    hidden_dropout: float = field(
        default=0.0,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    final_dropout: float = field(
        default=0.0,
        metadata={"help": "The dropout probability for the final projection layer."},
    )
    mask_time_prob: float = field(
        default=0.05,
        metadata={
            "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector"
            "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
            "vectors will be masked along the time axis."
        },
    )
    mask_time_length: int = field(
        default=10,
        metadata={"help": "Length of vector span to mask along the time axis."},
    )
    mask_feature_prob: float = field(
        default=0.0,
        metadata={
            "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector"
            "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
        },
    )
    mask_feature_length: int = field(
        default=10,
        metadata={"help": "Length of vector span to mask along the feature axis."},
    )
    layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
    ctc_loss_reduction: Optional[str] = field(
        default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    dataset_name: str = field(
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: str = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: str = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="test",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    text_column_name: str = field(
        default="text",
        metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
            "value if set."
        },
    )
    chars_to_ignore: Optional[List[str]] = list_field(
        default=None,
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
    eval_metrics: List[str] = list_field(
        default=["wer"],
        metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
    )
    max_duration_in_seconds: float = field(
        default=20.0,
        metadata={
            "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`"
        },
    )
    min_duration_in_seconds: float = field(
        default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
    )
    preprocessing_only: bool = field(
        default=False,
        metadata={
            "help": "Whether to only do data preprocessing and skip training. "
            "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
            "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
            "so that the cached datasets can consequently be loaded in distributed training"
        },
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "If :obj:`True`, will use the token generated when running"
            ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
        },
    )
    unk_token: str = field(
        default="[UNK]",
        metadata={"help": "The unk token for the tokenizer"},
    )
    pad_token: str = field(
        default="[PAD]",
        metadata={"help": "The padding token for the tokenizer"},
    )
    word_delimiter_token: str = field(
        default="|",
        metadata={"help": "The word delimiter token for the tokenizer"},
    )
    phoneme_language: Optional[str] = field(
        default=None,
        metadata={
            "help": "The target language that should be used be"
            " passed to the tokenizer for tokenization. Note that"
            " this is only relevant if the model classifies the"
            " input audio to a sequence of phoneme sequences."
        },
    )

@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.AutoProcessor`)
            The processor used for proccessing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    processor: AutoProcessor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch


def create_vocabulary_from_data(
    datasets: DatasetDict,
    word_delimiter_token: Optional[str] = None,
    unk_token: Optional[str] = None,
    pad_token: Optional[str] = None,
):
    # Given training and test labels create vocabulary
    def extract_all_chars(batch):
        all_text = " ".join(batch["target_text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocabs = datasets.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=datasets["train"].column_names,
    )

    # take union of all unique characters in each dataset
    vocab_set = functools.reduce(
        lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
    )

    vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}

    # replace white space with delimiter token
    if word_delimiter_token is not None:
        vocab_dict[word_delimiter_token] = vocab_dict[" "]
        del vocab_dict[" "]

    # add unk and pad token
    if unk_token is not None:
        vocab_dict[unk_token] = len(vocab_dict)

    if pad_token is not None:
        vocab_dict[pad_token] = len(vocab_dict)

    return vocab_dict


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # 1. First, let's load the dataset
    raw_datasets = DatasetDict()

    if training_args.do_train:
        raw_datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split='train[:95%]',
            use_auth_token=data_args.use_auth_token,
        )

        if data_args.audio_column_name not in raw_datasets["train"].column_names:
            raise ValueError(
                f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
                "Make sure to set `--audio_column_name` to the correct audio column - one of "
                f"{', '.join(raw_datasets['train'].column_names)}."
            )

        if data_args.text_column_name not in raw_datasets["train"].column_names:
            raise ValueError(
                f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
                "Make sure to set `--text_column_name` to the correct text column - one of "
                f"{', '.join(raw_datasets['train'].column_names)}."
            )

        if data_args.max_train_samples is not None:
            raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))

    if training_args.do_eval:
        raw_datasets["eval"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split='train[95%:]',
            use_auth_token=data_args.use_auth_token,
        )

        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))

    # 2. We remove some special characters from the datasets
    # that make training complicated and do not help in transcribing the speech
    # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
    # that could be easily picked up by the model
    chars_to_ignore_regex = (
        f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
    )
    text_column_name = data_args.text_column_name

    def remove_special_characters(batch):
        if chars_to_ignore_regex is not None:
            batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
        else:
            batch["target_text"] = batch[text_column_name].lower() + " "
        return batch

    with training_args.main_process_first(desc="dataset map special characters removal"):
        raw_datasets = raw_datasets.map(
            remove_special_characters,
            remove_columns=[text_column_name],
            desc="remove special characters from datasets",
        )

    # save special tokens for tokenizer
    word_delimiter_token = data_args.word_delimiter_token
    unk_token = data_args.unk_token
    pad_token = data_args.pad_token

    # 3. Next, let's load the config as we might need it to create
    # the tokenizer
    # load config
    config = AutoConfig.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
    )

    # 4. Next, if no tokenizer file is defined,
    # we create the vocabulary of the model by extracting all unique characters from
    # the training and evaluation datasets
    # We need to make sure that only first rank saves vocabulary
    # make sure all processes wait until vocab is created
    tokenizer_name_or_path = model_args.tokenizer_name_or_path
    tokenizer_kwargs = {}
    if tokenizer_name_or_path is None:
        # save vocab in training output dir
        tokenizer_name_or_path = training_args.output_dir

        vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")

        with training_args.main_process_first():
            if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
                os.remove(vocab_file)

        with training_args.main_process_first(desc="dataset map vocabulary creation"):
            if not os.path.isfile(vocab_file):
                os.makedirs(tokenizer_name_or_path, exist_ok=True)
                vocab_dict = create_vocabulary_from_data(
                    raw_datasets,
                    word_delimiter_token=word_delimiter_token,
                    unk_token=unk_token,
                    pad_token=pad_token,
                )

                # save vocab dict to be loaded into tokenizer
                with open(vocab_file, "w") as file:
                    json.dump(vocab_dict, file)

        # if tokenizer has just been created
        # it is defined by `tokenizer_class` if present in config else by `model_type`
        tokenizer_kwargs = {
            "config": config if config.tokenizer_class is not None else None,
            "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
            "unk_token": unk_token,
            "pad_token": pad_token,
            "word_delimiter_token": word_delimiter_token,
        }

    # 5. Now we can instantiate the feature extractor, tokenizer and model
    # Note for distributed training, the .from_pretrained methods guarantee that only
    # one local process can concurrently download model & vocab.

    # load feature_extractor and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_name_or_path,
        use_auth_token=data_args.use_auth_token,
        **tokenizer_kwargs,
    )
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
    )

    # adapt config
    config.update(
        {
            "feat_proj_dropout": model_args.feat_proj_dropout,
            "attention_dropout": model_args.attention_dropout,
            "hidden_dropout": model_args.hidden_dropout,
            "final_dropout": model_args.final_dropout,
            "mask_time_prob": model_args.mask_time_prob,
            "mask_time_length": model_args.mask_time_length,
            "mask_feature_prob": model_args.mask_feature_prob,
            "mask_feature_length": model_args.mask_feature_length,
            "gradient_checkpointing": training_args.gradient_checkpointing,
            "layerdrop": model_args.layerdrop,
            "ctc_loss_reduction": model_args.ctc_loss_reduction,
            "pad_token_id": tokenizer.pad_token_id,
            "vocab_size": len(tokenizer),
            "activation_dropout": model_args.activation_dropout,
        }
    )

    # create model
    model = AutoModelForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        config=config,
        use_auth_token=data_args.use_auth_token,
    )

    # freeze encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
    # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
    # so that we just need to set the correct target sampling rate and normalize the input
    # via the `feature_extractor`

    # make sure that dataset decodes audio with correct sampling rate
    dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
    if dataset_sampling_rate != feature_extractor.sampling_rate:
        raw_datasets = raw_datasets.cast_column(
            data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
        )

    # derive max & min input length for sample rate & max duration
    max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
    min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
    audio_column_name = data_args.audio_column_name
    num_workers = data_args.preprocessing_num_workers

    # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
    phoneme_language = data_args.phoneme_language

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def prepare_dataset(batch):
        # load audio
        sample = batch[audio_column_name]

        inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
        batch["input_values"] = inputs.input_values[0]
        batch["input_length"] = len(batch["input_values"])

        # encode targets
        additional_kwargs = {}
        if phoneme_language is not None:
            additional_kwargs["phonemizer_lang"] = phoneme_language

        batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
        return batch

    with training_args.main_process_first(desc="dataset map preprocessing"):
        vectorized_datasets = raw_datasets.map(
            prepare_dataset,
            remove_columns=next(iter(raw_datasets.values())).column_names,
            num_proc=num_workers,
            desc="preprocess datasets",
        )

    def is_audio_in_length_range(length):
        return length > min_input_length and length < max_input_length

    # filter data that is shorter than min_input_length
    vectorized_datasets = vectorized_datasets.filter(
        is_audio_in_length_range,
        num_proc=num_workers,
        input_columns=["input_length"],
    )

    # 7. Next, we can prepare the training.
    # Let's use word error rate (WER) as our evaluation metric,
    # instantiate a data collator and the trainer

    # Define evaluation metrics during training, *i.e.* word error rate, character error rate
    eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}

    # for large datasets it is advised to run the preprocessing on a
    # single machine first with ``args.preprocessing_only`` since there will mostly likely
    # be a timeout when running the script in distributed mode.
    # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
    # cached dataset
    if data_args.preprocessing_only:
        logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
        return

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id

        pred_str = tokenizer.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)

        metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}

        return metrics

    # Now save everything to be able to create a single processor later
    if is_main_process(training_args.local_rank):
        # save feature extractor, tokenizer and config
        feature_extractor.save_pretrained(training_args.output_dir)
        tokenizer.save_pretrained(training_args.output_dir)
        config.save_pretrained(training_args.output_dir)

    try:
        processor = AutoProcessor.from_pretrained(training_args.output_dir)
    except (OSError, KeyError):
        warnings.warn(
            "Loading a processor from a feature extractor config that does not"
            " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
            " attribute to your `preprocessor_config.json` file to suppress this warning: "
            " `'processor_class': 'Wav2Vec2Processor'`",
            FutureWarning,
        )
        processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)

    # Instantiate custom data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor)

    decay_parameters = get_parameter_names(model, [torch.nn.LayerNorm])
    decay_parameters = [name for name in decay_parameters if "bias" not in name]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if n in decay_parameters],
            "weight_decay": training_args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if n not in decay_parameters],
            "weight_decay": 0.0,
        },
    ]


    # Initialize Trainer
    trainer = Trainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
        eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
        tokenizer=feature_extractor
    )

    # 8. Finally, we can start training

    # Training
    if training_args.do_train:

        # use last checkpoint if exist
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples
            if data_args.max_train_samples is not None
            else len(vectorized_datasets["train"])
        )
        metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = (
            data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
        )
        metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "speech-recognition",
        "tags": ["automatic-speech-recognition", data_args.dataset_name, "robust-speech-event", "bn"],
        "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
        "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
    }
    if "common_voice" in data_args.dataset_name:
        kwargs["language"] = config_name

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)

    return results


if __name__ == "__main__":
    main()