|
import os |
|
|
|
from datasets import load_dataset, Audio, concatenate_datasets |
|
|
|
|
|
|
|
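# Use half of the available CPU cores for dataset preprocessing workers and for the training dataloaders.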
num_proc = os.cpu_count()//2 |
|
num_dataloaders = os.cpu_count()//2 |
|
|
|
print(f"Cpu count: {os.cpu_count()}\nNum proc: {num_proc}\nNum dataloaders: {num_dataloaders}") |
|
|
|
|
|
|
|
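# Load the train/dev/test splits. The dataset identifiers and split arguments are left unspecified here
# and must be filled in for the target corpus. If the audio is not already sampled at 16 kHz, the audio
# column can be cast with `cast_column("audio", Audio(sampling_rate=16000))` to match the feature
# extractor configured below.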
train = load_dataset() |
|
dev = load_dataset() |
|
test = load_dataset() |
|
|
|
import unicodedata |
|
import re |
|
|
|
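# Normalize transcripts: lowercase, apply NFKC Unicode normalization, unify apostrophe variants,
# strip punctuation (keeping word characters, whitespace, and apostrophes), and collapse whitespace.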
def preprocess_text(batch): |
|
|
|
batch['sentence'] = batch['sentence'].lower() |
|
|
|
|
|
batch['sentence'] = unicodedata.normalize('NFKC', batch['sentence']) |
|
batch['sentence'] = re.sub(r'[\’\ʻ\ʼ\ʽ\‘]', "'", batch['sentence']) |
|
|
|
|
|
batch['sentence'] = re.sub(r'[^\w\s\']', '', batch['sentence']) |
|
batch['sentence'] = re.sub(r'_', ' ', batch['sentence']) |
|
|
|
|
|
batch['sentence'] = ' '.join(batch['sentence'].split()) |
|
|
|
return batch |
|
|
|
import librosa |
|
import numpy as np |
|
|
|
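# Compute per-example statistics used for filtering: audio duration in seconds, transcript length in
# characters, their ratio, and the approximate number of wav2vec2 feature vectors (one per 20 ms).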
def get_lens(batch):
    try:
        audio_len = librosa.get_duration(y=batch['audio']['array'], sr=batch['audio']['sampling_rate'])
    except Exception:
        # Unreadable audio: blank out the example so the checks below drop it.
        batch['audio'] = None
        audio_len = 0.0

    transcript_len = len(batch['sentence'])

    batch['audio_len'] = audio_len
    batch['transcript_len'] = transcript_len
    # Guard against empty transcripts to avoid a division by zero.
    batch['len_ratio'] = float(audio_len) / float(max(transcript_len, 1))
    batch['num_feature_vecs'] = int(np.round(audio_len * 1000 / 20))

    return batch
|
|
|
|
|
|
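# Keep examples with 1-30 s of audio, more than 10 characters of transcript, and more feature
# vectors than output characters (CTC needs at least as many input frames as output tokens).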
def data_checks(batch): |
|
audio_check = (batch['audio_len']>1.0 and batch['audio_len']<30.0) |
|
transcript_check = (batch['transcript_len']>10) |
|
|
|
input_output_ratio = float(batch['num_feature_vecs']) / float(batch['transcript_len']) |
|
input_output_ratio_check = input_output_ratio>1.0 |
|
|
|
return (audio_check and transcript_check and input_output_ratio_check) |
|
|
|
train = train.map(preprocess_text, num_proc=num_proc) |
|
dev = dev.map(preprocess_text, num_proc=num_proc) |
|
|
|
try: |
|
train = train.map(get_lens, num_proc=num_proc) |
|
except Exception:
|
train = train.map(get_lens, num_proc=4) |
|
|
|
try: |
|
dev = dev.map(get_lens, num_proc=num_proc) |
|
except Exception:
|
dev = dev.map(get_lens, num_proc=4) |
|
|
|
train = train.filter(data_checks, num_proc=num_proc) |
|
dev = dev.filter(data_checks, num_proc=num_proc) |
|
|
|
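# Remove outliers: drop examples whose audio-to-transcript length ratio is more than
# two standard deviations from the split mean.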
train_mean = np.mean(train['len_ratio']) |
|
train_std = np.std(train['len_ratio']) |
|
|
|
dev_mean = np.mean(dev['len_ratio']) |
|
dev_std = np.std(dev['len_ratio']) |
|
|
|
num_std_devs = 2 |
|
train = train.filter(lambda batch: abs(batch['len_ratio'] - train_mean) <= num_std_devs * train_std, num_proc=num_proc)

dev = dev.filter(lambda batch: abs(batch['len_ratio'] - dev_mean) <= num_std_devs * dev_std, num_proc=num_proc)
|
|
|
print(f"Train hours: {sum(train['audio_len'])/3600}\nDev hours: {sum(dev['audio_len'])/3600}") |
|
|
|
train = train.remove_columns(['audio_len', 'transcript_len', 'len_ratio', 'num_feature_vecs']) |
|
dev = dev.remove_columns(['audio_len', 'transcript_len', 'len_ratio', 'num_feature_vecs']) |
|
|
|
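# Build the CTC character vocabulary from the cleaned transcripts: collect every character seen in the
# train and dev sentences, map the space character to the word delimiter "|", and append [UNK] and [PAD].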
alphabet = set("".join(train['sentence']) + "".join(dev['sentence']))

alphabet = sorted(alphabet)
|
|
|
vocab_dict = {v: k for k, v in enumerate(alphabet)} |
|
|
|
vocab_dict["|"] = vocab_dict[" "] |
|
del vocab_dict[" "] |
|
|
|
vocab_dict["[UNK]"] = len(vocab_dict) |
|
vocab_dict["[PAD]"] = len(vocab_dict) |
|
|
|
import json |
|
|
|
with open('vocab.json', 'w') as vocab_file: |
|
json.dump(vocab_dict, vocab_file) |
|
|
|
from transformers import Wav2Vec2CTCTokenizer |
|
|
|
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|") |
|
|
|
from transformers import Wav2Vec2FeatureExtractor |
|
|
|
feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True) |
|
|
|
from transformers import Wav2Vec2Processor |
|
|
|
processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) |
|
|
|
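# Convert each example to model inputs: raw waveform -> normalized input_values via the feature
# extractor, and transcript -> label token ids via the tokenizer.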
def prepare_dataset(batch): |
|
audio = batch["audio"] |
|
batch["input_values"] = processor(audio=audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0] |
|
batch["input_length"] = len(batch["input_values"]) |
|
batch["labels"] = processor(text=batch["sentence"]).input_ids |
|
return batch |
|
|
|
try: |
|
train = train.map(prepare_dataset, remove_columns=train.column_names, num_proc=num_proc) |
|
except Exception:
|
train = train.map(prepare_dataset, remove_columns=train.column_names, num_proc=4) |
|
|
|
try: |
|
dev = dev.map(prepare_dataset, remove_columns=dev.column_names, num_proc=num_proc) |
|
except Exception:
|
dev = dev.map(prepare_dataset, remove_columns=dev.column_names, num_proc=4) |
|
|
|
import torch |
|
|
|
from dataclasses import dataclass, field |
|
from typing import Any, Dict, List, Optional, Union |
|
|
|
@dataclass |
|
class DataCollatorCTCWithPadding: |
|
""" |
|
Data collator that will dynamically pad the inputs received. |
|
Args: |
|
processor (:class:`~transformers.Wav2Vec2Processor`) |
|
            The processor used for processing the data.
|
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): |
|
Select a strategy to pad the returned sequences (according to the model's padding side and padding index) |
|
among: |
|
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single |
|
              sequence is provided).
|
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the |
|
maximum acceptable input length for the model if that argument is not provided. |
|
            * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
|
different lengths). |
|
""" |
|
|
|
processor: Wav2Vec2Processor |
|
padding: Union[bool, str] = True |
|
|
|
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: |
|
|
|
|
|
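        # Split inputs and labels since they have to be padded with different methods.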
input_features = [{"input_values": feature["input_values"]} for feature in features] |
|
label_features = [{"input_ids": feature["labels"]} for feature in features] |
|
|
|
batch = self.processor.pad( |
|
input_features=input_features, |
|
padding=self.padding, |
|
return_tensors="pt", |
|
) |
|
|
|
labels_batch = self.processor.pad( |
|
labels=label_features, |
|
padding=self.padding, |
|
return_tensors="pt", |
|
) |
|
|
|
|
|
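        # Replace padding token ids with -100 so they are ignored by the CTC loss.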
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) |
|
|
|
batch["labels"] = labels |
|
|
|
return batch |
|
|
|
data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True) |
|
|
|
"""# Model Training""" |
|
|
|
import evaluate |
|
|
|
wer_metric = evaluate.load("wer") |
|
cer_metric = evaluate.load("cer") |
|
|
|
import numpy as np |
|
|
|
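# Greedy-decode the logits (argmax over the vocabulary), map the -100 label padding back to the pad
# token id before decoding, and report word error rate (WER) and character error rate (CER).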
def compute_metrics(pred): |
|
pred_logits = pred.predictions |
|
pred_ids = np.argmax(pred_logits, axis=-1) |
|
|
|
pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id |
|
|
|
pred_str = processor.batch_decode(pred_ids) |
|
label_str = processor.batch_decode(pred.label_ids, group_tokens=False) |
|
|
|
wer = wer_metric.compute(predictions=pred_str, references=label_str) |
|
cer = cer_metric.compute(predictions=pred_str, references=label_str) |
|
|
|
return {"wer": wer, "cer": cer} |
|
|
|
from transformers import Wav2Vec2ForCTC, TrainingArguments, Trainer, EarlyStoppingCallback |
|
|
|
model_checkpoint = "facebook/wav2vec2-xls-r-300m" |
|
|
|
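# Load the pretrained XLS-R 300M checkpoint with a fresh CTC head sized to the new vocabulary;
# dropout and layerdrop are disabled, and the convolutional feature encoder is frozen below.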
model = Wav2Vec2ForCTC.from_pretrained( |
|
model_checkpoint, |
|
attention_dropout=0.0, |
|
hidden_dropout=0.0, |
|
feat_proj_dropout=0.0, |
|
mask_time_prob=0.05, |
|
layerdrop=0.0, |
|
ctc_loss_reduction="mean", |
|
pad_token_id=processor.tokenizer.pad_token_id, |
|
vocab_size=len(processor.tokenizer), |
|
) |
|
|
|
model.freeze_feature_encoder() |
|
|
|
import wandb |
|
|
|
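# Run metadata and training hyperparameters. These are intentionally left unset here and must be
# filled in before launching a run (eval_batch_size is derived from batch_size).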
dataset = None |
|
language = None |
|
sample_hours = None |
|
version = None |
|
batch_size = None |
|
grad_acc = 1 |
|
eval_batch_size = batch_size//2 |
|
epochs = None |
|
output_dir = f"{model_checkpoint.split('/')[-1]}-{dataset}-{language}-{sample_hours}hrs-{version}" |
|
|
|
wandb.init( |
|
project="ASR Africa", |
|
entity="asr-africa-research-team", |
|
name=output_dir, |
|
) |
|
|
|
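# Epoch-level evaluation, logging, and checkpointing; keep the two most recent checkpoints, reload the
# best model by WER at the end, and push checkpoints to a private Hub repo. Note that learning_rate
# and warmup_ratio are left as None placeholders and must be set before training.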
training_args = TrainingArguments( |
|
output_dir=output_dir, |
|
group_by_length=True, |
|
per_device_train_batch_size=batch_size, |
|
per_device_eval_batch_size=eval_batch_size, |
|
gradient_accumulation_steps=grad_acc, |
|
eval_strategy="epoch", |
|
logging_strategy="epoch", |
|
save_strategy="epoch", |
|
num_train_epochs=epochs, |
|
gradient_checkpointing=True, |
|
fp16=True, |
|
learning_rate=None, |
|
lr_scheduler_type='linear', |
|
warmup_ratio=None, |
|
save_total_limit=2, |
|
load_best_model_at_end=True, |
|
metric_for_best_model="wer", |
|
greater_is_better=False, |
|
optim='adamw_torch', |
|
push_to_hub=True, |
|
hub_model_id=f"asr-africa/{output_dir}", |
|
hub_private_repo=True, |
|
dataloader_num_workers=num_dataloaders, |
|
) |
|
|
|
trainer = Trainer( |
|
model=model, |
|
data_collator=data_collator, |
|
args=training_args, |
|
compute_metrics=compute_metrics, |
|
train_dataset=train, |
|
eval_dataset=dev, |
|
tokenizer=processor.feature_extractor, |
|
callbacks=[ |
|
EarlyStoppingCallback( |
|
early_stopping_patience=10, |
|
early_stopping_threshold=1e-3 |
|
) |
|
], |
|
) |
|
|
|
trainer.train() |
|
|
|
kwargs = { |
|
"dataset_tags": "", |
|
"dataset": "", |
|
"language": "", |
|
"model_name": "", |
|
"finetuned_from": model_checkpoint, |
|
"tasks": "automatic-speech-recognition", |
|
} |
|
|
|
trainer.push_to_hub(**kwargs) |
|
|
|
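# Evaluate on a combined held-out test set: extra test sets (identifiers left unspecified) are
# concatenated with the original test split and run through the same cleaning and filtering pipeline.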
other_test_dataset_1 = load_dataset() |
|
other_test_dataset_2 = load_dataset() |
|
|
|
test = concatenate_datasets([test, other_test_dataset_1, other_test_dataset_2]).shuffle(42) |
|
|
|
test = test.map(preprocess_text, num_proc=num_proc) |
|
|
|
try: |
|
test = test.map(get_lens, num_proc=num_proc) |
|
except Exception:
|
test = test.map(get_lens, num_proc=4) |
|
|
|
test = test.filter(data_checks, num_proc=num_proc) |
|
|
|
test_mean = np.mean(test['len_ratio']) |
|
test_std = np.std(test['len_ratio']) |
|
num_std_devs = 2 |
|
test = test.filter(lambda batch: abs(batch['len_ratio'] - test_mean) <= num_std_devs * test_std, num_proc=num_proc)
|
|
|
print(f"Test hours: {sum(test['audio_len'])/3600}") |
|
|
|
test = test.remove_columns(['audio_len', 'transcript_len', 'len_ratio', 'num_feature_vecs']) |
|
|
|
try: |
|
test = test.map(prepare_dataset, remove_columns=test.column_names, num_proc=num_proc) |
|
except: |
|
test = test.map(prepare_dataset, remove_columns=test.column_names, num_proc=4) |
|
|
|
results = trainer.evaluate(eval_dataset=test, metric_key_prefix="test") |
|
print(results) |
|
|
|
wandb.log(results) |
|
|
|
train.cleanup_cache_files() |
|
dev.cleanup_cache_files() |
|
test.cleanup_cache_files() |
|
|
|
torch.cuda.empty_cache() |