patrickvonplaten committed
Commit 9515498 • Parent(s): ef0c53d
up

- run_convert.sh +16 -0
- run_forward.py +152 -0
run_convert.sh
ADDED
@@ -0,0 +1,16 @@
#!/usr/bin/env bash
hf_name=${1}
ckpt=${2}
dict=${3}

curPath=$(pwd)

cp ${dict} ${curPath}/data/temp/dict.ltr.txt

# load a config that is equal to the config of the model you wish to convert
python -c "from transformers import Wav2Vec2Config; config = Wav2Vec2Config.from_pretrained('facebook/wav2vec2-base'); config.save_pretrained('./');"

# pretrained only
eval "python ../transformers/src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py --pytorch_dump_folder ${hf_name} --checkpoint_path ${ckpt} --config_path ./config.json --not_finetuned"
# fine-tuned
#eval "python ../transformers/src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py --pytorch_dump_folder ${hf_name} --checkpoint_path ${ckpt} --config_path ./config.json --dict_path ${curPath}/data/temp/dict.ltr.txt"
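For reference, a hypothetical invocation of the script might look as follows; the model name, checkpoint path, and dictionary path are placeholders, and the relative paths above assume the script sits next to a transformers checkout with a ./data/temp directory present:

# hypothetical example: <hf_name> <checkpoint> <dict>
bash run_convert.sh ./wav2vec2-base-converted /path/to/wav2vec_small.pt /path/to/dict.ltr.txt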
run_forward.py
ADDED
@@ -0,0 +1,152 @@
#!/usr/bin/env python3
# Sanity-check a converted Hugging Face wav2vec2 model against the original fairseq
# checkpoint by comparing feature extractor, encoder, and full model outputs.
import datasets
import fairseq
import torch
import os

import soundfile as sf
from datasets import load_dataset
import sys
from shutil import copyfile
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, Wav2Vec2Model, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor

finetuned = bool(int(sys.argv[1]))
fairseq_wav2vec2_path = str(sys.argv[2])
hf_path = str(sys.argv[3])


if finetuned:
    processor = Wav2Vec2Processor.from_pretrained(hf_path)
    model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [fairseq_wav2vec2_path], arg_overrides={"data": "../add_wav2vec/data/temp"}
    )
    hf_model = Wav2Vec2ForCTC.from_pretrained(hf_path)
else:
    processor = Wav2Vec2FeatureExtractor.from_pretrained(hf_path)
    model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([fairseq_wav2vec2_path])
    hf_model = Wav2Vec2Model.from_pretrained(hf_path)

model = model[0]
model.eval()


def test_feature_extractor(hf_feat_extractor, fsq_feat_extract, example_wav):
    # set hf_feat_extractor.output to dummy
    fsq_output = fsq_feat_extract(example_wav)
    hf_output = hf_feat_extractor(example_wav)

    assert (
        hf_output.shape == fsq_output.shape
    ), f"Shapes don't match. Got {hf_output.shape} for HF and {fsq_output.shape} for fsq"
    assert torch.allclose(hf_output, fsq_output, atol=1e-3)


def test_full_encoder(hf_model, fsq_model, example_wav, attention_mask):
    fsq_output = fsq_model(example_wav, padding_mask=attention_mask.ne(1), mask=False, features_only=True)["x"]
    hf_output = hf_model(example_wav, attention_mask=attention_mask)[0]

    assert (
        hf_output.shape == fsq_output.shape
    ), f"Shapes don't match. Got {hf_output.shape} for HF and {fsq_output.shape} for fsq"
    assert torch.allclose(hf_output, fsq_output, atol=1e-2)


def test_full_model(hf_model, fsq_model, example_wav, attention_mask):
    fsq_output = fsq_model(source=example_wav, padding_mask=attention_mask.ne(1))["encoder_out"]
    hf_output = hf_model(example_wav, attention_mask=attention_mask)[0].transpose(0, 1)

    assert (
        hf_output.shape == fsq_output.shape
    ), f"Shapes don't match. Got {hf_output.shape} for HF and {fsq_output.shape} for fsq"
    assert torch.allclose(hf_output, fsq_output, atol=1e-2)


def test_loss(hf_model, fsq_model, example_wav, attention_mask, target):
    from fairseq.criterions.ctc import CtcCriterion, CtcCriterionConfig
    from fairseq.tasks.audio_pretraining import AudioPretrainingConfig, AudioPretrainingTask

    audio_cfg = AudioPretrainingConfig(labels="ltr", data="./data")
    task = AudioPretrainingTask.setup_task(audio_cfg)
    ctc = CtcCriterion(CtcCriterionConfig(), task)
    fsq_model.train()

    labels_dict = processor.tokenizer(target, padding="longest", return_tensors="pt")
    labels = labels_dict.input_ids
    target_lengths = labels_dict.attention_mask.sum(-1)

    sample = {
        "net_input": {
            "source": example_wav,
            "padding_mask": attention_mask.ne(1),
        },
        "target": labels,
        "target_lengths": target_lengths,
        "id": torch.zeros((1,)),
    }

    loss, _, _ = ctc(fsq_model, sample)

    # mask padded positions with -100 so they are ignored by the HF CTC loss
    labels = labels_dict.attention_mask * labels + (1 - labels_dict.attention_mask) * -100

    hf_model.config.ctc_loss_reduction = "mean"
    hf_loss = hf_model(example_wav, attention_mask=attention_mask, labels=labels).loss

    print("Loss", loss)
    print("HF loss", hf_loss)


def test_all(example_wav, attention_mask):
    with torch.no_grad():
        if finetuned:
            test_feature_extractor(
                hf_model.wav2vec2.feature_extractor, model.w2v_encoder.w2v_model.feature_extractor, example_wav
            )
        else:
            test_feature_extractor(
                hf_model.feature_extractor, model.feature_extractor, example_wav
            )
    print("Succeeded feature extractor test")

    with torch.no_grad():
        # IMPORTANT: It is assumed that layer_norm_first is FALSE
        # This is the case for `wav2vec_small_960h.pt`, but might not be for all models
        # Adapt if necessary
        if finetuned:
            test_full_encoder(hf_model.wav2vec2, model.w2v_encoder.w2v_model, example_wav, attention_mask)
        else:
            test_full_encoder(hf_model, model, example_wav, attention_mask)
    print("Succeeded full encoder test")

    if finetuned:
        with torch.no_grad():
            # IMPORTANT: It is assumed that layer_norm_first is FALSE
            # This is the case for `wav2vec_small_960h.pt`, but might not be for all models
            # Adapt if necessary
            test_full_model(hf_model, model, example_wav, attention_mask)
        print("Succeeded full model test")


dummy_speech_data = datasets.load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")


def map_to_array(batch):
    speech_array, _ = sf.read(batch["file"])
    batch["speech"] = speech_array
    return batch


def map_to_array_mp3(batch, i):
    speech_array, sr = sf.read(f"/home/patrick/hugging_face/add_wav2vec/common_voice/cv-corpus-6.1-2020-12-11/nl/converted/sample_{i}.wav")
    batch["speech"] = speech_array
    batch["sampling_rate"] = sr
    return batch


dummy_speech_data = dummy_speech_data.map(map_to_array, remove_columns=["file"])
inputs = processor(dummy_speech_data[:3]["speech"], return_tensors="pt", padding="longest", return_attention_mask=True)

transcription = dummy_speech_data[:3]["text"]

input_values = inputs.input_values
attention_mask = inputs.attention_mask

test_all(input_values, attention_mask)
# test_loss(hf_model, model, input_values, attention_mask, transcription)
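As a usage sketch, the comparison script might be launched as below; the checkpoint paths and the pretrained-only setting are illustrative assumptions, not values taken from this commit:

# hypothetical example: <finetuned 0|1> <fairseq checkpoint> <HF model dir>
python run_forward.py 0 /path/to/wav2vec_small.pt ./wav2vec2-base-converted
# for a fine-tuned checkpoint converted together with a processor, pass 1:
# python run_forward.py 1 /path/to/wav2vec_small_960h.pt ./wav2vec2-base-960h-converted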