huzy0 committed
Commit a225830 · verified · 1 Parent(s): f903aee

Upload feature extractor

preprocessor_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "auto_map": {
+     "AutoFeatureExtractor": "processing_bestrq_conformer.ModifiedWhisperFeatureExtractor"
+   },
+   "chunk_length": 30,
+   "feature_extractor_type": "ModifiedWhisperFeatureExtractor",
+   "feature_size": 80,
+   "hop_length": 160,
+   "n_fft": 400,
+   "n_samples": 480000,
+   "nb_max_frames": 3000,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": false,
+   "sampling_rate": 16000
+ }
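The `auto_map` entry points `AutoFeatureExtractor` at the custom class defined in `processing_bestrq_conformer.py` below, so the extractor loads through the standard auto classes with remote code enabled. A minimal loading sketch (the repo id is a placeholder, not taken from this commit):

# Loading sketch; "user/repo" is a hypothetical repository id.
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained(
    "user/repo",             # placeholder for the repository containing these two files
    trust_remote_code=True,  # allows auto_map to resolve ModifiedWhisperFeatureExtractor
)
print(type(feature_extractor).__name__)  # ModifiedWhisperFeatureExtractor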
processing_bestrq_conformer.py ADDED
@@ -0,0 +1,324 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Feature extractor class for MERaLiON-SpeechEncoder, modified from the original WhisperFeatureExtractor
+ """
+
+ from typing import List, Optional, Union
+
+ import numpy as np
+
+ from transformers import is_torch_available
+ from transformers.audio_utils import mel_filter_bank, spectrogram, window_function
+ from transformers.feature_extraction_sequence_utils import SequenceFeatureExtractor
+ from transformers.feature_extraction_utils import BatchFeature
+ from transformers.utils import TensorType, logging
+
+
+ if is_torch_available():
+     import torch
+
+ logger = logging.get_logger(__name__)
+
+
+ class ModifiedWhisperFeatureExtractor(SequenceFeatureExtractor):
+     r"""
+     Constructs a modified Whisper feature extractor.
+
+     This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
+     most of the main methods. Users should refer to this superclass for more information regarding those methods.
+
+     This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the `Short Time
+     Fourier Transform`, which should match pytorch's `torch.stft` equivalent.
+
+     Differences from WhisperFeatureExtractor:
+     - mel_filter_bank
+         - norm: "slaney" -> None
+         - mel_scale: "slaney" -> "htk"
+     - still uses log scaling and clamp but removes the additional min-max/mean normalization
+
+     Args:
+         feature_size (`int`, *optional*, defaults to 80):
+             The feature dimension of the extracted features.
+         sampling_rate (`int`, *optional*, defaults to 16000):
+             The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
+         hop_length (`int`, *optional*, defaults to 160):
+             Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
+         chunk_length (`int`, *optional*, defaults to 30):
+             The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio
+             sequences.
+         n_fft (`int`, *optional*, defaults to 400):
+             Size of the Fourier transform.
+         padding_value (`float`, *optional*, defaults to 0.0):
+             Padding value used to pad the audio. Should correspond to silences.
+     """
+
+     model_input_names = ["input_values"]
+
+     def __init__(
+         self,
+         feature_size=80,
+         sampling_rate=16000,
+         hop_length=160,
+         chunk_length=30,
+         n_fft=400,
+         padding_value=0.0,
+         return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
+         **kwargs,
+     ):
+         super().__init__(
+             feature_size=feature_size,
+             sampling_rate=sampling_rate,
+             padding_value=padding_value,
+             return_attention_mask=return_attention_mask,
+             **kwargs,
+         )
+         self.n_fft = n_fft
+         self.hop_length = hop_length
+         self.chunk_length = chunk_length
+         self.n_samples = chunk_length * sampling_rate
+         self.nb_max_frames = self.n_samples // hop_length
+         self.sampling_rate = sampling_rate
+         self.mel_filters = mel_filter_bank(
+             num_frequency_bins=1 + n_fft // 2,
+             num_mel_filters=feature_size,
+             min_frequency=0.0,
+             max_frequency=8000.0,
+             sampling_rate=sampling_rate,
+             norm=None,
+             mel_scale="htk",
+         )
+
+     def _np_extract_fbank_features(self, waveform_batch: np.array, device: str) -> np.ndarray:
+         """
+         Compute the log-mel spectrogram of the provided audio, giving results similar to Whisper's original torch
+         implementation within a 1e-5 tolerance.
+         """
+         if device != "cpu":
+             raise ValueError(
+                 f"Got device `{device}` for feature extraction, but feature extraction on CUDA accelerator "
+                 "devices requires torch, which is not installed. Either set `device='cpu'`, or "
+                 "install torch according to the official instructions: https://pytorch.org/get-started/locally/"
+             )
+         log_spec_batch = []
+         for waveform in waveform_batch:
+             log_spec = spectrogram(
+                 waveform,
+                 window_function(self.n_fft, "hann"),
+                 frame_length=self.n_fft,
+                 hop_length=self.hop_length,
+                 power=2.0,
+                 mel_filters=self.mel_filters,
+                 log_mel="log10",
+             )
+             log_spec = log_spec[:, :-1]  # drop the last frame so the output matches the torch STFT path
+
+             log_spec_batch.append(log_spec)
+         log_spec_batch = np.array(log_spec_batch)
+         return log_spec_batch
+
+     def _torch_extract_fbank_features(self, waveform: np.array, device: str = "cpu") -> np.ndarray:
+         """
+         Compute the log-mel spectrogram of the audio using PyTorch's GPU-accelerated STFT implementation with batching,
+         yielding results similar to the CPU computation within a 1e-5 tolerance.
+         """
+         waveform = torch.from_numpy(waveform).type(torch.float32)
+
+         window = torch.hann_window(self.n_fft)
+         if device != "cpu":
+             waveform = waveform.to(device)
+             window = window.to(device)
+         stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)
+         magnitudes = stft[..., :-1].abs() ** 2
+
+         mel_filters = torch.from_numpy(self.mel_filters).type(torch.float32)
+         if device != "cpu":
+             mel_filters = mel_filters.to(device)
+         mel_spec = mel_filters.T @ magnitudes
+
+         log_spec = torch.clamp(mel_spec, min=1e-10).log10()
+
+         if device != "cpu":
+             log_spec = log_spec.detach().cpu()
+         return log_spec.numpy()
+
+     @staticmethod
+     # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
+     def zero_mean_unit_var_norm(
+         input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
+     ) -> List[np.ndarray]:
+         """
+         Every array in the list is normalized to have zero mean and unit variance
+         """
+         if attention_mask is not None:
+             attention_mask = np.array(attention_mask, np.int32)
+             normed_input_values = []
+
+             for vector, length in zip(input_values, attention_mask.sum(-1)):
+                 normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
+                 if length < normed_slice.shape[0]:
+                     normed_slice[length:] = padding_value
+
+                 normed_input_values.append(normed_slice)
+         else:
+             normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
+
+         return normed_input_values
+
+     def __call__(
+         self,
+         raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+         truncation: bool = True,
+         pad_to_multiple_of: Optional[int] = None,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         return_attention_mask: Optional[bool] = None,
+         padding: Optional[str] = "max_length",
+         max_length: Optional[int] = None,
+         sampling_rate: Optional[int] = None,
+         do_normalize: Optional[bool] = None,
+         device: Optional[str] = "cpu",
+         return_token_timestamps: Optional[bool] = None,
+         **kwargs,
+     ) -> BatchFeature:
+         """
+         Main method to featurize and prepare one or several sequence(s) for the model. The implementation uses PyTorch
+         for the STFT computation if available, otherwise a slower NumPy-based one.
+
+         Args:
+             raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
+                 The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
+                 values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
+                 stereo, i.e. single float per timestep.
+             truncation (`bool`, *optional*, defaults to `True`):
+                 Activates truncation to cut input sequences longer than *max_length* to *max_length*.
+             pad_to_multiple_of (`int`, *optional*, defaults to `None`):
+                 If set, will pad the sequence to a multiple of the provided value.
+
+                 This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+                 `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
+             return_attention_mask (`bool`, *optional*):
+                 Whether to return the attention mask. If left to the default, will return the attention mask according
+                 to the specific feature_extractor's default.
+
+                 [What are attention masks?](../glossary#attention-mask)
+
+                 <Tip>
+
+                 For Whisper models, `attention_mask` should always be passed for batched inference, to avoid subtle
+                 bugs.
+
+                 </Tip>
+
+             return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                 If set, will return tensors instead of lists of python integers. Acceptable values are:
+
+                 - `'tf'`: Return TensorFlow `tf.constant` objects.
+                 - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                 - `'np'`: Return Numpy `np.ndarray` objects.
+             sampling_rate (`int`, *optional*):
+                 The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
+                 `sampling_rate` at the forward call to prevent silent errors and to allow the automatic speech
+                 recognition pipeline to work correctly.
+             padding_value (`float`, *optional*, defaults to 0.0):
+                 The value that is used to fill the padding values / vectors.
+             do_normalize (`bool`, *optional*, defaults to `False`):
+                 Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly
+                 improve the performance of the model.
+             device (`str`, *optional*, defaults to `'cpu'`):
+                 Specifies the device for the computation of the log-mel spectrogram of the audio signals in the
+                 `_torch_extract_fbank_features` method (e.g., "cpu", "cuda").
+             return_token_timestamps (`bool`, *optional*, defaults to `None`):
+                 Whether or not to return the number of frames of the input raw_speech. These num_frames can be used by
+                 the model to compute word level timestamps.
+         """
+
+         if sampling_rate is not None:
+             if sampling_rate != self.sampling_rate:
+                 raise ValueError(
+                     f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
+                     f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
+                     f" was sampled with {self.sampling_rate} and not {sampling_rate}."
+                 )
+         else:
+             logger.warning(
+                 "It is strongly recommended to pass the `sampling_rate` argument to this function. "
+                 "Failing to do so can result in silent errors that might be hard to debug."
+             )
+
+         is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+         if is_batched_numpy and len(raw_speech.shape) > 2:
+             raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+         is_batched = is_batched_numpy or (
+             isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+         )
+
+         if is_batched:
+             raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
+         elif not is_batched and not isinstance(raw_speech, np.ndarray):
+             raw_speech = np.asarray(raw_speech, dtype=np.float32)
+         elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+             raw_speech = raw_speech.astype(np.float32)
+
+         # always return batch
+         if not is_batched:
+             raw_speech = [np.asarray([raw_speech]).T]
+
+         batched_speech = BatchFeature({"input_values": raw_speech})
+
+         # convert into correct format for padding
+         padded_inputs = self.pad(  # Whisper pads first and then transforms, while we do the reverse
+             batched_speech,
+             padding=padding,
+             max_length=max_length if max_length else self.n_samples,
+             truncation=truncation,
+             pad_to_multiple_of=pad_to_multiple_of,
+             return_attention_mask=return_attention_mask or do_normalize,
+         )
+
+         # zero-mean and unit-variance normalization
+         if do_normalize:
+             padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
+                 padded_inputs["input_values"],
+                 attention_mask=padded_inputs["attention_mask"],
+                 padding_value=self.padding_value,
+             )
+             padded_inputs["input_values"] = np.stack(padded_inputs["input_values"], axis=0)
+
+         # make sure list is in array format
+         input_values = padded_inputs.get("input_values").transpose(2, 0, 1)
+
+         extract_fbank_features = (
+             self._torch_extract_fbank_features if is_torch_available() else self._np_extract_fbank_features
+         )
+         input_values = extract_fbank_features(input_values[0], device)
+
+         if isinstance(input_values[0], list):
+             padded_inputs["input_values"] = [np.asarray(feature, dtype=np.float32) for feature in input_values]
+         else:
+             padded_inputs["input_values"] = input_values
+
+         if return_attention_mask:
+             # rescale from sample length (480000) to feature length (3000)
+             padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
+
+         if return_token_timestamps is not None:
+             padded_inputs["num_frames"] = [len(raw_speech_i) // self.hop_length for raw_speech_i in raw_speech]
+
+         if return_tensors is not None:
+             padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
+
+         return padded_inputs
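
For reference, a usage sketch of the extractor on raw audio, assuming `feature_extractor` was loaded as in the earlier sketch. With the defaults above (`chunk_length=30`, `sampling_rate=16000`, `hop_length=160`), every clip is padded to `n_samples` (480000 samples) before the STFT, so each clip should yield `feature_size` mel bins by `nb_max_frames` frames; the synthetic waveforms are illustrative only:

import numpy as np

# Two synthetic mono clips (5 s and 12 s at 16 kHz), illustrative only.
batch = [
    np.random.randn(16000 * 5).astype(np.float32),
    np.random.randn(16000 * 12).astype(np.float32),
]

features = feature_extractor(
    batch,
    sampling_rate=16000,  # pass explicitly to avoid the silent-error warning above
    return_tensors="pt",
)

# Both clips are padded to 480000 samples, giving log-mel features of shape
# (batch, feature_size, nb_max_frames) = (2, 80, 3000).
print(features["input_values"].shape)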