# Commit: add medium-aishell (cce0d1e, author: csukuangfj)
# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import lru_cache
from huggingface_hub import hf_hub_download
import sherpa_onnx
import numpy as np
from typing import Tuple
import wave
sample_rate = 16000
def read_wave(wave_filename: str) -> Tuple[np.ndarray, int]:
    """Load a mono, 16-bit PCM wave file into a normalized float array.

    Args:
      wave_filename:
        Path to a wave file. It must be single channel with 16-bit
        samples; its sample rate does not need to be 16 kHz.
    Returns:
      Return a tuple containing:
       - A 1-D np.float32 array holding the samples, normalized to
         the range [-1, 1].
       - sample rate of the wave file
    """
    with wave.open(wave_filename) as wf:
        assert wf.getnchannels() == 1, wf.getnchannels()
        assert wf.getsampwidth() == 2, wf.getsampwidth()  # width is in bytes
        raw = wf.readframes(wf.getnframes())
        pcm = np.frombuffer(raw, dtype=np.int16)
        # Map the int16 range [-32768, 32767] onto [-1, 1).
        normalized = pcm.astype(np.float32) / 32768
        return normalized, wf.getframerate()
def decode(
    recognizer: sherpa_onnx.OfflineRecognizer,
    filename: str,
) -> str:
    """Transcribe a single wave file and return the lower-cased text.

    Args:
      recognizer:
        An offline recognizer to run the decoding with.
      filename:
        Path to a mono 16-bit wave file (any sample rate).
    Returns:
      The recognition result text, converted to lower case.
    """
    samples, wave_sample_rate = read_wave(filename)
    stream = recognizer.create_stream()
    stream.accept_waveform(wave_sample_rate, samples)
    recognizer.decode_stream(stream)
    return stream.result.text.lower()
def _get_nn_model_filename(
    repo_id: str,
    filename: str,
    subfolder: str = ".",
) -> str:
    """Fetch a neural-network model file from the Hugging Face Hub.

    Returns the local filesystem path of the downloaded (or cached) file.
    """
    return hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        subfolder=subfolder,
    )
def _get_token_filename(
    repo_id: str,
    filename: str,
    subfolder: str = ".",
) -> str:
    """Fetch a token file from the Hugging Face Hub.

    Returns the local filesystem path of the downloaded (or cached) file.
    """
    return hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        subfolder=subfolder,
    )
@lru_cache(maxsize=8)
def get_pretrained_model(name: str) -> sherpa_onnx.OfflineRecognizer:
    """Build (and cache) an offline Whisper recognizer for a model name.

    Args:
      name:
        Short Whisper model name, e.g. "tiny.en" or "medium-aishell".
        It determines both the Hugging Face repo id and the file names
        of the encoder/decoder/tokens inside that repo.
    Returns:
      A sherpa_onnx.OfflineRecognizer; repeated calls with the same name
      return the cached instance.
    """
    # Keep this tuple in sync with the keys of ``whisper_models`` below.
    # Bug fix: "distil-small.en" and "distil-medium.en" appear in
    # ``whisper_models`` but were missing here, so selecting them always
    # failed this assertion.
    assert name in (
        "tiny.en",
        "base.en",
        "small.en",
        "distil-small.en",
        "medium.en",
        "distil-medium.en",
        "tiny",
        "base",
        "small",
        "medium",
        "medium-aishell",
    ), name
    full_repo_id = "csukuangfj/sherpa-onnx-whisper-" + name
    encoder = _get_nn_model_filename(
        repo_id=full_repo_id,
        filename=f"{name}-encoder.int8.onnx",
    )
    decoder = _get_nn_model_filename(
        repo_id=full_repo_id,
        filename=f"{name}-decoder.int8.onnx",
    )
    tokens = _get_token_filename(repo_id=full_repo_id, filename=f"{name}-tokens.txt")
    recognizer = sherpa_onnx.OfflineRecognizer.from_whisper(
        encoder=encoder,
        decoder=decoder,
        tokens=tokens,
        num_threads=2,
    )
    return recognizer
# Every supported model shares the same lru_cache-backed factory, so the
# mapping is just each name pointing at get_pretrained_model.
whisper_models = dict.fromkeys(
    (
        "tiny.en",
        "base.en",
        "small.en",
        "medium.en",
        "distil-medium.en",
        "tiny",
        "base",
        "small",
        "distil-small.en",
        "medium",
        "medium-aishell",
    ),
    get_pretrained_model,
)