# large/handler.py
from typing import Dict

import torch
from transformers import (
    AutomaticSpeechRecognitionPipeline,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)
from transformers.pipelines.audio_utils import ffmpeg_read

SAMPLE_RATE = 16000


class EndpointHandler:
    def __init__(self, path=""):
        # load the Whisper processor and model
        self.processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
        self.model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")
        # force Danish transcription output from the multilingual model
        self.forced_decoder_ids = self.processor.get_decoder_prompt_ids(language="danish", task="transcribe")
        # use the speech-recognition pipeline (not audio classification) to transcribe
        self.pipeline = AutomaticSpeechRecognitionPipeline(
            model=self.model,
            tokenizer=self.processor.tokenizer,
            feature_extractor=self.processor.feature_extractor,
            device=0 if torch.cuda.is_available() else -1,
        )
    def __call__(self, data: Dict[str, bytes]) -> Dict[str, str]:
        """
        Args:
            data (:obj:`dict`):
                includes the raw audio file as bytes under the "inputs" key
        Return:
            A :obj:`dict` containing the transcribed text
        """
        # process input: decode the raw audio bytes into a 16 kHz waveform
        inputs = data.pop("inputs", data)
        audio_nparray = ffmpeg_read(inputs, sampling_rate=SAMPLE_RATE)

        # run inference, forcing Danish transcription
        result = self.pipeline(
            audio_nparray,
            generate_kwargs={"forced_decoder_ids": self.forced_decoder_ids},
        )

        # postprocess the prediction
        return {"txt": result["text"]}
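

# Minimal local smoke test (a sketch, not part of the endpoint contract):
# "sample.wav" is a placeholder path. In production, Inference Endpoints
# instantiate EndpointHandler once and pass the request body as raw bytes
# under the "inputs" key, as done below.
if __name__ == "__main__":
    handler = EndpointHandler()
    with open("sample.wav", "rb") as f:
        audio_bytes = f.read()
    print(handler({"inputs": audio_bytes}))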