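# Gradio demo: Georgian speech recognition with a fine-tuned Whisper small model.
# Audio can be recorded from the microphone or uploaded as a file; both inputs
# are transcribed by the same Hugging Face ASR pipeline.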
import gradio as gr
from transformers import (
    WhisperForConditionalGeneration,
    WhisperFeatureExtractor,
    WhisperTokenizer,
    pipeline,
)
checkpoint = "tsobolev/whisper-small-ka"
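# Load the feature extractor, tokenizer, and model from the fine-tuned checkpoint.
# The tokenizer is configured for Georgian transcription so its decoder prompt ids match the task.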
feature_extractor = WhisperFeatureExtractor.from_pretrained(checkpoint)
tokenizer = WhisperTokenizer.from_pretrained(checkpoint, language="georgian", task="transcribe")
model = WhisperForConditionalGeneration.from_pretrained(checkpoint)
forced_decoder_ids = tokenizer.get_decoder_prompt_ids(language="georgian", task="transcribe")
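# Build a chunked ASR pipeline: 30 s windows with a (4 s, 2 s) stride let the
# pipeline handle audio longer than Whisper's 30-second context.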
asr_pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    feature_extractor=feature_extractor,
    tokenizer=tokenizer,
    chunk_length_s=30,
    stride_length_s=(4, 2),
)
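# Transcribe a single audio file (the filepath produced by the Gradio Audio component),
# forcing Georgian transcription via the decoder prompt ids.
# Note: passing forced_decoder_ids through generate_kwargs follows the older transformers
# API; recent releases prefer language/task generation arguments instead (depends on the
# installed transformers version).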
def transcribe_ge(speech):
    text = asr_pipe(
        speech,
        generate_kwargs={"forced_decoder_ids": forced_decoder_ids},
    )["text"]
    return text
title = "Whisper small fine-tuned on the CV14 dataset"
description = """
Demo for Georgian speech-to-text transcription
"""
demo = gr.Blocks()
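# Two Interface views share the same transcription function: one records from the
# microphone, the other accepts an uploaded audio file. The `source=` argument follows
# the Gradio 3.x Audio API (newer releases use `sources=[...]`).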
mic_transcribe = gr.Interface(
    fn=transcribe_ge,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Textbox(),
    title=title,
    description=description,
)
file_transcribe = gr.Interface(
    fn=transcribe_ge,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Textbox(),
    examples=[["./example.wav"]],
    title=title,
    description=description,
)
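# Group both interfaces as tabs inside the Blocks app and launch it.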
with demo:
    gr.TabbedInterface([mic_transcribe, file_transcribe], ["Microphone", "Audio File"])
demo.launch()