import gradio as gr
from transformers import pipeline
import numpy as np

# Load the Whisper model and force it to transcribe in Portuguese.
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-medium")
transcriber.model.config.forced_decoder_ids = transcriber.tokenizer.get_decoder_prompt_ids(
    language="pt", task="transcribe"
)

def transcribe(audio):
    # Gradio delivers microphone input as a (sample_rate, numpy_array) tuple.
    sr, y = audio
    # Convert to float32 and normalize to the [-1, 1] range the pipeline expects.
    y = y.astype(np.float32)
    y /= np.max(np.abs(y))
    return transcriber({"sampling_rate": sr, "raw": y})["text"]

demo = gr.Interface(
    transcribe,
    gr.Audio(sources=["microphone"]),  # microphone input (Gradio 4+ argument name)
    "text",
)

demo.launch()
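# A minimal sketch of exercising transcribe() without the web UI, assuming a
# hypothetical 1-second 440 Hz test tone at 16 kHz (illustrative values only;
# a real speech clip would be needed for meaningful output). Uncomment and run
# before demo.launch(), since launch() blocks when run as a script:
#
#   sr = 16000
#   t = np.linspace(0, 1, sr, endpoint=False)
#   tone = (0.5 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)
#   print(transcribe((sr, tone)))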