from transformers import pipeline
import gradio as gr

pipe = pipeline(model="jbatista79/whisper-jrb-small-es")  # change to "your-username/the-name-you-picked"


def transcribe(audio):
    text = pipe(audio)["text"]
    return text


'''
iface = gr.Interface(
    fn=transcribe,
    # inputs=gr.Audio(source="microphone", type="filepath"),
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
    title="Whisper Small Spanish - Español (ES)",
    description="Realtime proof-of-concept demo for Spanish speech recognition using a fine-tuned Whisper small model. Created by Josué R. Batista - 2024-01-14",
)

iface.launch(share=True)
'''

with gr.Blocks() as app:
    with gr.Row():
        with gr.Column():
            gr.Image(
                value="lion-face-outline-cut-hi-strategia-black-strategia01-300x77.png",
                width=300,  # Adjust width as needed
                show_label=False,
                show_download_button=False,
                show_share_button=False,
            )
        with gr.Column():
            gr.Markdown("## Whisper Small Spanish - Español (ES)")
            gr.Markdown("Realtime proof-of-concept demo for Spanish speech recognition using a fine-tuned Whisper small model. Created by Josué R. Batista - 2024-01-14")
    with gr.Row():
        audio_input = gr.Audio(sources=["microphone"], type="filepath")
        output_text = gr.Textbox()
    gr.Button("Transcribe").click(fn=transcribe, inputs=audio_input, outputs=output_text)

# app.launch(share=True)
app.launch()