# Hugging Face Space: speech-to-text demo (Whisper small + Gradio).
# NOTE(review): this header replaces web-scrape residue that was not valid
# Python — Space status ("Runtime error"), file size (900 Bytes), git
# revision hashes (fa081e7 / 470a6ad / 7d47ef1 / 80c7a36), and a
# line-number gutter. Converted to comments so the module can be imported.
from transformers import pipeline
# Module-level ASR pipeline: instantiating it loads (and on first run
# downloads) the openai/whisper-small checkpoint, so importing this module
# is slow and needs network/disk access.
pipe = pipeline('automatic-speech-recognition', model='openai/whisper-small')
def transcribe_speech(filepath):
    """Transcribe an audio file to English text with the Whisper pipeline.

    Parameters
    ----------
    filepath : str
        Path to the audio file to transcribe.

    Returns
    -------
    str
        The transcribed text.
    """
    # 30-second chunking with batching lets the pipeline process audio
    # longer than the model's native input window.
    result = pipe(
        filepath,
        max_new_tokens=256,
        generate_kwargs={"task": "transcribe", "language": "english"},
        chunk_length_s=30,
        batch_size=8,
    )
    return result["text"]
import gradio as gr
# Top-level Blocks container; the tabbed UI is attached to it inside the
# `with demo:` context further down.
demo = gr.Blocks()
# Interface for live microphone input. In Gradio 4.x, `sources` is documented
# as a *list* of permitted input sources; passing a bare string can fail
# component validation, so it is wrapped in a list here.
mic_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
)
# Interface for uploaded audio files. As with the microphone interface,
# `sources` is passed as a list per the Gradio 4.x `Audio` API.
file_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["upload"], type="filepath"),
    outputs="text",
)
# Mount the two interfaces as tabs inside the Blocks container, then start
# the Gradio server. (A stray trailing "|" scrape artifact was removed from
# the launch line — it made the original a syntax error.)
with demo:
    gr.TabbedInterface(
        [mic_transcribe, file_transcribe],
        ["Transcribe Microphone", "Transcribe Audio File"],
    )

demo.launch()