import gradio as gr
import time
from transformers import pipeline
import torch
# Check if a GPU is available and configure the pipeline to use it if so
use_gpu = torch.cuda.is_available()

p = pipeline(
    "automatic-speech-recognition",
    model="carlosdanielhernandezmena/wav2vec2-large-xlsr-53-faroese-100h",
    device=0 if use_gpu else -1,
)
def transcribe(audio, state="", uploaded_audio=None):
    # Prefer an uploaded file over the microphone recording
    if uploaded_audio is not None:
        audio = uploaded_audio
    if not audio:
        return state, state  # Nothing to transcribe yet
    try:
        time.sleep(3)  # Brief pause before running the model
        text = p(audio)["text"]
        state += text + "\n"
        return state, state
    except Exception as e:
        return f"An error occurred during transcription: {e}", state
gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="filepath"),
        "state",
        gr.Audio(label="Upload Audio File", type="filepath", source="upload"),
    ],
    outputs=[
        "textbox",
        "state",
    ],
    live=True,
).launch()
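
To sanity-check the model outside the Gradio UI, the same pipeline can be called directly on an audio file. This is a minimal sketch: "sample.wav" is a placeholder path, not a file from the Space, and only the model identifier used above is assumed.

from transformers import pipeline

# Build the ASR pipeline once (CPU by default; pass device=0 for a GPU)
asr = pipeline(
    "automatic-speech-recognition",
    model="carlosdanielhernandezmena/wav2vec2-large-xlsr-53-faroese-100h",
)

# Transcribe a local audio file and print the recognized text
print(asr("sample.wav")["text"])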