import gradio as gr
import torch
import soundfile as sf
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
# NOTE: from_pretrained expects a local directory or a Hugging Face Hub model ID;
# a Google Drive share link cannot be loaded directly. Replace with the path/ID of your fine-tuned model.
model_path = "https://drive.google.com/drive/folders/1-CcW6f_wNoECTPIu92bnHBsJQgbdUih5?usp=sharing"
model = Wav2Vec2ForCTC.from_pretrained(model_path)
tokenizer = Wav2Vec2Tokenizer.from_pretrained(model_path)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def transcribe_audio(audio):
    # With type="filepath" below, Gradio passes the uploaded file's path as a string.
    audio_data, sample_rate = sf.read(audio)
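    # Wav2Vec2 checkpoints are typically trained on 16 kHz mono audio. If uploads may arrive at
    # other sample rates, resample first; a minimal sketch, assuming librosa is installed:
    #   audio_data = librosa.resample(audio_data, orig_sr=sample_rate, target_sr=16000)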
    inputs = tokenizer(audio_data, return_tensors="pt", padding=True, truncation=True)
    inputs = inputs.to(device)
    with torch.no_grad():
        logits = model(inputs.input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1).squeeze()
    transcription = tokenizer.decode(predicted_ids)
    return transcription
# gr.inputs / gr.outputs were removed in recent Gradio releases; use the top-level components instead.
audio_input = gr.Audio(sources=["upload"], type="filepath", label="Upload audio file")
text_output = gr.Textbox(label="Transcription")
interface = gr.Interface(
    fn=transcribe_audio,
    inputs=audio_input,
    outputs=text_output,
    title="Speech Recognition",
    description="Convert speech to text using your model",
)
interface.launch()