import gradio as gr
import torchaudio
from transformers import pipeline

# Load the audio-classification model from the Hugging Face Hub
classifier = pipeline("audio-classification", model="Ahmed107/wav2vec2-base-eos-v5-mulaw-eos-v5-mulaw")
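
# Note: pipeline() downloads the checkpoint on first use and runs on CPU by
# default. If a GPU is available, passing device=0 moves inference onto it
# (an optional tweak, not part of the original app):
#
#   classifier = pipeline(
#       "audio-classification",
#       model="Ahmed107/wav2vec2-base-eos-v5-mulaw-eos-v5-mulaw",
#       device=0,
#   )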

# Resample audio to 16 kHz and downmix to mono if needed
def resample_audio(audio_file, target_sampling_rate=16000):
    waveform, original_sample_rate = torchaudio.load(audio_file)

    # Resample if the source rate differs from the target rate
    if original_sample_rate != target_sampling_rate:
        resampler = torchaudio.transforms.Resample(
            orig_freq=original_sample_rate, new_freq=target_sampling_rate
        )
        waveform = resampler(waveform)

    # Convert multi-channel audio to mono by averaging the channels
    if waveform.shape[0] > 1:
        waveform = waveform.mean(dim=0, keepdim=True)

    return waveform.squeeze().numpy(), target_sampling_rate
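
# Usage sketch (illustrative; "clip.wav" is a hypothetical local file): a
# 44.1 kHz stereo recording comes back as a 1-D float NumPy array at 16 kHz,
# the sampling rate wav2vec2-style checkpoints expect:
#
#   array, sr = resample_audio("clip.wav")
#   assert sr == 16000 and array.ndim == 1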

# Prediction function: resample, run the classifier, and format its output
def classify_audio(audio_file):
    # Resample to 16 kHz and downmix before inference
    resampled_audio, sampling_rate = resample_audio(audio_file)

    # The pipeline accepts a dict holding the raw array and its sampling rate
    input_audio = {"array": resampled_audio, "sampling_rate": sampling_rate}
    prediction = classifier(input_audio)

    # Return the predictions as a {label: score} dict for gr.Label
    return {entry["label"]: entry["score"] for entry in prediction}
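
# For reference, the pipeline returns a ranked list of {label, score} dicts,
# e.g. (label names here are illustrative; the real ones come from the model's
# config):
#
#   [{"label": "eos", "score": 0.97}, {"label": "not_eos", "score": 0.03}]
#
# classify_audio flattens this into {"eos": 0.97, "not_eos": 0.03}, the
# mapping format that gr.Label renders as per-class confidence bars.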

# Build the Gradio interface
def demo():
    with gr.Blocks(theme=gr.themes.Soft()) as interface:
        gr.Markdown("## Eos Audio Classification")

        # Input audio component
        with gr.Row():
            audio_input = gr.Audio(type="filepath", label="Input Audio")

        # Output label component
        with gr.Row():
            label_output = gr.Label(label="Prediction")

        # Classify button wired to the prediction function
        classify_btn = gr.Button("Classify")
        classify_btn.click(fn=classify_audio, inputs=audio_input, outputs=label_output)

    return interface

# Launch the Gradio demo
demo().launch()
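
# Optional (an assumption, not part of the original app): Blocks.queue()
# serializes concurrent requests for slower model inference, and
# launch(share=True) creates a temporary public URL when running outside
# Hugging Face Spaces:
#
#   demo().queue().launch(share=True)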