# app.py — Hugging Face Space by jvalero (commit 23b03c5, 1.45 kB)
# Music genre classification demo.
import gradio as gr
import torch
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read

# Run on the first GPU when available, otherwise on CPU.
# (transformers accepts an int device index or the string "cpu".)
device = 0 if torch.cuda.is_available() else "cpu"

MODEL_ID = "jvalero/wav2vec2-base-music_genre_classifier"

# Audio-classification pipeline; inputs longer than 30 s are processed
# in 30-second chunks.
pipe = pipeline(
    task="audio-classification",
    model=MODEL_ID,
    chunk_length_s=30,
    device=device,
)
def get_edm(filepath):
    """Classify the music genre of an audio file.

    Parameters
    ----------
    filepath : str
        Path to the audio file to classify.

    Returns
    -------
    str
        Label of the top-scoring genre prediction.
    """
    # NOTE(review): removed `max_new_tokens=256` — that is a text-generation
    # argument and has no meaning for an audio-classification pipeline.
    output = pipe(
        filepath,
        chunk_length_s=30,
        batch_size=8,
    )
    # Pipeline results are ordered by score; return the best label.
    return output[0]["label"]
# Top-level Blocks container the interface is mounted into below.
demo = gr.Blocks()

# Upload-based interface: takes an audio file path, shows the predicted genre.
# NOTE(review): title previously said "Vinyl Condition Classificator" — a
# copy-paste leftover contradicting the model (genre classifier) and the
# description's own first sentence; fixed for consistency.
file_transcribe = gr.Interface(
    fn=get_edm,
    inputs=[
        gr.Audio(sources="upload", label="Audio file", type="filepath"),
    ],
    outputs="label",
    title="Music Genre Classifier",
    description=(
        "Get the genre of your song! Demo uses the"
        f" checkpoint [{MODEL_ID}](https://huggingface.co/{MODEL_ID}) and 🤗 Transformers to classify audio files"
        " of arbitrary length. \nThe audio will be classified into one of the following: ['drumbass', 'dubtechno', 'dupstep', 'hardcore_breaks', 'house', 'psytrance', 'techno', 'ukgarage']"
    ),
    examples=[
        ["./example.mp3"],
        ["./example1.mp3"],
    ],
    cache_examples=True,
    allow_flagging="never",
)
# Mount the interface as a single tab and start the Gradio server.
# NOTE(review): tab label previously read "Get Viny Condition" — a typo'd
# copy-paste leftover; renamed to match what the app actually does.
with demo:
    gr.TabbedInterface([file_transcribe], ["Music Genre Classifier"])

demo.launch()