import torch
import gradio as gr
from transformers import pipeline

# Use the first GPU if one is available, otherwise fall back to CPU.
device = 0 if torch.cuda.is_available() else "cpu"
MODEL_ID = "jvalero/wav2vec2-base-music_genre_classifier"
# Audio-classification pipeline built from the fine-tuned wav2vec2 checkpoint.
pipe = pipeline(
    task="audio-classification",
    model=MODEL_ID,
    chunk_length_s=30,
    device=device,
)
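# For reference, a raw pipeline call returns a ranked list of score/label
# dicts (values below are illustrative, not real output):
#
#     pipe("./example1.mp3")
#     # -> [{"label": "LABEL_4", "score": 0.91}, {"label": "LABEL_6", "score": 0.05}, ...]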
# Map the model's raw output labels to human-readable genre names.
id2label = {
    "LABEL_0": "drumbass",
    "LABEL_1": "dubtechno",
    "LABEL_2": "dubstep",
    "LABEL_3": "hardcore_breaks",
    "LABEL_4": "house",
    "LABEL_5": "psytrance",
    "LABEL_6": "techno",
    "LABEL_7": "ukgarage",
}
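# A minimal alternative sketch: the checkpoint's own label names can be
# inspected via the model config (assumes a standard transformers config):
#
#     print(pipe.model.config.id2label)  # e.g. {0: "LABEL_0", 1: "LABEL_1", ...}
#
# If the config stored genre names directly, the pipeline would already
# return them and the manual mapping above would be unnecessary.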
def get_edm(filepath):
    """Classify an audio file and return its predicted EDM genre."""
    output = pipe(filepath)
    # Predictions come back sorted by score; take the top one.
    return id2label[output[0]["label"]]
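# A sketch of an alternative, if the gr.Label output should show confidence
# bars for every genre rather than only the top prediction (gr.Label accepts
# a {label: confidence} dict; get_edm_scores is a hypothetical name):
#
#     def get_edm_scores(filepath):
#         output = pipe(filepath, top_k=8)
#         return {id2label[p["label"]]: p["score"] for p in output}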
demo = gr.Blocks()
file_classify = gr.Interface(
    fn=get_edm,
    inputs=[
        gr.Audio(sources="upload", label="Audio file", type="filepath"),
    ],
    outputs="label",
    title="EDM genre classifier",
    description=(
        "Get the genre of your song! The demo uses the checkpoint"
        f" [{MODEL_ID}](https://huggingface.co/{MODEL_ID}) and 🤗 Transformers to classify audio files"
        " of arbitrary length. \nThe audio will be classified into one of the following: ['drumbass', 'dubtechno', 'dubstep', 'hardcore_breaks', 'house', 'psytrance', 'techno', 'ukgarage']"
    ),
    examples=[
        ["./example1.mp3"],
        ["./example2.mp3"],
        ["./example3.mp3"],
        ["./example4.mp3"],
        ["./example5.mp3"],
        ["./example6.mp3"],
    ],
    cache_examples=True,
    allow_flagging="never",
)
with demo:
    gr.TabbedInterface([file_classify], ["EDM Genre Classifier"])

demo.launch()