import torch
import gradio as gr
from transformers import pipeline
device = 0 if torch.cuda.is_available() else "cpu"
MODEL_ID = "wav2vec2-base-vinyl_condition"
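# Load the audio-classification pipeline, using the GPU when one is available.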
pipe = pipeline(
    task="audio-classification",
    model=MODEL_ID,
    chunk_length_s=30,
    device=device,
)
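
# Classify a single audio file and return the condition label with the highest score.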
def get_vinyl_condition(filepath):
    output = pipe(
        filepath,
        batch_size=8,
    )
    return output[0]["label"]
demo = gr.Blocks()
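
# Interface for classifying an uploaded audio file.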
file_classify = gr.Interface(
    fn=get_vinyl_condition,
    inputs=[
        gr.Audio(source="upload", label="Audio file", type="filepath"),
    ],
    outputs="text",
    layout="horizontal",
    theme="huggingface",
    title="Vinyl Demo: Get Vinyl Condition",
    description=(
        "Get your vinyl condition based on the Goldmine grading standard! Demo uses the"
        f" checkpoint [{MODEL_ID}](https://huggingface.co/{MODEL_ID}) and 🤗 Transformers to get the condition of audio files"
        " of arbitrary length."
    ),
    examples=[
        ["./example.flac"],
    ],
    cache_examples=True,
    allow_flagging="never",
)
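
# Wrap the interface in a tabbed layout inside the Blocks app, then launch it.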
with demo:
    gr.TabbedInterface([file_classify], ["Classify Audio File"])
demo.launch(enable_queue=True)