import gradio as gr
import torch
from transformers import pipeline
import numpy as np
import time
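
# Load both fine-tuned Lara ASR pipelines once at startup so each request only runs inference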
pipe_base = pipeline("automatic-speech-recognition", model="aitor-medrano/lara-base-pushed")
pipe_small = pipeline("automatic-speech-recognition", model="aitor-medrano/whisper-small-lara")
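
# Transcribe the recording with the selected model and measure the elapsed time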
def greet(grabacion, modelo="base"):
    inicio = time.time()
    sr, y = grabacion
    # Convert the sample array to 32-bit floats and normalise it to [-1, 1]
    y = y.astype(np.float32)
    peak = np.max(np.abs(y))
    if peak > 0:  # avoid dividing by zero on a silent recording
        y /= peak
    if modelo == "base":
        pipe = pipe_base
    else:
        modelo = "small"
        pipe = pipe_small
    result = modelo + ":" + pipe({"sampling_rate": sr, "raw": y})["text"]
    fin = time.time()
    return result, fin - inicio
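
# Gradio interface: audio recording and model dropdown in, transcription and timing out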
demo = gr.Interface(fn=greet,
                    inputs=[
                        gr.Audio(),
                        gr.Dropdown(
                            ["base", "small"], label="Modelo", info="Modelos de Lara entrenados"
                        )
                    ],
                    outputs=[
                        gr.Text(label="Salida"),
                        gr.Number(label="Tiempo")
                    ])

demo.launch()