import torch
import gradio as gr
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read
import tempfile
import os
access_token = os.getenv("access_token")

MODEL_NAME = "openai/whisper-large-v3"
BATCH_SIZE = 3

# Use the first CUDA GPU if available, otherwise run on CPU.
device = 0 if torch.cuda.is_available() else "cpu"
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    # chunk_length_s=30,
    device=device,
    token=access_token,
)
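# Note: chunk_length_s=30 is left disabled above. Enabling it would make the pipeline
# split long audio into 30 s chunks and decode them BATCH_SIZE at a time; as written,
# long inputs are handled by the pipeline's default behaviour for this checkpoint.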
def transcribe(inputs, task):
    if inputs is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
    text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task})["text"]
    return text
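# Example (sketch; "sample.wav" is a placeholder path, not shipped with this repo):
#   transcribe("sample.wav", "transcribe")  # transcription in the spoken language
#   transcribe("sample.wav", "translate")   # English translation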
# Disabled YouTube-transcription helpers (kept for reference; they would also need
# yt_dlp/youtube_dl, time, and a YT_LENGTH_LIMIT_S constant to run):
# def _return_yt_html_embed(yt_url):
#     video_id = yt_url.split("?v=")[-1]
#     HTML_str = (
#         f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
#         " </center>"
#     )
#     return HTML_str
#
# def download_yt_audio(yt_url, filename):
#     info_loader = youtube_dl.YoutubeDL()
#
#     try:
#         info = info_loader.extract_info(yt_url, download=False)
#     except youtube_dl.utils.DownloadError as err:
#         raise gr.Error(str(err))
#
#     file_length = info["duration_string"]
#     file_h_m_s = file_length.split(":")
#     file_h_m_s = [int(sub_length) for sub_length in file_h_m_s]
#
#     if len(file_h_m_s) == 1:
#         file_h_m_s.insert(0, 0)
#     if len(file_h_m_s) == 2:
#         file_h_m_s.insert(0, 0)
#     file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2]
#
#     if file_length_s > YT_LENGTH_LIMIT_S:
#         yt_length_limit_hms = time.strftime("%HH:%MM:%SS", time.gmtime(YT_LENGTH_LIMIT_S))
#         file_length_hms = time.strftime("%HH:%MM:%SS", time.gmtime(file_length_s))
#         raise gr.Error(f"Maximum YouTube length is {yt_length_limit_hms}, got {file_length_hms} YouTube video.")
#
#     ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}
#
#     with youtube_dl.YoutubeDL(ydl_opts) as ydl:
#         try:
#             ydl.download([yt_url])
#         except youtube_dl.utils.ExtractorError as err:
#             raise gr.Error(str(err))
#
# def yt_transcribe(yt_url, task, max_filesize=75.0):
#     html_embed_str = _return_yt_html_embed(yt_url)
#     with tempfile.TemporaryDirectory() as tmpdirname:
#         filepath = os.path.join(tmpdirname, "video.mp4")
#         download_yt_audio(yt_url, filepath)
#         with open(filepath, "rb") as f:
#             inputs = f.read()
#     inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate)
#     inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
#     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
#     return None  # html_embed_str, text
demo = gr.Blocks()

gradio_app = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(type="filepath"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
    ],
    outputs="text",
    # layout="horizontal",
    # theme="huggingface",
    # title="Whisper Large V3: Transcribe Audio",
    # description=(
    #     "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the OpenAI Whisper"
    #     f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
    #     " of arbitrary length."
    # ),
    # allow_flagging="never",
)
if __name__ == "__main__":
    gradio_app.launch()