# Imports
import gradio as gr
import whisper
from pytube import YouTube
from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration
from wordcloud import WordCloud


class GradioInference:
    def __init__(self):
        # OpenAI's Whisper model sizes
        self.sizes = list(whisper._MODELS.keys())

        # Whisper's available languages for ASR
        self.langs = ["none"] + sorted(list(whisper.tokenizer.LANGUAGES.values()))

        # Default model size
        self.current_size = "base"
        self.loaded_model = whisper.load_model(self.current_size)

        # Initialize pytube object
        self.yt = None

        # Initialize summarization model
        self.summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

        # Initialize VoiceLabT5 model and tokenizer
        self.keyword_model = T5ForConditionalGeneration.from_pretrained(
            "Voicelab/vlt5-base-keywords"
        )
        self.keyword_tokenizer = T5Tokenizer.from_pretrained(
            "Voicelab/vlt5-base-keywords"
        )

        # Sentiment classifier
        self.classifier = pipeline("text-classification")

    def __call__(self, link, lang, size):
        """
        Run the full inference pipeline on a YouTube link.

        Accesses the YouTube video with the pytube library and downloads its
        audio, then uses the Whisper model to perform automatic speech
        recognition (i.e. speech-to-text). The transcription is then processed
        to obtain:
        - Summary: using Facebook's BART transformer.
        - Keywords: using the VoiceLabT5 keyword extractor.
        - Sentiment analysis: using Hugging Face's default sentiment classifier.
        - WordCloud: using the wordcloud python library.
        """
        if self.yt is None:
            self.yt = YouTube(link)

        # Download the YouTube audio stream with pytube
        path = self.yt.streams.filter(only_audio=True)[0].download(filename="tmp.mp4")

        if lang == "none":
            lang = None

        if size != self.current_size:
            self.loaded_model = whisper.load_model(size)
            self.current_size = size

        # Transcribe the audio extracted with pytube
        results = self.loaded_model.transcribe(path, language=lang)

        # Summarize the transcription
        transcription_summary = self.summarizer(
            results["text"], max_length=512, min_length=30, do_sample=False
        )

        # Extract keywords using VoiceLabT5
        task_prefix = "Keywords: "
        input_sequence = task_prefix + results["text"]
        input_ids = self.keyword_tokenizer(
            input_sequence,
            return_tensors="pt",
            truncation=True,  # truncate long transcripts to the model's max input length
        ).input_ids
        output = self.keyword_model.generate(
            input_ids, no_repeat_ngram_size=3, num_beams=4
        )
        predicted = self.keyword_tokenizer.decode(output[0], skip_special_tokens=True)
        keywords = [x.strip() for x in predicted.split(",") if x.strip()]

        # Sentiment label
        label = self.classifier(results["text"])[0]["label"]

        # Generate a WordCloud object and render it to an image for display
        wordcloud = WordCloud().generate(results["text"])
        wordcloud_image = wordcloud.to_image()

        return (
            results["text"],
            transcription_summary[0]["summary_text"],
            keywords,
            label,
            wordcloud_image,
        )

    def populate_metadata(self, link):
        """
        Fetch the YouTube video's thumbnail and title for display.

        params:
        - link: a YouTube URL.
        """
        self.yt = YouTube(link)
        return self.yt.thumbnail_url, self.yt.title

    def from_audio_input(self, lang, size, audio_file):
        """
        Run the inference pipeline on an uploaded audio file.

        Uses the Whisper model directly to perform automatic speech
        recognition (i.e. speech-to-text). The transcription is then processed
        to obtain:
        - Summary: using Facebook's BART transformer.
        - Keywords: using the VoiceLabT5 keyword extractor.
        - Sentiment analysis: using Hugging Face's default sentiment classifier.
        - WordCloud: using the wordcloud python library.
        """
        if lang == "none":
            lang = None

        if size != self.current_size:
            self.loaded_model = whisper.load_model(size)
            self.current_size = size

        results = self.loaded_model.transcribe(audio_file, language=lang)

        # Summarize the transcription
        transcription_summary = self.summarizer(
            results["text"], max_length=512, min_length=30, do_sample=False
        )

        # Extract keywords using VoiceLabT5
        task_prefix = "Keywords: "
        input_sequence = task_prefix + results["text"]
        input_ids = self.keyword_tokenizer(
            input_sequence,
            return_tensors="pt",
            truncation=True,  # truncate long transcripts to the model's max input length
        ).input_ids
        output = self.keyword_model.generate(
            input_ids, no_repeat_ngram_size=3, num_beams=4
        )
        predicted = self.keyword_tokenizer.decode(output[0], skip_special_tokens=True)
        keywords = [x.strip() for x in predicted.split(",") if x.strip()]

        # Sentiment label
        label = self.classifier(results["text"])[0]["label"]

        # Generate a WordCloud object and render it to an image for display
        wordcloud = WordCloud().generate(results["text"])
        wordcloud_image = wordcloud.to_image()

        return (
            results["text"],
            transcription_summary[0]["summary_text"],
            keywords,
            label,
            wordcloud_image,
        )


gio = GradioInference()
title = "Youtube Insights"
description = "Your AI-powered video analytics tool"

block = gr.Blocks()

with block as demo:
    gr.HTML(
        """
        <div style="text-align: center;">
            <h1>Youtube Insights 📹</h1>
            <p>Your AI-powered video analytics tool</p>
        </div>
""" ) with gr.Group(): with gr.Tab("From YouTube"): with gr.Box(): with gr.Row().style(equal_height=True): size = gr.Dropdown( label="Model Size", choices=gio.sizes, value="base" ) lang = gr.Dropdown( label="Language (Optional)", choices=gio.langs, value="none" ) link = gr.Textbox( label="YouTube Link", placeholder="Enter YouTube link..." ) title = gr.Label(label="Video Title") with gr.Row().style(equal_height=True): img = gr.Image(label="Thumbnail") text = gr.Textbox( label="Transcription", placeholder="Transcription Output...", lines=10, ).style(show_copy_button=True, container=True) with gr.Row().style(equal_height=True): summary = gr.Textbox( label="Summary", placeholder="Summary Output...", lines=5 ).style(show_copy_button=True, container=True) keywords = gr.Textbox( label="Keywords", placeholder="Keywords Output...", lines=5 ).style(show_copy_button=True, container=True) label = gr.Label(label="Sentiment Analysis") wordcloud_image = gr.Image() with gr.Row().style(equal_height=True): clear = gr.ClearButton( [link, title, img, text, summary, keywords, label, wordcloud_image], scale=1 ) btn = gr.Button("Get video insights", variant="primary", scale=1) btn.click( gio, inputs=[link, lang, size], outputs=[text, summary, keywords, label, wordcloud_image], ) if link: link.change(gio.populate_metadata, inputs=[link], outputs=[img, title]) with gr.Tab("From Audio file"): with gr.Box(): with gr.Row().style(equal_height=True): size = gr.Dropdown( label="Model Size", choices=gio.sizes, value="base" ) lang = gr.Dropdown( label="Language (Optional)", choices=gio.langs, value="none" ) audio_file = gr.Audio(type="filepath") with gr.Row().style(equal_height=True): text = gr.Textbox( label="Transcription", placeholder="Transcription Output...", lines=10, ).style(show_copy_button=True, container=False) with gr.Row().style(equal_height=True): summary = gr.Textbox( label="Summary", placeholder="Summary Output", lines=5 ) keywords = gr.Textbox( label="Keywords", placeholder="Keywords Output", lines=5 ) label = gr.Label(label="Sentiment Analysis") wordcloud_image = gr.Image() with gr.Row().style(equal_height=True): clear = gr.ClearButton([audio_file,text, summary, keywords, label, wordcloud_image], scale=1) btn = gr.Button( "Get video insights", variant="primary", scale=1 ) btn.click( gio.from_audio_input, inputs=[lang, size, audio_file], outputs=[text, summary, keywords, label, wordcloud_image], ) with block: gr.Markdown("### Video Examples") gr.Examples(["https://www.youtube.com/shorts/xDNzz8yAH7I"], inputs=link) gr.Markdown("About the app:") with gr.Accordion("What is YouTube Insights?", open=False): gr.Markdown( "YouTube Insights is a tool developed with academic purposes only, that creates summaries, keywords and sentiments analysis based on YouTube videos or user audio files." ) with gr.Accordion("How does it work?", open=False): gr.Markdown( "Works by using OpenAI's Whisper, BART for summarization and VoiceLabT5 for Keyword Extraction." ) gr.HTML( """
        <div style="text-align: center;">
            <p>2023 Master in Big Data & Data Science - Universidad Complutense de Madrid</p>
        </div>
""" ) demo.launch()