import gradio as gr
import pandas as pd
import torch
from youtube_transcript_api import YouTubeTranscriptApi
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
from sentence_transformers import SentenceTransformer, util
#input - video link, output - full transcript text, raw segment list, video id
def get_transcript(link):
    print("******** Inside get_transcript ********")
    print(f"link to be extracted is : {link}")
    # assumes a link of the form https://www.youtube.com/watch?v=<id>
    video_id = link.split("=")[1]
    print(f"video id extracted is : {video_id}")
    transcript = YouTubeTranscriptApi.get_transcript(video_id)
    final_transcript = ' '.join([i['text'] for i in transcript])
    return final_transcript, transcript, video_id
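# A hypothetical usage sketch (the id below is the sample video wired into this
# Space; any standard watch?v= link splits the same way):
#   full_text, segments, vid = get_transcript("https://www.youtube.com/watch?v=smUHQndcmOY")
#   segments[0]  # -> a dict such as {'text': '...', 'start': 0.0, 'duration': 3.2}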
#input - question and transcript, output - answer timestamp
def get_answers_timestamp(question, final_transcript, transcript):
    print("******** Inside get_answers_timestamp ********")
    model_ckpt = "deepset/minilm-uncased-squad2"
    tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
    context = final_transcript
    print(f"Input Question is : {question}")
    print(f"Type of transcript is : {type(context)}, Length of transcript is : {len(context)}")
    # Split the long transcript into overlapping 512-token windows. Truncating
    # only the context ("only_second") is required for a question/context pair
    # to produce overflow windows.
    inputs = tokenizer(question, context, return_overflowing_tokens=True,
                       max_length=512, stride=25, truncation="only_second")
    # Collect the context part of every window: decode each window and keep
    # the text after the first [SEP] token (the question comes before it).
    contx = []
    for window in inputs["input_ids"]:
        contx.append(tokenizer.decode(window).split('[SEP]')[1].strip())
    # Run extractive QA over every window and collect one candidate answer each.
    model = AutoModelForQuestionAnswering.from_pretrained(model_ckpt)
    pipe = pipeline("question-answering", model=model, tokenizer=tokenizer)
    lst = []
    for contexts in contx:
        lst.append(pipe(question=question, context=contexts))
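    # Each pipeline call returns a dict of the form (values illustrative):
    #   {'score': 0.97, 'start': 118, 'end': 132, 'answer': 'a funny example'}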
    # Keep the candidate answer with the highest confidence score.
    lst_scores = [dicts['score'] for dicts in lst]
    idxmax = lst_scores.index(max(lst_scores))
    sentence_for_timestamp = lst[idxmax]['answer']
    # Map the answer text back to a transcript segment via sentence-embedding
    # similarity, then back up a few segments so playback starts with context.
    dftranscript = pd.DataFrame(transcript)
    modelST = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
    embedding_1 = modelST.encode(dftranscript.text, convert_to_tensor=True)
    embedding_2 = modelST.encode(sentence_for_timestamp, convert_to_tensor=True)
    similarity_tensor = util.pytorch_cos_sim(embedding_1, embedding_2)
    idx = torch.argmax(similarity_tensor)
    # max(..., 0) guards against a negative index when the match is near the start
    start_timestamp = dftranscript.iloc[[max(int(idx) - 3, 0)]].start.values[0]
    start_timestamp = round(start_timestamp)
    return start_timestamp
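# A rough sketch of the embedding lookup above on toy data (the segment texts
# and the standalone encoder variable are invented for illustration):
#   modelST = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
#   df = pd.DataFrame([{'text': 'intro', 'start': 0.0},
#                      {'text': 'the model results', 'start': 42.0}])
#   sims = util.pytorch_cos_sim(modelST.encode(df.text.tolist(), convert_to_tensor=True),
#                               modelST.encode('what were the results?', convert_to_tensor=True))
#   df.iloc[[int(torch.argmax(sims))]].start.values[0]  # -> 42.0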
#input - video url, question, and optional sample inputs; output - embedded
#player html, the resolved question, and the resolved url
def display_vid(url, question, sample_question=None, example_video=None):
    print("******** display_vid ********")
    # fall back to the sample question when the question box is empty
    if question == '':
        question = sample_question
    # fall back to the sample video when one is ticked (an empty selection is
    # an empty list, so a truthiness check avoids indexing into [])
    if example_video:
        print(f"example_video is : {example_video}")
        url = example_video[0]
    # get transcript
    final_transcript, transcript, video_id = get_transcript(url)
    # get answer timestamp
    ans_timestamp = get_answers_timestamp(question, final_transcript, transcript)
    # embed the video so playback starts at the answer timestamp
    html_out = "<iframe width='560' height='315' src='https://www.youtube.com/embed/" + video_id + "?start=" + str(ans_timestamp) + "' title='YouTube video player' frameborder='0' allow='accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture' allowfullscreen></iframe>"
    print(f"html output is : {html_out}")
    if question == '':
        print(f"Inside display_vid(), Sample_Question coming from Dropdown is BEFORE : {sample_question}")
        sample_ques = set_example_question(sample_question)
        print(f"Inside display_vid(), Sample Question coming from Dropdown is AFTER : {sample_ques}")
    else:
        sample_ques = question
    return html_out, sample_ques, url
#input - a sample question from the dropdown, output - a gradio update for the question box
def set_example_question(sample_question):
    print("******* Inside Sample Questions ********")
    print(f"Sample Question coming from the Dropdown is : {sample_question}")
    return gr.Dropdown.update(value=sample_question)
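# Note: gr.Dropdown.update(...) returns a plain update dict (roughly
# {'__type__': 'update', 'value': <question>}) that Gradio applies to the
# output component; this Space relies on the pre-4.x component.update() API.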
demo = gr.Blocks()
with demo:
    gr.Markdown("<h1><center>Ask a Question to a YouTube Video and get the Video played from the answer timestamp</center></h1>")
    gr.Markdown(
        "<div>How many times have you watched a long video or podcast on YouTube and wished it had 'explanatory' timestamps?</div>"
        "<div>With this Space, you can provide a YouTube video link along with a question, and the app will find the timestamp of the answer and play the video from that point. Your question could be something like 'Is this xxxx thing covered in the video?', 'Does the host talk about the architecture of the model?', or 'Does the host talk about an alien doorway on Mars?'.</div><br><br><div>This app is still a bit of a work in progress with some sharp edges left, so please bear with me.<br><br></div>"
    )
    with gr.Row():
        input_url = gr.Textbox(label="Input a YouTube video link")
        input_ques = gr.Textbox(label="Ask a Question")
        output_vid = gr.HTML(label="Video will play at the answer timestamp")
    with gr.Row():
        example_question = gr.Dropdown(
            ["Choose a sample question",
             "Does the video talk about different modalities?",
             "Can the model do classification?",
             "Does the model push the state of the art in image classification?",
             "Is DeepMind copying OpenAI?",
             "Is Flamingo good enough?",
             "Has Flamingo passed the Andrej Karpathy challenge yet?",
             "Are there cool examples from Flamingo in the video?",
             "Does the video talk about cats?",
             "Any funny examples in the video?"], label="Choose a sample Question", value=None)
    with gr.Row():
        example_video = gr.CheckboxGroup(["https://www.youtube.com/watch?v=smUHQndcmOY"], label="Choose a sample YouTube video")
    b1 = gr.Button("Publish Video")
    b1.click(display_vid, inputs=[input_url, input_ques, example_question, example_video], outputs=[output_vid, input_ques, input_url])
demo.launch(enable_queue=True, debug=True)