import gradio as gr
import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

# NOTE: in practice the token should come from an environment variable or a
# Space secret rather than being hard-coded in the source.
AUTH_TOKEN = "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"
model_checkpoint = "letrunglinh/qa_pnc"
device = 0 if torch.cuda.is_available() else -1

# Load the tokenizer and model once at startup rather than on every request.
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_auth_token=AUTH_TOKEN)
model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint, use_auth_token=AUTH_TOKEN)
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer, device=device)


def question_answer(context, question):
    # For a single question/context pair the pipeline returns a dict with
    # "answer" and "score" (not a list), so it can be indexed directly.
    result = qa_pipeline(question=question, context=context)
    return result["answer"], result["score"]


# share=True is an argument of launch(), not of the Interface constructor.
gr.Interface(
    fn=question_answer,
    inputs=["text", "text"],
    outputs=["textbox", "textbox"],
).launch(share=True)
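# Usage sketch (assumption, not part of the app): once the interface is running,
# it can be queried programmatically with gradio_client; the URL and api_name
# below are illustrative defaults, not values defined in this file.
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860")
#   answer, score = client.predict("some context passage", "some question", api_name="/predict")
#   print(answer, score)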