Files changed (1)
  1. README.md +18 -0
README.md ADDED
@@ -0,0 +1,18 @@
+ import torch
+ from transformers import AutoTokenizer, AutoModelForQuestionAnswering
+ # load the tokenizer and the fine-tuned legal QA model, caching them under /E/HUG_Models
+ tokenizer = AutoTokenizer.from_pretrained("atharvamundada99/bert-large-question-answering-finetuned-legal", cache_dir="/E/HUG_Models")
+ model = AutoModelForQuestionAnswering.from_pretrained("atharvamundada99/bert-large-question-answering-finetuned-legal", cache_dir="/E/HUG_Models")
+
+ def get_answer(question, context):
+     inputs = tokenizer(question, context, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model(**inputs)
+     # the answer span runs from the most likely start token to the most likely end token
+     answer_start_index = outputs.start_logits.argmax()
+     answer_end_index = outputs.end_logits.argmax()
+     predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
+     answer = tokenizer.decode(predict_answer_tokens, skip_special_tokens=True)
+     return answer
+ print(get_answer("What is your name", "My name is JACK"))
+ # Output: JACK
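For comparison, the same extractive question answering can be done with the `transformers` `pipeline` helper, which wraps the tokenization and span decoding steps shown in the README above. This is a minimal sketch, not part of the committed file; it assumes the same model id, and the local `cache_dir` is omitted for brevity.

```python
from transformers import pipeline

# question-answering pipeline backed by the same fine-tuned legal BERT checkpoint
qa = pipeline(
    "question-answering",
    model="atharvamundada99/bert-large-question-answering-finetuned-legal",
)

result = qa(question="What is your name", context="My name is JACK")
print(result["answer"])  # expected: JACK
```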