Add 2 models for QA task
app.py
CHANGED
@@ -120,9 +120,21 @@ m_bert_sa.load_state_dict(torch.load('bert_model_sentiment_analysis.pth', map_lo
 m_bert_sa.to(device)
 
 # Load Q&A model
-
-
-
+
+## XLM-RoBERTa-Large
+roberta_large_qa = AutoModelForQuestionAnswering.from_pretrained("HungLV2512/Vietnamese-QA-fine-tuned")
+roberta_large_qa_tokenizer = AutoTokenizer.from_pretrained("HungLV2512/Vietnamese-QA-fine-tuned")
+roberta_large_qa.to(device)
+
+## XLM-RoBERTa-Base
+roberta_base_qa = AutoModelForQuestionAnswering.from_pretrained("HungLV2512/xlm-roberta-base-fine-tuned-qa-vietnamese", output_hidden_states=True)
+roberta_base_qa_tokenizer = AutoTokenizer.from_pretrained("HungLV2512/xlm-roberta-base-fine-tuned-qa-vietnamese")
+roberta_base_qa.to(device)
+
+## Multilingual BERT
+m_bert_qa = AutoModelForQuestionAnswering.from_pretrained("HungLV2512/bert-base-multilingual-cased-fine-tuned-qa-vietnamese")
+m_bert_qa_tokenizer = AutoTokenizer.from_pretrained("HungLV2512/bert-base-multilingual-cased-fine-tuned-qa-vietnamese")
+m_bert_qa.to(device)
 
 # Load NER model
 label_map = {
@@ -244,7 +256,9 @@ def process_input(input_text, context, task):
         results["BARTPho Base"] = multitask_inference(bartpho_mt_base, bartpho_mt_base_tokenizer, input_text, "mt-vi-en", device)
         results["BARTPho Large"] = multitask_inference(bartpho_mt, bartpho_mt_tokenizer, input_text, "mt-vi-en", device)
     elif task == "Question Answering":
-        results["RoBERTa"] = qa_inference(
+        results["RoBERTa Base"] = qa_inference(roberta_base_qa, roberta_base_qa_tokenizer, input_text, context, device)
+        results["RoBERTa Large"] = qa_inference(roberta_large_qa, roberta_large_qa_tokenizer, input_text, context, device)
+        results["Multilingual BERT"] = qa_inference(m_bert_qa, m_bert_qa_tokenizer, input_text, context, device)
     elif task == "Named Entity Recognition":
         results["PhoBERT"] = ner_inference(phobert_ner, phobert_ner_tokenizer, input_text, device)
         results["PhoBERTv2"] = ner_inference(phobertv2_ner, phobertv2_ner_tokenizer, input_text, device)
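The new branches pass both the question (input_text) and a passage (context) to a qa_inference helper whose body is outside this diff. For reference, here is a minimal sketch of what such a helper typically looks like for extractive QA with AutoModelForQuestionAnswering checkpoints; the function body below is an assumption about the pattern being used, not the repository's actual implementation.

import torch

# Hypothetical sketch of the qa_inference helper referenced above; the real
# implementation in app.py is not shown in this diff.
def qa_inference(model, tokenizer, question, context, device):
    # Encode the question/passage pair; long passages are truncated to fit.
    inputs = tokenizer(question, context, return_tensors="pt",
                       truncation=True, max_length=512).to(device)
    model.eval()
    with torch.no_grad():
        outputs = model(**inputs)
    # Take the most likely start and end positions of the answer span.
    start = torch.argmax(outputs.start_logits, dim=-1).item()
    end = torch.argmax(outputs.end_logits, dim=-1).item()
    # Decode the tokens between the predicted boundaries back to text.
    answer_ids = inputs["input_ids"][0][start:end + 1]
    return tokenizer.decode(answer_ids, skip_special_tokens=True)

Under this reading, each of the three new results entries holds a decoded answer string from one checkpoint, which is also why Question Answering is the only branch of process_input that consumes the context argument.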