import os
import re

import numpy as np
import streamlit as st
import torch
from spacy import displacy
from transformers import AutoTokenizer, BertConfig

from Model.MultimodelNER import resnet as resnet
from Model.MultimodelNER.Ner_processing import (
    format_predictions,
    process_predictions,
    combine_entities,
    remove_B_prefix,
    combine_i_tags,
)
from Model.MultimodelNER.predict import get_test_examples_predict
from Model.MultimodelNER.resnet_utils import myResnet
from Model.MultimodelNER.VLSP2016.dataset_roberta import MNERProcessor_2016
from Model.MultimodelNER.VLSP2016.train_umt_2016 import load_model, predict
from Model.NER.VLSP2021.Predict_Ner import ViTagger
from thunghiemxuly import save_uploaded_image, convert_text_to_txt, add_string_to_txt

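# File names of the saved BERT config/weights and the device (GPU if available) used for inference.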
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

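# Build the ResNet-152 backbone, load its saved weights, and wrap it as the image feature encoder.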
net = getattr(resnet, 'resnet152')()
net.load_state_dict(torch.load(os.path.join('Model/Resnet/', 'resnet152.pth')))
encoder = myResnet(net, True, device)

def process_text(text):
    """Collapse repeated whitespace and strip leading/trailing spaces."""
    processed_text = re.sub(r'\s+', ' ', text.strip())
    return processed_text

def show_mner_2016():
    multimodal_text = st.text_area("Enter your text for MNER:", height=300)
    multimodal_text = process_text(multimodal_text)
    image = st.file_uploader("Upload an image (only jpg):", type=["jpg"])

    if st.button("Process Multimodal NER"):
        if image is None:
            st.warning("Please upload a .jpg image before running Multimodal NER.")
            return
        # Persist the uploaded image and text so the predictor can read them back from disk.
        save_image = 'Model/MultimodelNER/VLSP2016/Image'
        save_txt = 'Model/MultimodelNER/VLSP2016/Filetxt/test.txt'
        image_name = image.name
        save_uploaded_image(image, save_image)
        convert_text_to_txt(multimodal_text, save_txt)
        add_string_to_txt(image_name, save_txt)
        st.image(image, caption="Uploaded Image", use_column_width=True)

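        # Paths to the fine-tuned checkpoint and its companion image-encoder weights.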
        bert_model = 'vinai/phobert-base-v2'
        output_dir = 'Model/MultimodelNER/VLSP2016/best_model'
        output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
        output_encoder_file = os.path.join(output_dir, "pytorch_encoder.bin")

        processor = MNERProcessor_2016()
        label_list = processor.get_labels()
        auxlabel_list = processor.get_auxlabels()
        num_labels = len(label_list) + 1
        auxnum_labels = len(auxlabel_list) + 1

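        # Transition matrix from the auxiliary label space to the full label space:
        # rows 2 and 3 spread their mass evenly (0.25 each) over four target labels,
        # the remaining rows map one-to-one.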
        trans_matrix = np.zeros((auxnum_labels, num_labels), dtype=float)
        trans_matrix[0, 0] = 1
        trans_matrix[1, 1] = 1
        trans_matrix[2, 2] = 0.25
        trans_matrix[2, 4] = 0.25
        trans_matrix[2, 6] = 0.25
        trans_matrix[2, 8] = 0.25
        trans_matrix[3, 3] = 0.25
        trans_matrix[3, 5] = 0.25
        trans_matrix[3, 7] = 0.25
        trans_matrix[3, 9] = 0.25
        trans_matrix[4, 10] = 1
        trans_matrix[5, 11] = 1
        trans_matrix[6, 12] = 1

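        # Load the PhoBERT tokenizer, the fine-tuned UMT model and image encoder, and the prepared example file.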
        tokenizer = AutoTokenizer.from_pretrained(bert_model, do_lower_case=False)
        model_umt, encoder_umt = load_model(output_model_file, output_encoder_file, encoder, num_labels, auxnum_labels)
        eval_examples = get_test_examples_predict('Model/MultimodelNER/VLSP2016/Filetxt/')

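        # Run inference, then post-process the raw tag sequence into merged entity spans.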
        y_pred, a = predict(model_umt, encoder_umt, eval_examples, tokenizer, device, save_image, trans_matrix)
        formatted_output = format_predictions(a, y_pred[0])
        final = process_predictions(formatted_output)
        final2 = combine_entities(final)
        final3 = remove_B_prefix(final2)
        final4 = combine_i_tags(final3)
        words_and_labels = final4

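        # Rebuild the token list and convert (word, label) pairs into character offsets for displacy.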
        words = [word for word, _ in words_and_labels]
        entities = [{'start': sum(len(word) + 1 for word, _ in words_and_labels[:i]),
                     'end': sum(len(word) + 1 for word, _ in words_and_labels[:i + 1]),
                     'label': label}
                    for i, (word, label) in enumerate(words_and_labels) if label != 'O']

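        # Render the entities with displacy's manual mode and embed the HTML in Streamlit.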
        html = displacy.render(
            {"text": " ".join(words), "ents": entities, "title": None},
            style="ent",
            manual=True,
            options={"colors": {"MISC": "#806699",
                                "ORG": "#ff6666",
                                "LOC": "#66cc66",
                                "PER": "#bf80ff",
                                "O": None}}
        )
        st.markdown(html, unsafe_allow_html=True)