Create app.py
app.py
ADDED
@@ -0,0 +1,46 @@
import streamlit as st
import torch
import numpy as np
import transformers
import pickle

@st.cache_resource  # cache the model and tokenizer across reruns (assumes a recent Streamlit version)
def load_model():
    # Load the base ERNIE 2.0 encoder, then overwrite its weights with the fine-tuned checkpoint.
    model_finetuned = transformers.AutoModel.from_pretrained(
        "nghuyong/ernie-2.0-base-en",
        output_attentions=False,
        output_hidden_states=False
    )
    # map_location='cpu' lets the checkpoint load on CPU-only hosts.
    model_finetuned.load_state_dict(torch.load('ErnieModel_imdb.pt', map_location='cpu'))
    model_finetuned.eval()  # inference only: disable dropout
    tokenizer = transformers.AutoTokenizer.from_pretrained("nghuyong/ernie-2.0-base-en")
    return model_finetuned, tokenizer

def preprocess_text(text_input, max_len, tokenizer):
    # Tokenize the raw text into padded, truncated PyTorch tensors.
    input_tokens = tokenizer(
        text_input,
        return_tensors='pt',
        padding=True,
        max_length=max_len,
        truncation=True
    )
    return input_tokens

def predict_sentiment(model, input_tokens):
    id2label = {0: "NEGATIVE", 1: "POSITIVE"}
    # Use the pooled [CLS] embedding as the feature vector for the classifier.
    with torch.no_grad():
        output = model(**input_tokens).pooler_output.numpy()
    # The logistic-regression head is stored separately and loaded on every call.
    with open('LogReg_imdb_Ernie.pkl', 'rb') as file:
        cls = pickle.load(file)
    # predict() returns an array with one label per input row; take the single element.
    result = id2label[int(cls.predict(output)[0])]
    return result

st.title('Text sentiment analysis with a fine-tuned ERNIE 2.0 model')

max_len = st.slider('Maximum sequence length (tokens)', 1, 500, 250)

text_input = st.text_input("Enter some text about a movie")
model, tokenizer = load_model()

if text_input:
    input_tokens = preprocess_text(text_input, max_len, tokenizer)
    output = predict_sentiment(model, input_tokens)
    st.write(output)
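The app assumes two artifacts sit next to app.py: the fine-tuned encoder weights ErnieModel_imdb.pt and the pickled classifier LogReg_imdb_Ernie.pkl. This commit does not include the script that produced them; the sketch below is one plausible way to build compatible files, assuming the classifier is a scikit-learn LogisticRegression fitted on ERNIE pooler_output embeddings of the Hugging Face "imdb" dataset. The subset size, batch size, and use of the datasets library are illustrative choices, and the real checkpoint is presumably a fine-tuned encoder rather than the base weights saved here.

# Hypothetical training sketch -- not part of this commit.
import pickle
import numpy as np
import torch
import transformers
from datasets import load_dataset                      # assumed dependency
from sklearn.linear_model import LogisticRegression    # assumed classifier type

model = transformers.AutoModel.from_pretrained("nghuyong/ernie-2.0-base-en")
tokenizer = transformers.AutoTokenizer.from_pretrained("nghuyong/ernie-2.0-base-en")
model.eval()

def embed(texts, batch_size=16, max_len=250):
    # Encode texts into pooled [CLS] embeddings, batch by batch.
    feats = []
    with torch.no_grad():
        for i in range(0, len(texts), batch_size):
            batch = tokenizer(texts[i:i + batch_size], return_tensors='pt',
                              padding=True, truncation=True, max_length=max_len)
            feats.append(model(**batch).pooler_output.numpy())
    return np.vstack(feats)

imdb = load_dataset("imdb", split="train").shuffle(seed=0).select(range(2000))  # small subset for speed
X = embed(imdb["text"])
clf = LogisticRegression(max_iter=1000).fit(X, imdb["label"])  # 0 = negative, 1 = positive

# Write the two files that app.py loads at start-up.
torch.save(model.state_dict(), 'ErnieModel_imdb.pt')
with open('LogReg_imdb_Ernie.pkl', 'wb') as f:
    pickle.dump(clf, f)

With both files in place, the app can be started locally with: streamlit run app.py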