SaviAnna committed on
Commit
2b0aaf5
1 Parent(s): 71d98a2

Update pages/✨second.py

Browse files
Files changed (1) hide show
  1. pages/✨second.py +4 -4
pages/✨second.py CHANGED
@@ -47,10 +47,10 @@ user_review = st.text_input("Enter your comment:", "")
47
  user_review_clean = clean(user_review)
48
  user_features = vectorizer.transform([user_review_clean])
49
  prediction = model.predict(user_features)
50
- inputs = tokenizer(user_review_clean, max_length=512, padding=True, truncation=True, return_tensors='pt')
51
  outputs = model_bert(**inputs)
52
  prediction_bert = torch.nn.functional.softmax(outputs.logits, dim=1)
53
- prediction_bert = torch.argmax(predicted, dim=1).numpy()
54
  st.write("Comment by ML model:", user_review)
55
 
56
  if prediction == 0:
@@ -59,9 +59,9 @@ else:
59
  st.markdown("<p style='color: red;'>Toxic comment</p>", unsafe_allow_html=True)
60
  st.write("Comment by RuBERT:", user_review)
61
 
62
- if prediction == 0:
63
  st.markdown("<p style='color: green;'>Controversial comment</p>", unsafe_allow_html=True)
64
- elif prediction == 1:
65
  st.markdown("<p style='color: red;'>Non-toxic comment</p>", unsafe_allow_html=True)
66
  else:
67
  st.markdown("<p style='color: red;'>Toxic comment</p>", unsafe_allow_html=True)
 
47
  user_review_clean = clean(user_review)
48
  user_features = vectorizer.transform([user_review_clean])
49
  prediction = model.predict(user_features)
50
+ inputs = tokenizer_bert(user_review_clean, max_length=512, padding=True, truncation=True, return_tensors='pt')
51
  outputs = model_bert(**inputs)
52
  prediction_bert = torch.nn.functional.softmax(outputs.logits, dim=1)
53
+ prediction_bert = torch.argmax(prediction_bert, dim=1).numpy()
54
  st.write("Comment by ML model:", user_review)
55
 
56
  if prediction == 0:
 
59
  st.markdown("<p style='color: red;'>Toxic comment</p>", unsafe_allow_html=True)
60
  st.write("Comment by RuBERT:", user_review)
61
 
62
+ if prediction_bert == 0:
63
  st.markdown("<p style='color: green;'>Controversial comment</p>", unsafe_allow_html=True)
64
+ elif prediction_bert == 1:
65
  st.markdown("<p style='color: red;'>Non-toxic comment</p>", unsafe_allow_html=True)
66
  else:
67
  st.markdown("<p style='color: red;'>Toxic comment</p>", unsafe_allow_html=True)