import streamlit as st
import transformers
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the fine-tuned RoBERTa model and its tokenizer from the Hugging Face Hub.
model = AutoModelForSequenceClassification.from_pretrained("ikoghoemmanuell/finetuned_fake_news_roberta")
tokenizer = AutoTokenizer.from_pretrained("ikoghoemmanuell/finetuned_fake_news_roberta")

@st.cache_resource
def load_classifier():
    # Build the text-classification pipeline once and cache it across Streamlit reruns.
    return transformers.pipeline("text-classification", model=model, tokenizer=tokenizer)


def detect_fake_news(text):
    # The pipeline returns a list like [{"label": "LABEL_1", "score": 0.97}].
    classifier = load_classifier()
    prediction = classifier(text)
    label = prediction[0]["label"]
    score = prediction[0]["score"]
    return label, score

st.set_page_config(
    page_title="Fake News Detection App",
    page_icon=":smile:",
    layout="wide",
    initial_sidebar_state="auto",
)

st.write("""
# Fake News Detection
Enter some text and we'll tell you if it's likely to be fake news or not!
""")

st.image("https://docs.gato.txst.edu/78660/w/2000/a_1dzGZrL3bG/fake-fact.jpg", width=400)

text = st.text_input("Enter some text here:")

st.markdown(
    """
    <style>
    body {
        background-color: #f5f5f5;
    }
    h1 {
        color: #4e79a7;
    }
    </style>
    """,
    unsafe_allow_html=True,
)

if text:
    label, score = detect_fake_news(text)
    # LABEL_1 corresponds to fake news; anything else is treated as genuine.
    if label == "LABEL_1":
        st.error(f"The text is likely to be fake news with a confidence score of {score*100:.2f}%!")
    else:
        st.success(f"The text is likely to be genuine with a confidence score of {score*100:.2f}%!")
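
# A minimal way to try the app locally (assuming this script is saved as app.py
# and that streamlit, transformers, and torch are installed):
#
#   streamlit run app.py
#
# Streamlit serves the page at http://localhost:8501 by default.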