YixuanWang committed on
Commit efc4793 · verified · 1 Parent(s): 6a2645d

Create app.py

Files changed (1)
  app.py +79 -0
app.py ADDED
@@ -0,0 +1,79 @@
+ import gradio as gr
+ import pandas as pd
+ import numpy as np
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ from textblob import TextBlob
+ import os
+ from huggingface_hub import login
+
+ # Get the Hugging Face API token from the environment variable
+ hf_token = os.getenv("pasavectoi")
+ login(hf_token)
+
+ # Load the dataset from the local file
+ data = pd.read_csv('twitter_dataset.csv').head(1000)
+
+ # Calculate sentiment polarity and popularity
+ data['Sentiment'] = data['Text'].apply(lambda x: TextBlob(x).sentiment.polarity)
+ data['Popularity'] = data['Retweets'] + data['Likes']
+ # Standardize popularity (z-score), then rescale into [-1, 1]
+ data['Popularity'] = (data['Popularity'] - data['Popularity'].mean()) / data['Popularity'].std()
+ data['Popularity'] = data['Popularity'] / data['Popularity'].abs().max()
+
+ # Load the fake news classification model
+ model_name = "hamzab/roberta-fake-news-classification"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForSequenceClassification.from_pretrained(model_name)
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = model.to(device)
+
+ # Process tweets in batches to avoid memory issues
+ batch_size = 100
+ predictions = []
+ for i in range(0, len(data), batch_size):
+     batch = data['Text'][i:i + batch_size].tolist()
+     inputs = tokenizer(batch, return_tensors="pt", padding=True, truncation=True, max_length=128)
+     inputs = {key: val.to(device) for key, val in inputs.items()}
+     with torch.no_grad():
+         outputs = model(**inputs)
+     predictions.extend(outputs.logits.argmax(dim=1).cpu().numpy())
+
+ data['Fake_News_Prediction'] = predictions
+ data['Credibility'] = data['Fake_News_Prediction'].apply(lambda x: 1 if x == 1 else -1)
+
+ # Define the prediction and recommendation function
+ def predict_and_recommend(title, text, visibility_weight, sentiment_weight, popularity_weight):
+     # Normalize the three user-supplied weights so they sum to 1
+     total_weight = visibility_weight + sentiment_weight + popularity_weight
+     visibility_weight /= total_weight
+     sentiment_weight /= total_weight
+     popularity_weight /= total_weight
+
+     # Update final visibility score with user-defined weights
+     data['User_Final_Visibility_Score'] = (
+         data['Credibility'] * visibility_weight +
+         data['Sentiment'] * sentiment_weight +
+         data['Popularity'] * popularity_weight
+     )
+     # Take the top 100 by score, then randomly sample 10 recommendations
+     top_100_data = data.nlargest(100, 'User_Final_Visibility_Score')
+     recommended_data = top_100_data.sample(10)
+
+     return recommended_data[['Text', 'User_Final_Visibility_Score']]
+
+ # Set up Gradio interface
+ iface = gr.Interface(
+     fn=predict_and_recommend,
+     inputs=[
+         gr.Textbox(label="Title"),
+         gr.Textbox(label="Text", lines=10),
+         gr.Slider(0, 1, 0.5, label="Visibility Weight"),
+         gr.Slider(0, 1, 0.3, label="Sentiment Weight"),
+         gr.Slider(0, 1, 0.2, label="Popularity Weight")
+     ],
+     outputs="dataframe",
+     title="Customizable Fake News Recommendation System",
+     description="Adjust weights to receive customized tweet recommendations based on visibility, sentiment, and popularity."
+ )
+
+ iface.launch()
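
For quick local testing, the scoring function can be exercised without launching the Gradio UI. A minimal smoke-test sketch, assuming the script above has already run (the argument values here are arbitrary and mirror the slider defaults; they are not part of this commit):

    # Hypothetical smoke test: run after app.py has built `data`.
    # Weights are normalized inside predict_and_recommend, so any
    # positive values work.
    top_tweets = predict_and_recommend(
        title="Example headline",        # accepted but unused by the function
        text="Example article body.",    # accepted but unused by the function
        visibility_weight=0.5,
        sentiment_weight=0.3,
        popularity_weight=0.2,
    )
    print(top_tweets)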
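Note that the interface collects Title and Text, but predict_and_recommend never passes them to the classifier; the ranking depends only on the precomputed dataset columns. A hedged sketch of how the submitted article could be scored with the same model (the classify_article helper and its title-plus-text input format are assumptions, not part of this commit):

    def classify_article(title, text):
        # Hypothetical helper reusing the tokenizer, model, and device
        # loaded in app.py. Concatenating title and text is an assumption
        # about the model's expected input, not taken from the model card.
        inputs = tokenizer(title + " " + text, return_tensors="pt",
                           truncation=True, max_length=128)
        inputs = {key: val.to(device) for key, val in inputs.items()}
        with torch.no_grad():
            logits = model(**inputs).logits
        return int(logits.argmax(dim=1).item())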