import gradio as gr
import joblib
import nltk
import pickle
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import re
# Load the trained model and the TF-IDF vectorizer
with open('disaster_model.pkl', 'rb') as f:
    model = pickle.load(f)
vectorizer = joblib.load("tfidf_vectorizer.pkl")
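# Both files are assumed to sit next to app.py: the pickled model is expected to
# behave like a scikit-learn classifier exposing predict(), and the joblib file
# to hold a fitted TfidfVectorizer exposing transform().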
# Download the required NLTK resources
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
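# Note: on newer NLTK releases (3.8.2+) word_tokenize may also need the
# 'punkt_tab' resource; if a LookupError is raised, add:
# nltk.download('punkt_tab')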
# Preprocessing helpers
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
def preprocess_text(text):
    text = text.lower()
    text = re.sub(r'[^\w\s]', '', text)  # strip punctuation
    text = re.sub(r'\s+', ' ', text)     # collapse whitespace
    text = re.sub(r'\d+', ' ', text)     # drop digits
    tokens = word_tokenize(text)
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
    return ' '.join(tokens)
def predict_sentiment(text):
    text = preprocess_text(text)
    text_vectorized = vectorizer.transform([text]).toarray()
    # predict() returns an array with one entry for the single input row
    prediction = model.predict(text_vectorized)[0]
    return 'Real Disaster' if prediction > 0.5 else 'Not Real Disaster'
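# Quick sanity check without the UI (hypothetical input; the returned label
# depends entirely on the trained model):
#   predict_sentiment("Flood warnings issued for the river valley")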
# Gradio interface
demo = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(lines=2, placeholder="Enter a tweet here..."),
    outputs="text",
    title="Disaster Tweet Classifier",
    description="Enter a tweet and get the prediction (real disaster or not real disaster).",
)
if __name__ == "__main__":
    # share=True asks Gradio to create a temporary public URL in addition to the local one
    demo.launch(share=True)