Commit · abc900f
Parent(s): c115ecc
Upload 21 files
- BackPropogation.py +53 -0
- Perceptron.py +46 -0
- app.py +152 -0
- backpropogation_.py +31 -0
- bp_model.pkl +3 -0
- bp_tokeniser.pkl +3 -0
- cnn_tumor.py +60 -0
- dnn_main.py +48 -0
- dnn_model.h5 +3 -0
- dnn_tokeniser.pkl +3 -0
- lstm_model.h5 +3 -0
- lstm_tokeniser.pkl +3 -0
- perceptron _.py +41 -0
- ppn_model.pkl +3 -0
- ppn_tokeniser.pkl +3 -0
- rnn_model.h5 +3 -0
- simplelstm.py +46 -0
- smsspam.py +57 -0
- spam_tokeniser.pkl +3 -0
- tumor_detection_model.h5 +3 -0
- tumor_detection_model.pkl +3 -0
BackPropogation.py
ADDED
@@ -0,0 +1,53 @@
import numpy as np
from tqdm import tqdm


class BackPropogation:
    def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
        self.bias = 0
        self.learning_rate = learning_rate
        self.max_epochs = epochs
        self.activation_function = activation_function

    def activate(self, x):
        # Every activation is thresholded to a hard 0/1 class label.
        if self.activation_function == 'step':
            return 1 if x >= 0 else 0
        elif self.activation_function == 'sigmoid':
            return 1 if (1 / (1 + np.exp(-x))) >= 0.5 else 0
        elif self.activation_function == 'relu':
            return 1 if max(0, x) >= 0.5 else 0

    def fit(self, X, y):
        n_features = X.shape[1]
        self.weights = np.zeros(n_features)
        for epoch in tqdm(range(self.max_epochs)):
            for i in range(len(X)):
                inputs = X[i]
                target = y[i]
                weighted_sum = np.dot(inputs, self.weights) + self.bias
                prediction = self.activate(weighted_sum)

                # Calculating the error and updating weights and bias.
                error = target - prediction
                self.weights += self.learning_rate * error * inputs
                self.bias += self.learning_rate * error

            print(f"Updated weights after epoch {epoch}: {self.weights}")
        print("Training Completed")

    def predict(self, X):
        predictions = []
        for i in range(len(X)):
            inputs = X[i]
            weighted_sum = np.dot(inputs, self.weights) + self.bias
            prediction = self.activate(weighted_sum)
            predictions.append(prediction)
        return predictions
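Despite its name, this class applies a single-layer error-driven update rather than multi-layer backpropagation. A minimal usage sketch follows; the AND truth-table data and the hyperparameters are illustrative assumptions, not part of the upload:

# Hedged usage sketch for BackPropogation on a toy, linearly separable problem.
import numpy as np
from BackPropogation import BackPropogation

# Hypothetical toy data: the AND truth table.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([0, 0, 0, 1])

model = BackPropogation(learning_rate=0.1, epochs=20, activation_function='step')
model.fit(X, y)
print(model.predict(X))  # expected to converge to [0, 0, 0, 1]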
Perceptron.py
ADDED
@@ -0,0 +1,46 @@
import numpy as np
from tqdm import tqdm


class Perceptron:

    def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
        self.bias = 0
        self.learning_rate = learning_rate
        self.max_epochs = epochs
        self.activation_function = activation_function

    def activate(self, x):
        if self.activation_function == 'step':
            return 1 if x >= 0 else 0
        elif self.activation_function == 'sigmoid':
            return 1 if (1 / (1 + np.exp(-x))) >= 0.5 else 0
        elif self.activation_function == 'relu':
            return 1 if max(0, x) >= 0.5 else 0

    def fit(self, X, y):
        n_features = X.shape[1]
        # Weights start as random integers in [0, n_features); cast to float so
        # the in-place update below does not fail on an integer array.
        self.weights = np.random.randint(n_features, size=n_features).astype(float)
        for epoch in tqdm(range(self.max_epochs)):
            for i in range(len(X)):
                inputs = X[i]
                target = y[i]
                weighted_sum = np.dot(inputs, self.weights) + self.bias
                prediction = self.activate(weighted_sum)
                # Standard perceptron learning rule (error-driven update);
                # without it the weights never change during training.
                error = target - prediction
                self.weights += self.learning_rate * error * inputs
                self.bias += self.learning_rate * error
        print("Training Completed")

    def predict(self, X):
        predictions = []
        for i in range(len(X)):
            inputs = X[i]
            weighted_sum = np.dot(inputs, self.weights) + self.bias
            prediction = self.activate(weighted_sum)
            predictions.append(prediction)
        return predictions
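Perceptron is consumed by perceptron _.py further down in this commit, which trains it on the IMDB reviews and pickles both the fitted model (ppn_model.pkl) and its tokeniser (ppn_tokeniser.pkl).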
app.py
ADDED
@@ -0,0 +1,152 @@
import pandas as pd
import streamlit as st
import numpy as np
import tensorflow as tf
from PIL import Image
import pickle


st.header('Demo')
task = st.selectbox('Select Task', ["Select One", 'Sentiment Classification', 'Tumor Detection'])


if task == "Tumor Detection":
    def cnn(img, model):
        img = Image.open(img)
        img = img.resize((128, 128))
        img = np.array(img)
        input_img = np.expand_dims(img, axis=0)
        # The model ends in a sigmoid, so threshold its output at 0.5.
        res = model.predict(input_img)
        if res[0][0] > 0.5:
            return "Tumor Detected"
        else:
            return "No Tumor"

    cnn_model = tf.keras.models.load_model("tumor_detection_model.h5")
    uploaded_file = st.file_uploader("Choose a file", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
        if st.button("Submit"):
            result = cnn(uploaded_file, cnn_model)
            st.write(result)


elif task == "Sentiment Classification":
    types = ["Perceptron", "BackPropagation", "RNN", "DNN", "LSTM"]
    input_text2 = st.radio("Select", types, horizontal=True)

    if input_text2 == "Perceptron":
        with open("ppn_model.pkl", 'rb') as file:
            perceptron = pickle.load(file)
        with open("ppn_tokeniser.pkl", 'rb') as file:
            ppn_tokeniser = pickle.load(file)

        def ppn_make_predictions(inp, model):
            # inp is already a list of strings, so pass it straight to the
            # tokeniser (wrapping it again would nest the list and encode nothing).
            encoded_inp = ppn_tokeniser.texts_to_sequences(inp)
            padded_inp = tf.keras.preprocessing.sequence.pad_sequences(encoded_inp, maxlen=500)
            # predict() returns a list of 0/1 labels; 1 was mapped to 'negative' in training.
            res = model.predict(padded_inp)
            if res[0]:
                return "Negative"
            else:
                return "Positive"

        st.subheader('Movie Review Classification using Perceptron')
        inp = st.text_area('Enter message')
        if st.button('Check'):
            pred = ppn_make_predictions([inp], perceptron)
            st.write(pred)

    elif input_text2 == "BackPropagation":
        with open("bp_model.pkl", 'rb') as file:
            backprop = pickle.load(file)
        with open("bp_tokeniser.pkl", 'rb') as file:
            bp_tokeniser = pickle.load(file)

        def bp_make_predictions(inp, model):
            encoded_inp = bp_tokeniser.texts_to_sequences(inp)
            padded_inp = tf.keras.preprocessing.sequence.pad_sequences(encoded_inp, maxlen=500)
            res = model.predict(padded_inp)
            if res[0]:
                return "Negative"
            else:
                return "Positive"

        st.subheader('Movie Review Classification using BackPropagation')
        inp = st.text_area('Enter message')
        if st.button('Check'):
            pred = bp_make_predictions([inp], backprop)
            st.write(pred)

    elif input_text2 == "RNN":
        rnn_model = tf.keras.models.load_model("rnn_model.h5")
        with open("spam_tokeniser.pkl", 'rb') as model_file:
            rnn_tokeniser = pickle.load(model_file)

        def rnn_make_predictions(inp, model):
            encoded_inp = rnn_tokeniser.texts_to_sequences(inp)
            padded_inp = tf.keras.preprocessing.sequence.pad_sequences(encoded_inp, maxlen=10, padding='post')
            res = (model.predict(padded_inp) > 0.5).astype("int32")
            if res[0][0]:
                return "Spam"
            else:
                return "Ham"

        st.subheader('Spam message Classification using RNN')
        inp = st.text_area("Give message")
        if st.button('Check'):
            pred = rnn_make_predictions([inp], rnn_model)
            st.write(pred)

    elif input_text2 == "DNN":
        dnn_model = tf.keras.models.load_model("dnn_model.h5")
        with open("dnn_tokeniser.pkl", 'rb') as file:
            dnn_tokeniser = pickle.load(file)

        def dnn_make_predictions(inp, model):
            inp = dnn_tokeniser.texts_to_sequences(inp)
            inp = tf.keras.preprocessing.sequence.pad_sequences(inp, maxlen=500)
            res = (model.predict(inp) > 0.5).astype("int32")
            if res[0][0]:
                return "Negative"
            else:
                return "Positive"

        st.subheader('Movie Review Classification using DNN')
        inp = st.text_area('Enter message')
        if st.button('Check'):
            pred = dnn_make_predictions([inp], dnn_model)
            st.write(pred)

    elif input_text2 == "LSTM":
        lstm_model = tf.keras.models.load_model("lstm_model.h5")

        with open("lstm_tokeniser.pkl", 'rb') as file:
            lstm_tokeniser = pickle.load(file)

        def lstm_make_predictions(inp, model):
            inp = lstm_tokeniser.texts_to_sequences(inp)
            inp = tf.keras.preprocessing.sequence.pad_sequences(inp, maxlen=500)
            res = (model.predict(inp) > 0.5).astype("int32")
            if res[0][0]:
                return "Negative"
            else:
                return "Positive"

        st.subheader('Movie Review Classification using LSTM')
        inp = st.text_area('Enter message')
        if st.button('Check'):
            pred = lstm_make_predictions([inp], lstm_model)
            st.write(pred)
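Assuming Streamlit is installed and the model artifacts above sit alongside the script, the demo launches with `streamlit run app.py`.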
backpropogation_.py
ADDED
@@ -0,0 +1,31 @@
from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing import sequence
from BackPropogation import BackPropogation
import pickle

dataset = pd.read_csv(r"C:\Users\Ajitha V\OneDrive\Desktop\Neural_network\IMDB Dataset.csv")

# Map sentiment labels to integers: negative -> 1, positive -> 0.
dataset['sentiment'] = dataset['sentiment'].map({'negative': 1, 'positive': 0})
X = dataset['review'].values
y = dataset['sentiment'].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=42)
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(X_train)
X_train = tokeniser.texts_to_sequences(X_train)
X_test = tokeniser.texts_to_sequences(X_test)

max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

backprop = BackPropogation(learning_rate=0.01, epochs=5, activation_function='sigmoid')
backprop.fit(X_train, y_train)
pred = backprop.predict(X_test)

with open("bp_model.pkl", 'wb') as file:
    pickle.dump(backprop, file)
with open("bp_tokeniser.pkl", 'wb') as file:
    pickle.dump(tokeniser, file)
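The script computes pred but never reports a metric. A small evaluation step along these lines, mirroring what perceptron _.py already does, could be appended:

# Hedged addition: report test accuracy for the pickled model.
from sklearn.metrics import accuracy_score
print(f"Accuracy : {accuracy_score(y_test, pred)}")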
bp_model.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2898ac4c9ef15f477f4bd8ac49b1ae1357b92e6d8867b14c0b05ec7a4ea45149
size 4300
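The .pkl and .h5 entries in this commit are Git LFS pointer files: the repository stores only the spec version line, the SHA-256 object id, and the byte size, while the binary artifact itself lives in LFS storage.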
bp_tokeniser.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5b5110d992f43b2be8ae7213e998f5ed8364e0ea50160bf27a07f8eda3071b5
size 4992453
cnn_tumor.py
ADDED
@@ -0,0 +1,60 @@
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import tensorflow as tf
from PIL import Image
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import pickle

image_dir = r"C:\Users\Ajitha V\OneDrive\Desktop\Neural_network\tumordata"
no_tumor_images = os.listdir(image_dir + '/no')
yes_tumor_images = os.listdir(image_dir + '/yes')

dataset = []
label = []
img_siz = (128, 128)

# Load and resize the 'no tumor' images, labelled 0.
for i, image_name in tqdm(enumerate(no_tumor_images), desc="No Tumor"):
    if image_name.endswith('.jpg'):
        image = cv2.imread(image_dir + '/no/' + image_name)
        image = Image.fromarray(image, 'RGB')
        image = image.resize(img_siz)
        dataset.append(np.array(image))
        label.append(0)

# Load and resize the 'tumor' images, labelled 1.
for i, image_name in tqdm(enumerate(yes_tumor_images), desc="Tumor"):
    if image_name.endswith('.jpg'):
        image = cv2.imread(image_dir + '/yes/' + image_name)
        image = Image.fromarray(image, 'RGB')
        image = image.resize(img_siz)
        dataset.append(np.array(image))
        label.append(1)

dataset = np.array(dataset)
label = np.array(label)

x_train, x_test, y_train, y_test = train_test_split(dataset, label, test_size=0.2, random_state=42)

x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dropout(.5),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=5, batch_size=128, validation_split=0.1)

model.save("tumor_detection_model.h5")

with open("tumor_detection_model.pkl", "wb") as model_file:
    pickle.dump(model, model_file)
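A minimal inference sketch for the saved model follows; the image path is a hypothetical placeholder. Note that training applies tf.keras.utils.normalize while app.py does not, so repeating the same scaling here keeps inference consistent with training:

# Hedged inference sketch (sample_scan.jpg is a hypothetical file name).
import numpy as np
import tensorflow as tf
from PIL import Image

model = tf.keras.models.load_model("tumor_detection_model.h5")

img = Image.open("sample_scan.jpg").convert("RGB").resize((128, 128))
x = np.expand_dims(np.array(img), axis=0)
x = tf.keras.utils.normalize(x, axis=1)  # same scaling as during training
prob = model.predict(x)[0][0]
print("Tumor Detected" if prob > 0.5 else "No Tumor")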
dnn_main.py
ADDED
@@ -0,0 +1,48 @@
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from numpy import argmax
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import sequence
import pickle

dataset = pd.read_csv(r"C:\Users\Ajitha V\OneDrive\Desktop\Neural_network\IMDB Dataset.csv")

dataset['sentiment'] = dataset['sentiment'].map({'negative': 1, 'positive': 0})
X = dataset['review'].values
y = dataset['sentiment'].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(X_train)
X_train = tokeniser.texts_to_sequences(X_train)
X_test = tokeniser.texts_to_sequences(X_test)

vocab_size = len(tokeniser.word_index) + 1

max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length, padding='post')
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length, padding='post')

n_features = X_train.shape[1]

# Modelling a sample DNN
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(500,)))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

opt = Adam(learning_rate=0.01)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])

history = model.fit(X_train, y_train, epochs=50, batch_size=16)
loss, acc = model.evaluate(X_test, y_test)

model.save("dnn_model.h5")

with open("dnn_tokeniser.pkl", 'wb') as file:
    pickle.dump(tokeniser, file)
dnn_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:351a02fd6686d9bd4632e3639e90cfac8bac2e98ddd8c27347324c38b87bc26e
size 457224
dnn_tokeniser.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:996f49d4268b6ec55c32471bf1926ef3d870f9e31a21d4bebbd53249d0d00fc3
size 4534143
lstm_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d1eefcee0f50c3dcd660032b8fc50a1fad2ae56656cebd2867ea38517bf62662
size 41224696
lstm_tokeniser.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:919b41e8e2abcc668ea83f0c7454d0fe078a2c8a83bdbb23fb5c7c9daf703626
size 4534143
perceptron _.py
ADDED
@@ -0,0 +1,41 @@
from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing import sequence
from Perceptron import Perceptron
import pickle

from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report

dataset = pd.read_csv(r"C:\Users\Ajitha V\OneDrive\Desktop\Neural_network\IMDB Dataset.csv")

dataset['sentiment'] = dataset['sentiment'].map({'negative': 1, 'positive': 0})
X = dataset['review'].values
y = dataset['sentiment'].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(X_train)
X_train = tokeniser.texts_to_sequences(X_train)
X_test = tokeniser.texts_to_sequences(X_test)

max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

perceptron = Perceptron(epochs=10, activation_function='sigmoid')

perceptron.fit(X_train, y_train)
pred = perceptron.predict(X_test)

# sklearn's metrics expect (y_true, y_pred) argument order.
print(f"Accuracy : {accuracy_score(y_test, pred)}")
report = classification_report(y_test, pred, digits=2)

print(report)

with open("ppn_model.pkl", 'wb') as file:
    pickle.dump(perceptron, file)
with open("ppn_tokeniser.pkl", 'wb') as file:
    pickle.dump(tokeniser, file)
ppn_model.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b2f35d300e2e1e62ba318a5c87325ceaec298f9acd7d8e1d367e663ba049715
size 2267
ppn_tokeniser.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:37aa7ac9ab0c53abd1a1062e78cf9c480fcea66189ed01804e30bf4123a93626
size 4848716
rnn_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:232b1f0ce22ba5b28f4e58eb24cb5b25392a07f2d2f78982fd9d41b8055edc32
size 2269016
simplelstm.py
ADDED
@@ -0,0 +1,46 @@
import tensorflow as tf
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing import sequence
from sklearn.model_selection import train_test_split

import pickle

dataset = pd.read_csv(r"C:\Users\Ajitha V\OneDrive\Desktop\Neural_network\IMDB Dataset.csv")

dataset['sentiment'] = dataset['sentiment'].map({'negative': 1, 'positive': 0})
X = dataset['review'].values
y = dataset['sentiment'].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(X_train)
X_train = tokeniser.texts_to_sequences(X_train)
X_test = tokeniser.texts_to_sequences(X_test)
print(X_train[0:2])

vocab_size = len(tokeniser.word_index) + 1

max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

embedding_vector_length = 32
model = Sequential()
model.add(Embedding(vocab_size, embedding_vector_length, input_length=max_review_length))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=3, batch_size=64)

scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))

model.save("lstm_model.h5")
with open("lstm_tokeniser.pkl", 'wb') as file:
    pickle.dump(tokeniser, file)
smsspam.py
ADDED
@@ -0,0 +1,57 @@
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
import pickle

dataset = pd.read_csv(r"C:\Users\Ajitha V\OneDrive\Desktop\Neural_network\SMSSpamCollection.txt", sep='\t', names=['label', 'message'])

dataset['label'] = dataset['label'].map({'spam': 1, 'ham': 0})
X = dataset['message'].values
y = dataset['label'].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(X_train)
encoded_train = tokeniser.texts_to_sequences(X_train)
encoded_test = tokeniser.texts_to_sequences(X_test)

max_length = 10
padded_train = tf.keras.preprocessing.sequence.pad_sequences(encoded_train, maxlen=max_length, padding='post')
padded_test = tf.keras.preprocessing.sequence.pad_sequences(encoded_test, maxlen=max_length, padding='post')

vocab_size = len(tokeniser.word_index) + 1

# define the model
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(input_dim=vocab_size, output_dim=24, input_length=max_length),
    tf.keras.layers.SimpleRNN(24, return_sequences=False),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# early stopping: accuracy should be maximised, so mode must be 'max', not 'min'
early_stop = tf.keras.callbacks.EarlyStopping(monitor='accuracy', mode='max', patience=10)


# fit the model
model.fit(x=padded_train,
          y=y_train,
          epochs=50,
          validation_data=(padded_test, y_test),
          callbacks=[early_stop]
          )

preds = (model.predict(padded_test) > 0.5).astype("int32")

model.save("rnn_model.h5")

with open("spam_tokeniser.pkl", 'wb') as model_file:
    pickle.dump(tokeniser, model_file)
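A short sketch of reusing the saved artifacts for a single message, mirroring the RNN branch of app.py; the example message is illustrative:

# Hedged usage sketch: classify one message with the saved RNN and tokeniser.
import pickle
import tensorflow as tf

model = tf.keras.models.load_model("rnn_model.h5")
with open("spam_tokeniser.pkl", 'rb') as f:
    tokeniser = pickle.load(f)

msg = ["Free entry! Text WIN to claim your prize"]  # illustrative example
seq = tokeniser.texts_to_sequences(msg)
pad = tf.keras.preprocessing.sequence.pad_sequences(seq, maxlen=10, padding='post')
print("Spam" if (model.predict(pad) > 0.5).astype("int32")[0][0] else "Ham")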
spam_tokeniser.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ac105ca73f47803b91def2ec4924dc4c217de63950b4fe170ef4758788efce01
size 290462
tumor_detection_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5bf07748badd8583c2f9a77f3b20d2a0d36c5e9e6440eb398de4e1e1975b6304
size 391811360
tumor_detection_model.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b511ff209422d5b75731fe30d78a6f5b8b4e32e7a04c6c36bfae89a1ee7e65b7
size 391803384