Create utils/helper.py
utils/helper.py  +132 -0
ADDED
@@ -0,0 +1,132 @@
import streamlit as st
import numpy as np
import pandas as pd
import yfinance as yf
import datetime as dt
import plotly.graph_objects as go
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, LSTM, GRU, Dense, Dropout
from tensorflow.keras.optimizers import SGD, Adam
from sklearn.preprocessing import MinMaxScaler


# Defining model creation functions
def create_rnn_model(input_shape: tuple) -> tf.keras.Model:
    """
    Constructs a recurrent neural network (RNN) with stacked SimpleRNN layers,
    dropout regularization, and a Dense output layer. The model is compiled
    with the SGD optimizer and mean squared error loss.

    Args:
        input_shape (tuple): Input shape of the training data, excluding the
            batch size, e.g. (timesteps, features).

    Returns:
        tf.keras.Model: The constructed and compiled TensorFlow model.
    """
    # Initializing the RNN model
    regressor = Sequential()

    # Adding the first RNN layer with dropout regularization
    regressor.add(SimpleRNN(units=50, activation="tanh", return_sequences=True, input_shape=input_shape))
    regressor.add(Dropout(0.2))

    # Adding more RNN layers
    regressor.add(SimpleRNN(units=50, activation="tanh", return_sequences=True))
    regressor.add(SimpleRNN(units=50, activation="tanh", return_sequences=True))
    regressor.add(SimpleRNN(units=50, activation="tanh"))  # Last RNN layer does not return sequences

    # Output layer: a single unit with sigmoid activation, which bounds
    # predictions to (0, 1) and matches the MinMax-scaled targets
    regressor.add(Dense(units=1, activation="sigmoid"))

    # Compiling the RNN with the SGD optimizer (Nesterov momentum)
    regressor.compile(optimizer=SGD(learning_rate=0.01, momentum=0.9, nesterov=True),
                      loss="mean_squared_error")

    return regressor


def create_lstm_model(input_shape: tuple) -> tf.keras.Model:
    """
    Constructs a Long Short-Term Memory (LSTM) model with stacked LSTM layers
    and an intermediate Dense layer, followed by a Dense output layer. The
    model is compiled with the Adam optimizer and mean squared error loss.

    Args:
        input_shape (tuple): Input shape of the training data, excluding the
            batch size, e.g. (timesteps, features).

    Returns:
        tf.keras.Model: The constructed and compiled TensorFlow model.
    """
    # Initializing the LSTM model
    regressorLSTM = Sequential()

    # Adding LSTM layers
    regressorLSTM.add(LSTM(50, return_sequences=True, input_shape=input_shape))
    regressorLSTM.add(LSTM(50, return_sequences=False))  # Last LSTM layer does not return sequences
    regressorLSTM.add(Dense(25))  # Intermediate Dense layer with 25 units

    # Adding the output layer with a single unit
    regressorLSTM.add(Dense(1))

    # Compiling the LSTM with the Adam optimizer; accuracy is not a meaningful
    # metric for regression, so only the MSE loss is tracked
    regressorLSTM.compile(optimizer="adam", loss="mean_squared_error")

    return regressorLSTM


def create_gru_model(input_shape: tuple) -> tf.keras.Model:
    """
    Constructs a Gated Recurrent Unit (GRU) model with stacked GRU layers,
    dropout regularization, and a Dense output layer. The model is compiled
    with the SGD optimizer and mean squared error loss.

    Args:
        input_shape (tuple): Input shape of the training data, excluding the
            batch size, e.g. (timesteps, features).

    Returns:
        tf.keras.Model: The constructed and compiled TensorFlow model.
    """
    # Initializing the GRU model
    regressorGRU = Sequential()

    # Adding GRU layers with dropout regularization
    regressorGRU.add(GRU(units=50, return_sequences=True, input_shape=input_shape, activation="tanh"))
    regressorGRU.add(Dropout(0.2))

    regressorGRU.add(GRU(units=50, return_sequences=True, activation="tanh"))
    regressorGRU.add(GRU(units=50, return_sequences=True, activation="tanh"))
    regressorGRU.add(GRU(units=50, activation="tanh"))  # Last GRU layer does not return sequences

    # Output layer with ReLU activation, which keeps predictions non-negative
    regressorGRU.add(Dense(units=1, activation="relu"))

    # Compiling the GRU with the SGD optimizer (standard momentum)
    regressorGRU.compile(optimizer=SGD(learning_rate=0.01, momentum=0.9, nesterov=False),
                         loss="mean_squared_error")

    return regressorGRU


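# All three builders share the same contract: they take the (timesteps,
# features) shape of one input window and return a compiled Keras model that
# is ready for fit(). A minimal sketch, assuming the default 50-step,
# single-feature window produced by create_datasets below:
#
#     model = create_lstm_model(input_shape=(50, 1))
#     model.summary()

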
def download_data(stock, start_date, end_date):
    """Download historical OHLCV data for a ticker symbol via yfinance."""
    data = yf.download(stock, start=start_date, end=end_date)
    return data


def prepare_data(data):
    """Scale a 1-D price array to the [0, 1] range with MinMaxScaler.

    Returns the scaled array and the fitted scaler, which is needed later to
    invert the scaling on model predictions.
    """
    scaler = MinMaxScaler(feature_range=(0, 1))
    data_scaled = scaler.fit_transform(data.reshape(-1, 1))
    return data_scaled, scaler


def create_datasets(data_scaled, look_back=50):
    """Build supervised sliding-window samples from a scaled series: each X is
    `look_back` consecutive values and the corresponding y is the value that
    immediately follows the window.
    """
    X, y = [], []
    for i in range(look_back, len(data_scaled)):
        X.append(data_scaled[i - look_back:i, 0])
        y.append(data_scaled[i, 0])
    X, y = np.array(X), np.array(y)
    X = np.reshape(X, (X.shape[0], X.shape[1], 1))  # (samples, timesteps, 1 feature)
    return X, y


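# For example, a scaled series of 300 points with look_back=50 yields
# 300 - 50 = 250 samples: X has shape (250, 50, 1) and y has shape (250,).

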
def plot_predictions(train_data, test_data, y_train_pred, y_test_pred, model_name, look_back=50):
    """Plot training data, test data, and model predictions as a Plotly chart
    rendered through Streamlit. `look_back` must match the window size passed
    to create_datasets, since the first `look_back` points of each split have
    no corresponding prediction.
    """
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=train_data.index, y=train_data.values.flatten(), mode='lines', name='Training Data'))
    fig.add_trace(go.Scatter(x=test_data.index, y=test_data.values.flatten(), mode='lines', name='Test Data'))
    fig.add_trace(go.Scatter(x=train_data.index[look_back:], y=y_train_pred, mode='lines', name='Train Predictions'))
    fig.add_trace(go.Scatter(x=test_data.index[look_back:], y=y_test_pred, mode='lines', name='Test Predictions'))
    fig.update_layout(title=f'{model_name} Predictions', xaxis_title='Date', yaxis_title='Stock Price')
    st.plotly_chart(fig)
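
For context, here is a minimal sketch of how a driver script (e.g. a hypothetical app.py) might wire these helpers together end to end. The ticker, date range, split ratio, and training settings are illustrative assumptions, not part of this commit:

from utils.helper import (create_lstm_model, create_datasets, download_data,
                          plot_predictions, prepare_data)

# Illustrative settings; any ticker and date range supported by yfinance works
look_back = 50
data = download_data("AAPL", "2015-01-01", "2023-12-31")
close = data["Close"]

# Chronological 80/20 train/test split; time series must not be shuffled
split = int(len(close) * 0.8)
train_series, test_series = close[:split], close[split:]

# Fit the scaler on the training split only, then reuse it for the test split
train_scaled, scaler = prepare_data(train_series.values)
test_scaled = scaler.transform(test_series.values.reshape(-1, 1))

X_train, y_train = create_datasets(train_scaled, look_back)
X_test, y_test = create_datasets(test_scaled, look_back)

model = create_lstm_model(input_shape=(look_back, 1))
model.fit(X_train, y_train, epochs=10, batch_size=32)

# Undo the MinMax scaling so the chart shows prices on the original scale
y_train_pred = scaler.inverse_transform(model.predict(X_train)).flatten()
y_test_pred = scaler.inverse_transform(model.predict(X_test)).flatten()
plot_predictions(train_series, test_series, y_train_pred, y_test_pred,
                 "LSTM", look_back=look_back)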