import streamlit as st
from datetime import datetime
import math
import numpy as np
import pandas as pd
import yfinance as yf
import datetime as dt
import plotly.graph_objects as go
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, LSTM, GRU, Dense, Dropout
from tensorflow.keras.optimizers import SGD, Adam
from sklearn.preprocessing import MinMaxScaler

# Helper for the credit/footer line: returns the current year
def current_year():
    now = datetime.now()
    return now.year

# Defining model creation functions
def create_rnn_model(input_shape: tuple) -> tf.keras.Model:
    """
    Constructs a Recurrent Neural Network (RNN) model with multiple SimpleRNN layers and dropout regularization,
    followed by a Dense output layer. The model is compiled with the SGD optimizer.
    Args:
    input_shape (tuple): A tuple representing the input shape of the training data, excluding the batch size.
                         For example, (timesteps, features).
    Returns:
    tf.keras.Model: The constructed and compiled TensorFlow model.
    """
    # Initializing the RNN model
    regressor = Sequential()

    # Adding the first RNN layer with dropout regularization
    regressor.add(SimpleRNN(units=50, activation="tanh", return_sequences=True, input_shape=input_shape))
    regressor.add(Dropout(0.2))

    # Adding more RNN layers
    regressor.add(SimpleRNN(units=50, activation="tanh", return_sequences=True))
    regressor.add(SimpleRNN(units=50, activation="tanh", return_sequences=True))
    regressor.add(SimpleRNN(units=50, activation="tanh"))  # Last RNN layer does not return sequences

    # Adding the output layer: a single sigmoid unit keeps predictions within the (0, 1) range of the MinMax-scaled prices
    regressor.add(Dense(units=1, activation='sigmoid'))

    # Compiling the RNN with the SGD optimizer
    regressor.compile(optimizer=SGD(learning_rate=0.01, momentum=0.9, nesterov=True),
                      loss="mean_squared_error")

    return regressor

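# Hypothetical instantiation (illustrative shapes, not called by the app): for
# windows of 50 timesteps with one feature per step, input_shape is (50, 1).
# rnn_model = create_rnn_model((50, 1))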

def create_lstm_model(input_shape: tuple) -> tf.keras.Model:
    """
    Constructs a Long Short-Term Memory (LSTM) model with LSTM layers and a Dense layer,
    followed by a Dense output layer. The model is compiled with the Adam optimizer.
    Args:
    input_shape (tuple): A tuple representing the input shape of the training data, excluding the batch size.
                         For example, (timesteps, features).
    Returns:
    tf.keras.Model: The constructed and compiled TensorFlow model.
    """
    # Initializing the LSTM model
    regressorLSTM = Sequential()

    # Adding LSTM layers
    regressorLSTM.add(LSTM(50, return_sequences=True, input_shape=input_shape))
    regressorLSTM.add(LSTM(50, return_sequences=False))  # Last LSTM layer does not return sequences
    regressorLSTM.add(Dense(25))  # Additional Dense layer with 25 units

    # Adding the output layer with a single unit for output
    regressorLSTM.add(Dense(1))

    # Compiling the LSTM with the Adam optimizer; accuracy is omitted because
    # it is not a meaningful metric for this regression task
    regressorLSTM.compile(optimizer='adam', loss='mean_squared_error')

    return regressorLSTM


def create_gru_model(input_shape: tuple) -> tf.keras.Model:
    """
    Constructs a Gated Recurrent Unit (GRU) model with multiple GRU layers including dropout for regularization,
    and a Dense output layer. The model is compiled with the SGD optimizer.
    Args:
    input_shape (tuple): A tuple representing the input shape of the training data, excluding the batch size.
                         For example, (timesteps, features).
    Returns:
    tf.keras.Model: The constructed and compiled TensorFlow model.
    """
    # Initializing the GRU model
    regressorGRU = Sequential()

    # Adding GRU layers with dropout regularization
    regressorGRU.add(GRU(units=50, return_sequences=True, input_shape=input_shape, activation='tanh'))
    regressorGRU.add(Dropout(0.2))

    regressorGRU.add(GRU(units=50, return_sequences=True, activation='tanh'))
    regressorGRU.add(GRU(units=50, return_sequences=True, activation='tanh'))
    regressorGRU.add(GRU(units=50, activation='tanh'))  # Last GRU layer does not return sequences

    # Adding the output layer with ReLU activation (outputs stay non-negative, matching the scaled price range)
    regressorGRU.add(Dense(units=1, activation='relu'))

    # Compiling the GRU with the SGD optimizer
    regressorGRU.compile(optimizer=SGD(learning_rate=0.01, momentum=0.9, nesterov=False),
                         loss='mean_squared_error')

    return regressorGRU


def download_data(stock, start_date, end_date):
    """Downloads historical OHLCV data for a ticker from Yahoo Finance via yfinance."""
    data = yf.download(stock, start=start_date, end=end_date)
    return data


def prepare_data(data):
    """Scales a 1-D price array into the (0, 1) range and returns it with the fitted scaler."""
    scaler = MinMaxScaler(feature_range=(0, 1))
    data_scaled = scaler.fit_transform(data.reshape(-1, 1))
    return data_scaled, scaler


def create_datasets(data_scaled, look_back=50):
    """Builds sliding-window samples: each row of X holds `look_back` consecutive values and y is the value that follows."""
    X, y = [], []
    for i in range(look_back, len(data_scaled)):
        X.append(data_scaled[i-look_back:i, 0])
        y.append(data_scaled[i, 0])
    X, y = np.array(X), np.array(y)
    X = np.reshape(X, (X.shape[0], X.shape[1], 1))
    return X, y

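# Illustration (hypothetical values): with look_back=3, a scaled series
# [s0, s1, s2, s3, s4] yields X = [[s0, s1, s2], [s1, s2, s3]] and y = [s3, s4];
# X is then reshaped to (samples, timesteps, 1) as Keras recurrent layers expect.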

def plot_predictions(train_data, test_data, y_train_pred, y_test_pred, model_name, look_back=50):
    """Plots the training and test series together with the model's predictions on each."""
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=train_data.index, y=train_data.values.flatten(), mode='lines', name='Training Data'))
    fig.add_trace(go.Scatter(x=test_data.index, y=test_data.values.flatten(), mode='lines', name='Test Data'))
    # Predictions start look_back points into each series, matching create_datasets
    fig.add_trace(go.Scatter(x=train_data.index[look_back:], y=y_train_pred, mode='lines', name='Train Predictions'))
    fig.add_trace(go.Scatter(x=test_data.index[look_back:], y=y_test_pred, mode='lines', name='Test Predictions'))
    fig.update_layout(title=f'{model_name} Predictions', xaxis_title='Date', yaxis_title='Stock Price')
    st.plotly_chart(fig)

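# Hypothetical end-to-end sketch showing how the helpers above fit together.
# The ticker, date range, and training settings are illustrative assumptions,
# and the function is not called anywhere in the app.
def example_pipeline(stock: str = "AAPL") -> None:
    data = download_data(stock, dt.date(2020, 1, 1), dt.date(2024, 1, 1))
    close = data['Close']

    # Hold out the last 20% of the series as the test set
    split = int(len(close) * 0.8)
    train_data, test_data = close[:split], close[split:]

    # Fit the scaler on the training data only, then apply it to the test data
    train_scaled, scaler = prepare_data(np.asarray(train_data))
    test_scaled = scaler.transform(np.asarray(test_data).reshape(-1, 1))

    X_train, y_train = create_datasets(train_scaled)
    X_test, _ = create_datasets(test_scaled)

    model = create_lstm_model((X_train.shape[1], 1))
    model.fit(X_train, y_train, epochs=5, batch_size=32, verbose=0)

    # Map the scaled predictions back to price units before plotting
    y_train_pred = scaler.inverse_transform(model.predict(X_train)).flatten()
    y_test_pred = scaler.inverse_transform(model.predict(X_test)).flatten()

    plot_predictions(train_data, test_data, y_train_pred, y_test_pred, "LSTM")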

def plot_monte_carlo_forecasts(data: pd.DataFrame, n_futures: int, n_samples: int, mean_return: float, std_dev: float):
    """
    Simulates future stock price paths using the Monte Carlo method and visualizes the results using Plotly in Streamlit.

    Args:
    data (pd.DataFrame): Stock data containing at least the 'Close' prices.
    n_futures (int): Number of business days in the future to simulate.
    n_samples (int): Number of simulation paths to generate.
    mean_return (float): Mean of the historical daily returns used to draw simulated returns.
    std_dev (float): Standard deviation of the historical daily returns.

    Outputs a Plotly graph in Streamlit displaying the stock's closing price and the simulated paths.
    """
    # Extract closing prices
    closing_prices = data['Close']
    
    # The mean and standard deviation of the daily returns are supplied by the
    # caller rather than recomputed here
    
    # Last closing price to anchor the simulation
    last_close = closing_prices.iloc[-1]
    
    # Prepare figure
    fig = go.Figure()
    
    # Add historical closing price line
    fig.add_trace(go.Scatter(x=closing_prices.index, y=closing_prices, mode='lines', name='Historical Closing Price'))
    
    # Generating Monte Carlo simulations
    for _ in range(n_samples):
        # Draw daily returns from a normal distribution and compound them
        # forward from the last observed close
        future_returns = np.random.normal(mean_return, std_dev, n_futures)
        future_prices = last_close * np.cumprod(future_returns + 1)

        # Plot this simulated path on future business days; hide it from the
        # legend so there is not one legend entry per path
        future_dates = pd.date_range(start=closing_prices.index[-1], periods=n_futures + 1, freq='B')[1:]
        fig.add_trace(go.Scatter(x=future_dates, y=future_prices, mode='lines', opacity=0.5, showlegend=False))
    
    # Updating the layout of the plot
    fig.update_layout(
        title=f'Monte Carlo Simulations for Next {n_futures} Days',
        xaxis_title='Date',
        yaxis_title='Simulated Stock Price',
        showlegend=True,
        legend_title="Legend"
    )
    
    # Display the plot in Streamlit
    st.plotly_chart(fig, use_container_width=True)
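# Hypothetical usage sketch (assumed parameters, not wired into the app): the
# historical daily-return statistics are computed by the caller and passed in,
# matching the signature above.
#
# daily_returns = data['Close'].pct_change().dropna()
# plot_monte_carlo_forecasts(
#     data,
#     n_futures=30,
#     n_samples=100,
#     mean_return=float(daily_returns.mean()),
#     std_dev=float(daily_returns.std()),
# )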