{ "cells": [ { "cell_type": "code", "execution_count": 85, "metadata": {}, "outputs": [], "source": [ "import pandas as pd \n", "from datetime import datetime \n", "from datetime import date\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pandas as pd\n", "from keras.models import Sequential\n", "from keras.layers import LSTM, Dense\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.preprocessing import MinMaxScaler,StandardScaler\n", "from keras.callbacks import ModelCheckpoint\n", "\n", "dataPATH = r\"C:\\Users\\levim\\OneDrive\\Documents\\MastersAI_ES\\TeamProject-5ARIP10\\smart-buildings\\Data\"\n", "# all_data = pd.read_csv(dataPATH + r\"\\long_merge.csv\")\n", "all_data = pd.read_csv(dataPATH + r\"\\extended_energy_data.csv\")\n", "interpolated_data = pd.read_csv(dataPATH + r\"\\interpolated_energy_data.csv\", index_col=0)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Prepar energy data set with extended features\n", "feature_list = ['date', 'hvac_N', 'hvac_S', 'air_temp_set_1', 'solar_radiation_set_1']\n", "extended_energy_data = all_data[feature_list]\n", "\n", "extended_energy_data['date'] = pd.to_datetime(extended_energy_data['date'])\n", "extended_energy_data.set_index('date', inplace=True)\n", "\n", "eed_15m = extended_energy_data.resample('15T').mean()\n", "eed_1h = extended_energy_data.resample('60T').mean()\n", "\n", "eed_15m = eed_15m.reset_index(drop=False)\n", "eed_1h = eed_1h.reset_index(drop=False)\n", "\n", "window_size = 4*4 # 4 hours\n", "eed_15m_avg = eed_15m.copy()\n", "eed_15m_avg['hvac_N'] = eed_15m['hvac_N'].rolling(window=window_size).mean()\n", "eed_15m_avg['hvac_S'] = eed_15m['hvac_S'].rolling(window=window_size).mean()\n", "\n", "window_size = 4 # 4 hours\n", "eed_1h_avg = eed_1h.copy()\n", "eed_1h_avg['hvac_N'] = eed_1h['hvac_N'].rolling(window=window_size).mean()\n", "eed_1h_avg['hvac_S'] = 
eed_1h['hvac_S'].rolling(window=window_size).mean()\n", "\n", "eed_15m.head()" ] }, { "cell_type": "code", "execution_count": 86, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datehvac_Nhvac_Sday_of_weekair_temp_set_1solar_radiation_set_1
02018-01-02 00:00:0038.22500026.4000114.955087.4450
12018-01-02 01:00:0038.29750121.1750114.21252.8675
22018-01-02 02:00:0038.07250021.7225114.27000.0925
32018-01-02 03:00:0039.14750021.7000114.13750.1175
42018-01-02 04:00:0038.17250021.6250113.98500.0725
\n", "
" ], "text/plain": [ " date hvac_N hvac_S day_of_week air_temp_set_1 \\\n", "0 2018-01-02 00:00:00 38.225000 26.4000 1 14.9550 \n", "1 2018-01-02 01:00:00 38.297501 21.1750 1 14.2125 \n", "2 2018-01-02 02:00:00 38.072500 21.7225 1 14.2700 \n", "3 2018-01-02 03:00:00 39.147500 21.7000 1 14.1375 \n", "4 2018-01-02 04:00:00 38.172500 21.6250 1 13.9850 \n", "\n", " solar_radiation_set_1 \n", "0 87.4450 \n", "1 2.8675 \n", "2 0.0925 \n", "3 0.1175 \n", "4 0.0725 " ] }, "execution_count": 86, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# energy_data = pd.read_csv(dataPATH + r\"\\extended_energy_data.csv\")\n", "# energy_data = eed_15m\n", "# energy_data = eed_15m_avg\n", "energy_data = interpolated_data.copy()\n", "energy_data = energy_data.reset_index()\n", "\n", "# Convert the date column to datetime\n", "energy_data['date'] = pd.to_datetime(energy_data['date'], format = \"%Y-%m-%d %H:%M:%S\")\n", "\n", "energy_data.insert(3, 'day_of_week', energy_data['date'].dt.weekday)\n", "# Filter the data for the year 2019\n", "df_filtered = energy_data[ (energy_data.date.dt.date >date(2018, 1, 1)) & (energy_data.date.dt.date< date(2021, 1, 1))]\n", "\n", "# Check for NA values in the DataFrame\n", "if df_filtered.isna().any().any():\n", " print(\"There are NA values in the DataFrame columns.\")\n", "\n", "df_filtered.head()" ] }, { "cell_type": "code", "execution_count": 88, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[]" ] }, "execution_count": 88, "metadata": {}, "output_type": "execute_result" } ], "source": [ "testdataset_df = df_filtered[(df_filtered.date.dt.date >=date(2019, 3, 1)) & (df_filtered.date.dt.date <= date(2019, 6, 1))]\n", "\n", "traindataset_df = df_filtered[ (df_filtered.date.dt.date date(2019, 6, 1))]\n", "\n", "testdataset = testdataset_df.drop(columns=[\"date\"]).values\n", "\n", "traindataset = traindataset_df.drop(columns=[\"date\"]).values\n", "\n", "columns_with_na = 
traindataset_df.columns[traindataset_df.isna().any()].tolist()\n", "columns_with_na" ] }, { "cell_type": "code", "execution_count": 89, "metadata": {}, "outputs": [], "source": [ "traindataset = traindataset.astype('float32')\n", "testdataset = testdataset.astype('float32')\n", "\n", "mintest = np.min(testdataset[:,0:2])\n", "maxtest = np.max(testdataset[:,0:2])\n", "\n", "scaler = MinMaxScaler(feature_range=(0, 1))\n", "traindataset = scaler.fit_transform(traindataset)\n", "testdataset = scaler.transform(testdataset)" ] }, { "cell_type": "code", "execution_count": 104, "metadata": {}, "outputs": [], "source": [ "def create_model(X_train, time_step, no_outputs):\n", " model = Sequential()\n", " model.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))\n", " model.add(LSTM(units=50, return_sequences=True))\n", " model.add(LSTM(units=time_step*no_outputs))\n", " model.add(Dense(units=time_step*no_outputs))\n", "\n", " model.compile(optimizer='adam', loss='mean_squared_error')\n", "\n", " return model" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Model 1 (continous predictions)" ] }, { "cell_type": "code", "execution_count": 94, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/5\n", "370/371 [============================>.] - ETA: 0s - loss: 0.0224\n", "Epoch 1: val_loss improved from inf to 0.01162, saving model to lstm_energy_01.keras\n", "371/371 [==============================] - 11s 15ms/step - loss: 0.0224 - val_loss: 0.0116\n", "Epoch 2/5\n", "368/371 [============================>.] - ETA: 0s - loss: 0.0139\n", "Epoch 2: val_loss improved from 0.01162 to 0.01146, saving model to lstm_energy_01.keras\n", "371/371 [==============================] - 5s 12ms/step - loss: 0.0139 - val_loss: 0.0115\n", "Epoch 3/5\n", "370/371 [============================>.] 
- ETA: 0s - loss: 0.0125\n", "Epoch 3: val_loss improved from 0.01146 to 0.01132, saving model to lstm_energy_01.keras\n", "371/371 [==============================] - 5s 13ms/step - loss: 0.0125 - val_loss: 0.0113\n", "Epoch 4/5\n", "367/371 [============================>.] - ETA: 0s - loss: 0.0119\n", "Epoch 4: val_loss improved from 0.01132 to 0.01007, saving model to lstm_energy_01.keras\n", "371/371 [==============================] - 5s 13ms/step - loss: 0.0119 - val_loss: 0.0101\n", "Epoch 5/5\n", "371/371 [==============================] - ETA: 0s - loss: 0.0117\n", "Epoch 5: val_loss did not improve from 0.01007\n", "371/371 [==============================] - 5s 13ms/step - loss: 0.0117 - val_loss: 0.0101\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 94, "metadata": {}, "output_type": "execute_result" } ], "source": [ "train,test = traindataset,testdataset\n", "steps_in_past = 7 \n", "time_step = 24\n", "no_inputs = 5\n", "no_outputs = 2\n", "def create_dataset(dataset,time_step):\n", " x = [[] for _ in range(no_inputs)] \n", " Y = [[] for _ in range(no_outputs)]\n", " for i in range(time_step * steps_in_past, len(dataset) - time_step * steps_in_past): # -time_step is to ensure that the Y value has enough values\n", " for j in range(no_inputs):\n", " x[j].append(dataset[(i-time_step*steps_in_past):i, j])\n", " for j in range(no_outputs):\n", " Y[j].append(dataset[i:i+time_step, j]) \n", " x = [np.array(feature_list) for feature_list in x]\n", " x = np.stack(x,axis=1)\n", " Y = [np.array(feature_list) for feature_list in Y] \n", " Y = np.stack(Y,axis=1)\n", " Y = np.reshape(Y, (Y.shape[0], time_step*no_outputs))\n", " return x, Y\n", "\n", "\n", "X_train, y_train = create_dataset(train, time_step)\n", "X_test, y_test = create_dataset(test, time_step)\n", "\n", "model = create_model(X_train, time_step, no_outputs)\n", "checkpoint_path = \"lstm_energy_01.keras\"\n", "checkpoint_callback = ModelCheckpoint(filepath=checkpoint_path, 
monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n", "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=5, batch_size=64, verbose=1, callbacks=[checkpoint_callback])" ] }, { "cell_type": "code", "execution_count": 95, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "60/60 [==============================] - 0s 4ms/step - loss: 0.0101\n", "60/60 [==============================] - 1s 3ms/step\n", "Loss: 0.010141444392502308\n" ] } ], "source": [ "loss = model.evaluate(X_test, y_test)\n", "test_predict1 = model.predict(X_test)\n", "print(\"Loss: \", loss)\n", "# Converting values back to the original scale\n", "scalerBack = MinMaxScaler(feature_range=(mintest, maxtest))\n", "test_predict2 = scalerBack.fit_transform(test_predict1)\n", "y_test1 = scalerBack.fit_transform(y_test)\n" ] }, { "cell_type": "code", "execution_count": 100, "metadata": {}, "outputs": [], "source": [ "%matplotlib qt\n", "\n", "# Create a 3x3 grid of subplots\n", "fig, axes = plt.subplots(3, 3, figsize=(10, 10))\n", "\n", "var = 15\n", "# Loop over the value index\n", "for i, ax in enumerate(axes.flat):\n", " # Plot your data or perform any other operations\n", " ax.plot(y_test1[var+i*9,0:time_step], label='Original Testing Data', color='blue')\n", " ax.plot(test_predict2[var+i*9,0:time_step], label='Predicted Testing Data', color='red',alpha=0.8)\n", " # ax.set_title(f'Plot {i+1}')\n", " ax.set_title('Testing Data - Predicted vs Actual')\n", " ax.set_xlabel('Time [hours]')\n", " ax.set_ylabel('Energy Consumption [kW]') \n", " ax.legend()\n", "\n", "# Adjust the spacing between subplots\n", "plt.tight_layout()\n", "\n", "# Show the plot\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Autoregressive prediction\n", "X_pred = testdataset.copy()\n", "for i in range(steps_in_past,steps_in_past*2):\n", " xin = X_pred[i-steps_in_past:i].reshape((1, steps_in_past, 
no_outputs)) \n", " X_pred[i] = model.predict(xin, verbose = 0)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Plot prediction vs actual for test data\n", "plt.figure()\n", "plt.plot(X_pred[steps_in_past:steps_in_past*2,0],':',label='LSTM')\n", "plt.plot(testdataset[steps_in_past:steps_in_past*2,0],'--',label='Actual')\n", "plt.legend()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Model 2 (Predicting once per day)" ] }, { "cell_type": "code", "execution_count": 105, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/20\n", "13/16 [=======================>......] - ETA: 0s - loss: 0.0893\n", "Epoch 1: val_loss improved from inf to 0.02898, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 6s 100ms/step - loss: 0.0820 - val_loss: 0.0290\n", "Epoch 2/20\n", "13/16 [=======================>......] - ETA: 0s - loss: 0.0316\n", "Epoch 2: val_loss improved from 0.02898 to 0.02435, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 20ms/step - loss: 0.0310 - val_loss: 0.0243\n", "Epoch 3/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0242\n", "Epoch 3: val_loss improved from 0.02435 to 0.01740, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 24ms/step - loss: 0.0242 - val_loss: 0.0174\n", "Epoch 4/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0213\n", "Epoch 4: val_loss improved from 0.01740 to 0.01566, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 25ms/step - loss: 0.0213 - val_loss: 0.0157\n", "Epoch 5/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0189\n", "Epoch 5: val_loss improved from 0.01566 to 0.01483, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 25ms/step - loss: 0.0189 - val_loss: 0.0148\n", "Epoch 
6/20\n", "13/16 [=======================>......] - ETA: 0s - loss: 0.0184\n", "Epoch 6: val_loss improved from 0.01483 to 0.01359, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 25ms/step - loss: 0.0182 - val_loss: 0.0136\n", "Epoch 7/20\n", "14/16 [=========================>....] - ETA: 0s - loss: 0.0177\n", "Epoch 7: val_loss improved from 0.01359 to 0.01285, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 22ms/step - loss: 0.0175 - val_loss: 0.0128\n", "Epoch 8/20\n", "13/16 [=======================>......] - ETA: 0s - loss: 0.0168\n", "Epoch 8: val_loss did not improve from 0.01285\n", "16/16 [==============================] - 0s 20ms/step - loss: 0.0171 - val_loss: 0.0148\n", "Epoch 9/20\n", "14/16 [=========================>....] - ETA: 0s - loss: 0.0178\n", "Epoch 9: val_loss did not improve from 0.01285\n", "16/16 [==============================] - 0s 20ms/step - loss: 0.0175 - val_loss: 0.0143\n", "Epoch 10/20\n", "15/16 [===========================>..] - ETA: 0s - loss: 0.0165\n", "Epoch 10: val_loss improved from 0.01285 to 0.01277, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 22ms/step - loss: 0.0166 - val_loss: 0.0128\n", "Epoch 11/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0164\n", "Epoch 11: val_loss did not improve from 0.01277\n", "16/16 [==============================] - 0s 23ms/step - loss: 0.0164 - val_loss: 0.0139\n", "Epoch 12/20\n", "15/16 [===========================>..] - ETA: 0s - loss: 0.0162\n", "Epoch 12: val_loss improved from 0.01277 to 0.01235, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 1s 33ms/step - loss: 0.0162 - val_loss: 0.0124\n", "Epoch 13/20\n", "15/16 [===========================>..] 
- ETA: 0s - loss: 0.0154\n", "Epoch 13: val_loss did not improve from 0.01235\n", "16/16 [==============================] - 0s 20ms/step - loss: 0.0153 - val_loss: 0.0131\n", "Epoch 14/20\n", "13/16 [=======================>......] - ETA: 0s - loss: 0.0156\n", "Epoch 14: val_loss did not improve from 0.01235\n", "16/16 [==============================] - 0s 21ms/step - loss: 0.0160 - val_loss: 0.0136\n", "Epoch 15/20\n", "13/16 [=======================>......] - ETA: 0s - loss: 0.0167\n", "Epoch 15: val_loss did not improve from 0.01235\n", "16/16 [==============================] - 0s 20ms/step - loss: 0.0164 - val_loss: 0.0125\n", "Epoch 16/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0149\n", "Epoch 16: val_loss improved from 0.01235 to 0.01134, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 25ms/step - loss: 0.0149 - val_loss: 0.0113\n", "Epoch 17/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0147\n", "Epoch 17: val_loss did not improve from 0.01134\n", "16/16 [==============================] - 0s 21ms/step - loss: 0.0147 - val_loss: 0.0125\n", "Epoch 18/20\n", "15/16 [===========================>..] - ETA: 0s - loss: 0.0143\n", "Epoch 18: val_loss did not improve from 0.01134\n", "16/16 [==============================] - 0s 23ms/step - loss: 0.0143 - val_loss: 0.0116\n", "Epoch 19/20\n", "15/16 [===========================>..] 
### Model 2 (predicting once per day) ----------------------------------------
train, test = traindataset, testdataset
steps_in_past = 7    # whole days of history per sample
time_step = 24       # hours per day
no_inputs = 5
no_outputs = 2


def create_dataset(dataset, time_step):
    """Daily sliding window over a scaled (rows, features) array.

    X[i] = the previous steps_in_past whole days of all no_inputs columns;
    Y[i] = the following day of the first no_outputs columns, flattened as
    [out0 h0..h23 | out1 h0..h23].
    """
    x = [[] for _ in range(no_inputs)]
    Y = [[] for _ in range(no_outputs)]
    # FIX(review): use time_step rather than the magic constant 24 so the
    # day length stays consistent with the rest of the cell.
    for i in range(steps_in_past, round(len(dataset) / time_step) - steps_in_past):
        for j in range(no_inputs):
            x[j].append(dataset[(i - steps_in_past) * time_step:i * time_step, j])
        for j in range(no_outputs):
            Y[j].append(dataset[i * time_step:(i + 1) * time_step, j])
    x = np.stack([np.array(feat) for feat in x], axis=1)
    Y = np.stack([np.array(feat) for feat in Y], axis=1)
    Y = np.reshape(Y, (Y.shape[0], time_step * no_outputs))
    return x, Y


X_train, y_train = create_dataset(train, time_step)
X_test, y_test = create_dataset(test, time_step)

model2 = create_model(X_train, time_step, no_outputs)
# NOTE(review): this path overwrites model 1's checkpoint file; use a
# distinct name (e.g. lstm_energy_02.keras) if both checkpoints are needed.
checkpoint_path = "lstm_energy_01.keras"
checkpoint_callback = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss',
                                      verbose=1, save_best_only=True, mode='min')
model2.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20,
           batch_size=64, verbose=1, callbacks=[checkpoint_callback])
"execution_count": 106, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "3/3 [==============================] - 0s 5ms/step - loss: 0.0109\n", "3/3 [==============================] - 1s 5ms/step\n", "Loss: 0.010930849239230156\n" ] } ], "source": [ "loss = model2.evaluate(X_test, y_test)\n", "test_predict1 = model2.predict(X_test)\n", "print(\"Loss: \", loss)\n", "# Converting values back to the original scale\n", "scalerBack = MinMaxScaler(feature_range=(mintest, maxtest))\n", "test_predict2 = scalerBack.fit_transform(test_predict1)\n", "y_test1 = scalerBack.fit_transform(y_test)\n" ] }, { "cell_type": "code", "execution_count": 107, "metadata": {}, "outputs": [], "source": [ "%matplotlib qt\n", "\n", "# Create a 3x3 grid of subplots\n", "fig, axes = plt.subplots(3, 3, figsize=(10, 10))\n", "\n", "var = 1\n", "# Loop over the value index\n", "for i, ax in enumerate(axes.flat):\n", " # Plot your data or perform any other operations\n", " ax.plot(y_test1[var+i,0:time_step], label='Original Testing Data', color='blue')\n", " ax.plot(test_predict2[var+i,0:time_step], label='Predicted Testing Data', color='red',alpha=0.8)\n", " # ax.set_title(f'Plot {i+1}')\n", " ax.set_title('Testing Data - Predicted vs Actual')\n", " ax.set_xlabel('Time [hours]')\n", " ax.set_ylabel('Energy Consumption [kW]') \n", " ax.legend()\n", "\n", "# Adjust the spacing between subplots\n", "plt.tight_layout()\n", "\n", "# Show the plot\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Model 3 predicting based on past Mondays" ] }, { "cell_type": "code", "execution_count": 140, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0888\n", "Epoch 1: val_loss improved from inf to 0.02289, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 7s 109ms/step - loss: 0.0888 - val_loss: 0.0229\n", 
"Epoch 2/20\n", "13/16 [=======================>......] - ETA: 0s - loss: 0.0288\n", "Epoch 2: val_loss improved from 0.02289 to 0.01442, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 25ms/step - loss: 0.0276 - val_loss: 0.0144\n", "Epoch 3/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0197\n", "Epoch 3: val_loss improved from 0.01442 to 0.01279, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 25ms/step - loss: 0.0197 - val_loss: 0.0128\n", "Epoch 4/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0186\n", "Epoch 4: val_loss improved from 0.01279 to 0.01133, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 26ms/step - loss: 0.0186 - val_loss: 0.0113\n", "Epoch 5/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0183\n", "Epoch 5: val_loss improved from 0.01133 to 0.01111, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 22ms/step - loss: 0.0183 - val_loss: 0.0111\n", "Epoch 6/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0183\n", "Epoch 6: val_loss did not improve from 0.01111\n", "16/16 [==============================] - 0s 24ms/step - loss: 0.0183 - val_loss: 0.0113\n", "Epoch 7/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0177\n", "Epoch 7: val_loss did not improve from 0.01111\n", "16/16 [==============================] - 0s 23ms/step - loss: 0.0177 - val_loss: 0.0112\n", "Epoch 8/20\n", "15/16 [===========================>..] 
- ETA: 0s - loss: 0.0176\n", "Epoch 8: val_loss improved from 0.01111 to 0.01089, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 22ms/step - loss: 0.0177 - val_loss: 0.0109\n", "Epoch 9/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0170\n", "Epoch 9: val_loss improved from 0.01089 to 0.01028, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 27ms/step - loss: 0.0170 - val_loss: 0.0103\n", "Epoch 10/20\n", "13/16 [=======================>......] - ETA: 0s - loss: 0.0164\n", "Epoch 10: val_loss improved from 0.01028 to 0.00991, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 23ms/step - loss: 0.0164 - val_loss: 0.0099\n", "Epoch 11/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0162\n", "Epoch 11: val_loss improved from 0.00991 to 0.00951, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 25ms/step - loss: 0.0162 - val_loss: 0.0095\n", "Epoch 12/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0156\n", "Epoch 12: val_loss improved from 0.00951 to 0.00937, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 27ms/step - loss: 0.0156 - val_loss: 0.0094\n", "Epoch 13/20\n", "13/16 [=======================>......] - ETA: 0s - loss: 0.0151\n", "Epoch 13: val_loss improved from 0.00937 to 0.00884, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 22ms/step - loss: 0.0151 - val_loss: 0.0088\n", "Epoch 14/20\n", "15/16 [===========================>..] - ETA: 0s - loss: 0.0151\n", "Epoch 14: val_loss improved from 0.00884 to 0.00858, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 27ms/step - loss: 0.0150 - val_loss: 0.0086\n", "Epoch 15/20\n", "13/16 [=======================>......] 
- ETA: 0s - loss: 0.0140\n", "Epoch 15: val_loss improved from 0.00858 to 0.00820, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 24ms/step - loss: 0.0141 - val_loss: 0.0082\n", "Epoch 16/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0138\n", "Epoch 16: val_loss did not improve from 0.00820\n", "16/16 [==============================] - 0s 22ms/step - loss: 0.0138 - val_loss: 0.0083\n", "Epoch 17/20\n", "15/16 [===========================>..] - ETA: 0s - loss: 0.0134\n", "Epoch 17: val_loss improved from 0.00820 to 0.00776, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 1s 34ms/step - loss: 0.0133 - val_loss: 0.0078\n", "Epoch 18/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0128\n", "Epoch 18: val_loss improved from 0.00776 to 0.00728, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 27ms/step - loss: 0.0128 - val_loss: 0.0073\n", "Epoch 19/20\n", "16/16 [==============================] - ETA: 0s - loss: 0.0119\n", "Epoch 19: val_loss improved from 0.00728 to 0.00668, saving model to lstm_energy_01.keras\n", "16/16 [==============================] - 0s 27ms/step - loss: 0.0119 - val_loss: 0.0067\n", "Epoch 20/20\n", "13/16 [=======================>......] 
### Model 3 (predicting based on past same-weekday days) ---------------------
train, test = traindataset, testdataset
days_in_past = 3     # number of same-weekday lags to look back
time_step = 24       # hours per day
no_inputs = 2
no_outputs = 2


def create_dataset(dataset, time_step):
    """Same-weekday window over a scaled (rows, features) array.

    For each target day i, X holds days i, i-7, ... i-7*(days_in_past-1)
    for each of the first no_inputs columns (no_inputs*days_in_past
    channels of length time_step); Y is day i of the first no_outputs
    columns, flattened as [out0 h0..h23 | out1 h0..h23].
    """
    x = [[] for _ in range(no_inputs * days_in_past)]
    Y = [[] for _ in range(no_outputs)]
    for i in range(days_in_past * 7, round(len(dataset) / time_step) - days_in_past):
        for k in range(no_inputs * days_in_past):
            # FIX(review): the original split (`if k > 3: l = k - 4`) gave
            # 4 weekly lags of column 0 but only 2 of column 1; with
            # days_in_past lags per column the mapping is:
            j = k // days_in_past   # input column index
            l = k % days_in_past    # weekly lag (0 = target week's day)
            x[k].append(dataset[(i - l * 7) * time_step:(i - l * 7 + 1) * time_step, j])

        for j in range(no_outputs):
            Y[j].append(dataset[i * time_step:(i + 1) * time_step, j])
    x = np.stack([np.array(feat) for feat in x], axis=1)
    Y = np.stack([np.array(feat) for feat in Y], axis=1)
    Y = np.reshape(Y, (Y.shape[0], time_step * no_outputs))
    return x, Y


X_train, y_train = create_dataset(train, time_step)
X_test, y_test = create_dataset(test, time_step)

model3 = create_model(X_train, time_step, no_outputs)
# NOTE(review): overwrites the earlier models' checkpoint file.
checkpoint_path = "lstm_energy_01.keras"
checkpoint_callback = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss',
                                      verbose=1, save_best_only=True, mode='min')
model3.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20,
           batch_size=64, verbose=1, callbacks=[checkpoint_callback])
{ "name": "stdout", "output_type": "stream", "text": [ "3/3 [==============================] - 0s 5ms/step - loss: 0.0064\n", "3/3 [==============================] - 1s 4ms/step\n", "Loss: 0.00635459553450346\n" ] } ], "source": [ "loss = model3.evaluate(X_test, y_test)\n", "test_predict1 = model3.predict(X_test)\n", "print(\"Loss: \", loss)\n", "# Converting values back to the original scale\n", "scalerBack = MinMaxScaler(feature_range=(mintest, maxtest))\n", "test_predict2 = scalerBack.fit_transform(test_predict1)\n", "y_test1 = scalerBack.fit_transform(y_test)\n" ] }, { "cell_type": "code", "execution_count": 143, "metadata": {}, "outputs": [], "source": [ "%matplotlib qt\n", "\n", "# Create a 3x3 grid of subplots\n", "fig, axes = plt.subplots(3, 3, figsize=(10, 10))\n", "\n", "var = 1\n", "# Loop over the value index\n", "for i, ax in enumerate(axes.flat):\n", " # Plot your data or perform any other operations\n", " ax.plot(y_test1[var+i,0:time_step], label='Original Testing Data', color='blue')\n", " ax.plot(test_predict2[var+i,0:time_step], label='Predicted Testing Data', color='red',alpha=0.8)\n", " # ax.set_title(f'Plot {i+1}')\n", " ax.set_title('Testing Data - Predicted vs Actual')\n", " ax.set_xlabel('Time [hours]')\n", " ax.set_ylabel('Energy Consumption [kW]') \n", " ax.legend()\n", "\n", "# Adjust the spacing between subplots\n", "plt.tight_layout()\n", "\n", "# Show the plot\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "experiments", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.15" } }, "nbformat": 4, "nbformat_minor": 2 
}