levimohle committed on
Commit
21bf222
1 Parent(s): b98571e

added training on only similar days
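
The new cell trains the LSTM on "similar days": instead of the immediately preceding days, each target day's inputs are the same weekday from previous weeks (look-backs in multiples of 7 days, via (i-k*7)*time_step in create_dataset). A minimal sketch of that windowing for a single hourly series follows; the name build_similar_day_windows and the single-column simplification are assumptions for illustration, whereas the notebook version uses two input columns and also feeds the target day's first column.

import numpy as np

def build_similar_day_windows(series, days_in_past=3, time_step=24):
    # Illustrative only: for each target day, gather the same weekday from
    # the previous `days_in_past` weeks (7-day strides) as model inputs.
    n_days = len(series) // time_step
    X, Y = [], []
    # Start late enough that every 7-day look-back stays inside the series.
    for day in range(days_in_past * 7, n_days):
        window = [series[(day - k * 7) * time_step:(day - k * 7 + 1) * time_step]
                  for k in range(1, days_in_past + 1)]  # 1, 2, 3 weeks back
        X.append(window)
        Y.append(series[day * time_step:(day + 1) * time_step])
    return np.array(X), np.array(Y)

# Example: two years of hourly data; inputs are the same weekday of the three
# preceding weeks, the target is the full current day.
hourly = np.random.rand(730 * 24)
X, Y = build_similar_day_windows(hourly)
print(X.shape, Y.shape)  # (709, 3, 24) (709, 24)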

Files changed (1)
  1. EnergyLSTM/lstm_energy.ipynb +205 -1
EnergyLSTM/lstm_energy.ipynb CHANGED
@@ -610,8 +610,212 @@
610
  ]
611
  },
612
  {
613
- "cell_type": "markdown",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
614
  "metadata": {},
 
 
 
 
 
 
 
 
615
  "source": []
616
  }
617
  ],
 
610
  ]
611
  },
612
  {
613
+ "cell_type": "code",
614
+ "execution_count": 140,
615
+ "metadata": {},
616
+ "outputs": [
617
+ {
618
+ "name": "stdout",
619
+ "output_type": "stream",
620
+ "text": [
621
+ "Epoch 1/20\n",
622
+ "16/16 [==============================] - ETA: 0s - loss: 0.0888\n",
623
+ "Epoch 1: val_loss improved from inf to 0.02289, saving model to lstm_energy_01.keras\n",
624
+ "16/16 [==============================] - 7s 109ms/step - loss: 0.0888 - val_loss: 0.0229\n",
625
+ "Epoch 2/20\n",
626
+ "13/16 [=======================>......] - ETA: 0s - loss: 0.0288\n",
627
+ "Epoch 2: val_loss improved from 0.02289 to 0.01442, saving model to lstm_energy_01.keras\n",
628
+ "16/16 [==============================] - 0s 25ms/step - loss: 0.0276 - val_loss: 0.0144\n",
629
+ "Epoch 3/20\n",
630
+ "16/16 [==============================] - ETA: 0s - loss: 0.0197\n",
631
+ "Epoch 3: val_loss improved from 0.01442 to 0.01279, saving model to lstm_energy_01.keras\n",
632
+ "16/16 [==============================] - 0s 25ms/step - loss: 0.0197 - val_loss: 0.0128\n",
633
+ "Epoch 4/20\n",
634
+ "16/16 [==============================] - ETA: 0s - loss: 0.0186\n",
635
+ "Epoch 4: val_loss improved from 0.01279 to 0.01133, saving model to lstm_energy_01.keras\n",
636
+ "16/16 [==============================] - 0s 26ms/step - loss: 0.0186 - val_loss: 0.0113\n",
637
+ "Epoch 5/20\n",
638
+ "16/16 [==============================] - ETA: 0s - loss: 0.0183\n",
639
+ "Epoch 5: val_loss improved from 0.01133 to 0.01111, saving model to lstm_energy_01.keras\n",
640
+ "16/16 [==============================] - 0s 22ms/step - loss: 0.0183 - val_loss: 0.0111\n",
641
+ "Epoch 6/20\n",
642
+ "16/16 [==============================] - ETA: 0s - loss: 0.0183\n",
643
+ "Epoch 6: val_loss did not improve from 0.01111\n",
644
+ "16/16 [==============================] - 0s 24ms/step - loss: 0.0183 - val_loss: 0.0113\n",
645
+ "Epoch 7/20\n",
646
+ "16/16 [==============================] - ETA: 0s - loss: 0.0177\n",
647
+ "Epoch 7: val_loss did not improve from 0.01111\n",
648
+ "16/16 [==============================] - 0s 23ms/step - loss: 0.0177 - val_loss: 0.0112\n",
649
+ "Epoch 8/20\n",
650
+ "15/16 [===========================>..] - ETA: 0s - loss: 0.0176\n",
651
+ "Epoch 8: val_loss improved from 0.01111 to 0.01089, saving model to lstm_energy_01.keras\n",
652
+ "16/16 [==============================] - 0s 22ms/step - loss: 0.0177 - val_loss: 0.0109\n",
653
+ "Epoch 9/20\n",
654
+ "16/16 [==============================] - ETA: 0s - loss: 0.0170\n",
655
+ "Epoch 9: val_loss improved from 0.01089 to 0.01028, saving model to lstm_energy_01.keras\n",
656
+ "16/16 [==============================] - 0s 27ms/step - loss: 0.0170 - val_loss: 0.0103\n",
657
+ "Epoch 10/20\n",
658
+ "13/16 [=======================>......] - ETA: 0s - loss: 0.0164\n",
659
+ "Epoch 10: val_loss improved from 0.01028 to 0.00991, saving model to lstm_energy_01.keras\n",
660
+ "16/16 [==============================] - 0s 23ms/step - loss: 0.0164 - val_loss: 0.0099\n",
661
+ "Epoch 11/20\n",
662
+ "16/16 [==============================] - ETA: 0s - loss: 0.0162\n",
663
+ "Epoch 11: val_loss improved from 0.00991 to 0.00951, saving model to lstm_energy_01.keras\n",
664
+ "16/16 [==============================] - 0s 25ms/step - loss: 0.0162 - val_loss: 0.0095\n",
665
+ "Epoch 12/20\n",
666
+ "16/16 [==============================] - ETA: 0s - loss: 0.0156\n",
667
+ "Epoch 12: val_loss improved from 0.00951 to 0.00937, saving model to lstm_energy_01.keras\n",
668
+ "16/16 [==============================] - 0s 27ms/step - loss: 0.0156 - val_loss: 0.0094\n",
669
+ "Epoch 13/20\n",
670
+ "13/16 [=======================>......] - ETA: 0s - loss: 0.0151\n",
671
+ "Epoch 13: val_loss improved from 0.00937 to 0.00884, saving model to lstm_energy_01.keras\n",
672
+ "16/16 [==============================] - 0s 22ms/step - loss: 0.0151 - val_loss: 0.0088\n",
673
+ "Epoch 14/20\n",
674
+ "15/16 [===========================>..] - ETA: 0s - loss: 0.0151\n",
675
+ "Epoch 14: val_loss improved from 0.00884 to 0.00858, saving model to lstm_energy_01.keras\n",
676
+ "16/16 [==============================] - 0s 27ms/step - loss: 0.0150 - val_loss: 0.0086\n",
677
+ "Epoch 15/20\n",
678
+ "13/16 [=======================>......] - ETA: 0s - loss: 0.0140\n",
679
+ "Epoch 15: val_loss improved from 0.00858 to 0.00820, saving model to lstm_energy_01.keras\n",
680
+ "16/16 [==============================] - 0s 24ms/step - loss: 0.0141 - val_loss: 0.0082\n",
681
+ "Epoch 16/20\n",
682
+ "16/16 [==============================] - ETA: 0s - loss: 0.0138\n",
683
+ "Epoch 16: val_loss did not improve from 0.00820\n",
684
+ "16/16 [==============================] - 0s 22ms/step - loss: 0.0138 - val_loss: 0.0083\n",
685
+ "Epoch 17/20\n",
686
+ "15/16 [===========================>..] - ETA: 0s - loss: 0.0134\n",
687
+ "Epoch 17: val_loss improved from 0.00820 to 0.00776, saving model to lstm_energy_01.keras\n",
688
+ "16/16 [==============================] - 1s 34ms/step - loss: 0.0133 - val_loss: 0.0078\n",
689
+ "Epoch 18/20\n",
690
+ "16/16 [==============================] - ETA: 0s - loss: 0.0128\n",
691
+ "Epoch 18: val_loss improved from 0.00776 to 0.00728, saving model to lstm_energy_01.keras\n",
692
+ "16/16 [==============================] - 0s 27ms/step - loss: 0.0128 - val_loss: 0.0073\n",
693
+ "Epoch 19/20\n",
694
+ "16/16 [==============================] - ETA: 0s - loss: 0.0119\n",
695
+ "Epoch 19: val_loss improved from 0.00728 to 0.00668, saving model to lstm_energy_01.keras\n",
696
+ "16/16 [==============================] - 0s 27ms/step - loss: 0.0119 - val_loss: 0.0067\n",
697
+ "Epoch 20/20\n",
698
+ "13/16 [=======================>......] - ETA: 0s - loss: 0.0118\n",
699
+ "Epoch 20: val_loss improved from 0.00668 to 0.00635, saving model to lstm_energy_01.keras\n",
700
+ "16/16 [==============================] - 0s 23ms/step - loss: 0.0118 - val_loss: 0.0064\n"
701
+ ]
702
+ },
703
+ {
704
+ "data": {
705
+ "text/plain": [
706
+ "<keras.callbacks.History at 0x1da6976bcd0>"
707
+ ]
708
+ },
709
+ "execution_count": 140,
710
+ "metadata": {},
711
+ "output_type": "execute_result"
712
+ }
713
+ ],
714
+ "source": [
715
+ "train,test = traindataset,testdataset\n",
716
+ "days_in_past = 3 # number of days to look back \n",
717
+ "time_step = 24 # define a day in hours\n",
718
+ "no_inputs = 2\n",
719
+ "no_outputs = 2\n",
720
+ "def create_dataset(dataset,time_step):\n",
721
+ " x = [[] for _ in range(no_inputs*days_in_past)] \n",
722
+ " Y = [[] for _ in range(no_outputs)]\n",
723
+ " for i in range(days_in_past*7, round(len(dataset)/time_step) - days_in_past): # -time_step is to ensure that the Y value has enough values\n",
724
+ " for k in range(no_inputs*days_in_past):\n",
725
+ " if k > 3:\n",
726
+ " j = 1\n",
727
+ " l = k - 4\n",
728
+ " x[k].append(dataset[(i-l*7)*time_step:(i-l*7+1)*time_step, j])\n",
729
+ " else:\n",
730
+ " j = 0\n",
731
+ " x[k].append(dataset[(i-k*7)*time_step:(i-k*7+1)*time_step, j])\n",
732
+ " \n",
733
+ " for j in range(no_outputs):\n",
734
+ " Y[j].append(dataset[i*time_step:(i+1)*time_step, j]) \n",
735
+ " x = [np.array(feature_list) for feature_list in x]\n",
736
+ " x = np.stack(x,axis=1)\n",
737
+ " Y = [np.array(feature_list) for feature_list in Y] \n",
738
+ " Y = np.stack(Y,axis=1)\n",
739
+ " Y = np.reshape(Y, (Y.shape[0], time_step*no_outputs))\n",
740
+ " return x, Y\n",
741
+ "\n",
742
+ "\n",
743
+ "X_train, y_train = create_dataset(train, time_step)\n",
744
+ "X_test, y_test = create_dataset(test, time_step)\n",
745
+ "\n",
746
+ "model3 = create_model(X_train, time_step, no_outputs)\n",
747
+ "checkpoint_path = \"lstm_energy_01.keras\"\n",
748
+ "checkpoint_callback = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n",
749
+ "model3.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=64, verbose=1, callbacks=[checkpoint_callback])"
750
+ ]
751
+ },
752
+ {
753
+ "cell_type": "code",
754
+ "execution_count": 142,
755
+ "metadata": {},
756
+ "outputs": [
757
+ {
758
+ "name": "stdout",
759
+ "output_type": "stream",
760
+ "text": [
761
+ "3/3 [==============================] - 0s 5ms/step - loss: 0.0064\n",
762
+ "3/3 [==============================] - 1s 4ms/step\n",
763
+ "Loss: 0.00635459553450346\n"
764
+ ]
765
+ }
766
+ ],
767
+ "source": [
768
+ "loss = model3.evaluate(X_test, y_test)\n",
769
+ "test_predict1 = model3.predict(X_test)\n",
770
+ "print(\"Loss: \", loss)\n",
771
+ "# Converting values back to the original scale\n",
772
+ "scalerBack = MinMaxScaler(feature_range=(mintest, maxtest))\n",
773
+ "test_predict2 = scalerBack.fit_transform(test_predict1)\n",
774
+ "y_test1 = scalerBack.fit_transform(y_test)\n"
775
+ ]
776
+ },
777
+ {
778
+ "cell_type": "code",
779
+ "execution_count": 143,
780
+ "metadata": {},
781
+ "outputs": [],
782
+ "source": [
783
+ "%matplotlib qt\n",
784
+ "\n",
785
+ "# Create a 3x3 grid of subplots\n",
786
+ "fig, axes = plt.subplots(3, 3, figsize=(10, 10))\n",
787
+ "\n",
788
+ "var = 1\n",
789
+ "# Loop over the value index\n",
790
+ "for i, ax in enumerate(axes.flat):\n",
791
+ " # Plot your data or perform any other operations\n",
792
+ " ax.plot(y_test1[var+i,0:time_step], label='Original Testing Data', color='blue')\n",
793
+ " ax.plot(test_predict2[var+i,0:time_step], label='Predicted Testing Data', color='red',alpha=0.8)\n",
794
+ " # ax.set_title(f'Plot {i+1}')\n",
795
+ " ax.set_title('Testing Data - Predicted vs Actual')\n",
796
+ " ax.set_xlabel('Time [hours]')\n",
797
+ " ax.set_ylabel('Energy Consumption [kW]') \n",
798
+ " ax.legend()\n",
799
+ "\n",
800
+ "# Adjust the spacing between subplots\n",
801
+ "plt.tight_layout()\n",
802
+ "\n",
803
+ "# Show the plot\n",
804
+ "plt.show()"
805
+ ]
806
+ },
807
+ {
808
+ "cell_type": "code",
809
+ "execution_count": null,
810
  "metadata": {},
811
+ "outputs": [],
812
+ "source": []
813
+ },
814
+ {
815
+ "cell_type": "code",
816
+ "execution_count": null,
817
+ "metadata": {},
818
+ "outputs": [],
819
  "source": []
820
  }
821
  ],