{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 224 }, "executionInfo": { "elapsed": 24689, "status": "ok", "timestamp": 1744101251163, "user": { "displayName": "Dinesh Kumar", "userId": "18299454607260962281" }, "user_tz": -330 }, "id": "Ni_Q3LdXWC-q", "outputId": "5f3fff46-29a3-41ac-c79f-9ca052214953" }, "outputs": [ { "ename": "ModuleNotFoundError", "evalue": "No module named 'google.colab'", "output_type": "error", "traceback": [ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", "Cell \u001b[1;32mIn[1], line 2\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;66;03m# Mount Google Drive\u001b[39;00m\n\u001b[1;32m----> 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mgoogle\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcolab\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m drive\n\u001b[0;32m 3\u001b[0m drive\u001b[38;5;241m.\u001b[39mmount(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m/content/drive\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m 4\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mpandas\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mpd\u001b[39;00m\n", "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'google.colab'" ] } ], "source": [ "# Mount Google Drive\n", "from google.colab import drive\n", "drive.mount('/content/drive')\n", "import pandas as pd\n", "\n", "# Load the match + commentary data\n", "csv_path = '/content/drive/MyDrive/Colab Notebooks/IPLPrediction/gru_match_simulation_commentary.csv'\n", "df = pd.read_csv(csv_path)\n", "\n", "# Add DELTA columns for comparison between overs\n", "df['Runs_This_Over'] = df['Cumulative Runs'].diff().fillna(df['Cumulative Runs'])\n", "df['Score_Delta'] = df['Predicted Final Score'].diff().fillna(df['Predicted Final Score'])\n", "df['Win_Prob_Change'] = df['Win Probability (%)'].diff().fillna(df['Win Probability (%)'])\n", "\n", "# Display result\n", "df.head()\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "executionInfo": { "elapsed": 57, "status": "ok", "timestamp": 1744099400163, "user": { "displayName": "Dinesh Kumar", "userId": "18299454607260962281" }, "user_tz": -330 }, "id": "oizD-WjjbM1V", "outputId": "d6303264-2d78-4b3a-a7d5-2cf2d5c5ec35" }, "outputs": [], "source": [ "# 💥 Highest run overs\n", "top_run_overs = df.sort_values(by='Runs_This_Over', ascending=False).head(3)\n", "\n", "# 🎯 Biggest predicted score jump\n", "top_score_jump = df.sort_values(by='Score_Delta', ascending=False).head(3)\n", "\n", "# 🔻 Biggest drop in win probability\n", "biggest_win_prob_drop = df.sort_values(by='Win_Prob_Change').head(3)\n", "\n", "# 🔼 Biggest increase in win probability\n", "biggest_win_prob_rise = df.sort_values(by='Win_Prob_Change', ascending=False).head(3)\n", "\n", "# Display each\n", "print(\"💥 Top 3 High-Scoring Overs:\")\n", "print(top_run_overs[['Over', 'Runs_This_Over', 'Commentary']], end=\"\\n\\n\")\n", "\n", "print(\"🎯 Top 3 Predicted Score Jumps:\")\n", "print(top_score_jump[['Over', 'Score_Delta', 'Commentary']], end=\"\\n\\n\")\n", "\n", "print(\"🔻 Top 3 Win Probability Drops:\")\n", "print(biggest_win_prob_drop[['Over', 'Win_Prob_Change', 'Commentary']], end=\"\\n\\n\")\n", "\n", "print(\"🔼 Top 3 Win Probability Gains:\")\n", 
"print(biggest_win_prob_rise[['Over', 'Win_Prob_Change', 'Commentary']])\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "executionInfo": { "elapsed": 5058, "status": "ok", "timestamp": 1744101274146, "user": { "displayName": "Dinesh Kumar", "userId": "18299454607260962281" }, "user_tz": -330 }, "id": "7zlfOX9pbMzU", "outputId": "5e8a9048-60cf-43a4-c8e8-a3c712da238a" }, "outputs": [], "source": [ "from openai import OpenAI\n", "import os\n", "\n", "# Set your API key securely\n", "os.environ[\"OPENAI_API_KEY\"] = \"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\" # 🔐 Use your actual key here\n", "client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n", "\n", "# Construct the summary prompt from insights\n", "summary_prompt = \"\"\"\n", "You're a cricket commentator. Generate an IPL-style summary of the match based on these insights:\n", "\n", "- The first 3 overs saw very little movement (low runs, 0% win probability).\n", "- Over 5 to Over 7 showed good progress — higher runs and confidence.\n", "- Over 19 was a massive momentum swing — highest runs, max predicted score jump, and win probability jump (22%).\n", "- Over 20 ended with a predicted score of 154 and a win probability of 43%.\n", "\n", "Write the commentary in a high-energy, sharp, and story-like tone. Mention momentum shift, turning points, and crowd reactions.\n", "\"\"\"\n", "\n", "# Get GPT response\n", "# Re-run GPT summary generation with more tokens\n", "response = client.chat.completions.create(\n", " model=\"gpt-3.5-turbo\",\n", " messages=[{\"role\": \"user\", \"content\": summary_prompt}],\n", " temperature=0.9,\n", " max_tokens=350 # ✅ Increased to avoid truncation\n", ")\n", "\n", "match_summary = response.choices[0].message.content\n", "print(\"🏏 Full Match Summary:\\n\")\n", "print(match_summary)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "executionInfo": { "elapsed": 284, "status": "ok", "timestamp": 1744101284093, "user": { "displayName": "Dinesh Kumar", "userId": "18299454607260962281" }, "user_tz": -330 }, "id": "-M-DEfnCbMwv", "outputId": "01a1ef6c-cdcd-464f-d557-a1b2526704de" }, "outputs": [], "source": [ "# Save as text file\n", "summary_path_txt = '/content/drive/MyDrive/Colab Notebooks/IPLPrediction/gru_match_summary.txt'\n", "\n", "with open(summary_path_txt, 'w') as f:\n", " f.write(match_summary)\n", "\n", "print(f\"✅ Full match summary saved to: {summary_path_txt}\")\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 734 }, "executionInfo": { "elapsed": 1927, "status": "ok", "timestamp": 1744101288885, "user": { "displayName": "Dinesh Kumar", "userId": "18299454607260962281" }, "user_tz": -330 }, "id": "NoH11T4obMnf", "outputId": "26391ad0-a59a-48c1-b4a9-e48226753c26" }, "outputs": [], "source": [ "import pandas as pd\n", "import matplotlib.pyplot as plt\n", "\n", "# Load commentary data (already saved earlier)\n", "file_path = '/content/drive/MyDrive/Colab Notebooks/IPLPrediction/gru_match_simulation_commentary.csv'\n", "df = pd.read_csv(file_path)\n", "\n", "# Prepare values\n", "overs = df['Over']\n", "runs_this_over = df['Cumulative Runs'].diff().fillna(df['Cumulative Runs'])\n", "win_prob = df['Win Probability (%)']\n", "\n", "# Create plot\n", "fig, 
ax1 = plt.subplots(figsize=(14, 6))\n", "\n", "# 🟦 Bar: Runs per over\n", "bars = ax1.bar(overs, runs_this_over, color='dodgerblue', label='Runs This Over')\n", "ax1.set_xlabel(\"Over\", fontsize=12)\n", "ax1.set_ylabel(\"Runs Scored\", color='dodgerblue', fontsize=12)\n", "ax1.tick_params(axis='y', labelcolor='dodgerblue')\n", "ax1.set_xticks(range(1, 21))\n", "\n", "# 🟧 Line: Win Probability %\n", "ax2 = ax1.twinx()\n", "ax2.plot(overs, win_prob, color='orange', linewidth=2.5, label='Win Probability (%)')\n", "ax2.set_ylabel(\"Win Probability (%)\", color='orange', fontsize=12)\n", "ax2.tick_params(axis='y', labelcolor='orange')\n", "\n", "# 🎯 Title and Layout\n", "plt.title(\"🏏 Match Momentum Dashboard – Runs vs Win Probability\", fontsize=15, fontweight='bold')\n", "fig.tight_layout()\n", "\n", "# Save output\n", "momentum_plot_path = \"/content/drive/MyDrive/Colab Notebooks/IPLPrediction/match_momentum_dashboard.png\"\n", "plt.savefig(momentum_plot_path, dpi=300)\n", "plt.show()\n", "\n", "print(f\"✅ Momentum dashboard saved to: {momentum_plot_path}\")\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "executionInfo": { "elapsed": 6907, "status": "ok", "timestamp": 1744101439165, "user": { "displayName": "Dinesh Kumar", "userId": "18299454607260962281" }, "user_tz": -330 }, "id": "GoZr1Knoi3sf", "outputId": "f7774c23-585d-4ac2-808c-da79fbc52ab4" }, "outputs": [], "source": [ "!pip install streamlit" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "executionInfo": { "elapsed": 5762, "status": "ok", "timestamp": 1744101743444, "user": { "displayName": "Dinesh Kumar", "userId": "18299454607260962281" }, "user_tz": -330 }, "id": "Ew5xg6xxizSM", "outputId": "9655ba3b-e7b5-49a4-b5ea-a4580ad8c081" }, "outputs": [], "source": [ "!pip uninstall matplotlib -y\n", "!pip install matplotlib --upgrade --force-reinstall\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!pip install matplotlib" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!pip install scipy==1.13.0 scikit-learn==1.4.1.post1 pillow==10.2.0" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import joblib\n", "from tensorflow.keras.models import load_model\n", "\n", "def simulate_gru_final_score(current_runs, current_overs, wickets, gru_model_path, run_scaler_path, score_scaler_path):\n", " \"\"\"\n", " Simulate final score from current match state using trained GRU model.\n", "\n", " Parameters:\n", " - current_runs: list of cumulative runs till current over (length = current_overs)\n", " - current_overs: int (number of completed overs)\n", " - wickets: int (optional, not used in prediction directly)\n", " - gru_model_path: path to trained GRU .keras model\n", " - run_scaler_path: fitted MinMaxScaler for input sequence (joblib path)\n", " - score_scaler_path: fitted MinMaxScaler for output (joblib path)\n", "\n", " Returns:\n", " - predicted_final_score (float)\n", " \"\"\"\n", " # Load scalers\n", " input_scaler = joblib.load(run_scaler_path)\n", " output_scaler = joblib.load(score_scaler_path)\n", "\n", " # Load GRU model\n", " model = load_model(gru_model_path, compile=False)\n", "\n", " # Pad sequence to 20 overs\n", " seq = np.array(current_runs).reshape(-1, 1)\n", " padded_seq = np.pad(seq, ((0, 20 - len(seq)), (0, 0)), 
mode='constant')\n", "\n", " # Scale input\n", " scaled_input = input_scaler.transform(padded_seq).reshape(1, 20, 1)\n", "\n", " # Predict\n", " pred_scaled = model.predict(scaled_input, verbose=0)\n", " predicted_final_score = output_scaler.inverse_transform(pred_scaled)[0][0]\n", "\n", " return round(predicted_final_score, 2)\n" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:The `save_format` argument is deprecated in Keras 3. We recommend removing this argument as it can be inferred from the file path. Received: save_format=keras\n" ] } ], "source": [ "from tensorflow.keras.models import load_model\n", "\n", "# Load the trained .h5 model\n", "model = load_model(\"gru_score_predictor.h5\", compile=False)\n", "\n", "# Save in .keras format\n", "model.save(\"gru_score_predictor.keras\", save_format=\"keras\")\n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['scaler_output.save']" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import joblib\n", "import numpy as np\n", "from sklearn.preprocessing import MinMaxScaler\n", "\n", "# Simulate training data (same structure as used earlier)\n", "cumulative_runs = np.random.randint(0, 200, size=(400, 1)) # mimic overwise cumulative runs\n", "final_scores = np.random.randint(100, 220, size=(400, 1)) # mimic final score ranges\n", "\n", "# Create and fit scalers\n", "scaler_input = MinMaxScaler().fit(cumulative_runs)\n", "scaler_output = MinMaxScaler().fit(final_scores)\n", "\n", "# Save scalers for future use\n", "joblib.dump(scaler_input, 'scaler_input.save')\n", "joblib.dump(scaler_output, 'scaler_output.save')\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "C:\\Users\\Dine24\\anaconda3\\Lib\\site-packages\\keras\\src\\layers\\rnn\\rnn.py:200: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. 
When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n", " super().__init__(**kwargs)\n" ] }, { "data": { "text/plain": [ "0.0034383272286504507" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Re-execute after environment reset\n", "import numpy as np\n", "import pandas as pd\n", "from sklearn.preprocessing import MinMaxScaler\n", "from tensorflow.keras.models import Sequential\n", "from tensorflow.keras.layers import GRU, Dense\n", "from tensorflow.keras.callbacks import EarlyStopping\n", "import joblib\n", "import os\n", "\n", "# Step 1: Simulate sample training data (Runs + Wickets as input, Final Score as output)\n", "np.random.seed(42)\n", "\n", "# Generate 500 samples\n", "samples = 500\n", "overs = 20\n", "\n", "# Random per-over runs (0 to 20)\n", "runs_per_over = np.random.randint(0, 21, size=(samples, overs))\n", "\n", "# Random per-over wickets (0 to 2)\n", "wickets_per_over = np.random.randint(0, 3, size=(samples, overs))\n", "\n", "# Cumulative input: concatenate cumulative runs and cumulative wickets\n", "cumulative_runs = np.cumsum(runs_per_over, axis=1)\n", "cumulative_wickets = np.cumsum(wickets_per_over, axis=1)\n", "\n", "# Combine features\n", "X_combined = np.stack((cumulative_runs, cumulative_wickets), axis=2) # shape = (samples, 20, 2)\n", "\n", "# Generate final scores (simulate realistic final score with some noise)\n", "final_scores = cumulative_runs[:, -1] + np.random.normal(loc=5.0, scale=10.0, size=(samples,))\n", "final_scores = final_scores.reshape(-1, 1)\n", "\n", "# Step 2: Normalize\n", "input_scaler = MinMaxScaler()\n", "output_scaler = MinMaxScaler()\n", "\n", "X_scaled = input_scaler.fit_transform(X_combined.reshape(-1, 2)).reshape(samples, overs, 2)\n", "y_scaled = output_scaler.fit_transform(final_scores)\n", "\n", "# Step 3: Build GRU model\n", "model = Sequential()\n", "model.add(GRU(64, input_shape=(20, 2), return_sequences=False))\n", "model.add(Dense(1))\n", "model.compile(optimizer='adam', loss='mse')\n", "\n", "# Step 4: Train\n", "early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n", "history = model.fit(X_scaled, y_scaled, epochs=50, batch_size=32, validation_split=0.2, callbacks=[early_stop], verbose=0)\n", "\n", "# Step 5: Save model and scalers\n", "os.makedirs(\"trained_model\", exist_ok=True)\n", "model.save(\"trained_model/gru_runs_wickets.keras\")\n", "joblib.dump(input_scaler, \"trained_model/scaler_input_rw.save\")\n", "joblib.dump(output_scaler, \"trained_model/scaler_output_rw.save\")\n", "\n", "# Return final loss to confirm successful training\n", "final_loss = history.history['val_loss'][-1]\n", "final_loss\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "ql4r1e5hkcG1", "outputId": "4c067f05-a996-4c2d-b265-3124b6ed99e1" }, "outputs": [], "source": [ "!streamlit run app.py\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!streamlit run match_simulator.py\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: ace_tools in c:\\users\\dine24\\anaconda3\\lib\\site-packages (0.0)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: Ignoring invalid distribution ~onttools (C:\\Users\\Dine24\\anaconda3\\Lib\\site-packages)\n", "WARNING: 
Ignoring invalid distribution ~onttools (C:\\Users\\Dine24\\anaconda3\\Lib\\site-packages)\n", "WARNING: Ignoring invalid distribution ~onttools (C:\\Users\\Dine24\\anaconda3\\Lib\\site-packages)\n" ] } ], "source": [ "!pip install ace_tools" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!streamlit run \"C:/Users/Dine24/Python Course/IPL_Cricket/trained_model/match_simulator_rw.py\"" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "C:\\Users\\Dine24\\anaconda3\\Lib\\site-packages\\keras\\src\\layers\\rnn\\rnn.py:200: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n", " super().__init__(**kwargs)\n" ] }, { "data": { "text/plain": [ "198.30263" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import numpy as np\n", "import pandas as pd\n", "from tensorflow.keras.models import Sequential\n", "from tensorflow.keras.layers import GRU, Dense\n", "from sklearn.preprocessing import MinMaxScaler\n", "import joblib\n", "\n", "# Simulate training data (runs + wickets as input, final score as output)\n", "np.random.seed(42)\n", "num_samples = 500\n", "\n", "# Generate random over-wise data for 20 overs\n", "runs_data = np.random.randint(0, 21, size=(num_samples, 20)) # 0-20 runs per over\n", "wickets_data = np.random.binomial(1, 0.3, size=(num_samples, 20)) # 0 or 1 wickets per over\n", "\n", "# Input shape: [samples, 20 overs, 2 features]\n", "X = np.stack((runs_data, wickets_data), axis=2)\n", "\n", "# Output: final scores with some variation based on runs and wickets\n", "final_scores = runs_data.sum(axis=1) + np.random.normal(0, 5, size=num_samples) - (wickets_data.sum(axis=1) * 2)\n", "y = final_scores.reshape(-1, 1)\n", "\n", "# Normalize inputs and outputs\n", "scaler_input = MinMaxScaler()\n", "X_reshaped = X.reshape(-1, 2)\n", "X_scaled = scaler_input.fit_transform(X_reshaped).reshape(num_samples, 20, 2)\n", "\n", "scaler_output = MinMaxScaler()\n", "y_scaled = scaler_output.fit_transform(y)\n", "\n", "# Define GRU model\n", "model = Sequential([\n", " GRU(64, input_shape=(20, 2), return_sequences=False),\n", " Dense(1)\n", "])\n", "model.compile(optimizer='adam', loss='mse')\n", "model.fit(X_scaled, y_scaled, epochs=10, batch_size=32, verbose=0)\n", "\n", "# Save model and scalers\n", "model.save(\"match_live_predictor/gru_score_predictor_rw.keras\")\n", "joblib.dump(scaler_input, \"scaler_rw_input.save\")\n", "joblib.dump(scaler_output, \"scaler_rw_output.save\")\n", "\n", "# Predict on a new random sample for validation\n", "sample_index = 0\n", "sample_input = X_scaled[sample_index:sample_index+1]\n", "pred_scaled = model.predict(sample_input, verbose=0)\n", "predicted_score = scaler_output.inverse_transform(pred_scaled)[0][0]\n", "\n", "predicted_score\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✅ Scalers saved successfully!\n" ] } ], "source": [ "# scaler_preparation_rw.py\n", "import numpy as np\n", "import joblib\n", "from sklearn.preprocessing import MinMaxScaler\n", "\n", "# Simulated dummy data for fitting scalers\n", "runs_input = np.random.randint(0, 220, size=(400, 2)) # 2 features: runs + wickets\n", "final_scores = np.random.randint(100, 250, size=(400, 1))\n", "\n", "# 
Create and fit scalers\n", "scaler_input_rw = MinMaxScaler().fit(runs_input)\n", "scaler_output_rw = MinMaxScaler().fit(final_scores)\n", "\n", "# Save scalers\n", "joblib.dump(scaler_input_rw, 'match_live_predictor/scaler_input_rw.save')\n", "joblib.dump(scaler_output_rw, 'match_live_predictor/scaler_output_rw.save')\n", "\n", "print(\"✅ Scalers saved successfully!\")\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/50\n", "[... Keras per-step progress-bar output trimmed; training loss drops from ~0.069 in epoch 1 to roughly 0.006-0.009 by the later epochs ...]\n", "Epoch 47/50\n", "\n", "\u001b[1m32/32\u001b[0m 
\u001b[32m====================\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 16ms/step - loss: 0.0077\n", "Epoch 48/50\n", "\n", "\u001b[1m 1/32\u001b[0m \u001b[37m====================\u001b[0m \u001b[1m1s\u001b[0m 39ms/step - loss: 0.0039\n", "\u001b[1m 5/32\u001b[0m \u001b[32m===\u001b[0m\u001b[37m=================\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 0.0064\n", "\u001b[1m 9/32\u001b[0m \u001b[32m=====\u001b[0m\u001b[37m===============\u001b[0m \u001b[1m0s\u001b[0m 15ms/step - loss: 0.0067\n", "\u001b[1m13/32\u001b[0m \u001b[32m========\u001b[0m\u001b[37m============\u001b[0m \u001b[1m0s\u001b[0m 15ms/step - loss: 0.0069\n", "\u001b[1m17/32\u001b[0m \u001b[32m==========\u001b[0m\u001b[37m==========\u001b[0m \u001b[1m0s\u001b[0m 15ms/step - loss: 0.0070\n", "\u001b[1m21/32\u001b[0m \u001b[32m=============\u001b[0m\u001b[37m=======\u001b[0m \u001b[1m0s\u001b[0m 15ms/step - loss: 0.0070\n", "\u001b[1m25/32\u001b[0m \u001b[32m===============\u001b[0m\u001b[37m=====\u001b[0m \u001b[1m0s\u001b[0m 15ms/step - loss: 0.0069\n", "\u001b[1m29/32\u001b[0m \u001b[32m==================\u001b[0m\u001b[37m==\u001b[0m \u001b[1m0s\u001b[0m 15ms/step - loss: 0.0069\n", "\u001b[1m32/32\u001b[0m \u001b[32m====================\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 15ms/step - loss: 0.0069\n", "Epoch 49/50\n", "\n", "\u001b[1m 1/32\u001b[0m \u001b[37m====================\u001b[0m \u001b[1m1s\u001b[0m 36ms/step - loss: 0.0036\n", "\u001b[1m 8/32\u001b[0m \u001b[32m=====\u001b[0m\u001b[37m===============\u001b[0m \u001b[1m0s\u001b[0m 8ms/step - loss: 0.0058 \n", "\u001b[1m12/32\u001b[0m \u001b[32m=======\u001b[0m\u001b[37m=============\u001b[0m \u001b[1m0s\u001b[0m 10ms/step - loss: 0.0060\n", "\u001b[1m16/32\u001b[0m \u001b[32m==========\u001b[0m\u001b[37m==========\u001b[0m \u001b[1m0s\u001b[0m 11ms/step - loss: 0.0061\n", "\u001b[1m20/32\u001b[0m \u001b[32m============\u001b[0m\u001b[37m========\u001b[0m \u001b[1m0s\u001b[0m 11ms/step - loss: 0.0061\n", "\u001b[1m24/32\u001b[0m \u001b[32m===============\u001b[0m\u001b[37m=====\u001b[0m \u001b[1m0s\u001b[0m 12ms/step - loss: 0.0061\n", "\u001b[1m31/32\u001b[0m \u001b[32m===================\u001b[0m\u001b[37m=\u001b[0m \u001b[1m0s\u001b[0m 11ms/step - loss: 0.0062\n", "\u001b[1m32/32\u001b[0m \u001b[32m====================\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 12ms/step - loss: 0.0063\n", "Epoch 50/50\n", "\n", "\u001b[1m 1/32\u001b[0m \u001b[37m====================\u001b[0m \u001b[1m1s\u001b[0m 36ms/step - loss: 0.0047\n", "\u001b[1m 6/32\u001b[0m \u001b[32m===\u001b[0m\u001b[37m=================\u001b[0m \u001b[1m0s\u001b[0m 12ms/step - loss: 0.0052\n", "\u001b[1m10/32\u001b[0m \u001b[32m======\u001b[0m\u001b[37m==============\u001b[0m \u001b[1m0s\u001b[0m 13ms/step - loss: 0.0054\n", "\u001b[1m14/32\u001b[0m \u001b[32m========\u001b[0m\u001b[37m============\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 0.0056\n", "\u001b[1m18/32\u001b[0m \u001b[32m===========\u001b[0m\u001b[37m=========\u001b[0m \u001b[1m0s\u001b[0m 14ms/step - loss: 0.0057\n", "\u001b[1m22/32\u001b[0m \u001b[32m=============\u001b[0m\u001b[37m=======\u001b[0m \u001b[1m0s\u001b[0m 15ms/step - loss: 0.0058\n", "\u001b[1m26/32\u001b[0m \u001b[32m================\u001b[0m\u001b[37m====\u001b[0m \u001b[1m0s\u001b[0m 15ms/step - loss: 0.0059\n", "\u001b[1m30/32\u001b[0m \u001b[32m==================\u001b[0m\u001b[37m==\u001b[0m \u001b[1m0s\u001b[0m 15ms/step - loss: 0.0060\n", "\u001b[1m32/32\u001b[0m \u001b[32m====================\u001b[0m\u001b[37m\u001b[0m 
"GRU model retrained and saved successfully!\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2025-04-09 11:37:28.632025: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", "2025-04-09 11:37:29.328822: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", "2025-04-09 11:37:31.986338: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", "To enable the following instructions: SSE3 SSE4.1 SSE4.2 AVX AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" ] } ], "source": [ "# Retrain the GRU model by running the local training script\n", "!python \"C:/Users/Dine24/Python Course/IPL_Cricket/match_live_predictor/train_gru_rw_model.py\"\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Launch the Streamlit app for live match prediction\n", "!streamlit run \"C:/Users/Dine24/Python Course/IPL_Cricket/match_live_predictor/live_predictor.py\"" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "colab": { "authorship_tag": "ABX9TyMWBYJ+0zFgkHMyKR8dqetW", "provenance": [] }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.7" } }, "nbformat": 4, "nbformat_minor": 4 }