import os
import streamlit as st
import torch
import uuid

from st_app import launch_bot

import nest_asyncio
import asyncio

# Workaround: presumably stops Streamlit's file watcher from trying to
# enumerate torch.classes' module files (which crashes) — confirm against
# the Streamlit/torch issue this was added for.
torch.classes.__path__ = []

# Imported after the torch workaround; sqlite3 backs the local EV database
# and load_dataset pulls the CSVs that populate it (see setup_db below).
import sqlite3
from datasets import load_dataset

# Setup for HTTP API Calls to Amplitude Analytics
# A random per-session device id, generated once and cached in Streamlit's
# session state so analytics events from one browser session share an id.
if 'device_id' not in st.session_state:
    st.session_state.device_id = str(uuid.uuid4())

def setup_db():
    """Create and populate the local SQLite EV database (``ev_database.db``).

    Idempotent: if all expected tables already exist, the function returns
    without touching the database. Otherwise it runs ``create_tables.sql``
    and loads four CSVs from the ``vectara/ev-dataset`` Hugging Face dataset
    into their corresponding tables.

    Side effects: creates/opens ``ev_database.db`` in the working directory,
    reads ``create_tables.sql``, reads the ``HF_TOKEN`` environment variable,
    and shows a Streamlit spinner while working.
    """
    # Single source of truth for table name -> source CSV; the existence
    # check and the load loop below both iterate this mapping, so they can
    # never drift out of sync.
    table_sources = {
        'ev_population': "Electric_Vehicle_Population_Data.csv",
        'county_registrations': "Electric_Vehicle_Population_Size_History_By_County.csv",
        'ev_registrations': "Electric_Vehicle_Title_and_Registration_Activity.csv",
        'washington_population': "washington_population.csv",
    }

    db_path = 'ev_database.db'
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()

        with st.spinner("Loading data... Please wait..."):
            def tables_populated() -> bool:
                # Parameter binding instead of f-string interpolation —
                # safer and lets sqlite cache the prepared statement.
                for table in table_sources:
                    cursor.execute(
                        "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
                        (table,),
                    )
                    if cursor.fetchone() is None:
                        return False
                return True

            if tables_populated():
                print("Database tables already populated, skipping setup")
                return
            print("Populating database tables")

            # Execute the SQL commands to create tables
            with open('create_tables.sql', 'r') as sql_file:
                cursor.executescript(sql_file.read())

            # Token is optional (None if unset); load_dataset accepts None.
            hf_token = os.getenv('HF_TOKEN')

            # Load each CSV from the Hugging Face dataset into its table.
            for table, data_file in table_sources.items():
                df = load_dataset(
                    "vectara/ev-dataset", data_files=data_file, token=hf_token
                )['train'].to_pandas()
                df.to_sql(table, conn, if_exists='replace', index=False)

            conn.commit()
    finally:
        # Original code leaked the connection on any exception; the
        # try/finally guarantees it is closed on every exit path.
        conn.close()

if __name__ == "__main__":
    # set_page_config must be the first Streamlit command of the script run
    # (Streamlit API requirement), so it precedes setup_db's st.spinner.
    st.set_page_config(page_title="Electric Vehicles Assistant", layout="wide")
    # Ensure the local SQLite database exists and is populated before the bot starts.
    setup_db()

    # nest_asyncio patches the event loop so asyncio.run() works even when a
    # loop is already running (as under Streamlit's script runner); it must
    # be applied before asyncio.run is called.
    nest_asyncio.apply()
    asyncio.run(launch_bot())