import gradio as gr
from api import *
from processing import *
import pandas as pd
from indices import indices
import xgboost as xgb  # likely required so the pickled regressor can be deserialized
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import pickle as pk
import json
from shapely.geometry import Point, Polygon
from glob import glob
import wget
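# api and processing (star imports above) are expected to provide the helpers
# used below: find_good_tile, unzip, select_best_cloud_coverage_tile, paths,
# extract_sub_image, ndvi and delete_tiles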


def predict(location_name,lat, lon):
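    """Estimate above-ground biomass and carbon stock at (lat, lon).

    Returns five strings: cloud coverage, days since sensing, biomass density
    (t/ha), carbon stock density (tC/ha) and mean NDVI, or an error message in
    the first field if the point is outside Côte d'Ivoire or no tile covers it.
    """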
    cord = [lon, lat]
    lon = round(lon, 4)
    lat = round(lat, 4)
    x1 = [lon, lat]  # (x, y) = (lon, lat), the order shapely expects
    x2 = [lat, lon]  # (lat, lon) order, used for the tile lookup below
    with open("data/CIV_0.json","r") as file:
        data = json.load(file)
    # extract ivory coast polygone
    features = [data['features'][0]['geometry']['coordinates'][0]+data['features'][0]['geometry']['coordinates'][1]+data['features'][0]['geometry']['coordinates'][2]]
    data['features'][0]['geometry']['coordinates'] = features
    ci_polygone = data['features'][0]['geometry']['coordinates'][0][0]  
    point1 = Point(x1)
    point2 = Point(x2)
    polygon = Polygon(ci_polygone)
    result = polygon.contains(point1)

    if not result:
        return "Please choose an area within Côte d'Ivoire", "", "", "", ""
    
    else:
        df = pd.read_csv("data/frame.csv")
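        # frame.csv is expected to map Sentinel-2 tile footprints to archive
        # names; find_good_tile returns 404 when no tile covers the point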
        name = find_good_tile(df, point2)
        if name == 404:
            reponse = "Sentinel-2 has no data for this location to date"
            return reponse, "", "", "", ""
        else:
            path = "https://data354-public-assets.s3.eu-west-3.amazonaws.com/cisentineldata/"
            url = path+name
            #wget.download(url)
            unzip()
            name, cld_prob, days_ago = select_best_cloud_coverage_tile()
            bandes_path_10, bandes_path_20, bandes_path_60, tile_path, path_cld_20, path_cld_60 = paths(name)
            # create the image dataset
            # bands with 10 m resolution
            images_10 = extract_sub_image(bandes_path_10, tile_path, cord)

            # bands with 20 m resolution
            images_20 = extract_sub_image(bandes_path_20, tile_path, cord, 20, 1)

            # bands with 60 m resolution
            images_60 = extract_sub_image(bandes_path_60, tile_path, cord, 60)
            feature = images_10.tolist() + images_20.tolist() + images_60.tolist()
            bands = ['B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B11', 'B12','B01','B09']
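            # column names for the concatenated pixel values; the order must
            # match the concatenation of the 10 m, 20 m and 60 m extractions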
            print("feature : ",feature)
            print("BANDES : ",bands)
            
            X = pd.DataFrame([feature],columns = bands)
            print("==================== X SHAPE", X.shape)
            ## Coordinates
            cord_df = pd.DataFrame({"Latitude": [lat],
                                    "Longitude": [lon]})
            ## PCA dimension reduction
            # reload the fitted StandardScaler and PCA from disk
            with open("data/sdc.pkl", "rb") as f:
                sdc_reload = pk.load(f)
            with open("data/pca.pkl", "rb") as f:
                pca_reload = pk.load(f)
            # standardize, then project onto the principal components
            X_scaled = sdc_reload.transform(X)
            principalComponents = pca_reload.transform(X_scaled)
            # keep only the first four principal components
            principalDf = pd.DataFrame(data=principalComponents[:, :4],
                                       columns=["PC1", "PC2", "PC3", "PC4"])

            # vegetation index calculation
            X = indices(X)
            # drop the 12 raw S2 bands, keeping only the derived indices
            tab = list(range(12))
            X_index = X.drop(X.columns[tab], axis=1)

            # assemble the predictive features: coordinates, principal
            # components and vegetation indices
            X_final = pd.concat([cord_df, principalDf, X_index], axis=1)

            # load the trained model from disk and predict the biomass density
            filename = "data/finalized_model4.sav"
            with open(filename, "rb") as f:
                loaded_model = pk.load(f)
            biomass = loaded_model.predict(X_final)[0]
            # clamp negative predictions to zero
            if biomass < 0:
                biomass = 0.0
            
            # carbon stock is taken as 55 % of above-ground biomass (carbon fraction 0.55)
            carbon = 0.55 * biomass

            # mean NDVI over the selected area
            ndvi_index = ndvi(cord, name)

            # optionally delete the downloaded tiles to free disk space
            #delete_tiles()

            return (str(cld_prob) + " % cloud coverage",
                    str(days_ago) + " days ago",
                    str(biomass) + " t/ha",
                    str(carbon) + " tC/ha",
                    "NDVI: " + str(ndvi_index))

# Create title, description and article strings
title = "🌴BEEPAS : Biomass estimation to Evaluate the Environmental Performance of Agroforestry Systems🌴"
description = "This application estimates above-ground biomass for a chosen location in Côte d'Ivoire from Sentinel-2 (S2) satellite imagery using machine learning."
article = "Created by data354."

# Example locations: (name, latitude, longitude)
example_list = [["Foret du banco :", 5.379913, -4.050445],
                ["Pharmacie Y4 :", 5.363292, -3.9481601],
                ["Treichville Bernabé :", 5.293168, -3.999796],
                ["Adjamé :", 5.346938, -4.027849],
                ["ile boulay :", 5.280498, -4.089883]]


outputs = [
    gr.Textbox(label="Cloud coverage"),
    gr.Textbox(label="Number of days since sensing"),
    gr.Textbox(label="Above ground biomass density (AGBD) t/ha"),
    gr.Textbox(label="Carbon stock density tC/ha"),
    gr.Textbox(label="Mean NDVI"),
]


demo = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(label="Location name"),
        gr.Number(label="Latitude"),
        gr.Number(label="Longitude"),
    ],
    outputs=outputs,
    examples=example_list,
    title=title,
    description=description,
    article=article,
)

demo.launch(share=True)
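# share=True also exposes a temporary public *.gradio.live URL in addition to
# the local server; use demo.launch() to serve locally only. The predict
# function can also be called directly, e.g.:
#   predict("Foret du banco :", 5.379913, -4.050445)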