|
import re |
|
import pandas as pd |
|
import pickle |
|
import numpy as np |
|
from sklearn.preprocessing import MinMaxScaler |
|
import joblib |
|
from io import StringIO |
|
import gradio as gr |
|
import os |
|
import sys |
|
from json import load |
|
from skforecast.utils import load_forecaster |
|
from skforecast.preprocessing import RollingFeatures |
|
from sklearn.preprocessing import MinMaxScaler |
|
from sklearn.preprocessing import FunctionTransformer |
|
from sklearn.pipeline import Pipeline |
|
from exog_creation import * |
|
import contextlib |
|
import warnings |
|
|
|
def load_csv(input_file):
    """Read an uploaded CSV file into a DataFrame.

    Parameters
    ----------
    input_file : str | os.PathLike | file-like
        Anything accepted by ``pandas.read_csv``.

    Returns
    -------
    pandas.DataFrame | str
        The parsed DataFrame, or a Spanish error-message string when the
        file has no data rows or cannot be parsed (callers must check the
        return type).
    """
    try:
        df = pd.read_csv(input_file)

        # A header-only file parses successfully but carries no rows.
        if df.empty:
            return "El archivo subido está vacío o no tiene datos válidos."

        return df
    except Exception as e:
        # Broad catch is deliberate: the UI shows the message to the user
        # instead of crashing on a malformed upload.
        return f"Error al procesar el archivo: {e}"
|
|
|
def set_datetime_index(df):
    """Return a copy of ``df`` indexed by its 'datetime' column at hourly frequency.

    The original implementation wrote the parsed column back into the
    caller's frame; this version works on a copy and leaves the input
    untouched.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'datetime' column parseable by ``pd.to_datetime``.

    Returns
    -------
    pandas.DataFrame
        Indexed by datetime with frequency 'h'; hours missing from the
        input appear as NaN rows (``asfreq`` inserts them).
    """
    df = df.copy()
    df['datetime'] = pd.to_datetime(df['datetime'])
    df = df.set_index('datetime')
    # Enforce a continuous hourly index so downstream forecasting sees
    # explicit gaps instead of silently skipped timestamps.
    df = df.asfreq('h')
    return df
|
|
|
def load_model(name):
    """Load the persisted skforecast model called ``name``.

    Inserts the repository root (the parent of the current working
    directory) into ``sys.path`` so the project-level ``root`` module can
    be imported, then reads the model from ``root.DIR_DATA_ANALYTICS``.
    """
    parent_dir = os.path.dirname(os.getcwd())
    sys.path.insert(1, parent_dir)
    import root  # only resolvable after the sys.path insertion above
    return load_forecaster(root.DIR_DATA_ANALYTICS + name, verbose=True)
|
|
|
def load_pipeline(name='pipeline.pkl'):
    """Deserialize and return the preprocessing pipeline stored at ``name``.

    The original implementation ignored its argument and always opened
    ``'pipeline.pkl'``; that path is now the default, so existing call
    sites keep working while other paths become usable.

    Parameters
    ----------
    name : str, default 'pipeline.pkl'
        Path to the pickled pipeline file.

    Returns
    -------
    object
        Whatever was pickled — presumably an sklearn ``Pipeline``.
    """
    # NOTE(security): pickle.load executes arbitrary code on load — only
    # open files produced by this project, never untrusted uploads.
    with open(name, 'rb') as file:
        return pickle.load(file)
|
|
|
|
|
def flujo(input_file):
    """Gradio callback: parse the uploaded CSV and return its rows as JSON.

    Parameters
    ----------
    input_file : str | file-like
        Upload from ``gr.File``; either a path string or an object exposing
        ``.name`` (Gradio's temp-file wrapper) — TODO confirm which the
        installed Gradio version delivers.

    Returns
    -------
    list[dict] | dict
        One dict per CSV row, or ``{"error": msg}`` when parsing failed.
    """
    # The original version ignored the upload and re-read a hard-coded
    # "archivo.csv"; use what the user actually submitted.
    path = getattr(input_file, "name", input_file)

    datos = load_csv(path)

    # load_csv signals failure by returning a message string instead of a
    # DataFrame; surface it as JSON rather than crashing on .to_dict.
    if isinstance(datos, str):
        return {"error": datos}

    return datos.to_dict(orient="records")
|
|
|
|
|
# Gradio UI: one CSV upload in, JSON rows out (handled by flujo).
interface = gr.Interface(
    fn=flujo,
    inputs=gr.File(label="Sube tu archivo CSV"),
    outputs="json",
    title="Visualización de CSV",
    description="Sube un archivo CSV y predice la generación de energía.",
)

if __name__ == "__main__":
    # Start the server only when run as a script, not when imported.
    interface.launch()
|
|
|
|
|
|