Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import joblib
|
3 |
+
import pandas as pd
|
4 |
+
import numpy as np
|
5 |
+
import re
|
6 |
+
import requests
|
7 |
+
from datetime import datetime
|
8 |
+
from geopy.geocoders import Nominatim
|
9 |
+
from geopy.distance import geodesic
|
10 |
+
|
11 |
+
# Load the pre-trained dock-availability model and the Bicing station dataset.
# NOTE(review): both paths are relative — assumes the app runs from the repo
# root where modelo_docks.pkl and the CSV live; confirm deployment layout.
model = joblib.load("modelo_docks.pkl")
df_stations = pd.read_csv("Informacio_Estacions_Bicing_2025.csv")
# Geocoder used to turn free-text locations into coordinates (network call).
geolocator = Nominatim(user_agent="bicing-agent")
15 |
+
|
16 |
+
# LLM: llama-3.3-70b-versatile
def preguntar_al_usuario(pregunta):
    """Ask the LLM to rephrase *pregunta* in a friendly tone for the user.

    Parameters:
        pregunta: the raw question text to be reformulated.

    Returns:
        The LLM's reformulated question, stripped of surrounding whitespace;
        falls back to *pregunta* unchanged if the LLM call fails for any
        reason, so the conversation flow never crashes.

    NOTE(review): `client` is never defined in this file — presumably an
    LLM API client created elsewhere. Without the fallback below, the very
    first call raised NameError and took the whole app down; confirm the
    client is initialized before deploying.
    """
    try:
        response = client.chat.completions.create(
            messages=[
                {"role": "system", "content": "Eres un asistente de Bicing. Tu tarea es hacer una preguntal usuario y esperar su respuesta. No saludes a menos que te lo pida"},
                {"role": "user", "content": f"Pregunta al usuario lo siguiente, puedes modificar el tono para hacerlo mas amigable y añadir ayudas adicionales sobre como introducir los datos: '{pregunta}'"}
            ],
            model="llama-3.3-70b-versatile",
            temperature=0.5,
            max_completion_tokens=512,
            top_p=1,
            stream=False,
        )
        return response.choices[0].message.content.strip()
    except Exception:
        # Best-effort: show the raw question rather than crashing the UI.
        return pregunta
30 |
+
|
31 |
+
# Nearest stations to a free-text location.
def get_nearest_stations(ubicacion, top_n=5):
    """Geocode *ubicacion* (assumed to be within Barcelona) and return the
    *top_n* closest Bicing stations.

    Parameters:
        ubicacion: free-text location (neighbourhood or street address).
        top_n: number of closest stations to return (default 5).

    Returns:
        DataFrame with columns station_id, address, lat, lon, ordered from
        nearest to farthest; an empty DataFrame when geocoding fails.

    Unlike the original implementation, this does NOT mutate the shared
    module-level ``df_stations`` (which previously gained a 'distancia'
    column on every call — unsafe with concurrent Gradio sessions).
    """
    loc = geolocator.geocode(f"{ubicacion}, Barcelona, Spain")
    if not loc:
        return pd.DataFrame()

    user_coord = (loc.latitude, loc.longitude)
    # Distance from the user to every station, computed as a standalone
    # Series so the global DataFrame stays untouched.
    distancias = df_stations.apply(
        lambda row: geodesic(user_coord, (row["lat"], row["lon"])).meters,
        axis=1,
    )
    nearest_idx = distancias.nsmallest(top_n).index
    return df_stations.loc[nearest_idx, ["station_id", "address", "lat", "lon"]]
43 |
+
|
44 |
+
# Weather (Open-Meteo)
def get_weather_forecast(lat, lon, year, month, day, hour, timeout=10):
    """Fetch the Open-Meteo hourly forecast for one specific date and hour.

    Parameters:
        lat, lon: coordinates of the point to forecast.
        year, month, day, hour: requested local date/time (Europe/Madrid).
        timeout: HTTP timeout in seconds (default 10) — the original call
            had none, so a stalled server hung the whole chat.

    Returns:
        (temperature_2m, precipitation) for the requested hour, or
        (None, None) when the request fails, the response is malformed,
        or the hour is not present in the forecast window.
    """
    fecha = f"{year}-{month:02d}-{day:02d}"
    hora_str = f"{hour:02d}:00"

    url = (
        f"https://api.open-meteo.com/v1/forecast?"
        f"latitude={lat}&longitude={lon}&hourly=temperature_2m,precipitation&timezone=Europe%2FMadrid"
        f"&start_date={fecha}&end_date={fecha}"
    )

    # Network errors (DNS failure, timeout, connection reset) are treated
    # the same as a non-200 response: no forecast available.
    try:
        r = requests.get(url, timeout=timeout)
    except requests.RequestException:
        return None, None
    if r.status_code != 200:
        return None, None

    # Guard against an unexpected payload shape as well.
    try:
        data = r.json()
        horas = data["hourly"]["time"]
        temperaturas = data["hourly"]["temperature_2m"]
        precipitaciones = data["hourly"]["precipitation"]
    except (ValueError, KeyError):
        return None, None

    # Timestamps come back as ISO strings like "2025-06-01T14:00";
    # match on the trailing "HH:00" for the requested hour.
    for i, h in enumerate(horas):
        if h.endswith(hora_str):
            return temperaturas[i], precipitaciones[i]

    return None, None
69 |
+
|
70 |
+
# Prediction with the model.
def predict_disponibilidad(context):
    """Predict bike availability for the stations nearest to the user.

    Parameters:
        context: dict with keys "ubicacion" (free-text location), "month",
            "day", "hour" (ints) and "target_pct" (fraction 0-1).

    Returns:
        On failure, ``{"error": <message>}``; otherwise a dict with
        "target_pct" (echoed from context) and "candidatas", a list of
        per-station dicts sorted by predicted availability, descending.
    """
    estaciones_cercanas = get_nearest_stations(context["ubicacion"])
    if estaciones_cercanas.empty:
        return {"error": "No se encontraron estaciones cercanas."}

    resultados = []

    for _, row in estaciones_cercanas.iterrows():
        # Year is hard-coded to 2025 — presumably the period the model and
        # station dataset cover; confirm before reusing in other years.
        temp, precip = get_weather_forecast(
            row["lat"], row["lon"], 2025,
            context["month"], context["day"], context["hour"]
        )
        if temp is None:
            # No forecast for this station/hour — skip it.
            continue

        # Feature vector for the model. NOTE(review): the column order and
        # the four zero placeholders ("historical ctx" defaults) must match
        # the training pipeline exactly — verify against the model's
        # training code.
        X = np.array([[
            row["station_id"],
            context["month"],
            context["day"],
            context["hour"],
            0, 0, 0, 0,  # default historical context features
            temp,
            1.0 if precip > 0 else 0.0
        ]])
        pred = model.predict(X)[0]

        resultados.append({
            "station_id": row["station_id"],
            "address": row["address"],
            "pred_pct": float(pred),  # assumed to be a fraction 0-1 — TODO confirm
            "temperature": round(temp, 1),
            "precip": round(precip, 1)
        })

    if not resultados:
        return {"error": "No se pudieron calcular predicciones meteorológicas."}

    # Best-predicted stations first.
    resultados_ordenados = sorted(resultados, key=lambda x: x["pred_pct"], reverse=True)
    return {
        "target_pct": context["target_pct"],
        "candidatas": resultados_ordenados
    }
113 |
+
|
114 |
+
# Questions asked to the user, in order: (context key, prompt text).
# The prompt is passed through preguntar_al_usuario() for friendly rewording;
# the first prompt embeds an instruction ("INTRODUCE SALUDO") telling the LLM
# to open with a greeting.
preguntas = [
    ("ubicacion", "INTRODUCE SALUDO y la pregunta ¿Dónde te gustaría coger la bici? (zona o dirección en Barcelona)"),
    ("month", "¿En qué mes planeas cogerla? (número 1-12)"),
    ("day", "¿Qué día del mes?"),
    ("hour", "¿A qué hora la necesitas? (0-23)?"),
    ("target_pct", "¿Qué porcentaje mínimo de bicicletas esperas encontrar disponibles? (0 a 100%)")
]
122 |
+
|
123 |
+
# Conversation flow: one call per user message, advancing through `preguntas`.
def chat(user_input, chat_history, current_step, user_context):
    """Handle one user message in the question-by-question dialogue.

    Parameters:
        user_input: the raw text the user submitted.
        chat_history: running list of ("user"|"assistant", text) tuples,
            mutated in place. NOTE(review): gr.Chatbot classically expects
            (user_msg, bot_msg) pairs, not role-tagged tuples — confirm this
            renders correctly on the Gradio version in use.
        current_step: index into `preguntas` of the question being answered.
        user_context: dict collecting the user's answers, mutated in place.

    Returns:
        (chat_history, current_step, user_context) — wired in the UI to
        [chatbot, state_step, state_context].
    """
    key, _ = preguntas[current_step]

    if key in ["month", "day", "hour", "target_pct"]:
        # Numeric answers: accept the first number found anywhere in the text.
        match = re.search(r"\d+(\.\d+)?", user_input)
        if match:
            value = float(match.group())
            # target_pct is stored as a fraction (user types 0-100).
            user_context[key] = value / 100 if key == "target_pct" else int(value)
        else:
            # No number found: re-ask without advancing the step.
            chat_history.append(("user", user_input))
            chat_history.append(("assistant", "Introduce un número válido."))
            return chat_history, current_step, user_context
    else:
        user_context[key] = user_input.strip()

    chat_history.append(("user", user_input))
    current_step += 1

    if current_step < len(preguntas):
        # Still collecting answers: ask the next question via the LLM.
        siguiente_pregunta = preguntar_al_usuario(preguntas[current_step][1])
        chat_history.append(("assistant", siguiente_pregunta))
    else:
        # All answers collected: run the prediction.
        resultado = predict_disponibilidad(user_context)
        if "error" in resultado:
            chat_history.append(("assistant", resultado["error"] + " Reiniciando conversación..."))
            # Reset the state and restart from the first question.
            user_context = {
                "ubicacion": None,
                "month": None,
                "day": None,
                "hour": None,
                "target_pct": None,
                "temperature": None,
                "lluvia": None
            }
            current_step = 0
            chat_history.append(("assistant", preguntar_al_usuario(preguntas[0][1])))
            return chat_history, current_step, user_context

        else:
            # Weather summary is taken from the top-ranked station only.
            clima = resultado["candidatas"][0]
            msg = (
                f"📅 Predicción meteorológica para la hora solicitada:\n"
                f"🌡️ Temperatura aprox.: {clima['temperature']}°C\n"
                f"☔ Precipitación aprox.: {clima['precip']} mm\n\n"
                f"🚲 Estado de las estaciones de bicing en ese momento {round(resultado['target_pct']*100)}% de bicis:\n\n"
            )
            for r in resultado["candidatas"]:
                # ✅ meets the user's availability target, ⚠️ falls short.
                emoji = "✅" if r["pred_pct"] >= resultado["target_pct"] else "⚠️"
                msg += (
                    f"{emoji} Estación '{r['address']}' (ID {r['station_id']}): "
                    f"{round(r['pred_pct']*100)}% disponibilidad\n"
                )
            chat_history.append(("assistant", msg.strip()))

    return chat_history, current_step, user_context
180 |
+
|
181 |
+
# Gradio interface: a chatbot plus a single free-text input, with session
# state for the history, the current question index, and collected answers.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    txt = gr.Textbox(placeholder="Escribe tu respuesta...", label="Tu mensaje")

    state_chat = gr.State([])
    state_step = gr.State(0)
    state_context = gr.State({
        "ubicacion": None,
        "month": None,
        "day": None,
        "hour": None,
        "target_pct": None,
        "temperature": None,
        "lluvia": None
    })

    # Thin wrapper so txt.submit has a locally defined callback.
    def user_submit(message, chat_history, current_step, user_context):
        return chat(message, chat_history, current_step, user_context)

    # NOTE(review): outputs omit state_chat — history persistence relies on
    # chat() mutating the list held by state_chat in place; confirm this
    # survives Gradio's per-session state copying. The textbox is also never
    # cleared after submit.
    txt.submit(user_submit, inputs=[txt, state_chat, state_step, state_context],
               outputs=[chatbot, state_step, state_context])

    # First message — NOTE(review): this calls the LLM at app build time,
    # once, when the script starts (not per user session).
    primer_pregunta = preguntar_al_usuario(preguntas[0][1])
    state_chat.value = [("assistant", primer_pregunta)]
    chatbot.value = state_chat.value

demo.launch()