|
from dotenv import load_dotenv
|
|
from fastapi import APIRouter
|
|
from langchain.globals import set_verbose
|
|
from schemas.question import Question
|
|
from model.model_ai import ModelAI
|
|
from schemas.model_llm import ModelLLM
|
|
from tools.duckduckgo import search
|
|
from tools.search_db_vectorial import exists_database, list_databases, search_database
|
|
from tools.time import time
|
|
from os import getenv
|
|
import warnings
|
|
|
|
# Silence noisy DeprecationWarning output emitted by dependencies.
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Load variables from a local .env file into the process environment.
load_dotenv()

# Enable LangChain verbose logging only for explicitly truthy VERBOSE values.
# BUG FIX: the original `bool(getenv("VERBOSE", False))` treated ANY non-empty
# string as True — including "false" and "0" — because bool("false") is True.
# Parse the common truthy spellings instead; unset/empty stays False.
set_verbose(getenv("VERBOSE", "").strip().lower() in ("1", "true", "yes", "on"))

# LLM backend configuration, read from the environment (.env):
LLM_API_NAME_MODEL = getenv("LLM_API_NAME_MODEL")  # model identifier
LLM_API_URL = getenv("LLM_API_URL")  # base URL of the LLM API endpoint
LLM_API_KEY = getenv("LLM_API_KEY")  # API key used to authenticate
|
|
|
|
# Router for all agent-related endpoints, mounted under the /ai prefix.
router = APIRouter(
    prefix="/ai",
    tags=["Ask Agent"],
    responses={404: {"description": "Not found"}},
)

# LLM connection settings; all three values come from the environment above.
model_llm = ModelLLM(
    name_model=LLM_API_NAME_MODEL, base_url=LLM_API_URL, api_key=LLM_API_KEY
)

# Project wrapper that builds an agent on top of the configured LLM.
model = ModelAI(model_llm)

# Tools exposed to the agent: current time, DuckDuckGo search, and the
# vector-database helpers (list / existence check / search).
tools = [time, search, list_databases, exists_database, search_database]

# Agent executor used by the /ask endpoints below.
# NOTE(review): `agent_executer` (sic) is the method name as defined in
# ModelAI — the spelling is the project's, not a typo introduced here.
agent_executor = model.agent_executer(tools)
|
|
|
|
|
|
@router.get("/ask")
def ask_question_format():
    """Return the request-body format expected by POST /ask.

    Renamed from ``ask_question``: the original name was immediately
    redefined by the POST handler below (flake8 F811), destroying this
    function's binding. The HTTP route (GET /ask) is unchanged.
    """
    return {"input": "Pregunta"}
|
|
|
|
|
|
@router.post("/ask")
async def ask_question(question: Question):
    """Ask the agent a question and return its answer.

    Invokes the module-level agent executor with the question text. On any
    failure the exception message is returned under the ``"error"`` key
    (with HTTP 200) instead of propagating.
    """
    try:
        # The return stays inside the try so that a missing "output" key
        # is also reported through the error path.
        result = await agent_executor.ainvoke({"input": question.input})
        return {"respuesta": result["output"]}
    except Exception as exc:  # surface agent failures to the client
        return {"error": str(exc)}
|
|
|