JairoDanielMT committed on
Commit
646085c
verified
1 Parent(s): 637b2c7

Upload 9 files

Browse files
Dockerfile ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Python base image
FROM python:3.9

# Working directory inside the container
WORKDIR /code

# Copy and install dependencies first so this layer is cached between builds
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir -r /code/requirements.txt

COPY . .

# NOTE(review): 777 allows the non-root runtime (HF-Spaces style) to write
# anywhere under /code, but is broader than necessary — consider a dedicated
# user with narrower permissions.
RUN chmod -R 777 /code

# Command that runs the application
CMD ["python", "app.py"]
app.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from langchain.globals import set_verbose
from fastapi.middleware.cors import CORSMiddleware
from routers import ask

set_verbose(True)  # enable LangChain debug/verbose logging

app = FastAPI()

# CORS: allow any origin so external frontends can reach the API.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# overly permissive — restrict the origin list before a public deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.get("/")
async def redirect_to_docs():
    """Redirect the root path to the interactive Swagger docs."""
    return RedirectResponse(url="/docs")


# Mount the agent endpoints under /api.
app.include_router(ask.router, prefix="/api")

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
controller/question.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+
3
+
4
class Question(BaseModel):
    """Request body for the ask endpoints: the user's raw question text."""

    input: str  # the question forwarded to the agent
model/model_ai.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Sequence
3
+ from dotenv import load_dotenv
4
+ from langchain_core.tools import BaseTool
5
+ from langchain_openai import ChatOpenAI
6
+ from langchain.memory import ConversationBufferMemory
7
+ from langchain_core.prompts.prompt import PromptTemplate
8
+ from langchain.tools.render import render_text_description
9
+ from langchain.agents.output_parsers import ReActSingleInputOutputParser
10
+ from langchain.agents import AgentExecutor
11
+ from langchain.agents.format_scratchpad import format_log_to_str
12
+
13
+ from langchain.globals import set_verbose
14
+
15
set_verbose(True)  # enable LangChain debug output (previous comment wrongly said "disabled")
16
+
17
+
18
class ModelAI:
    """Factory for a ReAct agent executor backed by the DeepSeek chat model."""

    def __init__(self):
        # Load DEEPSEEK_API_KEY (and any other settings) from a local .env file.
        load_dotenv()
        self.model = "deepseek-chat"
        self.base_url = "https://api.deepseek.com"
        self.api_key = os.getenv("DEEPSEEK_API_KEY")

    def agent_executer(self, tools: Sequence[BaseTool]) -> AgentExecutor:
        """
        Create an agent executor with the given tools and the model.

        Args:
            tools: A sequence of tools to be used by the agent.

        Returns:
            An agent executor with the given tools and the model.
        """
        # LLM definition — DeepSeek exposes an OpenAI-compatible API.
        llm = ChatOpenAI(
            model=self.model,
            api_key=self.api_key,
            base_url=self.base_url,
            temperature=0.5,
        )
        # Conversation memory shared across calls made through this executor.
        memory = ConversationBufferMemory(memory_key="chat_history")
        # System instructions template, read from disk.
        prompt = self._load_prompt("prompt_system_agent.txt")
        # Fill in the tool descriptions/names the ReAct template expects.
        agent_prompt = PromptTemplate.from_template(prompt)
        prompt = agent_prompt.partial(
            tools=render_text_description(tools),
            tool_names=", ".join([t.name for t in tools]),
        )
        # The agent combines the system instructions with the LLM.
        agent = self._create_agent(llm, prompt)
        return AgentExecutor(agent=agent, tools=tools, memory=memory)

    @staticmethod
    def _load_prompt(filepath: str) -> str:
        """Read a prompt template from *filepath* as UTF-8 text."""
        # Explicit encoding: the template contains non-ASCII (Spanish) text,
        # so relying on the platform default encoding could corrupt it.
        with open(filepath, "r", encoding="utf-8") as file:
            return file.read()

    @staticmethod
    def _create_agent(llm: ChatOpenAI, prompt: PromptTemplate):
        """Build the ReAct agent runnable: input mapping | prompt | LLM | parser.

        Note: the previous ``-> dict`` annotation was wrong — this returns a
        LangChain runnable chain, not a dict.
        """
        # Stop generation at "\nObservation" so the model does not
        # hallucinate tool output itself.
        llm_with_stop = llm.bind(stop=["\nObservation"])
        return (
            {
                "input": lambda x: x["input"],
                "agent_scratchpad": lambda x: format_log_to_str(
                    x["intermediate_steps"]
                ),
                "chat_history": lambda x: x["chat_history"],
            }
            | prompt
            | llm_with_stop
            | ReActSingleInputOutputParser()
        )
prompt_system_agent.txt ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Responde en el idioma: Español
2
+ You are ReqGen (Requirement Generator), a specialized virtual assistant designed to assist in the generation of functional and non-functional requirements for software development projects. ReqGen is equipped with advanced natural language processing capabilities to understand and interpret user inputs, ensuring that the generated requirements are clear, precise, and aligned with the project's objectives.
3
+
4
+ # Proposed Standard for Requirement Definition
5
+
6
+ 1. Functional Requirements (FR)
7
+ Description: Detail what the system must do to meet the project's objectives. They focus on visible functionalities for the user or essential internal operations.
8
+ - Rules for Defining FR:
9
+ 1. Clear and Objective Name: Each FR must have a unique identifier (FR-01, FR-02, etc.) and a representative title.
10
+ 2. Action-Oriented: Use infinitives, such as "Allow", "Register", "Display".
11
+ 3. Measurable and Verifiable: Define expected outcomes or clear acceptance criteria.
12
+ 4. Avoid Ambiguities: Use specific terms instead of expressions like "fast", "better".
13
+ 5. User Focus: Include, when relevant, the main actor of the requirement.
14
+ Example:
15
+ - FR-01: Register Users.
16
+ - The system must allow new users to register by providing a name, email, and password.
17
+
18
+ 2. Non-Functional Requirements (NFR)
19
+ Description: Describe how the system should behave. They are not related to specific functions but to general characteristics such as performance, usability, and security.
20
+ - Rules for Defining NFR:
21
+ 1. Representative and Unique Name: Each NFR must have a unique identifier (NFR-01, NFR-02, etc.).
22
+ 2. Focused on "How": Detail quality attributes, technical or environmental constraints.
23
+ 3. Measurable Criteria: Include specific values (e.g., "response time less than 3 seconds").
24
+ 4. Categorized: Group NFR by attributes such as performance, availability, scalability, etc.
25
+ Example:
26
+ - NFR-01: Performance.
27
+ - The system must respond to user requests in less than 2 seconds for 95% of queries made.
28
+
29
+ # Common Categories
30
+ Functional Requirements
31
+ - System functions.
32
+ - User flows.
33
+ - Business rules.
34
+ - Integrations with other systems.
35
+
36
+ Non-Functional Requirements
37
+ - Performance: Response time, processing speed.
38
+ - Scalability: Capacity to support an increase in users or data.
39
+ - Security: Authentication, authorization, encryption.
40
+ - Compatibility: Supported platforms and browsers.
41
+ - Usability: Ease of use, accessibility.
42
+ - Availability: Guaranteed uptime.
43
+
44
+ TOOLS:
45
+ ------
46
+
47
+ ReqGen has access to the following tools:
48
+
49
+ {tools}
50
+
51
+ To use a tool, please use the following format:
52
+
53
+ ```
54
+
55
+ Thought: Do I need to use a tool? Yes
56
+
57
+ Action: the action to take, should be one of [{tool_names}]
58
+
59
+ Action Input: the input to the action
60
+
61
+ Observation: the result of the action
62
+
63
+ ```
64
+
65
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
66
+
67
+ ```
68
+
69
+ Thought: Do I need to use a tool? No
70
+
71
+ Final Answer: [your response here]
72
+
73
+ ```
74
+
75
+ Begin!
76
+
77
+ Previous conversation history:
78
+
79
+ {chat_history}
80
+
81
+ New input: {input}
82
+
83
+ {agent_scratchpad}
requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ requests
2
+ fastapi
3
+ langchain
4
+ langchain-core
5
+ langchain-openai
6
+ langchain-community
7
+ langchain-huggingface
8
+ faiss-cpu
9
+ duckduckgo-search
10
+ uvicorn
11
+ einops
12
+ python-multipart
13
+ docx2txt
14
+ aiofiles
routers/ask.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter
2
+ from langchain.globals import set_verbose
3
+ from controller.question import Question
4
+ from model.model_ai import ModelAI
5
+ from tools.duckduckgo import search
6
+ from tools.time import time
7
+
8
set_verbose(True)  # enable LangChain debug output (previous comment wrongly said "disabled")

router = APIRouter(
    prefix="/IA",
    tags=["Ask Agent"],
    responses={404: {"description": "No encontrado"}},
)

model = ModelAI()
# Tools handed to the agent: current-time lookup and DuckDuckGo search.
tools = [time, search]
# The executor bundles the LLM, conversation memory and the tools above.
agent_executor = model.agent_executer(tools)


@router.get("/ask")
def get_question_format():
    """Return the expected request-body format for POST /ask.

    Renamed from ``ask_question``: the GET and POST handlers previously
    shared one name, so the second ``def`` shadowed the first (F811).
    """
    return {"input": "Pregunta"}


@router.post("/ask")
async def ask_question(question: Question):
    """Run the agent on the user's question and return its answer."""
    try:
        # ainvoke performs the agent call asynchronously.
        respuesta = await agent_executor.ainvoke({"input": question.input})
        return {"respuesta": respuesta["output"]}
    except Exception as e:
        # NOTE(review): returning raw exception text may leak internals to
        # clients — consider raising HTTPException(status_code=500) instead.
        return {"error": str(e)}
tools/duckduckgo.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
2
+ from langchain_community.tools import DuckDuckGoSearchResults
3
+ from langchain.agents import tool
4
+
5
+
6
@tool
def search(query: str) -> str:
    """Búsqueda en DuckDuckGo."""
    # The docstring above is the tool description shown to the agent — kept as-is.
    # Up to 10 results, safe-search off, restricted to the last week ("w").
    wrapper = DuckDuckGoSearchAPIWrapper(max_results=10, safesearch="off", time="w")
    # Renamed from `search` — the original shadowed this function's own name.
    ddg_results = DuckDuckGoSearchResults(api_wrapper=wrapper)
    return str(ddg_results.invoke(query))
tools/time.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ from langchain.agents import tool
3
+
4
+
5
@tool
def time(format: str = "%Y-%m-%d %H:%M:%S") -> str:
    """
    Returns the current system time as a formatted string.

    Args:
        format (str): The format for the output time string. Default is "%Y-%m-%d %H:%M:%S".

    Returns:
        str: The current system time in the specified format.
    """

    # NOTE(review): `time` shadows the stdlib module name and `format` shadows
    # the builtin, but both are kept — the tool name and parameter name are
    # part of the interface exposed to the agent.
    # datetime.now() is naive local time — TODO confirm whether UTC is wanted.
    current_time = datetime.now().strftime(format)
    print(f"Current system time: {current_time}")  # debug trace to stdout
    return current_time