[ { "name": "app.py", "type": "file", "content": "import streamlit as st\n#from utils.pdf_utils import convert_pdf_to_base64 as display_pdf\nfrom tasks.data_ingestion_task.data_ingestion_task import DataIngestionTask\nfrom tasks.query_handling_task.query_handling_task import QueryHandlingTask\n\n# Title Section\nst.title(\"(PDF) Chat con documentos de Procesos \ud83d\uddde\ufe0f\")\n\n# Subtitle Section\nst.markdown(\"Retrieval-Augmented Generation\")\nst.markdown(\"iniciar chat ...\ud83d\ude80\")\n\n# Session State Initialization\nif 'messages' not in st.session_state:\n st.session_state.messages = [{'role': 'assistant', \"content\": 'Hola! Selecciona un pdf para cargar, y hazme una pregunta.'}]\n\n# Sidebar Section\nwith st.sidebar:\n st.image('image_logo.jpeg', use_column_width=True)\n st.title(\"Menu:\")\n \n uploaded_file = st.file_uploader(\"Sube un archivo PDF y dale click al bot\u00f3n enviar y procesar.\")\n\n if st.button(\"Enviar y Procesar\"):\n with st.spinner(\"Procesando...\"):\n filepath = \"data/saved_pdf.pdf\"\n with open(filepath, \"wb\") as f:\n f.write(uploaded_file.getbuffer())\n \n # Use DataIngestionTask to process the PDF\n ingestion_task = DataIngestionTask(\n config_path='tasks/data_ingestion_task/config.txt',\n input_structure_path='tasks/data_ingestion_task/input_structure.json',\n output_structure_path='tasks/data_ingestion_task/output_structure.json'\n )\n ingestion_task.execute({}) # Assuming no specific input is required\n \n st.success(\"PDF processed successfully\")\n\n st.subheader(\"Select Model:\")\n model_selected = st.session_state.get('model_selected', '')\n \n col1, col2 = st.columns(2)\n \n if col1.button(\"GEMMA\", key=\"gemmabtn\"):\n st.session_state.model_selected = \"GEMMA\"\n \n if col2.button(\"GEMINI\", key=\"geminibtn\"):\n st.session_state.model_selected = \"GEMINI\"\n \n if model_selected:\n st.write(f\"Selected Model: **{model_selected}**\")\n\n# Chat Input Section\nuser_prompt = st.chat_input(\"Pregunta acerca del contenido en el archivo PDF:\")\nif user_prompt:\n st.session_state.messages.append({'role': 'user', \"content\": user_prompt})\n \n # Use QueryHandlingTask to handle the query\n query_task = QueryHandlingTask(\n config_path='tasks/query_handling_task/config.txt',\n input_structure_path='tasks/query_handling_task/input_structure.json',\n output_structure_path='tasks/query_handling_task/output_structure.json'\n )\n response = query_task.execute({'query': user_prompt})\n \n st.session_state.messages.append({'role': 'assistant', \"content\": response})\n\n# Chat Message Display\nfor message in st.session_state.messages:\n with st.chat_message(message['role']):\n st.write(message['content'])\n\n", "children": [] }, { "name": "config", "type": "directory", "content": null, "children": [ { "name": "app_config.py", "type": "file", "content": "# General application-level configurations\n", "children": [] }, { "name": "logging_config.py", "type": "file", "content": "# Configuration for logging\n", "children": [] } ] }, { "name": "documenter.py", "type": "file", "content": "import os\nimport json\n\n# Define a set of multimedia file extensions\nMULTIMEDIA_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', \n '.mp3', '.wav', '.aac', '.flac',\n '.mp4', '.avi', '.mov', '.mkv', '.wmv'}\n\ndef get_directory_structure(root_dir):\n def build_structure(current_dir):\n structure = []\n for item in os.listdir(current_dir):\n item_path = os.path.join(current_dir, item)\n if os.path.isdir(item_path):\n structure.append({\n \"name\": item,\n \"type\": \"directory\",\n \"content\": None,\n \"children\": build_structure(item_path)\n })\n else:\n file_extension = os.path.splitext(item)[1].lower()\n if file_extension in MULTIMEDIA_EXTENSIONS:\n # For multimedia files, don't capture the content\n structure.append({\n \"name\": item,\n \"type\": \"file\",\n \"content\": None, # No content for multimedia files\n \"children\": []\n })\n else:\n with open(item_path, 'r', encoding='utf-8', errors='ignore') as file:\n file_content = file.read()\n structure.append({\n \"name\": item,\n \"type\": \"file\",\n \"content\": file_content,\n \"children\": []\n })\n return structure\n \n return build_structure(root_dir)\n\ndef generate_json(output_file):\n current_directory = os.getcwd()\n structure = get_directory_structure(current_directory)\n with open(output_file, 'w', encoding='utf-8') as json_file:\n json.dump(structure, json_file, indent=4)\n\nif __name__ == \"__main__\":\n output_json_file = \"output.json\"\n generate_json(output_json_file)\n print(f\"Directory structure saved to {output_json_file}\")\n", "children": [] }, { "name": "generated_project", "type": "directory", "content": null, "children": [ { "name": "config", "type": "directory", "content": null, "children": [ { "name": "app_config.py", "type": "file", "content": "# General application-level configurations\n", "children": [] }, { "name": "logging_config.py", "type": "file", "content": "# Configuration for logging\n", "children": [] } ] }, { "name": "data", "type": "file", "content": "# Directory for storing uploaded PDF files", "children": [] }, { "name": "db", "type": "file", "content": "# Directory for persistent storage of indexes", "children": [] }, { "name": "logs", "type": "directory", "content": null, "children": [ { "name": "app.log", "type": "file", "content": "# Log file for the application", "children": [] } ] }, { "name": "main.py", "type": "file", "content": "# Entry point for backend execution\n", "children": [] }, { "name": "pipes", "type": "directory", "content": null, "children": [ { "name": "base_pipe.py", "type": "file", "content": "# Abstract base class for all pipes\n", "children": [] }, { "name": "example_pipe", "type": "directory", "content": null, "children": [ { "name": "example_pipe.py", "type": "file", "content": "# Defines the sequence of tasks for the example pipe\n", "children": [] }, { "name": "pipe_config.txt", "type": "file", "content": "# Configuration for example pipe\n", "children": [] } ] } ] }, { "name": "requirements.txt", "type": "file", "content": "# Dependencies for the project\nllama_index\nstreamlit\npython-dotenv\n", "children": [] }, { "name": "tasks", "type": "directory", "content": null, "children": [ { "name": "base_task.py", "type": "file", "content": "from abc import ABC, abstractmethod\nimport json\nimport os\n\nclass BaseTask(ABC):\n def __init__(self, config_path, input_structure_path, output_structure_path):\n self.config = self.load_config(config_path)\n self.input_structure = self.load_json(input_structure_path)\n self.output_structure = self.load_json(output_structure_path)\n \n def load_json(self, file_path):\n \"\"\"Load a JSON file.\"\"\"\n if not os.path.exists(file_path):\n raise FileNotFoundError(f\"{file_path} not found.\")\n with open(file_path, 'r') as file:\n return json.load(file)\n \n def load_config(self, config_path):\n \"\"\"Load task configuration from the config file.\"\"\"\n if not os.path.exists(config_path):\n raise FileNotFoundError(f\"{config_path} not found.\")\n # Implement logic to load and parse the config file\n with open(config_path, 'r') as file:\n return json.load(file)\n \n def validate_input(self, input_data):\n \"\"\"Validate the input data against the input structure.\"\"\"\n # Implement validation logic comparing input_data with self.input_structure\n for key, value_type in self.input_structure.items():\n if key not in input_data or not isinstance(input_data[key], value_type):\n raise ValueError(f\"Invalid input for {key}: Expected {value_type}, got {type(input_data.get(key))}\")\n \n def validate_output(self, output_data):\n \"\"\"Validate the output data against the output structure.\"\"\"\n for key, value_type in self.output_structure.items():\n if key not in output_data or not isinstance(output_data[key], value_type):\n raise ValueError(f\"Invalid output for {key}: Expected {value_type}, got {type(output_data.get(key))}\")\n \n @abstractmethod\n def load_input(self, input_data):\n \"\"\"Load and validate input data.\"\"\"\n self.validate_input(input_data)\n pass\n\n @abstractmethod\n def process(self):\n \"\"\"Process the input and perform the task's specific action.\"\"\"\n pass\n \n @abstractmethod\n def save_output(self, result):\n \"\"\"Format, validate, and return the output data.\"\"\"\n self.validate_output(result)\n pass\n \n def execute(self, input_data):\n \"\"\"The main method to run the task end-to-end.\"\"\"\n self.load_input(input_data)\n result = self.process()\n return self.save_output(result)\n", "children": [] }, { "name": "data_ingestion_task", "type": "directory", "content": null, "children": [ { "name": "config.txt", "type": "file", "content": "# Configuration specific to data ingestion task\n", "children": [] }, { "name": "data_ingestion_task.py", "type": "file", "content": "from tasks.base_task import BaseTask\nfrom utils.llama_index_utils import setup_directories\nfrom llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext\n\nclass DataIngestionTask(BaseTask):\n def load_input(self, input_data):\n # No input data required; handles its own input (documents in the data directory)\n self.data_dir, self.persist_dir = setup_directories()\n\n def process(self):\n # Process the documents and store the index\n documents = SimpleDirectoryReader(self.data_dir).load_data()\n storage_context = StorageContext.from_defaults()\n self.index = VectorStoreIndex.from_documents(documents)\n self.index.storage_context.persist(persist_dir=self.persist_dir)\n\n def save_output(self, result):\n # No output to return for this task\n pass\n", "children": [] }, { "name": "input_structure.json", "type": "file", "content": "{\n \"type\": \"object\",\n \"properties\": {}\n}\n", "children": [] }, { "name": "output_structure.json", "type": "file", "content": "{\n \"type\": \"object\",\n \"properties\": {}\n}\n", "children": [] } ] }, { "name": "query_handling_task", "type": "directory", "content": null, "children": [ { "name": "config.txt", "type": "file", "content": "# Configuration specific to query handling task\n", "children": [] }, { "name": "input_structure.json", "type": "file", "content": "{\n \"type\": \"object\",\n \"properties\": {\n \"query\": {\n \"type\": \"string\"\n }\n },\n \"required\": [\"query\"]\n}\n", "children": [] }, { "name": "output_structure.json", "type": "file", "content": "{\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"string\"\n }\n },\n \"required\": [\"response\"]\n}\n", "children": [] }, { "name": "query_handling_task.py", "type": "file", "content": "from tasks.base_task import BaseTask\nfrom utils.llama_index_utils import setup_directories\nfrom llama_index.core import StorageContext, load_index_from_storage, ChatPromptTemplate\n\nclass QueryHandlingTask(BaseTask):\n def load_input(self, input_data):\n self.query = input_data['query']\n self.data_dir, self.persist_dir = setup_directories()\n\n def process(self):\n # Load the index and create the query engine\n storage_context = StorageContext.from_defaults(persist_dir=self.persist_dir)\n self.index = load_index_from_storage(storage_context)\n chat_text_qa_msgs = [\n (\n \"user\",\n \"\"\"You are a Q&A assistant. Your main goal is to provide answers as accurately as possible, based on the instructions and context you have been given. If a question does not match the provided context or is outside the scope of the document, kindly advise the user to ask questions within the context of the document. Provide the answers in Spanish and cite the page and section where the answers were found.\n Context:\n {context_str}\n Question:\n {query_str}\n \"\"\"\n )\n ]\n self.text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)\n self.query_engine = self.index.as_query_engine(text_qa_template=self.text_qa_template)\n\n def save_output(self, result):\n # Process the query and return the response\n answer = self.query_engine.query(self.query)\n if hasattr(answer, 'response'):\n return answer.response\n elif isinstance(answer, dict) and 'response' in answer:\n return answer['response']\n else:\n return \"Disculpa no pude encontrar una respuesta.\"\n", "children": [] } ] } ] }, { "name": "utils", "type": "directory", "content": null, "children": [ { "name": "file_loader.py", "type": "file", "content": "# Utility for loading files\n", "children": [] }, { "name": "llama_index_utils.py", "type": "file", "content": "import os\nfrom llama_index.core import Settings\nfrom llama_index.llms.huggingface import HuggingFaceInferenceAPI\nfrom llama_index.embeddings.huggingface import HuggingFaceEmbedding\nfrom dotenv import load_dotenv\n\n# Load environment variables\nload_dotenv()\n\n# Configure the Llama index settings\ndef initialize_llama_settings():\n Settings.llm = HuggingFaceInferenceAPI(\n model_name=\"google/gemma-1.1-7b-it\",\n tokenizer_name=\"google/gemma-1.1-7b-it\",\n context_window=3000,\n token=os.getenv(\"HF_TOKEN\"),\n max_new_tokens=512,\n generate_kwargs={\"temperature\": 0.1},\n )\n Settings.embed_model = HuggingFaceEmbedding(\n model_name=\"BAAI/bge-small-en-v1.5\"\n )\n\n# Ensure data directory and persistent storage directory exist\ndef setup_directories(data_dir=\"data\", persist_dir=\"./db\"):\n os.makedirs(data_dir, exist_ok=True)\n os.makedirs(persist_dir, exist_ok=True)\n return data_dir, persist_dir\n", "children": [] }, { "name": "pdf_utils.py", "type": "file", "content": "import base64\n\n def convert_pdf_to_base64(file):\n with open(file, \"rb\") as f:\n base64_pdf = base64.b64encode(f.read()).decode('utf-8')\n pdf_display = f''\n return pdf_display\n", "children": [] } ] } ] }, { "name": "image_logo.jpeg", "type": "file", "content": null, "children": [] }, { "name": "logs", "type": "directory", "content": null, "children": [ { "name": "app.log", "type": "file", "content": "# Log file for the application", "children": [] } ] }, { "name": "main.py", "type": "file", "content": "# Entry point for backend execution\n", "children": [] }, { "name": "pipes", "type": "directory", "content": null, "children": [ { "name": "base_pipe.py", "type": "file", "content": "# Abstract base class for all pipes\n", "children": [] }, { "name": "example_pipe", "type": "directory", "content": null, "children": [ { "name": "example_pipe.py", "type": "file", "content": "# Defines the sequence of tasks for the example pipe\n", "children": [] }, { "name": "pipe_config.txt", "type": "file", "content": "# Configuration for example pipe\n", "children": [] } ] } ] }, { "name": "requirements.txt", "type": "file", "content": "# Dependencies for the project\nstreamlit\npython-dotenv\nllama-index\nllama-index-embeddings-huggingface\nllama-index-llms-huggingface\n", "children": [] }, { "name": "tasks", "type": "directory", "content": null, "children": [ { "name": "base_task.py", "type": "file", "content": "from abc import ABC, abstractmethod\nimport json\nimport os\n\nclass BaseTask(ABC):\n def __init__(self, config_path, input_structure_path, output_structure_path):\n self.config = self.load_config(config_path)\n self.input_structure = self.load_json(input_structure_path)\n self.output_structure = self.load_json(output_structure_path)\n \n def load_json(self, file_path):\n \"\"\"Load a JSON file.\"\"\"\n if not os.path.exists(file_path):\n raise FileNotFoundError(f\"{file_path} not found.\")\n with open(file_path, 'r') as file:\n return json.load(file)\n \n def load_config(self, config_path):\n \"\"\"Load task configuration from the config file.\"\"\"\n if not os.path.exists(config_path):\n raise FileNotFoundError(f\"{config_path} not found.\")\n # Implement logic to load and parse the config file\n with open(config_path, 'r') as file:\n return json.load(file)\n \n def validate_input(self, input_data):\n \"\"\"Validate the input data against the input structure.\"\"\"\n # Implement validation logic comparing input_data with self.input_structure\n for key, value_type in self.input_structure.items():\n if key not in input_data or not isinstance(input_data[key], value_type):\n raise ValueError(f\"Invalid input for {key}: Expected {value_type}, got {type(input_data.get(key))}\")\n \n def validate_output(self, output_data):\n \"\"\"Validate the output data against the output structure.\"\"\"\n for key, value_type in self.output_structure.items():\n if key not in output_data or not isinstance(output_data[key], value_type):\n raise ValueError(f\"Invalid output for {key}: Expected {value_type}, got {type(output_data.get(key))}\")\n \n @abstractmethod\n def load_input(self, input_data):\n \"\"\"Load and validate input data.\"\"\"\n self.validate_input(input_data)\n pass\n\n @abstractmethod\n def process(self):\n \"\"\"Process the input and perform the task's specific action.\"\"\"\n pass\n \n @abstractmethod\n def save_output(self, result):\n \"\"\"Format, validate, and return the output data.\"\"\"\n self.validate_output(result)\n pass\n \n def execute(self, input_data):\n \"\"\"The main method to run the task end-to-end.\"\"\"\n self.load_input(input_data)\n result = self.process()\n return self.save_output(result)\n", "children": [] }, { "name": "data_ingestion_task", "type": "directory", "content": null, "children": [ { "name": "config.txt", "type": "file", "content": "# Configuration specific to data ingestion task\n", "children": [] }, { "name": "data_ingestion_task.py", "type": "file", "content": "from tasks.base_task import BaseTask\nfrom utils.llama_index_utils import setup_directories\nfrom llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext\n\nclass DataIngestionTask(BaseTask):\n def load_input(self, input_data):\n # No input data required; handles its own input (documents in the data directory)\n self.data_dir, self.persist_dir = setup_directories()\n\n def process(self):\n # Process the documents and store the index\n documents = SimpleDirectoryReader(self.data_dir).load_data()\n storage_context = StorageContext.from_defaults()\n self.index = VectorStoreIndex.from_documents(documents)\n self.index.storage_context.persist(persist_dir=self.persist_dir)\n\n def save_output(self, result):\n # No output to return for this task\n pass\n", "children": [] }, { "name": "input_structure.json", "type": "file", "content": "{\n \"type\": \"object\",\n \"properties\": {}\n}\n", "children": [] }, { "name": "output_structure.json", "type": "file", "content": "{\n \"type\": \"object\",\n \"properties\": {}\n}\n", "children": [] } ] }, { "name": "query_handling_task", "type": "directory", "content": null, "children": [ { "name": "config.txt", "type": "file", "content": "# Configuration specific to query handling task\n", "children": [] }, { "name": "input_structure.json", "type": "file", "content": "{\n \"type\": \"object\",\n \"properties\": {\n \"query\": {\n \"type\": \"string\"\n }\n },\n \"required\": [\"query\"]\n}\n", "children": [] }, { "name": "output_structure.json", "type": "file", "content": "{\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"string\"\n }\n },\n \"required\": [\"response\"]\n}\n", "children": [] }, { "name": "query_handling_task.py", "type": "file", "content": "from tasks.base_task import BaseTask\nfrom utils.llama_index_utils import setup_directories\nfrom llama_index.core import StorageContext, load_index_from_storage, ChatPromptTemplate\n\nclass QueryHandlingTask(BaseTask):\n def load_input(self, input_data):\n self.query = input_data['query']\n self.data_dir, self.persist_dir = setup_directories()\n\n def process(self):\n # Load the index and create the query engine\n storage_context = StorageContext.from_defaults(persist_dir=self.persist_dir)\n self.index = load_index_from_storage(storage_context)\n chat_text_qa_msgs = [\n (\n \"user\",\n \"\"\"You are a Q&A assistant. Your main goal is to provide answers as accurately as possible, based on the instructions and context you have been given. If a question does not match the provided context or is outside the scope of the document, kindly advise the user to ask questions within the context of the document. Provide the answers in Spanish and cite the page and section where the answers were found.\n Context:\n {context_str}\n Question:\n {query_str}\n \"\"\"\n )\n ]\n self.text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)\n self.query_engine = self.index.as_query_engine(text_qa_template=self.text_qa_template)\n\n def save_output(self, result):\n # Process the query and return the response\n answer = self.query_engine.query(self.query)\n if hasattr(answer, 'response'):\n return answer.response\n elif isinstance(answer, dict) and 'response' in answer:\n return answer['response']\n else:\n return \"Disculpa no pude encontrar una respuesta.\"\n", "children": [] } ] } ] }, { "name": "utils", "type": "directory", "content": null, "children": [ { "name": "file_loader.py", "type": "file", "content": "# Utility for loading files\n", "children": [] }, { "name": "llama_index_utils.py", "type": "file", "content": "import os\nfrom llama_index.core import Settings\nfrom llama_index.llms.huggingface import HuggingFaceInferenceAPI\nfrom llama_index.embeddings.huggingface import HuggingFaceEmbedding\nfrom dotenv import load_dotenv\n\n# Load environment variables\nload_dotenv()\n\n# Configure the Llama index settings\ndef initialize_llama_settings():\n Settings.llm = HuggingFaceInferenceAPI(\n model_name=\"google/gemma-1.1-7b-it\",\n tokenizer_name=\"google/gemma-1.1-7b-it\",\n context_window=3000,\n token=os.getenv(\"HF_TOKEN\"),\n max_new_tokens=512,\n generate_kwargs={\"temperature\": 0.1},\n )\n Settings.embed_model = HuggingFaceEmbedding(\n model_name=\"BAAI/bge-small-en-v1.5\"\n )\n\n# Ensure data directory and persistent storage directory exist\ndef setup_directories(data_dir=\"data\", persist_dir=\"./db\"):\n os.makedirs(data_dir, exist_ok=True)\n os.makedirs(persist_dir, exist_ok=True)\n return data_dir, persist_dir", "children": [] }, { "name": "pdf_utils.py", "type": "file", "content": "import base64\n\ndef convert_pdf_to_base64(file):\n with open(file, \"rb\") as f:\n base64_pdf = base64.b64encode(f.read()).decode('utf-8')\n pdf_display = f''\n return pdf_display\n", "children": [] } ] } ]