ariansyahdedy committed
Commit 9fd3be8 · 1 Parent(s): 95c31ee
Add api indexed doc
- app/api/api_file.py +74 -12
- app/services/message.py +4 -3
- indexed_links.txt +5 -0
app/api/api_file.py
CHANGED
@@ -1,5 +1,5 @@
 from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, Request, Query, status
-from fastapi.responses import StreamingResponse
+from fastapi.responses import StreamingResponse, JSONResponse
 import os
 import logging
 import uuid
@@ -36,16 +36,30 @@ def is_url(path: str) -> bool:

 file_router = APIRouter()

-# Configure logging to file with date-based filenames
-log_filename = f"document_logs_{datetime.now().strftime('%Y-%m-%d')}.txt"
-file_handler = logging.FileHandler(log_filename)
-formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
-file_handler.setFormatter(formatter)
+# # Configure logging to file with date-based filenames
+# log_filename = f"document_logs_{datetime.now().strftime('%Y-%m-%d')}.txt"
+# file_handler = logging.FileHandler(log_filename)
+# formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
+# file_handler.setFormatter(formatter)

-# Create a logger for document processing
-doc_logger = logging.getLogger('document_logger')
-doc_logger.setLevel(logging.INFO)
-doc_logger.addHandler(file_handler)
+# # Create a logger for document processing
+# doc_logger = logging.getLogger('document_logger')
+# doc_logger.setLevel(logging.INFO)
+# doc_logger.addHandler(file_handler)
+
+# Configure logging to a single file for indexed links
+INDEXED_LINKS_LOG = "indexed_links.txt"
+indexed_links_handler = logging.FileHandler(INDEXED_LINKS_LOG)
+indexed_links_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
+indexed_links_handler.setFormatter(indexed_links_formatter)
+
+# Create a logger for indexed links
+indexed_links_logger = logging.getLogger('indexed_links_logger')
+indexed_links_logger.setLevel(logging.INFO)
+indexed_links_logger.addHandler(indexed_links_handler)
+
+# Ensure that the general logger does not propagate to avoid duplicate logs
+indexed_links_logger.propagate = False

 # Also configure the general logger if not already configured
 logging.basicConfig(level=logging.INFO)
@@ -54,7 +68,53 @@ logger = logging.getLogger(__name__)
 from app.search.rag_pipeline import RAGSystem
 from sentence_transformers import SentenceTransformer

+# Path to the indexed links log file
+INDEXED_LINKS_LOG = "indexed_links.txt"
+
+@file_router.get("/indexed_links/", response_class=JSONResponse)
+async def get_indexed_links(
+    limit: Optional[int] = Query(
+        None,
+        description="Maximum number of indexed links to return. If not specified, all links are returned."
+    )
+) -> dict:
+    """
+    Retrieve a list of all indexed URLs from the single log file.

+    Args:
+        limit (int, optional): Maximum number of indexed links to return.
+
+    Returns:
+        dict: A dictionary containing the list of indexed URLs.
+    """
+    try:
+        if not os.path.exists(INDEXED_LINKS_LOG):
+            raise HTTPException(status_code=404, detail="No indexed links found.")
+
+        indexed_links = []
+
+        with open(INDEXED_LINKS_LOG, "r", encoding="utf-8") as log_file:
+            for line in log_file:
+                # Assuming each log entry is in the format: 'timestamp - level - URL'
+                parts = line.strip().split(" - ")
+                if len(parts) >= 3:
+                    url = parts[2]
+                    indexed_links.append(url)
+
+        if limit is not None:
+            if limit < 0:
+                raise HTTPException(status_code=400, detail="Limit must be a positive integer.")
+            indexed_links = indexed_links[:limit]
+
+        return {"indexed_links": indexed_links}
+
+    except HTTPException as he:
+        raise he  # Re-raise HTTP exceptions to be handled by FastAPI
+    except Exception as e:
+        # Log the exception details for debugging
+        logger.exception("Error retrieving indexed links")
+        raise HTTPException(status_code=500, detail="Internal server error while retrieving indexed links.")
+
 @file_router.post("/load_file_with_markdown/")
 async def load_file_with_markdown(request: Request, filepaths: List[str]):
     try:
@@ -93,7 +153,9 @@ async def load_file_with_markdown(request: Request, filepaths: List[str]):
             # Log the ID and a 100-character snippet of the document
             snippet = document.text_content[:100].replace('\n', ' ').replace('\r', ' ')
             # Ensure 'doc_logger' is defined; if not, use 'logger' or define 'doc_logger'
-            doc_logger.info(f"ID: {doc_id}_{path}, Snippet: {snippet}")
+            # doc_logger.info(f"ID: {doc_id}_{path}, Snippet: {snippet}")
+            # Log the indexed URL to the single log file
+            indexed_links_logger.info(f"{doc_id}_{path}")


     except Exception as e:
@@ -181,7 +243,7 @@ async def load_file_with_markdown_function(filepaths: List[str],
             # Log the ID and a 100-character snippet of the document
             snippet = document.text_content[:100].replace('\n', ' ').replace('\r', ' ')
             # Ensure 'doc_logger' is defined; if not, use 'logger' or define 'doc_logger'
-            doc_logger(f"ID: {doc_id}, Snippet: {snippet}")
+            # doc_logger(f"ID: {doc_id}, Snippet: {snippet}")
             logger.info(f"ID: {doc_id}, Snippet: {snippet}")

         except Exception as e:
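Note on the new endpoint: each indexed_links_logger.info(f"{doc_id}_{path}") call appends a line of the form 'timestamp - INFO - {doc_id}_{url}' to indexed_links.txt, and get_indexed_links recovers the third ' - '-separated field. A minimal sketch of that round trip, runnable on its own; the scratch file name, logger name, and sample URL below are illustrative and not part of the commit:

import logging

# Same handler/formatter shape as the diff above, pointed at a scratch file.
log_path = "indexed_links_demo.txt"  # illustrative name, not the real log
handler = logging.FileHandler(log_path)
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
demo_logger = logging.getLogger('indexed_links_demo')
demo_logger.setLevel(logging.INFO)
demo_logger.addHandler(handler)
demo_logger.propagate = False  # avoid duplicate lines via the root logger

# Write one entry the way load_file_with_markdown now does.
demo_logger.info("06c0cc2a-ec63-4458-a4e9-ea6e96e0bbb4_https://example.com/page")
handler.flush()

# Read it back the way get_indexed_links does.
with open(log_path, encoding="utf-8") as f:
    for line in f:
        parts = line.strip().split(" - ")
        if len(parts) >= 3:
            print(parts[2])

Against a running app, GET /indexed_links/?limit=2 would return {"indexed_links": [...]} with at most two entries. One caveat: parts[2] keeps only the text up to the next ' - ', so a URL that itself contains ' - ' would be truncated; line.strip().split(" - ", 2)[2] would be safer.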
app/services/message.py
CHANGED
@@ -142,17 +142,18 @@ async def generate_response_from_gemini(
    logger.info(f"Generating response for sender: {sender}")

    # Initialize the model
-   model = genai.GenerativeModel("gemini-1.5-pro-002", system_instruction= system_prompt)
-
+   # model = genai.GenerativeModel("gemini-1.5-pro-002", system_instruction= system_prompt)
+   model = genai.GenerativeModel("gemini-1.5-flash", system_instruction= system_prompt)
    # Start chat with history
    chat = model.start_chat(history=history)

    if content:
        if rag_system:
            keywords = extract_keywords_async(content)
+           # keywords = []
            logger.info(f"Extracted Keywords: {keywords}")
            # Implement RAG: Retrieve relevant documents
-           retrieved_docs = await rag_system.adv_query(content, keywords=keywords, top_k=
+           retrieved_docs = await rag_system.adv_query(content, keywords=keywords, top_k=5)
            if retrieved_docs:
                logger.info(f"Retrieved {len(retrieved_docs)} documents for context.")
                # Format the retrieved documents as a context string
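The model swap to gemini-1.5-flash and the pinned top_k=5 change latency and cost but not the flow: extract keywords, retrieve the top documents, then fold them into the prompt. A minimal sketch of that retrieve-then-generate flow with a stub in place of RAGSystem, whose adv_query internals are not shown in this diff; StubRAG, build_prompt, and the keyword list are invented for illustration, and the real code calls genai.GenerativeModel and model.start_chat as above:

import asyncio
from typing import List

class StubRAG:
    # Stand-in for app.search.rag_pipeline.RAGSystem; only the adv_query
    # signature (content, keywords, top_k) is taken from the diff.
    async def adv_query(self, content: str, keywords: List[str], top_k: int = 5) -> List[str]:
        docs = ["doc about izin pengumpulan sumbangan", "doc about arahan sistem drainase"]
        return docs[:top_k]

async def build_prompt(content: str) -> str:
    rag_system = StubRAG()
    retrieved_docs = await rag_system.adv_query(content, keywords=["izin"], top_k=5)
    # Format the retrieved documents as a context string, as message.py does.
    context = "\n".join(retrieved_docs)
    return f"Context:\n{context}\n\nQuestion: {content}"

print(asyncio.run(build_prompt("Bagaimana cara mengurus izin?")))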
indexed_links.txt
ADDED
@@ -0,0 +1,5 @@
+2024-12-26 15:02:10,578 - INFO - 06c0cc2a-ec63-4458-a4e9-ea6e96e0bbb4_https://sswalfa.surabaya.go.id/info/detail/izin-pengumpulan-sumbangan-bencana
+2024-12-26 15:02:11,146 - INFO - 2c5a8065-7df2-42d0-8e1e-0e59804c6f25_https://sswalfa.surabaya.go.id/info/detail/izin-pemakaian-ruang-terbuka-hijau
+2024-12-26 15:02:11,848 - INFO - 521bfbd9-654a-4cf9-8b6c-79de4b8e5cd2_https://sswalfa.surabaya.go.id/info/detail/pengganti-ipt
+2024-12-26 15:02:12,549 - INFO - e4088c2a-b9ef-4c04-b2f8-eb703279ba21_https://sswalfa.surabaya.go.id/info/detail/rangkaian-pelayanan-surat-pernyataan-belum-menikah-lagi-bagi-jandaduda
+2024-12-26 15:02:13,236 - INFO - 458cc97a-93db-44bf-b7f8-e60c22da2a5e_https://sswalfa.surabaya.go.id/info/detail/arahan-sistem-drainase