Shreyas094 committed
Update app.py
app.py CHANGED
@@ -15,8 +15,6 @@ from langchain_core.documents import Document
 from huggingface_hub import InferenceClient
 import inspect
 import logging
-import tempfile
-import shutil
 
 
 # Set up basic configuration for logging
@@ -48,19 +46,19 @@ llama_parser = LlamaParse(
     language="en",
 )
 
-def load_document(…
+def load_document(file: NamedTemporaryFile, parser: str = "llamaparse") -> List[Document]:
     """Loads and splits the document into pages."""
     if parser == "pypdf":
-        loader = PyPDFLoader(…
+        loader = PyPDFLoader(file.name)
         return loader.load_and_split()
     elif parser == "llamaparse":
         try:
-            documents = llama_parser.load_data(…
-            return [Document(page_content=doc.text, metadata={"source":…
+            documents = llama_parser.load_data(file.name)
+            return [Document(page_content=doc.text, metadata={"source": file.name}) for doc in documents]
         except Exception as e:
             print(f"Error using Llama Parse: {str(e)}")
             print("Falling back to PyPDF parser")
-            loader = PyPDFLoader(…
+            loader = PyPDFLoader(file.name)
             return loader.load_and_split()
     else:
         raise ValueError("Invalid parser specified. Use 'pypdf' or 'llamaparse'.")
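A note on the new signature: despite the NamedTemporaryFile annotation, only the object's .name attribute (its on-disk path) is ever read, so any object exposing .name works. A minimal usage sketch, assuming a hypothetical local sample.pdf standing in for a Gradio upload:

    from tempfile import NamedTemporaryFile
    import shutil

    # Copy a local PDF into a NamedTemporaryFile, mimicking what gr.File hands over.
    with open("sample.pdf", "rb") as src, NamedTemporaryFile(suffix=".pdf", delete=False) as up:
        shutil.copyfileobj(src, up)

    pages = load_document(up, parser="pypdf")  # each page becomes one Document
    print(len(pages), pages[0].metadata.get("source"))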
@@ -87,29 +85,13 @@ def update_vectors(files, parser):
     for file in files:
         logging.info(f"Processing file: {file.name}")
         try:
-
-            with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf', mode='wb') as temp_file:
-                # Write the content to the temporary file
-                temp_file.write(file.name.encode('utf-8'))  # Write the file path
-                temp_file_path = temp_file.name
-
-            # Save the uploaded file
-            os.makedirs("uploaded_files", exist_ok=True)
-            file_path = os.path.join("uploaded_files", os.path.basename(file.name))
-            shutil.copy(file.name, file_path)  # Copy the actual file
-
-            # Remove the temporary file
-            os.unlink(temp_file_path)
-
-            data = load_document(file_path, parser)
+            data = load_document(file, parser)
             logging.info(f"Loaded {len(data)} chunks from {file.name}")
-            for chunk in data:
-                logging.info(f"Chunk content preview: {chunk.page_content[:100]}...")  # Log first 100 characters of each chunk
             all_data.extend(data)
             total_chunks += len(data)
             # Append new documents instead of replacing
-            if not any(doc["name"] ==…
-            uploaded_documents.append({"name":…
+            if not any(doc["name"] == file.name for doc in uploaded_documents):
+                uploaded_documents.append({"name": file.name, "selected": True})
                 logging.info(f"Added new document to uploaded_documents: {file.name}")
             else:
                 logging.info(f"Document already exists in uploaded_documents: {file.name}")
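Worth noting why the temp-file dance could be dropped outright: the removed block wrote file.name.encode('utf-8') into the temporary PDF, i.e. the path string rather than the file's bytes (its own comment says as much), so the temp file was never a valid PDF, and the Gradio upload already sits on disk at file.name anyway. An illustrative contrast only, not code from the commit:

    temp_file.write(file.name.encode("utf-8"))            # old behavior: writes the PATH text into the .pdf
    shutil.copyfileobj(open(file.name, "rb"), temp_file)  # what a real byte copy would have looked like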
@@ -118,10 +100,16 @@ def update_vectors(files, parser):
 
     logging.info(f"Total chunks processed: {total_chunks}")
 
-    if…
+    if os.path.exists("faiss_database"):
+        logging.info("Updating existing FAISS database")
+        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+        database.add_documents(all_data)
+    else:
+        logging.info("Creating new FAISS database")
         database = FAISS.from_documents(all_data, embed)
-
-
+
+    database.save_local("faiss_database")
+    logging.info("FAISS database saved")
 
     return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files using {parser}.", gr.CheckboxGroup(
         choices=[doc["name"] for doc in uploaded_documents],
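The new block is the standard LangChain append-or-create pattern for a persisted FAISS index. A self-contained sketch, assuming a hypothetical helper name upsert_faiss, an embed object implementing LangChain's Embeddings interface, and FAISS imported from langchain_community (the app's actual import location may differ):

    import os
    from langchain_community.vectorstores import FAISS

    def upsert_faiss(docs, embed, path="faiss_database"):
        if os.path.exists(path):
            # Reload the saved index and extend it in place; the flag is required
            # because load_local unpickles the docstore.
            db = FAISS.load_local(path, embed, allow_dangerous_deserialization=True)
            db.add_documents(docs)
        else:
            db = FAISS.from_documents(docs, embed)  # first run: build from scratch
        db.save_local(path)  # persist index and docstore back to disk
        return db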
@@ -129,52 +117,6 @@ def update_vectors(files, parser):
         label="Select documents to query"
     )
 
-def delete_selected_files(selected_files):
-    global uploaded_documents
-    if not selected_files:
-        return "No files selected for deletion.", document_selector
-
-    deleted_files = []
-    for file_name in selected_files:
-        # Remove the file from uploaded_documents
-        uploaded_documents = [doc for doc in uploaded_documents if doc["name"] != file_name]
-
-        # Delete the file from the file system if it exists
-        file_path = os.path.join("uploaded_files", file_name)
-        if os.path.exists(file_path):
-            os.remove(file_path)
-
-        deleted_files.append(file_name)
-
-    # Update the FAISS database
-    update_faiss_database()
-
-    remaining_files = [doc["name"] for doc in uploaded_documents]
-    return f"Deleted files: {', '.join(deleted_files)}", gr.CheckboxGroup(choices=remaining_files, value=remaining_files, label="Select documents to query")
-
-def update_faiss_database():
-    global uploaded_documents
-
-    embed = get_embeddings()
-    all_data = []
-
-    for doc in uploaded_documents:
-        file_path = os.path.join("uploaded_files", doc["name"])
-        if os.path.exists(file_path):
-            data = load_document(file_path, parser="llamaparse")  # or use your preferred parser
-            all_data.extend(data)
-
-    if all_data:
-        database = FAISS.from_documents(all_data, embed)
-        database.save_local("faiss_database")
-        logging.info("FAISS database updated after deletion")
-    else:
-        # If no documents left, remove the FAISS database
-        if os.path.exists("faiss_database"):
-            import shutil
-            shutil.rmtree("faiss_database")
-        logging.info("All documents deleted, FAISS database removed")
-
 def generate_chunked_response(prompt, model, max_tokens=10000, num_calls=3, temperature=0.2, should_stop=False):
     print(f"Starting generate_chunked_response with {num_calls} calls")
     full_response = ""
@@ -305,6 +247,7 @@ def respond(message, history, model, temperature, num_calls, use_web_search, sel
     logging.info(f"User Query: {message}")
     logging.info(f"Model Used: {model}")
     logging.info(f"Search Type: {'Web Search' if use_web_search else 'PDF Search'}")
+
     logging.info(f"Selected Documents: {selected_docs}")
 
     try:
@@ -322,7 +265,7 @@ def respond(message, history, model, temperature, num_calls, use_web_search, sel
 
     # Filter relevant documents based on user selection
     all_relevant_docs = retriever.get_relevant_documents(message)
-    relevant_docs = [doc for doc in all_relevant_docs if…
+    relevant_docs = [doc for doc in all_relevant_docs if doc.metadata["source"] in selected_docs]
 
     if not relevant_docs:
         yield "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
@@ -460,12 +403,8 @@ def get_response_from_pdf(query, model, selected_docs, num_calls=3, temperature=
     relevant_docs = retriever.get_relevant_documents(query)
     logging.info(f"Number of relevant documents retrieved: {len(relevant_docs)}")
 
-    # Log the sources of all retrieved documents
-    for doc in relevant_docs:
-        logging.info(f"Retrieved document source: {doc.metadata['source']}")
-
     # Filter relevant_docs based on selected documents
-    filtered_docs = [doc for doc in relevant_docs if…
+    filtered_docs = [doc for doc in relevant_docs if doc.metadata["source"] in selected_docs]
     logging.info(f"Number of filtered documents: {len(filtered_docs)}")
 
     if not filtered_docs:
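Both filters (in respond and in get_response_from_pdf) rely on the same invariant: load_document records the upload's path in metadata["source"], and the CheckboxGroup choices carry those same strings, so membership in selected_docs matches exactly. A small self-check, assuming only langchain_core from the app's imports and hypothetical /tmp paths:

    from langchain_core.documents import Document

    docs = [
        Document(page_content="alpha", metadata={"source": "/tmp/a.pdf"}),
        Document(page_content="beta", metadata={"source": "/tmp/b.pdf"}),
    ]
    selected_docs = ["/tmp/a.pdf"]  # names as surfaced in the document selector
    filtered = [d for d in docs if d.metadata["source"] in selected_docs]
    assert [d.page_content for d in filtered] == ["alpha"]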