Update app.py

app.py CHANGED
@@ -108,13 +108,10 @@ class MistralRAGChatbot:
     def __init__(self, vector_db_path: str, annoy_index_path: str):
         self.embeddings, self.texts = self.load_vector_db(vector_db_path)
         self.annoy_index = self.load_annoy_index(annoy_index_path, self.embeddings.shape[1])
-        self.tfidf_matrix, self.tfidf_vectorizer = self.calculate_tfidf(self.texts)
+        # self.tfidf_matrix, self.tfidf_vectorizer = self.calculate_tfidf(self.texts)
         self.bm25 = BM25Okapi([text.split() for text in self.texts])
         self.word2vec_model = self.train_word2vec(self.texts)
         self.reranking_methods = {
-            # 'reciprocal_rank_fusion': self.reciprocal_rank_fusion,
-            # 'weighted_score_fusion': self.weighted_score_fusion,
-            # 'semantic_similarity': self.semantic_similarity_reranking,
             'advanced_fusion': self.advanced_fusion_retrieval
         }
         logging.info("MistralRAGChatbot initialized successfully.")
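The constructor now keeps only the Annoy, BM25, and Word2Vec retrievers active and exposes a single 'advanced_fusion' reranking method. For reference, here is a minimal sketch of how the retained BM25Okapi index is built and queried, assuming the rank_bm25 package and the same whitespace tokenization as above; the sample texts are hypothetical, not the Space's corpus:

from rank_bm25 import BM25Okapi

# Hypothetical corpus, tokenized by whitespace exactly as in __init__.
texts = ["mistral answers questions from context", "annoy builds approximate vector indexes"]
bm25 = BM25Okapi([text.split() for text in texts])

# Score every document against a tokenized query; higher is better.
scores = bm25.get_scores("vector indexes".split())
ranked = sorted(range(len(texts)), key=lambda i: -scores[i])
print(ranked, scores)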
@@ -134,11 +131,11 @@ class MistralRAGChatbot:
         logging.info(f"Loaded Annoy index from {annoy_index_path}.")
         return annoy_index
 
-    def calculate_tfidf(self, texts: List[str]) -> Tuple[np.ndarray, TfidfVectorizer]:
-        vectorizer = TfidfVectorizer(stop_words='english')
-        tfidf_matrix = vectorizer.fit_transform(texts)
-        logging.info("TF-IDF matrix calculated.")
-        return tfidf_matrix, vectorizer
+    # def calculate_tfidf(self, texts: List[str]) -> Tuple[np.ndarray, TfidfVectorizer]:
+    #     vectorizer = TfidfVectorizer(stop_words='english')
+    #     tfidf_matrix = vectorizer.fit_transform(texts)
+    #     logging.info("TF-IDF matrix calculated.")
+    #     return tfidf_matrix, vectorizer
 
     def train_word2vec(self, texts: List[str]) -> Word2Vec:
         tokenized_texts = [text.split() for text in texts]
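The context lines above show train_word2vec feeding the same whitespace-tokenized texts into gensim. A rough standalone sketch of that step, assuming gensim 4.x; the corpus and hyperparameters below are illustrative and not necessarily the Space's settings:

from gensim.models import Word2Vec

# Illustrative corpus; the real one comes from load_vector_db.
texts = ["retrieval augmented generation", "generation grounded in retrieved context"]
tokenized_texts = [text.split() for text in texts]

# Train a small embedding model; per-token vectors live in model.wv.
model = Word2Vec(sentences=tokenized_texts, vector_size=100, window=5, min_count=1, workers=1)
print(model.wv["generation"].shape)  # (100,)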
@@ -250,12 +247,12 @@ class MistralRAGChatbot:
         logging.debug(f"Annoy retrieval returned {len(indices)} documents.")
         return indices, scores
 
-    def retrieve_with_tfidf(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
-        query_vec = self.tfidf_vectorizer.transform([user_query])
-        similarities = cosine_similarity(query_vec, self.tfidf_matrix).flatten()
-        indices = np.argsort(-similarities)[:top_k]
-        logging.debug(f"TF-IDF retrieval returned {len(indices)} documents.")
-        return indices, similarities[indices].tolist()
+    # def retrieve_with_tfidf(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
+    #     query_vec = self.tfidf_vectorizer.transform([user_query])
+    #     similarities = cosine_similarity(query_vec, self.tfidf_matrix).flatten()
+    #     indices = np.argsort(-similarities)[:top_k]
+    #     logging.debug(f"TF-IDF retrieval returned {len(indices)} documents.")
+    #     return indices, similarities[indices].tolist()
 
     def retrieve_with_bm25(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
         tokenized_query = user_query.split()
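The disabled TF-IDF path ranks documents by cosine similarity between the transformed query and the precomputed matrix, as the commented lines show. A self-contained sketch of that idea with scikit-learn, using made-up documents rather than the Space's corpus:

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

texts = [
    "annoy approximate nearest neighbour search",
    "bm25 keyword ranking",
    "tfidf cosine retrieval",
]
vectorizer = TfidfVectorizer(stop_words="english")
tfidf_matrix = vectorizer.fit_transform(texts)            # (n_docs, n_terms)

query_vec = vectorizer.transform(["cosine tfidf retrieval"])
similarities = cosine_similarity(query_vec, tfidf_matrix).flatten()
top_k = 2
indices = np.argsort(-similarities)[:top_k]               # best documents first
print(indices, similarities[indices].tolist())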
@@ -278,24 +275,24 @@ class MistralRAGChatbot:
         indices = np.argsort(-similarities)[:top_k]
         return indices, similarities[indices].tolist()
 
-    def retrieve_with_euclidean(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
-        distances = euclidean_distances([query_embedding], self.embeddings).flatten()
-        indices = np.argsort(distances)[:top_k]
-        logging.debug(f"Euclidean retrieval returned {len(indices)} documents.")
-        return indices, distances[indices].tolist()
-
-    def retrieve_with_jaccard(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
-        query_set = set(user_query.lower().split())
-        scores = []
-        for doc in self.texts:
-            doc_set = set(doc.lower().split())
-            intersection = query_set.intersection(doc_set)
-            union = query_set.union(doc_set)
-            score = float(len(intersection)) / len(union) if union else 0
-            scores.append(score)
-        indices = np.argsort(-np.array(scores))[:top_k]
-        logging.debug(f"Jaccard retrieval returned {len(indices)} documents.")
-        return indices.tolist(), [scores[i] for i in indices]
+    # def retrieve_with_euclidean(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
+    #     distances = euclidean_distances([query_embedding], self.embeddings).flatten()
+    #     indices = np.argsort(distances)[:top_k]
+    #     logging.debug(f"Euclidean retrieval returned {len(indices)} documents.")
+    #     return indices, distances[indices].tolist()
+
+    # def retrieve_with_jaccard(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
+    #     query_set = set(user_query.lower().split())
+    #     scores = []
+    #     for doc in self.texts:
+    #         doc_set = set(doc.lower().split())
+    #         intersection = query_set.intersection(doc_set)
+    #         union = query_set.union(doc_set)
+    #         score = float(len(intersection)) / len(union) if union else 0
+    #         scores.append(score)
+    #     indices = np.argsort(-np.array(scores))[:top_k]
+    #     logging.debug(f"Jaccard retrieval returned {len(indices)} documents.")
+    #     return indices.tolist(), [scores[i] for i in indices]
 
     def rerank_documents(
         self,
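The disabled Jaccard retriever scores each document as the size of the query/document token intersection divided by the size of their union. A compact standalone version of that scoring, with hypothetical documents:

import numpy as np

def jaccard_scores(query: str, docs: list) -> list:
    query_set = set(query.lower().split())
    scores = []
    for doc in docs:
        doc_set = set(doc.lower().split())
        union = query_set | doc_set
        scores.append(len(query_set & doc_set) / len(union) if union else 0.0)
    return scores

docs = ["euclidean distance over embeddings", "jaccard token overlap retrieval"]
scores = jaccard_scores("token overlap", docs)
top = np.argsort(-np.array(scores))[:2]
print(top.tolist(), [scores[i] for i in top])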
@@ -312,54 +309,6 @@ class MistralRAGChatbot:
 
         return reranked_docs
 
-    # def reciprocal_rank_fusion(self, user_query: str, docs: List[dict]) -> List[dict]:
-    #     k = 60
-    #     method_ranks = {}
-    #     fused_scores = {}
-    #     for doc in docs:
-    #         method = doc['method']
-    #         if method not in method_ranks:
-    #             method_ranks[method] = {doc['index']: 1}
-    #         else:
-    #             method_ranks[method][doc['index']] = len(method_ranks[method]) + 1
-    #     for doc in docs:
-    #         idx = doc['index']
-    #         if idx not in fused_scores:
-    #             fused_scores[idx] = sum(1 / (k + rank) for method_rank in method_ranks.values() for i, rank in method_rank.items() if i == idx)
-    #     reranked_docs = sorted(docs, key=lambda x: fused_scores.get(x['index'], 0), reverse=True)
-    #     for doc in reranked_docs:
-    #         doc['rrf_score'] = fused_scores.get(doc['index'], 0)
-    #     return reranked_docs
-
-    # def weighted_score_fusion(self, user_query: str, docs: List[dict]) -> List[dict]:
-    #     method_weights = {
-    #         'annoy': 0.3,
-    #         'tfidf': 0.2,
-    #         'bm25': 0.2,
-    #         'word2vec': 0.1,
-    #         'euclidean': 0.1,
-    #         'jaccard': 0.1
-    #     }
-    #     fused_scores = {}
-    #     for doc in docs:
-    #         idx = doc['index']
-    #         if idx not in fused_scores:
-    #             fused_scores[idx] = doc['score'] * method_weights[doc['method']]
-    #         else:
-    #             fused_scores[idx] += doc['score'] * method_weights[doc['method']]
-
-    #     reranked_docs = sorted(docs, key=lambda x: fused_scores[x['index']], reverse=True)
-    #     for doc in reranked_docs:
-    #         doc['wsf_score'] = fused_scores[doc['index']]
-    #     return reranked_docs
-
-    # def semantic_similarity_reranking(self, user_query: str, docs: List[dict]) -> List[dict]:
-    #     query_embedding = np.mean([self.word2vec_model.wv[token] for token in user_query.split() if token in self.word2vec_model.wv], axis=0)
-    #     for doc in docs:
-    #         doc_embedding = np.mean([self.word2vec_model.wv[token] for token in doc['text'].split() if token in self.word2vec_model.wv], axis=0)
-    #         doc_embedding = doc_embedding if doc_embedding.shape == query_embedding.shape else np.zeros(query_embedding.shape)
-    #         doc['semantic_score'] = cosine_similarity([query_embedding], [doc_embedding])[0][0]
-    #     return sorted(docs, key=lambda x: x['semantic_score'], reverse=True)
 
     def build_prompt(self, context: str, user_query: str, response_style: str) -> str:
         styles = {
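The deleted comment block implements reciprocal rank fusion with the usual 1 / (k + rank) score and k = 60, summed over the retrieval methods that returned a document. Below is a compact standalone sketch of that formula; it is illustrative only and is not the advanced_fusion_retrieval method the Space actually keeps, whose code is not shown in this diff:

from collections import defaultdict

def reciprocal_rank_fusion(rankings: dict, k: int = 60) -> list:
    """rankings maps a method name to document indices ordered best-first."""
    fused = defaultdict(float)
    for method, ranked_ids in rankings.items():
        for rank, doc_id in enumerate(ranked_ids, start=1):
            fused[doc_id] += 1.0 / (k + rank)
    # Highest fused score first.
    return sorted(fused.items(), key=lambda item: -item[1])

# Hypothetical per-method rankings of document indices.
rankings = {"annoy": [2, 0, 1], "bm25": [0, 2, 3]}
print(reciprocal_rank_fusion(rankings))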
|