Update app.py
app.py
CHANGED
@@ -108,7 +108,6 @@ class MistralRAGChatbot:
     def __init__(self, vector_db_path: str, annoy_index_path: str):
         self.embeddings, self.texts = self.load_vector_db(vector_db_path)
         self.annoy_index = self.load_annoy_index(annoy_index_path, self.embeddings.shape[1])
-        # self.tfidf_matrix, self.tfidf_vectorizer = self.calculate_tfidf(self.texts)
         self.bm25 = BM25Okapi([text.split() for text in self.texts])
         self.word2vec_model = self.train_word2vec(self.texts)
         self.reranking_methods = {
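The BM25 index built in __init__ is what stands in for the TF-IDF path removed above. As a minimal sketch of how that rank_bm25.BM25Okapi object is built and queried (the corpus and query below are illustrative placeholders, not the app's data):

from rank_bm25 import BM25Okapi
import numpy as np

# Illustrative corpus; in app.py this is self.texts loaded from the vector DB.
texts = [
    "annoy builds trees for approximate nearest neighbour search",
    "bm25 ranks documents by term frequency and document length",
]

# Same construction as in __init__: whitespace tokenization per document.
bm25 = BM25Okapi([text.split() for text in texts])

# get_scores returns one BM25 score per document for a tokenized query.
scores = bm25.get_scores("bm25 ranking".split())
top_indices = np.argsort(-scores)[:1]  # best-matching document first
print(top_indices, scores[top_indices])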
@@ -131,11 +130,6 @@ class MistralRAGChatbot:
         logging.info(f"Loaded Annoy index from {annoy_index_path}.")
         return annoy_index
 
-    # def calculate_tfidf(self, texts: List[str]) -> Tuple[np.ndarray, TfidfVectorizer]:
-    #     vectorizer = TfidfVectorizer(stop_words='english')
-    #     tfidf_matrix = vectorizer.fit_transform(texts)
-    #     logging.info("TF-IDF matrix calculated.")
-    #     return tfidf_matrix, vectorizer
 
     def train_word2vec(self, texts: List[str]) -> Word2Vec:
         tokenized_texts = [text.split() for text in texts]
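train_word2vec, shown only as context here, stays in place. For orientation, a rough sketch of such a method using gensim; the hyperparameters are assumptions for illustration, not values taken from app.py:

from typing import List
from gensim.models import Word2Vec

def train_word2vec(texts: List[str]) -> Word2Vec:
    # Whitespace tokenization, matching the context line above.
    tokenized_texts = [text.split() for text in texts]
    # vector_size/window/min_count are placeholder defaults, not the app's actual settings.
    return Word2Vec(sentences=tokenized_texts, vector_size=100, window=5, min_count=1)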
@@ -247,12 +241,6 @@ class MistralRAGChatbot:
         logging.debug(f"Annoy retrieval returned {len(indices)} documents.")
         return indices, scores
 
-    # def retrieve_with_tfidf(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
-    #     query_vec = self.tfidf_vectorizer.transform([user_query])
-    #     similarities = cosine_similarity(query_vec, self.tfidf_matrix).flatten()
-    #     indices = np.argsort(-similarities)[:top_k]
-    #     logging.debug(f"TF-IDF retrieval returned {len(indices)} documents.")
-    #     return indices, similarities[indices].tolist()
 
     def retrieve_with_bm25(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
         tokenized_query = user_query.split()
|
@@ -275,24 +263,6 @@ class MistralRAGChatbot:
|
|
275 |
indices = np.argsort(-similarities)[:top_k]
|
276 |
return indices, similarities[indices].tolist()
|
277 |
|
278 |
-
# def retrieve_with_euclidean(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
|
279 |
-
# distances = euclidean_distances([query_embedding], self.embeddings).flatten()
|
280 |
-
# indices = np.argsort(distances)[:top_k]
|
281 |
-
# logging.debug(f"Euclidean retrieval returned {len(indices)} documents.")
|
282 |
-
# return indices, distances[indices].tolist()
|
283 |
-
|
284 |
-
# def retrieve_with_jaccard(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
|
285 |
-
# query_set = set(user_query.lower().split())
|
286 |
-
# scores = []
|
287 |
-
# for doc in self.texts:
|
288 |
-
# doc_set = set(doc.lower().split())
|
289 |
-
# intersection = query_set.intersection(doc_set)
|
290 |
-
# union = query_set.union(doc_set)
|
291 |
-
# score = float(len(intersection)) / len(union) if union else 0
|
292 |
-
# scores.append(score)
|
293 |
-
# indices = np.argsort(-np.array(scores))[:top_k]
|
294 |
-
# logging.debug(f"Jaccard retrieval returned {len(indices)} documents.")
|
295 |
-
# return indices.tolist(), [scores[i] for i in indices]
|
296 |
|
297 |
def rerank_documents(
|
298 |
self,
|
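With the Euclidean and Jaccard retrievers gone, dense retrieval rests on the Annoy index referenced in the earlier hunks. The body of that retrieval method is not part of this diff; a minimal sketch of how an AnnoyIndex is typically loaded and queried (the 'angular' metric and the distance-to-score conversion are assumptions, not confirmed by app.py):

from typing import List, Tuple
import numpy as np
from annoy import AnnoyIndex

def annoy_retrieve(query_embedding: np.ndarray, index_path: str, dim: int, top_k: int) -> Tuple[List[int], List[float]]:
    # The metric must match the one used when the index was built ('angular' assumed here).
    annoy_index = AnnoyIndex(dim, 'angular')
    annoy_index.load(index_path)
    # Returns the ids of the top_k nearest stored vectors along with their distances.
    indices, distances = annoy_index.get_nns_by_vector(query_embedding, top_k, include_distances=True)
    # Flip distances into similarity-style scores so larger means closer.
    scores = [1.0 - d for d in distances]
    return indices, scores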