pro-search-api/services/qdrant_searcher.py
import logging
import torch
import numpy as np
from qdrant_client import QdrantClient
from qdrant_client.http.models import Filter, FieldCondition, MatchValue


class QdrantSearcher:
    def __init__(self, qdrant_url, access_token):
        # Removed the encoder since embeddings are precomputed externally
        self.client = QdrantClient(url=qdrant_url, api_key=access_token)

    def search_documents(self, collection_name, query_embedding, user_id, limit=3):
        logging.info("Starting document search")

        # Ensure the query embedding is a plain Python list before sending it to Qdrant
        if isinstance(query_embedding, torch.Tensor):
            # .cpu() guards against tensors that live on a GPU device
            query_embedding = query_embedding.detach().cpu().numpy().tolist()
            logging.info("Converted torch tensor query embedding to list")
        elif isinstance(query_embedding, np.ndarray):
            query_embedding = query_embedding.tolist()
            logging.info("Converted numpy array query embedding to list")

        # Restrict results to documents belonging to this user
        query_filter = Filter(must=[FieldCondition(key="user_id", match=MatchValue(value=user_id))])

        try:
            hits = self.client.search(
                collection_name=collection_name,
                query_vector=query_embedding,
                limit=limit,
                query_filter=query_filter
            )
        except Exception as e:
            logging.error(f"Error during Qdrant search: {e}")
            return None, str(e)

        if not hits:
            logging.info("No documents found for the given query")
            return None, "No documents found for the given query."

        hits_list = []
        for hit in hits:
            hit_info = {
                "id": hit.id,
                "score": hit.score,
                "file_id": hit.payload.get('file_id'),
                "organization_id": hit.payload.get('organization_id'),
                "chunk_index": hit.payload.get('chunk_index'),
                "chunk_text": hit.payload.get('chunk_text'),
                "s3_bucket_key": hit.payload.get('s3_bucket_key')
            }
            hits_list.append(hit_info)

        logging.info(f"Document search completed with {len(hits_list)} hits")
        return hits_list, None
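

if __name__ == "__main__":
    # Hypothetical usage sketch, not part of the original module: it only illustrates how
    # the class is expected to be called with an embedding precomputed elsewhere.
    # The URL, token, collection name, user id, and the random stand-in vector below are
    # placeholder assumptions, not values from the actual service.
    logging.basicConfig(level=logging.INFO)

    searcher = QdrantSearcher(qdrant_url="http://localhost:6333", access_token="dummy-token")

    # In the real service the embedding comes from an external embed-query step;
    # a 384-dimensional random vector is used here purely as a stand-in.
    fake_embedding = np.random.rand(384).astype(np.float32)

    hits, error = searcher.search_documents(
        collection_name="documents",
        query_embedding=fake_embedding,
        user_id="user-123",
        limit=3,
    )
    if error:
        logging.error(f"Search failed: {error}")
    else:
        for hit in hits:
            logging.info(f"score={hit['score']:.3f} file_id={hit['file_id']} chunk={hit['chunk_index']}")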