qcloud committed on
Commit
5bfdfae
·
1 Parent(s): d76fd60
app.py ADDED
@@ -0,0 +1,30 @@
+ import configparser
+ import os.path
+
+ from pages.main_page import init_pages
+ from vector_db.vector_db_client import VectorDB
+
+ CONFIG_MAP = {}
+ CONFIG_FILE = "conf/config.ini"
+
+
+ def init_config():
+     print(f"init configs {CONFIG_FILE}")
+     if not os.path.exists(CONFIG_FILE):
+         raise FileNotFoundError(f"The config file {CONFIG_FILE} was not found.")
+
+     conf = configparser.ConfigParser()
+     conf.read(CONFIG_FILE, encoding="UTF-8")
+
+     for section in conf.sections():  # flatten the INI into "<section>.<key>" -> value
+         for k, v in conf.items(section):
+             CONFIG_MAP[f"{section}.{k}"] = v
+
+
+ if __name__ == "__main__":
+     init_config()
+     vdb = VectorDB(CONFIG_MAP)
+
+     server_name = CONFIG_MAP.get("server.name")
+     server_port = int(CONFIG_MAP.get("server.port"))
+     init_pages(vdb, server_name, server_port, CONFIG_MAP)
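
For reference (not part of the commit), a minimal sketch of the flattened map that init_config() builds from the conf/config.ini added below: every <section>.<key> pair becomes one string key, which is how constants such as "download_model.model_name" and "vector_db.address" are resolved in the other modules.

    # Illustrative sketch only: the CONFIG_MAP produced by init_config()
    # from this commit's conf/config.ini (configparser lower-cases keys).
    CONFIG_MAP = {
        "vector_db.address": "http://lb-ozz7dtn0-l7dqtav6xoyir4bm.clb.ap-guangzhou.tencentclb.com:10000",
        "vector_db.key": "6uwifScRaDLNND2970YKH4uIHe3eZZn37hYu3FN2",
        "vector_db.ai_db": "test_ai_graph_db",
        "vector_db.ai_graph_emb_collection": "test_ai_graph_collection",
        "download_model.local_model_path": "models/",
        "download_model.model_name": "openai/clip-vit-base-patch32",
        "graph_upload.local_graph_path": "graphs/",
        "server.name": "127.0.0.1",
        "server.port": "8080",  # still a string; app.py casts it with int()
    }
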
conf/config.ini ADDED
@@ -0,0 +1,18 @@
+
+ [vector_db]
+ address=http://lb-ozz7dtn0-l7dqtav6xoyir4bm.clb.ap-guangzhou.tencentclb.com:10000
+ key=6uwifScRaDLNND2970YKH4uIHe3eZZn37hYu3FN2
+ ai_db=test_ai_graph_db
+ ai_graph_emb_collection=test_ai_graph_collection
+
+ [download_model]
+ local_model_path=models/
+ model_name=openai/clip-vit-base-patch32
+
+ [graph_upload]
+ local_graph_path=graphs/
+
+
+ [server]
+ name=127.0.0.1
+ port=8080
graphs/.DS_Store ADDED
Binary file (6.15 kB).

models/.DS_Store ADDED
Binary file (6.15 kB).
 
pages/__init__.py ADDED
File without changes
pages/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (170 Bytes).

pages/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (157 Bytes).

pages/__pycache__/chat.cpython-311.pyc ADDED
Binary file (1.96 kB).

pages/__pycache__/chat_llm.cpython-311.pyc ADDED
Binary file (4.77 kB).

pages/__pycache__/chat_search.cpython-311.pyc ADDED
Binary file (2.23 kB).

pages/__pycache__/chat_search.cpython-312.pyc ADDED
Binary file (5.69 kB).

pages/__pycache__/file_upload.cpython-311.pyc ADDED
Binary file (3.81 kB).

pages/__pycache__/init_knowledge.cpython-311.pyc ADDED
Binary file (1.61 kB).

pages/__pycache__/initial_and_upload.cpython-312.pyc ADDED
Binary file (14.9 kB).

pages/__pycache__/knowledge.cpython-311.pyc ADDED
Binary file (2.83 kB).

pages/__pycache__/knowledge_store.cpython-311.pyc ADDED
Binary file (3.63 kB).

pages/__pycache__/main_page.cpython-311.pyc ADDED
Binary file (3.98 kB).

pages/__pycache__/main_page.cpython-312.pyc ADDED
Binary file (2.01 kB).
 
pages/chat_search.py ADDED
@@ -0,0 +1,94 @@
+ import gradio as gr
+ from vector_db.vector_db_client import VectorDB
+ from PIL import Image
+ from transformers import AutoProcessor, CLIPModel
+ import os
+ import uuid
+ from tcvectordb.model.document import SearchParams
+ import traceback
+
+ LOCAL_MODEL_PATH = "download_model.local_model_path"
+ MODEL_NAME = "download_model.model_name"
+ LOCAL_GRAPH_PATH = "graph_upload.local_graph_path"
+
+
+ class ChatSearch:
+     def __init__(self, config, vdb: VectorDB):
+         self.vdb = vdb
+         self.model_name = config.get(MODEL_NAME)
+         self.local_model_path = config.get(LOCAL_MODEL_PATH)
+         self.local_graph_path = config.get(LOCAL_GRAPH_PATH)
+         self.model_cache_directory = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), self.local_model_path, self.model_name)
+         self.graph_cache_directory = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), self.local_graph_path)
+
+     def initial_model(self):
+         model = CLIPModel.from_pretrained(self.model_cache_directory)
+         processor = AutoProcessor.from_pretrained(self.model_cache_directory)
+         return model, processor
+
+     def search_result(self, image):
+         if image is None:
+             raise gr.Error("Please upload an image first.")
+
+         if not os.path.exists(self.model_cache_directory):
+             raise gr.Error(f"Model cache directory {self.model_cache_directory} does not exist; the model cannot be initialized.")
+
+         model, processor = self.initial_model()
+         try:
+             # Generate a unique file name for the query image
+             unique_filename = f"{uuid.uuid4().hex}.png"
+             image_path = os.path.join(self.graph_cache_directory, unique_filename)
+
+             # Save the uploaded image into the graph cache directory
+             image.save(image_path)
+
+             image_vector = self._process_image(image_path, model, processor).squeeze().tolist()  # flatten to a 1-D list
+
+             # Run a vector similarity search against the graph collection
+             collection = self.vdb.get_collection()
+             res = collection.search(
+                 vectors=[image_vector],
+                 params=SearchParams(ef=200),
+                 limit=10,
+                 output_fields=['local_graph_path']
+             )
+
+             results = []
+             for docs in res:
+                 for doc in docs:
+                     image_path = doc['local_graph_path']
+                     try:
+                         image = Image.open(image_path)
+                         results.append(image)
+                     except Exception as e:
+                         print(f"Failed to load image {image_path}: {e}")
+             return results
+         except Exception as e:
+             error_trace = traceback.format_exc()
+             print(error_trace)
+             raise gr.Error(f"Search failed: {e}")
+
+     def _process_image(self, image_path, emb_model, processor):
+         """
+         Convert a single image file into an embedding vector.
+
+         Args:
+             image_path (str): Path to the image file.
+
+         Returns:
+             torch.Tensor: Vector representation of the image.
+         """
+         image = Image.open(image_path)
+         inputs = processor(images=image, return_tensors="pt")
+         image_features = emb_model.get_image_features(**inputs)
+         return image_features
+
+     def get_chart(self):
+         return gr.Interface(
+             fn=self.search_result,
+             inputs=gr.Image(type="pil", label="Upload an image"),
+             outputs=gr.Gallery(label="Search results"),
+             theme="soft",
+             description="Upload an image to search for similar images",
+             allow_flagging="never"
+         )
+
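
A note on the embedding dimension: get_image_features() from openai/clip-vit-base-patch32 returns a 512-dimensional vector, which is why the HNSW index in vector_db/vector_db_client.py is declared with dimension=512. Below is a minimal standalone sketch (not part of the commit; it loads the model from the Hub rather than from the local cache directory used by ChatSearch).

    # Sketch: verify that a CLIP image embedding matches the index dimension (512).
    from PIL import Image
    from transformers import AutoProcessor, CLIPModel

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

    dummy = Image.new("RGB", (224, 224))           # any PIL image works
    inputs = processor(images=dummy, return_tensors="pt")
    features = model.get_image_features(**inputs)  # tensor of shape (1, 512)
    vector = features.squeeze().tolist()           # the 1-D list stored in and searched against VectorDB
    assert len(vector) == 512
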
pages/initial_and_upload.py ADDED
@@ -0,0 +1,232 @@
+ import time
+ import gradio as gr
+ from huggingface_hub import snapshot_download
+ import os
+ import zipfile
+ from PIL import Image, UnidentifiedImageError
+ from transformers import AutoProcessor, CLIPModel
+ from vector_db.vector_db_client import VectorDB
+ from tcvectordb.model.document import Document
+ import uuid
+ import traceback
+ import numpy as np
+ # Flattened config keys produced by app.py's init_config()
+ LOCAL_MODEL_PATH = "download_model.local_model_path"
+ MODEL_NAME = "download_model.model_name"
+ LOCAL_GRAPH_PATH = "graph_upload.local_graph_path"
+ os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"  # use the Hugging Face mirror for downloads
+ init_css = """
+ <style>
+ .equal-height-row {
+     display: flex;
+ }
+ .equal-height-column {
+     flex: 1;
+     display: flex;
+     flex-direction: column;
+ }
+ .equal-height-column > * {
+     flex: 1;
+ }
+ </style>
+ """
+ class Initial_and_Upload:
+
+     def __init__(self, config, vdb: VectorDB):
+         self.vdb = vdb
+         self.model_name = config.get(MODEL_NAME)
+         self.local_model_path = config.get(LOCAL_MODEL_PATH)
+         self.local_graph_path = config.get(LOCAL_GRAPH_PATH)
+         self.model_cache_directory = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), self.local_model_path, self.model_name)
+         self.graph_cache_directory = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), self.local_graph_path)
+
+     def initial_model(self):
+         model = CLIPModel.from_pretrained(self.model_cache_directory)
+         processor = AutoProcessor.from_pretrained(self.model_cache_directory)
+         return model, processor
+
+     def _download_model(self, model_name, progress=gr.Progress()):
+         """
+         Download the given Hugging Face model and save it locally.
+
+         Args:
+             model_name (str): Name of the model on the Hugging Face Hub.
+                 Files are saved under self.model_cache_directory.
+         """
+
+
+         os.environ['TRANSFORMERS_CACHE'] = self.model_cache_directory  # point the transformers cache at the local model directory
+
+         # Create the save directory if it does not exist
+         if not os.path.exists(self.model_cache_directory):
+             os.makedirs(self.model_cache_directory)
+         text = f"[Downloading] model {model_name}; the download involves several files, so detailed progress is only shown in the backend logs.\n"
+         progress(0.5, desc=text)
+         try:
+             # Download the model snapshot
+             snapshot_download(
+                 repo_id=model_name,
+                 local_dir=self.model_cache_directory,
+                 local_dir_use_symlinks=False,
+             )
+
+             progress(1, desc=f"Model {model_name} has been downloaded and saved to {self.model_cache_directory}")
+             text += f"Model {model_name} has been downloaded and saved to {self.model_cache_directory}"
+
+             time.sleep(0.3)
+             return text
+         except Exception as e:
+             text += f"[Download failed] Reason: {e}"
+             return text
+
+     def _process_image(self, image_path, emb_model, processor):
+         """
+         Convert a single image file into an embedding vector.
+
+         Args:
+             image_path (str): Path to the image file.
+
+         Returns:
+             torch.Tensor: Vector representation of the image.
+         """
+
+         image = Image.open(image_path)
+         # image.verify()  # optionally validate that the image is not corrupted
+         inputs = processor(images=image, return_tensors="pt")
+         image_features = emb_model.get_image_features(**inputs)
+         return image_features
+
+     def _handle_upload(self, file, progress=gr.Progress()):
+         """
+         Handle an uploaded file: detect whether it is an image or a ZIP archive and convert the image(s) into vectors.
+
+         Args:
+             file (file): The uploaded file.
+
+         Returns:
+             tuple: The processing log text and the list of image vectors.
+         """
+         output_text = ""
+         image_vectors = []
+         if not os.path.exists(self.model_cache_directory):
+             output_text += f"Model cache directory {self.model_cache_directory} does not exist; the model cannot be initialized."
+         else:
+             model, processor = self.initial_model()
+             collection = self.vdb.get_collection()
+
+             if zipfile.is_zipfile(file.name):
+                 with zipfile.ZipFile(file.name, 'r') as zip_ref:
+                     zip_ref.extractall(self.local_graph_path)
+                     image_files = [file_name for file_name in zip_ref.namelist() if file_name.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif')) and not file_name.startswith('__MACOSX') and not file_name.startswith('._')]
+
+                 total_files = len(image_files)
+                 for i, file_name in enumerate(image_files):
+                     image_path = os.path.join(self.local_graph_path, file_name)
+                     try:
+                         image_vector = self._process_image(image_path, model, processor).squeeze().tolist()  # flatten to a 1-D list
+                         random_uuid = str(uuid.uuid4())  # document id
+                         collection.upsert(documents=[Document(id=random_uuid, vector=image_vector, local_graph_path=image_path)], build_index=True)
+                         output_text += f"Processed image: {file_name}\n"
+                     except UnidentifiedImageError:
+                         output_text += f"Unrecognized image file: {file_name}\n"
+
+                     # Update the progress bar
+                     progress((i + 1) / total_files)
+
+                 output_text += "A ZIP archive was uploaded; it has been extracted and all images have been processed."
+             else:
+                 try:
+                     # Save the single image into the graph cache directory
+                     image_path = os.path.join(self.graph_cache_directory, os.path.basename(file.name))
+                     with open(file.name, "rb") as f_src:
+                         with open(image_path, "wb") as f_dst:
+                             f_dst.write(f_src.read())
+
+                     image_vector = self._process_image(image_path, model, processor).squeeze().tolist()  # flatten to a 1-D list
+                     random_uuid = str(uuid.uuid4())  # document id
+                     collection.upsert(documents=[Document(id=random_uuid, vector=image_vector, local_graph_path=image_path)], build_index=True)
+                     output_text += "An image file was uploaded and has been processed.\n"
+
+                     # Update the progress bar
+                     progress(1.0)
+                 except (IOError, SyntaxError) as e:
+                     output_text += f"Unrecognized file type: {e}\n"
+
+         # Return the processing log and the image vectors
+         return output_text, image_vectors
+     def _initialize_vector_db(self, progress=gr.Progress()):
+         """
+         Initialize the vector database.
+
+         Returns:
+             str: Initialization result text.
+         """
+         output_text = f"[Connecting] VectorDB {self.vdb.address}\n"
+         progress(0, desc=output_text)
+         try:
+             client = self.vdb.create_client()
+             client.list_databases()
+             progress(0.05, desc=f"[Connected] VectorDB {self.vdb.address}\n")
+             output_text += f"[Connected] VectorDB {self.vdb.address}\n"
+             client.close()
+
+             progress(0.1, desc=f"[Initializing] AI database '{self.vdb.db_name}'\n")
+             output_text += f"[Initializing] AI database '{self.vdb.db_name}'\n"
+             self.vdb.init_database()
+             progress(0.3, desc=f"[Initialized] AI database '{self.vdb.db_name}'\n")
+             output_text += f"[Initialized] AI database '{self.vdb.db_name}'\n"
+
+             progress(0.5, desc=f"[Initializing] AI collection '{self.vdb.ai_graph_emb_collection}'\n")
+             output_text += f"[Initializing] AI collection '{self.vdb.ai_graph_emb_collection}'\n"
+             self.vdb.init_graph_collection()
+             progress(0.9, desc=f"[Initialized] AI collection '{self.vdb.ai_graph_emb_collection}'\n")
+             output_text += f"[Initialized] AI collection '{self.vdb.ai_graph_emb_collection}'\n"
+
+             progress(1, desc="You can now go to the upload section, upload images or a ZIP archive, and then try the image search.")
+             output_text += "You can now go to the upload section, upload images or a ZIP archive, and then try the image search."
+
+             time.sleep(0.3)
+         except Exception as e:
+             output_text += f"[Database access failed] Reason: {e}"
+             error_trace = traceback.format_exc()
+             print(error_trace)
+         return output_text
+
+     def get_init_panel(self):
+         with gr.Blocks() as demo:
+             gr.HTML(init_css)
+             with gr.Row():
+
+                 with gr.Column():
+                     model_name_input = gr.Textbox(lines=1, label="Model name", placeholder="Enter a Hugging Face model name...", value=self.model_name)
+                     output = gr.Textbox(lines=10, label="Download progress", placeholder="Download progress will be shown here...")
+                     init_button = gr.Button("Download model")
+
+                     init_button.click(
+                         fn=self._download_model,
+                         inputs=[model_name_input],
+                         outputs=output
+                     )
+                 with gr.Column():
+                     db_init_output = gr.Textbox(lines=14, label="Database initialization result", placeholder="The database initialization result will be shown here...")
+                     db_init_button = gr.Button("Initialize vector database")
+
+                     db_init_button.click(
+                         fn=self._initialize_vector_db,
+                         inputs=[],
+                         outputs=db_init_output
+                     )
+             with gr.Row():
+                 upload_file = gr.File(label="Upload an image or ZIP archive")
+             with gr.Row():
+                 upload_output = gr.Textbox(lines=10, label="Upload result", placeholder="The upload result will be shown here...")
+             with gr.Row():
+                 upload_button = gr.Button("Upload file")
+
+             upload_button.click(
+                 fn=self._handle_upload,
+                 inputs=[upload_file],
+                 outputs=[upload_output, gr.State()]
+             )
+
+         return demo
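
For reference (hypothetical values, not part of the commit), this is the shape of the record that _handle_upload upserts for each image; the id, vector, and local_graph_path fields match the index schema declared in vector_db/vector_db_client.py, and local_graph_path is what the search page later reads back via output_fields.

    # Sketch of one upserted record (values are made up; the real vector holds 512 CLIP features).
    from tcvectordb.model.document import Document

    doc = Document(
        id="a-string-from-uuid4",                # str(uuid.uuid4())
        vector=[0.0] * 512,                      # CLIP image embedding as a flat list of floats
        local_graph_path="graphs/example.png",   # returned by ChatSearch via output_fields
    )
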
pages/main_page.py ADDED
@@ -0,0 +1,41 @@
+ import gradio as gr
+ from pages.initial_and_upload import Initial_and_Upload
+ from pages.chat_search import ChatSearch
+ from vector_db.vector_db_client import VectorDB
+
+ main_css = """
+ .secondary.svelte-cmf5ev {
+     background: #6366f1;
+     color: #ffffff;
+ }
+ .main-text {
+     color: #6366f1;
+     padding-top: 20px;
+     text-align: center;
+ }
+ .main-title {
+     font-size: 28px;
+     font-weight: bold;
+     padding-top: 10px;
+     padding-bottom: 15px;
+     color: #6366f1;
+     text-align: center;
+ }
+ """
+
+
+ def init_pages(vdb: VectorDB, server_name: str, server_port, config):
+     initial_and_upload = Initial_and_Upload(config, vdb)
+     chat_search = ChatSearch(config, vdb)
+
+     with gr.Blocks(title="Tencent VectorDB", theme="soft", css=main_css) as demo:
+         with gr.Row():
+             gr.HTML("<div class='main-title'>Tencent VectorDB AI Demo -- Graph search</div>")
+
+         with gr.Tab("Initialization"):
+             initial_and_upload.get_init_panel()
+         with gr.Tab("Image-to-Image Search"):
+             chat_search.get_chart()
+
+
+     demo.launch(server_name=server_name, server_port=server_port)
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ tencentcloud-sdk-python-common
+ pydantic
+ gradio==4.39.0
+ transformers==4.44.2
+ torch==2.4.1
+ torchvision==0.19.1
+ huggingface_hub
+ tcvectordb
+ fastapi==0.111.1
vector_db/__init__.py ADDED
File without changes
vector_db/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (174 Bytes).

vector_db/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (161 Bytes).

vector_db/__pycache__/vector_db_client.cpython-311.pyc ADDED
Binary file (3.73 kB).

vector_db/__pycache__/vector_db_client.cpython-312.pyc ADDED
Binary file (4.53 kB).
 
vector_db/vector_db_client.py ADDED
@@ -0,0 +1,63 @@
+ import tcvectordb
+ from tcvectordb.model.database import Database
+ from tcvectordb.model.collection import Collection
+ from tcvectordb.model.index import Index, VectorIndex, FilterIndex, HNSWParams
+ from tcvectordb.model.enum import FieldType, IndexType, MetricType
+ VDB_ADDRESS = "vector_db.address"
+ VDB_KEY = "vector_db.key"
+ AI_DB_NAME = "vector_db.ai_db"
+ AI_COLLECTION_NAME = "vector_db.ai_graph_emb_collection"
+
+
+ class VectorDB:
+     def __init__(self, config):
+         self.address = config.get(VDB_ADDRESS)
+         self.key = config.get(VDB_KEY)
+         self.db_name = config.get(AI_DB_NAME)
+         self.ai_graph_emb_collection = config.get(AI_COLLECTION_NAME)
+
+         print(f"Trying to connect to VectorDB at {self.address}")
+         self.client = self.create_client()
+         self._test_simple()
+
+     def create_client(self):
+         return tcvectordb.RPCVectorDBClient(
+             url=self.address,
+             username='root',
+             key=self.key,
+             timeout=30
+         )
+
+     def _test_simple(self):
+         self.client.list_databases()
+
+     def init_database(self):
+         try:
+             self.client.create_database(self.db_name)
+         except tcvectordb.exceptions.VectorDBException:
+             self.client.drop_database(self.db_name)  # drop and recreate if the database already exists
+             self.client.create_database(self.db_name)
+
+     def init_graph_collection(self):
+         index = Index(
+             FilterIndex(name='id', field_type=FieldType.String, index_type=IndexType.PRIMARY_KEY),
+             FilterIndex(name='local_graph_path', field_type=FieldType.String, index_type=IndexType.FILTER),
+             VectorIndex(name='vector', dimension=512, index_type=IndexType.HNSW,
+                         metric_type=MetricType.COSINE, params=HNSWParams(m=16, efconstruction=200))
+         )
+
+         database: Database = self.client.database(self.db_name)
+         try:
+             database.create_collection(
+                 name=self.ai_graph_emb_collection, shard=1, replicas=2, index=index,
+                 description='this is a collection of graph embedding'
+             )
+         except tcvectordb.exceptions.VectorDBException:
+             database.drop_collection(self.ai_graph_emb_collection)  # drop and recreate if the collection already exists
+             database.create_collection(
+                 name=self.ai_graph_emb_collection, shard=1, replicas=2, index=index,
+                 description='this is a collection of graph embedding'
+             )
+     def get_collection(self) -> Collection:
+         database: Database = self.client.database(self.db_name)
+         return database.collection(self.ai_graph_emb_collection)
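
Putting the pieces together, a minimal usage sketch of this client outside the Gradio UI (the CONFIG_MAP values below are placeholders; in the app they come from app.py's init_config()):

    # Sketch: initialize the database/collection and run a search, reusing this commit's modules.
    from vector_db.vector_db_client import VectorDB
    from tcvectordb.model.document import SearchParams

    CONFIG_MAP = {
        "vector_db.address": "http://<your-vectordb-endpoint>:10000",   # placeholder
        "vector_db.key": "<your-api-key>",                              # placeholder
        "vector_db.ai_db": "test_ai_graph_db",
        "vector_db.ai_graph_emb_collection": "test_ai_graph_collection",
    }

    vdb = VectorDB(CONFIG_MAP)      # connects and runs a list_databases() smoke test
    vdb.init_database()             # recreates test_ai_graph_db if it already exists
    vdb.init_graph_collection()     # 512-dim HNSW index with COSINE metric

    collection = vdb.get_collection()
    hits = collection.search(
        vectors=[[0.0] * 512],      # placeholder query vector; real queries come from CLIP
        params=SearchParams(ef=200),
        limit=10,
        output_fields=["local_graph_path"],
    )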