File size: 11,371 Bytes
0319a9a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
import pandas as pd
import os
from tqdm import tqdm

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity


class Database:
    """Keyword/embedding database stored as a pandas DataFrame.

    Rows carry a Chinese ``keyword``, its "cultivation" flavor fields, an
    English ``translated_word``/``description``, plus two embedding columns:
    ``clip_feature`` (CLIP text embedding of the English text) and
    ``bge_feature`` (BGE embedding of the Chinese keyword). Supports exact
    English-keyword lookup and top-k cosine-similarity search by image or
    Chinese text.
    """

    def __init__(self, parquet_path=None, customized_parquets=None):
        """Load the base parquet table and any customized parquet files.

        Args:
            parquet_path: Path of the main database parquet; defaults to
                ``datas/database_4000.parquet``. Skipped if missing on disk.
            customized_parquets: List of user-extension parquet paths; the
                LAST one is the table that newly added rows are saved into.
        """
        self.default_parquet_path = 'datas/database_4000.parquet'
        self.parquet_path = parquet_path or self.default_parquet_path

        self.default_customized_parquets = ["datas/customized_database_0.parquet"]
        self.customized_parquets = customized_parquets or self.default_customized_parquets

        self.datas = None             # full DataFrame: base table + customized rows
        self.last_save_table = None   # rows belonging to the last customized parquet

        if os.path.exists(self.parquet_path):
            self.load_from_parquet(self.parquet_path)

        self.load_from_customized(self.customized_parquets)

        # Extractors load large models; created lazily on first use.
        self.clip_extractor = None
        self.bge_extractor = None

        # Lower-cased translated_word -> row (pandas Series); built lazily.
        self.en_keyword2data = {}

    def build_en_keyword2index(self):
        """Build the lower-cased ``translated_word`` -> row lookup index."""
        self.en_keyword2data = {row['translated_word'].lower(): row for i, row in self.datas.iterrows()}

    def search_by_en_keyword(self, keyword):
        """Exact (case-insensitive) lookup by English translated word.

        Args:
            keyword: English word to look up; matched in lower case.

        Returns:
            The matching row as a dict with the embedding columns removed,
            or None when the keyword is unknown.
        """
        if not self.en_keyword2data:
            self.build_en_keyword2index()

        keyword = keyword.lower()
        if keyword not in self.en_keyword2data:
            return None

        ans = self.en_keyword2data[keyword].to_dict()
        # pop(..., None) instead of del: rows added before features were
        # extracted would otherwise raise KeyError here.
        ans.pop("clip_feature", None)
        ans.pop("bge_feature", None)
        return ans

    def load_from_parquet(self, parquet_path):
        """Replace ``self.datas`` with the contents of one parquet file."""
        self.datas = pd.read_parquet(parquet_path)

    def load_from_customized(self, customized_parquets=None):
        """Append every existing customized parquet onto ``self.datas``.

        The contents of the last existing file are kept in
        ``self.last_save_table`` so later ``add_data`` calls can extend and
        re-save just that file.
        """
        customized_parquets = customized_parquets or self.customized_parquets

        for index, parquet_file in enumerate(customized_parquets):
            if os.path.exists(parquet_file):
                temp_df = pd.read_parquet(parquet_file)
                if self.datas is None:
                    self.datas = temp_df
                else:
                    self.datas = pd.concat([self.datas, temp_df], ignore_index=True)

                # Remember the last file's rows as the active save table.
                if index == len(customized_parquets) - 1:
                    self.last_save_table = temp_df

    def add_data(self, data, if_save=True):
        """Add one entry, computing its CLIP and BGE features.

        Args:
            data: Dict with the required fields ``keyword``,
                ``name_in_cultivation``, ``description_in_cultivation``,
                ``translated_word`` and ``description``; ``founder`` is
                optional. The caller's dict is not modified.
            if_save: When True, immediately persist the customized table.

        Raises:
            ValueError: If a required field is missing.
        """
        required_columns = ['keyword', 'name_in_cultivation', 'description_in_cultivation', 'translated_word', 'description']
        for column in required_columns:
            if column not in data:
                raise ValueError(f"Missing required field: {column}")

        # Work on a copy so the caller's dict is not mutated with the
        # default founder and the (large) feature vectors.
        data = dict(data)

        # Optional field
        if 'founder' not in data:
            data['founder'] = ""

        # Extract features lazily.
        if self.clip_extractor is None:
            self.init_clip_extractor()
        if self.bge_extractor is None:
            self.init_bge_extractor()

        data['clip_feature'] = self.clip_extractor.extract_text(data['translated_word'] + '.' + data['description'])
        data['bge_feature'] = self.bge_extractor.extract([data['keyword']])[0].tolist()

        # Append to the in-memory table.
        data_df = pd.DataFrame([data])
        if self.datas is None:
            self.datas = data_df
        else:
            self.datas = pd.concat([self.datas, data_df], ignore_index=True)

        # Keep the keyword index in sync with the new last row.
        self.en_keyword2data[data['translated_word'].lower()] = self.datas.iloc[-1]

        # Append to the active customized table (create it if needed).
        if self.last_save_table is None:
            self.last_save_table = pd.DataFrame(columns=self.datas.columns)

        self.last_save_table = pd.concat([self.last_save_table, data_df], ignore_index=True)

        if if_save:
            self.save_to_parquet(self.customized_parquets[-1], self.last_save_table)

    def add_datas(self, datas, if_save=True):
        """Add several entries, saving (at most) once at the end."""
        for data in datas:
            self.add_data(data, if_save=False)
        if if_save:
            self.save_to_parquet(self.customized_parquets[-1], self.last_save_table)

    def init_from_excel(self, excel_path):
        """Rebuild ``self.datas`` from an Excel sheet and extract features.

        Rows missing any required column are dropped before extraction.
        """
        df = pd.read_excel(excel_path)

        df.dropna(subset=['keyword', 'name_in_cultivation', 'description_in_cultivation', 'translated_word', 'description'], inplace=True)

        # Feature columns are filled by the extract_* passes below.
        df['clip_feature'] = None
        df['bge_feature'] = None

        self.datas = df

        self.extract_clip()
        self.extract_bge()

    def save_to_parquet(self, parquet_path=None, df=None):
        """Write ``df`` (or ``self.datas`` when df is None) to parquet."""
        parquet_path = parquet_path or self.default_parquet_path
        if df is None:
            if self.datas is not None:
                self.datas.to_parquet(parquet_path)
        else:
            df.to_parquet(parquet_path)

    def init_clip_extractor(self):
        """Lazily create the CLIP extractor (openai/clip-vit-large-patch14)."""
        if self.clip_extractor is None:
            # Import path differs depending on whether we run from the
            # package root or from inside src/.
            try:
                from CLIPExtractor import CLIPExtractor
            except ImportError:
                from src.CLIPExtractor import CLIPExtractor

            cache_dir = "models"

            self.clip_extractor = CLIPExtractor(model_name="openai/clip-vit-large-patch14", cache_dir=cache_dir)

    def extract_clip(self):
        """Fill ``clip_feature`` for every row from its English text."""
        if self.clip_extractor is None:
            self.init_clip_extractor()

        clip_features = []
        for index, row in tqdm(self.datas.iterrows(), desc='Extracting CLIP features', total=len(self.datas)):
            text = row['translated_word'] + '.' + row['description']
            if text:
                feature = self.clip_extractor.extract_text(text)
            else:
                feature = None
            clip_features.append(feature)

        self.datas['clip_feature'] = clip_features

    def init_bge_extractor(self):
        """Lazily create the BGE text extractor (BAAI/bge-small-zh-v1.5)."""
        if self.bge_extractor is None:
            # Same dual import-path fallback as init_clip_extractor.
            try:
                from text_embedding import TextExtractor
            except ImportError:
                from src.text_embedding import TextExtractor

            self.bge_extractor = TextExtractor('BAAI/bge-small-zh-v1.5')

    def top_k_search(self, query_feature, attribute, top_k=15):
        """Return the top-k rows most cosine-similar to ``query_feature``.

        Args:
            query_feature: 1-D embedding to compare against.
            attribute: Feature column to search ('clip_feature' or
                'bge_feature'). Rows with a missing feature are skipped.
            top_k: Number of results, best first.

        Returns:
            List of row dicts (feature columns dropped) with an added
            ``similarity`` field, sorted by descending similarity.

        Raises:
            ValueError: If ``attribute`` is not a column of the data.
        """
        if attribute not in self.datas.columns:
            raise ValueError(f"Attribute {attribute} not found in the data.")

        query_feature = np.array(query_feature).reshape(1, -1)

        # Keep the dropna'd Series so its index maps positions in the
        # similarity array back to the right rows. (Indexing self.datas
        # directly with these positions — as the previous version did —
        # returns wrong rows whenever any feature is missing.)
        feature_series = self.datas[attribute].dropna()
        attribute_features = np.stack(feature_series.values)

        similarities = cosine_similarity(query_feature, attribute_features)[0]

        # Positions of the top_k largest similarities, best first.
        top_k_positions = np.argsort(similarities)[-top_k:][::-1]

        top_k_results = self.datas.loc[feature_series.index[top_k_positions]].copy()

        top_k_results = top_k_results.drop(columns=['clip_feature', 'bge_feature'])

        top_k_results['similarity'] = similarities[top_k_positions]

        return top_k_results.to_dict(orient='records')

    def search_with_image_name(self, image_name):
        """Top-k search using the CLIP embedding of an image file on disk."""
        self.init_clip_extractor()

        img_feature = self.clip_extractor.extract_image_from_file(image_name)

        return self.top_k_search(img_feature, 'clip_feature')

    def search_with_image(self, image, if_opencv=False):
        """Top-k search using the CLIP embedding of an in-memory image."""
        if self.clip_extractor is None:
            self.init_clip_extractor()

        img_feature = self.clip_extractor.extract_image(image, if_opencv=if_opencv)

        return self.top_k_search(img_feature, 'clip_feature')

    def search_with_chinese(self, text):
        """Top-k search using the BGE embedding of a Chinese query string."""
        if self.bge_extractor is None:
            self.init_bge_extractor()

        text_feature = self.bge_extractor.extract([text])[0].tolist()

        return self.top_k_search(text_feature, 'bge_feature')

    def extract_bge(self):
        """Fill ``bge_feature`` for every row from its Chinese keyword."""
        if self.bge_extractor is None:
            self.init_bge_extractor()

        bge_features = []
        for text in tqdm(self.datas['keyword'], desc='Extracting BGE features'):
            if text:
                feature = self.bge_extractor.extract([text])[0].tolist()
            else:
                feature = None
            bge_features.append(feature)

        self.datas['bge_feature'] = bge_features

if __name__ == '__main__':
    # Usage example: open (or rebuild) the database, then demonstrate one
    # Chinese-text query and one image query, printing the top matches.
    db = Database()

    re_extract = False
    if db.datas is None or re_extract:
        print("Rebuilding database from excel file")
        db.init_from_excel('datas/database_4000.xlsx')
        db.save_to_parquet()

    # Text search: BGE embedding of a Chinese query.
    query_text = "钢琴"
    results = db.search_with_chinese(query_text)
    print(results[0].keys())
    for result in results[:3]:
        print(result)

    # Image search: CLIP embedding of a picture on disk.
    image_path = "datas/老虎.jpg"
    results = db.search_with_image_name(image_path)
    for result in results[:3]:
        print(result)

    # Each printed result dict carries the row fields (keyword,
    # name_in_cultivation, description_in_cultivation, translated_word,
    # description, founder) plus its cosine 'similarity' score; new rows
    # with the same fields can be inserted via db.add_data(...).