Limour committed on
Commit
796ec76
1 Parent(s): f6342f9

Upload 6 files

G2Retrieval_bce.py ADDED
@@ -0,0 +1,85 @@
+ # conda install sentence-transformers -c conda-forge
+ from sentence_transformers import SentenceTransformer
+ import pandas as pd
+ from collections import defaultdict
+ import torch
+ from tqdm import tqdm
+ from test_pytrec_eval import ndcg_in_all
+
+ if torch.cuda.is_available():
+     device = torch.device('cuda')
+ else:
+     device = torch.device('cpu')
+
+
+ def load_dataset(path):
+     df = pd.read_parquet(path, engine="pyarrow")
+     return df
+
+
+ def load_all_dataset(path, convert=False):
+     qrels_pd = load_dataset(path + r'\qrels.parquet')
+     corpus = load_dataset(path + r'\corpus.parquet')
+     queries = load_dataset(path + r'\queries.parquet')
+     if convert:
+         qrels = defaultdict(dict)
+         for i, e in tqdm(qrels_pd.iterrows(), desc="load_all_dataset: Converting"):
+             qrels[e['qid']][e['cid']] = e['score']
+     else:
+         qrels = qrels_pd
+     return corpus, queries, qrels
+
+
+ corpus, queries, qrels = load_all_dataset(r'D:\datasets\G2Retrieval\data_sample2k')
+
+
+ randEmbed = False
+ if randEmbed:
+     corpusEmbeds = torch.rand((1, len(corpus)))  # dummy embeddings for a random baseline
+     queriesEmbeds = torch.rand((len(queries), 1))
+ else:
+     with torch.no_grad():
+         path = r'D:\models\bce'
+         model = SentenceTransformer(path, device='cuda:0')
+
+         corpusEmbeds = model.encode(corpus['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)
+         queriesEmbeds = model.encode(queries['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)
+
+         queriesEmbeds = torch.tensor(queriesEmbeds, device=device)
+         corpusEmbeds = corpusEmbeds.T  # (d, n_corpus): one matmul scores a query against every document
+         corpusEmbeds = torch.tensor(corpusEmbeds, device=device)
+
+
+ @torch.no_grad()
+ def getTopK(corpusEmbeds, qEmbeds, qid, k=10):
+     scores = qEmbeds @ corpusEmbeds
+     top_k_indices = torch.argsort(scores, descending=True)[:k]
+     scores = scores.cpu()
+     top_k_indices = top_k_indices.cpu()
+     retn = []
+     for x in top_k_indices:
+         x = int(x)
+         retn.append((qid, corpus['cid'][x], float(scores[x])))
+     return retn
+
+
+ with torch.no_grad():
+     results = []
+     for i in tqdm(range(len(queries)), desc="Retrieving"):
+         results.extend(getTopK(corpusEmbeds, queriesEmbeds[i], queries['qid'][i]))
+
+ results = pd.DataFrame(results, columns=['qid', 'cid', 'score'])
+ results['score'] = results['score'].astype(float)
+ tmp = ndcg_in_all(qrels, results)
+ ndcgs = torch.tensor([x for x in tmp.values()], device=device)
+
+ mean = torch.mean(ndcgs)
+ std = torch.std(ndcgs)
+
+ print(f'NDCG@10: {mean*100:.2f}±{std*100:.2f}')
+
+ # manually release cached CUDA memory
+ del queriesEmbeds
+ del corpusEmbeds
+ del model
+ torch.cuda.empty_cache()
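Note: with normalize_embeddings=True, the dot product in getTopK is exactly cosine similarity. A minimal batched alternative using torch.topk (a sketch, not part of this commit; assumes the same queriesEmbeds of shape (n_q, d) and corpusEmbeds of shape (d, n_c)):

    import torch

    @torch.no_grad()
    def batched_top_k(corpusEmbeds, queriesEmbeds, k=10):
        # one (n_q, d) @ (d, n_c) matmul scores every query at once
        scores = queriesEmbeds @ corpusEmbeds
        # topk avoids fully sorting each row of the score matrix
        top_scores, top_idx = torch.topk(scores, k=k, dim=1)
        return top_scores.cpu(), top_idx.cpu()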
data_sample2k/corpus.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f5a0f3855efcac5c002fbe848258e4c5ac7b91116b2d9c74f84537de65c31ee
+ size 10757440
data_sample2k/qrels.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0a800d7a9d455e5ae2396ea149a8bbe8a96dbe75f16dae2217dbd42d5308005
+ size 1029949
data_sample2k/queries.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c88c67f903ef49258b332c515ad6ad985485dd146ed055d4d1d8bfb566a47665
+ size 873400
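The three .parquet entries above are Git LFS pointer files (spec version, sha256 oid, byte size), not the data itself; with git-lfs installed, running git lfs pull in a clone of the repository downloads the actual Parquet payloads.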
main.py ADDED
@@ -0,0 +1,173 @@
+ import pandas as pd
+ import os
+ import gzip
+ import random
+ import re
+ from tqdm import tqdm
+ from collections import defaultdict
+
+
+ def get_all_files_in_directory(directory, ext=''):
+     all_files = []
+     for root, dirs, files in os.walk(directory):
+         root = root[len(directory):]
+         if root.startswith('\\') or root.startswith('/'):
+             root = root[1:]
+         for file in files:
+             if file.endswith(ext):
+                 file_path = os.path.join(root, file)
+                 all_files.append(file_path)
+     return all_files
+
+ reg_q = re.compile(r'''['"“”‘’「」『』]''')
+ reg_e = re.compile(r'''[?!。?!]''')
+ def readOne(filePath):
+     with gzip.open(filePath, 'rt', encoding='utf-8') if filePath.endswith('.gz') else open(filePath,
+                                                                                           encoding='utf-8') as f:
+         retn = []
+         cache = ''
+         for line in f:
+             line = reg_q.sub('', line)  # strip quotation marks
+             if len(cache) + len(line) < 384:
+                 cache += line
+                 continue
+             if not bool(reg_e.findall(line)):
+                 cache += line
+                 retn.append(cache.strip())
+                 cache = ''
+                 continue
+             i = 1
+             s = 0
+             while i <= len(line):
+                 if len(cache) + (i - s) < 384:  # cut a chunk every 384 characters
+                     i = (384 - len(cache)) + s
+                     if i > len(line):
+                         break
+                     cache += line[s:i]
+                     s = i
+                 if line[i-1] in ('?', '!', '。', '?', '!'):
+                     cache += line[s:i]
+                     s = i
+                     retn.append(cache.strip())
+                     cache = ''
+                 i += 1
+             if len(line) > s:
+                 cache += line[s:]
+
+     cache = cache.strip()
+     if cache:
+         retn.append(cache)
+     return retn
+
+
+ def load_dataset(path):
+     df = pd.read_parquet(path, engine="pyarrow")
+     return df
+
+
+ def load_all_dataset(path, convert=False):
+     qrels_pd = load_dataset(path + r'\qrels.parquet')
+     corpus = load_dataset(path + r'\corpus.parquet')
+     queries = load_dataset(path + r'\queries.parquet')
+     if convert:
+         qrels = defaultdict(dict)
+         for i, e in tqdm(qrels_pd.iterrows(), desc="load_all_dataset: Converting"):
+             qrels[e['qid']][e['cid']] = e['score']
+     else:
+         qrels = qrels_pd
+     return corpus, queries, qrels
+
+
+ def save_dataset(path, df):
+     return df.to_parquet(
+         path,
+         engine="pyarrow",
+         compression="gzip",
+         index=False
+     )
+
+
+ def save_all_dataset(path, corpus, queries, qrels):
+     save_dataset(path + r"\corpus.parquet", corpus)
+     save_dataset(path + r"\queries.parquet", queries)
+     save_dataset(path + r"\qrels.parquet", qrels)
+
+
+ def create_dataset(corpus, queries, qrels):
+     corpus_pd = pd.DataFrame(corpus, columns=['cid', 'text'])
+     queries_pd = pd.DataFrame(queries, columns=['qid', 'text'])
+     qrels_pd = pd.DataFrame(qrels, columns=['qid', 'cid', 'score'])
+
+     corpus_pd['cid'] = corpus_pd['cid'].astype(str)
+     queries_pd['qid'] = queries_pd['qid'].astype(str)
+     qrels_pd['qid'] = qrels_pd['qid'].astype(str)
+     qrels_pd['cid'] = qrels_pd['cid'].astype(str)
+     qrels_pd['score'] = qrels_pd['score'].astype(int)
+
+     return corpus_pd, queries_pd, qrels_pd
+
+
+ def sample_from_dataset(corpus, queries, qrels, k=2000):
+     sample_k = sorted(random.sample(queries['qid'].to_list(), k=k))
+     queries_pd = queries[queries['qid'].isin(sample_k)]
+     qrels_pd = qrels[qrels['qid'].isin(sample_k)]
+     corpus_pd = corpus[corpus['cid'].isin(qrels_pd['cid'])]
+
+     return corpus_pd, queries_pd, qrels_pd
+
+ path = r'D:\datasets\v-corpus-zh'
+ rawcorpus = get_all_files_in_directory(path, '.txt.gz')
+ corpus = []
+ queries = []
+ qrels = []
+
+ for sub_path in tqdm(rawcorpus, desc="Reading all data..."):
+     s_sub_path = sub_path.split('\\')
+     会社 = s_sub_path[0]  # company
+     if len(s_sub_path) == 3:
+         系列 = None  # series
+         作品 = s_sub_path[-2]  # work
+         篇章 = s_sub_path[-1]  # chapter
+     elif len(s_sub_path) == 4:
+         系列 = s_sub_path[1]
+         作品 = s_sub_path[-2]
+         篇章 = s_sub_path[-1]
+     else:
+         print(s_sub_path)
+         raise ValueError('len(s_sub_path) is neither 3 nor 4')
+     print(会社, 系列, 作品, 篇章)
+     tmp = readOne(os.path.join(path, sub_path))
+     阈值 = max(len(tmp) // 40, 4)  # sampling threshold
+     print(阈值)
+     old_rand = None
+     for i in range(len(tmp)):
+         rand = random.randint(0, 阈值)
+         if rand == 0:
+             queries.append((会社, 系列, 作品, 篇章, i/(len(tmp)-1), tmp[i]))
+         elif rand <= 4 or old_rand == 0:
+             corpus.append((会社, 系列, 作品, 篇章, i/(len(tmp)-1), tmp[i]))
+         else:
+             pass
+         old_rand = rand
+
+ for qid, q in tqdm(enumerate(queries), desc="Computing qrels..."):
+     for cid, c in enumerate(corpus):
+         if q[0] == c[0]:
+             s = 1
+             if q[1] is not None and q[1] == c[1]:
+                 s += 4
+             if q[2] == c[2]:  # same work
+                 s += 8
+             if q[3] == c[3]:  # same chapter
+                 s += 8
+             ss = 1 - abs(q[4] - c[4])
+             s += (79 * ss)
+             qrels.append((qid, cid, s))
+
+ corpus_ = [(cid, c[5]) for cid, c in enumerate(corpus)]
+ queries_ = [(qid, q[5]) for qid, q in enumerate(queries)]
+
+ path = r'D:\datasets\G2Retrieval'
+ corpus_pd, queries_pd, qrels_pd = create_dataset(corpus_, queries_, qrels)
+ save_all_dataset(path + r'\data', corpus_pd, queries_pd, qrels_pd)
+ save_all_dataset(path + r'\data_sample2k', *sample_from_dataset(corpus_pd, queries_pd, qrels_pd))
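For reference, the graded relevance computed in the nested loop above is assigned only to query-candidate pairs from the same company; with p_q and p_c the positions normalized to [0, 1] within each file, it is

    score(q, c) = 1 + 4·[same series] + 8·[same work] + 8·[same chapter] + 79·(1 − |p_q − p_c|)

so the maximum grade, for an adjacent chunk of the same chapter, is 1 + 4 + 8 + 8 + 79 = 100, and create_dataset later truncates each score to an integer.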
test_pytrec_eval.py ADDED
@@ -0,0 +1,71 @@
+ import numpy as np
+ import pandas as pd
+ from tqdm import tqdm
+
+
+ def dcg(scores):
+     log2_i = np.log2(np.arange(2, len(scores) + 2))  # log2(i + 1) for ranks i = 1..n
+     return np.sum(scores / log2_i)
+
+
+ def idcg(rels, topk):
+     return dcg(np.sort(rels)[::-1][:topk])
+
+
+ def odcg(rels, predictions):
+     indices = np.argsort(predictions)[::-1]
+     return dcg(rels[indices])
+
+
+ def _ndcg(drels, dpredictions):
+     topk = len(dpredictions)
+     _idcg = idcg(np.array(drels['score']), topk)
+     tmp = drels[drels.index.isin(dpredictions.index)]
+     rels = dpredictions['score'].copy()
+     rels *= 0  # zero relevance for retrieved docs missing from the qrels
+     rels.update(tmp['score'])
+     _odcg = odcg(rels.values, dpredictions['score'].values)
+     return float(_odcg / _idcg)
+
+
+ def ndcg(qrels, results):
+     drels = qrels.set_index('cid', inplace=False)
+     dpredictions = results.set_index('cid', inplace=False)
+     # print(drels, dpredictions)
+     return _ndcg(drels, dpredictions)
+
+
+ def ndcg_in_all(qrels, results):
+     retn = {}
+     _qrels = {qid: group for qid, group in qrels.groupby('qid')}
+     _results = {qid: group for qid, group in results.groupby('qid')}
+     for qid in tqdm(_qrels, desc="Computing NDCG..."):
+         retn[qid] = ndcg(_qrels[qid], _results[qid])
+     return retn
+
+
+ if __name__ == '__main__':
+     qrels = pd.DataFrame(
+         [
+             ['q1', 'd1', 1],
+             ['q1', 'd2', 2],
+             ['q1', 'd3', 3],
+             ['q1', 'd4', 4],
+             ['q2', 'd1', 2],
+             ['q2', 'd2', 1]
+         ],
+         columns=['qid', 'cid', 'score']
+     )
+
+     results = pd.DataFrame(
+         [
+             ['q1', 'd2', 1],
+             ['q1', 'd3', 2],
+             ['q1', 'd4', 3],
+             ['q2', 'd2', 1],
+             ['q2', 'd3', 2],
+             ['q2', 'd5', 2]
+         ],
+         columns=['qid', 'cid', 'score']
+     )
+     print(ndcg_in_all(qrels, results))
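These helpers implement DCG = Σ_i rel_i / log2(i + 1), with NDCG = DCG(ranked run) / IDCG and the ideal ranking truncated at the number of retrieved documents. As the file name suggests, the output can be cross-checked against the pytrec_eval package; a minimal sketch (assumes pytrec-eval is installed and reuses the two DataFrames from the __main__ block above):

    import pytrec_eval

    # pytrec_eval expects plain-int qrels and plain-float run scores keyed by qid/cid
    qrel_dict = {qid: {cid: int(s) for cid, s in zip(g['cid'], g['score'])}
                 for qid, g in qrels.groupby('qid')}
    run_dict = {qid: {cid: float(s) for cid, s in zip(g['cid'], g['score'])}
                for qid, g in results.groupby('qid')}
    evaluator = pytrec_eval.RelevanceEvaluator(qrel_dict, {'ndcg'})
    print(evaluator.evaluate(run_dict))  # {'q1': {'ndcg': ...}, 'q2': {'ndcg': ...}}

The values may differ slightly from ndcg_in_all, since trec_eval computes the ideal DCG over all judged documents rather than truncating at the run length.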