# H2Retrieval/new/main.py
import pandas as pd
import os
import gzip
import random
import re
from tqdm import tqdm
from collections import defaultdict
def get_all_files_in_directory(directory, ext=''):
    """Recursively collect files under `directory` ending with `ext`,
    returned as paths relative to `directory`."""
    all_files = []
    for root, dirs, files in os.walk(directory):
        root = root[len(directory):]  # make the path relative to `directory`
        if root.startswith('\\') or root.startswith('/'):
            root = root[1:]
        for file in files:
            if file.endswith(ext):
                file_path = os.path.join(root, file)
                all_files.append(file_path)
    return all_files
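# A usage sketch (hypothetical directory layout, not part of the dataset):
# given D:\corpus\novels\a.txt.gz on disk,
#   get_all_files_in_directory(r'D:\corpus', '.txt.gz')
# would return ['novels\\a.txt.gz'] -- paths relative to the scanned root,
# ready to be re-joined with os.path.join(path, sub_path) below.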
reg_q = re.compile(r'''['"“”‘’「」『』]''')  # quotation marks (ASCII, full-width, CJK corner brackets)
reg_e = re.compile(r'''[?!。?!]''')  # sentence-ending punctuation (half- and full-width)
def readOne(filePath):
    """Read a (possibly gzipped) text file and split it into passages of
    roughly 384 characters, preferring to cut at sentence-ending punctuation."""
    with (gzip.open(filePath, 'rt', encoding='utf-8') if filePath.endswith('.gz')
          else open(filePath, encoding='utf-8')) as f:
        retn = []
        cache = ''
        for line in f:
            line = reg_q.sub('', line)  # strip quotation marks
            if len(cache) + len(line) < 384:
                cache += line
                continue
            if not reg_e.findall(line):  # no sentence end in this line: flush as-is
                cache += line
                retn.append(cache.strip())
                cache = ''
                continue
            i = 1
            s = 0
            while i <= len(line):
                if len(cache) + (i - s) < 384:  # cut a passage every ~384 chars
                    i = (384 - len(cache)) + s
                    if i > len(line):
                        break
                    cache += line[s:i]
                    s = i
                if line[i - 1] in ('?', '!', '。', '?', '!'):  # reached a sentence end: flush
                    cache += line[s:i]
                    s = i
                    retn.append(cache.strip())
                    cache = ''
                i += 1
            if len(line) > s:  # carry the unterminated tail over to the next line
                cache += line[s:]
        cache = cache.strip()
        if cache:
            retn.append(cache)
        return retn
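# A chunking sketch (made-up file, assuming the 384-char window above):
#   readOne('novel.txt.gz')
# yields passages each roughly 384 characters long and, where possible,
# extended to the next ?/!/。/?/! so cuts land on sentence boundaries;
# quotation marks are already stripped by reg_q.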
def load_dataset(path):
df = pd.read_parquet(path, engine="pyarrow")
return df
def load_all_dataset(path, convert=False):
    """Load the corpus/queries/qrels parquet files; with convert=True,
    reshape qrels into a {qid: {cid: score}} dict."""
    qrels_pd = load_dataset(path + r'\qrels.parquet')
    corpus = load_dataset(path + r'\corpus.parquet')
    queries = load_dataset(path + r'\queries.parquet')
    if convert:
        qrels = defaultdict(dict)
        for i, e in tqdm(qrels_pd.iterrows(), total=len(qrels_pd), desc="load_all_dataset: Converting"):
            qrels[e['qid']][e['cid']] = e['score']
    else:
        qrels = qrels_pd
    return corpus, queries, qrels
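# With convert=True the qrels frame becomes a nested dict keyed by query id,
# e.g. qrels['12'] == {'34': 95, '35': 87} (illustrative ids/scores only) --
# the {qid: {cid: score}} shape many retrieval evaluators expect.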
def save_dataset(path, df):
return df.to_parquet(
path,
engine="pyarrow",
compression="gzip",
index=False
)
def save_all_dataset(path, corpus, queries, qrels):
save_dataset(path + r"\corpus.parquet", corpus)
save_dataset(path + r"\queries.parquet", queries)
save_dataset(path + r"\qrels.parquet", qrels)
def create_dataset(corpus, queries, qrels):
    """Turn (id, text) and (qid, cid, score) tuple lists into typed
    DataFrames: ids as str, scores as int."""
    corpus_pd = pd.DataFrame(corpus, columns=['cid', 'text'])
    queries_pd = pd.DataFrame(queries, columns=['qid', 'text'])
    qrels_pd = pd.DataFrame(qrels, columns=['qid', 'cid', 'score'])
    corpus_pd['cid'] = corpus_pd['cid'].astype(str)
    queries_pd['qid'] = queries_pd['qid'].astype(str)
    qrels_pd['qid'] = qrels_pd['qid'].astype(str)
    qrels_pd['cid'] = qrels_pd['cid'].astype(str)
    qrels_pd['score'] = qrels_pd['score'].astype(int)
    return corpus_pd, queries_pd, qrels_pd
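# An illustrative call (toy tuples, not real data):
#   create_dataset([(0, 'passage text')], [(0, 'query text')], [(0, 0, 95.0)])
# returns three DataFrames with cid/qid coerced to str and score to int.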
def sample_from_dataset(corpus, queries, qrels, k=5000):
    """Randomly keep k queries, then restrict qrels and corpus to them."""
    sample_k = sorted(random.sample(queries['qid'].to_list(), k=k))
    queries_pd = queries[queries['qid'].isin(sample_k)]
    qrels_pd = qrels[qrels['qid'].isin(sample_k)]
    corpus_pd = corpus[corpus['cid'].isin(qrels_pd['cid'])]
    return corpus_pd, queries_pd, qrels_pd
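# Example (assumes at least 1000 distinct qids):
#   sample_from_dataset(corpus_pd, queries_pd, qrels_pd, k=1000)
# keeps 1000 random queries, their qrels rows, and only the passages
# those qrels still reference.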
path = r'D:\datasets\h-corpus\h-ss-corpus'
rawcorpus = get_all_files_in_directory(path, '.txt.gz')
corpus = []
queries = []
qrels = []
for sub_path in tqdm(rawcorpus, desc="Reading all data..."):
    tmp = readOne(os.path.join(path, sub_path))
    if len(tmp) < 5:
        continue
    阈值 = max(len(tmp) // 4, 4)  # threshold: samples roughly 4*5 = 20 passages per file
    # print(阈值)
    old_rand = None
    for i in range(len(tmp)):
        rand = random.randint(0, 阈值)
        if rand == 0 and (old_rand is None or old_rand != 0):
            queries.append((sub_path, i / (len(tmp) - 1), tmp[i]))  # (file, relative position, text)
        elif rand <= 4 or old_rand == 0:
            corpus.append((sub_path, i / (len(tmp) - 1), tmp[i]))
            rand = 1  # stop the old_rand == 0 chain from pulling in further passages
        else:
            pass
        old_rand = rand
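# Rough expected counts (a sketch of the sampling math above): rand is uniform
# on [0, 阈值] with 阈值 ≈ len(tmp)/4, so P(rand == 0) ≈ 4/len(tmp) -- about 4
# queries per file -- and P(rand <= 4) ≈ 20/len(tmp) -- about 20 corpus
# passages, matching the "4*5 = 20" note above.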
tmp = random.sample(range(len(queries)), k=5000)  # keep 5000 queries
tmp.sort()  # preserve file order, which the sidx scan below relies on
queries = [queries[i] for i in tmp]
sidx = 0  # corpus entries are grouped by file, so each scan can resume where the last file ended
for qid, q in tqdm(enumerate(queries), total=len(queries), desc="Computing qrels..."):
    mt = False
    for cid in range(sidx, len(corpus)):
        c = corpus[cid]
        if q[0] == c[0]:  # same source file: relevance decays with distance within the file
            mt = True
            ss = 1 - abs(q[1] - c[1])
            qrels.append((qid, cid, 100 * ss))
        else:
            if mt:  # just walked past this file's block
                if qid + 1 < len(queries) and q[0] != queries[qid + 1][0]:
                    sidx = cid  # resume here: corpus[cid] already belongs to a later file
                break
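# Score sketch (illustrative positions): a query at relative position 0.30 and
# a passage at 0.25 in the same file get ss = 1 - |0.30 - 0.25| = 0.95, stored
# as 100 * ss = 95 -- so nearer passages from the same file score closer to 100.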
corpus_ = [(cid, c[2]) for cid, c in enumerate(corpus)]    # re-key passages as (cid, text)
queries_ = [(qid, q[2]) for qid, q in enumerate(queries)]  # re-key queries as (qid, text)
path = r'D:\datasets\H2Retrieval\new_fix'
corpus_pd, queries_pd, qrels_pd = create_dataset(corpus_, queries_, qrels)
corpus_pd = corpus_pd[corpus_pd['cid'].isin(qrels_pd['cid'])]  # drop passages no qrel references
save_all_dataset(path + r'\data', corpus_pd, queries_pd, qrels_pd)
save_all_dataset(path + r'\data_sample1k', *sample_from_dataset(corpus_pd, queries_pd, qrels_pd, k=1000))
# save_all_dataset(path + r'\data_sample1k', *sample_from_dataset(*load_all_dataset(r'D:\datasets\H2Retrieval\new\data_sample5k'), k=1000))
# tmp = load_all_dataset(r'D:\datasets\H2Retrieval\new\data')