# germandpr-beir / germandpr-beir.py
import json
import os.path
import datasets
_VERSION = "1.0.0"
_DESCRIPTION = "Deepset's germanDPR dataset made compatible with the BEIR benchmark framework. One version contains " \
               "the original dataset 1:1 (but deduplicated), the other version is further text-processed. " \
               "See the official dataset card for how to use the dataset with BEIR."
_SUBSETS = ["original-queries", "original-corpus", "original-qrels",
            "processed-queries", "processed-corpus", "processed-qrels"]
class GermanDPRBeir(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="original-queries",
            description="BEIR queries created 1:1 but deduplicated from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="original-corpus",
            description="BEIR corpus created 1:1 but deduplicated from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="original-qrels",
            description="BEIR qrels for the original version of deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="processed-queries",
            description="BEIR queries created, deduplicated and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="processed-corpus",
            description="BEIR corpus created, deduplicated and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="processed-qrels",
            description="BEIR qrels for the processed version of deepset/germanDPR.",
            version=_VERSION,
        ),
    ]
DEFAULT_CONFIG_NAME = _SUBSETS[0]
def _info(self):
name = self.config.name
if name.endswith("queries"):
features = {
"_id": datasets.Value("string"),
"text": datasets.Value("string")
}
elif name.endswith("corpus"):
features = {
"_id": datasets.Value("string"),
"title": datasets.Value("string"),
"text": datasets.Value("string"),
}
elif name.endswith("qrels"):
            # qrels map a query id to a relevant corpus id together with a relevance score
features = {
"query-id": datasets.Value("string"),
"corpus-id": datasets.Value("string"),
"score": datasets.Value("int32")
}
else:
raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
return datasets.DatasetInfo(
description=f"{_DESCRIPTION}\n{self.config.description}",
features=datasets.Features(features),
supervised_keys=None,
homepage="https://huggingface.co/datasets/PM-AI/germandpr-beir",
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
name = self.config.name
if name.startswith("original"):
dl_path = dl_manager.download_and_extract("https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/original.tar.gz")
elif name.startswith("processed"):
dl_path = dl_manager.download_and_extract("https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/processed.tar.gz")
else:
raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
type1, type2 = name.split("-")
if type2 in ["corpus", "queries"]:
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": os.path.join(dl_path, f'{type1}/train/{type2}.jsonl')}),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": os.path.join(dl_path, f'{type1}/test/{type2}.jsonl')})
]
elif type2 == "qrels":
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": os.path.join(dl_path, f'{type1}/train/qrels/train.tsv')}),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": os.path.join(dl_path, f'{type1}/test/qrels/test.tsv')})
]
else:
raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
def _generate_queries_data(self, filepath):
print("filepath: ", filepath)
with open(filepath, "r", encoding="utf-8") as in_file:
for idx, line in enumerate(in_file):
data = json.loads(line)
yield idx, data
def _generate_corpus_data(self, filepath):
with open(filepath, "r", encoding="utf-8") as in_file:
for idx, line in enumerate(in_file):
data = json.loads(line)
if "metadata" in data:
del data["metadata"]
yield idx, data
def _generate_qrel_data(self, filepath):
with open(filepath, "r", encoding="utf-8") as in_file:
in_file.readline() # first line is header
for idx, line in enumerate(in_file):
                qid, cid, score = line.rstrip().split("\t")
                # cast the score so it matches the declared int32 feature
                yield idx, {"query-id": qid, "corpus-id": cid, "score": int(score)}
def _generate_examples(self, filepath):
"""Yields examples."""
name = self.config.name
if name.endswith("queries"):
return self._generate_queries_data(filepath)
elif name.endswith("corpus"):
return self._generate_corpus_data(filepath)
elif name.endswith("qrels"):
return self._generate_qrel_data(filepath)
else:
raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
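

if __name__ == "__main__":
    # Illustrative usage sketch only (never executed when the datasets library imports this script).
    # It loads the "original" test split and reshapes it into the plain dictionaries
    # (queries, corpus, qrels) that BEIR-style retrieval evaluation code commonly expects;
    # the dataset card linked above documents the exact, recommended workflow.
    from datasets import load_dataset

    queries_ds = load_dataset("PM-AI/germandpr-beir", "original-queries", split="test")
    corpus_ds = load_dataset("PM-AI/germandpr-beir", "original-corpus", split="test")
    qrels_ds = load_dataset("PM-AI/germandpr-beir", "original-qrels", split="test")

    # queries: query id -> query text
    queries = {row["_id"]: row["text"] for row in queries_ds}
    # corpus: passage id -> {"title": ..., "text": ...}
    corpus = {row["_id"]: {"title": row["title"], "text": row["text"]} for row in corpus_ds}
    # qrels: query id -> {passage id: relevance score}
    qrels = {}
    for row in qrels_ds:
        qrels.setdefault(row["query-id"], {})[row["corpus-id"]] = row["score"]

    print(f"{len(queries)} queries, {len(corpus)} passages, {len(qrels)} queries with qrels")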