import csv
import json

import datasets
_VERSION = "1.0.0"
_DESCRIPTION = "Deepset's germanDPR dataset made compatible with BEIR benchmark framework. One version contains " \
"the original dataset 1:1 and the other dataset is preprocessed. See official dataset card for " \
"usage of dataset with BEIR."
_SUBSETS = ["queries-original", "corpus-original", "queries-processed", "corpus-processed", "qrels"]
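
# Loading script exposing five configurations: queries/corpus are read from JSONL files,
# qrels from one TSV file per split (see the download URLs in _split_generators below).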
class GermanDPRBeir(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="queries-original",
            description="BEIR queries created 1:1 from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="corpus-original",
            description="BEIR corpus created 1:1 from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="queries-processed",
            description="BEIR queries created and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="corpus-processed",
            description="BEIR corpus created and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="qrels",
            description="BEIR qrels created from deepset/germanDPR for the train and test splits.",
            version=_VERSION,
        ),
    ]
DEFAULT_CONFIG_NAME = "qrels"
def _info(self):
name = self.config.name
if name.startswith("queries"):
features = {
"_id": datasets.Value("string"),
"text": datasets.Value("string")
}
elif name.startswith("corpus"):
features = {
"_id": datasets.Value("string"),
"title": datasets.Value("string"),
"text": datasets.Value("string"),
}
else:
# name == qrels
features = {
"query-id": datasets.Value("string"),
"corpus-id": datasets.Value("string"),
"score": datasets.Value("int32")
}
return datasets.DatasetInfo(
description=f"{_DESCRIPTION}\n{self.config.description}",
features=datasets.Features(features),
supervised_keys=None,
homepage="https://huggingface.co/datasets/PM-AI/germandpr-beir",
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
name = self.config.name
if name == "qrels":
dl_path = dl_manager.download([
"https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/qrels/test.tsv",
"https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/qrels/train.tsv"
])
else:
dl_path = dl_manager.download(f"https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/{name}.jsonl")
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path, "split": "train"}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": dl_path, "split": "test"})
]
    def _generate_examples(self, filepath, split):
        """Yields examples, assuming the downloaded files follow the standard BEIR layout,
        i.e. JSONL fields / TSV columns named exactly like the features declared in _info."""
        name = self.config.name
        if name.startswith(("queries", "corpus")):
            # queries/corpus: one JSON object per line
            with open(filepath, encoding="utf-8") as infile:
                for idx, line in enumerate(infile):
                    row = json.loads(line)
                    yield idx, {key: row[key] for key in self.info.features}
        else:
            # name == "qrels": one TSV file per split with a "query-id\tcorpus-id\tscore" header
            filepath = next(x for x in filepath if x.endswith(f"{split}.tsv"))
            with open(filepath, encoding="utf-8", newline="") as infile:
                reader = csv.DictReader(infile, delimiter="\t")
                for idx, row in enumerate(reader):
                    yield idx, {"query-id": row["query-id"], "corpus-id": row["corpus-id"], "score": int(row["score"])}