# HuggingFace `datasets` loading script: deepset/germanDPR in BEIR format.
import csv
import json

import datasets
_VERSION = "1.0.0"
_DESCRIPTION = "Deepset's germanDPR dataset made compatible with BEIR benchmark framework. One version contains " \
"the original dataset 1:1 and the other dataset is preprocessed. See official dataset card for " \
"usage of dataset with BEIR."
_SUBSETS = ["queries-original", "corpus-original", "queries-processed", "corpus-processed", "qrels"]
class GermanDPRBeir(datasets.GeneratorBasedBuilder):
    """BEIR-compatible builder for deepset's germanDPR dataset.

    Each subset is exposed as its own builder config: ``queries-*`` and
    ``corpus-*`` are JSONL files hosted on the Hub, ``qrels`` is a pair of
    TSV files (one per split). Features per config mirror the BEIR layout.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="queries-original",
            description="BEIR queries created 1:1 from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="corpus-original",
            description="BEIR corpus created 1:1 from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="queries-processed",
            description="BEIR queries created and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="corpus-processed",
            description="BEIR corpus created and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="qrels",
            description="BEIR qrels created from deepset/germanDPR for train and test split.",
            version=_VERSION,
        ),
    ]

    DEFAULT_CONFIG_NAME = "qrels"

    def _info(self):
        """Return dataset metadata; features depend on the selected config."""
        name = self.config.name
        if name.startswith("queries"):
            features = {
                "_id": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        elif name.startswith("corpus"):
            features = {
                "_id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        else:
            # name == "qrels"
            features = {
                "query-id": datasets.Value("string"),
                "corpus-id": datasets.Value("string"),
                "score": datasets.Value("int32"),
            }
        return datasets.DatasetInfo(
            description=f"{_DESCRIPTION}\n{self.config.description}",
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/PM-AI/germandpr-beir",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        For "qrels" two TSV files (test + train) are downloaded and the
        split-specific one is picked in ``_generate_examples``. For the
        queries/corpus configs there is a single JSONL file that is served
        identically for both splits.
        """
        name = self.config.name
        if name == "qrels":
            dl_path = dl_manager.download([
                "https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/qrels/test.tsv",
                "https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/qrels/train.tsv",
            ])
        else:
            dl_path = dl_manager.download(f"https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/{name}.jsonl")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path, "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": dl_path, "split": "test"}),
        ]

    def _generate_examples(self, filepath, split):
        """Yields (key, example) pairs read from the downloaded file(s).

        The field names match the features declared in ``_info``. NOTE(review):
        the previous implementation yielded hard-coded placeholder rows; this
        reads the actual BEIR-formatted files (JSONL for queries/corpus,
        header-bearing TSV for qrels) — confirm against the hosted data.
        """
        name = self.config.name
        if name == "qrels":
            # `filepath` is the list returned by dl_manager.download(); pick
            # the file that belongs to the requested split.
            qrels_path = next(p for p in filepath if p.endswith(f"{split}.tsv"))
            with open(qrels_path, encoding="utf-8") as f:
                reader = csv.DictReader(f, delimiter="\t")
                for idx, row in enumerate(reader):
                    yield idx, {
                        "query-id": row["query-id"],
                        "corpus-id": row["corpus-id"],
                        "score": int(row["score"]),
                    }
        else:
            # Single JSONL file: one JSON object per line. The same file is
            # used for both splits (BEIR keeps split info only in qrels).
            with open(filepath, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    record = json.loads(line)
                    if name.startswith("queries"):
                        yield idx, {"_id": record["_id"], "text": record["text"]}
                    else:
                        # Corpus entries may omit the title in BEIR dumps.
                        yield idx, {
                            "_id": record["_id"],
                            "title": record.get("title", ""),
                            "text": record["text"],
                        }