import json
import os.path
import datasets
_VERSION = "1.0.0"
_DESCRIPTION = "Deepset's germanDPR dataset made compatible with BEIR benchmark framework. One version contains " \
"the original dataset 1:1 (but deduplicated) and the other dataset is furhter preprocessed. " \
"See official dataset card for dataset usage with BEIR."
_SUBSETS = ["original-queries", "original-corpus", "original-qrels",
"processed-queries", "processed-corpus", "original-qrels"]

class GermanDPRBeir(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="original-queries",
            description="BEIR queries created 1:1 but deduplicated from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="original-corpus",
            description="BEIR corpus created 1:1 but deduplicated from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="original-qrels",
            description="BEIR qrels for the original version of deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="processed-queries",
            description="BEIR queries created, deduplicated and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="processed-corpus",
            description="BEIR corpus created, deduplicated and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="processed-qrels",
            description="BEIR qrels for the processed version of deepset/germanDPR.",
            version=_VERSION,
        ),
    ]

    DEFAULT_CONFIG_NAME = _SUBSETS[0]
    def _info(self):
        name = self.config.name
        if name.endswith("queries"):
            features = {
                "_id": datasets.Value("string"),
                "text": datasets.Value("string")
            }
        elif name.endswith("corpus"):
            features = {
                "_id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        elif name.endswith("qrels"):
            features = {
                "query-id": datasets.Value("string"),
                "corpus-id": datasets.Value("string"),
                "score": datasets.Value("int32")
            }
        else:
            raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
        return datasets.DatasetInfo(
            description=f"{_DESCRIPTION}\n{self.config.description}",
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/PM-AI/germandpr-beir",
        )
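
    # Illustrative record shapes per subset type (the values are made up, the
    # field names follow the features declared in _info above):
    #   *-queries: {"_id": "q0", "text": "example question"}
    #   *-corpus:  {"_id": "c0", "title": "example title", "text": "example passage"}
    #   *-qrels:   {"query-id": "q0", "corpus-id": "c0", "score": 1}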
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        name = self.config.name
        if name.startswith("original"):
            dl_path = dl_manager.download_and_extract(
                "https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/original.tar.gz")
        elif name.startswith("processed"):
            dl_path = dl_manager.download_and_extract(
                "https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/processed.tar.gz")
        else:
            raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
        subset_type, data_type = name.split("-")
        if data_type in ["corpus", "queries"]:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": os.path.join(dl_path, f"{subset_type}/train/{data_type}.jsonl")}),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": os.path.join(dl_path, f"{subset_type}/test/{data_type}.jsonl")})
            ]
        elif data_type == "qrels":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": os.path.join(dl_path, f"{subset_type}/train/qrels/train.tsv")}),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": os.path.join(dl_path, f"{subset_type}/test/qrels/test.tsv")})
            ]
        else:
            raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
    def _generate_queries_data(self, filepath):
        with open(filepath, "r", encoding="utf-8") as in_file:
            for idx, line in enumerate(in_file):
                data = json.loads(line)
                yield idx, data
    def _generate_corpus_data(self, filepath):
        with open(filepath, "r", encoding="utf-8") as in_file:
            for idx, line in enumerate(in_file):
                data = json.loads(line)
                # drop the optional "metadata" field, it is not part of the declared features
                if "metadata" in data:
                    del data["metadata"]
                yield idx, data
    def _generate_qrel_data(self, filepath):
        with open(filepath, "r", encoding="utf-8") as in_file:
            in_file.readline()  # first line is the header
            for idx, line in enumerate(in_file):
                qid, cid, score = line.rstrip().split("\t")
                # cast explicitly, the "score" feature is declared as int32
                yield idx, {"query-id": qid, "corpus-id": cid, "score": int(score)}
    def _generate_examples(self, filepath):
        """Yields examples."""
        name = self.config.name
        if name.endswith("queries"):
            return self._generate_queries_data(filepath)
        elif name.endswith("corpus"):
            return self._generate_corpus_data(filepath)
        elif name.endswith("qrels"):
            return self._generate_qrel_data(filepath)
        else:
            raise ValueError(f'Unknown subset, choose from: {", ".join(_SUBSETS)}')
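
# Minimal usage sketch (assumes this dataset is published as "PM-AI/germandpr-beir",
# the repo referenced in the homepage above). The __main__ guard keeps the demo from
# running when the datasets library imports this script.
if __name__ == "__main__":
    queries = datasets.load_dataset("PM-AI/germandpr-beir", "original-queries", split="train")
    corpus = datasets.load_dataset("PM-AI/germandpr-beir", "original-corpus", split="train")
    qrels = datasets.load_dataset("PM-AI/germandpr-beir", "original-qrels", split="train")
    print(queries[0], corpus[0], qrels[0], sep="\n")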