PM-AI committed on
Commit
533c16c
1 Parent(s): 4e4ae72

Create germandpr-beir.py

Browse files
Files changed (1) hide show
  1. germandpr-beir.py +104 -0
germandpr-beir.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import csv
import json

import datasets
2
+
3
+
4
# Version of this BEIR conversion of deepset/germanDPR.
_VERSION = "1.0.0"

# Card-level summary: one subset pair mirrors the source 1:1, the other is text-processed.
_DESCRIPTION = (
    "Deepset's germanDPR dataset made compatible with BEIR benchmark framework. One version contains "
    "the original dataset 1:1 and the other dataset is preprocessed. See official dataset card for "
    "usage of dataset with BEIR."
)

# Every configuration (subset) this loader knows how to build.
_SUBSETS = ["queries-original", "corpus-original", "queries-processed", "corpus-processed", "qrels"]
11
+
12
+
13
class GermanDPRBeir(datasets.GeneratorBasedBuilder):
    """BEIR-compatible loader for deepset/germanDPR.

    Exposes five configurations (see ``_SUBSETS``): BEIR-style queries and
    corpus files — each in an "original" (1:1) and a "processed" variant —
    plus the qrels relevance judgements for the train and test splits.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="queries-original",
            description="BEIR queries created 1:1 from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="corpus-original",
            description="BEIR corpus created 1:1 from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="queries-processed",
            description="BEIR queries created and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="corpus-processed",
            description="BEIR corpus created and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="qrels",
            description="BEIR qrels created from deepset/germanDPR for train and test split.",
            version=_VERSION,
        ),
    ]

    DEFAULT_CONFIG_NAME = "qrels"

    def _info(self):
        """Return the DatasetInfo whose features match the selected subset."""
        name = self.config.name
        if name.startswith("queries"):
            # BEIR query format: identifier + query text.
            features = {
                "_id": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        elif name.startswith("corpus"):
            # BEIR corpus format: identifier + document title + passage text.
            features = {
                "_id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        else:
            # name == "qrels": relevance judgements linking a query to a corpus document.
            features = {
                "query-id": datasets.Value("string"),
                "corpus-id": datasets.Value("string"),
                "score": datasets.Value("int32"),
            }

        return datasets.DatasetInfo(
            description=f"{_DESCRIPTION}\n{self.config.description}",
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/PM-AI/germandpr-beir",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        For "qrels" both split-specific tsv files are downloaded and the
        matching one is picked in ``_generate_examples``; for queries/corpus
        a single jsonl file backs both splits (BEIR keeps one shared corpus
        and query pool rather than per-split files).
        """
        name = self.config.name
        if name == "qrels":
            dl_path = dl_manager.download([
                "https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/qrels/test.tsv",
                "https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/qrels/train.tsv",
            ])
        else:
            dl_path = dl_manager.download(
                f"https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/{name}.jsonl"
            )

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path, "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": dl_path, "split": "test"}),
        ]

    def _generate_examples(self, filepath, split):
        """Yields (key, example) pairs read from the downloaded files.

        Replaces the previous stub that yielded hard-coded placeholder rows:
        queries/corpus subsets are parsed as one JSON object per line, qrels
        as a tab-separated file with a `query-id\tcorpus-id\tscore` header.
        """
        name = self.config.name
        if name == "qrels":
            # Two files were downloaded; select the one for the requested split.
            qrels_path = next(p for p in filepath if p.endswith(f"{split}.tsv"))
            with open(qrels_path, encoding="utf-8") as f:
                reader = csv.DictReader(f, delimiter="\t")
                for idx, row in enumerate(reader):
                    yield idx, {
                        "query-id": row["query-id"],
                        "corpus-id": row["corpus-id"],
                        "score": int(row["score"]),
                    }
        else:
            with open(filepath, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    entry = json.loads(line)
                    if name.startswith("queries"):
                        yield idx, {"_id": entry["_id"], "text": entry["text"]}
                    else:
                        # Corpus entries; BEIR allows an empty title, so default it.
                        yield idx, {
                            "_id": entry["_id"],
                            "title": entry.get("title", ""),
                            "text": entry["text"],
                        }