Datasets:

Languages:
Polish
ArXiv:
vshishkin committed on
Commit
e827dc5
1 Parent(s): 14ce267

Upload 3 files

Browse files
Files changed (3) hide show
  1. corpus.jsonl.gz +3 -0
  2. queries.jsonl.gz +3 -0
  3. stats-pl.py +57 -0
corpus.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7da0f81bbeae75353bbb9f142b77fdb254ce63ef481aa0c97904945e0d52b125
3
+ size 19228543
queries.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11ae5baa02c5e3fb1339f4aacfb2b04d65e339a7868afc4230f8aed826883e6e
3
+ size 137281390
stats-pl.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import csv
import os
import datasets

# Module-level logger following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = "cqadupstack stats Dataset"
# The two downloadable splits of this dataset.
_SPLITS = ["corpus", "queries"]

# Base URL for the split archives; empty means files live next to this script.
URL = ""
# Map each split name to its gzipped JSONL archive URL.
_URLs = {split: URL + f"{split}.jsonl.gz" for split in _SPLITS}
14
class Stats(datasets.GeneratorBasedBuilder):
    """Dataset builder for the BEIR-PL stats splits.

    Each config corresponds to one downloadable split ("corpus" or
    "queries"); records are JSONL objects exposed with string fields
    `_id`, `title`, and `text`.
    """

    # One BuilderConfig per split so a split is selected by config name.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            description=f"This is the {name} in the BEIR-PL dataset.",
        ) for name in _SPLITS
    ]

    def _info(self):
        """Return dataset metadata: every example has _id/title/text strings."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "_id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The config name doubles as the split name ("corpus" or "queries").
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)

        return [
            datasets.SplitGenerator(
                name=self.config.name,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepath": data_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples.

        Streams the decompressed JSONL file line by line instead of
        materializing it with readlines() — the extracted splits are
        hundreds of MB, so loading the whole file first wastes memory
        for no benefit.
        """
        with open(filepath, encoding="utf-8") as f:
            for i, line in enumerate(f):
                record = json.loads(line)
                # Drop payload we do not expose, and normalize records
                # that lack a title so the feature schema always holds.
                if "metadata" in record:
                    del record["metadata"]
                if "title" not in record:
                    record["title"] = ""
                yield i, record