parquet-converter committed
Commit 43ede96
1 Parent(s): ff09238

Update parquet files

wikipedia-nq-corpus.py DELETED
@@ -1,90 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """Wikipedia NQ dataset."""
-
- import json
-
- import datasets
-
- _CITATION = """
- @inproceedings{karpukhin-etal-2020-dense,
-     title = "Dense Passage Retrieval for Open-Domain Question Answering",
-     author = "Karpukhin, Vladimir and Oguz, Barlas and Min, Sewon and Lewis, Patrick and Wu, Ledell and Edunov,
-     Sergey and Chen, Danqi and Yih, Wen-tau",
-     booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
-     month = nov,
-     year = "2020",
-     address = "Online",
-     publisher = "Association for Computational Linguistics",
-     url = "https://www.aclweb.org/anthology/2020.emnlp-main.550",
-     doi = "10.18653/v1/2020.emnlp-main.550",
-     pages = "6769--6781",
- }
- """
-
- _DESCRIPTION = "dataset load script for Wikipedia NQ Corpus"
-
- _DATASET_URLS = {
-     'train': "https://huggingface.co/datasets/Tevatron/wikipedia-nq-corpus/resolve/main/corpus.jsonl.gz"
- }
-
-
- class WikipediaNqCorpus(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("0.0.1")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(version=VERSION,
-                                description="Wikipedia Corpus 100-word splits"),
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
-              'title': datasets.Value('string')},
-         )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage="",
-             # License for the dataset if available
-             license="",
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
-         splits = [
-             datasets.SplitGenerator(
-                 name="train",
-                 gen_kwargs={
-                     "filepath": downloaded_files["train"],
-                 },
-             ),
-         ]
-         return splits
-
-     def _generate_examples(self, filepath):
-         """Yields examples."""
-         with open(filepath, encoding="utf-8") as f:
-             for line in f:
-                 data = json.loads(line)
-                 yield data['docid'], data
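With the loading script deleted, the dataset is served from the auto-converted parquet shards listed below, so no custom builder code runs on load. A minimal usage sketch, assuming the repository id is xxazz/nq-corpus (inferred from the shard directory name xxazz--nq-corpus; adjust if the namespace differs):

# A sketch, not part of the commit: load the converted parquet shards via
# the datasets library; the deleted script is no longer needed.
from datasets import load_dataset

# "xxazz/nq-corpus" is an assumption inferred from the directory
# "xxazz--nq-corpus" in this commit.
corpus = load_dataset("xxazz/nq-corpus", split="train")
print(corpus[0]["docid"], corpus[0]["title"])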
corpus.jsonl.gz → xxazz--nq-corpus/json-train-00000-of-00003.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7a790d364845af62316c7baf03a01be21c2dbf2f12771413dd5fe48eebdfa34b
- size 498194799
+ oid sha256:275719676892da148015e20953468ab179079843e3d767cff7df4d8082add11b
+ size 287514891
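For readers working from a local checkout, a hedged sketch of inspecting one converted shard directly with pyarrow; the docid/text/title schema mirrors the Features declared by the deleted wikipedia-nq-corpus.py:

# A sketch, not part of the commit: inspect one converted shard locally.
import pyarrow.parquet as pq

# Path as it appears in this commit; expected columns are docid, text, title.
table = pq.read_table("xxazz--nq-corpus/json-train-00000-of-00003.parquet")
print(table.schema)
print(table.num_rows)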
xxazz--nq-corpus/json-train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a7532d3f5e6076366466f5f42e1f06c1d2bc470f16e917167c7b5ce0693c7d8
+ size 287225812
xxazz--nq-corpus/json-train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64457c00b8becb6f4259c2687a8bab705b64699114533452e7940438fe8e76b4
+ size 210061336
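Each ADDED file above is a Git LFS pointer recording the blob's sha256 oid and byte size, not the parquet data itself. A sketch of verifying a downloaded shard against its pointer, using the values from this commit:

# A sketch, not part of the commit: check a downloaded shard against the
# oid and size recorded in its Git LFS pointer.
import hashlib
import os

def verify_lfs_object(path: str, oid: str, size: int) -> bool:
    """Return True if the file at `path` matches the pointer's oid and size."""
    if os.path.getsize(path) != size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks to avoid loading the whole file into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == oid

# Values copied from the pointer for json-train-00002-of-00003.parquet above.
ok = verify_lfs_object(
    "xxazz--nq-corpus/json-train-00002-of-00003.parquet",
    "64457c00b8becb6f4259c2687a8bab705b64699114533452e7940438fe8e76b4",
    210061336,
)
print("checksum ok:", ok)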