Datasets:
- Tasks: Question Answering
- Formats: parquet
- Sub-tasks: multiple-choice-qa
- Languages: English
- Size: 10K - 100K
- ArXiv:
- License:
Convert dataset to Parquet (#4)
- Convert dataset to Parquet (cbc4c478c1a6a1975b46e0b10e2b5a9c4bd2bcc7)
- Add fold_0 data files (11f8e212e109b4570372a488bed4c066696e470f)
- Add fold_1 data files (2663839510ef595241de6377754548901793990b)
- Add fold_2 data files (600c081aabdbd68c9510a2969743f4b8d6cc3dcc)
- Add fold_3 data files (5eb0fd20dc2be8156206d845d8b29e1582c3164b)
- Add fold_4 data files (b2b5d0368c9804b7baaaf912fa5dcca1e3b8073a)
- Delete loading script (2fe4ec4a27b2e56b8227b69ad95cc96fc404f722)
- README.md +73 -28
- codah.py +0 -141
- codah/train-00000-of-00001.parquet +3 -0
- fold_0/test-00000-of-00001.parquet +3 -0
- fold_0/train-00000-of-00001.parquet +3 -0
- fold_0/validation-00000-of-00001.parquet +3 -0
- fold_1/test-00000-of-00001.parquet +3 -0
- fold_1/train-00000-of-00001.parquet +3 -0
- fold_1/validation-00000-of-00001.parquet +3 -0
- fold_2/test-00000-of-00001.parquet +3 -0
- fold_2/train-00000-of-00001.parquet +3 -0
- fold_2/validation-00000-of-00001.parquet +3 -0
- fold_3/test-00000-of-00001.parquet +3 -0
- fold_3/train-00000-of-00001.parquet +3 -0
- fold_3/validation-00000-of-00001.parquet +3 -0
- fold_4/test-00000-of-00001.parquet +3 -0
- fold_4/train-00000-of-00001.parquet +3 -0
- fold_4/validation-00000-of-00001.parquet +3 -0
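With the loading script deleted, the dataset is served directly from the Parquet files declared in the card's configs mapping (see the README diff below). A minimal sketch of loading it after this change, assuming the repository id codah on the Hugging Face Hub:

from datasets import load_dataset

# Official cross-validation fold 0: train/validation/test splits are resolved
# from the fold_0/*.parquet shards added in this commit.
fold0 = load_dataset("codah", "fold_0")
print(fold0)

# The full, unsplit data lives under the "codah" config (train split only).
full = load_dataset("codah", "codah", split="train")
print(full[0])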
README.md CHANGED
@@ -42,10 +42,10 @@ dataset_info:
     dtype: int32
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 571196
     num_examples: 2776
-  download_size:
+  download_size: 352902
-  dataset_size:
+  dataset_size: 571196
 - config_name: fold_0
   features:
   - name: id
@@ -68,16 +68,16 @@ dataset_info:
     dtype: int32
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 344900
     num_examples: 1665
   - name: validation
-    num_bytes:
+    num_bytes: 114199
     num_examples: 556
   - name: test
-    num_bytes:
+    num_bytes: 112097
     num_examples: 555
-  download_size:
+  download_size: 379179
-  dataset_size:
+  dataset_size: 571196
 - config_name: fold_1
   features:
   - name: id
@@ -100,16 +100,16 @@ dataset_info:
     dtype: int32
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 340978
     num_examples: 1665
   - name: validation
-    num_bytes:
+    num_bytes: 114199
     num_examples: 556
   - name: test
-    num_bytes:
+    num_bytes: 116019
     num_examples: 555
-  download_size:
+  download_size: 379728
-  dataset_size:
+  dataset_size: 571196
 - config_name: fold_2
   features:
   - name: id
@@ -132,16 +132,16 @@ dataset_info:
     dtype: int32
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 342281
     num_examples: 1665
   - name: validation
-    num_bytes:
+    num_bytes: 114199
     num_examples: 556
   - name: test
-    num_bytes:
+    num_bytes: 114716
     num_examples: 555
-  download_size:
+  download_size: 379126
-  dataset_size:
+  dataset_size: 571196
 - config_name: fold_3
   features:
   - name: id
@@ -164,16 +164,16 @@ dataset_info:
     dtype: int32
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 342832
     num_examples: 1665
   - name: validation
-    num_bytes:
+    num_bytes: 114199
     num_examples: 556
   - name: test
-    num_bytes:
+    num_bytes: 114165
     num_examples: 555
-  download_size:
+  download_size: 379178
-  dataset_size:
+  dataset_size: 571196
 - config_name: fold_4
   features:
   - name: id
@@ -196,16 +196,61 @@ dataset_info:
     dtype: int32
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 342832
     num_examples: 1665
   - name: validation
-    num_bytes:
+    num_bytes: 114165
     num_examples: 555
   - name: test
-    num_bytes:
+    num_bytes: 114199
     num_examples: 556
-  download_size:
+  download_size: 379178
-  dataset_size:
+  dataset_size: 571196
+configs:
+- config_name: codah
+  data_files:
+  - split: train
+    path: codah/train-*
+- config_name: fold_0
+  data_files:
+  - split: train
+    path: fold_0/train-*
+  - split: validation
+    path: fold_0/validation-*
+  - split: test
+    path: fold_0/test-*
+- config_name: fold_1
+  data_files:
+  - split: train
+    path: fold_1/train-*
+  - split: validation
+    path: fold_1/validation-*
+  - split: test
+    path: fold_1/test-*
+- config_name: fold_2
+  data_files:
+  - split: train
+    path: fold_2/train-*
+  - split: validation
+    path: fold_2/validation-*
+  - split: test
+    path: fold_2/test-*
+- config_name: fold_3
+  data_files:
+  - split: train
+    path: fold_3/train-*
+  - split: validation
+    path: fold_3/validation-*
+  - split: test
+    path: fold_3/test-*
+- config_name: fold_4
+  data_files:
+  - split: train
+    path: fold_4/train-*
+  - split: validation
+    path: fold_4/validation-*
+  - split: test
+    path: fold_4/test-*
 ---

 # Dataset Card for COmmonsense Dataset Adversarially-authored by Humans
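The configs block added above is what lets the Hub's Parquet-backed loader map each configuration name to its data files. A rough manual equivalent, assuming the fold_0 shards have been downloaded locally, points the generic parquet builder at the same globs:

from datasets import load_dataset

# Hypothetical manual equivalent of the fold_0 entry in the configs mapping.
fold0 = load_dataset(
    "parquet",
    data_files={
        "train": "fold_0/train-*.parquet",
        "validation": "fold_0/validation-*.parquet",
        "test": "fold_0/test-*.parquet",
    },
)
print(fold0)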
codah.py DELETED
@@ -1,141 +0,0 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The COmmonsense Dataset Adversarially-authored by Humans (CODAH)"""


import csv

import datasets


_CITATION = """\
@inproceedings{chen2019codah,
  title={CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},
  author={Chen, Michael and D'Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},
  booktitle={Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP},
  pages={63--69},
  year={2019}
}
"""

_DESCRIPTION = """\
The COmmonsense Dataset Adversarially-authored by Humans (CODAH) is an evaluation set for commonsense \
question-answering in the sentence completion style of SWAG. As opposed to other automatically \
generated NLI datasets, CODAH is adversarially constructed by humans who can view feedback \
from a pre-trained model and use this information to design challenging commonsense questions. \
Our experimental results show that CODAH questions present a complementary extension to the SWAG dataset, testing additional modes of common sense.
"""

_URL = "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/"
_FULL_DATA_URL = _URL + "full_data.tsv"

QUESTION_CATEGORIES_MAPPING = {
    "i": "Idioms",
    "r": "Reference",
    "p": "Polysemy",
    "n": "Negation",
    "q": "Quantitative",
    "o": "Others",
}


class CodahConfig(datasets.BuilderConfig):
    """BuilderConfig for CODAH."""

    def __init__(self, fold=None, **kwargs):
        """BuilderConfig for CODAH.

        Args:
            fold: `string`, official cross validation fold.
            **kwargs: keyword arguments forwarded to super.
        """
        super(CodahConfig, self).__init__(**kwargs)
        self.fold = fold


class Codah(datasets.GeneratorBasedBuilder):
    """The COmmonsense Dataset Adversarially-authored by Humans (CODAH)"""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        CodahConfig(name="codah", version=datasets.Version("1.0.0"), description="Full CODAH dataset", fold=None),
        CodahConfig(
            name="fold_0", version=datasets.Version("1.0.0"), description="Official CV split (fold_0)", fold="fold_0"
        ),
        CodahConfig(
            name="fold_1", version=datasets.Version("1.0.0"), description="Official CV split (fold_1)", fold="fold_1"
        ),
        CodahConfig(
            name="fold_2", version=datasets.Version("1.0.0"), description="Official CV split (fold_2)", fold="fold_2"
        ),
        CodahConfig(
            name="fold_3", version=datasets.Version("1.0.0"), description="Official CV split (fold_3)", fold="fold_3"
        ),
        CodahConfig(
            name="fold_4", version=datasets.Version("1.0.0"), description="Official CV split (fold_4)", fold="fold_4"
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "question_category": datasets.features.ClassLabel(
                        names=["Idioms", "Reference", "Polysemy", "Negation", "Quantitative", "Others"]
                    ),
                    "question_propmt": datasets.Value("string"),
                    "candidate_answers": datasets.features.Sequence(datasets.Value("string")),
                    "correct_answer_idx": datasets.Value("int32"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/Websail-NU/CODAH",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        if self.config.name == "codah":
            data_file = dl_manager.download(_FULL_DATA_URL)
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_file": data_file})]

        base_url = f"{_URL}cv_split/{self.config.fold}/"
        _urls = {
            "train": base_url + "train.tsv",
            "dev": base_url + "dev.tsv",
            "test": base_url + "test.tsv",
        }
        downloaded_files = dl_manager.download_and_extract(_urls)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_file": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"data_file": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"data_file": downloaded_files["test"]}),
        ]

    def _generate_examples(self, data_file):
        with open(data_file, encoding="utf-8") as f:
            rows = csv.reader(f, delimiter="\t")
            for i, row in enumerate(rows):
                question_category = QUESTION_CATEGORIES_MAPPING[row[0]] if row[0] != "" else -1
                example = {
                    "id": i,
                    "question_category": question_category,
                    "question_propmt": row[1],
                    "candidate_answers": row[2:-1],
                    "correct_answer_idx": int(row[-1]),
                }
                yield i, example
codah/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8600f9d89cfbd531bcf83ccfe96266cf8d4cc51bde05527472bd4facaf99a2ad
+size 352902
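Each of these three-line additions is a Git LFS pointer; the Parquet payload itself lives in LFS storage (here the 352902-byte size matches the codah config's download_size in the card). One way to fetch and inspect a shard, assuming the repository id codah, using standard huggingface_hub and pandas calls:

import pandas as pd
from huggingface_hub import hf_hub_download

# Downloading through the Hub resolves the LFS pointer to the real Parquet file.
path = hf_hub_download(
    repo_id="codah",  # assumed repository id
    filename="codah/train-00000-of-00001.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(df.shape)  # expected (2776, 5) given the split metadata above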
fold_0/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cdea40eb5dbf2bdbca2cd6003a422af24b11079022d9bd723a0748f7197adc9
+size 76393

fold_0/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4d2272b9ca656264f06d9a145e397743bddf3d9c73e2ff0200ac75e30f26160
+size 225466

fold_0/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f42d874b57e8c175520d8c34328a7c4775904a969ffc78301bee3b5aa0470421
+size 77320

fold_1/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e423399b731f6340bda743baa279258642cbfb06431686edf4083cb627ee3661
+size 78824

fold_1/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4174d242926f44ff6268df8a0a81b10174eb3781747b0097094ec115a4b3e491
+size 223584

fold_1/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f42d874b57e8c175520d8c34328a7c4775904a969ffc78301bee3b5aa0470421
+size 77320

fold_2/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cc7ef3e24556e6b8fed428f24da49bcebb6b6a9b4331147b3bd5eb9d9a692c0
+size 78336

fold_2/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7db996d3ad04d09572dd72ac7f83a3eef6c4ab7ab17b398ee64895e282fdaf81
+size 223470

fold_2/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f42d874b57e8c175520d8c34328a7c4775904a969ffc78301bee3b5aa0470421
+size 77320

fold_3/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f3d1ba74b98478c22e6cdb60f17df3a92ef8038523c043ac6e50ac1e531b0ea
+size 77544

fold_3/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53373aed4b855e2b90c796f25175c96e65b035f0f905582eca998090a1cec84f
+size 224314

fold_3/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f42d874b57e8c175520d8c34328a7c4775904a969ffc78301bee3b5aa0470421
+size 77320

fold_4/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f42d874b57e8c175520d8c34328a7c4775904a969ffc78301bee3b5aa0470421
+size 77320

fold_4/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53373aed4b855e2b90c796f25175c96e65b035f0f905582eca998090a1cec84f
+size 224314

fold_4/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f3d1ba74b98478c22e6cdb60f17df3a92ef8038523c043ac6e50ac1e531b0ea
+size 77544