parquet-converter committed
Commit e557c42 • Parent(s): 504998b

Update parquet files
Browse files
- .gitattributes +0 -27
- README.md +0 -210
- anli/art-train.parquet +3 -0
- anli/art-validation.parquet +3 -0
- art.py +0 -116
- dataset_infos.json +0 -1
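
With the loading script gone, the converted splits can be read directly. A minimal sketch, assuming the two parquet files added below have been fetched locally (for example via Git LFS); the column names and row counts are taken from the deleted dataset card:

```
# Minimal sketch: read the converted parquet splits with pandas.
# Assumes anli/art-train.parquet and anli/art-validation.parquet
# (added in this commit) have been pulled locally.
import pandas as pd

train = pd.read_parquet("anli/art-train.parquet")
validation = pd.read_parquet("anli/art-validation.parquet")

# Expected per the deleted dataset card:
# columns ['observation_1', 'observation_2', 'hypothesis_1', 'hypothesis_2', 'label']
print(train.columns.tolist())
print(len(train), len(validation))  # 169654, 1532
```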
.gitattributes
DELETED
@@ -1,27 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
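
The deleted rules above told Git LFS which paths to store as pointers. A rough way to mirror that matching in Python, using `fnmatch` as an approximation (gitattributes globbing has extra rules, such as `**`, that `fnmatch` does not implement):

```
# Hedged sketch: check which of the deleted LFS patterns would match a path.
# fnmatch only approximates gitattributes glob semantics.
from fnmatch import fnmatch

lfs_patterns = ["*.parquet", "*.arrow", "*.bin", "*.zip", "*tfevents*"]

def tracked_by_lfs(path: str) -> bool:
    # gitattributes patterns without a slash match against the basename.
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(name, pat) for pat in lfs_patterns)

print(tracked_by_lfs("anli/art-train.parquet"))  # True
print(tracked_by_lfs("README.md"))               # False
```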
README.md
DELETED
@@ -1,210 +0,0 @@
----
-annotations_creators:
-- crowdsourced
-language:
-- en
-language_creators:
-- found
-license:
-- unknown
-multilinguality:
-- monolingual
-pretty_name: Abductive Reasoning in narrative Text
-size_categories:
-- 100K<n<1M
-source_datasets:
-- original
-task_categories:
-- multiple-choice
-- text-classification
-task_ids:
-- natural-language-inference
-paperswithcode_id: art-dataset
-tags:
-- abductive-natural-language-inference
-dataset_info:
-  features:
-  - name: observation_1
-    dtype: string
-  - name: observation_2
-    dtype: string
-  - name: hypothesis_1
-    dtype: string
-  - name: hypothesis_2
-    dtype: string
-  - name: label
-    dtype:
-      class_label:
-        names:
-          0: '0'
-          1: '1'
-          2: '2'
-  config_name: anli
-  splits:
-  - name: validation
-    num_bytes: 312314
-    num_examples: 1532
-  - name: train
-    num_bytes: 34046304
-    num_examples: 169654
-  download_size: 5118294
-  dataset_size: 34358618
----
-
-# Dataset Card for "art"
-
-## Table of Contents
-- [Dataset Description](#dataset-description)
-  - [Dataset Summary](#dataset-summary)
-  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
-  - [Languages](#languages)
-- [Dataset Structure](#dataset-structure)
-  - [Data Instances](#data-instances)
-  - [Data Fields](#data-fields)
-  - [Data Splits](#data-splits)
-- [Dataset Creation](#dataset-creation)
-  - [Curation Rationale](#curation-rationale)
-  - [Source Data](#source-data)
-  - [Annotations](#annotations)
-  - [Personal and Sensitive Information](#personal-and-sensitive-information)
-- [Considerations for Using the Data](#considerations-for-using-the-data)
-  - [Social Impact of Dataset](#social-impact-of-dataset)
-  - [Discussion of Biases](#discussion-of-biases)
-  - [Other Known Limitations](#other-known-limitations)
-- [Additional Information](#additional-information)
-  - [Dataset Curators](#dataset-curators)
-  - [Licensing Information](#licensing-information)
-  - [Citation Information](#citation-information)
-  - [Contributions](#contributions)
-
-## Dataset Description
-
-- **Homepage:** [https://leaderboard.allenai.org/anli/submissions/get-started](https://leaderboard.allenai.org/anli/submissions/get-started)
-- **Repository:** https://github.com/allenai/abductive-commonsense-reasoning
-- **Paper:** [Abductive Commonsense Reasoning](https://arxiv.org/abs/1908.05739)
-- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-- **Size of downloaded dataset files:** 4.88 MB
-- **Size of the generated dataset:** 32.77 MB
-- **Total amount of disk used:** 37.65 MB
-
-### Dataset Summary
-
-ART consists of over 20k commonsense narrative contexts and 200k explanations.
-
-The Abductive Natural Language Inference Dataset from AI2.
-
-### Supported Tasks and Leaderboards
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Languages
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-## Dataset Structure
-
-### Data Instances
-
-#### anli
-
-- **Size of downloaded dataset files:** 4.88 MB
-- **Size of the generated dataset:** 32.77 MB
-- **Total amount of disk used:** 37.65 MB
-
-An example of 'train' looks as follows.
-```
-{
-    "hypothesis_1": "Chad's car had all sorts of other problems besides alignment.",
-    "hypothesis_2": "Chad's car had all sorts of benefits other than being sexy.",
-    "label": 1,
-    "observation_1": "Chad went to get the wheel alignment measured on his car.",
-    "observation_2": "The mechanic provided a working alignment with new body work."
-}
-```
-
-### Data Fields
-
-The data fields are the same among all splits.
-
-#### anli
-- `observation_1`: a `string` feature.
-- `observation_2`: a `string` feature.
-- `hypothesis_1`: a `string` feature.
-- `hypothesis_2`: a `string` feature.
-- `label`: a classification label, with possible values including `0` (0), `1` (1), `2` (2).
-
-### Data Splits
-
-|name|train |validation|
-|----|-----:|---------:|
-|anli|169654|      1532|
-
-## Dataset Creation
-
-### Curation Rationale
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Source Data
-
-#### Initial Data Collection and Normalization
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-#### Who are the source language producers?
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Annotations
-
-#### Annotation process
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-#### Who are the annotators?
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Personal and Sensitive Information
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-## Considerations for Using the Data
-
-### Social Impact of Dataset
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Discussion of Biases
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Other Known Limitations
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-## Additional Information
-
-### Dataset Curators
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Licensing Information
-
-[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
-### Citation Information
-
-```
-@inproceedings{Bhagavatula2020Abductive,
-  title={Abductive Commonsense Reasoning},
-  author={Chandra Bhagavatula and Ronan Le Bras and Chaitanya Malaviya and Keisuke Sakaguchi and Ari Holtzman and Hannah Rashkin and Doug Downey and Wen-tau Yih and Yejin Choi},
-  booktitle={International Conference on Learning Representations},
-  year={2020},
-  url={https://openreview.net/forum?id=Byg1v1HKDB}
-}
-```
-
-### Contributions
-
-Thanks to [@patrickvonplaten](https://github.com/patrickvonplaten), [@thomwolf](https://github.com/thomwolf), [@mariamabarham](https://github.com/mariamabarham), [@lewtun](https://github.com/lewtun), [@lhoestq](https://github.com/lhoestq) for adding this dataset.
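
The deleted card's `label` feature is a ClassLabel whose names are the strings '0', '1', '2'. A minimal sketch of inspecting it, assuming the `datasets` library and that the Hub dataset id remains `art`:

```
# Hedged sketch: decode the ClassLabel described in the deleted card.
from datasets import load_dataset

ds = load_dataset("art", split="validation")
print(ds.features["label"].names)  # ['0', '1', '2'] per the card

example = ds[0]
# Labels are stored as ints; int2str recovers the class name.
print(example["label"], ds.features["label"].int2str(example["label"]))
```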
anli/art-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:021ef4c45476de1c786399775ec1bbeb3dd9c4547a9d9d6ad0720ffdf682abcc
+size 8983168
anli/art-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:932867cc0ef6f5e4ce02806dfb01b6a1c84801974b8c01f608e4353096e6c662
+size 208635
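
Both added files are Git LFS pointers rather than the parquet bytes themselves. A small sketch of checking a fetched file against its pointer, following the LFS spec-v1 layout shown above; the local path is an assumption for illustration:

```
# Hedged sketch: verify a downloaded file against its Git LFS pointer.
import hashlib

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; the oid value carries a "sha256:" prefix.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:932867cc0ef6f5e4ce02806dfb01b6a1c84801974b8c01f608e4353096e6c662\n"
    "size 208635\n"
)

with open("anli/art-validation.parquet", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert pointer["oid"] == "sha256:" + digest
assert int(pointer["size"]) == 208635
```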
art.py
DELETED
@@ -1,116 +0,0 @@
-"""TODO(art): Add a description here."""
-
-
-import json
-import os
-
-import datasets
-
-
-# TODO(art): BibTeX citation
-_CITATION = """\
-@InProceedings{anli,
-author = {Chandra, Bhagavatula and Ronan, Le Bras and Chaitanya, Malaviya and Keisuke, Sakaguchi and Ari, Holtzman
-and Hannah, Rashkin and Doug, Downey and Scott, Wen-tau Yih and Yejin, Choi},
-title = {Abductive Commonsense Reasoning},
-year = {2020}
-}"""
-
-# TODO(art):
-_DESCRIPTION = """\
-the Abductive Natural Language Inference Dataset from AI2
-"""
-_DATA_URL = "https://storage.googleapis.com/ai2-mosaic/public/alphanli/alphanli-train-dev.zip"
-
-
-class ArtConfig(datasets.BuilderConfig):
-    """BuilderConfig for Art."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for Art.
-        Args:
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(ArtConfig, self).__init__(version=datasets.Version("0.1.0", ""), **kwargs)
-
-
-class Art(datasets.GeneratorBasedBuilder):
-    """TODO(art): Short description of my dataset."""
-
-    # TODO(art): Set up version.
-    VERSION = datasets.Version("0.1.0")
-    BUILDER_CONFIGS = [
-        ArtConfig(
-            name="anli",
-            description="""\
-            the Abductive Natural Language Inference Dataset from AI2.
-            """,
-        ),
-    ]
-
-    def _info(self):
-        # TODO(art): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "observation_1": datasets.Value("string"),
-                    "observation_2": datasets.Value("string"),
-                    "hypothesis_1": datasets.Value("string"),
-                    "hypothesis_2": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(num_classes=3)
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://leaderboard.allenai.org/anli/submissions/get-started",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(art): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_DATA_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": os.path.join(dl_dir, "dev.jsonl"),
-                    "labelpath": os.path.join(dl_dir, "dev-labels.lst"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(dl_dir, "train.jsonl"),
-                    "labelpath": os.path.join(dl_dir, "train-labels.lst"),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, labelpath):
-        """Yields examples."""
-        # TODO(art): Yields (key, example) tuples from the dataset
-        data = []
-        for line in open(filepath, encoding="utf-8"):
-            data.append(json.loads(line))
-        labels = []
-        with open(labelpath, encoding="utf-8") as f:
-            for word in f:
-                labels.append(word)
-        for idx, row in enumerate(data):
-            yield idx, {
-                "observation_1": row["obs1"],
-                "observation_2": row["obs2"],
-                "hypothesis_1": row["hyp1"],
-                "hypothesis_2": row["hyp2"],
-                "label": labels[idx],
-            }
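
For reference, the heart of the deleted script pairs each JSON line of the source archive with a label from the matching `.lst` file. A standalone sketch of that pairing, with one deliberate change: the original appends the raw label line, newline included, so a plain port should strip it:

```
# Hedged standalone sketch of the deleted _generate_examples logic.
import json

def generate_examples(filepath: str, labelpath: str):
    with open(filepath, encoding="utf-8") as f:
        data = [json.loads(line) for line in f]
    with open(labelpath, encoding="utf-8") as f:
        # Unlike the original, strip the trailing newline from each label.
        labels = [line.strip() for line in f]
    for idx, row in enumerate(data):
        yield idx, {
            "observation_1": row["obs1"],
            "observation_2": row["obs2"],
            "hypothesis_1": row["hyp1"],
            "hypothesis_2": row["hyp2"],
            "label": labels[idx],
        }
```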
dataset_infos.json
DELETED
@@ -1 +0,0 @@
-{"anli": {"description": "the Abductive Natural Language Inference Dataset from AI2\n", "citation": "@InProceedings{anli,\n author = \"Chandra, Bhagavatula\n and Ronan, Le Bras\n and Chaitanya, Malaviya\n and Keisuke, Sakaguchi\n and Ari, Holtzman\n and Hannah, Rashkin\n and Doug, Downey\n and Scott, Wen-tau Yih\n and Yejin, Choi\",\n title = \"Abductive Commonsense Reasoning\",\n year = \"2020\",\n}", "homepage": "https://leaderboard.allenai.org/anli/submissions/get-started", "license": "", "features": {"observation_1": {"dtype": "string", "id": null, "_type": "Value"}, "observation_2": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis_1": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis_2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["0", "1", "2"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "supervised_keys": null, "builder_name": "art", "config_name": "anli", "version": {"version_str": "0.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 34046304, "num_examples": 169654, "dataset_name": "art"}, "validation": {"name": "validation", "num_bytes": 312314, "num_examples": 1532, "dataset_name": "art"}}, "download_checksums": {"https://storage.googleapis.com/ai2-mosaic/public/alphanli/alphanli-train-dev.zip": {"num_bytes": 5118294, "checksum": "24840b27553e93ec625ae020dbf78d92daeae4be31ebbd469a0c9f6f99ed1c8d"}}, "download_size": 5118294, "dataset_size": 34358618, "size_in_bytes": 39476912}}
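
The deleted metadata records a sha256 checksum for the source archive. A minimal sketch of reproducing it, assuming the archive has been downloaded to the working directory under its original name:

```
# Hedged sketch: reproduce the download checksum recorded in the deleted
# dataset_infos.json for alphanli-train-dev.zip.
import hashlib

EXPECTED = "24840b27553e93ec625ae020dbf78d92daeae4be31ebbd469a0c9f6f99ed1c8d"

sha = hashlib.sha256()
with open("alphanli-train-dev.zip", "rb") as f:
    # Stream in chunks so large archives need not fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print(sha.hexdigest() == EXPECTED)  # True if the download is intact
```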