Update files from the datasets library (from 1.1.3)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.1.3
- dataset_infos.json +1 -1
- kor_nli.py +8 -7
dataset_infos.json
CHANGED
@@ -1 +1 @@
-{"multi_nli": {"description": " Korean Natural Language Inference datasets\n", "citation": "@article{ham2020kornli,\n title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},\n author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},\n journal={arXiv preprint arXiv:2004.03289},\n year={2020}\n}\n", "homepage": "https://github.com/kakaobrain/KorNLUDatasets", "license": "", "features": {"
+{"multi_nli": {"description": " Korean Natural Language Inference datasets\n", "citation": "@article{ham2020kornli,\n title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},\n author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},\n journal={arXiv preprint arXiv:2004.03289},\n year={2020}\n}\n", "homepage": "https://github.com/kakaobrain/KorNLUDatasets", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "kor_nli", "config_name": "multi_nli", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 84729207, "num_examples": 392702, "dataset_name": "kor_nli"}}, "download_checksums": {"https://github.com/kakaobrain/KorNLUDatasets/archive/master.zip": {"num_bytes": 42113232, "checksum": "b1184d5e78a7d988400eabe3374b8a7e2abf182896f54e6e311c5173bb2c9bf5"}}, "download_size": 42113232, "post_processing_size": null, "dataset_size": 84729207, "size_in_bytes": 126842439}, "snli": {"description": " Korean Natural Language Inference datasets\n", "citation": "@article{ham2020kornli,\n title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},\n author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},\n journal={arXiv preprint arXiv:2004.03289},\n year={2020}\n}\n", "homepage": "https://github.com/kakaobrain/KorNLUDatasets", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "kor_nli", "config_name": "snli", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 80137097, "num_examples": 550152, "dataset_name": "kor_nli"}}, "download_checksums": {"https://github.com/kakaobrain/KorNLUDatasets/archive/master.zip": {"num_bytes": 42113232, "checksum": "b1184d5e78a7d988400eabe3374b8a7e2abf182896f54e6e311c5173bb2c9bf5"}}, "download_size": 42113232, "post_processing_size": null, "dataset_size": 80137097, "size_in_bytes": 122250329}, "xnli": {"description": " Korean Natural Language Inference datasets\n", "citation": "@article{ham2020kornli,\n title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},\n author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},\n journal={arXiv preprint arXiv:2004.03289},\n year={2020}\n}\n", "homepage": "https://github.com/kakaobrain/KorNLUDatasets", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "kor_nli", "config_name": "xnli", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 518830, "num_examples": 2490, "dataset_name": "kor_nli"}, "test": {"name": "test", "num_bytes": 1047437, "num_examples": 5010, "dataset_name": "kor_nli"}}, "download_checksums": {"https://github.com/kakaobrain/KorNLUDatasets/archive/master.zip": {"num_bytes": 42113232, "checksum": "b1184d5e78a7d988400eabe3374b8a7e2abf182896f54e6e311c5173bb2c9bf5"}}, "download_size": 42113232, "post_processing_size": null, "dataset_size": 1566267, "size_in_bytes": 43679499}}
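The metadata recorded above is what the library surfaces at load time. A minimal sketch of inspecting it, assuming datasets >= 1.1.3 and network access to the KorNLUDatasets archive (the config names "multi_nli", "snli", and "xnli" come from the JSON above):

from datasets import load_dataset

dataset = load_dataset("kor_nli", "multi_nli", split="train")
print(dataset.num_rows)                 # 392702, matching num_examples above
print(dataset.features["label"].names)  # ['entailment', 'neutral', 'contradiction']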
kor_nli.py
CHANGED
@@ -2,7 +2,6 @@
 
 from __future__ import absolute_import, division, print_function
 
-import csv
 import os
 
 import datasets
@@ -58,9 +57,9 @@ class KorNli(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     # These are the features of your dataset like images, labels ...
-                    "
-                    "
-                    "
+                    "premise": datasets.Value("string"),
+                    "hypothesis": datasets.Value("string"),
+                    "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
                 }
             ),
             # If there's a common (input, target) tuple from the features,
@@ -113,9 +112,11 @@ class KorNli(datasets.GeneratorBasedBuilder):
         """Yields examples."""
         # TODO(kor_nli): Yields (key, example) tuples from the dataset
         with open(filepath, encoding="utf-8") as f:
-
-
-
+            next(f)  # skip headers
+            columns = ("premise", "hypothesis", "label")
+            for id_, row in enumerate(f):
+                row = row.strip().split("\t")
                 if len(row) != 3:
                     continue
+                row = dict(zip(columns, row))
                 yield id_, row
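The rewritten _generate_examples drops the csv import and splits each TSV line by hand; a plain split("\t") avoids csv quoting rules, at the cost of silently skipping any malformed row, which is presumably why the import was removed. A standalone sketch of the same logic, with a hypothetical local file path (the real script iterates over paths handed to it by the download manager):

columns = ("premise", "hypothesis", "label")
with open("multinli.train.ko.tsv", encoding="utf-8") as f:  # hypothetical path
    next(f)  # skip the header row, as in the updated script
    for id_, row in enumerate(f):
        row = row.strip().split("\t")
        if len(row) != 3:  # drop malformed lines instead of raising
            continue
        example = dict(zip(columns, row))
        print(id_, example)  # the builder yields these as (key, example) pairs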