Tasks: Text2Text Generation
Modalities: Text
Formats: parquet
Sub-tasks: text-simplification
Languages: English
Size: 1K - 10K
License: gpl-3.0
Commit 05b1aef
Parent(s): ddf932f

Convert dataset to Parquet (#3)

- Convert dataset to Parquet (27439d74020cb9a058880281fbae055d80272c8b)
- Delete loading script (f12a08129c0fab378cccaae3103107bc594f0e6c)

Files changed:
- README.md +13 -6
- simplification/test-00000-of-00001.parquet +3 -0
- simplification/validation-00000-of-00001.parquet +3 -0
- turk.py +0 -117
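With this commit the loading script is gone and the splits are served straight from the Parquet files, so recent versions of the `datasets` library load them without executing any repository code. A minimal loading sketch, assuming the repository id on the Hub is `turk` (the id is not shown on this page; the config name `simplification` comes from the README below):

```python
from datasets import load_dataset

# Loads the Parquet-backed "simplification" config; no dataset script runs.
# The repo id "turk" is an assumption based on this card's pretty_name.
ds = load_dataset("turk", "simplification")

print(ds["validation"].num_rows)  # 2000 per the README metadata
print(ds["test"].num_rows)        # 359
print(ds["validation"][0])        # {"original": ..., "simplifications": [...]}
```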
README.md CHANGED
@@ -17,24 +17,31 @@ task_categories:
 - text2text-generation
 task_ids:
 - text-simplification
-paperswithcode_id: null
 pretty_name: TURK
 dataset_info:
+  config_name: simplification
   features:
   - name: original
     dtype: string
   - name: simplifications
     sequence: string
-  config_name: simplification
   splits:
   - name: validation
-    num_bytes:
+    num_bytes: 2120175
     num_examples: 2000
   - name: test
-    num_bytes:
+    num_bytes: 396366
     num_examples: 359
-  download_size:
-  dataset_size:
+  download_size: 895420
+  dataset_size: 2516541
+configs:
+- config_name: simplification
+  data_files:
+  - split: validation
+    path: simplification/validation-*
+  - split: test
+    path: simplification/test-*
+  default: true
 ---
 
 # Dataset Card for TURK
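The new `configs`/`data_files` block is what makes the script-free loading above possible: the Hub resolves each split by glob against the Parquet files. The files can also be read directly; a sketch using pandas over the `hf://` filesystem (requires `huggingface_hub`; the repo id `turk` is again an assumption):

```python
import pandas as pd

# Resolve the validation split exactly as the data_files glob does.
df = pd.read_parquet(
    "hf://datasets/turk/simplification/validation-00000-of-00001.parquet"
)
print(df.shape)  # expected (2000, 2): columns "original" and "simplifications"
```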
simplification/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e608f09d30927c09086ce0f9e4b73a01f18c1fc21592eb151e26f54ecbb3dfe6
+size 147436
simplification/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c37e709d58e2d74fd2f6d5cbf62e055a7591905eb0635bf3a8ddccb13618ff7
+size 747984
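Only these three-line Git LFS pointers are stored in the git history; the Parquet bytes themselves live in LFS. Since the `oid` is the SHA-256 of the real file, a download can be checked against its pointer. A minimal sketch:

```python
import hashlib

def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's SHA-256 and byte size match its LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Values copied from the test-split pointer above (local path is hypothetical):
print(matches_lfs_pointer(
    "simplification/test-00000-of-00001.parquet",
    "e608f09d30927c09086ce0f9e4b73a01f18c1fc21592eb151e26f54ecbb3dfe6",
    147436,
))
```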
turk.py DELETED
@@ -1,117 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""TURKCorpus: a dataset for sentence simplification evaluation"""
-
-
-import datasets
-
-
-_CITATION = """\
-@article{Xu-EtAl:2016:TACL,
- author = {Wei Xu and Courtney Napoles and Ellie Pavlick and Quanze Chen and Chris Callison-Burch},
- title = {Optimizing Statistical Machine Translation for Text Simplification},
- journal = {Transactions of the Association for Computational Linguistics},
- volume = {4},
- year = {2016},
- url = {https://cocoxu.github.io/publications/tacl2016-smt-simplification.pdf},
- pages = {401--415}
- }
-}
-"""
-
-_DESCRIPTION = """\
-TURKCorpus is a dataset for evaluating sentence simplification systems that focus on lexical paraphrasing,
-as described in "Optimizing Statistical Machine Translation for Text Simplification". The corpus is composed of 2000 validation and 359 test original sentences that were each simplified 8 times by different annotators.
-"""
-
-_HOMEPAGE = "https://github.com/cocoxu/simplification"
-
-_LICENSE = "GNU General Public License v3.0"
-
-_URL_LIST = [
-    (
-        "test.8turkers.tok.norm",
-        "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/test.8turkers.tok.norm",
-    ),
-    (
-        "tune.8turkers.tok.norm",
-        "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/tune.8turkers.tok.norm",
-    ),
-]
-_URL_LIST += [
-    (
-        f"{spl}.8turkers.tok.turk.{i}",
-        f"https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/{spl}.8turkers.tok.turk.{i}",
-    )
-    for spl in ["tune", "test"]
-    for i in range(8)
-]
-
-_URLs = dict(_URL_LIST)
-
-
-class Turk(datasets.GeneratorBasedBuilder):
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="simplification",
-            version=VERSION,
-            description="A set of original sentences aligned with 8 possible simplifications for each.",
-        )
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "original": datasets.Value("string"),
-                "simplifications": datasets.Sequence(datasets.Value("string")),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        data_dir = dl_manager.download_and_extract(_URLs)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepaths": data_dir,
-                    "split": "valid",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"filepaths": data_dir, "split": "test"},
-            ),
-        ]
-
-    def _generate_examples(self, filepaths, split):
-        """Yields examples."""
-        if split == "valid":
-            split = "tune"
-        files = [open(filepaths[f"{split}.8turkers.tok.norm"], encoding="utf-8")] + [
-            open(filepaths[f"{split}.8turkers.tok.turk.{i}"], encoding="utf-8") for i in range(8)
-        ]
-        for id_, lines in enumerate(zip(*files)):
-            yield id_, {"original": lines[0].strip(), "simplifications": [line.strip() for line in lines[1:]]}
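For reference, the deleted builder boiled down to zipping one `*.norm` file of original sentences with eight `*.turk.{i}` reference files per split. A rough standalone sketch of the same logic, using the upstream URLs from the script above (network access and the files' continued availability are assumptions):

```python
import urllib.request

BASE = "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus"

def fetch_lines(name: str) -> list[str]:
    """Download one raw corpus file and split it into lines."""
    with urllib.request.urlopen(f"{BASE}/{name}") as resp:
        return resp.read().decode("utf-8").splitlines()

def examples(split: str):
    """Yield TURK examples; split is 'tune' (validation) or 'test'."""
    originals = fetch_lines(f"{split}.8turkers.tok.norm")
    references = [fetch_lines(f"{split}.8turkers.tok.turk.{i}") for i in range(8)]
    for original, *simplifications in zip(originals, *references):
        yield {
            "original": original.strip(),
            "simplifications": [s.strip() for s in simplifications],
        }
```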