Datasets:

Modalities:
Text
Formats:
parquet
Languages:
Korean
ArXiv:
Libraries:
Datasets
pandas
License:
albertvillanova HF staff committed on
Commit
4ae4e65
1 Parent(s): faf9bb3

Delete loading script

Browse files
Files changed (1) hide show
  1. kor_3i4k.py +0 -95
kor_3i4k.py DELETED
@@ -1,95 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """3i4K: Intonation-aided intention identification for Korean dataset"""
16
-
17
-
18
- import csv
19
-
20
- import datasets
21
- from datasets.tasks import TextClassification
22
-
23
-
24
- _CITATION = """\
25
- @article{cho2018speech,
26
- title={Speech Intention Understanding in a Head-final Language: A Disambiguation Utilizing Intonation-dependency},
27
- author={Cho, Won Ik and Lee, Hyeon Seung and Yoon, Ji Won and Kim, Seok Min and Kim, Nam Soo},
28
- journal={arXiv preprint arXiv:1811.04231},
29
- year={2018}
30
- }
31
- """
32
-
33
- _DESCRIPTION = """\
34
- This dataset is designed to identify speaker intention based on real-life spoken utterance in Korean into one of
35
- 7 categories: fragment, description, question, command, rhetorical question, rhetorical command, utterances.
36
- """
37
-
38
- _HOMEPAGE = "https://github.com/warnikchow/3i4k"
39
-
40
- _LICENSE = "CC BY-SA-4.0"
41
-
42
- _TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/warnikchow/3i4k/master/data/train_val_test/fci_train_val.txt"
43
- _TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/warnikchow/3i4k/master/data/train_val_test/fci_test.txt"
44
-
45
-
46
class Kor_3i4k(datasets.GeneratorBasedBuilder):
    """Builder for 3i4K: intonation-aided intention identification for Korean."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return dataset metadata: features, homepage, license and citation."""
        # Seven intention categories; list order defines the integer label
        # indices (0-6) used in the source TSV files.
        label_feature = datasets.features.ClassLabel(
            names=[
                "fragment",
                "statement",
                "question",
                "command",
                "rhetorical question",
                "rhetorical command",
                "intonation-dependent utterance",
            ]
        )
        features = datasets.Features(
            {
                "label": label_feature,
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download the per-split TSV files and map each to a SplitGenerator."""
        split_urls = {
            datasets.Split.TRAIN: _TRAIN_DOWNLOAD_URL,
            datasets.Split.TEST: _TEST_DOWNLOAD_URL,
        }
        # Insertion order of the dict preserves the original train-then-test
        # download order; one download per split, as upstream ships one file
        # per split.
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": dl_manager.download_and_extract(url)},
            )
            for split, url in split_urls.items()
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from a tab-separated label/text file."""
        with open(filepath, encoding="utf-8") as tsv_file:
            reader = csv.reader(tsv_file, delimiter="\t")
            # Each row is exactly [label, text]; the label column holds the
            # integer class index as a string.
            for idx, (label, text) in enumerate(reader):
                yield idx, {"label": int(label), "text": text}