Datasets:

Modalities:
Tabular
Text
Formats:
parquet
Libraries:
Datasets
pandas
License:
yslim0726 committed on
Commit
2a235ef
1 Parent(s): 979d08c

Upload glue.py

Browse files
Files changed (1) hide show
  1. glue.py +240 -0
glue.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import datasets
4
+ from datasets import BuilderConfig, Features, ClassLabel, Value, Sequence
5
+
6
+
7
# Dataset-card description shown to users (Korean): a Korean translation of
# the GLUE benchmark for instruction tuning.  Runtime string — kept verbatim.
_DESCRIPTION = """
# 한국어 지시학습 데이터셋
- glue 데이터셋을 한국어로 변역한 데이터셋
"""
11
+
12
# BibTeX entry for the paper describing how this dataset was built
# (HCLT 2023).  Embedded into DatasetInfo at runtime — kept verbatim.
_CITATION = """
@inproceedings{KITD,
title={언어 번역 모델을 통한 한국어 지시 학습 데이터 세트 구축},
author={임영서, 추현창, 김산, 장진예, 정민영, 신사임},
booktitle={제 35회 한글 및 한국어 정보처리 학술대회},
pages={591--595},
month=oct,
year={2023}
}
"""
22
+
23
+ # glue
24
# glue
# Feature schema for the CoLA subset: one sentence, an int label, and the
# per-user example index carried over from the source data.
_COLA_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "label": Value(dtype="int32"),
    "sentence": Value(dtype="string"),
})
29
+
30
def _parsing_cola(file_path):
    """Yield ``(index, example)`` pairs from a CoLA-style JSON file.

    Args:
        file_path: Path to a JSON file holding a list of example dicts.

    Yields:
        Tuple of the running enumeration index and a dict with keys
        ``data_index_by_user``, ``label``, ``sentence``.
    """
    # Explicit UTF-8: the corpus is Korean text, so relying on the platform
    # default encoding (e.g. cp949 on Windows) would be fragile.
    with open(file_path, mode="r", encoding="utf-8") as f:
        dataset = json.load(f)
    for _idx, data in enumerate(dataset):
        yield _idx, {
            "data_index_by_user": data["data_index_by_user"],
            "label": data["label"],
            "sentence": data["sentence"],
        }
43
+
44
# Feature schema for the MRPC subset: a sentence pair, an int label, the
# per-user example index, and the original GLUE example id (``idx``).
_MRPC_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "sentence1": Value(dtype="string"),
    "sentence2": Value(dtype="string"),
    "label": Value(dtype="int32"),
    "idx": Value(dtype="int32")
})
51
+
52
def _parsing_mrpc(file_path):
    """Yield ``(index, example)`` pairs from an MRPC-style JSON file.

    Args:
        file_path: Path to a JSON file holding a list of example dicts.

    Yields:
        Tuple of the running enumeration index and a dict with keys
        ``data_index_by_user``, ``sentence1``, ``sentence2``, ``label``,
        ``idx``.
    """
    # Explicit UTF-8: the corpus is Korean text, so relying on the platform
    # default encoding (e.g. cp949 on Windows) would be fragile.
    with open(file_path, mode="r", encoding="utf-8") as f:
        dataset = json.load(f)
    for _i, data in enumerate(dataset):
        yield _i, {
            "data_index_by_user": data["data_index_by_user"],
            "sentence1": data["sentence1"],
            "sentence2": data["sentence2"],
            "label": data["label"],
            "idx": data["idx"],
        }
69
+
70
# Feature schema for the QNLI subset: a question/sentence pair with an int
# label and the per-user example index.
_QNLI_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "label": Value(dtype="int32"),
    "question": Value(dtype="string"),
    "sentence": Value(dtype="string"),
})
76
+
77
def _parsing_qnli(file_path):
    """Yield ``(index, example)`` pairs from a QNLI-style JSON file.

    Args:
        file_path: Path to a JSON file holding a list of example dicts.

    Yields:
        Tuple of the running enumeration index and a dict with keys
        ``data_index_by_user``, ``label``, ``question``, ``sentence``.
    """
    # Explicit UTF-8: the corpus is Korean text, so relying on the platform
    # default encoding (e.g. cp949 on Windows) would be fragile.
    with open(file_path, mode="r", encoding="utf-8") as f:
        dataset = json.load(f)
    for _idx, data in enumerate(dataset):
        yield _idx, {
            "data_index_by_user": data["data_index_by_user"],
            "label": data["label"],
            "question": data["question"],
            "sentence": data["sentence"],
        }
92
+
93
# Feature schema for the QQP subset: a question pair, an int label, the
# per-user example index, and the original GLUE example id (``idx``).
_QQP_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "question1": Value(dtype="string"),
    "question2": Value(dtype="string"),
    "label": Value(dtype="int32"),
    "idx": Value(dtype="int32")
})
100
+
101
def _parsing_qqp(file_path):
    """Yield ``(index, example)`` pairs from a QQP-style JSON file.

    Args:
        file_path: Path to a JSON file holding a list of example dicts.

    Yields:
        Tuple of the running enumeration index and a dict with keys
        ``data_index_by_user``, ``question1``, ``question2``, ``label``,
        ``idx``.
    """
    # Explicit UTF-8: the corpus is Korean text, so relying on the platform
    # default encoding (e.g. cp949 on Windows) would be fragile.
    with open(file_path, mode="r", encoding="utf-8") as f:
        dataset = json.load(f)
    for _i, data in enumerate(dataset):
        yield _i, {
            "data_index_by_user": data["data_index_by_user"],
            "question1": data["question1"],
            "question2": data["question2"],
            "label": data["label"],
            "idx": data["idx"],
        }
118
+
119
# Feature schema for the WNLI subset: a sentence pair, an int label, the
# per-user example index, and the original GLUE example id (``idx``).
_WNLI_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "sentence1": Value(dtype="string"),
    "sentence2": Value(dtype="string"),
    "label": Value(dtype="int32"),
    "idx": Value(dtype="int32")
})
126
+
127
def _parsing_wnli(file_path):
    """Yield ``(index, example)`` pairs from a WNLI-style JSON file.

    Args:
        file_path: Path to a JSON file holding a list of example dicts.

    Yields:
        Tuple of the running enumeration index and a dict with keys
        ``data_index_by_user``, ``sentence1``, ``sentence2``, ``label``,
        ``idx``.
    """
    # Explicit UTF-8: the corpus is Korean text, so relying on the platform
    # default encoding (e.g. cp949 on Windows) would be fragile.
    with open(file_path, mode="r", encoding="utf-8") as f:
        dataset = json.load(f)
    for _i, data in enumerate(dataset):
        yield _i, {
            "data_index_by_user": data["data_index_by_user"],
            "sentence1": data["sentence1"],
            "sentence2": data["sentence2"],
            "label": data["label"],
            "idx": data["idx"],
        }
144
+
145
class GlueConfig(BuilderConfig):
    """BuilderConfig for one Korean-translated GLUE subset.

    Beyond the standard ``BuilderConfig`` fields, it carries the subset's
    Features schema, a file-reading generator, a per-example parsing
    function, and the citation string.
    """

    def __init__(self, name, feature, reading_fn, parsing_fn, citation, **kwargs):
        # All configs share a single fixed version.
        super().__init__(name=name, version=datasets.Version("1.0.0"), **kwargs)
        self.feature = feature
        self.reading_fn = reading_fn
        self.parsing_fn = parsing_fn
        self.citation = citation
155
+
156
class GLUE(datasets.GeneratorBasedBuilder):
    """Generator-based builder for the Korean-translated GLUE subsets."""

    # One config per subset; all share data_dir "./glue", an identity
    # parsing function, and the common citation.
    BUILDER_CONFIGS = [
        GlueConfig(
            name=subset,
            data_dir="./glue",
            feature=feature,
            reading_fn=reader,
            parsing_fn=lambda x: x,
            citation=_CITATION,
        )
        for subset, feature, reader in [
            ("cola", _COLA_FEATURES, _parsing_cola),
            ("mrpc", _MRPC_FEATURES, _parsing_mrpc),
            ("qnli", _QNLI_FEATURES, _parsing_qnli),
            ("qqp", _QQP_FEATURES, _parsing_qqp),
            ("wnli", _WNLI_FEATURES, _parsing_wnli),
        ]
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Returns the dataset metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.feature,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators built from files under the manual dir."""
        # qqp ships only a train file; every other subset has all three splits.
        split_files = {datasets.Split.TRAIN: "train.json"}
        if self.config.name != "qqp":
            split_files[datasets.Split.VALIDATION] = "validation.json"
            split_files[datasets.Split.TEST] = "test.json"
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "path_list": [
                        os.path.join(
                            dl_manager.manual_dir, f"{self.config.name}/{filename}"
                        )
                    ]
                },
            )
            for split, filename in split_files.items()
        ]

    def _generate_examples(self, path_list):
        """Yields examples."""
        for path in path_list:
            try:
                for example in self.config.reading_fn(path):
                    yield self.config.parsing_fn(example)
            except Exception as e:
                # Best-effort: report and skip files that fail to read/parse.
                print(e)