jbaran committed
Commit b805ec1 (0 parents)

chore: add WSD dataset
.gitattributes ADDED
@@ -0,0 +1,55 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,334 @@
+ ---
+ annotations_creators:
+ - expert-generated
+ language:
+ - pl
+ language_creators:
+ - expert-generated
+ - found
+ license:
+ - cc-by-4.0
+ multilinguality:
+ - monolingual
+ pretty_name: wsd-polish-datasets
+ size_categories:
+ - 1M<n<10M
+ source_datasets:
+ - original
+ tags: []
+ task_categories:
+ - token-classification
+ task_ids:
+ - word-sense-disambiguation
+ ---
+ # Word Sense Disambiguation Corpora for Polish
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ - **Homepage:**
+ - **Repository:**
+ - **Paper:** https://link.springer.com/chapter/10.1007/978-3-031-08754-7_70
+ - **Point of Contact:** [email protected], [email protected]
+
+ ### Dataset Summary
+
+ `WSD Polish Datasets` is a comprehensive benchmark for the word sense disambiguation (WSD) classification task in Polish, a low-resource language.
+ It consists of 7 distinct datasets, manually annotated based on plWordNet-4.2:
+ - KPWr
+ - KPWr-100
+ - Sherlock (SPEC)
+ - Skladnica
+ - WikiGlex
+ - EmoGlex
+ - Walenty
+
+ ### Supported Tasks and Leaderboards
+
+ Word sense disambiguation (WSD), framed as token-level classification.
+
+ ### Languages
+
+ Polish (`pl`).
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ The data are stored in JSONL format. Each corpus is distributed in two segmentations: whole texts (`*_text.jsonl`) and texts divided into sentences (`*_sentences.jsonl`). A single sentence-level sample looks as follows:
+
+ ```
+ {
+   "text": "Wpierw pani Hudson została zerwana z łóżka, po czym odegrała się na mnie, a ja - na tobie.",
+   "tokens": [
+     {"index": 0, "position": [0, 6], "orth": "Wpierw", "lemma": "wpierw", "pos": "adv", "ctag": "adv"},
+     {"index": 1, "position": [7, 11], "orth": "pani", "lemma": "pani", "pos": "noun", "ctag": "subst:nom:f:sg"},
+     {"index": 2, "position": [12, 18], "orth": "Hudson", "lemma": "Hudson", "pos": "noun", "ctag": "subst:nom:f:sg"},
+     {"index": 3, "position": [19, 26], "orth": "została", "lemma": "zostać", "pos": "verb", "ctag": "praet:perf:f:sg"},
+     {"index": 4, "position": [27, 34], "orth": "zerwana", "lemma": "zerwać", "pos": "verb", "ctag": "ppas:perf:nom:f:aff:sg"},
+     {"index": 5, "position": [35, 36], "orth": "z", "lemma": "z", "pos": "prep", "ctag": "prep:gen:nwok"},
+     {"index": 6, "position": [37, 42], "orth": "łóżka", "lemma": "łóżko", "pos": "noun", "ctag": "subst:gen:n:sg"},
+     {"index": 7, "position": [42, 43], "orth": ",", "lemma": ",", "pos": "interp", "ctag": "interp"},
+     ...
+   ],
+   "phrases": [
+     {"token_indices": [10, 11], "head": 10, "lemma": "odegrać się"}
+   ],
+   "wsd": [
+     {"token_id": 0, "pl_sense": "najpierw.1.r", "plWN_syn_id": "477654", "plWN_lex_id": "718453",
+      "plWN_syn_id_new": "98ee84eb5f4611eda4930242ac130002", "plWN_lex_id_new": "8c5acff25f4611eda4930242ac130002"},
+     {"token_id": 1, "pl_sense": "niewiasta.1.n", "plWN_syn_id": "129", "plWN_lex_id": "4345",
+      "plWN_syn_id_new": "8e384db65f4611eda4930242ac130002", "plWN_lex_id_new": "71c622355f4611eda4930242ac130002"},
+     {"token_id": 3, "pl_sense": "zostać.6.v", "plWN_syn_id": "7068800", "plWN_lex_id": "7069137",
+      "plWN_syn_id_new": "9924a0215f4611eda4930242ac130002", "plWN_lex_id_new": "8cfcc4395f4611eda4930242ac130002"},
+     {"token_id": 4, "pl_sense": "zerwać.9.v", "plWN_syn_id": "81101", "plWN_lex_id": "81717",
+      "plWN_syn_id_new": "8fec998d5f4611eda4930242ac130002", "plWN_lex_id_new": "754169415f4611eda4930242ac130002"},
+     {"token_id": 6, "pl_sense": "łoże.1.n", "plWN_syn_id": "7419", "plWN_lex_id": "12010",
+      "plWN_syn_id_new": "8e88576c5f4611eda4930242ac130002", "plWN_lex_id_new": "71f360dd5f4611eda4930242ac130002"},
+     {"token_id": 10, "pl_sense": "zemścić_się.1.v", "plWN_syn_id": "62160", "plWN_lex_id": "88109",
+      "plWN_syn_id_new": "8fbbdfff5f4611eda4930242ac130002", "plWN_lex_id_new": "7568f5085f4611eda4930242ac130002"}
+   ]
+ }
+ ```
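+
+ Each `wsd` entry points back at an entry of `tokens` (via `token_id` in the raw files), so consuming a record amounts to joining the two lists. The sketch below shows that join on an abridged, inlined record shaped like the sample above (illustrative values only, not a complete record):
+
+ ```python
+ import json
+
+ # An abridged record in the format shown above (illustrative, incomplete sample).
+ line = """
+ {"text": "Wpierw pani Hudson ...",
+  "tokens": [{"index": 0, "position": [0, 6], "orth": "Wpierw", "lemma": "wpierw", "pos": "adv", "ctag": "adv"},
+             {"index": 1, "position": [7, 11], "orth": "pani", "lemma": "pani", "pos": "noun", "ctag": "subst:nom:f:sg"}],
+  "phrases": [],
+  "wsd": [{"token_id": 0, "pl_sense": "najpierw.1.r", "plWN_syn_id": "477654", "plWN_lex_id": "718453"}]}
+ """
+
+ record = json.loads(line)
+ tokens_by_index = {tok["index"]: tok for tok in record["tokens"]}
+
+ for ann in record["wsd"]:  # each annotation references a token by `token_id`
+     tok = tokens_by_index[ann["token_id"]]
+     print(tok["orth"], tok["lemma"], "->", ann["pl_sense"], ann["plWN_syn_id"])
+ ```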
+
+ ### Data Fields
+
+ Description of the JSON keys:
+ - `text`: text of the sentence (or of the whole document in the `*_text` variants)
+ - `tokens`: list of tokens produced by the tokenization process
+   - `index`: index of the token within the sentence
+   - `position`: character span of the token, given as `[start, end)` indices (start inclusive, end exclusive)
+   - `orth`: surface (orthographic) form of the word
+   - `lemma`: lemmatized form of the word
+   - `pos`: part of speech
+   - `ctag`: morphosyntactic tag
+ - `phrases`: list of multi-word expressions, each with the indices of its member tokens, its head token, and its lemma
+ - `wsd`: sense annotations for the WSD task, linking tokens to plWordNet synset and lexical-unit identifiers
+
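+ The corpora can be loaded with the Hugging Face `datasets` library, using the configuration names defined by the loading script in this repository: `<corpus>_<data_type>` with `data_type` being `text` or `sentence` (e.g. `skladnica_text`), plus `all_text` / `all_sentence` for every corpus at once. A minimal sketch; recent `datasets` releases may additionally require `trust_remote_code=True` for script-based datasets:
+
+ ```python
+ from datasets import load_dataset
+
+ # "skladnica_text" is the default configuration of the loading script.
+ dataset = load_dataset(
+     "clarin-knext/wsd_polish_datasets",
+     name="skladnica_text",
+     trust_remote_code=True,
+ )
+
+ example = dataset["train"][0]  # only a "train" split is generated
+ print(example["text"])
+ print(example["wsd"])  # token indices with plWN_syn_id / plWN_lex_id labels
+ ```
+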
+ ### Data Splits
+
+ [More Information Needed]
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ The dataset is released under the [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) license, as declared in the metadata header above.
+
+ ### Citation Information
+
+ ```bibtex
+ @InProceedings{10.1007/978-3-031-08754-7_70,
+   author="Janz, Arkadiusz
+     and Dziob, Agnieszka
+     and Oleksy, Marcin
+     and Baran, Joanna",
+   editor="Groen, Derek
+     and de Mulatier, Cl{\'e}lia
+     and Paszynski, Maciej
+     and Krzhizhanovskaya, Valeria V.
+     and Dongarra, Jack J.
+     and Sloot, Peter M. A.",
+   title="A Unified Sense Inventory for Word Sense Disambiguation in Polish",
+   booktitle="Computational Science -- ICCS 2022",
+   year="2022",
+   publisher="Springer International Publishing",
+   address="Cham",
+   pages="682--689",
+   isbn="978-3-031-08754-7"
+ }
+ ```
data/emoglex_sentences.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0360324c5edd83a3e9009d88c2962a5bd7a67d5b4b9e2e3427339bfd4cdb7505
+ size 19606435
data/emoglex_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e8770bbe03e4b1f40371fee4e865cb866587e032a25ce7c90e89386c2a3b24c
+ size 19541691
data/kpwr-100_sentences.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7964620c732f214c52c064e493023b8bccb469174cfcb631effc2260df7297f9
+ size 6783520
data/kpwr-100_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b542ac8c9efa1c05277f6921fcfd191328c087e341a5f1c62a70c277fa876d1
+ size 6786302
data/kpwr_sentences.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1762a62af051dca4bb2fec7fc77f4e29c45cf210f794baaddafc7fb46dfe7f68
+ size 55890081
data/kpwr_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b40bfebef0c98da8942f8b79ac45e28558ff3cda774d404dc5b9593db78ce8fc
+ size 55565252
data/sherlock_sentences.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3096832e730709dc94981a9e548374eca6378b0c26adcf26d6be66f12cad62a8
+ size 1905186
data/sherlock_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64048148dce064489e79bfab944d9479e04f612bcd1f033960e04a454ae8cd0d
+ size 1904051
data/skladnica_sentences.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c60b98d3a9f9b3af1bae44fbcde15c3e689e75d0e854dfbf5171ae3d88f8372b
+ size 25302985
data/skladnica_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56b5f3c7f3a2469b9150905b20db83604c5a741d62c0699ffb3e3d15f49f86f8
+ size 25157670
data/walenty_sentences.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f722f9e7371e43f77dfba35fb913a9836c9d55a3845fb0d44595b48d7655e56e
+ size 32400076
data/walenty_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ebc54c973200668a8e6d165ddf76247ec32fb37160f3ac4d6ea655b957a5647
+ size 32399556
data/wikiglex_sentences.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8135d4386adecb832b738ac9fd04eef3a3cfe542d2be95b8609bc00094661337
+ size 10155305
data/wikiglex_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89d972156cfb3ccb7bc23a05bf36bd9dd5d5716e52062179a29c6c08ca735c2d
+ size 10129402
wsd_polish_datasets.py ADDED
@@ -0,0 +1,237 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import itertools
+ import json
+ from typing import Sequence
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _CITATION = """\
+ @InProceedings{10.1007/978-3-031-08754-7_70,
+ author="Janz, Arkadiusz
+ and Dziob, Agnieszka
+ and Oleksy, Marcin
+ and Baran, Joanna",
+ editor="Groen, Derek
+ and de Mulatier, Cl{\'e}lia
+ and Paszynski, Maciej
+ and Krzhizhanovskaya, Valeria V.
+ and Dongarra, Jack J.
+ and Sloot, Peter M. A.",
+ title="A Unified Sense Inventory for Word Sense Disambiguation in Polish",
+ booktitle="Computational Science -- ICCS 2022",
+ year="2022",
+ publisher="Springer International Publishing",
+ address="Cham",
+ pages="682--689",
+ isbn="978-3-031-08754-7"
+ }
+ """
+
+ _DESCRIPTION = """\
+ Polish WSD training data manually annotated by experts according to plWordNet-4.2.
+ """
+
+ _LICENSE = "cc-by-4.0"
+
+ _BASE_URL = "https://huggingface.co/datasets/clarin-knext/wsd_polish_datasets/resolve/main/data/"
+
+ _CORPUS_NAMES = [
+     "sherlock",
+     "skladnica",
+     "wikiglex",
+     "emoglex",
+     "walenty",
+     "kpwr",
+     "kpwr-100",
+ ]
+
+ _DATA_TYPES = [
+     "sentence",
+     "text",
+ ]
+
+ _URLS = {
+     "text": {corpus: f"{_BASE_URL}{corpus}_text.jsonl" for corpus in _CORPUS_NAMES},
+     "sentence": {
+         corpus: f"{_BASE_URL}{corpus}_sentences.jsonl" for corpus in _CORPUS_NAMES
+     },
+ }
+
+
+ class WsdPolishBuilderConfig(datasets.BuilderConfig):
+     def __init__(
+         self,
+         data_urls: Sequence[str],
+         corpus: str,
+         data_type: str,
+         **kwargs,
+     ):
+         super().__init__(
+             name=f"{corpus}_{data_type}",
+             version=datasets.Version("1.0.0"),
+             **kwargs,
+         )
+
+         self.data_type = data_type
+         self.corpus = corpus
+         self.data_urls = data_urls
+         if self.data_type not in _DATA_TYPES:
+             raise ValueError(
+                 f"Corpus type {self.data_type} is not supported. Enter one of: {_DATA_TYPES}"
+             )
+         if self.corpus not in (*_CORPUS_NAMES, "all"):
+             raise ValueError(
+                 f"Corpus name `{self.corpus}` is not available. Enter one of: {(*_CORPUS_NAMES, 'all')}"
+             )
+
+
+ class WsdPolishDataset(datasets.GeneratorBasedBuilder):
+     """Polish WSD training data"""
+
+     # One config per (corpus, segmentation) pair, e.g. "skladnica_text".
+     BUILDER_CONFIGS = [
+         WsdPolishBuilderConfig(
+             corpus=corpus_name,
+             data_type=data_type,
+             data_urls=[_URLS[data_type][corpus_name]],
+             description=f"Data part covering `{corpus_name}` corpora in `{data_type}` segmentation.",
+         )
+         for corpus_name, data_type in itertools.product(_CORPUS_NAMES, _DATA_TYPES)
+     ]
+     BUILDER_CONFIGS.extend(
+         [
+             WsdPolishBuilderConfig(
+                 corpus="all",
+                 data_type=data_type,
+                 # Materialize the URLs as a list; a dict view is not a Sequence.
+                 data_urls=list(_URLS[data_type].values()),
+                 description=f"Data part covering `all` corpora in `{data_type}` segmentation.",
+             )
+             for data_type in _DATA_TYPES
+         ]
+     )
+
+     DEFAULT_CONFIG_NAME = "skladnica_text"
+
+     def _info(self) -> datasets.DatasetInfo:
+         text_features = {
+             "text": datasets.Value("string"),
+             "tokens": datasets.features.Sequence(
+                 {
+                     "position": datasets.features.Sequence(
+                         length=2,
+                         feature=datasets.Value("int32"),
+                     ),
+                     "orth": datasets.Value("string"),
+                     "lemma": datasets.Value("string"),
+                 }
+             ),
+             "phrases": datasets.features.Sequence(
+                 {
+                     "indices": datasets.features.Sequence(
+                         feature=datasets.Value("int32")
+                     ),
+                     "head": datasets.Value("int32"),
+                     "lemma": datasets.Value("string"),
+                 }
+             ),
+             "wsd": datasets.features.Sequence(
+                 {
+                     "index": datasets.Value("int32"),
+                     "plWN_syn_id": datasets.Value("string"),
+                     "plWN_lex_id": datasets.Value("string"),
+                 }
+             ),
+         }
+         if self.config.data_type == "sentence":
+             features = datasets.Features(
+                 {"sentences": datasets.features.Sequence(text_features)}
+             )
+         else:
+             features = datasets.Features(text_features)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         filepaths = dl_manager.download_and_extract(self.config.data_urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepaths": filepaths,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepaths: Sequence[str]):
+         key_iter = 0
+         for filepath in filepaths:
+             with open(filepath, encoding="utf-8") as f:
+                 for data in (json.loads(line) for line in f):
+                     if self.config.data_type == "sentence":
+                         yield key_iter, {
+                             "sentences": [
+                                 self._process_example(sent)
+                                 for sent in data["sentences"]
+                             ]
+                         }
+                     else:
+                         # `context_file` is raw-dump metadata outside the feature
+                         # schema; drop it (if present) before mapping the record.
+                         data.pop("context_file", None)
+                         yield key_iter, self._process_example(data)
+
+                     key_iter += 1
+
+     @staticmethod
+     def _process_example(data: dict) -> dict:
+         # Keep only the fields declared in `_info`; extra keys present in the
+         # raw JSONL (e.g. `pos`, `ctag`) are intentionally omitted.
+         return {
+             "text": data["text"],
+             "tokens": [
+                 {
+                     "position": tok["position"],
+                     "orth": tok["orth"],
+                     "lemma": tok["lemma"],
+                 }
+                 for tok in data["tokens"]
+             ],
+             "wsd": [
+                 {
+                     "index": tok["index"],
+                     "plWN_syn_id": tok["plWN_syn_id"],
+                     "plWN_lex_id": tok["plWN_lex_id"],
+                 }
+                 for tok in data["wsd"]
+             ],
+             "phrases": [
+                 {
+                     "indices": tok["indices"],
+                     "head": tok["head"],
+                     "lemma": tok["lemma"],
+                 }
+                 for tok in data["phrases"]
+             ],
+         }
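+
+
+ if __name__ == "__main__":
+     # Quick local sanity check (a minimal sketch, not required by the
+     # `datasets` builder API): list every config name together with the
+     # JSONL file(s) it reads.
+     for config in WsdPolishDataset.BUILDER_CONFIGS:
+         print(f"{config.name}: {', '.join(config.data_urls)}")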