Commit e5dbe31 by gabrielaltay (1 parent: 0e19a9c)

upload hubscripts/ntcir_13_medweb_hub.py to hub from bigbio repo

Files changed (1)
  1. ntcir_13_medweb.py +409 -0
ntcir_13_medweb.py ADDED
@@ -0,0 +1,409 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ The NTCIR-13 MedWeb (Medical Natural Language Processing for Web Document) task is a
+ multi-label classification task in which labels for eight diseases/symptoms must be
+ assigned to each tweet. Given pseudo-tweets, the output is a Positive (p) or Negative (n)
+ label for each of the eight diseases/symptoms. Systems built for this task can be applied
+ almost directly as a core engine for real-world applications.
+
+ The task provides pseudo-Twitter messages as a cross-language, multi-label corpus
+ covering three languages (Japanese, English, and Chinese), annotated with eight
+ labels: influenza, diarrhea/stomachache, hay fever, cough/sore throat, headache,
+ fever, runny nose, and cold.
+
+ The dataset consists of a single archive file:
+ - ntcir13_MedWeb_taskdata.zip
+
+ which can be obtained after filling out a form describing the intended usage context
+ at this URL: http://www.nii.ac.jp/dsc/idr/en/ntcir/ntcir.html
+
+ The zip archive contains a folder named 'MedWeb_TestCollection' with the following
+ individual data files:
+ ├── NTCIR-13_MedWeb_en_test.xlsx
+ ├── NTCIR-13_MedWeb_en_training.xlsx
+ ├── NTCIR-13_MedWeb_ja_test.xlsx
+ ├── NTCIR-13_MedWeb_ja_training.xlsx
+ ├── NTCIR-13_MedWeb_zh_test.xlsx
+ └── NTCIR-13_MedWeb_zh_training.xlsx
+
+ The Excel sheets contain a training and test split for each of the languages
+ ('en' stands for English, 'ja' for Japanese and 'zh' for (simplified) Chinese).
+
+ The archive file containing this dataset must be on the user's local machine
+ in a single directory that is passed to `datasets.load_dataset` via the
+ `data_dir` kwarg. This loader script reads the archive file directly
+ (i.e. the user should not uncompress, untar or unzip any of the files).
+
+ For more information on this dataset, see:
+ http://research.nii.ac.jp/ntcir/permission/ntcir-13/perm-en-MedWeb.html
+ """
+
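+ # Example (sketch): loading the default source configuration from a local copy of
+ # the archive. The directory path below is a placeholder; it must contain
+ # ntcir13_MedWeb_taskdata.zip.
+ #
+ #     from datasets import load_dataset
+ #
+ #     dataset = load_dataset(
+ #         "ntcir_13_medweb.py",
+ #         name="ntcir_13_medweb_source",
+ #         data_dir="/path/to/archive/directory",
+ #     )
+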
+ import re
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from .bigbiohub import text_features
+ from .bigbiohub import text2text_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English', 'Chinese', 'Japanese']
+ _PUBMED = False
+ _LOCAL = True
+ _CITATION = """\
+ @article{wakamiya2017overview,
+   author  = {Shoko Wakamiya and Mizuki Morita and Yoshinobu Kano and Tomoko Ohkuma and Eiji Aramaki},
+   title   = {Overview of the NTCIR-13 MedWeb Task},
+   journal = {Proceedings of the 13th NTCIR Conference on Evaluation of Information Access Technologies (NTCIR-13)},
+   year    = {2017},
+   url     = {http://research.nii.ac.jp/ntcir/workshop/OnlineProceedings13/pdf/ntcir/01-NTCIR13-OV-MEDWEB-WakamiyaS.pdf},
+ }
+ """
+
+ _DATASETNAME = "ntcir_13_medweb"
+ _DISPLAYNAME = "NTCIR-13 MedWeb"
+
+ _DESCRIPTION = """\
+ The NTCIR-13 MedWeb (Medical Natural Language Processing for Web Document) task is a
+ multi-label classification task in which labels for eight diseases/symptoms must be
+ assigned to each tweet. Given pseudo-tweets, the output is a Positive (p) or Negative (n)
+ label for each of the eight diseases/symptoms. Systems built for this task can be applied
+ almost directly as a core engine for real-world applications.
+
+ The task provides pseudo-Twitter messages as a cross-language, multi-label corpus
+ covering three languages (Japanese, English, and Chinese), annotated with eight
+ labels: influenza, diarrhea/stomachache, hay fever, cough/sore throat, headache,
+ fever, runny nose, and cold.
+
+ For more information, see:
+ http://research.nii.ac.jp/ntcir/permission/ntcir-13/perm-en-MedWeb.html
+
+ Because the dataset also provides a parallel corpus of pseudo-tweets in English,
+ Japanese and Chinese, it can also be used to train translation models between
+ these three languages.
+ """
+
+ _HOMEPAGE = "http://research.nii.ac.jp/ntcir/permission/ntcir-13/perm-en-MedWeb.html"
+
+ _LICENSE = 'Creative Commons Attribution 4.0 International'
+
+ # NOTE: Data can only be obtained (locally) by first filling out a form to provide
+ # information about the usage context under this link: http://www.nii.ac.jp/dsc/idr/en/ntcir/ntcir.html
+ _URLS = {
+     _DATASETNAME: "ntcir13_MedWeb_taskdata.zip",
+ }
+
+ _SUPPORTED_TASKS = [
+     Tasks.TRANSLATION,
+     Tasks.TEXT_CLASSIFICATION,
+ ]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class NTCIR13MedWebDataset(datasets.GeneratorBasedBuilder):
+     """
+     The NTCIR-13 MedWeb (Medical Natural Language Processing for Web Document) task is a
+     multi-label classification task in which labels for eight diseases/symptoms must be
+     assigned to each tweet. Given pseudo-tweets, the output is a Positive (p) or Negative (n)
+     label for each of the eight diseases/symptoms. Systems built for this task can be applied
+     almost directly as a core engine for real-world applications.
+
+     The task provides pseudo-Twitter messages as a cross-language, multi-label corpus
+     covering three languages (Japanese, English, and Chinese), annotated with eight
+     labels: influenza, diarrhea/stomachache, hay fever, cough/sore throat, headache,
+     fever, runny nose, and cold.
+
+     For more information, see:
+     http://research.nii.ac.jp/ntcir/permission/ntcir-13/perm-en-MedWeb.html
+
+     Because the dataset also provides a parallel corpus of pseudo-tweets in English,
+     Japanese and Chinese, it can also be used to train translation models between
+     these three languages.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         # Source configuration - all classification data for all languages
+         BigBioConfig(
+             name="ntcir_13_medweb_source",
+             version=SOURCE_VERSION,
+             description="NTCIR 13 MedWeb source schema",
+             schema="source",
+             subset_id="ntcir_13_medweb_source",
+         )
+     ]
+     for language_name, language_code in (
+         ("Japanese", "ja"),
+         ("English", "en"),
+         ("Chinese", "zh"),
+     ):
+         # NOTE: BigBio text classification configurations
+         # Text classification data for each language
+         BUILDER_CONFIGS.append(
+             BigBioConfig(
+                 name=f"ntcir_13_medweb_classification_{language_code}_bigbio_text",
+                 version=BIGBIO_VERSION,
+                 description=f"NTCIR 13 MedWeb BigBio {language_name} Classification schema",
+                 schema="bigbio_text",
+                 subset_id=f"ntcir_13_medweb_classification_{language_code}_bigbio_text",
+             ),
+         )
+
+         for target_language_name, target_language_code in (
+             ("Japanese", "ja"),
+             ("English", "en"),
+             ("Chinese", "zh"),
+         ):
+             # NOTE: BigBio text to text (translation) configurations
+             # Parallel text corpora for all pairs of languages
+             if language_name != target_language_name:
+                 BUILDER_CONFIGS.append(
+                     BigBioConfig(
+                         name=f"ntcir_13_medweb_translation_{language_code}_{target_language_code}_bigbio_t2t",
+                         version=BIGBIO_VERSION,
+                         description=(
+                             f"NTCIR 13 MedWeb BigBio {language_name} -> {target_language_name} translation schema"
+                         ),
+                         schema="bigbio_t2t",
+                         subset_id=f"ntcir_13_medweb_translation_{language_code}_{target_language_code}_bigbio_t2t",
+                     ),
+                 )
+
+     DEFAULT_CONFIG_NAME = "ntcir_13_medweb_source"
+
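+     # The loops above generate one classification config per language and one
+     # translation config per ordered language pair; generated names follow the
+     # f-string patterns above, for example (illustrative only):
+     #   ntcir_13_medweb_classification_ja_bigbio_text
+     #   ntcir_13_medweb_translation_en_zh_bigbio_t2t
+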
+     def _info(self) -> datasets.DatasetInfo:
+         # Create the source schema; this schema will keep all keys/information/labels
+         # as close to the original dataset as possible.
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "ID": datasets.Value("string"),
+                     "Language": datasets.Value("string"),
+                     "Tweet": datasets.Value("string"),
+                     "Influenza": datasets.Value("string"),
+                     "Diarrhea": datasets.Value("string"),
+                     "Hayfever": datasets.Value("string"),
+                     "Cough": datasets.Value("string"),
+                     "Headache": datasets.Value("string"),
+                     "Fever": datasets.Value("string"),
+                     "Runnynose": datasets.Value("string"),
+                     "Cold": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "bigbio_text":
+             features = text_features
+         elif self.config.schema == "bigbio_t2t":
+             features = text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         if self.config.data_dir is None:
+             raise ValueError(
+                 "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
+             )
+         else:
+             data_dir = self.config.data_dir
+
+         raw_data_dir = dl_manager.download_and_extract(
+             str(Path(data_dir) / _URLS[_DATASETNAME])
+         )
+
+         data_dir = Path(raw_data_dir) / "MedWeb_TestCollection"
+
+         if self.config.schema == "source":
+             filepaths = {
+                 datasets.Split.TRAIN: sorted(Path(data_dir).glob("*_training.xlsx")),
+                 datasets.Split.TEST: sorted(Path(data_dir).glob("*_test.xlsx")),
+             }
+         elif self.config.schema == "bigbio_text":
+             # NOTE: Identify the language for the chosen subset using regex
+             pattern = r"ntcir_13_medweb_classification_(?P<language_code>ja|en|zh)_bigbio_text"
+             match = re.search(pattern=pattern, string=self.config.subset_id)
+
+             if not match:
+                 raise ValueError(
+                     "Unable to parse language code for text classification from dataset subset id: "
+                     f"'{self.config.subset_id}'. Attempted to parse using this regex pattern: "
+                     f"'{pattern}' but failed to get a match."
+                 )
+
+             language_code = match.group("language_code")
+
+             filepaths = {
+                 datasets.Split.TRAIN: (
+                     Path(data_dir) / f"NTCIR-13_MedWeb_{language_code}_training.xlsx",
+                 ),
+                 datasets.Split.TEST: (
+                     Path(data_dir) / f"NTCIR-13_MedWeb_{language_code}_test.xlsx",
+                 ),
+             }
+         elif self.config.schema == "bigbio_t2t":
+             pattern = r"ntcir_13_medweb_translation_(?P<source_language_code>ja|en|zh)_(?P<target_language_code>ja|en|zh)_bigbio_t2t"
+             match = re.search(pattern=pattern, string=self.config.subset_id)
+
+             if not match:
+                 raise ValueError(
+                     "Unable to parse source and target language codes for translation "
+                     f"from dataset subset id: '{self.config.subset_id}'. Attempted to parse "
+                     f"using this regex pattern: '{pattern}' but failed to get a match."
+                 )
+
+             source_language_code = match.group("source_language_code")
+             target_language_code = match.group("target_language_code")
+
+             filepaths = {
+                 datasets.Split.TRAIN: (
+                     Path(data_dir) / f"NTCIR-13_MedWeb_{source_language_code}_training.xlsx",
+                     Path(data_dir) / f"NTCIR-13_MedWeb_{target_language_code}_training.xlsx",
+                 ),
+                 datasets.Split.TEST: (
+                     Path(data_dir) / f"NTCIR-13_MedWeb_{source_language_code}_test.xlsx",
+                     Path(data_dir) / f"NTCIR-13_MedWeb_{target_language_code}_test.xlsx",
+                 ),
+             }
+
+         return [
+             datasets.SplitGenerator(
+                 name=split_name,
+                 gen_kwargs={
+                     "filepaths": filepaths[split_name],
+                     "split": split_name,
+                 },
+             )
+             for split_name in (datasets.Split.TRAIN, datasets.Split.TEST)
+         ]
+
+     def _language_from_filepath(self, filepath: Path):
+         pattern = r"NTCIR-13_MedWeb_(?P<language_code>ja|en|zh)_(training|test).xlsx"
+         match = re.search(pattern=pattern, string=filepath.name)
+
+         if not match:
+             raise ValueError(
+                 "Unable to parse language code from filename. "
+                 f"Filename was: '{filepath.name}' and tried to parse using this "
+                 f"regex pattern: '{pattern}' but failed to get a match."
+             )
+
+         return match.group("language_code")
+
+     def _generate_examples(
+         self, filepaths: Tuple[Path], split: str
+     ) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         if self.config.schema == "source":
+             dataframes = []
+
+             for filepath in filepaths:
+                 language_code = self._language_from_filepath(filepath)
+                 df = pd.read_excel(filepath, sheet_name=f"{language_code}_{split}")
+                 df["Language"] = language_code
+                 dataframes.append(df)
+
+             df = pd.concat(dataframes)
+
+             for row_index, row in enumerate(df.itertuples(index=False)):
+                 yield row_index, row._asdict()
+
+         elif self.config.schema == "bigbio_text":
+             (filepath,) = filepaths
+             language_code = self._language_from_filepath(filepath)
+
+             df = pd.read_excel(
+                 filepath,
+                 sheet_name=f"{language_code}_{split}",
+             )
+
+             label_column_names = [
+                 column_name
+                 for column_name in df.columns
+                 if column_name not in ("ID", "Tweet")
+             ]
+             labels = (
+                 df[label_column_names]
+                 .apply(lambda row: row[row == "p"].index.tolist(), axis=1)
+                 .values
+             )
+
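+             # For example, a row with Influenza == "p" and Cough == "p" and "n" in the
+             # remaining label columns yields ["Influenza", "Cough"]; a row with all "n"
+             # yields an empty label list (illustrative values, not actual records).
+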
+             ids = df["ID"]
+             tweets = df["Tweet"]
+
+             for row_index, (record_labels, record_id, tweet) in enumerate(
+                 zip(labels, ids, tweets)
+             ):
+                 yield row_index, {
+                     "id": record_id,
+                     "text": tweet,
+                     "document_id": filepath.stem,
+                     "labels": record_labels,
+                 }
+         elif self.config.schema == "bigbio_t2t":
+             source_filepath, target_filepath = filepaths
+
+             source_language_code = self._language_from_filepath(source_filepath)
+             target_language_code = self._language_from_filepath(target_filepath)
+
+             source_df = pd.read_excel(
+                 source_filepath,
+                 sheet_name=f"{source_language_code}_{split}",
+             )[["ID", "Tweet"]]
+             source_df["id_int"] = source_df["ID"].str.extract(r"(\d+)").astype(int)
+
+             target_df = pd.read_excel(
+                 target_filepath,
+                 sheet_name=f"{target_language_code}_{split}",
+             )[["ID", "Tweet"]]
+             target_df["id_int"] = target_df["ID"].str.extract(r"(\d+)").astype(int)
+
+             df_combined = source_df.merge(
+                 target_df, on="id_int", suffixes=("_source", "_target")
+             )[["id_int", "Tweet_source", "Tweet_target"]]
+
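+             # Tweets are paired on the numeric part of their IDs, so each source-language
+             # pseudo-tweet is matched with its parallel target-language counterpart.
+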
+             for row_index, record in enumerate(df_combined.itertuples(index=False)):
+                 row = record._asdict()
+                 yield row_index, {
+                     "id": f"{row['id_int']}_{source_language_code}_{target_language_code}",
+                     "document_id": f"t2t_{source_language_code}_{target_language_code}",
+                     "text_1": row["Tweet_source"],
+                     "text_2": row["Tweet_target"],
+                     "text_1_name": source_language_code,
+                     "text_2_name": target_language_code,
+                 }