cdminix committed
Commit 1022f4b · 1 Parent(s): cc3528a

initial commit

Files changed (1)
  1. iwslt2011.py +274 -0
iwslt2011.py ADDED
@@ -0,0 +1,274 @@
"""The IWSLT Challenge Dataset, adapted to punctuation prediction as described by Ueffing et al. (2013)."""

from enum import Enum
from typing import Union
from abc import abstractmethod
import logging
import itertools

#import paired
from xml.dom import minidom
import nltk
import datasets
import numpy as np

nltk.download("punkt")
tknzr = nltk.tokenize.TweetTokenizer()

_CITATION = """\
@inproceedings{Ueffing2013,
  title={Improved models for automatic punctuation prediction for spoken and written text},
  author={B. Ueffing and M. Bisani and P. Vozila},
  booktitle={INTERSPEECH},
  year={2013}
}
@article{Federico2011,
  author = {M. Federico and L. Bentivogli and M. Paul and S. Stüker},
  year = {2011},
  month = {01},
  pages = {},
  title = {Overview of the IWSLT 2011 Evaluation Campaign},
  journal = {Proceedings of the International Workshop on Spoken Language Translation (IWSLT), San Francisco, CA}
}
"""

_DESCRIPTION = """\
Both manual transcripts and ASR outputs from the IWSLT2011 speech translation evaluation campaign are often used for the related \
punctuation annotation task. This dataset takes care of preprocessing said transcripts and automatically inserts the punctuation marks \
given in the manual transcripts into the ASR outputs using Levenshtein alignment.
"""

_VERSION = "0.0.1"


def window(a, w=4, o=2):
    """Return overlapping windows of length ``w`` over ``a``, advancing by ``o`` elements."""
    sh = (a.size - w + 1, w)
    st = a.strides * 2
    view = np.lib.stride_tricks.as_strided(a, strides=st, shape=sh)[0::o]
    return view.copy()

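# Illustrative example (not executed): with the helper above,
#   window(np.arange(6), w=4, o=2)
# returns
#   [[0, 1, 2, 3],
#    [2, 3, 4, 5]]
# i.e. fixed-length windows of size ``w`` whose start positions advance by ``o``,
# so consecutive windows overlap by ``w - o`` elements.
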
class Punctuation(Enum):
    NONE = "<none>"
    PERIOD = "<period>"
    COMMA = "<comma>"
    QUESTION = "<question>"

class LabelSubword(Enum):
    IGNORE = "<ignore>"
    NONE = "<none>"


class Task(Enum):
    TAGGING = 0
    SEQ2SEQ = 1

class TaggingTask:
    """Treat punctuation prediction as a sequence tagging problem."""

    def __init__(
        self, window_size=120, window_stride_in_percent=0.5, include_reference=False
    ):
        self.window_size = window_size
        self.window_stride_in_percent = window_stride_in_percent
        self.include_reference = include_reference

    def __eq__(self, other):
        return Task.TAGGING == other

class DecodingStrategy:
    """Strategy used to decode results."""

    def __init__(
        self, task: Union[TaggingTask]
    ):
        self.task = task

    @abstractmethod
    def decode(self):
        pass

class AverageDecodingStrategy:
    """Averages predictions together."""

    def decode(self):
        pass

class IWSLT11Config(datasets.BuilderConfig):
    """BuilderConfig for the IWSLT11 dataset."""

    def __init__(
        self,
        task: Union[TaggingTask] = TaggingTask(),
        segmented: bool = False,
        asr_or_ref: str = "ref",
        decoder: DecodingStrategy = AverageDecodingStrategy(),
        tokenizer=None,
        label_subword: LabelSubword = LabelSubword.IGNORE,
        **kwargs
    ):
        """BuilderConfig for IWSLT2011.
        Args:
          task: the task to prepare the dataset for.
          segmented: whether the segmentation present in IWSLT2011 should be respected; segmentation is removed by default.
          **kwargs: keyword arguments forwarded to super.
        """
        self.task = task
        self.segmented = segmented
        self.asr_or_ref = asr_or_ref
        self.decoder = decoder
        self.punctuation = [
            Punctuation.NONE,
            Punctuation.PERIOD,
            Punctuation.COMMA,
            Punctuation.QUESTION,
        ]
        if label_subword == LabelSubword.IGNORE:
            self.punctuation.append(LabelSubword.IGNORE)
        self.label_subword = label_subword
        self.tokenizer = tokenizer
        super(IWSLT11Config, self).__init__(**kwargs)

    def __eq__(self, other):
        return True

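# Usage sketch (illustrative, not executed by the loading script). The config above
# expects a Hugging Face tokenizer to be passed through ``load_dataset``, as the
# error raised in ``_generate_examples`` below indicates; the model name is only an example.
#
#   from datasets import load_dataset
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = load_dataset("iwslt2011.py", "ref", tokenizer=tokenizer)
#
# The "ref" and "asr" config names select the manual transcripts or the ASR output.
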
class IWSLT11(datasets.GeneratorBasedBuilder):
    """The IWSLT11 Dataset, adapted for punctuation prediction."""

    BUILDER_CONFIGS = [
        IWSLT11Config(name="ref", asr_or_ref="ref"),
        IWSLT11Config(name="asr", asr_or_ref="asr"),
    ]

    def _info(self):
        if self.config.task == Task.TAGGING:
            return datasets.DatasetInfo(
                description=_DESCRIPTION,
                features=datasets.Features(
                    {
                        "ids": datasets.Sequence(datasets.Value("int32")),
                        "tokens": datasets.Sequence(datasets.Value("string")),
                        "labels": datasets.Sequence(
                            datasets.features.ClassLabel(
                                names=[p.name for p in self.config.punctuation]
                            )
                        ),
                    }
                ),
                supervised_keys=None,
                homepage="http://iwslt2011.org/doku.php",
                citation=_CITATION,
                version=_VERSION,
            )

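    # Illustrative shape of a single generated example (values are made up):
    #   {
    #       "ids":    [0, 1, 2, ...],                # positions of the window's words in the full transcript
    #       "tokens": ["thank", "you", "for", ...],  # one window of words
    #       "labels": ["NONE", "NONE", "NONE", ...], # one label per subword position once the tokenizer is applied
    #   }
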
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        urls_to_download = {
            "train": "https://raw.githubusercontent.com/IsaacChanghau/neural_sequence_labeling/master/data/raw/LREC_converted/train.txt",
            "valid": "https://github.com/IsaacChanghau/neural_sequence_labeling/blob/master/data/raw/LREC_converted/dev.txt?raw=true",
            "test_ref": "https://github.com/IsaacChanghau/neural_sequence_labeling/raw/master/data/raw/LREC_converted/ref.txt",
            "test_asr": "https://github.com/IsaacChanghau/neural_sequence_labeling/raw/master/data/raw/LREC_converted/asr.txt",
        }
        files = dl_manager.download_and_extract(urls_to_download)

        if self.config.asr_or_ref == "asr":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": files["train"]
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": files["valid"]
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": files["test_asr"]
                    },
                ),
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": files["train"]
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": files["valid"]
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": files["test_ref"]
                    },
                ),
            ]

    def _generate_examples(self, filepath):
        logging.info("⏳ Generating examples from = %s", filepath)

        text = open(filepath).read()
        text = (
            text
            .replace(',COMMA', ',')
            .replace('.PERIOD', '.')
            .replace('?QUESTIONMARK', '?')
        )
        tokens = []
        labels = []
        # punctuation tokens label the word that precedes them; every other word starts out as NONE
        for token in tknzr.tokenize(text):
            if token in [',', '.', '?']:
                if ',' in token:
                    labels[-1] = Punctuation.COMMA
                if '.' in token:
                    labels[-1] = Punctuation.PERIOD
                if '?' in token:
                    labels[-1] = Punctuation.QUESTION
            else:
                labels.append(Punctuation.NONE)
                tokens.append(token)

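        # Illustrative example (made-up input): the raw text
        #   "thank you ,COMMA thank you very much .PERIOD"
        # becomes, after the replacements above,
        #   tokens = ["thank", "you", "thank", "you", "very", "much"]
        #   labels = [NONE,    COMMA, NONE,    NONE,  NONE,   PERIOD]
        # i.e. each label describes the punctuation mark that follows its token.
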
        tokens = np.array(tokens)
        labels = np.array(labels)
        token_len = len(tokens)
        assert len(tokens) == len(labels)

        if self.config.task == Task.TAGGING:
            def apply_window(l):
                return window(
                    l,
                    self.config.task.window_size,
                    int(self.config.task.window_size * self.config.task.window_stride_in_percent),
                )
            ids = apply_window(np.arange(len(tokens)))
            tokens = apply_window(tokens)
            labels = apply_window(labels)
            # each iteration handles one window of the transcript
            for i, (ids, tokens, labels) in enumerate(zip(ids, tokens, labels)):
                if self.config.tokenizer is None:
                    raise ValueError('tokenizer argument has to be passed to load_dataset')
                else:
                    tokenized = self.config.tokenizer([tokens.tolist()], is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True)
                    offsets = np.array(tokenized['offset_mapping'][0])
                    # subword positions default to the configured placeholder label (IGNORE by default)
                    enc_labels = np.array([self.config.label_subword.name] * len(offsets), dtype=object)
                    # todo: check if performance changes if in-word is set to NONE
                    # only the first subword of each word (offset starting at 0 and non-empty) receives the word-level label
                    enc_labels[(offsets[:, 0] == 0) & (offsets[:, 1] != 0)] = [l.name for l in labels]
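                    # Illustrative example (made-up tokenizer output): for the window words
                    #   ["thank", "staying"]
                    # a subword tokenizer might return the offsets
                    #   [(0, 0), (0, 5), (0, 4), (4, 7), (0, 0)]   # [CLS], "thank", "stay", "##ing", [SEP]
                    # so the mask above selects positions 1 and 2 (the first subword of each word),
                    # which receive the word-level labels, while [CLS], [SEP] and "##ing" keep the placeholder.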
                    #print(enc_labels)
                    # not needed as long as the same tokenizer is used later?
                    # tokens = {k:v[0] for k,v in tokenized if k != 'offset_mapping'}
                    labels = enc_labels
                yield i, {
                    "ids": ids,
                    "tokens": tokens,
                    "labels": labels,
                }
        logging.info(f"Loaded number of tokens = {token_len}")