Genius1237 committed on
Commit
d948b6d
1 Parent(s): d0e6d22

Add scripts used for training/evaluating multilingual models

Files changed (2)
  1. politeness_regressor.py +273 -0
  2. requirements.txt +7 -0
politeness_regressor.py ADDED
@@ -0,0 +1,273 @@
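+ # Fine-tunes a pretrained transformer (via AutoModelForSequenceClassification)
+ # as either a politeness regressor (scalar score) or a binary classifier,
+ # with optional strategy-word or random masking of the input sentences.
+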
+ from argparse import ArgumentParser
+ from typing import List
+
+ import random
+
+ import numpy as np
+ import pandas as pd
+ import pytorch_lightning as pl
+ import sklearn.metrics
+ import sklearn.model_selection
+ import torch
+ import torch.optim
+ import torch.utils.data
+ import transformers
+
+ try:
+     from polyglot.text import Text
+ except ImportError:
+     print("polyglot not installed. Cannot use --strategy_words or --random_masking_ratio")
+
+ class MyDataModule(pl.LightningDataModule):
+     def __init__(self, train_file, test_file, binary, tokenizer, max_length, batch_size, strategy_words_replacement_negate=False, strategy_words=None, random_masking_ratio=None):
+         super().__init__()
+         self.train_file = train_file
+         self.test_file = test_file
+         self.binary = binary
+         self.max_length = max_length
+         self.batch_size = batch_size
+         self.tokenizer = tokenizer
+
+         if strategy_words:
+             # Flatten all but the first column of the lexicon CSV into one
+             # set of strategy words.
+             self.strategy_words = pd.read_csv(strategy_words)
+             self.strategy_words = set(self.strategy_words.values[:, 1:].reshape(-1))
+         else:
+             self.strategy_words = None
+         self.strategy_words_replacement_negate = strategy_words_replacement_negate
+         self.random_masking_ratio = random_masking_ratio
+
+     @staticmethod
+     def read_file(file_name, text_only=False):
+         if file_name.split(".")[-1] == "csv":
+             # Labelled data: a CSV with 'sentence' and 'score' columns.
+             df = pd.read_csv(file_name)
+             data = list(zip(df['sentence'], df['score']))
+             if text_only:
+                 data = [t[0] for t in data]
+         else:
+             # Unlabelled data: plain text, one sentence per line.
+             data = open(file_name).read().strip().split('\n')
+         return data
+
+     def setup(self, stage=None):
+         if self.train_file:
+             self.train_data = MyDataModule.read_file(self.train_file)
+             # Hold out the last 20% of the training file for validation.
+             self.train_data, self.val_data = sklearn.model_selection.train_test_split(self.train_data, shuffle=False, test_size=0.2)
+         if self.test_file:
+             self.test_data = MyDataModule.read_file(self.test_file)
+
+     def prepare_dataloader(self, mode):
+         if mode == "train":
+             data = self.train_data
+         elif mode == "val":
+             data = self.val_data
+         else:
+             data = self.test_data
+
+         tokenized = MyDataModule.tokenize([t[0] for t in data], self.tokenizer, self.max_length, self.strategy_words_replacement_negate, self.strategy_words, self.random_masking_ratio)
+         if self.binary:
+             labels = torch.tensor([t[1] > 0 for t in data], dtype=torch.long)
+         else:
+             labels = torch.tensor([t[1] for t in data])
+
+         if mode == "train" and self.binary:
+             # Class-balanced sampling: weight each example by the inverse
+             # frequency of its class so both classes are drawn equally often.
+             weights = torch.zeros_like(labels)
+             weights[labels == 0] = labels.shape[0] - labels.sum()
+             weights[labels == 1] = labels.sum()
+             return torch.utils.data.DataLoader(torch.utils.data.TensorDataset(tokenized['input_ids'], tokenized['attention_mask'], labels), batch_size=self.batch_size, sampler=torch.utils.data.WeightedRandomSampler(1 / weights, len(weights), replacement=True))
+         else:
+             return torch.utils.data.DataLoader(torch.utils.data.TensorDataset(tokenized['input_ids'], tokenized['attention_mask'], labels), batch_size=self.batch_size)
+
+     @staticmethod
+     def tokenize(data: List[str], tokenizer, max_length, strategy_words_replacement_negate, strategy_words, random_masking_ratio):
+         if strategy_words is not None or random_masking_ratio is not None:
+             masked_data = []
+             for sentence in data:
+                 words = [t.lower() for t in Text(sentence).words]
+                 if strategy_words:
+                     # Keep a word iff (word is a strategy word) XOR the negate
+                     # flag; mask everything else.
+                     words = [t if ((t in strategy_words) != strategy_words_replacement_negate) else tokenizer.mask_token for t in words]
+                 elif random_masking_ratio:
+                     # Keep each word with probability random_masking_ratio,
+                     # otherwise replace it with the mask token.
+                     words = [t if random.random() <= random_masking_ratio else tokenizer.mask_token for t in words]
+                 masked_data.append(' '.join(words))
+             return tokenizer(masked_data, padding="max_length", truncation=True, max_length=max_length, return_tensors="pt")
+         else:
+             return tokenizer(data, padding="max_length", truncation=True, max_length=max_length, return_tensors="pt")
+
+     def train_dataloader(self):
+         return self.prepare_dataloader("train")
+
+     def test_dataloader(self):
+         return self.prepare_dataloader("test")
+
+     def val_dataloader(self):
+         return self.prepare_dataloader("val")
+
+
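+ # With num_labels=2 the sequence-classification head is trained as a softmax
+ # classifier (cross-entropy); with num_labels=1 transformers treats the task
+ # as regression and trains the single output with MSE against the raw scores.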
+ class RegressionModel(pl.LightningModule):
+     def __init__(self, pretrained_model, binary, learning_rate, num_warmup_steps, tokenizer):
+         super().__init__()
+         self.save_hyperparameters()
+         self.pretrained_model = pretrained_model
+         self.binary = binary
+         self.learning_rate = learning_rate
+         self.num_warmup_steps = num_warmup_steps
+         self.tokenizer = tokenizer
+         self.model = transformers.AutoModelForSequenceClassification.from_pretrained(self.pretrained_model, num_labels=2 if self.binary else 1)
+
+     def forward(self, **kwargs):
+         return self.model(**kwargs)
+
+     def training_step(self, batch, batch_idx):
+         outputs = self.forward(input_ids=batch[0], attention_mask=batch[1], labels=batch[2])
+         loss = outputs['loss']
+         ret = {"loss": loss}
+         if self.binary:
+             acc = (batch[2] == torch.argmax(outputs['logits'], dim=1)).float().mean().item()
+             ret["acc"] = acc
+         else:
+             # Flatten the (batch, 1) logits before comparing against the labels.
+             rmse = (torch.mean((batch[2] - outputs['logits'].view(-1)) ** 2) ** 0.5).item()
+             ret["rmse"] = rmse
+
+         return {"loss": loss, "log": ret}
+
+     def configure_optimizers(self):
+         optimizer = torch.optim.AdamW(self.parameters(), lr=self.learning_rate)
+         # Linear warmup then linear decay, stepped once per optimizer update
+         # over the full training run.
+         num_training_steps = len(self.trainer.datamodule.train_dataloader()) // self.trainer.accumulate_grad_batches * self.trainer.max_epochs
+         scheduler = transformers.get_linear_schedule_with_warmup(optimizer, self.num_warmup_steps, num_training_steps)
+         return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
+
+     def test_step(self, batch, batch_idx):
+         return self.validation_step(batch, batch_idx, mode="test")
+
+     def validation_step(self, batch, batch_idx, mode="val"):
+         outputs = self.forward(input_ids=batch[0], attention_mask=batch[1], labels=batch[2])
+         loss = outputs['loss']
+         self.log("{}_loss".format(mode), loss, prog_bar=True)
+
+         # Collect predictions and gold labels per batch; the epoch-level
+         # metrics are computed in validation_epoch_end.
+         ret = {"loss": loss}
+         if self.binary:
+             ret["preds"] = torch.argmax(outputs['logits'], dim=1).tolist()
+         else:
+             ret["preds"] = outputs['logits'].view(-1).tolist()
+         ret["gold"] = batch[2].tolist()
+
+         return {"loss": loss, "log": ret}
+
+     def validation_epoch_end(self, outputs, mode="val"):
+         gold = []
+         preds = []
+         for batch in outputs:
+             gold.extend(batch['log']['gold'])
+             preds.extend(batch['log']['preds'])
+         if self.binary:
+             f1 = sklearn.metrics.f1_score(gold, preds)
+             acc = sklearn.metrics.accuracy_score(gold, preds)
+             self.log("{}_acc".format(mode), acc, prog_bar=True)
+             self.log("{}_f1".format(mode), f1, prog_bar=True)
+         else:
+             rmse = (torch.mean((torch.tensor(gold) - torch.tensor(preds)) ** 2) ** 0.5).item()
+             self.log("{}_rmse".format(mode), rmse, prog_bar=True)
+
+     def test_epoch_end(self, outputs):
+         return self.validation_epoch_end(outputs, mode="test")
+
+     def predict_step(self, batch, batch_idx):
+         outputs = self.forward(input_ids=batch[0], attention_mask=batch[1])
+         if self.binary:
+             # Return raw two-class logits; the caller turns them into labels
+             # or probabilities.
+             return outputs['logits'].tolist()
+         else:
+             return outputs['logits'].view(-1).tolist()
+
+     @staticmethod
+     def add_model_specific_args(parent_parser):
+         parser = parent_parser.add_argument_group("RegressionModel")
+         parser.add_argument('--pretrained_model', type=str)
+         parser.add_argument('--learning_rate', type=float, default=5e-6)
+         parser.add_argument('--num_warmup_steps', type=int, default=0)
+         return parent_parser
+
+
+ if __name__ == "__main__":
+     parser = ArgumentParser()
+     parser.add_argument("--train", action="store_true")
+     parser.add_argument("--test", action="store_true")
+     parser.add_argument("--load_model", type=str)
+     parser.add_argument("--train_file", type=str)
+     parser.add_argument("--test_file", type=str)
+     parser.add_argument("--binary", action="store_true")
+     parser.add_argument("--seed", type=int, default=42)
+     parser.add_argument("--batch_size", type=int, default=64)
+     parser.add_argument("--max_length", type=int, default=128)
+     parser.add_argument("--model_save_location", type=str)
+     parser.add_argument("--preds_save_location", type=str)
+     parser.add_argument("--preds_save_logits", action="store_true")
+     parser.add_argument("--strategy_words", type=str)
+     parser.add_argument("--strategy_words_replacement_negate", action="store_true")
+     parser.add_argument("--random_masking_ratio", type=float)
+     parser = RegressionModel.add_model_specific_args(parser)
+     parser = pl.Trainer.add_argparse_args(parser)
+     args = parser.parse_args()
+     print(args)
+
+     pl.utilities.seed.seed_everything(seed=args.seed)
+     if args.load_model:
+         model = RegressionModel.load_from_checkpoint(args.load_model)
+         tokenizer = model.tokenizer
+     else:
+         tokenizer = transformers.AutoTokenizer.from_pretrained(args.pretrained_model)
+         model = RegressionModel(pretrained_model=args.pretrained_model, binary=args.binary, learning_rate=args.learning_rate, num_warmup_steps=args.num_warmup_steps, tokenizer=tokenizer)
+     trainer = pl.Trainer.from_argparse_args(args)
+
+     dataset = MyDataModule(train_file=args.train_file, test_file=args.test_file, binary=model.binary, max_length=args.max_length, batch_size=args.batch_size, tokenizer=tokenizer, strategy_words_replacement_negate=args.strategy_words_replacement_negate, strategy_words=args.strategy_words, random_masking_ratio=args.random_masking_ratio)
+     dataset.setup()
+
+     if args.train:
+         trainer.fit(model, dataset)
+
+     if args.test:
+         trainer.test(model, dataset.test_dataloader())
+
+     if args.preds_save_location:
+         data = MyDataModule.read_file(args.test_file, text_only=True)
+         strategy_words = None
+         if args.strategy_words:
+             strategy_words = pd.read_csv(args.strategy_words)
+             strategy_words = set(strategy_words.values[:, 1:].reshape(-1))
+         tokenized = MyDataModule.tokenize(data, tokenizer, args.max_length, args.strategy_words_replacement_negate, strategy_words, args.random_masking_ratio)
+         input_data = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(tokenized['input_ids'], tokenized['attention_mask']), batch_size=args.batch_size)
+         preds = trainer.predict(model, input_data, return_predictions=True)
+         # Flatten the per-batch prediction lists into a single tensor.
+         preds = torch.tensor([t for y in preds for t in y])
+         if model.binary:
+             if args.preds_save_logits:
+                 # Save the positive-class probability instead of the hard label.
+                 preds = torch.softmax(preds, dim=1)[:, 1].tolist()
+             else:
+                 preds = preds.argmax(dim=1).tolist()
+         else:
+             preds = preds.view(-1).tolist()
+         preds = [str(t) for t in preds]
+
+         with open(args.preds_save_location, 'w') as f:
+             f.write('\n'.join(preds) + '\n')
+
+     if args.model_save_location:
+         trainer.save_checkpoint(args.model_save_location, weights_only=True)
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ numpy
+ pandas
+ torch
+ pytorch-lightning
+ transformers
+ scikit-learn
+ # polyglot is optional; only needed for --strategy_words / --random_masking_ratio
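
Example usage (illustrative only; the file names and model below are placeholders, not part of this commit). Train a binary politeness classifier and save a checkpoint:

    python politeness_regressor.py --train --binary --train_file train.csv --pretrained_model xlm-roberta-base --max_epochs 3 --gpus 1 --model_save_location politeness.ckpt

Evaluate the saved checkpoint and write one prediction per line:

    python politeness_regressor.py --test --load_model politeness.ckpt --test_file test.csv --preds_save_location preds.txt

The --max_epochs and --gpus flags come from pl.Trainer.add_argparse_args; any other pytorch_lightning Trainer flag can be passed the same way.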