asahi417 committed
Commit 419d31a
1 parent: 89c175f
Files changed (1)
  1. training_scripts/finetune_t5.py +6 -4
training_scripts/finetune_t5.py CHANGED
@@ -85,7 +85,7 @@ def train(
         skip_train: bool = False,
         skip_test: bool = False,
         skip_upload: bool = False,
-        eval_batch_size: int = None):
+        batch_eval: int = None):
     """Fine-tune seq2seq model."""
     logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
     if not output_dir:
@@ -139,7 +139,7 @@ def train(
     lr = [1e-6, 1e-5, 1e-4] if lr is None else lr
     batch = [32] if not batch else batch
     epoch = [3, 5] if not epoch else epoch
-    eval_batch_size = min(batch) if not eval_batch_size else eval_batch_size
+    batch_eval = min(batch) if not batch_eval else batch_eval
     for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
         logging.info(f"[TRAIN {n}/{len(lr) * len(batch) * len(epoch)}] lr: {lr_tmp}, batch: {batch_tmp}")
         output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}_batch_{batch_tmp}_epoch_{epoch_tmp}"
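The renamed variable keeps the same fallback: when no evaluation batch size is supplied, the smallest training batch size in the grid is reused. A minimal sketch of that defaulting logic, with illustrative values:

    batch = [32, 64]         # training batch sizes for the grid search
    batch_eval = None        # user did not pass --batch-eval
    batch_eval = min(batch) if not batch_eval else batch_eval
    assert batch_eval == 32  # falls back to the smallest training batch size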
@@ -155,7 +155,7 @@ def train(
             learning_rate=lr_tmp,
             output_dir=output_dir_tmp,
             evaluation_strategy="no",
-            per_device_eval_batch_size=eval_batch_size,
+            per_device_eval_batch_size=batch_eval,
             seed=random_seed,
             per_device_train_batch_size=batch_tmp,
         ),
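For context, a hedged sketch of how the renamed value reaches transformers' Seq2SeqTrainingArguments; only the fields visible in the diff are shown, and the concrete values are illustrative:

    from transformers import Seq2SeqTrainingArguments

    args = Seq2SeqTrainingArguments(
        output_dir='ckpt/model_lr_1e-05_batch_32_epoch_3',  # output_dir_tmp
        learning_rate=1e-5,               # lr_tmp from the grid
        evaluation_strategy='no',         # no eval loop during training
        per_device_train_batch_size=32,   # batch_tmp from the grid
        per_device_eval_batch_size=16,    # batch_eval (was eval_batch_size)
        seed=42,                          # random_seed
    )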
@@ -209,7 +209,7 @@ def train(
         device='cuda:0' if torch.cuda.is_available() else 'cpu',
     )
     input_data = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
-    output = pipe(input_data, batch_size=eval_batch_size)
+    output = pipe(input_data, batch_size=batch_eval)
     output = [i['generated_text'] for i in output]
     with open(f'{output_dir}/best_model/prediction_test.txt', 'w') as f:
         f.write('\n'.join(output))
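The same value also sets the batch size for test-time generation. A sketch of that step, assuming pipe is a transformers text2text-generation pipeline (consistent with the 'generated_text' key read right after the call); the model path and input are illustrative:

    import torch
    from transformers import pipeline

    pipe = pipeline(
        'text2text-generation',
        model='ckpt/best_model',  # illustrative checkpoint path
        device='cuda:0' if torch.cuda.is_available() else 'cpu',
    )
    output = pipe(['some test input'], batch_size=16)  # batch_size=batch_eval
    print(output[0]['generated_text'])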
@@ -281,6 +281,7 @@ if __name__ == '__main__':
     parser.add_argument('--lr', nargs='+', default=None, type=float)
     parser.add_argument('--epoch', nargs='+', default=None, type=int)
     parser.add_argument('--batch', nargs='+', default=None, type=int)
+    parser.add_argument('--batch-eval', type=int, default=None)
     parser.add_argument('--down-sample-train', default=None, type=int)
     parser.add_argument('--down-sample-validation', default=2000, type=int)
     parser.add_argument('--random-seed', default=42, type=int)
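Note that argparse converts hyphens in option names to underscores for attribute access, which is what lets the call site below read opt.batch_eval. A minimal self-contained check:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-eval', type=int, default=None)
    opt = parser.parse_args(['--batch-eval', '16'])
    assert opt.batch_eval == 16  # '--batch-eval' is exposed as 'batch_eval'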
@@ -307,6 +308,7 @@ if __name__ == '__main__':
         lr=opt.lr,
         epoch=opt.epoch,
         batch=opt.batch,
+        batch_eval=opt.batch_eval,
         down_sample_train=opt.down_sample_train,
         down_sample_validation=opt.down_sample_validation,
         random_seed=opt.random_seed,
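With the flag wired through to train(), an invocation might look like the following; the flags shown are defined in the script per the diff, but the values (and any other required arguments the script may expect) are illustrative:

    python training_scripts/finetune_t5.py \
        --lr 1e-5 --batch 32 64 --epoch 3 \
        --batch-eval 16 --random-seed 42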