asahi417 committed on
Commit
7db50a7
1 Parent(s): c496b1c
Files changed (1)
  1. training_scripts/finetune_t5.py +13 -9
training_scripts/finetune_t5.py CHANGED

--- a/training_scripts/finetune_t5.py
+++ b/training_scripts/finetune_t5.py
@@ -148,7 +148,7 @@ def train(
             args=Seq2SeqTrainingArguments(
                 num_train_epochs=epoch,
                 learning_rate=lr_tmp,
-                output_dir=f'{output_dir}/runs',
+                output_dir=f"{output_dir}/model_{n}",
                 evaluation_strategy='steps',
                 eval_steps=eval_steps,
                 per_device_eval_batch_size=eval_batch_size,
@@ -160,17 +160,21 @@ def train(
             eval_dataset=tokenized_dataset['validation_ds'],
             compute_metrics=compute_metric,
         )
-        os.makedirs(f'{output_dir}/model_{n}', exist_ok=True)
-        trainer.train()
-        trainer.save_model(f'{output_dir}/model_{n}')
-        tokenizer.save_pretrained(f'{output_dir}/model_{n}')
-        # grid search
-        with open(f'{output_dir}/model_{n}/hyperparameters.json', 'w') as f:
-            json.dump({"learning_rate": lr_tmp, "batch_size": batch_tmp}, f)
+        result = trainer.train()
+        trainer.save_model()  # Saves the tokenizer too for easy upload
+        metrics = result.metrics
+        trainer.log_metrics("train", metrics)
+        trainer.save_metrics("train", metrics)
+        trainer.save_state()
+
+        # trainer.save_model(f'{output_dir}/model_{n}')
+        # tokenizer.save_pretrained(f'{output_dir}/model_{n}')
+        # # grid search
+        # with open(f'{output_dir}/model_{n}/hyperparameters.json', 'w') as f:
+        #     json.dump({"learning_rate": lr_tmp, "batch_size": batch_tmp}, f)
         del trainer
         gc.collect()
         torch.cuda.empty_cache()
-        logging.info(f'model saved at {output_dir}/model_{n}')
     else:
         logging.info('skip hyperparameter search & model training (already done)')
 
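
The rewrite leans on two Trainer behaviors: train() returns a TrainOutput whose .metrics feed log_metrics/save_metrics, and save_model() called without a path writes to args.output_dir (now pointed at the per-run directory via output_dir=f"{output_dir}/model_{n}"), including the tokenizer files when a tokenizer was handed to the trainer. A minimal, self-contained sketch of that flow, where the "t5-small" checkpoint, the "ckpt/model_0" directory, and the one-example toy dataset are placeholders rather than the repo's actual data:

# Sketch of the revised save flow, not the repo's full script.
from datasets import Dataset
from transformers import (AutoModelForSeq2SeqLM, AutoTokenizer,
                          Seq2SeqTrainer, Seq2SeqTrainingArguments)

tokenizer = AutoTokenizer.from_pretrained("t5-small")  # placeholder checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# Toy tokenized dataset standing in for tokenized_dataset['train_ds'].
enc = tokenizer(["translate English to German: hello"], padding=True)
labels = tokenizer(text_target=["hallo"], padding=True)["input_ids"]
train_ds = Dataset.from_dict({
    "input_ids": enc["input_ids"],
    "attention_mask": enc["attention_mask"],
    "labels": labels,
})

trainer = Seq2SeqTrainer(
    model=model,
    args=Seq2SeqTrainingArguments(
        output_dir="ckpt/model_0",  # per-model directory, as in the diff
        num_train_epochs=1,
        per_device_train_batch_size=1,
        report_to=[],  # disable external loggers for the toy run
    ),
    train_dataset=train_ds,
    tokenizer=tokenizer,  # so save_model() also writes the tokenizer files
)

result = trainer.train()
trainer.save_model()                           # model + tokenizer -> ckpt/model_0
trainer.log_metrics("train", result.metrics)   # pretty-prints the train metrics
trainer.save_metrics("train", result.metrics)  # writes ckpt/model_0/train_results.json
trainer.save_state()                           # writes ckpt/model_0/trainer_state.json

Compared with the removed block, this drops the hand-rolled hyperparameters.json dump in favor of save_metrics/save_state, which record the run's metrics and trainer state in standard JSON files inside the same output directory.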