init
training_scripts/finetune_t5.py CHANGED
@@ -148,7 +148,7 @@ def train(
             args=Seq2SeqTrainingArguments(
                 num_train_epochs=epoch,
                 learning_rate=lr_tmp,
-                output_dir=f
+                output_dir=f"{output_dir}/model_{n}",
                 evaluation_strategy='steps',
                 eval_steps=eval_steps,
                 per_device_eval_batch_size=eval_batch_size,
@@ -160,17 +160,21 @@ def train(
             eval_dataset=tokenized_dataset['validation_ds'],
             compute_metrics=compute_metric,
         )
-
-        trainer.
-
-
-
-
-
+        result = trainer.train()
+        trainer.save_model()  # Saves the tokenizer too for easy upload
+        metrics = result.metrics
+        trainer.log_metrics("train", metrics)
+        trainer.save_metrics("train", metrics)
+        trainer.save_state()
+
+        # trainer.save_model(f'{output_dir}/model_{n}')
+        # tokenizer.save_pretrained(f'{output_dir}/model_{n}')
+        # # grid search
+        # with open(f'{output_dir}/model_{n}/hyperparameters.json', 'w') as f:
+        #     json.dump({"learning_rate": lr_tmp, "batch_size": batch_tmp}, f)
         del trainer
         gc.collect()
         torch.cuda.empty_cache()
-        logging.info(f'model saved at {output_dir}/model_{n}')
     else:
         logging.info('skip hyperparameter search & model training (already done)')
 
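For reference, below is a minimal, self-contained sketch of the train-and-save pattern this commit introduces. The Trainer calls it relies on (train, save_model, log_metrics, save_metrics, save_state) are standard Hugging Face Transformers APIs; the run_one_trial wrapper, the 'train_ds' dataset key, and the model/tokenizer arguments are assumptions for illustration, since the diff does not show how the real script constructs them.

# Hedged sketch of the commit's train-and-save pattern. `run_one_trial`
# and the 'train_ds' key are hypothetical; the real script builds the
# model, tokenizer, and tokenized_dataset elsewhere.
import gc

import torch
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments


def run_one_trial(model, tokenizer, tokenized_dataset,
                  output_dir, n, lr_tmp, epoch, eval_steps, eval_batch_size):
    trainer = Seq2SeqTrainer(
        model=model,
        tokenizer=tokenizer,
        args=Seq2SeqTrainingArguments(
            # One output directory per grid-search trial, as in the diff.
            output_dir=f"{output_dir}/model_{n}",
            num_train_epochs=epoch,
            learning_rate=lr_tmp,
            evaluation_strategy='steps',
            eval_steps=eval_steps,
            per_device_eval_batch_size=eval_batch_size,
        ),
        train_dataset=tokenized_dataset['train_ds'],      # assumed key
        eval_dataset=tokenized_dataset['validation_ds'],
    )

    result = trainer.train()
    trainer.save_model()                     # writes model (and tokenizer) to output_dir
    metrics = result.metrics
    trainer.log_metrics("train", metrics)    # pretty-prints the training metrics
    trainer.save_metrics("train", metrics)   # writes train_results.json to output_dir
    trainer.save_state()                     # writes trainer_state.json (log history etc.)

    # Release GPU memory before the next grid-search trial.
    del trainer
    gc.collect()
    torch.cuda.empty_cache()

Using Trainer's own save_model/save_metrics/save_state (rather than the commented-out manual save_pretrained and JSON dump) keeps each trial's checkpoint, metrics, and trainer state in one per-trial directory with no extra bookkeeping code.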