init
Browse files
training_scripts/finetune_t5.py
CHANGED
@@ -181,12 +181,19 @@ def train(
|
|
181 |
torch.cuda.empty_cache()
|
182 |
# cuda.get_current_device().reset()
|
183 |
|
184 |
-
model_score =
|
185 |
for eval_file in glob(f"{output_dir}/model_*/eval_results.json"):
|
186 |
with open(eval_file) as f:
|
187 |
-
|
188 |
-
|
189 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
190 |
copy_tree(best_model, f'{output_dir}/best_model')
|
191 |
else:
|
192 |
logging.info('skip hyperparameter search & model training (already done)')
|
|
|
181 |
torch.cuda.empty_cache()
|
182 |
# cuda.get_current_device().reset()
|
183 |
|
184 |
+
model_score = []
|
185 |
for eval_file in glob(f"{output_dir}/model_*/eval_results.json"):
|
186 |
with open(eval_file) as f:
|
187 |
+
results = json.load(f)
|
188 |
+
model_score.append([os.path.dirname(eval_file), results['eval_loss'], results['eval_f1']])
|
189 |
+
logging.info("Search Result")
|
190 |
+
for i in model_score:
|
191 |
+
logging.info(i)
|
192 |
+
max_metric = max(model_score, key=lambda x: x[2])
|
193 |
+
if len([i for i in model_score if i[2] == max_metric]) > 1:
|
194 |
+
best_model = sorted(model_score, key=lambda x: x[1])[0][0]
|
195 |
+
else:
|
196 |
+
best_model = sorted(model_score, key=lambda x: x[2])[-1][0]
|
197 |
copy_tree(best_model, f'{output_dir}/best_model')
|
198 |
else:
|
199 |
logging.info('skip hyperparameter search & model training (already done)')
|
training_scripts/script.sh
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
python finetune_t5.py --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp
|
2 |
python finetune_t5.py --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-gr --model-organization cardiffnlp
|
3 |
python finetune_t5.py --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-es --model-organization cardiffnlp
|
4 |
python finetune_t5.py --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-en --model-organization cardiffnlp
|
|
|
# Fine-tune one mT5-small tweet-topic model per language (ja, gr, es, en),
# publishing under the cardiffnlp model organization.
# NOTE(review): only the Japanese run passes --skip-upload — confirm this
# asymmetry is intentional (the other three runs will attempt an upload).
python finetune_t5.py --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp --skip-upload
python finetune_t5.py --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-gr --model-organization cardiffnlp
python finetune_t5.py --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-es --model-organization cardiffnlp
python finetune_t5.py --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-en --model-organization cardiffnlp
|