asahi417 committed on
Commit
e78759d
·
1 Parent(s): 69299ee
training_scripts/finetune_t5.py CHANGED
@@ -26,6 +26,27 @@ os.environ['WANDB_DISABLED'] = 'true' # disable wandb
26
  _LR = [1e-6, 1e-5, 1e-4]
27
  _BATCH = 32
28
  _EPOCH = 5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
 
31
  def load_model(
@@ -94,11 +115,6 @@ def train(
94
  )
95
  # train
96
  trainer.train()
97
- # trainer.log_metrics("train", result.metrics)
98
- # trainer.save_metrics("train", result.metrics)
99
- # clean up memory
100
- # trainer.save_model()
101
- # trainer.save_state()
102
  del trainer
103
  del model
104
  gc.collect()
@@ -119,6 +135,15 @@ def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> flo
119
  return mean(scores)
120
 
121
 
 
 
 
 
 
 
 
 
 
122
  def get_metric(
123
  prediction_file: str,
124
  metric_file: str,
@@ -140,8 +165,8 @@ def get_metric(
140
  with open(prediction_file, 'w') as f:
141
  f.write('\n'.join(output))
142
  with open(prediction_file) as f:
143
- output = [set(i.split(',')) for i in f.read().split('\n')]
144
- label = [set(i.split(',')) for i in label]
145
  eval_metric = {'f1': get_f1_score(label, output)}
146
  logging.info(json.dumps(eval_metric, indent=4))
147
  with open(metric_file, 'w') as f:
@@ -198,6 +223,7 @@ def test(
198
  model_path = f'{output_dir}/best_model'
199
  if not os.path.exists(model_path):
200
  model_path = os.path.basename(model_name)
 
201
  prediction_file = f"{model_path}/prediction.{os.path.basename(dataset)}.{dataset_name}.txt"
202
  metric_file = f"{model_path}/metric.{os.path.basename(dataset)}.{dataset_name}.json"
203
  metric = get_metric(
 
_LR = [1e-6, 1e-5, 1e-4]  # learning-rate grid searched during fine-tuning
_BATCH = 32  # per-device training batch size
_EPOCH = 5  # number of fine-tuning epochs
# Canonical English topic labels mapped to their translations; each value
# list is ordered [Greek, Spanish, Japanese].  Consumed by unify_label to
# normalise multilingual model outputs/references before F1 scoring, so
# every string here must match the dataset labels byte-for-byte.
_CLASS_MAP = {
    'Arts & Culture': ['Τέχνες & Πολιτισμός', 'Arte y cultura', 'アート&カルチャー'],
    'Business & Entrepreneurs': ['Επιχειρήσεις & Επιχειρηματίες', 'Negocios y emprendedores', 'ビジネス'],
    'Celebrity & Pop Culture': ['Διασημότητες & Ποπ κουλτούρα', 'Celebridades y cultura pop', '芸能'],
    'Diaries & Daily Life': ['Ημερολόγια & Καθημερινή ζωή', 'Diarios y vida diaria', '日常'],
    'Family': ['Οικογένεια', 'Familia', '家族'],
    'Fashion & Style': ['Μόδα & Στυλ', 'Moda y estilo', 'ファッション'],
    'Film, TV & Video': ['Ταινίες, τηλεόραση & βίντεο', 'Cine, televisión y video', '映画&ラジオ'],
    # NOTE(review): 'Υεία' looks like a typo for 'Υγεία', but it must match
    # the dataset's label strings exactly — confirm against the data before
    # changing it.
    'Fitness & Health': ['Γυμναστική & Υεία', 'Estado físico y salud', 'フィットネス&健康'],
    'Food & Dining': ['Φαγητό & Δείπνο', 'Comida y comedor', '料理'],
    'Learning & Educational': ['Μάθηση & Εκπαίδευση', 'Aprendizaje y educación', '教育関連'],
    'News & Social Concern': ['Ειδήσεις & Κοινωνία', 'Noticias e interés social', '社会'],
    'Relationships': ['Σχέσεις', 'Relaciones', '人間関係'],
    'Science & Technology': ['Επιστήμη & Τεχνολογία', 'Ciencia y Tecnología', 'サイエンス'],
    'Youth & Student Life': ['Νεανική & Φοιτητική ζωή', 'Juventud y Vida Estudiantil', '学校'],
    'Music': ['Μουσική', 'Música', '音楽'],
    'Gaming': ['Παιχνίδια', 'Juegos', 'ゲーム'],
    'Sports': ['Αθλητισμός', 'Deportes', 'スポーツ'],
    'Travel & Adventure': ['Ταξίδια & Περιπέτεια', 'Viajes y aventuras', '旅行'],
    'Other Hobbies': ['Άλλα χόμπι', 'Otros pasatiempos', 'その他']
}
50
 
51
 
52
  def load_model(
 
115
  )
116
  # train
117
  trainer.train()
 
 
 
 
 
118
  del trainer
119
  del model
120
  gc.collect()
 
135
  return mean(scores)
136
 
137
 
138
def unify_label(label: Set[str]) -> Set[str]:
    """Map a set of (possibly translated) topic labels to canonical English names.

    Each element is looked up in the translation lists of ``_CLASS_MAP`` and
    replaced by the matching English key. Labels that are *already* canonical
    English keys are kept as-is — the previous implementation only searched the
    translation lists, so English labels were silently dropped and the English
    evaluation in ``get_metric`` compared empty sets. Labels matching neither a
    key nor a translation are discarded, as before.

    :param label: set of raw label strings (any supported language)
    :return: set of canonical English label strings
    """
    unified = set()
    for raw in label:
        if raw in _CLASS_MAP:
            # Already the canonical English name — keep it.
            unified.add(raw)
            continue
        for english, translations in _CLASS_MAP.items():
            if raw in translations:
                unified.add(english)
                break  # each translation belongs to exactly one class
    return unified
145
+
146
+
147
  def get_metric(
148
  prediction_file: str,
149
  metric_file: str,
 
165
  with open(prediction_file, 'w') as f:
166
  f.write('\n'.join(output))
167
  with open(prediction_file) as f:
168
+ output = [unify_label(set(i.split(','))) for i in f.read().split('\n')]
169
+ label = [unify_label(set(i.split(','))) for i in label]
170
  eval_metric = {'f1': get_f1_score(label, output)}
171
  logging.info(json.dumps(eval_metric, indent=4))
172
  with open(metric_file, 'w') as f:
 
223
  model_path = f'{output_dir}/best_model'
224
  if not os.path.exists(model_path):
225
  model_path = os.path.basename(model_name)
226
+
227
  prediction_file = f"{model_path}/prediction.{os.path.basename(dataset)}.{dataset_name}.txt"
228
  metric_file = f"{model_path}/metric.{os.path.basename(dataset)}.{dataset_name}.json"
229
  metric = get_metric(
training_scripts/script.sh CHANGED
@@ -2,10 +2,20 @@
2
  # training
3
  ## en_2022
4
  python finetune_t5.py --dataset-name en_2022 --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
5
- python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name en --skip-train --skip-validate --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
6
- python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name ja --skip-train --skip-validate --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
7
- python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name gr --skip-train --skip-validate --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
8
- python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name es --skip-train --skip-validate --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
 
 
 
 
 
 
 
 
 
 
9
 
10
  ## single
11
  python finetune_t5.py --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en --model-organization cardiffnlp --use-auth-token
@@ -13,21 +23,8 @@ python finetune_t5.py --dataset-name ja --low-cpu-mem-usage --model-alias mt5-sm
13
  python finetune_t5.py --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-gr --model-organization cardiffnlp --use-auth-token
14
  python finetune_t5.py --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-es --model-organization cardiffnlp --use-auth-token
15
 
16
-
17
  # continuous
18
  python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-es --model-organization cardiffnlp --use-auth-token
19
  python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-en --model-organization cardiffnlp --use-auth-token
20
  python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-ja --model-organization cardiffnlp --use-auth-token
21
  python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-gr --model-organization cardiffnlp --use-auth-token
22
-
23
- # mix
24
- python finetune_t5.py --dataset-name mix --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
25
-
26
-
27
-
28
-
29
-
30
- # Zero-shot
31
-
32
-
33
-
 
2
  # training
3
  ## en_2022
4
  python finetune_t5.py --dataset-name en_2022 --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
5
+ git clone https://huggingface.co/cardiffnlp/mt5-small-tweet-topic-multi-en-2022
6
+ python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022 --dataset-name en --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
7
+ python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022 --dataset-name ja --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
8
+ python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022 --dataset-name gr --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
9
+ python finetune_t5.py -m mt5-small-tweet-topic-multi-en-2022 --dataset-name es --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
10
+ cd mt5-small-tweet-topic-multi-en-2022 && git add . && git commit -m "update" && git push origin main && cd ..
11
+ # mix
12
+ python finetune_t5.py --dataset-name mix --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
13
+ git clone https://huggingface.co/cardiffnlp/mt5-small-tweet-topic-multi-mix
14
+ python finetune_t5.py -m mt5-small-tweet-topic-multi-mix --dataset-name en --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
15
+ python finetune_t5.py -m mt5-small-tweet-topic-multi-mix --dataset-name ja --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
16
+ python finetune_t5.py -m mt5-small-tweet-topic-multi-mix --dataset-name gr --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
17
+ python finetune_t5.py -m mt5-small-tweet-topic-multi-mix --dataset-name es --skip-train --skip-validate --skip-upload --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
18
+ cd mt5-small-tweet-topic-multi-mix && git add . && git commit -m "update" && git push origin main && cd ..
19
 
20
  ## single
21
  python finetune_t5.py --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en --model-organization cardiffnlp --use-auth-token
 
23
  python finetune_t5.py --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-gr --model-organization cardiffnlp --use-auth-token
24
  python finetune_t5.py --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-es --model-organization cardiffnlp --use-auth-token
25
 
 
26
  # continuous
27
  python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-es --model-organization cardiffnlp --use-auth-token
28
  python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-en --model-organization cardiffnlp --use-auth-token
29
  python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-ja --model-organization cardiffnlp --use-auth-token
30
  python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-gr --model-organization cardiffnlp --use-auth-token