Create finetune.py
finetune.py +113 -0
finetune.py
ADDED
@@ -0,0 +1,113 @@
from datasets import load_dataset
import random
from transformers import (
    AutoTokenizer,
    DataCollatorWithPadding,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer,
    PreTrainedTokenizer,
    EarlyStoppingCallback,
)
from dataclasses import dataclass
from sklearn.metrics import accuracy_score, precision_recall_fscore_support


def process(batch: dict, tokenizer: PreTrainedTokenizer) -> dict:
    # Collapse the five-way Polarity labels to binary:
    # SP and WP = Positive (1) | WN and SN = Negative (0)
    # NU (neutral) is randomly assigned to either class.
    new_labels = []
    for label in batch["Polarity"]:
        if label in ["SP", "WP"]:
            new_labels.append(1)
        elif label in ["WN", "SN"]:
            new_labels.append(0)
        elif label == "NU":
            new_labels.append(random.choice([1, 0]))
        else:
            # Pass through labels that are already numeric.
            new_labels.append(label)
    inputs = tokenizer(batch["Text"], truncation=True)
    batch["input_ids"] = inputs["input_ids"]
    batch["attention_mask"] = inputs["attention_mask"]
    batch["labels"] = new_labels
    return batch


def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = logits.argmax(-1)
    accuracy = accuracy_score(labels, predictions)
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, predictions, average="binary"
    )
    return {
        "accuracy": accuracy,
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }


def pipeline(args):
    model = AutoModelForSequenceClassification.from_pretrained(args.model_name, num_labels=2)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    dataset = load_dataset(args.dataset_name)
    dataset = dataset.map(process, batched=True, fn_kwargs={"tokenizer": tokenizer})
    # Hold out a fraction of the train split for evaluation.
    dataset = dataset["train"].train_test_split(args.split_ratio)
    train_dataset = dataset["train"]
    test_dataset = dataset["test"]
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    trainer = Trainer(
        model=model,
        args=TrainingArguments(
            output_dir="./results",
            learning_rate=args.learning_rate,
            per_device_train_batch_size=args.batch_size,
            per_device_eval_batch_size=args.batch_size,
            num_train_epochs=args.epochs,
            weight_decay=0.01,
            eval_strategy="steps",
            save_strategy="steps",
            load_best_model_at_end=True,
            report_to="none",
            save_steps=500,
            eval_steps=500,
            save_total_limit=1,
            logging_steps=500,
            fp16=args.fp16,
            greater_is_better=True,
            metric_for_best_model="f1",
            hub_model_id=args.hub_location,
        ),
        train_dataset=train_dataset,
        eval_dataset=test_dataset,
        processing_class=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
        callbacks=[EarlyStoppingCallback(early_stopping_patience=5)],
    )

    trainer.train()
    trainer.evaluate()
    trainer.predict(test_dataset)

    # Push to Hub. Trainer.push_to_hub takes a commit message as its first
    # positional argument, so the target repo is set via hub_model_id above.
    trainer.push_to_hub()
    tokenizer.push_to_hub(args.hub_location)


@dataclass
class Arguments:
    model_name: str = "csebuetnlp/banglabert"
    dataset_name: str = "SayedShaun/sentigold"
    split_ratio: float = 0.1
    batch_size: int = 128
    epochs: int = 40
    learning_rate: float = 1e-5
    fp16: bool = True
    hub_location: str = "SayedShaun/bangla-classifier-binary"


if __name__ == "__main__":
    args = Arguments()
    pipeline(args)
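
Once the push succeeds, the fine-tuned checkpoint can be loaded back from the Hub for a quick sanity check. Below is a minimal inference sketch, assuming the SayedShaun/bangla-classifier-binary repo is accessible; since the script does not set id2label, the pipeline reports the default names LABEL_0/LABEL_1, which correspond to negative/positive under the mapping in process(). The Bangla input is illustrative only.

from transformers import pipeline

# Minimal sketch: load the checkpoint pushed by finetune.py.
# LABEL_1 = positive, LABEL_0 = negative, per the mapping in process().
classifier = pipeline(
    "text-classification",
    model="SayedShaun/bangla-classifier-binary",
)
print(classifier("এই পণ্যটি খুব ভালো"))  # illustrative input: "This product is very good"

To experiment with other hyperparameters, instantiate the dataclass with overrides before calling pipeline, e.g. Arguments(batch_size=16, fp16=False).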