ajsbsd committed
Commit f7ca628 · 1 Parent(s): 79b3b83

Updated with test code for Google Colab

Files changed (1):
  1. README.md +92 -0

README.md CHANGED
@@ -49,4 +49,96 @@ dataset_info:
   num_examples: 33577
   download_size: 6999588
   dataset_size: 24777921
+language:
+- en
 ---
+
+Code to test on Google Colab
+
+!pip install -q transformers[torch] tokenizers datasets evaluate rouge_score sentencepiece huggingface_hub --upgrade
+
+from huggingface_hub import notebook_login
+
+notebook_login()
+
+import nltk
+from datasets import load_dataset
+import evaluate
+import numpy as np
+from transformers import T5Tokenizer, DataCollatorForSeq2Seq
+from transformers import T5ForConditionalGeneration, Seq2SeqTrainingArguments, Seq2SeqTrainer
+
+# Load and split the dataset
+dataset = load_dataset("ajsbsd/presto")
+dataset = dataset["train"].train_test_split(test_size=0.2)
+# Alternative: load from a local CSV instead
+#dataset = load_dataset("csv", data_files="./JEOPARDY_CSV.csv")
+#dataset = dataset["train"].train_test_split(test_size=0.2)
+# Load the tokenizer, model, and data collator
+tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
+model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-small")
+data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)
+
+# We prefix our tasks with "answer the question"
+prefix = "answer the question: "
+
+# Define our preprocessing function
+def preprocess_function(examples):
+    """Add the prefix to the inputs, tokenize the text, and set the labels."""
+    # The model inputs are the prefixed, tokenized "inputs" column:
+    inputs = [prefix + doc for doc in examples["inputs"]]
+    model_inputs = tokenizer(inputs, max_length=128, truncation=True)
+
+    # The labels are the tokenized "targets" column:
+    labels = tokenizer(text_target=examples["targets"], max_length=512, truncation=True)
+    model_inputs["labels"] = labels["input_ids"]
+    return model_inputs
+
+# Map the preprocessing function across our dataset
+tokenized_dataset = dataset.map(preprocess_function, batched=True)
+
+# Set up the ROUGE metric for evaluation
+nltk.download("punkt", quiet=True)
+metric = evaluate.load("rouge")
+
+def compute_metrics(eval_preds):
+    preds, labels = eval_preds
+
+    # Replace -100 (ignored label positions) with the pad token id, then decode preds and labels
+    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
+    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
+    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
+
+    # rougeLSum expects a newline after each sentence
+    decoded_preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds]
+    decoded_labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels]
+
+    result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
+    return result
+
+# Set up training arguments
+training_args = Seq2SeqTrainingArguments(
+    output_dir="./results",
+    evaluation_strategy="epoch",
+    learning_rate=3e-4,
+    per_device_train_batch_size=8,
+    per_device_eval_batch_size=4,
+    weight_decay=0.01,
+    save_total_limit=3,
+    num_train_epochs=2,
+    predict_with_generate=True,
+    push_to_hub=False,
+)
+
+# Set up the trainer
+trainer = Seq2SeqTrainer(
+    model=model,
+    args=training_args,
+    train_dataset=tokenized_dataset["train"],
+    eval_dataset=tokenized_dataset["test"],
+    tokenizer=tokenizer,
+    data_collator=data_collator,
+    compute_metrics=compute_metrics,
+)
+
+# Train the model
+trainer.train()
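
A quick way to sanity-check the preprocessing step above before training is to decode one mapped example. This is a sketch, not part of the commit; it assumes the "inputs"/"targets" columns and the tokenized_dataset produced by the script:

# Peek at one tokenized example (illustrative check, not part of the commit)
sample = tokenized_dataset["train"][0]
print(tokenizer.decode(sample["input_ids"], skip_special_tokens=True))  # should start with "answer the question: "
print(tokenizer.decode(sample["labels"], skip_special_tokens=True))     # the decoded target text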
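Because predict_with_generate=True, calling trainer.evaluate() after training reruns generation on the held-out split and reports the scores from compute_metrics (a minimal sketch; the exact metric keys come from the ROUGE implementation in evaluate):

# Evaluate the fine-tuned model on the test split (illustrative)
metrics = trainer.evaluate()
print(metrics)  # includes eval_rouge1 / eval_rouge2 / eval_rougeL / eval_rougeLsum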
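For a quick smoke test in the same Colab session, the trained model can be exercised directly with generate(). This is a sketch under stated assumptions: the save path and the sample question are illustrative, not part of the commit:

# Save and reload the fine-tuned model, then answer one question (illustrative)
trainer.save_model("./results/final")  # hypothetical output path
model = T5ForConditionalGeneration.from_pretrained("./results/final")

question = prefix + "What time does the pharmacy close?"  # hypothetical sample input
input_ids = tokenizer(question, return_tensors="pt").input_ids.to(model.device)
outputs = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))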