ajsbsd committed
Commit fb05949 · Parent: 5074ae7

Update README.md

Files changed (1): README.md (+98 -3)
README.md CHANGED
 
@@ -16,6 +16,9 @@ should probably proofread and complete it, then remove this comment. -->
  # flan-t5-base-openbsd-faq

  This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base), trained on [ajsbsd/openbsd-faq](https://huggingface.co/datasets/ajsbsd/openbsd-faq).
+
+ These are questions from https://www.openbsd.org/faq/faq1.html for use on ajsbsd.net.
+
  It achieves the following results on the evaluation set:
  - Loss: 2.2385
  - Rouge1: 0.3935
 
@@ -25,18 +28,110 @@ It achieves the following results on the evaluation set:

  ## Model description

- More information needed
+ This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base).

  ## Intended uses & limitations

- More information needed
+ An OpenBSD question-answering chatbot.
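+
+ For example, the model can be queried with the same instruction prefix used during training. A minimal inference sketch (the Hub repo id ajsbsd/flan-t5-base-openbsd-faq and the generation settings are assumptions, not taken from the training run):
+
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
+
+ # Assumed repo id for this fine-tuned model
+ tokenizer = T5Tokenizer.from_pretrained("ajsbsd/flan-t5-base-openbsd-faq")
+ model = T5ForConditionalGeneration.from_pretrained("ajsbsd/flan-t5-base-openbsd-faq")
+
+ # Query with the same prefix the model saw during training
+ inputs = tokenizer("Please answer this question: What is OpenBSD?", return_tensors="pt")
+ outputs = model.generate(**inputs, max_new_tokens=128)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))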

  ## Training and evaluation data

- More information needed
+ Questions created from https://www.openbsd.org/faq/faq1.html, in Q/A format for text2text generation.
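+
+ Each record pairs a "question" with an "answer" (an illustrative sketch; the field names match the preprocessing code below, and the answer text is paraphrased from the FAQ):
+
+ {"question": "What is OpenBSD?", "answer": "OpenBSD is a free, multi-platform 4.4BSD-based UNIX-like operating system."}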

  ## Training procedure
 
+ Trained on Google Colab with the following code.
+
+ !pip install -q transformers[torch] tokenizers datasets evaluate rouge_score sentencepiece huggingface_hub --upgrade
+
+ from huggingface_hub import notebook_login
+ notebook_login()
+
+ import nltk
+ from datasets import load_dataset
+ import evaluate
+ import numpy as np
+ from transformers import T5Tokenizer, DataCollatorForSeq2Seq
+ from transformers import T5ForConditionalGeneration, Seq2SeqTrainingArguments, Seq2SeqTrainer
+
+ # Load the dataset and hold out 20% of it as an evaluation split
+ dataset = load_dataset("ajsbsd/openbsd-faq")
+ dataset = dataset["train"].train_test_split(test_size=0.2)
+ # Load the tokenizer, model, and data collator
+ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
+ model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
+ data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)
+
+ # Prefix every question so the model sees a consistent instruction
+ prefix = "Please answer this question: "
+
+ # Define our preprocessing function
+ def preprocess_function(examples):
+     """Add prefix to the sentences, tokenize the text, and set the labels"""
+     # The "inputs" are the tokenized questions:
+     inputs = [prefix + doc for doc in examples["question"]]
+     model_inputs = tokenizer(inputs, max_length=128, truncation=True)
+
+     # The "labels" are the tokenized answers:
+     labels = tokenizer(text_target=examples["answer"], max_length=512, truncation=True)
+     model_inputs["labels"] = labels["input_ids"]
+     return model_inputs
+
+ # Map the preprocessing function across our dataset
+ tokenized_dataset = dataset.map(preprocess_function, batched=True)
+
+ # Set up Rouge score for evaluation
+ nltk.download("punkt", quiet=True)
+ metric = evaluate.load("rouge")
+
+ def compute_metrics(eval_preds):
+     preds, labels = eval_preds
+
+     # Replace the -100 padding in the labels, then decode preds and labels
+     labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
+     decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
+     decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
+
+     # rougeLSum expects a newline after each sentence
+     decoded_preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds]
+     decoded_labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels]
+
+     result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
+     return result
+
+ # Set up training arguments
+ training_args = Seq2SeqTrainingArguments(
+     output_dir="./flan-t5-base-openbsd-faq",
+     evaluation_strategy="epoch",
+     learning_rate=3e-4,
+     per_device_train_batch_size=8,
+     per_device_eval_batch_size=4,
+     weight_decay=0.01,
+     save_total_limit=3,
+     num_train_epochs=5,
+     predict_with_generate=True,
+     push_to_hub=False,
+ )
+
+ # Set up the trainer
+ trainer = Seq2SeqTrainer(
+     model=model,
+     args=training_args,
+     train_dataset=tokenized_dataset["train"],
+     eval_dataset=tokenized_dataset["test"],
+     tokenizer=tokenizer,
+     data_collator=data_collator,
+     compute_metrics=compute_metrics,
+ )
+
+ # Train the model
+ trainer.train()
+
+ # Upload the final model and card to the Hub
+ trainer.push_to_hub()
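+
+ The evaluation numbers reported above can be reproduced from the trained model (a sketch; with predict_with_generate=True the metrics come back as eval_loss, eval_rouge1, and so on):
+
+ print(trainer.evaluate())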
  ### Training hyperparameters

  The following hyperparameters were used during training: