hkanumilli committed
Commit · ea19ac8
Parent(s): e061b58

emotion extractor application built using transfer learning
Browse files
- .DS_Store +0 -0
- Emotion_classify_Data.csv +0 -0
- app.py +45 -0
- label_to_int_mapping.json +1 -0
- requirements.txt +6 -0
- train/from_scratch.py +167 -0
- train/newhead.py +19 -0
- train/train2.ipynb +831 -0
- train/transfer_learning.py +136 -0
- transferLearningResults/config.json +39 -0
- transferLearningResults/merges.txt +0 -0
- transferLearningResults/model.safetensors +3 -0
- transferLearningResults/model_state_dict.pt +3 -0
- transferLearningResults/special_tokens_map.json +51 -0
- transferLearningResults/tokenizer.json +0 -0
- transferLearningResults/tokenizer_config.json +57 -0
- transferLearningResults/vocab.json +0 -0
.DS_Store
ADDED
Binary file (6.15 kB).
Emotion_classify_Data.csv
ADDED
The diff for this file is too large to render.
app.py
ADDED
@@ -0,0 +1,45 @@
import gradio as gr
import json, torch
from transformers import AutoTokenizer, RobertaForSequenceClassification, RobertaConfig

# Load the configuration of your model
config = RobertaConfig.from_pretrained('cardiffnlp/twitter-roberta-base-emotion', num_labels=3)

# Instantiate the model using the specific class
model = RobertaForSequenceClassification(config)

# Load the state dictionary from your .pt file
state_dict = torch.load('transferLearningResults/model_state_dict.pt', map_location=torch.device('cpu'))

# Load the state dictionary into the model
model.load_state_dict(state_dict, strict=False)

# Switch to evaluation mode for inference
model.eval()

tokenizer = AutoTokenizer.from_pretrained('transferLearningResults')

# Load the label mapping
with open('label_to_int_mapping.json', 'r') as file:
    label_mapping = json.load(file)
int_to_label = {int(k): v for k, v in label_mapping.items()}  # Convert keys to integers

def predict_emotion(text):
    # Tokenize the input text and convert to tensor
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
    # Get model predictions
    with torch.no_grad():
        outputs = model(**inputs)
    # Convert predictions to probabilities
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1).squeeze()
    # Convert probabilities to a readable format
    probabilities_list = probabilities.tolist()
    # Create a dictionary for the probabilities with labels
    probabilities_dict = {int_to_label[i]: prob for i, prob in enumerate(probabilities_list)}
    return probabilities_dict

# Create a Gradio interface
iface = gr.Interface(fn=predict_emotion, inputs="text", outputs=gr.outputs.Label(num_top_classes=3, type="confidences"))

# Run the app
iface.launch()
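As a quick sanity check before deploying the Space, the prediction path can be exercised directly; a minimal sketch (not part of the commit, assuming the transferLearningResults/ checkpoint and label_to_int_mapping.json above are present locally):

# Sketch: call the app's prediction function without launching Gradio.
scores = predict_emotion("I can't believe how wonderful this day turned out!")
assert abs(sum(scores.values()) - 1.0) < 1e-4  # softmax probabilities sum to ~1
print(max(scores, key=scores.get), scores)     # most likely emotion plus full distribution
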
label_to_int_mapping.json
ADDED
@@ -0,0 +1 @@
{"0": "anger", "1": "fear", "2": "joy"}
requirements.txt
ADDED
@@ -0,0 +1,6 @@
gradio==3.21.0
jsonpointer==2.3
jsonschema==4.17.3
torch @ file:///Users/runner/miniforge3/conda-bld/pytorch-recipe_1660136156773/work
torchvision==0.13.1
transformers==4.26.0
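Note: the torch entry above pins a local conda-build artifact from the author's machine and will not resolve anywhere else. A portable sketch of the equivalent pin (torch 1.12.1 is the release that pairs with torchvision 0.13.1) would be:

torch==1.12.1
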
train/from_scratch.py
ADDED
@@ -0,0 +1,167 @@
import torch
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split

df = pd.read_csv('Emotion_classify_Data.csv')

"""
https://www.kaggle.com/code/vidhikishorwaghela/emonlp-decoding-human-feelings-with-deep-learning
"""

def preprocess_data(df):
    """
    Preprocess the data by renaming columns, removing rows with missing values, and removing extra spaces.
    """
    df = df.rename(columns={'Comment': 'text', 'Emotion': 'label'})
    df = df.dropna()
    df['text'] = df['text'].str.replace('\t', ' ').str.replace(' +', ' ', regex=True).str.strip()
    df['label'] = df['label'].str.replace('\t', ' ').str.replace(' +', ' ', regex=True).str.strip()
    return df

df = preprocess_data(df)

indep = df['text']
dep = df['label']

labelEncoder = LabelEncoder()
dep = labelEncoder.fit_transform(dep)

# First split: Separate out a training set and a temporary set
X_train, X_temp, y_train, y_temp = train_test_split(indep, dep, test_size=0.4, random_state=42)

# Second split: Divide the temporary set into validation and test sets
X_val, X_test, y_val, y_test = train_test_split(X_temp, y_temp, test_size=0.5, random_state=42)

import torch
import torch.nn as nn

class LSTMModel(nn.Module):
    def __init__(self, max_words, max_len):
        super(LSTMModel, self).__init__()
        # NOTE: max_norm caps the norm of each embedding vector; passing max_len
        # (100) here is almost certainly unintended.
        self.embedding = nn.Embedding(num_embeddings=max_words, embedding_dim=16, max_norm=max_len)
        # dropout has no effect with num_layers=1 (PyTorch emits a warning)
        self.lstm = nn.LSTM(input_size=16, hidden_size=64, num_layers=1, batch_first=True, dropout=0.1)
        self.fc = nn.Linear(in_features=64, out_features=3)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.embedding(x)
        x, (hidden, cell) = self.lstm(x)
        x = x[:, -1, :]  # Get the last output of the sequence
        x = self.fc(x)
        x = self.softmax(x)
        return x

# Usage
max_words = 10000  # Adjust as per your vocabulary size
max_len = 100      # Adjust as per your sequence length
model = LSTMModel(max_words, max_len)

# NOTE: the next four lines depend on Keras' Tokenizer/pad_sequences, which are
# never imported here; they are superseded by the pure-PyTorch equivalents
# below, so they are kept commented out.
# tokenizer = Tokenizer(num_words=max_words, oov_token='<OOV>')
# tokenizer.fit_on_texts(X_train)
# X_train_seq = pad_sequences(tokenizer.texts_to_sequences(X_train), maxlen=max_len)
# X_text_seq = pad_sequences(tokenizer.texts_to_sequences(X_test), maxlen=max_len)

import torch
from collections import Counter
from itertools import chain

# Create a vocabulary from the training set
def create_vocab(texts, max_words, oov_token='<OOV>'):
    # Count the words
    word_counts = Counter(chain.from_iterable([text.split() for text in texts]))
    # Most common words
    most_common = word_counts.most_common(max_words - 1)  # Reserve one for OOV token
    # Create the vocabulary
    vocab = {word: idx + 1 for idx, (word, count) in enumerate(most_common)}
    vocab[oov_token] = 0  # OOV token
    return vocab

# Convert texts to sequences of indices
def texts_to_sequences(texts, vocab):
    sequences = []
    for text in texts:
        sequence = [vocab.get(word, vocab['<OOV>']) for word in text.split()]
        sequences.append(sequence)
    return sequences

# Pad sequences to a fixed length
def pad_sequences(sequences, maxlen):
    padded_sequences = torch.zeros((len(sequences), maxlen), dtype=torch.long)
    for idx, sequence in enumerate(sequences):
        if len(sequence) > maxlen:
            sequence = sequence[:maxlen]
        padded_sequences[idx, :len(sequence)] = torch.tensor(sequence)
    return padded_sequences

# Create the vocabulary
vocab = create_vocab(X_train, max_words)

# Convert texts to sequences
X_train_seq = pad_sequences(texts_to_sequences(X_train, vocab), maxlen=max_len)
X_test_seq = pad_sequences(texts_to_sequences(X_test, vocab), maxlen=max_len)

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

# Convert labels to tensors
y_train_tensor = torch.tensor(y_train)
y_test_tensor = torch.tensor(y_test)

num_epochs = 10

# Create a custom dataset
class TextDataset(Dataset):
    def __init__(self, sequences, labels):
        self.sequences = sequences
        self.labels = labels

    def __len__(self):
        return len(self.sequences)

    def __getitem__(self, idx):
        return self.sequences[idx], self.labels[idx]

# Create datasets
train_dataset = TextDataset(X_train_seq, y_train_tensor)
test_dataset = TextDataset(X_test_seq, y_test_tensor)

# Create dataloaders
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

# Define the model (this second definition supersedes the one above)
class LSTMModel(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim):
        super(LSTMModel, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True, dropout=0.1)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        x = self.embedding(x)
        x, (hidden, cell) = self.lstm(x)
        x = self.fc(x[:, -1, :])  # Use the last hidden state
        return x

# Instantiate the model
model = LSTMModel(max_words, 16, 64, 3)

# Loss and optimizer (CrossEntropyLoss expects the raw logits this model returns)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Training loop
for epoch in range(num_epochs):
    for inputs, labels in train_loader:
        # Forward pass
        outputs = model(inputs)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
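The script builds test_loader but never consumes it; a minimal held-out accuracy check (a sketch, not part of the committed file) could be appended after the training loop:

# Sketch: accuracy on the held-out test split built above.
model.eval()
correct = total = 0
with torch.no_grad():
    for inputs, labels in test_loader:
        preds = model(inputs).argmax(dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print(f'Test accuracy: {correct / total:.4f}')
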
train/newhead.py
ADDED
@@ -0,0 +1,19 @@
import torch

# Define a new classification head
class NewClassificationHead(torch.nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = torch.nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.nn.functional.relu(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x
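For reference, the head is wired in exactly this way in train2.ipynb and transfer_learning.py: instantiate the pretrained model with a 3-label config, then swap in the new classifier:

from transformers import RobertaConfig, RobertaForSequenceClassification
from newhead import NewClassificationHead

config = RobertaConfig.from_pretrained('cardiffnlp/twitter-roberta-base-emotion', num_labels=3)
model = RobertaForSequenceClassification.from_pretrained(
    'cardiffnlp/twitter-roberta-base-emotion', config=config, ignore_mismatched_sizes=True)
model.classifier = NewClassificationHead(config)  # replaces the stock RobertaClassificationHead
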
train/train2.ipynb
ADDED
@@ -0,0 +1,831 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"id": "ed9bad4c-b546-43cd-b11d-39da03e3b2fc",
|
7 |
+
"metadata": {
|
8 |
+
"execution": {
|
9 |
+
"iopub.execute_input": "2023-11-25T03:08:25.222203Z",
|
10 |
+
"iopub.status.busy": "2023-11-25T03:08:25.221934Z",
|
11 |
+
"iopub.status.idle": "2023-11-25T03:09:12.123983Z",
|
12 |
+
"shell.execute_reply": "2023-11-25T03:09:12.123211Z",
|
13 |
+
"shell.execute_reply.started": "2023-11-25T03:08:25.222184Z"
|
14 |
+
}
|
15 |
+
},
|
16 |
+
"outputs": [
|
17 |
+
{
|
18 |
+
"name": "stdout",
|
19 |
+
"output_type": "stream",
|
20 |
+
"text": [
|
21 |
+
"Looking in indexes: https://pypi.org/simple, https://download.pytorch.org/whl/cpu:\n",
|
22 |
+
"Collecting pandas\n",
|
23 |
+
" Downloading pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (12.4 MB)\n",
|
24 |
+
" |████████████████████████████████| 12.4 MB 9.3 MB/s \n",
|
25 |
+
"\u001b[?25hRequirement already satisfied: python-dateutil>=2.8.2 in /opt/pytorch/lib/python3.8/site-packages (from pandas) (2.8.2)\n",
|
26 |
+
"Requirement already satisfied: numpy>=1.20.3 in /opt/pytorch/lib/python3.8/site-packages (from pandas) (1.21.6)\n",
|
27 |
+
"Requirement already satisfied: pytz>=2020.1 in /opt/pytorch/lib/python3.8/site-packages (from pandas) (2023.3)\n",
|
28 |
+
"Collecting tzdata>=2022.1\n",
|
29 |
+
" Downloading tzdata-2023.3-py2.py3-none-any.whl (341 kB)\n",
|
30 |
+
" |████████████████████████████████| 341 kB 89.1 MB/s \n",
|
31 |
+
"\u001b[?25hRequirement already satisfied: six>=1.5 in /opt/pytorch/lib/python3.8/site-packages (from python-dateutil>=2.8.2->pandas) (1.16.0)\n",
|
32 |
+
"Installing collected packages: tzdata, pandas\n",
|
33 |
+
"Successfully installed pandas-2.0.3 tzdata-2023.3\n",
|
34 |
+
"Looking in indexes: https://pypi.org/simple, https://download.pytorch.org/whl/cpu:\n",
|
35 |
+
"Collecting scikit-learn\n",
|
36 |
+
" Downloading scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.1 MB)\n",
|
37 |
+
" |████████████████████████████████| 11.1 MB 9.1 MB/s \n",
|
38 |
+
"\u001b[?25hCollecting threadpoolctl>=2.0.0\n",
|
39 |
+
" Downloading threadpoolctl-3.2.0-py3-none-any.whl (15 kB)\n",
|
40 |
+
"Collecting joblib>=1.1.1\n",
|
41 |
+
" Downloading joblib-1.3.2-py3-none-any.whl (302 kB)\n",
|
42 |
+
" |████████████████████████████████| 302 kB 71.4 MB/s \n",
|
43 |
+
"\u001b[?25hCollecting scipy>=1.5.0\n",
|
44 |
+
" Downloading scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (34.5 MB)\n",
|
45 |
+
" |████████████████████████████████| 34.5 MB 70.3 MB/s \n",
|
46 |
+
"\u001b[?25hRequirement already satisfied: numpy<2.0,>=1.17.3 in /opt/pytorch/lib/python3.8/site-packages (from scikit-learn) (1.21.6)\n",
|
47 |
+
"Installing collected packages: threadpoolctl, scipy, joblib, scikit-learn\n",
|
48 |
+
"Successfully installed joblib-1.3.2 scikit-learn-1.3.2 scipy-1.10.1 threadpoolctl-3.2.0\n",
|
49 |
+
"Looking in indexes: https://pypi.org/simple, https://download.pytorch.org/whl/cpu:\n",
|
50 |
+
"Collecting datasets\n",
|
51 |
+
" Downloading datasets-2.15.0-py3-none-any.whl (521 kB)\n",
|
52 |
+
" |████████████████████████████████| 521 kB 8.7 MB/s \n",
|
53 |
+
"\u001b[?25hRequirement already satisfied: numpy>=1.17 in /opt/pytorch/lib/python3.8/site-packages (from datasets) (1.21.6)\n",
|
54 |
+
"Collecting fsspec[http]<=2023.10.0,>=2023.1.0\n",
|
55 |
+
" Downloading fsspec-2023.10.0-py3-none-any.whl (166 kB)\n",
|
56 |
+
" |████████████████████████████████| 166 kB 31.8 MB/s \n",
|
57 |
+
"\u001b[?25hCollecting pyarrow-hotfix\n",
|
58 |
+
" Downloading pyarrow_hotfix-0.6-py3-none-any.whl (7.9 kB)\n",
|
59 |
+
"Collecting dill<0.3.8,>=0.3.0\n",
|
60 |
+
" Downloading dill-0.3.7-py3-none-any.whl (115 kB)\n",
|
61 |
+
" |████████████████████████████████| 115 kB 29.8 MB/s \n",
|
62 |
+
"\u001b[?25hRequirement already satisfied: requests>=2.19.0 in /opt/pytorch/lib/python3.8/site-packages (from datasets) (2.31.0)\n",
|
63 |
+
"Requirement already satisfied: tqdm>=4.62.1 in /opt/pytorch/lib/python3.8/site-packages (from datasets) (4.65.0)\n",
|
64 |
+
"Requirement already satisfied: pandas in /opt/pytorch/lib/python3.8/site-packages (from datasets) (2.0.3)\n",
|
65 |
+
"Collecting huggingface-hub>=0.18.0\n",
|
66 |
+
" Downloading huggingface_hub-0.19.4-py3-none-any.whl (311 kB)\n",
|
67 |
+
" |████████████████████████████████| 311 kB 35.9 MB/s \n",
|
68 |
+
"\u001b[?25hRequirement already satisfied: packaging in /opt/pytorch/lib/python3.8/site-packages (from datasets) (23.1)\n",
|
69 |
+
"Requirement already satisfied: pyyaml>=5.1 in /opt/pytorch/lib/python3.8/site-packages (from datasets) (5.4.1)\n",
|
70 |
+
"Collecting xxhash\n",
|
71 |
+
" Downloading xxhash-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (194 kB)\n",
|
72 |
+
" |████████████████████████████████| 194 kB 51.7 MB/s \n",
|
73 |
+
"\u001b[?25hCollecting pyarrow>=8.0.0\n",
|
74 |
+
" Downloading pyarrow-14.0.1-cp38-cp38-manylinux_2_28_x86_64.whl (38.1 MB)\n",
|
75 |
+
" |████████████████████████████████| 38.1 MB 88.8 MB/s \n",
|
76 |
+
"\u001b[?25hCollecting multiprocess\n",
|
77 |
+
" Downloading multiprocess-0.70.15-py38-none-any.whl (132 kB)\n",
|
78 |
+
" |████████████████████████████████| 132 kB 63.6 MB/s \n",
|
79 |
+
"\u001b[?25hCollecting aiohttp\n",
|
80 |
+
" Downloading aiohttp-3.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.3 MB)\n",
|
81 |
+
" |████████████████████████████████| 1.3 MB 29.4 MB/s \n",
|
82 |
+
"\u001b[?25hCollecting async-timeout<5.0,>=4.0\n",
|
83 |
+
" Downloading async_timeout-4.0.3-py3-none-any.whl (5.7 kB)\n",
|
84 |
+
"Collecting multidict<7.0,>=4.5\n",
|
85 |
+
" Downloading multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (121 kB)\n",
|
86 |
+
" |████████████████████████████████| 121 kB 68.5 MB/s \n",
|
87 |
+
"\u001b[?25hCollecting aiosignal>=1.1.2\n",
|
88 |
+
" Downloading aiosignal-1.3.1-py3-none-any.whl (7.6 kB)\n",
|
89 |
+
"Collecting frozenlist>=1.1.1\n",
|
90 |
+
" Downloading frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (220 kB)\n",
|
91 |
+
" |████████████████████████████████| 220 kB 73.2 MB/s \n",
|
92 |
+
"\u001b[?25hRequirement already satisfied: attrs>=17.3.0 in /opt/pytorch/lib/python3.8/site-packages (from aiohttp->datasets) (23.1.0)\n",
|
93 |
+
"Collecting yarl<2.0,>=1.0\n",
|
94 |
+
" Downloading yarl-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (307 kB)\n",
|
95 |
+
" |████████████████████████████████| 307 kB 17.4 MB/s \n",
|
96 |
+
"\u001b[?25hRequirement already satisfied: filelock in /opt/pytorch/lib/python3.8/site-packages (from huggingface-hub>=0.18.0->datasets) (3.12.2)\n",
|
97 |
+
"Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/pytorch/lib/python3.8/site-packages (from huggingface-hub>=0.18.0->datasets) (4.7.1)\n",
|
98 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/pytorch/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (1.26.16)\n",
|
99 |
+
"Requirement already satisfied: idna<4,>=2.5 in /opt/pytorch/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (3.4)\n",
|
100 |
+
"Requirement already satisfied: certifi>=2017.4.17 in /opt/pytorch/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (2023.5.7)\n",
|
101 |
+
"Requirement already satisfied: charset-normalizer<4,>=2 in /opt/pytorch/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (3.1.0)\n",
|
102 |
+
"Requirement already satisfied: pytz>=2020.1 in /opt/pytorch/lib/python3.8/site-packages (from pandas->datasets) (2023.3)\n",
|
103 |
+
"Requirement already satisfied: python-dateutil>=2.8.2 in /opt/pytorch/lib/python3.8/site-packages (from pandas->datasets) (2.8.2)\n",
|
104 |
+
"Requirement already satisfied: tzdata>=2022.1 in /opt/pytorch/lib/python3.8/site-packages (from pandas->datasets) (2023.3)\n",
|
105 |
+
"Requirement already satisfied: six>=1.5 in /opt/pytorch/lib/python3.8/site-packages (from python-dateutil>=2.8.2->pandas->datasets) (1.16.0)\n",
|
106 |
+
"Installing collected packages: multidict, frozenlist, yarl, async-timeout, aiosignal, fsspec, dill, aiohttp, xxhash, pyarrow-hotfix, pyarrow, multiprocess, huggingface-hub, datasets\n",
|
107 |
+
"Successfully installed aiohttp-3.9.0 aiosignal-1.3.1 async-timeout-4.0.3 datasets-2.15.0 dill-0.3.7 frozenlist-1.4.0 fsspec-2023.10.0 huggingface-hub-0.19.4 multidict-6.0.4 multiprocess-0.70.15 pyarrow-14.0.1 pyarrow-hotfix-0.6 xxhash-3.4.1 yarl-1.9.3\n",
|
108 |
+
"Looking in indexes: https://pypi.org/simple, https://download.pytorch.org/whl/cpu:\n",
|
109 |
+
"Collecting transformers\n",
|
110 |
+
" Downloading transformers-4.35.2-py3-none-any.whl (7.9 MB)\n",
|
111 |
+
" |████████████████████████████████| 7.9 MB 12.9 MB/s \n",
|
112 |
+
"\u001b[?25hRequirement already satisfied: numpy>=1.17 in /opt/pytorch/lib/python3.8/site-packages (from transformers) (1.21.6)\n",
|
113 |
+
"Requirement already satisfied: pyyaml>=5.1 in /opt/pytorch/lib/python3.8/site-packages (from transformers) (5.4.1)\n",
|
114 |
+
"Collecting regex!=2019.12.17\n",
|
115 |
+
" Downloading regex-2023.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (776 kB)\n",
|
116 |
+
" |████████████████████████████████| 776 kB 48.5 MB/s \n",
|
117 |
+
"\u001b[?25hRequirement already satisfied: tqdm>=4.27 in /opt/pytorch/lib/python3.8/site-packages (from transformers) (4.65.0)\n",
|
118 |
+
"Collecting safetensors>=0.3.1\n",
|
119 |
+
" Downloading safetensors-0.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.3 MB)\n",
|
120 |
+
" |████████████████████████████████| 1.3 MB 87.5 MB/s \n",
|
121 |
+
"\u001b[?25hRequirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /opt/pytorch/lib/python3.8/site-packages (from transformers) (0.19.4)\n",
|
122 |
+
"Requirement already satisfied: packaging>=20.0 in /opt/pytorch/lib/python3.8/site-packages (from transformers) (23.1)\n",
|
123 |
+
"Collecting tokenizers<0.19,>=0.14\n",
|
124 |
+
" Downloading tokenizers-0.15.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.8 MB)\n",
|
125 |
+
" |████████████████████████████████| 3.8 MB 123.4 MB/s \n",
|
126 |
+
"\u001b[?25hRequirement already satisfied: requests in /opt/pytorch/lib/python3.8/site-packages (from transformers) (2.31.0)\n",
|
127 |
+
"Requirement already satisfied: filelock in /opt/pytorch/lib/python3.8/site-packages (from transformers) (3.12.2)\n",
|
128 |
+
"Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/pytorch/lib/python3.8/site-packages (from huggingface-hub<1.0,>=0.16.4->transformers) (4.7.1)\n",
|
129 |
+
"Requirement already satisfied: fsspec>=2023.5.0 in /opt/pytorch/lib/python3.8/site-packages (from huggingface-hub<1.0,>=0.16.4->transformers) (2023.10.0)\n",
|
130 |
+
"Requirement already satisfied: certifi>=2017.4.17 in /opt/pytorch/lib/python3.8/site-packages (from requests->transformers) (2023.5.7)\n",
|
131 |
+
"Requirement already satisfied: charset-normalizer<4,>=2 in /opt/pytorch/lib/python3.8/site-packages (from requests->transformers) (3.1.0)\n",
|
132 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/pytorch/lib/python3.8/site-packages (from requests->transformers) (1.26.16)\n",
|
133 |
+
"Requirement already satisfied: idna<4,>=2.5 in /opt/pytorch/lib/python3.8/site-packages (from requests->transformers) (3.4)\n",
|
134 |
+
"Installing collected packages: tokenizers, safetensors, regex, transformers\n",
|
135 |
+
"Successfully installed regex-2023.10.3 safetensors-0.4.0 tokenizers-0.15.0 transformers-4.35.2\n",
|
136 |
+
"Looking in indexes: https://pypi.org/simple, https://download.pytorch.org/whl/cpu:\n",
|
137 |
+
"Requirement already satisfied: transformers[torch] in /opt/pytorch/lib/python3.8/site-packages (4.35.2)\n",
|
138 |
+
"Requirement already satisfied: tokenizers<0.19,>=0.14 in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (0.15.0)\n",
|
139 |
+
"Requirement already satisfied: numpy>=1.17 in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (1.21.6)\n",
|
140 |
+
"Requirement already satisfied: requests in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (2.31.0)\n",
|
141 |
+
"Requirement already satisfied: filelock in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (3.12.2)\n",
|
142 |
+
"Requirement already satisfied: tqdm>=4.27 in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (4.65.0)\n",
|
143 |
+
"Requirement already satisfied: regex!=2019.12.17 in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (2023.10.3)\n",
|
144 |
+
"Requirement already satisfied: packaging>=20.0 in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (23.1)\n",
|
145 |
+
"Requirement already satisfied: pyyaml>=5.1 in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (5.4.1)\n",
|
146 |
+
"Requirement already satisfied: safetensors>=0.3.1 in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (0.4.0)\n",
|
147 |
+
"Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (0.19.4)\n",
|
148 |
+
"Collecting accelerate>=0.20.3\n",
|
149 |
+
" Downloading accelerate-0.24.1-py3-none-any.whl (261 kB)\n",
|
150 |
+
" |████████████████████████████████| 261 kB 9.6 MB/s \n",
|
151 |
+
"\u001b[?25hRequirement already satisfied: torch!=1.12.0,>=1.10 in /opt/pytorch/lib/python3.8/site-packages (from transformers[torch]) (2.0.1+cpu)\n",
|
152 |
+
"Requirement already satisfied: psutil in /opt/pytorch/lib/python3.8/site-packages (from accelerate>=0.20.3->transformers[torch]) (5.9.5)\n",
|
153 |
+
"Requirement already satisfied: fsspec>=2023.5.0 in /opt/pytorch/lib/python3.8/site-packages (from huggingface-hub<1.0,>=0.16.4->transformers[torch]) (2023.10.0)\n",
|
154 |
+
"Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/pytorch/lib/python3.8/site-packages (from huggingface-hub<1.0,>=0.16.4->transformers[torch]) (4.7.1)\n",
|
155 |
+
"Requirement already satisfied: sympy in /opt/pytorch/lib/python3.8/site-packages (from torch!=1.12.0,>=1.10->transformers[torch]) (1.12)\n",
|
156 |
+
"Requirement already satisfied: networkx in /opt/pytorch/lib/python3.8/site-packages (from torch!=1.12.0,>=1.10->transformers[torch]) (3.1)\n",
|
157 |
+
"Requirement already satisfied: jinja2 in /opt/pytorch/lib/python3.8/site-packages (from torch!=1.12.0,>=1.10->transformers[torch]) (3.1.2)\n",
|
158 |
+
"Requirement already satisfied: charset-normalizer<4,>=2 in /opt/pytorch/lib/python3.8/site-packages (from requests->transformers[torch]) (3.1.0)\n",
|
159 |
+
"Requirement already satisfied: certifi>=2017.4.17 in /opt/pytorch/lib/python3.8/site-packages (from requests->transformers[torch]) (2023.5.7)\n",
|
160 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/pytorch/lib/python3.8/site-packages (from requests->transformers[torch]) (1.26.16)\n",
|
161 |
+
"Requirement already satisfied: idna<4,>=2.5 in /opt/pytorch/lib/python3.8/site-packages (from requests->transformers[torch]) (3.4)\n",
|
162 |
+
"Requirement already satisfied: MarkupSafe>=2.0 in /opt/pytorch/lib/python3.8/site-packages (from jinja2->torch!=1.12.0,>=1.10->transformers[torch]) (2.1.3)\n",
|
163 |
+
"Requirement already satisfied: mpmath>=0.19 in /opt/pytorch/lib/python3.8/site-packages (from sympy->torch!=1.12.0,>=1.10->transformers[torch]) (1.3.0)\n",
|
164 |
+
"Installing collected packages: accelerate\n",
|
165 |
+
"Successfully installed accelerate-0.24.1\n",
|
166 |
+
"Looking in indexes: https://pypi.org/simple, https://download.pytorch.org/whl/cpu:\n",
|
167 |
+
"Requirement already satisfied: accelerate in /opt/pytorch/lib/python3.8/site-packages (0.24.1)\n",
|
168 |
+
"Requirement already satisfied: psutil in /opt/pytorch/lib/python3.8/site-packages (from accelerate) (5.9.5)\n",
|
169 |
+
"Requirement already satisfied: huggingface-hub in /opt/pytorch/lib/python3.8/site-packages (from accelerate) (0.19.4)\n",
|
170 |
+
"Requirement already satisfied: torch>=1.10.0 in /opt/pytorch/lib/python3.8/site-packages (from accelerate) (2.0.1+cpu)\n",
|
171 |
+
"Requirement already satisfied: packaging>=20.0 in /opt/pytorch/lib/python3.8/site-packages (from accelerate) (23.1)\n",
|
172 |
+
"Requirement already satisfied: pyyaml in /opt/pytorch/lib/python3.8/site-packages (from accelerate) (5.4.1)\n",
|
173 |
+
"Requirement already satisfied: numpy>=1.17 in /opt/pytorch/lib/python3.8/site-packages (from accelerate) (1.21.6)\n",
|
174 |
+
"Requirement already satisfied: sympy in /opt/pytorch/lib/python3.8/site-packages (from torch>=1.10.0->accelerate) (1.12)\n",
|
175 |
+
"Requirement already satisfied: networkx in /opt/pytorch/lib/python3.8/site-packages (from torch>=1.10.0->accelerate) (3.1)\n",
|
176 |
+
"Requirement already satisfied: jinja2 in /opt/pytorch/lib/python3.8/site-packages (from torch>=1.10.0->accelerate) (3.1.2)\n",
|
177 |
+
"Requirement already satisfied: filelock in /opt/pytorch/lib/python3.8/site-packages (from torch>=1.10.0->accelerate) (3.12.2)\n",
|
178 |
+
"Requirement already satisfied: typing-extensions in /opt/pytorch/lib/python3.8/site-packages (from torch>=1.10.0->accelerate) (4.7.1)\n",
|
179 |
+
"Requirement already satisfied: requests in /opt/pytorch/lib/python3.8/site-packages (from huggingface-hub->accelerate) (2.31.0)\n",
|
180 |
+
"Requirement already satisfied: fsspec>=2023.5.0 in /opt/pytorch/lib/python3.8/site-packages (from huggingface-hub->accelerate) (2023.10.0)\n",
|
181 |
+
"Requirement already satisfied: tqdm>=4.42.1 in /opt/pytorch/lib/python3.8/site-packages (from huggingface-hub->accelerate) (4.65.0)\n",
|
182 |
+
"Requirement already satisfied: MarkupSafe>=2.0 in /opt/pytorch/lib/python3.8/site-packages (from jinja2->torch>=1.10.0->accelerate) (2.1.3)\n",
|
183 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/pytorch/lib/python3.8/site-packages (from requests->huggingface-hub->accelerate) (1.26.16)\n",
|
184 |
+
"Requirement already satisfied: idna<4,>=2.5 in /opt/pytorch/lib/python3.8/site-packages (from requests->huggingface-hub->accelerate) (3.4)\n",
|
185 |
+
"Requirement already satisfied: charset-normalizer<4,>=2 in /opt/pytorch/lib/python3.8/site-packages (from requests->huggingface-hub->accelerate) (3.1.0)\n",
|
186 |
+
"Requirement already satisfied: certifi>=2017.4.17 in /opt/pytorch/lib/python3.8/site-packages (from requests->huggingface-hub->accelerate) (2023.5.7)\n",
|
187 |
+
"Requirement already satisfied: mpmath>=0.19 in /opt/pytorch/lib/python3.8/site-packages (from sympy->torch>=1.10.0->accelerate) (1.3.0)\n"
|
188 |
+
]
|
189 |
+
}
|
190 |
+
],
|
191 |
+
"source": [
|
192 |
+
"! pip install pandas\n",
|
193 |
+
"! pip install scikit-learn\n",
|
194 |
+
"! pip install datasets\n",
|
195 |
+
"! pip install transformers\n",
|
196 |
+
"! pip install transformers[torch]\n",
|
197 |
+
"! pip install accelerate -U"
|
198 |
+
]
|
199 |
+
},
|
200 |
+
{
|
201 |
+
"cell_type": "code",
|
202 |
+
"execution_count": 2,
|
203 |
+
"id": "fed20656-1f48-40d6-93e2-53aec7de522e",
|
204 |
+
"metadata": {
|
205 |
+
"execution": {
|
206 |
+
"iopub.execute_input": "2023-11-25T03:09:12.125910Z",
|
207 |
+
"iopub.status.busy": "2023-11-25T03:09:12.125577Z",
|
208 |
+
"iopub.status.idle": "2023-11-25T03:09:18.835267Z",
|
209 |
+
"shell.execute_reply": "2023-11-25T03:09:18.834607Z",
|
210 |
+
"shell.execute_reply.started": "2023-11-25T03:09:12.125891Z"
|
211 |
+
}
|
212 |
+
},
|
213 |
+
"outputs": [
|
214 |
+
{
|
215 |
+
"data": {
|
216 |
+
"application/vnd.jupyter.widget-view+json": {
|
217 |
+
"model_id": "bcd589e60cf34ea9a3336f439162493e",
|
218 |
+
"version_major": 2,
|
219 |
+
"version_minor": 0
|
220 |
+
},
|
221 |
+
"text/plain": [
|
222 |
+
"config.json: 0%| | 0.00/768 [00:00<?, ?B/s]"
|
223 |
+
]
|
224 |
+
},
|
225 |
+
"metadata": {},
|
226 |
+
"output_type": "display_data"
|
227 |
+
},
|
228 |
+
{
|
229 |
+
"data": {
|
230 |
+
"application/vnd.jupyter.widget-view+json": {
|
231 |
+
"model_id": "8e15325a648c4600b86d5bf7843f4e17",
|
232 |
+
"version_major": 2,
|
233 |
+
"version_minor": 0
|
234 |
+
},
|
235 |
+
"text/plain": [
|
236 |
+
"vocab.json: 0%| | 0.00/899k [00:00<?, ?B/s]"
|
237 |
+
]
|
238 |
+
},
|
239 |
+
"metadata": {},
|
240 |
+
"output_type": "display_data"
|
241 |
+
},
|
242 |
+
{
|
243 |
+
"data": {
|
244 |
+
"application/vnd.jupyter.widget-view+json": {
|
245 |
+
"model_id": "843081a871244baaac981a9314fbe9ce",
|
246 |
+
"version_major": 2,
|
247 |
+
"version_minor": 0
|
248 |
+
},
|
249 |
+
"text/plain": [
|
250 |
+
"merges.txt: 0%| | 0.00/456k [00:00<?, ?B/s]"
|
251 |
+
]
|
252 |
+
},
|
253 |
+
"metadata": {},
|
254 |
+
"output_type": "display_data"
|
255 |
+
},
|
256 |
+
{
|
257 |
+
"data": {
|
258 |
+
"application/vnd.jupyter.widget-view+json": {
|
259 |
+
"model_id": "73b439c39e7e45abb4647f7b2a234cdf",
|
260 |
+
"version_major": 2,
|
261 |
+
"version_minor": 0
|
262 |
+
},
|
263 |
+
"text/plain": [
|
264 |
+
"special_tokens_map.json: 0%| | 0.00/150 [00:00<?, ?B/s]"
|
265 |
+
]
|
266 |
+
},
|
267 |
+
"metadata": {},
|
268 |
+
"output_type": "display_data"
|
269 |
+
},
|
270 |
+
{
|
271 |
+
"data": {
|
272 |
+
"application/vnd.jupyter.widget-view+json": {
|
273 |
+
"model_id": "442db42ec20b42e499a53724fe071386",
|
274 |
+
"version_major": 2,
|
275 |
+
"version_minor": 0
|
276 |
+
},
|
277 |
+
"text/plain": [
|
278 |
+
"pytorch_model.bin: 0%| | 0.00/499M [00:00<?, ?B/s]"
|
279 |
+
]
|
280 |
+
},
|
281 |
+
"metadata": {},
|
282 |
+
"output_type": "display_data"
|
283 |
+
},
|
284 |
+
{
|
285 |
+
"name": "stderr",
|
286 |
+
"output_type": "stream",
|
287 |
+
"text": [
|
288 |
+
"Some weights of RobertaForSequenceClassification were not initialized from the model checkpoint at cardiffnlp/twitter-roberta-base-emotion and are newly initialized because the shapes did not match:\n",
|
289 |
+
"- classifier.out_proj.weight: found shape torch.Size([4, 768]) in the checkpoint and torch.Size([3, 768]) in the model instantiated\n",
|
290 |
+
"- classifier.out_proj.bias: found shape torch.Size([4]) in the checkpoint and torch.Size([3]) in the model instantiated\n",
|
291 |
+
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
|
292 |
+
]
|
293 |
+
},
|
294 |
+
{
|
295 |
+
"data": {
|
296 |
+
"application/vnd.jupyter.widget-view+json": {
|
297 |
+
"model_id": "3385afba67e74f2a9346b0270da2b4c9",
|
298 |
+
"version_major": 2,
|
299 |
+
"version_minor": 0
|
300 |
+
},
|
301 |
+
"text/plain": [
|
302 |
+
"Map: 0%| | 0/4749 [00:00<?, ? examples/s]"
|
303 |
+
]
|
304 |
+
},
|
305 |
+
"metadata": {},
|
306 |
+
"output_type": "display_data"
|
307 |
+
},
|
308 |
+
{
|
309 |
+
"name": "stderr",
|
310 |
+
"output_type": "stream",
|
311 |
+
"text": [
|
312 |
+
"Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no padding.\n",
|
313 |
+
"Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
|
314 |
+
]
|
315 |
+
},
|
316 |
+
{
|
317 |
+
"data": {
|
318 |
+
"application/vnd.jupyter.widget-view+json": {
|
319 |
+
"model_id": "73fc1be3f9814543befa7cc8024957d5",
|
320 |
+
"version_major": 2,
|
321 |
+
"version_minor": 0
|
322 |
+
},
|
323 |
+
"text/plain": [
|
324 |
+
"Map: 0%| | 0/1188 [00:00<?, ? examples/s]"
|
325 |
+
]
|
326 |
+
},
|
327 |
+
"metadata": {},
|
328 |
+
"output_type": "display_data"
|
329 |
+
}
|
330 |
+
],
|
331 |
+
"source": [
|
332 |
+
"import torch\n",
|
333 |
+
"import pandas as pd\n",
|
334 |
+
"\n",
|
335 |
+
"from sklearn.preprocessing import LabelEncoder\n",
|
336 |
+
"from datasets import Dataset\n",
|
337 |
+
"from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer\n",
|
338 |
+
"from transformers import RobertaConfig, RobertaForSequenceClassification\n",
|
339 |
+
"from transformers import AdamW\n",
|
340 |
+
"\n",
|
341 |
+
"# Define a new classification head\n",
|
342 |
+
"class NewClassificationHead(torch.nn.Module):\n",
|
343 |
+
" def __init__(self, config):\n",
|
344 |
+
" super().__init__()\n",
|
345 |
+
" self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)\n",
|
346 |
+
" self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)\n",
|
347 |
+
" self.out_proj = torch.nn.Linear(config.hidden_size, config.num_labels)\n",
|
348 |
+
"\n",
|
349 |
+
" def forward(self, features, **kwargs):\n",
|
350 |
+
" x = features[:, 0, :] # take <s> token (equiv. to [CLS])\n",
|
351 |
+
" x = self.dropout(x)\n",
|
352 |
+
" x = self.dense(x)\n",
|
353 |
+
" x = torch.nn.functional.relu(x)\n",
|
354 |
+
" x = self.dropout(x)\n",
|
355 |
+
" x = self.out_proj(x)\n",
|
356 |
+
" return x\n",
|
357 |
+
"\n",
|
358 |
+
"def preprocess_data(df):\n",
|
359 |
+
" ## rename columns\n",
|
360 |
+
" df = df.rename(columns={'Comment': 'text', 'Emotion': 'label'})\n",
|
361 |
+
"\n",
|
362 |
+
" ## remove rows with missing values\n",
|
363 |
+
" df = df.dropna()\n",
|
364 |
+
" df['text'] = df['text'].str.replace('\\t', ' ') # Remove extra spaces - this line replaces any occurrence of two or more spaces with a single spac\n",
|
365 |
+
" df['text'] = df['text'].str.replace(' +', ' ', regex=True) # Remove extra spaces - this line replaces any occurrence of two or more spaces with a single space\n",
|
366 |
+
" df['text'] = df['text'].str.strip() # Remove extra spaces - this line replaces any occurrence of two or more spaces with a single space\n",
|
367 |
+
"\n",
|
368 |
+
" df['label'] = df['label'].str.replace('\\t', ' ') # Remove extra spaces - this line replaces any occurrence of two or more spaces with a single spac\n",
|
369 |
+
" df['label'] = df['label'].str.replace(' +', ' ', regex=True) # Remove extra spaces - this line replaces any occurrence of two or more spaces with a single space\n",
|
370 |
+
" df['label'] = df['label'].str.strip() # Remove extra spaces - this line replaces any occurrence of two or more spaces with a single space \n",
|
371 |
+
"\n",
|
372 |
+
" return df\n",
|
373 |
+
"\n",
|
374 |
+
"def encode_label(df):\n",
|
375 |
+
" le = LabelEncoder()\n",
|
376 |
+
" df['label'] = le.fit_transform(df['label'])\n",
|
377 |
+
" label_mapping = {label: index for index, label in enumerate(le.classes_)}\n",
|
378 |
+
" df['label'].map(label_mapping)\n",
|
379 |
+
" return df\n",
|
380 |
+
"\n",
|
381 |
+
"def generate_dataset(df, test_size=0.2):\n",
|
382 |
+
" \"\"\"\n",
|
383 |
+
" Convert to transformers dataset and split into train and test\n",
|
384 |
+
" \"\"\"\n",
|
385 |
+
" dataset = Dataset.from_pandas(df)\n",
|
386 |
+
" ds = dataset.train_test_split(test_size=test_size)\n",
|
387 |
+
" return ds\n",
|
388 |
+
"\n",
|
389 |
+
"def tokenize(batch):\n",
|
390 |
+
" return tokenizer(batch['text'], padding='max_length', truncation=True)\n",
|
391 |
+
"\n",
|
392 |
+
"\n",
|
393 |
+
"def compute_metrics(pred):\n",
|
394 |
+
" from sklearn.metrics import accuracy_score, precision_recall_fscore_support\n",
|
395 |
+
" labels = pred.label_ids\n",
|
396 |
+
" preds = pred.predictions.argmax(-1)\n",
|
397 |
+
" precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')\n",
|
398 |
+
" acc = accuracy_score(labels, preds)\n",
|
399 |
+
" return {\n",
|
400 |
+
" 'accuracy': acc,\n",
|
401 |
+
" 'f1': f1,\n",
|
402 |
+
" 'precision': precision,\n",
|
403 |
+
" 'recall': recall\n",
|
404 |
+
" }\n",
|
405 |
+
"\n",
|
406 |
+
"# Define model and training arguments\n",
|
407 |
+
"model_name = \"cardiffnlp/twitter-roberta-base-emotion\"\n",
|
408 |
+
"tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
|
409 |
+
"config = RobertaConfig.from_pretrained(model_name, num_labels=3) # Set the number of labels to 3\n",
|
410 |
+
"model = RobertaForSequenceClassification.from_pretrained(model_name, config=config, ignore_mismatched_sizes=True)\n",
|
411 |
+
"model.classifier = NewClassificationHead(config)\n",
|
412 |
+
"\n",
|
413 |
+
"df = pd.read_csv('Emotion_classify_Data.csv')\n",
|
414 |
+
"df = preprocess_data(df)\n",
|
415 |
+
"df = encode_label(df)\n",
|
416 |
+
"ds = generate_dataset(df)\n",
|
417 |
+
"ds = ds.map(tokenize, batched=True)\n",
|
418 |
+
"\n"
|
419 |
+
]
|
420 |
+
},
|
421 |
+
{
|
422 |
+
"cell_type": "code",
|
423 |
+
"execution_count": 3,
|
424 |
+
"id": "f3dd5334-f8b4-4f0d-b696-939f2d5174ba",
|
425 |
+
"metadata": {
|
426 |
+
"execution": {
|
427 |
+
"iopub.execute_input": "2023-11-25T03:09:18.836520Z",
|
428 |
+
"iopub.status.busy": "2023-11-25T03:09:18.836241Z",
|
429 |
+
"iopub.status.idle": "2023-11-25T03:09:18.845692Z",
|
430 |
+
"shell.execute_reply": "2023-11-25T03:09:18.844909Z",
|
431 |
+
"shell.execute_reply.started": "2023-11-25T03:09:18.836502Z"
|
432 |
+
}
|
433 |
+
},
|
434 |
+
"outputs": [
|
435 |
+
{
|
436 |
+
"name": "stderr",
|
437 |
+
"output_type": "stream",
|
438 |
+
"text": [
|
439 |
+
"/opt/pytorch/lib/python3.8/site-packages/transformers/optimization.py:411: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
|
440 |
+
" warnings.warn(\n"
|
441 |
+
]
|
442 |
+
}
|
443 |
+
],
|
444 |
+
"source": [
|
445 |
+
"# Freeze all layers first\n",
|
446 |
+
"for param in model.parameters():\n",
|
447 |
+
" param.requires_grad = False\n",
|
448 |
+
"\n",
|
449 |
+
"# Unfreeze the classifier layer\n",
|
450 |
+
"for param in model.classifier.parameters():\n",
|
451 |
+
" param.requires_grad = True\n",
|
452 |
+
"\n",
|
453 |
+
"\n",
|
454 |
+
"# Define different learning rates\n",
|
455 |
+
"head_lr = 3e-4 # Higher learning rate for the head\n",
|
456 |
+
"base_lr = head_lr/5 # Lower learning rate for the base layers\n",
|
457 |
+
"\n",
|
458 |
+
"# Group parameters and set learning rates\n",
|
459 |
+
"optimizer_grouped_parameters = [\n",
|
460 |
+
" {'params': model.classifier.parameters(), 'lr': head_lr},\n",
|
461 |
+
" {'params': [p for n, p in model.named_parameters() if 'classifier' not in n], 'lr': base_lr}\n",
|
462 |
+
"]\n",
|
463 |
+
"\n",
|
464 |
+
"optimizer = AdamW(optimizer_grouped_parameters)"
|
465 |
+
]
|
466 |
+
},
|
467 |
+
{
|
468 |
+
"cell_type": "code",
|
469 |
+
"execution_count": 4,
|
470 |
+
"id": "882c5342-a82a-4e5a-b0ad-eaaa4978831f",
|
471 |
+
"metadata": {
|
472 |
+
"execution": {
|
473 |
+
"iopub.execute_input": "2023-11-25T03:09:18.847637Z",
|
474 |
+
"iopub.status.busy": "2023-11-25T03:09:18.847285Z",
|
475 |
+
"iopub.status.idle": "2023-11-25T03:09:18.862687Z",
|
476 |
+
"shell.execute_reply": "2023-11-25T03:09:18.862118Z",
|
477 |
+
"shell.execute_reply.started": "2023-11-25T03:09:18.847619Z"
|
478 |
+
}
|
479 |
+
},
|
480 |
+
"outputs": [
|
481 |
+
{
|
482 |
+
"name": "stderr",
|
483 |
+
"output_type": "stream",
|
484 |
+
"text": [
|
485 |
+
"Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n"
|
486 |
+
]
|
487 |
+
}
|
488 |
+
],
|
489 |
+
"source": [
|
490 |
+
"training_args = TrainingArguments(\n",
|
491 |
+
" output_dir='./results', \n",
|
492 |
+
" num_train_epochs=10, \n",
|
493 |
+
" per_device_train_batch_size=16, \n",
|
494 |
+
" per_device_eval_batch_size=64, \n",
|
495 |
+
" warmup_steps=500, \n",
|
496 |
+
" weight_decay=0.01, \n",
|
497 |
+
" logging_dir='./logs',\n",
|
498 |
+
" save_strategy=\"no\",\n",
|
499 |
+
")\n",
|
500 |
+
"\n",
|
501 |
+
"trainer = Trainer(\n",
|
502 |
+
" model=model,\n",
|
503 |
+
" args=training_args,\n",
|
504 |
+
" train_dataset=ds['train'],\n",
|
505 |
+
" eval_dataset=ds['test'],\n",
|
506 |
+
" tokenizer=tokenizer,\n",
|
507 |
+
" optimizers=(optimizer, None), # No need to pass a learning rate scheduler if you're managing learning rates manually,\n",
|
508 |
+
" compute_metrics=compute_metrics\n",
|
509 |
+
")"
|
510 |
+
]
|
511 |
+
},
|
512 |
+
{
|
513 |
+
"cell_type": "code",
|
514 |
+
"execution_count": 5,
|
515 |
+
"id": "19f8b2f1-d03b-42c2-a0a1-2475f2dfde37",
|
516 |
+
"metadata": {
|
517 |
+
"execution": {
|
518 |
+
"iopub.execute_input": "2023-11-25T03:09:18.864992Z",
|
519 |
+
"iopub.status.busy": "2023-11-25T03:09:18.864819Z",
|
520 |
+
"iopub.status.idle": "2023-11-25T03:17:56.086914Z",
|
521 |
+
"shell.execute_reply": "2023-11-25T03:17:56.085959Z",
|
522 |
+
"shell.execute_reply.started": "2023-11-25T03:09:18.864977Z"
|
523 |
+
}
|
524 |
+
},
|
525 |
+
"outputs": [
|
526 |
+
{
|
527 |
+
"name": "stderr",
|
528 |
+
"output_type": "stream",
|
529 |
+
"text": [
|
530 |
+
"You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n"
|
531 |
+
]
|
532 |
+
},
|
533 |
+
{
|
534 |
+
"data": {
|
535 |
+
"text/html": [
|
536 |
+
"\n",
|
537 |
+
" <div>\n",
|
538 |
+
" \n",
|
539 |
+
" <progress value='2970' max='2970' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
|
540 |
+
" [2970/2970 08:36, Epoch 10/10]\n",
|
541 |
+
" </div>\n",
|
542 |
+
" <table border=\"1\" class=\"dataframe\">\n",
|
543 |
+
" <thead>\n",
|
544 |
+
" <tr style=\"text-align: left;\">\n",
|
545 |
+
" <th>Step</th>\n",
|
546 |
+
" <th>Training Loss</th>\n",
|
547 |
+
" </tr>\n",
|
548 |
+
" </thead>\n",
|
549 |
+
" <tbody>\n",
|
550 |
+
" <tr>\n",
|
551 |
+
" <td>500</td>\n",
|
552 |
+
" <td>0.678100</td>\n",
|
553 |
+
" </tr>\n",
|
554 |
+
" <tr>\n",
|
555 |
+
" <td>1000</td>\n",
|
556 |
+
" <td>0.537700</td>\n",
|
557 |
+
" </tr>\n",
|
558 |
+
" <tr>\n",
|
559 |
+
" <td>1500</td>\n",
|
560 |
+
" <td>0.514900</td>\n",
|
561 |
+
" </tr>\n",
|
562 |
+
" <tr>\n",
|
563 |
+
" <td>2000</td>\n",
|
564 |
+
" <td>0.474500</td>\n",
|
565 |
+
" </tr>\n",
|
566 |
+
" <tr>\n",
|
567 |
+
" <td>2500</td>\n",
|
568 |
+
" <td>0.450500</td>\n",
|
569 |
+
" </tr>\n",
|
570 |
+
" </tbody>\n",
|
571 |
+
"</table><p>"
|
572 |
+
],
|
573 |
+
"text/plain": [
|
574 |
+
"<IPython.core.display.HTML object>"
|
575 |
+
]
|
576 |
+
},
|
577 |
+
"metadata": {},
|
578 |
+
"output_type": "display_data"
|
579 |
+
},
|
580 |
+
{
|
581 |
+
"data": {
|
582 |
+
"text/plain": [
|
583 |
+
"TrainOutput(global_step=2970, training_loss=0.516797270598235, metrics={'train_runtime': 517.0884, 'train_samples_per_second': 91.841, 'train_steps_per_second': 5.744, 'total_flos': 1128914327325078.0, 'train_loss': 0.516797270598235, 'epoch': 10.0})"
|
584 |
+
]
|
585 |
+
},
|
586 |
+
"execution_count": 5,
|
587 |
+
"metadata": {},
|
588 |
+
"output_type": "execute_result"
|
589 |
+
}
|
590 |
+
],
|
591 |
+
"source": [
|
592 |
+
" trainer.train()"
|
593 |
+
]
|
594 |
+
},
|
595 |
+
{
|
596 |
+
"cell_type": "code",
|
597 |
+
"execution_count": 6,
|
598 |
+
"id": "208a5c13-31c7-4a03-b9a8-18146a265f73",
|
599 |
+
"metadata": {
|
600 |
+
"execution": {
|
601 |
+
"iopub.execute_input": "2023-11-25T03:17:56.091526Z",
|
602 |
+
"iopub.status.busy": "2023-11-25T03:17:56.091204Z",
|
603 |
+
"iopub.status.idle": "2023-11-25T03:18:09.179984Z",
|
604 |
+
"shell.execute_reply": "2023-11-25T03:18:09.179279Z",
|
605 |
+
"shell.execute_reply.started": "2023-11-25T03:17:56.091497Z"
|
606 |
+
}
|
607 |
+
},
|
608 |
+
"outputs": [
|
609 |
+
{
|
610 |
+
"data": {
|
611 |
+
"text/html": [
|
612 |
+
"\n",
|
613 |
+
" <div>\n",
|
614 |
+
" \n",
|
615 |
+
" <progress value='38' max='19' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
|
616 |
+
" [19/19 18:00]\n",
|
617 |
+
" </div>\n",
|
618 |
+
" "
|
619 |
+
],
|
620 |
+
"text/plain": [
|
621 |
+
"<IPython.core.display.HTML object>"
|
622 |
+
]
|
623 |
+
},
|
624 |
+
"metadata": {},
|
625 |
+
"output_type": "display_data"
|
626 |
+
},
|
627 |
+
{
|
628 |
+
"data": {
|
629 |
+
"text/plain": [
|
630 |
+
"{'eval_loss': 0.4612630307674408,\n",
|
631 |
+
" 'eval_accuracy': 0.8181818181818182,\n",
|
632 |
+
" 'eval_f1': 0.8180812962482343,\n",
|
633 |
+
" 'eval_precision': 0.8186808374254468,\n",
|
634 |
+
" 'eval_recall': 0.8181818181818182,\n",
|
635 |
+
" 'eval_runtime': 13.0807,\n",
|
636 |
+
" 'eval_samples_per_second': 90.821,\n",
|
637 |
+
" 'eval_steps_per_second': 1.453,\n",
|
638 |
+
" 'epoch': 10.0}"
|
639 |
+
]
|
640 |
+
},
|
641 |
+
"execution_count": 6,
|
642 |
+
"metadata": {},
|
643 |
+
"output_type": "execute_result"
|
644 |
+
}
|
645 |
+
],
|
646 |
+
"source": [
|
647 |
+
"trainer.evaluate()"
|
648 |
+
]
|
649 |
+
},
|
650 |
+
{
|
651 |
+
"cell_type": "code",
|
652 |
+
"execution_count": 7,
|
653 |
+
"id": "d90661bf-e22b-4dbf-980b-1c8ff69f625c",
|
654 |
+
"metadata": {
|
655 |
+
"execution": {
|
656 |
+
"iopub.execute_input": "2023-11-25T03:18:09.184420Z",
|
657 |
+
"iopub.status.busy": "2023-11-25T03:18:09.184230Z",
|
658 |
+
"iopub.status.idle": "2023-11-25T03:35:26.344692Z",
|
659 |
+
"shell.execute_reply": "2023-11-25T03:35:26.344122Z",
|
660 |
+
"shell.execute_reply.started": "2023-11-25T03:18:09.184402Z"
|
661 |
+
}
|
662 |
+
},
|
663 |
+
"outputs": [
|
664 |
+
{
|
665 |
+
"name": "stderr",
|
666 |
+
"output_type": "stream",
|
667 |
+
"text": [
|
668 |
+
"/opt/pytorch/lib/python3.8/site-packages/transformers/optimization.py:411: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
|
669 |
+
" warnings.warn(\n"
|
670 |
+
]
|
671 |
+
},
|
672 |
+
{
|
673 |
+
"data": {
|
674 |
+
"text/html": [
|
675 |
+
"\n",
|
676 |
+
" <div>\n",
|
677 |
+
" \n",
|
678 |
+
" <progress value='1485' max='1485' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
|
679 |
+
" [1485/1485 17:16, Epoch 5/5]\n",
|
680 |
+
" </div>\n",
|
681 |
+
" <table border=\"1\" class=\"dataframe\">\n",
|
682 |
+
" <thead>\n",
|
683 |
+
" <tr style=\"text-align: left;\">\n",
|
684 |
+
" <th>Step</th>\n",
|
685 |
+
" <th>Training Loss</th>\n",
|
686 |
+
" </tr>\n",
|
687 |
+
" </thead>\n",
|
688 |
+
" <tbody>\n",
|
689 |
+
" <tr>\n",
|
690 |
+
" <td>500</td>\n",
|
691 |
+
" <td>0.253200</td>\n",
|
692 |
+
" </tr>\n",
|
693 |
+
" <tr>\n",
|
694 |
+
" <td>1000</td>\n",
|
695 |
+
" <td>0.105000</td>\n",
|
696 |
+
" </tr>\n",
|
697 |
+
" </tbody>\n",
|
698 |
+
"</table><p>"
|
699 |
+
],
|
700 |
+
"text/plain": [
|
701 |
+
"<IPython.core.display.HTML object>"
|
702 |
+
]
|
703 |
+
},
|
704 |
+
"metadata": {},
|
705 |
+
"output_type": "display_data"
|
706 |
+
},
|
707 |
+
{
|
708 |
+
"data": {
|
709 |
+
"text/plain": [
|
710 |
+
"TrainOutput(global_step=1485, training_loss=0.13263646263867515, metrics={'train_runtime': 1037.0165, 'train_samples_per_second': 22.897, 'train_steps_per_second': 1.432, 'total_flos': 563885457261714.0, 'train_loss': 0.13263646263867515, 'epoch': 5.0})"
|
711 |
+
]
|
712 |
+
},
|
713 |
+
"execution_count": 7,
|
714 |
+
"metadata": {},
|
715 |
+
"output_type": "execute_result"
|
716 |
+
}
|
717 |
+
],
|
718 |
+
"source": [
|
719 |
+
"for param in model.parameters():\n",
|
720 |
+
" param.requires_grad = True\n",
|
721 |
+
"\n",
|
722 |
+
" \n",
|
723 |
+
"head_lr = 1e-4 # Slightly lower learning rate for the head\n",
|
724 |
+
"base_lr = 5e-6 # Much lower learning rate for the base layers\n",
|
725 |
+
"\n",
|
726 |
+
"optimizer_grouped_parameters = [\n",
|
727 |
+
" {'params': model.classifier.parameters(), 'lr': head_lr},\n",
|
728 |
+
" {'params': [p for n, p in model.named_parameters() if 'classifier' not in n], 'lr': base_lr}\n",
|
729 |
+
"]\n",
|
730 |
+
"\n",
|
731 |
+
"optimizer = AdamW(optimizer_grouped_parameters)\n",
|
732 |
+
"\n",
|
733 |
+
"training_args.num_train_epochs = 5 # Set the number of additional epochs\n",
|
734 |
+
"trainer.train()"
|
735 |
+
]
|
736 |
+
},
|
737 |
+
{
|
738 |
+
"cell_type": "code",
|
739 |
+
"execution_count": 10,
|
740 |
+
"id": "e4502600-7091-4a8a-83b6-5af5e249b7ca",
|
741 |
+
"metadata": {
|
742 |
+
"execution": {
|
743 |
+
"iopub.execute_input": "2023-11-25T03:35:44.942721Z",
|
744 |
+
"iopub.status.busy": "2023-11-25T03:35:44.942333Z",
|
745 |
+
"iopub.status.idle": "2023-11-25T03:35:57.188045Z",
|
746 |
+
"shell.execute_reply": "2023-11-25T03:35:57.187245Z",
|
747 |
+
"shell.execute_reply.started": "2023-11-25T03:35:44.942703Z"
|
748 |
+
}
|
749 |
+
},
|
750 |
+
"outputs": [
|
751 |
+
{
|
752 |
+
"data": {
|
753 |
+
"text/plain": [
|
754 |
+
"{'eval_loss': 0.2423660308122635,\n",
|
755 |
+
" 'eval_accuracy': 0.9671717171717171,\n",
|
756 |
+
" 'eval_f1': 0.9671861840444216,\n",
|
757 |
+
" 'eval_precision': 0.9672086987568536,\n",
|
758 |
+
" 'eval_recall': 0.9671717171717171,\n",
|
759 |
+
" 'eval_runtime': 12.2384,\n",
|
760 |
+
" 'eval_samples_per_second': 97.071,\n",
|
761 |
+
" 'eval_steps_per_second': 1.552,\n",
|
762 |
+
" 'epoch': 5.0}"
|
763 |
+
]
|
764 |
+
},
|
765 |
+
"execution_count": 10,
|
766 |
+
"metadata": {},
|
767 |
+
"output_type": "execute_result"
|
768 |
+
}
|
769 |
+
],
|
770 |
+
"source": [
|
771 |
+
"trainer.evaluate()"
|
772 |
+
]
|
773 |
+
},
|
774 |
+
{
|
775 |
+
"cell_type": "code",
|
776 |
+
"execution_count": 13,
|
777 |
+
"id": "190ff835-a7a2-465f-994d-73adb75950a3",
|
778 |
+
"metadata": {
|
779 |
+
"execution": {
|
780 |
+
"iopub.execute_input": "2023-11-25T03:39:58.975250Z",
|
781 |
+
"iopub.status.busy": "2023-11-25T03:39:58.974521Z",
|
782 |
+
"iopub.status.idle": "2023-11-25T03:39:59.367917Z",
|
783 |
+
"shell.execute_reply": "2023-11-25T03:39:59.367402Z",
|
784 |
+
"shell.execute_reply.started": "2023-11-25T03:39:58.975230Z"
|
785 |
+
}
|
786 |
+
},
|
787 |
+
"outputs": [
|
788 |
+
{
|
789 |
+
"data": {
|
790 |
+
"text/plain": [
|
791 |
+
"('transferLearningResults/tokenizer_config.json',\n",
|
792 |
+
" 'transferLearningResults/special_tokens_map.json',\n",
|
793 |
+
" 'transferLearningResults/vocab.json',\n",
|
794 |
+
" 'transferLearningResults/merges.txt',\n",
|
795 |
+
" 'transferLearningResults/added_tokens.json',\n",
|
796 |
+
" 'transferLearningResults/tokenizer.json')"
|
797 |
+
]
|
798 |
+
},
|
799 |
+
"execution_count": 13,
|
800 |
+
"metadata": {},
|
801 |
+
"output_type": "execute_result"
|
802 |
+
}
|
803 |
+
],
|
804 |
+
"source": [
|
805 |
+
"model.save_pretrained('transferLearningResults')\n",
|
806 |
+
"tokenizer.save_pretrained('transferLearningResults')"
|
807 |
+
]
|
808 |
+
}
|
809 |
+
],
|
810 |
+
"metadata": {
|
811 |
+
"kernelspec": {
|
812 |
+
"display_name": "Python 3 (ipykernel)",
|
813 |
+
"language": "python",
|
814 |
+
"name": "python3"
|
815 |
+
},
|
816 |
+
"language_info": {
|
817 |
+
"codemirror_mode": {
|
818 |
+
"name": "ipython",
|
819 |
+
"version": 3
|
820 |
+
},
|
821 |
+
"file_extension": ".py",
|
822 |
+
"mimetype": "text/x-python",
|
823 |
+
"name": "python",
|
824 |
+
"nbconvert_exporter": "python",
|
825 |
+
"pygments_lexer": "ipython3",
|
826 |
+
"version": "3.8.10"
|
827 |
+
}
|
828 |
+
},
|
829 |
+
"nbformat": 4,
|
830 |
+
"nbformat_minor": 5
|
831 |
+
}
|
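The last training cell above hinges on the two optimizer groups partitioning the model: the classifier head gets head_lr, everything else gets base_lr. Below is a minimal, illustrative sanity check, not part of the notebook, that loads the same base checkpoint and confirms the name filter leaves no parameter in both groups or in neither:

from transformers import AutoModelForSequenceClassification

# Same base checkpoint the notebook fine-tunes; 3 labels for anger/fear/joy.
model = AutoModelForSequenceClassification.from_pretrained(
    "cardiffnlp/twitter-roberta-base-emotion", num_labels=3, ignore_mismatched_sizes=True
)

head = list(model.classifier.parameters())
base = [p for n, p in model.named_parameters() if "classifier" not in n]

# Every tensor should land in exactly one group, so no parameter is
# updated with two different learning rates.
assert len(head) + len(base) == len(list(model.parameters()))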
train/transfer_learning.py
ADDED
@@ -0,0 +1,137 @@
+import torch
+import pandas as pd
+
+from sklearn.preprocessing import LabelEncoder
+from datasets import Dataset
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
+from transformers import RobertaConfig, RobertaForSequenceClassification
+from transformers import AdamW  # deprecated upstream; torch.optim.AdamW is the drop-in replacement
+
+from newhead import NewClassificationHead
+
+def preprocess_data(df):
+    """
+    Preprocess the data by renaming columns, removing rows with missing values, and removing extra spaces.
+    """
+    df = df.rename(columns={'Comment': 'text', 'Emotion': 'label'})
+    df = df.dropna()
+    df['text'] = df['text'].str.replace('\t', ' ').str.replace(' +', ' ', regex=True).str.strip()
+    df['label'] = df['label'].str.replace('\t', ' ').str.replace(' +', ' ', regex=True).str.strip()
+    return df
+
+def encode_label(df):
+    """
+    Encode the string labels as integers using LabelEncoder.
+    """
+    label_encoder = LabelEncoder()
+    df['label'] = label_encoder.fit_transform(df['label'])
+    return df
+
+def generate_dataset(df, test_size=0.2):
+    """
+    Convert the DataFrame into a DatasetDict with train and test splits.
+    """
+    return Dataset.from_pandas(df).train_test_split(test_size=test_size)
+
+def tokenize(batch):
+    return tokenizer(batch['text'], padding='max_length', truncation=True)
+
+
+def compute_metrics(pred):
+    from sklearn.metrics import accuracy_score, precision_recall_fscore_support
+    labels = pred.label_ids
+    preds = pred.predictions.argmax(-1)
+    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')
+    acc = accuracy_score(labels, preds)
+    return {
+        'accuracy': acc,
+        'f1': f1,
+        'precision': precision,
+        'recall': recall
+    }
+
+# Load the tokenizer and base model, then swap in the custom classification head
+model_name = "cardiffnlp/twitter-roberta-base-emotion"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+config = RobertaConfig.from_pretrained(model_name, num_labels=3)  # Set the number of labels to 3
+model = RobertaForSequenceClassification.from_pretrained(model_name, config=config, ignore_mismatched_sizes=True)
+model.classifier = NewClassificationHead(config)
+
+df = pd.read_csv('Emotion_classify_Data.csv')
+df = preprocess_data(df)
+df = encode_label(df)
+ds = generate_dataset(df)
+ds = ds.map(tokenize, batched=True)
+
+
+### Transfer Learning First
+# Freeze all layers first
+for param in model.parameters():
+    param.requires_grad = False
+
+# Unfreeze the classifier layer
+for param in model.classifier.parameters():
+    param.requires_grad = True
+
+
+# Define different learning rates
+head_lr = 3e-4  # Higher learning rate for the head
+base_lr = head_lr / 5  # Lower learning rate for the base layers
+
+# Group parameters and set learning rates
+optimizer_grouped_parameters = [
+    {'params': model.classifier.parameters(), 'lr': head_lr},
+    {'params': [p for n, p in model.named_parameters() if 'classifier' not in n], 'lr': base_lr}
+]
+
+optimizer = AdamW(optimizer_grouped_parameters)
+
+## Training arguments
+training_args = TrainingArguments(
+    output_dir='./results',
+    num_train_epochs=10,
+    per_device_train_batch_size=16,
+    per_device_eval_batch_size=64,
+    warmup_steps=500,
+    weight_decay=0.01,
+    logging_dir='./logs',
+    save_strategy="no",
+)
+
+trainer = Trainer(
+    model=model,
+    args=training_args,
+    train_dataset=ds['train'],
+    eval_dataset=ds['test'],
+    tokenizer=tokenizer,
+    optimizers=(optimizer, None),  # no LR scheduler; learning rates are managed per parameter group
+    compute_metrics=compute_metrics
+)
+
+
+## Train the head of the model
+trainer.train()
+
+
+## Unfreeze all layers
+for param in model.parameters():
+    param.requires_grad = True
+
+
+head_lr = 1e-4  # Slightly lower learning rate for the head
+base_lr = 5e-6  # Much lower learning rate for the base layers
+
+optimizer_grouped_parameters = [
+    {'params': model.classifier.parameters(), 'lr': head_lr},
+    {'params': [p for n, p in model.named_parameters() if 'classifier' not in n], 'lr': base_lr}
+]
+
+## Train the entire model with the new optimizer
+optimizer = AdamW(optimizer_grouped_parameters)
+trainer.optimizer = optimizer  # replace the Trainer's stored optimizer, otherwise the old one is reused
+
+training_args.num_train_epochs = 5  # Set the number of additional epochs
+trainer.train()
+
+model.save_pretrained('transferLearningResults')
+tokenizer.save_pretrained('transferLearningResults')
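One step the script leaves implicit is where the Space's label_to_int_mapping.json comes from: encode_label fits a LabelEncoder but never writes the class order to disk. A sketch of how such a file could be produced follows; the tiny stand-in frame replaces the real dataset, and sklearn's LabelEncoder sorts classes alphabetically, which matches the anger/fear/joy order in the committed mapping:

import json

import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Stand-in for the preprocessed dataframe used in the script.
df = pd.DataFrame({'label': ['anger', 'fear', 'joy', 'fear']})

label_encoder = LabelEncoder()
df['label'] = label_encoder.fit_transform(df['label'])

# classes_ is alphabetically sorted, so index 0 -> 'anger', 1 -> 'fear', 2 -> 'joy'.
mapping = {str(i): name for i, name in enumerate(label_encoder.classes_)}
with open('label_to_int_mapping.json', 'w') as f:
    json.dump(mapping, f)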
transferLearningResults/config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "cardiffnlp/twitter-roberta-base-emotion",
+  "architectures": [
+    "RobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.35.2",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
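Note that the saved config keeps the generic LABEL_0/LABEL_1/LABEL_2 names, so anything loading this checkpoint through a pipeline will print those rather than emotion names; the app resolves readable names from label_to_int_mapping.json instead. If one wanted the names baked into the checkpoint, a sketch (not part of this commit) would be:

from transformers import RobertaConfig

id2label = {0: "anger", 1: "fear", 2: "joy"}  # mirrors label_to_int_mapping.json
config = RobertaConfig.from_pretrained('transferLearningResults')
config.id2label = id2label
config.label2id = {name: idx for idx, name in id2label.items()}
config.save_pretrained('transferLearningResults')  # rewrites config.json in place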
transferLearningResults/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
transferLearningResults/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96bda258d29b7f25866cb5b23ca0a977d0fd1a091d2af8258f59e9fe9528fc2d
+size 498615900
transferLearningResults/model_state_dict.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1511ebb5405e20dfa9d040f6e3ddb59477ee93d34bfc69a7361a693a8d7b02d7
+size 498676501
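This raw state dict is not written by train/transfer_learning.py, which only calls save_pretrained; it was presumably exported separately so the app can load plain weights into a freshly built model. A sketch of the usual export, where the untrained model below is only a placeholder for the fine-tuned one:

import torch
from transformers import RobertaConfig, RobertaForSequenceClassification

# Placeholder model; in practice this would be the fine-tuned model from the script above.
config = RobertaConfig.from_pretrained('cardiffnlp/twitter-roberta-base-emotion', num_labels=3)
model = RobertaForSequenceClassification(config)

torch.save(model.state_dict(), 'transferLearningResults/model_state_dict.pt')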
transferLearningResults/special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
transferLearningResults/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
transferLearningResults/tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50264": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
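The model_max_length above is the transformers sentinel for "no maximum recorded in the tokenizer files", not a real limit; the underlying RoBERTa accepts at most 512 tokens. A loader can pin the limit explicitly, for example:

from transformers import AutoTokenizer

# Pin the effective limit so truncation behaves predictably.
tokenizer = AutoTokenizer.from_pretrained('transferLearningResults', model_max_length=512)
print(tokenizer.model_max_length)  # 512 instead of the sentinel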
transferLearningResults/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff