import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.model_selection import train_test_split
from datasets import load_dataset
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
from tqdm import tqdm

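# Load the IMDb movie-review dataset (binary sentiment labels) from the Hugging Face hub.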
dataset = load_dataset("imdb")
texts, labels = dataset["train"]["text"], dataset["train"]["label"]

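# Hold out 10% of the training split for validation.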
train_texts, val_texts, train_labels, val_labels = train_test_split(
    texts, labels, test_size=0.1, random_state=42
)

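# Tokenize each split in one pass; every review is padded/truncated to at most
# 256 tokens, and the encodings are materialized as single in-memory tensors.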
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
train_encodings = tokenizer(train_texts, truncation=True, padding=True, return_tensors="pt", max_length=256)
val_encodings = tokenizer(val_texts, truncation=True, padding=True, return_tensors="pt", max_length=256)

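# Thin wrapper around DistilBERT's 2-way sequence-classification head; forward returns raw logits.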
class SentimentAnalysisModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.distilbert = DistilBertForSequenceClassification.from_pretrained(
            "distilbert-base-uncased", num_labels=2
        )

    def forward(self, input_ids, attention_mask):
        return self.distilbert(input_ids, attention_mask=attention_mask).logits

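# DistilBertForSequenceClassification can compute the loss internally when given
# labels, but this script applies an external CrossEntropyLoss to the logits instead.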
model = SentimentAnalysisModel()
criterion = nn.CrossEntropyLoss()
optimizer = optim.AdamW(model.parameters(), lr=5e-5)

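# Convert the label lists to tensors so they can be stored in a TensorDataset.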
train_labels = torch.tensor(train_labels)
val_labels = torch.tensor(val_labels)

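# Pair each encoding with its attention mask and label, then batch.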
train_dataset = TensorDataset(train_encodings["input_ids"], train_encodings["attention_mask"], train_labels)
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)

val_dataset = TensorDataset(val_encodings["input_ids"], val_encodings["attention_mask"], val_labels)
val_loader = DataLoader(val_dataset, batch_size=8, shuffle=False)

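# Train on GPU when one is available.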
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

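# Standard fine-tuning loop: 5 epochs over mini-batches of 8.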
num_epochs = 5
for epoch in range(num_epochs):
    model.train()
    total_loss = 0.0

    for input_ids, attention_mask, labels in tqdm(train_loader, desc=f"Epoch {epoch + 1}/{num_epochs}"):
        input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask=attention_mask)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    print(f"Epoch {epoch + 1}/{num_epochs}, Average Loss: {total_loss / len(train_loader)}")
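
# Save only the fine-tuned weights; reloading requires instantiating SentimentAnalysisModel first.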
torch.save(model.state_dict(), "sentiment_analysis_model.pth")