import os

import torch
from torch import nn

from src.utils import get_batch


@torch.no_grad()
def estimate_loss(model: nn.Module, eval_iters, block_size, batch_size, device):
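    """Estimate the mean loss over eval_iters random batches for each of the
    "train" and "val" splits.

    Runs with gradients disabled in eval mode and restores train mode on exit.
    """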
    out = {}
    model.eval()
    for split in ["train", "val"]:
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            X, Y = get_batch(split, block_size, batch_size)
            X, Y = X.to(device), Y.to(device)
            logits, loss = model(X, Y)
            losses[k] = loss.item()
        out[split] = losses.mean()
    model.train()
    return out


def train(
    model,
    optimizer,
    max_iters,
    eval_interval,
    eval_iters,
    block_size,
    batch_size,
    device,
):
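    """Simple training loop: evaluate every eval_interval steps and keep a
    checkpoint of the model with the best validation loss seen so far."""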
    # Ensure the checkpoint directory exists before the first save.
    os.makedirs("checkpoints", exist_ok=True)

    best_val_loss = None
    for step in range(max_iters):
        if step % eval_interval == 0:
            losses = estimate_loss(model, eval_iters, block_size, batch_size, device)
            print(
                f"Step {step}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}"
            )
            # Save a checkpoint whenever the validation loss improves on the
            # best value seen so far, then record the new best.
            if best_val_loss is None or losses["val"] < best_val_loss:
                best_val_loss = losses["val"]
                torch.save(model, "checkpoints/model.pth")

        # Sample a fresh training batch and move it to the target device.
        xb, yb = get_batch("train", block_size, batch_size)
        xb, yb = xb.to(device), yb.to(device)

        # Forward pass: the model is expected to return (logits, loss) when
        # targets are provided.
        logits, loss = model(xb, yb)

        # Backward pass and parameter update.
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
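

# Example usage (a sketch): GPTLanguageModel is a hypothetical model class
# whose forward(x, targets) returns (logits, loss), as train() requires, and
# src.utils.get_batch is assumed to serve the "train"/"val" splits.
#
#     device = "cuda" if torch.cuda.is_available() else "cpu"
#     model = GPTLanguageModel().to(device)  # hypothetical model class
#     optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)
#     train(
#         model,
#         optimizer,
#         max_iters=5000,
#         eval_interval=500,
#         eval_iters=200,
#         block_size=256,
#         batch_size=64,
#         device=device,
#     )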