SeemG committed on
Commit 639e778 · verified · Parent: a541f02

Update model.py

Files changed (1)
  1. model.py +358 -324
model.py CHANGED
@@ -1,325 +1,359 @@
- """
- Full definition of a GPT Language Model, all of it in this single file.
- """
-
- import math
- import inspect
- from dataclasses import dataclass
-
- import torch
- import torch.nn as nn
- from torch.nn import functional as F
-
- class LayerNorm(nn.Module):
-     """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """
-
-     def __init__(self, ndim, bias):
-         super().__init__()
-         self.weight = nn.Parameter(torch.ones(ndim))
-         self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
-
-     def forward(self, input):
-         return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
-
- class CausalSelfAttention(nn.Module):
-
-     def __init__(self, config):
-         super().__init__()
-         assert config.n_embd % config.n_head == 0
-         # key, query, value projections for all heads, but in a batch
-         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
-         # output projection
-         self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
-         # regularization
-         self.attn_dropout = nn.Dropout(config.dropout)
-         self.resid_dropout = nn.Dropout(config.dropout)
-         self.n_head = config.n_head
-         self.n_embd = config.n_embd
-         self.dropout = config.dropout
-         # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
-         self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
-         if not self.flash:
-             print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
-             # causal mask to ensure that attention is only applied to the left in the input sequence
-             self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
-                                         .view(1, 1, config.block_size, config.block_size))
-
-     def forward(self, x):
-         B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
-
-         # calculate query, key, values for all heads in batch and move head forward to be the batch dim
-         q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
-         k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
-         q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
-         v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
-
-         # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
-         if self.flash:
-             # efficient attention using Flash Attention CUDA kernels
-             y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True)
-         else:
-             # manual implementation of attention
-             att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
-             att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
-             att = F.softmax(att, dim=-1)
-             att = self.attn_dropout(att)
-             y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
-         y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
-
-         # output projection
-         y = self.resid_dropout(self.c_proj(y))
-         return y
-
- class MLP(nn.Module):
-
-     def __init__(self, config):
-         super().__init__()
-         self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
-         self.gelu = nn.GELU()
-         self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
-         self.dropout = nn.Dropout(config.dropout)
-
-     def forward(self, x):
-         x = self.c_fc(x)
-         x = self.gelu(x)
-         x = self.c_proj(x)
-         x = self.dropout(x)
-         return x
-
- class Block(nn.Module):
-
-     def __init__(self, config):
-         super().__init__()
-         self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
-         self.attn = CausalSelfAttention(config)
-         self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
-         self.mlp = MLP(config)
-
-     def forward(self, x):
-         x = x + self.attn(self.ln_1(x))
-         x = x + self.mlp(self.ln_2(x))
-         return x
-
- @dataclass
- class GPTConfig:
-     block_size: int = 1024
-     vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency
-     n_layer: int = 12
-     n_head: int = 12
-     n_embd: int = 768
-     dropout: float = 0.0
-     bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
-
- class GPT(nn.Module):
-
-     def __init__(self, config):
-         super().__init__()
-         assert config.vocab_size is not None
-         assert config.block_size is not None
-         self.config = config
-
-         self.transformer = nn.ModuleDict(dict(
-             wte = nn.Embedding(config.vocab_size, config.n_embd),
-             wpe = nn.Embedding(config.block_size, config.n_embd),
-             drop = nn.Dropout(config.dropout),
-             h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
-             ln_f = LayerNorm(config.n_embd, bias=config.bias),
-         ))
-         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
-         # with weight tying when using torch.compile() some warnings get generated:
-         # "UserWarning: functional_call was passed multiple values for tied weights.
-         # This behavior is deprecated and will be an error in future versions"
-         # not 100% sure what this is, so far seems to be harmless. TODO investigate
-         self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying
-
-         # init all weights
-         self.apply(self._init_weights)
-         # apply special scaled init to the residual projections, per GPT-2 paper
-         for pn, p in self.named_parameters():
-             if pn.endswith('c_proj.weight'):
-                 torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))
-
-         # report number of parameters
-         print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))
-
-     def get_num_params(self, non_embedding=True):
-         """
-         Return the number of parameters in the model.
-         For non-embedding count (default), the position embeddings get subtracted.
-         The token embeddings would too, except due to the parameter sharing these
-         params are actually used as weights in the final layer, so we include them.
-         """
-         n_params = sum(p.numel() for p in self.parameters())
-         if non_embedding:
-             n_params -= self.transformer.wpe.weight.numel()
-         return n_params
-
-     def _init_weights(self, module):
-         if isinstance(module, nn.Linear):
-             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
-             if module.bias is not None:
-                 torch.nn.init.zeros_(module.bias)
-         elif isinstance(module, nn.Embedding):
-             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
-
-     def forward(self, idx, targets=None):
-         device = idx.device
-         b, t = idx.size()
-         assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
-         pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t)
-
-         # forward the GPT model itself
-         tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
-         pos_emb = self.transformer.wpe(pos) # position embeddings of shape (t, n_embd)
-         x = self.transformer.drop(tok_emb + pos_emb)
-         for block in self.transformer.h:
-             x = block(x)
-         x = self.transformer.ln_f(x)
-
-         if targets is not None:
-             # if we are given some desired targets also calculate the loss
-             logits = self.lm_head(x)
-             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
-         else:
-             # inference-time mini-optimization: only forward the lm_head on the very last position
-             logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim
-             loss = None
-
-         return logits, loss
-
-     def crop_block_size(self, block_size):
-         # model surgery to decrease the block size if necessary
-         # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024)
-         # but want to use a smaller block size for some smaller, simpler model
-         assert block_size <= self.config.block_size
-         self.config.block_size = block_size
-         self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size])
-         for block in self.transformer.h:
-             if hasattr(block.attn, 'bias'):
-                 block.attn.bias = block.attn.bias[:,:,:block_size,:block_size]
-
-     @classmethod
-     def from_pretrained(cls, model_type, override_args=None):
-         assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
-         override_args = override_args or {} # default to empty dict
-         # only dropout can be overridden see more notes below
-         assert all(k == 'dropout' for k in override_args)
-         from transformers import GPT2LMHeadModel
-         print("loading weights from pretrained gpt: %s" % model_type)
-
-         # n_layer, n_head and n_embd are determined from model_type
-         config_args = {
-             'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params
-             'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
-             'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
-             'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
-         }[model_type]
-         print("forcing vocab_size=50257, block_size=1024, bias=True")
-         config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
-         config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints
-         config_args['bias'] = True # always True for GPT model checkpoints
-         # we can override the dropout rate, if desired
-         if 'dropout' in override_args:
-             print(f"overriding dropout rate to {override_args['dropout']}")
-             config_args['dropout'] = override_args['dropout']
-         # create a from-scratch initialized minGPT model
-         config = GPTConfig(**config_args)
-         model = GPT(config)
-         sd = model.state_dict()
-         sd_keys = sd.keys()
-         sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param
-
-         # init a huggingface/transformers model
-         model_hf = GPT2LMHeadModel.from_pretrained(model_type)
-         sd_hf = model_hf.state_dict()
-
-         # copy while ensuring all of the parameters are aligned and match in names and shapes
-         sd_keys_hf = sd_hf.keys()
-         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
-         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
-         transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
-         # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
-         # this means that we have to transpose these weights when we import them
-         assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
-         for k in sd_keys_hf:
-             if any(k.endswith(w) for w in transposed):
-                 # special treatment for the Conv1D weights we need to transpose
-                 assert sd_hf[k].shape[::-1] == sd[k].shape
-                 with torch.no_grad():
-                     sd[k].copy_(sd_hf[k].t())
-             else:
-                 # vanilla copy over the other parameters
-                 assert sd_hf[k].shape == sd[k].shape
-                 with torch.no_grad():
-                     sd[k].copy_(sd_hf[k])
-
-         return model
-
-     def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
-         # start with all of the candidate parameters
-         param_dict = {pn: p for pn, p in self.named_parameters()}
-         # filter out those that do not require grad
-         param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}
-         # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.
-         # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
-         decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
-         nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
-         optim_groups = [
-             {'params': decay_params, 'weight_decay': weight_decay},
-             {'params': nodecay_params, 'weight_decay': 0.0}
-         ]
-         num_decay_params = sum(p.numel() for p in decay_params)
-         num_nodecay_params = sum(p.numel() for p in nodecay_params)
-         print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
-         print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
-         # Create AdamW optimizer and use the fused version if it is available
-         fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
-         use_fused = fused_available and device_type == 'cuda'
-         extra_args = dict(fused=True) if use_fused else dict()
-         optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
-         print(f"using fused AdamW: {use_fused}")
-
-         return optimizer
-
-     def estimate_mfu(self, fwdbwd_per_iter, dt):
-         """ estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
-         # first estimate the number of flops we do per iteration.
-         # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
-         N = self.get_num_params()
-         cfg = self.config
-         L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head, cfg.block_size
-         flops_per_token = 6*N + 12*L*H*Q*T
-         flops_per_fwdbwd = flops_per_token * T
-         flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
-         # express our flops throughput as ratio of A100 bfloat16 peak flops
-         flops_achieved = flops_per_iter * (1.0/dt) # per second
-         flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS
-         mfu = flops_achieved / flops_promised
-         return mfu
-
-     @torch.no_grad()
-     def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
-         """
-         Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
-         the sequence max_new_tokens times, feeding the predictions back into the model each time.
-         Most likely you'll want to make sure to be in model.eval() mode of operation for this.
-         """
-         for _ in range(max_new_tokens):
-             # if the sequence context is growing too long we must crop it at block_size
-             idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
-             # forward the model to get the logits for the index in the sequence
-             logits, _ = self(idx_cond)
-             # pluck the logits at the final step and scale by desired temperature
-             logits = logits[:, -1, :] / temperature
-             # optionally crop the logits to only the top k options
-             if top_k is not None:
-                 v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
-                 logits[logits < v[:, [-1]]] = -float('Inf')
-             # apply softmax to convert logits to (normalized) probabilities
-             probs = F.softmax(logits, dim=-1)
-             # sample from the distribution
-             idx_next = torch.multinomial(probs, num_samples=1)
-             # append sampled index to the running sequence and continue
-             idx = torch.cat((idx, idx_next), dim=1)
-
+ """
+ Full definition of a GPT Language Model, all of it in this single file.
+ """
+
+ import math
+ import inspect
+ from dataclasses import dataclass
+
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+
+ class LayerNorm(nn.Module):
+     """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """
+
+     def __init__(self, ndim, bias):
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(ndim))
+         self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
+
+     def forward(self, input):
+         return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
+
+ class CausalSelfAttention(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.n_embd % config.n_head == 0
+         # key, query, value projections for all heads, but in a batch
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
+         # output projection
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
+         # regularization
+         self.attn_dropout = nn.Dropout(config.dropout)
+         self.resid_dropout = nn.Dropout(config.dropout)
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.dropout = config.dropout
+         # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
+         self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
+         if not self.flash:
+             print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
+             # causal mask to ensure that attention is only applied to the left in the input sequence
+             self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
+                                         .view(1, 1, config.block_size, config.block_size))
+
+     def forward(self, x):
+         B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+
+         # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+         q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
+         k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+
+         # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
+         if self.flash:
+             # efficient attention using Flash Attention CUDA kernels
+             y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True)
+         else:
+             # manual implementation of attention
+             att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+             att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
+             att = F.softmax(att, dim=-1)
+             att = self.attn_dropout(att)
+             y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+         y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+
+         # output projection
+         y = self.resid_dropout(self.c_proj(y))
+         return y
+
+ # super simple bigram model, wired to GPTConfig so it is self-contained in this file
+ class BigramLanguageModel(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         # each token directly reads off the logits for the next token from a lookup table
+         self.token_embedding_table = nn.Embedding(config.vocab_size, config.n_embd)
+         self.position_embedding_table = nn.Embedding(config.block_size, config.n_embd)
+         self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
+         self.ln_f = nn.LayerNorm(config.n_embd) # final layer norm
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
+
+     def forward(self, idx, targets=None):
+         B, T = idx.shape
+
+         # idx and targets are both (B,T) tensors of integers
+         tok_emb = self.token_embedding_table(idx) # (B,T,C)
+         pos_emb = self.position_embedding_table(torch.arange(T, device=idx.device)) # (T,C)
+         x = tok_emb + pos_emb # (B,T,C)
+         x = self.blocks(x) # (B,T,C)
+         x = self.ln_f(x) # (B,T,C)
+         logits = self.lm_head(x) # (B,T,vocab_size)
+
+         if targets is None:
+             loss = None
+         else:
+             B, T, C = logits.shape
+             logits = logits.view(B*T, C)
+             targets = targets.view(B*T)
+             loss = F.cross_entropy(logits, targets)
+
+         return logits, loss
+
+
+ class MLP(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
+         self.gelu = nn.GELU()
+         self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x):
+         x = self.c_fc(x)
+         x = self.gelu(x)
+         x = self.c_proj(x)
+         x = self.dropout(x)
+         return x
+
+ class Block(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
+         self.attn = CausalSelfAttention(config)
+         self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
+         self.mlp = MLP(config)
+
+     def forward(self, x):
+         x = x + self.attn(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+ @dataclass
+ class GPTConfig:
+     block_size: int = 1024
+     vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency
+     n_layer: int = 12
+     n_head: int = 12
+     n_embd: int = 768
+     dropout: float = 0.0
+     bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
+
+ class GPT(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.vocab_size is not None
+         assert config.block_size is not None
+         self.config = config
+
+         self.transformer = nn.ModuleDict(dict(
+             wte = nn.Embedding(config.vocab_size, config.n_embd),
+             wpe = nn.Embedding(config.block_size, config.n_embd),
+             drop = nn.Dropout(config.dropout),
+             h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+             ln_f = LayerNorm(config.n_embd, bias=config.bias),
+         ))
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+         # with weight tying when using torch.compile() some warnings get generated:
+         # "UserWarning: functional_call was passed multiple values for tied weights.
+         # This behavior is deprecated and will be an error in future versions"
+         # not 100% sure what this is, so far seems to be harmless. TODO investigate
+         self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying
+
+         # init all weights
+         self.apply(self._init_weights)
+         # apply special scaled init to the residual projections, per GPT-2 paper
+         for pn, p in self.named_parameters():
+             if pn.endswith('c_proj.weight'):
+                 torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))
+
+         # report number of parameters
+         print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))
+
+     def get_num_params(self, non_embedding=True):
+         """
+         Return the number of parameters in the model.
+         For non-embedding count (default), the position embeddings get subtracted.
+         The token embeddings would too, except due to the parameter sharing these
+         params are actually used as weights in the final layer, so we include them.
+         """
+         n_params = sum(p.numel() for p in self.parameters())
+         if non_embedding:
+             n_params -= self.transformer.wpe.weight.numel()
+         return n_params
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+     def forward(self, idx, targets=None):
+         device = idx.device
+         b, t = idx.size()
+         assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
+         pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t)
+
+         # forward the GPT model itself
+         tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
+         pos_emb = self.transformer.wpe(pos) # position embeddings of shape (t, n_embd)
+         x = self.transformer.drop(tok_emb + pos_emb)
+         for block in self.transformer.h:
+             x = block(x)
+         x = self.transformer.ln_f(x)
+
+         if targets is not None:
+             # if we are given some desired targets also calculate the loss
+             logits = self.lm_head(x)
+             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+         else:
+             # inference-time mini-optimization: only forward the lm_head on the very last position
+             logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim
+             loss = None
+
+         return logits, loss
+
+     def crop_block_size(self, block_size):
+         # model surgery to decrease the block size if necessary
+         # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024)
+         # but want to use a smaller block size for some smaller, simpler model
+         assert block_size <= self.config.block_size
+         self.config.block_size = block_size
+         self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size])
+         for block in self.transformer.h:
+             if hasattr(block.attn, 'bias'):
+                 block.attn.bias = block.attn.bias[:,:,:block_size,:block_size]
+
+     @classmethod
+     def from_pretrained(cls, model_type, override_args=None):
+         assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
+         override_args = override_args or {} # default to empty dict
+         # only dropout can be overridden see more notes below
+         assert all(k == 'dropout' for k in override_args)
+         from transformers import GPT2LMHeadModel
+         print("loading weights from pretrained gpt: %s" % model_type)
+
+         # n_layer, n_head and n_embd are determined from model_type
+         config_args = {
+             'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params
+             'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
+             'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
+             'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
+         }[model_type]
+         print("forcing vocab_size=50257, block_size=1024, bias=True")
+         config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
+         config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints
+         config_args['bias'] = True # always True for GPT model checkpoints
+         # we can override the dropout rate, if desired
+         if 'dropout' in override_args:
+             print(f"overriding dropout rate to {override_args['dropout']}")
+             config_args['dropout'] = override_args['dropout']
+         # create a from-scratch initialized minGPT model
+         config = GPTConfig(**config_args)
+         model = GPT(config)
+         sd = model.state_dict()
+         sd_keys = sd.keys()
+         sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param
+
+         # init a huggingface/transformers model
+         model_hf = GPT2LMHeadModel.from_pretrained(model_type)
+         sd_hf = model_hf.state_dict()
+
+         # copy while ensuring all of the parameters are aligned and match in names and shapes
+         sd_keys_hf = sd_hf.keys()
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
+         transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
+         # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
+         # this means that we have to transpose these weights when we import them
+         assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
+         for k in sd_keys_hf:
+             if any(k.endswith(w) for w in transposed):
+                 # special treatment for the Conv1D weights we need to transpose
+                 assert sd_hf[k].shape[::-1] == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k].t())
+             else:
+                 # vanilla copy over the other parameters
+                 assert sd_hf[k].shape == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k])
+
+         return model
+
+     def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
+         # start with all of the candidate parameters
+         param_dict = {pn: p for pn, p in self.named_parameters()}
+         # filter out those that do not require grad
+         param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}
+         # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.
+         # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
+         decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
+         nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
+         optim_groups = [
+             {'params': decay_params, 'weight_decay': weight_decay},
+             {'params': nodecay_params, 'weight_decay': 0.0}
+         ]
+         num_decay_params = sum(p.numel() for p in decay_params)
+         num_nodecay_params = sum(p.numel() for p in nodecay_params)
+         print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
+         print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
+         # Create AdamW optimizer and use the fused version if it is available
+         fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
+         use_fused = fused_available and device_type == 'cuda'
+         extra_args = dict(fused=True) if use_fused else dict()
+         optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
+         print(f"using fused AdamW: {use_fused}")
+
+         return optimizer
+
+     def estimate_mfu(self, fwdbwd_per_iter, dt):
+         """ estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
+         # first estimate the number of flops we do per iteration.
+         # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
+         N = self.get_num_params()
+         cfg = self.config
+         L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head, cfg.block_size
+         flops_per_token = 6*N + 12*L*H*Q*T
+         flops_per_fwdbwd = flops_per_token * T
+         flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
+         # express our flops throughput as ratio of A100 bfloat16 peak flops
+         flops_achieved = flops_per_iter * (1.0/dt) # per second
+         flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS
+         mfu = flops_achieved / flops_promised
+         return mfu
+
+     @torch.no_grad()
+     def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
+         """
+         Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
+         the sequence max_new_tokens times, feeding the predictions back into the model each time.
+         Most likely you'll want to make sure to be in model.eval() mode of operation for this.
+         """
+         for _ in range(max_new_tokens):
+             # if the sequence context is growing too long we must crop it at block_size
+             idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
+             # forward the model to get the logits for the index in the sequence
+             logits, _ = self(idx_cond)
+             # pluck the logits at the final step and scale by desired temperature
+             logits = logits[:, -1, :] / temperature
+             # optionally crop the logits to only the top k options
+             if top_k is not None:
+                 v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
+                 logits[logits < v[:, [-1]]] = -float('Inf')
+             # apply softmax to convert logits to (normalized) probabilities
+             probs = F.softmax(logits, dim=-1)
+             # sample from the distribution
+             idx_next = torch.multinomial(probs, num_samples=1)
+             # append sampled index to the running sequence and continue
+             idx = torch.cat((idx, idx_next), dim=1)
+
          return idx
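
For context, a minimal usage sketch of the classes defined above. This is illustrative only, not part of the commit: the config values, the dummy prompt, and the assumption that the file is importable as model.py are all placeholders.

import torch
from model import GPT, GPTConfig  # assumes this file is saved as model.py

# a deliberately tiny config so the sketch runs quickly; real runs use far larger values
config = GPTConfig(block_size=128, vocab_size=50304, n_layer=4, n_head=4,
                   n_embd=128, dropout=0.0, bias=True)
model = GPT(config)
model.eval()  # generate()'s docstring recommends eval mode

# condition on a dummy prompt (a single token id 0) and sample 20 new tokens
idx = torch.zeros((1, 1), dtype=torch.long)
out = model.generate(idx, max_new_tokens=20, temperature=0.8, top_k=50)
print(out.shape)  # torch.Size([1, 21]): the prompt plus 20 sampled tokens

The same pattern works with pretrained weights via GPT.from_pretrained('gpt2'), which builds this class and copies in the Hugging Face GPT-2 checkpoint.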