POSH committed · Commit eddfef4 · Parent: 2cdbf89

Initial commit

Files changed:
- LMConfig.py (+56, -0)
- config.json (+31, -0)
- generation_config.json (+4, -0)
- model.py (+476, -0)
- pytorch_model.bin (+3, -0)
- special_tokens_map.json (+23, -0)
- tokenizer.json (+0, -0)
- tokenizer_config.json (+44, -0)
LMConfig.py
ADDED
@@ -0,0 +1,56 @@
from transformers import PretrainedConfig


class LMConfig(PretrainedConfig):
    model_type = "minimind"  # register the model type as "minimind"

    def __init__(
            self,
            dim: int = 768,  # model (embedding) dimension, default 768
            n_layers: int = 16,  # number of Transformer layers, default 16
            n_heads: int = 16,  # number of attention heads, default 16
            n_kv_heads: int = 8,  # number of key/value heads, default 8
            vocab_size: int = 6400,  # vocabulary size, default 6400
            hidden_dim: int = None,  # FFN hidden dimension; None means derive it from dim
            multiple_of: int = 64,  # round the FFN hidden dimension up to a multiple of this
            norm_eps: float = 1e-5,  # epsilon for the normalization layers
            max_seq_len: int = 512,  # maximum sequence length
            dropout: float = 0.0,  # dropout probability
            flash_attn: bool = True,  # whether to use Flash Attention
            ####################################################
            # MoE (Mixture of Experts) specific settings.
            # When use_moe is False, the settings below have no effect.
            ####################################################
            use_moe: bool = False,  # whether to use MoE
            num_experts_per_tok=2,  # number of experts selected per token
            n_routed_experts=4,  # total number of routed experts
            n_shared_experts: bool = True,  # whether to use a shared expert
            scoring_func='softmax',  # gating score function
            aux_loss_alpha=0.01,  # weight of the auxiliary (load-balancing) loss
            seq_aux=True,  # whether to compute the auxiliary loss at the sequence level
            norm_topk_prob=True,  # whether to normalize the top-k probabilities
            **kwargs,
    ):
        self.dim = dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.n_kv_heads = n_kv_heads
        self.vocab_size = vocab_size
        self.hidden_dim = hidden_dim
        self.multiple_of = multiple_of
        self.norm_eps = norm_eps
        self.max_seq_len = max_seq_len
        self.dropout = dropout
        self.flash_attn = flash_attn
        ####################################################
        # MoE (Mixture of Experts) specific settings.
        # When use_moe is False, the settings below have no effect.
        ####################################################
        self.use_moe = use_moe
        self.num_experts_per_tok = num_experts_per_tok
        self.n_routed_experts = n_routed_experts
        self.n_shared_experts = n_shared_experts
        self.scoring_func = scoring_func
        self.aux_loss_alpha = aux_loss_alpha
        self.seq_aux = seq_aux
        self.norm_topk_prob = norm_topk_prob
        super().__init__(**kwargs)  # finish initialization via PretrainedConfig
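For orientation, a short sketch of using the class directly (loading through AutoConfig instead relies on the auto_map entry in config.json below):

from LMConfig import LMConfig

cfg = LMConfig()                                 # the defaults above
print(cfg.dim, cfg.n_layers, cfg.vocab_size)     # 768 16 6400
print(cfg.model_type)                            # minimind

cfg_small = LMConfig(dim=512, n_layers=8)        # any field can be overridden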
config.json
ADDED
@@ -0,0 +1,31 @@
{
  "architectures": [
    "Transformer"
  ],
  "auto_map": {
    "AutoConfig": "LMConfig.LMConfig",
    "AutoModelForCausalLM": "model.Transformer"
  },
  "aux_loss_alpha": 0.01,
  "dim": 768,
  "dropout": 0.0,
  "flash_attn": true,
  "hidden_dim": null,
  "max_seq_len": 512,
  "model_type": "minimind",
  "multiple_of": 64,
  "n_heads": 16,
  "n_kv_heads": 8,
  "n_layers": 16,
  "n_routed_experts": 4,
  "n_shared_experts": true,
  "norm_eps": 1e-05,
  "norm_topk_prob": true,
  "num_experts_per_tok": 2,
  "scoring_func": "softmax",
  "seq_aux": true,
  "torch_dtype": "float32",
  "transformers_version": "4.45.2",
  "use_moe": false,
  "vocab_size": 6400
}
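The auto_map block is what lets the transformers auto classes locate the custom classes in this repository. A sketch, assuming the repo is cloned to ./minimind (path illustrative); trust_remote_code is required because LMConfig and Transformer live here, not in transformers itself:

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("./minimind", trust_remote_code=True)           # resolves to LMConfig.LMConfig
model = AutoModelForCausalLM.from_pretrained("./minimind", trust_remote_code=True)  # resolves to model.Transformer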
generation_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "_from_model_config": true,
  "transformers_version": "4.45.2"
}
model.py
ADDED
@@ -0,0 +1,476 @@
import math
from typing import Optional

import torch
from torch import nn
import torch.nn.functional as F
from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast

from .LMConfig import LMConfig


class RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(dim))
        self.eps = eps

    def _norm(self, x):
        return x * torch.rsqrt(self.eps + x.pow(2).mean(-1, keepdim=True))

    def forward(self, x):
        x = self._norm(x.float()).type_as(x)  # normalize in float32 for precision (avoids overflow), then cast back
        x = x * self.weight
        return x
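A quick numeric check of the class above: before the learned scale (initialized to ones), every position of the output has root-mean-square ≈ 1, regardless of the input's magnitude.

import torch

norm = RMSNorm(dim=8, eps=1e-5)
x = torch.randn(2, 4, 8) * 3          # deliberately large inputs
y = norm(x)
print(y.pow(2).mean(-1).sqrt())       # every entry ≈ 1.0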
def repeat_kv(x: torch.Tensor, n_rep: int):
    '''
    x is a key or value tensor of shape (batch_size, seq_len, kv_heads, head_dim).
    Repeat each kv head n_rep times, yielding (batch_size, seq_len, kv_heads * n_rep, head_dim).
    '''
    if n_rep == 1:
        return x
    else:
        bs, seq_len, kv_heads, head_dim = x.shape
        return x[:, :, :, None, :].expand(bs, seq_len, kv_heads, n_rep, head_dim).reshape(bs, seq_len, kv_heads * n_rep, head_dim)
        # expand can only broadcast size-1 dimensions (or add new ones) and does not copy memory.
        # The final reshape must not be view, or it raises:
        # RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
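A sanity check that the expand/reshape trick matches torch.repeat_interleave (which does copy memory, unlike the expand above); assumes the function as defined:

import torch

x = torch.randn(2, 5, 8, 64)          # (bs, seq_len, kv_heads, head_dim)
a = repeat_kv(x, n_rep=2)             # (2, 5, 16, 64)
b = x.repeat_interleave(2, dim=2)     # each kv head repeated consecutively
print(torch.equal(a, b))              # True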
def get_rotation(dim: int, seq_len: int, base: float = 10000.0):
    '''
    Build the rotation matrix: a (seq_len, dim // 2) complex matrix W with
    W[a][b] = cos(a*θ_b) + i*sin(a*θ_b), i.e. a unit-length complex number
    rotated by the angle a*θ_b.
    Note that dim here is not the model dimension but the per-head dimension,
    i.e. args.dim // args.n_heads.
    '''
    angles = 1.0 / (base ** (torch.arange(0, dim, 2)[: dim // 2].float() / dim))
    seq = torch.arange(0, seq_len, device=angles.device)
    angle_matrix = torch.outer(seq, angles).float()
    weight = torch.polar(torch.ones_like(angle_matrix), angle_matrix)
    return weight


def position_encoding(xq, xk, weight):
    # First view xq and xk as complex tensors.
    # xq.shape = [bsz, seq_len, n_heads, head_dim]
    xq = xq.float()
    xk = xk.float()
    xq = torch.view_as_complex(xq.reshape(*xq.shape[:-1], -1, 2))  # reshape handles non-contiguous memory; view does not
    xk = torch.view_as_complex(xk.reshape(*xk.shape[:-1], -1, 2))

    # Multiply by the rotation, then convert back to real.
    # xq is now [bsz, seq_len, n_heads, head_dim // 2]; broadcast weight as [1, seq_len, 1, head_dim // 2].
    xq = torch.view_as_real(weight[None, :, None, :] * xq).flatten(3)
    xk = torch.view_as_real(weight[None, :, None, :] * xk).flatten(3)
    # flatten(3) merges everything after dim 3: view_as_real turns the
    # (b, s, n_h, h // 2) complex tensor into a (b, s, n_h, h // 2, 2) real one.

    return xq, xk
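A small sanity sketch for the two helpers above: the rotation entries have unit modulus, so applying them preserves each head vector's norm, and the shapes come back unchanged:

import torch

head_dim, seq_len = 64, 16
w = get_rotation(head_dim, seq_len)        # (16, 32) complex, |w| == 1 everywhere
print(w.shape, torch.allclose(w.abs(), torch.ones_like(w.abs())))

xq = torch.randn(1, seq_len, 4, head_dim)  # (bsz, seq_len, n_heads, head_dim)
xk = torch.randn(1, seq_len, 4, head_dim)
q2, k2 = position_encoding(xq, xk, w)
print(q2.shape)                                                      # torch.Size([1, 16, 4, 64])
print(torch.allclose(q2.norm(dim=-1), xq.norm(dim=-1), atol=1e-5))   # True: rotation preserves norms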
class Attention(nn.Module):
    def __init__(self, args: LMConfig) -> None:
        super().__init__()
        self.dim = args.dim  # model dimension
        self.n_heads = args.n_heads  # number of attention heads
        self.n_kv_heads = args.n_kv_heads  # number of kv heads

        assert self.n_heads % self.n_kv_heads == 0
        self.n_rep = self.n_heads // self.n_kv_heads  # how many times each kv head is repeated

        assert self.dim % self.n_heads == 0
        self.head_dim = self.dim // self.n_heads  # tensor dimension inside each attention head

        self.wq = nn.Linear(self.dim, self.n_heads * self.head_dim, bias=False)
        self.wk = nn.Linear(self.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wv = nn.Linear(self.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wo = nn.Linear(self.n_heads * self.head_dim, self.dim, bias=False)

        self.resid_dropout = nn.Dropout(args.dropout)  # residual dropout

        self.dropout = args.dropout  # passed to flash attention
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') and args.flash_attn
        # Use Flash Attention when available; passing is_causal=True gives it the causal mask for free.
        if not self.flash:
            mask = torch.full((1, 1, args.max_seq_len, args.max_seq_len), float("-inf"))
            mask = torch.triu(mask, diagonal=1)  # keep -inf in the upper triangle only
            self.register_buffer("mask", mask)  # a buffer, so the mask is never updated by backprop

        # kv cache: at inference time the weights are frozen, so the xk and xv
        # computed for each past token never change and can be reused directly.
        self.k_cache, self.v_cache = None, None

    def forward(self, x: torch.Tensor, weight: torch.Tensor, use_kv_cache=False):
        # x is the (bsz, seq_len, dim) input; weight is the rotation matrix
        bsz, seq_len, _ = x.shape
        if use_kv_cache and not self.training:  # inference only; the original tested self.eval(), which flips the mode and is always truthy
            if self.k_cache is None or self.k_cache.shape[1] != x.shape[1] - 1:
                # The shape check is essential: the same model object serves many
                # prompts, and switching to a new context must invalidate the cache.
                # self.k_cache.shape[1] != x.shape[1] - 1 detects that switch;
                # without it the concatenations below fail with a size mismatch.
                xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
            else:
                token = x[:, -1:, :]
                xq = torch.concat((torch.zeros_like(x[:, :-1, :]), self.wq(token)), dim=1)
                # Only the last token's query matters; thanks to the residual
                # connection, the earlier positions are effectively left untouched.
                xk = torch.concat((self.k_cache, self.wk(token)), dim=1)
                xv = torch.concat((self.v_cache, self.wv(token)), dim=1)
            # cache the keys and values for reuse next step
            self.k_cache, self.v_cache = xk, xv
        else:
            xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)

        xq = xq.reshape(bsz, seq_len, self.n_heads, self.head_dim)
        xk = xk.reshape(bsz, seq_len, self.n_kv_heads, self.head_dim)
        xv = xv.reshape(bsz, seq_len, self.n_kv_heads, self.head_dim)

        xq, xk = position_encoding(xq, xk, weight)  # rotary position encoding on q and k
        xk, xv = repeat_kv(xk, self.n_rep), repeat_kv(xv, self.n_rep)  # repeat k and v n_rep times

        xq = xq.transpose(1, 2)
        xk = xk.transpose(1, 2)
        xv = xv.transpose(1, 2)

        if self.flash:  # computes softmax(qk^T / sqrt(d)) @ v in one fused kernel
            x = F.scaled_dot_product_attention(xq, xk, xv, attn_mask=None,
                                               dropout_p=self.dropout if self.training else 0.0,
                                               is_causal=True)  # is_causal=True applies the causal mask
            # self.training indicates whether the model is currently in training mode
        else:
            x = xq @ xk.transpose(2, 3) / math.sqrt(self.head_dim)  # (bs, n_heads, seq_len, seq_len)
            assert hasattr(self, "mask")
            x = x + self.mask[:, :, :seq_len, :seq_len]  # mask out future positions
            x = F.softmax(x, dim=-1) @ xv  # (bs, n_heads, seq_len, head_dim)

        x = x.transpose(1, 2).contiguous().view(bsz, seq_len, -1)  # (bs, seq_len, dim)
        x = self.resid_dropout(self.wo(x))
        return x
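The two branches should agree; a quick check that the fused kernel with is_causal=True matches the manual masked softmax, up to a small float tolerance:

import math
import torch
import torch.nn.functional as F

torch.manual_seed(0)
q = torch.randn(1, 4, 8, 16)          # (bsz, n_heads, seq_len, head_dim)
k = torch.randn(1, 4, 8, 16)
v = torch.randn(1, 4, 8, 16)

fused = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True)

scores = q @ k.transpose(2, 3) / math.sqrt(16)
mask = torch.triu(torch.full((8, 8), float("-inf")), diagonal=1)
manual = F.softmax(scores + mask, dim=-1) @ v

print(torch.allclose(fused, manual, atol=1e-5))   # True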
class FeedForward(nn.Module):
    def __init__(self, dim: int, hidden_dim: int, multi: int, dropout: float) -> None:
        # hidden_dim defaults to None; multi is the rounding multiple (default 64); dropout defaults to 0.0
        super().__init__()
        if hidden_dim is None:
            hidden_dim = 4 * dim
            hidden_dim = int(2 * hidden_dim / 3)
            hidden_dim = multi * ((hidden_dim + multi - 1) // multi)
            # Purpose of this computation: SwiGLU uses three matrices instead of two,
            # so the classic 4*dim hidden size is scaled by 2/3, then rounded up to a
            # multiple of `multi` for hardware-friendly sizes. For dim = 512 this
            # gives 1408; for the dim = 768 shipped in config.json it gives 2048.
        self.w1 = nn.Linear(dim, hidden_dim, bias=False)
        self.w2 = nn.Linear(dim, hidden_dim, bias=False)
        self.w3 = nn.Linear(hidden_dim, dim, bias=False)
        self.dropout = nn.Dropout(dropout)  # defined but not applied in forward (an earlier draft wrapped w3's output with it)

    def forward(self, x: torch.Tensor):
        # SwiGLU: w3(silu(w1(x)) * w2(x))
        x_2 = self.w2(x)
        x = self.w1(x)
        x = F.silu(x)
        x = x * x_2
        x = self.w3(x)
        return x
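The hidden-size formula, worked through for the shipped configuration (dim = 768, multiple_of = 64):

dim, multi = 768, 64
hidden = int(2 * (4 * dim) / 3)                    # int(2048.0) = 2048
hidden = multi * ((hidden + multi - 1) // multi)   # 2048 is already a multiple of 64
print(hidden)                                      # 2048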
class MoEGate(nn.Module):
    def __init__(self, args: LMConfig) -> None:
        super().__init__()
        self.topk = args.num_experts_per_tok  # the k in top-k: how many experts each token selects
        self.gating_dim = args.dim  # gating dimension, same as the model dimension
        self.n_routed_experts = args.n_routed_experts  # number of routed experts (the original read args.num_experts_per_tok here, a copy-paste bug)
        self.scoring_func = args.scoring_func  # scoring function
        self.norm_topk_prob = args.norm_topk_prob  # whether to renormalize the top-k probabilities
        self.alpha = args.aux_loss_alpha  # weight of the auxiliary (load-balancing) loss
        self.seq_aux = args.seq_aux  # compute the auxiliary loss at the sequence level (default True)
        self.w = nn.Linear(self.gating_dim, self.n_routed_experts, bias=False)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        import torch.nn.init as init
        init.kaiming_normal_(self.w.weight)  # initialize the gate

    def forward(self, x: torch.Tensor):
        bsz, seq_len, dim = x.shape

        hidden_states = x.view(-1, dim)
        scores = self.w(hidden_states)  # (bsz * seq_len, n_routed_experts)

        if self.scoring_func == "softmax":
            scores = F.softmax(scores, dim=-1)
        else:
            raise NotImplementedError(f'unsupported scoring function for MoE gating: {self.scoring_func}')
        # (bsz * seq_len, n_routed_experts): scores[i][j] is expert j's weight for token i

        topk_weight, topk_idx = torch.topk(scores, self.topk, dim=-1, sorted=False)
        # the k largest weights and the matching expert indices, each (bsz * seq_len, k)

        if self.norm_topk_prob:  # the reference implementation also checks self.topk > 1, which seems unnecessary
            denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20  # keepdim=True added; without it the division below cannot broadcast
            topk_weight = topk_weight / denominator  # renormalize the selected weights

        if self.training and self.alpha > 0:
            # training with alpha > 0; for alpha <= 0 the auxiliary loss would be non-positive and meaningless
            scores_for_aux = scores  # (bsz * seq_len, n_routed_experts)
            aux_topk = self.topk
            topk_idx_for_aux_loss = topk_idx.view(bsz, -1)  # (bsz, seq_len * k)

            if self.seq_aux:  # sequence-level auxiliary loss
                scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1).mean(dim=1)
                # step 1: compute ce, shape (bsz, n_routed_experts)
                ce = torch.zeros(bsz, self.n_routed_experts, device=hidden_states.device)  # device added; the original allocated on CPU
                ce.scatter_add_(1, topk_idx_for_aux_loss,
                                torch.ones(bsz, seq_len * aux_topk,
                                           device=hidden_states.device).div_(
                                    seq_len * aux_topk / self.n_routed_experts
                                ))
                # keep the bsz dimension of topk_idx and accumulate into ce by expert index;
                # each batch uses seq_len * k expert slots in total, hence the division by
                # seq_len * aux_topk, and the extra factor n_routed_experts also shows up
                # in the token-level branch below
                # step 2: multiply ce elementwise with scores_for_seq_aux
                # step 3: sum over experts (a length-bsz vector), take the mean, scale by alpha
                aux_loss = (ce * scores_for_seq_aux).sum(dim=-1).mean() * self.alpha
            else:
                # step 1: compute ce, shape (n_routed_experts,):
                # flatten the indices to a one-hot encoding, average it, then scale by the expert count
                ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts).float().mean(dim=0)
                ce = ce * self.n_routed_experts
                # .float() added: torch cannot take the mean of an integer one-hot tensor.
                # num_classes pins the width to the expert count: not every expert is
                # necessarily selected, so without it the one-hot width could come out smaller.
                # The one-hot tensor has shape (bsz * seq_len * k, n_routed_experts).
                # step 2: the mean gate score per expert is scores_for_aux.mean(dim=0)
                # step 3: dot the two together and scale by alpha
                aux_loss = (ce * scores_for_aux.mean(dim=0)).sum() * self.alpha
        else:
            aux_loss = None
        return topk_weight, topk_idx, aux_loss
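A toy run of the routing math above (4 experts, k = 2), useful for seeing what scatter_add_ accumulates; all numbers are illustrative:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
bsz, seq_len, n_experts, k = 2, 3, 4, 2
scores = F.softmax(torch.randn(bsz * seq_len, n_experts), dim=-1)
topk_weight, topk_idx = torch.topk(scores, k, dim=-1, sorted=False)

idx = topk_idx.view(bsz, -1)                      # (bsz, seq_len * k)
ce = torch.zeros(bsz, n_experts)
ce.scatter_add_(1, idx, torch.ones(bsz, seq_len * k).div_(seq_len * k / n_experts))
print(ce)        # per-batch expert usage, scaled so a perfectly uniform router gives all-ones
print(ce.sum(dim=1))  # each row sums to n_experts (4.0)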
class MOEFeedForward(nn.Module):
    def __init__(self, args: LMConfig) -> None:
        super().__init__()
        self.args = args  # kept so forward can check n_shared_experts (the original referenced self.args without ever storing it)
        self.topk = args.num_experts_per_tok  # the k in top-k: how many experts each token selects
        self.n_routed_experts = args.n_routed_experts  # number of routed experts (the original read args.num_experts_per_tok here too)
        self.experts = nn.ModuleList([
            FeedForward(dim=args.dim,
                        hidden_dim=args.hidden_dim,
                        multi=args.multiple_of,
                        dropout=args.dropout)
            for _ in range(self.n_routed_experts)
        ])
        self.gate = MoEGate(args)

        if args.n_shared_experts is not None:
            self.shared_experts = FeedForward(
                dim=args.dim,
                hidden_dim=args.hidden_dim,
                multi=args.multiple_of,
                dropout=args.dropout
            )

    def work(self, x, topk_weight, topk_idx):
        bsz, seq_len, dim = x.shape
        # replicate each token k times
        x = x.view(-1, dim)
        x = x.repeat_interleave(self.topk, dim=0)  # (bsz * seq_len * k, dim)
        # flatten the expert indices
        flat_topk_idx = topk_idx.view(-1)  # (bsz * seq_len * k)
        # run each token copy through its routed expert
        y = torch.empty_like(x, dtype=torch.float16)  # (bsz * seq_len * k, dim); note the expert outputs are downcast to fp16 here
        for i in range(self.n_routed_experts):
            y[flat_topk_idx == i] = self.experts[i](x[flat_topk_idx == i])
        # weighted sum over the k experts for each token
        # (the original wrote y * topk_weight.unsqueeze(-1).sum(dim=1), which
        # mis-places the parentheses and cannot broadcast)
        y = y.view(bsz * seq_len, self.topk, -1)
        y = (y * topk_weight.unsqueeze(-1)).sum(dim=1)  # (bsz * seq_len, dim)
        # restore the input shape
        y = y.view(bsz, seq_len, -1)
        return y

    def forward(self, x):
        # x is (bsz, seq_len, dim)
        topk_weight, topk_idx, _ = self.gate(x)  # choose experts and weights, each (bsz * seq_len, k); the aux loss is discarded here
        if self.training:
            y = self.work(x, topk_weight, topk_idx)
        else:
            with torch.no_grad():  # the original wrote `with torch.no_grad:` without parentheses, which fails at runtime
                y = self.work(x, topk_weight, topk_idx)

        if self.args.n_shared_experts is not None:
            y = y + self.shared_experts(x)  # the shared expert sees the original input (the original fed it y, likely a slip)

        return y
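A toy trace of work() with 2 tokens, 3 "experts" that just scale their input, and k = 2, to make the repeat_interleave / boolean-mask dispatch concrete (all values illustrative):

import torch

x = torch.tensor([[1.0, 1.0], [2.0, 2.0]])        # 2 tokens, dim 2
topk_idx = torch.tensor([[0, 2], [1, 2]])         # each token routed to 2 of 3 experts
topk_weight = torch.tensor([[0.7, 0.3], [0.6, 0.4]])

xr = x.repeat_interleave(2, dim=0)                # (4, 2): each token duplicated k times
flat_idx = topk_idx.view(-1)                      # tensor([0, 2, 1, 2])

y = torch.empty_like(xr)
for i in range(3):
    sel = flat_idx == i
    y[sel] = xr[sel] * (i + 1)                    # "expert" i multiplies by i + 1

y = (y.view(2, 2, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
print(y)   # [[1.6, 1.6], [4.8, 4.8]]: 0.7*1 + 0.3*3 = 1.6 and 0.6*4 + 0.4*6 = 4.8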
class TransformerBlock(nn.Module):
    def __init__(self, layer_id: int, args: LMConfig) -> None:
        # layer_id is the index of this block
        super().__init__()

        self.attn_norm = RMSNorm(dim=args.dim, eps=args.norm_eps)
        self.attn = Attention(args)

        self.ffn_norm = RMSNorm(dim=args.dim, eps=args.norm_eps)
        if args.use_moe:
            self.feed_forward = MOEFeedForward(args)
        else:
            self.feed_forward = FeedForward(dim=args.dim,
                                            hidden_dim=args.hidden_dim,
                                            multi=args.multiple_of,
                                            dropout=args.dropout)

    def forward(self, x, weight, use_kv_cache=False):
        # pre-norm residual block: x + Attn(Norm(x)), then x + FFN(Norm(x))
        x = x + self.attn(self.attn_norm(x), weight, use_kv_cache)
        x = x + self.feed_forward(self.ffn_norm(x))
        return x
class Transformer(PreTrainedModel):
    config_class = LMConfig  # added so the PreTrainedModel machinery can rebuild the config (assumed; absent from the original)

    def __init__(self, args: LMConfig = None) -> None:
        if not args:
            args = LMConfig()
        super().__init__(args)  # must run after the None check; the original called it first and would crash when args is None

        self.args = args

        self.embedding = nn.Embedding(args.vocab_size, args.dim)
        self.dropout = nn.Dropout(args.dropout)  # dropout applied right after the embedding

        self.layers = nn.ModuleList()  # the Transformer blocks
        for i in range(args.n_layers):
            self.layers.append(TransformerBlock(i, args))

        # rotary position-embedding weights, shape (max_seq_len, head_dim // 2)
        rotation_weight = get_rotation(dim=args.dim // args.n_heads, seq_len=args.max_seq_len)
        self.register_buffer('rotation_weight', rotation_weight, persistent=False)

        self.norm = RMSNorm(dim=args.dim, eps=args.norm_eps)
        self.output = nn.Linear(args.dim, args.vocab_size, bias=False)  # final projection to the vocabulary

        self.embedding.weight = self.output.weight  # tie input and output embeddings

        self.OUT = CausalLMOutputWithPast()  # reusable output container

    def forward(self, tokens: Optional[torch.Tensor] = None, targets: Optional[torch.Tensor] = None,
                use_kv_cache=False, **key_args):
        # Optional[torch.Tensor] means either a tensor or None may be passed
        if 'input_ids' in key_args:
            tokens = key_args['input_ids']
        # The original also overwrote tokens with key_args['attention_mask'], which
        # would embed the mask itself; the mask is ignored here, since attention
        # is always causal.

        _, seq_len = tokens.shape  # bsz input sequences, each of length seq_len
        x = self.embedding(tokens)  # (bsz, seq_len, dim)
        x = self.dropout(x)

        # slice the rotary weights to the current length, then run the blocks
        r_w = self.rotation_weight[:seq_len]

        for layer in self.layers:
            x = layer(x, r_w, use_kv_cache)

        x = self.norm(x)  # final normalization

        if targets is not None:  # training
            logits = self.output(x)  # (bsz, seq_len, vocab_size)
            last_loss = F.cross_entropy(logits.view(-1, logits.shape[-1]), targets.view(-1), ignore_index=-1)
            # targets holds the next token for every input position;
            # ignore_index=-1 treats -1 entries as padding and skips them;
            # F.cross_entropy applies the softmax internally
        else:  # inference
            logits = self.output(x[:, [-1], :])  # (bsz, 1, vocab_size): only the last position of each sequence
            last_loss = None
        self.OUT.__setitem__('logits', logits)
        self.OUT.__setitem__('last_loss', last_loss)
        return self.OUT
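A sketch of the training-time call, assuming Transformer and LMConfig are importable in the current environment (the relative import at the top of model.py expects a package context, so a local run may need it changed to a plain `from LMConfig import LMConfig`):

import torch

model = Transformer(LMConfig())
tokens = torch.randint(0, 6400, (2, 6))                              # toy batch: bsz=2, seq_len=6
targets = torch.cat([tokens[:, 1:], torch.full((2, 1), -1)], dim=1)  # next-token targets; -1 marks padding

out = model(tokens, targets)
print(out.logits.shape)   # torch.Size([2, 6, 6400])
print(out.last_loss)      # scalar cross-entropy loss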
    @torch.inference_mode()
    def generate(self, idx, eos, max_new_tokens, temperature=0.7, top_k=None,
                 stream=True, repetition_penalty=1., use_kv_cache=True):
        # Background reading: https://zhuanlan.zhihu.com/p/667025336
        # idx: (bsz, seq_len) token indices of the prompt.
        # eos: the end-of-sequence token; generation stops once it is produced.
        # max_new_tokens: generation cap (note: the loop below actually bounds the
        #   total length, prompt included, not just the newly generated tokens).
        # temperature: smooths the distribution; logits are divided by it before
        #   the softmax, so values above 1 flatten the probabilities and make
        #   unlikely tokens easier to pick.
        # top_k: Top-K sampling parameter; None disables it, otherwise only the
        #   top_k highest-probability tokens can be sampled.
        # stream: streaming output; yield after every new token instead of only
        #   once at the end.
        # repetition_penalty: down-weights tokens already present in the context,
        #   which helps avoid degenerate repeating text.
        bsz, seq_len = idx.shape

        while idx.shape[1] < max_new_tokens - 1:  # continue while the sequence is below the length cap
            res = self(idx, use_kv_cache=use_kv_cache)
            logits = res.logits  # (bsz, 1, vocab_size)
            logits = logits[:, -1, :]  # (bsz, vocab_size)

            # penalize tokens that already appeared in the context
            for b in range(bsz):  # every batch element
                for token in set(idx.tolist()[b]):  # the distinct tokens so far
                    logits[b, token] /= repetition_penalty

            if temperature == 0.0:
                # greedy: take the most likely token; idx_nxt is (bsz, 1)
                _, idx_nxt = torch.topk(logits, k=1, dim=-1)
            else:
                logits = logits / temperature
                if top_k is not None:
                    # suppress everything outside the top k so very unlikely tokens cannot be sampled
                    v, _ = torch.topk(logits, k=min(top_k, logits.shape[-1]), dim=-1)
                    # v is sorted descending within each batch, shape (bsz, top_k)
                    logits[logits < v[:, [-1]]] = -float("Inf")
                    # v[:, [-1]] is the smallest kept logit per batch, shape (bsz, 1);
                    # logits < v[:, [-1]] broadcasts to a boolean mask the shape of
                    # logits (True wherever logits[i][j] < v[i][0]); setting those
                    # entries to -inf makes their post-softmax probability exactly 0
                probs = F.softmax(logits, dim=-1)
                idx_nxt = torch.multinomial(probs, num_samples=1, generator=None)
                # sample one token according to probs

            if idx_nxt == eos:
                break  # assumes bsz == 1; with a larger batch this comparison is ambiguous

            idx = torch.concat((idx, idx_nxt), dim=-1)  # append the new token

            if stream:
                yield idx[:, seq_len:]  # streaming: emit everything generated so far after each step

        if not stream:
            yield idx[:, seq_len:]

    @torch.inference_mode()
    def eval_answer(self, idx):
        # next-token logits for a prompt truncated to the last max_seq_len tokens,
        # e.g. for scoring candidate answers
        idx_cond = idx if idx.shape[1] < self.args.max_seq_len else idx[:, -self.args.max_seq_len:]
        res = self(idx_cond)
        logits = res.logits[:, -1, :]
        return logits
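End to end, a minimal sketch of generating from the checkpoint; the local path ./minimind and the prompt are illustrative, not part of the commit:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("./minimind")
model = AutoModelForCausalLM.from_pretrained("./minimind", trust_remote_code=True)
model.eval()

prompt = tokenizer.apply_chat_template([{"role": "user", "content": "Hello!"}], tokenize=False)
idx = tokenizer(prompt, return_tensors="pt")["input_ids"]

out = idx[:, :0]
for out in model.generate(idx, eos=tokenizer.eos_token_id, max_new_tokens=100,
                          temperature=0.7, top_k=8, stream=True):
    pass                      # with stream=True, each step yields everything generated so far
print(tokenizer.decode(out[0]))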
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dc60df5c47f03c442bd79e2779116eb2fe84e5c84563c11dbc73956c073a4301
size 435043986
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,44 @@
{
  "add_bos_token": false,
  "add_eos_token": false,
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<s>user\\n' + content + '</s>\\n<s>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' + '\\n' }}{% endif %}{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": null,
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "PreTrainedTokenizerFast",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
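For each user turn the chat_template above emits '<s>user\n...</s>' and immediately opens '<s>assistant\n'; assistant replies are closed with '</s>'. A sketch of rendering it, assuming the tokenizer files were saved to ./minimind (path illustrative):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./minimind")
messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
]
print(tokenizer.apply_chat_template(messages, tokenize=False))
# <s>user
# Hi</s>
# <s>assistant
# Hello!</s>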