chenjgtea committed
Commit fc0d94d
1 Parent(s): a536c15
dvae_sc code port
Browse files
- Chat2TTS/model/__init__.py  +0 -0
- Chat2TTS/model/dvae_sc.py   +286 -0

Chat2TTS/model/__init__.py  ADDED
File without changes

Chat2TTS/model/dvae_sc.py  ADDED
@@ -0,0 +1,286 @@
import math
from typing import List, Optional, Literal, Tuple

import numpy as np
import pybase16384 as b14
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
from vector_quantize_pytorch import GroupedResidualFSQ

class ConvNeXtBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        intermediate_dim: int,
        kernel: int,
        dilation: int,
        layer_scale_init_value: float = 1e-6,
    ):
        # ConvNeXt Block copied from Vocos.
        super().__init__()
        self.dwconv = nn.Conv1d(
            dim,
            dim,
            kernel_size=kernel,
            padding=dilation * (kernel // 2),
            dilation=dilation,
            groups=dim,
        )  # depthwise conv

        self.norm = nn.LayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(
            dim, intermediate_dim
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(intermediate_dim, dim)
        self.gamma = (
            nn.Parameter(layer_scale_init_value * torch.ones(dim), requires_grad=True)
            if layer_scale_init_value > 0
            else None
        )

    def forward(self, x: torch.Tensor, cond=None) -> torch.Tensor:
        residual = x

        y = self.dwconv(x)
        y.transpose_(1, 2)  # (B, C, T) -> (B, T, C)
        x = self.norm(y)
        del y
        y = self.pwconv1(x)
        del x
        x = self.act(y)
        del y
        y = self.pwconv2(x)
        del x
        if self.gamma is not None:
            y *= self.gamma
        y.transpose_(1, 2)  # (B, T, C) -> (B, C, T)

        x = y + residual
        del y

        return x

class GFSQ(nn.Module):

    def __init__(
        self, dim: int, levels: List[int], G: int, R: int, eps=1e-5, transpose=True
    ):
        super(GFSQ, self).__init__()
        self.quantizer = GroupedResidualFSQ(
            dim=dim,
            levels=list(levels),
            num_quantizers=R,
            groups=G,
        )
        self.n_ind = math.prod(levels)
        self.eps = eps
        self.transpose = transpose
        self.G = G
        self.R = R

    def _embed(self, x: torch.Tensor):
        if self.transpose:
            x = x.transpose(1, 2)
        """
        x = rearrange(
            x, "b t (g r) -> g b t r", g = self.G, r = self.R,
        )
        """
        x = x.view(x.size(0), x.size(1), self.G, self.R).permute(2, 0, 1, 3)
        feat = self.quantizer.get_output_from_indices(x)
        return feat.transpose_(1, 2) if self.transpose else feat

    def __call__(self, x: torch.Tensor) -> torch.Tensor:
        return super().__call__(x)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.transpose:
            x.transpose_(1, 2)
        # feat, ind = self.quantizer(x)
        _, ind = self.quantizer(x)
        """
        ind = rearrange(
            ind, "g b t r ->b t (g r)",
        )
        """
        ind = ind.permute(1, 2, 0, 3).contiguous()
        ind = ind.view(ind.size(0), ind.size(1), -1)
        """
        embed_onehot_tmp = F.one_hot(ind.long(), self.n_ind)
        embed_onehot = embed_onehot_tmp.to(x.dtype)
        del embed_onehot_tmp
        e_mean = torch.mean(embed_onehot, dim=[0, 1])
        # e_mean = e_mean / (e_mean.sum(dim=1) + self.eps).unsqueeze(1)
        torch.div(e_mean, (e_mean.sum(dim=1) + self.eps).unsqueeze(1), out=e_mean)
        perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + self.eps), dim=1))

        return
        torch.zeros(perplexity.shape, dtype=x.dtype, device=x.device),
        feat.transpose_(1, 2) if self.transpose else feat,
        perplexity,
        """
        return ind.transpose_(1, 2) if self.transpose else ind

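For a quick sanity check of the ported quantizer, the sketch below runs a round trip through GFSQ: forward() turns continuous encoder features into discrete indices, and _embed() maps those indices back to quantized features. The dim/levels/G/R values are illustrative assumptions (the real values come from the model's vq_config, which is not part of this file), and GFSQ is assumed to be importable from Chat2TTS.model.dvae_sc.

# Round-trip sketch for the ported GFSQ quantizer (hypothetical config values).
import torch

from Chat2TTS.model.dvae_sc import GFSQ

vq = GFSQ(dim=1024, levels=[5, 5, 5, 5], G=2, R=2)  # assumed hyperparameters
feats = torch.randn(2, 1024, 120)                   # (B, C, T) encoder features
ind = vq(feats)                                     # discrete indices, (B, G*R, T) = (2, 4, 120)
recon = vq._embed(ind)                              # quantized features, (B, dim, T) = (2, 1024, 120)
print(ind.shape, recon.shape)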
class DVAEDecoder(nn.Module):
    def __init__(
        self,
        idim: int,
        odim: int,
        n_layer=12,
        bn_dim=64,
        hidden=256,
        kernel=7,
        dilation=2,
        up=False,
    ):
        super().__init__()
        self.up = up
        self.conv_in = nn.Sequential(
            nn.Conv1d(idim, bn_dim, 3, 1, 1),
            nn.GELU(),
            nn.Conv1d(bn_dim, hidden, 3, 1, 1),
        )
        self.decoder_block = nn.ModuleList(
            [
                ConvNeXtBlock(
                    hidden,
                    hidden * 4,
                    kernel,
                    dilation,
                )
                for _ in range(n_layer)
            ]
        )
        self.conv_out = nn.Conv1d(hidden, odim, kernel_size=1, bias=False)

    def forward(self, x: torch.Tensor, conditioning=None) -> torch.Tensor:
        # B, C, T
        y = self.conv_in(x)
        del x
        for f in self.decoder_block:
            y = f(y, conditioning)

        x = self.conv_out(y)
        del y
        return x

class MelSpectrogramFeatures(torch.nn.Module):
    def __init__(
        self,
        sample_rate=24000,
        n_fft=1024,
        hop_length=256,
        n_mels=100,
        padding: Literal["center", "same"] = "center",
    ):
        super().__init__()
        if padding not in ["center", "same"]:
            raise ValueError("Padding must be 'center' or 'same'.")
        self.padding = padding
        self.mel_spec = torchaudio.transforms.MelSpectrogram(
            sample_rate=sample_rate,
            n_fft=n_fft,
            hop_length=hop_length,
            n_mels=n_mels,
            center=padding == "center",
            power=1,
        )

    def __call__(self, audio: torch.Tensor) -> torch.Tensor:
        return super().__call__(audio)

    def forward(self, audio: torch.Tensor) -> torch.Tensor:
        mel: torch.Tensor = self.mel_spec(audio)
        features = torch.log(torch.clip(mel, min=1e-5))
        return features

class DVAE(nn.Module):
    def __init__(
        self,
        decoder_config: dict,
        encoder_config: Optional[dict] = None,
        vq_config: Optional[dict] = None,
        dim=512,
        coef: Optional[str] = None,
    ):
        super().__init__()
        if coef is None:
            coef = torch.rand(100)
        else:
            coef = torch.from_numpy(
                np.copy(np.frombuffer(b14.decode_from_string(coef), dtype=np.float32))
            )
        self.register_buffer("coef", coef.unsqueeze(0).unsqueeze_(2))

        if encoder_config is not None:
            self.downsample_conv = nn.Sequential(
                nn.Conv1d(100, dim, 3, 1, 1),
                nn.GELU(),
                nn.Conv1d(dim, dim, 4, 2, 1),
                nn.GELU(),
            )
            self.preprocessor_mel = MelSpectrogramFeatures()
            self.encoder: Optional[DVAEDecoder] = DVAEDecoder(**encoder_config)

        self.decoder = DVAEDecoder(**decoder_config)
        self.out_conv = nn.Conv1d(dim, 100, 3, 1, 1, bias=False)
        if vq_config is not None:
            self.vq_layer = GFSQ(**vq_config)
        else:
            self.vq_layer = None

    def __repr__(self) -> str:
        return b14.encode_to_string(
            self.coef.cpu().numpy().astype(np.float32).tobytes()
        )

    def __call__(
        self, inp: torch.Tensor, mode: Literal["encode", "decode"] = "decode"
    ) -> torch.Tensor:
        return super().__call__(inp, mode)

    @torch.inference_mode()
    def forward(
        self, inp: torch.Tensor, mode: Literal["encode", "decode"] = "decode"
    ) -> torch.Tensor:
        if mode == "encode" and hasattr(self, "encoder") and self.vq_layer is not None:
            mel = self.preprocessor_mel(inp)
            x: torch.Tensor = self.downsample_conv(
                torch.div(mel, self.coef.view(100, 1).expand(mel.shape), out=mel),
            ).unsqueeze_(0)
            del mel
            x = self.encoder(x)
            ind = self.vq_layer(x)
            del x
            return ind

        if self.vq_layer is not None:
            vq_feats = self.vq_layer._embed(inp)
        else:
            vq_feats = inp

        vq_feats = (
            vq_feats.view(
                (vq_feats.size(0), 2, vq_feats.size(1) // 2, vq_feats.size(2)),
            )
            .permute(0, 2, 3, 1)
            .flatten(2)
        )

        dec_out = self.out_conv(
            self.decoder(
                x=vq_feats,
            ),
        )

        del vq_feats

        return torch.mul(dec_out, self.coef, out=dec_out)
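Taken together, the ported module encodes a waveform into discrete speech tokens and decodes those tokens back into a log-mel spectrogram. The sketch below wires the pieces up end to end; every config value is an illustrative assumption chosen only to be shape-consistent with the code above (encoder odim matches the vq dim, decoder idim is half the vq dim, decoder odim equals dim), not the configuration Chat2TTS actually ships with its weights.

# Hypothetical end-to-end sketch: waveform -> DVAE tokens -> log-mel reconstruction.
# All numbers below are assumptions for illustration, not the shipped Chat2TTS config.
import torch

from Chat2TTS.model.dvae_sc import DVAE

dvae = DVAE(
    decoder_config=dict(idim=512, odim=512, hidden=256, n_layer=12, bn_dim=128),   # assumed
    encoder_config=dict(idim=512, odim=1024, hidden=256, n_layer=12, bn_dim=128),  # assumed
    vq_config=dict(dim=1024, levels=[5, 5, 5, 5], G=2, R=2),                       # assumed
    dim=512,
)

wav = torch.randn(24_000 * 2)   # 2 s of 24 kHz audio; the encode path expects an unbatched 1-D waveform
tokens = dvae(wav, "encode")    # discrete codes, shape (1, G*R, T')
mel = dvae(tokens, "decode")    # reconstructed log-mel, shape (1, 100, 2 * T')
print(tokens.shape, mel.shape)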