Upload 9 files
- attentions.py +326 -0
- commons.py +161 -0
- data_utils.py +551 -0
- mel_processing.py +150 -0
- models.py +1147 -0
- modules.py +390 -0
- pqmf.py +116 -0
- transforms.py +193 -0
- utils.py +359 -0
attentions.py
ADDED
@@ -0,0 +1,326 @@
import copy
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

import commons
import modules
from modules import LayerNorm
import logging

logger = logging.getLogger(__name__)


class Encoder(nn.Module):
    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size

        # bert-vits2
        # self.cond_layer_idx = self.n_layers
        # if "gin_channels" in kwargs:
        #     self.gin_channels = kwargs["gin_channels"]
        #     if self.gin_channels != 0:
        #         self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
        #         # vits2 says 3rd block, so idx is 2 by default
        #         self.cond_layer_idx = (
        #             kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2
        #         )
        #         logging.debug(self.gin_channels, self.cond_layer_idx)
        #         assert (
        #             self.cond_layer_idx < self.n_layers
        #         ), "cond_layer_idx should be less than n_layers"

        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, g=None):
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            # vits2
            # if i == self.cond_layer_idx and g is not None:
            #     g = self.spk_emb_linear(g.transpose(1, 2))
            #     g = g.transpose(1, 2)
            #     x = x + g
            #     x = x * x_mask
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class Decoder(nn.Module):
    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.encdec_attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, h, h_mask):
        """
        x: decoder input
        h: encoder output
        """
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.self_attn_layers[i](x, x, self_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_0[i](x + y)

            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class MultiHeadAttention(nn.Module):
    def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels**-0.5
            self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
            self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert t_s == t_t, "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert t_s == t_t, "Local attention is only available for self-attention."
                block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
            output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

        # Concat extra elements so to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # pad along column
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
        x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
        # add 0's in the beginning that will skew the elements after reshape
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
          length: an integer scalar.
        Returns:
          a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        if causal:
            self.padding = self._causal_padding
        else:
            self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = self.kernel_size - 1
        pad_r = 0
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x

    def _same_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = (self.kernel_size - 1) // 2
        pad_r = self.kernel_size // 2
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x
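For orientation, a minimal shape check of the Encoder above. This is a sketch, not part of the upload: it assumes attentions.py, commons.py, and the sibling modules.py from this commit (for LayerNorm) are importable, and the batch/channel/frame sizes are arbitrary.

# Sketch: verify Encoder input/output shapes (assumes attentions.py, commons.py, modules.py importable).
import torch
import commons
from attentions import Encoder

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1)
x = torch.randn(2, 192, 57)  # [batch, hidden_channels, frames]
lengths = torch.tensor([57, 40])  # second item is padded past frame 40
x_mask = commons.sequence_mask(lengths, 57).unsqueeze(1).to(x.dtype)  # [batch, 1, frames]
y = enc(x, x_mask)
print(y.shape)  # torch.Size([2, 192, 57])

The mask multiplications keep padded frames zeroed through every attention and FFN layer, so the output retains the input's [batch, hidden_channels, frames] layout, and the default window_size=4 relative attention is valid because the Encoder only ever attends to itself.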
commons.py
ADDED
@@ -0,0 +1,161 @@
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def intersperse(lst, item):
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q)"""
    kl = (logs_q - logs_p) - 0.5
    kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
    return kl


def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protect from overflows."""
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
    return g


def slice_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (num_timescales - 1))
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    signal = signal.view(1, channels, length)
    return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    total_norm = total_norm ** (1. / norm_type)
    return total_norm
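The three small helpers that the other files lean on most, shown on concrete values (a sketch; the outputs follow directly from the definitions above):

# Sketch: commons helpers on concrete inputs.
import torch
import commons

print(commons.intersperse([5, 7, 9], 0))
# [0, 5, 0, 7, 0, 9, 0]  -- a blank token (0) around every symbol

print(commons.convert_pad_shape([[0, 0], [0, 0], [1, 2]]))
# [1, 2, 0, 0, 0, 0]     -- reversed and flattened into F.pad's last-dim-first argument order

print(commons.sequence_mask(torch.tensor([2, 4]), 5))
# tensor([[ True,  True, False, False, False],
#         [ True,  True,  True,  True, False]])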
data_utils.py
ADDED
@@ -0,0 +1,551 @@
import time
import os
import random
import numpy as np
import torch
import torch.utils.data
from loguru import logger
import commons
from mel_processing import spectrogram_torch, mel_spectrogram_torch
from utils import load_wav_to_torch, load_filepaths_and_text
from text import cleaned_text_to_sequence, get_bert
from text.cleaner import clean_text
from tqdm import tqdm


class TextAudioLoader(torch.utils.data.Dataset):
    """
    1) loads audio, text pairs
    2) normalizes text and converts them to sequences of integers
    3) computes spectrograms from audio files.
    """

    def __init__(self, audiopaths_and_text, hparams):
        self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
        self.max_wav_value = hparams.max_wav_value
        self.sampling_rate = hparams.sampling_rate
        self.filter_length = hparams.filter_length
        self.hop_length = hparams.hop_length
        self.win_length = hparams.win_length

        self.cleaned_text = getattr(hparams, "cleaned_text", False)

        self.add_blank = hparams.add_blank
        self.min_text_len = getattr(hparams, "min_text_len", 1)
        self.max_text_len = getattr(hparams, "max_text_len", 190)

        random.seed(1234)
        random.shuffle(self.audiopaths_and_text)
        self._filter()

    def _filter(self):
        """
        Filter text & store spec lengths
        """
        # Store spectrogram lengths for Bucketing
        # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
        # spec_length = wav_length // hop_length

        audiopaths_and_text_new = []
        lengths = []
        for audiopath, text in self.audiopaths_and_text:
            if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
                audiopaths_and_text_new.append([audiopath, text])
                lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
        self.audiopaths_and_text = audiopaths_and_text_new
        self.lengths = lengths

    def get_audio_text_pair(self, audiopath_and_text):
        # separate filename and text
        audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
        text = self.get_text(text)
        spec, wav = self.get_audio(audiopath)
        return (text, spec, wav)

    def get_audio(self, filename):
        audio, sampling_rate = load_wav_to_torch(filename)
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                "{} {} SR doesn't match target {} SR".format(
                    filename, sampling_rate, self.sampling_rate
                )
            )
        audio_norm = audio / self.max_wav_value
        audio_norm = audio_norm.unsqueeze(0)
        spec_filename = filename.replace(".wav", ".spec.pt")
        if os.path.exists(spec_filename):
            spec = torch.load(spec_filename)
        else:
            spec = spectrogram_torch(
                audio_norm,
                self.filter_length,
                self.sampling_rate,
                self.hop_length,
                self.win_length,
                center=False,
            )
            spec = torch.squeeze(spec, 0)
            torch.save(spec, spec_filename)
        return spec, audio_norm

    def get_text(self, text):
        if self.cleaned_text:
            text_norm = cleaned_text_to_sequence(text)
        else:
            text_norm = text_to_sequence(text, self.text_cleaners)
        if self.add_blank:
            text_norm = commons.intersperse(text_norm, 0)
        text_norm = torch.LongTensor(text_norm)
        return text_norm

    def __getitem__(self, index):
        return self.get_audio_text_pair(self.audiopaths_and_text[index])

    def __len__(self):
        return len(self.audiopaths_and_text)


class TextAudioCollate:
    """Zero-pads model inputs and targets"""

    def __init__(self, return_ids=False):
        self.return_ids = return_ids

    def __call__(self, batch):
        """Collates a training batch from normalized text and audio
        PARAMS
        ------
        batch: [text_normalized, spec_normalized, wav_normalized]
        """
        # Right zero-pad all one-hot text sequences to max input length
        _, ids_sorted_decreasing = torch.sort(
            torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True
        )

        max_text_len = max([len(x[0]) for x in batch])
        max_spec_len = max([x[1].size(1) for x in batch])
        max_wav_len = max([x[2].size(1) for x in batch])

        text_lengths = torch.LongTensor(len(batch))
        spec_lengths = torch.LongTensor(len(batch))
        wav_lengths = torch.LongTensor(len(batch))

        text_padded = torch.LongTensor(len(batch), max_text_len)
        spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
        wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
        text_padded.zero_()
        spec_padded.zero_()
        wav_padded.zero_()
        for i in range(len(ids_sorted_decreasing)):
            row = batch[ids_sorted_decreasing[i]]

            text = row[0]
            text_padded[i, : text.size(0)] = text
            text_lengths[i] = text.size(0)

            spec = row[1]
            spec_padded[i, :, : spec.size(1)] = spec
            spec_lengths[i] = spec.size(1)

            wav = row[2]
            wav_padded[i, :, : wav.size(1)] = wav
            wav_lengths[i] = wav.size(1)

        if self.return_ids:
            return (
                text_padded,
                text_lengths,
                spec_padded,
                spec_lengths,
                wav_padded,
                wav_lengths,
                ids_sorted_decreasing,
            )
        return (
            text_padded,
            text_lengths,
            spec_padded,
            spec_lengths,
            wav_padded,
            wav_lengths,
        )


"""Multi speaker version"""


class TextAudioSpeakerLoader(torch.utils.data.Dataset):
    """
    1) loads audio, speaker_id, text pairs
    2) normalizes text and converts them to sequences of integers
    3) computes spectrograms from audio files.
    """

    def __init__(self, audiopaths_sid_text, hparams):
        self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
        self.max_wav_value = hparams.max_wav_value
        self.sampling_rate = hparams.sampling_rate
        self.filter_length = hparams.filter_length
        self.hop_length = hparams.hop_length
        self.win_length = hparams.win_length
        self.spk_map = hparams.spk2id
        self.hparams = hparams
        self.use_bert = hparams.use_bert
        self.use_melorspec = False

        self.use_mel_spec_posterior = getattr(
            hparams, "use_mel_posterior_encoder", False
        )
        if self.use_mel_spec_posterior:
            self.n_mel_channels = getattr(hparams, "n_mel_channels", 80)

        self.cleaned_text = getattr(hparams, "cleaned_text", False)

        self.add_blank = hparams.add_blank
        self.min_text_len = getattr(hparams, "min_text_len", 1)
        self.max_text_len = getattr(hparams, "max_text_len", 300)

        random.seed(1234)
        random.shuffle(self.audiopaths_sid_text)
        self._filter()

    def _filter(self):
        """
        Filter text & store spec lengths
        """
        # Store spectrogram lengths for Bucketing
        # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
        # spec_length = wav_length // hop_length

        audiopaths_sid_text_new = []
        lengths = []
        skipped = 0
        logger.info("Init dataset...")
        for _id, spk, language, text, phones, tone, word2ph in tqdm(
            self.audiopaths_sid_text
        ):
            audiopath = f"{_id}"
            if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:
                phones = phones.split(" ")
                tone = [int(i) for i in tone.split(" ")]
                word2ph = [int(i) for i in word2ph.split(" ")]
                audiopaths_sid_text_new.append(
                    [audiopath, spk, language, text, phones, tone, word2ph]
                )
                lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
            else:
                skipped += 1
        logger.info(
            "skipped: "
            + str(skipped)
            + ", total: "
            + str(len(self.audiopaths_sid_text))
        )
        self.audiopaths_sid_text = audiopaths_sid_text_new
        self.lengths = lengths

    def get_audio_text_speaker_pair(self, audiopath_sid_text):
        # separate filename, speaker_id and text
        audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text

        bert, phones, tone, language = self.get_text(
            text, word2ph, phones, tone, language, audiopath
        )

        spec, wav = self.get_audio(audiopath)
        sid = torch.LongTensor([int(self.spk_map[sid])])
        return (phones, spec, wav, sid, tone, language, bert)

    def get_audio(self, filename):
        audio, sampling_rate = load_wav_to_torch(filename)
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                "{} {} SR doesn't match target {} SR".format(
                    filename, sampling_rate, self.sampling_rate
                )
            )
        audio_norm = audio / self.max_wav_value
        audio_norm = audio_norm.unsqueeze(0)
        spec_filename = filename.replace(".wav", ".spec.pt")
        if os.path.exists(spec_filename):
            spec = torch.load(spec_filename)
        else:
            spec = spectrogram_torch(
                audio_norm,
                self.filter_length,
                self.sampling_rate,
                self.hop_length,
                self.win_length,
                center=False,
            )
            spec = torch.squeeze(spec, 0)
            torch.save(spec, spec_filename)
        return spec, audio_norm

    def get_text(self, text, word2ph, phone, tone, language_str, wav_path):
        phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
        if self.add_blank:
            phone = commons.intersperse(phone, 0)
            tone = commons.intersperse(tone, 0)
            language = commons.intersperse(language, 0)
            for i in range(len(word2ph)):
                word2ph[i] = word2ph[i] * 2
            word2ph[0] += 1
        bert_path = wav_path.replace(".wav", ".bert.pt")
        try:
            bert = torch.load(bert_path)
            assert bert.shape[-1] == len(phone)
        except Exception:
            bert = get_bert(text, word2ph, language_str)
            torch.save(bert, bert_path)
            assert bert.shape[-1] == len(phone), phone

        if self.use_bert:
            if language_str == "ZH":
                bert = bert
            else:
                bert = torch.zeros(1024, len(phone))
        else:
            bert = torch.zeros(1024, len(phone))
        assert bert.shape[-1] == len(phone), (
            bert.shape,
            len(phone),
            sum(word2ph),
            text,
        )
        phone = torch.LongTensor(phone)
        tone = torch.LongTensor(tone)
        language = torch.LongTensor(language)
        return bert, phone, tone, language

    def get_sid(self, sid):
        sid = torch.LongTensor([int(sid)])
        return sid

    def __getitem__(self, index):
        return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])

    def __len__(self):
        return len(self.audiopaths_sid_text)


class TextAudioSpeakerCollate:
    """Zero-pads model inputs and targets"""

    def __init__(self, return_ids=False):
        self.return_ids = return_ids

    def __call__(self, batch):
        """Collates a training batch from normalized text, audio and speaker identities
        PARAMS
        ------
        batch: [text_normalized, spec_normalized, wav_normalized, sid]
        """
        # Right zero-pad all one-hot text sequences to max input length
        _, ids_sorted_decreasing = torch.sort(
            torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True
        )

        max_text_len = max([len(x[0]) for x in batch])
        max_spec_len = max([x[1].size(1) for x in batch])
        max_wav_len = max([x[2].size(1) for x in batch])

        text_lengths = torch.LongTensor(len(batch))
        spec_lengths = torch.LongTensor(len(batch))
        wav_lengths = torch.LongTensor(len(batch))
        sid = torch.LongTensor(len(batch))

        text_padded = torch.LongTensor(len(batch), max_text_len)
        tone_padded = torch.LongTensor(len(batch), max_text_len)
        language_padded = torch.LongTensor(len(batch), max_text_len)
        bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
        spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
        wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)

        text_padded.zero_()
        tone_padded.zero_()
        language_padded.zero_()
        spec_padded.zero_()
        wav_padded.zero_()
        bert_padded.zero_()
        for i in range(len(ids_sorted_decreasing)):
            row = batch[ids_sorted_decreasing[i]]

            text = row[0]
            text_padded[i, : text.size(0)] = text
            text_lengths[i] = text.size(0)

            spec = row[1]
            spec_padded[i, :, : spec.size(1)] = spec
            spec_lengths[i] = spec.size(1)

            wav = row[2]
            wav_padded[i, :, : wav.size(1)] = wav
            wav_lengths[i] = wav.size(1)

            sid[i] = row[3]

            tone = row[4]
            tone_padded[i, : tone.size(0)] = tone

            language = row[5]
            language_padded[i, : language.size(0)] = language

            bert = row[6]
            bert_padded[i, :, : bert.size(1)] = bert

        if self.return_ids:
            return (
                text_padded,
                text_lengths,
                spec_padded,
                spec_lengths,
                wav_padded,
                wav_lengths,
                sid,
                tone_padded,
                language_padded,
                bert_padded,
                ids_sorted_decreasing,
            )
        return (
            text_padded,
            text_lengths,
            spec_padded,
            spec_lengths,
            wav_padded,
            wav_lengths,
            sid,
            tone_padded,
            language_padded,
            bert_padded,
        )


class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
    """
    Maintain similar input lengths in a batch.
    Length groups are specified by boundaries.
    Ex) boundaries = [b1, b2, b3] -> every batch draws from either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.

    It removes samples which are not included in the boundaries.
    Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
    """

    def __init__(
        self,
        dataset,
        batch_size,
        boundaries,
        num_replicas=None,
        rank=None,
        shuffle=True,
    ):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        self.lengths = dataset.lengths
        self.batch_size = batch_size
        self.boundaries = boundaries

        self.buckets, self.num_samples_per_bucket = self._create_buckets()
        self.total_size = sum(self.num_samples_per_bucket)
        self.num_samples = self.total_size // self.num_replicas

    def _create_buckets(self):
        buckets = [[] for _ in range(len(self.boundaries) - 1)]
        for i in range(len(self.lengths)):
            length = self.lengths[i]
            idx_bucket = self._bisect(length)
            if idx_bucket != -1:
                buckets[idx_bucket].append(i)

        for i in range(len(buckets) - 1, 0, -1):
            if len(buckets[i]) == 0:
                buckets.pop(i)
                self.boundaries.pop(i + 1)

        num_samples_per_bucket = []
        for i in range(len(buckets)):
            len_bucket = len(buckets[i])
            total_batch_size = self.num_replicas * self.batch_size
            rem = (
                total_batch_size - (len_bucket % total_batch_size)
            ) % total_batch_size
            num_samples_per_bucket.append(len_bucket + rem)
        return buckets, num_samples_per_bucket

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)

        indices = []
        if self.shuffle:
            for bucket in self.buckets:
                indices.append(torch.randperm(len(bucket), generator=g).tolist())
        else:
            for bucket in self.buckets:
                indices.append(list(range(len(bucket))))

        batches = []
        for i in range(len(self.buckets)):
            bucket = self.buckets[i]
            len_bucket = len(bucket)
            ids_bucket = indices[i]
            num_samples_bucket = self.num_samples_per_bucket[i]

            # add extra samples to make it evenly divisible
            rem = num_samples_bucket - len_bucket
            ids_bucket = (
                ids_bucket
                + ids_bucket * (rem // len_bucket)
                + ids_bucket[: (rem % len_bucket)]
            )

            # subsample
            ids_bucket = ids_bucket[self.rank :: self.num_replicas]

            # batching
            for j in range(len(ids_bucket) // self.batch_size):
                batch = [
                    bucket[idx]
                    for idx in ids_bucket[
                        j * self.batch_size : (j + 1) * self.batch_size
                    ]
                ]
                batches.append(batch)

        if self.shuffle:
            batch_ids = torch.randperm(len(batches), generator=g).tolist()
            batches = [batches[i] for i in batch_ids]
        self.batches = batches

        assert len(self.batches) * self.batch_size == self.num_samples
        return iter(self.batches)

    def _bisect(self, x, lo=0, hi=None):
        if hi is None:
            hi = len(self.boundaries) - 1

        if hi > lo:
            mid = (hi + lo) // 2
            if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
                return mid
            elif x <= self.boundaries[mid]:
                return self._bisect(x, lo, mid)
            else:
                return self._bisect(x, mid + 1, hi)
        else:
            return -1

    def __len__(self):
        return self.num_samples // self.batch_size
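A usage sketch for DistributedBucketSampler, runnable on a single process. The `_DummyDataset` class and the length/boundary values are invented for this example; note also that importing data_utils pulls in the repo's text package and its dependencies, so this assumes the rest of the upload is on the path.

# Sketch: drive DistributedBucketSampler without real data (single replica, no process group).
import torch
import torch.utils.data
from data_utils import DistributedBucketSampler

class _DummyDataset(torch.utils.data.Dataset):
    def __init__(self, lengths):
        self.lengths = lengths  # the sampler only reads .lengths
    def __len__(self):
        return len(self.lengths)

ds = _DummyDataset(lengths=[40, 90, 130, 60, 75, 120, 55, 95])
sampler = DistributedBucketSampler(ds, batch_size=2, boundaries=[32, 64, 128],
                                   num_replicas=1, rank=0, shuffle=True)
sampler.set_epoch(0)
for batch in sampler:
    print(batch)  # index pairs whose lengths fall in the same boundary bucket

With the values above, lengths 40/60/55 land in the (32, 64] bucket, 90/75/120/95 land in (64, 128], and 130 is discarded; passing explicit num_replicas=1 and rank=0 avoids needing torch.distributed initialization, which makes this a convenient way to audit bucketing before launching real training.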
mel_processing.py
ADDED
@@ -0,0 +1,150 @@
import math
import os
import random
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
import numpy as np
import librosa
import librosa.util as librosa_util
from librosa.util import normalize, pad_center, tiny
from scipy.signal import get_window
from scipy.io.wavfile import read
from librosa.filters import mel as librosa_mel_fn

MAX_WAV_VALUE = 32768.0


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """
    PARAMS
    ------
    C: compression factor
    """
    return torch.log(torch.clamp(x, min=clip_val) * C)


def dynamic_range_decompression_torch(x, C=1):
    """
    PARAMS
    ------
    C: compression factor used to compress
    """
    return torch.exp(x) / C


def spectral_normalize_torch(magnitudes):
    output = dynamic_range_compression_torch(magnitudes)
    return output


def spectral_de_normalize_torch(magnitudes):
    output = dynamic_range_decompression_torch(magnitudes)
    return output


mel_basis = {}
hann_window = {}


def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
    if torch.min(y) < -1.0:
        print("min value is ", torch.min(y))
    if torch.max(y) > 1.0:
        print("max value is ", torch.max(y))

    global hann_window
    dtype_device = str(y.dtype) + "_" + str(y.device)
    wnsize_dtype_device = str(win_size) + "_" + dtype_device
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
            dtype=y.dtype, device=y.device
        )

    y = torch.nn.functional.pad(
        y.unsqueeze(1),
        (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
        mode="reflect",
    )
    y = y.squeeze(1)

    spec = torch.stft(
        y,
        n_fft,
        hop_length=hop_size,
        win_length=win_size,
        window=hann_window[wnsize_dtype_device],
        center=center,
        pad_mode="reflect",
        normalized=False,
        onesided=True,
        return_complex=False,
    )

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
    return spec


def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
    global mel_basis
    dtype_device = str(spec.dtype) + "_" + str(spec.device)
    fmax_dtype_device = str(fmax) + "_" + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
            dtype=spec.dtype, device=spec.device
        )
    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)
    return spec


def mel_spectrogram_torch(
    y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False
):
    if torch.min(y) < -1.0:
        print("min value is ", torch.min(y))
    if torch.max(y) > 1.0:
        print("max value is ", torch.max(y))

    global mel_basis, hann_window
    dtype_device = str(y.dtype) + "_" + str(y.device)
    fmax_dtype_device = str(fmax) + "_" + dtype_device
    wnsize_dtype_device = str(win_size) + "_" + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
            dtype=y.dtype, device=y.device
        )
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
            dtype=y.dtype, device=y.device
        )

    y = torch.nn.functional.pad(
        y.unsqueeze(1),
        (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
        mode="reflect",
    )
    y = y.squeeze(1)

    spec = torch.stft(
        y,
        n_fft,
        hop_length=hop_size,
        win_length=win_size,
        window=hann_window[wnsize_dtype_device],
        center=center,
        pad_mode="reflect",
        normalized=False,
        onesided=True,
        return_complex=False,
    )

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)

    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)

    return spec
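A quick shape check for the two spectrogram entry points above (a sketch; the one-second 22,050 Hz random signal is arbitrary). With center=False plus the (n_fft - hop_size) / 2 reflection padding, the output has n_fft // 2 + 1 frequency bins and len(y) // hop_size frames.

# Sketch: spectrogram_torch / spec_to_mel_torch output shapes for a 1 s, 22,050 Hz signal.
import torch
from mel_processing import spectrogram_torch, spec_to_mel_torch

y = torch.randn(1, 22050) * 0.1  # [batch, samples], values within [-1, 1]
spec = spectrogram_torch(y, n_fft=1024, sampling_rate=22050, hop_size=256, win_size=1024, center=False)
print(spec.shape)  # torch.Size([1, 513, 86])  -- 1024 // 2 + 1 bins, 22050 // 256 frames
mel = spec_to_mel_torch(spec, n_fft=1024, num_mels=80, sampling_rate=22050, fmin=0.0, fmax=None)
print(mel.shape)   # torch.Size([1, 80, 86])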
models.py
ADDED
@@ -0,0 +1,1147 @@
1 |
+
import copy
|
2 |
+
import math
|
3 |
+
import torch
|
4 |
+
from torch import nn
|
5 |
+
from torch.nn import functional as F
|
6 |
+
|
7 |
+
import commons
|
8 |
+
import modules
|
9 |
+
import attentions
|
10 |
+
import monotonic_align
|
11 |
+
|
12 |
+
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
|
13 |
+
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
14 |
+
from commons import init_weights, get_padding
|
15 |
+
from pqmf import PQMF
|
16 |
+
from stft import TorchSTFT
|
17 |
+
import math
|
18 |
+
from text import symbols, num_tones, num_languages
|
19 |
+
|
20 |
+
|
class StochasticDurationPredictor(nn.Module):
    def __init__(
        self,
        in_channels,
        filter_channels,
        kernel_size,
        p_dropout,
        n_flows=4,
        gin_channels=0,
    ):
        super().__init__()
        filter_channels = in_channels  # TODO: this override should be removed in a future version.
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.log_flow = modules.Log()
        self.flows = nn.ModuleList()
        self.flows.append(modules.ElementwiseAffine(2))
        for i in range(n_flows):
            self.flows.append(
                modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)
            )
            self.flows.append(modules.Flip())

        self.post_pre = nn.Conv1d(1, filter_channels, 1)
        self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.post_convs = modules.DDSConv(
            filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout
        )
        self.post_flows = nn.ModuleList()
        self.post_flows.append(modules.ElementwiseAffine(2))
        for i in range(4):
            self.post_flows.append(
                modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)
            )
            self.post_flows.append(modules.Flip())

        self.pre = nn.Conv1d(in_channels, filter_channels, 1)
        self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.convs = modules.DDSConv(
            filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout
        )
        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, filter_channels, 1)

    def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
        x = torch.detach(x)
        x = self.pre(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        x = self.convs(x, x_mask)
        x = self.proj(x) * x_mask

        if not reverse:
            flows = self.flows
            assert w is not None

            logdet_tot_q = 0
            h_w = self.post_pre(w)
            h_w = self.post_convs(h_w, x_mask)
            h_w = self.post_proj(h_w) * x_mask
            e_q = (
                torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype)
                * x_mask
            )
            z_q = e_q
            for flow in self.post_flows:
                z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
                logdet_tot_q += logdet_q
            z_u, z1 = torch.split(z_q, [1, 1], 1)
            u = torch.sigmoid(z_u) * x_mask
            z0 = (w - u) * x_mask
            logdet_tot_q += torch.sum(
                (F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]
            )
            logq = (
                torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2])
                - logdet_tot_q
            )

            logdet_tot = 0
            z0, logdet = self.log_flow(z0, x_mask)
            logdet_tot += logdet
            z = torch.cat([z0, z1], 1)
            for flow in flows:
                z, logdet = flow(z, x_mask, g=x, reverse=reverse)
                logdet_tot = logdet_tot + logdet
            nll = (
                torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2])
                - logdet_tot
            )
            return nll + logq  # [b]
        else:
            flows = list(reversed(self.flows))
            flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
            z = (
                torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype)
                * noise_scale
            )
            for flow in flows:
                z = flow(z, x_mask, g=x, reverse=reverse)
            z0, z1 = torch.split(z, [1, 1], 1)
            logw = z0
            return logw


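# A minimal usage sketch for StochasticDurationPredictor; every shape and
# tensor value below is a dummy assumption for illustration, not code from
# this repo.
def _demo_stochastic_duration_predictor():
    sdp = StochasticDurationPredictor(192, 192, 3, 0.5, 4)
    x = torch.randn(2, 192, 50)     # text encodings [b, h, t] (detached inside)
    x_mask = torch.ones(2, 1, 50)   # all positions valid
    w = torch.rand(2, 1, 50)        # ground-truth durations per phoneme
    nll = sdp(x, x_mask, w=w)       # training: per-utterance NLL, shape [b]
    # inference: sample log-durations by running the flows in reverse
    logw = sdp(x, x_mask, reverse=True, noise_scale=0.8)
    return nll, logw

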
class DurationPredictor(nn.Module):
    def __init__(
        self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
    ):
        super().__init__()

        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels

        self.drop = nn.Dropout(p_dropout)
        self.conv_1 = nn.Conv1d(
            in_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.norm_1 = modules.LayerNorm(filter_channels)
        self.conv_2 = nn.Conv1d(
            filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.norm_2 = modules.LayerNorm(filter_channels)
        self.proj = nn.Conv1d(filter_channels, 1, 1)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, in_channels, 1)

    def forward(self, x, x_mask, g=None):
        x = torch.detach(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        x = self.conv_1(x * x_mask)
        x = torch.relu(x)
        x = self.norm_1(x)
        x = self.drop(x)
        x = self.conv_2(x * x_mask)
        x = torch.relu(x)
        x = self.norm_2(x)
        x = self.drop(x)
        x = self.proj(x * x_mask)
        return x * x_mask


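# Selection note: SynthesizerTrn below uses StochasticDurationPredictor when
# use_sdp=True and this deterministic DurationPredictor otherwise; the
# deterministic variant regresses log-durations and is trained with an L2
# loss against log(w + 1e-6).

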
class TextEncoder(nn.Module):
    def __init__(
        self,
        n_vocab,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
    ):
        super().__init__()
        self.n_vocab = n_vocab
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout

        self.emb = nn.Embedding(n_vocab, hidden_channels)
        nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
        self.tone_emb = nn.Embedding(num_tones, hidden_channels)
        nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels**-0.5)
        self.language_emb = nn.Embedding(num_languages, hidden_channels)
        nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels**-0.5)
        self.bert_proj = nn.Conv1d(1024, hidden_channels, 1)

        self.encoder = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )

        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, tone, language, bert, g=None):
        bert_emb = self.bert_proj(bert).transpose(1, 2)
        x = (
            self.emb(x) + self.tone_emb(tone) + self.language_emb(language) + bert_emb
        ) * math.sqrt(
            self.hidden_channels
        )  # [b, t, h]
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
            x.dtype
        )

        x = self.encoder(x * x_mask, x_mask, g=g)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return x, m, logs, x_mask


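# A minimal shape sketch for TextEncoder; the hyper-parameters and id ranges
# below are dummy assumptions (id 0 is assumed valid for num_tones and
# num_languages), and the 1024 input channels match bert_proj above.
def _demo_text_encoder():
    enc = TextEncoder(100, 192, 192, 768, 2, 6, 3, 0.1)
    x = torch.randint(0, 100, (2, 50))               # phoneme ids [b, t]
    x_lengths = torch.LongTensor([50, 42])
    tone = torch.zeros(2, 50, dtype=torch.long)      # tone ids
    language = torch.zeros(2, 50, dtype=torch.long)  # language ids
    bert = torch.randn(2, 1024, 50)                  # BERT features [b, 1024, t]
    h, m, logs, x_mask = enc(x, x_lengths, tone, language, bert)
    # h: [2, 192, 50], m / logs: [2, 192, 50], x_mask: [2, 1, 50]
    return h, m, logs, x_mask

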
class ResidualCouplingBlock(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        n_flows=4,
        gin_channels=0,
    ):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(
                    channels,
                    hidden_channels,
                    kernel_size,
                    dilation_rate,
                    n_layers,
                    gin_channels=gin_channels,
                    mean_only=True,
                )
            )
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x


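# A small invertibility check for ResidualCouplingBlock: running the flow
# forward and then in reverse should recover the input up to float error.
# The channel and length values are dummy assumptions.
def _demo_flow_inverse():
    flow = ResidualCouplingBlock(192, 192, 5, 1, 4)
    z = torch.randn(2, 192, 40)
    mask = torch.ones(2, 1, 40)
    z_fwd = flow(z, mask)                    # training direction
    z_rec = flow(z_fwd, mask, reverse=True)  # inference direction (inverse)
    return torch.allclose(z, z_rec, atol=1e-4)

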
class PosteriorEncoder(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            gin_channels=gin_channels,
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask


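# Note on the sampling step above: z = m + randn_like(m) * exp(logs) is the
# standard reparameterization of N(m, exp(logs)^2), so gradients flow through
# m and logs while the randomness stays in the noise tensor.

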
class iSTFT_Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gen_istft_n_fft,
        gen_istft_hop_size,
        gin_channels=0,
    ):
        super(iSTFT_Generator, self).__init__()
        # self.h = h
        self.gen_istft_n_fft = gen_istft_n_fft
        self.gen_istft_hop_size = gen_istft_hop_size

        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = weight_norm(
            Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.post_n_fft = self.gen_istft_n_fft
        self.conv_post = weight_norm(Conv1d(ch, self.post_n_fft + 2, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
        self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))
        self.stft = TorchSTFT(
            filter_length=self.gen_istft_n_fft,
            hop_length=self.gen_istft_hop_size,
            win_length=self.gen_istft_n_fft,
        )

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.reflection_pad(x)
        x = self.conv_post(x)
        spec = torch.exp(x[:, : self.post_n_fft // 2 + 1, :])
        phase = math.pi * torch.sin(x[:, self.post_n_fft // 2 + 1 :, :])
        out = self.stft.inverse(spec, phase).to(x.device)
        return out, None

    def remove_weight_norm(self):
        print("Removing weight norm...")
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)


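# Shape note for the iSTFT head above: conv_post emits post_n_fft + 2
# channels; e.g. with gen_istft_n_fft = 16 that is 18 channels, split into
# 9 magnitude bins (exp of the first n_fft // 2 + 1 channels) and 9 phase
# bins (pi * sin of the rest) before TorchSTFT.inverse rebuilds the waveform.

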
class Multiband_iSTFT_Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gen_istft_n_fft,
        gen_istft_hop_size,
        subbands,
        gin_channels=0,
    ):
        super(Multiband_iSTFT_Generator, self).__init__()
        # self.h = h
        self.subbands = subbands
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = weight_norm(
            Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.post_n_fft = gen_istft_n_fft
        self.ups.apply(init_weights)
        self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))
        self.reshape_pixelshuffle = []

        self.subband_conv_post = weight_norm(
            Conv1d(ch, self.subbands * (self.post_n_fft + 2), 7, 1, padding=3)
        )

        self.subband_conv_post.apply(init_weights)

        self.gen_istft_n_fft = gen_istft_n_fft
        self.gen_istft_hop_size = gen_istft_hop_size

    def forward(self, x, g=None):
        stft = TorchSTFT(
            filter_length=self.gen_istft_n_fft,
            hop_length=self.gen_istft_hop_size,
            win_length=self.gen_istft_n_fft,
        ).to(x.device)
        pqmf = PQMF(x.device)

        x = self.conv_pre(x)  # [B, ch, length]

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)

            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels

        x = F.leaky_relu(x)
        x = self.reflection_pad(x)
        x = self.subband_conv_post(x)
        x = torch.reshape(
            x, (x.shape[0], self.subbands, x.shape[1] // self.subbands, x.shape[-1])
        )

        spec = torch.exp(x[:, :, : self.post_n_fft // 2 + 1, :])
        phase = math.pi * torch.sin(x[:, :, self.post_n_fft // 2 + 1 :, :])

        y_mb_hat = stft.inverse(
            torch.reshape(
                spec,
                (
                    spec.shape[0] * self.subbands,
                    self.gen_istft_n_fft // 2 + 1,
                    spec.shape[-1],
                ),
            ),
            torch.reshape(
                phase,
                (
                    phase.shape[0] * self.subbands,
                    self.gen_istft_n_fft // 2 + 1,
                    phase.shape[-1],
                ),
            ),
        )
        y_mb_hat = torch.reshape(
            y_mb_hat, (x.shape[0], self.subbands, 1, y_mb_hat.shape[-1])
        )
        y_mb_hat = y_mb_hat.squeeze(-2)

        y_g_hat = pqmf.synthesis(y_mb_hat)

        return y_g_hat, y_mb_hat

    def remove_weight_norm(self):
        print("Removing weight norm...")
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


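# Note: each of the `subbands` streams above is decoded by a shared iSTFT,
# and PQMF.synthesis upsamples every stream by `subbands` before the
# synthesis filterbank; with 4 subbands the full-band waveform y_g_hat is
# therefore 4x longer than each per-band signal in y_mb_hat.

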
class Multistream_iSTFT_Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gen_istft_n_fft,
        gen_istft_hop_size,
        subbands,
        gin_channels=0,
    ):
        super(Multistream_iSTFT_Generator, self).__init__()
        # self.h = h
        self.subbands = subbands
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = weight_norm(
            Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.post_n_fft = gen_istft_n_fft
        self.ups.apply(init_weights)
        self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))
        self.reshape_pixelshuffle = []

        self.subband_conv_post = weight_norm(
            Conv1d(ch, self.subbands * (self.post_n_fft + 2), 7, 1, padding=3)
        )

        self.subband_conv_post.apply(init_weights)

        self.gen_istft_n_fft = gen_istft_n_fft
        self.gen_istft_hop_size = gen_istft_hop_size

        # fixed one-hot filter that upsamples each stream by `subbands`
        updown_filter = torch.zeros(
            (self.subbands, self.subbands, self.subbands)
        ).float()
        for k in range(self.subbands):
            updown_filter[k, k, 0] = 1.0
        self.register_buffer("updown_filter", updown_filter)
        # learned combiner; note the input channels are hard-coded to 4 streams
        self.multistream_conv_post = weight_norm(
            Conv1d(4, 1, kernel_size=63, bias=False, padding=get_padding(63, 1))
        )
        self.multistream_conv_post.apply(init_weights)

    def forward(self, x, g=None):
        stft = TorchSTFT(
            filter_length=self.gen_istft_n_fft,
            hop_length=self.gen_istft_hop_size,
            win_length=self.gen_istft_n_fft,
        ).to(x.device)
        # pqmf = PQMF(x.device)

        x = self.conv_pre(x)  # [B, ch, length]

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)

            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels

        x = F.leaky_relu(x)
        x = self.reflection_pad(x)
        x = self.subband_conv_post(x)
        x = torch.reshape(
            x, (x.shape[0], self.subbands, x.shape[1] // self.subbands, x.shape[-1])
        )

        spec = torch.exp(x[:, :, : self.post_n_fft // 2 + 1, :])
        phase = math.pi * torch.sin(x[:, :, self.post_n_fft // 2 + 1 :, :])

        y_mb_hat = stft.inverse(
            torch.reshape(
                spec,
                (
                    spec.shape[0] * self.subbands,
                    self.gen_istft_n_fft // 2 + 1,
                    spec.shape[-1],
                ),
            ),
            torch.reshape(
                phase,
                (
                    phase.shape[0] * self.subbands,
                    self.gen_istft_n_fft // 2 + 1,
                    phase.shape[-1],
                ),
            ),
        )
        y_mb_hat = torch.reshape(
            y_mb_hat, (x.shape[0], self.subbands, 1, y_mb_hat.shape[-1])
        )
        y_mb_hat = y_mb_hat.squeeze(-2)

        # .cuda(x.device) assumes a CUDA device for the upsampling filter
        y_mb_hat = F.conv_transpose1d(
            y_mb_hat,
            self.updown_filter.cuda(x.device) * self.subbands,
            stride=self.subbands,
        )

        y_g_hat = self.multistream_conv_post(y_mb_hat)

        return y_g_hat, y_mb_hat

    def remove_weight_norm(self):
        print("Removing weight norm...")
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


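# Design note: unlike the multi-band generator, recombination here is
# learned; the fixed one-hot updown_filter only upsamples the streams, and
# the 63-tap multistream_conv_post replaces the fixed PQMF synthesis
# filterbank as the final mixer.

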
class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        # four strided (kernel_size, 1) convs (1->32->128->512->1024) followed
        # by one stride-1 1024->1024 conv, all padded along the time axis only
        in_chs = [1, 32, 128, 512, 1024]
        out_chs = [32, 128, 512, 1024, 1024]
        strides = [(stride, 1)] * 4 + [1]
        self.convs = nn.ModuleList(
            [
                norm_f(
                    Conv2d(
                        c_in,
                        c_out,
                        (kernel_size, 1),
                        s,
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                )
                for c_in, c_out, s in zip(in_chs, out_chs, strides)
            ]
        )
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


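# Worked example of the 1d-to-2d fold above: with period = 3 and t = 100,
# the input is reflection-padded by 2 to t = 102 and viewed as
# [b, c, 34, 3], so each column holds samples spaced `period` apart and the
# (kernel_size, 1) convolutions scan that periodic structure.

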
class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList(
            [
                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = [2, 3, 5, 7, 11]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for d in self.discriminators:
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


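# A minimal GAN-loss sketch around MultiPeriodDiscriminator; the
# least-squares loss wiring below is an assumption in the spirit of
# HiFi-GAN, not code shipped in this file.
def _demo_mpd_losses():
    mpd = MultiPeriodDiscriminator()
    y = torch.randn(2, 1, 8192)      # real waveform slice
    y_hat = torch.randn(2, 1, 8192)  # generated waveform slice
    y_d_rs, y_d_gs, _, _ = mpd(y, y_hat)
    d_loss = sum(torch.mean((1 - dr) ** 2) + torch.mean(dg**2)
                 for dr, dg in zip(y_d_rs, y_d_gs))
    g_loss = sum(torch.mean((1 - dg) ** 2) for dg in y_d_gs)
    return d_loss, g_loss

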
class SynthesizerTrn(nn.Module):
    """
    Synthesizer for Training
    """

    def __init__(
        self,
        n_vocab,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gen_istft_n_fft=16,
        gen_istft_hop_size=4,
        n_speakers=0,
        gin_channels=0,
        use_sdp=True,
        ms_istft_vits=False,
        mb_istft_vits=False,
        subbands=False,
        istft_vits=False,
        **kwargs
    ):
        super().__init__()
        self.n_vocab = n_vocab
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.n_speakers = n_speakers
        self.gin_channels = gin_channels
        self.ms_istft_vits = ms_istft_vits
        self.mb_istft_vits = mb_istft_vits
        self.istft_vits = istft_vits

        self.use_sdp = use_sdp

        self.enc_p = TextEncoder(
            n_vocab,
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        if mb_istft_vits:
            print("Using multi-band iSTFT VITS")
            self.dec = Multiband_iSTFT_Generator(
                inter_channels,
                resblock,
                resblock_kernel_sizes,
                resblock_dilation_sizes,
                upsample_rates,
                upsample_initial_channel,
                upsample_kernel_sizes,
                gen_istft_n_fft,
                gen_istft_hop_size,
                subbands,
                gin_channels=gin_channels,
            )
        elif ms_istft_vits:
            print("Using multi-stream iSTFT VITS")
            self.dec = Multistream_iSTFT_Generator(
                inter_channels,
                resblock,
                resblock_kernel_sizes,
                resblock_dilation_sizes,
                upsample_rates,
                upsample_initial_channel,
                upsample_kernel_sizes,
                gen_istft_n_fft,
                gen_istft_hop_size,
                subbands,
                gin_channels=gin_channels,
            )
        elif istft_vits:
            print("Using iSTFT-VITS")
            self.dec = iSTFT_Generator(
                inter_channels,
                resblock,
                resblock_kernel_sizes,
                resblock_dilation_sizes,
                upsample_rates,
                upsample_initial_channel,
                upsample_kernel_sizes,
                gen_istft_n_fft,
                gen_istft_hop_size,
                gin_channels=gin_channels,
            )
        else:
            print("Using original VITS")
            self.dec = Generator(
                inter_channels,
                resblock,
                resblock_kernel_sizes,
                resblock_dilation_sizes,
                upsample_rates,
                upsample_initial_channel,
                upsample_kernel_sizes,
                gin_channels=gin_channels,
            )

        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels
        )
        # stochastic duration predictor
        if use_sdp:
            self.dp = StochasticDurationPredictor(
                hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels
            )
        else:
            self.dp = DurationPredictor(
                hidden_channels, 256, 3, 0.5, gin_channels=gin_channels
            )
        if n_speakers > 0:
            self.emb_g = nn.Embedding(n_speakers, gin_channels)

    def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert):
        if self.n_speakers > 0:
            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
        else:
            g = None
        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert, g=g)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        with torch.no_grad():
            # negative cross-entropy
            s_p_sq_r = torch.exp(-2 * logs_p)  # [b, d, t]
            neg_cent1 = torch.sum(
                -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True
            )  # [b, 1, t_s]
            neg_cent2 = torch.matmul(
                -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r
            )  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
            neg_cent3 = torch.matmul(
                z_p.transpose(1, 2), (m_p * s_p_sq_r)
            )  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
            neg_cent4 = torch.sum(
                -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True
            )  # [b, 1, t_s]
            neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
            attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
            attn = (
                monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))
                .unsqueeze(1)
                .detach()
            )
        w = attn.sum(2)
        if self.use_sdp:
            l_length = self.dp(x, x_mask, w, g=g)
            l_length = l_length / torch.sum(x_mask)
        else:
            logw_ = torch.log(w + 1e-6) * x_mask
            logw = self.dp(x, x_mask, g=g)
            l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(
                x_mask
            )  # for averaging
        # expand prior
        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        # the decoder was modified: it also returns the multi-band signal o_mb
        o, o_mb = self.dec(z_slice, g=g)
        return (
            o,
            o_mb,
            l_length,
            attn,
            ids_slice,
            x_mask,
            y_mask,
            (z, z_p, m_p, logs_p, m_q, logs_q),
        )

    def infer(
        self,
        x,
        x_lengths,
        sid,
        tone,
        language,
        bert,
        noise_scale=0.667,
        length_scale=1,
        noise_scale_w=1,
        max_len=None,
    ):
        if self.n_speakers > 0:
            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
        else:
            g = None
        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert, g=g)

        if self.use_sdp:
            logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
        else:
            logw = self.dp(x, x_mask, g=g)

        w = torch.exp(logw) * x_mask * length_scale
        w_ceil = torch.ceil(w)  # round up to integer frame counts
        y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
        y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(
            x_mask.dtype
        )
        attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
        attn = commons.generate_path(w_ceil, attn_mask)

        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(
            1, 2
        )  # [b, t', t], [b, t, d] -> [b, d, t']
        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(
            1, 2
        )  # [b, t', t], [b, t, d] -> [b, d, t']

        z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
        z = self.flow(z_p, y_mask, g=g, reverse=True)
        o, o_mb = self.dec((z * y_mask)[:, :, :max_len], g=g)
        return o, o_mb, attn, y_mask, (z, z_p, m_p, logs_p)


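# A minimal end-to-end inference sketch; every hyper-parameter below is a
# dummy stand-in for values normally read from the json config, and the
# plain iSTFT decoder branch is chosen because it has no PQMF dependency
# (TorchSTFT is assumed to work on the model's device).
def _demo_synthesizer_infer():
    net_g = SynthesizerTrn(
        n_vocab=100, spec_channels=513, segment_size=32,
        inter_channels=192, hidden_channels=192, filter_channels=768,
        n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1,
        resblock="1", resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5]] * 3,
        upsample_rates=[4, 4], upsample_initial_channel=256,
        upsample_kernel_sizes=[16, 16], n_speakers=4, gin_channels=256,
        istft_vits=True,
    )
    x = torch.randint(0, 100, (1, 30))
    x_lengths = torch.LongTensor([30])
    sid = torch.LongTensor([0])
    tone = torch.zeros(1, 30, dtype=torch.long)
    language = torch.zeros(1, 30, dtype=torch.long)
    bert = torch.randn(1, 1024, 30)
    with torch.no_grad():
        o, o_mb, attn, y_mask, _ = net_g.infer(
            x, x_lengths, sid, tone, language, bert
        )
    return o  # waveform [1, 1, T_wav]

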
class Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        # return (waveform, multi-band) to match the iSTFT generators, since
        # SynthesizerTrn unpacks `o, o_mb = self.dec(...)` for every branch
        return x, None

    def remove_weight_norm(self):
        print("Removing weight norm...")
        for layer in self.ups:
            remove_weight_norm(layer)
        for layer in self.resblocks:
            layer.remove_weight_norm()
modules.py
ADDED
@@ -0,0 +1,390 @@
import copy
import math
import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import functional as F

from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm

import commons
from commons import init_weights, get_padding
from transforms import piecewise_rational_quadratic_transform


LRELU_SLOPE = 0.1

class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)


class ConvReluNorm(nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """
    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for i in range(n_layers):
            dilation = kernel_size ** i
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
                                            groups=channels, dilation=dilation, padding=padding))
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for i in range(self.n_layers):
            y = self.convs_sep[i](x * x_mask)
            y = self.norms_1[i](y)
            y = F.gelu(y)
            y = self.convs_1x1[i](y)
            y = self.norms_2[i](y)
            y = F.gelu(y)
            y = self.drop(y)
            x = x + y
        return x * x_mask


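# Dilation note: layer i above uses dilation = kernel_size ** i, so with
# kernel_size = 3 and n_layers = 3 the depthwise convolutions run at
# dilations 1, 3 and 9, growing the receptive field exponentially with depth.

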
class WN(torch.nn.Module):
    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
        super(WN, self).__init__()
        assert kernel_size % 2 == 1
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)

        if gin_channels != 0:
            cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')

        for i in range(n_layers):
            dilation = dilation_rate ** i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
                                       dilation=dilation, padding=padding)
            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
            self.in_layers.append(in_layer)

            # last one is not necessary
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])

        if g is not None:
            g = self.cond_layer(g)

        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)

            acts = commons.fused_add_tanh_sigmoid_multiply(
                x_in,
                g_l,
                n_channels_tensor)
            acts = self.drop(acts)

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                res_acts = res_skip_acts[:, :self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels:, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)


class ResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class ResBlock2(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class Log(nn.Module):
    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
            logdet = torch.sum(-y, [1, 2])
            return y, logdet
        else:
            x = torch.exp(x) * x_mask
            return x


class Flip(nn.Module):
    def forward(self, x, *args, reverse=False, **kwargs):
        x = torch.flip(x, [1])
        if not reverse:
            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
            return x, logdet
        else:
            return x


class ElementwiseAffine(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = self.m + torch.exp(self.logs) * x
            y = y * x_mask
            logdet = torch.sum(self.logs * x_mask, [1, 2])
            return y, logdet
        else:
            x = (x - self.m) * torch.exp(-self.logs) * x_mask
            return x


class ResidualCouplingLayer(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 p_dropout=0,
                 gin_channels=0,
                 mean_only=False):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x


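# Worked example of the affine coupling above: with channels = 4 the input
# splits into x0, x1 of 2 channels each; the forward pass computes
# x1' = m(x0) + x1 * exp(logs(x0)) with logdet = sum(logs), and the reverse
# branch applies the exact inverse (x1' - m) * exp(-logs). x0 passes through
# unchanged, which keeps the Jacobian triangular and cheap to evaluate.

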
class ConvFlow(nn.Module):
    def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2

        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask

        b, c, t = x0.shape
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*?, t] -> [b, c, t, ?]

        unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_derivatives = h[..., 2 * self.num_bins:]

        x1, logabsdet = piecewise_rational_quadratic_transform(x1,
                                                               unnormalized_widths,
                                                               unnormalized_heights,
                                                               unnormalized_derivatives,
                                                               inverse=reverse,
                                                               tails='linear',
                                                               tail_bound=self.tail_bound)

        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        if not reverse:
            return x, logdet
        else:
            return x
pqmf.py
ADDED
@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*-

# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)

"""Pseudo QMF modules."""

import numpy as np
import torch
import torch.nn.functional as F

# kaiser lives in the windows namespace; the scipy.signal alias was removed
# in recent SciPy releases
from scipy.signal.windows import kaiser


def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0):
    """Design prototype filter for PQMF.
    This method is based on `A Kaiser window approach for the design of prototype
    filters of cosine modulated filterbanks`_.
    Args:
        taps (int): The number of filter taps.
        cutoff_ratio (float): Cut-off frequency ratio.
        beta (float): Beta coefficient for kaiser window.
    Returns:
        ndarray: Impulse response of prototype filter (taps + 1,).
    .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
        https://ieeexplore.ieee.org/abstract/document/681427
    """
    # check the arguments are valid
    assert taps % 2 == 0, "The number of taps must be an even number."
    assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."

    # make initial filter
    omega_c = np.pi * cutoff_ratio
    with np.errstate(invalid='ignore'):
        h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) \
            / (np.pi * (np.arange(taps + 1) - 0.5 * taps))
    h_i[taps // 2] = np.cos(0) * cutoff_ratio  # fix nan due to indeterminate form

    # apply kaiser window
    w = kaiser(taps + 1, beta)
    h = h_i * w

    return h


46 |
+
class PQMF(torch.nn.Module):
|
47 |
+
"""PQMF module.
|
48 |
+
This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.
|
49 |
+
.. _`Near-perfect-reconstruction pseudo-QMF banks`:
|
50 |
+
https://ieeexplore.ieee.org/document/258122
|
51 |
+
"""
|
52 |
+
|
53 |
+
def __init__(self, device, subbands=4, taps=62, cutoff_ratio=0.15, beta=9.0):
|
54 |
+
"""Initilize PQMF module.
|
55 |
+
Args:
|
56 |
+
subbands (int): The number of subbands.
|
57 |
+
taps (int): The number of filter taps.
|
58 |
+
cutoff_ratio (float): Cut-off frequency ratio.
|
59 |
+
beta (float): Beta coefficient for kaiser window.
|
60 |
+
"""
|
61 |
+
super(PQMF, self).__init__()
|
62 |
+
|
63 |
+
# define filter coefficient
|
64 |
+
h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
|
65 |
+
h_analysis = np.zeros((subbands, len(h_proto)))
|
66 |
+
h_synthesis = np.zeros((subbands, len(h_proto)))
|
67 |
+
for k in range(subbands):
|
68 |
+
h_analysis[k] = 2 * h_proto * np.cos(
|
69 |
+
(2 * k + 1) * (np.pi / (2 * subbands)) *
|
70 |
+
(np.arange(taps + 1) - ((taps - 1) / 2)) +
|
71 |
+
(-1) ** k * np.pi / 4)
|
72 |
+
h_synthesis[k] = 2 * h_proto * np.cos(
|
73 |
+
(2 * k + 1) * (np.pi / (2 * subbands)) *
|
74 |
+
(np.arange(taps + 1) - ((taps - 1) / 2)) -
|
75 |
+
(-1) ** k * np.pi / 4)
|
76 |
+
|
77 |
+
# convert to tensor
|
78 |
+
analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1).cuda(device)
|
79 |
+
synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0).cuda(device)
|
80 |
+
|
81 |
+
# register coefficients as beffer
|
82 |
+
self.register_buffer("analysis_filter", analysis_filter)
|
83 |
+
self.register_buffer("synthesis_filter", synthesis_filter)
|
84 |
+
|
85 |
+
# filter for downsampling & upsampling
|
86 |
+
updown_filter = torch.zeros((subbands, subbands, subbands)).float().cuda(device)
|
87 |
+
for k in range(subbands):
|
88 |
+
updown_filter[k, k, 0] = 1.0
|
89 |
+
self.register_buffer("updown_filter", updown_filter)
|
90 |
+
self.subbands = subbands
|
91 |
+
|
92 |
+
# keep padding info
|
93 |
+
self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)
|
94 |
+
|
95 |
+
def analysis(self, x):
|
96 |
+
"""Analysis with PQMF.
|
97 |
+
Args:
|
98 |
+
x (Tensor): Input tensor (B, 1, T).
|
99 |
+
Returns:
|
100 |
+
Tensor: Output tensor (B, subbands, T // subbands).
|
101 |
+
"""
|
102 |
+
x = F.conv1d(self.pad_fn(x), self.analysis_filter)
|
103 |
+
return F.conv1d(x, self.updown_filter, stride=self.subbands)
|
104 |
+
|
105 |
+
def synthesis(self, x):
|
106 |
+
"""Synthesis with PQMF.
|
107 |
+
Args:
|
108 |
+
x (Tensor): Input tensor (B, subbands, T // subbands).
|
109 |
+
Returns:
|
110 |
+
Tensor: Output tensor (B, 1, T).
|
111 |
+
"""
|
112 |
+
# NOTE(kan-bayashi): Power will be dreased so here multipy by # subbands.
|
113 |
+
# Not sure this is the correct way, it is better to check again.
|
114 |
+
# TODO(kan-bayashi): Understand the reconstruction procedure
|
115 |
+
x = F.conv_transpose1d(x, self.updown_filter * self.subbands, stride=self.subbands)
|
116 |
+
return F.conv1d(self.pad_fn(x), self.synthesis_filter)
|
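
A quick round trip through analysis() and synthesis() is a handy smoke test for the bank above. This is an illustrative sketch, not part of the upload: it assumes a CUDA device is available (PQMF.__init__ moves its filters onto the GPU given by `device`), and the tensor sizes are arbitrary.

    import torch
    from pqmf import PQMF

    pqmf = PQMF(device=0, subbands=4, taps=62)      # 4-band filterbank on cuda:0
    x = torch.randn(1, 1, 8192, device="cuda:0")    # dummy waveform, shape (B, 1, T)

    bands = pqmf.analysis(x)       # (1, 4, 2048): each subband holds T // 4 samples
    x_hat = pqmf.synthesis(bands)  # (1, 1, 8192): near-perfect reconstruction

    # "near-perfect" means the residual is small but nonzero
    print(bands.shape, x_hat.shape, (x - x_hat).abs().max().item())

Registering the filters with register_buffer keeps them out of the optimizer while still saving them with the model's state dict, which is the right behavior for fixed, non-trainable filterbanks.
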
transforms.py
ADDED
@@ -0,0 +1,193 @@
import torch
from torch.nn import functional as F

import numpy as np


DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3


def piecewise_rational_quadratic_transform(inputs,
                                           unnormalized_widths,
                                           unnormalized_heights,
                                           unnormalized_derivatives,
                                           inverse=False,
                                           tails=None,
                                           tail_bound=1.,
                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                           min_derivative=DEFAULT_MIN_DERIVATIVE):

    if tails is None:
        spline_fn = rational_quadratic_spline
        spline_kwargs = {}
    else:
        spline_fn = unconstrained_rational_quadratic_spline
        spline_kwargs = {
            'tails': tails,
            'tail_bound': tail_bound
        }

    outputs, logabsdet = spline_fn(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **spline_kwargs
    )
    return outputs, logabsdet


def searchsorted(bin_locations, inputs, eps=1e-6):
    bin_locations[..., -1] += eps
    return torch.sum(
        inputs[..., None] >= bin_locations,
        dim=-1
    ) - 1


def unconstrained_rational_quadratic_spline(inputs,
                                            unnormalized_widths,
                                            unnormalized_heights,
                                            unnormalized_derivatives,
                                            inverse=False,
                                            tails='linear',
                                            tail_bound=1.,
                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == 'linear':
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError('{} tails are not implemented.'.format(tails))

    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative
    )

    return outputs, logabsdet


def rational_quadratic_spline(inputs,
                              unnormalized_widths,
                              unnormalized_heights,
                              unnormalized_derivatives,
                              inverse=False,
                              left=0., right=1., bottom=0., top=1.,
                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                              min_derivative=DEFAULT_MIN_DERIVATIVE):
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError('Input to a transform is not within its domain')

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError('Minimal bin width too large for the number of bins')
    if min_bin_height * num_bins > 1.0:
        raise ValueError('Minimal bin height too large for the number of bins')

    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        a = (((inputs - input_cumheights) * (input_derivatives
                                             + input_derivatives_plus_one
                                             - 2 * input_delta)
              + input_heights * (input_delta - input_derivatives)))
        b = (input_heights * input_derivatives
             - (inputs - input_cumheights) * (input_derivatives
                                              + input_derivatives_plus_one
                                              - 2 * input_delta))
        c = - input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - root).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, -logabsdet
    else:
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (input_delta * theta.pow(2)
                                     + input_derivatives * theta_one_minus_theta)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - theta).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
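
The functions above implement the invertible piecewise rational-quadratic spline used inside the flow layers: a forward call and an inverse call with the same parameters undo each other, and the returned log-determinants cancel. A minimal sanity-check sketch (shapes are made up; with tails='linear' the derivative tensor carries num_bins - 1 inner-knot values, since the two boundary derivatives are padded in by the function itself):

    import torch
    from transforms import piecewise_rational_quadratic_transform

    num_bins = 10
    x = torch.rand(2, 5) * 2 - 1            # inputs inside the tail bound [-1, 1)
    w = torch.randn(2, 5, num_bins)         # unnormalized bin widths
    h = torch.randn(2, 5, num_bins)         # unnormalized bin heights
    d = torch.randn(2, 5, num_bins - 1)     # unnormalized inner-knot derivatives

    y, logdet = piecewise_rational_quadratic_transform(
        x, w, h, d, tails='linear', tail_bound=1.0)
    x_rec, inv_logdet = piecewise_rational_quadratic_transform(
        y, w, h, d, inverse=True, tails='linear', tail_bound=1.0)

    # forward then inverse recovers the input; the log-determinants cancel
    print(torch.allclose(x, x_rec, atol=1e-4))             # True (up to float error)
    print(torch.allclose(logdet, -inv_logdet, atol=1e-4))  # True (up to float error)

Points outside [-tail_bound, tail_bound] pass through unchanged with a log-determinant of zero, which is what makes the 'linear' tails usable on unbounded inputs.
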
utils.py
ADDED
@@ -0,0 +1,359 @@
import os
import glob
import sys
import argparse
import logging
import json
import subprocess
import numpy as np
from scipy.io.wavfile import read
import torch

MATPLOTLIB_FLAG = False

logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logger = logging  # use the logging module directly until get_logger() installs a real logger


def load_checkpoint(checkpoint_path, model, optimizer=None, load_pretrain=False):
    print("checkpoint_path is ", checkpoint_path)
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
    iteration = checkpoint_dict["iteration"]
    learning_rate = checkpoint_dict["learning_rate"]
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint_dict["optimizer"])
    saved_state_dict = checkpoint_dict["model"]
    if hasattr(model, "module"):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            if load_pretrain:
                # training from a base (pretrained) model:
                # mask out the checkpoint's speaker information by keeping
                # the model's freshly initialized speaker embedding
                if k == "emb_g.weight":
                    new_state_dict[k] = v
                else:
                    new_state_dict[k] = saved_state_dict[k]
            else:
                new_state_dict[k] = saved_state_dict[k]
        except Exception:
            logger.info("%s is not in the checkpoint" % k)
            new_state_dict[k] = v
    if hasattr(model, "module"):
        model.module.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(new_state_dict)
    logger.info(
        "Loaded checkpoint '{}' (iteration {})".format(checkpoint_path, iteration)
    )
    return model, optimizer, learning_rate, iteration


def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
    logger.info(
        "Saving model and optimizer state at iteration {} to {}".format(
            iteration, checkpoint_path
        )
    )

    if hasattr(model, "module"):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    torch.save(
        {
            "model": state_dict,
            "iteration": iteration,
            "optimizer": optimizer.state_dict(),
            "learning_rate": learning_rate,
        },
        checkpoint_path,
    )


def summarize(
    writer,
    global_step,
    scalars={},
    histograms={},
    images={},
    audios={},
    audio_sampling_rate=22050,
):
    for k, v in scalars.items():
        writer.add_scalar(k, v, global_step)
    for k, v in histograms.items():
        writer.add_histogram(k, v, global_step)
    for k, v in images.items():
        writer.add_image(k, v, global_step, dataformats="HWC")
    for k, v in audios.items():
        writer.add_audio(k, v, global_step, audio_sampling_rate)


def latest_checkpoint_path(dir_path, regex="G_*.pth"):
    f_list = glob.glob(os.path.join(dir_path, regex))
    f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    x = f_list[-1]
    print(x)
    return x


def clean_checkpoints(path_to_models="logs/44k/", n_ckpts_to_keep=2, sort_by_time=True):
    """Free up space by deleting saved checkpoints.

    Arguments:
    path_to_models   -- Path to the model directory
    n_ckpts_to_keep  -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
    sort_by_time     -- True -> chronologically delete ckpts
                        False -> lexicographically delete ckpts
    """
    import re

    ckpts_files = [
        f
        for f in os.listdir(path_to_models)
        if os.path.isfile(os.path.join(path_to_models, f))
    ]

    def name_key(_f):
        return int(re.compile("._(\\d+)\\.pth").match(_f).group(1))

    def time_key(_f):
        return os.path.getmtime(os.path.join(path_to_models, _f))

    sort_key = time_key if sort_by_time else name_key

    def x_sorted(_x):
        return sorted(
            [f for f in ckpts_files if f.startswith(_x) and not f.endswith("_0.pth")],
            key=sort_key,
        )

    to_del = [
        os.path.join(path_to_models, fn)
        for fn in (x_sorted("G")[:-n_ckpts_to_keep] + x_sorted("D")[:-n_ckpts_to_keep])
    ]

    def del_info(fn):
        return logger.info(f".. Free up space by deleting ckpt {fn}")

    def del_routine(x):
        return [os.remove(x), del_info(x)]

    [del_routine(fn) for fn in to_del]


def plot_spectrogram_to_numpy(spectrogram):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib

        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger("matplotlib")
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np

    fig, ax = plt.subplots(figsize=(10, 2))
    im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
    plt.colorbar(im, ax=ax)
    plt.xlabel("Frames")
    plt.ylabel("Frequency")
    plt.tight_layout()

    fig.canvas.draw()
    # np.fromstring is deprecated; frombuffer reads the RGB bytes directly
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def plot_alignment_to_numpy(alignment, info=None):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib

        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger("matplotlib")
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np

    fig, ax = plt.subplots(figsize=(6, 4))
    im = ax.imshow(
        alignment.transpose(), aspect="auto", origin="lower", interpolation="none"
    )
    fig.colorbar(im, ax=ax)
    xlabel = "Decoder timestep"
    if info is not None:
        xlabel += "\n\n" + info
    plt.xlabel(xlabel)
    plt.ylabel("Encoder timestep")
    plt.tight_layout()

    fig.canvas.draw()
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def load_wav_to_torch(full_path):
    sampling_rate, data = read(full_path)
    return torch.FloatTensor(data.astype(np.float32)), sampling_rate


def load_filepaths_and_text(filename, split="|"):
    with open(filename, encoding="utf-8") as f:
        filepaths_and_text = [line.strip().split(split) for line in f]
    return filepaths_and_text


def get_hparams(init=True):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        type=str,
        default="./configs/single_istft.json",
        help="JSON file for configuration",
    )
    parser.add_argument(
        "-m", "--model", type=str, default="istft-vits", help="Model name"
    )
    parser.add_argument(
        "-n", "--max_epochs", type=int, default=500, help="finetune epochs"
    )

    parser.add_argument(
        "--cont",
        action="store_true",
        help="whether to continue training from the latest checkpoint",
    )
    parser.add_argument(
        "--twp",
        action="store_true",
        help="whether to train from a pretrained model",
    )

    args = parser.parse_args()
    model_dir = os.path.join("/data/logs/istft", args.model)

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    config_path = args.config
    config_save_path = os.path.join(model_dir, "config.json")
    if init:
        with open(config_path, "r") as f:
            data = f.read()
        with open(config_save_path, "w") as f:
            f.write(data)
    else:
        with open(config_save_path, "r") as f:
            data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    hparams.model_dir = model_dir
    hparams.max_epochs = args.max_epochs
    hparams.cont = args.cont
    hparams.twp = args.twp
    return hparams


def get_hparams_from_dir(model_dir):
    config_save_path = os.path.join(model_dir, "config.json")
    with open(config_save_path, "r") as f:
        data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    hparams.model_dir = model_dir
    return hparams


def get_hparams_from_file(config_path):
    with open(config_path, "r") as f:
        data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    return hparams


def check_git_hash(model_dir):
    source_dir = os.path.dirname(os.path.realpath(__file__))
    if not os.path.exists(os.path.join(source_dir, ".git")):
        logger.warning(
            "{} is not a git repository, therefore hash value comparison will be ignored.".format(
                source_dir
            )
        )
        return

    cur_hash = subprocess.getoutput("git rev-parse HEAD")

    path = os.path.join(model_dir, "githash")
    if os.path.exists(path):
        saved_hash = open(path).read()
        if saved_hash != cur_hash:
            logger.warning(
                "git hash values are different. {}(saved) != {}(current)".format(
                    saved_hash[:8], cur_hash[:8]
                )
            )
    else:
        open(path, "w").write(cur_hash)


def get_logger(model_dir, filename="train.log"):
    global logger
    logger = logging.getLogger(os.path.basename(model_dir))
    logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    h = logging.FileHandler(os.path.join(model_dir, filename))
    h.setLevel(logging.DEBUG)
    h.setFormatter(formatter)
    logger.addHandler(h)
    return logger


class HParams:
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if type(v) == dict:
                v = HParams(**v)
            self[k] = v

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()
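
Most of the helpers above funnel into HParams, which wraps the JSON config so nested sections can be read with attribute access while still behaving like a dict. An illustrative sketch (the section names follow the usual VITS-style config layout and are assumptions, not guarantees about this repository's config file):

    from utils import get_hparams_from_file

    # default config path used by get_hparams(); adjust to your setup
    hps = get_hparams_from_file("./configs/single_istft.json")

    print(hps.train.learning_rate)    # nested dicts become nested HParams
    print("model" in hps)             # __contains__ checks top-level keys
    for k, v in hps.data.items():     # dict-style iteration also works
        print(k, v)

Note also how load_checkpoint() pairs with the speaker table: with load_pretrain=True it copies every weight from the checkpoint except emb_g.weight, keeping the freshly initialized speaker embedding so a new speaker set can be fine-tuned on top of a pretrained base model.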