""" from https://github.com/jaywalnut310/glow-tts """

import math

import torch
import torch.nn as nn
from einops import rearrange

import pflow.utils as utils
from pflow.utils.model import sequence_mask

log = utils.get_pylogger(__name__)


class LayerNorm(nn.Module):
    """Layer normalization over the channel dimension of (batch, channels, ...) tensors."""

    def __init__(self, channels, eps=1e-4):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = torch.nn.Parameter(torch.ones(channels))
        self.beta = torch.nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        n_dims = len(x.shape)
        mean = torch.mean(x, 1, keepdim=True)
        variance = torch.mean((x - mean) ** 2, 1, keepdim=True)

        x = (x - mean) * torch.rsqrt(variance + self.eps)

        shape = [1, -1] + [1] * (n_dims - 2)
        x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x


class ConvReluNorm(nn.Module):
    """Stack of masked Conv1d -> LayerNorm -> ReLU -> Dropout blocks with a zero-initialised residual projection."""

    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.conv_layers = torch.nn.ModuleList()
        self.norm_layers = torch.nn.ModuleList()
        self.conv_layers.append(torch.nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(
                torch.nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)
            )
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1)
        # Zero-init the projection so the module starts out as an identity mapping (x + 0).
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class DurationPredictor(nn.Module):
    """Per-token duration predictor: two masked Conv1d blocks followed by a 1x1 projection."""

    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.p_dropout = p_dropout

        self.drop = torch.nn.Dropout(p_dropout)
        self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.norm_1 = LayerNorm(filter_channels)
        self.conv_2 = torch.nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.norm_2 = LayerNorm(filter_channels)
        self.proj = torch.nn.Conv1d(filter_channels, 1, 1)

    def forward(self, x, x_mask):
        x = self.conv_1(x * x_mask)
        x = torch.relu(x)
        x = self.norm_1(x)
        x = self.drop(x)
        x = self.conv_2(x * x_mask)
        x = torch.relu(x)
        x = self.norm_2(x)
        x = self.drop(x)
        x = self.proj(x * x_mask)
        return x * x_mask


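# Note (added for clarity, not in the upstream code): given x of shape
# (batch, in_channels, text_len) and x_mask of shape (batch, 1, text_len),
# DurationPredictor returns a masked tensor of shape (batch, 1, text_len);
# TextEncoder.forward exposes this output as `logw`, i.e. predicted log-durations.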
class RotaryPositionalEmbeddings(nn.Module):
    r"""
    ## RoPE module

    Rotary encoding transforms pairs of features by rotating them in the 2D plane.
    That is, it organizes the $d$ features as $\frac{d}{2}$ pairs.
    Each pair can be considered a coordinate in a 2D plane, and the encoding rotates it
    by an angle that depends on the position of the token.
    """

    def __init__(self, d: int, base: int = 10_000):
        r"""
        * `d` is the number of rotary features $d$ (may be passed as a float, e.g. half the
          head dimension; it is truncated to an integer)
        * `base` is the constant used for calculating $\Theta$
        """
        super().__init__()

        self.base = base
        self.d = int(d)
        self.cos_cached = None
        self.sin_cached = None

    def _build_cache(self, x: torch.Tensor):
        r"""
        Cache $\cos$ and $\sin$ values
        """
        if self.cos_cached is not None and x.shape[0] <= self.cos_cached.shape[0]:
            return

        # x is laid out as [seq_len, batch, heads, d] at this point.
        seq_len = x.shape[0]

        # theta_i = base^(-2i/d) for i = 0, 2, 4, ..., d-2
        theta = 1.0 / (self.base ** (torch.arange(0, self.d, 2).float() / self.d)).to(x.device)

        # Position indices 0, 1, ..., seq_len - 1
        seq_idx = torch.arange(seq_len, device=x.device).float().to(x.device)

        # Outer product of positions and theta: angle of each (position, pair)
        idx_theta = torch.einsum("n,d->nd", seq_idx, theta)

        # Duplicate so the angles line up with [x_1..x_{d/2}, x_1..x_{d/2}]
        idx_theta2 = torch.cat([idx_theta, idx_theta], dim=1)

        self.cos_cached = idx_theta2.cos()[:, None, None, :]
        self.sin_cached = idx_theta2.sin()[:, None, None, :]

    def _neg_half(self, x: torch.Tensor):
        d_2 = self.d // 2

        # Build [-x_{d/2+1}, ..., -x_d, x_1, ..., x_{d/2}]
        return torch.cat([-x[:, :, :, d_2:], x[:, :, :, :d_2]], dim=-1)

    def forward(self, x: torch.Tensor):
        """
        * `x` is a key or query tensor with shape `[batch_size, n_heads, seq_len, d]`
        """
        x = rearrange(x, "b h t d -> t b h d")

        self._build_cache(x)

        # RoPE is applied to the first `self.d` features only; the rest pass through.
        x_rope, x_pass = x[..., : self.d], x[..., self.d :]

        neg_half_x = self._neg_half(x_rope)

        x_rope = (x_rope * self.cos_cached[: x.shape[0]]) + (neg_half_x * self.sin_cached[: x.shape[0]])

        return rearrange(torch.cat((x_rope, x_pass), dim=-1), "t b h d -> b h t d")


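# Illustrative shape check (an added sketch, not part of the original module):
# RotaryPositionalEmbeddings(d) rotates only the first `d` features of each head and
# passes the rest through unchanged, so the output shape matches the input, e.g.
#
#     rope = RotaryPositionalEmbeddings(32)   # rotate 32 of the 64 head features
#     q = torch.randn(2, 4, 100, 64)          # (batch, n_heads, seq_len, head_dim)
#     assert rope(q).shape == q.shape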
class MultiHeadAttention(nn.Module):
    """Multi-head attention over (batch, channels, time) tensors with rotary positional embeddings."""

    def __init__(
        self,
        channels,
        out_channels,
        n_heads,
        heads_share=True,
        p_dropout=0.0,
        proximal_bias=False,
        proximal_init=False,
    ):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.heads_share = heads_share
        self.proximal_bias = proximal_bias
        self.p_dropout = p_dropout
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = torch.nn.Conv1d(channels, channels, 1)
        self.conv_k = torch.nn.Conv1d(channels, channels, 1)
        self.conv_v = torch.nn.Conv1d(channels, channels, 1)

        # Apply RoPE to half of each head's channels.
        self.query_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
        self.key_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)

        self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
        self.drop = torch.nn.Dropout(p_dropout)

        torch.nn.init.xavier_uniform_(self.conv_q.weight)
        torch.nn.init.xavier_uniform_(self.conv_k.weight)
        if proximal_init:
            self.conv_k.weight.data.copy_(self.conv_q.weight.data)
            self.conv_k.bias.data.copy_(self.conv_q.bias.data)
        torch.nn.init.xavier_uniform_(self.conv_v.weight)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = rearrange(query, "b (h c) t-> b h t c", h=self.n_heads)
        key = rearrange(key, "b (h c) t-> b h t c", h=self.n_heads)
        value = rearrange(value, "b (h c) t-> b h t c", h=self.n_heads)

        query = self.query_rotary_pe(query)
        key = self.key_rotary_pe(key)

        # Scaled dot-product attention scores.
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)

        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
        p_attn = torch.nn.functional.softmax(scores, dim=-1)
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)
        return output, p_attn

    @staticmethod
    def _attention_bias_proximal(length):
        # Bias that favours attending to nearby positions: -log(1 + |i - j|).
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


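# Illustrative usage (an added sketch with assumed sizes, not the project's configuration):
#
#     mha = MultiHeadAttention(channels=192, out_channels=192, n_heads=2)
#     x = torch.randn(2, 192, 50)        # queries, (batch, channels, time)
#     c = torch.randn(2, 192, 50)        # keys/values, same layout (here: self-attention)
#     mask = torch.ones(2, 1, 50, 50)    # (batch, 1, t_target, t_source)
#     out = mha(x, c, attn_mask=mask)    # -> (2, 192, 50)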
class FFN(nn.Module):
    """Convolutional feed-forward block: masked Conv1d -> ReLU -> Dropout -> masked Conv1d."""

    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout

        self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size, padding=kernel_size // 2)
        self.drop = torch.nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(x * x_mask)
        x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(x * x_mask)
        return x * x_mask


class Encoder(nn.Module):
    """Transformer encoder: post-norm residual blocks of RoPE multi-head self-attention and convolutional FFNs."""

    def __init__(
        self,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size=1,
        p_dropout=0.0,
        **kwargs,
    ):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout

        self.drop = torch.nn.Dropout(p_dropout)
        self.attn_layers = torch.nn.ModuleList()
        self.norm_layers_1 = torch.nn.ModuleList()
        self.ffn_layers = torch.nn.ModuleList()
        self.norm_layers_2 = torch.nn.ModuleList()
        for _ in range(self.n_layers):
            self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(
                    hidden_channels,
                    hidden_channels,
                    filter_channels,
                    kernel_size,
                    p_dropout=p_dropout,
                )
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        # (batch, 1, t) mask -> (batch, 1, t, t) attention mask.
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        for i in range(self.n_layers):
            x = x * x_mask
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)
            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class TextEncoder(nn.Module):
    """Transformer text encoder with a detached duration-predictor branch; returns `mu`, `logw` and the text mask."""

    def __init__(
        self,
        encoder_type,
        encoder_params,
        duration_predictor_params,
        n_vocab,
        n_spks=1,
        spk_emb_dim=128,
    ):
        super().__init__()
        self.encoder_type = encoder_type
        self.n_vocab = n_vocab
        self.n_feats = encoder_params.n_feats
        self.n_channels = encoder_params.n_channels
        self.spk_emb_dim = spk_emb_dim
        self.n_spks = n_spks

        self.emb = torch.nn.Embedding(n_vocab, self.n_channels)
        torch.nn.init.normal_(self.emb.weight, 0.0, self.n_channels**-0.5)

        if encoder_params.prenet:
            self.prenet = ConvReluNorm(
                self.n_channels,
                self.n_channels,
                self.n_channels,
                kernel_size=5,
                n_layers=3,
                p_dropout=0.5,
            )
        else:
            self.prenet = lambda x, x_mask: x

        self.encoder = Encoder(
            encoder_params.n_channels + (spk_emb_dim if n_spks > 1 else 0),
            encoder_params.filter_channels,
            encoder_params.n_heads,
            encoder_params.n_layers,
            encoder_params.kernel_size,
            encoder_params.p_dropout,
        )

        # Separate encoder for the duration-predictor branch.
        self.encoder_dp = Encoder(
            encoder_params.n_channels + (spk_emb_dim if n_spks > 1 else 0),
            encoder_params.filter_channels,
            encoder_params.n_heads,
            encoder_params.n_layers,
            encoder_params.kernel_size,
            encoder_params.p_dropout,
        )

        self.proj_m = torch.nn.Conv1d(self.n_channels + (spk_emb_dim if n_spks > 1 else 0), self.n_feats, 1)

        self.proj_w = DurationPredictor(
            self.n_channels + (spk_emb_dim if n_spks > 1 else 0),
            duration_predictor_params.filter_channels_dp,
            duration_predictor_params.kernel_size,
            duration_predictor_params.p_dropout,
        )

    def forward(self, x, x_lengths, spks=None):
        """Run forward pass through the transformer-based encoder and the duration predictor.

        Args:
            x (torch.Tensor): text input
                shape: (batch_size, max_text_length)
            x_lengths (torch.Tensor): text input lengths
                shape: (batch_size,)
            spks (torch.Tensor, optional): speaker embeddings, required when n_spks > 1. Defaults to None.
                shape: (batch_size, spk_emb_dim)

        Returns:
            mu (torch.Tensor): average output of the encoder
                shape: (batch_size, n_feats, max_text_length)
            logw (torch.Tensor): log duration predicted by the duration predictor
                shape: (batch_size, 1, max_text_length)
            x_mask (torch.Tensor): mask for the text input
                shape: (batch_size, 1, max_text_length)
        """
        x = self.emb(x) * math.sqrt(self.n_channels)
        x = torch.transpose(x, 1, -1)
        x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)

        x = self.prenet(x, x_mask)
        if self.n_spks > 1:
            x = torch.cat([x, spks.unsqueeze(-1).repeat(1, 1, x.shape[-1])], dim=1)
        # Detach so duration-predictor gradients do not flow back into the shared embedding/prenet.
        x_dp = torch.detach(x)
        x_dp = self.encoder_dp(x_dp, x_mask)

        x = self.encoder(x, x_mask)
        mu = self.proj_m(x) * x_mask

        logw = self.proj_w(x_dp, x_mask)

        return mu, logw, x_mask

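
# Minimal smoke-test sketch (added for illustration; the hyper-parameter values below are
# assumptions for a quick shape check, not the project's configured defaults).
if __name__ == "__main__":
    from types import SimpleNamespace

    encoder_params = SimpleNamespace(
        n_feats=80,
        n_channels=192,
        filter_channels=768,
        n_heads=2,
        n_layers=6,
        kernel_size=3,
        p_dropout=0.1,
        prenet=True,
    )
    duration_predictor_params = SimpleNamespace(
        filter_channels_dp=256,
        kernel_size=3,
        p_dropout=0.1,
    )
    text_encoder = TextEncoder(
        encoder_type="transformer",
        encoder_params=encoder_params,
        duration_predictor_params=duration_predictor_params,
        n_vocab=100,
    )
    tokens = torch.randint(0, 100, (2, 17))
    token_lengths = torch.tensor([17, 11])
    mu, logw, x_mask = text_encoder(tokens, token_lengths)
    print(mu.shape, logw.shape, x_mask.shape)  # (2, 80, 17), (2, 1, 17), (2, 1, 17)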
|