import math

import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.modules.transformer import _get_activation_fn


class TransformerDecoderLayer(nn.Module):
    r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.

    This standard decoder layer is based on the paper "Attention Is All You Need".
    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez,
    Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
    Neural Information Processing Systems, pages 6000-6010. Users may modify this layer or
    implement it in a different way during application.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of the intermediate layer, relu or gelu (default=relu).
        self_attn: whether to include the self-attention sub-layer (default=True).
        siamese: whether to add a second cross-attention sub-layer over a second
            memory sequence ``memory2`` (default=False).
        debug: whether to store the attention weights on the module for inspection (default=False).

    Examples::
        >>> decoder_layer = TransformerDecoderLayer(d_model=512, nhead=8)
        >>> memory = torch.rand(10, 32, 512)
        >>> tgt = torch.rand(20, 32, 512)
        >>> out = decoder_layer(tgt, memory)
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", self_attn=True, siamese=False, debug=False):
        super().__init__()
        self.has_self_attn, self.siamese = self_attn, siamese
        self.debug = debug
        if self.has_self_attn:
            self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
            self.norm1 = nn.LayerNorm(d_model)
            self.dropout1 = nn.Dropout(dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)

        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        if self.siamese:
            self.multihead_attn2 = nn.MultiheadAttention(d_model, nhead, dropout=dropout)

        self.activation = _get_activation_fn(activation)

    def __setstate__(self, state):
        if 'activation' not in state:
            state['activation'] = F.relu
        super().__setstate__(state)

    def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
                tgt_key_padding_mask=None, memory_key_padding_mask=None,
                memory2=None, memory_mask2=None, memory_key_padding_mask2=None):
        r"""Pass the inputs (and mask) through the decoder layer.

        Args:
            tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
            memory_key_padding_mask: the mask for the memory keys per batch (optional).
            memory2: the second memory sequence, used only when ``siamese=True`` (optional).
            memory_mask2: the mask for the memory2 sequence (optional).
            memory_key_padding_mask2: the mask for the memory2 keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        # Self-attention sub-layer (optional).
        if self.has_self_attn:
            tgt2, attn = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
                                        key_padding_mask=tgt_key_padding_mask)
            tgt = tgt + self.dropout1(tgt2)
            tgt = self.norm1(tgt)
            if self.debug: self.attn = attn
        # Cross-attention over the encoder memory.
        tgt2, attn2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
                                          key_padding_mask=memory_key_padding_mask)
        if self.debug: self.attn2 = attn2

        # Optional second cross-attention over memory2 (siamese branch).
        if self.siamese:
            tgt3, attn3 = self.multihead_attn2(tgt, memory2, memory2, attn_mask=memory_mask2,
                                               key_padding_mask=memory_key_padding_mask2)
            tgt = tgt + self.dropout2(tgt3)
            if self.debug: self.attn3 = attn3

        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        # Position-wise feed-forward sub-layer.
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)

        return tgt
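

# Illustrative usage sketch (not part of the original module): the tensor shapes,
# the `_demo_decoder_layer` name, and the d_model/nhead values below are
# assumptions chosen only to show how the custom `siamese` and `debug` options
# of this layer might be exercised.
def _demo_decoder_layer():
    d_model, nhead = 512, 8
    tgt = torch.rand(20, 32, d_model)      # (target length, batch, d_model)
    memory = torch.rand(10, 32, d_model)   # (source length, batch, d_model)

    # Standard layer: self-attention + cross-attention + feed-forward.
    layer = TransformerDecoderLayer(d_model, nhead)
    out = layer(tgt, memory)

    # Siamese variant: a second memory stream attended to by multihead_attn2;
    # debug=True stores the attention weights on the module.
    siamese_layer = TransformerDecoderLayer(d_model, nhead, siamese=True, debug=True)
    memory2 = torch.rand(10, 32, d_model)
    out2 = siamese_layer(tgt, memory, memory2=memory2)
    return out.shape, out2.shape, siamese_layer.attn2.shape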


class PositionalEncoding(nn.Module):
    r"""Inject some information about the relative or absolute position of the tokens
    in the sequence. The positional encodings have the same dimension as
    the embeddings, so that the two can be summed. Here, we use sine and cosine
    functions of different frequencies.

    .. math::
        \text{PosEncoder}(pos, 2i) = \sin(pos / 10000^{2i / d_{model}})

        \text{PosEncoder}(pos, 2i + 1) = \cos(pos / 10000^{2i / d_{model}})

    where :math:`pos` is the word position and :math:`i` is the embed idx.

    Args:
        d_model: the embed dim (required).
        dropout: the dropout value (default=0.1).
        max_len: the max. length of the incoming sequence (default=5000).

    Examples:
        >>> pos_encoder = PositionalEncoding(d_model)
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # div_term is 1 / 10000^(2i / d_model), computed as exp(-log(10000) * 2i / d_model).
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Reshape to (max_len, 1, d_model) so pe broadcasts over the batch dimension.
        pe = pe.unsqueeze(0).transpose(0, 1)
        # Registered as a buffer: saved with the module but not trained.
        self.register_buffer('pe', pe)

    def forward(self, x):
        r"""Inputs of forward function
        Args:
            x: the sequence fed to the positional encoder model (required).
        Shape:
            x: [sequence length, batch size, embed dim]
            output: [sequence length, batch size, embed dim]
        Examples:
            >>> output = pos_encoder(x)
        """
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)
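

# Illustrative usage sketch (not part of the original module): the vocabulary
# size, the sqrt(d_model) embedding scaling, and the `_demo_positional_encoding`
# name are assumptions; this only shows how PositionalEncoding adds the
# precomputed sinusoids to a (seq_len, batch, d_model) embedding tensor.
def _demo_positional_encoding():
    d_model, vocab_size = 512, 1000
    embed = nn.Embedding(vocab_size, d_model)
    pos_encoder = PositionalEncoding(d_model, dropout=0.1)

    tokens = torch.randint(0, vocab_size, (20, 32))  # (seq_len, batch)
    # Scaling by sqrt(d_model) follows the convention in "Attention Is All You Need".
    x = embed(tokens) * math.sqrt(d_model)           # (seq_len, batch, d_model)
    return pos_encoder(x).shape                      # torch.Size([20, 32, 512])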