igorktech committed
Commit 0b057e2 · 1 Parent(s): bf0312d

Model save

Files changed (3)
  1. configuration_hier.py +117 -0
  2. generation_config.json +5 -0
  3. modelling_hier.py +455 -0
configuration_hier.py ADDED
@@ -0,0 +1,117 @@
+ from transformers import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ HIERBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+     "igorktech/custom4": "https://huggingface.co/igorktech/custom4/resolve/main/config.json",
+ }
+
+
+ class HierBertConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`HierBertModel`]. It is used to
+     instantiate a HierBERT model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a configuration similar to that of the HierBERT
+     [HierBert](https://github.com/igorktech/hier-bert-pytorch) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the HierBERT model. Defines the number of different tokens that can be represented by
+             the `input_ids` passed when calling [`HierBertModel`].
+         hidden_size (`int`, *optional*, defaults to 512):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_hidden_layers (`int`, *optional*, defaults to 6):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 2048):
+             Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+         hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the attention probabilities.
+         max_position_embeddings (`int`, *optional*, defaults to 512):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512, 1024 or 2048).
+         type_vocab_size (`int`, *optional*, defaults to 2):
+             The vocabulary size of the `token_type_ids` passed when calling [`HierBertModel`].
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-6):
+             The epsilon used by the layer normalization layers.
+         position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+             Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+             positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+             [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+             For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+             with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+         is_decoder (`bool`, *optional*, defaults to `False`):
+             Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         classifier_dropout (`float`, *optional*):
+             The dropout ratio for the classification head.
+     """
+
+     model_type = "hierarchical-bert"
+
+     def __init__(
+         self,
+         vocab_size=32000,
+         hidden_size=512,
+         num_hidden_layers=6,
+         num_attention_heads=8,
+         intermediate_size=2048,
+         hidden_act="gelu",
+         hidden_dropout_prob=0.1,
+         attention_probs_dropout_prob=0.1,
+         max_position_embeddings=512,
+         type_vocab_size=2,
+         initializer_range=0.02,
+         layer_norm_eps=1e-6,
+         norm_first=True,
+         pad_token_id=0,
+         sep_token_id=3,
+         position_embedding_type="absolute",
+         use_cache=True,
+         classifier_dropout=None,
+         auto_map={
+             "AutoConfig": "configuration_hier.HierBertConfig",
+             "AutoModel": "modelling_hier.HierBertModel",
+             "AutoModelForMaskedLM": "modelling_hier.HierBertForMaskedLM",
+             "AutoModelForSequenceClassification": "modelling_hier.HierBertForSequenceClassification",
+         },
+         **kwargs,
+     ):
+         super().__init__(
+             pad_token_id=pad_token_id,
+             sep_token_id=sep_token_id,
+             **kwargs)
+
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.hidden_act = hidden_act
+         self.intermediate_size = intermediate_size
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.max_position_embeddings = max_position_embeddings
+         self.type_vocab_size = type_vocab_size
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.norm_first = norm_first
+         self.position_embedding_type = position_embedding_type
+         self.use_cache = use_cache
+         self.classifier_dropout = classifier_dropout
+         self.auto_map = auto_map
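
A minimal loading sketch for this configuration (not part of the committed files; it assumes the igorktech/custom4 repo referenced above is public and contains the files from this commit):

from transformers import AutoConfig

# trust_remote_code is needed because HierBertConfig is resolved from
# configuration_hier.py in the repo via the auto_map entry above.
config = AutoConfig.from_pretrained("igorktech/custom4", trust_remote_code=True)
print(config.model_type)    # hierarchical-bert
print(config.hidden_size)   # 512 unless overridden in the uploaded config.json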
generation_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "_from_model_config": true,
+   "pad_token_id": 0,
+   "transformers_version": "4.31.0"
+ }
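
For reference, this file can be read back with transformers' generation utilities (a sketch under the same repo-id assumption as above):

from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("igorktech/custom4")
print(gen_config.pad_token_id)  # 0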
modelling_hier.py ADDED
@@ -0,0 +1,455 @@
+ import copy
+ import math
+ from typing import Optional, Any, Tuple
+
+ import torch
+ from torch import Tensor
+ import torch.nn.functional as F
+ from torch.nn.modules.module import Module
+ from torch.nn.modules.activation import MultiheadAttention
+ from torch.nn.modules.container import ModuleList
+ from torch.nn.init import xavier_uniform_
+ from torch.nn.modules.dropout import Dropout
+ from torch.nn.modules.linear import Linear
+ from torch.nn.modules.normalization import LayerNorm
+
+ from transformers.modeling_outputs import BaseModelOutputWithPooling
+ from transformers import PreTrainedModel
+ from transformers import BertForMaskedLM, BertForSequenceClassification
+
+ from .configuration_hier import HierBertConfig
+
+ import warnings
+
+ # Turn off all warnings
+ warnings.filterwarnings("ignore")
+
+
+ # Define masking
+ def gen_encoder_ut_mask(src_seq, input_mask, utt_loc):
+     def _gen_mask_hierarchical(A, src_pad_mask):
+         # A: (bs, 100, 100); 100 is max_len*2, same as input_ids
+         return ~(2 * A == (A + A.transpose(1, 2))).bool()
+
+     enc_mask_utt = _gen_mask_hierarchical(utt_loc.unsqueeze(1).expand(-1, src_seq.shape[1], -1), input_mask)
+     return enc_mask_utt
+
+
+ def _get_pe_inputs(src_seq, input_mask, utt_loc):
+     pe_utt_loc = torch.zeros(utt_loc.shape, device=utt_loc.device)
+     for i in range(1, utt_loc.shape[1]):  # time
+         _logic = (utt_loc[:, i] == utt_loc[:, i - 1]).float()
+         pe_utt_loc[:, i] = pe_utt_loc[:, i - 1] + _logic - (1 - _logic) * pe_utt_loc[:, i - 1]
+     return pe_utt_loc
+
+
+ def _CLS_masks(src_seq, input_mask, utt_loc):
+     # HT-Encoder
+     pe_utt_loc = _get_pe_inputs(src_seq, input_mask, utt_loc)
+
+     # UT-MASK
+     enc_mask_utt = gen_encoder_ut_mask(src_seq, input_mask, utt_loc)
+
+     # CT-MASK
+     enc_mask_ct = ((pe_utt_loc + input_mask) != 0).unsqueeze(1).expand(-1, src_seq.shape[1], -1)  # HIER-CLS style
+
+     return pe_utt_loc, enc_mask_utt, enc_mask_ct
+
+
+ def get_hier_encoder_mask(src_seq, input_mask, utt_loc, type: str):
+     # Padding correction
+     # No token other than padding should attend to padding
+     # But padding needs to attend to padding tokens for numerical stability reasons
+     utt_loc = utt_loc - 2 * input_mask * utt_loc
+
+     # CT-Mask type
+     assert type in ["hier", "cls", "full"]
+
+     if type == "hier":  # HIER: Context through final utterance
+         raise Exception("Not used for BERT")
+     elif type == "cls":  # HIER-CLS: Context through cls tokens
+         return _CLS_masks(src_seq, input_mask, utt_loc)
+     elif type == "full":  # Ut-mask only, CT-mask: Full attention
+         raise Exception("Not used for BERT")
+
+     return None
+
+
+ def _get_clones(module, N):
+     return ModuleList([copy.deepcopy(module) for i in range(N)])
+
+
+ def _get_activation_fn(activation):
+     if activation == "relu":
+         return F.relu
+     elif activation == "gelu":
+         return F.gelu
+
+     raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
+
+
+ class PositionalEmbedding(torch.nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+
+         self.max_len = config.max_position_embeddings
+         self.d_model = config.hidden_size
+         # Compute the positional encodings once in log space.
+         pe = torch.zeros(self.max_len, self.d_model).float()
+         pe.requires_grad = False
+
+         position = torch.arange(0, self.max_len).float().unsqueeze(1)
+         div_term = (torch.arange(0, self.d_model, 2).float() * -(math.log(10000.0) / self.d_model)).exp()
+
+         pe[:, 0::2] = torch.sin(position * div_term)
+         pe[:, 1::2] = torch.cos(position * div_term)
+
+         pe = pe.unsqueeze(0)
+         self.register_buffer('pe', pe)
+
+     def forward(self, x):
+         # Shape of x: [N x L x d] or [N x L]
+         return self.pe[:, :x.size(1)]
+
+     def forward_by_index(self, loc):
+         return self.pe.expand(loc.shape[0], -1, -1).gather(1, loc.unsqueeze(2).expand(-1, -1, self.pe.shape[2]).long())
+
+
+ class TransformerEncoderLayer(Module):
+     r"""TransformerEncoderLayer is made up of self-attn and a feedforward network.
+     This standard encoder layer is based on the paper "Attention Is All You Need".
+     Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
+     Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
+     Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
+     it in a different way during application.
+     Args:
+         d_model: the number of expected features in the input (required).
+         nhead: the number of heads in the multiheadattention models (required).
+         dim_feedforward: the dimension of the feedforward network model (default=2048).
+         dropout: the dropout value (default=0.1).
+         activation: the activation function of the intermediate layer, relu or gelu (default=relu).
+         layer_norm_eps: the eps value in layer normalization components (default=1e-5).
+     Examples::
+         >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
+         >>> src = torch.rand(10, 32, 512)
+         >>> out = encoder_layer(src)
+     """
+
+     def __init__(self, config):
+         super(TransformerEncoderLayer, self).__init__()
+
+         self.self_attn = MultiheadAttention(config.hidden_size,
+                                             config.num_attention_heads,
+                                             dropout=config.attention_probs_dropout_prob)
+         # Implementation of Feedforward model
+         self.linear1 = Linear(config.hidden_size, config.intermediate_size)
+         self.dropout = Dropout(config.hidden_dropout_prob)
+         self.linear2 = Linear(config.intermediate_size, config.hidden_size)
+
+         self.norm_first = config.norm_first
+         self.norm1 = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.norm2 = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.dropout1 = Dropout(config.hidden_dropout_prob)
+         self.dropout2 = Dropout(config.hidden_dropout_prob)
+
+         self.activation = _get_activation_fn(config.hidden_act)
+
+     def __setstate__(self, state):
+         if 'activation' not in state:
+             state['activation'] = F.relu
+         super(TransformerEncoderLayer, self).__setstate__(state)
+
+     def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,
+                 src_key_padding_mask: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]:
+         r"""Pass the input through the encoder layer.
+         Args:
+             src: the sequence to the encoder layer (required).
+             src_mask: the mask for the src sequence (optional).
+             src_key_padding_mask: the mask for the src keys per batch (optional).
+         Shape:
+             see the docs in Transformer class.
+         """
+
+         # Extend mask
+         # src_mask = src_mask.repeat(self.self_attn.num_heads, 1, 1)
+
+         # PreLayerNorm
+         if self.norm_first:
+             src = self.norm1(src)
+             src_attn = self.self_attn(src, src, src, attn_mask=src_mask,
+                                       key_padding_mask=src_key_padding_mask, average_attn_weights=False)
+             src = src + self.dropout1(src_attn[0])
+             src = self.norm2(src)
+             src_ffn = self.linear2(self.dropout(self.activation(self.linear1(src))))
+             src = src + self.dropout2(src_ffn)
+         else:
+             src_attn = self.self_attn(src, src, src, attn_mask=src_mask,
+                                       key_padding_mask=src_key_padding_mask, average_attn_weights=False)
+             src = src + self.dropout1(src_attn[0])
+             src = self.norm1(src)
+             src_ffn = self.linear2(self.dropout(self.activation(self.linear1(src))))
+             src = src + self.dropout2(src_ffn)
+             src = self.norm2(src)
+         return src, src_attn[1]
+
+
+ class HierBert(Module):
+     r"""A transformer model. The user is able to modify the attributes as needed. The architecture
+     is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
+     Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
+     Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
+     Processing Systems, pages 6000-6010. Users can build the BERT (https://arxiv.org/abs/1810.04805)
+     model with corresponding parameters.
+     Args:
+         d_model: the number of expected features in the encoder/decoder inputs (default=512).
+         nhead: the number of heads in the multiheadattention models (default=8).
+         num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
+         dim_feedforward: the dimension of the feedforward network model (default=2048).
+         dropout: the dropout value (default=0.1).
+         activation: the activation function of encoder/decoder intermediate layer, relu or gelu (default=relu).
+         custom_encoder: custom encoder (default=None).
+         custom_decoder: custom decoder (default=None).
+         layer_norm_eps: the eps value in layer normalization components (default=1e-5).
+     Examples::
+         # >>> transformer_model = HIERTransformer(nhead=16, num_encoder_layers=12)
+         # >>> src = torch.rand((10, 32, 512))
+         # >>> token_type_ids/utt_indices = torch.tensor([0, 0, 1, 1, 1, 2, 2, 3, 3, 3])  # marks which utterance each token belongs to
+         # >>> out = transformer_model(src)
+     Note: A full example applying the nn.Transformer module to the word language model is available at
+     https://github.com/pytorch/examples/tree/master/word_language_model
+     """
+
+     def __init__(self, config) -> None:
+         super(HierBert, self).__init__()
+         self.config = config
+         # Word Emb
+         self.word_embeddings = torch.nn.Embedding(config.vocab_size,
+                                                   config.hidden_size,
+                                                   padding_idx=config.pad_token_id)
+
+         # Pos Emb
+         self.post_word_emb = PositionalEmbedding(config)
+
+         # Encoder
+         self.enc_layers = _get_clones(TransformerEncoderLayer(config=config),
+                                       config.num_hidden_layers)
+         self.norm_e = LayerNorm(config.hidden_size,
+                                 eps=config.layer_norm_eps)
+
+         self._reset_parameters()
+         self.init_weights()
+
+     def init_weights(self) -> None:
+         initrange = 0.1
+         self.word_embeddings.weight.data.uniform_(-initrange, initrange)
+
+     # TODO: fix return dict
+     def forward(self, input_ids: Tensor,
+                 attention_mask: Optional[Tensor] = None,
+                 token_type_ids: Optional[Tensor] = None,
+                 ct_mask_type: str = "cls",
+                 output_attentions: Optional[bool] = True,
+                 memory_key_padding_mask: Optional[Tensor] = None,
+                 **kwargs
+                 ):
+         r"""Take in and process masked source/target sequences.
+         Args:
+             input_ids/src: the sequence to the encoder (required).
+             src_mask: the additive mask for the src sequence (optional).
+             memory_mask: the additive mask for the encoder output (optional).
+             attention_mask/src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
+             memory_key_padding_mask: the ByteTensor mask for memory keys per batch (optional).
+         Shape:
+             - input_ids/src: :math:`(S, N, E)`.
+             - src_mask: :math:`(S, S)`.
+             - memory_mask: :math:`(T, S)`.
+             - not(attention_mask)/src_key_padding_mask: :math:`(N, S)`.
+             - token_type_ids/utt_indices: :math:`(N, S)`.
+             - memory_key_padding_mask: :math:`(N, S)`.
+             Note: [src/memory]_mask ensures that position i is allowed to attend the unmasked
+             positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
+             while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
+             are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
+             is provided, it will be added to the attention weight.
+             [src/memory]_key_padding_mask provides specified elements in the key to be ignored by
+             the attention. If a ByteTensor is provided, the non-zero positions will be ignored while the zero
+             positions will be unchanged. If a BoolTensor is provided, the positions with the
+             value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
+             - output: :math:`(T, N, E)`.
+             Note: Due to the multi-head attention architecture in the transformer model,
+             the output sequence length of a transformer is the same as the input sequence
+             (i.e. target) length of the decoder.
+             where S is the source sequence length, T is the target sequence length, N is the
+             batch size, E is the feature number.
+         Examples:
+             # >>> output = transformer_model(src, src_mask=src_mask)
+         """
+         all_self_attentions = () if output_attentions else None
+
+         if attention_mask is None:
+             # Convert input_ids to an attention mask
+             attention_mask = self.create_padding_mask(input_ids)
+             attention_mask = attention_mask.long()
+
+         if token_type_ids is None:
+             # Convert input_ids to token type IDs
+             token_type_ids = self.convert_input_ids_to_token_type_ids(input_ids)
+
+         src_key_padding_mask = torch.logical_not(attention_mask)
+         utt_indices = token_type_ids
+
+         pe_utt_loc, enc_mask_utt, enc_mask_ct = get_hier_encoder_mask(input_ids,
+                                                                       src_key_padding_mask,
+                                                                       utt_indices,
+                                                                       type=ct_mask_type)
+
+         # Encoding
+         enc_inp = self.word_embeddings(input_ids.transpose(0, 1)) + self.post_word_emb.forward_by_index(
+             pe_utt_loc).transpose(0, 1)
+
+         # Basic config
+         # for i, layer in enumerate(self.enc_layers):
+         #     if i == self.config.num_hidden_layers // 2:
+         #         # Positional Embedding for Context Encoder
+         #         enc_inp = enc_inp + self.post_word_emb(enc_inp.transpose(0, 1)).transpose(0, 1)
+         #     if i < self.config.num_hidden_layers // 2:
+         #         enc_inp = layer(enc_inp,
+         #                         src_key_padding_mask=src_key_padding_mask,
+         #                         src_mask=enc_mask_utt.float())
+         #     else:
+         #         enc_inp = layer(enc_inp,
+         #                         src_key_padding_mask=src_key_padding_mask,
+         #                         src_mask=enc_mask_ct)
+
+         # TODO: add layers configurations support and variations setup
+         # interleaved config (I3)
+         for i, layer in enumerate(self.enc_layers):
+             if i % (2 + 1) < 2:
+                 # Shared encoders or Segment-wise encoders (SWE)
+                 enc_inp, att_w = layer(enc_inp,
+                                        src_key_padding_mask=src_key_padding_mask,
+                                        src_mask=enc_mask_utt.repeat(self.config.num_attention_heads, 1, 1))
+             else:
+                 # Positional Embedding for the Context Encoder; if several connected CSEs follow, apply it before them
+                 enc_inp = enc_inp + self.post_word_emb(enc_inp.transpose(0, 1)).transpose(0, 1)
+                 # Context encoder or Cross-segment encoders (CSE)
+                 enc_inp, att_w = layer(enc_inp,
+                                        src_key_padding_mask=src_key_padding_mask,
+                                        src_mask=enc_mask_ct.repeat(self.config.num_attention_heads, 1, 1))
+             if output_attentions:
+                 all_self_attentions = all_self_attentions + (att_w,)
+
+         if self.norm_e is not None:
+             enc_inp = self.norm_e(enc_inp)
+
+         encoder_output = enc_inp.transpose(0, 1)
+         hidden_states = encoder_output
+
+         pooled_output = hidden_states[:, 0, :]
+         outputs = (hidden_states, pooled_output, all_self_attentions)
+
+         return outputs
+
+     def create_padding_mask(self, token_ids):
+         padding_mask = torch.ne(token_ids, self.config.pad_token_id).int()
+         return padding_mask
+
+     def generate_square_subsequent_mask(self, sz: int) -> Tensor:
+         r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
+         Unmasked positions are filled with float(0.0).
+         """
+         mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
+         mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
+         return mask
+
+     def _reset_parameters(self):
+         r"""Initiate parameters in the transformer model."""
+         for p in self.parameters():
+             if p.dim() > 1:
+                 xavier_uniform_(p)
+
+     def convert_input_ids_to_token_type_ids(self, input_ids):
+         token_type_ids = torch.zeros_like(input_ids)
+
+         for row, row_tensor in enumerate(input_ids):
+             sep_indices = torch.nonzero(row_tensor == self.config.sep_token_id)
+             prev_index = -1
+             for type_id, index in enumerate(sep_indices):
+                 token_type_ids[row, prev_index + 1:index + 1] = type_id
+                 prev_index = index
+
+         return token_type_ids
+
+
+ class HierBertModel(PreTrainedModel):
+     config_class = HierBertConfig
+     base_model_prefix = "hier"
+
+     def __init__(self, config):
+         super().__init__(config)
+
+         self.model = HierBert(config)
+
+     def forward(
+             self,
+             input_ids: Optional[torch.Tensor] = None,
+             attention_mask: Optional[torch.Tensor] = None,
+             token_type_ids: Optional[torch.Tensor] = None,
+             position_ids: Optional[torch.Tensor] = None,
+             inputs_embeds: Optional[torch.Tensor] = None,
+             output_attentions: Optional[bool] = None,
+             output_hidden_states: Optional[bool] = None,
+             return_dict: Optional[bool] = None,
+             **kwargs
+     ):
+         outputs = self.model(input_ids=input_ids,
+                              attention_mask=attention_mask,
+                              token_type_ids=token_type_ids,
+                              position_ids=position_ids,
+                              inputs_embeds=inputs_embeds,
+                              output_attentions=output_attentions,
+                              output_hidden_states=output_hidden_states,
+                              return_dict=return_dict)
+         if not return_dict:
+             return outputs
+
+         return BaseModelOutputWithPooling(
+             last_hidden_state=outputs[0],
+             pooler_output=outputs[1],
+             attentions=outputs[2])
+
+     def get_input_embeddings(self):
+         return self.model.word_embeddings
+
+     def set_input_embeddings(self, value):
+         self.model.word_embeddings = value
+
+
+ class HierBertForMaskedLM(BertForMaskedLM):
+     config_class = HierBertConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.bert = HierBertModel(config)
+
+
+ class HierBertForSequenceClassification(BertForSequenceClassification):
+     config_class = HierBertConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.bert = HierBertModel(config)
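
A minimal end-to-end sketch of running the model defined above (not part of the commit; it assumes the igorktech/custom4 repo is public and contains the weights, and the token ids below are made up for illustration — in practice they would come from the repo's tokenizer):

import torch
from transformers import AutoModel

# auto_map routes AutoModel to HierBertModel; trust_remote_code pulls modelling_hier.py from the repo.
model = AutoModel.from_pretrained("igorktech/custom4", trust_remote_code=True)
model.eval()

# Two toy dialogues as flat token-id sequences: id 3 (sep_token_id) closes an utterance,
# id 0 (pad_token_id) pads on the right. token_type_ids are derived from the sep positions,
# so the hierarchical UT/CT attention masks segment the sequence by utterance.
input_ids = torch.tensor([
    [5, 6, 3, 7, 8, 9, 3, 0],
    [4, 3, 5, 6, 3, 0, 0, 0],
])

with torch.no_grad():
    out = model(input_ids=input_ids, return_dict=True)

print(out.last_hidden_state.shape)  # (batch, seq_len, hidden_size)
print(out.pooler_output.shape)      # (batch, hidden_size) — hidden state of the first token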