Omar committed
Commit 88cccb3 · 1 parent: 62a5ce9
This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. config.json +4 -2
  2. finetune/boolq/all_results.json +16 -0
  3. finetune/boolq/config.json +48 -0
  4. finetune/boolq/eval_results.json +11 -0
  5. finetune/boolq/merges.txt +0 -0
  6. finetune/boolq/modeling_structroberta.py +1533 -0
  7. finetune/boolq/predict_results.txt +724 -0
  8. finetune/boolq/pytorch_model.bin +3 -0
  9. finetune/boolq/special_tokens_map.json +15 -0
  10. finetune/boolq/tokenizer_config.json +65 -0
  11. finetune/boolq/train_results.json +8 -0
  12. finetune/boolq/trainer_state.json +36 -0
  13. finetune/boolq/training_args.bin +3 -0
  14. finetune/boolq/vocab.json +0 -0
  15. finetune/cola/all_results.json +16 -0
  16. finetune/cola/config.json +48 -0
  17. finetune/cola/eval_results.json +11 -0
  18. finetune/cola/merges.txt +0 -0
  19. finetune/cola/modeling_structroberta.py +1533 -0
  20. finetune/cola/predict_results.txt +1020 -0
  21. finetune/cola/pytorch_model.bin +3 -0
  22. finetune/cola/special_tokens_map.json +15 -0
  23. finetune/cola/tokenizer_config.json +65 -0
  24. finetune/cola/train_results.json +8 -0
  25. finetune/cola/trainer_state.json +103 -0
  26. finetune/cola/training_args.bin +3 -0
  27. finetune/cola/vocab.json +0 -0
  28. finetune/control_raising_control/all_results.json +16 -0
  29. finetune/control_raising_control/config.json +48 -0
  30. finetune/control_raising_control/eval_results.json +11 -0
  31. finetune/control_raising_control/merges.txt +0 -0
  32. finetune/control_raising_control/modeling_structroberta.py +1533 -0
  33. finetune/control_raising_control/predict_results.txt +0 -0
  34. finetune/control_raising_control/pytorch_model.bin +3 -0
  35. finetune/control_raising_control/special_tokens_map.json +15 -0
  36. finetune/control_raising_control/tokenizer_config.json +65 -0
  37. finetune/control_raising_control/train_results.json +8 -0
  38. finetune/control_raising_control/trainer_state.json +92 -0
  39. finetune/control_raising_control/training_args.bin +3 -0
  40. finetune/control_raising_control/vocab.json +0 -0
  41. finetune/control_raising_lexical_content_the/all_results.json +16 -0
  42. finetune/control_raising_lexical_content_the/config.json +48 -0
  43. finetune/control_raising_lexical_content_the/eval_results.json +11 -0
  44. finetune/control_raising_lexical_content_the/merges.txt +0 -0
  45. finetune/control_raising_lexical_content_the/modeling_structroberta.py +1533 -0
  46. finetune/control_raising_lexical_content_the/predict_results.txt +0 -0
  47. finetune/control_raising_lexical_content_the/pytorch_model.bin +3 -0
  48. finetune/control_raising_lexical_content_the/special_tokens_map.json +15 -0
  49. finetune/control_raising_lexical_content_the/tokenizer_config.json +65 -0
  50. finetune/control_raising_lexical_content_the/train_results.json +8 -0
config.json CHANGED
@@ -1,11 +1,13 @@
 {
   "architectures": [
-    "StructRoberta"
+    "StructRoberta",
+    "StructRobertaForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "auto_map": {
     "AutoConfig": "modeling_structroberta.StructRobertaConfig",
-    "AutoModelForMaskedLM": "modeling_structroberta.StructRoberta"
+    "AutoModelForMaskedLM": "modeling_structroberta.StructRoberta",
+    "AutoModelForSequenceClassification": "modeling_structroberta.StructRobertaForSequenceClassification"
   },
   "bos_token_id": 0,
   "classifier_dropout": null,
finetune/boolq/all_results.json ADDED
@@ -0,0 +1,16 @@
{
    "epoch": 10.0,
    "eval_accuracy": 0.6403872966766357,
    "eval_f1": 0.7136563876651982,
    "eval_loss": 1.1364797353744507,
    "eval_mcc": 0.2349638460835621,
    "eval_runtime": 1.5731,
    "eval_samples": 723,
    "eval_samples_per_second": 459.595,
    "eval_steps_per_second": 57.847,
    "train_loss": 0.27251947576349433,
    "train_runtime": 112.6988,
    "train_samples": 2072,
    "train_samples_per_second": 183.853,
    "train_steps_per_second": 2.928
}
finetune/boolq/config.json ADDED
@@ -0,0 +1,48 @@
{
  "_name_or_path": "final_models/glue_models/structroberta_s2_50ep/",
  "architectures": [
    "StructRobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "auto_map": {
    "AutoConfig": "modeling_structroberta.StructRobertaConfig",
    "AutoModelForMaskedLM": "modeling_structroberta.StructRoberta",
    "AutoModelForSequenceClassification": "modeling_structroberta.StructRobertaForSequenceClassification"
  },
  "bos_token_id": 0,
  "classifier_dropout": null,
  "conv_size": 9,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": 0,
    "1": 1
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "0": 0,
    "1": 1
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "n_parser_layers": 6,
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "relations": [
    "head",
    "child"
  ],
  "torch_dtype": "float32",
  "transformers_version": "4.26.1",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 32000,
  "weight_act": "softmax"
}
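
Editor's note: each fine-tuning folder in this commit ships its own copy of modeling_structroberta.py plus tokenizer files and weights, so a checkpoint like this one can be loaded straight from the directory. A minimal sketch, assuming a local checkout at the path shown; the path and example sentence are illustrative only:

from transformers import AutoModelForSequenceClassification, AutoTokenizer

path = "finetune/boolq"  # local checkout of this folder
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForSequenceClassification.from_pretrained(path, trust_remote_code=True)
inputs = tokenizer("is the sky blue? the sky is blue.", return_tensors="pt")
logits = model(**inputs).logits  # shape (1, 2); label ids follow id2label in the config above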
finetune/boolq/eval_results.json ADDED
@@ -0,0 +1,11 @@
{
    "epoch": 10.0,
    "eval_accuracy": 0.6403872966766357,
    "eval_f1": 0.7136563876651982,
    "eval_loss": 1.1364797353744507,
    "eval_mcc": 0.2349638460835621,
    "eval_runtime": 1.5731,
    "eval_samples": 723,
    "eval_samples_per_second": 459.595,
    "eval_steps_per_second": 57.847
}
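
Editor's note: the reported eval_accuracy, eval_f1 and eval_mcc presumably follow the usual definitions of accuracy, binary F1 and Matthews correlation. A toy scikit-learn sketch of those three metrics; the arrays are made-up placeholders, not the actual BoolQ predictions:

from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef

labels = [1, 0, 1, 1, 0]  # placeholder gold labels
preds = [1, 0, 1, 0, 0]   # placeholder predictions
print(accuracy_score(labels, preds), f1_score(labels, preds), matthews_corrcoef(labels, preds))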
finetune/boolq/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
finetune/boolq/modeling_structroberta.py ADDED
@@ -0,0 +1,1533 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model."""

import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
import torch.nn.functional as F
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN, gelu
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    MaskedLMOutput,
    SequenceClassifierOutput
)
from transformers.modeling_utils import (
    PreTrainedModel,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from transformers.utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
)
from transformers import RobertaConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"

ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "roberta-base",
    "roberta-large",
    "roberta-large-mnli",
    "distilroberta-base",
    "roberta-base-openai-detector",
    "roberta-large-openai-detector",
    # See all RoBERTa models at https://huggingface.co/models?filter=roberta
]


class StructRobertaConfig(RobertaConfig):
    model_type = "roberta"

    def __init__(
        self,
        n_parser_layers=4,
        conv_size=9,
        relations=('head', 'child'),
        weight_act='softmax',
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.n_parser_layers = n_parser_layers
        self.conv_size = conv_size
        self.relations = relations
        self.weight_act = weight_act

class Conv1d(nn.Module):
    """1D convolution layer."""

    def __init__(self, hidden_size, kernel_size, dilation=1):
        """Initialization.

        Args:
          hidden_size: dimension of input embeddings
          kernel_size: convolution kernel size
          dilation: the spacing between the kernel points
        """
        super(Conv1d, self).__init__()

        if kernel_size % 2 == 0:
            padding = (kernel_size // 2) * dilation
            self.shift = True
        else:
            padding = ((kernel_size - 1) // 2) * dilation
            self.shift = False
        self.conv = nn.Conv1d(
            hidden_size,
            hidden_size,
            kernel_size,
            padding=padding,
            dilation=dilation)

    def forward(self, x):
        """Compute convolution.

        Args:
          x: input embeddings
        Returns:
          conv_output: convolution results
        """

        if self.shift:
            return self.conv(x.transpose(1, 2)).transpose(1, 2)[:, 1:]
        else:
            return self.conv(x.transpose(1, 2)).transpose(1, 2)

class RobertaEmbeddings(nn.Module):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
    """

    # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        if version.parse(torch.__version__) > version.parse("1.6.0"):
            self.register_buffer(
                "token_type_ids",
                torch.zeros(self.position_ids.size(), dtype=torch.long),
                persistent=False,
            )

        # End copy
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
        # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
        # issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)

# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
class RobertaSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
        parser_att_mask=None,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
            attention_scores = attention_scores + attention_mask

        if parser_att_mask is None:
            # Normalize the attention scores to probabilities.
            attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        else:
            attention_probs = torch.sigmoid(attention_scores) * parser_att_mask

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class RobertaSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
class RobertaAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = RobertaSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = RobertaSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
        parser_att_mask=None,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
            parser_att_mask=parser_att_mask,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class RobertaIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput
class RobertaOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
class RobertaLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = RobertaAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = RobertaAttention(config, position_embedding_type="absolute")
        self.intermediate = RobertaIntermediate(config)
        self.output = RobertaOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
        parser_att_mask=None,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
            parser_att_mask=parser_att_mask,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
class RobertaEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
        parser_att_mask=None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    parser_att_mask=parser_att_mask[i],
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler
class RobertaPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class RobertaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RobertaConfig
    base_model_prefix = "roberta"
    supports_gradient_checkpointing = True

    # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            if module.bias is not None:
                module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RobertaEncoder):
            module.gradient_checkpointing = value

    def update_keys_to_ignore(self, config, del_keys_to_ignore):
        """Remove some keys from ignore list"""
        if not config.tie_word_embeddings:
            # must make a new list, or the class variable gets modified!
            self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
            self._keys_to_ignore_on_load_missing = [
                k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
            ]

ROBERTA_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`RobertaConfig`]): Model configuration class with all the parameters of the
            model. Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


ROBERTA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`RobertaTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

class RobertaModel(RobertaPreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in *Attention is
    all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
    Kaiser and Illia Polosukhin.

    To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.

    .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762

    """

    _keys_to_ignore_on_load_missing = [r"position_ids"]

    # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = RobertaEmbeddings(config)
        self.encoder = RobertaEncoder(config)

        self.pooler = RobertaPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    # Copied from transformers.models.bert.modeling_bert.BertModel.forward
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        parser_att_mask=None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            parser_att_mask=parser_att_mask
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )

class StructRoberta(RobertaPreTrainedModel):
    _keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.parser_layers = nn.ModuleList([
            nn.Sequential(Conv1d(config.hidden_size, config.conv_size),
                          nn.LayerNorm(config.hidden_size, elementwise_affine=False),
                          nn.Tanh()) for i in range(config.n_parser_layers)])

        self.distance_ff = nn.Sequential(
            Conv1d(config.hidden_size, 2),
            nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
            nn.Linear(config.hidden_size, 1))

        self.height_ff = nn.Sequential(
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
            nn.Linear(config.hidden_size, 1))

        n_rel = len(config.relations)
        self._rel_weight = nn.Parameter(torch.zeros((config.num_hidden_layers, config.num_attention_heads, n_rel)))
        self._rel_weight.data.normal_(0, 0.1)

        self._scaler = nn.Parameter(torch.zeros(2))

        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.lm_head = RobertaLMHead(config)

        self.pad = config.pad_token_id

        # The LM head weights require special treatment only when they are tied with the word embeddings
        self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @property
    def scaler(self):
        return self._scaler.exp()

    @property
    def rel_weight(self):
        if self.config.weight_act == 'sigmoid':
            return torch.sigmoid(self._rel_weight)
        elif self.config.weight_act == 'softmax':
            return torch.softmax(self._rel_weight, dim=-1)

    def compute_block(self, distance, height):
        """Compute constituents from distance and height."""

        beta_logits = (distance[:, None, :] - height[:, :, None]) * self.scaler[0]

        gamma = torch.sigmoid(-beta_logits)
        ones = torch.ones_like(gamma)

        block_mask_left = cummin(
            gamma.tril(-1) + ones.triu(0), reverse=True, max_value=1)
        block_mask_left = block_mask_left - F.pad(
            block_mask_left[:, :, :-1], (1, 0), value=0)
        block_mask_left.tril_(0)

        block_mask_right = cummin(
            gamma.triu(0) + ones.tril(-1), exclusive=True, max_value=1)
        block_mask_right = block_mask_right - F.pad(
            block_mask_right[:, :, 1:], (0, 1), value=0)
        block_mask_right.triu_(0)

        block_p = block_mask_left[:, :, :, None] * block_mask_right[:, :, None, :]
        block = cumsum(block_mask_left).tril(0) + cumsum(
            block_mask_right, reverse=True).triu(1)

        return block_p, block

    def compute_head(self, height):
        """Estimate head for each constituent."""

        _, length = height.size()
        head_logits = height * self.scaler[1]
        index = torch.arange(length, device=height.device)

        mask = (index[:, None, None] <= index[None, None, :]) * (
            index[None, None, :] <= index[None, :, None])
        head_logits = head_logits[:, None, None, :].repeat(1, length, length, 1)
        head_logits.masked_fill_(~mask[None, :, :, :], -1e9)

        head_p = torch.softmax(head_logits, dim=-1)

        return head_p

    def parse(self, x):
        """Parse input sentence.

        Args:
          x: input tokens (required).
          pos: position for each token (optional).
        Returns:
          distance: syntactic distance
          height: syntactic height
        """

        mask = (x != self.pad)
        mask_shifted = F.pad(mask[:, 1:], (0, 1), value=0)

        h = self.roberta.embeddings(x)
        for i in range(self.config.n_parser_layers):
            h = h.masked_fill(~mask[:, :, None], 0)
            h = self.parser_layers[i](h)

        height = self.height_ff(h).squeeze(-1)
        height.masked_fill_(~mask, -1e9)

        distance = self.distance_ff(h).squeeze(-1)
        distance.masked_fill_(~mask_shifted, 1e9)

        # Calbrating the distance and height to the same level
        length = distance.size(1)
        height_max = height[:, None, :].expand(-1, length, -1)
        height_max = torch.cummax(
            height_max.triu(0) - torch.ones_like(height_max).tril(-1) * 1e9,
            dim=-1)[0].triu(0)

        margin_left = torch.relu(
            F.pad(distance[:, :-1, None], (0, 0, 1, 0), value=1e9) - height_max)
        margin_right = torch.relu(distance[:, None, :] - height_max)
        margin = torch.where(margin_left > margin_right, margin_right,
                             margin_left).triu(0)

        margin_mask = torch.stack([mask_shifted] + [mask] * (length - 1), dim=1)
        margin.masked_fill_(~margin_mask, 0)
        margin = margin.max()

        distance = distance - margin

        return distance, height

    def generate_mask(self, x, distance, height):
        """Compute head and cibling distribution for each token."""

        bsz, length = x.size()

        eye = torch.eye(length, device=x.device, dtype=torch.bool)
        eye = eye[None, :, :].expand((bsz, -1, -1))

        block_p, block = self.compute_block(distance, height)
        head_p = self.compute_head(height)
        head = torch.einsum('blij,bijh->blh', block_p, head_p)
        head = head.masked_fill(eye, 0)
        child = head.transpose(1, 2)
        cibling = torch.bmm(head, child).masked_fill(eye, 0)

        rel_list = []
        if 'head' in self.config.relations:
            rel_list.append(head)
        if 'child' in self.config.relations:
            rel_list.append(child)
        if 'cibling' in self.config.relations:
            rel_list.append(cibling)

        rel = torch.stack(rel_list, dim=1)

        rel_weight = self.rel_weight

        dep = torch.einsum('lhr,brij->lbhij', rel_weight, rel)
        att_mask = dep.reshape(self.config.num_hidden_layers, bsz, self.config.num_attention_heads, length, length)

        return att_mask, cibling, head, block

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        kwargs (`Dict[str, any]`, optional, defaults to *{}*):
            Used to hide legacy arguments that have been deprecated.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        distance, height = self.parse(input_ids)
        att_mask, cibling, head, block = self.generate_mask(input_ids, distance, height)

        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            parser_att_mask=att_mask,
        )
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

1180
+ class RobertaLMHead(nn.Module):
1181
+ """Roberta Head for masked language modeling."""
1182
+
1183
+ def __init__(self, config):
1184
+ super().__init__()
1185
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1186
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1187
+
1188
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
1189
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1190
+ self.decoder.bias = self.bias
1191
+
1192
+ def forward(self, features, **kwargs):
1193
+ x = self.dense(features)
1194
+ x = gelu(x)
1195
+ x = self.layer_norm(x)
1196
+
1197
+ # project back to size of vocabulary with bias
1198
+ x = self.decoder(x)
1199
+
1200
+ return x
1201
+
1202
+ def _tie_weights(self):
1203
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
1204
+ self.bias = self.decoder.bias
1205
+
1206
+ class StructRobertaForSequenceClassification(RobertaPreTrainedModel):
1207
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1208
+
1209
+ def __init__(self, config):
1210
+ super().__init__(config)
1211
+ self.num_labels = config.num_labels
1212
+ self.config = config
1213
+
1214
+ self.parser_layers = nn.ModuleList([
1215
+ nn.Sequential(Conv1d(config.hidden_size, config.conv_size),
1216
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False),
1217
+ nn.Tanh()) for i in range(config.n_parser_layers)])
1218
+
1219
+ self.distance_ff = nn.Sequential(
1220
+ Conv1d(config.hidden_size, 2),
1221
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
1222
+ nn.Linear(config.hidden_size, 1))
1223
+
1224
+ self.height_ff = nn.Sequential(
1225
+ nn.Linear(config.hidden_size, config.hidden_size),
1226
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
1227
+ nn.Linear(config.hidden_size, 1))
1228
+
1229
+ n_rel = len(config.relations)
1230
+ self._rel_weight = nn.Parameter(torch.zeros((config.num_hidden_layers, config.num_attention_heads, n_rel)))
1231
+ self._rel_weight.data.normal_(0, 0.1)
1232
+
1233
+ self._scaler = nn.Parameter(torch.zeros(2))
1234
+
1235
+ self.pad = config.pad_token_id
1236
+
1237
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
1238
+ self.classifier = RobertaClassificationHead(config)
1239
+
1240
+ # Initialize weights and apply final processing
1241
+ self.post_init()
1242
+
1243
+
1244
+ @property
1245
+ def scaler(self):
1246
+ return self._scaler.exp()
1247
+
1248
+ @property
1249
+ def rel_weight(self):
1250
+ if self.config.weight_act == 'sigmoid':
1251
+ return torch.sigmoid(self._rel_weight)
1252
+ elif self.config.weight_act == 'softmax':
1253
+ return torch.softmax(self._rel_weight, dim=-1)
1254
+
1255
+ def compute_block(self, distance, height):
1256
+ """Compute constituents from distance and height."""
1257
+
1258
+ beta_logits = (distance[:, None, :] - height[:, :, None]) * self.scaler[0]
1259
+
1260
+ gamma = torch.sigmoid(-beta_logits)
1261
+ ones = torch.ones_like(gamma)
1262
+
1263
+ block_mask_left = cummin(
1264
+ gamma.tril(-1) + ones.triu(0), reverse=True, max_value=1)
1265
+ block_mask_left = block_mask_left - F.pad(
1266
+ block_mask_left[:, :, :-1], (1, 0), value=0)
1267
+ block_mask_left.tril_(0)
1268
+
1269
+ block_mask_right = cummin(
1270
+ gamma.triu(0) + ones.tril(-1), exclusive=True, max_value=1)
1271
+ block_mask_right = block_mask_right - F.pad(
1272
+ block_mask_right[:, :, 1:], (0, 1), value=0)
1273
+ block_mask_right.triu_(0)
1274
+
1275
+ block_p = block_mask_left[:, :, :, None] * block_mask_right[:, :, None, :]
1276
+ block = cumsum(block_mask_left).tril(0) + cumsum(
1277
+ block_mask_right, reverse=True).triu(1)
1278
+
1279
+ return block_p, block
1280
+
1281
+ def compute_head(self, height):
1282
+ """Estimate head for each constituent."""
1283
+
1284
+ _, length = height.size()
1285
+ head_logits = height * self.scaler[1]
1286
+ index = torch.arange(length, device=height.device)
1287
+
1288
+ mask = (index[:, None, None] <= index[None, None, :]) * (
1289
+ index[None, None, :] <= index[None, :, None])
1290
+ head_logits = head_logits[:, None, None, :].repeat(1, length, length, 1)
1291
+ head_logits.masked_fill_(~mask[None, :, :, :], -1e9)
1292
+
1293
+ head_p = torch.softmax(head_logits, dim=-1)
1294
+
1295
+ return head_p
1296
+
1297
+ def parse(self, x):
1298
+ """Parse input sentence.
1299
+
1300
+ Args:
1301
+ x: input tokens (required).
1302
+ pos: position for each token (optional).
1303
+ Returns:
1304
+ distance: syntactic distance
1305
+ height: syntactic height
1306
+ """
1307
+
1308
+ mask = (x != self.pad)
1309
+ mask_shifted = F.pad(mask[:, 1:], (0, 1), value=0)
1310
+
1311
+ h = self.roberta.embeddings(x)
1312
+ for i in range(self.config.n_parser_layers):
1313
+ h = h.masked_fill(~mask[:, :, None], 0)
1314
+ h = self.parser_layers[i](h)
1315
+
1316
+ height = self.height_ff(h).squeeze(-1)
1317
+ height.masked_fill_(~mask, -1e9)
1318
+
1319
+ distance = self.distance_ff(h).squeeze(-1)
1320
+ distance.masked_fill_(~mask_shifted, 1e9)
1321
+
1322
+ # Calibrating the distance and height to the same level
1323
+ length = distance.size(1)
1324
+ height_max = height[:, None, :].expand(-1, length, -1)
1325
+ height_max = torch.cummax(
1326
+ height_max.triu(0) - torch.ones_like(height_max).tril(-1) * 1e9,
1327
+ dim=-1)[0].triu(0)
1328
+
1329
+ margin_left = torch.relu(
1330
+ F.pad(distance[:, :-1, None], (0, 0, 1, 0), value=1e9) - height_max)
1331
+ margin_right = torch.relu(distance[:, None, :] - height_max)
1332
+ margin = torch.where(margin_left > margin_right, margin_right,
1333
+ margin_left).triu(0)
1334
+
1335
+ margin_mask = torch.stack([mask_shifted] + [mask] * (length - 1), dim=1)
1336
+ margin.masked_fill_(~margin_mask, 0)
1337
+ margin = margin.max()
1338
+
1339
+ distance = distance - margin
1340
+
1341
+ return distance, height
1342
+
1343
+ def generate_mask(self, x, distance, height):
1344
+ """Compute head and cibling distribution for each token."""
1345
+
1346
+ bsz, length = x.size()
1347
+
1348
+ eye = torch.eye(length, device=x.device, dtype=torch.bool)
1349
+ eye = eye[None, :, :].expand((bsz, -1, -1))
1350
+
1351
+ block_p, block = self.compute_block(distance, height)
1352
+ head_p = self.compute_head(height)
1353
+ head = torch.einsum('blij,bijh->blh', block_p, head_p)
1354
+ head = head.masked_fill(eye, 0)
1355
+ child = head.transpose(1, 2)
1356
+ cibling = torch.bmm(head, child).masked_fill(eye, 0)
1357
+
1358
+ rel_list = []
1359
+ if 'head' in self.config.relations:
1360
+ rel_list.append(head)
1361
+ if 'child' in self.config.relations:
1362
+ rel_list.append(child)
1363
+ if 'cibling' in self.config.relations:
1364
+ rel_list.append(cibling)
1365
+
1366
+ rel = torch.stack(rel_list, dim=1)
1367
+
1368
+ rel_weight = self.rel_weight
1369
+
1370
+ dep = torch.einsum('lhr,brij->lbhij', rel_weight, rel)
1371
+ att_mask = dep.reshape(self.config.num_hidden_layers, bsz, self.config.num_attention_heads, length, length)
1372
+
1373
+ return att_mask, cibling, head, block
1374
+
1375
+ def forward(
1376
+ self,
1377
+ input_ids: Optional[torch.LongTensor] = None,
1378
+ attention_mask: Optional[torch.FloatTensor] = None,
1379
+ token_type_ids: Optional[torch.LongTensor] = None,
1380
+ position_ids: Optional[torch.LongTensor] = None,
1381
+ head_mask: Optional[torch.FloatTensor] = None,
1382
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1383
+ labels: Optional[torch.LongTensor] = None,
1384
+ output_attentions: Optional[bool] = None,
1385
+ output_hidden_states: Optional[bool] = None,
1386
+ return_dict: Optional[bool] = None,
1387
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1388
+ r"""
1389
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1390
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1391
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1392
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1393
+ """
1394
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1395
+
1396
+ distance, height = self.parse(input_ids)
1397
+ att_mask, cibling, head, block = self.generate_mask(input_ids, distance, height)
1398
+
1399
+ outputs = self.roberta(
1400
+ input_ids,
1401
+ attention_mask=attention_mask,
1402
+ token_type_ids=token_type_ids,
1403
+ position_ids=position_ids,
1404
+ head_mask=head_mask,
1405
+ inputs_embeds=inputs_embeds,
1406
+ output_attentions=output_attentions,
1407
+ output_hidden_states=output_hidden_states,
1408
+ return_dict=return_dict,
1409
+ parser_att_mask=att_mask,
1410
+ )
1411
+
1412
+ sequence_output = outputs[0]
1413
+ logits = self.classifier(sequence_output)
1414
+
1415
+ loss = None
1416
+ if labels is not None:
1417
+ if self.config.problem_type is None:
1418
+ if self.num_labels == 1:
1419
+ self.config.problem_type = "regression"
1420
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1421
+ self.config.problem_type = "single_label_classification"
1422
+ else:
1423
+ self.config.problem_type = "multi_label_classification"
1424
+
1425
+ if self.config.problem_type == "regression":
1426
+ loss_fct = MSELoss()
1427
+ if self.num_labels == 1:
1428
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1429
+ else:
1430
+ loss = loss_fct(logits, labels)
1431
+ elif self.config.problem_type == "single_label_classification":
1432
+ loss_fct = CrossEntropyLoss()
1433
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1434
+ elif self.config.problem_type == "multi_label_classification":
1435
+ loss_fct = BCEWithLogitsLoss()
1436
+ loss = loss_fct(logits, labels)
1437
+
1438
+ if not return_dict:
1439
+ output = (logits,) + outputs[2:]
1440
+ return ((loss,) + output) if loss is not None else output
1441
+
1442
+ return SequenceClassifierOutput(
1443
+ loss=loss,
1444
+ logits=logits,
1445
+ hidden_states=outputs.hidden_states,
1446
+ attentions=outputs.attentions,
1447
+ )
1448
+
1449
+
1450
+ class RobertaClassificationHead(nn.Module):
1451
+ """Head for sentence-level classification tasks."""
1452
+
1453
+ def __init__(self, config):
1454
+ super().__init__()
1455
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1456
+ classifier_dropout = (
1457
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1458
+ )
1459
+ self.dropout = nn.Dropout(classifier_dropout)
1460
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1461
+
1462
+ def forward(self, features, **kwargs):
1463
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1464
+ x = self.dropout(x)
1465
+ x = self.dense(x)
1466
+ x = torch.tanh(x)
1467
+ x = self.dropout(x)
1468
+ x = self.out_proj(x)
1469
+ return x
1470
+
1471
+
1472
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1473
+ """
1474
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1475
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1476
+
1477
+ Args:
1478
+ input_ids: torch.Tensor
1479
+
1480
+ Returns: torch.Tensor
1481
+ """
1482
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1483
+ mask = input_ids.ne(padding_idx).int()
1484
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1485
+ return incremental_indices.long() + padding_idx
1486
+
1487
+
1488
+ def cumprod(x, reverse=False, exclusive=False):
1489
+ """cumulative product."""
1490
+ if reverse:
1491
+ x = x.flip([-1])
1492
+
1493
+ if exclusive:
1494
+ x = F.pad(x[:, :, :-1], (1, 0), value=1)
1495
+
1496
+ cx = x.cumprod(-1)
1497
+
1498
+ if reverse:
1499
+ cx = cx.flip([-1])
1500
+ return cx
1501
+
1502
+
1503
+ def cumsum(x, reverse=False, exclusive=False):
1504
+ """cumulative sum."""
1505
+ bsz, _, length = x.size()
1506
+ device = x.device
1507
+ if reverse:
1508
+ if exclusive:
1509
+ w = torch.ones([bsz, length, length], device=device).tril(-1)
1510
+ else:
1511
+ w = torch.ones([bsz, length, length], device=device).tril(0)
1512
+ cx = torch.bmm(x, w)
1513
+ else:
1514
+ if exclusive:
1515
+ w = torch.ones([bsz, length, length], device=device).triu(1)
1516
+ else:
1517
+ w = torch.ones([bsz, length, length], device=device).triu(0)
1518
+ cx = torch.bmm(x, w)
1519
+ return cx
1520
+
1521
+
1522
+ def cummin(x, reverse=False, exclusive=False, max_value=1e9):
1523
+ """cumulative min."""
1524
+ if reverse:
1525
+ if exclusive:
1526
+ x = F.pad(x[:, :, 1:], (0, 1), value=max_value)
1527
+ x = x.flip([-1]).cummin(-1)[0].flip([-1])
1528
+ else:
1529
+ if exclusive:
1530
+ x = F.pad(x[:, :, :-1], (1, 0), value=max_value)
1531
+ x = x.cummin(-1)[0]
1532
+ return x
1533
+
finetune/boolq/predict_results.txt ADDED
@@ -0,0 +1,724 @@
1
+ index prediction
2
+ 0 1
3
+ 1 0
4
+ 2 1
5
+ 3 1
6
+ 4 1
7
+ 5 0
8
+ 6 1
9
+ 7 0
10
+ 8 1
11
+ 9 0
12
+ 10 0
13
+ 11 0
14
+ 12 1
15
+ 13 0
16
+ 14 1
17
+ 15 0
18
+ 16 1
19
+ 17 0
20
+ 18 1
21
+ 19 1
22
+ 20 0
23
+ 21 1
24
+ 22 1
25
+ 23 0
26
+ 24 0
27
+ 25 1
28
+ 26 0
29
+ 27 1
30
+ 28 0
31
+ 29 0
32
+ 30 1
33
+ 31 1
34
+ 32 1
35
+ 33 1
36
+ 34 0
37
+ 35 1
38
+ 36 1
39
+ 37 1
40
+ 38 1
41
+ 39 1
42
+ 40 0
43
+ 41 0
44
+ 42 1
45
+ 43 1
46
+ 44 0
47
+ 45 1
48
+ 46 1
49
+ 47 1
50
+ 48 1
51
+ 49 1
52
+ 50 0
53
+ 51 1
54
+ 52 1
55
+ 53 1
56
+ 54 0
57
+ 55 1
58
+ 56 1
59
+ 57 0
60
+ 58 0
61
+ 59 0
62
+ 60 1
63
+ 61 1
64
+ 62 1
65
+ 63 1
66
+ 64 1
67
+ 65 1
68
+ 66 1
69
+ 67 1
70
+ 68 1
71
+ 69 1
72
+ 70 1
73
+ 71 1
74
+ 72 1
75
+ 73 1
76
+ 74 0
77
+ 75 1
78
+ 76 0
79
+ 77 0
80
+ 78 1
81
+ 79 1
82
+ 80 1
83
+ 81 0
84
+ 82 0
85
+ 83 1
86
+ 84 1
87
+ 85 1
88
+ 86 1
89
+ 87 1
90
+ 88 1
91
+ 89 0
92
+ 90 1
93
+ 91 1
94
+ 92 1
95
+ 93 1
96
+ 94 0
97
+ 95 1
98
+ 96 1
99
+ 97 1
100
+ 98 1
101
+ 99 1
102
+ 100 1
103
+ 101 0
104
+ 102 0
105
+ 103 0
106
+ 104 1
107
+ 105 1
108
+ 106 0
109
+ 107 0
110
+ 108 1
111
+ 109 0
112
+ 110 1
113
+ 111 1
114
+ 112 1
115
+ 113 1
116
+ 114 1
117
+ 115 1
118
+ 116 0
119
+ 117 1
120
+ 118 0
121
+ 119 1
122
+ 120 1
123
+ 121 0
124
+ 122 1
125
+ 123 0
126
+ 124 0
127
+ 125 1
128
+ 126 0
129
+ 127 1
130
+ 128 1
131
+ 129 1
132
+ 130 0
133
+ 131 1
134
+ 132 0
135
+ 133 1
136
+ 134 1
137
+ 135 1
138
+ 136 1
139
+ 137 0
140
+ 138 1
141
+ 139 1
142
+ 140 1
143
+ 141 1
144
+ 142 0
145
+ 143 1
146
+ 144 1
147
+ 145 1
148
+ 146 1
149
+ 147 0
150
+ 148 1
151
+ 149 1
152
+ 150 1
153
+ 151 1
154
+ 152 0
155
+ 153 0
156
+ 154 1
157
+ 155 0
158
+ 156 0
159
+ 157 1
160
+ 158 0
161
+ 159 1
162
+ 160 1
163
+ 161 1
164
+ 162 1
165
+ 163 1
166
+ 164 0
167
+ 165 0
168
+ 166 1
169
+ 167 1
170
+ 168 0
171
+ 169 0
172
+ 170 1
173
+ 171 1
174
+ 172 0
175
+ 173 1
176
+ 174 0
177
+ 175 1
178
+ 176 1
179
+ 177 1
180
+ 178 1
181
+ 179 1
182
+ 180 1
183
+ 181 1
184
+ 182 1
185
+ 183 0
186
+ 184 0
187
+ 185 1
188
+ 186 0
189
+ 187 1
190
+ 188 0
191
+ 189 1
192
+ 190 1
193
+ 191 0
194
+ 192 0
195
+ 193 1
196
+ 194 0
197
+ 195 0
198
+ 196 1
199
+ 197 1
200
+ 198 0
201
+ 199 1
202
+ 200 0
203
+ 201 1
204
+ 202 1
205
+ 203 1
206
+ 204 1
207
+ 205 1
208
+ 206 1
209
+ 207 0
210
+ 208 1
211
+ 209 1
212
+ 210 1
213
+ 211 1
214
+ 212 1
215
+ 213 0
216
+ 214 0
217
+ 215 0
218
+ 216 1
219
+ 217 1
220
+ 218 1
221
+ 219 1
222
+ 220 1
223
+ 221 1
224
+ 222 1
225
+ 223 0
226
+ 224 0
227
+ 225 0
228
+ 226 0
229
+ 227 1
230
+ 228 0
231
+ 229 1
232
+ 230 0
233
+ 231 1
234
+ 232 0
235
+ 233 1
236
+ 234 0
237
+ 235 1
238
+ 236 0
239
+ 237 1
240
+ 238 1
241
+ 239 1
242
+ 240 1
243
+ 241 1
244
+ 242 1
245
+ 243 1
246
+ 244 0
247
+ 245 1
248
+ 246 1
249
+ 247 1
250
+ 248 1
251
+ 249 1
252
+ 250 1
253
+ 251 0
254
+ 252 1
255
+ 253 0
256
+ 254 1
257
+ 255 1
258
+ 256 1
259
+ 257 1
260
+ 258 1
261
+ 259 0
262
+ 260 1
263
+ 261 0
264
+ 262 1
265
+ 263 1
266
+ 264 0
267
+ 265 1
268
+ 266 1
269
+ 267 0
270
+ 268 1
271
+ 269 1
272
+ 270 1
273
+ 271 1
274
+ 272 1
275
+ 273 1
276
+ 274 1
277
+ 275 1
278
+ 276 1
279
+ 277 1
280
+ 278 1
281
+ 279 1
282
+ 280 0
283
+ 281 1
284
+ 282 0
285
+ 283 0
286
+ 284 0
287
+ 285 0
288
+ 286 1
289
+ 287 0
290
+ 288 1
291
+ 289 0
292
+ 290 1
293
+ 291 0
294
+ 292 1
295
+ 293 1
296
+ 294 1
297
+ 295 1
298
+ 296 0
299
+ 297 1
300
+ 298 1
301
+ 299 1
302
+ 300 1
303
+ 301 1
304
+ 302 1
305
+ 303 0
306
+ 304 1
307
+ 305 0
308
+ 306 0
309
+ 307 1
310
+ 308 0
311
+ 309 0
312
+ 310 0
313
+ 311 1
314
+ 312 1
315
+ 313 0
316
+ 314 1
317
+ 315 1
318
+ 316 1
319
+ 317 1
320
+ 318 1
321
+ 319 1
322
+ 320 0
323
+ 321 0
324
+ 322 1
325
+ 323 1
326
+ 324 1
327
+ 325 0
328
+ 326 1
329
+ 327 1
330
+ 328 1
331
+ 329 1
332
+ 330 0
333
+ 331 1
334
+ 332 1
335
+ 333 1
336
+ 334 0
337
+ 335 0
338
+ 336 0
339
+ 337 1
340
+ 338 0
341
+ 339 1
342
+ 340 1
343
+ 341 0
344
+ 342 1
345
+ 343 0
346
+ 344 1
347
+ 345 1
348
+ 346 1
349
+ 347 1
350
+ 348 1
351
+ 349 1
352
+ 350 1
353
+ 351 0
354
+ 352 1
355
+ 353 1
356
+ 354 1
357
+ 355 0
358
+ 356 0
359
+ 357 1
360
+ 358 1
361
+ 359 1
362
+ 360 1
363
+ 361 1
364
+ 362 0
365
+ 363 1
366
+ 364 0
367
+ 365 1
368
+ 366 1
369
+ 367 1
370
+ 368 1
371
+ 369 1
372
+ 370 0
373
+ 371 1
374
+ 372 1
375
+ 373 1
376
+ 374 1
377
+ 375 1
378
+ 376 1
379
+ 377 1
380
+ 378 0
381
+ 379 0
382
+ 380 0
383
+ 381 0
384
+ 382 1
385
+ 383 0
386
+ 384 1
387
+ 385 0
388
+ 386 1
389
+ 387 1
390
+ 388 1
391
+ 389 1
392
+ 390 1
393
+ 391 0
394
+ 392 0
395
+ 393 1
396
+ 394 1
397
+ 395 0
398
+ 396 1
399
+ 397 0
400
+ 398 0
401
+ 399 1
402
+ 400 1
403
+ 401 0
404
+ 402 1
405
+ 403 1
406
+ 404 1
407
+ 405 1
408
+ 406 0
409
+ 407 0
410
+ 408 1
411
+ 409 1
412
+ 410 1
413
+ 411 0
414
+ 412 0
415
+ 413 1
416
+ 414 1
417
+ 415 0
418
+ 416 1
419
+ 417 0
420
+ 418 0
421
+ 419 1
422
+ 420 1
423
+ 421 0
424
+ 422 1
425
+ 423 0
426
+ 424 1
427
+ 425 0
428
+ 426 0
429
+ 427 1
430
+ 428 0
431
+ 429 1
432
+ 430 1
433
+ 431 0
434
+ 432 1
435
+ 433 1
436
+ 434 1
437
+ 435 1
438
+ 436 1
439
+ 437 1
440
+ 438 1
441
+ 439 1
442
+ 440 0
443
+ 441 0
444
+ 442 1
445
+ 443 1
446
+ 444 0
447
+ 445 1
448
+ 446 1
449
+ 447 1
450
+ 448 1
451
+ 449 1
452
+ 450 1
453
+ 451 1
454
+ 452 1
455
+ 453 0
456
+ 454 1
457
+ 455 1
458
+ 456 1
459
+ 457 1
460
+ 458 1
461
+ 459 1
462
+ 460 0
463
+ 461 1
464
+ 462 1
465
+ 463 0
466
+ 464 0
467
+ 465 0
468
+ 466 0
469
+ 467 1
470
+ 468 1
471
+ 469 0
472
+ 470 1
473
+ 471 1
474
+ 472 1
475
+ 473 1
476
+ 474 0
477
+ 475 0
478
+ 476 1
479
+ 477 0
480
+ 478 0
481
+ 479 0
482
+ 480 1
483
+ 481 0
484
+ 482 0
485
+ 483 1
486
+ 484 1
487
+ 485 1
488
+ 486 1
489
+ 487 1
490
+ 488 1
491
+ 489 0
492
+ 490 0
493
+ 491 1
494
+ 492 1
495
+ 493 1
496
+ 494 1
497
+ 495 1
498
+ 496 1
499
+ 497 0
500
+ 498 0
501
+ 499 1
502
+ 500 1
503
+ 501 0
504
+ 502 1
505
+ 503 1
506
+ 504 1
507
+ 505 0
508
+ 506 1
509
+ 507 1
510
+ 508 1
511
+ 509 1
512
+ 510 1
513
+ 511 1
514
+ 512 1
515
+ 513 1
516
+ 514 0
517
+ 515 0
518
+ 516 1
519
+ 517 1
520
+ 518 1
521
+ 519 1
522
+ 520 0
523
+ 521 1
524
+ 522 0
525
+ 523 1
526
+ 524 0
527
+ 525 0
528
+ 526 0
529
+ 527 1
530
+ 528 0
531
+ 529 0
532
+ 530 1
533
+ 531 0
534
+ 532 0
535
+ 533 1
536
+ 534 1
537
+ 535 1
538
+ 536 1
539
+ 537 0
540
+ 538 1
541
+ 539 0
542
+ 540 1
543
+ 541 1
544
+ 542 1
545
+ 543 1
546
+ 544 0
547
+ 545 1
548
+ 546 1
549
+ 547 1
550
+ 548 0
551
+ 549 1
552
+ 550 0
553
+ 551 0
554
+ 552 0
555
+ 553 0
556
+ 554 0
557
+ 555 0
558
+ 556 1
559
+ 557 0
560
+ 558 1
561
+ 559 1
562
+ 560 1
563
+ 561 1
564
+ 562 1
565
+ 563 1
566
+ 564 1
567
+ 565 0
568
+ 566 0
569
+ 567 0
570
+ 568 1
571
+ 569 0
572
+ 570 1
573
+ 571 1
574
+ 572 1
575
+ 573 1
576
+ 574 0
577
+ 575 0
578
+ 576 1
579
+ 577 1
580
+ 578 1
581
+ 579 1
582
+ 580 0
583
+ 581 1
584
+ 582 1
585
+ 583 1
586
+ 584 1
587
+ 585 1
588
+ 586 1
589
+ 587 1
590
+ 588 0
591
+ 589 0
592
+ 590 1
593
+ 591 1
594
+ 592 0
595
+ 593 1
596
+ 594 1
597
+ 595 1
598
+ 596 0
599
+ 597 1
600
+ 598 1
601
+ 599 0
602
+ 600 1
603
+ 601 0
604
+ 602 0
605
+ 603 1
606
+ 604 1
607
+ 605 0
608
+ 606 1
609
+ 607 1
610
+ 608 1
611
+ 609 1
612
+ 610 1
613
+ 611 1
614
+ 612 0
615
+ 613 1
616
+ 614 1
617
+ 615 0
618
+ 616 1
619
+ 617 1
620
+ 618 1
621
+ 619 1
622
+ 620 0
623
+ 621 1
624
+ 622 1
625
+ 623 1
626
+ 624 1
627
+ 625 1
628
+ 626 0
629
+ 627 0
630
+ 628 0
631
+ 629 0
632
+ 630 1
633
+ 631 1
634
+ 632 0
635
+ 633 1
636
+ 634 1
637
+ 635 0
638
+ 636 1
639
+ 637 1
640
+ 638 1
641
+ 639 1
642
+ 640 1
643
+ 641 0
644
+ 642 1
645
+ 643 0
646
+ 644 0
647
+ 645 1
648
+ 646 1
649
+ 647 0
650
+ 648 0
651
+ 649 0
652
+ 650 1
653
+ 651 0
654
+ 652 1
655
+ 653 0
656
+ 654 0
657
+ 655 1
658
+ 656 1
659
+ 657 0
660
+ 658 0
661
+ 659 1
662
+ 660 1
663
+ 661 0
664
+ 662 1
665
+ 663 1
666
+ 664 0
667
+ 665 1
668
+ 666 0
669
+ 667 1
670
+ 668 0
671
+ 669 1
672
+ 670 1
673
+ 671 1
674
+ 672 1
675
+ 673 1
676
+ 674 0
677
+ 675 0
678
+ 676 1
679
+ 677 1
680
+ 678 0
681
+ 679 1
682
+ 680 1
683
+ 681 1
684
+ 682 0
685
+ 683 0
686
+ 684 1
687
+ 685 1
688
+ 686 0
689
+ 687 1
690
+ 688 1
691
+ 689 1
692
+ 690 1
693
+ 691 0
694
+ 692 0
695
+ 693 0
696
+ 694 1
697
+ 695 1
698
+ 696 0
699
+ 697 1
700
+ 698 1
701
+ 699 1
702
+ 700 1
703
+ 701 1
704
+ 702 0
705
+ 703 1
706
+ 704 1
707
+ 705 1
708
+ 706 1
709
+ 707 1
710
+ 708 1
711
+ 709 1
712
+ 710 1
713
+ 711 0
714
+ 712 1
715
+ 713 0
716
+ 714 0
717
+ 715 1
718
+ 716 0
719
+ 717 0
720
+ 718 0
721
+ 719 0
722
+ 720 1
723
+ 721 1
724
+ 722 1
finetune/boolq/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e0a3f3a1a12a76a751ac481304d6a9fbb681fa72ead01edfe55d2b17a57fe41
3
+ size 577068929
finetune/boolq/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
finetune/boolq/tokenizer_config.json ADDED
@@ -0,0 +1,65 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "bos_token": {
4
+ "__type": "AddedToken",
5
+ "content": "<s>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false
10
+ },
11
+ "cls_token": {
12
+ "__type": "AddedToken",
13
+ "content": "<s>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false
18
+ },
19
+ "eos_token": {
20
+ "__type": "AddedToken",
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false
26
+ },
27
+ "errors": "replace",
28
+ "mask_token": {
29
+ "__type": "AddedToken",
30
+ "content": "<mask>",
31
+ "lstrip": true,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ },
36
+ "model_max_length": 512,
37
+ "name_or_path": "final_models/glue_models/structroberta_s2_50ep/",
38
+ "pad_token": {
39
+ "__type": "AddedToken",
40
+ "content": "<pad>",
41
+ "lstrip": false,
42
+ "normalized": true,
43
+ "rstrip": false,
44
+ "single_word": false
45
+ },
46
+ "sep_token": {
47
+ "__type": "AddedToken",
48
+ "content": "</s>",
49
+ "lstrip": false,
50
+ "normalized": true,
51
+ "rstrip": false,
52
+ "single_word": false
53
+ },
54
+ "special_tokens_map_file": null,
55
+ "tokenizer_class": "RobertaTokenizer",
56
+ "trim_offsets": true,
57
+ "unk_token": {
58
+ "__type": "AddedToken",
59
+ "content": "<unk>",
60
+ "lstrip": false,
61
+ "normalized": true,
62
+ "rstrip": false,
63
+ "single_word": false
64
+ }
65
+ }
finetune/boolq/train_results.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "train_loss": 0.27251947576349433,
4
+ "train_runtime": 112.6988,
5
+ "train_samples": 2072,
6
+ "train_samples_per_second": 183.853,
7
+ "train_steps_per_second": 2.928
8
+ }
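As a quick sanity check, the throughput fields above follow from the sample count, epoch count, runtime, and the 330 optimization steps reported in the trainer state that follows; a minimal sketch of the arithmetic:

print(2072 * 10 / 112.6988)   # ≈ 183.853 -> train_samples_per_second
print(330 / 112.6988)         # ≈ 2.928   -> train_steps_per_second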
finetune/boolq/trainer_state.json ADDED
@@ -0,0 +1,36 @@
1
+ {
2
+ "best_metric": 0.7136563876651982,
3
+ "best_model_checkpoint": "final_models/glue_models/structroberta_s2_50ep//finetune/boolq/checkpoint-200",
4
+ "epoch": 10.0,
5
+ "global_step": 330,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 6.06,
12
+ "eval_accuracy": 0.6403872966766357,
13
+ "eval_f1": 0.7136563876651982,
14
+ "eval_loss": 1.1364797353744507,
15
+ "eval_mcc": 0.2349638460835621,
16
+ "eval_runtime": 1.5486,
17
+ "eval_samples_per_second": 466.881,
18
+ "eval_steps_per_second": 58.764,
19
+ "step": 200
20
+ },
21
+ {
22
+ "epoch": 10.0,
23
+ "step": 330,
24
+ "total_flos": 1898035330867200.0,
25
+ "train_loss": 0.27251947576349433,
26
+ "train_runtime": 112.6988,
27
+ "train_samples_per_second": 183.853,
28
+ "train_steps_per_second": 2.928
29
+ }
30
+ ],
31
+ "max_steps": 330,
32
+ "num_train_epochs": 10,
33
+ "total_flos": 1898035330867200.0,
34
+ "trial_name": null,
35
+ "trial_params": null
36
+ }
finetune/boolq/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b963fe064bd4c60798341d3b1909122bedde4360e8a87969579eac82b19317ae
3
+ size 3503
finetune/boolq/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
finetune/cola/all_results.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_accuracy": 0.7114818692207336,
4
+ "eval_f1": 0.8218181818181818,
5
+ "eval_loss": 0.5872910022735596,
6
+ "eval_mcc": 0.18948085721928823,
7
+ "eval_runtime": 2.2097,
8
+ "eval_samples": 1019,
9
+ "eval_samples_per_second": 461.149,
10
+ "eval_steps_per_second": 57.927,
11
+ "train_loss": 0.22346576005220414,
12
+ "train_runtime": 448.112,
13
+ "train_samples": 8164,
14
+ "train_samples_per_second": 182.187,
15
+ "train_steps_per_second": 2.856
16
+ }
finetune/cola/config.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "_name_or_path": "final_models/glue_models/structroberta_s2_50ep/",
3
+ "architectures": [
4
+ "StructRobertaForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "auto_map": {
8
+ "AutoConfig": "modeling_structroberta.StructRobertaConfig",
9
+ "AutoModelForMaskedLM": "modeling_structroberta.StructRoberta",
10
+ "AutoModelForSequenceClassification": "modeling_structroberta.StructRobertaForSequenceClassification"
11
+ },
12
+ "bos_token_id": 0,
13
+ "classifier_dropout": null,
14
+ "conv_size": 9,
15
+ "eos_token_id": 2,
16
+ "hidden_act": "gelu",
17
+ "hidden_dropout_prob": 0.1,
18
+ "hidden_size": 768,
19
+ "id2label": {
20
+ "0": 0,
21
+ "1": 1
22
+ },
23
+ "initializer_range": 0.02,
24
+ "intermediate_size": 3072,
25
+ "label2id": {
26
+ "0": 0,
27
+ "1": 1
28
+ },
29
+ "layer_norm_eps": 1e-05,
30
+ "max_position_embeddings": 514,
31
+ "model_type": "roberta",
32
+ "n_parser_layers": 6,
33
+ "num_attention_heads": 12,
34
+ "num_hidden_layers": 12,
35
+ "pad_token_id": 1,
36
+ "position_embedding_type": "absolute",
37
+ "problem_type": "single_label_classification",
38
+ "relations": [
39
+ "head",
40
+ "child"
41
+ ],
42
+ "torch_dtype": "float32",
43
+ "transformers_version": "4.26.1",
44
+ "type_vocab_size": 1,
45
+ "use_cache": true,
46
+ "vocab_size": 32000,
47
+ "weight_act": "softmax"
48
+ }
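The auto_map above routes AutoModelForSequenceClassification to the custom StructRobertaForSequenceClassification class shipped with the checkpoint, so loading it requires trust_remote_code. A minimal usage sketch, assuming the files in this folder are available locally under finetune/cola (the path is illustrative):

from transformers import AutoTokenizer, AutoModelForSequenceClassification

ckpt = "finetune/cola"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForSequenceClassification.from_pretrained(ckpt, trust_remote_code=True)
inputs = tokenizer("The cat sat on the mat.", return_tensors="pt")
print(model(**inputs).logits)   # shape (1, 2), one logit per CoLA label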
finetune/cola/eval_results.json ADDED
@@ -0,0 +1,11 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_accuracy": 0.7114818692207336,
4
+ "eval_f1": 0.8218181818181818,
5
+ "eval_loss": 0.5872910022735596,
6
+ "eval_mcc": 0.18948085721928823,
7
+ "eval_runtime": 2.2097,
8
+ "eval_samples": 1019,
9
+ "eval_samples_per_second": 461.149,
10
+ "eval_steps_per_second": 57.927
11
+ }
finetune/cola/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
finetune/cola/modeling_structroberta.py ADDED
@@ -0,0 +1,1533 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch RoBERTa model."""
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from packaging import version
24
+ from torch import nn
25
+ import torch.nn.functional as F
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from transformers.activations import ACT2FN, gelu
29
+ from transformers.modeling_outputs import (
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ BaseModelOutputWithPoolingAndCrossAttentions,
32
+ MaskedLMOutput,
33
+ SequenceClassifierOutput
34
+ )
35
+ from transformers.modeling_utils import (
36
+ PreTrainedModel,
37
+ apply_chunking_to_forward,
38
+ find_pruneable_heads_and_indices,
39
+ prune_linear_layer,
40
+ )
41
+ from transformers.utils import (
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ )
47
+ from transformers import RobertaConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "roberta-base"
53
+ _CONFIG_FOR_DOC = "RobertaConfig"
54
+ _TOKENIZER_FOR_DOC = "RobertaTokenizer"
55
+
56
+ ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
57
+ "roberta-base",
58
+ "roberta-large",
59
+ "roberta-large-mnli",
60
+ "distilroberta-base",
61
+ "roberta-base-openai-detector",
62
+ "roberta-large-openai-detector",
63
+ # See all RoBERTa models at https://huggingface.co/models?filter=roberta
64
+ ]
65
+
66
+
67
+ class StructRobertaConfig(RobertaConfig):
68
+ model_type = "roberta"
69
+
70
+ def __init__(
71
+ self,
72
+ n_parser_layers=4,
73
+ conv_size=9,
74
+ relations=('head', 'child'),
75
+ weight_act='softmax',
76
+ **kwargs,
77
+ ):
78
+ super().__init__(**kwargs)
79
+ self.n_parser_layers = n_parser_layers
80
+ self.conv_size = conv_size
81
+ self.relations = relations
82
+ self.weight_act = weight_act
83
+
84
+ class Conv1d(nn.Module):
85
+ """1D convolution layer."""
86
+
87
+ def __init__(self, hidden_size, kernel_size, dilation=1):
88
+ """Initialization.
89
+
90
+ Args:
91
+ hidden_size: dimension of input embeddings
92
+ kernel_size: convolution kernel size
93
+ dilation: the spacing between the kernel points
94
+ """
95
+ super(Conv1d, self).__init__()
96
+
97
+ if kernel_size % 2 == 0:
98
+ padding = (kernel_size // 2) * dilation
99
+ self.shift = True
100
+ else:
101
+ padding = ((kernel_size - 1) // 2) * dilation
102
+ self.shift = False
103
+ self.conv = nn.Conv1d(
104
+ hidden_size,
105
+ hidden_size,
106
+ kernel_size,
107
+ padding=padding,
108
+ dilation=dilation)
109
+
110
+ def forward(self, x):
111
+ """Compute convolution.
112
+
113
+ Args:
114
+ x: input embeddings
115
+ Returns:
116
+ conv_output: convolution results
117
+ """
118
+
119
+ if self.shift:
120
+ return self.conv(x.transpose(1, 2)).transpose(1, 2)[:, 1:]
121
+ else:
122
+ return self.conv(x.transpose(1, 2)).transpose(1, 2)
123
+
124
+ class RobertaEmbeddings(nn.Module):
125
+ """
126
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
127
+ """
128
+
129
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
130
+ def __init__(self, config):
131
+ super().__init__()
132
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
133
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
134
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
135
+
136
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
137
+ # any TensorFlow checkpoint file
138
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
139
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
140
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
141
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
142
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
143
+ if version.parse(torch.__version__) > version.parse("1.6.0"):
144
+ self.register_buffer(
145
+ "token_type_ids",
146
+ torch.zeros(self.position_ids.size(), dtype=torch.long),
147
+ persistent=False,
148
+ )
149
+
150
+ # End copy
151
+ self.padding_idx = config.pad_token_id
152
+ self.position_embeddings = nn.Embedding(
153
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
154
+ )
155
+
156
+ def forward(
157
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
158
+ ):
159
+ if position_ids is None:
160
+ if input_ids is not None:
161
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
162
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
163
+ else:
164
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
165
+
166
+ if input_ids is not None:
167
+ input_shape = input_ids.size()
168
+ else:
169
+ input_shape = inputs_embeds.size()[:-1]
170
+
171
+ seq_length = input_shape[1]
172
+
173
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
174
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
175
+ # issue #5664
176
+ if token_type_ids is None:
177
+ if hasattr(self, "token_type_ids"):
178
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
179
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
180
+ token_type_ids = buffered_token_type_ids_expanded
181
+ else:
182
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
183
+
184
+ if inputs_embeds is None:
185
+ inputs_embeds = self.word_embeddings(input_ids)
186
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
187
+
188
+ embeddings = inputs_embeds + token_type_embeddings
189
+ if self.position_embedding_type == "absolute":
190
+ position_embeddings = self.position_embeddings(position_ids)
191
+ embeddings += position_embeddings
192
+ embeddings = self.LayerNorm(embeddings)
193
+ embeddings = self.dropout(embeddings)
194
+ return embeddings
195
+
196
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
197
+ """
198
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
199
+
200
+ Args:
201
+ inputs_embeds: torch.Tensor
202
+
203
+ Returns: torch.Tensor
204
+ """
205
+ input_shape = inputs_embeds.size()[:-1]
206
+ sequence_length = input_shape[1]
207
+
208
+ position_ids = torch.arange(
209
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
210
+ )
211
+ return position_ids.unsqueeze(0).expand(input_shape)
212
+
213
+
214
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
215
+ class RobertaSelfAttention(nn.Module):
216
+ def __init__(self, config, position_embedding_type=None):
217
+ super().__init__()
218
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
219
+ raise ValueError(
220
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
221
+ f"heads ({config.num_attention_heads})"
222
+ )
223
+
224
+ self.num_attention_heads = config.num_attention_heads
225
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
226
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
227
+
228
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
229
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
230
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
231
+
232
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
233
+ self.position_embedding_type = position_embedding_type or getattr(
234
+ config, "position_embedding_type", "absolute"
235
+ )
236
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
237
+ self.max_position_embeddings = config.max_position_embeddings
238
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
239
+
240
+ self.is_decoder = config.is_decoder
241
+
242
+ def transpose_for_scores(self, x):
243
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
244
+ x = x.view(new_x_shape)
245
+ return x.permute(0, 2, 1, 3)
246
+
247
+ def forward(
248
+ self,
249
+ hidden_states: torch.Tensor,
250
+ attention_mask: Optional[torch.FloatTensor] = None,
251
+ head_mask: Optional[torch.FloatTensor] = None,
252
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
253
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
254
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
255
+ output_attentions: Optional[bool] = False,
256
+ parser_att_mask=None,
257
+ ) -> Tuple[torch.Tensor]:
258
+ mixed_query_layer = self.query(hidden_states)
259
+
260
+ # If this is instantiated as a cross-attention module, the keys
261
+ # and values come from an encoder; the attention mask needs to be
262
+ # such that the encoder's padding tokens are not attended to.
263
+ is_cross_attention = encoder_hidden_states is not None
264
+
265
+ if is_cross_attention and past_key_value is not None:
266
+ # reuse k,v, cross_attentions
267
+ key_layer = past_key_value[0]
268
+ value_layer = past_key_value[1]
269
+ attention_mask = encoder_attention_mask
270
+ elif is_cross_attention:
271
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
272
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
273
+ attention_mask = encoder_attention_mask
274
+ elif past_key_value is not None:
275
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
276
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
277
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
278
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
279
+ else:
280
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
281
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
282
+
283
+ query_layer = self.transpose_for_scores(mixed_query_layer)
284
+
285
+ if self.is_decoder:
286
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
287
+ # Further calls to cross_attention layer can then reuse all cross-attention
288
+ # key/value_states (first "if" case)
289
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
290
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
291
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
292
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
293
+ past_key_value = (key_layer, value_layer)
294
+
295
+ # Take the dot product between "query" and "key" to get the raw attention scores.
296
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
297
+
298
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
299
+ seq_length = hidden_states.size()[1]
300
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
301
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
302
+ distance = position_ids_l - position_ids_r
303
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
304
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
305
+
306
+ if self.position_embedding_type == "relative_key":
307
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
308
+ attention_scores = attention_scores + relative_position_scores
309
+ elif self.position_embedding_type == "relative_key_query":
310
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
311
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
312
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
313
+
314
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
315
+ if attention_mask is not None:
316
+ # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
317
+ attention_scores = attention_scores + attention_mask
318
+
319
+
320
+ if parser_att_mask is None:
321
+ # Normalize the attention scores to probabilities.
322
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
323
+ else:
324
+ attention_probs = torch.sigmoid(attention_scores) * parser_att_mask
325
+
326
+ # This is actually dropping out entire tokens to attend to, which might
327
+ # seem a bit unusual, but is taken from the original Transformer paper.
328
+ attention_probs = self.dropout(attention_probs)
329
+
330
+ # Mask heads if we want to
331
+ if head_mask is not None:
332
+ attention_probs = attention_probs * head_mask
333
+
334
+ context_layer = torch.matmul(attention_probs, value_layer)
335
+
336
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
337
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
338
+ context_layer = context_layer.view(new_context_layer_shape)
339
+
340
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
341
+
342
+ if self.is_decoder:
343
+ outputs = outputs + (past_key_value,)
344
+ return outputs
345
+
346
+
347
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
348
+ class RobertaSelfOutput(nn.Module):
349
+ def __init__(self, config):
350
+ super().__init__()
351
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
352
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
353
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
354
+
355
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
356
+ hidden_states = self.dense(hidden_states)
357
+ hidden_states = self.dropout(hidden_states)
358
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
359
+ return hidden_states
360
+
361
+
362
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
363
+ class RobertaAttention(nn.Module):
364
+ def __init__(self, config, position_embedding_type=None):
365
+ super().__init__()
366
+ self.self = RobertaSelfAttention(config, position_embedding_type=position_embedding_type)
367
+ self.output = RobertaSelfOutput(config)
368
+ self.pruned_heads = set()
369
+
370
+ def prune_heads(self, heads):
371
+ if len(heads) == 0:
372
+ return
373
+ heads, index = find_pruneable_heads_and_indices(
374
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
375
+ )
376
+
377
+ # Prune linear layers
378
+ self.self.query = prune_linear_layer(self.self.query, index)
379
+ self.self.key = prune_linear_layer(self.self.key, index)
380
+ self.self.value = prune_linear_layer(self.self.value, index)
381
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
382
+
383
+ # Update hyper params and store pruned heads
384
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
385
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
386
+ self.pruned_heads = self.pruned_heads.union(heads)
387
+
388
+ def forward(
389
+ self,
390
+ hidden_states: torch.Tensor,
391
+ attention_mask: Optional[torch.FloatTensor] = None,
392
+ head_mask: Optional[torch.FloatTensor] = None,
393
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
394
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
395
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
396
+ output_attentions: Optional[bool] = False,
397
+ parser_att_mask=None,
398
+ ) -> Tuple[torch.Tensor]:
399
+ self_outputs = self.self(
400
+ hidden_states,
401
+ attention_mask,
402
+ head_mask,
403
+ encoder_hidden_states,
404
+ encoder_attention_mask,
405
+ past_key_value,
406
+ output_attentions,
407
+ parser_att_mask=parser_att_mask,
408
+ )
409
+ attention_output = self.output(self_outputs[0], hidden_states)
410
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
411
+ return outputs
412
+
413
+
414
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
415
+ class RobertaIntermediate(nn.Module):
416
+ def __init__(self, config):
417
+ super().__init__()
418
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
419
+ if isinstance(config.hidden_act, str):
420
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
421
+ else:
422
+ self.intermediate_act_fn = config.hidden_act
423
+
424
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
425
+ hidden_states = self.dense(hidden_states)
426
+ hidden_states = self.intermediate_act_fn(hidden_states)
427
+ return hidden_states
428
+
429
+
430
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
431
+ class RobertaOutput(nn.Module):
432
+ def __init__(self, config):
433
+ super().__init__()
434
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
435
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
436
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
437
+
438
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
439
+ hidden_states = self.dense(hidden_states)
440
+ hidden_states = self.dropout(hidden_states)
441
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
442
+ return hidden_states
443
+
444
+
445
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
446
+ class RobertaLayer(nn.Module):
447
+ def __init__(self, config):
448
+ super().__init__()
449
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
450
+ self.seq_len_dim = 1
451
+ self.attention = RobertaAttention(config)
452
+ self.is_decoder = config.is_decoder
453
+ self.add_cross_attention = config.add_cross_attention
454
+ if self.add_cross_attention:
455
+ if not self.is_decoder:
456
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
457
+ self.crossattention = RobertaAttention(config, position_embedding_type="absolute")
458
+ self.intermediate = RobertaIntermediate(config)
459
+ self.output = RobertaOutput(config)
460
+
461
+ def forward(
462
+ self,
463
+ hidden_states: torch.Tensor,
464
+ attention_mask: Optional[torch.FloatTensor] = None,
465
+ head_mask: Optional[torch.FloatTensor] = None,
466
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
467
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
468
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
469
+ output_attentions: Optional[bool] = False,
470
+ parser_att_mask=None,
471
+ ) -> Tuple[torch.Tensor]:
472
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
473
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
474
+ self_attention_outputs = self.attention(
475
+ hidden_states,
476
+ attention_mask,
477
+ head_mask,
478
+ output_attentions=output_attentions,
479
+ past_key_value=self_attn_past_key_value,
480
+ parser_att_mask=parser_att_mask,
481
+ )
482
+ attention_output = self_attention_outputs[0]
483
+
484
+ # if decoder, the last output is tuple of self-attn cache
485
+ if self.is_decoder:
486
+ outputs = self_attention_outputs[1:-1]
487
+ present_key_value = self_attention_outputs[-1]
488
+ else:
489
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
490
+
491
+ cross_attn_present_key_value = None
492
+ if self.is_decoder and encoder_hidden_states is not None:
493
+ if not hasattr(self, "crossattention"):
494
+ raise ValueError(
495
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
496
+ )
497
+
498
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
499
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
500
+ cross_attention_outputs = self.crossattention(
501
+ attention_output,
502
+ attention_mask,
503
+ head_mask,
504
+ encoder_hidden_states,
505
+ encoder_attention_mask,
506
+ cross_attn_past_key_value,
507
+ output_attentions,
508
+ )
509
+ attention_output = cross_attention_outputs[0]
510
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
511
+
512
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
513
+ cross_attn_present_key_value = cross_attention_outputs[-1]
514
+ present_key_value = present_key_value + cross_attn_present_key_value
515
+
516
+ layer_output = apply_chunking_to_forward(
517
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
518
+ )
519
+ outputs = (layer_output,) + outputs
520
+
521
+ # if decoder, return the attn key/values as the last output
522
+ if self.is_decoder:
523
+ outputs = outputs + (present_key_value,)
524
+
525
+ return outputs
526
+
527
+ def feed_forward_chunk(self, attention_output):
528
+ intermediate_output = self.intermediate(attention_output)
529
+ layer_output = self.output(intermediate_output, attention_output)
530
+ return layer_output
531
+
532
+
533
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
534
+ class RobertaEncoder(nn.Module):
535
+ def __init__(self, config):
536
+ super().__init__()
537
+ self.config = config
538
+ self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
539
+ self.gradient_checkpointing = False
540
+
541
+ def forward(
542
+ self,
543
+ hidden_states: torch.Tensor,
544
+ attention_mask: Optional[torch.FloatTensor] = None,
545
+ head_mask: Optional[torch.FloatTensor] = None,
546
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
547
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
548
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
549
+ use_cache: Optional[bool] = None,
550
+ output_attentions: Optional[bool] = False,
551
+ output_hidden_states: Optional[bool] = False,
552
+ return_dict: Optional[bool] = True,
553
+ parser_att_mask=None,
554
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
555
+ all_hidden_states = () if output_hidden_states else None
556
+ all_self_attentions = () if output_attentions else None
557
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
558
+
559
+ next_decoder_cache = () if use_cache else None
560
+ for i, layer_module in enumerate(self.layer):
561
+ if output_hidden_states:
562
+ all_hidden_states = all_hidden_states + (hidden_states,)
563
+
564
+ layer_head_mask = head_mask[i] if head_mask is not None else None
565
+ past_key_value = past_key_values[i] if past_key_values is not None else None
566
+
567
+ if self.gradient_checkpointing and self.training:
568
+
569
+ if use_cache:
570
+ logger.warning(
571
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
572
+ )
573
+ use_cache = False
574
+
575
+ def create_custom_forward(module):
576
+ def custom_forward(*inputs):
577
+ return module(*inputs, past_key_value, output_attentions)
578
+
579
+ return custom_forward
580
+
581
+ layer_outputs = torch.utils.checkpoint.checkpoint(
582
+ create_custom_forward(layer_module),
583
+ hidden_states,
584
+ attention_mask,
585
+ layer_head_mask,
586
+ encoder_hidden_states,
587
+ encoder_attention_mask,
588
+ )
589
+ else:
590
+ layer_outputs = layer_module(
591
+ hidden_states,
592
+ attention_mask,
593
+ layer_head_mask,
594
+ encoder_hidden_states,
595
+ encoder_attention_mask,
596
+ past_key_value,
597
+ output_attentions,
598
+ parser_att_mask=parser_att_mask[i],
599
+ )
600
+
601
+ hidden_states = layer_outputs[0]
602
+ if use_cache:
603
+ next_decoder_cache += (layer_outputs[-1],)
604
+ if output_attentions:
605
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
606
+ if self.config.add_cross_attention:
607
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
608
+
609
+ if output_hidden_states:
610
+ all_hidden_states = all_hidden_states + (hidden_states,)
611
+
612
+ if not return_dict:
613
+ return tuple(
614
+ v
615
+ for v in [
616
+ hidden_states,
617
+ next_decoder_cache,
618
+ all_hidden_states,
619
+ all_self_attentions,
620
+ all_cross_attentions,
621
+ ]
622
+ if v is not None
623
+ )
624
+ return BaseModelOutputWithPastAndCrossAttentions(
625
+ last_hidden_state=hidden_states,
626
+ past_key_values=next_decoder_cache,
627
+ hidden_states=all_hidden_states,
628
+ attentions=all_self_attentions,
629
+ cross_attentions=all_cross_attentions,
630
+ )
631
+
632
+
633
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
634
+ class RobertaPooler(nn.Module):
635
+ def __init__(self, config):
636
+ super().__init__()
637
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
638
+ self.activation = nn.Tanh()
639
+
640
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
641
+ # We "pool" the model by simply taking the hidden state corresponding
642
+ # to the first token.
643
+ first_token_tensor = hidden_states[:, 0]
644
+ pooled_output = self.dense(first_token_tensor)
645
+ pooled_output = self.activation(pooled_output)
646
+ return pooled_output
647
+
648
+
649
+ class RobertaPreTrainedModel(PreTrainedModel):
650
+ """
651
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
652
+ models.
653
+ """
654
+
655
+ config_class = RobertaConfig
656
+ base_model_prefix = "roberta"
657
+ supports_gradient_checkpointing = True
658
+
659
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
660
+ def _init_weights(self, module):
661
+ """Initialize the weights"""
662
+ if isinstance(module, nn.Linear):
663
+ # Slightly different from the TF version which uses truncated_normal for initialization
664
+ # cf https://github.com/pytorch/pytorch/pull/5617
665
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
666
+ if module.bias is not None:
667
+ module.bias.data.zero_()
668
+ elif isinstance(module, nn.Embedding):
669
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
670
+ if module.padding_idx is not None:
671
+ module.weight.data[module.padding_idx].zero_()
672
+ elif isinstance(module, nn.LayerNorm):
673
+ if module.bias is not None:
674
+ module.bias.data.zero_()
675
+ module.weight.data.fill_(1.0)
676
+
677
+ def _set_gradient_checkpointing(self, module, value=False):
678
+ if isinstance(module, RobertaEncoder):
679
+ module.gradient_checkpointing = value
680
+
681
+ def update_keys_to_ignore(self, config, del_keys_to_ignore):
682
+ """Remove some keys from ignore list"""
683
+ if not config.tie_word_embeddings:
684
+ # must make a new list, or the class variable gets modified!
685
+ self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
686
+ self._keys_to_ignore_on_load_missing = [
687
+ k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
688
+ ]
689
+
690
+
691
+ ROBERTA_START_DOCSTRING = r"""
692
+
693
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
694
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
695
+ etc.)
696
+
697
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
698
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
699
+ and behavior.
700
+
701
+ Parameters:
702
+ config ([`RobertaConfig`]): Model configuration class with all the parameters of the
703
+ model. Initializing with a config file does not load the weights associated with the model, only the
704
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
705
+ """
706
+
707
+
708
+ ROBERTA_INPUTS_DOCSTRING = r"""
709
+ Args:
710
+ input_ids (`torch.LongTensor` of shape `({0})`):
711
+ Indices of input sequence tokens in the vocabulary.
712
+
713
+ Indices can be obtained using [`RobertaTokenizer`]. See [`PreTrainedTokenizer.encode`] and
714
+ [`PreTrainedTokenizer.__call__`] for details.
715
+
716
+ [What are input IDs?](../glossary#input-ids)
717
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
718
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
719
+
720
+ - 1 for tokens that are **not masked**,
721
+ - 0 for tokens that are **masked**.
722
+
723
+ [What are attention masks?](../glossary#attention-mask)
724
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
725
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
726
+ 1]`:
727
+
728
+ - 0 corresponds to a *sentence A* token,
729
+ - 1 corresponds to a *sentence B* token.
730
+
731
+ [What are token type IDs?](../glossary#token-type-ids)
732
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
733
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
734
+ config.max_position_embeddings - 1]`.
735
+
736
+ [What are position IDs?](../glossary#position-ids)
737
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
738
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
739
+
740
+ - 1 indicates the head is **not masked**,
741
+ - 0 indicates the head is **masked**.
742
+
743
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
744
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
745
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
746
+ model's internal embedding lookup matrix.
747
+ output_attentions (`bool`, *optional*):
748
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
749
+ tensors for more detail.
750
+ output_hidden_states (`bool`, *optional*):
751
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
752
+ more detail.
753
+ return_dict (`bool`, *optional*):
754
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
755
+ """
756
+
757
+
758
+ class RobertaModel(RobertaPreTrainedModel):
759
+ """
760
+
761
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
762
+ cross-attention is added between the self-attention layers, following the architecture described in *Attention is
763
+ all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
764
+ Kaiser and Illia Polosukhin.
765
+
766
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
767
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
768
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
769
+
770
+ .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
771
+
772
+ """
773
+
774
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
775
+
776
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
777
+ def __init__(self, config, add_pooling_layer=True):
778
+ super().__init__(config)
779
+ self.config = config
780
+
781
+ self.embeddings = RobertaEmbeddings(config)
782
+ self.encoder = RobertaEncoder(config)
783
+
784
+ self.pooler = RobertaPooler(config) if add_pooling_layer else None
785
+
786
+ # Initialize weights and apply final processing
787
+ self.post_init()
788
+
789
+ def get_input_embeddings(self):
790
+ return self.embeddings.word_embeddings
791
+
792
+ def set_input_embeddings(self, value):
793
+ self.embeddings.word_embeddings = value
794
+
795
+ def _prune_heads(self, heads_to_prune):
796
+ """
797
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
798
+ class PreTrainedModel
799
+ """
800
+ for layer, heads in heads_to_prune.items():
801
+ self.encoder.layer[layer].attention.prune_heads(heads)
802
+
803
+ # Copied from transformers.models.bert.modeling_bert.BertModel.forward
804
+ def forward(
805
+ self,
806
+ input_ids: Optional[torch.Tensor] = None,
807
+ attention_mask: Optional[torch.Tensor] = None,
808
+ token_type_ids: Optional[torch.Tensor] = None,
809
+ position_ids: Optional[torch.Tensor] = None,
810
+ head_mask: Optional[torch.Tensor] = None,
811
+ inputs_embeds: Optional[torch.Tensor] = None,
812
+ encoder_hidden_states: Optional[torch.Tensor] = None,
813
+ encoder_attention_mask: Optional[torch.Tensor] = None,
814
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
815
+ use_cache: Optional[bool] = None,
816
+ output_attentions: Optional[bool] = None,
817
+ output_hidden_states: Optional[bool] = None,
818
+ return_dict: Optional[bool] = None,
819
+ parser_att_mask=None,
820
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
821
+ r"""
822
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
823
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
824
+ the model is configured as a decoder.
825
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
826
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
827
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
828
+
829
+ - 1 for tokens that are **not masked**,
830
+ - 0 for tokens that are **masked**.
831
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
832
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
833
+
834
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
835
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
836
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
837
+ use_cache (`bool`, *optional*):
838
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
839
+ `past_key_values`).
840
+ """
841
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
842
+ output_hidden_states = (
843
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
844
+ )
845
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
846
+
847
+ if self.config.is_decoder:
848
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
849
+ else:
850
+ use_cache = False
851
+
852
+ if input_ids is not None and inputs_embeds is not None:
853
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
854
+ elif input_ids is not None:
855
+ input_shape = input_ids.size()
856
+ elif inputs_embeds is not None:
857
+ input_shape = inputs_embeds.size()[:-1]
858
+ else:
859
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
860
+
861
+ batch_size, seq_length = input_shape
862
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
863
+
864
+ # past_key_values_length
865
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
866
+
867
+ if attention_mask is None:
868
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
869
+
870
+ if token_type_ids is None:
871
+ if hasattr(self.embeddings, "token_type_ids"):
872
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
873
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
874
+ token_type_ids = buffered_token_type_ids_expanded
875
+ else:
876
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
877
+
878
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
879
+ # ourselves in which case we just need to make it broadcastable to all heads.
880
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
881
+
882
+ # If a 2D or 3D attention mask is provided for the cross-attention
883
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
884
+ if self.config.is_decoder and encoder_hidden_states is not None:
885
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
886
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
887
+ if encoder_attention_mask is None:
888
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
889
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
890
+ else:
891
+ encoder_extended_attention_mask = None
892
+
893
+ # Prepare head mask if needed
894
+ # 1.0 in head_mask indicate we keep the head
895
+ # attention_probs has shape bsz x n_heads x N x N
896
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
897
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
898
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
899
+
900
+ embedding_output = self.embeddings(
901
+ input_ids=input_ids,
902
+ position_ids=position_ids,
903
+ token_type_ids=token_type_ids,
904
+ inputs_embeds=inputs_embeds,
905
+ past_key_values_length=past_key_values_length,
906
+ )
907
+ encoder_outputs = self.encoder(
908
+ embedding_output,
909
+ attention_mask=extended_attention_mask,
910
+ head_mask=head_mask,
911
+ encoder_hidden_states=encoder_hidden_states,
912
+ encoder_attention_mask=encoder_extended_attention_mask,
913
+ past_key_values=past_key_values,
914
+ use_cache=use_cache,
915
+ output_attentions=output_attentions,
916
+ output_hidden_states=output_hidden_states,
917
+ return_dict=return_dict,
918
+ parser_att_mask=parser_att_mask
919
+ )
920
+ sequence_output = encoder_outputs[0]
921
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
922
+
923
+ if not return_dict:
924
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
925
+
926
+ return BaseModelOutputWithPoolingAndCrossAttentions(
927
+ last_hidden_state=sequence_output,
928
+ pooler_output=pooled_output,
929
+ past_key_values=encoder_outputs.past_key_values,
930
+ hidden_states=encoder_outputs.hidden_states,
931
+ attentions=encoder_outputs.attentions,
932
+ cross_attentions=encoder_outputs.cross_attentions,
933
+ )
934
+
935
+
936
+ class StructRoberta(RobertaPreTrainedModel):
937
+ _keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
938
+ _keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
939
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
940
+
941
+ def __init__(self, config):
942
+ super().__init__(config)
943
+
944
+ if config.is_decoder:
945
+ logger.warning(
946
+ "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
947
+ "bi-directional self-attention."
948
+ )
949
+
950
+ self.parser_layers = nn.ModuleList([
951
+ nn.Sequential(Conv1d(config.hidden_size, config.conv_size),
952
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False),
953
+ nn.Tanh()) for i in range(config.n_parser_layers)])
954
+
955
+ self.distance_ff = nn.Sequential(
956
+ Conv1d(config.hidden_size, 2),
957
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
958
+ nn.Linear(config.hidden_size, 1))
959
+
960
+ self.height_ff = nn.Sequential(
961
+ nn.Linear(config.hidden_size, config.hidden_size),
962
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
963
+ nn.Linear(config.hidden_size, 1))
964
+
965
+ n_rel = len(config.relations)
966
+ self._rel_weight = nn.Parameter(torch.zeros((config.num_hidden_layers, config.num_attention_heads, n_rel)))
967
+ self._rel_weight.data.normal_(0, 0.1)
968
+
969
+ self._scaler = nn.Parameter(torch.zeros(2))
970
+
971
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
972
+ self.lm_head = RobertaLMHead(config)
973
+
974
+ self.pad = config.pad_token_id
975
+
976
+ # The LM head weights require special treatment only when they are tied with the word embeddings
977
+ self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
978
+
979
+ # Initialize weights and apply final processing
980
+ self.post_init()
981
+
982
+ def get_output_embeddings(self):
983
+ return self.lm_head.decoder
984
+
985
+ def set_output_embeddings(self, new_embeddings):
986
+ self.lm_head.decoder = new_embeddings
987
+
988
+ @property
989
+ def scaler(self):
990
+ return self._scaler.exp()
991
+
992
+ @property
993
+ def rel_weight(self):
994
+ if self.config.weight_act == 'sigmoid':
995
+ return torch.sigmoid(self._rel_weight)
996
+ elif self.config.weight_act == 'softmax':
997
+ return torch.softmax(self._rel_weight, dim=-1)
998
+
999
+ def compute_block(self, distance, height):
1000
+ """Compute constituents from distance and height."""
1001
+
1002
+ beta_logits = (distance[:, None, :] - height[:, :, None]) * self.scaler[0]
1003
+
1004
+ gamma = torch.sigmoid(-beta_logits)
1005
+ ones = torch.ones_like(gamma)
1006
+
1007
+ block_mask_left = cummin(
1008
+ gamma.tril(-1) + ones.triu(0), reverse=True, max_value=1)
1009
+ block_mask_left = block_mask_left - F.pad(
1010
+ block_mask_left[:, :, :-1], (1, 0), value=0)
1011
+ block_mask_left.tril_(0)
1012
+
1013
+ block_mask_right = cummin(
1014
+ gamma.triu(0) + ones.tril(-1), exclusive=True, max_value=1)
1015
+ block_mask_right = block_mask_right - F.pad(
1016
+ block_mask_right[:, :, 1:], (0, 1), value=0)
1017
+ block_mask_right.triu_(0)
1018
+
1019
+ block_p = block_mask_left[:, :, :, None] * block_mask_right[:, :, None, :]
1020
+ block = cumsum(block_mask_left).tril(0) + cumsum(
1021
+ block_mask_right, reverse=True).triu(1)
1022
+
1023
+ return block_p, block
1024
+
1025
+ def compute_head(self, height):
1026
+ """Estimate head for each constituent."""
1027
+
1028
+ _, length = height.size()
1029
+ head_logits = height * self.scaler[1]
1030
+ index = torch.arange(length, device=height.device)
1031
+
1032
+ mask = (index[:, None, None] <= index[None, None, :]) * (
1033
+ index[None, None, :] <= index[None, :, None])
1034
+ head_logits = head_logits[:, None, None, :].repeat(1, length, length, 1)
1035
+ head_logits.masked_fill_(~mask[None, :, :, :], -1e9)
1036
+
1037
+ head_p = torch.softmax(head_logits, dim=-1)
1038
+
1039
+ return head_p
1040
+
1041
+ def parse(self, x):
1042
+ """Parse input sentence.
1043
+
1044
+ Args:
1045
+ x: input tokens (required).
1046
+ pos: position for each token (optional).
1047
+ Returns:
1048
+ distance: syntactic distance
1049
+ height: syntactic height
1050
+ """
1051
+
1052
+ mask = (x != self.pad)
1053
+ mask_shifted = F.pad(mask[:, 1:], (0, 1), value=0)
1054
+
1055
+ h = self.roberta.embeddings(x)
1056
+ for i in range(self.config.n_parser_layers):
1057
+ h = h.masked_fill(~mask[:, :, None], 0)
1058
+ h = self.parser_layers[i](h)
1059
+
1060
+ height = self.height_ff(h).squeeze(-1)
1061
+ height.masked_fill_(~mask, -1e9)
1062
+
1063
+ distance = self.distance_ff(h).squeeze(-1)
1064
+ distance.masked_fill_(~mask_shifted, 1e9)
1065
+
1066
+ # Calibrating the distance and height to the same level
1067
+ length = distance.size(1)
1068
+ height_max = height[:, None, :].expand(-1, length, -1)
1069
+ height_max = torch.cummax(
1070
+ height_max.triu(0) - torch.ones_like(height_max).tril(-1) * 1e9,
1071
+ dim=-1)[0].triu(0)
1072
+
1073
+ margin_left = torch.relu(
1074
+ F.pad(distance[:, :-1, None], (0, 0, 1, 0), value=1e9) - height_max)
1075
+ margin_right = torch.relu(distance[:, None, :] - height_max)
1076
+ margin = torch.where(margin_left > margin_right, margin_right,
1077
+ margin_left).triu(0)
1078
+
1079
+ margin_mask = torch.stack([mask_shifted] + [mask] * (length - 1), dim=1)
1080
+ margin.masked_fill_(~margin_mask, 0)
1081
+ margin = margin.max()
1082
+
1083
+ distance = distance - margin
1084
+
1085
+ return distance, height
1086
+
1087
+ def generate_mask(self, x, distance, height):
1088
+ """Compute head and cibling distribution for each token."""
1089
+
1090
+ bsz, length = x.size()
1091
+
1092
+ eye = torch.eye(length, device=x.device, dtype=torch.bool)
1093
+ eye = eye[None, :, :].expand((bsz, -1, -1))
1094
+
1095
+ block_p, block = self.compute_block(distance, height)
1096
+ head_p = self.compute_head(height)
1097
+ head = torch.einsum('blij,bijh->blh', block_p, head_p)
1098
+ head = head.masked_fill(eye, 0)
1099
+ child = head.transpose(1, 2)
1100
+ cibling = torch.bmm(head, child).masked_fill(eye, 0)
1101
+
1102
+ rel_list = []
1103
+ if 'head' in self.config.relations:
1104
+ rel_list.append(head)
1105
+ if 'child' in self.config.relations:
1106
+ rel_list.append(child)
1107
+ if 'cibling' in self.config.relations:
1108
+ rel_list.append(cibling)
1109
+
1110
+ rel = torch.stack(rel_list, dim=1)
1111
+
1112
+ rel_weight = self.rel_weight
1113
+
1114
+ dep = torch.einsum('lhr,brij->lbhij', rel_weight, rel)
1115
+ att_mask = dep.reshape(self.config.num_hidden_layers, bsz, self.config.num_attention_heads, length, length)
1116
+
1117
+ return att_mask, cibling, head, block
1118
+
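+ # Shape note on generate_mask (comment added for readability, not upstream code):
+ # `rel` stacks the selected relation matrices into (bsz, n_rel, length, length);
+ # contracting it with `rel_weight` of shape (num_hidden_layers, num_attention_heads,
+ # n_rel) yields `att_mask` of shape (num_hidden_layers, bsz, num_attention_heads,
+ # length, length), i.e. one (length x length) parser mask per layer and per head,
+ # which the encoder forwards to layer i as `parser_att_mask[i]`.
+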
1119
+ def forward(
1120
+ self,
1121
+ input_ids: Optional[torch.LongTensor] = None,
1122
+ attention_mask: Optional[torch.FloatTensor] = None,
1123
+ token_type_ids: Optional[torch.LongTensor] = None,
1124
+ position_ids: Optional[torch.LongTensor] = None,
1125
+ head_mask: Optional[torch.FloatTensor] = None,
1126
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1127
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1128
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1129
+ labels: Optional[torch.LongTensor] = None,
1130
+ output_attentions: Optional[bool] = None,
1131
+ output_hidden_states: Optional[bool] = None,
1132
+ return_dict: Optional[bool] = None,
1133
+ ) -> Union[Tuple, MaskedLMOutput]:
1134
+ r"""
1135
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1136
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1137
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1138
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1139
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1140
+ Used to hide legacy arguments that have been deprecated.
1141
+ """
1142
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1143
+
1144
+ distance, height = self.parse(input_ids)
1145
+ att_mask, cibling, head, block = self.generate_mask(input_ids, distance, height)
1146
+
1147
+ outputs = self.roberta(
1148
+ input_ids,
1149
+ attention_mask=attention_mask,
1150
+ token_type_ids=token_type_ids,
1151
+ position_ids=position_ids,
1152
+ head_mask=head_mask,
1153
+ inputs_embeds=inputs_embeds,
1154
+ encoder_hidden_states=encoder_hidden_states,
1155
+ encoder_attention_mask=encoder_attention_mask,
1156
+ output_attentions=output_attentions,
1157
+ output_hidden_states=output_hidden_states,
1158
+ return_dict=return_dict,
1159
+ parser_att_mask=att_mask,
1160
+ )
1161
+ sequence_output = outputs[0]
1162
+ prediction_scores = self.lm_head(sequence_output)
1163
+
1164
+ masked_lm_loss = None
1165
+ if labels is not None:
1166
+ loss_fct = CrossEntropyLoss()
1167
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1168
+
1169
+ if not return_dict:
1170
+ output = (prediction_scores,) + outputs[2:]
1171
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1172
+
1173
+ return MaskedLMOutput(
1174
+ loss=masked_lm_loss,
1175
+ logits=prediction_scores,
1176
+ hidden_states=outputs.hidden_states,
1177
+ attentions=outputs.attentions,
1178
+ )
1179
+
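+ # Minimal usage sketch (comment only, not part of the original file). Because this
+ # module ships as custom model code with the checkpoint, the masked-LM class is
+ # usually loaded through the Auto classes with `trust_remote_code=True`; the path
+ # below is a placeholder, not a real checkpoint name.
+ #
+ # from transformers import AutoTokenizer, AutoModelForMaskedLM
+ # tok = AutoTokenizer.from_pretrained("path/to/structroberta_checkpoint")
+ # model = AutoModelForMaskedLM.from_pretrained(
+ #     "path/to/structroberta_checkpoint", trust_remote_code=True
+ # )
+ # inputs = tok("The parser induces <mask> structure.", return_tensors="pt")
+ # logits = model(**inputs).logits  # (batch_size, seq_len, vocab_size)
+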
1180
+ class RobertaLMHead(nn.Module):
1181
+ """Roberta Head for masked language modeling."""
1182
+
1183
+ def __init__(self, config):
1184
+ super().__init__()
1185
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1186
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1187
+
1188
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
1189
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1190
+ self.decoder.bias = self.bias
1191
+
1192
+ def forward(self, features, **kwargs):
1193
+ x = self.dense(features)
1194
+ x = gelu(x)
1195
+ x = self.layer_norm(x)
1196
+
1197
+ # project back to size of vocabulary with bias
1198
+ x = self.decoder(x)
1199
+
1200
+ return x
1201
+
1202
+ def _tie_weights(self):
1203
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
1204
+ self.bias = self.decoder.bias
1205
+
1206
+ class StructRobertaForSequenceClassification(RobertaPreTrainedModel):
1207
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1208
+
1209
+ def __init__(self, config):
1210
+ super().__init__(config)
1211
+ self.num_labels = config.num_labels
1212
+ self.config = config
1213
+
1214
+ self.parser_layers = nn.ModuleList([
1215
+ nn.Sequential(Conv1d(config.hidden_size, config.conv_size),
1216
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False),
1217
+ nn.Tanh()) for i in range(config.n_parser_layers)])
1218
+
1219
+ self.distance_ff = nn.Sequential(
1220
+ Conv1d(config.hidden_size, 2),
1221
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
1222
+ nn.Linear(config.hidden_size, 1))
1223
+
1224
+ self.height_ff = nn.Sequential(
1225
+ nn.Linear(config.hidden_size, config.hidden_size),
1226
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
1227
+ nn.Linear(config.hidden_size, 1))
1228
+
1229
+ n_rel = len(config.relations)
1230
+ self._rel_weight = nn.Parameter(torch.zeros((config.num_hidden_layers, config.num_attention_heads, n_rel)))
1231
+ self._rel_weight.data.normal_(0, 0.1)
1232
+
1233
+ self._scaler = nn.Parameter(torch.zeros(2))
1234
+
1235
+ self.pad = config.pad_token_id
1236
+
1237
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
1238
+ self.classifier = RobertaClassificationHead(config)
1239
+
1240
+ # Initialize weights and apply final processing
1241
+ self.post_init()
1242
+
1243
+
1244
+ @property
1245
+ def scaler(self):
1246
+ return self._scaler.exp()
1247
+
1248
+ @property
1249
+ def rel_weight(self):
1250
+ if self.config.weight_act == 'sigmoid':
1251
+ return torch.sigmoid(self._rel_weight)
1252
+ elif self.config.weight_act == 'softmax':
1253
+ return torch.softmax(self._rel_weight, dim=-1)
1254
+
1255
+ def compute_block(self, distance, height):
1256
+ """Compute constituents from distance and height."""
1257
+
1258
+ beta_logits = (distance[:, None, :] - height[:, :, None]) * self.scaler[0]
1259
+
1260
+ gamma = torch.sigmoid(-beta_logits)
1261
+ ones = torch.ones_like(gamma)
1262
+
1263
+ block_mask_left = cummin(
1264
+ gamma.tril(-1) + ones.triu(0), reverse=True, max_value=1)
1265
+ block_mask_left = block_mask_left - F.pad(
1266
+ block_mask_left[:, :, :-1], (1, 0), value=0)
1267
+ block_mask_left.tril_(0)
1268
+
1269
+ block_mask_right = cummin(
1270
+ gamma.triu(0) + ones.tril(-1), exclusive=True, max_value=1)
1271
+ block_mask_right = block_mask_right - F.pad(
1272
+ block_mask_right[:, :, 1:], (0, 1), value=0)
1273
+ block_mask_right.triu_(0)
1274
+
1275
+ block_p = block_mask_left[:, :, :, None] * block_mask_right[:, :, None, :]
1276
+ block = cumsum(block_mask_left).tril(0) + cumsum(
1277
+ block_mask_right, reverse=True).triu(1)
1278
+
1279
+ return block_p, block
1280
+
1281
+ def compute_head(self, height):
1282
+ """Estimate head for each constituent."""
1283
+
1284
+ _, length = height.size()
1285
+ head_logits = height * self.scaler[1]
1286
+ index = torch.arange(length, device=height.device)
1287
+
1288
+ mask = (index[:, None, None] <= index[None, None, :]) * (
1289
+ index[None, None, :] <= index[None, :, None])
1290
+ head_logits = head_logits[:, None, None, :].repeat(1, length, length, 1)
1291
+ head_logits.masked_fill_(~mask[None, :, :, :], -1e9)
1292
+
1293
+ head_p = torch.softmax(head_logits, dim=-1)
1294
+
1295
+ return head_p
1296
+
1297
+ def parse(self, x):
1298
+ """Parse input sentence.
1299
+
1300
+ Args:
1301
+ x: input tokens (required).
1302
+ pos: position for each token (optional).
1303
+ Returns:
1304
+ distance: syntactic distance
1305
+ height: syntactic height
1306
+ """
1307
+
1308
+ mask = (x != self.pad)
1309
+ mask_shifted = F.pad(mask[:, 1:], (0, 1), value=0)
1310
+
1311
+ h = self.roberta.embeddings(x)
1312
+ for i in range(self.config.n_parser_layers):
1313
+ h = h.masked_fill(~mask[:, :, None], 0)
1314
+ h = self.parser_layers[i](h)
1315
+
1316
+ height = self.height_ff(h).squeeze(-1)
1317
+ height.masked_fill_(~mask, -1e9)
1318
+
1319
+ distance = self.distance_ff(h).squeeze(-1)
1320
+ distance.masked_fill_(~mask_shifted, 1e9)
1321
+
1322
+ # Calibrating the distance and height to the same level
1323
+ length = distance.size(1)
1324
+ height_max = height[:, None, :].expand(-1, length, -1)
1325
+ height_max = torch.cummax(
1326
+ height_max.triu(0) - torch.ones_like(height_max).tril(-1) * 1e9,
1327
+ dim=-1)[0].triu(0)
1328
+
1329
+ margin_left = torch.relu(
1330
+ F.pad(distance[:, :-1, None], (0, 0, 1, 0), value=1e9) - height_max)
1331
+ margin_right = torch.relu(distance[:, None, :] - height_max)
1332
+ margin = torch.where(margin_left > margin_right, margin_right,
1333
+ margin_left).triu(0)
1334
+
1335
+ margin_mask = torch.stack([mask_shifted] + [mask] * (length - 1), dim=1)
1336
+ margin.masked_fill_(~margin_mask, 0)
1337
+ margin = margin.max()
1338
+
1339
+ distance = distance - margin
1340
+
1341
+ return distance, height
1342
+
1343
+ def generate_mask(self, x, distance, height):
1344
+ """Compute head and cibling distribution for each token."""
1345
+
1346
+ bsz, length = x.size()
1347
+
1348
+ eye = torch.eye(length, device=x.device, dtype=torch.bool)
1349
+ eye = eye[None, :, :].expand((bsz, -1, -1))
1350
+
1351
+ block_p, block = self.compute_block(distance, height)
1352
+ head_p = self.compute_head(height)
1353
+ head = torch.einsum('blij,bijh->blh', block_p, head_p)
1354
+ head = head.masked_fill(eye, 0)
1355
+ child = head.transpose(1, 2)
1356
+ cibling = torch.bmm(head, child).masked_fill(eye, 0)
1357
+
1358
+ rel_list = []
1359
+ if 'head' in self.config.relations:
1360
+ rel_list.append(head)
1361
+ if 'child' in self.config.relations:
1362
+ rel_list.append(child)
1363
+ if 'cibling' in self.config.relations:
1364
+ rel_list.append(cibling)
1365
+
1366
+ rel = torch.stack(rel_list, dim=1)
1367
+
1368
+ rel_weight = self.rel_weight
1369
+
1370
+ dep = torch.einsum('lhr,brij->lbhij', rel_weight, rel)
1371
+ att_mask = dep.reshape(self.config.num_hidden_layers, bsz, self.config.num_attention_heads, length, length)
1372
+
1373
+ return att_mask, cibling, head, block
1374
+
1375
+ def forward(
1376
+ self,
1377
+ input_ids: Optional[torch.LongTensor] = None,
1378
+ attention_mask: Optional[torch.FloatTensor] = None,
1379
+ token_type_ids: Optional[torch.LongTensor] = None,
1380
+ position_ids: Optional[torch.LongTensor] = None,
1381
+ head_mask: Optional[torch.FloatTensor] = None,
1382
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1383
+ labels: Optional[torch.LongTensor] = None,
1384
+ output_attentions: Optional[bool] = None,
1385
+ output_hidden_states: Optional[bool] = None,
1386
+ return_dict: Optional[bool] = None,
1387
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1388
+ r"""
1389
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1390
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1391
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1392
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1393
+ """
1394
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1395
+
1396
+ distance, height = self.parse(input_ids)
1397
+ att_mask, cibling, head, block = self.generate_mask(input_ids, distance, height)
1398
+
1399
+ outputs = self.roberta(
1400
+ input_ids,
1401
+ attention_mask=attention_mask,
1402
+ token_type_ids=token_type_ids,
1403
+ position_ids=position_ids,
1404
+ head_mask=head_mask,
1405
+ inputs_embeds=inputs_embeds,
1406
+ output_attentions=output_attentions,
1407
+ output_hidden_states=output_hidden_states,
1408
+ return_dict=return_dict,
1409
+ parser_att_mask=att_mask,
1410
+ )
1411
+
1412
+ sequence_output = outputs[0]
1413
+ logits = self.classifier(sequence_output)
1414
+
1415
+ loss = None
1416
+ if labels is not None:
1417
+ if self.config.problem_type is None:
1418
+ if self.num_labels == 1:
1419
+ self.config.problem_type = "regression"
1420
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1421
+ self.config.problem_type = "single_label_classification"
1422
+ else:
1423
+ self.config.problem_type = "multi_label_classification"
1424
+
1425
+ if self.config.problem_type == "regression":
1426
+ loss_fct = MSELoss()
1427
+ if self.num_labels == 1:
1428
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1429
+ else:
1430
+ loss = loss_fct(logits, labels)
1431
+ elif self.config.problem_type == "single_label_classification":
1432
+ loss_fct = CrossEntropyLoss()
1433
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1434
+ elif self.config.problem_type == "multi_label_classification":
1435
+ loss_fct = BCEWithLogitsLoss()
1436
+ loss = loss_fct(logits, labels)
1437
+
1438
+ if not return_dict:
1439
+ output = (logits,) + outputs[2:]
1440
+ return ((loss,) + output) if loss is not None else output
1441
+
1442
+ return SequenceClassifierOutput(
1443
+ loss=loss,
1444
+ logits=logits,
1445
+ hidden_states=outputs.hidden_states,
1446
+ attentions=outputs.attentions,
1447
+ )
1448
+
1449
+
1450
+ class RobertaClassificationHead(nn.Module):
1451
+ """Head for sentence-level classification tasks."""
1452
+
1453
+ def __init__(self, config):
1454
+ super().__init__()
1455
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1456
+ classifier_dropout = (
1457
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1458
+ )
1459
+ self.dropout = nn.Dropout(classifier_dropout)
1460
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1461
+
1462
+ def forward(self, features, **kwargs):
1463
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1464
+ x = self.dropout(x)
1465
+ x = self.dense(x)
1466
+ x = torch.tanh(x)
1467
+ x = self.dropout(x)
1468
+ x = self.out_proj(x)
1469
+ return x
1470
+
1471
+
1472
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1473
+ """
1474
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1475
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1476
+
1477
+ Args:
1478
+ input_ids: torch.Tensor
1479
+
1480
+ Returns: torch.Tensor
1481
+ """
1482
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1483
+ mask = input_ids.ne(padding_idx).int()
1484
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1485
+ return incremental_indices.long() + padding_idx
1486
+
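+ # Worked example for create_position_ids_from_input_ids (comment only): with
+ # padding_idx = 1 and input_ids = [[0, 31414, 232, 2, 1, 1]], the mask is
+ # [1, 1, 1, 1, 0, 0], its cumulative sum is [1, 2, 3, 4, 4, 4], masking gives
+ # [1, 2, 3, 4, 0, 0], and adding padding_idx yields [2, 3, 4, 5, 1, 1]: real
+ # tokens get positions starting at padding_idx + 1, pad tokens keep padding_idx.
+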
1487
+
1488
+ def cumprod(x, reverse=False, exclusive=False):
1489
+ """cumulative product."""
1490
+ if reverse:
1491
+ x = x.flip([-1])
1492
+
1493
+ if exclusive:
1494
+ x = F.pad(x[:, :, :-1], (1, 0), value=1)
1495
+
1496
+ cx = x.cumprod(-1)
1497
+
1498
+ if reverse:
1499
+ cx = cx.flip([-1])
1500
+ return cx
1501
+
1502
+
1503
+ def cumsum(x, reverse=False, exclusive=False):
1504
+ """cumulative sum."""
1505
+ bsz, _, length = x.size()
1506
+ device = x.device
1507
+ if reverse:
1508
+ if exclusive:
1509
+ w = torch.ones([bsz, length, length], device=device).tril(-1)
1510
+ else:
1511
+ w = torch.ones([bsz, length, length], device=device).tril(0)
1512
+ cx = torch.bmm(x, w)
1513
+ else:
1514
+ if exclusive:
1515
+ w = torch.ones([bsz, length, length], device=device).triu(1)
1516
+ else:
1517
+ w = torch.ones([bsz, length, length], device=device).triu(0)
1518
+ cx = torch.bmm(x, w)
1519
+ return cx
1520
+
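+ # Note on cumsum above (comment only): the cumulative sum is realised as a batched
+ # matmul with a triangular matrix of ones. With w = triu(0), cx[b, i, j] equals
+ # sum_{k <= j} x[b, i, k] (an inclusive prefix sum); the reverse branch uses
+ # tril(0) and gives a suffix sum, and the exclusive variants shift the triangle
+ # off the diagonal. E.g. x = [[[1., 2., 3.]]] gives cumsum(x) = [[[1., 3., 6.]]]
+ # and cumsum(x, reverse=True) = [[[6., 5., 3.]]].
+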
1521
+
1522
+ def cummin(x, reverse=False, exclusive=False, max_value=1e9):
1523
+ """cumulative min."""
1524
+ if reverse:
1525
+ if exclusive:
1526
+ x = F.pad(x[:, :, 1:], (0, 1), value=max_value)
1527
+ x = x.flip([-1]).cummin(-1)[0].flip([-1])
1528
+ else:
1529
+ if exclusive:
1530
+ x = F.pad(x[:, :, :-1], (1, 0), value=max_value)
1531
+ x = x.cummin(-1)[0]
1532
+ return x
1533
+
finetune/cola/predict_results.txt ADDED
@@ -0,0 +1,1020 @@
1
+ index prediction
2
+ 0 1
3
+ 1 1
4
+ 2 1
5
+ 3 1
6
+ 4 1
7
+ 5 1
8
+ 6 0
9
+ 7 1
10
+ 8 1
11
+ 9 1
12
+ 10 1
13
+ 11 1
14
+ 12 1
15
+ 13 0
16
+ 14 1
17
+ 15 1
18
+ 16 1
19
+ 17 1
20
+ 18 1
21
+ 19 1
22
+ 20 0
23
+ 21 1
24
+ 22 1
25
+ 23 1
26
+ 24 1
27
+ 25 1
28
+ 26 1
29
+ 27 1
30
+ 28 0
31
+ 29 0
32
+ 30 1
33
+ 31 1
34
+ 32 1
35
+ 33 1
36
+ 34 1
37
+ 35 1
38
+ 36 1
39
+ 37 1
40
+ 38 1
41
+ 39 1
42
+ 40 1
43
+ 41 1
44
+ 42 1
45
+ 43 1
46
+ 44 1
47
+ 45 0
48
+ 46 1
49
+ 47 1
50
+ 48 1
51
+ 49 1
52
+ 50 1
53
+ 51 1
54
+ 52 1
55
+ 53 1
56
+ 54 1
57
+ 55 1
58
+ 56 1
59
+ 57 1
60
+ 58 1
61
+ 59 0
62
+ 60 0
63
+ 61 1
64
+ 62 1
65
+ 63 1
66
+ 64 1
67
+ 65 0
68
+ 66 1
69
+ 67 1
70
+ 68 1
71
+ 69 1
72
+ 70 1
73
+ 71 1
74
+ 72 1
75
+ 73 1
76
+ 74 1
77
+ 75 1
78
+ 76 1
79
+ 77 1
80
+ 78 1
81
+ 79 1
82
+ 80 1
83
+ 81 1
84
+ 82 1
85
+ 83 1
86
+ 84 1
87
+ 85 1
88
+ 86 0
89
+ 87 1
90
+ 88 1
91
+ 89 1
92
+ 90 1
93
+ 91 1
94
+ 92 1
95
+ 93 1
96
+ 94 1
97
+ 95 1
98
+ 96 1
99
+ 97 1
100
+ 98 1
101
+ 99 1
102
+ 100 1
103
+ 101 1
104
+ 102 1
105
+ 103 1
106
+ 104 1
107
+ 105 1
108
+ 106 1
109
+ 107 1
110
+ 108 1
111
+ 109 0
112
+ 110 1
113
+ 111 1
114
+ 112 1
115
+ 113 1
116
+ 114 1
117
+ 115 1
118
+ 116 1
119
+ 117 1
120
+ 118 1
121
+ 119 1
122
+ 120 1
123
+ 121 1
124
+ 122 1
125
+ 123 1
126
+ 124 1
127
+ 125 1
128
+ 126 1
129
+ 127 1
130
+ 128 1
131
+ 129 1
132
+ 130 1
133
+ 131 1
134
+ 132 1
135
+ 133 1
136
+ 134 1
137
+ 135 1
138
+ 136 1
139
+ 137 1
140
+ 138 1
141
+ 139 1
142
+ 140 1
143
+ 141 1
144
+ 142 1
145
+ 143 1
146
+ 144 1
147
+ 145 1
148
+ 146 1
149
+ 147 1
150
+ 148 1
151
+ 149 1
152
+ 150 1
153
+ 151 1
154
+ 152 1
155
+ 153 1
156
+ 154 1
157
+ 155 1
158
+ 156 1
159
+ 157 1
160
+ 158 1
161
+ 159 1
162
+ 160 1
163
+ 161 1
164
+ 162 0
165
+ 163 1
166
+ 164 1
167
+ 165 1
168
+ 166 0
169
+ 167 1
170
+ 168 0
171
+ 169 0
172
+ 170 1
173
+ 171 0
174
+ 172 1
175
+ 173 1
176
+ 174 1
177
+ 175 1
178
+ 176 1
179
+ 177 1
180
+ 178 1
181
+ 179 1
182
+ 180 1
183
+ 181 1
184
+ 182 1
185
+ 183 1
186
+ 184 1
187
+ 185 1
188
+ 186 1
189
+ 187 1
190
+ 188 1
191
+ 189 1
192
+ 190 0
193
+ 191 1
194
+ 192 1
195
+ 193 1
196
+ 194 1
197
+ 195 1
198
+ 196 0
199
+ 197 0
200
+ 198 1
201
+ 199 1
202
+ 200 1
203
+ 201 1
204
+ 202 1
205
+ 203 1
206
+ 204 1
207
+ 205 1
208
+ 206 1
209
+ 207 1
210
+ 208 1
211
+ 209 1
212
+ 210 1
213
+ 211 1
214
+ 212 1
215
+ 213 1
216
+ 214 1
217
+ 215 1
218
+ 216 1
219
+ 217 1
220
+ 218 1
221
+ 219 1
222
+ 220 1
223
+ 221 1
224
+ 222 1
225
+ 223 1
226
+ 224 1
227
+ 225 1
228
+ 226 1
229
+ 227 1
230
+ 228 1
231
+ 229 1
232
+ 230 1
233
+ 231 1
234
+ 232 0
235
+ 233 1
236
+ 234 1
237
+ 235 1
238
+ 236 1
239
+ 237 1
240
+ 238 1
241
+ 239 1
242
+ 240 1
243
+ 241 1
244
+ 242 1
245
+ 243 1
246
+ 244 1
247
+ 245 1
248
+ 246 1
249
+ 247 1
250
+ 248 1
251
+ 249 1
252
+ 250 1
253
+ 251 0
254
+ 252 1
255
+ 253 1
256
+ 254 1
257
+ 255 1
258
+ 256 1
259
+ 257 1
260
+ 258 1
261
+ 259 1
262
+ 260 1
263
+ 261 1
264
+ 262 0
265
+ 263 1
266
+ 264 1
267
+ 265 1
268
+ 266 0
269
+ 267 1
270
+ 268 1
271
+ 269 1
272
+ 270 1
273
+ 271 1
274
+ 272 1
275
+ 273 1
276
+ 274 1
277
+ 275 1
278
+ 276 0
279
+ 277 1
280
+ 278 1
281
+ 279 1
282
+ 280 1
283
+ 281 1
284
+ 282 1
285
+ 283 1
286
+ 284 1
287
+ 285 1
288
+ 286 0
289
+ 287 1
290
+ 288 1
291
+ 289 1
292
+ 290 1
293
+ 291 1
294
+ 292 1
295
+ 293 0
296
+ 294 1
297
+ 295 1
298
+ 296 0
299
+ 297 1
300
+ 298 1
301
+ 299 1
302
+ 300 1
303
+ 301 1
304
+ 302 1
305
+ 303 1
306
+ 304 1
307
+ 305 1
308
+ 306 1
309
+ 307 1
310
+ 308 1
311
+ 309 1
312
+ 310 1
313
+ 311 1
314
+ 312 1
315
+ 313 1
316
+ 314 1
317
+ 315 1
318
+ 316 1
319
+ 317 1
320
+ 318 1
321
+ 319 1
322
+ 320 1
323
+ 321 1
324
+ 322 1
325
+ 323 0
326
+ 324 1
327
+ 325 1
328
+ 326 1
329
+ 327 1
330
+ 328 1
331
+ 329 1
332
+ 330 1
333
+ 331 1
334
+ 332 1
335
+ 333 1
336
+ 334 1
337
+ 335 1
338
+ 336 1
339
+ 337 1
340
+ 338 1
341
+ 339 1
342
+ 340 1
343
+ 341 0
344
+ 342 1
345
+ 343 1
346
+ 344 1
347
+ 345 1
348
+ 346 1
349
+ 347 1
350
+ 348 1
351
+ 349 1
352
+ 350 1
353
+ 351 1
354
+ 352 1
355
+ 353 1
356
+ 354 1
357
+ 355 1
358
+ 356 1
359
+ 357 1
360
+ 358 1
361
+ 359 1
362
+ 360 1
363
+ 361 1
364
+ 362 1
365
+ 363 1
366
+ 364 1
367
+ 365 1
368
+ 366 1
369
+ 367 1
370
+ 368 1
371
+ 369 1
372
+ 370 1
373
+ 371 1
374
+ 372 1
375
+ 373 1
376
+ 374 1
377
+ 375 1
378
+ 376 1
379
+ 377 1
380
+ 378 1
381
+ 379 1
382
+ 380 1
383
+ 381 1
384
+ 382 1
385
+ 383 1
386
+ 384 1
387
+ 385 1
388
+ 386 1
389
+ 387 1
390
+ 388 1
391
+ 389 1
392
+ 390 1
393
+ 391 1
394
+ 392 0
395
+ 393 1
396
+ 394 1
397
+ 395 1
398
+ 396 1
399
+ 397 1
400
+ 398 1
401
+ 399 1
402
+ 400 1
403
+ 401 1
404
+ 402 1
405
+ 403 1
406
+ 404 1
407
+ 405 1
408
+ 406 1
409
+ 407 1
410
+ 408 1
411
+ 409 1
412
+ 410 1
413
+ 411 1
414
+ 412 1
415
+ 413 1
416
+ 414 1
417
+ 415 1
418
+ 416 1
419
+ 417 1
420
+ 418 1
421
+ 419 1
422
+ 420 1
423
+ 421 1
424
+ 422 1
425
+ 423 1
426
+ 424 1
427
+ 425 1
428
+ 426 1
429
+ 427 1
430
+ 428 1
431
+ 429 1
432
+ 430 1
433
+ 431 1
434
+ 432 1
435
+ 433 1
436
+ 434 1
437
+ 435 1
438
+ 436 1
439
+ 437 1
440
+ 438 0
441
+ 439 1
442
+ 440 1
443
+ 441 1
444
+ 442 1
445
+ 443 1
446
+ 444 1
447
+ 445 1
448
+ 446 1
449
+ 447 1
450
+ 448 1
451
+ 449 1
452
+ 450 1
453
+ 451 1
454
+ 452 1
455
+ 453 1
456
+ 454 1
457
+ 455 1
458
+ 456 1
459
+ 457 0
460
+ 458 0
461
+ 459 1
462
+ 460 1
463
+ 461 1
464
+ 462 1
465
+ 463 1
466
+ 464 1
467
+ 465 0
468
+ 466 1
469
+ 467 1
470
+ 468 1
471
+ 469 1
472
+ 470 1
473
+ 471 1
474
+ 472 1
475
+ 473 1
476
+ 474 1
477
+ 475 1
478
+ 476 1
479
+ 477 1
480
+ 478 0
481
+ 479 1
482
+ 480 1
483
+ 481 1
484
+ 482 1
485
+ 483 1
486
+ 484 1
487
+ 485 1
488
+ 486 1
489
+ 487 1
490
+ 488 1
491
+ 489 1
492
+ 490 1
493
+ 491 0
494
+ 492 1
495
+ 493 0
496
+ 494 1
497
+ 495 1
498
+ 496 1
499
+ 497 0
500
+ 498 1
501
+ 499 1
502
+ 500 1
503
+ 501 1
504
+ 502 1
505
+ 503 1
506
+ 504 0
507
+ 505 1
508
+ 506 0
509
+ 507 0
510
+ 508 0
511
+ 509 1
512
+ 510 1
513
+ 511 1
514
+ 512 1
515
+ 513 1
516
+ 514 1
517
+ 515 1
518
+ 516 1
519
+ 517 1
520
+ 518 1
521
+ 519 1
522
+ 520 0
523
+ 521 1
524
+ 522 1
525
+ 523 1
526
+ 524 1
527
+ 525 1
528
+ 526 1
529
+ 527 1
530
+ 528 1
531
+ 529 1
532
+ 530 1
533
+ 531 1
534
+ 532 1
535
+ 533 1
536
+ 534 1
537
+ 535 1
538
+ 536 1
539
+ 537 1
540
+ 538 1
541
+ 539 1
542
+ 540 1
543
+ 541 1
544
+ 542 1
545
+ 543 1
546
+ 544 1
547
+ 545 1
548
+ 546 1
549
+ 547 1
550
+ 548 1
551
+ 549 1
552
+ 550 1
553
+ 551 1
554
+ 552 1
555
+ 553 1
556
+ 554 1
557
+ 555 1
558
+ 556 1
559
+ 557 1
560
+ 558 1
561
+ 559 1
562
+ 560 1
563
+ 561 1
564
+ 562 1
565
+ 563 1
566
+ 564 0
567
+ 565 1
568
+ 566 1
569
+ 567 1
570
+ 568 1
571
+ 569 1
572
+ 570 1
573
+ 571 1
574
+ 572 1
575
+ 573 1
576
+ 574 1
577
+ 575 1
578
+ 576 1
579
+ 577 1
580
+ 578 1
581
+ 579 1
582
+ 580 1
583
+ 581 1
584
+ 582 1
585
+ 583 1
586
+ 584 1
587
+ 585 1
588
+ 586 1
589
+ 587 1
590
+ 588 1
591
+ 589 1
592
+ 590 1
593
+ 591 1
594
+ 592 1
595
+ 593 0
596
+ 594 0
597
+ 595 1
598
+ 596 0
599
+ 597 1
600
+ 598 0
601
+ 599 1
602
+ 600 0
603
+ 601 1
604
+ 602 0
605
+ 603 1
606
+ 604 0
607
+ 605 0
608
+ 606 1
609
+ 607 1
610
+ 608 1
611
+ 609 1
612
+ 610 1
613
+ 611 1
614
+ 612 0
615
+ 613 1
616
+ 614 1
617
+ 615 1
618
+ 616 1
619
+ 617 1
620
+ 618 1
621
+ 619 1
622
+ 620 1
623
+ 621 0
624
+ 622 1
625
+ 623 1
626
+ 624 1
627
+ 625 1
628
+ 626 0
629
+ 627 1
630
+ 628 1
631
+ 629 1
632
+ 630 1
633
+ 631 1
634
+ 632 1
635
+ 633 1
636
+ 634 1
637
+ 635 1
638
+ 636 1
639
+ 637 1
640
+ 638 1
641
+ 639 1
642
+ 640 1
643
+ 641 1
644
+ 642 1
645
+ 643 1
646
+ 644 1
647
+ 645 1
648
+ 646 1
649
+ 647 1
650
+ 648 1
651
+ 649 1
652
+ 650 1
653
+ 651 1
654
+ 652 1
655
+ 653 1
656
+ 654 1
657
+ 655 1
658
+ 656 1
659
+ 657 1
660
+ 658 1
661
+ 659 1
662
+ 660 1
663
+ 661 1
664
+ 662 1
665
+ 663 1
666
+ 664 1
667
+ 665 1
668
+ 666 1
669
+ 667 1
670
+ 668 1
671
+ 669 1
672
+ 670 1
673
+ 671 1
674
+ 672 1
675
+ 673 1
676
+ 674 1
677
+ 675 1
678
+ 676 1
679
+ 677 1
680
+ 678 1
681
+ 679 1
682
+ 680 1
683
+ 681 1
684
+ 682 0
685
+ 683 1
686
+ 684 1
687
+ 685 1
688
+ 686 1
689
+ 687 1
690
+ 688 1
691
+ 689 1
692
+ 690 1
693
+ 691 1
694
+ 692 1
695
+ 693 1
696
+ 694 1
697
+ 695 1
698
+ 696 1
699
+ 697 1
700
+ 698 1
701
+ 699 1
702
+ 700 1
703
+ 701 1
704
+ 702 1
705
+ 703 1
706
+ 704 1
707
+ 705 1
708
+ 706 1
709
+ 707 1
710
+ 708 1
711
+ 709 1
712
+ 710 1
713
+ 711 1
714
+ 712 1
715
+ 713 1
716
+ 714 1
717
+ 715 1
718
+ 716 1
719
+ 717 1
720
+ 718 1
721
+ 719 1
722
+ 720 1
723
+ 721 1
724
+ 722 1
725
+ 723 1
726
+ 724 1
727
+ 725 1
728
+ 726 1
729
+ 727 1
730
+ 728 1
731
+ 729 1
732
+ 730 1
733
+ 731 1
734
+ 732 1
735
+ 733 1
736
+ 734 1
737
+ 735 1
738
+ 736 1
739
+ 737 1
740
+ 738 1
741
+ 739 1
742
+ 740 1
743
+ 741 1
744
+ 742 1
745
+ 743 1
746
+ 744 1
747
+ 745 1
748
+ 746 1
749
+ 747 1
750
+ 748 1
751
+ 749 1
752
+ 750 1
753
+ 751 1
754
+ 752 1
755
+ 753 1
756
+ 754 1
757
+ 755 1
758
+ 756 1
759
+ 757 1
760
+ 758 1
761
+ 759 1
762
+ 760 1
763
+ 761 1
764
+ 762 1
765
+ 763 1
766
+ 764 1
767
+ 765 1
768
+ 766 1
769
+ 767 1
770
+ 768 1
771
+ 769 1
772
+ 770 1
773
+ 771 1
774
+ 772 1
775
+ 773 1
776
+ 774 1
777
+ 775 1
778
+ 776 1
779
+ 777 1
780
+ 778 1
781
+ 779 1
782
+ 780 1
783
+ 781 1
784
+ 782 1
785
+ 783 1
786
+ 784 1
787
+ 785 1
788
+ 786 1
789
+ 787 1
790
+ 788 1
791
+ 789 1
792
+ 790 1
793
+ 791 1
794
+ 792 1
795
+ 793 1
796
+ 794 1
797
+ 795 1
798
+ 796 1
799
+ 797 1
800
+ 798 1
801
+ 799 1
802
+ 800 1
803
+ 801 1
804
+ 802 1
805
+ 803 1
806
+ 804 1
807
+ 805 1
808
+ 806 1
809
+ 807 1
810
+ 808 1
811
+ 809 0
812
+ 810 1
813
+ 811 1
814
+ 812 1
815
+ 813 1
816
+ 814 1
817
+ 815 1
818
+ 816 1
819
+ 817 1
820
+ 818 1
821
+ 819 1
822
+ 820 1
823
+ 821 1
824
+ 822 1
825
+ 823 1
826
+ 824 1
827
+ 825 1
828
+ 826 1
829
+ 827 0
830
+ 828 1
831
+ 829 1
832
+ 830 1
833
+ 831 1
834
+ 832 1
835
+ 833 1
836
+ 834 1
837
+ 835 1
838
+ 836 1
839
+ 837 1
840
+ 838 1
841
+ 839 1
842
+ 840 1
843
+ 841 1
844
+ 842 1
845
+ 843 1
846
+ 844 0
847
+ 845 0
848
+ 846 1
849
+ 847 1
850
+ 848 1
851
+ 849 1
852
+ 850 1
853
+ 851 1
854
+ 852 1
855
+ 853 1
856
+ 854 1
857
+ 855 1
858
+ 856 1
859
+ 857 1
860
+ 858 1
861
+ 859 1
862
+ 860 1
863
+ 861 1
864
+ 862 1
865
+ 863 1
866
+ 864 1
867
+ 865 1
868
+ 866 1
869
+ 867 1
870
+ 868 1
871
+ 869 1
872
+ 870 1
873
+ 871 1
874
+ 872 0
875
+ 873 1
876
+ 874 1
877
+ 875 1
878
+ 876 1
879
+ 877 1
880
+ 878 1
881
+ 879 1
882
+ 880 1
883
+ 881 1
884
+ 882 1
885
+ 883 1
886
+ 884 1
887
+ 885 1
888
+ 886 1
889
+ 887 0
890
+ 888 0
891
+ 889 1
892
+ 890 1
893
+ 891 1
894
+ 892 1
895
+ 893 1
896
+ 894 1
897
+ 895 1
898
+ 896 1
899
+ 897 1
900
+ 898 0
901
+ 899 1
902
+ 900 1
903
+ 901 1
904
+ 902 1
905
+ 903 0
906
+ 904 1
907
+ 905 1
908
+ 906 1
909
+ 907 1
910
+ 908 0
911
+ 909 0
912
+ 910 1
913
+ 911 1
914
+ 912 1
915
+ 913 1
916
+ 914 1
917
+ 915 1
918
+ 916 1
919
+ 917 1
920
+ 918 1
921
+ 919 1
922
+ 920 1
923
+ 921 1
924
+ 922 1
925
+ 923 1
926
+ 924 1
927
+ 925 1
928
+ 926 1
929
+ 927 1
930
+ 928 1
931
+ 929 1
932
+ 930 1
933
+ 931 1
934
+ 932 1
935
+ 933 1
936
+ 934 1
937
+ 935 1
938
+ 936 1
939
+ 937 1
940
+ 938 1
941
+ 939 0
942
+ 940 1
943
+ 941 1
944
+ 942 1
945
+ 943 0
946
+ 944 0
947
+ 945 1
948
+ 946 1
949
+ 947 1
950
+ 948 1
951
+ 949 1
952
+ 950 0
953
+ 951 1
954
+ 952 1
955
+ 953 1
956
+ 954 1
957
+ 955 1
958
+ 956 1
959
+ 957 1
960
+ 958 1
961
+ 959 1
962
+ 960 1
963
+ 961 1
964
+ 962 1
965
+ 963 1
966
+ 964 1
967
+ 965 1
968
+ 966 1
969
+ 967 1
970
+ 968 1
971
+ 969 1
972
+ 970 1
973
+ 971 0
974
+ 972 1
975
+ 973 1
976
+ 974 1
977
+ 975 1
978
+ 976 1
979
+ 977 1
980
+ 978 0
981
+ 979 1
982
+ 980 1
983
+ 981 1
984
+ 982 0
985
+ 983 1
986
+ 984 1
987
+ 985 1
988
+ 986 1
989
+ 987 1
990
+ 988 1
991
+ 989 1
992
+ 990 1
993
+ 991 1
994
+ 992 0
995
+ 993 1
996
+ 994 1
997
+ 995 0
998
+ 996 1
999
+ 997 1
1000
+ 998 1
1001
+ 999 1
1002
+ 1000 1
1003
+ 1001 1
1004
+ 1002 1
1005
+ 1003 1
1006
+ 1004 1
1007
+ 1005 1
1008
+ 1006 1
1009
+ 1007 0
1010
+ 1008 1
1011
+ 1009 1
1012
+ 1010 1
1013
+ 1011 1
1014
+ 1012 1
1015
+ 1013 1
1016
+ 1014 1
1017
+ 1015 1
1018
+ 1016 1
1019
+ 1017 1
1020
+ 1018 1
finetune/cola/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d9a6c70a9395b75d78bc51261cc3c0534fcdec8677bda5d5c2f47fa9d229859
3
+ size 577068929
finetune/cola/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
finetune/cola/tokenizer_config.json ADDED
@@ -0,0 +1,65 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "bos_token": {
4
+ "__type": "AddedToken",
5
+ "content": "<s>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false
10
+ },
11
+ "cls_token": {
12
+ "__type": "AddedToken",
13
+ "content": "<s>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false
18
+ },
19
+ "eos_token": {
20
+ "__type": "AddedToken",
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false
26
+ },
27
+ "errors": "replace",
28
+ "mask_token": {
29
+ "__type": "AddedToken",
30
+ "content": "<mask>",
31
+ "lstrip": true,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ },
36
+ "model_max_length": 512,
37
+ "name_or_path": "final_models/glue_models/structroberta_s2_50ep/",
38
+ "pad_token": {
39
+ "__type": "AddedToken",
40
+ "content": "<pad>",
41
+ "lstrip": false,
42
+ "normalized": true,
43
+ "rstrip": false,
44
+ "single_word": false
45
+ },
46
+ "sep_token": {
47
+ "__type": "AddedToken",
48
+ "content": "</s>",
49
+ "lstrip": false,
50
+ "normalized": true,
51
+ "rstrip": false,
52
+ "single_word": false
53
+ },
54
+ "special_tokens_map_file": null,
55
+ "tokenizer_class": "RobertaTokenizer",
56
+ "trim_offsets": true,
57
+ "unk_token": {
58
+ "__type": "AddedToken",
59
+ "content": "<unk>",
60
+ "lstrip": false,
61
+ "normalized": true,
62
+ "rstrip": false,
63
+ "single_word": false
64
+ }
65
+ }
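The tokenizer shipped with each finetuned folder is a standard byte-level BPE RobertaTokenizer (vocab.json plus merges.txt), with the mask token declared lstrip=True so "<mask>" still matches when preceded by a space. A minimal loading sketch, assuming the checkpoint folder is available locally (the path is a placeholder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("finetune/cola")  # placeholder local path

enc = tok("The cat sat on the mat.")
print(enc["input_ids"])               # ids bounded by <s> ... </s>; model_max_length is 512
print(tok.decode(enc["input_ids"]))   # "<s>The cat sat on the mat.</s>"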
finetune/cola/train_results.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "train_loss": 0.22346576005220414,
4
+ "train_runtime": 448.112,
5
+ "train_samples": 8164,
6
+ "train_samples_per_second": 182.187,
7
+ "train_steps_per_second": 2.856
8
+ }
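A quick arithmetic check on these numbers: 8,164 training samples over 10 epochs in 448.112 s gives 8164 × 10 / 448.112 ≈ 182.19 samples/s, matching train_samples_per_second above.

# Sanity check: throughput = train_samples * num_epochs / train_runtime.
train_samples, num_epochs, train_runtime = 8164, 10.0, 448.112
print(train_samples * num_epochs / train_runtime)  # ~182.19 (reported: 182.187)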
finetune/cola/trainer_state.json ADDED
@@ -0,0 +1,103 @@
1
+ {
2
+ "best_metric": 0.8218181818181818,
3
+ "best_model_checkpoint": "final_models/glue_models/structroberta_s2_50ep//finetune/cola/checkpoint-200",
4
+ "epoch": 10.0,
5
+ "global_step": 1280,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.56,
12
+ "eval_accuracy": 0.7114818692207336,
13
+ "eval_f1": 0.8218181818181818,
14
+ "eval_loss": 0.5872910022735596,
15
+ "eval_mcc": 0.18948085721928823,
16
+ "eval_runtime": 2.1539,
17
+ "eval_samples_per_second": 473.102,
18
+ "eval_steps_per_second": 59.428,
19
+ "step": 200
20
+ },
21
+ {
22
+ "epoch": 3.12,
23
+ "eval_accuracy": 0.7173699736595154,
24
+ "eval_f1": 0.8132295719844358,
25
+ "eval_loss": 0.7240995168685913,
26
+ "eval_mcc": 0.26281332883560804,
27
+ "eval_runtime": 2.1671,
28
+ "eval_samples_per_second": 470.217,
29
+ "eval_steps_per_second": 59.066,
30
+ "step": 400
31
+ },
32
+ {
33
+ "epoch": 3.91,
34
+ "learning_rate": 3.0468750000000002e-05,
35
+ "loss": 0.4445,
36
+ "step": 500
37
+ },
38
+ {
39
+ "epoch": 4.69,
40
+ "eval_accuracy": 0.707556426525116,
41
+ "eval_f1": 0.807741935483871,
42
+ "eval_loss": 0.9681539535522461,
43
+ "eval_mcc": 0.23051552905227268,
44
+ "eval_runtime": 2.1721,
45
+ "eval_samples_per_second": 469.126,
46
+ "eval_steps_per_second": 58.929,
47
+ "step": 600
48
+ },
49
+ {
50
+ "epoch": 6.25,
51
+ "eval_accuracy": 0.7242394685745239,
52
+ "eval_f1": 0.8192926045016078,
53
+ "eval_loss": 1.2346603870391846,
54
+ "eval_mcc": 0.27596976231470044,
55
+ "eval_runtime": 2.1584,
56
+ "eval_samples_per_second": 472.114,
57
+ "eval_steps_per_second": 59.304,
58
+ "step": 800
59
+ },
60
+ {
61
+ "epoch": 7.81,
62
+ "learning_rate": 1.09375e-05,
63
+ "loss": 0.1091,
64
+ "step": 1000
65
+ },
66
+ {
67
+ "epoch": 7.81,
68
+ "eval_accuracy": 0.7163886427879333,
69
+ "eval_f1": 0.8084824387011266,
70
+ "eval_loss": 1.280194878578186,
71
+ "eval_mcc": 0.2778508397362399,
72
+ "eval_runtime": 2.1555,
73
+ "eval_samples_per_second": 472.74,
74
+ "eval_steps_per_second": 59.382,
75
+ "step": 1000
76
+ },
77
+ {
78
+ "epoch": 9.38,
79
+ "eval_accuracy": 0.7212954163551331,
80
+ "eval_f1": 0.8165374677002584,
81
+ "eval_loss": 1.5132513046264648,
82
+ "eval_mcc": 0.27088079409026794,
83
+ "eval_runtime": 2.1614,
84
+ "eval_samples_per_second": 471.454,
85
+ "eval_steps_per_second": 59.221,
86
+ "step": 1200
87
+ },
88
+ {
89
+ "epoch": 10.0,
90
+ "step": 1280,
91
+ "total_flos": 7478552336486400.0,
92
+ "train_loss": 0.22346576005220414,
93
+ "train_runtime": 448.112,
94
+ "train_samples_per_second": 182.187,
95
+ "train_steps_per_second": 2.856
96
+ }
97
+ ],
98
+ "max_steps": 1280,
99
+ "num_train_epochs": 10,
100
+ "total_flos": 7478552336486400.0,
101
+ "trial_name": null,
102
+ "trial_params": null
103
+ }
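Note that best_metric equals the eval_f1 logged at step 200, which is why best_model_checkpoint points at checkpoint-200 even though later evaluations reach higher accuracy. A minimal sketch of re-deriving that choice from log_history, assuming F1 was the metric used for model selection:

import json

with open("finetune/cola/trainer_state.json") as f:  # path as in this folder
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_f1" in e]
best = max(evals, key=lambda e: e["eval_f1"])
print(best["step"], best["eval_f1"])  # 200, 0.8218... -> checkpoint-200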
finetune/cola/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30a9dbc65f15536e743690a23652c91eeb58eaafa7cb9e0e8179845b89d9afa3
3
+ size 3503
finetune/cola/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
finetune/control_raising_control/all_results.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_accuracy": 0.917276918888092,
4
+ "eval_f1": 0.9214503654296459,
5
+ "eval_loss": 0.5907565951347351,
6
+ "eval_mcc": 0.8416442761632831,
7
+ "eval_runtime": 28.9548,
8
+ "eval_samples": 13382,
9
+ "eval_samples_per_second": 462.169,
10
+ "eval_steps_per_second": 57.78,
11
+ "train_loss": 0.034445077063316856,
12
+ "train_runtime": 499.6416,
13
+ "train_samples": 6570,
14
+ "train_samples_per_second": 131.494,
15
+ "train_steps_per_second": 2.061
16
+ }
finetune/control_raising_control/config.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "_name_or_path": "final_models/glue_models/structroberta_s2_50ep/",
3
+ "architectures": [
4
+ "StructRobertaForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "auto_map": {
8
+ "AutoConfig": "modeling_structroberta.StructRobertaConfig",
9
+ "AutoModelForMaskedLM": "modeling_structroberta.StructRoberta",
10
+ "AutoModelForSequenceClassification": "modeling_structroberta.StructRobertaForSequenceClassification"
11
+ },
12
+ "bos_token_id": 0,
13
+ "classifier_dropout": null,
14
+ "conv_size": 9,
15
+ "eos_token_id": 2,
16
+ "hidden_act": "gelu",
17
+ "hidden_dropout_prob": 0.1,
18
+ "hidden_size": 768,
19
+ "id2label": {
20
+ "0": 0,
21
+ "1": 1
22
+ },
23
+ "initializer_range": 0.02,
24
+ "intermediate_size": 3072,
25
+ "label2id": {
26
+ "0": 0,
27
+ "1": 1
28
+ },
29
+ "layer_norm_eps": 1e-05,
30
+ "max_position_embeddings": 514,
31
+ "model_type": "roberta",
32
+ "n_parser_layers": 6,
33
+ "num_attention_heads": 12,
34
+ "num_hidden_layers": 12,
35
+ "pad_token_id": 1,
36
+ "position_embedding_type": "absolute",
37
+ "problem_type": "single_label_classification",
38
+ "relations": [
39
+ "head",
40
+ "child"
41
+ ],
42
+ "torch_dtype": "float32",
43
+ "transformers_version": "4.26.1",
44
+ "type_vocab_size": 1,
45
+ "use_cache": true,
46
+ "vocab_size": 32000,
47
+ "weight_act": "softmax"
48
+ }
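Since auto_map routes the Auto classes to the modeling_structroberta module shipped in this folder, loading the classifier requires trust_remote_code=True. A minimal usage sketch, assuming the checkpoint directory is available locally (the path and example sentence are placeholders):

import torch
from transformers import AutoConfig, AutoTokenizer, AutoModelForSequenceClassification

path = "finetune/control_raising_control"  # placeholder local checkpoint folder
config = AutoConfig.from_pretrained(path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForSequenceClassification.from_pretrained(path, trust_remote_code=True)

inputs = tokenizer("The professor seemed to understand the proof.", return_tensors="pt")
with torch.no_grad():
    pred = model(**inputs).logits.argmax(-1).item()
print(config.id2label[pred])  # 0 or 1, per the id2label mapping above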
finetune/control_raising_control/eval_results.json ADDED
@@ -0,0 +1,11 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_accuracy": 0.917276918888092,
4
+ "eval_f1": 0.9214503654296459,
5
+ "eval_loss": 0.5907565951347351,
6
+ "eval_mcc": 0.8416442761632831,
7
+ "eval_runtime": 28.9548,
8
+ "eval_samples": 13382,
9
+ "eval_samples_per_second": 462.169,
10
+ "eval_steps_per_second": 57.78
11
+ }
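The eval fields above are the usual accuracy, binary F1, and Matthews correlation computed over the eval split's gold labels and argmax predictions; a small illustration with scikit-learn (toy arrays stand in for the real labels and predictions):

from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef

labels = [1, 0, 1, 1, 0, 1]  # toy gold labels
preds  = [1, 0, 1, 0, 0, 1]  # toy model predictions

print(accuracy_score(labels, preds))     # -> eval_accuracy
print(f1_score(labels, preds))           # -> eval_f1
print(matthews_corrcoef(labels, preds))  # -> eval_mcc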
finetune/control_raising_control/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
finetune/control_raising_control/modeling_structroberta.py ADDED
@@ -0,0 +1,1533 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch RoBERTa model."""
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from packaging import version
24
+ from torch import nn
25
+ import torch.nn.functional as F
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from transformers.activations import ACT2FN, gelu
29
+ from transformers.modeling_outputs import (
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ BaseModelOutputWithPoolingAndCrossAttentions,
32
+ MaskedLMOutput,
33
+ SequenceClassifierOutput
34
+ )
35
+ from transformers.modeling_utils import (
36
+ PreTrainedModel,
37
+ apply_chunking_to_forward,
38
+ find_pruneable_heads_and_indices,
39
+ prune_linear_layer,
40
+ )
41
+ from transformers.utils import (
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ )
47
+ from transformers import RobertaConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "roberta-base"
53
+ _CONFIG_FOR_DOC = "RobertaConfig"
54
+ _TOKENIZER_FOR_DOC = "RobertaTokenizer"
55
+
56
+ ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
57
+ "roberta-base",
58
+ "roberta-large",
59
+ "roberta-large-mnli",
60
+ "distilroberta-base",
61
+ "roberta-base-openai-detector",
62
+ "roberta-large-openai-detector",
63
+ # See all RoBERTa models at https://huggingface.co/models?filter=roberta
64
+ ]
65
+
66
+
67
+ class StructRobertaConfig(RobertaConfig):
68
+ model_type = "roberta"
69
+
70
+ def __init__(
71
+ self,
72
+ n_parser_layers=4,
73
+ conv_size=9,
74
+ relations=('head', 'child'),
75
+ weight_act='softmax',
76
+ **kwargs,
77
+ ):
78
+ super().__init__(**kwargs)
79
+ self.n_parser_layers = n_parser_layers
80
+ self.conv_size = conv_size
81
+ self.relations = relations
82
+ self.weight_act = weight_act
83
+
84
+ class Conv1d(nn.Module):
85
+ """1D convolution layer."""
86
+
87
+ def __init__(self, hidden_size, kernel_size, dilation=1):
88
+ """Initialization.
89
+
90
+ Args:
91
+ hidden_size: dimension of input embeddings
92
+ kernel_size: convolution kernel size
93
+ dilation: the spacing between the kernel points
94
+ """
95
+ super(Conv1d, self).__init__()
96
+
97
+ if kernel_size % 2 == 0:
98
+ padding = (kernel_size // 2) * dilation
99
+ self.shift = True
100
+ else:
101
+ padding = ((kernel_size - 1) // 2) * dilation
102
+ self.shift = False
103
+ self.conv = nn.Conv1d(
104
+ hidden_size,
105
+ hidden_size,
106
+ kernel_size,
107
+ padding=padding,
108
+ dilation=dilation)
109
+
110
+ def forward(self, x):
111
+ """Compute convolution.
112
+
113
+ Args:
114
+ x: input embeddings
115
+ Returns:
116
+ conv_output: convolution results
117
+ """
118
+
119
+ if self.shift:
120
+ return self.conv(x.transpose(1, 2)).transpose(1, 2)[:, 1:]
121
+ else:
122
+ return self.conv(x.transpose(1, 2)).transpose(1, 2)
123
+
124
+ class RobertaEmbeddings(nn.Module):
125
+ """
126
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
127
+ """
128
+
129
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
130
+ def __init__(self, config):
131
+ super().__init__()
132
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
133
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
134
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
135
+
136
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
137
+ # any TensorFlow checkpoint file
138
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
139
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
140
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
141
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
142
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
143
+ if version.parse(torch.__version__) > version.parse("1.6.0"):
144
+ self.register_buffer(
145
+ "token_type_ids",
146
+ torch.zeros(self.position_ids.size(), dtype=torch.long),
147
+ persistent=False,
148
+ )
149
+
150
+ # End copy
151
+ self.padding_idx = config.pad_token_id
152
+ self.position_embeddings = nn.Embedding(
153
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
154
+ )
155
+
156
+ def forward(
157
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
158
+ ):
159
+ if position_ids is None:
160
+ if input_ids is not None:
161
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
162
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
163
+ else:
164
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
165
+
166
+ if input_ids is not None:
167
+ input_shape = input_ids.size()
168
+ else:
169
+ input_shape = inputs_embeds.size()[:-1]
170
+
171
+ seq_length = input_shape[1]
172
+
173
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
174
+ # when it's auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
175
+ # issue #5664
176
+ if token_type_ids is None:
177
+ if hasattr(self, "token_type_ids"):
178
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
179
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
180
+ token_type_ids = buffered_token_type_ids_expanded
181
+ else:
182
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
183
+
184
+ if inputs_embeds is None:
185
+ inputs_embeds = self.word_embeddings(input_ids)
186
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
187
+
188
+ embeddings = inputs_embeds + token_type_embeddings
189
+ if self.position_embedding_type == "absolute":
190
+ position_embeddings = self.position_embeddings(position_ids)
191
+ embeddings += position_embeddings
192
+ embeddings = self.LayerNorm(embeddings)
193
+ embeddings = self.dropout(embeddings)
194
+ return embeddings
195
+
196
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
197
+ """
198
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
199
+
200
+ Args:
201
+ inputs_embeds: torch.Tensor
202
+
203
+ Returns: torch.Tensor
204
+ """
205
+ input_shape = inputs_embeds.size()[:-1]
206
+ sequence_length = input_shape[1]
207
+
208
+ position_ids = torch.arange(
209
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
210
+ )
211
+ return position_ids.unsqueeze(0).expand(input_shape)
212
+
213
+
214
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
215
+ class RobertaSelfAttention(nn.Module):
216
+ def __init__(self, config, position_embedding_type=None):
217
+ super().__init__()
218
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
219
+ raise ValueError(
220
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
221
+ f"heads ({config.num_attention_heads})"
222
+ )
223
+
224
+ self.num_attention_heads = config.num_attention_heads
225
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
226
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
227
+
228
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
229
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
230
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
231
+
232
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
233
+ self.position_embedding_type = position_embedding_type or getattr(
234
+ config, "position_embedding_type", "absolute"
235
+ )
236
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
237
+ self.max_position_embeddings = config.max_position_embeddings
238
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
239
+
240
+ self.is_decoder = config.is_decoder
241
+
242
+ def transpose_for_scores(self, x):
243
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
244
+ x = x.view(new_x_shape)
245
+ return x.permute(0, 2, 1, 3)
246
+
247
+ def forward(
248
+ self,
249
+ hidden_states: torch.Tensor,
250
+ attention_mask: Optional[torch.FloatTensor] = None,
251
+ head_mask: Optional[torch.FloatTensor] = None,
252
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
253
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
254
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
255
+ output_attentions: Optional[bool] = False,
256
+ parser_att_mask=None,
257
+ ) -> Tuple[torch.Tensor]:
258
+ mixed_query_layer = self.query(hidden_states)
259
+
260
+ # If this is instantiated as a cross-attention module, the keys
261
+ # and values come from an encoder; the attention mask needs to be
262
+ # such that the encoder's padding tokens are not attended to.
263
+ is_cross_attention = encoder_hidden_states is not None
264
+
265
+ if is_cross_attention and past_key_value is not None:
266
+ # reuse k,v, cross_attentions
267
+ key_layer = past_key_value[0]
268
+ value_layer = past_key_value[1]
269
+ attention_mask = encoder_attention_mask
270
+ elif is_cross_attention:
271
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
272
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
273
+ attention_mask = encoder_attention_mask
274
+ elif past_key_value is not None:
275
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
276
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
277
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
278
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
279
+ else:
280
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
281
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
282
+
283
+ query_layer = self.transpose_for_scores(mixed_query_layer)
284
+
285
+ if self.is_decoder:
286
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
287
+ # Further calls to cross_attention layer can then reuse all cross-attention
288
+ # key/value_states (first "if" case)
289
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
290
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
291
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
292
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
293
+ past_key_value = (key_layer, value_layer)
294
+
295
+ # Take the dot product between "query" and "key" to get the raw attention scores.
296
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
297
+
298
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
299
+ seq_length = hidden_states.size()[1]
300
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
301
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
302
+ distance = position_ids_l - position_ids_r
303
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
304
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
305
+
306
+ if self.position_embedding_type == "relative_key":
307
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
308
+ attention_scores = attention_scores + relative_position_scores
309
+ elif self.position_embedding_type == "relative_key_query":
310
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
311
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
312
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
313
+
314
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
315
+ if attention_mask is not None:
316
+ # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
317
+ attention_scores = attention_scores + attention_mask
318
+
319
+
320
+ if parser_att_mask is None:
321
+ # Normalize the attention scores to probabilities.
322
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
323
+ else:
324
+ attention_probs = torch.sigmoid(attention_scores) * parser_att_mask
325
+
326
+ # This is actually dropping out entire tokens to attend to, which might
327
+ # seem a bit unusual, but is taken from the original Transformer paper.
328
+ attention_probs = self.dropout(attention_probs)
329
+
330
+ # Mask heads if we want to
331
+ if head_mask is not None:
332
+ attention_probs = attention_probs * head_mask
333
+
334
+ context_layer = torch.matmul(attention_probs, value_layer)
335
+
336
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
337
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
338
+ context_layer = context_layer.view(new_context_layer_shape)
339
+
340
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
341
+
342
+ if self.is_decoder:
343
+ outputs = outputs + (past_key_value,)
344
+ return outputs
345
+
346
+
347
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
348
+ class RobertaSelfOutput(nn.Module):
349
+ def __init__(self, config):
350
+ super().__init__()
351
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
352
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
353
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
354
+
355
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
356
+ hidden_states = self.dense(hidden_states)
357
+ hidden_states = self.dropout(hidden_states)
358
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
359
+ return hidden_states
360
+
361
+
362
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
363
+ class RobertaAttention(nn.Module):
364
+ def __init__(self, config, position_embedding_type=None):
365
+ super().__init__()
366
+ self.self = RobertaSelfAttention(config, position_embedding_type=position_embedding_type)
367
+ self.output = RobertaSelfOutput(config)
368
+ self.pruned_heads = set()
369
+
370
+ def prune_heads(self, heads):
371
+ if len(heads) == 0:
372
+ return
373
+ heads, index = find_pruneable_heads_and_indices(
374
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
375
+ )
376
+
377
+ # Prune linear layers
378
+ self.self.query = prune_linear_layer(self.self.query, index)
379
+ self.self.key = prune_linear_layer(self.self.key, index)
380
+ self.self.value = prune_linear_layer(self.self.value, index)
381
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
382
+
383
+ # Update hyper params and store pruned heads
384
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
385
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
386
+ self.pruned_heads = self.pruned_heads.union(heads)
387
+
388
+ def forward(
389
+ self,
390
+ hidden_states: torch.Tensor,
391
+ attention_mask: Optional[torch.FloatTensor] = None,
392
+ head_mask: Optional[torch.FloatTensor] = None,
393
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
394
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
395
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
396
+ output_attentions: Optional[bool] = False,
397
+ parser_att_mask=None,
398
+ ) -> Tuple[torch.Tensor]:
399
+ self_outputs = self.self(
400
+ hidden_states,
401
+ attention_mask,
402
+ head_mask,
403
+ encoder_hidden_states,
404
+ encoder_attention_mask,
405
+ past_key_value,
406
+ output_attentions,
407
+ parser_att_mask=parser_att_mask,
408
+ )
409
+ attention_output = self.output(self_outputs[0], hidden_states)
410
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
411
+ return outputs
412
+
413
+
414
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
415
+ class RobertaIntermediate(nn.Module):
416
+ def __init__(self, config):
417
+ super().__init__()
418
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
419
+ if isinstance(config.hidden_act, str):
420
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
421
+ else:
422
+ self.intermediate_act_fn = config.hidden_act
423
+
424
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
425
+ hidden_states = self.dense(hidden_states)
426
+ hidden_states = self.intermediate_act_fn(hidden_states)
427
+ return hidden_states
428
+
429
+
430
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
431
+ class RobertaOutput(nn.Module):
432
+ def __init__(self, config):
433
+ super().__init__()
434
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
435
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
436
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
437
+
438
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
439
+ hidden_states = self.dense(hidden_states)
440
+ hidden_states = self.dropout(hidden_states)
441
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
442
+ return hidden_states
443
+
444
+
445
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
446
+ class RobertaLayer(nn.Module):
447
+ def __init__(self, config):
448
+ super().__init__()
449
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
450
+ self.seq_len_dim = 1
451
+ self.attention = RobertaAttention(config)
452
+ self.is_decoder = config.is_decoder
453
+ self.add_cross_attention = config.add_cross_attention
454
+ if self.add_cross_attention:
455
+ if not self.is_decoder:
456
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
457
+ self.crossattention = RobertaAttention(config, position_embedding_type="absolute")
458
+ self.intermediate = RobertaIntermediate(config)
459
+ self.output = RobertaOutput(config)
460
+
461
+ def forward(
462
+ self,
463
+ hidden_states: torch.Tensor,
464
+ attention_mask: Optional[torch.FloatTensor] = None,
465
+ head_mask: Optional[torch.FloatTensor] = None,
466
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
467
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
468
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
469
+ output_attentions: Optional[bool] = False,
470
+ parser_att_mask=None,
471
+ ) -> Tuple[torch.Tensor]:
472
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
473
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
474
+ self_attention_outputs = self.attention(
475
+ hidden_states,
476
+ attention_mask,
477
+ head_mask,
478
+ output_attentions=output_attentions,
479
+ past_key_value=self_attn_past_key_value,
480
+ parser_att_mask=parser_att_mask,
481
+ )
482
+ attention_output = self_attention_outputs[0]
483
+
484
+ # if decoder, the last output is tuple of self-attn cache
485
+ if self.is_decoder:
486
+ outputs = self_attention_outputs[1:-1]
487
+ present_key_value = self_attention_outputs[-1]
488
+ else:
489
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
490
+
491
+ cross_attn_present_key_value = None
492
+ if self.is_decoder and encoder_hidden_states is not None:
493
+ if not hasattr(self, "crossattention"):
494
+ raise ValueError(
495
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
496
+ )
497
+
498
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
499
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
500
+ cross_attention_outputs = self.crossattention(
501
+ attention_output,
502
+ attention_mask,
503
+ head_mask,
504
+ encoder_hidden_states,
505
+ encoder_attention_mask,
506
+ cross_attn_past_key_value,
507
+ output_attentions,
508
+ )
509
+ attention_output = cross_attention_outputs[0]
510
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
511
+
512
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
513
+ cross_attn_present_key_value = cross_attention_outputs[-1]
514
+ present_key_value = present_key_value + cross_attn_present_key_value
515
+
516
+ layer_output = apply_chunking_to_forward(
517
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
518
+ )
519
+ outputs = (layer_output,) + outputs
520
+
521
+ # if decoder, return the attn key/values as the last output
522
+ if self.is_decoder:
523
+ outputs = outputs + (present_key_value,)
524
+
525
+ return outputs
526
+
527
+ def feed_forward_chunk(self, attention_output):
528
+ intermediate_output = self.intermediate(attention_output)
529
+ layer_output = self.output(intermediate_output, attention_output)
530
+ return layer_output
531
+
532
+
533
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
534
+ class RobertaEncoder(nn.Module):
535
+ def __init__(self, config):
536
+ super().__init__()
537
+ self.config = config
538
+ self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
539
+ self.gradient_checkpointing = False
540
+
541
+ def forward(
542
+ self,
543
+ hidden_states: torch.Tensor,
544
+ attention_mask: Optional[torch.FloatTensor] = None,
545
+ head_mask: Optional[torch.FloatTensor] = None,
546
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
547
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
548
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
549
+ use_cache: Optional[bool] = None,
550
+ output_attentions: Optional[bool] = False,
551
+ output_hidden_states: Optional[bool] = False,
552
+ return_dict: Optional[bool] = True,
553
+ parser_att_mask=None,
554
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
555
+ all_hidden_states = () if output_hidden_states else None
556
+ all_self_attentions = () if output_attentions else None
557
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
558
+
559
+ next_decoder_cache = () if use_cache else None
560
+ for i, layer_module in enumerate(self.layer):
561
+ if output_hidden_states:
562
+ all_hidden_states = all_hidden_states + (hidden_states,)
563
+
564
+ layer_head_mask = head_mask[i] if head_mask is not None else None
565
+ past_key_value = past_key_values[i] if past_key_values is not None else None
566
+
567
+ if self.gradient_checkpointing and self.training:
568
+
569
+ if use_cache:
570
+ logger.warning(
571
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
572
+ )
573
+ use_cache = False
574
+
575
+ def create_custom_forward(module):
576
+ def custom_forward(*inputs):
577
+ return module(*inputs, past_key_value, output_attentions)
578
+
579
+ return custom_forward
580
+
581
+ layer_outputs = torch.utils.checkpoint.checkpoint(
582
+ create_custom_forward(layer_module),
583
+ hidden_states,
584
+ attention_mask,
585
+ layer_head_mask,
586
+ encoder_hidden_states,
587
+ encoder_attention_mask,
588
+ )
589
+ else:
590
+ layer_outputs = layer_module(
591
+ hidden_states,
592
+ attention_mask,
593
+ layer_head_mask,
594
+ encoder_hidden_states,
595
+ encoder_attention_mask,
596
+ past_key_value,
597
+ output_attentions,
598
+ parser_att_mask=parser_att_mask[i],
599
+ )
600
+
601
+ hidden_states = layer_outputs[0]
602
+ if use_cache:
603
+ next_decoder_cache += (layer_outputs[-1],)
604
+ if output_attentions:
605
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
606
+ if self.config.add_cross_attention:
607
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
608
+
609
+ if output_hidden_states:
610
+ all_hidden_states = all_hidden_states + (hidden_states,)
611
+
612
+ if not return_dict:
613
+ return tuple(
614
+ v
615
+ for v in [
616
+ hidden_states,
617
+ next_decoder_cache,
618
+ all_hidden_states,
619
+ all_self_attentions,
620
+ all_cross_attentions,
621
+ ]
622
+ if v is not None
623
+ )
624
+ return BaseModelOutputWithPastAndCrossAttentions(
625
+ last_hidden_state=hidden_states,
626
+ past_key_values=next_decoder_cache,
627
+ hidden_states=all_hidden_states,
628
+ attentions=all_self_attentions,
629
+ cross_attentions=all_cross_attentions,
630
+ )
631
+
632
+
633
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
634
+ class RobertaPooler(nn.Module):
635
+ def __init__(self, config):
636
+ super().__init__()
637
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
638
+ self.activation = nn.Tanh()
639
+
640
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
641
+ # We "pool" the model by simply taking the hidden state corresponding
642
+ # to the first token.
643
+ first_token_tensor = hidden_states[:, 0]
644
+ pooled_output = self.dense(first_token_tensor)
645
+ pooled_output = self.activation(pooled_output)
646
+ return pooled_output
647
+
648
+
649
+ class RobertaPreTrainedModel(PreTrainedModel):
650
+ """
651
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
652
+ models.
653
+ """
654
+
655
+ config_class = RobertaConfig
656
+ base_model_prefix = "roberta"
657
+ supports_gradient_checkpointing = True
658
+
659
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
660
+ def _init_weights(self, module):
661
+ """Initialize the weights"""
662
+ if isinstance(module, nn.Linear):
663
+ # Slightly different from the TF version which uses truncated_normal for initialization
664
+ # cf https://github.com/pytorch/pytorch/pull/5617
665
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
666
+ if module.bias is not None:
667
+ module.bias.data.zero_()
668
+ elif isinstance(module, nn.Embedding):
669
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
670
+ if module.padding_idx is not None:
671
+ module.weight.data[module.padding_idx].zero_()
672
+ elif isinstance(module, nn.LayerNorm):
673
+ if module.bias is not None:
674
+ module.bias.data.zero_()
675
+ module.weight.data.fill_(1.0)
676
+
677
+ def _set_gradient_checkpointing(self, module, value=False):
678
+ if isinstance(module, RobertaEncoder):
679
+ module.gradient_checkpointing = value
680
+
681
+ def update_keys_to_ignore(self, config, del_keys_to_ignore):
682
+ """Remove some keys from ignore list"""
683
+ if not config.tie_word_embeddings:
684
+ # must make a new list, or the class variable gets modified!
685
+ self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
686
+ self._keys_to_ignore_on_load_missing = [
687
+ k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
688
+ ]
689
+
690
+
691
+ ROBERTA_START_DOCSTRING = r"""
692
+
693
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
694
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
695
+ etc.)
696
+
697
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
698
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
699
+ and behavior.
700
+
701
+ Parameters:
702
+ config ([`RobertaConfig`]): Model configuration class with all the parameters of the
703
+ model. Initializing with a config file does not load the weights associated with the model, only the
704
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
705
+ """
706
+
707
+
708
+ ROBERTA_INPUTS_DOCSTRING = r"""
709
+ Args:
710
+ input_ids (`torch.LongTensor` of shape `({0})`):
711
+ Indices of input sequence tokens in the vocabulary.
712
+
713
+ Indices can be obtained using [`RobertaTokenizer`]. See [`PreTrainedTokenizer.encode`] and
714
+ [`PreTrainedTokenizer.__call__`] for details.
715
+
716
+ [What are input IDs?](../glossary#input-ids)
717
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
718
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
719
+
720
+ - 1 for tokens that are **not masked**,
721
+ - 0 for tokens that are **masked**.
722
+
723
+ [What are attention masks?](../glossary#attention-mask)
724
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
725
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
726
+ 1]`:
727
+
728
+ - 0 corresponds to a *sentence A* token,
729
+ - 1 corresponds to a *sentence B* token.
730
+
731
+ [What are token type IDs?](../glossary#token-type-ids)
732
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
733
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
734
+ config.max_position_embeddings - 1]`.
735
+
736
+ [What are position IDs?](../glossary#position-ids)
737
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
738
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
739
+
740
+ - 1 indicates the head is **not masked**,
741
+ - 0 indicates the head is **masked**.
742
+
743
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
744
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
745
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
746
+ model's internal embedding lookup matrix.
747
+ output_attentions (`bool`, *optional*):
748
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
749
+ tensors for more detail.
750
+ output_hidden_states (`bool`, *optional*):
751
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
752
+ more detail.
753
+ return_dict (`bool`, *optional*):
754
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
755
+ """
756
+
757
+
758
+ class RobertaModel(RobertaPreTrainedModel):
759
+ """
760
+
761
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
762
+ cross-attention is added between the self-attention layers, following the architecture described in *Attention is
763
+ all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
764
+ Kaiser and Illia Polosukhin.
765
+
766
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
767
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
768
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
769
+
770
+ .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
771
+
772
+ """
773
+
774
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
775
+
776
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
777
+ def __init__(self, config, add_pooling_layer=True):
778
+ super().__init__(config)
779
+ self.config = config
780
+
781
+ self.embeddings = RobertaEmbeddings(config)
782
+ self.encoder = RobertaEncoder(config)
783
+
784
+ self.pooler = RobertaPooler(config) if add_pooling_layer else None
785
+
786
+ # Initialize weights and apply final processing
787
+ self.post_init()
788
+
789
+ def get_input_embeddings(self):
790
+ return self.embeddings.word_embeddings
791
+
792
+ def set_input_embeddings(self, value):
793
+ self.embeddings.word_embeddings = value
794
+
795
+ def _prune_heads(self, heads_to_prune):
796
+ """
797
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
798
+ class PreTrainedModel
799
+ """
800
+ for layer, heads in heads_to_prune.items():
801
+ self.encoder.layer[layer].attention.prune_heads(heads)
802
+
803
+ # Copied from transformers.models.bert.modeling_bert.BertModel.forward
804
+ def forward(
805
+ self,
806
+ input_ids: Optional[torch.Tensor] = None,
807
+ attention_mask: Optional[torch.Tensor] = None,
808
+ token_type_ids: Optional[torch.Tensor] = None,
809
+ position_ids: Optional[torch.Tensor] = None,
810
+ head_mask: Optional[torch.Tensor] = None,
811
+ inputs_embeds: Optional[torch.Tensor] = None,
812
+ encoder_hidden_states: Optional[torch.Tensor] = None,
813
+ encoder_attention_mask: Optional[torch.Tensor] = None,
814
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
815
+ use_cache: Optional[bool] = None,
816
+ output_attentions: Optional[bool] = None,
817
+ output_hidden_states: Optional[bool] = None,
818
+ return_dict: Optional[bool] = None,
819
+ parser_att_mask=None,
820
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
821
+ r"""
822
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
823
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
824
+ the model is configured as a decoder.
825
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
826
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
827
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
828
+
829
+ - 1 for tokens that are **not masked**,
830
+ - 0 for tokens that are **masked**.
831
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
832
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
833
+
834
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
835
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
836
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
837
+ use_cache (`bool`, *optional*):
838
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
839
+ `past_key_values`).
840
+ """
841
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
842
+ output_hidden_states = (
843
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
844
+ )
845
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
846
+
847
+ if self.config.is_decoder:
848
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
849
+ else:
850
+ use_cache = False
851
+
852
+ if input_ids is not None and inputs_embeds is not None:
853
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
854
+ elif input_ids is not None:
855
+ input_shape = input_ids.size()
856
+ elif inputs_embeds is not None:
857
+ input_shape = inputs_embeds.size()[:-1]
858
+ else:
859
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
860
+
861
+ batch_size, seq_length = input_shape
862
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
863
+
864
+ # past_key_values_length
865
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
866
+
867
+ if attention_mask is None:
868
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
869
+
870
+ if token_type_ids is None:
871
+ if hasattr(self.embeddings, "token_type_ids"):
872
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
873
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
874
+ token_type_ids = buffered_token_type_ids_expanded
875
+ else:
876
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
877
+
878
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
879
+ # ourselves in which case we just need to make it broadcastable to all heads.
880
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
881
+
882
+ # If a 2D or 3D attention mask is provided for the cross-attention
883
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
884
+ if self.config.is_decoder and encoder_hidden_states is not None:
885
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
886
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
887
+ if encoder_attention_mask is None:
888
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
889
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
890
+ else:
891
+ encoder_extended_attention_mask = None
892
+
893
+ # Prepare head mask if needed
894
+ # 1.0 in head_mask indicate we keep the head
895
+ # attention_probs has shape bsz x n_heads x N x N
896
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
897
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
898
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
899
+
900
+ embedding_output = self.embeddings(
901
+ input_ids=input_ids,
902
+ position_ids=position_ids,
903
+ token_type_ids=token_type_ids,
904
+ inputs_embeds=inputs_embeds,
905
+ past_key_values_length=past_key_values_length,
906
+ )
907
+ encoder_outputs = self.encoder(
908
+ embedding_output,
909
+ attention_mask=extended_attention_mask,
910
+ head_mask=head_mask,
911
+ encoder_hidden_states=encoder_hidden_states,
912
+ encoder_attention_mask=encoder_extended_attention_mask,
913
+ past_key_values=past_key_values,
914
+ use_cache=use_cache,
915
+ output_attentions=output_attentions,
916
+ output_hidden_states=output_hidden_states,
917
+ return_dict=return_dict,
918
+ parser_att_mask=parser_att_mask
919
+ )
920
+ sequence_output = encoder_outputs[0]
921
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
922
+
923
+ if not return_dict:
924
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
925
+
926
+ return BaseModelOutputWithPoolingAndCrossAttentions(
927
+ last_hidden_state=sequence_output,
928
+ pooler_output=pooled_output,
929
+ past_key_values=encoder_outputs.past_key_values,
930
+ hidden_states=encoder_outputs.hidden_states,
931
+ attentions=encoder_outputs.attentions,
932
+ cross_attentions=encoder_outputs.cross_attentions,
933
+ )
934
+
935
+
936
+ class StructRoberta(RobertaPreTrainedModel):
937
+ _keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
938
+ _keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
939
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
940
+
941
+ def __init__(self, config):
942
+ super().__init__(config)
943
+
944
+ if config.is_decoder:
945
+ logger.warning(
946
+ "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
947
+ "bi-directional self-attention."
948
+ )
949
+
950
+ self.parser_layers = nn.ModuleList([
951
+ nn.Sequential(Conv1d(config.hidden_size, config.conv_size),
952
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False),
953
+ nn.Tanh()) for i in range(config.n_parser_layers)])
954
+
955
+ self.distance_ff = nn.Sequential(
956
+ Conv1d(config.hidden_size, 2),
957
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
958
+ nn.Linear(config.hidden_size, 1))
959
+
960
+ self.height_ff = nn.Sequential(
961
+ nn.Linear(config.hidden_size, config.hidden_size),
962
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
963
+ nn.Linear(config.hidden_size, 1))
964
+
965
+ n_rel = len(config.relations)
966
+ self._rel_weight = nn.Parameter(torch.zeros((config.num_hidden_layers, config.num_attention_heads, n_rel)))
967
+ self._rel_weight.data.normal_(0, 0.1)
968
+
969
+ self._scaler = nn.Parameter(torch.zeros(2))
970
+
971
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
972
+ self.lm_head = RobertaLMHead(config)
973
+
974
+ self.pad = config.pad_token_id
975
+
976
+ # The LM head weights require special treatment only when they are tied with the word embeddings
977
+ self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
978
+
979
+ # Initialize weights and apply final processing
980
+ self.post_init()
981
+
982
+ def get_output_embeddings(self):
983
+ return self.lm_head.decoder
984
+
985
+ def set_output_embeddings(self, new_embeddings):
986
+ self.lm_head.decoder = new_embeddings
987
+
988
+ @property
989
+ def scaler(self):
990
+ return self._scaler.exp()
991
+
992
+ @property
993
+ def rel_weight(self):
994
+ if self.config.weight_act == 'sigmoid':
995
+ return torch.sigmoid(self._rel_weight)
996
+ elif self.config.weight_act == 'softmax':
997
+ return torch.softmax(self._rel_weight, dim=-1)
998
+
999
+ def compute_block(self, distance, height):
1000
+ """Compute constituents from distance and height."""
1001
+
1002
+ beta_logits = (distance[:, None, :] - height[:, :, None]) * self.scaler[0]
1003
+
1004
+ gamma = torch.sigmoid(-beta_logits)
1005
+ ones = torch.ones_like(gamma)
1006
+
1007
+ block_mask_left = cummin(
1008
+ gamma.tril(-1) + ones.triu(0), reverse=True, max_value=1)
1009
+ block_mask_left = block_mask_left - F.pad(
1010
+ block_mask_left[:, :, :-1], (1, 0), value=0)
1011
+ block_mask_left.tril_(0)
1012
+
1013
+ block_mask_right = cummin(
1014
+ gamma.triu(0) + ones.tril(-1), exclusive=True, max_value=1)
1015
+ block_mask_right = block_mask_right - F.pad(
1016
+ block_mask_right[:, :, 1:], (0, 1), value=0)
1017
+ block_mask_right.triu_(0)
1018
+
1019
+ block_p = block_mask_left[:, :, :, None] * block_mask_right[:, :, None, :]
1020
+ block = cumsum(block_mask_left).tril(0) + cumsum(
1021
+ block_mask_right, reverse=True).triu(1)
1022
+
1023
+ return block_p, block
1024
+
1025
+ def compute_head(self, height):
1026
+ """Estimate head for each constituent."""
1027
+
1028
+ _, length = height.size()
1029
+ head_logits = height * self.scaler[1]
1030
+ index = torch.arange(length, device=height.device)
1031
+
1032
+ mask = (index[:, None, None] <= index[None, None, :]) * (
1033
+ index[None, None, :] <= index[None, :, None])
1034
+ head_logits = head_logits[:, None, None, :].repeat(1, length, length, 1)
1035
+ head_logits.masked_fill_(~mask[None, :, :, :], -1e9)
1036
+
1037
+ head_p = torch.softmax(head_logits, dim=-1)
1038
+
1039
+ return head_p
1040
+
1041
+ def parse(self, x):
1042
+ """Parse input sentence.
1043
+
1044
+ Args:
1045
+ x: input tokens (required).
1046
+ pos: position for each token (optional).
1047
+ Returns:
1048
+ distance: syntactic distance
1049
+ height: syntactic height
1050
+ """
1051
+
1052
+ mask = (x != self.pad)
1053
+ mask_shifted = F.pad(mask[:, 1:], (0, 1), value=0)
1054
+
1055
+ h = self.roberta.embeddings(x)
1056
+ for i in range(self.config.n_parser_layers):
1057
+ h = h.masked_fill(~mask[:, :, None], 0)
1058
+ h = self.parser_layers[i](h)
1059
+
1060
+ height = self.height_ff(h).squeeze(-1)
1061
+ height.masked_fill_(~mask, -1e9)
1062
+
1063
+ distance = self.distance_ff(h).squeeze(-1)
1064
+ distance.masked_fill_(~mask_shifted, 1e9)
1065
+
1066
+ # Calibrating the distance and height to the same scale
1067
+ length = distance.size(1)
1068
+ height_max = height[:, None, :].expand(-1, length, -1)
1069
+ height_max = torch.cummax(
1070
+ height_max.triu(0) - torch.ones_like(height_max).tril(-1) * 1e9,
1071
+ dim=-1)[0].triu(0)
1072
+
1073
+ margin_left = torch.relu(
1074
+ F.pad(distance[:, :-1, None], (0, 0, 1, 0), value=1e9) - height_max)
1075
+ margin_right = torch.relu(distance[:, None, :] - height_max)
1076
+ margin = torch.where(margin_left > margin_right, margin_right,
1077
+ margin_left).triu(0)
1078
+
1079
+ margin_mask = torch.stack([mask_shifted] + [mask] * (length - 1), dim=1)
1080
+ margin.masked_fill_(~margin_mask, 0)
1081
+ margin = margin.max()
1082
+
1083
+ distance = distance - margin
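+ # Subtracting the global margin ensures that, for every valid span [i, j],
+ # min(distance[i-1], distance[j]) does not exceed the largest height inside the
+ # span, putting distances and heights on a comparable scale for compute_block.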
1084
+
1085
+ return distance, height
1086
+
1087
+ def generate_mask(self, x, distance, height):
1088
+ """Compute head and cibling distribution for each token."""
1089
+
1090
+ bsz, length = x.size()
1091
+
1092
+ eye = torch.eye(length, device=x.device, dtype=torch.bool)
1093
+ eye = eye[None, :, :].expand((bsz, -1, -1))
1094
+
1095
+ block_p, block = self.compute_block(distance, height)
1096
+ head_p = self.compute_head(height)
1097
+ head = torch.einsum('blij,bijh->blh', block_p, head_p)
1098
+ head = head.masked_fill(eye, 0)
1099
+ child = head.transpose(1, 2)
1100
+ cibling = torch.bmm(head, child).masked_fill(eye, 0)
1101
+
1102
+ rel_list = []
1103
+ if 'head' in self.config.relations:
1104
+ rel_list.append(head)
1105
+ if 'child' in self.config.relations:
1106
+ rel_list.append(child)
1107
+ if 'cibling' in self.config.relations:
1108
+ rel_list.append(cibling)
1109
+
1110
+ rel = torch.stack(rel_list, dim=1)
1111
+
1112
+ rel_weight = self.rel_weight
1113
+
1114
+ dep = torch.einsum('lhr,brij->lbhij', rel_weight, rel)
1115
+ att_mask = dep.reshape(self.config.num_hidden_layers, bsz, self.config.num_attention_heads, length, length)
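+ # att_mask stacks one soft mask per transformer layer, shaped
+ # (num_hidden_layers, batch, num_attention_heads, length, length), obtained by
+ # mixing the selected relations (head/child/cibling) with the learned
+ # per-layer, per-head rel_weight.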
1116
+
1117
+ return att_mask, cibling, head, block
1118
+
1119
+ def forward(
1120
+ self,
1121
+ input_ids: Optional[torch.LongTensor] = None,
1122
+ attention_mask: Optional[torch.FloatTensor] = None,
1123
+ token_type_ids: Optional[torch.LongTensor] = None,
1124
+ position_ids: Optional[torch.LongTensor] = None,
1125
+ head_mask: Optional[torch.FloatTensor] = None,
1126
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1127
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1128
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1129
+ labels: Optional[torch.LongTensor] = None,
1130
+ output_attentions: Optional[bool] = None,
1131
+ output_hidden_states: Optional[bool] = None,
1132
+ return_dict: Optional[bool] = None,
1133
+ ) -> Union[Tuple, MaskedLMOutput]:
1134
+ r"""
1135
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1136
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1137
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1138
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1139
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1140
+ Used to hide legacy arguments that have been deprecated.
1141
+ """
1142
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1143
+
1144
+ distance, height = self.parse(input_ids)
1145
+ att_mask, cibling, head, block = self.generate_mask(input_ids, distance, height)
1146
+
1147
+ outputs = self.roberta(
1148
+ input_ids,
1149
+ attention_mask=attention_mask,
1150
+ token_type_ids=token_type_ids,
1151
+ position_ids=position_ids,
1152
+ head_mask=head_mask,
1153
+ inputs_embeds=inputs_embeds,
1154
+ encoder_hidden_states=encoder_hidden_states,
1155
+ encoder_attention_mask=encoder_attention_mask,
1156
+ output_attentions=output_attentions,
1157
+ output_hidden_states=output_hidden_states,
1158
+ return_dict=return_dict,
1159
+ parser_att_mask=att_mask,
1160
+ )
1161
+ sequence_output = outputs[0]
1162
+ prediction_scores = self.lm_head(sequence_output)
1163
+
1164
+ masked_lm_loss = None
1165
+ if labels is not None:
1166
+ loss_fct = CrossEntropyLoss()
1167
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1168
+
1169
+ if not return_dict:
1170
+ output = (prediction_scores,) + outputs[2:]
1171
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1172
+
1173
+ return MaskedLMOutput(
1174
+ loss=masked_lm_loss,
1175
+ logits=prediction_scores,
1176
+ hidden_states=outputs.hidden_states,
1177
+ attentions=outputs.attentions,
1178
+ )
1179
+
1180
+ class RobertaLMHead(nn.Module):
1181
+ """Roberta Head for masked language modeling."""
1182
+
1183
+ def __init__(self, config):
1184
+ super().__init__()
1185
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1186
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1187
+
1188
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
1189
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1190
+ self.decoder.bias = self.bias
1191
+
1192
+ def forward(self, features, **kwargs):
1193
+ x = self.dense(features)
1194
+ x = gelu(x)
1195
+ x = self.layer_norm(x)
1196
+
1197
+ # project back to size of vocabulary with bias
1198
+ x = self.decoder(x)
1199
+
1200
+ return x
1201
+
1202
+ def _tie_weights(self):
1203
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
1204
+ self.bias = self.decoder.bias
1205
+
1206
+ class StructRobertaForSequenceClassification(RobertaPreTrainedModel):
1207
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1208
+
1209
+ def __init__(self, config):
1210
+ super().__init__(config)
1211
+ self.num_labels = config.num_labels
1212
+ self.config = config
1213
+
1214
+ self.parser_layers = nn.ModuleList([
1215
+ nn.Sequential(Conv1d(config.hidden_size, config.conv_size),
1216
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False),
1217
+ nn.Tanh()) for i in range(config.n_parser_layers)])
1218
+
1219
+ self.distance_ff = nn.Sequential(
1220
+ Conv1d(config.hidden_size, 2),
1221
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
1222
+ nn.Linear(config.hidden_size, 1))
1223
+
1224
+ self.height_ff = nn.Sequential(
1225
+ nn.Linear(config.hidden_size, config.hidden_size),
1226
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
1227
+ nn.Linear(config.hidden_size, 1))
1228
+
1229
+ n_rel = len(config.relations)
1230
+ self._rel_weight = nn.Parameter(torch.zeros((config.num_hidden_layers, config.num_attention_heads, n_rel)))
1231
+ self._rel_weight.data.normal_(0, 0.1)
1232
+
1233
+ self._scaler = nn.Parameter(torch.zeros(2))
1234
+
1235
+ self.pad = config.pad_token_id
1236
+
1237
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
1238
+ self.classifier = RobertaClassificationHead(config)
1239
+
1240
+ # Initialize weights and apply final processing
1241
+ self.post_init()
1242
+
1243
+
1244
+ @property
1245
+ def scaler(self):
1246
+ return self._scaler.exp()
1247
+
1248
+ @property
1249
+ def rel_weight(self):
1250
+ if self.config.weight_act == 'sigmoid':
1251
+ return torch.sigmoid(self._rel_weight)
1252
+ elif self.config.weight_act == 'softmax':
1253
+ return torch.softmax(self._rel_weight, dim=-1)
1254
+
1255
+ def compute_block(self, distance, height):
1256
+ """Compute constituents from distance and height."""
1257
+
1258
+ beta_logits = (distance[:, None, :] - height[:, :, None]) * self.scaler[0]
1259
+
1260
+ gamma = torch.sigmoid(-beta_logits)
1261
+ ones = torch.ones_like(gamma)
1262
+
1263
+ block_mask_left = cummin(
1264
+ gamma.tril(-1) + ones.triu(0), reverse=True, max_value=1)
1265
+ block_mask_left = block_mask_left - F.pad(
1266
+ block_mask_left[:, :, :-1], (1, 0), value=0)
1267
+ block_mask_left.tril_(0)
1268
+
1269
+ block_mask_right = cummin(
1270
+ gamma.triu(0) + ones.tril(-1), exclusive=True, max_value=1)
1271
+ block_mask_right = block_mask_right - F.pad(
1272
+ block_mask_right[:, :, 1:], (0, 1), value=0)
1273
+ block_mask_right.triu_(0)
1274
+
1275
+ block_p = block_mask_left[:, :, :, None] * block_mask_right[:, :, None, :]
1276
+ block = cumsum(block_mask_left).tril(0) + cumsum(
1277
+ block_mask_right, reverse=True).triu(1)
1278
+
1279
+ return block_p, block
1280
+
1281
+ def compute_head(self, height):
1282
+ """Estimate head for each constituent."""
1283
+
1284
+ _, length = height.size()
1285
+ head_logits = height * self.scaler[1]
1286
+ index = torch.arange(length, device=height.device)
1287
+
1288
+ mask = (index[:, None, None] <= index[None, None, :]) * (
1289
+ index[None, None, :] <= index[None, :, None])
1290
+ head_logits = head_logits[:, None, None, :].repeat(1, length, length, 1)
1291
+ head_logits.masked_fill_(~mask[None, :, :, :], -1e9)
1292
+
1293
+ head_p = torch.softmax(head_logits, dim=-1)
1294
+
1295
+ return head_p
1296
+
1297
+ def parse(self, x):
1298
+ """Parse input sentence.
1299
+
1300
+ Args:
1301
+ x: input tokens (required).
1302
1303
+ Returns:
1304
+ distance: syntactic distance
1305
+ height: syntactic height
1306
+ """
1307
+
1308
+ mask = (x != self.pad)
1309
+ mask_shifted = F.pad(mask[:, 1:], (0, 1), value=0)
1310
+
1311
+ h = self.roberta.embeddings(x)
1312
+ for i in range(self.config.n_parser_layers):
1313
+ h = h.masked_fill(~mask[:, :, None], 0)
1314
+ h = self.parser_layers[i](h)
1315
+
1316
+ height = self.height_ff(h).squeeze(-1)
1317
+ height.masked_fill_(~mask, -1e9)
1318
+
1319
+ distance = self.distance_ff(h).squeeze(-1)
1320
+ distance.masked_fill_(~mask_shifted, 1e9)
1321
+
1322
+ # Calibrating the distance and height to the same scale
1323
+ length = distance.size(1)
1324
+ height_max = height[:, None, :].expand(-1, length, -1)
1325
+ height_max = torch.cummax(
1326
+ height_max.triu(0) - torch.ones_like(height_max).tril(-1) * 1e9,
1327
+ dim=-1)[0].triu(0)
1328
+
1329
+ margin_left = torch.relu(
1330
+ F.pad(distance[:, :-1, None], (0, 0, 1, 0), value=1e9) - height_max)
1331
+ margin_right = torch.relu(distance[:, None, :] - height_max)
1332
+ margin = torch.where(margin_left > margin_right, margin_right,
1333
+ margin_left).triu(0)
1334
+
1335
+ margin_mask = torch.stack([mask_shifted] + [mask] * (length - 1), dim=1)
1336
+ margin.masked_fill_(~margin_mask, 0)
1337
+ margin = margin.max()
1338
+
1339
+ distance = distance - margin
1340
+
1341
+ return distance, height
1342
+
1343
+ def generate_mask(self, x, distance, height):
1344
+ """Compute head and cibling distribution for each token."""
1345
+
1346
+ bsz, length = x.size()
1347
+
1348
+ eye = torch.eye(length, device=x.device, dtype=torch.bool)
1349
+ eye = eye[None, :, :].expand((bsz, -1, -1))
1350
+
1351
+ block_p, block = self.compute_block(distance, height)
1352
+ head_p = self.compute_head(height)
1353
+ head = torch.einsum('blij,bijh->blh', block_p, head_p)
1354
+ head = head.masked_fill(eye, 0)
1355
+ child = head.transpose(1, 2)
1356
+ cibling = torch.bmm(head, child).masked_fill(eye, 0)
1357
+
1358
+ rel_list = []
1359
+ if 'head' in self.config.relations:
1360
+ rel_list.append(head)
1361
+ if 'child' in self.config.relations:
1362
+ rel_list.append(child)
1363
+ if 'cibling' in self.config.relations:
1364
+ rel_list.append(cibling)
1365
+
1366
+ rel = torch.stack(rel_list, dim=1)
1367
+
1368
+ rel_weight = self.rel_weight
1369
+
1370
+ dep = torch.einsum('lhr,brij->lbhij', rel_weight, rel)
1371
+ att_mask = dep.reshape(self.config.num_hidden_layers, bsz, self.config.num_attention_heads, length, length)
1372
+
1373
+ return att_mask, cibling, head, block
1374
+
1375
+ def forward(
1376
+ self,
1377
+ input_ids: Optional[torch.LongTensor] = None,
1378
+ attention_mask: Optional[torch.FloatTensor] = None,
1379
+ token_type_ids: Optional[torch.LongTensor] = None,
1380
+ position_ids: Optional[torch.LongTensor] = None,
1381
+ head_mask: Optional[torch.FloatTensor] = None,
1382
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1383
+ labels: Optional[torch.LongTensor] = None,
1384
+ output_attentions: Optional[bool] = None,
1385
+ output_hidden_states: Optional[bool] = None,
1386
+ return_dict: Optional[bool] = None,
1387
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1388
+ r"""
1389
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1390
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1391
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1392
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1393
+ """
1394
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1395
+
1396
+ distance, height = self.parse(input_ids)
1397
+ att_mask, cibling, head, block = self.generate_mask(input_ids, distance, height)
1398
+
1399
+ outputs = self.roberta(
1400
+ input_ids,
1401
+ attention_mask=attention_mask,
1402
+ token_type_ids=token_type_ids,
1403
+ position_ids=position_ids,
1404
+ head_mask=head_mask,
1405
+ inputs_embeds=inputs_embeds,
1406
+ output_attentions=output_attentions,
1407
+ output_hidden_states=output_hidden_states,
1408
+ return_dict=return_dict,
1409
+ parser_att_mask=att_mask,
1410
+ )
1411
+
1412
+ sequence_output = outputs[0]
1413
+ logits = self.classifier(sequence_output)
1414
+
1415
+ loss = None
1416
+ if labels is not None:
1417
+ if self.config.problem_type is None:
1418
+ if self.num_labels == 1:
1419
+ self.config.problem_type = "regression"
1420
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1421
+ self.config.problem_type = "single_label_classification"
1422
+ else:
1423
+ self.config.problem_type = "multi_label_classification"
1424
+
1425
+ if self.config.problem_type == "regression":
1426
+ loss_fct = MSELoss()
1427
+ if self.num_labels == 1:
1428
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1429
+ else:
1430
+ loss = loss_fct(logits, labels)
1431
+ elif self.config.problem_type == "single_label_classification":
1432
+ loss_fct = CrossEntropyLoss()
1433
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1434
+ elif self.config.problem_type == "multi_label_classification":
1435
+ loss_fct = BCEWithLogitsLoss()
1436
+ loss = loss_fct(logits, labels)
1437
+
1438
+ if not return_dict:
1439
+ output = (logits,) + outputs[2:]
1440
+ return ((loss,) + output) if loss is not None else output
1441
+
1442
+ return SequenceClassifierOutput(
1443
+ loss=loss,
1444
+ logits=logits,
1445
+ hidden_states=outputs.hidden_states,
1446
+ attentions=outputs.attentions,
1447
+ )
1448
+
1449
+
1450
+ class RobertaClassificationHead(nn.Module):
1451
+ """Head for sentence-level classification tasks."""
1452
+
1453
+ def __init__(self, config):
1454
+ super().__init__()
1455
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1456
+ classifier_dropout = (
1457
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1458
+ )
1459
+ self.dropout = nn.Dropout(classifier_dropout)
1460
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1461
+
1462
+ def forward(self, features, **kwargs):
1463
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1464
+ x = self.dropout(x)
1465
+ x = self.dense(x)
1466
+ x = torch.tanh(x)
1467
+ x = self.dropout(x)
1468
+ x = self.out_proj(x)
1469
+ return x
1470
+
1471
+
1472
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1473
+ """
1474
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1475
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1476
+
1477
+ Args:
1478
+ x: torch.Tensor x:
1479
+
1480
+ Returns: torch.Tensor
1481
+ """
1482
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1483
+ mask = input_ids.ne(padding_idx).int()
1484
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1485
+ return incremental_indices.long() + padding_idx
1486
+
1487
+
1488
+ def cumprod(x, reverse=False, exclusive=False):
1489
+ """cumulative product."""
1490
+ if reverse:
1491
+ x = x.flip([-1])
1492
+
1493
+ if exclusive:
1494
+ x = F.pad(x[:, :, :-1], (1, 0), value=1)
1495
+
1496
+ cx = x.cumprod(-1)
1497
+
1498
+ if reverse:
1499
+ cx = cx.flip([-1])
1500
+ return cx
1501
+
1502
+
1503
+ def cumsum(x, reverse=False, exclusive=False):
1504
+ """cumulative sum."""
1505
+ bsz, _, length = x.size()
1506
+ device = x.device
1507
+ if reverse:
1508
+ if exclusive:
1509
+ w = torch.ones([bsz, length, length], device=device).tril(-1)
1510
+ else:
1511
+ w = torch.ones([bsz, length, length], device=device).tril(0)
1512
+ cx = torch.bmm(x, w)
1513
+ else:
1514
+ if exclusive:
1515
+ w = torch.ones([bsz, length, length], device=device).triu(1)
1516
+ else:
1517
+ w = torch.ones([bsz, length, length], device=device).triu(0)
1518
+ cx = torch.bmm(x, w)
1519
+ return cx
1520
+
1521
+
1522
+ def cummin(x, reverse=False, exclusive=False, max_value=1e9):
1523
+ """cumulative min."""
1524
+ if reverse:
1525
+ if exclusive:
1526
+ x = F.pad(x[:, :, 1:], (0, 1), value=max_value)
1527
+ x = x.flip([-1]).cummin(-1)[0].flip([-1])
1528
+ else:
1529
+ if exclusive:
1530
+ x = F.pad(x[:, :, :-1], (1, 0), value=max_value)
1531
+ x = x.cummin(-1)[0]
1532
+ return x
1533
+
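As a quick sanity check on the cumsum helper above: in the default (forward, inclusive) case, the batched matmul against an upper-triangular matrix of ones is equivalent to torch.cumsum along the last dimension. A minimal sketch, assuming a copy of modeling_structroberta.py from this commit is importable:
import torch
from modeling_structroberta import cumsum  # import path assumed

x = torch.randn(2, 4, 5)                    # (batch, rows, length)
w = torch.ones(2, 5, 5).triu(0)             # the upper-triangular mask built inside cumsum

assert torch.allclose(cumsum(x), torch.cumsum(x, dim=-1), atol=1e-5)
assert torch.allclose(cumsum(x), torch.bmm(x, w), atol=1e-5)
# reverse=True accumulates from the right instead
assert torch.allclose(cumsum(x, reverse=True), x.flip(-1).cumsum(-1).flip(-1), atol=1e-5)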
finetune/control_raising_control/predict_results.txt ADDED
The diff for this file is too large to render. See raw diff
 
finetune/control_raising_control/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0283a4ef1febb63e69f9d14a4377d328a1584182a5e73f1c06c8e632fcc1f1e6
3
+ size 577068929
finetune/control_raising_control/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
finetune/control_raising_control/tokenizer_config.json ADDED
@@ -0,0 +1,65 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "bos_token": {
4
+ "__type": "AddedToken",
5
+ "content": "<s>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false
10
+ },
11
+ "cls_token": {
12
+ "__type": "AddedToken",
13
+ "content": "<s>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false
18
+ },
19
+ "eos_token": {
20
+ "__type": "AddedToken",
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false
26
+ },
27
+ "errors": "replace",
28
+ "mask_token": {
29
+ "__type": "AddedToken",
30
+ "content": "<mask>",
31
+ "lstrip": true,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ },
36
+ "model_max_length": 512,
37
+ "name_or_path": "final_models/glue_models/structroberta_s2_50ep/",
38
+ "pad_token": {
39
+ "__type": "AddedToken",
40
+ "content": "<pad>",
41
+ "lstrip": false,
42
+ "normalized": true,
43
+ "rstrip": false,
44
+ "single_word": false
45
+ },
46
+ "sep_token": {
47
+ "__type": "AddedToken",
48
+ "content": "</s>",
49
+ "lstrip": false,
50
+ "normalized": true,
51
+ "rstrip": false,
52
+ "single_word": false
53
+ },
54
+ "special_tokens_map_file": null,
55
+ "tokenizer_class": "RobertaTokenizer",
56
+ "trim_offsets": true,
57
+ "unk_token": {
58
+ "__type": "AddedToken",
59
+ "content": "<unk>",
60
+ "lstrip": false,
61
+ "normalized": true,
62
+ "rstrip": false,
63
+ "single_word": false
64
+ }
65
+ }
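For reference, this tokenizer directory can be loaded with the standard transformers API; a minimal sketch, assuming the repository is checked out locally:
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("finetune/control_raising_control")  # local path assumed
print(tok.tokenize("The teacher seemed to understand the question."))    # example sentence, not from the dataset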
finetune/control_raising_control/train_results.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "train_loss": 0.034445077063316856,
4
+ "train_runtime": 499.6416,
5
+ "train_samples": 6570,
6
+ "train_samples_per_second": 131.494,
7
+ "train_steps_per_second": 2.061
8
+ }
finetune/control_raising_control/trainer_state.json ADDED
@@ -0,0 +1,92 @@
1
+ {
2
+ "best_metric": 0.9214503654296459,
3
+ "best_model_checkpoint": "final_models/glue_models/structroberta_s2_50ep//finetune/control_raising_control/checkpoint-600",
4
+ "epoch": 10.0,
5
+ "global_step": 1030,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.94,
12
+ "eval_accuracy": 0.9008369445800781,
13
+ "eval_f1": 0.9063976863934542,
14
+ "eval_loss": 0.4983138144016266,
15
+ "eval_mcc": 0.8099430728731271,
16
+ "eval_runtime": 28.5479,
17
+ "eval_samples_per_second": 468.756,
18
+ "eval_steps_per_second": 58.603,
19
+ "step": 200
20
+ },
21
+ {
22
+ "epoch": 3.88,
23
+ "eval_accuracy": 0.8905245661735535,
24
+ "eval_f1": 0.8989446092294957,
25
+ "eval_loss": 0.6522421836853027,
26
+ "eval_mcc": 0.7956233558781975,
27
+ "eval_runtime": 28.4619,
28
+ "eval_samples_per_second": 470.173,
29
+ "eval_steps_per_second": 58.78,
30
+ "step": 400
31
+ },
32
+ {
33
+ "epoch": 4.85,
34
+ "learning_rate": 2.5728155339805826e-05,
35
+ "loss": 0.0708,
36
+ "step": 500
37
+ },
38
+ {
39
+ "epoch": 5.83,
40
+ "eval_accuracy": 0.917276918888092,
41
+ "eval_f1": 0.9214503654296459,
42
+ "eval_loss": 0.5907565951347351,
43
+ "eval_mcc": 0.8416442761632831,
44
+ "eval_runtime": 28.5402,
45
+ "eval_samples_per_second": 468.882,
46
+ "eval_steps_per_second": 58.619,
47
+ "step": 600
48
+ },
49
+ {
50
+ "epoch": 7.77,
51
+ "eval_accuracy": 0.8982962369918823,
52
+ "eval_f1": 0.9057544491378713,
53
+ "eval_loss": 0.7629005312919617,
54
+ "eval_mcc": 0.810114823371703,
55
+ "eval_runtime": 28.587,
56
+ "eval_samples_per_second": 468.115,
57
+ "eval_steps_per_second": 58.523,
58
+ "step": 800
59
+ },
60
+ {
61
+ "epoch": 9.71,
62
+ "learning_rate": 1.4563106796116506e-06,
63
+ "loss": 0.0001,
64
+ "step": 1000
65
+ },
66
+ {
67
+ "epoch": 9.71,
68
+ "eval_accuracy": 0.8985951542854309,
69
+ "eval_f1": 0.9059923796328369,
70
+ "eval_loss": 0.7728782892227173,
71
+ "eval_mcc": 0.8105854793640443,
72
+ "eval_runtime": 28.6363,
73
+ "eval_samples_per_second": 467.31,
74
+ "eval_steps_per_second": 58.422,
75
+ "step": 1000
76
+ },
77
+ {
78
+ "epoch": 10.0,
79
+ "step": 1030,
80
+ "total_flos": 6018384229632000.0,
81
+ "train_loss": 0.034445077063316856,
82
+ "train_runtime": 499.6416,
83
+ "train_samples_per_second": 131.494,
84
+ "train_steps_per_second": 2.061
85
+ }
86
+ ],
87
+ "max_steps": 1030,
88
+ "num_train_epochs": 10,
89
+ "total_flos": 6018384229632000.0,
90
+ "trial_name": null,
91
+ "trial_params": null
92
+ }
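The best checkpoint recorded above can be read back programmatically; a small sketch, assuming the same relative path:
import json

with open("finetune/control_raising_control/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 0.9214... (the eval_f1 logged at step 600)
print(state["best_model_checkpoint"])  # the checkpoint-600 directory that achieved it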
finetune/control_raising_control/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae9c67440af1a85d1ae7bf62acde537c30644933f30b674dbb8e3231a016838b
3
+ size 3567
finetune/control_raising_control/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
finetune/control_raising_lexical_content_the/all_results.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_accuracy": 0.6819880604743958,
4
+ "eval_f1": 0.579136690647482,
5
+ "eval_loss": 2.4814441204071045,
6
+ "eval_mcc": 0.41063817024619786,
7
+ "eval_runtime": 44.3345,
8
+ "eval_samples": 20603,
9
+ "eval_samples_per_second": 464.717,
10
+ "eval_steps_per_second": 58.104,
11
+ "train_loss": 0.016269592104793014,
12
+ "train_runtime": 588.4501,
13
+ "train_samples": 6816,
14
+ "train_samples_per_second": 115.83,
15
+ "train_steps_per_second": 1.818
16
+ }
finetune/control_raising_lexical_content_the/config.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "_name_or_path": "final_models/glue_models/structroberta_s2_50ep/",
3
+ "architectures": [
4
+ "StructRobertaForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "auto_map": {
8
+ "AutoConfig": "modeling_structroberta.StructRobertaConfig",
9
+ "AutoModelForMaskedLM": "modeling_structroberta.StructRoberta",
10
+ "AutoModelForSequenceClassification": "modeling_structroberta.StructRobertaForSequenceClassification"
11
+ },
12
+ "bos_token_id": 0,
13
+ "classifier_dropout": null,
14
+ "conv_size": 9,
15
+ "eos_token_id": 2,
16
+ "hidden_act": "gelu",
17
+ "hidden_dropout_prob": 0.1,
18
+ "hidden_size": 768,
19
+ "id2label": {
20
+ "0": 0,
21
+ "1": 1
22
+ },
23
+ "initializer_range": 0.02,
24
+ "intermediate_size": 3072,
25
+ "label2id": {
26
+ "0": 0,
27
+ "1": 1
28
+ },
29
+ "layer_norm_eps": 1e-05,
30
+ "max_position_embeddings": 514,
31
+ "model_type": "roberta",
32
+ "n_parser_layers": 6,
33
+ "num_attention_heads": 12,
34
+ "num_hidden_layers": 12,
35
+ "pad_token_id": 1,
36
+ "position_embedding_type": "absolute",
37
+ "problem_type": "single_label_classification",
38
+ "relations": [
39
+ "head",
40
+ "child"
41
+ ],
42
+ "torch_dtype": "float32",
43
+ "transformers_version": "4.26.1",
44
+ "type_vocab_size": 1,
45
+ "use_cache": true,
46
+ "vocab_size": 32000,
47
+ "weight_act": "softmax"
48
+ }
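Because the auto_map above routes AutoConfig and AutoModelForSequenceClassification to modeling_structroberta.py, loading this checkpoint requires allowing custom code; a minimal sketch, assuming the relative path from this commit:
from transformers import AutoConfig, AutoModelForSequenceClassification

path = "finetune/control_raising_lexical_content_the"
config = AutoConfig.from_pretrained(path, trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(path, trust_remote_code=True)
model.eval()  # ready for inference on the binary task above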
finetune/control_raising_lexical_content_the/eval_results.json ADDED
@@ -0,0 +1,11 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_accuracy": 0.6819880604743958,
4
+ "eval_f1": 0.579136690647482,
5
+ "eval_loss": 2.4814441204071045,
6
+ "eval_mcc": 0.41063817024619786,
7
+ "eval_runtime": 44.3345,
8
+ "eval_samples": 20603,
9
+ "eval_samples_per_second": 464.717,
10
+ "eval_steps_per_second": 58.104
11
+ }
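The eval_accuracy, eval_f1, and eval_mcc fields above correspond to standard binary-classification metrics; a generic sketch with scikit-learn (placeholder arrays, not the actual predictions):
from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef

y_true = [0, 1, 1, 0, 1]   # placeholder gold labels
y_pred = [0, 1, 0, 0, 1]   # placeholder model predictions

print(accuracy_score(y_true, y_pred))    # -> eval_accuracy
print(f1_score(y_true, y_pred))          # -> eval_f1
print(matthews_corrcoef(y_true, y_pred)) # -> eval_mcc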
finetune/control_raising_lexical_content_the/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
finetune/control_raising_lexical_content_the/modeling_structroberta.py ADDED
@@ -0,0 +1,1533 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch RoBERTa model."""
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from packaging import version
24
+ from torch import nn
25
+ import torch.nn.functional as F
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from transformers.activations import ACT2FN, gelu
29
+ from transformers.modeling_outputs import (
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ BaseModelOutputWithPoolingAndCrossAttentions,
32
+ MaskedLMOutput,
33
+ SequenceClassifierOutput
34
+ )
35
+ from transformers.modeling_utils import (
36
+ PreTrainedModel,
37
+ apply_chunking_to_forward,
38
+ find_pruneable_heads_and_indices,
39
+ prune_linear_layer,
40
+ )
41
+ from transformers.utils import (
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ )
47
+ from transformers import RobertaConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "roberta-base"
53
+ _CONFIG_FOR_DOC = "RobertaConfig"
54
+ _TOKENIZER_FOR_DOC = "RobertaTokenizer"
55
+
56
+ ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
57
+ "roberta-base",
58
+ "roberta-large",
59
+ "roberta-large-mnli",
60
+ "distilroberta-base",
61
+ "roberta-base-openai-detector",
62
+ "roberta-large-openai-detector",
63
+ # See all RoBERTa models at https://huggingface.co/models?filter=roberta
64
+ ]
65
+
66
+
67
+ class StructRobertaConfig(RobertaConfig):
68
+ model_type = "roberta"
69
+
70
+ def __init__(
71
+ self,
72
+ n_parser_layers=4,
73
+ conv_size=9,
74
+ relations=('head', 'child'),
75
+ weight_act='softmax',
76
+ **kwargs,
77
+ ):
78
+ super().__init__(**kwargs)
79
+ self.n_parser_layers = n_parser_layers
80
+ self.conv_size = conv_size
81
+ self.relations = relations
82
+ self.weight_act = weight_act
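+ # n_parser_layers: depth of the convolutional parser stack
+ # conv_size: kernel size of each parser Conv1d
+ # relations: which parser relations ('head', 'child', 'cibling') feed the attention masks
+ # weight_act: 'softmax' or 'sigmoid' applied to the per-layer, per-head relation weights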
83
+
84
+ class Conv1d(nn.Module):
85
+ """1D convolution layer."""
86
+
87
+ def __init__(self, hidden_size, kernel_size, dilation=1):
88
+ """Initialization.
89
+
90
+ Args:
91
+ hidden_size: dimension of input embeddings
92
+ kernel_size: convolution kernel size
93
+ dilation: the spacing between the kernel points
94
+ """
95
+ super(Conv1d, self).__init__()
96
+
97
+ if kernel_size % 2 == 0:
98
+ padding = (kernel_size // 2) * dilation
99
+ self.shift = True
100
+ else:
101
+ padding = ((kernel_size - 1) // 2) * dilation
102
+ self.shift = False
103
+ self.conv = nn.Conv1d(
104
+ hidden_size,
105
+ hidden_size,
106
+ kernel_size,
107
+ padding=padding,
108
+ dilation=dilation)
109
+
110
+ def forward(self, x):
111
+ """Compute convolution.
112
+
113
+ Args:
114
+ x: input embeddings
115
+ Returns:
116
+ conv_output: convolution results
117
+ """
118
+
119
+ if self.shift:
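+ # With the default dilation of 1, an even kernel plus its symmetric padding produces one
+ # extra output position; dropping the first position restores the input length.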
120
+ return self.conv(x.transpose(1, 2)).transpose(1, 2)[:, 1:]
121
+ else:
122
+ return self.conv(x.transpose(1, 2)).transpose(1, 2)
123
+
124
+ class RobertaEmbeddings(nn.Module):
125
+ """
126
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
127
+ """
128
+
129
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
130
+ def __init__(self, config):
131
+ super().__init__()
132
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
133
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
134
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
135
+
136
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
137
+ # any TensorFlow checkpoint file
138
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
139
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
140
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
141
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
142
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
143
+ if version.parse(torch.__version__) > version.parse("1.6.0"):
144
+ self.register_buffer(
145
+ "token_type_ids",
146
+ torch.zeros(self.position_ids.size(), dtype=torch.long),
147
+ persistent=False,
148
+ )
149
+
150
+ # End copy
151
+ self.padding_idx = config.pad_token_id
152
+ self.position_embeddings = nn.Embedding(
153
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
154
+ )
155
+
156
+ def forward(
157
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
158
+ ):
159
+ if position_ids is None:
160
+ if input_ids is not None:
161
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
162
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
163
+ else:
164
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
165
+
166
+ if input_ids is not None:
167
+ input_shape = input_ids.size()
168
+ else:
169
+ input_shape = inputs_embeds.size()[:-1]
170
+
171
+ seq_length = input_shape[1]
172
+
173
+ # Set token_type_ids to the buffer registered in the constructor (all zeros). This is usually the case
174
+ # when token_type_ids is auto-generated; the registered buffer lets users trace the model without passing it
175
+ # explicitly and resolves issue #5664
176
+ if token_type_ids is None:
177
+ if hasattr(self, "token_type_ids"):
178
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
179
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
180
+ token_type_ids = buffered_token_type_ids_expanded
181
+ else:
182
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
183
+
184
+ if inputs_embeds is None:
185
+ inputs_embeds = self.word_embeddings(input_ids)
186
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
187
+
188
+ embeddings = inputs_embeds + token_type_embeddings
189
+ if self.position_embedding_type == "absolute":
190
+ position_embeddings = self.position_embeddings(position_ids)
191
+ embeddings += position_embeddings
192
+ embeddings = self.LayerNorm(embeddings)
193
+ embeddings = self.dropout(embeddings)
194
+ return embeddings
195
+
196
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
197
+ """
198
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
199
+
200
+ Args:
201
+ inputs_embeds: torch.Tensor
202
+
203
+ Returns: torch.Tensor
204
+ """
205
+ input_shape = inputs_embeds.size()[:-1]
206
+ sequence_length = input_shape[1]
207
+
208
+ position_ids = torch.arange(
209
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
210
+ )
211
+ return position_ids.unsqueeze(0).expand(input_shape)
212
+
213
+
214
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
215
+ class RobertaSelfAttention(nn.Module):
216
+ def __init__(self, config, position_embedding_type=None):
217
+ super().__init__()
218
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
219
+ raise ValueError(
220
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
221
+ f"heads ({config.num_attention_heads})"
222
+ )
223
+
224
+ self.num_attention_heads = config.num_attention_heads
225
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
226
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
227
+
228
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
229
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
230
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
231
+
232
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
233
+ self.position_embedding_type = position_embedding_type or getattr(
234
+ config, "position_embedding_type", "absolute"
235
+ )
236
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
237
+ self.max_position_embeddings = config.max_position_embeddings
238
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
239
+
240
+ self.is_decoder = config.is_decoder
241
+
242
+ def transpose_for_scores(self, x):
243
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
244
+ x = x.view(new_x_shape)
245
+ return x.permute(0, 2, 1, 3)
246
+
247
+ def forward(
248
+ self,
249
+ hidden_states: torch.Tensor,
250
+ attention_mask: Optional[torch.FloatTensor] = None,
251
+ head_mask: Optional[torch.FloatTensor] = None,
252
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
253
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
254
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
255
+ output_attentions: Optional[bool] = False,
256
+ parser_att_mask=None,
257
+ ) -> Tuple[torch.Tensor]:
258
+ mixed_query_layer = self.query(hidden_states)
259
+
260
+ # If this is instantiated as a cross-attention module, the keys
261
+ # and values come from an encoder; the attention mask needs to be
262
+ # such that the encoder's padding tokens are not attended to.
263
+ is_cross_attention = encoder_hidden_states is not None
264
+
265
+ if is_cross_attention and past_key_value is not None:
266
+ # reuse k,v, cross_attentions
267
+ key_layer = past_key_value[0]
268
+ value_layer = past_key_value[1]
269
+ attention_mask = encoder_attention_mask
270
+ elif is_cross_attention:
271
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
272
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
273
+ attention_mask = encoder_attention_mask
274
+ elif past_key_value is not None:
275
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
276
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
277
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
278
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
279
+ else:
280
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
281
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
282
+
283
+ query_layer = self.transpose_for_scores(mixed_query_layer)
284
+
285
+ if self.is_decoder:
286
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
287
+ # Further calls to cross_attention layer can then reuse all cross-attention
288
+ # key/value_states (first "if" case)
289
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
290
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
291
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
292
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
293
+ past_key_value = (key_layer, value_layer)
294
+
295
+ # Take the dot product between "query" and "key" to get the raw attention scores.
296
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
297
+
298
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
299
+ seq_length = hidden_states.size()[1]
300
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
301
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
302
+ distance = position_ids_l - position_ids_r
303
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
304
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
305
+
306
+ if self.position_embedding_type == "relative_key":
307
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
308
+ attention_scores = attention_scores + relative_position_scores
309
+ elif self.position_embedding_type == "relative_key_query":
310
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
311
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
312
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
313
+
314
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
315
+ if attention_mask is not None:
316
+ # Apply the attention mask (precomputed for all layers in the RobertaModel forward() function)
317
+ attention_scores = attention_scores + attention_mask
318
+
319
+
320
+ if parser_att_mask is None:
321
+ # Normalize the attention scores to probabilities.
322
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
323
+ else:
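+ # StructRoberta's modification: rather than a softmax over key positions, each score is
+ # gated by a sigmoid and scaled by the parser-derived mask, so attention rows need not sum to 1.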
324
+ attention_probs = torch.sigmoid(attention_scores) * parser_att_mask
325
+
326
+ # This is actually dropping out entire tokens to attend to, which might
327
+ # seem a bit unusual, but is taken from the original Transformer paper.
328
+ attention_probs = self.dropout(attention_probs)
329
+
330
+ # Mask heads if we want to
331
+ if head_mask is not None:
332
+ attention_probs = attention_probs * head_mask
333
+
334
+ context_layer = torch.matmul(attention_probs, value_layer)
335
+
336
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
337
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
338
+ context_layer = context_layer.view(new_context_layer_shape)
339
+
340
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
341
+
342
+ if self.is_decoder:
343
+ outputs = outputs + (past_key_value,)
344
+ return outputs
345
+
346
+
347
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
348
+ class RobertaSelfOutput(nn.Module):
349
+ def __init__(self, config):
350
+ super().__init__()
351
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
352
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
353
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
354
+
355
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
356
+ hidden_states = self.dense(hidden_states)
357
+ hidden_states = self.dropout(hidden_states)
358
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
359
+ return hidden_states
360
+
361
+
362
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
363
+ class RobertaAttention(nn.Module):
364
+ def __init__(self, config, position_embedding_type=None):
365
+ super().__init__()
366
+ self.self = RobertaSelfAttention(config, position_embedding_type=position_embedding_type)
367
+ self.output = RobertaSelfOutput(config)
368
+ self.pruned_heads = set()
369
+
370
+ def prune_heads(self, heads):
371
+ if len(heads) == 0:
372
+ return
373
+ heads, index = find_pruneable_heads_and_indices(
374
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
375
+ )
376
+
377
+ # Prune linear layers
378
+ self.self.query = prune_linear_layer(self.self.query, index)
379
+ self.self.key = prune_linear_layer(self.self.key, index)
380
+ self.self.value = prune_linear_layer(self.self.value, index)
381
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
382
+
383
+ # Update hyper params and store pruned heads
384
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
385
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
386
+ self.pruned_heads = self.pruned_heads.union(heads)
387
+
388
+ def forward(
389
+ self,
390
+ hidden_states: torch.Tensor,
391
+ attention_mask: Optional[torch.FloatTensor] = None,
392
+ head_mask: Optional[torch.FloatTensor] = None,
393
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
394
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
395
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
396
+ output_attentions: Optional[bool] = False,
397
+ parser_att_mask=None,
398
+ ) -> Tuple[torch.Tensor]:
399
+ self_outputs = self.self(
400
+ hidden_states,
401
+ attention_mask,
402
+ head_mask,
403
+ encoder_hidden_states,
404
+ encoder_attention_mask,
405
+ past_key_value,
406
+ output_attentions,
407
+ parser_att_mask=parser_att_mask,
408
+ )
409
+ attention_output = self.output(self_outputs[0], hidden_states)
410
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
411
+ return outputs
412
+
413
+
414
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
415
+ class RobertaIntermediate(nn.Module):
416
+ def __init__(self, config):
417
+ super().__init__()
418
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
419
+ if isinstance(config.hidden_act, str):
420
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
421
+ else:
422
+ self.intermediate_act_fn = config.hidden_act
423
+
424
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
425
+ hidden_states = self.dense(hidden_states)
426
+ hidden_states = self.intermediate_act_fn(hidden_states)
427
+ return hidden_states
428
+
429
+
430
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
431
+ class RobertaOutput(nn.Module):
432
+ def __init__(self, config):
433
+ super().__init__()
434
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
435
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
436
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
437
+
438
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
439
+ hidden_states = self.dense(hidden_states)
440
+ hidden_states = self.dropout(hidden_states)
441
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
442
+ return hidden_states
443
+
444
+
445
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
446
+ class RobertaLayer(nn.Module):
447
+ def __init__(self, config):
448
+ super().__init__()
449
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
450
+ self.seq_len_dim = 1
451
+ self.attention = RobertaAttention(config)
452
+ self.is_decoder = config.is_decoder
453
+ self.add_cross_attention = config.add_cross_attention
454
+ if self.add_cross_attention:
455
+ if not self.is_decoder:
456
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
457
+ self.crossattention = RobertaAttention(config, position_embedding_type="absolute")
458
+ self.intermediate = RobertaIntermediate(config)
459
+ self.output = RobertaOutput(config)
460
+
461
+ def forward(
462
+ self,
463
+ hidden_states: torch.Tensor,
464
+ attention_mask: Optional[torch.FloatTensor] = None,
465
+ head_mask: Optional[torch.FloatTensor] = None,
466
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
467
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
468
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
469
+ output_attentions: Optional[bool] = False,
470
+ parser_att_mask=None,
471
+ ) -> Tuple[torch.Tensor]:
472
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
473
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
474
+ self_attention_outputs = self.attention(
475
+ hidden_states,
476
+ attention_mask,
477
+ head_mask,
478
+ output_attentions=output_attentions,
479
+ past_key_value=self_attn_past_key_value,
480
+ parser_att_mask=parser_att_mask,
481
+ )
482
+ attention_output = self_attention_outputs[0]
483
+
484
+ # if decoder, the last output is tuple of self-attn cache
485
+ if self.is_decoder:
486
+ outputs = self_attention_outputs[1:-1]
487
+ present_key_value = self_attention_outputs[-1]
488
+ else:
489
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
490
+
491
+ cross_attn_present_key_value = None
492
+ if self.is_decoder and encoder_hidden_states is not None:
493
+ if not hasattr(self, "crossattention"):
494
+ raise ValueError(
495
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
496
+ )
497
+
498
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
499
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
500
+ cross_attention_outputs = self.crossattention(
501
+ attention_output,
502
+ attention_mask,
503
+ head_mask,
504
+ encoder_hidden_states,
505
+ encoder_attention_mask,
506
+ cross_attn_past_key_value,
507
+ output_attentions,
508
+ )
509
+ attention_output = cross_attention_outputs[0]
510
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
511
+
512
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
513
+ cross_attn_present_key_value = cross_attention_outputs[-1]
514
+ present_key_value = present_key_value + cross_attn_present_key_value
515
+
516
+ layer_output = apply_chunking_to_forward(
517
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
518
+ )
519
+ outputs = (layer_output,) + outputs
520
+
521
+ # if decoder, return the attn key/values as the last output
522
+ if self.is_decoder:
523
+ outputs = outputs + (present_key_value,)
524
+
525
+ return outputs
526
+
527
+ def feed_forward_chunk(self, attention_output):
528
+ intermediate_output = self.intermediate(attention_output)
529
+ layer_output = self.output(intermediate_output, attention_output)
530
+ return layer_output
531
+
532
+
533
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
534
+ class RobertaEncoder(nn.Module):
535
+ def __init__(self, config):
536
+ super().__init__()
537
+ self.config = config
538
+ self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
539
+ self.gradient_checkpointing = False
540
+
541
+ def forward(
542
+ self,
543
+ hidden_states: torch.Tensor,
544
+ attention_mask: Optional[torch.FloatTensor] = None,
545
+ head_mask: Optional[torch.FloatTensor] = None,
546
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
547
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
548
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
549
+ use_cache: Optional[bool] = None,
550
+ output_attentions: Optional[bool] = False,
551
+ output_hidden_states: Optional[bool] = False,
552
+ return_dict: Optional[bool] = True,
553
+ parser_att_mask=None,
554
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
555
+ all_hidden_states = () if output_hidden_states else None
556
+ all_self_attentions = () if output_attentions else None
557
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
558
+
559
+ next_decoder_cache = () if use_cache else None
560
+ for i, layer_module in enumerate(self.layer):
561
+ if output_hidden_states:
562
+ all_hidden_states = all_hidden_states + (hidden_states,)
563
+
564
+ layer_head_mask = head_mask[i] if head_mask is not None else None
565
+ past_key_value = past_key_values[i] if past_key_values is not None else None
566
+
567
+ if self.gradient_checkpointing and self.training:
568
+
569
+ if use_cache:
570
+ logger.warning(
571
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
572
+ )
573
+ use_cache = False
574
+
575
+ def create_custom_forward(module):
576
+ def custom_forward(*inputs):
577
+ return module(*inputs, past_key_value, output_attentions)
578
+
579
+ return custom_forward
580
+
581
+ layer_outputs = torch.utils.checkpoint.checkpoint(
582
+ create_custom_forward(layer_module),
583
+ hidden_states,
584
+ attention_mask,
585
+ layer_head_mask,
586
+ encoder_hidden_states,
587
+ encoder_attention_mask,
588
+ )
589
+ else:
590
+ layer_outputs = layer_module(
591
+ hidden_states,
592
+ attention_mask,
593
+ layer_head_mask,
594
+ encoder_hidden_states,
595
+ encoder_attention_mask,
596
+ past_key_value,
597
+ output_attentions,
598
+ parser_att_mask=parser_att_mask[i],
599
+ )
600
+
601
+ hidden_states = layer_outputs[0]
602
+ if use_cache:
603
+ next_decoder_cache += (layer_outputs[-1],)
604
+ if output_attentions:
605
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
606
+ if self.config.add_cross_attention:
607
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
608
+
609
+ if output_hidden_states:
610
+ all_hidden_states = all_hidden_states + (hidden_states,)
611
+
612
+ if not return_dict:
613
+ return tuple(
614
+ v
615
+ for v in [
616
+ hidden_states,
617
+ next_decoder_cache,
618
+ all_hidden_states,
619
+ all_self_attentions,
620
+ all_cross_attentions,
621
+ ]
622
+ if v is not None
623
+ )
624
+ return BaseModelOutputWithPastAndCrossAttentions(
625
+ last_hidden_state=hidden_states,
626
+ past_key_values=next_decoder_cache,
627
+ hidden_states=all_hidden_states,
628
+ attentions=all_self_attentions,
629
+ cross_attentions=all_cross_attentions,
630
+ )
631
+
632
+
633
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
634
+ class RobertaPooler(nn.Module):
635
+ def __init__(self, config):
636
+ super().__init__()
637
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
638
+ self.activation = nn.Tanh()
639
+
640
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
641
+ # We "pool" the model by simply taking the hidden state corresponding
642
+ # to the first token.
643
+ first_token_tensor = hidden_states[:, 0]
644
+ pooled_output = self.dense(first_token_tensor)
645
+ pooled_output = self.activation(pooled_output)
646
+ return pooled_output
647
+
648
+
649
+ class RobertaPreTrainedModel(PreTrainedModel):
650
+ """
651
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
652
+ models.
653
+ """
654
+
655
+ config_class = RobertaConfig
656
+ base_model_prefix = "roberta"
657
+ supports_gradient_checkpointing = True
658
+
659
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
660
+ def _init_weights(self, module):
661
+ """Initialize the weights"""
662
+ if isinstance(module, nn.Linear):
663
+ # Slightly different from the TF version which uses truncated_normal for initialization
664
+ # cf https://github.com/pytorch/pytorch/pull/5617
665
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
666
+ if module.bias is not None:
667
+ module.bias.data.zero_()
668
+ elif isinstance(module, nn.Embedding):
669
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
670
+ if module.padding_idx is not None:
671
+ module.weight.data[module.padding_idx].zero_()
672
+ elif isinstance(module, nn.LayerNorm):
673
+ if module.bias is not None:
674
+ module.bias.data.zero_()
675
+ module.weight.data.fill_(1.0)
676
+
677
+ def _set_gradient_checkpointing(self, module, value=False):
678
+ if isinstance(module, RobertaEncoder):
679
+ module.gradient_checkpointing = value
680
+
681
+ def update_keys_to_ignore(self, config, del_keys_to_ignore):
682
+ """Remove some keys from ignore list"""
683
+ if not config.tie_word_embeddings:
684
+ # must make a new list, or the class variable gets modified!
685
+ self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
686
+ self._keys_to_ignore_on_load_missing = [
687
+ k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
688
+ ]
689
+
690
+
691
+ ROBERTA_START_DOCSTRING = r"""
692
+
693
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
694
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
695
+ etc.)
696
+
697
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
698
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
699
+ and behavior.
700
+
701
+ Parameters:
702
+ config ([`RobertaConfig`]): Model configuration class with all the parameters of the
703
+ model. Initializing with a config file does not load the weights associated with the model, only the
704
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
705
+ """
706
+
707
+
708
+ ROBERTA_INPUTS_DOCSTRING = r"""
709
+ Args:
710
+ input_ids (`torch.LongTensor` of shape `({0})`):
711
+ Indices of input sequence tokens in the vocabulary.
712
+
713
+ Indices can be obtained using [`RobertaTokenizer`]. See [`PreTrainedTokenizer.encode`] and
714
+ [`PreTrainedTokenizer.__call__`] for details.
715
+
716
+ [What are input IDs?](../glossary#input-ids)
717
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
718
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
719
+
720
+ - 1 for tokens that are **not masked**,
721
+ - 0 for tokens that are **masked**.
722
+
723
+ [What are attention masks?](../glossary#attention-mask)
724
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
725
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
726
+ 1]`:
727
+
728
+ - 0 corresponds to a *sentence A* token,
729
+ - 1 corresponds to a *sentence B* token.
730
+
731
+ [What are token type IDs?](../glossary#token-type-ids)
732
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
733
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
734
+ config.max_position_embeddings - 1]`.
735
+
736
+ [What are position IDs?](../glossary#position-ids)
737
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
738
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
739
+
740
+ - 1 indicates the head is **not masked**,
741
+ - 0 indicates the head is **masked**.
742
+
743
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
744
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
745
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
746
+ model's internal embedding lookup matrix.
747
+ output_attentions (`bool`, *optional*):
748
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
749
+ tensors for more detail.
750
+ output_hidden_states (`bool`, *optional*):
751
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
752
+ more detail.
753
+ return_dict (`bool`, *optional*):
754
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
755
+ """
756
+
757
+
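A minimal sketch of how the inputs documented above are usually produced; `roberta-base` is only a stand-in checkpoint, not the tokenizer shipped with this repository:

from transformers import RobertaTokenizer

tokenizer = RobertaTokenizer.from_pretrained("roberta-base")   # stand-in checkpoint
encoding = tokenizer("The cat sat on the mat.", return_tensors="pt")
input_ids = encoding["input_ids"]             # (batch_size, sequence_length)
attention_mask = encoding["attention_mask"]   # 1 = real token, 0 = padding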
758
+ class RobertaModel(RobertaPreTrainedModel):
759
+ """
760
+
761
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
762
+ cross-attention is added between the self-attention layers, following the architecture described in *Attention is
763
+ all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
764
+ Kaiser and Illia Polosukhin.
765
+
766
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
767
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
768
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
769
+
770
+ .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
771
+
772
+ """
773
+
774
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
775
+
776
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
777
+ def __init__(self, config, add_pooling_layer=True):
778
+ super().__init__(config)
779
+ self.config = config
780
+
781
+ self.embeddings = RobertaEmbeddings(config)
782
+ self.encoder = RobertaEncoder(config)
783
+
784
+ self.pooler = RobertaPooler(config) if add_pooling_layer else None
785
+
786
+ # Initialize weights and apply final processing
787
+ self.post_init()
788
+
789
+ def get_input_embeddings(self):
790
+ return self.embeddings.word_embeddings
791
+
792
+ def set_input_embeddings(self, value):
793
+ self.embeddings.word_embeddings = value
794
+
795
+ def _prune_heads(self, heads_to_prune):
796
+ """
797
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
798
+ class PreTrainedModel
799
+ """
800
+ for layer, heads in heads_to_prune.items():
801
+ self.encoder.layer[layer].attention.prune_heads(heads)
802
+
803
+ # Copied from transformers.models.bert.modeling_bert.BertModel.forward
804
+ def forward(
805
+ self,
806
+ input_ids: Optional[torch.Tensor] = None,
807
+ attention_mask: Optional[torch.Tensor] = None,
808
+ token_type_ids: Optional[torch.Tensor] = None,
809
+ position_ids: Optional[torch.Tensor] = None,
810
+ head_mask: Optional[torch.Tensor] = None,
811
+ inputs_embeds: Optional[torch.Tensor] = None,
812
+ encoder_hidden_states: Optional[torch.Tensor] = None,
813
+ encoder_attention_mask: Optional[torch.Tensor] = None,
814
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
815
+ use_cache: Optional[bool] = None,
816
+ output_attentions: Optional[bool] = None,
817
+ output_hidden_states: Optional[bool] = None,
818
+ return_dict: Optional[bool] = None,
819
+ parser_att_mask=None,
820
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
821
+ r"""
822
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
823
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
824
+ the model is configured as a decoder.
825
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
826
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
827
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
828
+
829
+ - 1 for tokens that are **not masked**,
830
+ - 0 for tokens that are **masked**.
831
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
832
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
833
+
834
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
835
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
836
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
837
+ use_cache (`bool`, *optional*):
838
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
839
+ `past_key_values`).
840
+ """
841
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
842
+ output_hidden_states = (
843
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
844
+ )
845
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
846
+
847
+ if self.config.is_decoder:
848
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
849
+ else:
850
+ use_cache = False
851
+
852
+ if input_ids is not None and inputs_embeds is not None:
853
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
854
+ elif input_ids is not None:
855
+ input_shape = input_ids.size()
856
+ elif inputs_embeds is not None:
857
+ input_shape = inputs_embeds.size()[:-1]
858
+ else:
859
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
860
+
861
+ batch_size, seq_length = input_shape
862
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
863
+
864
+ # past_key_values_length
865
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
866
+
867
+ if attention_mask is None:
868
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
869
+
870
+ if token_type_ids is None:
871
+ if hasattr(self.embeddings, "token_type_ids"):
872
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
873
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
874
+ token_type_ids = buffered_token_type_ids_expanded
875
+ else:
876
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
877
+
878
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
879
+ # ourselves in which case we just need to make it broadcastable to all heads.
880
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
881
+
882
+ # If a 2D or 3D attention mask is provided for the cross-attention
883
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
884
+ if self.config.is_decoder and encoder_hidden_states is not None:
885
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
886
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
887
+ if encoder_attention_mask is None:
888
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
889
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
890
+ else:
891
+ encoder_extended_attention_mask = None
892
+
893
+ # Prepare head mask if needed
894
+ # 1.0 in head_mask indicate we keep the head
895
+ # attention_probs has shape bsz x n_heads x N x N
896
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
897
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
898
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
899
+
900
+ embedding_output = self.embeddings(
901
+ input_ids=input_ids,
902
+ position_ids=position_ids,
903
+ token_type_ids=token_type_ids,
904
+ inputs_embeds=inputs_embeds,
905
+ past_key_values_length=past_key_values_length,
906
+ )
907
+ encoder_outputs = self.encoder(
908
+ embedding_output,
909
+ attention_mask=extended_attention_mask,
910
+ head_mask=head_mask,
911
+ encoder_hidden_states=encoder_hidden_states,
912
+ encoder_attention_mask=encoder_extended_attention_mask,
913
+ past_key_values=past_key_values,
914
+ use_cache=use_cache,
915
+ output_attentions=output_attentions,
916
+ output_hidden_states=output_hidden_states,
917
+ return_dict=return_dict,
918
+ parser_att_mask=parser_att_mask
919
+ )
920
+ sequence_output = encoder_outputs[0]
921
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
922
+
923
+ if not return_dict:
924
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
925
+
926
+ return BaseModelOutputWithPoolingAndCrossAttentions(
927
+ last_hidden_state=sequence_output,
928
+ pooler_output=pooled_output,
929
+ past_key_values=encoder_outputs.past_key_values,
930
+ hidden_states=encoder_outputs.hidden_states,
931
+ attentions=encoder_outputs.attentions,
932
+ cross_attentions=encoder_outputs.cross_attentions,
933
+ )
934
+
935
+
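A small configuration sketch of the decoder flags mentioned in the class docstring above (`roberta-base` is a placeholder base config); note that the StructRoberta class below expects `is_decoder=False`:

from transformers import RobertaConfig

config = RobertaConfig.from_pretrained("roberta-base")   # placeholder base config
config.is_decoder = True            # enable causal self-attention
config.add_cross_attention = True   # add cross-attention layers for Seq2Seq use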
936
+ class StructRoberta(RobertaPreTrainedModel):
937
+ _keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
938
+ _keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
939
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
940
+
941
+ def __init__(self, config):
942
+ super().__init__(config)
943
+
944
+ if config.is_decoder:
945
+ logger.warning(
946
+ "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
947
+ "bi-directional self-attention."
948
+ )
949
+
950
+ self.parser_layers = nn.ModuleList([
951
+ nn.Sequential(Conv1d(config.hidden_size, config.conv_size),
952
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False),
953
+ nn.Tanh()) for i in range(config.n_parser_layers)])
954
+
955
+ self.distance_ff = nn.Sequential(
956
+ Conv1d(config.hidden_size, 2),
957
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
958
+ nn.Linear(config.hidden_size, 1))
959
+
960
+ self.height_ff = nn.Sequential(
961
+ nn.Linear(config.hidden_size, config.hidden_size),
962
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
963
+ nn.Linear(config.hidden_size, 1))
964
+
965
+ n_rel = len(config.relations)
966
+ self._rel_weight = nn.Parameter(torch.zeros((config.num_hidden_layers, config.num_attention_heads, n_rel)))
967
+ self._rel_weight.data.normal_(0, 0.1)
968
+
969
+ self._scaler = nn.Parameter(torch.zeros(2))
970
+
971
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
972
+ self.lm_head = RobertaLMHead(config)
973
+
974
+ self.pad = config.pad_token_id
975
+
976
+ # The LM head weights require special treatment only when they are tied with the word embeddings
977
+ self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
978
+
979
+ # Initialize weights and apply final processing
980
+ self.post_init()
981
+
982
+ def get_output_embeddings(self):
983
+ return self.lm_head.decoder
984
+
985
+ def set_output_embeddings(self, new_embeddings):
986
+ self.lm_head.decoder = new_embeddings
987
+
988
+ @property
989
+ def scaler(self):
990
+ return self._scaler.exp()
991
+
992
+ @property
993
+ def rel_weight(self):
994
+ if self.config.weight_act == 'sigmoid':
995
+ return torch.sigmoid(self._rel_weight)
996
+ elif self.config.weight_act == 'softmax':
997
+ return torch.softmax(self._rel_weight, dim=-1)
998
+
999
+ def compute_block(self, distance, height):
1000
+ """Compute constituents from distance and height."""
1001
+
1002
+ beta_logits = (distance[:, None, :] - height[:, :, None]) * self.scaler[0]
1003
+
1004
+ gamma = torch.sigmoid(-beta_logits)
1005
+ ones = torch.ones_like(gamma)
1006
+
1007
+ block_mask_left = cummin(
1008
+ gamma.tril(-1) + ones.triu(0), reverse=True, max_value=1)
1009
+ block_mask_left = block_mask_left - F.pad(
1010
+ block_mask_left[:, :, :-1], (1, 0), value=0)
1011
+ block_mask_left.tril_(0)
1012
+
1013
+ block_mask_right = cummin(
1014
+ gamma.triu(0) + ones.tril(-1), exclusive=True, max_value=1)
1015
+ block_mask_right = block_mask_right - F.pad(
1016
+ block_mask_right[:, :, 1:], (0, 1), value=0)
1017
+ block_mask_right.triu_(0)
1018
+
1019
+ block_p = block_mask_left[:, :, :, None] * block_mask_right[:, :, None, :]
1020
+ block = cumsum(block_mask_left).tril(0) + cumsum(
1021
+ block_mask_right, reverse=True).triu(1)
1022
+
1023
+ return block_p, block
1024
+
1025
+ def compute_head(self, height):
1026
+ """Estimate head for each constituent."""
1027
+
1028
+ _, length = height.size()
1029
+ head_logits = height * self.scaler[1]
1030
+ index = torch.arange(length, device=height.device)
1031
+
1032
+ mask = (index[:, None, None] <= index[None, None, :]) * (
1033
+ index[None, None, :] <= index[None, :, None])
1034
+ head_logits = head_logits[:, None, None, :].repeat(1, length, length, 1)
1035
+ head_logits.masked_fill_(~mask[None, :, :, :], -1e9)
1036
+
1037
+ head_p = torch.softmax(head_logits, dim=-1)
1038
+
1039
+ return head_p
1040
+
1041
+ def parse(self, x):
1042
+ """Parse input sentence.
1043
+
1044
+ Args:
1045
+ x: input tokens (required).
1046
1047
+ Returns:
1048
+ distance: syntactic distance
1049
+ height: syntactic height
1050
+ """
1051
+
1052
+ mask = (x != self.pad)
1053
+ mask_shifted = F.pad(mask[:, 1:], (0, 1), value=0)
1054
+
1055
+ h = self.roberta.embeddings(x)
1056
+ for i in range(self.config.n_parser_layers):
1057
+ h = h.masked_fill(~mask[:, :, None], 0)
1058
+ h = self.parser_layers[i](h)
1059
+
1060
+ height = self.height_ff(h).squeeze(-1)
1061
+ height.masked_fill_(~mask, -1e9)
1062
+
1063
+ distance = self.distance_ff(h).squeeze(-1)
1064
+ distance.masked_fill_(~mask_shifted, 1e9)
1065
+
1066
+ # Calibrating the distance and height to the same level
1067
+ length = distance.size(1)
1068
+ height_max = height[:, None, :].expand(-1, length, -1)
1069
+ height_max = torch.cummax(
1070
+ height_max.triu(0) - torch.ones_like(height_max).tril(-1) * 1e9,
1071
+ dim=-1)[0].triu(0)
1072
+
1073
+ margin_left = torch.relu(
1074
+ F.pad(distance[:, :-1, None], (0, 0, 1, 0), value=1e9) - height_max)
1075
+ margin_right = torch.relu(distance[:, None, :] - height_max)
1076
+ margin = torch.where(margin_left > margin_right, margin_right,
1077
+ margin_left).triu(0)
1078
+
1079
+ margin_mask = torch.stack([mask_shifted] + [mask] * (length - 1), dim=1)
1080
+ margin.masked_fill_(~margin_mask, 0)
1081
+ margin = margin.max()
1082
+
1083
+ distance = distance - margin
1084
+
1085
+ return distance, height
1086
+
1087
+ def generate_mask(self, x, distance, height):
1088
+ """Compute head and cibling distribution for each token."""
1089
+
1090
+ bsz, length = x.size()
1091
+
1092
+ eye = torch.eye(length, device=x.device, dtype=torch.bool)
1093
+ eye = eye[None, :, :].expand((bsz, -1, -1))
1094
+
1095
+ block_p, block = self.compute_block(distance, height)
1096
+ head_p = self.compute_head(height)
1097
+ head = torch.einsum('blij,bijh->blh', block_p, head_p)
1098
+ head = head.masked_fill(eye, 0)
1099
+ child = head.transpose(1, 2)
1100
+ cibling = torch.bmm(head, child).masked_fill(eye, 0)
1101
+
1102
+ rel_list = []
1103
+ if 'head' in self.config.relations:
1104
+ rel_list.append(head)
1105
+ if 'child' in self.config.relations:
1106
+ rel_list.append(child)
1107
+ if 'cibling' in self.config.relations:
1108
+ rel_list.append(cibling)
1109
+
1110
+ rel = torch.stack(rel_list, dim=1)
1111
+
1112
+ rel_weight = self.rel_weight
1113
+
1114
+ dep = torch.einsum('lhr,brij->lbhij', rel_weight, rel)
1115
+ att_mask = dep.reshape(self.config.num_hidden_layers, bsz, self.config.num_attention_heads, length, length)
1116
+
1117
+ return att_mask, cibling, head, block
1118
+
1119
+ def forward(
1120
+ self,
1121
+ input_ids: Optional[torch.LongTensor] = None,
1122
+ attention_mask: Optional[torch.FloatTensor] = None,
1123
+ token_type_ids: Optional[torch.LongTensor] = None,
1124
+ position_ids: Optional[torch.LongTensor] = None,
1125
+ head_mask: Optional[torch.FloatTensor] = None,
1126
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1127
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1128
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1129
+ labels: Optional[torch.LongTensor] = None,
1130
+ output_attentions: Optional[bool] = None,
1131
+ output_hidden_states: Optional[bool] = None,
1132
+ return_dict: Optional[bool] = None,
1133
+ ) -> Union[Tuple, MaskedLMOutput]:
1134
+ r"""
1135
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1136
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1137
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1138
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1139
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1140
+ Used to hide legacy arguments that have been deprecated.
1141
+ """
1142
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1143
+
1144
+ distance, height = self.parse(input_ids)
1145
+ att_mask, cibling, head, block = self.generate_mask(input_ids, distance, height)
1146
+
1147
+ outputs = self.roberta(
1148
+ input_ids,
1149
+ attention_mask=attention_mask,
1150
+ token_type_ids=token_type_ids,
1151
+ position_ids=position_ids,
1152
+ head_mask=head_mask,
1153
+ inputs_embeds=inputs_embeds,
1154
+ encoder_hidden_states=encoder_hidden_states,
1155
+ encoder_attention_mask=encoder_attention_mask,
1156
+ output_attentions=output_attentions,
1157
+ output_hidden_states=output_hidden_states,
1158
+ return_dict=return_dict,
1159
+ parser_att_mask=att_mask,
1160
+ )
1161
+ sequence_output = outputs[0]
1162
+ prediction_scores = self.lm_head(sequence_output)
1163
+
1164
+ masked_lm_loss = None
1165
+ if labels is not None:
1166
+ loss_fct = CrossEntropyLoss()
1167
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1168
+
1169
+ if not return_dict:
1170
+ output = (prediction_scores,) + outputs[2:]
1171
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1172
+
1173
+ return MaskedLMOutput(
1174
+ loss=masked_lm_loss,
1175
+ logits=prediction_scores,
1176
+ hidden_states=outputs.hidden_states,
1177
+ attentions=outputs.attentions,
1178
+ )
1179
+
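A hedged usage sketch for the masked-LM class above. The repository id is a placeholder; because the model class lives in custom modeling code rather than in the transformers library, loading it typically requires `trust_remote_code=True`:

import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

repo_id = "user/structroberta-checkpoint"   # placeholder id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForMaskedLM.from_pretrained(repo_id, trust_remote_code=True)
model.eval()

inputs = tokenizer("The chef <mask> the meal.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits                      # (batch, seq_len, vocab_size)
mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero().item()
top_id = logits[0, mask_pos].argmax(-1).item()
print(tokenizer.decode([top_id]))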
1180
+ class RobertaLMHead(nn.Module):
1181
+ """Roberta Head for masked language modeling."""
1182
+
1183
+ def __init__(self, config):
1184
+ super().__init__()
1185
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1186
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1187
+
1188
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
1189
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1190
+ self.decoder.bias = self.bias
1191
+
1192
+ def forward(self, features, **kwargs):
1193
+ x = self.dense(features)
1194
+ x = gelu(x)
1195
+ x = self.layer_norm(x)
1196
+
1197
+ # project back to size of vocabulary with bias
1198
+ x = self.decoder(x)
1199
+
1200
+ return x
1201
+
1202
+ def _tie_weights(self):
1203
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
1204
+ self.bias = self.decoder.bias
1205
+
1206
+ class StructRobertaForSequenceClassification(RobertaPreTrainedModel):
1207
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1208
+
1209
+ def __init__(self, config):
1210
+ super().__init__(config)
1211
+ self.num_labels = config.num_labels
1212
+ self.config = config
1213
+
1214
+ self.parser_layers = nn.ModuleList([
1215
+ nn.Sequential(Conv1d(config.hidden_size, config.conv_size),
1216
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False),
1217
+ nn.Tanh()) for i in range(config.n_parser_layers)])
1218
+
1219
+ self.distance_ff = nn.Sequential(
1220
+ Conv1d(config.hidden_size, 2),
1221
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
1222
+ nn.Linear(config.hidden_size, 1))
1223
+
1224
+ self.height_ff = nn.Sequential(
1225
+ nn.Linear(config.hidden_size, config.hidden_size),
1226
+ nn.LayerNorm(config.hidden_size, elementwise_affine=False), nn.Tanh(),
1227
+ nn.Linear(config.hidden_size, 1))
1228
+
1229
+ n_rel = len(config.relations)
1230
+ self._rel_weight = nn.Parameter(torch.zeros((config.num_hidden_layers, config.num_attention_heads, n_rel)))
1231
+ self._rel_weight.data.normal_(0, 0.1)
1232
+
1233
+ self._scaler = nn.Parameter(torch.zeros(2))
1234
+
1235
+ self.pad = config.pad_token_id
1236
+
1237
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
1238
+ self.classifier = RobertaClassificationHead(config)
1239
+
1240
+ # Initialize weights and apply final processing
1241
+ self.post_init()
1242
+
1243
+
1244
+ @property
1245
+ def scaler(self):
1246
+ return self._scaler.exp()
1247
+
1248
+ @property
1249
+ def rel_weight(self):
1250
+ if self.config.weight_act == 'sigmoid':
1251
+ return torch.sigmoid(self._rel_weight)
1252
+ elif self.config.weight_act == 'softmax':
1253
+ return torch.softmax(self._rel_weight, dim=-1)
1254
+
1255
+ def compute_block(self, distance, height):
1256
+ """Compute constituents from distance and height."""
1257
+
1258
+ beta_logits = (distance[:, None, :] - height[:, :, None]) * self.scaler[0]
1259
+
1260
+ gamma = torch.sigmoid(-beta_logits)
1261
+ ones = torch.ones_like(gamma)
1262
+
1263
+ block_mask_left = cummin(
1264
+ gamma.tril(-1) + ones.triu(0), reverse=True, max_value=1)
1265
+ block_mask_left = block_mask_left - F.pad(
1266
+ block_mask_left[:, :, :-1], (1, 0), value=0)
1267
+ block_mask_left.tril_(0)
1268
+
1269
+ block_mask_right = cummin(
1270
+ gamma.triu(0) + ones.tril(-1), exclusive=True, max_value=1)
1271
+ block_mask_right = block_mask_right - F.pad(
1272
+ block_mask_right[:, :, 1:], (0, 1), value=0)
1273
+ block_mask_right.triu_(0)
1274
+
1275
+ block_p = block_mask_left[:, :, :, None] * block_mask_right[:, :, None, :]
1276
+ block = cumsum(block_mask_left).tril(0) + cumsum(
1277
+ block_mask_right, reverse=True).triu(1)
1278
+
1279
+ return block_p, block
1280
+
1281
+ def compute_head(self, height):
1282
+ """Estimate head for each constituent."""
1283
+
1284
+ _, length = height.size()
1285
+ head_logits = height * self.scaler[1]
1286
+ index = torch.arange(length, device=height.device)
1287
+
1288
+ mask = (index[:, None, None] <= index[None, None, :]) * (
1289
+ index[None, None, :] <= index[None, :, None])
1290
+ head_logits = head_logits[:, None, None, :].repeat(1, length, length, 1)
1291
+ head_logits.masked_fill_(~mask[None, :, :, :], -1e9)
1292
+
1293
+ head_p = torch.softmax(head_logits, dim=-1)
1294
+
1295
+ return head_p
1296
+
1297
+ def parse(self, x):
1298
+ """Parse input sentence.
1299
+
1300
+ Args:
1301
+ x: input tokens (required).
1302
1303
+ Returns:
1304
+ distance: syntactic distance
1305
+ height: syntactic height
1306
+ """
1307
+
1308
+ mask = (x != self.pad)
1309
+ mask_shifted = F.pad(mask[:, 1:], (0, 1), value=0)
1310
+
1311
+ h = self.roberta.embeddings(x)
1312
+ for i in range(self.config.n_parser_layers):
1313
+ h = h.masked_fill(~mask[:, :, None], 0)
1314
+ h = self.parser_layers[i](h)
1315
+
1316
+ height = self.height_ff(h).squeeze(-1)
1317
+ height.masked_fill_(~mask, -1e9)
1318
+
1319
+ distance = self.distance_ff(h).squeeze(-1)
1320
+ distance.masked_fill_(~mask_shifted, 1e9)
1321
+
1322
+ # Calibrating the distance and height to the same level
1323
+ length = distance.size(1)
1324
+ height_max = height[:, None, :].expand(-1, length, -1)
1325
+ height_max = torch.cummax(
1326
+ height_max.triu(0) - torch.ones_like(height_max).tril(-1) * 1e9,
1327
+ dim=-1)[0].triu(0)
1328
+
1329
+ margin_left = torch.relu(
1330
+ F.pad(distance[:, :-1, None], (0, 0, 1, 0), value=1e9) - height_max)
1331
+ margin_right = torch.relu(distance[:, None, :] - height_max)
1332
+ margin = torch.where(margin_left > margin_right, margin_right,
1333
+ margin_left).triu(0)
1334
+
1335
+ margin_mask = torch.stack([mask_shifted] + [mask] * (length - 1), dim=1)
1336
+ margin.masked_fill_(~margin_mask, 0)
1337
+ margin = margin.max()
1338
+
1339
+ distance = distance - margin
1340
+
1341
+ return distance, height
1342
+
1343
+ def generate_mask(self, x, distance, height):
1344
+ """Compute head and cibling distribution for each token."""
1345
+
1346
+ bsz, length = x.size()
1347
+
1348
+ eye = torch.eye(length, device=x.device, dtype=torch.bool)
1349
+ eye = eye[None, :, :].expand((bsz, -1, -1))
1350
+
1351
+ block_p, block = self.compute_block(distance, height)
1352
+ head_p = self.compute_head(height)
1353
+ head = torch.einsum('blij,bijh->blh', block_p, head_p)
1354
+ head = head.masked_fill(eye, 0)
1355
+ child = head.transpose(1, 2)
1356
+ cibling = torch.bmm(head, child).masked_fill(eye, 0)
1357
+
1358
+ rel_list = []
1359
+ if 'head' in self.config.relations:
1360
+ rel_list.append(head)
1361
+ if 'child' in self.config.relations:
1362
+ rel_list.append(child)
1363
+ if 'cibling' in self.config.relations:
1364
+ rel_list.append(cibling)
1365
+
1366
+ rel = torch.stack(rel_list, dim=1)
1367
+
1368
+ rel_weight = self.rel_weight
1369
+
1370
+ dep = torch.einsum('lhr,brij->lbhij', rel_weight, rel)
1371
+ att_mask = dep.reshape(self.config.num_hidden_layers, bsz, self.config.num_attention_heads, length, length)
1372
+
1373
+ return att_mask, cibling, head, block
1374
+
1375
+ def forward(
1376
+ self,
1377
+ input_ids: Optional[torch.LongTensor] = None,
1378
+ attention_mask: Optional[torch.FloatTensor] = None,
1379
+ token_type_ids: Optional[torch.LongTensor] = None,
1380
+ position_ids: Optional[torch.LongTensor] = None,
1381
+ head_mask: Optional[torch.FloatTensor] = None,
1382
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1383
+ labels: Optional[torch.LongTensor] = None,
1384
+ output_attentions: Optional[bool] = None,
1385
+ output_hidden_states: Optional[bool] = None,
1386
+ return_dict: Optional[bool] = None,
1387
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1388
+ r"""
1389
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1390
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1391
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1392
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1393
+ """
1394
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1395
+
1396
+ distance, height = self.parse(input_ids)
1397
+ att_mask, cibling, head, block = self.generate_mask(input_ids, distance, height)
1398
+
1399
+ outputs = self.roberta(
1400
+ input_ids,
1401
+ attention_mask=attention_mask,
1402
+ token_type_ids=token_type_ids,
1403
+ position_ids=position_ids,
1404
+ head_mask=head_mask,
1405
+ inputs_embeds=inputs_embeds,
1406
+ output_attentions=output_attentions,
1407
+ output_hidden_states=output_hidden_states,
1408
+ return_dict=return_dict,
1409
+ parser_att_mask=att_mask,
1410
+ )
1411
+
1412
+ sequence_output = outputs[0]
1413
+ logits = self.classifier(sequence_output)
1414
+
1415
+ loss = None
1416
+ if labels is not None:
1417
+ if self.config.problem_type is None:
1418
+ if self.num_labels == 1:
1419
+ self.config.problem_type = "regression"
1420
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1421
+ self.config.problem_type = "single_label_classification"
1422
+ else:
1423
+ self.config.problem_type = "multi_label_classification"
1424
+
1425
+ if self.config.problem_type == "regression":
1426
+ loss_fct = MSELoss()
1427
+ if self.num_labels == 1:
1428
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1429
+ else:
1430
+ loss = loss_fct(logits, labels)
1431
+ elif self.config.problem_type == "single_label_classification":
1432
+ loss_fct = CrossEntropyLoss()
1433
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1434
+ elif self.config.problem_type == "multi_label_classification":
1435
+ loss_fct = BCEWithLogitsLoss()
1436
+ loss = loss_fct(logits, labels)
1437
+
1438
+ if not return_dict:
1439
+ output = (logits,) + outputs[2:]
1440
+ return ((loss,) + output) if loss is not None else output
1441
+
1442
+ return SequenceClassifierOutput(
1443
+ loss=loss,
1444
+ logits=logits,
1445
+ hidden_states=outputs.hidden_states,
1446
+ attentions=outputs.attentions,
1447
+ )
1448
+
1449
+
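A similar hedged sketch for the sequence-classification variant above, e.g. for one of the fine-tuned checkpoints stored under finetune/; the checkpoint path is a placeholder:

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

ckpt = "path/to/finetuned-structroberta"   # placeholder path
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForSequenceClassification.from_pretrained(ckpt, trust_remote_code=True)
model.eval()

batch = tokenizer(["The pie was eaten by the dog."], return_tensors="pt")
with torch.no_grad():
    logits = model(**batch).logits          # (batch_size, num_labels)
print(logits.argmax(-1))                    # predicted label id(s)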
1450
+ class RobertaClassificationHead(nn.Module):
1451
+ """Head for sentence-level classification tasks."""
1452
+
1453
+ def __init__(self, config):
1454
+ super().__init__()
1455
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1456
+ classifier_dropout = (
1457
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1458
+ )
1459
+ self.dropout = nn.Dropout(classifier_dropout)
1460
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1461
+
1462
+ def forward(self, features, **kwargs):
1463
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1464
+ x = self.dropout(x)
1465
+ x = self.dense(x)
1466
+ x = torch.tanh(x)
1467
+ x = self.dropout(x)
1468
+ x = self.out_proj(x)
1469
+ return x
1470
+
1471
+
1472
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1473
+ """
1474
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1475
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1476
+
1477
+ Args:
1478
+ x: torch.Tensor x:
1479
+
1480
+ Returns: torch.Tensor
1481
+ """
1482
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1483
+ mask = input_ids.ne(padding_idx).int()
1484
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1485
+ return incremental_indices.long() + padding_idx
1486
+
1487
+
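A tiny worked example of the helper above (values are illustrative, with `padding_idx=1` as in RoBERTa): non-padding tokens are numbered from `padding_idx + 1`, and padding positions keep `padding_idx`.

import torch

ids = torch.tensor([[0, 42, 77, 2, 1, 1]])                 # 1 = <pad>
print(create_position_ids_from_input_ids(ids, padding_idx=1))
# -> [[2, 3, 4, 5, 1, 1]]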
1488
+ def cumprod(x, reverse=False, exclusive=False):
1489
+ """cumulative product."""
1490
+ if reverse:
1491
+ x = x.flip([-1])
1492
+
1493
+ if exclusive:
1494
+ x = F.pad(x[:, :, :-1], (1, 0), value=1)
1495
+
1496
+ cx = x.cumprod(-1)
1497
+
1498
+ if reverse:
1499
+ cx = cx.flip([-1])
1500
+ return cx
1501
+
1502
+
1503
+ def cumsum(x, reverse=False, exclusive=False):
1504
+ """cumulative sum."""
1505
+ bsz, _, length = x.size()
1506
+ device = x.device
1507
+ if reverse:
1508
+ if exclusive:
1509
+ w = torch.ones([bsz, length, length], device=device).tril(-1)
1510
+ else:
1511
+ w = torch.ones([bsz, length, length], device=device).tril(0)
1512
+ cx = torch.bmm(x, w)
1513
+ else:
1514
+ if exclusive:
1515
+ w = torch.ones([bsz, length, length], device=device).triu(1)
1516
+ else:
1517
+ w = torch.ones([bsz, length, length], device=device).triu(0)
1518
+ cx = torch.bmm(x, w)
1519
+ return cx
1520
+
1521
+
1522
+ def cummin(x, reverse=False, exclusive=False, max_value=1e9):
1523
+ """cumulative min."""
1524
+ if reverse:
1525
+ if exclusive:
1526
+ x = F.pad(x[:, :, 1:], (0, 1), value=max_value)
1527
+ x = x.flip([-1]).cummin(-1)[0].flip([-1])
1528
+ else:
1529
+ if exclusive:
1530
+ x = F.pad(x[:, :, :-1], (1, 0), value=max_value)
1531
+ x = x.cummin(-1)[0]
1532
+ return x
1533
+
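A quick sanity check of the cumulative helpers above (values are illustrative); both operate over the last dimension of a `(bsz, rows, length)` tensor and assume the `cumsum`/`cummin` definitions above are in scope:

import torch

x = torch.tensor([[[1., 2., 3., 4.]]])
print(cumsum(x))                                      # -> [ 1.,  3.,  6., 10.]
print(cumsum(x, reverse=True))                        # -> [10.,  9.,  7.,  4.]
print(cummin(torch.tensor([[[3., 1., 2., 0.5]]])))    # -> [3., 1., 1., 0.5]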
finetune/control_raising_lexical_content_the/predict_results.txt ADDED
The diff for this file is too large to render. See raw diff
finetune/control_raising_lexical_content_the/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c25d3935b6fb659850b62ae54a9716e957ad0f179fe6fd206175c80a06f17188
3
+ size 577068929
finetune/control_raising_lexical_content_the/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
finetune/control_raising_lexical_content_the/tokenizer_config.json ADDED
@@ -0,0 +1,65 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "bos_token": {
4
+ "__type": "AddedToken",
5
+ "content": "<s>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false
10
+ },
11
+ "cls_token": {
12
+ "__type": "AddedToken",
13
+ "content": "<s>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false
18
+ },
19
+ "eos_token": {
20
+ "__type": "AddedToken",
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false
26
+ },
27
+ "errors": "replace",
28
+ "mask_token": {
29
+ "__type": "AddedToken",
30
+ "content": "<mask>",
31
+ "lstrip": true,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ },
36
+ "model_max_length": 512,
37
+ "name_or_path": "final_models/glue_models/structroberta_s2_50ep/",
38
+ "pad_token": {
39
+ "__type": "AddedToken",
40
+ "content": "<pad>",
41
+ "lstrip": false,
42
+ "normalized": true,
43
+ "rstrip": false,
44
+ "single_word": false
45
+ },
46
+ "sep_token": {
47
+ "__type": "AddedToken",
48
+ "content": "</s>",
49
+ "lstrip": false,
50
+ "normalized": true,
51
+ "rstrip": false,
52
+ "single_word": false
53
+ },
54
+ "special_tokens_map_file": null,
55
+ "tokenizer_class": "RobertaTokenizer",
56
+ "trim_offsets": true,
57
+ "unk_token": {
58
+ "__type": "AddedToken",
59
+ "content": "<unk>",
60
+ "lstrip": false,
61
+ "normalized": true,
62
+ "rstrip": false,
63
+ "single_word": false
64
+ }
65
+ }
finetune/control_raising_lexical_content_the/train_results.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "train_loss": 0.016269592104793014,
4
+ "train_runtime": 588.4501,
5
+ "train_samples": 6816,
6
+ "train_samples_per_second": 115.83,
7
+ "train_steps_per_second": 1.818
8
+ }