Upload model
- config.json +1 -1
- configuration_bionextextractor.py +5 -1
- model.safetensors +2 -2
- modeling_bionextextractor.py +29 -12
config.json
CHANGED
@@ -60,6 +60,6 @@
   "type_vocab_size": 2,
   "update_vocab": 28899,
   "use_cache": true,
-  "version": "0.1.
+  "version": "0.1.1",
   "vocab_size": 28899
 }
configuration_bionextextractor.py
CHANGED
@@ -11,13 +11,17 @@ class BioNExtExtractorConfig(PretrainedConfig):
         arch_type = "mha",
         index_type = "both",
         novel = True,
-
+        tokenizer_special_tokens = ['[s1]','[e1]', '[s2]','[e2]' ],
+        update_vocab = None,
+        version="0.1.1",
         **kwargs,
     ):
         self.version = version
         self.arch_type = arch_type
         self.index_type = index_type
         self.novel = novel
+        self.tokenizer_special_tokens = tokenizer_special_tokens
+        self.update_vocab = update_vocab
         super().__init__(**kwargs)


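The new constructor arguments make the entity-marker tokens and the post-resize vocabulary size part of the saved config. A minimal sketch of how they could be used when preparing a tokenizer; the base checkpoint name and the surrounding workflow are assumptions, not part of this commit:

    from transformers import AutoTokenizer
    from configuration_bionextextractor import BioNExtExtractorConfig

    # Build a config carrying the fields added in this commit.
    config = BioNExtExtractorConfig(
        arch_type="mha",
        index_type="both",
        novel=True,
        tokenizer_special_tokens=['[s1]', '[e1]', '[s2]', '[e2]'],
        update_vocab=28899,  # vocab size after adding the markers (see config.json)
        version="0.1.1",
    )

    # Assumed workflow: register the entity markers as special tokens so the
    # tokenizer's vocabulary grows to config.update_vocab.
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # hypothetical base
    tokenizer.add_tokens(config.tokenizer_special_tokens, special_tokens=True)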
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:32f371a5688163ffd745b58918b63752337769ef7223c9ad3702e5af33d06bd1
+size 1350787852
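Only the Git LFS pointer changes for the weights. To confirm a downloaded model.safetensors matches this pointer, the hash and size from the diff can be checked locally (a sketch; the local path is an assumption):

    import hashlib
    import os

    path = "model.safetensors"  # assumed local path to the downloaded file

    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    # Expected values taken from the LFS pointer above.
    assert h.hexdigest() == "32f371a5688163ffd745b58918b63752337769ef7223c9ad3702e5af33d06bd1"
    assert os.path.getsize(path) == 1350787852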
modeling_bionextextractor.py
CHANGED
@@ -47,9 +47,13 @@ class RelationClassifierBase(PreTrainedModel, RelationLossMixin):
     def __init__(self, config):
         super().__init__(config)
         self.num_labels = config.num_labels
-
+        self.config = config
         #print(config)
         self.bert = BertModel(config, add_pooling_layer=False)
+
+    def training_mode(self):
+        if self.config.update_vocab is not None:
+            self.bert.resize_token_embeddings(self.config.update_vocab)

     def group_embeddings_by_index(self, embeddings, indexes):
         assert len(embeddings.shape)==3
@@ -126,6 +130,11 @@ class RelationClassifierBiLSTM(RelationClassifierBase):
         self.lstm = torch.nn.LSTM(config.hidden_size, (config.hidden_size) // 2, self.num_lstm_layers, batch_first=True, bidirectional=True)
         self.fc = torch.nn.Linear(config.hidden_size, self.num_labels) # 2 for bidirection

+    def training_mode(self):
+        super().training_mode()
+        self.lstm.reset_parameters()
+        self.fc.reset_parameters()
+
     def classifier_representation(self, embeddings, mask=None):
         out, _ = self.lstm(embeddings)
         return out[:, -1, :]
@@ -139,6 +148,10 @@ class RelationAndNovelClassifierBiLSTM(RelationClassifierBiLSTM, RelationAndNove
         super().__init__(config)
         self.fc_novel = torch.nn.Linear(config.hidden_size, 2) # 2 for bidirection

+    def training_mode(self):
+        super().training_mode()
+        self.fc_novel.reset_parameters()
+
     def classifier(self, class_representation):
         return super().classifier(class_representation), self.fc_novel(class_representation)

@@ -155,6 +168,13 @@ class RelationClassifierMHAttention(RelationClassifierBase):
         self.fc1_activation = torch.nn.GELU(approximate='none')
         self.fc2 = torch.nn.Linear(config.hidden_size//2, self.num_labels) # 2 for bidirection

+    def training_mode(self):
+        super().training_mode()
+        torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
+        self.MHattention_layer._reset_parameters()
+        self.fc1.reset_parameters()
+        self.fc2.reset_parameters()
+
     def classifier_representation(self, embeddings, mask=None):
         batch_size = embeddings.shape[0]
         weight = self.weight.repeat(batch_size, 1, 1)
@@ -185,6 +205,11 @@ class RelationAndNovelClassifierMHAttention(RelationClassifierMHAttention, Relat
         self.fc1_novel_activation = torch.nn.GELU(approximate='none')
         self.fc2_novel = torch.nn.Linear(config.hidden_size//2, 2) # 2 for bidirection

+    def training_mode(self):
+        super().training_mode()
+        self.fc1_novel.reset_parameters()
+        self.fc2_novel.reset_parameters()
+
     def classifier(self, class_representation, relation_mask=None):
         x = self.fc1_novel(class_representation)
         x = self.fc1_novel_activation(x)
@@ -196,17 +221,9 @@ ARCH_MAPPING = {"mhawNovelty": RelationAndNovelClassifierMHAttention,
                 "bilstmwNovelty" : RelationAndNovelClassifierBiLSTM,
                 "bilstm": RelationClassifierBiLSTM}

-
+## Changing the name to be compatible with HF API
+
+class BioNExtExtractorModel(RelationAndNovelClassifierMHAttention):
     config_class=BioNExtExtractorConfig

-    def __init__(self, config):
-        super().__init__(config)
-
-        if config.novel:
-            self.model = ARCH_MAPPING[f"{config.arch_type}wNovelty"](config)
-        else:
-            self.model = ARCH_MAPPING[config.arch_type](config)
-
-    def forward(self, *args, **kwargs):
-        return self.model(*args, **kwargs)

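In summary, the new training_mode() resizes the BERT token embeddings when config.update_vocab is set and re-initializes each task head down the class hierarchy, while the old wrapper with its ARCH_MAPPING dispatch is replaced by BioNExtExtractorModel, which subclasses the MH-attention-with-novelty variant directly so the checkpoint loads through the standard HF API. A minimal usage sketch (the repo id is a placeholder, not taken from this commit):

    from transformers import AutoModel

    # trust_remote_code loads modeling_bionextextractor.py from the Hub repo.
    model = AutoModel.from_pretrained(
        "user/BioNExt-extractor",  # hypothetical repo id
        trust_remote_code=True,
    )

    # Before fine-tuning: grow the embeddings to config.update_vocab
    # (28899 here) and reset the LSTM/attention/linear heads.
    model.training_mode()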