import logging
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
from transformers import BertPreTrainedModel
from transformers.modeling_outputs import SequenceClassifierOutput
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from .bert_layers_mosa import BertModel
logger = logging.getLogger(__name__)
class MosaicBertForEmbeddingGeneration(BertPreTrainedModel):
def __init__(self, config, add_pooling_layer=False, **kwargs):
"""
Initializes the BertEmbeddings class.
Args:
config (BertConfig): The configuration for the BERT model.
add_pooling_layer (bool, optional): Whether to add a pooling layer. Defaults to False.
"""
super().__init__(config)
assert (
config.num_hidden_layers >= config.num_embedding_layers
), "num_hidden_layers should be greater than or equal to num_embedding_layers"
self.config = config
self.bert = BertModel(config, add_pooling_layer=add_pooling_layer)
        # Initialize weights and apply final processing.
self.post_init()
@classmethod
def from_pretrained(
cls, pretrained_checkpoint, state_dict=None, config=None, *inputs, **kwargs
):
"""Load from pre-trained."""
# this gets a fresh init model
model = cls(config, *inputs, **kwargs)
# Download the model file
archive_file = hf_hub_download(
repo_id=pretrained_checkpoint,
filename="model.safetensors",
)
# Load the state_dict
state_dict = load_file(archive_file)
        # Prefix keys with "bert." so they match this wrapper's parameter names.
state_dict = {f'bert.{key}': value for key, value in state_dict.items()}
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        if len(missing_keys) > 0:
            logger.warning(
                f"Checkpoint is missing {len(missing_keys)} keys: {', '.join(missing_keys)}"
            )
        if len(unexpected_keys) > 0:
            logger.warning(
                f"Checkpoint contains {len(unexpected_keys)} unexpected keys: {', '.join(unexpected_keys)}"
            )
return model
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
subset_mask: Optional[torch.Tensor] = None,
output_all_encoded_layers: bool = True,
) -> torch.Tensor:
embedding_output = self.bert.embeddings(input_ids, token_type_ids, position_ids)
encoder_outputs_all = self.bert.encoder(
embedding_output,
attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
subset_mask=subset_mask,
)
        # With output_all_encoded_layers=True this is a list of hidden-state tensors,
        # each of shape (batch_size, seq_len, hidden_size).
        return encoder_outputs_all
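# Minimal usage sketch for MosaicBertForEmbeddingGeneration (illustrative only; the
# repository id and tokenizer below are assumptions, not something this module defines):
#
#   from transformers import AutoTokenizer, BertConfig
#
#   repo_id = "org/clinical-mosaic"  # hypothetical checkpoint on the Hugging Face Hub
#   config = BertConfig.from_pretrained(repo_id)
#   tokenizer = AutoTokenizer.from_pretrained(repo_id)
#   model = MosaicBertForEmbeddingGeneration.from_pretrained(repo_id, config=config)
#
#   batch = tokenizer("Patient presents with chest pain.", return_tensors="pt")
#   encoded_layers = model(
#       input_ids=batch["input_ids"],
#       attention_mask=batch["attention_mask"],
#   )
#   # `encoded_layers` is the encoder output; with output_all_encoded_layers=True it is a
#   # list with one (batch_size, seq_len, hidden_size) tensor per encoder layer.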
class ClinicalMosaicForSequenceClassification(BertPreTrainedModel):
"""Bert Model transformer with a sequence classification/regression head.
This head is just a linear layer on top of the pooled output.
"""
def __init__(self, config, **kwargs):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = BertModel(config, add_pooling_layer=True)
classifier_dropout = (
config.classifier_dropout
if config.classifier_dropout is not None
else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing.
self.post_init()
@classmethod
def from_pretrained(
cls, pretrained_checkpoint, state_dict=None, config=None, *inputs, **kwargs
):
"""Load from pre-trained."""
# this gets a fresh init model
model = cls(config, *inputs, **kwargs)
# Download the model file
archive_file = hf_hub_download(
repo_id=pretrained_checkpoint,
filename="model.safetensors",
)
# Load the state_dict
state_dict = load_file(archive_file)
        # Prefix keys with "bert." so they match this wrapper's parameter names.
state_dict = {f'bert.{key}': value for key, value in state_dict.items()}
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
# Calculate classifier parameters
num_classifier_params = config.hidden_size * config.num_labels + config.num_labels
classifier_keys = {"classifier.weight", "classifier.bias", "bert.pooler.dense.weight", "bert.pooler.dense.bias"}
        # Check whether only the classification head (classifier + pooler) is missing.
        if set(missing_keys) == classifier_keys:
            logger.info(
                f"Checkpoint does not contain the pooler or the classification layer "
                f"({config.hidden_size}x{config.num_labels} + {config.num_labels} = {num_classifier_params} params). "
                "These weights will be randomly initialized."
            )
elif len(missing_keys) > 0:
logger.warning(
f"Checkpoint is missing {len(missing_keys)} parameters, including possibly critical ones: "
f"{', '.join(missing_keys)}"
)
        if len(unexpected_keys) > 0:
            logger.warning(
                f"Checkpoint contains {len(unexpected_keys)} unexpected keys: {', '.join(unexpected_keys)}"
            )
return model
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (
labels.dtype == torch.long or labels.dtype == torch.int
):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=None,
attentions=None,
        )
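
# Minimal usage sketch for ClinicalMosaicForSequenceClassification (illustrative only; the
# repository id, label count, and input text are assumptions, not something this module defines):
#
#   from transformers import AutoTokenizer, BertConfig
#
#   repo_id = "org/clinical-mosaic"  # hypothetical checkpoint on the Hugging Face Hub
#   config = BertConfig.from_pretrained(repo_id)
#   config.num_labels = 3            # assumed number of target classes
#   tokenizer = AutoTokenizer.from_pretrained(repo_id)
#   model = ClinicalMosaicForSequenceClassification.from_pretrained(repo_id, config=config)
#
#   batch = tokenizer("Discharge summary text ...", return_tensors="pt")
#   outputs = model(**batch, labels=torch.tensor([1]))
#   outputs.loss.backward()          # cross-entropy loss for single-label classification
#   predictions = outputs.logits.argmax(dim=-1)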