yangwang825 committed
Commit 69a6b04 · 1 Parent(s): 42cfb92

Upload BertForSequenceClassification

Files changed (3)
  1. config.json +6 -1
  2. modeling_bert.py +2 -2
  3. pytorch_model.bin +1 -1
config.json CHANGED
@@ -1,7 +1,11 @@
 {
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
   "attention_probs_dropout_prob": 0.1,
   "auto_map": {
-    "AutoConfig": "configuration_bert.BertConfig"
+    "AutoConfig": "configuration_bert.BertConfig",
+    "AutoModelForSequenceClassification": "modeling_bert.BertForSequenceClassification"
   },
   "classifier_dropout": null,
   "hidden_act": "gelu",
@@ -16,6 +20,7 @@
   "num_hidden_layers": 12,
   "pad_token_id": 0,
   "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
   "transformers_version": "4.33.3",
   "type_vocab_size": 2,
   "use_cache": true,
modeling_bert.py CHANGED
@@ -50,14 +50,14 @@ class BertPooler(nn.Module):
 
     def __init__(self, config):
         super().__init__()
-        self.affine = nn.Linear(config.hidden_size, config.hidden_size)
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
         self.activation = nn.Tanh()
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         # We "pool" the model by simply taking the hidden state corresponding
         # to the first token.
         first_token_tensor = hidden_states[:, 0]
-        pooled_output = self.affine(first_token_tensor)
+        pooled_output = self.dense(first_token_tensor)
         pooled_output = self.activation(pooled_output)
         return pooled_output
 
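Renaming the pooler projection from affine to dense matches the attribute name used by the stock transformers BertPooler, so pooler weights saved as bert.pooler.dense.* load without key remapping. A quick sanity check on the re-exported checkpoint (a sketch; key names assume the usual BertForSequenceClassification layout):

import torch

# Inspect pooler parameter names in the updated pytorch_model.bin (sketch).
state_dict = torch.load("pytorch_model.bin", map_location="cpu")
print([k for k in state_dict if "pooler" in k])
# expected something like: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias']
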
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a3b02788cfd1334be6bb658442941d4adce843058fd92515fc5f7de80ba8694
+oid sha256:31b5cff2bb6cce0d41eceb729d8660438d177910122749eca6916b3f404c0f80
 size 438000689
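
The LFS pointer records a new object with the same size but a different sha256, consistent with re-saving the same weights under the renamed pooler keys. For a Git LFS pointer, the oid is the SHA-256 of the file contents, so a downloaded pytorch_model.bin can be checked against it (sketch):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so the ~438 MB checkpoint is not read into memory at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Should match the oid in the pointer above:
# 31b5cff2bb6cce0d41eceb729d8660438d177910122749eca6916b3f404c0f80
print(sha256_of("pytorch_model.bin"))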