hieuhocnlp committed
Commit 6c333b4
1 Parent(s): b3cce83

Upload BiLSTM

Files changed (4)
  1. blstm_config.py +17 -0
  2. blstm_model.py +42 -0
  3. config.json +18 -0
  4. pytorch_model.bin +3 -0
blstm_config.py ADDED
@@ -0,0 +1,17 @@
+ from transformers import PretrainedConfig
+
+ import torch
+
+ class BiLSTMConfig(PretrainedConfig):
+     def __init__(self, vocab_size=23626, embed_dim=100,
+                  num_layers=1, hidden_dim=256, dropout=0.33,
+                  output_dim=128, predict_output=10, **kwargs):
+
+         super().__init__(**kwargs)
+         self.vocab_size = vocab_size
+         self.embed_dim = embed_dim
+         self.num_layers = num_layers
+         self.hidden_dim = hidden_dim
+         self.dropout = dropout
+         self.output_dim = output_dim
+         self.predict_output = predict_output
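For reference, a minimal usage sketch of the config class above; the checkpoint directory name is illustrative and not part of this commit:

from blstm_config import BiLSTMConfig

# Instantiate with the defaults baked into the class; any field can be overridden.
config = BiLSTMConfig(hidden_dim=256, dropout=0.33)
config.save_pretrained("./bilstm-checkpoint")  # writes config.json into that directory
reloaded = BiLSTMConfig.from_pretrained("./bilstm-checkpoint")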
blstm_model.py ADDED
@@ -0,0 +1,42 @@
+ from transformers import PreTrainedModel
+
+ from torch import nn
+ import torch
+
+ class BiLSTM(PreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.hidden_dim = config.hidden_dim
+         self.predict_output = config.predict_output
+
+         self.embed_layer = nn.Embedding(config.vocab_size, config.embed_dim, padding_idx=0)
+         self.biLSTM = nn.LSTM(input_size=config.embed_dim,
+                               hidden_size=config.hidden_dim // 2,  # the two directions are concatenated back to hidden_dim
+                               num_layers=config.num_layers,
+                               bidirectional=True,
+                               batch_first=True)
+         self.linear = nn.Linear(config.hidden_dim, config.output_dim)
+         self.dropout = nn.Dropout(config.dropout)
+         self.elu = nn.ELU()
+         self.fc = nn.Linear(config.output_dim, config.predict_output)
+         # self.device_ = config.device
+
+     def forward(self, input):  # input: token indices of shape (batch_size, seq_len)
+         x = self.embed_layer(input)  # (batch_size, seq_len, embed_dim) because batch_first=True
+         batch_size = x.size(0)
+         hidden, cell = self.init_hidden(batch_size)
+
+         out, hidden = self.biLSTM(x, (hidden, cell))  # out: (batch_size, seq_len, hidden_dim); hidden is the (h_n, c_n) tuple
+
+         out = self.dropout(out)
+
+         out = self.elu(self.linear(out))  # (batch_size, seq_len, output_dim)
+
+         out = self.fc(out)  # (batch_size, seq_len, predict_output)
+
+         return out, hidden
+
+     def init_hidden(self, batch_size):  # 2 = num_directions * num_layers (num_layers is 1 here)
+         hidden = torch.zeros(2, batch_size, self.hidden_dim // 2)
+         cell = torch.zeros(2, batch_size, self.hidden_dim // 2)
+         return hidden, cell
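A minimal sketch of running the model standalone, assuming blstm_config.py and blstm_model.py are on the Python path; the dummy batch below is illustrative:

import torch
from blstm_config import BiLSTMConfig
from blstm_model import BiLSTM

config = BiLSTMConfig()
model = BiLSTM(config)

# Dummy batch: 4 sequences of 12 token indices (index 0 is reserved for padding).
tokens = torch.randint(1, config.vocab_size, (4, 12))
logits, hidden = model(tokens)
print(logits.shape)  # torch.Size([4, 12, 10]): one predict_output-sized vector per token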
config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "architectures": [
+     "BiLSTM"
+   ],
+   "auto_map": {
+     "AutoConfig": "blstm_config.BiLSTMConfig",
+     "AutoModel": "blstm_model.BiLSTM"
+   },
+   "dropout": 0.33,
+   "embed_dim": 100,
+   "hidden_dim": 256,
+   "num_layers": 1,
+   "output_dim": 128,
+   "predict_output": 10,
+   "torch_dtype": "float32",
+   "transformers_version": "4.27.2",
+   "vocab_size": 23626
+ }
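Because config.json carries an auto_map pointing at the custom classes, the checkpoint can be loaded through the Auto classes with trust_remote_code=True; the repository id below is illustrative:

from transformers import AutoConfig, AutoModel

# trust_remote_code=True tells transformers to import blstm_config.BiLSTMConfig
# and blstm_model.BiLSTM from the files shipped alongside the checkpoint.
config = AutoConfig.from_pretrained("hieuhocnlp/bilstm", trust_remote_code=True)
model = AutoModel.from_pretrained("hieuhocnlp/bilstm", trust_remote_code=True)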
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d69933282398681daa6dfa3e9596c1e96707ceac15a031ce20cc2cc3b10fd9b
+ size 10533208