Omaratef3221 committed
Commit 046d995 (1 parent: e7bcd22)

Upload folder using huggingface_hub

CustomBertForMaskedLM.py ADDED
@@ -0,0 +1,41 @@
+ import torch
+ import torch.nn as nn
+ from transformers import BertForMaskedLM
+ from RBFLayer import RBFLayer  # RBFLayer is the custom RBF implementation in this repo
+
+
+ # Radial basis and norm functions used by the RBF layers below
+ def gaussian_rbf(x):
+     return torch.exp(-x ** 2)
+
+
+ def euclidean_norm(x):
+     return torch.norm(x, p=2, dim=-1)
+
+
+ class CustomBertForMaskedLM(BertForMaskedLM):
+     def __init__(self, config):
+         super().__init__(config)
+
+         # Replace the feed-forward MLP layers in BERT's encoder with RBF layers
+         for layer in self.bert.encoder.layer:
+             in_features = config.hidden_size                   # 768 for bert-base
+             intermediate_features = config.intermediate_size   # 3072 for bert-base
+
+             # Replace the intermediate dense layer (768 -> 3072) with an RBF layer
+             layer.intermediate.dense = RBFLayer(
+                 in_features_dim=in_features,
+                 num_kernels=2,  # number of kernels in the RBF layer
+                 out_features_dim=intermediate_features,
+                 radial_function=gaussian_rbf,
+                 norm_function=euclidean_norm
+             )
+
+             # Replace the output dense layer (3072 -> 768) with an RBF layer
+             layer.output.dense = RBFLayer(
+                 in_features_dim=intermediate_features,
+                 num_kernels=2,
+                 out_features_dim=in_features,
+                 radial_function=gaussian_rbf,
+                 norm_function=euclidean_norm
+             )
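
A minimal usage sketch for the class above (not part of the commit itself), assuming this folder (config.json, vocab.txt, and the .py files) is the working directory; the prompt string is illustrative only:

import torch
from transformers import BertConfig, BertTokenizer
from CustomBertForMaskedLM import CustomBertForMaskedLM

# Build the architecture from the bundled config.json; trained weights
# would normally be loaded from model.safetensors instead.
config = BertConfig.from_pretrained(".")
tokenizer = BertTokenizer.from_pretrained(".")
model = CustomBertForMaskedLM(config)
model.eval()

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # [batch, seq_len, vocab_size]

# Decode the highest-scoring token at the [MASK] position
mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
print(tokenizer.decode(logits[0, mask_pos].argmax(dim=-1)))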
CustomRBFBert.py ADDED
@@ -0,0 +1,30 @@
+ import torch
+ import torch.nn as nn
+ from RBFLayer import RBFLayer
+
+
+ # Euclidean (L2) norm along the feature dimension
+ def l_norm(x, p=2):
+     return torch.norm(x, p=p, dim=-1)
+
+
+ # Gaussian RBF
+ def rbf_gaussian(x):
+     return (-x.pow(2)).exp()
+
+
+ class CustomRBFFeedForward(nn.Module):
+     def __init__(self, in_features, out_features, num_kernels):
+         super().__init__()
+         # RBFLayer from the implementation in this repo
+         self.rbf_layer = RBFLayer(
+             in_features_dim=in_features,    # input size (e.g., 896)
+             num_kernels=num_kernels,        # number of kernels (tunable)
+             out_features_dim=out_features,  # output size (e.g., 4864)
+             radial_function=rbf_gaussian,   # Gaussian RBF
+             norm_function=l_norm            # Euclidean norm
+         )
+
+     def forward(self, x):
+         # Apply the RBF layer to the input x
+         return self.rbf_layer(x)
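
A quick shape check for CustomRBFFeedForward (a sketch, not from the commit; the 896/4864 sizes just echo the comments above):

import torch
from CustomRBFBert import CustomRBFFeedForward

ffn = CustomRBFFeedForward(in_features=896, out_features=4864, num_kernels=2)
x = torch.randn(2, 16, 896)  # [batch, sequence, hidden]
print(ffn(x).shape)          # torch.Size([2, 16, 4864])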
RBFLayer.py ADDED
@@ -0,0 +1,101 @@
+ import torch
+ import torch.nn as nn
+ from typing import Callable, Optional
+
+
+ class RBFLayer(nn.Module):
+     def __init__(self,
+                  in_features_dim: int,
+                  num_kernels: int,
+                  out_features_dim: int,
+                  radial_function: Callable[[torch.Tensor], torch.Tensor],
+                  norm_function: Callable[[torch.Tensor], torch.Tensor],
+                  normalization: bool = True,
+                  initial_shape_parameter: Optional[torch.Tensor] = None,
+                  initial_centers_parameter: Optional[torch.Tensor] = None,
+                  initial_weights_parameters: Optional[torch.Tensor] = None,
+                  constant_shape_parameter: bool = False,
+                  constant_centers_parameter: bool = False,
+                  constant_weights_parameters: bool = False):
+         super().__init__()
+
+         self.in_features_dim = in_features_dim
+         self.num_kernels = num_kernels
+         self.out_features_dim = out_features_dim
+         self.radial_function = radial_function
+         self.norm_function = norm_function
+         self.normalization = normalization
+
+         self.initial_shape_parameter = initial_shape_parameter
+         self.constant_shape_parameter = constant_shape_parameter
+
+         self.initial_centers_parameter = initial_centers_parameter
+         self.constant_centers_parameter = constant_centers_parameter
+
+         self.initial_weights_parameters = initial_weights_parameters
+         self.constant_weights_parameters = constant_weights_parameters
+
+         self._make_parameters()
+
+     def _make_parameters(self) -> None:
+         # Initialize linear combination weights
+         if self.constant_weights_parameters:
+             self.weights = nn.Parameter(self.initial_weights_parameters, requires_grad=False)
+         else:
+             self.weights = nn.Parameter(torch.zeros(self.out_features_dim, self.num_kernels, dtype=torch.float32))
+
+         # Initialize the kernels' centers
+         if self.constant_centers_parameter:
+             self.kernels_centers = nn.Parameter(self.initial_centers_parameter, requires_grad=False)
+         else:
+             self.kernels_centers = nn.Parameter(torch.zeros(self.num_kernels, self.in_features_dim, dtype=torch.float32))
+
+         # Initialize the (log) shape parameters
+         if self.constant_shape_parameter:
+             self.log_shapes = nn.Parameter(self.initial_shape_parameter, requires_grad=False)
+         else:
+             self.log_shapes = nn.Parameter(torch.zeros(self.num_kernels, dtype=torch.float32))
+
+         self.reset()
+
+     def reset(self, upper_bound_kernels: float = 1.0, std_shapes: float = 0.1, gain_weights: float = 1.0) -> None:
+         if self.initial_centers_parameter is None:
+             nn.init.uniform_(self.kernels_centers, a=-upper_bound_kernels, b=upper_bound_kernels)
+
+         if self.initial_shape_parameter is None:
+             nn.init.normal_(self.log_shapes, mean=0.0, std=std_shapes)
+
+         if self.initial_weights_parameters is None:
+             nn.init.xavier_uniform_(self.weights, gain=gain_weights)
+
+     def forward(self, input: torch.Tensor) -> torch.Tensor:
+         """
+         Computes the output of the RBF layer given an input tensor.
+         Input has size [batch_size, sequence_length, in_features_dim].
+         """
+         batch_size = input.size(0)
+         sequence_length = input.size(1)
+
+         # Expand the centers to match the batch and sequence dimensions
+         c = self.kernels_centers.expand(batch_size, sequence_length, self.num_kernels, self.in_features_dim)
+
+         # Differences between each input position and each center
+         diff = input.unsqueeze(2) - c  # [batch_size, sequence_length, num_kernels, in_features_dim]
+
+         # Distances via the norm function
+         r = self.norm_function(diff)  # [batch_size, sequence_length, num_kernels]
+
+         # Scale distances by the (exponentiated) shape parameters
+         eps_r = self.log_shapes.exp().unsqueeze(0).unsqueeze(0) * r
+
+         # Apply the radial basis function (e.g., Gaussian)
+         rbfs = self.radial_function(eps_r)
+
+         # Optionally normalize the kernel activations so they sum to one
+         if self.normalization:
+             rbfs = rbfs / (1e-9 + rbfs.sum(dim=-1, keepdim=True))
+
+         # Linearly combine the kernel activations with the output weights
+         out = (self.weights.unsqueeze(0).unsqueeze(0) * rbfs.unsqueeze(2)).sum(dim=-1)  # [batch, seq, out_features_dim]
+
+         return out
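
For reference, the layer computes out_j(x) = sum_k w_jk * phi(eps_k * ||x - c_k||), with the kernel activations optionally normalized to sum to one. The sanity check below (a sketch, not part of the commit) reproduces forward() by hand:

import torch
from RBFLayer import RBFLayer

torch.manual_seed(0)
layer = RBFLayer(in_features_dim=4, num_kernels=3, out_features_dim=5,
                 radial_function=lambda x: torch.exp(-x ** 2),
                 norm_function=lambda x: torch.norm(x, p=2, dim=-1))
x = torch.randn(1, 2, 4)

# Manual recomputation: phi_k = exp(-(exp(log_shape_k) * ||x - c_k||)^2)
r = torch.norm(x.unsqueeze(2) - layer.kernels_centers, dim=-1)
phi = torch.exp(-(layer.log_shapes.exp() * r) ** 2)
phi = phi / (1e-9 + phi.sum(-1, keepdim=True))  # normalization=True by default
manual = phi @ layer.weights.t()                # [1, 2, 5]
print(torch.allclose(layer(x), manual))         # True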
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "google-bert/bert-base-uncased",
+   "architectures": [
+     "CustomBertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522,
+
+   "custom_layers": {
+     "use_rbf": true,
+     "rbf_num_kernels": 2,
+     "rbf_intermediate_size": 3072
+   }
+ }
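
Since custom_layers is not a standard BERT config field, PretrainedConfig keeps it as an extra attribute when the config is loaded; a sketch (not from the commit) of reading it back, assuming this folder is the working directory:

from transformers import BertConfig

config = BertConfig.from_pretrained(".")  # reads the config.json above
rbf_settings = config.custom_layers       # unknown keys become attributes
print(rbf_settings["use_rbf"], rbf_settings["rbf_num_kernels"])  # True 2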
generation_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "_from_model_config": true,
+   "pad_token_id": 0,
+   "transformers_version": "4.41.1"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc47217a75b4c60c273e782fa8855e06a91fb8dcebf301378e149221462519cd
+ size 212145392
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
vocab.txt ADDED
The diff for this file is too large to render. See raw diff