benjamin committed on
Commit 1d4dd5f · verified · 1 Parent(s): c351757

Upload ZettHypernet

Files changed (4)
  1. config.json +17 -16
  2. configuration_hypernet.py +56 -0
  3. model.safetensors +3 -0
  4. modeling_hypernet.py +267 -0
config.json CHANGED
@@ -1,12 +1,15 @@
 {
-  "_name_or_path": "meta-llama/Meta-Llama-3-8B",
+  "_name_or_path": "mistralai/Mistral-7B-v0.1",
   "architectures": [
-    "LlamaForCausalLM"
+    "ZettHypernet"
   ],
-  "attention_bias": false,
   "attention_dropout": 0.0,
-  "bos_token_id": 128000,
-  "eos_token_id": 128001,
+  "auto_map": {
+    "AutoConfig": "configuration_hypernet.ZettHypernetConfig",
+    "AutoModel": "modeling_hypernet.ZettHypernet"
+  },
+  "bos_token_id": 1,
+  "eos_token_id": 2,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "hn_add_inter_token_attention": false,
@@ -21,7 +24,7 @@
   "hn_language_adapter_bottleneck_dim": 0,
   "hn_model_name_or_path": "roberta-base",
   "hn_model_type": "roberta",
-  "hn_n_extra_tokens": 0,
+  "hn_n_extra_tokens": 522,
   "hn_n_inter_token_blocks": 16,
   "hn_n_layers": 3,
   "hn_num_attention_heads": 32,
@@ -31,24 +34,22 @@
   "hn_surface_maxlen": 7,
   "initializer_range": 0.02,
   "intermediate_size": 14336,
-  "max_position_embeddings": 8192,
-  "model_type": "llama",
+  "max_position_embeddings": 32768,
   "n_embd": 4096,
   "n_langs": 7,
-  "name": "v7:llama3-8b_en+code:lw=0.5_long",
+  "name": "v7:mistral7b_en+code:lw=0.5_long",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
   "num_key_value_heads": 8,
-  "original_vocab_size": 128256,
-  "pad_token_id": 128001,
-  "pretraining_tp": 1,
+  "original_vocab_size": 32000,
+  "pad_token_id": 2,
   "rms_norm_eps": 1e-05,
-  "rope_scaling": null,
-  "rope_theta": 500000.0,
+  "rope_theta": 10000.0,
   "separate_out_embeddings": true,
+  "sliding_window": 4096,
   "tie_word_embeddings": false,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.41.0.dev0",
+  "torch_dtype": "float32",
+  "transformers_version": "4.42.3",
   "use_cache": true,
   "use_unigram_bias": true,
   "vocab_size": 32896
configuration_hypernet.py ADDED
@@ -0,0 +1,56 @@
+from transformers import PretrainedConfig
+
+class ZettHypernetConfig(PretrainedConfig):
+    def __init__(
+        self,
+        hn_model_name_or_path: str = "roberta-base",
+        hn_surface_maxlen: int = 16,
+        hn_n_layers: int = 3,
+        n_embd: int = 768,
+        hn_hidden_size: int = None,
+        hn_intermediate_size: int = None,
+        hn_rescale_embeddings: bool = False,
+        use_unigram_bias: bool = False,
+        hn_embed_target_priors: bool = False,
+        hn_add_inter_token_attention: bool = False,
+        hn_inter_token_attention_bias_by_priors: bool = False,
+        hn_inter_token_attention_bias_scaler: float = 1.0,
+        hn_n_inter_token_blocks: int = 16,
+        hn_language_adapter_bottleneck_dim: int = 0,
+        hn_embed_using_source_embeddings: bool = False,
+        hn_concat_last_hidden_state: bool = False,
+        hn_single_head: bool = False,
+        hn_predict_bias: bool = True,
+        hn_num_attention_heads: int = None,
+        hn_embed_lang_id: bool = False,
+        hn_model_type: str = "roberta",
+        n_langs: int = None,  # set in train.py
+        **kwargs
+    ):
+        super().__init__(**kwargs)
+
+        self.model_type = "zett_hypernetwork"
+        self.hn_model_name_or_path = hn_model_name_or_path
+        self.hn_surface_maxlen = hn_surface_maxlen
+        self.hn_n_layers = hn_n_layers
+        self.n_embd = n_embd
+        self.hn_hidden_size = hn_hidden_size
+        self.hn_intermediate_size = hn_intermediate_size
+        self.hn_rescale_embeddings = hn_rescale_embeddings
+        self.use_unigram_bias = use_unigram_bias
+        self.hn_embed_target_priors = hn_embed_target_priors
+        self.hn_add_inter_token_attention = hn_add_inter_token_attention
+        self.hn_inter_token_attention_bias_by_priors = (
+            hn_inter_token_attention_bias_by_priors
+        )
+        self.hn_inter_token_attention_bias_scaler = hn_inter_token_attention_bias_scaler
+        self.hn_n_inter_token_blocks = hn_n_inter_token_blocks
+        self.hn_language_adapter_bottleneck_dim = hn_language_adapter_bottleneck_dim
+        self.hn_embed_using_source_embeddings = hn_embed_using_source_embeddings
+        self.hn_concat_last_hidden_state = hn_concat_last_hidden_state
+        self.hn_single_head = hn_single_head
+        self.hn_predict_bias = hn_predict_bias
+        self.hn_num_attention_heads = hn_num_attention_heads
+        self.hn_embed_lang_id = hn_embed_lang_id
+        self.hn_model_type = hn_model_type
+        self.n_langs = n_langs
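
For reference, a short sketch of constructing this config in Python, using only values that appear in the config.json diff above (the file is assumed to be importable from the working directory; all other fields keep their defaults):

    from configuration_hypernet import ZettHypernetConfig

    cfg = ZettHypernetConfig(
        hn_model_name_or_path="roberta-base",
        hn_model_type="roberta",
        hn_surface_maxlen=7,
        hn_n_layers=3,
        hn_num_attention_heads=32,
        hn_n_inter_token_blocks=16,
        n_embd=4096,
        n_langs=7,
        use_unigram_bias=True,
        hn_n_extra_tokens=522,  # not an explicit __init__ argument; stored on the config via **kwargs
    )
    print(cfg.hn_surface_maxlen, cfg.hn_n_extra_tokens)  # 7 522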
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58ff19794dc856869f1c6a52df63ad0573d1081a2861929e7c48ae1634481af5
+size 2710971844
modeling_hypernet.py ADDED
@@ -0,0 +1,267 @@
+from .configuration_hypernet import ZettHypernetConfig
+from transformers import PreTrainedModel, RobertaConfig, RobertaModel
+from functools import partial
+
+from torch import nn as nn
+import torch
+from torch.nn import functional as F
+
+class Rescaler(nn.Module):
+    def __init__(self, dim: int):
+        super().__init__()
+
+        self.dim = dim
+
+        self.w = nn.Parameter(torch.ones((1, self.dim)), requires_grad=False)
+        self.b = nn.Parameter(torch.ones((1, self.dim)), requires_grad=False)
+
+    def __call__(self, x):
+        return self.w * x + self.b
+
+
+class ProjectorBlock(nn.Module):
+    def __init__(self, input_dim: int, dim: int, intermediate_dim: int):
+        super().__init__()
+
+        self.input_dim = input_dim
+        self.dim = dim
+        self.intermediate_dim = intermediate_dim
+
+        self.dense1 = nn.Linear(self.input_dim, self.intermediate_dim)
+        self.dense2 = nn.Linear(self.intermediate_dim, self.dim)
+
+        self.ln = nn.LayerNorm(self.dim, eps=1e-6)
+
+    def __call__(self, x):
+        h = F.gelu(
+            self.dense2(F.gelu(self.dense1(x), approximate="tanh")),
+            approximate="tanh",
+        )
+        return self.ln(h + x)
+
+
+class ZettHypernet(PreTrainedModel):
+    config_class = ZettHypernetConfig
+
+    def __init__(self, config: ZettHypernetConfig):
+        super().__init__(config)
+
+        self.config = config
+        self.has_separate_out_embeddings = getattr(
+            self.config, "separate_out_embeddings", False
+        )
+
+        if self.config.hn_embed_lang_id:
+            self.lang_embeddings = nn.Embedding(
+                self.config.n_langs, self.config.hn_hidden_size
+            )
+
+        if self.has_separate_out_embeddings:
+            n_in_embd = self.config.n_embd * 2
+            n_out_embd = self.config.n_embd
+        else:
+            n_in_embd = self.config.n_embd
+            n_out_embd = self.config.n_embd
+
+        if self.config.hn_model_type == "roberta":
+            config = RobertaConfig.from_pretrained(
+                self.config.hn_model_name_or_path
+            )
+            config.num_hidden_layers = self.config.hn_n_layers
+            config.hidden_size = self.config.hn_hidden_size
+            config.intermediate_size = self.config.hn_intermediate_size
+            if getattr(self.config, "hn_num_attention_heads", None) is None:
+                self.config.hn_num_attention_heads = self.config.hn_hidden_size // 64
+            config.num_attention_heads = self.config.hn_num_attention_heads
+            self.embed_init_range = config.initializer_range
+            module_class = partial(RobertaModel, add_pooling_layer=False)
+        elif self.config.hn_model_type == "t5":
+            raise NotImplementedError()
+
+        if self.config.hn_embed_using_source_embeddings:
+            # do not need to alloc embeddings since inputs_embeds is always used
+            config.vocab_size = self.config.pad_token_id + 1
+
+        if (
+            self.config.hn_add_inter_token_attention
+            or self.config.hn_embed_target_priors
+        ):
+            raise NotImplementedError()
+
+        self.pad_token_id = self.config.pad_token_id
+        assert self.pad_token_id is not None
+        self.model = module_class(config)
+
+        # need at least one embedding
+        self.fallback_embeddings = nn.Embedding(
+            max(self.config.hn_n_extra_tokens, 1), n_in_embd
+        )
+
+        if self.config.hn_embed_using_source_embeddings:
+            self.input_projection = nn.Sequential(
+                *[
+                    nn.Linear(n_in_embd, self.config.hn_hidden_size),
+                    ProjectorBlock(
+                        self.config.hn_hidden_size,
+                        self.config.hn_hidden_size,
+                        self.config.hn_intermediate_size,
+                    ),
+                ]
+            )
+
+        if self.config.hn_single_head:
+            self.output_projection = nn.Sequential(
+                *[
+                    ProjectorBlock(
+                        self.config.hn_hidden_size,
+                        self.config.hn_hidden_size,
+                        self.config.hn_intermediate_size,
+                    ),
+                    nn.Linear(self.config.hn_hidden_size, n_in_embd),
+                ]
+            )
+        else:
+            self.output_projection = nn.Sequential(
+                *[
+                    ProjectorBlock(
+                        self.config.hn_hidden_size,
+                        self.config.hn_hidden_size,
+                        self.config.hn_intermediate_size,
+                    ),
+                    nn.Linear(self.config.hn_hidden_size, n_out_embd),
+                ]
+            )
+            if self.has_separate_out_embeddings:
+                self.output_projection_out = nn.Sequential(
+                    *[
+                        ProjectorBlock(
+                            self.config.hn_hidden_size,
+                            self.config.hn_hidden_size,
+                            self.config.hn_intermediate_size,
+                        ),
+                        nn.Linear(self.config.hn_hidden_size, self.config.n_embd),
+                    ]
+                )
+
+        if self.config.hn_rescale_embeddings:
+            self.in_scaler = Rescaler(n_in_embd)
+            self.scaler = Rescaler(n_out_embd)
+
+            if self.has_separate_out_embeddings:
+                self.out_scaler = Rescaler(self.config.n_embd)
+
+        if getattr(self.config, "hn_predict_bias", False):
+            self.bias_projection = nn.Linear(self.config.hn_hidden_size, 1)
+
+    def __call__(
+        self,
+        target_surface_forms,
+        target_priors=None,
+        source_embeddings=None,
+        lang_index=None,
+        deterministic: bool = True,
+    ):
+        if target_priors is not None:
+            raise NotImplementedError()
+
+        if not self.config.hn_embed_using_source_embeddings:
+            raise NotImplementedError()
+
+        use_fallback = target_surface_forms >= self.config.original_vocab_size
+
+        main_ids = torch.minimum(
+            target_surface_forms, torch.tensor(self.config.original_vocab_size - 1, device=self.device)
+        )
+        fallback_ids = torch.maximum(
+            target_surface_forms - self.config.original_vocab_size, torch.tensor(0, device=self.device)
+        )
+
+        source_embeds = F.embedding(main_ids, weight=source_embeddings)
+
+        if self.config.hn_rescale_embeddings:
+            source_embeds = self.in_scaler(source_embeds)
+
+        inputs_embeds = torch.where(
+            use_fallback[..., None],
+            self.fallback_embeddings(fallback_ids),
+            source_embeds,
+        )
+        inputs_embeds = self.input_projection(inputs_embeds)
+        attention_mask = target_surface_forms != self.pad_token_id
+
+        if self.config.hn_embed_lang_id:
+            lang_embedding = self.lang_embeddings(lang_index).squeeze()
+            # position embed and type embed are added afterwards only in PT version so we need to subtract them here
+            lang_embedding -= self.model.embeddings.token_type_embeddings(
+                torch.tensor(0, device=self.device)
+            ) + self.model.embeddings.position_embeddings(
+                torch.tensor(attention_mask.shape[1], device=self.device)
+            )
+
+            lang_embedding = lang_embedding[None, None, :].expand(
+                inputs_embeds.shape[0], -1, -1
+            )
+
+            inputs_embeds = torch.cat(
+                [
+                    inputs_embeds,
+                    lang_embedding,
+                ],
+                axis=1,
+            )
+            attention_mask = torch.cat(
+                [
+                    attention_mask,
+                    torch.ones(lang_embedding.shape[:-1], dtype=torch.bool, device=self.device),
+                ],
+                axis=1,
+            )
+
+        position_ids = torch.broadcast_to(
+            torch.arange(torch.atleast_2d(attention_mask).shape[-1], device=self.device),
+            attention_mask.shape,
+        )
+
+        hidden_states = self.model(
+            inputs_embeds=inputs_embeds,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+        ).last_hidden_state
+
+        if self.config.hn_concat_last_hidden_state:
+            hidden_states = hidden_states.reshape(target_surface_forms.shape[0], -1)
+        else:
+            hidden_states = hidden_states[:, 0]
+
+        predicted_embeddings = self.output_projection(hidden_states)
+
+        if self.config.hn_single_head:
+            predicted_embeddings_in = predicted_embeddings[..., : self.config.n_embd]
+
+            if self.has_separate_out_embeddings:
+                predicted_embeddings_out = predicted_embeddings[
+                    ..., self.config.n_embd :
+                ]
+            else:
+                predicted_embeddings_out = None
+        else:
+            predicted_embeddings_in = predicted_embeddings
+            if self.has_separate_out_embeddings:
+                predicted_embeddings_out = self.output_projection_out(hidden_states)
+            else:
+                predicted_embeddings_out = None
+
+        if self.config.hn_rescale_embeddings:
+            predicted_embeddings_in = self.scaler(predicted_embeddings_in)
+
+            if predicted_embeddings_out is not None:
+                predicted_embeddings_out = self.out_scaler(predicted_embeddings_out)
+
+        if getattr(self.config, "hn_predict_bias", False):
+            predicted_bias = self.bias_projection(hidden_states)[..., 0]
+        else:
+            predicted_bias = torch.zeros_like(
+                target_surface_forms[..., 0], dtype=self.dtype
+            )
+
+        return predicted_embeddings_in, predicted_embeddings_out, predicted_bias
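
The hypernetwork's forward pass maps surface forms of target tokens (given as ids in the source tokenizer) plus the source model's embedding matrix to predicted input embeddings, optional output embeddings, and a per-token bias. A runnable toy sketch follows; all sizes are illustrative rather than taken from the checkpoint, and the package directory name is a placeholder chosen so the relative import in modeling_hypernet.py resolves:

    import torch
    # "zett_checkpoint" is a placeholder directory holding the two uploaded .py files
    from zett_checkpoint.configuration_hypernet import ZettHypernetConfig
    from zett_checkpoint.modeling_hypernet import ZettHypernet

    # toy config: tiny sizes so the sketch runs on CPU; the real values live in config.json
    cfg = ZettHypernetConfig(
        hn_model_name_or_path="roberta-base",
        hn_surface_maxlen=7,
        hn_n_layers=1,
        n_embd=16,
        hn_hidden_size=64,
        hn_intermediate_size=128,
        hn_embed_using_source_embeddings=True,
        hn_predict_bias=True,
        pad_token_id=2,
        original_vocab_size=100,   # forwarded via **kwargs onto the config
        hn_n_extra_tokens=4,       # likewise via **kwargs
        separate_out_embeddings=False,
    )
    model = ZettHypernet(cfg)

    # each target token is described by up to hn_surface_maxlen ids from the source tokenizer
    target_surface_forms = torch.randint(0, 100, (5, 7))
    source_embeddings = torch.randn(100, 16)  # source model's input embedding matrix

    emb_in, emb_out, bias = model(target_surface_forms, source_embeddings=source_embeddings)
    print(emb_in.shape, emb_out, bias.shape)  # torch.Size([5, 16]) None torch.Size([5])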