fukugawa committed on
Commit 745a8e9 · verified · 1 Parent(s): 01799ff

Upload FlaxTransformerLMForCausalLM

config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "architectures": [
+     "TransformerLMForCausalLM"
+   ],
+   "attention_dropout_rate": 0.1,
+   "auto_map": {
+     "AutoConfig": "configuration_transformerlm.TransformerLMConfig",
+     "FlaxAutoModelForCausalLM": "modeling_transformerlm_flax.FlaxTransformerLMForCausalLM"
+   },
+   "bos_token_id": 50256,
+   "decode": true,
+   "deterministic": true,
+   "dropout_rate": 0.1,
+   "emb_dim": 768,
+   "eos_token_id": 50256,
+   "logits_via_embedding": false,
+   "max_len": 512,
+   "mlp_dim": 3072,
+   "model_type": "transformerlm",
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_vocab_size": 30000,
+   "qkv_dim": 768,
+   "share_embeddings": false,
+   "tokenizer_class": "TransformerLMTokenizer",
+   "transformers_version": "4.38.2",
+   "vocab_size": 30000
+ }
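
The auto_map entries above register the custom config and model classes that ship with this repository, so the checkpoint can be loaded through the Auto classes with trust_remote_code=True. A minimal loading sketch (the repo id below is a placeholder, not part of this commit):

    # Hypothetical usage; "<repo_id>" stands in for wherever these files are hosted.
    from transformers import AutoConfig, FlaxAutoModelForCausalLM

    config = AutoConfig.from_pretrained("<repo_id>", trust_remote_code=True)
    model = FlaxAutoModelForCausalLM.from_pretrained("<repo_id>", trust_remote_code=True)
    print(config.num_layers, config.emb_dim)  # 12, 768 for this checkpoint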
configuration_transformerlm.py ADDED
@@ -0,0 +1,43 @@
+ from transformers import PretrainedConfig
+
+
+ class TransformerLMConfig(PretrainedConfig):
+     model_type = "transformerlm"
+
+     def __init__(
+         self,
+         vocab_size: int = 30000,
+         output_vocab_size: int = 30000,
+         share_embeddings: bool = False,
+         logits_via_embedding: bool = False,
+         emb_dim: int = 512,
+         num_heads: int = 8,
+         num_layers: int = 6,
+         qkv_dim: int = 512,
+         mlp_dim: int = 2048,
+         max_len: int = 2048,
+         dropout_rate: float = 0.1,
+         attention_dropout_rate: float = 0.1,
+         deterministic: bool = False,
+         decode: bool = False,
+         bos_token_id=50256,
+         eos_token_id=50256,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.output_vocab_size = output_vocab_size
+         self.share_embeddings = share_embeddings
+         self.logits_via_embedding = logits_via_embedding
+         self.emb_dim = emb_dim
+         self.num_heads = num_heads
+         self.num_layers = num_layers
+         self.qkv_dim = qkv_dim
+         self.mlp_dim = mlp_dim
+         self.max_len = max_len
+         self.dropout_rate = dropout_rate
+         self.attention_dropout_rate = attention_dropout_rate
+         self.deterministic = deterministic
+         self.decode = decode
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
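
Note that the defaults in TransformerLMConfig (emb_dim=512, num_heads=8, num_layers=6, max_len=2048) are smaller than the values stored in config.json above, which describe the actual checkpoint. A sketch of constructing the config by hand with this checkpoint's hyperparameters (not needed when loading with from_pretrained, which reads config.json; assumes the script runs next to the downloaded configuration_transformerlm.py):

    # Illustrative only; values copied from the config.json in this commit.
    from configuration_transformerlm import TransformerLMConfig

    config = TransformerLMConfig(
        vocab_size=30000,
        output_vocab_size=30000,
        emb_dim=768,
        num_heads=12,
        num_layers=12,
        qkv_dim=768,
        mlp_dim=3072,
        max_len=512,
        decode=True,
        deterministic=True,
    )
    assert config.model_type == "transformerlm"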
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f146d3ff30cefcdecb23e15f175395be65b13de37821c4d78b32feb8415f3666
+ size 524522413
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.38.2"
+ }
modeling_transformerlm_flax.py ADDED
@@ -0,0 +1,616 @@
+ # Copyright 2023 The Flax Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Callable, Any, Optional, Tuple
+
+ import jax
+ import jax.numpy as jnp
+ import numpy as np
+ from flax import linen as nn
+ from flax import struct
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+ from flax.traverse_util import flatten_dict, unflatten_dict
+ from jax import lax
+ from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
+ from transformers.modeling_flax_utils import FlaxPreTrainedModel
+
+ from .configuration_transformerlm import TransformerLMConfig
+
+
+ @struct.dataclass
+ class TransformerConfig:
+   """Global hyperparameters used to minimize obnoxious kwarg plumbing."""
+   vocab_size: int
+   output_vocab_size: int
+   share_embeddings: bool = False
+   logits_via_embedding: bool = False
+   dtype: Any = jnp.float32
+   emb_dim: int = 512
+   num_heads: int = 8
+   num_layers: int = 6
+   qkv_dim: int = 512
+   mlp_dim: int = 2048
+   max_len: int = 2048
+   dropout_rate: float = 0.1
+   attention_dropout_rate: float = 0.1
+   deterministic: bool = False
+   decode: bool = False
+   kernel_init: Callable = nn.initializers.xavier_uniform()
+   bias_init: Callable = nn.initializers.normal(stddev=1e-6)
+   posemb_init: Optional[Callable] = None
+
+
+ def shift_right(x, axis=1):
+   """Shift the input to the right by padding and slicing on axis."""
+   pad_widths = [(0, 0)] * len(x.shape)
+   pad_widths[axis] = (1, 0)
+   padded = jnp.pad(
+       x, pad_widths, mode='constant', constant_values=x.dtype.type(0))
+   return lax.dynamic_slice_in_dim(padded, 0, padded.shape[axis] - 1, axis)
+
+
+ def shift_inputs(x, segment_ids=None, axis=1):
+   """Shift inputs and replace EOS by 0 for packed inputs."""
+   shifted = shift_right(x, axis=axis)
+   # For packed targets, the first shifted token of a new sequence is made
+   # 0, rather than being the EOS token for the last sequence.
+   if segment_ids is not None:
+     shifted *= (segment_ids == shift_right(segment_ids, axis=axis))
+   return shifted
+
+
+ def sinusoidal_init(max_len=2048,
+                     min_scale=1.0,
+                     max_scale=10000.0):
+   """1D Sinusoidal Position Embedding Initializer.
+
+   Args:
+     max_len: maximum possible length for the input.
+     min_scale: float: minimum frequency-scale in sine grating.
+     max_scale: float: maximum frequency-scale in sine grating.
+
+   Returns:
+     output: init function returning `(1, max_len, d_feature)`
+   """
+
+   def init(key, shape, dtype=np.float32):
+     """Sinusoidal init."""
+     del key, dtype
+     d_feature = shape[-1]
+     pe = np.zeros((max_len, d_feature), dtype=np.float32)
+     position = np.arange(0, max_len)[:, np.newaxis]
+     scale_factor = -np.log(max_scale / min_scale) / (d_feature // 2 - 1)
+     div_term = min_scale * np.exp(np.arange(0, d_feature // 2) * scale_factor)
+     pe[:, :d_feature // 2] = np.sin(position * div_term)
+     pe[:, d_feature // 2: 2 * (d_feature // 2)] = np.cos(position * div_term)
+     pe = pe[np.newaxis, :, :]  # [1, max_len, d_feature]
+     return jnp.array(pe)
+
+   return init
+
+
+ class AddPositionEmbs(nn.Module):
+   """Adds (optionally learned) positional embeddings to the inputs.
+
+   Args:
+     config: TransformerConfig dataclass containing hyperparameters.
+     decode: whether to run in single-position autoregressive mode.
+   """
+   config: TransformerConfig
+   decode: bool = False
+
+   @nn.compact
+   def __call__(self,
+                inputs,
+                inputs_positions=None):
+     """Applies AddPositionEmbs module.
+
+     By default this layer uses a fixed sinusoidal embedding table. If a
+     learned position embedding is desired, pass an initializer to
+     posemb_init in the configuration.
+
+     Args:
+       inputs: input data.
+       inputs_positions: input position indices for packed sequences.
+
+     Returns:
+       output: `(bs, timesteps, in_dim)`
+     """
+     config = self.config
+     # inputs.shape is (batch_size, seq_len, emb_dim)
+     assert inputs.ndim == 3, ('Number of dimensions should be 3,'
+                               ' but it is: %d' % inputs.ndim)
+     length = inputs.shape[1]
+     pos_emb_shape = (1, config.max_len, inputs.shape[-1])
+     if config.posemb_init is None:
+       # Use a fixed (non-learned) sinusoidal position embedding.
+       pos_embedding = sinusoidal_init(max_len=config.max_len)(None,
+                                                               pos_emb_shape,
+                                                               None)
+     else:
+       pos_embedding = self.param('pos_embedding', config.posemb_init,
+                                  pos_emb_shape)
+     pe = pos_embedding[:, :length, :]
+
+     # We use a cache position index for tracking decoding position.
+     if self.decode:
+       is_initialized = self.has_variable('cache', 'cache_index')
+       cache_index = self.variable('cache', 'cache_index',
+                                   lambda: jnp.array(0, dtype=jnp.uint32))
+       if is_initialized:
+         i = cache_index.value
+         cache_index.value = i + 1
+         _, _, df = pos_embedding.shape
+         pe = lax.dynamic_slice(pos_embedding,
+                                jnp.array((0, i, 0)),
+                                (1, 1, df))
+     if inputs_positions is None:
+       # normal unpacked case:
+       return inputs + pe
+     else:
+       # for packed data we need to use known position indices:
+       return inputs + jnp.take(pe[0], inputs_positions, axis=0)
+
+
+ class MlpBlock(nn.Module):
+   """Transformer MLP / feed-forward block.
+
+   Args:
+     config: TransformerConfig dataclass containing hyperparameters.
+     out_dim: optionally specify out dimension.
+   """
+   config: TransformerConfig
+   out_dim: Optional[int] = None
+
+   @nn.compact
+   def __call__(self, inputs):
+     """Applies Transformer MlpBlock module."""
+     config = self.config
+     actual_out_dim = (inputs.shape[-1] if self.out_dim is None
+                       else self.out_dim)
+     x = nn.Dense(
+         config.mlp_dim,
+         dtype=config.dtype,
+         kernel_init=config.kernel_init,
+         bias_init=config.bias_init)(
+             inputs)
+     x = nn.relu(x)
+     x = nn.Dropout(rate=config.dropout_rate)(
+         x, deterministic=config.deterministic)
+     output = nn.Dense(
+         actual_out_dim,
+         dtype=config.dtype,
+         kernel_init=config.kernel_init,
+         bias_init=config.bias_init)(
+             x)
+     output = nn.Dropout(rate=config.dropout_rate)(
+         output, deterministic=config.deterministic)
+     return output
+
+
+ class EncoderDecoder1DBlock(nn.Module):
+   """Transformer encoder-decoder layer.
+
+   Args:
+     config: TransformerConfig dataclass containing hyperparameters.
+   """
+   config: TransformerConfig
+
+   @nn.compact
+   def __call__(self,
+                inputs,
+                decoder_mask=None,
+                encoder_decoder_mask=None):
+     """Applies EncoderDecoder1DBlock module.
+
+     Args:
+       inputs: input data for decoder
+       decoder_mask: decoder self-attention mask.
+       encoder_decoder_mask: encoder-decoder attention mask.
+
+     Returns:
+       output after transformer encoder-decoder block.
+     """
+     config = self.config
+
+     # Decoder block.
+     assert inputs.ndim == 3
+     x = nn.LayerNorm(dtype=config.dtype)(inputs)
+     x = nn.SelfAttention(
+         num_heads=config.num_heads,
+         dtype=config.dtype,
+         qkv_features=config.qkv_dim,
+         kernel_init=config.kernel_init,
+         bias_init=config.bias_init,
+         use_bias=False,
+         broadcast_dropout=False,
+         dropout_rate=config.attention_dropout_rate,
+         deterministic=config.deterministic,
+         decode=config.decode)(x, decoder_mask)
+     x = nn.Dropout(rate=config.dropout_rate)(
+         x, deterministic=config.deterministic)
+     x = x + inputs
+
+     # MLP block.
+     z = nn.LayerNorm(dtype=config.dtype)(x)
+     z = MlpBlock(config=config)(z)
+
+     return x + z
+
+
+ # Copyright 2021 The Eleuther AI and The Google Flax Team Authors and The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ class FlaxTransformerLMPreTrainedModel(FlaxPreTrainedModel):
+     config_class = TransformerLMConfig
+     base_model_prefix = "decoder"
+     module_class: nn.Module = None
+
+     def __init__(
+         self,
+         config: TransformerLMConfig,
+         input_shape: Tuple = (1, 1),
+         seed: int = 0,
+         dtype: jnp.dtype = jnp.bfloat16,
+         _do_init: bool = True,
+         **kwargs,
+     ):
+         module = self.module_class(config=config, dtype=dtype, **kwargs)
+         super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+     def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+         # init input tensors
+         input_ids = jnp.zeros(input_shape, dtype="i4")
+         attention_mask = jnp.ones_like(input_ids)
+         position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
+         params_rng, dropout_rng = jax.random.split(rng)
+         rngs = {"params": params_rng, "dropout": dropout_rng}
+
+         random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"]
+
+         if params is not None:
+             random_params = flatten_dict(unfreeze(random_params))
+             params = flatten_dict(unfreeze(params))
+             for missing_key in self._missing_keys:
+                 params[missing_key] = random_params[missing_key]
+             self._missing_keys = set()
+             return freeze(unflatten_dict(params))
+         else:
+             return random_params
+
+     def init_cache(self, batch_size, max_length):
+         r"""
+         Args:
+             batch_size (`int`):
+                 batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+             max_length (`int`):
+                 maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+                 cache.
+         """
+         # init input variables to retrieve cache
+         input_ids = jnp.ones((batch_size, max_length))
+         attention_mask = jnp.ones_like(input_ids)
+         position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+         init_variables = self.module.init(
+             jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
+         )
+         return unfreeze(init_variables["cache"])
+
+     def __call__(
+         self,
+         input_ids,
+         attention_mask=None,
+         position_ids=None,
+         params: dict = None,
+         past_key_values: dict = None,
+         dropout_rng: jax.random.PRNGKey = None,
+         train: bool = False,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ):
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+         batch_size, sequence_length = input_ids.shape
+
+         if position_ids is None:
+             if past_key_values is not None:
+                 raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
+
+             position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+         if attention_mask is None:
+             attention_mask = jnp.ones((batch_size, sequence_length))
+
+         # Handle any PRNG if needed
+         rngs = {}
+         if dropout_rng is not None:
+             rngs["dropout"] = dropout_rng
+
+         inputs = {"params": params or self.params}
+
+         # If past_key_values are passed, the cache is already initialized and the private flag init_cache
+         # has to be passed down to ensure the cache is used. The cache must also be marked as mutable
+         # so that it can be updated by the attention modules.
+         if past_key_values:
+             inputs["cache"] = past_key_values
+             mutable = ["cache"]
+         else:
+             mutable = False
+
+         if input_ids.shape[1] > 1:
+             input_ids = jnp.insert(input_ids, 0, 0, axis=1)  # Insert 0 at the beginning of the prompt
+
+         # Progressive cache loop
+         if self.module.use_cache:
+             batch_size, seq_length = input_ids.shape
+             shape = (batch_size, seq_length, self.module.config.vocab_size)
+             logits = jnp.zeros(shape, dtype=self.dtype)
+
+             def loop_body_fn(i, state):
+                 logits, cache = state
+                 input_id = lax.dynamic_slice(input_ids, (0, i), (input_ids.shape[0], 1))
+                 output = self.module.apply(
+                     {
+                         "params": inputs["params"],
+                         "cache": cache
+                     },
+                     jnp.array(input_id, dtype="i4"),
+                     jnp.array(attention_mask, dtype="i4"),
+                     jnp.array(position_ids, dtype="i4"),
+                     not train,
+                     False,
+                     output_attentions,
+                     output_hidden_states,
+                     return_dict,
+                     rngs=rngs,
+                     mutable=mutable,
+                 )
+                 lm_output, new_vars = output
+                 logits = logits.at[:, i, :].set(lm_output.logits.squeeze(1))
+                 return logits, new_vars["cache"]
+
+             cache = freeze(inputs["cache"])
+             initial_state = (logits, cache)
+             lm_logits, lm_cache = lax.fori_loop(0, seq_length, loop_body_fn, initial_state)
+
+             if seq_length > 1:
+                 lm_logits = lm_logits[:, 1:, :]  # Ignore leading zeros in prompts
+
+             lm_cache = {"cache": lm_cache}
+
+             if not return_dict:
+                 outputs = (lm_logits,) + lm_cache["cache"]
+             else:
+                 outputs = (FlaxCausalLMOutput(logits=lm_logits, hidden_states=None, attentions=None), lm_cache)
+         else:
+             output = self.module.apply(
+                 inputs,
+                 jnp.array(input_ids, dtype="i4"),
+                 jnp.array(attention_mask, dtype="i4"),
+                 jnp.array(position_ids, dtype="i4"),
+                 not train,
+                 False,
+                 output_attentions,
+                 output_hidden_states,
+                 return_dict,
+                 rngs=rngs,
+                 mutable=mutable,
+             )
+             lm_logits = output.logits
+             if input_ids.shape[1] > 1:
+                 lm_logits = lm_logits[:, 1:, :]  # Ignore leading zeros in prompts
+
+             if not return_dict:
+                 outputs = (lm_logits,) + output[1:]
+             else:
+                 outputs = FlaxCausalLMOutput(logits=lm_logits, hidden_states=output.hidden_states, attentions=output.attentions)
+
+         # add updated cache to model output
+         if past_key_values is not None and return_dict:
+             outputs, past_key_values = outputs
+             outputs["past_key_values"] = unfreeze(past_key_values["cache"])
+             return outputs
+         elif past_key_values is not None and not return_dict:
+             outputs, past_key_values = outputs
+             outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
+
+         return outputs
+
+
+ class FlaxTransformerLMModule(nn.Module):
+     config: TransformerConfig
+
+     def setup(self):
+         config = self.config
+         self.output_embed = nn.Embed(
+             num_embeddings=config.output_vocab_size,
+             features=config.emb_dim,
+             embedding_init=nn.initializers.normal(stddev=1.0),
+             name='Embed_0'
+         )
+         self.pos_embed = AddPositionEmbs(config=config, decode=config.decode, name='posembed_output')
+         self.dropout = nn.Dropout(rate=config.dropout_rate)
+         self.h_layers = [EncoderDecoder1DBlock(config=config, name=f'encoderdecoderblock_{i}')
+                          for i in range(config.num_layers)]
+         self.ln_f = nn.LayerNorm(dtype=config.dtype, name='encoderdecoder_norm')
+
+     @nn.compact
+     def __call__(
+         self,
+         input_ids,
+         attention_mask,
+         position_ids,
+         deterministic=True,
+         init_cache: bool = False,
+         output_attentions: bool = False,
+         output_hidden_states: bool = False,
+         return_dict: bool = True,
+     ):
+         config = self.config
+
+         y = input_ids.astype('int32')
+
+         y = self.output_embed(y)
+         y = self.pos_embed(y, inputs_positions=position_ids)
+         y = self.dropout(y, deterministic=config.deterministic)
+         y = y.astype(config.dtype)
+
+         for h in self.h_layers:
+             y = h(y, decoder_mask=attention_mask, encoder_decoder_mask=None)
+
+         outputs = (y, None, None)
+
+         hidden_states = outputs[0]
+         hidden_states = self.ln_f(hidden_states)
+
+         if output_hidden_states:
+             all_hidden_states = outputs[1] + (hidden_states,)
+             outputs = (hidden_states, all_hidden_states) + outputs[2:]
+         else:
+             outputs = (hidden_states,) + outputs[1:]
+
+         if not return_dict:
+             return tuple(v for v in outputs if v is not None)
+
+         return FlaxBaseModelOutput(
+             last_hidden_state=hidden_states,
+             hidden_states=outputs[1],
+             attentions=outputs[-1],
+         )
+
+
+ class FlaxTransformerLMModel(FlaxTransformerLMPreTrainedModel):
+     module_class = FlaxTransformerLMModule
+
+
+ class FlaxTransformerLMForCausalLMModule(nn.Module):
+     config: TransformerLMConfig
+     dtype: jnp.dtype = jnp.bfloat16
+     kernel_init: Callable = nn.initializers.xavier_uniform()
+     bias_init: Callable = nn.initializers.normal(stddev=1e-6)
+     posemb_init: Callable = None
+     use_cache = False
+
+     def convert_config(self, cfg: TransformerLMConfig):
+         return TransformerConfig(
+             vocab_size=cfg.vocab_size,
+             output_vocab_size=cfg.vocab_size,
+             logits_via_embedding=cfg.logits_via_embedding,
+             dtype=self.dtype,
+             emb_dim=cfg.emb_dim,
+             num_heads=cfg.num_heads,
+             num_layers=cfg.num_layers,
+             qkv_dim=cfg.qkv_dim,
+             mlp_dim=cfg.mlp_dim,
+             max_len=cfg.max_len,
+             dropout_rate=cfg.dropout_rate,
+             attention_dropout_rate=cfg.attention_dropout_rate,
+             deterministic=cfg.deterministic,
+             decode=cfg.decode and self.use_cache,
+             kernel_init=self.kernel_init,
+             bias_init=self.bias_init,
+             posemb_init=self.posemb_init,
+         )
+
+     def setup(self):
+         config_ext = self.convert_config(self.config)
+         self.transformer = FlaxTransformerLMModule(config_ext, name='decoder')
+         self.lm_head = nn.Dense(
+             self.config.output_vocab_size,
+             dtype=self.dtype,
+             kernel_init=self.kernel_init,
+             bias_init=self.bias_init,
+             name='logitdense',
+         )
+
+     @nn.compact
+     def __call__(
+         self,
+         input_ids,
+         attention_mask,
+         position_ids,
+         deterministic: bool = True,
+         init_cache: bool = False,
+         output_attentions: bool = False,
+         output_hidden_states: bool = False,
+         return_dict: bool = True,
+     ):
+         decoder_mask = None
+         inputs_positions = None
+
+         outputs = self.transformer(
+             input_ids,
+             decoder_mask,
+             inputs_positions,
+             deterministic=deterministic,
+             init_cache=init_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = outputs[0]
+         lm_logits = self.lm_head(hidden_states)
+
+         if not return_dict:
+             return (lm_logits,) + outputs[1:]
+
+         return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
+
+
+ class FlaxTransformerLMForCausalLM(FlaxTransformerLMPreTrainedModel):
+     module_class = FlaxTransformerLMForCausalLMModule
+
+     def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
+
+         self.module_class.use_cache = True
+
+         # initializing the cache
+         batch_size, seq_length = input_ids.shape
+
+         past_key_values = self.init_cache(batch_size, max_length)
+         # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+         # But since the decoder uses a causal mask, those positions are masked anyway.
+         # Thus we can create a single static attention_mask here, which is more efficient for compilation.
+         extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+         if attention_mask is not None:
+             position_ids = attention_mask.cumsum(axis=-1) - 1
+             extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
+         else:
+             position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
+
+         return {
+             "past_key_values": past_key_values,
+             "attention_mask": extended_attention_mask,
+             "position_ids": position_ids,
+         }
+
+     def update_inputs_for_generation(self, model_outputs, model_kwargs):
+         model_kwargs["past_key_values"] = model_outputs.past_key_values
+         model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
+         return model_kwargs
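
FlaxTransformerLMForCausalLM plugs the decoder's Flax cache into the transformers generation loop: prepare_inputs_for_generation() calls init_cache() and builds a static attention mask, and update_inputs_for_generation() advances position_ids between steps. An end-to-end sketch, assuming the repo id is a placeholder and that the tokenizer declared as TransformerLMTokenizer in config.json is available from the same repository:

    # Hypothetical usage; "<repo_id>" is not part of this commit.
    import jax.numpy as jnp
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("<repo_id>", trust_remote_code=True)
    model = FlaxAutoModelForCausalLM.from_pretrained("<repo_id>", trust_remote_code=True)

    inputs = tokenizer("example prompt", return_tensors="np")
    # generate() drives the progressive cache loop in __call__ one token at a time.
    output = model.generate(jnp.asarray(inputs["input_ids"]), max_length=64, do_sample=True)
    print(tokenizer.decode(output.sequences[0], skip_special_tokens=True))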