fukugawa committed on
Commit 63f7abe · verified · 1 Parent(s): d831d9c

Upload FlaxTransformerLMForCausalLM
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "architectures": [
+     "TransformerLMForCausalLM"
+   ],
+   "attention_dropout_rate": 0.1,
+   "auto_map": {
+     "AutoConfig": "configuration_transformerlm.TransformerLMConfig",
+     "FlaxAutoModelForCausalLM": "modeling_transformerlm_flax.FlaxTransformerLMForCausalLM"
+   },
+   "bos_token_id": 50256,
+   "decode": true,
+   "deterministic": true,
+   "dropout_rate": 0.1,
+   "emb_dim": 2048,
+   "eos_token_id": 50256,
+   "logits_via_embedding": false,
+   "max_len": 1024,
+   "mlp_dim": 8192,
+   "model_type": "transformerlm",
+   "num_heads": 16,
+   "num_layers": 18,
+   "output_vocab_size": 30000,
+   "qkv_dim": 2048,
+   "share_embeddings": false,
+   "tokenizer_class": "TransformerLMTokenizer",
+   "transformers_version": "4.39.0",
+   "vocab_size": 30000
+ }
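
Note: the auto_map above routes AutoConfig and FlaxAutoModelForCausalLM to the custom modules in this repository, so loading requires trust_remote_code=True. A minimal loading sketch; the repo id used below is an assumption, substitute the repository this commit belongs to:

from transformers import AutoConfig, FlaxAutoModelForCausalLM

# Assumed repo id; replace with the actual repository.
repo_id = "fukugawa/transformer-lm-japanese"
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = FlaxAutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)
print(config.num_layers, config.emb_dim)  # 18 2048, per the config.json above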
configuration_transformerlm.py ADDED
@@ -0,0 +1,43 @@
+ from transformers import PretrainedConfig
+
+
+ class TransformerLMConfig(PretrainedConfig):
+     model_type = "transformerlm"
+
+     def __init__(
+         self,
+         vocab_size: int = 30000,
+         output_vocab_size: int = 30000,
+         share_embeddings: bool = False,
+         logits_via_embedding: bool = False,
+         emb_dim: int = 512,
+         num_heads: int = 8,
+         num_layers: int = 6,
+         qkv_dim: int = 512,
+         mlp_dim: int = 2048,
+         max_len: int = 2048,
+         dropout_rate: float = 0.1,
+         attention_dropout_rate: float = 0.1,
+         deterministic: bool = False,
+         decode: bool = False,
+         bos_token_id=50256,
+         eos_token_id=50256,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.output_vocab_size = output_vocab_size
+         self.share_embeddings = share_embeddings
+         self.logits_via_embedding = logits_via_embedding
+         self.emb_dim = emb_dim
+         self.num_heads = num_heads
+         self.num_layers = num_layers
+         self.qkv_dim = qkv_dim
+         self.mlp_dim = mlp_dim
+         self.max_len = max_len
+         self.dropout_rate = dropout_rate
+         self.attention_dropout_rate = attention_dropout_rate
+         self.deterministic = deterministic
+         self.decode = decode
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
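
Note: the class defaults above differ from the checkpoint's config.json (for example emb_dim 512 vs. 2048, max_len 2048 vs. 1024); the values stored in config.json take precedence when loading from the Hub. A minimal sketch of constructing the config directly with the checkpoint's hyperparameters, assuming this file is importable as a local module:

from configuration_transformerlm import TransformerLMConfig  # local import; adjust the path as needed

config = TransformerLMConfig(
    vocab_size=30000,
    emb_dim=2048,
    num_heads=16,
    num_layers=18,
    qkv_dim=2048,
    mlp_dim=8192,
    max_len=1024,
    deterministic=True,
    decode=True,
)
assert config.model_type == "transformerlm"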
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5722a40310b30e3ef3c3e3f5bfef62d0075b9b9e1590f18ca87c5a790a4b1d49
+ size 4116870925
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.39.0"
+ }
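
Note: generate() reads the default bos/eos token ids from this file. A small sketch, assuming the same repo id as above:

from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("fukugawa/transformer-lm-japanese")  # repo id is an assumption
print(gen_config.bos_token_id, gen_config.eos_token_id)  # 50256 50256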
modeling_transformerlm_flax.py ADDED
@@ -0,0 +1,618 @@
+ # Copyright 2023 The Flax Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Callable, Any, Optional, Tuple
+
+ import jax
+ import jax.numpy as jnp
+ import numpy as np
+ from flax import linen as nn
+ from flax import struct
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+ from flax.traverse_util import flatten_dict, unflatten_dict
+ from jax import lax
+ from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
+ from transformers.modeling_flax_utils import FlaxPreTrainedModel
+
+ from .configuration_transformerlm import TransformerLMConfig
+
+
+ @struct.dataclass
+ class TransformerConfig:
+   """Global hyperparameters used to minimize obnoxious kwarg plumbing."""
+   vocab_size: int
+   output_vocab_size: int
+   share_embeddings: bool = False
+   logits_via_embedding: bool = False
+   dtype: Any = jnp.float32
+   emb_dim: int = 512
+   num_heads: int = 8
+   num_layers: int = 6
+   qkv_dim: int = 512
+   mlp_dim: int = 2048
+   max_len: int = 2048
+   dropout_rate: float = 0.1
+   attention_dropout_rate: float = 0.1
+   deterministic: bool = False
+   decode: bool = False
+   kernel_init: Callable = nn.initializers.xavier_uniform()
+   bias_init: Callable = nn.initializers.normal(stddev=1e-6)
+   posemb_init: Optional[Callable] = None
+
+
+ def shift_right(x, axis=1):
+   """Shift the input to the right by padding and slicing on axis."""
+   pad_widths = [(0, 0)] * len(x.shape)
+   pad_widths[axis] = (1, 0)
+   padded = jnp.pad(
+       x, pad_widths, mode='constant', constant_values=x.dtype.type(0))
+   return lax.dynamic_slice_in_dim(padded, 0, padded.shape[axis] - 1, axis)
+
+
+ def shift_inputs(x, segment_ids=None, axis=1):
+   """Shift inputs and replace EOS by 0 for packed inputs."""
+   shifted = shift_right(x, axis=axis)
+   # For packed targets, the first shifted token of a new sequence is made
+   # 0, rather than being the EOS token for the last sequence.
+   if segment_ids is not None:
+     shifted *= (segment_ids == shift_right(segment_ids, axis=axis))
+   return shifted
+
+
+ def sinusoidal_init(max_len=2048,
+                     min_scale=1.0,
+                     max_scale=10000.0):
+   """1D Sinusoidal Position Embedding Initializer.
+
+   Args:
+     max_len: maximum possible length for the input.
+     min_scale: float: minimum frequency-scale in sine grating.
+     max_scale: float: maximum frequency-scale in sine grating.
+
+   Returns:
+     output: init function returning `(1, max_len, d_feature)`
+   """
+
+   def init(key, shape, dtype=np.float32):
+     """Sinusoidal init."""
+     del key, dtype
+     d_feature = shape[-1]
+     pe = np.zeros((max_len, d_feature), dtype=np.float32)
+     position = np.arange(0, max_len)[:, np.newaxis]
+     scale_factor = -np.log(max_scale / min_scale) / (d_feature // 2 - 1)
+     div_term = min_scale * np.exp(np.arange(0, d_feature // 2) * scale_factor)
+     pe[:, :d_feature // 2] = np.sin(position * div_term)
+     pe[:, d_feature // 2: 2 * (d_feature // 2)] = np.cos(position * div_term)
+     pe = pe[np.newaxis, :, :]  # [1, max_len, d_feature]
+     return jnp.array(pe)
+
+   return init
+
+
+ class AddPositionEmbs(nn.Module):
+   """Adds (optionally learned) positional embeddings to the inputs.
+
+   Args:
+     config: TransformerConfig dataclass containing hyperparameters.
+     decode: whether to run in single-position autoregressive mode.
+   """
+   config: TransformerConfig
+   decode: bool = False
+
+   @nn.compact
+   def __call__(self,
+                inputs,
+                inputs_positions=None):
+     """Applies AddPositionEmbs module.
+
+     By default this layer uses a fixed sinusoidal embedding table. If a
+     learned position embedding is desired, pass an initializer to
+     posemb_init in the configuration.
+
+     Args:
+       inputs: input data.
+       inputs_positions: input position indices for packed sequences.
+
+     Returns:
+       output: `(bs, timesteps, in_dim)`
+     """
+     config = self.config
+     # inputs.shape is (batch_size, seq_len, emb_dim)
+     assert inputs.ndim == 3, ('Number of dimensions should be 3,'
+                               ' but it is: %d' % inputs.ndim)
+     length = inputs.shape[1]
+     pos_emb_shape = (1, config.max_len, inputs.shape[-1])
+     if config.posemb_init is None:
+       # Use a fixed (non-learned) sinusoidal position embedding.
+       pos_embedding = sinusoidal_init(max_len=config.max_len)(None,
+                                                               pos_emb_shape,
+                                                               None)
+     else:
+       pos_embedding = self.param('pos_embedding', config.posemb_init,
+                                  pos_emb_shape)
+     pe = pos_embedding[:, :length, :]
+
+     # We use a cache position index for tracking decoding position.
+     if self.decode:
+       is_initialized = self.has_variable('cache', 'cache_index')
+       cache_index = self.variable('cache', 'cache_index',
+                                   lambda: jnp.array(0, dtype=jnp.uint32))
+       if is_initialized:
+         i = cache_index.value
+         cache_index.value = i + 1
+         _, _, df = pos_embedding.shape
+         pe = lax.dynamic_slice(pos_embedding,
+                                jnp.array((0, i, 0)),
+                                (1, 1, df))
+     if inputs_positions is None:
+       # normal unpacked case:
+       return inputs + pe
+     else:
+       # for packed data we need to use known position indices:
+       return inputs + jnp.take(pe[0], inputs_positions, axis=0)
+
+
+ class MlpBlock(nn.Module):
+   """Transformer MLP / feed-forward block.
+
+   Args:
+     config: TransformerConfig dataclass containing hyperparameters.
+     out_dim: optionally specify out dimension.
+   """
+   config: TransformerConfig
+   out_dim: Optional[int] = None
+
+   @nn.compact
+   def __call__(self, inputs):
+     """Applies Transformer MlpBlock module."""
+     config = self.config
+     actual_out_dim = (inputs.shape[-1] if self.out_dim is None
+                       else self.out_dim)
+     x = nn.Dense(
+         config.mlp_dim,
+         dtype=config.dtype,
+         kernel_init=config.kernel_init,
+         bias_init=config.bias_init)(
+             inputs)
+     x = nn.relu(x)
+     x = nn.Dropout(rate=config.dropout_rate)(
+         x, deterministic=config.deterministic)
+     output = nn.Dense(
+         actual_out_dim,
+         dtype=config.dtype,
+         kernel_init=config.kernel_init,
+         bias_init=config.bias_init)(
+             x)
+     output = nn.Dropout(rate=config.dropout_rate)(
+         output, deterministic=config.deterministic)
+     return output
+
+
+ class EncoderDecoder1DBlock(nn.Module):
+   """Transformer encoder-decoder layer.
+
+   Args:
+     config: TransformerConfig dataclass containing hyperparameters.
+   """
+   config: TransformerConfig
+
+   @nn.compact
+   def __call__(self,
+                inputs,
+                decoder_mask=None,
+                encoder_decoder_mask=None):
+     """Applies EncoderDecoder1DBlock module.
+
+     Args:
+       inputs: input data for decoder
+       decoder_mask: decoder self-attention mask.
+       encoder_decoder_mask: encoder-decoder attention mask.
+
+     Returns:
+       output after transformer encoder-decoder block.
+     """
+     config = self.config
+
+     # Decoder block.
+     assert inputs.ndim == 3
+     x = nn.LayerNorm(dtype=config.dtype)(inputs)
+     x = nn.SelfAttention(
+         num_heads=config.num_heads,
+         dtype=config.dtype,
+         qkv_features=config.qkv_dim,
+         kernel_init=config.kernel_init,
+         bias_init=config.bias_init,
+         use_bias=False,
+         broadcast_dropout=False,
+         dropout_rate=config.attention_dropout_rate,
+         deterministic=config.deterministic,
+         decode=config.decode)(x, decoder_mask)
+     x = nn.Dropout(rate=config.dropout_rate)(
+         x, deterministic=config.deterministic)
+     x = x + inputs
+
+     # MLP block.
+     z = nn.LayerNorm(dtype=config.dtype)(x)
+     z = MlpBlock(config=config)(z)
+
+     return x + z
+
+
+ # Copyright 2021 The Eleuther AI and The Google Flax Team Authors and The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ class FlaxTransformerLMPreTrainedModel(FlaxPreTrainedModel):
+     config_class = TransformerLMConfig
+     base_model_prefix = "decoder"
+     module_class: nn.Module = None
+
+     def __init__(
+         self,
+         config: TransformerLMConfig,
+         input_shape: Tuple = (1, 1),
+         seed: int = 0,
+         dtype: jnp.dtype = jnp.float32,
+         _do_init: bool = True,
+         **kwargs,
+     ):
+         module = self.module_class(config=config, dtype=dtype, **kwargs)
+
+         def token_id_to_logits(state, token_id):
+             logits, cache = state
+             output = self.module.apply(
+                 {
+                     "params": self.params,
+                     "cache": cache
+                 },
+                 token_id,
+                 None,   # attention_mask
+                 None,   # position_ids
+                 True,   # deterministic
+                 False,  # init_cache
+                 False,  # output_attentions
+                 False,  # output_hidden_states
+                 True,   # return_dict
+                 rngs={},
+                 mutable=["cache"],
+             )
+             lm_output, new_vars = output
+             logits = lm_output.logits
+             cache = unfreeze(new_vars["cache"])
+             return (logits, cache), logits
+
+         self.scan_body_fn = token_id_to_logits
+
+         super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+     def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+         # init input tensors
+         input_ids = jnp.zeros(input_shape, dtype="i4")
+         attention_mask = jnp.ones_like(input_ids)
+         position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
+         params_rng, dropout_rng = jax.random.split(rng)
+         rngs = {"params": params_rng, "dropout": dropout_rng}
+
+         random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"]
+
+         if params is not None:
+             random_params = flatten_dict(unfreeze(random_params))
+             params = flatten_dict(unfreeze(params))
+             for missing_key in self._missing_keys:
+                 params[missing_key] = random_params[missing_key]
+             self._missing_keys = set()
+             return freeze(unflatten_dict(params))
+         else:
+             return random_params
+
+     def init_cache(self, batch_size, max_length):
+         r"""
+         Args:
+             batch_size (`int`):
+                 batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+             max_length (`int`):
+                 maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+                 cache.
+         """
+         # init input variables to retrieve cache
+         input_ids = jnp.ones((batch_size, max_length))
+         attention_mask = jnp.ones_like(input_ids)
+         position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+         init_variables = self.module.init(
+             jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
+         )
+         return unfreeze(init_variables["cache"])
+
+     def __call__(
+         self,
+         input_ids,
+         attention_mask=None,
+         position_ids=None,
+         params: dict = None,
+         past_key_values: dict = None,
+         dropout_rng: jax.random.PRNGKey = None,
+         train: bool = False,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ):
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+         batch_size, sequence_length = input_ids.shape
+
+         if position_ids is None:
+             if past_key_values is not None:
+                 raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
+
+             position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+         if attention_mask is None:
+             attention_mask = jnp.ones((batch_size, sequence_length))
+
+         # Handle any PRNG if needed
+         rngs = {}
+         if dropout_rng is not None:
+             rngs["dropout"] = dropout_rng
+
+         inputs = {"params": params or self.params}
+
+         # If past_key_values are passed, the cache is already initialized; the private flag init_cache is passed down so the cache is used, and the cache is marked as mutable so the attention modules can update it.
+         if past_key_values:
+             inputs["cache"] = past_key_values
+             mutable = ["cache"]
+         else:
+             mutable = False
+
+         if input_ids.shape[1] > 1:
+             input_ids = jnp.insert(input_ids, 0, 0, axis=1)  # Insert 0 at the beginning of the prompt
+
+         if self.module.use_cache:
+             # Progressive cache loop: feed one token at a time through lax.scan.
+             seq_length = input_ids.shape[1]
+             vocab_size = self.module.config.vocab_size
+             logits = jnp.zeros((1, 1, vocab_size), dtype=self.dtype)
+             cache = inputs["cache"]
+             initial_state = (logits, cache)
+             input_tokens = jnp.reshape(input_ids, (seq_length, 1, 1))
+             last, all_logits = lax.scan(self.scan_body_fn, initial_state, input_tokens)
+             last_logits, last_cache = last
+             lm_logits = jnp.reshape(all_logits, (1, seq_length, vocab_size))
+
+             if input_ids.shape[1] > 1:
+                 lm_logits = lm_logits[:, 1:, :]  # Ignore the leading zero in prompts
+
+             if not return_dict:
+                 outputs = (lm_logits,) + (last_cache,)
+             else:
+                 outputs = (FlaxCausalLMOutput(logits=lm_logits, hidden_states=None, attentions=None), {"cache": last_cache})
+         else:
+             output = self.module.apply(
+                 inputs,
+                 jnp.array(input_ids, dtype="i4"),
+                 jnp.array(attention_mask, dtype="i4"),
+                 jnp.array(position_ids, dtype="i4"),
+                 not train,  # deterministic
+                 False,      # init_cache
+                 output_attentions,
+                 output_hidden_states,
+                 return_dict,
+                 rngs=rngs,
+                 mutable=mutable,
+             )
+             lm_logits = output.logits
+
+             if input_ids.shape[1] > 1:
+                 lm_logits = lm_logits[:, 1:, :]  # Ignore the leading zero in prompts
+
+             if not return_dict:
+                 outputs = (lm_logits,) + output[1:]
+             else:
+                 outputs = FlaxCausalLMOutput(logits=lm_logits, hidden_states=output.hidden_states, attentions=output.attentions)
+
+         # add updated cache to model output
+         if past_key_values is not None and return_dict:
+             outputs, past_key_values = outputs
+             outputs["past_key_values"] = unfreeze(past_key_values["cache"])
+             return outputs
+         elif past_key_values is not None and not return_dict:
+             outputs, past_key_values = outputs
+             outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
+
+         return outputs
+
+
+ class FlaxTransformerLMModule(nn.Module):
+     config: TransformerConfig
+
+     def setup(self):
+         config = self.config
+         self.output_embed = nn.Embed(
+             num_embeddings=config.output_vocab_size,
+             features=config.emb_dim,
+             embedding_init=nn.initializers.normal(stddev=1.0),
+             name='Embed_0'
+         )
+         self.pos_embed = AddPositionEmbs(config=config, decode=config.decode, name='posembed_output')
+         self.dropout = nn.Dropout(rate=config.dropout_rate)
+         self.h_layers = [EncoderDecoder1DBlock(config=config, name=f'encoderdecoderblock_{i}')
+                          for i in range(config.num_layers)]
+         self.ln_f = nn.LayerNorm(dtype=config.dtype, name='encoderdecoder_norm')
+
+     def __call__(
+         self,
+         input_ids,
+         attention_mask,
+         position_ids,
+         deterministic=True,
+         init_cache: bool = False,
+         output_attentions: bool = False,
+         output_hidden_states: bool = False,
+         return_dict: bool = True,
+     ):
+         config = self.config
+
+         y = input_ids.astype('int32')
+
+         y = self.output_embed(y)
+         y = self.pos_embed(y, inputs_positions=position_ids)
+         y = self.dropout(y, deterministic=config.deterministic)
+         y = y.astype(config.dtype)
+
+         for h in self.h_layers:
+             y = h(y, decoder_mask=attention_mask, encoder_decoder_mask=None)
+
+         outputs = (y, None, None)
+
+         hidden_states = outputs[0]
+         hidden_states = self.ln_f(hidden_states)
+
+         if output_hidden_states:
+             all_hidden_states = outputs[1] + (hidden_states,)
+             outputs = (hidden_states, all_hidden_states) + outputs[2:]
+         else:
+             outputs = (hidden_states,) + outputs[1:]
+
+         if not return_dict:
+             return tuple(v for v in outputs if v is not None)
+
+         return FlaxBaseModelOutput(
+             last_hidden_state=hidden_states,
+             hidden_states=outputs[1],
+             attentions=outputs[-1],
+         )
+
+
+ class FlaxTransformerLMModel(FlaxTransformerLMPreTrainedModel):
+     module_class = FlaxTransformerLMModule
+
+
+ class FlaxTransformerLMForCausalLMModule(nn.Module):
+     config: TransformerLMConfig
+     dtype: jnp.dtype = jnp.float32
+     kernel_init: Callable = nn.initializers.xavier_uniform()
+     bias_init: Callable = nn.initializers.normal(stddev=1e-6)
+     posemb_init: Optional[Callable] = None
+     use_cache = False
+
+     def convert_config(self, cfg: TransformerLMConfig):
+         return TransformerConfig(
+             vocab_size=cfg.vocab_size,
+             output_vocab_size=cfg.vocab_size,
+             logits_via_embedding=cfg.logits_via_embedding,
+             dtype=self.dtype,
+             emb_dim=cfg.emb_dim,
+             num_heads=cfg.num_heads,
+             num_layers=cfg.num_layers,
+             qkv_dim=cfg.qkv_dim,
+             mlp_dim=cfg.mlp_dim,
+             max_len=cfg.max_len,
+             dropout_rate=cfg.dropout_rate,
+             attention_dropout_rate=cfg.attention_dropout_rate,
+             deterministic=cfg.deterministic,
+             decode=cfg.decode and self.use_cache,
+             kernel_init=self.kernel_init,
+             bias_init=self.bias_init,
+             posemb_init=self.posemb_init,
+         )
+
+     def setup(self):
+         config_ext = self.convert_config(self.config)
+         self.transformer = FlaxTransformerLMModule(config_ext, name='decoder')
+         self.lm_head = nn.Dense(
+             self.config.output_vocab_size,
+             dtype=self.dtype,
+             kernel_init=self.kernel_init,
+             bias_init=self.bias_init,
+             name='logitdense',
+         )
+
+     def __call__(
+         self,
+         input_ids,
+         attention_mask,
+         position_ids,
+         deterministic: bool = True,
+         init_cache: bool = False,
+         output_attentions: bool = False,
+         output_hidden_states: bool = False,
+         return_dict: bool = True,
+     ):
+         decoder_mask = None
+         inputs_positions = None
+
+         outputs = self.transformer(
+             input_ids,
+             decoder_mask,
+             inputs_positions,
+             deterministic=deterministic,
+             init_cache=init_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = outputs[0]
+         lm_logits = self.lm_head(hidden_states)
+
+         if not return_dict:
+             return (lm_logits,) + outputs[1:]
+
+         return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
+
+
+ class FlaxTransformerLMForCausalLM(FlaxTransformerLMPreTrainedModel):
+     module_class = FlaxTransformerLMForCausalLMModule
+
+     def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
+
+         self.module_class.use_cache = True
+
+         # initializing the cache
+         batch_size, seq_length = input_ids.shape
+
+         past_key_values = self.init_cache(batch_size, max_length)
+         # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+         # But since the model uses a causal mask, those positions are masked anyway.
+         # Thus we can create a single static attention_mask here, which is more efficient for compilation.
+         extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+         if attention_mask is not None:
+             position_ids = attention_mask.cumsum(axis=-1) - 1
+             extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
+         else:
+             position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
+
+         return {
+             "past_key_values": past_key_values,
+             "attention_mask": extended_attention_mask,
+             "position_ids": position_ids,
+         }
+
+     def update_inputs_for_generation(self, model_outputs, model_kwargs):
+         model_kwargs["past_key_values"] = model_outputs.past_key_values
+         model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
+         return model_kwargs
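
Note: since prepare_inputs_for_generation/update_inputs_for_generation wire the decoding cache into transformers' Flax generation loop, the model can be driven end to end with generate(). A hedged usage sketch; the repo id, the tokenizer behaviour (tokenizer_class is TransformerLMTokenizer, loaded via trust_remote_code), and the Japanese prompt are assumptions:

import jax
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

repo_id = "fukugawa/transformer-lm-japanese"  # assumed repo id; replace as needed
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = FlaxAutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

# Encode a prompt and sample a continuation with an explicit PRNG key.
inputs = tokenizer("日本の首都は", return_tensors="np")
outputs = model.generate(
    inputs["input_ids"],
    max_length=64,
    do_sample=True,
    prng_key=jax.random.PRNGKey(0),
)
print(tokenizer.decode(outputs.sequences[0], skip_special_tokens=True))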