mjschock committed
Commit b30aa71 · verified · 1 Parent(s): 00f43b2

Upload model

Files changed (3)
  1. config.json +6 -1
  2. model.safetensors +2 -2
  3. modeling_mamba.py +5 -2
config.json CHANGED
@@ -1,6 +1,10 @@
 {
+  "architectures": [
+    "MambaModelForCausalLM"
+  ],
   "auto_map": {
-    "AutoConfig": "configuration_mamba.MambaConfig"
+    "AutoConfig": "configuration_mamba.MambaConfig",
+    "AutoModelForCausalLM": "modeling_mamba.MambaModelForCausalLM"
   },
   "bias": false,
   "conv_bias": true,
@@ -14,6 +18,7 @@
   "model_type": "mamba",
   "n_layer": 24,
   "pad_vocab_size_multiple": 8,
+  "torch_dtype": "float32",
   "transformers_version": "4.37.2",
   "vocab_size": 50280
 }
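
The new "auto_map" entry points AutoModelForCausalLM at the custom class in modeling_mamba.py, and "architectures" records which class produced this checkpoint. A minimal loading sketch, assuming configuration_mamba.py and modeling_mamba.py ship alongside config.json in this repository (the repo id below is a placeholder, not this repo's actual id):

from transformers import AutoConfig, AutoModelForCausalLM

# trust_remote_code=True is required because MambaConfig and
# MambaModelForCausalLM live in the model repository, not in transformers.
config = AutoConfig.from_pretrained("<user>/<repo>", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("<user>/<repo>", trust_remote_code=True)
print(type(model).__name__)  # -> MambaModelForCausalLM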
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:699ed6f59fb948186f449c5031e0dc659d504c90d7e018302aa1e190cdb40220
-size 516567560
+oid sha256:1bd3ca62665de4bfabff9d443f87a11090a10e505c0ccb56e6f9ca495b6e05bd
+size 671027808
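
The ~154 MB growth matches one extra float32 tensor the size of the embedding matrix, consistent with lm_head no longer sharing storage with backbone.embedding (see the modeling_mamba.py diff below). A quick consistency check; d_model = 768 is an assumption here (n_layer = 24 in config.json matches the 130M-scale Mamba, whose width is 768):

vocab_size, d_model, bytes_per_fp32 = 50280, 768, 4

extra_tensor = vocab_size * d_model * bytes_per_fp32  # 154,460,160 bytes
delta = 671027808 - 516567560                         # 154,460,248 bytes
print(delta - extra_tensor)  # 88 -> small safetensors header overhead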
modeling_mamba.py CHANGED
@@ -311,7 +311,10 @@ class MambaModel(MambaPreTrainedModel):
     )
 
 class MambaModelForCausalLM(MambaPreTrainedModel):
-    _tied_weights_keys = ["lm_head.weight"]
+    _tied_weights_keys = [
+        "lm_head.weight",
+        "backbone.embedding.weight",
+    ]
 
     def __init__(self, config, **kwargs):
         # super().__init__(config)
@@ -336,7 +339,7 @@ class MambaModelForCausalLM(MambaPreTrainedModel):
             bias=False,
         )
 
-        self.lm_head.weight = self.backbone.embedding.weight
+        # self.lm_head.weight = self.backbone.embedding.weight
         self.post_init()
 
     # def get_input_embeddings(self):
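
For context on this change: in transformers, _tied_weights_keys only declares which parameters may alias one another (so save_pretrained() can drop duplicates and from_pretrained() won't report them as missing); the tying itself is normally performed by PreTrainedModel.tie_weights(), which post_init() invokes when config.tie_word_embeddings is true. A minimal sketch of that standard pattern, not this repo's exact code; the class name and config fields below are illustrative:

import torch.nn as nn
from transformers import PreTrainedModel

class TiedHeadModel(PreTrainedModel):
    # Declares that lm_head.weight may alias the input embedding weight.
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # post_init() calls tie_weights(): lm_head is tied to the input
        # embeddings only if config.tie_word_embeddings is True; otherwise
        # both tensors are stored separately (as the larger checkpoint in
        # this commit suggests).
        self.post_init()

    def get_input_embeddings(self):
        return self.embedding

    def get_output_embeddings(self):
        return self.lm_head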