Commit 926ba9a (verified), committed by lhallee
Parent: ab48d15

Upload FastEsmForMaskedLM

config.json CHANGED
@@ -1,37 +1,30 @@
 {
   "_name_or_path": "facebook/esm2_t36_3B_UR50D",
   "architectures": [
     "FastEsmForMaskedLM"
   ],
   "attention_probs_dropout_prob": 0.0,
-  "auto_map": {
-    "AutoConfig": "modeling_fastesm.FastEsmConfig",
-    "AutoModel": "modeling_fastesm.FastEsmModel",
-    "AutoModelForMaskedLM": "modeling_fastesm.FastEsmForMaskedLM",
-    "AutoModelForSequenceClassification": "modeling_fastesm.FastEsmForSequenceClassification",
-    "AutoModelForTokenClassification": "modeling_fastesm.FastEsmForTokenClassification"
-  },
   "classifier_dropout": null,
   "emb_layer_norm_before": false,
   "esmfold_config": null,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.0,
   "hidden_size": 2560,
   "initializer_range": 0.02,
   "intermediate_size": 10240,
   "is_folding_model": false,
   "layer_norm_eps": 1e-05,
   "mask_token_id": 32,
   "max_position_embeddings": 1026,
   "model_type": "fast_esm",
   "num_attention_heads": 40,
   "num_hidden_layers": 36,
   "pad_token_id": 1,
   "position_embedding_type": "rotary",
   "token_dropout": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.47.1",
+  "transformers_version": "4.49.0",
   "use_cache": true,
   "vocab_list": null,
   "vocab_size": 33
 }
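
Aside from the transformers version bump, the substantive config change is the removal of the auto_map block, which is what let the Auto* loaders resolve the custom FastEsm* classes from the repo's modeling_fastesm.py. A minimal loading sketch, assuming the checkpoint is still served with remote code (the repo id below is a placeholder, not taken from this commit):

from transformers import AutoModelForMaskedLM, AutoTokenizer

repo_id = "some-org/FastESM2-3B"  # placeholder; substitute the actual repo id

# trust_remote_code=True imports modeling_fastesm.py shipped with the repo;
# before this commit, the config's auto_map pointed the Auto* loaders at it.
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForMaskedLM.from_pretrained(repo_id, trust_remote_code=True)
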
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:db10cd36a5ebca89ad292748321f232bc295d0b1ec2ed95ecf0079596204d32b
-size 1494886236
+oid sha256:362272afed3123f2788035b35af9271ad4aa720ea4ab42fef6418296dafcb725
+size 1494892176
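
Since .safetensors shards are stored through Git LFS, the diff above only swaps the pointer's content hash (oid) and byte size. A short sketch for verifying a downloaded shard against the new oid:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file through SHA-256 so a multi-GB shard need not fit in RAM.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

expected = "362272afed3123f2788035b35af9271ad4aa720ea4ab42fef6418296dafcb725"
assert sha256_of("model-00003-of-00003.safetensors") == expected
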
model.safetensors.index.json CHANGED
@@ -1,8 +1,10 @@
 {
   "metadata": {
-    "total_size": 11356021380
+    "total_size": 11356027144
   },
   "weight_map": {
+    "esm.contact_head.regression.bias": "model-00003-of-00003.safetensors",
+    "esm.contact_head.regression.weight": "model-00003-of-00003.safetensors",
     "esm.embeddings.word_embeddings.weight": "model-00001-of-00003.safetensors",
     "esm.encoder.emb_layer_norm_after.bias": "model-00003-of-00003.safetensors",
     "esm.encoder.emb_layer_norm_after.weight": "model-00003-of-00003.safetensors",