KazukiNakamae committed on
Commit 3ce1caf
1 Parent(s): eb3bece

Uploaded model and tokenizer files

README.md CHANGED
@@ -1,5 +1,98 @@
- ---
- license: apache-2.0
- base_model:
- - zhihan1996/DNABERT-2-117M
- ---
+ ---
+ license: apache-2.0
+ base_model:
+ - zhihan1996/DNABERT-2-117M
+ tags:
+ - biology
+ - medical
+ ---
+ This is one of the fine-tuned models derived from [zhihan1996/DNABERT-2-117M](https://huggingface.co/zhihan1996/DNABERT-2-117M), named the SNL model.
+
+ The SNL model predicts RNA off-targets induced by cytosine base editors (CBEs).
+
+ Here is an example of using the model for RNA off-target prediction.
+
+ **pred_rna_offtarget.py:**
+
+ ```python
+ import sys
+
+ import numpy as np
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ __authors__ = ["Kazuki Nakamae"]
+ __version__ = "1.0.0"
+
+ def pred_rna_offtarget(dna, model_dir):
+     # Load the fine-tuned DNABERT-2 tokenizer and classifier onto GPU if available.
+     try:
+         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
+         model = AutoModelForSequenceClassification.from_pretrained(model_dir, trust_remote_code=True).to(device)
+     except Exception as e:
+         print(f"Error loading model from {model_dir}: {e}")
+         sys.exit(1)
+
+     inputs = tokenizer(dna, return_tensors='pt')
+     model.eval()
+     with torch.no_grad():
+         outputs = model(
+             inputs["input_ids"].to(device),
+             inputs["attention_mask"].to(device),
+         )
+     print("[Negative, Positive]")
+     print(outputs.logits)
+     # Argmax over the two logits gives the predicted class index.
+     y_preds = np.argmax(outputs.logits.to('cpu').detach().numpy().copy(), axis=1)
+
+     def id2label(x):
+         return model.config.id2label[x]
+     y_dash = [id2label(x) for x in y_preds]
+     print("Result:")
+     print(y_dash)
+     # LABEL_0: not an RNA off-target / LABEL_1: RNA off-target
+     return (dna, y_dash)
+
+ def print_usage():
+     print(f"Usage: {sys.argv[0]} <input DNA sequence> <DNABERT-2 model directory>")
+     print("Options:")
+     print("  -h, --help     Show this help message and exit")
+     print("  -v, --version  Show version information and exit")
+
+ def print_version():
+     print(f"{sys.argv[0]} version {__version__}")
+     print("Authors:", ", ".join(__authors__))
+
+ if __name__ == "__main__":
+     if len(sys.argv) != 3:
+         if len(sys.argv) == 2 and sys.argv[1] in ("-h", "--help"):
+             print_usage()
+             sys.exit(0)
+         elif len(sys.argv) == 2 and sys.argv[1] in ("-v", "--version"):
+             print_version()
+             sys.exit(0)
+         else:
+             print_usage()
+             sys.exit(1)
+
+     dna = sys.argv[1]
+     model_dir = sys.argv[2]
+
+     pred_rna_offtarget(dna, model_dir)
+ ```
+
+ ```bash
+ $ python pred_rna_offtarget.py GGCAGGGCTGGGGAAGCTTACTGTGTCCAAGAGCCTGCTG KazukiNakamae/SNLmodel
+ [Negative, Positive]
+ tensor([[-0.7521, 0.4817]])
+ Result:
+ ['LABEL_1']
+ $ python pred_rna_offtarget.py GTCATCTAACAAAAATATTCCGTTGCAGGAAAAGCAAGCT KazukiNakamae/SNLmodel
+ [Negative, Positive]
+ tensor([[ 0.9211, -0.8157]])
+ Result:
+ ['LABEL_0']
+ ```
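+
+ The predictor can also be called from Python rather than the shell. Below is a minimal sketch assuming pred_rna_offtarget.py is on the import path; the driver loop is illustrative and not part of the script (note the function reloads the model on every call, so heavy batch use would hoist the loading out):
+
+ ```python
+ # Illustrative driver for pred_rna_offtarget.py (assumes the script is importable).
+ from pred_rna_offtarget import pred_rna_offtarget
+
+ seqs = [
+     "GGCAGGGCTGGGGAAGCTTACTGTGTCCAAGAGCCTGCTG",  # LABEL_1 in the run above
+     "GTCATCTAACAAAAATATTCCGTTGCAGGAAAAGCAAGCT",  # LABEL_0 in the run above
+ ]
+ for seq in seqs:
+     dna, labels = pred_rna_offtarget(seq, "KazukiNakamae/SNLmodel")
+     print(dna, labels)
+ ```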
+
+ #### Developers of the fine-tuned model
+ - [Takayuki Suzuki](https://github.com/szktkyk)
+ - [Kazuki Nakamae](https://github.com/KazukiNakamae)
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "tmp/DNABERT-2-CBE_Suzuki_Nakamae_v1/",
+   "alibi_starting_size": 512,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "auto_map": {
+     "AutoConfig": "zhihan1996/DNABERT-2-117M--configuration_bert.BertConfig",
+     "AutoModel": "zhihan1996/DNABERT-2-117M--bert_layers.BertModel",
+     "AutoModelForMaskedLM": "zhihan1996/DNABERT-2-117M--bert_layers.BertForMaskedLM",
+     "AutoModelForSequenceClassification": "zhihan1996/DNABERT-2-117M--bert_layers.BertForSequenceClassification"
+   },
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.29.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 4096
+ }
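
The `auto_map` entries above route loading through DNABERT-2's remote code, which is why the example script passes `trust_remote_code=True`. A quick sketch to inspect the configuration via transformers (repo id as used in the examples above; the printed values follow from this config, and the label map is the transformers default for two classes):

```python
# Sketch: load and inspect the uploaded config (requires access to the Hugging Face Hub).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("KazukiNakamae/SNLmodel", trust_remote_code=True)
print(config.architectures)      # ['BertForSequenceClassification']
print(config.hidden_size)        # 768
print(config.num_hidden_layers)  # 12
print(config.id2label)           # expected default: {0: 'LABEL_0', 1: 'LABEL_1'}
```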
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2632dda98f60a768ef26c5932c48a650fa7f132b342153c759d9d7040c7bdda5
+ size 468326010
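
This is a Git LFS pointer rather than the weights themselves; the ~468 MB `pytorch_model.bin` is fetched when the repository is cloned or downloaded. A minimal sketch to check a downloaded copy against the pointer's hash (the file path is assumed to be the current directory):

```python
# Sketch: verify the downloaded weights match the LFS pointer's SHA-256.
import hashlib

EXPECTED = "2632dda98f60a768ef26c5932c48a650fa7f132b342153c759d9d7040c7bdda5"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    # Hash in 1 MiB chunks to avoid loading ~468 MB into memory at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest() == EXPECTED)  # True if the download is intact
```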
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "model_max_length": 10,
+   "pad_token": "[PAD]",
+   "padding_side": "right",
+   "sep_token": "[SEP]",
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "unk_token": "[UNK]"
+ }
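
A quick sketch to see how the tokenizer segments a DNA sequence (repo id as above; note the example script calls the tokenizer without truncation enabled, so `model_max_length` does not clip inputs there, and the token list shown in the comment is illustrative):

```python
# Sketch: inspect how the DNABERT-2 BPE tokenizer splits a DNA sequence.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("KazukiNakamae/SNLmodel", trust_remote_code=True)
ids = tok("GGCAGGGCTGGGGAAGCTTACTGTGTCCAAGAGCCTGCTG")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # e.g. ['[CLS]', 'GGCAGG', ..., '[SEP]']
```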