| """ |
| /* |
| Copyright (c) 2023, thewall. |
| All rights reserved. |
| BSD 3-clause license: |
| Redistribution and use in source and binary forms, |
| with or without modification, are permitted provided |
| that the following conditions are met: |
| 1. Redistributions of source code must retain the |
| above copyright notice, this list of conditions |
| and the following disclaimer. |
| 2. Redistributions in binary form must reproduce |
| the above copyright notice, this list of conditions |
| and the following disclaimer in the documentation |
| and/or other materials provided with the distribution. |
| 3. Neither the name of the copyright holder nor the |
| names of its contributors may be used to endorse or |
| promote products derived from this software without |
| specific prior written permission. |
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| */ |
| """ |
| import os |
| import datasets |
| import torch |
| from torch import nn |
| import torch.nn.functional as F |
| import numpy as np |
| import pandas as pd |
| from typing import List |
| from functools import partial |
|
|
# Fetch the pre-trained DeepBind weight bundle from the HuggingFace Hub.
# NOTE(review): runs at import time and needs network access on first use.
DEEPBIND_MODEL_CONFIG = datasets.load_dataset(path="thewall/deepbindweight", split="all")
# SELEX metadata table shipped with the bundle; `load_model` looks up the
# DeepBind model "ID" for a given SRA accession in this frame.
SELEX_CONFIG = pd.read_excel(DEEPBIND_MODEL_CONFIG[0]['selex'], index_col=0)
|
|
class DeepBind(nn.Module):
    """PyTorch port of the DeepBind convolutional model for scoring
    DNA/RNA sequences.

    Sequences are encoded as integer ids over ``ALPHABET`` ("ATGCN"),
    one-hot embedded, scanned by a bank of ``num_detectors`` 1-D
    convolutional motif detectors, pooled over positions, and reduced to a
    scalar score by a small fully-connected head.  Weights are loaded from
    the original DeepBind parameter text files via ``load_model``.
    """

    # Base alphabet; enumeration order fixes each base's integer id
    # (A=0, T=1, G=2, C=3, N=4).
    ALPHABET = "ATGCN"
    ALPHABET_MAP = {key: i for i, key in enumerate(ALPHABET)}
    # RNA support: "U" shares the id of "T" (index 1).
    ALPHABET_MAP["U"] = 1
    # Complement of ALPHABET, position by position.
    ALPHABET_COMPLEMENT = "TACGN"
    # id -> complement-id lookup under ALPHABET order: A<->T, G<->C, N->N.
    COMPLEMENT_ID_MAP = torch.IntTensor([1, 0, 3, 2, 4])


    def __init__(self, reverse_complement=True, num_detectors=16, detector_len=24, has_avg_pooling=True, num_hidden=1,
                 tokenizer=None):
        """
        Args:
            reverse_complement: if True, inference also scores the
                reverse-complement strand and keeps the larger score.
            num_detectors: number of convolutional motif detectors.
            detector_len: kernel length of each motif detector.
            has_avg_pooling: if True, the FC head receives max- AND
                mean-pooled activations (2 * num_detectors features).
            num_hidden: hidden width of the FC head; 1 collapses the head
                to a single linear layer with scalar output.
            tokenizer: optional pre-built tokenizer; defaults to
                ``get_tokenizer()``.
        """
        super(DeepBind, self).__init__()
        self.reverse_complement = reverse_complement
        self.num_detectors = num_detectors
        self.detector_len = detector_len
        self.has_avg_pooling = has_avg_pooling
        self.num_hidden = num_hidden
        self.build_embedding()
        # 4 input channels: the one-hot A/C/G/T embedding built above.
        self.detectors = nn.Conv1d(4, num_detectors, detector_len)
        if has_avg_pooling:
            # NOTE(review): these pooling modules appear unused -- `forward`
            # pools with torch.max / torch.mean directly.
            self.avg_pool = nn.AvgPool1d(detector_len)
        self.max_pool = nn.MaxPool1d(detector_len)
        fcs = [nn.Linear(num_detectors * 2 if self.has_avg_pooling else num_detectors, num_hidden)]
        if num_hidden > 1:
            fcs.append(nn.ReLU())
            fcs.append(nn.Linear(num_hidden, 1))
        self.fc = nn.Sequential(*fcs)
        self.tokenizer = tokenizer if tokenizer is not None else self.get_tokenizer()


    @classmethod
    def get_tokenizer(cls):
        """Build a character-level tokenizer over ``ALPHABET_MAP`` (a BPE
        model with no merges, so each base maps to its fixed id)."""
        from tokenizers import Tokenizer, models, decoders
        tokenizer = Tokenizer(models.BPE(vocab=cls.ALPHABET_MAP, merges=[]))
        tokenizer.decoder = decoders.ByteLevel()
        return tokenizer


    @classmethod
    def complement_idxs_encode_batch(cls, idxs, reverse=False):
        """Apply ``complement_idxs_encode`` to every row of ``idxs`` and
        stack the results back into one tensor."""
        return torch.stack(list(map(partial(cls.complement_idxs_encode, reverse=reverse), idxs)))


    @classmethod
    def complement_idxs_encode(cls, idxs, reverse=False):
        """Complement a 1-D id tensor; with ``reverse=True`` this yields the
        reverse complement (``reversed`` on a tensor flips dim 0)."""
        if reverse:
            idxs = reversed(idxs)
        return cls.COMPLEMENT_ID_MAP[idxs]


    def build_embedding(self):
        """ATGC->ACGT:0321

        Build the frozen one-hot embedding.  Input ids follow ALPHABET
        order (A,T,G,C,N) while output channels are ordered A,C,G,T
        (A->0, T->3, G->2, C->1); the "N" row is a uniform 0.25 over all
        four channels.
        """
        embedding = torch.zeros(5, 4)
        embedding[0, 0] = 1
        embedding[1, 3] = 1
        embedding[2, 2] = 1
        embedding[3, 1] = 1
        embedding[-1] = 0.25
        self.embedding = nn.Embedding.from_pretrained(embedding, freeze=True)
        return embedding


    @property
    def device(self):
        # Device the model lives on, proxied through the conv bias parameter.
        return self.detectors.bias.device


    def _load_detector(self, fobj):
        """Read convolution weights/biases from an open DeepBind parameter
        file and copy them into ``self.detectors``."""

        # NOTE(review): `eval` on file text -- only safe for trusted files.
        dtype = lambda x: torch.Tensor(eval(x))
        weight1 = self._load_param(fobj, "detectors", dtype).reshape(self.detector_len, 4, self.num_detectors)
        biases1 = self._load_param(fobj, "thresholds", dtype)

        # File layout is (len, channel, detector); Conv1d wants (detector, channel, len).
        self.detectors.weight.data = weight1.permute(2, 1, 0).contiguous().to(device=self.detectors.weight.device)
        self.detectors.bias.data = biases1.to(device=self.detectors.bias.device)


    def _load_fc1(self, fobj):
        """Load the first fully-connected layer; its input width doubles
        when avg-pooling features are concatenated."""
        num_hidden1 = self.num_detectors * 2 if self.has_avg_pooling else self.num_detectors
        dtype = lambda x: torch.Tensor(np.array(eval(x)))
        weight1 = self._load_param(fobj, "weights1", dtype).reshape(num_hidden1, self.num_hidden)
        biases1 = self._load_param(fobj, "biases1", dtype)
        # File stores (in, out); nn.Linear stores (out, in), hence the .T.
        self.fc[0].weight.data = weight1.T.contiguous().to(device=self.fc[0].weight.device)
        self.fc[0].bias.data = biases1.to(device=self.fc[0].bias.device)


    def _load_fc2(self, fobj):
        """Load the second FC layer; only present when num_hidden > 1."""
        dtype = lambda x: torch.Tensor(np.array(eval(x)))
        weight2 = self._load_param(fobj, "weights2", dtype)
        biases2 = self._load_param(fobj, "biases2", dtype)
        # A hidden layer requires both parameters to exist in the file
        # (_load_param returns None for an empty value field).
        assert not (weight2 is None and self.num_hidden > 1)
        assert not (biases2 is None and self.num_hidden > 1)
        if self.num_hidden > 1:
            self.fc[2].weight.data = weight2.reshape(1, -1).to(device=self.fc[2].weight.device)
            self.fc[2].bias.data = biases2.to(device=self.fc[2].bias.device)


    @classmethod
    def _load_param(cls, fobj, param_name, dtype):
        """Read one ``name=value`` line from ``fobj``; assert the name
        matches and return ``dtype(value)``, or None when the value field
        is empty."""
        line = fobj.readline().strip()
        tmp = line.split("=")
        assert tmp[0].strip() == param_name
        if len(tmp) > 1 and len(tmp[1].strip()) > 0:
            return dtype(tmp[1].strip())


    @classmethod
    def load_model(cls, sra_id="ERR173157", file=None, ID=None):
        """Construct a DeepBind model from an original parameter text file.

        Resolution order: explicit ``file`` path; otherwise ``ID``;
        otherwise the ``ID`` looked up for ``sra_id`` in SELEX_CONFIG.
        """
        if file is None:
            if ID is None:
                data = SELEX_CONFIG
                ID = data.loc[sra_id]["ID"]
            file = os.path.join(DEEPBIND_MODEL_CONFIG['config'][0], f"{ID}.txt")
        # (name, parser) pairs, in the exact order they appear in the file.
        keys = [("reverse_complement", lambda x: bool(eval(x))), ("num_detectors", int), ("detector_len", int),
                ("has_avg_pooling", lambda x: bool(eval(x))), ("num_hidden", int)]


        hparams = {}
        with open(file) as fobj:
            # First line is a format/version tag (leading char skipped).
            version = fobj.readline()[1:].strip()
            for key in keys:
                value = cls._load_param(fobj, key[0], key[1])
                hparams[key[0]] = value
            if hparams['num_hidden'] == 0:
                # 0 in the file means "no hidden layer": single linear output.
                hparams['num_hidden'] = 1
            model = cls(**hparams)
            model._load_detector(fobj)
            model._load_fc1(fobj)
            model._load_fc2(fobj)
        print(f"load model from {file}")
        return model


    def inference(self, sequence: List[str], window_size=0, average_flag=False):
        """Score sequences one at a time (no padding); returns a list of
        Python floats.

        NOTE(review): toggles padding off on the shared tokenizer --
        stateful across calls (cf. ``batch_inference`` which re-enables it).
        """
        if isinstance(sequence, str):
            sequence = [sequence]
        ans = []
        self.tokenizer.no_padding()
        for seq in sequence:
            inputs = torch.IntTensor(self.tokenizer.encode(seq).ids).unsqueeze(0).to(device=self.device)
            score = self.test(inputs, window_size, average_flag).item()
            ans.append(score)
        return ans


    @torch.no_grad()
    def batch_inference(self, sequences: List[str], window_size=0, average_flag=False):
        """Tokenize ``sequences`` with padding and score them as one batch;
        returns a list of floats (max over both strands when
        ``reverse_complement`` is set)."""
        if isinstance(sequences, str):
            sequences = [sequences]
        self.tokenizer.enable_padding()
        encodings = self.tokenizer.encode_batch(sequences)
        # Float tensor: F.unfold inside batch_scan_model needs float input.
        ids = torch.Tensor([encoding.ids for encoding in encodings]).to(device=self.device)
        mask = torch.BoolTensor([encoding.attention_mask for encoding in encodings]).to(device=self.device)
        # True (unpadded) length of each sequence.
        seq_len = mask.sum(dim=1)
        score = self.batch_scan_model(ids, seq_len, window_size, average_flag)
        if self.reverse_complement:
            # NOTE(review): reversing a padded row moves pad ids to the
            # front, yet batch_scan_model still scans the first seq_len
            # positions -- verify scores for unequal-length batches.
            rev_seq = self.complement_idxs_encode_batch(ids.cpu().long(), reverse=True)
            rev_seq = torch.Tensor(rev_seq).to(device=self.device).float()
            rev_score = self.batch_scan_model(rev_seq, seq_len, window_size, average_flag)
            # Elementwise max of forward-strand and reverse-complement scores.
            score = torch.stack([rev_score, score], dim=-1).max(dim=-1)[0]
        return score.cpu().tolist()


    def batch_scan_model(self, ids, seq_len, window_size: int = 0, average_flag: bool = False):
        """Score a padded batch by scanning a sliding window over each row.

        Args:
            ids: (B, L_max) float tensor of token ids.
            seq_len: (B,) true lengths before padding.
            window_size: scan window; values < 1 select 1.5 * detector_len.
            average_flag: mean over windows instead of max.
        Returns:
            (B,) float tensor of scores.
        """
        if window_size < 1:
            window_size = int(self.detector_len * 1.5)
        scores = torch.zeros_like(seq_len).float()
        # Rows no longer than the window are scored whole, one by one.
        masked = seq_len <= window_size
        for idx in torch.where(masked)[0]:
            scores[idx] = self.forward(ids[idx:idx + 1, :seq_len[idx]].int())
        if torch.all(masked):
            return scores
        # Extract every length-`window_size` window (stride 1) of the rest.
        fold_ids = F.unfold(ids[~masked].unsqueeze(1).unsqueeze(1), kernel_size=(1, window_size), stride=1)
        B, W, G = fold_ids.shape  # W = window_size, G = number of windows
        fold_ids = fold_ids.permute(0, 2, 1).reshape(-1, W)
        ans = self.forward(fold_ids.int())
        ans = ans.reshape(B, G)
        if average_flag:
            # Only the first seq_len - window_size + 1 windows are real.
            valid_len = seq_len - window_size + 1
            for idx, value in zip(torch.where(~masked)[0], ans):
                scores[idx] = value[:valid_len[idx]].mean()
        else:
            # Mask windows that extend into padding with -inf before the max.
            unvalid_mask = torch.arange(G).unsqueeze(0).to(seq_len.device) >= (
                    seq_len[~masked] - window_size + 1).unsqueeze(1)
            ans[unvalid_mask] = -torch.inf
            scores[~masked] = ans.max(dim=1)[0]
        return scores


    @torch.no_grad()
    def test(self, seq: torch.IntTensor, window_size=0, average_flag=False):
        """Scan one id tensor (all rows share a length); when
        ``reverse_complement`` is set, also score the reverse-complement
        strand and keep the larger score."""
        score = self.scan_model(seq, window_size, average_flag)
        if self.reverse_complement:
            rev_seq = self.complement_idxs_encode_batch(seq.cpu().long(), reverse=True)
            rev_seq = torch.IntTensor(rev_seq).to(device=seq.device)
            rev_score = self.scan_model(rev_seq, window_size, average_flag)
            score = torch.cat([rev_score, score], dim=-1).max(dim=-1)[0]
        return score


    def scan_model(self, seq: torch.IntTensor, window_size: int = 0, average_flag: bool = False):
        """Slide a window over ``seq`` and reduce the per-window scores
        with mean (``average_flag``) or max; short sequences are scored
        whole."""
        seq_len = seq.shape[1]
        if window_size < 1:
            # Default window: 1.5x the detector length.
            window_size = int(self.detector_len * 1.5)
        if seq_len <= window_size:
            return self.forward(seq)
        else:
            scores = []
            for i in range(0, seq_len - window_size + 1):
                scores.append(self.forward(seq[:, i:i + window_size]))
            scores = torch.stack(scores, dim=-1)
            if average_flag:
                return scores.mean(dim=-1)
            else:
                return scores.max(dim=-1)[0]


    def forward(self, seq: torch.IntTensor):
        """Score a batch of id sequences; returns the FC head's output
        (shape (B, 1) for the layer sizes built in ``__init__``)."""
        # Pad both ends with "N" (id 4) so detectors can overhang the
        # sequence edges; N embeds to a uniform 0.25 profile.
        seq = F.pad(seq, (self.detector_len - 1, self.detector_len - 1), value=4)
        x = self.embedding(seq)  # (B, L_padded, 4)
        x = x.permute(0, 2, 1)  # Conv1d expects (B, C, L)
        x = self.detectors(x)
        x = torch.relu(x)
        x = x.permute(0, 2, 1)  # (B, L', num_detectors)
        if self.has_avg_pooling:
            # Max- and mean-pool over positions, interleaved per detector.
            x = torch.stack([torch.max(x, dim=1)[0], torch.mean(x, dim=1)], dim=-1)
            x = torch.flatten(x, 1)
        else:
            x = torch.max(x, dim=1)[0]
            # squeeze(-1) is a no-op unless num_detectors == 1.
            x = x.squeeze(dim=-1)
        x = self.fc(x)
        return x
|
|
|
|
if __name__ == "__main__":
    """
    AGGUAAUAAUUUGCAUGAAAUAACUUGGAGAGGAUAGC
    AGACAGAGCUUCCAUCAGCGCUAGCAGCAGAGACCAUU
    GAGGTTACGCGGCAAGATAA
    TACCACTAGGGGGCGCCACC

    To generate 16 predictions (4 models, 4 sequences), run
    the deepbind executable as follows:

    % deepbind example.ids < example.seq
    D00210.001 D00120.001 D00410.003 D00328.003
    7.451420 -0.166146 -0.408751 -0.026180
    -0.155398 4.113817 0.516956 -0.248167
    -0.140683 0.181295 5.885349 -0.026180
    -0.174985 -0.152521 -0.379695 17.682623
    """
    # Sanity check: score the four example sequences with one published model.
    sequences = ["AGGUAAUAAUUUGCAUGAAAUAACUUGGAGAGGAUAGC",
                 "AGACAGAGCUUCCAUCAGCGCUAGCAGCAGAGACCAUU",
                 "GAGGTTACGCGGCAAGATAA",
                 "TACCACTAGGGGGCGCCACC"]
    model = DeepBind.load_model(ID='D00410.003')
    print(model.batch_inference(sequences))


    import random
    import time
    from tqdm import tqdm


    # Benchmark corpus: 1000 random 40-mers, scored in chunks of 256.
    sequences = ["".join(random.choice("ATGC") for _ in range(40)) for _ in range(1000)]


    def test_fn(sequences, fn):
        # Time `fn` over the corpus, chunk by chunk, and print the elapsed seconds.
        t0 = time.time()
        for lo in tqdm(range(0, len(sequences), 256)):
            chunk = sequences[lo:lo + 256]
            fn(chunk)
        print(time.time() - t0)


    model = model.cuda()
    # Each path runs twice: the first pass warms up, the second times it.
    test_fn(sequences, model.batch_inference)
    test_fn(sequences, model.inference)
    test_fn(sequences, model.batch_inference)
    test_fn(sequences, model.inference)