import re


def atomwise_tokenizer(smi, exclusive_tokens=None):
    """
    Tokenize a SMILES molecule at atom level.

    If exclusive_tokens is given, any bracketed token not in the list
    is replaced with '[UNK]'.
    """
    # Bracketed atoms (e.g. [N+]), two-letter halogens (Br, Cl), single
    # atoms, bonds, branches, and ring-closure digits (incl. %-prefixed pairs).
    pattern = r"(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>|\*|\$|%[0-9]{2}|[0-9])"
    regex = re.compile(pattern)
    tokens = regex.findall(smi)
    if exclusive_tokens:
        tokens = [tok if tok in exclusive_tokens or not tok.startswith('[') else '[UNK]'
                  for tok in tokens]
    return tokens
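
# Illustrative example, traced by hand from the regex above (not captured
# from a run): bracketed atoms and two-letter halogens stay intact.
# atomwise_tokenizer('CC[N+](C)(C)Cc1ccccc1Br')
# -> ['C', 'C', '[N+]', '(', 'C', ')', '(', 'C', ')', 'C',
#     'c', '1', 'c', 'c', 'c', 'c', 'c', '1', 'Br']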

def kmer_tokenizer(smiles, ngram=4, stride=1, remove_last=False, exclusive_tokens=None):
    """
    Tokenize a SMILES molecule into k-mers and return both the tokens
    and their token IDs (assigned in order of first appearance).
    """
    units = atomwise_tokenizer(smiles, exclusive_tokens=exclusive_tokens)  # atom-wise tokens from the SMILES
    if ngram == 1:
        tokens = units
    else:
        # Slide a window of ngram units with the given stride; keep only full windows.
        tokens = [''.join(units[i:i + ngram])
                  for i in range(0, len(units), stride)
                  if len(units[i:i + ngram]) == ngram]
    if remove_last and tokens and len(tokens[-1]) < ngram:
        tokens = tokens[:-1]  # defensive: drop a trailing token shorter than ngram characters
    # Assign each new token an ID equal to the current vocabulary size.
    token_to_id = {}
    token_ids = []
    for token in tokens:
        if token not in token_to_id:
            token_to_id[token] = len(token_to_id)
        token_ids.append(token_to_id[token])
    return tokens, token_ids

if __name__ == '__main__':
    print(kmer_tokenizer('CC[N+](C)(C)Cc1ccccc1Br'))
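    # Expected output with the defaults (ngram=4, stride=1), traced by hand
    # from the functions above; the repeated 'cccc' window reuses ID 12:
    # (['CC[N+](', 'C[N+](C', '[N+](C)', '(C)(', 'C)(C', ')(C)', '(C)C',
    #   'C)Cc', ')Cc1', 'Cc1c', 'c1cc', '1ccc', 'cccc', 'cccc', 'ccc1',
    #   'cc1Br'],
    #  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14])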