"""Loading script for a multi-species plant genomes pre-training dataset.

Genomes are parsed from FASTA files, cleaned to the alphabet {A, T, C, G, N}
and cut into fixed-length overlapping chunks. Arabidopsis (TAIR10) is held
out and split between validation and test; every other genome goes to train.
"""

from typing import Dict, List
import os
import random
import re
from collections import defaultdict
from dataclasses import dataclass

import datasets
from Bio import SeqIO

_CITATION = """\
@article{o2016reference,
  title={Reference sequence (RefSeq) database at NCBI: current status, taxonomic expansion, and functional annotation},
  author={O'Leary, Nuala A and Wright, Mathew W and Brister, J Rodney and Ciufo, Stacy and Haddad, Diana and McVeigh, Rich and Rajput, Bhanu and Robbertse, Barbara and Smith-White, Brian and Ako-Adjei, Danso and others},
  journal={Nucleic acids research},
  volume={44},
  number={D1},
  pages={D733--D745},
  year={2016},
  publisher={Oxford University Press}
}
"""

_DESCRIPTION = """\
Dataset made of diverse genomes available on NCBI and coming from 11 different species. Default configuration "1kbp" yields chunks of 1kbp (100bp overlap on each side). The chunks of DNA are cleaned and processed so that they can only contain the letters A, T, C, G and N. Arabidopsis genome is split between validation and test sets, while all other genomes are used for training. 
"""

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/"

_LICENSE = "https://www.ncbi.nlm.nih.gov/home/about/policies/"

# Chunk lengths (in bp) for which a BuilderConfig is generated.
_CHUNK_LENGTHS = [1000, ]

# Matches any character that is not one of the four canonical bases; used by
# clean_sequence to replace such characters with 'N' in a single C-level pass.
_NON_NUCLEOTIDE_RE = re.compile(r'[^ATCG]')


@dataclass
class GenomeStats:
    """Summary statistics and parsed records for one genome FASTA file."""
    total_bases: int   # sum of sequence lengths over all records in the file
    num_chunks: int    # estimated number of chunks the file will yield
    records: List      # parsed Bio.SeqIO SeqRecord objects
    file_path: str     # path of the FASTA file the stats were computed from
    species: str       # species tag derived from the file's basename


def filter_fn(char: str) -> str:
    """Transforms any letter different from a base nucleotide into an 'N'."""
    if char in {'A', 'T', 'C', 'G'}:
        return char
    return 'N'


def clean_sequence(seq: str) -> str:
    """Process a chunk of DNA to have all letters in upper and restricted to A, T, C, G and N."""
    # Single compiled-regex substitution instead of a per-character Python
    # loop: genomes are hundreds of Mbp, so the C-level pass matters.
    # Output is identical to mapping filter_fn over seq.upper().
    return _NON_NUCLEOTIDE_RE.sub('N', seq.upper())


class PlantMultiSpeciesGenomesConfig(datasets.BuilderConfig):
    """BuilderConfig for the Plant Multi Species Pre-training Dataset.

    Args:
        chunk_length: length (bp) of the chunks yielded by the dataset.
        overlap: number of bp shared between two consecutive chunks.
    """

    def __init__(self, *args, chunk_length: int, overlap: int = 100, **kwargs):
        # Fail fast on an impossible configuration: the chunking stride is
        # (chunk_length - overlap), which must be positive or chunk counting
        # would divide by zero / loop forever.
        if overlap >= chunk_length:
            raise ValueError(
                f"overlap ({overlap}) must be smaller than chunk_length ({chunk_length})"
            )
        num_kbp = int(chunk_length / 1000)
        super().__init__(
            *args,
            name=f'{num_kbp}kbp',
            **kwargs,
        )
        self.chunk_length = chunk_length
        self.overlap = overlap


class PlantMultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
    """Generator-based builder producing overlapping DNA chunks per species."""

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = PlantMultiSpeciesGenomesConfig
    BUILDER_CONFIGS = [
        PlantMultiSpeciesGenomesConfig(chunk_length=chunk_length)
        for chunk_length in _CHUNK_LENGTHS
    ]
    DEFAULT_CONFIG_NAME = "1kbp"

    def _info(self):
        """Return the DatasetInfo describing the features of each example."""
        features = datasets.Features({
            "sequence": datasets.Value("string"),
            "description": datasets.Value("string"),
            "start_pos": datasets.Value("int32"),
            "end_pos": datasets.Value("int32"),
            "species": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _calculate_num_chunks(self, sequence_length: int, chunk_length: int, overlap: int) -> int:
        """Number of chunks a sequence yields when stepping by (chunk_length - overlap).

        Sequences shorter than one chunk yield zero chunks; this mirrors the
        `continue` guard in _generate_examples.
        """
        if sequence_length < chunk_length:
            return 0
        return max(0, 1 + (sequence_length - chunk_length) // (chunk_length - overlap))

    def _load_genome_stats(self, genome_files: List[str], chunk_length: int, overlap: int) -> Dict[str, GenomeStats]:
        """Parse each FASTA file and compute per-file GenomeStats.

        Returns a dict mapping file path -> GenomeStats. The species tag is
        the file basename up to its first '.'.
        """
        genome_stats = {}
        print("\nCalculating genome statistics...")
        for file in genome_files:
            species = os.path.basename(file).split('.')[0]
            with open(file, 'rt') as f:
                records = list(SeqIO.parse(f, 'fasta'))
            file_bases = sum(len(str(record.seq)) for record in records)
            num_chunks = sum(
                self._calculate_num_chunks(
                    len(str(record.seq)), chunk_length, overlap
                )
                for record in records
            )
            genome_stats[file] = GenomeStats(
                total_bases=file_bases,
                num_chunks=num_chunks,
                records=records,
                file_path=file,
                species=species
            )
            print(f"\nFile: {os.path.basename(file)}")
            print(f"Total bases: {file_bases:,}")
            print(f"Number of records: {len(records)}")
            print(f"Estimated chunks: {num_chunks:,}")
        return genome_stats

    def _assign_splits(self, genome_stats: Dict[str, GenomeStats]) -> Dict[str, List]:
        """
        Split strategy:
        - Half of Arabidopsis records for validation
        - Other half of Arabidopsis records for test
        - All other genomes for training
        """
        splits = defaultdict(list)

        # Find Arabidopsis genome (identified by 'TAIR10' in its file path).
        arabidopsis_file = None
        for file_path, stats in genome_stats.items():
            if 'TAIR10' in file_path:
                arabidopsis_file = file_path
                break

        if arabidopsis_file:
            arabidopsis_stats = genome_stats[arabidopsis_file]
            records = arabidopsis_stats.records
            # Split Arabidopsis records into two roughly equal parts
            mid_point = len(records) // 2
            val_records = records[:mid_point]
            test_records = records[mid_point:]
            splits['validation'].append((arabidopsis_file, val_records))
            splits['test'].append((arabidopsis_file, test_records))
            print(f"\nValidation set: Arabidopsis ({len(val_records)} records)")
            print(f"Test set: Arabidopsis ({len(test_records)} records)")

        # Add all other genomes to training set
        for file_path, stats in genome_stats.items():
            if file_path != arabidopsis_file:
                splits['train'].append((file_path, stats.records))
                print(f"Training set: {stats.species} (all records)")

        return splits

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download genome files, compute stats and build one generator per split."""
        # Download genome files: a manifest lists the per-species FASTA paths.
        filepaths_txt = dl_manager.download_and_extract('plant_genome_file_names.txt')
        with open(filepaths_txt) as f:
            filepaths = [os.path.join("plant_genomes", filepath.rstrip()) for filepath in f]
        genome_files = [dl_manager.download_and_extract(f) for f in filepaths]

        # Calculate genome statistics
        genome_stats = self._load_genome_stats(
            genome_files,
            self.config.chunk_length,
            self.config.overlap
        )

        # Assign records to splits
        splits = self._assign_splits(genome_stats)

        # Generate split generators ('train'/'validation'/'test' map onto
        # datasets.Split.TRAIN / VALIDATION / TEST via getattr).
        return [
            datasets.SplitGenerator(
                name=getattr(datasets.Split, split.upper()),
                gen_kwargs={
                    "split_records": records,
                    "chunk_length": self.config.chunk_length,
                    "overlap": self.config.overlap
                }
            )
            for split, records in splits.items()
        ]

    def _generate_examples(self, split_records, chunk_length: int, overlap: int):
        """Generate examples for each split.

        Walks every record with stride (chunk_length - overlap) and yields
        one example per full-length chunk; trailing partial chunks and
        records shorter than chunk_length are skipped.
        """
        key = 0
        for file_path, records in split_records:
            species = os.path.basename(file_path).split('.')[0]
            for record in records:
                sequence = clean_sequence(str(record.seq))
                seq_length = len(sequence)
                if seq_length < chunk_length:
                    continue
                pos = 0
                while pos + chunk_length <= seq_length:
                    chunk = sequence[pos:pos + chunk_length]
                    yield key, {
                        'sequence': chunk,
                        'description': record.description,
                        'start_pos': pos,
                        'end_pos': pos + chunk_length,
                        'species': species,
                    }
                    key += 1
                    pos += chunk_length - overlap