idx: int64 (0 to 63k)
question: string (lengths 61 to 4.03k)
target: string (lengths 6 to 1.23k)
62,300
async def cancel_remaining ( self ) : self . _closed = True task_list = list ( self . _pending ) for task in task_list : task . cancel ( ) for task in task_list : with suppress ( CancelledError ) : await task
Cancel all remaining tasks .
62,301
async def _connect_one ( self , remote_address ) : loop = asyncio . get_event_loop ( ) for info in await loop . getaddrinfo ( str ( self . address . host ) , self . address . port , type = socket . SOCK_STREAM ) : client = self . protocol ( remote_address , self . auth ) sock = socket . socket ( family = info [ 0 ] ) try : sock . setblocking ( False ) await loop . sock_connect ( sock , info [ 4 ] ) await self . _handshake ( client , sock , loop ) self . peername = sock . getpeername ( ) return sock except ( OSError , SOCKSProtocolError ) as e : exception = e return exception
Connect to the proxy and perform a handshake requesting a connection .
62,302
async def _connect ( self , remote_addresses ) : assert remote_addresses exceptions = [ ] for remote_address in remote_addresses : sock = await self . _connect_one ( remote_address ) if isinstance ( sock , socket . socket ) : return sock , remote_address exceptions . append ( sock ) strings = set ( f'{exc!r}' for exc in exceptions ) raise ( exceptions [ 0 ] if len ( strings ) == 1 else OSError ( f'multiple exceptions: {", ".join(strings)}' ) )
Connect to the proxy and perform a handshake requesting a connection to each address in addresses .
62,303
async def _detect_proxy ( self ) : if self . protocol is SOCKS4a : remote_address = NetAddress ( 'www.apple.com' , 80 ) else : remote_address = NetAddress ( '8.8.8.8' , 53 ) sock = await self . _connect_one ( remote_address ) if isinstance ( sock , socket . socket ) : sock . close ( ) return True return isinstance ( sock , SOCKSFailure )
Return True if it appears we can connect to a SOCKS proxy otherwise False .
62,304
async def auto_detect_at_host ( cls , host , ports , auth ) : for port in ports : proxy = await cls . auto_detect_at_address ( NetAddress ( host , port ) , auth ) if proxy : return proxy return None
Try to detect a SOCKS proxy on a host on one of the ports .
62,305
async def create_connection ( self ) : connector = self . proxy or self . loop return await connector . create_connection ( self . session_factory , self . host , self . port , ** self . kwargs )
Initiate a connection .
62,306
def data_received ( self , framed_message ) : if self . verbosity >= 4 : self . logger . debug ( f'Received framed message {framed_message}' ) self . recv_size += len ( framed_message ) self . bump_cost ( len ( framed_message ) * self . bw_cost_per_byte ) self . framer . received_bytes ( framed_message )
Called by asyncio when a message comes in .
62,307
def pause_writing ( self ) : if not self . is_closing ( ) : self . _can_send . clear ( ) self . transport . pause_reading ( )
Transport calls when the send buffer is full .
62,308
def resume_writing ( self ) : if not self . _can_send . is_set ( ) : self . _can_send . set ( ) self . transport . resume_reading ( )
Transport calls when the send buffer has room .
62,309
def connection_made ( self , transport ) : self . transport = transport if self . _proxy is None : peername = transport . get_extra_info ( 'peername' ) self . _remote_address = NetAddress ( peername [ 0 ] , peername [ 1 ] ) self . _task = spawn_sync ( self . _process_messages ( ) , loop = self . loop )
Called by asyncio when a connection is established .
62,310
def connection_lost ( self , exc ) : if self . transport : self . transport = None self . closed_event . set ( ) self . _can_send . set ( ) self . loop . call_soon ( self . _task . cancel )
Called by asyncio when the connection closes .
62,311
def recalc_concurrency ( self ) : now = time . time ( ) self . cost = max ( 0 , self . cost - ( now - self . _cost_time ) * self . cost_decay_per_sec ) self . _cost_time = now self . _cost_last = self . cost value = self . _incoming_concurrency . max_concurrent cost_soft_range = self . cost_hard_limit - self . cost_soft_limit if cost_soft_range <= 0 : return cost = self . cost + self . extra_cost ( ) self . _cost_fraction = max ( 0.0 , ( cost - self . cost_soft_limit ) / cost_soft_range ) target = max ( 0 , ceil ( ( 1.0 - self . _cost_fraction ) * self . initial_concurrent ) ) if abs ( target - value ) > 1 : self . logger . info ( f'changing task concurrency from {value} to {target}' ) self . _incoming_concurrency . set_target ( target )
Call to recalculate sleeps and concurrency for the session . Called automatically if cost has drifted significantly . Otherwise can be called at regular intervals if desired .
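To make the target calculation above concrete, here is a small worked example with assumed limits (the real soft/hard cost limits and initial concurrency are session settings not shown in this row):

from math import ceil

cost_soft_limit, cost_hard_limit = 1000, 10000   # assumed settings
initial_concurrent = 6
cost = 5500                                      # decayed cost plus extra_cost()

cost_soft_range = cost_hard_limit - cost_soft_limit                    # 9000
cost_fraction = max(0.0, (cost - cost_soft_limit) / cost_soft_range)   # 0.5
target = max(0, ceil((1.0 - cost_fraction) * initial_concurrent))      # 3
assert target == 3   # halfway into the soft range halves the allowed concurrency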
62,312
async def close ( self , * , force_after = 30 ) : if self . transport : self . transport . close ( ) try : async with timeout_after ( force_after ) : await self . closed_event . wait ( ) except TaskTimeout : self . abort ( ) await self . closed_event . wait ( )
Close the connection and return when closed .
62,313
async def send_request ( self , method , args = ( ) ) : message , event = self . connection . send_request ( Request ( method , args ) ) return await self . _send_concurrent ( message , event , 1 )
Send an RPC request over the network .
62,314
async def send_notification ( self , method , args = ( ) ) : message = self . connection . send_notification ( Notification ( method , args ) ) await self . _send_message ( message )
Send an RPC notification over the network .
62,315
async def close ( self ) : if self . server : self . server . close ( ) await self . server . wait_closed ( ) self . server = None
Close the listening socket . This does not close any ServerSession objects created to handle incoming connections .
62,316
def _message_to_payload ( cls , message ) : try : return json . loads ( message . decode ( ) ) except UnicodeDecodeError : message = 'messages must be encoded in UTF-8' except json . JSONDecodeError : message = 'invalid JSON' raise cls . _error ( cls . PARSE_ERROR , message , True , None )
Returns a Python object or a ProtocolError .
62,317
def batch_message ( cls , batch , request_ids ) : assert isinstance ( batch , Batch ) if not cls . allow_batches : raise ProtocolError . invalid_request ( 'protocol does not permit batches' ) id_iter = iter ( request_ids ) rm = cls . request_message nm = cls . notification_message parts = ( rm ( request , next ( id_iter ) ) if isinstance ( request , Request ) else nm ( request ) for request in batch ) return cls . batch_message_from_parts ( parts )
Convert a request Batch to a message .
62,318
def batch_message_from_parts ( cls , messages ) : middle = b', ' . join ( messages ) if not middle : raise ProtocolError . empty_batch ( ) return b'' . join ( [ b'[' , middle , b']' ] )
Convert messages, one per batch item, into a batch message. At least one message must be passed.
62,319
def encode_payload ( cls , payload ) : try : return json . dumps ( payload ) . encode ( ) except TypeError : msg = f'JSON payload encoding error: {payload}' raise ProtocolError ( cls . INTERNAL_ERROR , msg ) from None
Encode a Python object as JSON and convert it to bytes .
62,320
def detect_protocol ( cls , message ) : main = cls . _message_to_payload ( message ) def protocol_for_payload ( payload ) : if not isinstance ( payload , dict ) : return JSONRPCLoose version = payload . get ( 'jsonrpc' ) if version == '2.0' : return JSONRPCv2 if version == '1.0' : return JSONRPCv1 if 'result' in payload and 'error' in payload : return JSONRPCv1 return JSONRPCLoose if isinstance ( main , list ) : parts = set ( protocol_for_payload ( payload ) for payload in main ) if len ( parts ) == 1 : return parts . pop ( ) for protocol in ( JSONRPCv2 , JSONRPCv1 ) : if protocol in parts : return protocol return JSONRPCLoose return protocol_for_payload ( main )
Attempt to detect the protocol from the message .
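The detection rule applied to each payload is easier to see in isolation. A standalone sketch, with strings standing in for the aiorpcX protocol classes:

def protocol_for_payload(payload):
    # dict payloads are inspected; anything else falls back to the loose protocol
    if not isinstance(payload, dict):
        return 'JSONRPCLoose'
    version = payload.get('jsonrpc')
    if version == '2.0':
        return 'JSONRPCv2'
    if version == '1.0':
        return 'JSONRPCv1'
    if 'result' in payload and 'error' in payload:
        return 'JSONRPCv1'   # v1 responses carry both result and error keys
    return 'JSONRPCLoose'

assert protocol_for_payload({'jsonrpc': '2.0', 'method': 'ping'}) == 'JSONRPCv2'
assert protocol_for_payload({'result': 1, 'error': None, 'id': 0}) == 'JSONRPCv1'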
62,321
def receive_message ( self , message ) : if self . _protocol is JSONRPCAutoDetect : self . _protocol = JSONRPCAutoDetect . detect_protocol ( message ) try : item , request_id = self . _protocol . message_to_item ( message ) except ProtocolError as e : if e . response_msg_id is not id : return self . _receive_response ( e , e . response_msg_id ) raise if isinstance ( item , Request ) : item . send_result = partial ( self . _send_result , request_id ) return [ item ] if isinstance ( item , Notification ) : return [ item ] if isinstance ( item , Response ) : return self . _receive_response ( item . result , request_id ) assert isinstance ( item , list ) if all ( isinstance ( payload , dict ) and ( 'result' in payload or 'error' in payload ) for payload in item ) : return self . _receive_response_batch ( item ) else : return self . _receive_request_batch ( item )
Call with an unframed message received from the network .
62,322
def cancel_pending_requests ( self ) : exception = CancelledError ( ) for _request , event in self . _requests . values ( ) : event . result = exception event . set ( ) self . _requests . clear ( )
Cancel all pending requests .
62,323
def is_valid_hostname ( hostname ) : if not isinstance ( hostname , str ) : raise TypeError ( 'hostname must be a string' ) if hostname and hostname [ - 1 ] == "." : hostname = hostname [ : - 1 ] if not hostname or len ( hostname ) > 253 : return False labels = hostname . split ( '.' ) if re . match ( NUMERIC_REGEX , labels [ - 1 ] ) : return False return all ( LABEL_REGEX . match ( label ) for label in labels )
Return True if hostname is valid otherwise False .
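LABEL_REGEX and NUMERIC_REGEX live elsewhere in the module, so a self-contained sketch of the check above needs stand-in patterns; the ones below are assumptions, not the library's actual regexes:

import re

LABEL_REGEX = re.compile(r'^[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$')  # assumed
NUMERIC_REGEX = re.compile(r'^[0-9]+$')                                       # assumed

def is_valid_hostname(hostname):
    if not isinstance(hostname, str):
        raise TypeError('hostname must be a string')
    if hostname and hostname[-1] == '.':
        hostname = hostname[:-1]            # ignore a single trailing dot
    if not hostname or len(hostname) > 253:
        return False
    labels = hostname.split('.')
    if NUMERIC_REGEX.match(labels[-1]):     # an all-numeric last label is rejected
        return False
    return all(LABEL_REGEX.match(label) for label in labels)

assert is_valid_hostname('example.com.') is True
assert is_valid_hostname('256.1.1.1') is False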
62,324
def classify_host ( host ) : if isinstance ( host , ( IPv4Address , IPv6Address ) ) : return host if is_valid_hostname ( host ) : return host return ip_address ( host )
Host is an IPv4Address, an IPv6Address, or a string.
62,325
def validate_port ( port ) : if not isinstance ( port , ( str , int ) ) : raise TypeError ( f'port must be an integer or string: {port}' ) if isinstance ( port , str ) and port . isdigit ( ) : port = int ( port ) if isinstance ( port , int ) and 0 < port <= 65535 : return port raise ValueError ( f'invalid port: {port}' )
Validate port and return it as an integer .
62,326
def validate_protocol ( protocol ) : if not re . match ( PROTOCOL_REGEX , protocol ) : raise ValueError ( f'invalid protocol: {protocol}' ) return protocol . lower ( )
Validate a protocol (a string) and return it.
62,327
def is_async_call ( func ) : while isinstance ( func , partial ) : func = func . func return inspect . iscoroutinefunction ( func )
inspect.iscoroutinefunction that looks through partials.
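A quick usage sketch: historically inspect.iscoroutinefunction did not look through functools.partial, which is why the helper unwraps partials first. The names below are only for illustration:

import inspect
from functools import partial

def is_async_call(func):
    while isinstance(func, partial):   # unwrap nested partials
        func = func.func
    return inspect.iscoroutinefunction(func)

async def fetch(url, timeout):
    ...

assert is_async_call(fetch) is True
assert is_async_call(partial(fetch, timeout=10)) is True
assert is_async_call(print) is False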
62,328
def from_string ( cls , string , * , default_func = None ) : if not isinstance ( string , str ) : raise TypeError ( f'service must be a string: {string}' ) parts = string . split ( '://' , 1 ) if len ( parts ) == 2 : protocol , address = parts else : item , = parts protocol = None if default_func : if default_func ( item , ServicePart . HOST ) and default_func ( item , ServicePart . PORT ) : protocol , address = item , '' else : protocol , address = default_func ( None , ServicePart . PROTOCOL ) , item if not protocol : raise ValueError ( f'invalid service string: {string}' ) if default_func : default_func = partial ( default_func , protocol . lower ( ) ) address = NetAddress . from_string ( address , default_func = default_func ) return cls ( protocol , address )
Construct a Service from a string .
62,329
def scrub ( self ) : LOG . info ( "Scrubbing out the nasty characters that break our parser." ) myfile = '/' . join ( ( self . rawdir , self . files [ 'data' ] [ 'file' ] ) ) tmpfile = '/' . join ( ( self . rawdir , self . files [ 'data' ] [ 'file' ] + '.tmp.gz' ) ) tmp = gzip . open ( tmpfile , 'wb' ) du = DipperUtil ( ) with gzip . open ( myfile , 'rb' ) as fh : filereader = io . TextIOWrapper ( fh , newline = "" ) for line in filereader : line = du . remove_control_characters ( line ) + '\n' tmp . write ( line . encode ( 'utf-8' ) ) tmp . close ( ) LOG . info ( "Replacing the original data with the scrubbed file." ) shutil . move ( tmpfile , myfile ) return
The XML file seems to have mixed encoding; we scrub out the control characters from the file for processing.
62,330
def process_associations ( self , limit ) : myfile = '/' . join ( ( self . rawdir , self . files [ 'data' ] [ 'file' ] ) ) f = gzip . open ( myfile , 'rb' ) filereader = io . TextIOWrapper ( f , newline = "" ) filereader . readline ( ) for event , elem in ET . iterparse ( filereader ) : self . process_xml_table ( elem , 'Article_Breed' , self . _process_article_breed_row , limit ) self . process_xml_table ( elem , 'Article_Phene' , self . _process_article_phene_row , limit ) self . process_xml_table ( elem , 'Breed_Phene' , self . _process_breed_phene_row , limit ) self . process_xml_table ( elem , 'Lida_Links' , self . _process_lida_links_row , limit ) self . process_xml_table ( elem , 'Phene_Gene' , self . _process_phene_gene_row , limit ) self . process_xml_table ( elem , 'Group_MPO' , self . _process_group_mpo_row , limit ) f . close ( ) return
Loop through the XML file and process the article-breed, article-phene, breed-phene and phene-gene associations, and the external links to LIDA.
62,331
def _process_article_phene_row ( self , row ) : phenotype_id = self . id_hash [ 'phene' ] . get ( row [ 'phene_id' ] ) article_id = self . id_hash [ 'article' ] . get ( row [ 'article_id' ] ) omia_id = self . _get_omia_id_from_phene_id ( phenotype_id ) if self . test_mode or omia_id not in self . test_ids [ 'disease' ] or phenotype_id is None or article_id is None : return self . graph . addTriple ( article_id , self . globaltt [ 'is_about' ] , phenotype_id ) return
Linking articles to species-specific phenes.
62,332
def filter_keep_phenotype_entry_ids ( self , entry ) : omim_id = str ( entry [ 'mimNumber' ] ) otype = self . globaltt [ 'obsolete' ] if omim_id in self . omim_type : otype = self . omim_type [ omim_id ] if otype == self . globaltt [ 'obsolete' ] and omim_id in self . omim_replaced : omim_id = self . omim_replaced [ omim_id ] otype = self . omim_type [ omim_id ] if otype not in ( self . globaltt [ 'Phenotype' ] , self . globaltt [ 'has_affected_feature' ] ) : omim_id = None return omim_id
doubt this should be kept
62,333
def make_spo ( sub , prd , obj ) : if prd == 'a' : prd = 'rdf:type' try : ( subcuri , subid ) = re . split ( r':' , sub ) except Exception : LOG . error ( "not a Subject Curie '%s'" , sub ) raise ValueError try : ( prdcuri , prdid ) = re . split ( r':' , prd ) except Exception : LOG . error ( "not a Predicate Curie '%s'" , prd ) raise ValueError objt = '' objcuri = None match = re . match ( CURIERE , obj ) if match is not None : try : ( objcuri , objid ) = re . split ( r':' , obj ) except ValueError : match = None if match is not None and objcuri in CURIEMAP : objt = CURIEMAP [ objcuri ] + objid . strip ( ) if objcuri != '_' or CURIEMAP [ objcuri ] != '_:b' : objt = '<' + objt + '>' elif obj . isnumeric ( ) : objt = '"' + obj + '"' else : obj = obj . strip ( '"' ) . replace ( '\\' , '\\\\' ) . replace ( '"' , '\'' ) obj = obj . replace ( '\n' , '\\n' ) . replace ( '\r' , '\\r' ) objt = '"' + obj + '"' if subcuri is not None and subcuri in CURIEMAP and prdcuri is not None and prdcuri in CURIEMAP : subjt = CURIEMAP [ subcuri ] + subid . strip ( ) if subcuri != '_' or CURIEMAP [ subcuri ] != '_:b' : subjt = '<' + subjt + '>' return subjt + ' <' + CURIEMAP [ prdcuri ] + prdid . strip ( ) + '> ' + objt + ' .' else : LOG . error ( 'Cant work with: <%s> %s , <%s> %s, %s' , subcuri , subid , prdcuri , prdid , objt ) return None
Decorates the three given strings as a line of ntriples
62,334
def write_spo ( sub , prd , obj ) : rcvtriples . append ( make_spo ( sub , prd , obj ) )
Write triples to a buffer in case we decide to drop them.
62,335
def make_allele_by_consequence ( self , consequence , gene_id , gene_symbol ) : allele_id = None type_id = self . resolve ( consequence , mandatory = False ) if type_id == consequence : LOG . warning ( "Consequence type unmapped: %s" , str ( consequence ) ) type_id = self . globaltt [ 'sequence_variant' ] allele_id = '' . join ( ( gene_id , type_id ) ) allele_id = re . sub ( r':' , '' , allele_id ) allele_id = '_:' + allele_id allele_label = ' ' . join ( ( consequence , 'allele in' , gene_symbol ) ) self . model . addIndividualToGraph ( allele_id , allele_label , type_id ) self . geno . addAlleleOfGene ( allele_id , gene_id ) return allele_id
Given a consequence label that describes a variation type, create an anonymous variant of the specified gene as an instance of that consequence type.
62,336
def parse ( self , limit : Optional [ int ] = None ) : if limit is not None : LOG . info ( "Only parsing first %d rows" , limit ) LOG . info ( "Parsing files..." ) file_path = '/' . join ( ( self . rawdir , self . files [ 'developmental_disorders' ] [ 'file' ] ) ) with gzip . open ( file_path , 'rt' ) as csvfile : reader = csv . reader ( csvfile ) next ( reader ) for row in reader : if limit is None or reader . line_num <= ( limit + 1 ) : self . _add_gene_disease ( row ) else : break LOG . info ( "Done parsing." )
Here we parse each row of the gene-to-phenotype file.
62,337
def _add_gene_disease ( self , row ) : col = self . files [ 'developmental_disorders' ] [ 'columns' ] if len ( row ) != len ( col ) : raise ValueError ( "Unexpected number of fields for row {}" . format ( row ) ) variant_label = "variant of {}" . format ( row [ col . index ( 'gene_symbol' ) ] ) disease_omim_id = row [ col . index ( 'disease_omim_id' ) ] if disease_omim_id == 'No disease mim' : disease_label = row [ col . index ( 'disease_label' ) ] if disease_label in self . mondo_map : disease_id = self . mondo_map [ disease_label ] else : return else : disease_id = 'OMIM:' + disease_omim_id hgnc_curie = 'HGNC:' + row [ col . index ( 'hgnc_id' ) ] relation_curie = self . resolve ( row [ col . index ( 'g2p_relation_label' ) ] ) mutation_consequence = row [ col . index ( 'mutation_consequence' ) ] if mutation_consequence not in ( 'uncertain' , '' ) : consequence_relation = self . resolve ( self . _get_consequence_predicate ( mutation_consequence ) ) consequence_curie = self . resolve ( mutation_consequence ) variant_label = "{} {}" . format ( mutation_consequence , variant_label ) else : consequence_relation = None consequence_curie = None allelic_requirement = row [ col . index ( 'allelic_requirement' ) ] if allelic_requirement != '' : requirement_curie = self . resolve ( allelic_requirement ) else : requirement_curie = None pmids = row [ col . index ( 'pmids' ) ] if pmids != '' : pmid_list = [ 'PMID:' + pmid for pmid in pmids . split ( ';' ) ] else : pmid_list = [ ] self . _build_gene_disease_model ( hgnc_curie , relation_curie , disease_id , variant_label , consequence_relation , consequence_curie , requirement_curie , pmid_list )
Parse and add the gene-variant-disease model. Model building happens in _build_gene_disease_model.
62,338
def _build_gene_disease_model ( self , gene_id , relation_id , disease_id , variant_label , consequence_predicate = None , consequence_id = None , allelic_requirement = None , pmids = None ) : model = Model ( self . graph ) geno = Genotype ( self . graph ) pmids = [ ] if pmids is None else pmids is_variant = False variant_or_gene = gene_id variant_id_string = variant_label variant_bnode = self . make_id ( variant_id_string , "_" ) if consequence_predicate is not None and consequence_id is not None : is_variant = True model . addTriple ( variant_bnode , consequence_predicate , consequence_id ) if consequence_id . startswith ( ':' ) : model . addLabel ( consequence_id , consequence_id . strip ( ':' ) . replace ( '_' , ' ' ) ) if is_variant : variant_or_gene = variant_bnode model . addIndividualToGraph ( variant_bnode , variant_label , self . globaltt [ 'variant_locus' ] ) geno . addAffectedLocus ( variant_bnode , gene_id ) model . addBlankNodeAnnotation ( variant_bnode ) assoc = G2PAssoc ( self . graph , self . name , variant_or_gene , disease_id , relation_id ) assoc . source = pmids assoc . add_association_to_graph ( ) if allelic_requirement is not None and is_variant is False : model . addTriple ( assoc . assoc_id , self . globaltt [ 'has_allelic_requirement' ] , allelic_requirement ) if allelic_requirement . startswith ( ':' ) : model . addLabel ( allelic_requirement , allelic_requirement . strip ( ':' ) . replace ( '_' , ' ' ) )
Builds gene variant disease model
62,339
def _get_identifiers ( self , limit ) : LOG . info ( "getting identifier mapping" ) line_counter = 0 f = '/' . join ( ( self . rawdir , self . files [ 'identifiers' ] [ 'file' ] ) ) myzip = ZipFile ( f , 'r' ) fname = myzip . namelist ( ) [ 0 ] foundheader = False speciesfilters = 'Homo sapiens,Mus musculus' . split ( ',' ) with myzip . open ( fname , 'r' ) as csvfile : for line in csvfile : if not foundheader : if re . match ( r'BIOGRID_ID' , line . decode ( ) ) : foundheader = True continue line = line . decode ( ) . strip ( ) ( biogrid_num , id_num , id_type , organism_label ) = line . split ( '\t' ) if self . test_mode : graph = self . testgraph if int ( biogrid_num ) not in self . biogrid_ids : continue else : graph = self . graph model = Model ( graph ) biogrid_id = 'BIOGRID:' + biogrid_num prefix = self . localtt [ id_type ] geneidtypefilters = 'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC' . split ( ',' ) if ( speciesfilters is not None ) and ( organism_label . strip ( ) in speciesfilters ) : line_counter += 1 if ( geneidtypefilters is not None ) and ( prefix in geneidtypefilters ) : mapped_id = ':' . join ( ( prefix , id_num ) ) model . addEquivalentClass ( biogrid_id , mapped_id ) elif id_type == 'OFFICIAL_SYMBOL' : model . addClassToGraph ( biogrid_id , id_num ) if not self . test_mode and limit is not None and line_counter > limit : break myzip . close ( ) return
This will process the id mapping file provided by Biogrid. The file has a very large header, which we scan past, then pull the identifiers and make equivalence axioms.
62,340
def add_supporting_evidence ( self , evidence_line , evidence_type = None , label = None ) : self . graph . addTriple ( self . association , self . globaltt [ 'has_supporting_evidence_line' ] , evidence_line ) if evidence_type is not None : self . model . addIndividualToGraph ( evidence_line , label , evidence_type ) return
Add supporting line of evidence node to association id
62,341
def add_association_to_graph ( self ) : Assoc . add_association_to_graph ( self ) if self . start_stage_id or self . end_stage_id is not None : stage_process_id = '-' . join ( ( str ( self . start_stage_id ) , str ( self . end_stage_id ) ) ) stage_process_id = '_:' + re . sub ( r':' , '' , stage_process_id ) self . model . addIndividualToGraph ( stage_process_id , None , self . globaltt [ 'developmental_process' ] ) self . graph . addTriple ( stage_process_id , self . globaltt [ 'starts during' ] , self . start_stage_id ) self . graph . addTriple ( stage_process_id , self . globaltt [ 'ends during' ] , self . end_stage_id ) self . stage_process_id = stage_process_id self . graph . addTriple ( self . assoc_id , self . globaltt [ 'has_qualifier' ] , self . stage_process_id ) if self . environment_id is not None : self . graph . addTriple ( self . assoc_id , self . globaltt [ 'has_qualifier' ] , self . environment_id ) return
Overrides Association by including bnode support
62,342
def parse ( self , limit = None ) : if limit is not None : LOG . info ( "Only parsing first %s rows fo each file" , str ( limit ) ) LOG . info ( "Parsing files..." ) self . _process_straininfo ( limit ) self . _process_ontology_mappings_file ( limit ) self . _process_measurements_file ( limit ) self . _process_strainmeans_file ( limit ) self . _fill_provenance_graph ( limit ) LOG . info ( "Finished parsing." ) return
MPD data is delivered in four separate CSV files and one XML file, which we process iteratively and write out as one large graph.
62,343
def _add_g2p_assoc ( self , graph , strain_id , sex , assay_id , phenotypes , comment ) : geno = Genotype ( graph ) model = Model ( graph ) eco_id = self . globaltt [ 'experimental phenotypic evidence' ] strain_label = self . idlabel_hash . get ( strain_id ) genotype_id = '_:' + '-' . join ( ( re . sub ( r':' , '' , strain_id ) , 'genotype' ) ) genotype_label = '[' + strain_label + ']' sex_specific_genotype_id = '_:' + '-' . join ( ( re . sub ( r':' , '' , strain_id ) , sex , 'genotype' ) ) if strain_label is not None : sex_specific_genotype_label = strain_label + ' (' + sex + ')' else : sex_specific_genotype_label = strain_id + '(' + sex + ')' genotype_type = self . globaltt [ 'sex_qualified_genotype' ] if sex == 'm' : genotype_type = self . globaltt [ 'male_genotype' ] elif sex == 'f' : genotype_type = self . globaltt [ 'female_genotype' ] geno . addGenotype ( genotype_id , genotype_label , self . globaltt [ 'genomic_background' ] ) graph . addTriple ( strain_id , self . globaltt [ 'has_genotype' ] , genotype_id ) geno . addGenotype ( sex_specific_genotype_id , sex_specific_genotype_label , genotype_type ) graph . addTriple ( sex_specific_genotype_id , self . globaltt [ 'has_sex_agnostic_part' ] , genotype_id ) if phenotypes is not None : for phenotype_id in phenotypes : assoc = G2PAssoc ( graph , self . name , sex_specific_genotype_id , phenotype_id ) assoc . add_evidence ( assay_id ) assoc . add_evidence ( eco_id ) assoc . add_association_to_graph ( ) assoc_id = assoc . get_association_id ( ) model . addComment ( assoc_id , comment ) model . _addSexSpecificity ( assoc_id , self . resolve ( sex ) ) return
Create an association between a sex-specific strain id and each of the phenotypes. Here we create a genotype from the strain and a sex-specific genotype. Each of those genotypes is created as an anonymous node.
62,344
def parse ( self , limit = None ) : if limit is not None : LOG . info ( "Only parsing first %s rows fo each file" , str ( limit ) ) LOG . info ( "Parsing files..." ) if self . test_only : self . test_mode = True for f in [ 'all' ] : file = '/' . join ( ( self . rawdir , self . files [ f ] [ 'file' ] ) ) self . _process_data ( file , limit ) LOG . info ( "Finished parsing" ) return
IMPC data is delivered in three separate CSV files OR in one integrated file, each with the same file format.
62,345
def addGeneToPathway ( self , gene_id , pathway_id ) : gene_product = '_:' + re . sub ( r':' , '' , gene_id ) + 'product' self . model . addIndividualToGraph ( gene_product , None , self . globaltt [ 'gene_product' ] ) self . graph . addTriple ( gene_id , self . globaltt [ 'has gene product' ] , gene_product ) self . addComponentToPathway ( gene_product , pathway_id ) return
When adding a gene to a pathway, we create an intermediate gene product that is involved in the pathway through a blank node.
62,346
def addComponentToPathway ( self , component_id , pathway_id ) : self . graph . addTriple ( component_id , self . globaltt [ 'involved in' ] , pathway_id ) return
This can be used directly when the component is directly involved in the pathway. If a transforming event is performed on the component first, then addGeneToPathway should be used instead.
62,347
def write ( self , fmt = 'turtle' , stream = None ) : fmt_ext = { 'rdfxml' : 'xml' , 'turtle' : 'ttl' , 'nt' : 'nt' , 'nquads' : 'nq' , 'n3' : 'n3' } dest = None if self . name is not None : dest = '/' . join ( ( self . outdir , self . name ) ) if fmt in fmt_ext : dest = '.' . join ( ( dest , fmt_ext . get ( fmt ) ) ) else : dest = '.' . join ( ( dest , fmt ) ) LOG . info ( "Setting outfile to %s" , dest ) self . datasetfile = '/' . join ( ( self . outdir , self . name + '_dataset.ttl' ) ) LOG . info ( "Setting dataset file to %s" , self . datasetfile ) if self . dataset is not None and self . dataset . version is None : self . dataset . set_version_by_date ( ) LOG . info ( "No version for %s setting to date issued." , self . name ) else : LOG . warning ( "No output file set. Using stdout" ) stream = 'stdout' gu = GraphUtils ( None ) gu . write ( self . dataset . getGraph ( ) , 'turtle' , filename = self . datasetfile ) if self . test_mode : LOG . info ( "Setting testfile to %s" , self . testfile ) gu . write ( self . testgraph , 'turtle' , filename = self . testfile ) if stream is None : outfile = dest elif stream . lower ( ) . strip ( ) == 'stdout' : outfile = None else : LOG . error ( "I don't understand our stream." ) return gu . write ( self . graph , fmt , filename = outfile )
This convenience method will write out all of the graphs associated with the source. Right now these are hardcoded to be a single graph plus a src_dataset.ttl and a src_test.ttl. If you do not supply stream='stdout' it will by default write these to files.
62,348
def declareAsOntology ( self , graph ) : model = Model ( graph ) ontology_file_id = 'MonarchData:' + self . name + ".ttl" model . addOntologyDeclaration ( ontology_file_id ) cur_time = datetime . now ( ) t_string = cur_time . strftime ( "%Y-%m-%d" ) ontology_version = t_string archive_url = 'MonarchArchive:' + 'ttl/' + self . name + '.ttl' model . addOWLVersionIRI ( ontology_file_id , archive_url ) model . addOWLVersionInfo ( ontology_file_id , ontology_version )
The file we output needs to be declared as an ontology, including its version information.
62,349
def remove_backslash_r ( filename , encoding ) : with open ( filename , 'r' , encoding = encoding , newline = r'\n' ) as filereader : contents = filereader . read ( ) contents = re . sub ( r'\r' , '' , contents ) with open ( filename , "w" ) as filewriter : filewriter . truncate ( ) filewriter . write ( contents )
A helpful utility to remove carriage returns from any file. This will read a file into memory and overwrite the contents of the original file.
62,350
def load_local_translationtable ( self , name ) : localtt_file = 'translationtable/' + name + '.yaml' try : with open ( localtt_file ) : pass except IOError : with open ( localtt_file , 'w' ) as write_yaml : yaml . dump ( { name : name } , write_yaml ) finally : with open ( localtt_file , 'r' ) as read_yaml : localtt = yaml . safe_load ( read_yaml ) self . localtcid = { v : k for k , v in localtt . items ( ) } return localtt
Load ingest-specific translations from whatever they called something to the ontology label we need to map it to. To facilitate seeing more ontology labels in dipper ingests, a reverse mapping from ontology labels to external strings is also generated and available as the dict localtcid.
62,351
def addGene ( self , gene_id , gene_label , gene_type = None , gene_description = None ) : if gene_type is None : gene_type = self . globaltt [ 'gene' ] self . model . addClassToGraph ( gene_id , gene_label , gene_type , gene_description ) return
genes are classes
62,352
def get_ncbi_taxon_num_by_label ( label ) : req = { 'db' : 'taxonomy' , 'retmode' : 'json' , 'term' : label } req . update ( EREQ ) request = SESSION . get ( ESEARCH , params = req ) LOG . info ( 'fetching: %s' , request . url ) request . raise_for_status ( ) result = request . json ( ) [ 'esearchresult' ] if 'ERROR' in result : request = SESSION . get ( ESEARCH , params = req ) LOG . info ( 'fetching: %s' , request . url ) request . raise_for_status ( ) result = request . json ( ) [ 'esearchresult' ] tax_num = None if 'count' in result and str ( result [ 'count' ] ) == '1' : tax_num = result [ 'idlist' ] [ 0 ] else : LOG . warning ( 'ESEARCH for taxon label "%s" returns %s' , label , str ( result ) ) return tax_num
Here we want to look up the NCBI Taxon id using some kind of label . It will only return a result if there is a unique hit .
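EREQ, SESSION and ESEARCH are module-level settings not shown in this row; a minimal standalone sketch against the NCBI E-utilities esearch endpoint (requires network, and the retry logic above is omitted):

import requests

ESEARCH = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'

def get_ncbi_taxon_num_by_label(label):
    params = {'db': 'taxonomy', 'retmode': 'json', 'term': label}
    result = requests.get(ESEARCH, params=params).json()['esearchresult']
    if str(result.get('count')) == '1':   # only accept a unique hit
        return result['idlist'][0]
    return None

# get_ncbi_taxon_num_by_label('Danio rerio') is expected to return '7955'.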
62,353
def set_association_id ( self , assoc_id = None ) : if assoc_id is None : self . assoc_id = self . make_association_id ( self . definedby , self . sub , self . rel , self . obj ) else : self . assoc_id = assoc_id return self . assoc_id
This will set the association ID based on the internal parts of the association, or to assoc_id in cases where an external association identifier should be used.
62,354
def make_association_id ( definedby , sub , pred , obj , attributes = None ) : items_to_hash = [ definedby , sub , pred , obj ] if attributes is not None and len ( attributes ) > 0 : items_to_hash += attributes items_to_hash = [ x for x in items_to_hash if x is not None ] assoc_id = ':' . join ( ( 'MONARCH' , GraphUtils . digest_id ( '+' . join ( items_to_hash ) ) ) ) assert assoc_id is not None return assoc_id
A method to create unique identifiers for OBAN-style associations based on all the parts of the association. If any of the items is empty or None it will convert it to blank. It effectively digests the string of concatenated values. Subclasses of Assoc can submit an additional array of attributes that will be appended to the ID.
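GraphUtils.digest_id is not shown in this row. A minimal sketch of the same idea, with an assumed stand-in digest (a short stable hash), shows why the same parts always yield the same identifier:

import hashlib

def digest_id(value):
    # stand-in for GraphUtils.digest_id; the prefix and length are illustrative
    return 'b' + hashlib.md5(value.encode('utf-8')).hexdigest()[:16]

def make_association_id(definedby, sub, pred, obj, attributes=None):
    items_to_hash = [definedby, sub, pred, obj]
    if attributes:
        items_to_hash += attributes
    items_to_hash = [x for x in items_to_hash if x is not None]
    return ':'.join(('MONARCH', digest_id('+'.join(items_to_hash))))

a = make_association_id('zfin', 'ZFIN:ZDB-GENE-1', 'RO:0002200', 'ZP:0000001')
b = make_association_id('zfin', 'ZFIN:ZDB-GENE-1', 'RO:0002200', 'ZP:0000001')
assert a == b and a.startswith('MONARCH:')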
62,355
def toRoman ( num ) : if not 0 < num < 5000 : raise ValueError ( "number %n out of range (must be 1..4999)" , num ) if int ( num ) != num : raise TypeError ( "decimals %n can not be converted" , num ) result = "" for numeral , integer in romanNumeralMap : while num >= integer : result += numeral num -= integer return result
convert integer to Roman numeral
62,356
def fromRoman ( strng ) : if not strng : raise TypeError ( 'Input can not be blank' ) if not romanNumeralPattern . search ( strng ) : raise ValueError ( 'Invalid Roman numeral: %s' , strng ) result = 0 index = 0 for numeral , integer in romanNumeralMap : while strng [ index : index + len ( numeral ) ] == numeral : result += integer index += len ( numeral ) return result
convert Roman numeral to integer
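romanNumeralMap is defined elsewhere; from the two loops above it must be an ordered sequence of (numeral, value) pairs, largest value first. A self-contained round-trip sketch under that assumption (input validation from the originals omitted):

romanNumeralMap = (                      # assumed shape, largest value first
    ('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
    ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
    ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1),
)

def toRoman(num):
    result = ''
    for numeral, integer in romanNumeralMap:
        while num >= integer:
            result += numeral
            num -= integer
    return result

def fromRoman(strng):
    result, index = 0, 0
    for numeral, integer in romanNumeralMap:
        while strng[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result

assert toRoman(1987) == 'MCMLXXXVII'
assert fromRoman('MCMLXXXVII') == 1987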
62,357
def _process_genotype_backgrounds ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing genotype backgrounds" ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'backgrounds' ] [ 'file' ] ) ) geno = Genotype ( graph ) taxon_id = self . globaltt [ 'Danio rerio' ] model . addClassToGraph ( taxon_id , None ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( genotype_id , genotype_name , background_id , unused ) = row if self . test_mode and genotype_id not in self . test_ids [ 'genotype' ] : continue genotype_id = 'ZFIN:' + genotype_id . strip ( ) background_id = 'ZFIN:' + background_id . strip ( ) self . genotype_backgrounds [ genotype_id ] = background_id geno . addGenomicBackground ( background_id , None ) geno . addTaxon ( taxon_id , background_id ) geno . addGenotype ( genotype_id , None , self . globaltt [ 'intrinsic_genotype' ] ) geno . addGenomicBackgroundToGenotype ( background_id , genotype_id ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with genotype backgrounds" ) return
This table provides a mapping of genotypes to background genotypes. Note that the background_id is also a genotype_id.
62,358
def _process_stages ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing stages" ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'stage' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( stage_id , stage_obo_id , stage_name , begin_hours , end_hours ) = row stage_id = 'ZFIN:' + stage_id . strip ( ) model . addClassToGraph ( stage_id , stage_name ) model . addEquivalentClass ( stage_id , stage_obo_id ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with stages" ) return
This table provides mappings between ZFIN stage IDs and ZFS terms and includes the starting and ending hours for the developmental stage . Currently only processing the mapping from the ZFIN stage ID to the ZFS ID .
62,359
def _process_genes ( self , limit = None ) : LOG . info ( "Processing genes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'gene' ] [ 'file' ] ) ) geno = Genotype ( graph ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , gene_so_id , gene_symbol , ncbi_gene_id ) = row if self . test_mode and gene_id not in self . test_ids [ 'gene' ] : continue gene_id = 'ZFIN:' + gene_id . strip ( ) ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id . strip ( ) self . id_label_map [ gene_id ] = gene_symbol if not self . test_mode and limit is not None and line_counter > limit : pass else : geno . addGene ( gene_id , gene_symbol ) model . addEquivalentClass ( gene_id , ncbi_gene_id ) LOG . info ( "Done with genes" ) return
This table provides the ZFIN gene id, the SO type of the gene, the gene symbol and the NCBI Gene ID.
62,360
def _process_features ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing features" ) line_counter = 0 geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'features' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( genomic_feature_id , feature_so_id , genomic_feature_abbreviation , genomic_feature_name , genomic_feature_type , mutagen , mutagee , construct_id , construct_name , construct_so_id , talen_crispr_id , talen_crispr_nam ) = row if self . test_mode and ( genomic_feature_id not in self . test_ids [ 'allele' ] ) : continue genomic_feature_id = 'ZFIN:' + genomic_feature_id . strip ( ) model . addIndividualToGraph ( genomic_feature_id , genomic_feature_name , feature_so_id ) model . addSynonym ( genomic_feature_id , genomic_feature_abbreviation ) if construct_id is not None and construct_id != '' : construct_id = 'ZFIN:' + construct_id . strip ( ) geno . addConstruct ( construct_id , construct_name , construct_so_id ) geno . addSequenceDerivesFrom ( genomic_feature_id , construct_id ) self . id_label_map [ genomic_feature_id ] = genomic_feature_abbreviation self . id_label_map [ construct_id ] = construct_name if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with features" ) return
This module provides information for the intrinsic and extrinsic genotype features of zebrafish . All items here are alterations and are therefore instances .
62,361
def _process_pubinfo ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'pubs' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "latin-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 try : ( pub_id , pubmed_id , authors , title , journal , year , vol , pages ) = row except ValueError : try : ( pub_id , pubmed_id , authors , title , journal , year , vol , pages ) = row except ValueError : LOG . warning ( "Error parsing row %s: " , row ) if self . test_mode and ( 'ZFIN:' + pub_id not in self . test_ids [ 'pub' ] and 'PMID:' + pubmed_id not in self . test_ids [ 'pub' ] ) : continue pub_id = 'ZFIN:' + pub_id . strip ( ) alist = re . split ( r',' , authors ) if len ( alist ) > 1 : astring = ' ' . join ( ( alist [ 0 ] . strip ( ) , 'et al' ) ) else : astring = authors pub_label = '; ' . join ( ( astring , title , journal , year , vol , pages ) ) ref = Reference ( graph , pub_id ) ref . setShortCitation ( pub_label ) ref . setYear ( year ) ref . setTitle ( title ) if pubmed_id is not None and pubmed_id != '' : ref . setType ( self . globaltt [ 'journal article' ] ) pubmed_id = 'PMID:' + pubmed_id . strip ( ) rpm = Reference ( graph , pubmed_id , self . globaltt [ 'journal article' ] ) rpm . addRefToGraph ( ) model . addSameIndividual ( pub_id , pubmed_id ) model . makeLeader ( pubmed_id ) ref . addRefToGraph ( ) if not self . test_mode and limit is not None and line_counter > limit : break return
This will pull the ZFIN internal publication information, map them to their equivalent PMIDs, and make labels.
62,362
def _process_pub2pubmed ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'pub2pubmed' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "latin-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( pub_id , pubmed_id ) = row if self . test_mode and ( 'ZFIN:' + pub_id not in self . test_ids [ 'pub' ] and 'PMID:' + pubmed_id not in self . test_ids [ 'pub' ] ) : continue pub_id = 'ZFIN:' + pub_id . strip ( ) rtype = None if pubmed_id != '' and pubmed_id is not None : pubmed_id = 'PMID:' + pubmed_id . strip ( ) rtype = self . globaltt [ 'journal article' ] rpm = Reference ( graph , pubmed_id , rtype ) rpm . addRefToGraph ( ) model . addSameIndividual ( pub_id , pubmed_id ) ref = Reference ( graph , pub_id , rtype ) ref . addRefToGraph ( ) if not self . test_mode and limit is not None and line_counter > limit : break return
This will pull the ZFIN internal publication-to-PubMed mappings. Somewhat redundant with the process_pubinfo method, but this includes additional mappings.
62,363
def _process_targeting_reagents ( self , reagent_type , limit = None ) : LOG . info ( "Processing Gene Targeting Reagents" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) geno = Genotype ( graph ) if reagent_type not in [ 'morph' , 'talen' , 'crispr' ] : LOG . error ( "You didn't specify the right kind of file type." ) return raw = '/' . join ( ( self . rawdir , self . files [ reagent_type ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 if reagent_type in [ 'morph' , 'crispr' ] : try : ( gene_num , gene_so_id , gene_symbol , reagent_num , reagent_so_id , reagent_symbol , reagent_sequence , publication , note ) = row except ValueError : ( gene_num , gene_so_id , gene_symbol , reagent_num , reagent_so_id , reagent_symbol , reagent_sequence , publication ) = row elif reagent_type == 'talen' : ( gene_num , gene_so_id , gene_symbol , reagent_num , reagent_so_id , reagent_symbol , reagent_sequence , reagent_sequence2 , publication , note ) = row else : return reagent_id = 'ZFIN:' + reagent_num . strip ( ) gene_id = 'ZFIN:' + gene_num . strip ( ) self . id_label_map [ reagent_id ] = reagent_symbol if self . test_mode and ( reagent_num not in self . test_ids [ 'morpholino' ] and gene_num not in self . test_ids [ 'gene' ] ) : continue geno . addGeneTargetingReagent ( reagent_id , reagent_symbol , reagent_so_id , gene_id ) if publication != '' : pubs = re . split ( r',' , publication . strip ( ) ) for pub in pubs : pub_id = 'ZFIN:' + pub . strip ( ) ref = Reference ( graph , pub_id ) ref . addRefToGraph ( ) graph . addTriple ( pub_id , self . globaltt [ 'mentions' ] , reagent_id ) if note != '' : model . addComment ( reagent_id , note ) if reagent_id not in self . variant_loci_genes : self . variant_loci_genes [ reagent_id ] = [ gene_id ] else : if gene_id not in self . variant_loci_genes [ reagent_id ] : self . variant_loci_genes [ reagent_id ] += [ gene_id ] if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with Reagent type %s" , reagent_type ) return
This method processes the gene-targeting knockdown reagents, such as morpholinos, TALENs and CRISPRs. We create triples for the reagents and pass the data into a hash map for use in the pheno_enviro method.
62,364
def _process_uniprot_ids ( self , limit = None ) : LOG . info ( "Processing UniProt IDs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'uniprot' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , gene_so_id , gene_symbol , uniprot_id ) = row if self . test_mode and gene_id not in self . test_ids [ 'gene' ] : continue gene_id = 'ZFIN:' + gene_id . strip ( ) uniprot_id = 'UniProtKB:' + uniprot_id . strip ( ) geno . addGene ( gene_id , gene_symbol ) model . addIndividualToGraph ( uniprot_id , None , self . globaltt [ 'polypeptide' ] ) graph . addTriple ( gene_id , self . globaltt [ 'has gene product' ] , uniprot_id ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with UniProt IDs" ) return
This method processes the mappings from ZFIN gene IDs to UniProtKB IDs .
62,365
def get_orthology_evidence_code ( self , abbrev ) : eco_abbrev_map = { 'AA' : 'ECO:0000031' , 'CE' : 'ECO:0000008' , 'CL' : 'ECO:0000044' , 'FC' : 'ECO:0000012' , 'FH' : 'ECO:0000064' , 'IX' : 'ECO:0000040' , 'NS' : None , 'NT' : 'ECO:0000032' , 'SI' : 'ECO:0000094' , 'SL' : 'ECO:0000122' , 'SS' : 'ECO:0000024' , 'SU' : 'ECO:0000027' , 'XH' : 'ECO:0000002' , 'PT' : 'ECO:0000080' , 'OT' : None , } if abbrev not in eco_abbrev_map : LOG . warning ( "Evidence code for orthology (%s) not mapped" , str ( abbrev ) ) return eco_abbrev_map . get ( abbrev )
move to localtt & globltt
62,366
def _process_diseases ( self , limit = None ) : LOG . info ( "Processing diseases" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'disease' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( disease_id , disease_name ) = row disease_id = 'KEGG-' + disease_id . strip ( ) if disease_id not in self . label_hash : self . label_hash [ disease_id ] = disease_name if self . test_mode and disease_id not in self . test_ids [ 'disease' ] : continue model . addClassToGraph ( disease_id , disease_name ) if not self . test_mode and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with diseases" ) return
This method processes the KEGG disease IDs .
62,367
def _process_genes ( self , limit = None ) : LOG . info ( "Processing genes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 family = Family ( graph ) geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'hsa_genes' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , gene_name ) = row gene_id = 'KEGG-' + gene_id . strip ( ) gene_stuff = re . split ( 'r;' , gene_name ) symbollist = re . split ( r',' , gene_stuff [ 0 ] ) first_symbol = symbollist [ 0 ] . strip ( ) if gene_id not in self . label_hash : self . label_hash [ gene_id ] = first_symbol if self . test_mode and gene_id not in self . test_ids [ 'genes' ] : continue geno . addGene ( gene_id , first_symbol ) if len ( gene_stuff ) > 1 : description = gene_stuff [ 1 ] . strip ( ) model . addDefinition ( gene_id , description ) for i in enumerate ( symbollist , start = 1 ) : model . addSynonym ( gene_id , i [ 1 ] . strip ( ) ) if len ( gene_stuff ) > 2 : ko_part = gene_stuff [ 2 ] ko_match = re . search ( r'K\d+' , ko_part ) if ko_match is not None and len ( ko_match . groups ( ) ) == 1 : ko = 'KEGG-ko:' + ko_match . group ( 1 ) family . addMemberOf ( gene_id , ko ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with genes" ) return
This method processes the KEGG gene IDs. The label for the gene is pulled as the first symbol in the list of gene symbols; the rest are added as synonyms. The long form of the gene name is added as a definition. This is hardcoded to process just human genes.
62,368
def _process_ortholog_classes ( self , limit = None ) : LOG . info ( "Processing ortholog classes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'ortholog_classes' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( orthology_class_id , orthology_class_name ) = row if self . test_mode and orthology_class_id not in self . test_ids [ 'orthology_classes' ] : continue other_labels = re . split ( r'[;,]' , orthology_class_name ) orthology_label = other_labels [ 0 ] orthology_class_id = 'KEGG-' + orthology_class_id . strip ( ) orthology_type = self . globaltt [ 'gene_family' ] model . addClassToGraph ( orthology_class_id , orthology_label , orthology_type ) if len ( other_labels ) > 1 : for s in other_labels : model . addSynonym ( orthology_class_id , s . strip ( ) ) d = other_labels [ len ( other_labels ) - 1 ] model . addDescription ( orthology_class_id , d ) ec_matches = re . findall ( r'((?:\d+|\.|-){5,7})' , d ) if ec_matches is not None : for ecm in ec_matches : model . addXref ( orthology_class_id , 'EC:' + ecm ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with ortholog classes" ) return
This method adds the KEGG orthology classes to the graph.
62,369
def _process_orthologs ( self , raw , limit = None ) : LOG . info ( "Processing orthologs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , orthology_class_id ) = row orthology_class_id = 'KEGG:' + orthology_class_id . strip ( ) gene_id = 'KEGG:' + gene_id . strip ( ) OrthologyAssoc ( graph , self . name , gene_id , None ) . add_gene_family_to_graph ( orthology_class_id ) model . addClassToGraph ( gene_id , None ) model . addClassToGraph ( orthology_class_id , None ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with orthologs" ) return
This method maps orthologs for a species to the KEGG orthology classes .
62,370
def _process_kegg_disease2gene ( self , limit = None ) : LOG . info ( "Processing KEGG disease to gene" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno = Genotype ( graph ) rel = self . globaltt [ 'is marker for' ] noomimset = set ( ) raw = '/' . join ( ( self . rawdir , self . files [ 'disease_gene' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , disease_id ) = row if self . test_mode and gene_id not in self . test_ids [ 'genes' ] : continue gene_id = 'KEGG-' + gene_id . strip ( ) disease_id = 'KEGG-' + disease_id . strip ( ) if disease_id not in self . kegg_disease_hash : disease_label = None if disease_id in self . label_hash : disease_label = self . label_hash [ disease_id ] if re . search ( r'includ' , str ( disease_label ) ) : LOG . info ( "Skipping this association because " + "it's a grouping class: %s" , disease_label ) continue model . addClassToGraph ( disease_id , disease_label ) noomimset . add ( disease_id ) alt_locus_id = self . _make_variant_locus_id ( gene_id , disease_id ) alt_label = self . label_hash [ alt_locus_id ] model . addIndividualToGraph ( alt_locus_id , alt_label , self . globaltt [ 'variant_locus' ] ) geno . addAffectedLocus ( alt_locus_id , gene_id ) model . addBlankNodeAnnotation ( alt_locus_id ) assoc = G2PAssoc ( graph , self . name , alt_locus_id , disease_id , rel ) assoc . add_association_to_graph ( ) if not self . test_mode and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with KEGG disease to gene" ) LOG . info ( "Found %d diseases with no omim id" , len ( noomimset ) ) return
This method creates an association between diseases and their associated genes . We are being conservative here and only processing those diseases for which there is no mapping to OMIM .
62,371
def _process_omim2gene ( self , limit = None ) : LOG . info ( "Processing OMIM to KEGG gene" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'omim2gene' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( kegg_gene_id , omim_id , link_type ) = row if self . test_mode and kegg_gene_id not in self . test_ids [ 'genes' ] : continue kegg_gene_id = 'KEGG-' + kegg_gene_id . strip ( ) omim_id = re . sub ( r'omim' , 'OMIM' , omim_id ) if link_type == 'equivalent' : model . addClassToGraph ( omim_id , None ) geno . addGene ( kegg_gene_id , None ) if not DipperUtil . is_omim_disease ( omim_id ) : model . addEquivalentClass ( kegg_gene_id , omim_id ) elif link_type == 'reverse' : alt_locus_id = self . _make_variant_locus_id ( kegg_gene_id , omim_id ) alt_label = self . label_hash [ alt_locus_id ] model . addIndividualToGraph ( alt_locus_id , alt_label , self . globaltt [ 'variant_locus' ] ) geno . addAffectedLocus ( alt_locus_id , kegg_gene_id ) model . addBlankNodeAnnotation ( alt_locus_id ) rel = self . globaltt [ 'is marker for' ] assoc = G2PAssoc ( graph , self . name , alt_locus_id , omim_id , rel ) assoc . add_association_to_graph ( ) elif link_type == 'original' : LOG . info ( 'Unable to handle original link for %s-%s' , kegg_gene_id , omim_id ) else : LOG . warning ( 'Unhandled link type for %s-%s: %s' , kegg_gene_id , omim_id , link_type ) if ( not self . test_mode ) and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with OMIM to KEGG gene" ) return
This method maps the OMIM IDs and KEGG gene ID . Currently split based on the link_type field . Equivalent link types are mapped as gene XRefs . Reverse link types are mapped as disease to gene associations . Original link types are currently skipped .
62,372
def _process_genes_kegg2ncbi ( self , limit = None ) : LOG . info ( "Processing KEGG gene IDs to NCBI gene IDs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'ncbi' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( kegg_gene_id , ncbi_gene_id , link_type ) = row if self . test_mode and kegg_gene_id not in self . test_ids [ 'genes' ] : continue ncbi_gene_id = re . sub ( r'ncbi-geneid' , 'NCBIGene' , ncbi_gene_id ) kegg_gene_id = 'KEGG-' + kegg_gene_id model . addClassToGraph ( kegg_gene_id , None ) model . addClassToGraph ( ncbi_gene_id , None ) model . addEquivalentClass ( kegg_gene_id , ncbi_gene_id ) if not self . test_mode and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with KEGG gene IDs to NCBI gene IDs" ) return
This method maps the KEGG human gene IDs to the corresponding NCBI Gene IDs .
62,373
def _process_pathway_disease ( self , limit ) : LOG . info ( "Processing KEGG pathways to disease ids" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'pathway_disease' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( disease_id , kegg_pathway_num ) = row if self . test_mode and kegg_pathway_num not in self . test_ids [ 'pathway' ] : continue disease_id = 'KEGG-' + disease_id pathway_id = 'KEGG-' + kegg_pathway_num graph . addTriple ( pathway_id , self . globaltt [ 'causally upstream of or within' ] , disease_id ) if not self . test_mode and limit is not None and line_counter > limit : break return
We make a link between the pathway identifiers and any diseases associated with them. Since we model diseases as processes, we make a triple saying that the pathway may be causally upstream of or within the disease process.
62,374
def _make_variant_locus_id ( self , gene_id , disease_id ) : alt_locus_id = '_:' + re . sub ( r':' , '' , gene_id ) + '-' + re . sub ( r':' , '' , disease_id ) + 'VL' alt_label = self . label_hash . get ( gene_id ) disease_label = self . label_hash . get ( disease_id ) if alt_label is not None and alt_label != '' : alt_label = 'some variant of ' + str ( alt_label ) if disease_label is not None and disease_label != '' : alt_label += ' that is associated with ' + str ( disease_label ) else : alt_label = None self . label_hash [ alt_locus_id ] = alt_label return alt_locus_id
We actually want the association between the gene and the disease to go via an alternate locus, not the wildtype gene itself, so we make an anonymous alternate locus and use that in the association. We also build a label for the anonymous node and add it to the label hash; a worked example follows below.
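A minimal standalone sketch of the identifier and label construction described above; the gene and disease identifiers and labels here are hypothetical examples, not values taken from the KEGG data.

import re

# Build the anonymous variant-locus id and its descriptive label.
def make_variant_locus_id(gene_id, gene_label, disease_id, disease_label):
    alt_locus_id = '_:' + re.sub(r':', '', gene_id) + '-' + \
        re.sub(r':', '', disease_id) + 'VL'
    alt_label = 'some variant of ' + gene_label
    if disease_label:
        alt_label += ' that is associated with ' + disease_label
    return alt_locus_id, alt_label

print(make_variant_locus_id(
    'KEGG-hsa:3791', 'KDR', 'OMIM:603075', 'hemangioma'))
# ('_:KEGG-hsa3791-OMIM603075VL',
#  'some variant of KDR that is associated with hemangioma')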
62,375
def _fetch_disambiguating_assoc ( self ) : disambig_file = '/' . join ( ( self . rawdir , self . static_files [ 'publications' ] [ 'file' ] ) ) assoc_file = '/' . join ( ( self . rawdir , self . files [ 'chemical_disease_interactions' ] [ 'file' ] ) ) if os . path . exists ( disambig_file ) : dfile_dt = os . stat ( disambig_file ) afile_dt = os . stat ( assoc_file ) if dfile_dt < afile_dt : LOG . info ( "Local file date before chem-disease assoc file. " " Downloading..." ) else : LOG . info ( "Local file date after chem-disease assoc file. " " Skipping download." ) return all_pubs = set ( ) dual_evidence = re . compile ( r'^marker\/mechanism\|therapeutic$' ) with gzip . open ( assoc_file , 'rt' ) as tsvfile : reader = csv . reader ( tsvfile , delimiter = "\t" ) for row in reader : if re . match ( r'^#' , ' ' . join ( row ) ) : continue self . _check_list_len ( row , 10 ) ( chem_name , chem_id , cas_rn , disease_name , disease_id , direct_evidence , inferred_gene_symbol , inference_score , omim_ids , pubmed_ids ) = row if direct_evidence == '' or not re . match ( dual_evidence , direct_evidence ) : continue if pubmed_ids is not None and pubmed_ids != '' : all_pubs . update ( set ( re . split ( r'\|' , pubmed_ids ) ) ) sorted_pubs = sorted ( list ( all_pubs ) ) batch_size = 4000 params = { 'inputType' : 'reference' , 'report' : 'diseases_curated' , 'format' : 'tsv' , 'action' : 'Download' } url = 'http://ctdbase.org/tools/batchQuery.go?q' start = 0 end = min ( ( batch_size , len ( all_pubs ) ) ) with open ( disambig_file , 'wb' ) as dmbf : while start < len ( sorted_pubs ) : params [ 'inputTerms' ] = '|' . join ( sorted_pubs [ start : end ] ) LOG . info ( 'fetching %d (%d-%d) refs: %s' , len ( re . split ( r'\|' , params [ 'inputTerms' ] ) ) , start , end , params [ 'inputTerms' ] ) data = urllib . parse . urlencode ( params ) encoding = 'utf-8' binary_data = data . encode ( encoding ) req = urllib . request . Request ( url , binary_data ) resp = urllib . request . urlopen ( req ) dmbf . write ( resp . read ( ) ) start = end end = min ( ( start + batch_size , len ( sorted_pubs ) ) ) return
For any items in the chemical-disease association file that have ambiguous association types, we fetch the disambiguated associations using the batch query API and store them in a file. Elsewhere we can loop through that file and create the appropriate associations.
62,376
def _make_association ( self , subject_id , object_id , rel_id , pubmed_ids ) : assoc = G2PAssoc ( self . graph , self . name , subject_id , object_id , rel_id ) if pubmed_ids is not None and len ( pubmed_ids ) > 0 : for pmid in pubmed_ids : ref = Reference ( self . graph , pmid , self . globaltt [ 'journal article' ] ) ref . addRefToGraph ( ) assoc . add_source ( pmid ) assoc . add_evidence ( self . globaltt [ 'traceable author statement' ] ) assoc . add_association_to_graph ( ) return
Make a reified association given a list of PubMed identifiers; each PubMed ID is added as a source with 'traceable author statement' evidence.
62,377
def checkIfRemoteIsNewer ( self , localfile , remote_size , remote_modify ) : is_remote_newer = False status = os . stat ( localfile ) LOG . info ( "\nLocal file size: %i" "\nLocal Timestamp: %s" , status [ ST_SIZE ] , datetime . fromtimestamp ( status . st_mtime ) ) remote_dt = Bgee . _convert_ftp_time_to_iso ( remote_modify ) if remote_dt != datetime . fromtimestamp ( status . st_mtime ) or status [ ST_SIZE ] != int ( remote_size ) : is_remote_newer = True LOG . info ( "Object on server has different size %i and/or date %s" , remote_size , remote_dt ) return is_remote_newer
Overrides checkIfRemoteIsNewer in the Source class: the remote file is considered newer when its reported size or modification time differs from the local copy's.
62,378
def _convert_ftp_time_to_iso ( ftp_time ) : date_time = datetime ( int ( ftp_time [ : 4 ] ) , int ( ftp_time [ 4 : 6 ] ) , int ( ftp_time [ 6 : 8 ] ) , int ( ftp_time [ 8 : 10 ] ) , int ( ftp_time [ 10 : 12 ] ) , int ( ftp_time [ 12 : 14 ] ) ) return date_time
Convert a timestamp string in the format 20160705042714 (YYYYMMDDhhmmss) to a datetime object; a minimal sketch follows below.
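A minimal sketch of the conversion above, assuming the timestamp string is well formed; the example input is hypothetical.

from datetime import datetime

def convert_ftp_time_to_iso(ftp_time):
    # Slice the YYYYMMDDhhmmss string into its six datetime components.
    return datetime(
        int(ftp_time[:4]), int(ftp_time[4:6]), int(ftp_time[6:8]),
        int(ftp_time[8:10]), int(ftp_time[10:12]), int(ftp_time[12:14]))

print(convert_ftp_time_to_iso('20160705042714'))  # 2016-07-05 04:27:14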
62,379
def fetch ( self , is_dl_forced = False ) : cxn = { } cxn [ 'host' ] = 'nif-db.crbs.ucsd.edu' cxn [ 'database' ] = 'disco_crawler' cxn [ 'port' ] = '5432' cxn [ 'user' ] = config . get_config ( ) [ 'user' ] [ 'disco' ] cxn [ 'password' ] = config . get_config ( ) [ 'keys' ] [ cxn [ 'user' ] ] self . dataset . setFileAccessUrl ( 'jdbc:postgresql://' + cxn [ 'host' ] + ':' + cxn [ 'port' ] + '/' + cxn [ 'database' ] , is_object_literal = True ) self . fetch_from_pgdb ( self . tables , cxn ) self . get_files ( is_dl_forced ) fstat = os . stat ( '/' . join ( ( self . rawdir , 'dvp.pr_nlx_157874_1' ) ) ) filedate = datetime . utcfromtimestamp ( fstat [ ST_CTIME ] ) . strftime ( "%Y-%m-%d" ) self . dataset . setVersion ( filedate ) return
Set up the connection details for the DISCO crawler database, fetch its tables, and set the dataset version from the local file date.
62,380
def parse ( self , limit = None ) : if limit is not None : LOG . info ( "Only parsing first %s rows of each file" , limit ) if self . test_only : self . test_mode = True LOG . info ( "Parsing files..." ) self . _process_nlx_157874_1_view ( '/' . join ( ( self . rawdir , 'dvp.pr_nlx_157874_1' ) ) , limit ) self . _map_eom_terms ( '/' . join ( ( self . rawdir , self . files [ 'map' ] [ 'file' ] ) ) , limit ) LOG . info ( "Finished parsing." ) self . testgraph = self . graph return
Override Source.parse, inherited via PostgreSQLSource.
62,381
def _process_gxd_genotype_view ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph geno = Genotype ( graph ) model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'gxd_genotype_view' ) ) LOG . info ( "getting genotypes and their backgrounds" ) with open ( raw , 'r' ) as f1 : f1 . readline ( ) for line in f1 : line = line . rstrip ( "\n" ) line_counter += 1 ( genotype_key , strain_key , strain , mgiid ) = line . split ( '\t' ) if self . test_mode is True : if int ( genotype_key ) not in self . test_keys . get ( 'genotype' ) : continue if self . idhash [ 'genotype' ] . get ( genotype_key ) is None : self . idhash [ 'genotype' ] [ genotype_key ] = mgiid geno . addGenotype ( mgiid , None ) strain_id = self . idhash [ 'strain' ] . get ( strain_key ) background_type = self . globaltt [ 'genomic_background' ] if strain_id is None or int ( strain_key ) < 0 : if strain_id is None : strain_id = self . _makeInternalIdentifier ( 'strain' , strain_key ) self . idhash [ 'strain' ] . update ( { strain_key : strain_id } ) model . addComment ( strain_id , "strain_key:" + strain_key ) elif int ( strain_key ) < 0 : strain_id = self . _makeInternalIdentifier ( 'strain' , re . sub ( r':' , '' , str ( strain_id ) ) ) strain_id += re . sub ( r':' , '' , str ( mgiid ) ) strain_id = re . sub ( r'^_' , '_:' , strain_id ) strain_id = re . sub ( r'::' , ':' , strain_id ) model . addDescription ( strain_id , "This genomic background is unknown. " + "This is a placeholder background for " + mgiid + "." ) background_type = self . globaltt [ 'unspecified_genomic_background' ] LOG . info ( "adding background as internal id: %s %s: %s" , strain_key , strain , strain_id ) geno . addGenomicBackgroundToGenotype ( strain_id , mgiid , background_type ) self . label_hash [ strain_id ] = strain self . geno_bkgd [ mgiid ] = strain_id if not self . test_mode and limit is not None and line_counter > limit : break return
This table indicates the relationship between a genotype and its background strain. It leverages the Genotype class methods to do this.
62,382
def _process_gxd_genotype_summary_view ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno_hash = { } raw = '/' . join ( ( self . rawdir , 'gxd_genotype_summary_view' ) ) LOG . info ( "building labels for genotypes" ) with open ( raw , 'r' ) as f : f . readline ( ) for line in f : line = line . rstrip ( "\n" ) line_counter += 1 ( object_key , preferred , mgiid , subtype , short_description ) = line . split ( '\t' ) if self . test_mode is True : if int ( object_key ) not in self . test_keys . get ( 'genotype' ) : continue self . idhash [ 'genotype' ] [ object_key ] = mgiid if preferred == '1' : d = re . sub ( r'\,' , '/' , short_description . strip ( ) ) if mgiid not in geno_hash : geno_hash [ mgiid ] = { 'vslcs' : [ d ] , 'subtype' : subtype , 'key' : object_key } else : vslcs = geno_hash [ mgiid ] . get ( 'vslcs' ) vslcs . append ( d ) else : pass if not self . test_mode and limit is not None and line_counter > limit : break geno = Genotype ( graph ) for gt in geno_hash : genotype = geno_hash . get ( gt ) gvc = sorted ( genotype . get ( 'vslcs' ) ) label = '; ' . join ( gvc ) + ' [' + genotype . get ( 'subtype' ) + ']' geno . addGenotype ( gt , None ) model . addComment ( gt , self . _makeInternalIdentifier ( 'genotype' , genotype . get ( 'key' ) ) ) model . addSynonym ( gt , label . strip ( ) ) return
Add the genotype internal id to MGI id mapping to the id hashmap. Also add them as individuals to the graph. We re-format the label to put the background strain in brackets after the GVC; a small sketch of the label assembly follows below.
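A small sketch of the label re-formatting described above; the VSLC strings and subtype are hypothetical examples rather than actual MGI data.

# Sort the variant single-locus complements, join them with '; ', and
# append the background subtype in brackets.
vslcs = ['Trp53<tm1Tyj>/Trp53<tm1Tyj>', 'Fgfr1<tm1Aso>/Fgfr1<+>']
subtype = 'involves: 129S4/SvJae'
label = '; '.join(sorted(vslcs)) + ' [' + subtype + ']'
print(label)
# Fgfr1<tm1Aso>/Fgfr1<+>; Trp53<tm1Tyj>/Trp53<tm1Tyj> [involves: 129S4/SvJae]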
62,383
def process_mgi_relationship_transgene_genes ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph LOG . info ( "getting transgene genes" ) raw = '/' . join ( ( self . rawdir , 'mgi_relationship_transgene_genes' ) ) geno = Genotype ( graph ) col = [ 'rel_key' , 'allele_key' , 'allele_id' , 'allele_label' , 'category_key' , 'category_name' , 'property_key' , 'property_name' , 'gene_num' ] with open ( raw , 'r' , encoding = "utf8" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) header = next ( filereader ) if header != col : LOG . error ( 'expected columns: %s\n\tBut got:\n%s' , col , header ) for row in filereader : allele_key = int ( row [ col . index ( 'allele_key' ) ] ) allele_id = row [ col . index ( 'allele_id' ) ] gene_num = int ( row [ col . index ( 'gene_num' ) ] ) if self . test_mode and allele_key not in self . test_keys . get ( 'allele' ) and gene_num not in self . test_ids : continue gene_id = 'NCBIGene:' + str ( gene_num ) seqalt_id = self . idhash [ 'seqalt' ] . get ( allele_key ) if seqalt_id is None : seqalt_id = allele_id geno . addSequenceDerivesFrom ( seqalt_id , gene_id ) if not self . test_mode and limit is not None and filereader . line_num > limit : break return
Here we have the relationship between MGI transgene alleles and the non-mouse gene ids that are part of them. We augment the allele with the transgene parts.
62,384
def _getnode ( self , curie ) : node = None if curie [ 0 ] == '_' : if self . are_bnodes_skized is True : node = self . skolemizeBlankNode ( curie ) else : node = BNode ( re . sub ( r'^_:|^_' , '' , curie , 1 ) ) elif curie [ : 4 ] == 'http' or curie [ : 3 ] == 'ftp' : node = URIRef ( curie ) else : iri = RDFGraph . curie_util . get_uri ( curie ) if iri is not None : node = URIRef ( RDFGraph . curie_util . get_uri ( curie ) ) prefix = curie . split ( ':' ) [ 0 ] if prefix not in self . namespace_manager . namespaces ( ) : mapped_iri = self . curie_map [ prefix ] self . bind ( prefix , Namespace ( mapped_iri ) ) else : LOG . error ( "couldn't make URI for %s" , curie ) return node
This is a wrapper for creating a URIRef or BNode object from a given curie or IRI string; a simplified sketch follows below.
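A simplified sketch of the curie/IRI-to-node decision above, assuming rdflib is available; the prefix map is a hypothetical two-entry stand-in for the real curie map, and skolemization of blank nodes is omitted.

from rdflib import BNode, URIRef

prefix_map = {'OMIM': 'http://omim.org/entry/'}  # hypothetical curie map

def getnode(curie):
    if curie.startswith('_'):                  # blank-node curie
        return BNode(curie.lstrip('_:'))
    if curie.startswith(('http', 'ftp')):      # already an IRI
        return URIRef(curie)
    prefix, _, local = curie.partition(':')    # expand via the curie map
    return URIRef(prefix_map[prefix] + local) if prefix in prefix_map else None

print(getnode('OMIM:154700'))  # http://omim.org/entry/154700
print(getnode('_:b123'))       # b123 (a blank node)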
62,385
def add_association_to_graph ( self ) : Assoc . add_association_to_graph ( self ) if self . onset is not None and self . onset != '' : self . graph . addTriple ( self . assoc_id , self . globaltt [ 'onset' ] , self . onset ) if self . frequency is not None and self . frequency != '' : self . graph . addTriple ( self . assoc_id , self . globaltt [ 'frequency' ] , self . frequency ) return
The reified relationship between a disease and a phenotype is decorated with some provenance information (onset and frequency, when supplied). This makes the assumption that both the disease and the phenotype are classes.
62,386
def make_parent_bands ( self , band , child_bands ) : m = re . match ( r'([pq][A-H\d]+(?:\.\d+)?)' , band ) if len ( band ) > 0 : if m : p = str ( band [ 0 : len ( band ) - 1 ] ) p = re . sub ( r'\.$' , '' , p ) if p is not None : child_bands . add ( p ) self . make_parent_bands ( p , child_bands ) else : child_bands = set ( ) return child_bands
This recursively determines the grouping bands that a band belongs to, e.g. 13q21.31 ==> 13, 13q, 13q2, 13q21, 13q21.3, 13q21.31; see the sketch below.
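A standalone sketch of the recursive expansion above, applied to the sub-band portion only; prefixing the chromosome ('13') to each result reproduces the grouping in the docstring, while the bare chromosome itself is assumed to be handled outside this recursion.

import re

def make_parent_bands(band, child_bands):
    # Strip one trailing character at a time (dropping a dangling '.'),
    # recording each prefix as a parent band, until only 'p'/'q' remains.
    if band and re.match(r'[pq][A-H\d]+(?:\.\d+)?', band):
        parent = re.sub(r'\.$', '', band[:-1])
        if parent:
            child_bands.add(parent)
            make_parent_bands(parent, child_bands)
    return child_bands

bands = make_parent_bands('q21.31', {'q21.31'})
print(sorted('13' + b for b in bands))
# ['13q', '13q2', '13q21', '13q21.3', '13q21.31']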
62,387
def get_curie ( self , uri ) : prefix = self . get_curie_prefix ( uri ) if prefix is not None : key = self . curie_map [ prefix ] return '%s:%s' % ( prefix , uri [ len ( key ) : len ( uri ) ] ) return None
Get a CURIE from a URI
62,388
def get_uri ( self , curie ) : if curie is None : return None parts = curie . split ( ':' ) if len ( parts ) == 1 : if curie != '' : LOG . error ( "Not a properly formed curie: \"%s\"" , curie ) return None prefix = parts [ 0 ] if prefix in self . curie_map : return '%s%s' % ( self . curie_map . get ( prefix ) , curie [ ( curie . index ( ':' ) + 1 ) : ] ) LOG . error ( "Curie prefix not defined for %s" , curie ) return None
Get a URI from a CURIE
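A minimal round-trip sketch of the two conversions above, using a hypothetical two-entry curie map in place of the real one.

curie_map = {
    'OMIM': 'http://omim.org/entry/',
    'NCBIGene': 'http://www.ncbi.nlm.nih.gov/gene/',
}

def get_uri(curie):
    # Expand a prefixed identifier to its full IRI.
    prefix, _, local = curie.partition(':')
    return curie_map[prefix] + local if prefix in curie_map else None

def get_curie(uri):
    # Contract an IRI back to a prefixed identifier.
    for prefix, base in curie_map.items():
        if uri.startswith(base):
            return '%s:%s' % (prefix, uri[len(base):])
    return None

assert get_uri('OMIM:154700') == 'http://omim.org/entry/154700'
assert get_curie('http://www.ncbi.nlm.nih.gov/gene/3791') == 'NCBIGene:3791'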
62,389
def fetch ( self , is_dl_forced = False ) : host = config . get_config ( ) [ 'dbauth' ] [ 'coriell' ] [ 'host' ] key = config . get_config ( ) [ 'dbauth' ] [ 'coriell' ] [ 'private_key' ] user = config . get_config ( ) [ 'user' ] [ 'coriell' ] passwd = config . get_config ( ) [ 'keys' ] [ user ] with pysftp . Connection ( host , username = user , password = passwd , private_key = key ) as sftp : remote_files = sftp . listdir_attr ( ) files_by_repo = { } for attr in remote_files : mch = re . match ( '(NIGMS|NIA|NHGRI|NINDS)' , attr . filename ) if mch is not None and len ( mch . groups ( ) ) > 0 : files_by_repo [ mch . group ( 1 ) ] = attr for rmt in self . files : LOG . info ( "Checking on %s catalog file" , rmt ) fname = self . files [ rmt ] [ 'file' ] remotef = files_by_repo [ rmt ] target_name = '/' . join ( ( self . rawdir , fname ) ) fstat = None if os . path . exists ( target_name ) : fstat = os . stat ( target_name ) LOG . info ( "Local file date: %s" , datetime . utcfromtimestamp ( fstat [ stat . ST_CTIME ] ) ) if fstat is None or remotef . st_mtime > fstat [ stat . ST_CTIME ] : if fstat is None : LOG . info ( "File does not exist locally; downloading..." ) else : LOG . info ( "New version of %s catalog available; downloading..." , rmt ) sftp . get ( remotef . filename , target_name ) LOG . info ( "Fetched remote %s -> %s" , remotef . filename , target_name ) fstat = os . stat ( target_name ) filedate = datetime . utcfromtimestamp ( remotef . st_mtime ) . strftime ( "%Y-%m-%d" ) LOG . info ( "New file date: %s" , datetime . utcfromtimestamp ( fstat [ stat . ST_CTIME ] ) ) else : LOG . info ( "File %s exists; using local copy" , fname ) filedate = datetime . utcfromtimestamp ( fstat [ stat . ST_CTIME ] ) . strftime ( "%Y-%m-%d" ) self . dataset . setFileAccessUrl ( remotef . filename , True ) self . dataset . setVersion ( filedate ) return
Here we connect to the Coriell SFTP server using private connection details. They dump bi-weekly files with a timestamp in the filename. For each catalog we ping the remote site and pull the most-recently updated file, renaming it to our local latest.csv.
62,390
def _process_collection ( self , collection_id , label , page ) : for graph in [ self . graph , self . testgraph ] : model = Model ( graph ) reference = Reference ( graph ) repo_id = 'CoriellCollection:' + collection_id repo_label = label repo_page = page model . addIndividualToGraph ( repo_id , repo_label , self . globaltt [ 'collection' ] ) reference . addPage ( repo_id , repo_page ) return
This function will process the data supplied internally about the repository from Coriell.
62,391
def _process_genotypes ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'genotype' ) ) LOG . info ( "building labels for genotypes" ) geno = Genotype ( graph ) fly_tax = self . globaltt [ 'Drosophila melanogaster' ] with open ( raw , 'r' ) as f : f . readline ( ) filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : line_counter += 1 ( genotype_num , uniquename , description , name ) = line genotype_id = 'MONARCH:FBgeno' + str ( genotype_num ) self . idhash [ 'genotype' ] [ genotype_num ] = genotype_id if description == '' : description = None if not self . test_mode and limit is not None and line_counter > limit : pass else : if self . test_mode and int ( genotype_num ) not in self . test_keys [ 'genotype' ] : continue model . addIndividualToGraph ( genotype_id , uniquename , self . globaltt [ 'intrinsic_genotype' ] , description ) geno . addTaxon ( fly_tax , genotype_id ) genotype_iid = self . _makeInternalIdentifier ( 'genotype' , genotype_num ) model . addComment ( genotype_id , genotype_iid ) if name . strip ( ) != '' : model . addSynonym ( genotype_id , name ) return
Add the genotype internal id to FlyBase id mapping to the id hashmap. Also add them as individuals to the graph.
62,392
def _process_stocks ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'stock' ) ) LOG . info ( "building labels for stocks" ) with open ( raw , 'r' ) as f : f . readline ( ) filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : line_counter += 1 ( stock_id , dbxref_id , organism_id , name , uniquename , description , type_id , is_obsolete ) = line stock_num = stock_id stock_id = 'FlyBase:' + uniquename self . idhash [ 'stock' ] [ stock_num ] = stock_id stock_label = description organism_key = organism_id taxon = self . idhash [ 'organism' ] [ organism_key ] if not self . test_mode and limit is not None and line_counter > limit : pass else : if self . test_mode and int ( stock_num ) not in self . test_keys [ 'strain' ] : continue model . addClassToGraph ( taxon ) model . addIndividualToGraph ( stock_id , stock_label , taxon ) if is_obsolete == 't' : model . addDeprecatedIndividual ( stock_id ) return
Stock definitions. Here we instantiate them as instances of the given taxon.
62,393
def _process_pubs ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'pub' ) ) LOG . info ( "building labels for pubs" ) with open ( raw , 'r' ) as f : f . readline ( ) filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : ( pub_id , title , volumetitle , volume , series_name , issue , pyear , pages , miniref , type_id , is_obsolete , publisher , pubplace , uniquename ) = line pub_num = pub_id pub_id = 'FlyBase:' + uniquename . strip ( ) self . idhash [ 'publication' ] [ pub_num ] = pub_id if not re . match ( r'(FBrf|multi)' , uniquename ) : continue line_counter += 1 reference = Reference ( graph , pub_id ) if title != '' : reference . setTitle ( title ) if pyear != '' : reference . setYear ( str ( pyear ) ) if miniref != '' : reference . setShortCitation ( miniref ) if not self . test_mode and limit is not None and line_counter > limit : pass else : if self . test_mode and int ( pub_num ) not in self . test_keys [ 'pub' ] : continue if is_obsolete == 't' : model . addDeprecatedIndividual ( pub_id ) else : reference . addRefToGraph ( ) return
FlyBase publications.
62,394
def _process_environments ( self ) : if self . test_mode : graph = self . testgraph else : graph = self . graph raw = '/' . join ( ( self . rawdir , 'environment' ) ) LOG . info ( "building labels for environment" ) env_parts = { } label_map = { } env = Environment ( graph ) with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) for line in filereader : ( environment_id , uniquename , description ) = line environment_num = environment_id environment_internal_id = self . _makeInternalIdentifier ( 'environment' , environment_num ) if environment_num not in self . idhash [ 'environment' ] : self . idhash [ 'environment' ] [ environment_num ] = environment_internal_id environment_id = self . idhash [ 'environment' ] [ environment_num ] environment_label = uniquename if environment_label == 'unspecified' : environment_label += ' environment' env . addEnvironment ( environment_id , environment_label ) self . label_hash [ environment_id ] = environment_label components = re . split ( r'\|' , uniquename ) if len ( components ) > 1 : env_parts [ environment_id ] = components else : label_map [ environment_label ] = environment_id for eid in env_parts : eid = eid . strip ( ) for e in env_parts [ eid ] : env_id = label_map . get ( e . strip ( ) ) env . addComponentToEnvironment ( eid , env_id ) return
There are only about 30 environments in which the phenotypes are recorded. There are no externally accessible identifiers for environments, so we make anonymous nodes for now. Some of the environments comprise more than one of the other environments; we do some simple parsing to match the environmental label strings to the other atomic components.
62,395
def _process_stock_genotype ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph raw = '/' . join ( ( self . rawdir , 'stock_genotype' ) ) LOG . info ( "processing stock genotype" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) for line in filereader : ( stock_genotype_id , stock_id , genotype_id ) = line stock_key = stock_id stock_id = self . idhash [ 'stock' ] [ stock_key ] genotype_key = genotype_id genotype_id = self . idhash [ 'genotype' ] [ genotype_key ] if self . test_mode and int ( genotype_key ) not in self . test_keys [ 'genotype' ] : continue graph . addTriple ( stock_id , self . globaltt [ 'has_genotype' ] , genotype_id ) line_counter += 1 if not self . test_mode and limit is not None and line_counter > limit : break return
The genotypes of the stocks.
62,396
def _process_dbxref ( self ) : raw = '/' . join ( ( self . rawdir , 'dbxref' ) ) LOG . info ( "processing dbxrefs" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) for line in filereader : ( dbxref_id , db_id , accession , version , description , url ) = line accession = accession . strip ( ) db_id = db_id . strip ( ) if accession != '' and db_id in self . localtt : mch = re . match ( r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):' , accession ) if mch : accession = re . sub ( mch . group ( 1 ) + r'\:' , '' , accession ) elif re . match ( r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)' , accession ) : continue elif re . match ( r'\:' , accession ) : accession = re . sub ( r'\:' , '' , accession ) elif re . search ( r'\s' , accession ) : continue if re . match ( r'http' , accession ) : did = accession else : prefix = self . localtt [ db_id ] did = ':' . join ( ( prefix , accession ) ) if re . search ( r'\:' , accession ) and prefix != 'DOI' : LOG . warning ( 'id %s may be malformed; skipping' , did ) self . dbxrefs [ dbxref_id ] = { db_id : did } elif url != '' : self . dbxrefs [ dbxref_id ] = { db_id : url . strip ( ) } else : continue if int ( db_id ) == 2 and accession . strip ( ) == 'transgenic_transposon' : self . dbxrefs [ dbxref_id ] = { db_id : self . globaltt [ 'transgenic_transposable_element' ] } line_counter += 1 return
We bring in the dbxref identifiers and store them in a hashmap for lookup in other functions. Note that some dbxrefs aren't mapped to identifiers. For example, 5004018 is mapped to the string 'endosome & imaginal disc epithelial cell | somatic clone ...'. In those cases there just isn't a dbxref that is used when referencing with a cvterm; it will just use the internal key.
62,397
def _process_phenotype ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'phenotype' ) ) LOG . info ( "processing phenotype" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) for line in filereader : ( phenotype_id , uniquename , observable_id , attr_id , value , cvalue_id , assay_id ) = line phenotype_key = phenotype_id phenotype_id = None phenotype_internal_id = self . _makeInternalIdentifier ( 'phenotype' , phenotype_key ) phenotype_label = None self . label_hash [ phenotype_internal_id ] = uniquename cvterm_id = None if observable_id != '' and int ( observable_id ) == 60468 : if cvalue_id in self . idhash [ 'cvterm' ] : cvterm_id = self . idhash [ 'cvterm' ] [ cvalue_id ] phenotype_id = self . idhash [ 'cvterm' ] [ cvalue_id ] elif observable_id in self . idhash [ 'cvterm' ] : cvterm_id = self . idhash [ 'cvterm' ] [ observable_id ] phenotype_id = self . idhash [ 'cvterm' ] [ observable_id ] + 'PHENOTYPE' if cvterm_id is not None and cvterm_id in self . label_hash : phenotype_label = self . label_hash [ cvterm_id ] phenotype_label += ' phenotype' self . label_hash [ phenotype_id ] = phenotype_label else : LOG . info ( 'cvtermid=%s not in label_hash' , cvterm_id ) else : LOG . info ( "No observable id or label for %s: %s" , phenotype_key , uniquename ) self . idhash [ 'phenotype' ] [ phenotype_key ] = phenotype_id if not self . test_mode and limit is not None and line_counter > limit : pass else : if phenotype_id is not None : model . addClassToGraph ( phenotype_id , phenotype_label ) line_counter += 1 return
Get the phenotypes and declare the classes. If the observable is 'unspecified', we assign the phenotype to the cvalue id; otherwise we convert the phenotype into an uberpheno-style identifier based simply on the affected anatomical part, listed as the observable_id concatenated with the literal 'PHENOTYPE'. A small sketch of this decision follows below.
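A small sketch of the identifier decision above, with hypothetical cvterm mappings; per the code, observable key 60468 flags the unspecified case.

cvterm_idhash = {'12345': 'FBbt:00004729', '99999': 'FBcv:0000351'}  # hypothetical

def phenotype_id_for(observable_id, cvalue_id):
    # Unspecified observable: take the cvalue's term directly.
    if observable_id and int(observable_id) == 60468:
        return cvterm_idhash.get(cvalue_id)
    # Otherwise build an uberpheno-style id from the observable term.
    if observable_id in cvterm_idhash:
        return cvterm_idhash[observable_id] + 'PHENOTYPE'
    return None

print(phenotype_id_for('12345', None))     # FBbt:00004729PHENOTYPE
print(phenotype_id_for('60468', '99999'))  # FBcv:0000351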
62,398
def _process_cvterm ( self ) : line_counter = 0 raw = '/' . join ( ( self . rawdir , 'cvterm' ) ) LOG . info ( "processing cvterms" ) with open ( raw , 'r' ) as f : f . readline ( ) filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : line_counter += 1 ( cvterm_id , cv_id , definition , dbxref_id , is_obsolete , is_relationshiptype , name ) = line cvterm_key = cvterm_id cvterm_id = self . _makeInternalIdentifier ( 'cvterm' , cvterm_key ) self . label_hash [ cvterm_id ] = name self . idhash [ 'cvterm' ] [ cvterm_key ] = cvterm_id dbxrefs = self . dbxrefs . get ( dbxref_id ) if dbxrefs is not None : if len ( dbxrefs ) > 1 : LOG . info ( ">1 dbxref for this cvterm (%s: %s): %s" , str ( cvterm_id ) , name , dbxrefs . values ( ) ) elif len ( dbxrefs ) == 1 : did = dbxrefs . popitem ( ) [ 1 ] self . idhash [ 'cvterm' ] [ cvterm_key ] = did self . label_hash [ did ] = name return
CVterms are the internal identifiers for any controlled vocabulary or ontology term. Many are cross-referenced to actual ontologies. The external id itself is stored in the dbxref table, which we place into the internal hashmap for lookup by cvterm id. The name of the external term is stored in the 'name' element of this table, and we add that to the label hashmap for lookup elsewhere.
62,399
def _process_organisms ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'organism' ) ) LOG . info ( "processing organisms" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) for line in filereader : ( organism_id , abbreviation , genus , species , common_name , comment ) = line line_counter += 1 tax_internal_id = self . _makeInternalIdentifier ( 'organism' , organism_id ) tax_label = ' ' . join ( ( genus , species ) ) tax_id = tax_internal_id self . idhash [ 'organism' ] [ organism_id ] = tax_id self . label_hash [ tax_id ] = tax_label if self . test_mode and int ( organism_id ) not in self . test_keys [ 'organism' ] : continue if not self . test_mode and limit is not None and line_counter > limit : pass else : model . addClassToGraph ( tax_id ) for s in [ common_name , abbreviation ] : if s is not None and s . strip ( ) != '' : model . addSynonym ( tax_id , s ) model . addComment ( tax_id , tax_internal_id ) return
The internal identifiers for the organisms in FlyBase.