Dataset columns (type and observed range):
  repo: string (length 7-55)
  path: string (length 4-127)
  func_name: string (length 1-88)
  original_string: string (length 75-19.8k)
  language: string (1 distinct value)
  code: string (length 75-19.8k)
  code_tokens: sequence (length 20-707)
  docstring: string (length 3-17.3k)
  docstring_tokens: sequence (length 3-222)
  sha: string (length 40)
  url: string (length 87-242)
  partition: string (1 distinct value)
  idx: int64 (0-252k)
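Each row below pairs one Python function with its docstring and provenance (repo, file path, commit sha, source URL). As a quick orientation, a split with these columns could be loaded and inspected roughly as follows, assuming it is published in a form the Hugging Face datasets library can read; the dataset id used here is a hypothetical placeholder, not the real one.

from datasets import load_dataset  # Hugging Face datasets library

# "org/python-code-docstrings" is a hypothetical placeholder for the dataset id.
ds = load_dataset("org/python-code-docstrings", split="train")

row = ds[0]
print(row["repo"], row["path"], row["func_name"])  # provenance columns
print(row["docstring"])                            # natural-language summary
print(row["code"][:200])                           # paired Python source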
monarch-initiative/dipper
dipper/sources/Decipher.py
Decipher.make_allele_by_consequence
def make_allele_by_consequence(self, consequence, gene_id, gene_symbol):
    """
    Given a "consequence" label that describes a variation type,
    create an anonymous variant of the specified gene as an instance
    of that consequence type.

    :param consequence:
    :param gene_id:
    :param gene_symbol:
    :return: allele_id
    """
    allele_id = None

    # Loss of function : Nonsense, frame-shifting indel,
    # essential splice site mutation, whole gene deletion or any other
    # mutation where functional analysis demonstrates clear reduction
    # or loss of function
    # All missense/in frame : Where all the mutations described in the data
    # source are either missense or in frame deletions and there is no
    # evidence favoring either loss-of-function, activating or
    # dominant negative effect
    # Dominant negative : Mutation within one allele of a gene that creates
    # a significantly greater deleterious effect on gene product
    # function than a monoallelic loss of function mutation
    # Activating : Mutation, usually missense that results in
    # a constitutive functional activation of the gene product
    # Increased gene dosage : Copy number variation that increases
    # the functional dosage of the gene
    # Cis-regulatory or promotor mutation : Mutation in cis-regulatory
    # elements that lies outwith the known transcription unit and
    # promotor of the controlled gene
    # Uncertain : Where the exact nature of the mutation is unclear or
    # not recorded
    type_id = self.resolve(consequence, mandatory=False)
    if type_id == consequence:
        LOG.warning("Consequence type unmapped: %s", str(consequence))
        type_id = self.globaltt['sequence_variant']

    # make the allele
    allele_id = ''.join((gene_id, type_id))
    allele_id = re.sub(r':', '', allele_id)
    allele_id = '_:'+allele_id  # make this a BNode
    allele_label = ' '.join((consequence, 'allele in', gene_symbol))
    self.model.addIndividualToGraph(allele_id, allele_label, type_id)
    self.geno.addAlleleOfGene(allele_id, gene_id)

    return allele_id
python
[ "def", "make_allele_by_consequence", "(", "self", ",", "consequence", ",", "gene_id", ",", "gene_symbol", ")", ":", "allele_id", "=", "None", "# Loss of function : Nonsense, frame-shifting indel,", "# essential splice site mutation, whole gene deletion or any other", "# mutation where functional analysis demonstrates clear reduction", "# or loss of function", "# All missense/in frame : Where all the mutations described in the data", "# source are either missense or in frame deletions and there is no", "# evidence favoring either loss-of-function, activating or", "# dominant negative effect", "# Dominant negative : Mutation within one allele of a gene that creates", "# a significantly greater deleterious effect on gene product", "# function than a monoallelic loss of function mutation", "# Activating : Mutation, usually missense that results in", "# a constitutive functional activation of the gene product", "# Increased gene dosage : Copy number variation that increases", "# the functional dosage of the gene", "# Cis-regulatory or promotor mutation : Mutation in cis-regulatory", "# elements that lies outwith the known transcription unit and", "# promotor of the controlled gene", "# Uncertain : Where the exact nature of the mutation is unclear or", "# not recorded", "type_id", "=", "self", ".", "resolve", "(", "consequence", ",", "mandatory", "=", "False", ")", "if", "type_id", "==", "consequence", ":", "LOG", ".", "warning", "(", "\"Consequence type unmapped: %s\"", ",", "str", "(", "consequence", ")", ")", "type_id", "=", "self", ".", "globaltt", "[", "'sequence_variant'", "]", "# make the allele", "allele_id", "=", "''", ".", "join", "(", "(", "gene_id", ",", "type_id", ")", ")", "allele_id", "=", "re", ".", "sub", "(", "r':'", ",", "''", ",", "allele_id", ")", "allele_id", "=", "'_:'", "+", "allele_id", "# make this a BNode", "allele_label", "=", "' '", ".", "join", "(", "(", "consequence", ",", "'allele in'", ",", "gene_symbol", ")", ")", "self", ".", "model", ".", "addIndividualToGraph", "(", "allele_id", ",", "allele_label", ",", "type_id", ")", "self", ".", "geno", ".", "addAlleleOfGene", "(", "allele_id", ",", "gene_id", ")", "return", "allele_id" ]
Given a "consequence" label that describes a variation type, create an anonymous variant of the specified gene as an instance of that consequence type. :param consequence: :param gene_id: :param gene_symbol: :return: allele_id
[ "Given", "a", "consequence", "label", "that", "describes", "a", "variation", "type", "create", "an", "anonymous", "variant", "of", "the", "specified", "gene", "as", "an", "instance", "of", "that", "consequence", "type", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Decipher.py#L228-L277
train
251,200
monarch-initiative/dipper
dipper/sources/EBIGene2Phen.py
EBIGene2Phen.parse
def parse(self, limit: Optional[int]=None):
    """
    Here we parse each row of the gene to phenotype file

    We create anonymous variants along with their attributes
    (allelic requirement, functional consequence)
    and connect these to genes and diseases

    genes are connected to variants via
    global_terms['has_affected_locus']

    variants are connected to attributes via:
    global_terms['has_allelic_requirement']
    global_terms['has_functional_consequence']

    variants are connected to disease based on
    mappings to the DDD category column,
    see the translationtable specific to this source
    for mappings

    For cases where there are no disease OMIM id,
    we either use a disease cache file with mappings
    to MONDO that has been manually curated

    :param limit: {int} number of rows to parse
    :return: None
    """
    if limit is not None:
        LOG.info("Only parsing first %d rows", limit)

    LOG.info("Parsing files...")

    file_path = '/'.join((
        self.rawdir, self.files['developmental_disorders']['file']))

    with gzip.open(file_path, 'rt') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # header
        for row in reader:
            if limit is None or reader.line_num <= (limit + 1):
                self._add_gene_disease(row)
            else:
                break

    LOG.info("Done parsing.")
python
[ "def", "parse", "(", "self", ",", "limit", ":", "Optional", "[", "int", "]", "=", "None", ")", ":", "if", "limit", "is", "not", "None", ":", "LOG", ".", "info", "(", "\"Only parsing first %d rows\"", ",", "limit", ")", "LOG", ".", "info", "(", "\"Parsing files...\"", ")", "file_path", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'developmental_disorders'", "]", "[", "'file'", "]", ")", ")", "with", "gzip", ".", "open", "(", "file_path", ",", "'rt'", ")", "as", "csvfile", ":", "reader", "=", "csv", ".", "reader", "(", "csvfile", ")", "next", "(", "reader", ")", "# header", "for", "row", "in", "reader", ":", "if", "limit", "is", "None", "or", "reader", ".", "line_num", "<=", "(", "limit", "+", "1", ")", ":", "self", ".", "_add_gene_disease", "(", "row", ")", "else", ":", "break", "LOG", ".", "info", "(", "\"Done parsing.\"", ")" ]
Here we parse each row of the gene to phenotype file We create anonymous variants along with their attributes (allelic requirement, functional consequence) and connect these to genes and diseases genes are connected to variants via global_terms['has_affected_locus'] variants are connected to attributes via: global_terms['has_allelic_requirement'] global_terms['has_functional_consequence'] variants are connected to disease based on mappings to the DDD category column, see the translationtable specific to this source for mappings For cases where there are no disease OMIM id, we either use a disease cache file with mappings to MONDO that has been manually curated :param limit: {int} number of rows to parse :return: None
[ "Here", "we", "parse", "each", "row", "of", "the", "gene", "to", "phenotype", "file" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EBIGene2Phen.py#L104-L147
train
251,201
monarch-initiative/dipper
dipper/sources/EBIGene2Phen.py
EBIGene2Phen._add_gene_disease
def _add_gene_disease(self, row):  # ::List getting syntax error here
    """
    Parse and add gene variant disease model

    Model building happens in _build_gene_disease_model

    :param row {List}: single row from DDG2P.csv
    :return: None
    """
    col = self.files['developmental_disorders']['columns']
    if len(row) != len(col):
        raise ValueError("Unexpected number of fields for row {}".format(row))

    variant_label = "variant of {}".format(row[col.index('gene_symbol')])
    disease_omim_id = row[col.index('disease_omim_id')]
    if disease_omim_id == 'No disease mim':
        # check if we've manually curated
        disease_label = row[col.index('disease_label')]
        if disease_label in self.mondo_map:
            disease_id = self.mondo_map[disease_label]
        else:
            return  # sorry for this
    else:
        disease_id = 'OMIM:' + disease_omim_id

    hgnc_curie = 'HGNC:' + row[col.index('hgnc_id')]
    relation_curie = self.resolve(row[col.index('g2p_relation_label')])
    mutation_consequence = row[col.index('mutation_consequence')]
    if mutation_consequence not in ('uncertain', ''):
        consequence_relation = self.resolve(
            self._get_consequence_predicate(mutation_consequence))
        consequence_curie = self.resolve(mutation_consequence)
        variant_label = "{} {}".format(mutation_consequence, variant_label)
    else:
        consequence_relation = None
        consequence_curie = None

    allelic_requirement = row[col.index('allelic_requirement')]
    if allelic_requirement != '':
        requirement_curie = self.resolve(allelic_requirement)
    else:
        requirement_curie = None

    pmids = row[col.index('pmids')]
    if pmids != '':
        pmid_list = ['PMID:' + pmid for pmid in pmids.split(';')]
    else:
        pmid_list = []

    # build the model
    # Should we build a reusable object and/or tuple that
    # could be passed to a more general model builder for
    # this and orphanet (and maybe clinvar)
    self._build_gene_disease_model(
        hgnc_curie,
        relation_curie,
        disease_id,
        variant_label,
        consequence_relation,
        consequence_curie,
        requirement_curie,
        pmid_list
    )
python
[ "def", "_add_gene_disease", "(", "self", ",", "row", ")", ":", "# ::List getting syntax error here", "col", "=", "self", ".", "files", "[", "'developmental_disorders'", "]", "[", "'columns'", "]", "if", "len", "(", "row", ")", "!=", "len", "(", "col", ")", ":", "raise", "ValueError", "(", "\"Unexpected number of fields for row {}\"", ".", "format", "(", "row", ")", ")", "variant_label", "=", "\"variant of {}\"", ".", "format", "(", "row", "[", "col", ".", "index", "(", "'gene_symbol'", ")", "]", ")", "disease_omim_id", "=", "row", "[", "col", ".", "index", "(", "'disease_omim_id'", ")", "]", "if", "disease_omim_id", "==", "'No disease mim'", ":", "# check if we've manually curated", "disease_label", "=", "row", "[", "col", ".", "index", "(", "'disease_label'", ")", "]", "if", "disease_label", "in", "self", ".", "mondo_map", ":", "disease_id", "=", "self", ".", "mondo_map", "[", "disease_label", "]", "else", ":", "return", "# sorry for this", "else", ":", "disease_id", "=", "'OMIM:'", "+", "disease_omim_id", "hgnc_curie", "=", "'HGNC:'", "+", "row", "[", "col", ".", "index", "(", "'hgnc_id'", ")", "]", "relation_curie", "=", "self", ".", "resolve", "(", "row", "[", "col", ".", "index", "(", "'g2p_relation_label'", ")", "]", ")", "mutation_consequence", "=", "row", "[", "col", ".", "index", "(", "'mutation_consequence'", ")", "]", "if", "mutation_consequence", "not", "in", "(", "'uncertain'", ",", "''", ")", ":", "consequence_relation", "=", "self", ".", "resolve", "(", "self", ".", "_get_consequence_predicate", "(", "mutation_consequence", ")", ")", "consequence_curie", "=", "self", ".", "resolve", "(", "mutation_consequence", ")", "variant_label", "=", "\"{} {}\"", ".", "format", "(", "mutation_consequence", ",", "variant_label", ")", "else", ":", "consequence_relation", "=", "None", "consequence_curie", "=", "None", "allelic_requirement", "=", "row", "[", "col", ".", "index", "(", "'allelic_requirement'", ")", "]", "if", "allelic_requirement", "!=", "''", ":", "requirement_curie", "=", "self", ".", "resolve", "(", "allelic_requirement", ")", "else", ":", "requirement_curie", "=", "None", "pmids", "=", "row", "[", "col", ".", "index", "(", "'pmids'", ")", "]", "if", "pmids", "!=", "''", ":", "pmid_list", "=", "[", "'PMID:'", "+", "pmid", "for", "pmid", "in", "pmids", ".", "split", "(", "';'", ")", "]", "else", ":", "pmid_list", "=", "[", "]", "# build the model", "# Should we build a reusable object and/or tuple that", "# could be passed to a more general model builder for", "# this and orphanet (and maybe clinvar)", "self", ".", "_build_gene_disease_model", "(", "hgnc_curie", ",", "relation_curie", ",", "disease_id", ",", "variant_label", ",", "consequence_relation", ",", "consequence_curie", ",", "requirement_curie", ",", "pmid_list", ")" ]
Parse and add gene variant disease model Model building happens in _build_gene_disease_model :param row {List}: single row from DDG2P.csv :return: None
[ "Parse", "and", "add", "gene", "variant", "disease", "model", "Model", "building", "happens", "in", "_build_gene_disease_model" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EBIGene2Phen.py#L149-L211
train
251,202
monarch-initiative/dipper
dipper/sources/EBIGene2Phen.py
EBIGene2Phen._build_gene_disease_model
def _build_gene_disease_model(
        self,
        gene_id,
        relation_id,
        disease_id,
        variant_label,
        consequence_predicate=None,
        consequence_id=None,
        allelic_requirement=None,
        pmids=None):
    """
    Builds gene variant disease model

    :return: None
    """
    model = Model(self.graph)
    geno = Genotype(self.graph)

    pmids = [] if pmids is None else pmids

    is_variant = False
    variant_or_gene = gene_id

    variant_id_string = variant_label
    variant_bnode = self.make_id(variant_id_string, "_")

    if consequence_predicate is not None \
            and consequence_id is not None:
        is_variant = True
        model.addTriple(variant_bnode, consequence_predicate, consequence_id)
        # Hack to add labels to terms that
        # don't exist in an ontology
        if consequence_id.startswith(':'):
            model.addLabel(consequence_id,
                           consequence_id.strip(':').replace('_', ' '))

    if is_variant:
        variant_or_gene = variant_bnode
        # Typically we would type the variant using the
        # molecular consequence, but these are not specific
        # enough for us to make mappings (see translation table)
        model.addIndividualToGraph(variant_bnode, variant_label,
                                   self.globaltt['variant_locus'])
        geno.addAffectedLocus(variant_bnode, gene_id)
        model.addBlankNodeAnnotation(variant_bnode)

    assoc = G2PAssoc(
        self.graph, self.name, variant_or_gene, disease_id, relation_id)
    assoc.source = pmids
    assoc.add_association_to_graph()

    if allelic_requirement is not None and is_variant is False:
        model.addTriple(
            assoc.assoc_id, self.globaltt['has_allelic_requirement'],
            allelic_requirement)
        if allelic_requirement.startswith(':'):
            model.addLabel(
                allelic_requirement,
                allelic_requirement.strip(':').replace('_', ' '))
python
[ "def", "_build_gene_disease_model", "(", "self", ",", "gene_id", ",", "relation_id", ",", "disease_id", ",", "variant_label", ",", "consequence_predicate", "=", "None", ",", "consequence_id", "=", "None", ",", "allelic_requirement", "=", "None", ",", "pmids", "=", "None", ")", ":", "model", "=", "Model", "(", "self", ".", "graph", ")", "geno", "=", "Genotype", "(", "self", ".", "graph", ")", "pmids", "=", "[", "]", "if", "pmids", "is", "None", "else", "pmids", "is_variant", "=", "False", "variant_or_gene", "=", "gene_id", "variant_id_string", "=", "variant_label", "variant_bnode", "=", "self", ".", "make_id", "(", "variant_id_string", ",", "\"_\"", ")", "if", "consequence_predicate", "is", "not", "None", "and", "consequence_id", "is", "not", "None", ":", "is_variant", "=", "True", "model", ".", "addTriple", "(", "variant_bnode", ",", "consequence_predicate", ",", "consequence_id", ")", "# Hack to add labels to terms that", "# don't exist in an ontology", "if", "consequence_id", ".", "startswith", "(", "':'", ")", ":", "model", ".", "addLabel", "(", "consequence_id", ",", "consequence_id", ".", "strip", "(", "':'", ")", ".", "replace", "(", "'_'", ",", "' '", ")", ")", "if", "is_variant", ":", "variant_or_gene", "=", "variant_bnode", "# Typically we would type the variant using the", "# molecular consequence, but these are not specific", "# enough for us to make mappings (see translation table)", "model", ".", "addIndividualToGraph", "(", "variant_bnode", ",", "variant_label", ",", "self", ".", "globaltt", "[", "'variant_locus'", "]", ")", "geno", ".", "addAffectedLocus", "(", "variant_bnode", ",", "gene_id", ")", "model", ".", "addBlankNodeAnnotation", "(", "variant_bnode", ")", "assoc", "=", "G2PAssoc", "(", "self", ".", "graph", ",", "self", ".", "name", ",", "variant_or_gene", ",", "disease_id", ",", "relation_id", ")", "assoc", ".", "source", "=", "pmids", "assoc", ".", "add_association_to_graph", "(", ")", "if", "allelic_requirement", "is", "not", "None", "and", "is_variant", "is", "False", ":", "model", ".", "addTriple", "(", "assoc", ".", "assoc_id", ",", "self", ".", "globaltt", "[", "'has_allelic_requirement'", "]", ",", "allelic_requirement", ")", "if", "allelic_requirement", ".", "startswith", "(", "':'", ")", ":", "model", ".", "addLabel", "(", "allelic_requirement", ",", "allelic_requirement", ".", "strip", "(", "':'", ")", ".", "replace", "(", "'_'", ",", "' '", ")", ")" ]
Builds gene variant disease model :return: None
[ "Builds", "gene", "variant", "disease", "model" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EBIGene2Phen.py#L213-L274
train
251,203
monarch-initiative/dipper
dipper/sources/BioGrid.py
BioGrid._get_identifiers
def _get_identifiers(self, limit):
    """
    This will process the id mapping file provided by Biogrid.
    The file has a very large header, which we scan past,
    then pull the identifiers, and make equivalence axioms

    :param limit:
    :return:

    """
    LOG.info("getting identifier mapping")
    line_counter = 0
    f = '/'.join((self.rawdir, self.files['identifiers']['file']))
    myzip = ZipFile(f, 'r')
    # assume that the first entry is the item
    fname = myzip.namelist()[0]
    foundheader = False

    # TODO align this species filter with the one above
    # speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster,
    # Danio rerio, Caenorhabditis elegans,Xenopus laevis'.split(',')
    speciesfilters = 'Homo sapiens,Mus musculus'.split(',')
    with myzip.open(fname, 'r') as csvfile:
        for line in csvfile:
            # skip header lines
            if not foundheader:
                if re.match(r'BIOGRID_ID', line.decode()):
                    foundheader = True
                continue

            line = line.decode().strip()
            # BIOGRID_ID
            # IDENTIFIER_VALUE
            # IDENTIFIER_TYPE
            # ORGANISM_OFFICIAL_NAME
            # 1    814566    ENTREZ_GENE    Arabidopsis thaliana
            (biogrid_num, id_num, id_type, organism_label) = line.split('\t')

            if self.test_mode:
                graph = self.testgraph
                # skip any genes that don't match our test set
                if int(biogrid_num) not in self.biogrid_ids:
                    continue
            else:
                graph = self.graph
            model = Model(graph)

            # for each one of these,
            # create the node and add equivalent classes
            biogrid_id = 'BIOGRID:'+biogrid_num
            prefix = self.localtt[id_type]

            # TODO make these filters available as commandline options
            # geneidtypefilters='NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC,
            # WormBase,XenBase,ENSEMBL,miRBase'.split(',')
            geneidtypefilters = 'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC'.split(',')
            # proteinidtypefilters='HPRD,Swiss-Prot,NCBIProtein'
            if (speciesfilters is not None) \
                    and (organism_label.strip() in speciesfilters):
                line_counter += 1
                if (geneidtypefilters is not None) \
                        and (prefix in geneidtypefilters):
                    mapped_id = ':'.join((prefix, id_num))
                    model.addEquivalentClass(biogrid_id, mapped_id)
                # this symbol will only get attached to the biogrid class
                elif id_type == 'OFFICIAL_SYMBOL':
                    model.addClassToGraph(biogrid_id, id_num)
                # elif (id_type == 'SYNONYM'):
                #     FIXME - i am not sure these are synonyms, altids?
                #     gu.addSynonym(g,biogrid_id,id_num)

            if not self.test_mode and limit is not None and line_counter > limit:
                break

    myzip.close()

    return
python
[ "def", "_get_identifiers", "(", "self", ",", "limit", ")", ":", "LOG", ".", "info", "(", "\"getting identifier mapping\"", ")", "line_counter", "=", "0", "f", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'identifiers'", "]", "[", "'file'", "]", ")", ")", "myzip", "=", "ZipFile", "(", "f", ",", "'r'", ")", "# assume that the first entry is the item", "fname", "=", "myzip", ".", "namelist", "(", ")", "[", "0", "]", "foundheader", "=", "False", "# TODO align this species filter with the one above", "# speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster,", "# Danio rerio, Caenorhabditis elegans,Xenopus laevis'.split(',')", "speciesfilters", "=", "'Homo sapiens,Mus musculus'", ".", "split", "(", "','", ")", "with", "myzip", ".", "open", "(", "fname", ",", "'r'", ")", "as", "csvfile", ":", "for", "line", "in", "csvfile", ":", "# skip header lines", "if", "not", "foundheader", ":", "if", "re", ".", "match", "(", "r'BIOGRID_ID'", ",", "line", ".", "decode", "(", ")", ")", ":", "foundheader", "=", "True", "continue", "line", "=", "line", ".", "decode", "(", ")", ".", "strip", "(", ")", "# BIOGRID_ID", "# IDENTIFIER_VALUE", "# IDENTIFIER_TYPE", "# ORGANISM_OFFICIAL_NAME", "# 1\t814566\tENTREZ_GENE\tArabidopsis thaliana", "(", "biogrid_num", ",", "id_num", ",", "id_type", ",", "organism_label", ")", "=", "line", ".", "split", "(", "'\\t'", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "# skip any genes that don't match our test set", "if", "int", "(", "biogrid_num", ")", "not", "in", "self", ".", "biogrid_ids", ":", "continue", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "# for each one of these,", "# create the node and add equivalent classes", "biogrid_id", "=", "'BIOGRID:'", "+", "biogrid_num", "prefix", "=", "self", ".", "localtt", "[", "id_type", "]", "# TODO make these filters available as commandline options", "# geneidtypefilters='NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC,", "# WormBase,XenBase,ENSEMBL,miRBase'.split(',')", "geneidtypefilters", "=", "'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC'", ".", "split", "(", "','", ")", "# proteinidtypefilters='HPRD,Swiss-Prot,NCBIProtein'", "if", "(", "speciesfilters", "is", "not", "None", ")", "and", "(", "organism_label", ".", "strip", "(", ")", "in", "speciesfilters", ")", ":", "line_counter", "+=", "1", "if", "(", "geneidtypefilters", "is", "not", "None", ")", "and", "(", "prefix", "in", "geneidtypefilters", ")", ":", "mapped_id", "=", "':'", ".", "join", "(", "(", "prefix", ",", "id_num", ")", ")", "model", ".", "addEquivalentClass", "(", "biogrid_id", ",", "mapped_id", ")", "# this symbol will only get attached to the biogrid class", "elif", "id_type", "==", "'OFFICIAL_SYMBOL'", ":", "model", ".", "addClassToGraph", "(", "biogrid_id", ",", "id_num", ")", "# elif (id_type == 'SYNONYM'):", "# FIXME - i am not sure these are synonyms, altids?", "# gu.addSynonym(g,biogrid_id,id_num)", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "myzip", ".", "close", "(", ")", "return" ]
This will process the id mapping file provided by Biogrid. The file has a very large header, which we scan past, then pull the identifiers, and make equivalence axioms :param limit: :return:
[ "This", "will", "process", "the", "id", "mapping", "file", "provided", "by", "Biogrid", ".", "The", "file", "has", "a", "very", "large", "header", "which", "we", "scan", "past", "then", "pull", "the", "identifiers", "and", "make", "equivalence", "axioms" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/BioGrid.py#L201-L281
train
251,204
monarch-initiative/dipper
dipper/models/Evidence.py
Evidence.add_supporting_evidence
def add_supporting_evidence(self, evidence_line, evidence_type=None, label=None):
    """
    Add supporting line of evidence node to association id

    :param evidence_line: curie or iri, evidence line
    :param evidence_type: curie or iri, evidence type if available
    :return: None
    """
    self.graph.addTriple(
        self.association, self.globaltt['has_supporting_evidence_line'],
        evidence_line)
    if evidence_type is not None:
        self.model.addIndividualToGraph(evidence_line, label, evidence_type)
    return
python
[ "def", "add_supporting_evidence", "(", "self", ",", "evidence_line", ",", "evidence_type", "=", "None", ",", "label", "=", "None", ")", ":", "self", ".", "graph", ".", "addTriple", "(", "self", ".", "association", ",", "self", ".", "globaltt", "[", "'has_supporting_evidence_line'", "]", ",", "evidence_line", ")", "if", "evidence_type", "is", "not", "None", ":", "self", ".", "model", ".", "addIndividualToGraph", "(", "evidence_line", ",", "label", ",", "evidence_type", ")", "return" ]
Add supporting line of evidence node to association id :param evidence_line: curie or iri, evidence line :param evidence_type: curie or iri, evidence type if available :return: None
[ "Add", "supporting", "line", "of", "evidence", "node", "to", "association", "id" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Evidence.py#L34-L47
train
251,205
monarch-initiative/dipper
dipper/models/assoc/G2PAssoc.py
G2PAssoc.add_association_to_graph
def add_association_to_graph(self):
    """
    Overrides Association by including bnode support

    The reified relationship between a genotype (or any genotype part)
    and a phenotype is decorated with some provenance information.
    This makes the assumption that both the genotype and phenotype
    are classes.

    currently hardcoded to map the annotation to the monarch namespace

    :param g:
    :return:
    """
    Assoc.add_association_to_graph(self)

    # make a blank stage
    if self.start_stage_id or self.end_stage_id is not None:
        stage_process_id = '-'.join((str(self.start_stage_id),
                                     str(self.end_stage_id)))
        stage_process_id = '_:'+re.sub(r':', '', stage_process_id)
        self.model.addIndividualToGraph(
            stage_process_id, None, self.globaltt['developmental_process'])
        self.graph.addTriple(
            stage_process_id, self.globaltt['starts during'], self.start_stage_id)
        self.graph.addTriple(
            stage_process_id, self.globaltt['ends during'], self.end_stage_id)
        self.stage_process_id = stage_process_id
        self.graph.addTriple(
            self.assoc_id, self.globaltt['has_qualifier'], self.stage_process_id)

    if self.environment_id is not None:
        self.graph.addTriple(
            self.assoc_id, self.globaltt['has_qualifier'], self.environment_id)
    return
python
[ "def", "add_association_to_graph", "(", "self", ")", ":", "Assoc", ".", "add_association_to_graph", "(", "self", ")", "# make a blank stage", "if", "self", ".", "start_stage_id", "or", "self", ".", "end_stage_id", "is", "not", "None", ":", "stage_process_id", "=", "'-'", ".", "join", "(", "(", "str", "(", "self", ".", "start_stage_id", ")", ",", "str", "(", "self", ".", "end_stage_id", ")", ")", ")", "stage_process_id", "=", "'_:'", "+", "re", ".", "sub", "(", "r':'", ",", "''", ",", "stage_process_id", ")", "self", ".", "model", ".", "addIndividualToGraph", "(", "stage_process_id", ",", "None", ",", "self", ".", "globaltt", "[", "'developmental_process'", "]", ")", "self", ".", "graph", ".", "addTriple", "(", "stage_process_id", ",", "self", ".", "globaltt", "[", "'starts during'", "]", ",", "self", ".", "start_stage_id", ")", "self", ".", "graph", ".", "addTriple", "(", "stage_process_id", ",", "self", ".", "globaltt", "[", "'ends during'", "]", ",", "self", ".", "end_stage_id", ")", "self", ".", "stage_process_id", "=", "stage_process_id", "self", ".", "graph", ".", "addTriple", "(", "self", ".", "assoc_id", ",", "self", ".", "globaltt", "[", "'has_qualifier'", "]", ",", "self", ".", "stage_process_id", ")", "if", "self", ".", "environment_id", "is", "not", "None", ":", "self", ".", "graph", ".", "addTriple", "(", "self", ".", "assoc_id", ",", "self", ".", "globaltt", "[", "'has_qualifier'", "]", ",", "self", ".", "environment_id", ")", "return" ]
Overrides Association by including bnode support The reified relationship between a genotype (or any genotype part) and a phenotype is decorated with some provenance information. This makes the assumption that both the genotype and phenotype are classes. currently hardcoded to map the annotation to the monarch namespace :param g: :return:
[ "Overrides", "Association", "by", "including", "bnode", "support" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/G2PAssoc.py#L66-L103
train
251,206
monarch-initiative/dipper
dipper/sources/MPD.py
MPD.parse
def parse(self, limit=None):
    """
    MPD data is delivered in four separate csv files and one xml file,
    which we process iteratively and write out as
    one large graph.

    :param limit:
    :return:
    """
    if limit is not None:
        LOG.info("Only parsing first %s rows fo each file", str(limit))

    LOG.info("Parsing files...")

    self._process_straininfo(limit)
    # the following will provide us the hash-lookups
    # These must be processed in a specific order

    # mapping between assays and ontology terms
    self._process_ontology_mappings_file(limit)
    # this is the metadata about the measurements
    self._process_measurements_file(limit)
    # get all the measurements per strain
    self._process_strainmeans_file(limit)

    # The following will use the hash populated above
    # to lookup the ids when filling in the graph
    self._fill_provenance_graph(limit)

    LOG.info("Finished parsing.")
    return
python
[ "def", "parse", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "limit", "is", "not", "None", ":", "LOG", ".", "info", "(", "\"Only parsing first %s rows fo each file\"", ",", "str", "(", "limit", ")", ")", "LOG", ".", "info", "(", "\"Parsing files...\"", ")", "self", ".", "_process_straininfo", "(", "limit", ")", "# the following will provide us the hash-lookups", "# These must be processed in a specific order", "# mapping between assays and ontology terms", "self", ".", "_process_ontology_mappings_file", "(", "limit", ")", "# this is the metadata about the measurements", "self", ".", "_process_measurements_file", "(", "limit", ")", "# get all the measurements per strain", "self", ".", "_process_strainmeans_file", "(", "limit", ")", "# The following will use the hash populated above", "# to lookup the ids when filling in the graph", "self", ".", "_fill_provenance_graph", "(", "limit", ")", "LOG", ".", "info", "(", "\"Finished parsing.\"", ")", "return" ]
MPD data is delivered in four separate csv files and one xml file, which we process iteratively and write out as one large graph. :param limit: :return:
[ "MPD", "data", "is", "delivered", "in", "four", "separate", "csv", "files", "and", "one", "xml", "file", "which", "we", "process", "iteratively", "and", "write", "out", "as", "one", "large", "graph", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MPD.py#L112-L142
train
251,207
monarch-initiative/dipper
dipper/sources/MPD.py
MPD._add_g2p_assoc
def _add_g2p_assoc(self, graph, strain_id, sex, assay_id, phenotypes, comment):
    """
    Create an association between a sex-specific strain id
    and each of the phenotypes.
    Here, we create a genotype from the strain,
    and a sex-specific genotype.
    Each of those genotypes are created as anonymous nodes.

    The evidence code is hardcoded to be:
        ECO:experimental_phenotypic_evidence.

    :param g:
    :param strain_id:
    :param sex:
    :param assay_id:
    :param phenotypes: a list of phenotypes to association with the strain
    :param comment:
    :return:

    """
    geno = Genotype(graph)
    model = Model(graph)
    eco_id = self.globaltt['experimental phenotypic evidence']
    strain_label = self.idlabel_hash.get(strain_id)
    # strain genotype
    genotype_id = '_:'+'-'.join((re.sub(r':', '', strain_id), 'genotype'))
    genotype_label = '[' + strain_label + ']'

    sex_specific_genotype_id = '_:'+'-'.join((
        re.sub(r':', '', strain_id), sex, 'genotype'))
    if strain_label is not None:
        sex_specific_genotype_label = strain_label + ' (' + sex + ')'
    else:
        sex_specific_genotype_label = strain_id + '(' + sex + ')'

    genotype_type = self.globaltt['sex_qualified_genotype']
    if sex == 'm':
        genotype_type = self.globaltt['male_genotype']
    elif sex == 'f':
        genotype_type = self.globaltt['female_genotype']

    # add the genotype to strain connection
    geno.addGenotype(
        genotype_id, genotype_label, self.globaltt['genomic_background'])
    graph.addTriple(
        strain_id, self.globaltt['has_genotype'], genotype_id)

    geno.addGenotype(
        sex_specific_genotype_id, sex_specific_genotype_label, genotype_type)

    # add the strain as the background for the genotype
    graph.addTriple(
        sex_specific_genotype_id,
        self.globaltt['has_sex_agnostic_part'],
        genotype_id)

    # ############# BUILD THE G2P ASSOC #############
    # TODO add more provenance info when that model is completed

    if phenotypes is not None:
        for phenotype_id in phenotypes:
            assoc = G2PAssoc(
                graph, self.name, sex_specific_genotype_id, phenotype_id)
            assoc.add_evidence(assay_id)
            assoc.add_evidence(eco_id)
            assoc.add_association_to_graph()
            assoc_id = assoc.get_association_id()
            model.addComment(assoc_id, comment)
            model._addSexSpecificity(assoc_id, self.resolve(sex))

    return
python
[ "def", "_add_g2p_assoc", "(", "self", ",", "graph", ",", "strain_id", ",", "sex", ",", "assay_id", ",", "phenotypes", ",", "comment", ")", ":", "geno", "=", "Genotype", "(", "graph", ")", "model", "=", "Model", "(", "graph", ")", "eco_id", "=", "self", ".", "globaltt", "[", "'experimental phenotypic evidence'", "]", "strain_label", "=", "self", ".", "idlabel_hash", ".", "get", "(", "strain_id", ")", "# strain genotype", "genotype_id", "=", "'_:'", "+", "'-'", ".", "join", "(", "(", "re", ".", "sub", "(", "r':'", ",", "''", ",", "strain_id", ")", ",", "'genotype'", ")", ")", "genotype_label", "=", "'['", "+", "strain_label", "+", "']'", "sex_specific_genotype_id", "=", "'_:'", "+", "'-'", ".", "join", "(", "(", "re", ".", "sub", "(", "r':'", ",", "''", ",", "strain_id", ")", ",", "sex", ",", "'genotype'", ")", ")", "if", "strain_label", "is", "not", "None", ":", "sex_specific_genotype_label", "=", "strain_label", "+", "' ('", "+", "sex", "+", "')'", "else", ":", "sex_specific_genotype_label", "=", "strain_id", "+", "'('", "+", "sex", "+", "')'", "genotype_type", "=", "self", ".", "globaltt", "[", "'sex_qualified_genotype'", "]", "if", "sex", "==", "'m'", ":", "genotype_type", "=", "self", ".", "globaltt", "[", "'male_genotype'", "]", "elif", "sex", "==", "'f'", ":", "genotype_type", "=", "self", ".", "globaltt", "[", "'female_genotype'", "]", "# add the genotype to strain connection", "geno", ".", "addGenotype", "(", "genotype_id", ",", "genotype_label", ",", "self", ".", "globaltt", "[", "'genomic_background'", "]", ")", "graph", ".", "addTriple", "(", "strain_id", ",", "self", ".", "globaltt", "[", "'has_genotype'", "]", ",", "genotype_id", ")", "geno", ".", "addGenotype", "(", "sex_specific_genotype_id", ",", "sex_specific_genotype_label", ",", "genotype_type", ")", "# add the strain as the background for the genotype", "graph", ".", "addTriple", "(", "sex_specific_genotype_id", ",", "self", ".", "globaltt", "[", "'has_sex_agnostic_part'", "]", ",", "genotype_id", ")", "# ############# BUILD THE G2P ASSOC #############", "# TODO add more provenance info when that model is completed", "if", "phenotypes", "is", "not", "None", ":", "for", "phenotype_id", "in", "phenotypes", ":", "assoc", "=", "G2PAssoc", "(", "graph", ",", "self", ".", "name", ",", "sex_specific_genotype_id", ",", "phenotype_id", ")", "assoc", ".", "add_evidence", "(", "assay_id", ")", "assoc", ".", "add_evidence", "(", "eco_id", ")", "assoc", ".", "add_association_to_graph", "(", ")", "assoc_id", "=", "assoc", ".", "get_association_id", "(", ")", "model", ".", "addComment", "(", "assoc_id", ",", "comment", ")", "model", ".", "_addSexSpecificity", "(", "assoc_id", ",", "self", ".", "resolve", "(", "sex", ")", ")", "return" ]
Create an association between a sex-specific strain id and each of the phenotypes. Here, we create a genotype from the strain, and a sex-specific genotype. Each of those genotypes are created as anonymous nodes. The evidence code is hardcoded to be: ECO:experimental_phenotypic_evidence. :param g: :param strain_id: :param sex: :param assay_id: :param phenotypes: a list of phenotypes to association with the strain :param comment: :return:
[ "Create", "an", "association", "between", "a", "sex", "-", "specific", "strain", "id", "and", "each", "of", "the", "phenotypes", ".", "Here", "we", "create", "a", "genotype", "from", "the", "strain", "and", "a", "sex", "-", "specific", "genotype", ".", "Each", "of", "those", "genotypes", "are", "created", "as", "anonymous", "nodes", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MPD.py#L385-L457
train
251,208
monarch-initiative/dipper
dipper/sources/IMPC.py
IMPC.parse
def parse(self, limit=None):
    """
    IMPC data is delivered in three separate csv files OR
    in one integrated file, each with the same file format.

    :param limit:
    :return:

    """
    if limit is not None:
        LOG.info("Only parsing first %s rows fo each file", str(limit))

    LOG.info("Parsing files...")

    if self.test_only:
        self.test_mode = True

    # for f in ['impc', 'euro', 'mgd', '3i']:
    for f in ['all']:
        file = '/'.join((self.rawdir, self.files[f]['file']))
        self._process_data(file, limit)

    LOG.info("Finished parsing")

    return
python
[ "def", "parse", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "limit", "is", "not", "None", ":", "LOG", ".", "info", "(", "\"Only parsing first %s rows fo each file\"", ",", "str", "(", "limit", ")", ")", "LOG", ".", "info", "(", "\"Parsing files...\"", ")", "if", "self", ".", "test_only", ":", "self", ".", "test_mode", "=", "True", "# for f in ['impc', 'euro', 'mgd', '3i']:", "for", "f", "in", "[", "'all'", "]", ":", "file", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "f", "]", "[", "'file'", "]", ")", ")", "self", ".", "_process_data", "(", "file", ",", "limit", ")", "LOG", ".", "info", "(", "\"Finished parsing\"", ")", "return" ]
IMPC data is delivered in three separate csv files OR in one integrated file, each with the same file format. :param limit: :return:
[ "IMPC", "data", "is", "delivered", "in", "three", "separate", "csv", "files", "OR", "in", "one", "integrated", "file", "each", "with", "the", "same", "file", "format", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/IMPC.py#L119-L143
train
251,209
monarch-initiative/dipper
dipper/models/Pathway.py
Pathway.addGeneToPathway
def addGeneToPathway(self, gene_id, pathway_id):
    """
    When adding a gene to a pathway, we create an intermediate
    'gene product' that is involved in
    the pathway, through a blank node.

    gene_id RO:has_gene_product _gene_product
    _gene_product RO:involved_in pathway_id

    :param pathway_id:
    :param gene_id:
    :return:
    """
    gene_product = '_:'+re.sub(r':', '', gene_id) + 'product'
    self.model.addIndividualToGraph(
        gene_product, None, self.globaltt['gene_product'])
    self.graph.addTriple(
        gene_id, self.globaltt['has gene product'], gene_product)
    self.addComponentToPathway(gene_product, pathway_id)

    return
python
[ "def", "addGeneToPathway", "(", "self", ",", "gene_id", ",", "pathway_id", ")", ":", "gene_product", "=", "'_:'", "+", "re", ".", "sub", "(", "r':'", ",", "''", ",", "gene_id", ")", "+", "'product'", "self", ".", "model", ".", "addIndividualToGraph", "(", "gene_product", ",", "None", ",", "self", ".", "globaltt", "[", "'gene_product'", "]", ")", "self", ".", "graph", ".", "addTriple", "(", "gene_id", ",", "self", ".", "globaltt", "[", "'has gene product'", "]", ",", "gene_product", ")", "self", ".", "addComponentToPathway", "(", "gene_product", ",", "pathway_id", ")", "return" ]
When adding a gene to a pathway, we create an intermediate 'gene product' that is involved in the pathway, through a blank node. gene_id RO:has_gene_product _gene_product _gene_product RO:involved_in pathway_id :param pathway_id: :param gene_id: :return:
[ "When", "adding", "a", "gene", "to", "a", "pathway", "we", "create", "an", "intermediate", "gene", "product", "that", "is", "involved", "in", "the", "pathway", "through", "a", "blank", "node", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Pathway.py#L50-L71
train
251,210
monarch-initiative/dipper
dipper/models/Pathway.py
Pathway.addComponentToPathway
def addComponentToPathway(self, component_id, pathway_id):
    """
    This can be used directly when the component is directly involved in
    the pathway.  If a transforming event is performed on the component
    first, then the addGeneToPathway should be used instead.

    :param pathway_id:
    :param component_id:
    :return:
    """
    self.graph.addTriple(component_id, self.globaltt['involved in'], pathway_id)

    return
python
[ "def", "addComponentToPathway", "(", "self", ",", "component_id", ",", "pathway_id", ")", ":", "self", ".", "graph", ".", "addTriple", "(", "component_id", ",", "self", ".", "globaltt", "[", "'involved in'", "]", ",", "pathway_id", ")", "return" ]
This can be used directly when the component is directly involved in the pathway. If a transforming event is performed on the component first, then the addGeneToPathway should be used instead. :param pathway_id: :param component_id: :return:
[ "This", "can", "be", "used", "directly", "when", "the", "component", "is", "directly", "involved", "in", "the", "pathway", ".", "If", "a", "transforming", "event", "is", "performed", "on", "the", "component", "first", "then", "the", "addGeneToPathway", "should", "be", "used", "instead", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Pathway.py#L73-L85
train
251,211
monarch-initiative/dipper
dipper/sources/Source.py
Source.write
def write(self, fmt='turtle', stream=None): """ This convenience method will write out all of the graphs associated with the source. Right now these are hardcoded to be a single "graph" and a "src_dataset.ttl" and a "src_test.ttl" If you do not supply stream='stdout' it will default write these to files. In addition, if the version number isn't yet set in the dataset, it will be set to the date on file. :return: None """ fmt_ext = { 'rdfxml': 'xml', 'turtle': 'ttl', 'nt': 'nt', # ntriples 'nquads': 'nq', 'n3': 'n3' # notation3 } # make the regular graph output file dest = None if self.name is not None: dest = '/'.join((self.outdir, self.name)) if fmt in fmt_ext: dest = '.'.join((dest, fmt_ext.get(fmt))) else: dest = '.'.join((dest, fmt)) LOG.info("Setting outfile to %s", dest) # make the dataset_file name, always format as turtle self.datasetfile = '/'.join( (self.outdir, self.name + '_dataset.ttl')) LOG.info("Setting dataset file to %s", self.datasetfile) if self.dataset is not None and self.dataset.version is None: self.dataset.set_version_by_date() LOG.info("No version for %s setting to date issued.", self.name) else: LOG.warning("No output file set. Using stdout") stream = 'stdout' gu = GraphUtils(None) # the _dataset description is always turtle gu.write(self.dataset.getGraph(), 'turtle', filename=self.datasetfile) if self.test_mode: # unless we stop hardcoding, the test dataset is always turtle LOG.info("Setting testfile to %s", self.testfile) gu.write(self.testgraph, 'turtle', filename=self.testfile) # print graph out if stream is None: outfile = dest elif stream.lower().strip() == 'stdout': outfile = None else: LOG.error("I don't understand our stream.") return gu.write(self.graph, fmt, filename=outfile)
python
def write(self, fmt='turtle', stream=None): """ This convenience method will write out all of the graphs associated with the source. Right now these are hardcoded to be a single "graph" and a "src_dataset.ttl" and a "src_test.ttl" If you do not supply stream='stdout' it will default write these to files. In addition, if the version number isn't yet set in the dataset, it will be set to the date on file. :return: None """ fmt_ext = { 'rdfxml': 'xml', 'turtle': 'ttl', 'nt': 'nt', # ntriples 'nquads': 'nq', 'n3': 'n3' # notation3 } # make the regular graph output file dest = None if self.name is not None: dest = '/'.join((self.outdir, self.name)) if fmt in fmt_ext: dest = '.'.join((dest, fmt_ext.get(fmt))) else: dest = '.'.join((dest, fmt)) LOG.info("Setting outfile to %s", dest) # make the dataset_file name, always format as turtle self.datasetfile = '/'.join( (self.outdir, self.name + '_dataset.ttl')) LOG.info("Setting dataset file to %s", self.datasetfile) if self.dataset is not None and self.dataset.version is None: self.dataset.set_version_by_date() LOG.info("No version for %s setting to date issued.", self.name) else: LOG.warning("No output file set. Using stdout") stream = 'stdout' gu = GraphUtils(None) # the _dataset description is always turtle gu.write(self.dataset.getGraph(), 'turtle', filename=self.datasetfile) if self.test_mode: # unless we stop hardcoding, the test dataset is always turtle LOG.info("Setting testfile to %s", self.testfile) gu.write(self.testgraph, 'turtle', filename=self.testfile) # print graph out if stream is None: outfile = dest elif stream.lower().strip() == 'stdout': outfile = None else: LOG.error("I don't understand our stream.") return gu.write(self.graph, fmt, filename=outfile)
[ "def", "write", "(", "self", ",", "fmt", "=", "'turtle'", ",", "stream", "=", "None", ")", ":", "fmt_ext", "=", "{", "'rdfxml'", ":", "'xml'", ",", "'turtle'", ":", "'ttl'", ",", "'nt'", ":", "'nt'", ",", "# ntriples", "'nquads'", ":", "'nq'", ",", "'n3'", ":", "'n3'", "# notation3", "}", "# make the regular graph output file", "dest", "=", "None", "if", "self", ".", "name", "is", "not", "None", ":", "dest", "=", "'/'", ".", "join", "(", "(", "self", ".", "outdir", ",", "self", ".", "name", ")", ")", "if", "fmt", "in", "fmt_ext", ":", "dest", "=", "'.'", ".", "join", "(", "(", "dest", ",", "fmt_ext", ".", "get", "(", "fmt", ")", ")", ")", "else", ":", "dest", "=", "'.'", ".", "join", "(", "(", "dest", ",", "fmt", ")", ")", "LOG", ".", "info", "(", "\"Setting outfile to %s\"", ",", "dest", ")", "# make the dataset_file name, always format as turtle", "self", ".", "datasetfile", "=", "'/'", ".", "join", "(", "(", "self", ".", "outdir", ",", "self", ".", "name", "+", "'_dataset.ttl'", ")", ")", "LOG", ".", "info", "(", "\"Setting dataset file to %s\"", ",", "self", ".", "datasetfile", ")", "if", "self", ".", "dataset", "is", "not", "None", "and", "self", ".", "dataset", ".", "version", "is", "None", ":", "self", ".", "dataset", ".", "set_version_by_date", "(", ")", "LOG", ".", "info", "(", "\"No version for %s setting to date issued.\"", ",", "self", ".", "name", ")", "else", ":", "LOG", ".", "warning", "(", "\"No output file set. Using stdout\"", ")", "stream", "=", "'stdout'", "gu", "=", "GraphUtils", "(", "None", ")", "# the _dataset description is always turtle", "gu", ".", "write", "(", "self", ".", "dataset", ".", "getGraph", "(", ")", ",", "'turtle'", ",", "filename", "=", "self", ".", "datasetfile", ")", "if", "self", ".", "test_mode", ":", "# unless we stop hardcoding, the test dataset is always turtle", "LOG", ".", "info", "(", "\"Setting testfile to %s\"", ",", "self", ".", "testfile", ")", "gu", ".", "write", "(", "self", ".", "testgraph", ",", "'turtle'", ",", "filename", "=", "self", ".", "testfile", ")", "# print graph out", "if", "stream", "is", "None", ":", "outfile", "=", "dest", "elif", "stream", ".", "lower", "(", ")", ".", "strip", "(", ")", "==", "'stdout'", ":", "outfile", "=", "None", "else", ":", "LOG", ".", "error", "(", "\"I don't understand our stream.\"", ")", "return", "gu", ".", "write", "(", "self", ".", "graph", ",", "fmt", ",", "filename", "=", "outfile", ")" ]
This convenience method will write out all of the graphs associated with the source. Right now these are hardcoded to be a single "graph", a "src_dataset.ttl", and a "src_test.ttl". If you do not supply stream='stdout', it will write these to files by default. In addition, if the version number isn't yet set in the dataset, it will be set to the date on file. :return: None
[ "This", "convenience", "method", "will", "write", "out", "all", "of", "the", "graphs", "associated", "with", "the", "source", ".", "Right", "now", "these", "are", "hardcoded", "to", "be", "a", "single", "graph", "and", "a", "src_dataset", ".", "ttl", "and", "a", "src_test", ".", "ttl", "If", "you", "do", "not", "supply", "stream", "=", "stdout", "it", "will", "default", "write", "these", "to", "files", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L161-L223
train
251,212
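The path handling in the Source.write record reduces to the fmt_ext mapping plus string joins. The sketch below reproduces just that logic so the resulting filenames can be checked standalone; the outdir and source name values are placeholders, not taken from the source.

def output_paths(outdir, name, fmt='turtle'):
    # Same extension mapping as in Source.write.
    fmt_ext = {'rdfxml': 'xml', 'turtle': 'ttl', 'nt': 'nt', 'nquads': 'nq', 'n3': 'n3'}
    dest = '/'.join((outdir, name)) + '.' + fmt_ext.get(fmt, fmt)
    # The dataset description file is always written as turtle.
    datasetfile = '/'.join((outdir, name + '_dataset.ttl'))
    return dest, datasetfile

print(output_paths('out', 'biogrid'))
# ('out/biogrid.ttl', 'out/biogrid_dataset.ttl')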
monarch-initiative/dipper
dipper/sources/Source.py
Source.declareAsOntology
def declareAsOntology(self, graph): """ The file we output needs to be declared as an ontology, including it's version information. TEC: I am not convinced dipper reformating external data as RDF triples makes an OWL ontology (nor that it should be considered a goal). Proper ontologies are built by ontologists. Dipper reformats data and anotates/decorates it with a minimal set of carefully arranged terms drawn from from multiple proper ontologies. Which allows the whole (dipper's RDF triples and parent ontologies) to function as a single ontology we can reason over when combined in a store such as SciGraph. Including more than the minimal ontological terms in dipper's RDF output constitutes a liability as it allows greater divergence between dipper artifacts and the proper ontologies. Further information will be augmented in the dataset object. :param version: :return: """ # <http://data.monarchinitiative.org/ttl/biogrid.ttl> a owl:Ontology ; # owl:versionInfo # <https://archive.monarchinitiative.org/YYYYMM/ttl/biogrid.ttl> model = Model(graph) # is self.outfile suffix set yet??? ontology_file_id = 'MonarchData:' + self.name + ".ttl" model.addOntologyDeclaration(ontology_file_id) # add timestamp as version info cur_time = datetime.now() t_string = cur_time.strftime("%Y-%m-%d") ontology_version = t_string # TEC this means the MonarchArchive IRI needs the release updated # maybe extract the version info from there # should not hardcode the suffix as it may change archive_url = 'MonarchArchive:' + 'ttl/' + self.name + '.ttl' model.addOWLVersionIRI(ontology_file_id, archive_url) model.addOWLVersionInfo(ontology_file_id, ontology_version)
python
def declareAsOntology(self, graph): """ The file we output needs to be declared as an ontology, including it's version information. TEC: I am not convinced dipper reformating external data as RDF triples makes an OWL ontology (nor that it should be considered a goal). Proper ontologies are built by ontologists. Dipper reformats data and anotates/decorates it with a minimal set of carefully arranged terms drawn from from multiple proper ontologies. Which allows the whole (dipper's RDF triples and parent ontologies) to function as a single ontology we can reason over when combined in a store such as SciGraph. Including more than the minimal ontological terms in dipper's RDF output constitutes a liability as it allows greater divergence between dipper artifacts and the proper ontologies. Further information will be augmented in the dataset object. :param version: :return: """ # <http://data.monarchinitiative.org/ttl/biogrid.ttl> a owl:Ontology ; # owl:versionInfo # <https://archive.monarchinitiative.org/YYYYMM/ttl/biogrid.ttl> model = Model(graph) # is self.outfile suffix set yet??? ontology_file_id = 'MonarchData:' + self.name + ".ttl" model.addOntologyDeclaration(ontology_file_id) # add timestamp as version info cur_time = datetime.now() t_string = cur_time.strftime("%Y-%m-%d") ontology_version = t_string # TEC this means the MonarchArchive IRI needs the release updated # maybe extract the version info from there # should not hardcode the suffix as it may change archive_url = 'MonarchArchive:' + 'ttl/' + self.name + '.ttl' model.addOWLVersionIRI(ontology_file_id, archive_url) model.addOWLVersionInfo(ontology_file_id, ontology_version)
[ "def", "declareAsOntology", "(", "self", ",", "graph", ")", ":", "# <http://data.monarchinitiative.org/ttl/biogrid.ttl> a owl:Ontology ;", "# owl:versionInfo", "# <https://archive.monarchinitiative.org/YYYYMM/ttl/biogrid.ttl>", "model", "=", "Model", "(", "graph", ")", "# is self.outfile suffix set yet???", "ontology_file_id", "=", "'MonarchData:'", "+", "self", ".", "name", "+", "\".ttl\"", "model", ".", "addOntologyDeclaration", "(", "ontology_file_id", ")", "# add timestamp as version info", "cur_time", "=", "datetime", ".", "now", "(", ")", "t_string", "=", "cur_time", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "ontology_version", "=", "t_string", "# TEC this means the MonarchArchive IRI needs the release updated", "# maybe extract the version info from there", "# should not hardcode the suffix as it may change", "archive_url", "=", "'MonarchArchive:'", "+", "'ttl/'", "+", "self", ".", "name", "+", "'.ttl'", "model", ".", "addOWLVersionIRI", "(", "ontology_file_id", ",", "archive_url", ")", "model", ".", "addOWLVersionInfo", "(", "ontology_file_id", ",", "ontology_version", ")" ]
The file we output needs to be declared as an ontology, including its version information. TEC: I am not convinced dipper reformatting external data as RDF triples makes an OWL ontology (nor that it should be considered a goal). Proper ontologies are built by ontologists. Dipper reformats data and annotates/decorates it with a minimal set of carefully arranged terms drawn from multiple proper ontologies, which allows the whole (dipper's RDF triples and parent ontologies) to function as a single ontology we can reason over when combined in a store such as SciGraph. Including more than the minimal ontological terms in dipper's RDF output constitutes a liability, as it allows greater divergence between dipper artifacts and the proper ontologies. Further information will be augmented in the dataset object. :param version: :return:
[ "The", "file", "we", "output", "needs", "to", "be", "declared", "as", "an", "ontology", "including", "it", "s", "version", "information", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L614-L660
train
251,213
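The comment block inside declareAsOntology spells out the intended triples. Below is a rough rdflib sketch of that output, assuming a recent rdflib where the OWL namespace defines versionIRI and versionInfo; the expanded IRIs are illustrative stand-ins for the MonarchData and MonarchArchive CURIEs, since the real expansion happens elsewhere in dipper.

from datetime import datetime
from rdflib import Graph, Literal, URIRef
from rdflib.namespace import OWL, RDF

graph = Graph()
# Hypothetical expansions of the MonarchData: and MonarchArchive: CURIEs.
ont = URIRef('https://data.monarchinitiative.org/ttl/biogrid.ttl')
archive = URIRef('https://archive.monarchinitiative.org/ttl/biogrid.ttl')

graph.add((ont, RDF.type, OWL.Ontology))
graph.add((ont, OWL.versionIRI, archive))
# Version info is simply today's date, as in the record.
graph.add((ont, OWL.versionInfo, Literal(datetime.now().strftime('%Y-%m-%d'))))
print(graph.serialize(format='turtle'))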
monarch-initiative/dipper
dipper/sources/Source.py
Source.remove_backslash_r
def remove_backslash_r(filename, encoding): """ A helpful utility to remove Carriage Return from any file. This will read a file into memory, and overwrite the contents of the original file. TODO: This function may be a liability :param filename: :return: """ with open(filename, 'r', encoding=encoding, newline=r'\n') as filereader: contents = filereader.read() contents = re.sub(r'\r', '', contents) with open(filename, "w") as filewriter: filewriter.truncate() filewriter.write(contents)
python
def remove_backslash_r(filename, encoding): """ A helpful utility to remove Carriage Return from any file. This will read a file into memory, and overwrite the contents of the original file. TODO: This function may be a liability :param filename: :return: """ with open(filename, 'r', encoding=encoding, newline=r'\n') as filereader: contents = filereader.read() contents = re.sub(r'\r', '', contents) with open(filename, "w") as filewriter: filewriter.truncate() filewriter.write(contents)
[ "def", "remove_backslash_r", "(", "filename", ",", "encoding", ")", ":", "with", "open", "(", "filename", ",", "'r'", ",", "encoding", "=", "encoding", ",", "newline", "=", "r'\\n'", ")", "as", "filereader", ":", "contents", "=", "filereader", ".", "read", "(", ")", "contents", "=", "re", ".", "sub", "(", "r'\\r'", ",", "''", ",", "contents", ")", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "filewriter", ":", "filewriter", ".", "truncate", "(", ")", "filewriter", ".", "write", "(", "contents", ")" ]
A helpful utility to remove Carriage Return from any file. This will read a file into memory, and overwrite the contents of the original file. TODO: This function may be a liability :param filename: :return:
[ "A", "helpful", "utility", "to", "remove", "Carriage", "Return", "from", "any", "file", ".", "This", "will", "read", "a", "file", "into", "memory", "and", "overwrite", "the", "contents", "of", "the", "original", "file", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L664-L683
train
251,214
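The remove_backslash_r record carries its own TODO warning, and the newline=r'\n' argument is suspect: open() only accepts None, '', '\n', '\r', or '\r\n' for newline, so a literal backslash-n string raises ValueError. A standalone sketch of the same carriage-return stripping, assuming that removing every '\r' is really the intent, would look like this:

import re

def strip_carriage_returns(filename, encoding='utf-8'):
    # newline='' disables newline translation so '\r' characters survive the read
    # and can be removed explicitly; the cleaned text then overwrites the file.
    with open(filename, 'r', encoding=encoding, newline='') as reader:
        contents = reader.read()
    contents = re.sub(r'\r', '', contents)
    with open(filename, 'w', encoding=encoding, newline='') as writer:
        writer.write(contents)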
monarch-initiative/dipper
dipper/sources/Source.py
Source.load_local_translationtable
def load_local_translationtable(self, name): ''' Load "ingest specific" translation from whatever they called something to the ontology label we need to map it to. To facilitate seeing more ontology lables in dipper ingests a reverse mapping from ontology lables to external strings is also generated and available as a dict localtcid ''' localtt_file = 'translationtable/' + name + '.yaml' try: with open(localtt_file): pass except IOError: # write a stub file as a place holder if none exists with open(localtt_file, 'w') as write_yaml: yaml.dump({name: name}, write_yaml) finally: with open(localtt_file, 'r') as read_yaml: localtt = yaml.safe_load(read_yaml) # inverse local translation. # note: keeping this invertable will be work. # Useful to not litter an ingest with external syntax self.localtcid = {v: k for k, v in localtt.items()} return localtt
python
def load_local_translationtable(self, name): ''' Load "ingest specific" translation from whatever they called something to the ontology label we need to map it to. To facilitate seeing more ontology lables in dipper ingests a reverse mapping from ontology lables to external strings is also generated and available as a dict localtcid ''' localtt_file = 'translationtable/' + name + '.yaml' try: with open(localtt_file): pass except IOError: # write a stub file as a place holder if none exists with open(localtt_file, 'w') as write_yaml: yaml.dump({name: name}, write_yaml) finally: with open(localtt_file, 'r') as read_yaml: localtt = yaml.safe_load(read_yaml) # inverse local translation. # note: keeping this invertable will be work. # Useful to not litter an ingest with external syntax self.localtcid = {v: k for k, v in localtt.items()} return localtt
[ "def", "load_local_translationtable", "(", "self", ",", "name", ")", ":", "localtt_file", "=", "'translationtable/'", "+", "name", "+", "'.yaml'", "try", ":", "with", "open", "(", "localtt_file", ")", ":", "pass", "except", "IOError", ":", "# write a stub file as a place holder if none exists", "with", "open", "(", "localtt_file", ",", "'w'", ")", "as", "write_yaml", ":", "yaml", ".", "dump", "(", "{", "name", ":", "name", "}", ",", "write_yaml", ")", "finally", ":", "with", "open", "(", "localtt_file", ",", "'r'", ")", "as", "read_yaml", ":", "localtt", "=", "yaml", ".", "safe_load", "(", "read_yaml", ")", "# inverse local translation.", "# note: keeping this invertable will be work.", "# Useful to not litter an ingest with external syntax", "self", ".", "localtcid", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "localtt", ".", "items", "(", ")", "}", "return", "localtt" ]
Load "ingest specific" translation from whatever they called something to the ontology label we need to map it to. To facilitate seeing more ontology lables in dipper ingests a reverse mapping from ontology lables to external strings is also generated and available as a dict localtcid
[ "Load", "ingest", "specific", "translation", "from", "whatever", "they", "called", "something", "to", "the", "ontology", "label", "we", "need", "to", "map", "it", "to", ".", "To", "facilitate", "seeing", "more", "ontology", "lables", "in", "dipper", "ingests", "a", "reverse", "mapping", "from", "ontology", "lables", "to", "external", "strings", "is", "also", "generated", "and", "available", "as", "a", "dict", "localtcid" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L739-L767
train
251,215
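A small illustration of what load_local_translationtable produces, assuming a translationtable/<name>.yaml with the hypothetical two-entry content shown in the comment; only the forward dict and its inversion are reproduced, not the stub-file creation.

import yaml

# Hypothetical contents of translationtable/example.yaml:
#   "affects phenotype": "has phenotype"
#   "unknown": "sequence_variant"
raw = '"affects phenotype": "has phenotype"\n"unknown": "sequence_variant"\n'

localtt = yaml.safe_load(raw)                    # external string -> ontology label
localtcid = {v: k for k, v in localtt.items()}   # ontology label -> external string
print(localtt['affects phenotype'])              # has phenotype
print(localtcid['sequence_variant'])             # unknown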
monarch-initiative/dipper
dipper/models/Genotype.py
Genotype.addGene
def addGene(
            self, gene_id, gene_label, gene_type=None, gene_description=None
    ):
        ''' genes are classes '''
        if gene_type is None:
            gene_type = self.globaltt['gene']
        self.model.addClassToGraph(gene_id, gene_label, gene_type, gene_description)
        return
python
def addGene(
            self, gene_id, gene_label, gene_type=None, gene_description=None
    ):
        ''' genes are classes '''
        if gene_type is None:
            gene_type = self.globaltt['gene']
        self.model.addClassToGraph(gene_id, gene_label, gene_type, gene_description)
        return
[ "def", "addGene", "(", "self", ",", "gene_id", ",", "gene_label", ",", "gene_type", "=", "None", ",", "gene_description", "=", "None", ")", ":", "if", "gene_type", "is", "None", ":", "gene_type", "=", "self", ".", "globaltt", "[", "'gene'", "]", "self", ".", "model", ".", "addClassToGraph", "(", "gene_id", ",", "gene_label", ",", "gene_type", ",", "gene_description", ")", "return" ]
genes are classes
[ "genes", "are", "classes" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Genotype.py#L79-L87
train
251,216
monarch-initiative/dipper
dipper/utils/DipperUtil.py
DipperUtil.get_ncbi_taxon_num_by_label
def get_ncbi_taxon_num_by_label(label): """ Here we want to look up the NCBI Taxon id using some kind of label. It will only return a result if there is a unique hit. :return: """ req = {'db': 'taxonomy', 'retmode': 'json', 'term': label} req.update(EREQ) request = SESSION.get(ESEARCH, params=req) LOG.info('fetching: %s', request.url) request.raise_for_status() result = request.json()['esearchresult'] # Occasionally eutils returns the json blob # {'ERROR': 'Invalid db name specified: taxonomy'} if 'ERROR' in result: request = SESSION.get(ESEARCH, params=req) LOG.info('fetching: %s', request.url) request.raise_for_status() result = request.json()['esearchresult'] tax_num = None if 'count' in result and str(result['count']) == '1': tax_num = result['idlist'][0] else: # TODO throw errors LOG.warning('ESEARCH for taxon label "%s" returns %s', label, str(result)) return tax_num
python
def get_ncbi_taxon_num_by_label(label): """ Here we want to look up the NCBI Taxon id using some kind of label. It will only return a result if there is a unique hit. :return: """ req = {'db': 'taxonomy', 'retmode': 'json', 'term': label} req.update(EREQ) request = SESSION.get(ESEARCH, params=req) LOG.info('fetching: %s', request.url) request.raise_for_status() result = request.json()['esearchresult'] # Occasionally eutils returns the json blob # {'ERROR': 'Invalid db name specified: taxonomy'} if 'ERROR' in result: request = SESSION.get(ESEARCH, params=req) LOG.info('fetching: %s', request.url) request.raise_for_status() result = request.json()['esearchresult'] tax_num = None if 'count' in result and str(result['count']) == '1': tax_num = result['idlist'][0] else: # TODO throw errors LOG.warning('ESEARCH for taxon label "%s" returns %s', label, str(result)) return tax_num
[ "def", "get_ncbi_taxon_num_by_label", "(", "label", ")", ":", "req", "=", "{", "'db'", ":", "'taxonomy'", ",", "'retmode'", ":", "'json'", ",", "'term'", ":", "label", "}", "req", ".", "update", "(", "EREQ", ")", "request", "=", "SESSION", ".", "get", "(", "ESEARCH", ",", "params", "=", "req", ")", "LOG", ".", "info", "(", "'fetching: %s'", ",", "request", ".", "url", ")", "request", ".", "raise_for_status", "(", ")", "result", "=", "request", ".", "json", "(", ")", "[", "'esearchresult'", "]", "# Occasionally eutils returns the json blob", "# {'ERROR': 'Invalid db name specified: taxonomy'}", "if", "'ERROR'", "in", "result", ":", "request", "=", "SESSION", ".", "get", "(", "ESEARCH", ",", "params", "=", "req", ")", "LOG", ".", "info", "(", "'fetching: %s'", ",", "request", ".", "url", ")", "request", ".", "raise_for_status", "(", ")", "result", "=", "request", ".", "json", "(", ")", "[", "'esearchresult'", "]", "tax_num", "=", "None", "if", "'count'", "in", "result", "and", "str", "(", "result", "[", "'count'", "]", ")", "==", "'1'", ":", "tax_num", "=", "result", "[", "'idlist'", "]", "[", "0", "]", "else", ":", "# TODO throw errors", "LOG", ".", "warning", "(", "'ESEARCH for taxon label \"%s\" returns %s'", ",", "label", ",", "str", "(", "result", ")", ")", "return", "tax_num" ]
Here we want to look up the NCBI Taxon id using some kind of label. It will only return a result if there is a unique hit. :return:
[ "Here", "we", "want", "to", "look", "up", "the", "NCBI", "Taxon", "id", "using", "some", "kind", "of", "label", ".", "It", "will", "only", "return", "a", "result", "if", "there", "is", "a", "unique", "hit", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/DipperUtil.py#L47-L78
train
251,217
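The lookup in get_ncbi_taxon_num_by_label can be reproduced outside dipper with plain requests. The ESEARCH URL below is NCBI's standard E-utilities endpoint; the EREQ extras merged in by the source (most likely tool, email, and API-key parameters) are omitted here, which NCBI tolerates for low-volume use. The printed taxon number is only the expected result, not asserted by the record.

import requests

ESEARCH = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'

def taxon_num(label):
    # Same query shape as the record: db=taxonomy, JSON output, term=label.
    params = {'db': 'taxonomy', 'retmode': 'json', 'term': label}
    result = requests.get(ESEARCH, params=params, timeout=30).json()['esearchresult']
    # Only trust a unique hit, as the source does.
    if result.get('count') == '1':
        return result['idlist'][0]
    return None

print(taxon_num('Danio rerio'))  # expected '7955'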
monarch-initiative/dipper
dipper/models/assoc/Association.py
Assoc.set_association_id
def set_association_id(self, assoc_id=None): """ This will set the association ID based on the internal parts of the association. To be used in cases where an external association identifier should be used. :param assoc_id: :return: """ if assoc_id is None: self.assoc_id = self.make_association_id( self.definedby, self.sub, self.rel, self.obj) else: self.assoc_id = assoc_id return self.assoc_id
python
def set_association_id(self, assoc_id=None): """ This will set the association ID based on the internal parts of the association. To be used in cases where an external association identifier should be used. :param assoc_id: :return: """ if assoc_id is None: self.assoc_id = self.make_association_id( self.definedby, self.sub, self.rel, self.obj) else: self.assoc_id = assoc_id return self.assoc_id
[ "def", "set_association_id", "(", "self", ",", "assoc_id", "=", "None", ")", ":", "if", "assoc_id", "is", "None", ":", "self", ".", "assoc_id", "=", "self", ".", "make_association_id", "(", "self", ".", "definedby", ",", "self", ".", "sub", ",", "self", ".", "rel", ",", "self", ".", "obj", ")", "else", ":", "self", ".", "assoc_id", "=", "assoc_id", "return", "self", ".", "assoc_id" ]
This will set the association ID based on the internal parts of the association, unless an external association identifier is supplied via assoc_id, in which case that identifier is used instead. :param assoc_id: :return:
[ "This", "will", "set", "the", "association", "ID", "based", "on", "the", "internal", "parts", "of", "the", "association", ".", "To", "be", "used", "in", "cases", "where", "an", "external", "association", "identifier", "should", "be", "used", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/Association.py#L166-L184
train
251,218
monarch-initiative/dipper
dipper/models/assoc/Association.py
Assoc.make_association_id
def make_association_id(definedby, sub, pred, obj, attributes=None): """ A method to create unique identifiers for OBAN-style associations, based on all the parts of the association If any of the items is empty or None, it will convert it to blank. It effectively digests the string of concatonated values. Subclasses of Assoc can submit an additional array of attributes that will be appeded to the ID. Note this is equivalent to a RDF blank node :param definedby: The (data) resource that provided the annotation :param subject: :param predicate: :param object: :param attributes: :return: """ items_to_hash = [definedby, sub, pred, obj] if attributes is not None and len(attributes) > 0: items_to_hash += attributes items_to_hash = [x for x in items_to_hash if x is not None] assoc_id = ':'.join(('MONARCH', GraphUtils.digest_id('+'.join(items_to_hash)))) assert assoc_id is not None return assoc_id
python
def make_association_id(definedby, sub, pred, obj, attributes=None): """ A method to create unique identifiers for OBAN-style associations, based on all the parts of the association If any of the items is empty or None, it will convert it to blank. It effectively digests the string of concatonated values. Subclasses of Assoc can submit an additional array of attributes that will be appeded to the ID. Note this is equivalent to a RDF blank node :param definedby: The (data) resource that provided the annotation :param subject: :param predicate: :param object: :param attributes: :return: """ items_to_hash = [definedby, sub, pred, obj] if attributes is not None and len(attributes) > 0: items_to_hash += attributes items_to_hash = [x for x in items_to_hash if x is not None] assoc_id = ':'.join(('MONARCH', GraphUtils.digest_id('+'.join(items_to_hash)))) assert assoc_id is not None return assoc_id
[ "def", "make_association_id", "(", "definedby", ",", "sub", ",", "pred", ",", "obj", ",", "attributes", "=", "None", ")", ":", "items_to_hash", "=", "[", "definedby", ",", "sub", ",", "pred", ",", "obj", "]", "if", "attributes", "is", "not", "None", "and", "len", "(", "attributes", ")", ">", "0", ":", "items_to_hash", "+=", "attributes", "items_to_hash", "=", "[", "x", "for", "x", "in", "items_to_hash", "if", "x", "is", "not", "None", "]", "assoc_id", "=", "':'", ".", "join", "(", "(", "'MONARCH'", ",", "GraphUtils", ".", "digest_id", "(", "'+'", ".", "join", "(", "items_to_hash", ")", ")", ")", ")", "assert", "assoc_id", "is", "not", "None", "return", "assoc_id" ]
A method to create unique identifiers for OBAN-style associations, based on all the parts of the association. If any of the items is empty or None, it will be converted to blank. It effectively digests the string of concatenated values. Subclasses of Assoc can submit an additional array of attributes that will be appended to the ID. Note this is equivalent to an RDF blank node. :param definedby: The (data) resource that provided the annotation :param subject: :param predicate: :param object: :param attributes: :return:
[ "A", "method", "to", "create", "unique", "identifiers", "for", "OBAN", "-", "style", "associations", "based", "on", "all", "the", "parts", "of", "the", "association", "If", "any", "of", "the", "items", "is", "empty", "or", "None", "it", "will", "convert", "it", "to", "blank", ".", "It", "effectively", "digests", "the", "string", "of", "concatonated", "values", ".", "Subclasses", "of", "Assoc", "can", "submit", "an", "additional", "array", "of", "attributes", "that", "will", "be", "appeded", "to", "the", "ID", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/Association.py#L250-L279
train
251,219
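The association id in make_association_id is a MONARCH CURIE built from a digest of the '+'-joined, None-filtered parts. GraphUtils.digest_id is not shown in this record, so the sketch below substitutes a plain hashlib digest purely to illustrate the joining and filtering; the real digest function will yield different identifiers.

import hashlib

def sketch_association_id(definedby, sub, pred, obj, attributes=None):
    items = [definedby, sub, pred, obj]
    if attributes:
        items += attributes
    items = [x for x in items if x is not None]
    # Stand-in for GraphUtils.digest_id; the actual implementation differs.
    digest = hashlib.md5('+'.join(items).encode('utf-8')).hexdigest()
    return ':'.join(('MONARCH', digest))

# Identifiers below are illustrative only.
print(sketch_association_id('ZFIN', 'ZFIN:ZDB-GENE-000000-1', 'RO:0002200', 'ZP:0000001'))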
monarch-initiative/dipper
dipper/utils/romanplus.py
toRoman
def toRoman(num):
    """convert integer to Roman numeral"""
    if not 0 < num < 5000:
        raise ValueError("number %n out of range (must be 1..4999)", num)
    if int(num) != num:
        raise TypeError("decimals %n can not be converted", num)

    result = ""
    for numeral, integer in romanNumeralMap:
        while num >= integer:
            result += numeral
            num -= integer
    return result
python
def toRoman(num):
    """convert integer to Roman numeral"""
    if not 0 < num < 5000:
        raise ValueError("number %n out of range (must be 1..4999)", num)
    if int(num) != num:
        raise TypeError("decimals %n can not be converted", num)

    result = ""
    for numeral, integer in romanNumeralMap:
        while num >= integer:
            result += numeral
            num -= integer
    return result
[ "def", "toRoman", "(", "num", ")", ":", "if", "not", "0", "<", "num", "<", "5000", ":", "raise", "ValueError", "(", "\"number %n out of range (must be 1..4999)\"", ",", "num", ")", "if", "int", "(", "num", ")", "!=", "num", ":", "raise", "TypeError", "(", "\"decimals %n can not be converted\"", ",", "num", ")", "result", "=", "\"\"", "for", "numeral", ",", "integer", "in", "romanNumeralMap", ":", "while", "num", ">=", "integer", ":", "result", "+=", "numeral", "num", "-=", "integer", "return", "result" ]
convert integer to Roman numeral
[ "convert", "integer", "to", "Roman", "numeral" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/romanplus.py#L38-L50
train
251,220
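toRoman walks a greedy numeral map defined elsewhere in romanplus.py. The conventional subtractive pairs are assumed below so the same logic can be exercised standalone; if the module's actual map differs, the outputs would differ accordingly.

romanNumeralMap = (('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
                   ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
                   ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1))

def to_roman(num):
    # Greedy subtraction over the map, as in the record.
    result = ''
    for numeral, integer in romanNumeralMap:
        while num >= integer:
            result += numeral
            num -= integer
    return result

print(to_roman(1994))  # MCMXCIV
print(to_roman(2024))  # MMXXIV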
monarch-initiative/dipper
dipper/utils/romanplus.py
fromRoman
def fromRoman(strng):
    """convert Roman numeral to integer"""
    if not strng:
        raise TypeError('Input can not be blank')
    if not romanNumeralPattern.search(strng):
        raise ValueError('Invalid Roman numeral: %s', strng)

    result = 0
    index = 0
    for numeral, integer in romanNumeralMap:
        while strng[index:index+len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
python
def fromRoman(strng):
    """convert Roman numeral to integer"""
    if not strng:
        raise TypeError('Input can not be blank')
    if not romanNumeralPattern.search(strng):
        raise ValueError('Invalid Roman numeral: %s', strng)

    result = 0
    index = 0
    for numeral, integer in romanNumeralMap:
        while strng[index:index+len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
[ "def", "fromRoman", "(", "strng", ")", ":", "if", "not", "strng", ":", "raise", "TypeError", "(", "'Input can not be blank'", ")", "if", "not", "romanNumeralPattern", ".", "search", "(", "strng", ")", ":", "raise", "ValueError", "(", "'Invalid Roman numeral: %s'", ",", "strng", ")", "result", "=", "0", "index", "=", "0", "for", "numeral", ",", "integer", "in", "romanNumeralMap", ":", "while", "strng", "[", "index", ":", "index", "+", "len", "(", "numeral", ")", "]", "==", "numeral", ":", "result", "+=", "integer", "index", "+=", "len", "(", "numeral", ")", "return", "result" ]
convert Roman numeral to integer
[ "convert", "Roman", "numeral", "to", "integer" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/romanplus.py#L70-L83
train
251,221
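The inverse direction, reusing the assumed romanNumeralMap from the previous sketch. The regex validation from the record is skipped here, so this minimal version accepts some malformed numerals that fromRoman would reject.

def from_roman(strng):
    # Consume the string left to right with the same greedy map as above.
    result, index = 0, 0
    for numeral, integer in romanNumeralMap:
        while strng[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result

print(from_roman('MCMXCIV'))  # 1994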
monarch-initiative/dipper
dipper/sources/ZFIN.py
ZFIN._process_genotype_backgrounds
def _process_genotype_backgrounds(self, limit=None): """ This table provides a mapping of genotypes to background genotypes Note that the background_id is also a genotype_id. Makes these triples: <ZFIN:genotype_id> GENO:has_reference_part <ZFIN:background_id> <ZFIN:background_id> a GENO:genomic_background <ZFIN:background_id> in_taxon <taxon_id> <taxon_id> a class :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing genotype backgrounds") line_counter = 0 raw = '/'.join((self.rawdir, self.files['backgrounds']['file'])) geno = Genotype(graph) # Add the taxon as a class taxon_id = self.globaltt['Danio rerio'] model.addClassToGraph(taxon_id, None) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 # Genotype_ID Genotype_Name Background Background_Name (genotype_id, genotype_name, background_id, unused) = row if self.test_mode and genotype_id not in self.test_ids['genotype']: continue genotype_id = 'ZFIN:' + genotype_id.strip() background_id = 'ZFIN:' + background_id.strip() # store this in the hash for later lookup # when building fish genotypes self.genotype_backgrounds[genotype_id] = background_id # add the background into the graph, # in case we haven't seen it before geno.addGenomicBackground(background_id, None) # hang the taxon from the background geno.addTaxon(taxon_id, background_id) # add the intrinsic genotype to the graph # we DO NOT ADD THE LABEL here # as it doesn't include the background geno.addGenotype(genotype_id, None, self.globaltt['intrinsic_genotype']) # Add background to the intrinsic genotype geno.addGenomicBackgroundToGenotype(background_id, genotype_id) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with genotype backgrounds") return
python
def _process_genotype_backgrounds(self, limit=None): """ This table provides a mapping of genotypes to background genotypes Note that the background_id is also a genotype_id. Makes these triples: <ZFIN:genotype_id> GENO:has_reference_part <ZFIN:background_id> <ZFIN:background_id> a GENO:genomic_background <ZFIN:background_id> in_taxon <taxon_id> <taxon_id> a class :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing genotype backgrounds") line_counter = 0 raw = '/'.join((self.rawdir, self.files['backgrounds']['file'])) geno = Genotype(graph) # Add the taxon as a class taxon_id = self.globaltt['Danio rerio'] model.addClassToGraph(taxon_id, None) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 # Genotype_ID Genotype_Name Background Background_Name (genotype_id, genotype_name, background_id, unused) = row if self.test_mode and genotype_id not in self.test_ids['genotype']: continue genotype_id = 'ZFIN:' + genotype_id.strip() background_id = 'ZFIN:' + background_id.strip() # store this in the hash for later lookup # when building fish genotypes self.genotype_backgrounds[genotype_id] = background_id # add the background into the graph, # in case we haven't seen it before geno.addGenomicBackground(background_id, None) # hang the taxon from the background geno.addTaxon(taxon_id, background_id) # add the intrinsic genotype to the graph # we DO NOT ADD THE LABEL here # as it doesn't include the background geno.addGenotype(genotype_id, None, self.globaltt['intrinsic_genotype']) # Add background to the intrinsic genotype geno.addGenomicBackgroundToGenotype(background_id, genotype_id) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with genotype backgrounds") return
[ "def", "_process_genotype_backgrounds", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "LOG", ".", "info", "(", "\"Processing genotype backgrounds\"", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'backgrounds'", "]", "[", "'file'", "]", ")", ")", "geno", "=", "Genotype", "(", "graph", ")", "# Add the taxon as a class", "taxon_id", "=", "self", ".", "globaltt", "[", "'Danio rerio'", "]", "model", ".", "addClassToGraph", "(", "taxon_id", ",", "None", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "# Genotype_ID \tGenotype_Name \tBackground \tBackground_Name", "(", "genotype_id", ",", "genotype_name", ",", "background_id", ",", "unused", ")", "=", "row", "if", "self", ".", "test_mode", "and", "genotype_id", "not", "in", "self", ".", "test_ids", "[", "'genotype'", "]", ":", "continue", "genotype_id", "=", "'ZFIN:'", "+", "genotype_id", ".", "strip", "(", ")", "background_id", "=", "'ZFIN:'", "+", "background_id", ".", "strip", "(", ")", "# store this in the hash for later lookup", "# when building fish genotypes", "self", ".", "genotype_backgrounds", "[", "genotype_id", "]", "=", "background_id", "# add the background into the graph,", "# in case we haven't seen it before", "geno", ".", "addGenomicBackground", "(", "background_id", ",", "None", ")", "# hang the taxon from the background", "geno", ".", "addTaxon", "(", "taxon_id", ",", "background_id", ")", "# add the intrinsic genotype to the graph", "# we DO NOT ADD THE LABEL here", "# as it doesn't include the background", "geno", ".", "addGenotype", "(", "genotype_id", ",", "None", ",", "self", ".", "globaltt", "[", "'intrinsic_genotype'", "]", ")", "# Add background to the intrinsic genotype", "geno", ".", "addGenomicBackgroundToGenotype", "(", "background_id", ",", "genotype_id", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with genotype backgrounds\"", ")", "return" ]
This table provides a mapping of genotypes to background genotypes Note that the background_id is also a genotype_id. Makes these triples: <ZFIN:genotype_id> GENO:has_reference_part <ZFIN:background_id> <ZFIN:background_id> a GENO:genomic_background <ZFIN:background_id> in_taxon <taxon_id> <taxon_id> a class :param limit: :return:
[ "This", "table", "provides", "a", "mapping", "of", "genotypes", "to", "background", "genotypes", "Note", "that", "the", "background_id", "is", "also", "a", "genotype_id", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1048-L1113
train
251,222
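For the ZFIN genotype-background record, the per-row work reduces to CURIE prefixing plus one dictionary entry. The row below is invented to show the shape of the data; the identifiers are placeholders, not real ZFIN accessions, and the geno.addGenomicBackground/addTaxon/addGenotype calls are left out.

# Invented example row: Genotype_ID, Genotype_Name, Background, Background_Name.
row = ['ZDB-GENO-000000-1', 'example genotype', 'ZDB-GENO-000000-2', 'example background']

genotype_id, genotype_name, background_id, _unused = row
genotype_id = 'ZFIN:' + genotype_id.strip()
background_id = 'ZFIN:' + background_id.strip()

# Cached for later fish-genotype construction, exactly as the record does.
genotype_backgrounds = {genotype_id: background_id}
print(genotype_backgrounds)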
monarch-initiative/dipper
dipper/sources/ZFIN.py
ZFIN._process_stages
def _process_stages(self, limit=None): """ This table provides mappings between ZFIN stage IDs and ZFS terms, and includes the starting and ending hours for the developmental stage. Currently only processing the mapping from the ZFIN stage ID to the ZFS ID. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing stages") line_counter = 0 raw = '/'.join((self.rawdir, self.files['stage']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (stage_id, stage_obo_id, stage_name, begin_hours, end_hours # ,empty # till next time ) = row # Add the stage as a class, and it's obo equivalent stage_id = 'ZFIN:' + stage_id.strip() model.addClassToGraph(stage_id, stage_name) model.addEquivalentClass(stage_id, stage_obo_id) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with stages") return
python
def _process_stages(self, limit=None): """ This table provides mappings between ZFIN stage IDs and ZFS terms, and includes the starting and ending hours for the developmental stage. Currently only processing the mapping from the ZFIN stage ID to the ZFS ID. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing stages") line_counter = 0 raw = '/'.join((self.rawdir, self.files['stage']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (stage_id, stage_obo_id, stage_name, begin_hours, end_hours # ,empty # till next time ) = row # Add the stage as a class, and it's obo equivalent stage_id = 'ZFIN:' + stage_id.strip() model.addClassToGraph(stage_id, stage_name) model.addEquivalentClass(stage_id, stage_obo_id) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with stages") return
[ "def", "_process_stages", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "LOG", ".", "info", "(", "\"Processing stages\"", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'stage'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "stage_id", ",", "stage_obo_id", ",", "stage_name", ",", "begin_hours", ",", "end_hours", "# ,empty # till next time", ")", "=", "row", "# Add the stage as a class, and it's obo equivalent", "stage_id", "=", "'ZFIN:'", "+", "stage_id", ".", "strip", "(", ")", "model", ".", "addClassToGraph", "(", "stage_id", ",", "stage_name", ")", "model", ".", "addEquivalentClass", "(", "stage_id", ",", "stage_obo_id", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with stages\"", ")", "return" ]
This table provides mappings between ZFIN stage IDs and ZFS terms, and includes the starting and ending hours for the developmental stage. Currently only processing the mapping from the ZFIN stage ID to the ZFS ID. :param limit: :return:
[ "This", "table", "provides", "mappings", "between", "ZFIN", "stage", "IDs", "and", "ZFS", "terms", "and", "includes", "the", "starting", "and", "ending", "hours", "for", "the", "developmental", "stage", ".", "Currently", "only", "processing", "the", "mapping", "from", "the", "ZFIN", "stage", "ID", "to", "the", "ZFS", "ID", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1174-L1211
train
251,223
monarch-initiative/dipper
dipper/sources/ZFIN.py
ZFIN._process_genes
def _process_genes(self, limit=None): """ This table provides the ZFIN gene id, the SO type of the gene, the gene symbol, and the NCBI Gene ID. Triples created: <gene id> a class <gene id> rdfs:label gene_symbol <gene id> equivalent class <ncbi_gene_id> :param limit: :return: """ LOG.info("Processing genes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, self.files['gene']['file'])) geno = Genotype(graph) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (gene_id, gene_so_id, gene_symbol, ncbi_gene_id # , empty # till next time ) = row if self.test_mode and gene_id not in self.test_ids['gene']: continue gene_id = 'ZFIN:' + gene_id.strip() ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id.strip() self.id_label_map[gene_id] = gene_symbol if not self.test_mode and limit is not None and line_counter > limit: pass else: geno.addGene(gene_id, gene_symbol) model.addEquivalentClass(gene_id, ncbi_gene_id) LOG.info("Done with genes") return
python
def _process_genes(self, limit=None): """ This table provides the ZFIN gene id, the SO type of the gene, the gene symbol, and the NCBI Gene ID. Triples created: <gene id> a class <gene id> rdfs:label gene_symbol <gene id> equivalent class <ncbi_gene_id> :param limit: :return: """ LOG.info("Processing genes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, self.files['gene']['file'])) geno = Genotype(graph) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (gene_id, gene_so_id, gene_symbol, ncbi_gene_id # , empty # till next time ) = row if self.test_mode and gene_id not in self.test_ids['gene']: continue gene_id = 'ZFIN:' + gene_id.strip() ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id.strip() self.id_label_map[gene_id] = gene_symbol if not self.test_mode and limit is not None and line_counter > limit: pass else: geno.addGene(gene_id, gene_symbol) model.addEquivalentClass(gene_id, ncbi_gene_id) LOG.info("Done with genes") return
[ "def", "_process_genes", "(", "self", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing genes\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'gene'", "]", "[", "'file'", "]", ")", ")", "geno", "=", "Genotype", "(", "graph", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "gene_id", ",", "gene_so_id", ",", "gene_symbol", ",", "ncbi_gene_id", "# , empty # till next time", ")", "=", "row", "if", "self", ".", "test_mode", "and", "gene_id", "not", "in", "self", ".", "test_ids", "[", "'gene'", "]", ":", "continue", "gene_id", "=", "'ZFIN:'", "+", "gene_id", ".", "strip", "(", ")", "ncbi_gene_id", "=", "'NCBIGene:'", "+", "ncbi_gene_id", ".", "strip", "(", ")", "self", ".", "id_label_map", "[", "gene_id", "]", "=", "gene_symbol", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "pass", "else", ":", "geno", ".", "addGene", "(", "gene_id", ",", "gene_symbol", ")", "model", ".", "addEquivalentClass", "(", "gene_id", ",", "ncbi_gene_id", ")", "LOG", ".", "info", "(", "\"Done with genes\"", ")", "return" ]
This table provides the ZFIN gene id, the SO type of the gene, the gene symbol, and the NCBI Gene ID. Triples created: <gene id> a class <gene id> rdfs:label gene_symbol <gene id> equivalent class <ncbi_gene_id> :param limit: :return:
[ "This", "table", "provides", "the", "ZFIN", "gene", "id", "the", "SO", "type", "of", "the", "gene", "the", "gene", "symbol", "and", "the", "NCBI", "Gene", "ID", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1390-L1437
train
251,224
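The per-row output of _process_genes is similarly small: a ZFIN gene class declared equivalent to an NCBIGene id. A hypothetical row illustrates the CURIE handling; all values are placeholders, and the geno/model calls are only named in comments.

# Invented example row: ZFIN gene id, SO type, gene symbol, NCBI Gene id.
row = ['ZDB-GENE-000000-1', 'SO:0000704', 'examplegene', '12345']

gene_id, gene_so_id, gene_symbol, ncbi_gene_id = row
gene_id = 'ZFIN:' + gene_id.strip()
ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id.strip()

# geno.addGene(gene_id, gene_symbol) would declare and label the gene class;
# model.addEquivalentClass(gene_id, ncbi_gene_id) would link the two identifiers.
print(gene_id, gene_symbol, ncbi_gene_id)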
monarch-initiative/dipper
dipper/sources/ZFIN.py
ZFIN._process_features
def _process_features(self, limit=None): """ This module provides information for the intrinsic and extrinsic genotype features of zebrafish. All items here are 'alterations', and are therefore instances. sequence alteration ID, SO type, abbreviation, and relationship to the affected gene, with the gene's ID, symbol, and SO type (gene/pseudogene). Triples created: <gene id> a class: :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing features") line_counter = 0 geno = Genotype(graph) raw = '/'.join((self.rawdir, self.files['features']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (genomic_feature_id, feature_so_id, genomic_feature_abbreviation, genomic_feature_name, genomic_feature_type, mutagen, mutagee, construct_id, construct_name, construct_so_id, talen_crispr_id, talen_crispr_nam # , empty ) = row if self.test_mode and ( genomic_feature_id not in self.test_ids['allele']): continue genomic_feature_id = 'ZFIN:' + genomic_feature_id.strip() model.addIndividualToGraph( genomic_feature_id, genomic_feature_name, feature_so_id) model.addSynonym( genomic_feature_id, genomic_feature_abbreviation) if construct_id is not None and construct_id != '': construct_id = 'ZFIN:' + construct_id.strip() geno.addConstruct( construct_id, construct_name, construct_so_id) geno.addSequenceDerivesFrom( genomic_feature_id, construct_id) # Note, we don't really care about how the variant was derived. # so we skip that. # add to the id-label map self.id_label_map[ genomic_feature_id] = genomic_feature_abbreviation self.id_label_map[construct_id] = construct_name if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with features") return
python
def _process_features(self, limit=None): """ This module provides information for the intrinsic and extrinsic genotype features of zebrafish. All items here are 'alterations', and are therefore instances. sequence alteration ID, SO type, abbreviation, and relationship to the affected gene, with the gene's ID, symbol, and SO type (gene/pseudogene). Triples created: <gene id> a class: :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing features") line_counter = 0 geno = Genotype(graph) raw = '/'.join((self.rawdir, self.files['features']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (genomic_feature_id, feature_so_id, genomic_feature_abbreviation, genomic_feature_name, genomic_feature_type, mutagen, mutagee, construct_id, construct_name, construct_so_id, talen_crispr_id, talen_crispr_nam # , empty ) = row if self.test_mode and ( genomic_feature_id not in self.test_ids['allele']): continue genomic_feature_id = 'ZFIN:' + genomic_feature_id.strip() model.addIndividualToGraph( genomic_feature_id, genomic_feature_name, feature_so_id) model.addSynonym( genomic_feature_id, genomic_feature_abbreviation) if construct_id is not None and construct_id != '': construct_id = 'ZFIN:' + construct_id.strip() geno.addConstruct( construct_id, construct_name, construct_so_id) geno.addSequenceDerivesFrom( genomic_feature_id, construct_id) # Note, we don't really care about how the variant was derived. # so we skip that. # add to the id-label map self.id_label_map[ genomic_feature_id] = genomic_feature_abbreviation self.id_label_map[construct_id] = construct_name if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with features") return
[ "def", "_process_features", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "LOG", ".", "info", "(", "\"Processing features\"", ")", "line_counter", "=", "0", "geno", "=", "Genotype", "(", "graph", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'features'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "genomic_feature_id", ",", "feature_so_id", ",", "genomic_feature_abbreviation", ",", "genomic_feature_name", ",", "genomic_feature_type", ",", "mutagen", ",", "mutagee", ",", "construct_id", ",", "construct_name", ",", "construct_so_id", ",", "talen_crispr_id", ",", "talen_crispr_nam", "# , empty", ")", "=", "row", "if", "self", ".", "test_mode", "and", "(", "genomic_feature_id", "not", "in", "self", ".", "test_ids", "[", "'allele'", "]", ")", ":", "continue", "genomic_feature_id", "=", "'ZFIN:'", "+", "genomic_feature_id", ".", "strip", "(", ")", "model", ".", "addIndividualToGraph", "(", "genomic_feature_id", ",", "genomic_feature_name", ",", "feature_so_id", ")", "model", ".", "addSynonym", "(", "genomic_feature_id", ",", "genomic_feature_abbreviation", ")", "if", "construct_id", "is", "not", "None", "and", "construct_id", "!=", "''", ":", "construct_id", "=", "'ZFIN:'", "+", "construct_id", ".", "strip", "(", ")", "geno", ".", "addConstruct", "(", "construct_id", ",", "construct_name", ",", "construct_so_id", ")", "geno", ".", "addSequenceDerivesFrom", "(", "genomic_feature_id", ",", "construct_id", ")", "# Note, we don't really care about how the variant was derived.", "# so we skip that.", "# add to the id-label map", "self", ".", "id_label_map", "[", "genomic_feature_id", "]", "=", "genomic_feature_abbreviation", "self", ".", "id_label_map", "[", "construct_id", "]", "=", "construct_name", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with features\"", ")", "return" ]
This module provides information for the intrinsic and extrinsic genotype features of zebrafish. All items here are 'alterations', and are therefore instances. sequence alteration ID, SO type, abbreviation, and relationship to the affected gene, with the gene's ID, symbol, and SO type (gene/pseudogene). Triples created: <gene id> a class: :param limit: :return:
[ "This", "module", "provides", "information", "for", "the", "intrinsic", "and", "extrinsic", "genotype", "features", "of", "zebrafish", ".", "All", "items", "here", "are", "alterations", "and", "are", "therefore", "instances", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1439-L1505
train
251,225
monarch-initiative/dipper
dipper/sources/ZFIN.py
ZFIN._process_pubinfo
def _process_pubinfo(self, limit=None): """ This will pull the zfin internal publication information, and map them to their equivalent pmid, and make labels. Triples created: <pub_id> is an individual <pub_id> rdfs:label <pub_label> <pubmed_id> is an individual <pubmed_id> rdfs:label <pub_label> <pub_id> sameIndividual <pubmed_id> :param limit: :return: """ line_counter = 0 if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, self.files['pubs']['file'])) with open(raw, 'r', encoding="latin-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 try: (pub_id, pubmed_id, authors, title, journal, year, vol, pages) = row except ValueError: try: (pub_id, pubmed_id, authors, title, journal, year, vol, pages # , empty ) = row except ValueError: LOG.warning("Error parsing row %s: ", row) if self.test_mode and ( 'ZFIN:' + pub_id not in self.test_ids['pub'] and 'PMID:' + pubmed_id not in self.test_ids['pub']): continue pub_id = 'ZFIN:' + pub_id.strip() # trim the author list for ease of reading alist = re.split(r',', authors) if len(alist) > 1: astring = ' '.join((alist[0].strip(), 'et al')) else: astring = authors pub_label = '; '.join((astring, title, journal, year, vol, pages)) ref = Reference(graph, pub_id) ref.setShortCitation(pub_label) ref.setYear(year) ref.setTitle(title) if pubmed_id is not None and pubmed_id != '': # let's make an assumption that if there's a pubmed id, # that it is a journal article ref.setType(self.globaltt['journal article']) pubmed_id = 'PMID:' + pubmed_id.strip() rpm = Reference(graph, pubmed_id, self.globaltt['journal article']) rpm.addRefToGraph() model.addSameIndividual(pub_id, pubmed_id) model.makeLeader(pubmed_id) ref.addRefToGraph() if not self.test_mode and limit is not None and line_counter > limit: break return
python
def _process_pubinfo(self, limit=None): """ This will pull the zfin internal publication information, and map them to their equivalent pmid, and make labels. Triples created: <pub_id> is an individual <pub_id> rdfs:label <pub_label> <pubmed_id> is an individual <pubmed_id> rdfs:label <pub_label> <pub_id> sameIndividual <pubmed_id> :param limit: :return: """ line_counter = 0 if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, self.files['pubs']['file'])) with open(raw, 'r', encoding="latin-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 try: (pub_id, pubmed_id, authors, title, journal, year, vol, pages) = row except ValueError: try: (pub_id, pubmed_id, authors, title, journal, year, vol, pages # , empty ) = row except ValueError: LOG.warning("Error parsing row %s: ", row) if self.test_mode and ( 'ZFIN:' + pub_id not in self.test_ids['pub'] and 'PMID:' + pubmed_id not in self.test_ids['pub']): continue pub_id = 'ZFIN:' + pub_id.strip() # trim the author list for ease of reading alist = re.split(r',', authors) if len(alist) > 1: astring = ' '.join((alist[0].strip(), 'et al')) else: astring = authors pub_label = '; '.join((astring, title, journal, year, vol, pages)) ref = Reference(graph, pub_id) ref.setShortCitation(pub_label) ref.setYear(year) ref.setTitle(title) if pubmed_id is not None and pubmed_id != '': # let's make an assumption that if there's a pubmed id, # that it is a journal article ref.setType(self.globaltt['journal article']) pubmed_id = 'PMID:' + pubmed_id.strip() rpm = Reference(graph, pubmed_id, self.globaltt['journal article']) rpm.addRefToGraph() model.addSameIndividual(pub_id, pubmed_id) model.makeLeader(pubmed_id) ref.addRefToGraph() if not self.test_mode and limit is not None and line_counter > limit: break return
[ "def", "_process_pubinfo", "(", "self", ",", "limit", "=", "None", ")", ":", "line_counter", "=", "0", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'pubs'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"latin-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "try", ":", "(", "pub_id", ",", "pubmed_id", ",", "authors", ",", "title", ",", "journal", ",", "year", ",", "vol", ",", "pages", ")", "=", "row", "except", "ValueError", ":", "try", ":", "(", "pub_id", ",", "pubmed_id", ",", "authors", ",", "title", ",", "journal", ",", "year", ",", "vol", ",", "pages", "# , empty", ")", "=", "row", "except", "ValueError", ":", "LOG", ".", "warning", "(", "\"Error parsing row %s: \"", ",", "row", ")", "if", "self", ".", "test_mode", "and", "(", "'ZFIN:'", "+", "pub_id", "not", "in", "self", ".", "test_ids", "[", "'pub'", "]", "and", "'PMID:'", "+", "pubmed_id", "not", "in", "self", ".", "test_ids", "[", "'pub'", "]", ")", ":", "continue", "pub_id", "=", "'ZFIN:'", "+", "pub_id", ".", "strip", "(", ")", "# trim the author list for ease of reading", "alist", "=", "re", ".", "split", "(", "r','", ",", "authors", ")", "if", "len", "(", "alist", ")", ">", "1", ":", "astring", "=", "' '", ".", "join", "(", "(", "alist", "[", "0", "]", ".", "strip", "(", ")", ",", "'et al'", ")", ")", "else", ":", "astring", "=", "authors", "pub_label", "=", "'; '", ".", "join", "(", "(", "astring", ",", "title", ",", "journal", ",", "year", ",", "vol", ",", "pages", ")", ")", "ref", "=", "Reference", "(", "graph", ",", "pub_id", ")", "ref", ".", "setShortCitation", "(", "pub_label", ")", "ref", ".", "setYear", "(", "year", ")", "ref", ".", "setTitle", "(", "title", ")", "if", "pubmed_id", "is", "not", "None", "and", "pubmed_id", "!=", "''", ":", "# let's make an assumption that if there's a pubmed id,", "# that it is a journal article", "ref", ".", "setType", "(", "self", ".", "globaltt", "[", "'journal article'", "]", ")", "pubmed_id", "=", "'PMID:'", "+", "pubmed_id", ".", "strip", "(", ")", "rpm", "=", "Reference", "(", "graph", ",", "pubmed_id", ",", "self", ".", "globaltt", "[", "'journal article'", "]", ")", "rpm", ".", "addRefToGraph", "(", ")", "model", ".", "addSameIndividual", "(", "pub_id", ",", "pubmed_id", ")", "model", ".", "makeLeader", "(", "pubmed_id", ")", "ref", ".", "addRefToGraph", "(", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "return" ]
This will pull the zfin internal publication information, and map them to their equivalent pmid, and make labels. Triples created: <pub_id> is an individual <pub_id> rdfs:label <pub_label> <pubmed_id> is an individual <pubmed_id> rdfs:label <pub_label> <pub_id> sameIndividual <pubmed_id> :param limit: :return:
[ "This", "will", "pull", "the", "zfin", "internal", "publication", "information", "and", "map", "them", "to", "their", "equivalent", "pmid", "and", "make", "labels", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1775-L1851
train
251,226
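A minimal usage sketch of the short-citation construction in the ZFIN._process_pubinfo record above, assuming the same comma-delimited author field; the helper name make_short_citation and the sample values are illustrative only.

import re

def make_short_citation(authors, title, journal, year, vol, pages):
    # keep only the first author, followed by 'et al', when several authors are listed
    alist = re.split(r',', authors)
    if len(alist) > 1:
        astring = ' '.join((alist[0].strip(), 'et al'))
    else:
        astring = authors
    # the label joins the same fields the record's code joins
    return '; '.join((astring, title, journal, year, vol, pages))

# make_short_citation('Smith J., Jones K.', 'A zebrafish study', 'Dev Biol', '2010', '12', '1-10')
# -> 'Smith J. et al; A zebrafish study; Dev Biol; 2010; 12; 1-10'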
monarch-initiative/dipper
dipper/sources/ZFIN.py
ZFIN._process_pub2pubmed
def _process_pub2pubmed(self, limit=None): """ This will pull the zfin internal publication to pubmed mappings. Somewhat redundant with the process_pubinfo method, but this includes additional mappings. <pub_id> is an individual <pub_id> rdfs:label <pub_label> <pubmed_id> is an individual <pubmed_id> rdfs:label <pub_label> <pub_id> sameIndividual <pubmed_id> :param limit: :return: """ line_counter = 0 if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, self.files['pub2pubmed']['file'])) with open(raw, 'r', encoding="latin-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (pub_id, pubmed_id # , empty ) = row if self.test_mode and ( 'ZFIN:' + pub_id not in self.test_ids['pub'] and 'PMID:' + pubmed_id not in self.test_ids['pub']): continue pub_id = 'ZFIN:' + pub_id.strip() rtype = None if pubmed_id != '' and pubmed_id is not None: pubmed_id = 'PMID:' + pubmed_id.strip() rtype = self.globaltt['journal article'] rpm = Reference(graph, pubmed_id, rtype) rpm.addRefToGraph() model.addSameIndividual(pub_id, pubmed_id) ref = Reference(graph, pub_id, rtype) ref.addRefToGraph() if not self.test_mode and limit is not None and line_counter > limit: break return
python
def _process_pub2pubmed(self, limit=None): """ This will pull the zfin internal publication to pubmed mappings. Somewhat redundant with the process_pubinfo method, but this includes additional mappings. <pub_id> is an individual <pub_id> rdfs:label <pub_label> <pubmed_id> is an individual <pubmed_id> rdfs:label <pub_label> <pub_id> sameIndividual <pubmed_id> :param limit: :return: """ line_counter = 0 if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, self.files['pub2pubmed']['file'])) with open(raw, 'r', encoding="latin-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (pub_id, pubmed_id # , empty ) = row if self.test_mode and ( 'ZFIN:' + pub_id not in self.test_ids['pub'] and 'PMID:' + pubmed_id not in self.test_ids['pub']): continue pub_id = 'ZFIN:' + pub_id.strip() rtype = None if pubmed_id != '' and pubmed_id is not None: pubmed_id = 'PMID:' + pubmed_id.strip() rtype = self.globaltt['journal article'] rpm = Reference(graph, pubmed_id, rtype) rpm.addRefToGraph() model.addSameIndividual(pub_id, pubmed_id) ref = Reference(graph, pub_id, rtype) ref.addRefToGraph() if not self.test_mode and limit is not None and line_counter > limit: break return
[ "def", "_process_pub2pubmed", "(", "self", ",", "limit", "=", "None", ")", ":", "line_counter", "=", "0", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'pub2pubmed'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"latin-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "pub_id", ",", "pubmed_id", "# , empty", ")", "=", "row", "if", "self", ".", "test_mode", "and", "(", "'ZFIN:'", "+", "pub_id", "not", "in", "self", ".", "test_ids", "[", "'pub'", "]", "and", "'PMID:'", "+", "pubmed_id", "not", "in", "self", ".", "test_ids", "[", "'pub'", "]", ")", ":", "continue", "pub_id", "=", "'ZFIN:'", "+", "pub_id", ".", "strip", "(", ")", "rtype", "=", "None", "if", "pubmed_id", "!=", "''", "and", "pubmed_id", "is", "not", "None", ":", "pubmed_id", "=", "'PMID:'", "+", "pubmed_id", ".", "strip", "(", ")", "rtype", "=", "self", ".", "globaltt", "[", "'journal article'", "]", "rpm", "=", "Reference", "(", "graph", ",", "pubmed_id", ",", "rtype", ")", "rpm", ".", "addRefToGraph", "(", ")", "model", ".", "addSameIndividual", "(", "pub_id", ",", "pubmed_id", ")", "ref", "=", "Reference", "(", "graph", ",", "pub_id", ",", "rtype", ")", "ref", ".", "addRefToGraph", "(", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "return" ]
This will pull the zfin internal publication to pubmed mappings. Somewhat redundant with the process_pubinfo method, but this includes additional mappings. <pub_id> is an individual <pub_id> rdfs:label <pub_label> <pubmed_id> is an individual <pubmed_id> rdfs:label <pub_label> <pub_id> sameIndividual <pubmed_id> :param limit: :return:
[ "This", "will", "pull", "the", "zfin", "internal", "publication", "to", "pubmed", "mappings", ".", "Somewhat", "redundant", "with", "the", "process_pubinfo", "method", "but", "this", "includes", "additional", "mappings", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1853-L1901
train
251,227
monarch-initiative/dipper
dipper/sources/ZFIN.py
ZFIN._process_targeting_reagents
def _process_targeting_reagents(self, reagent_type, limit=None): """ This method processes the gene targeting knockdown reagents, such as morpholinos, talens, and crisprs. We create triples for the reagents and pass the data into a hash map for use in the pheno_enviro method. Morpholinos work similar to RNAi. TALENs are artificial restriction enzymes that can be used for genome editing in situ. CRISPRs are knockdown reagents, working similar to RNAi but at the transcriptional level instead of mRNA level. You can read more about TALEN and CRISPR techniques in review [Gaj et al] http://www.cell.com/trends/biotechnology/abstract/S0167-7799%2813%2900087-5 TODO add sequences Triples created: <reagent_id> is a gene_targeting_reagent <reagent_id> rdfs:label <reagent_symbol> <reagent_id> has type <reagent_so_id> <reagent_id> has comment <note> <publication_id> is an individual <publication_id> mentions <morpholino_id> :param reagent_type: should be one of: morph, talen, crispr :param limit: :return: """ LOG.info("Processing Gene Targeting Reagents") if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 model = Model(graph) geno = Genotype(graph) if reagent_type not in ['morph', 'talen', 'crispr']: LOG.error("You didn't specify the right kind of file type.") return raw = '/'.join((self.rawdir, self.files[reagent_type]['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 if reagent_type in ['morph', 'crispr']: try: (gene_num, gene_so_id, gene_symbol, reagent_num, reagent_so_id, reagent_symbol, reagent_sequence, publication, note) = row except ValueError: # Catch lines without publication or note (gene_num, gene_so_id, gene_symbol, reagent_num, reagent_so_id, reagent_symbol, reagent_sequence, publication) = row elif reagent_type == 'talen': (gene_num, gene_so_id, gene_symbol, reagent_num, reagent_so_id, reagent_symbol, reagent_sequence, reagent_sequence2, publication, note) = row else: # should not get here return reagent_id = 'ZFIN:' + reagent_num.strip() gene_id = 'ZFIN:' + gene_num.strip() self.id_label_map[reagent_id] = reagent_symbol if self.test_mode and ( reagent_num not in self.test_ids['morpholino'] and gene_num not in self.test_ids['gene']): continue geno.addGeneTargetingReagent(reagent_id, reagent_symbol, reagent_so_id, gene_id) # The reagent targeted gene is added # in the pheno_environment processing function. # Add publication # note that the publications can be comma-delimited, # like: ZDB-PUB-100719-4,ZDB-PUB-130703-22 if publication != '': pubs = re.split(r',', publication.strip()) for pub in pubs: pub_id = 'ZFIN:' + pub.strip() ref = Reference(graph, pub_id) ref.addRefToGraph() graph.addTriple(pub_id, self.globaltt['mentions'], reagent_id) # Add comment? if note != '': model.addComment(reagent_id, note) # use the variant hash for reagents to list the affected genes if reagent_id not in self.variant_loci_genes: self.variant_loci_genes[reagent_id] = [gene_id] else: if gene_id not in self.variant_loci_genes[reagent_id]: self.variant_loci_genes[reagent_id] += [gene_id] if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with Reagent type %s", reagent_type) return
python
def _process_targeting_reagents(self, reagent_type, limit=None): """ This method processes the gene targeting knockdown reagents, such as morpholinos, talens, and crisprs. We create triples for the reagents and pass the data into a hash map for use in the pheno_enviro method. Morpholinos work similar to RNAi. TALENs are artificial restriction enzymes that can be used for genome editing in situ. CRISPRs are knockdown reagents, working similar to RNAi but at the transcriptional level instead of mRNA level. You can read more about TALEN and CRISPR techniques in review [Gaj et al] http://www.cell.com/trends/biotechnology/abstract/S0167-7799%2813%2900087-5 TODO add sequences Triples created: <reagent_id> is a gene_targeting_reagent <reagent_id> rdfs:label <reagent_symbol> <reagent_id> has type <reagent_so_id> <reagent_id> has comment <note> <publication_id> is an individual <publication_id> mentions <morpholino_id> :param reagent_type: should be one of: morph, talen, crispr :param limit: :return: """ LOG.info("Processing Gene Targeting Reagents") if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 model = Model(graph) geno = Genotype(graph) if reagent_type not in ['morph', 'talen', 'crispr']: LOG.error("You didn't specify the right kind of file type.") return raw = '/'.join((self.rawdir, self.files[reagent_type]['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 if reagent_type in ['morph', 'crispr']: try: (gene_num, gene_so_id, gene_symbol, reagent_num, reagent_so_id, reagent_symbol, reagent_sequence, publication, note) = row except ValueError: # Catch lines without publication or note (gene_num, gene_so_id, gene_symbol, reagent_num, reagent_so_id, reagent_symbol, reagent_sequence, publication) = row elif reagent_type == 'talen': (gene_num, gene_so_id, gene_symbol, reagent_num, reagent_so_id, reagent_symbol, reagent_sequence, reagent_sequence2, publication, note) = row else: # should not get here return reagent_id = 'ZFIN:' + reagent_num.strip() gene_id = 'ZFIN:' + gene_num.strip() self.id_label_map[reagent_id] = reagent_symbol if self.test_mode and ( reagent_num not in self.test_ids['morpholino'] and gene_num not in self.test_ids['gene']): continue geno.addGeneTargetingReagent(reagent_id, reagent_symbol, reagent_so_id, gene_id) # The reagent targeted gene is added # in the pheno_environment processing function. # Add publication # note that the publications can be comma-delimited, # like: ZDB-PUB-100719-4,ZDB-PUB-130703-22 if publication != '': pubs = re.split(r',', publication.strip()) for pub in pubs: pub_id = 'ZFIN:' + pub.strip() ref = Reference(graph, pub_id) ref.addRefToGraph() graph.addTriple(pub_id, self.globaltt['mentions'], reagent_id) # Add comment? if note != '': model.addComment(reagent_id, note) # use the variant hash for reagents to list the affected genes if reagent_id not in self.variant_loci_genes: self.variant_loci_genes[reagent_id] = [gene_id] else: if gene_id not in self.variant_loci_genes[reagent_id]: self.variant_loci_genes[reagent_id] += [gene_id] if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with Reagent type %s", reagent_type) return
[ "def", "_process_targeting_reagents", "(", "self", ",", "reagent_type", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing Gene Targeting Reagents\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "line_counter", "=", "0", "model", "=", "Model", "(", "graph", ")", "geno", "=", "Genotype", "(", "graph", ")", "if", "reagent_type", "not", "in", "[", "'morph'", ",", "'talen'", ",", "'crispr'", "]", ":", "LOG", ".", "error", "(", "\"You didn't specify the right kind of file type.\"", ")", "return", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "reagent_type", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "if", "reagent_type", "in", "[", "'morph'", ",", "'crispr'", "]", ":", "try", ":", "(", "gene_num", ",", "gene_so_id", ",", "gene_symbol", ",", "reagent_num", ",", "reagent_so_id", ",", "reagent_symbol", ",", "reagent_sequence", ",", "publication", ",", "note", ")", "=", "row", "except", "ValueError", ":", "# Catch lines without publication or note", "(", "gene_num", ",", "gene_so_id", ",", "gene_symbol", ",", "reagent_num", ",", "reagent_so_id", ",", "reagent_symbol", ",", "reagent_sequence", ",", "publication", ")", "=", "row", "elif", "reagent_type", "==", "'talen'", ":", "(", "gene_num", ",", "gene_so_id", ",", "gene_symbol", ",", "reagent_num", ",", "reagent_so_id", ",", "reagent_symbol", ",", "reagent_sequence", ",", "reagent_sequence2", ",", "publication", ",", "note", ")", "=", "row", "else", ":", "# should not get here", "return", "reagent_id", "=", "'ZFIN:'", "+", "reagent_num", ".", "strip", "(", ")", "gene_id", "=", "'ZFIN:'", "+", "gene_num", ".", "strip", "(", ")", "self", ".", "id_label_map", "[", "reagent_id", "]", "=", "reagent_symbol", "if", "self", ".", "test_mode", "and", "(", "reagent_num", "not", "in", "self", ".", "test_ids", "[", "'morpholino'", "]", "and", "gene_num", "not", "in", "self", ".", "test_ids", "[", "'gene'", "]", ")", ":", "continue", "geno", ".", "addGeneTargetingReagent", "(", "reagent_id", ",", "reagent_symbol", ",", "reagent_so_id", ",", "gene_id", ")", "# The reagent targeted gene is added", "# in the pheno_environment processing function.", "# Add publication", "# note that the publications can be comma-delimited,", "# like: ZDB-PUB-100719-4,ZDB-PUB-130703-22", "if", "publication", "!=", "''", ":", "pubs", "=", "re", ".", "split", "(", "r','", ",", "publication", ".", "strip", "(", ")", ")", "for", "pub", "in", "pubs", ":", "pub_id", "=", "'ZFIN:'", "+", "pub", ".", "strip", "(", ")", "ref", "=", "Reference", "(", "graph", ",", "pub_id", ")", "ref", ".", "addRefToGraph", "(", ")", "graph", ".", "addTriple", "(", "pub_id", ",", "self", ".", "globaltt", "[", "'mentions'", "]", ",", "reagent_id", ")", "# Add comment?", "if", "note", "!=", "''", ":", "model", ".", "addComment", "(", "reagent_id", ",", "note", ")", "# use the variant hash for reagents to list the affected genes", "if", "reagent_id", "not", "in", "self", ".", "variant_loci_genes", ":", "self", ".", "variant_loci_genes", "[", "reagent_id", "]", "=", "[", "gene_id", "]", "else", ":", "if", "gene_id", "not", "in", "self", ".", "variant_loci_genes", 
"[", "reagent_id", "]", ":", "self", ".", "variant_loci_genes", "[", "reagent_id", "]", "+=", "[", "gene_id", "]", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with Reagent type %s\"", ",", "reagent_type", ")", "return" ]
This method processes the gene targeting knockdown reagents, such as morpholinos, talens, and crisprs. We create triples for the reagents and pass the data into a hash map for use in the pheno_enviro method. Morpholinos work similarly to RNAi. TALENs are artificial restriction enzymes that can be used for genome editing in situ. CRISPRs are knockdown reagents, working similarly to RNAi but at the transcriptional level instead of mRNA level. You can read more about TALEN and CRISPR techniques in the review by Gaj et al: http://www.cell.com/trends/biotechnology/abstract/S0167-7799%2813%2900087-5 TODO add sequences Triples created: <reagent_id> is a gene_targeting_reagent <reagent_id> rdfs:label <reagent_symbol> <reagent_id> has type <reagent_so_id> <reagent_id> has comment <note> <publication_id> is an individual <publication_id> mentions <morpholino_id> :param reagent_type: should be one of: morph, talen, crispr :param limit: :return:
[ "This", "method", "processes", "the", "gene", "targeting", "knockdown", "reagents", "such", "as", "morpholinos", "talens", "and", "crisprs", ".", "We", "create", "triples", "for", "the", "reagents", "and", "pass", "the", "data", "into", "a", "hash", "map", "for", "use", "in", "the", "pheno_enviro", "method", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1903-L2014
train
251,228
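A small sketch of the comma-delimited publication handling noted in the ZFIN._process_targeting_reagents record above (e.g. ZDB-PUB-100719-4,ZDB-PUB-130703-22); the function name expand_publication_ids is hypothetical.

import re

def expand_publication_ids(publication):
    # ZFIN can pack several internal publication ids into one comma-delimited cell;
    # an empty cell yields no references
    if publication.strip() == '':
        return []
    return ['ZFIN:' + pub.strip() for pub in re.split(r',', publication.strip())]

# expand_publication_ids('ZDB-PUB-100719-4,ZDB-PUB-130703-22')
# -> ['ZFIN:ZDB-PUB-100719-4', 'ZFIN:ZDB-PUB-130703-22']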
monarch-initiative/dipper
dipper/sources/ZFIN.py
ZFIN._process_uniprot_ids
def _process_uniprot_ids(self, limit=None): """ This method processes the mappings from ZFIN gene IDs to UniProtKB IDs. Triples created: <zfin_gene_id> a class <zfin_gene_id> rdfs:label gene_symbol <uniprot_id> is an Individual <uniprot_id> has type <polypeptide> <zfin_gene_id> has_gene_product <uniprot_id> :param limit: :return: """ LOG.info("Processing UniProt IDs") if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 model = Model(graph) geno = Genotype(graph) raw = '/'.join((self.rawdir, self.files['uniprot']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (gene_id, gene_so_id, gene_symbol, uniprot_id # , empty ) = row if self.test_mode and gene_id not in self.test_ids['gene']: continue gene_id = 'ZFIN:' + gene_id.strip() uniprot_id = 'UniProtKB:' + uniprot_id.strip() geno.addGene(gene_id, gene_symbol) # TODO: Abstract to one of the model utilities model.addIndividualToGraph( uniprot_id, None, self.globaltt['polypeptide']) graph.addTriple( gene_id, self.globaltt['has gene product'], uniprot_id) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with UniProt IDs") return
python
def _process_uniprot_ids(self, limit=None): """ This method processes the mappings from ZFIN gene IDs to UniProtKB IDs. Triples created: <zfin_gene_id> a class <zfin_gene_id> rdfs:label gene_symbol <uniprot_id> is an Individual <uniprot_id> has type <polypeptide> <zfin_gene_id> has_gene_product <uniprot_id> :param limit: :return: """ LOG.info("Processing UniProt IDs") if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 model = Model(graph) geno = Genotype(graph) raw = '/'.join((self.rawdir, self.files['uniprot']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (gene_id, gene_so_id, gene_symbol, uniprot_id # , empty ) = row if self.test_mode and gene_id not in self.test_ids['gene']: continue gene_id = 'ZFIN:' + gene_id.strip() uniprot_id = 'UniProtKB:' + uniprot_id.strip() geno.addGene(gene_id, gene_symbol) # TODO: Abstract to one of the model utilities model.addIndividualToGraph( uniprot_id, None, self.globaltt['polypeptide']) graph.addTriple( gene_id, self.globaltt['has gene product'], uniprot_id) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with UniProt IDs") return
[ "def", "_process_uniprot_ids", "(", "self", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing UniProt IDs\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "line_counter", "=", "0", "model", "=", "Model", "(", "graph", ")", "geno", "=", "Genotype", "(", "graph", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'uniprot'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "gene_id", ",", "gene_so_id", ",", "gene_symbol", ",", "uniprot_id", "# , empty", ")", "=", "row", "if", "self", ".", "test_mode", "and", "gene_id", "not", "in", "self", ".", "test_ids", "[", "'gene'", "]", ":", "continue", "gene_id", "=", "'ZFIN:'", "+", "gene_id", ".", "strip", "(", ")", "uniprot_id", "=", "'UniProtKB:'", "+", "uniprot_id", ".", "strip", "(", ")", "geno", ".", "addGene", "(", "gene_id", ",", "gene_symbol", ")", "# TODO: Abstract to one of the model utilities", "model", ".", "addIndividualToGraph", "(", "uniprot_id", ",", "None", ",", "self", ".", "globaltt", "[", "'polypeptide'", "]", ")", "graph", ".", "addTriple", "(", "gene_id", ",", "self", ".", "globaltt", "[", "'has gene product'", "]", ",", "uniprot_id", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with UniProt IDs\"", ")", "return" ]
This method processes the mappings from ZFIN gene IDs to UniProtKB IDs. Triples created: <zfin_gene_id> a class <zfin_gene_id> rdfs:label gene_symbol <uniprot_id> is an Individual <uniprot_id> has type <polypeptide> <zfin_gene_id> has_gene_product <uniprot_id> :param limit: :return:
[ "This", "method", "processes", "the", "mappings", "from", "ZFIN", "gene", "IDs", "to", "UniProtKB", "IDs", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L2235-L2287
train
251,229
monarch-initiative/dipper
dipper/sources/ZFIN.py
ZFIN.get_orthology_evidence_code
def get_orthology_evidence_code(self, abbrev): ''' move to localtt & globltt ''' # AA Amino acid sequence comparison. # CE Coincident expression. # CL Conserved genome location (synteny). # FC Functional complementation. # FH Formation of functional heteropolymers. # IX Immunological cross-reaction. # NS Not specified. # NT Nucleotide sequence comparison. # SI Similar response to inhibitors. # SL Similar subcellular location. # SS Similar substrate specificity. # SU Similar subunit structure. # XH Cross-hybridization to same molecular probe. # PT Phylogenetic Tree. # OT Other eco_abbrev_map = { 'AA': 'ECO:0000031', # BLAST protein sequence similarity evidence 'CE': 'ECO:0000008', # expression evidence 'CL': 'ECO:0000044', # sequence similarity FIXME 'FC': 'ECO:0000012', # functional complementation # functional complementation in a heterologous system 'FH': 'ECO:0000064', 'IX': 'ECO:0000040', # immunological assay evidence 'NS': None, 'NT': 'ECO:0000032', # nucleotide blast 'SI': 'ECO:0000094', # biological assay evidence FIXME 'SL': 'ECO:0000122', # protein localization evidence FIXME 'SS': 'ECO:0000024', # protein binding evidence FIXME 'SU': 'ECO:0000027', # structural similarity evidence 'XH': 'ECO:0000002', # direct assay evidence FIXME 'PT': 'ECO:0000080', # phylogenetic evidence 'OT': None, } if abbrev not in eco_abbrev_map: LOG.warning("Evidence code for orthology (%s) not mapped", str(abbrev)) return eco_abbrev_map.get(abbrev)
python
def get_orthology_evidence_code(self, abbrev): ''' move to localtt & globltt ''' # AA Amino acid sequence comparison. # CE Coincident expression. # CL Conserved genome location (synteny). # FC Functional complementation. # FH Formation of functional heteropolymers. # IX Immunological cross-reaction. # NS Not specified. # NT Nucleotide sequence comparison. # SI Similar response to inhibitors. # SL Similar subcellular location. # SS Similar substrate specificity. # SU Similar subunit structure. # XH Cross-hybridization to same molecular probe. # PT Phylogenetic Tree. # OT Other eco_abbrev_map = { 'AA': 'ECO:0000031', # BLAST protein sequence similarity evidence 'CE': 'ECO:0000008', # expression evidence 'CL': 'ECO:0000044', # sequence similarity FIXME 'FC': 'ECO:0000012', # functional complementation # functional complementation in a heterologous system 'FH': 'ECO:0000064', 'IX': 'ECO:0000040', # immunological assay evidence 'NS': None, 'NT': 'ECO:0000032', # nucleotide blast 'SI': 'ECO:0000094', # biological assay evidence FIXME 'SL': 'ECO:0000122', # protein localization evidence FIXME 'SS': 'ECO:0000024', # protein binding evidence FIXME 'SU': 'ECO:0000027', # structural similarity evidence 'XH': 'ECO:0000002', # direct assay evidence FIXME 'PT': 'ECO:0000080', # phylogenetic evidence 'OT': None, } if abbrev not in eco_abbrev_map: LOG.warning("Evidence code for orthology (%s) not mapped", str(abbrev)) return eco_abbrev_map.get(abbrev)
[ "def", "get_orthology_evidence_code", "(", "self", ",", "abbrev", ")", ":", "# AA\tAmino acid sequence comparison.", "# CE\tCoincident expression.", "# CL\tConserved genome location (synteny).", "# FC\tFunctional complementation.", "# FH\tFormation of functional heteropolymers.", "# IX\tImmunological cross-reaction.", "# NS\tNot specified.", "# NT\tNucleotide sequence comparison.", "# SI\tSimilar response to inhibitors.", "# SL\tSimilar subcellular location.", "# SS\tSimilar substrate specificity.", "# SU\tSimilar subunit structure.", "# XH\tCross-hybridization to same molecular probe.", "# PT\tPhylogenetic Tree.", "# OT Other", "eco_abbrev_map", "=", "{", "'AA'", ":", "'ECO:0000031'", ",", "# BLAST protein sequence similarity evidence", "'CE'", ":", "'ECO:0000008'", ",", "# expression evidence", "'CL'", ":", "'ECO:0000044'", ",", "# sequence similarity FIXME", "'FC'", ":", "'ECO:0000012'", ",", "# functional complementation", "# functional complementation in a heterologous system", "'FH'", ":", "'ECO:0000064'", ",", "'IX'", ":", "'ECO:0000040'", ",", "# immunological assay evidence", "'NS'", ":", "None", ",", "'NT'", ":", "'ECO:0000032'", ",", "# nucleotide blast", "'SI'", ":", "'ECO:0000094'", ",", "# biological assay evidence FIXME", "'SL'", ":", "'ECO:0000122'", ",", "# protein localization evidence FIXME", "'SS'", ":", "'ECO:0000024'", ",", "# protein binding evidence FIXME", "'SU'", ":", "'ECO:0000027'", ",", "# structural similarity evidence", "'XH'", ":", "'ECO:0000002'", ",", "# direct assay evidence FIXME", "'PT'", ":", "'ECO:0000080'", ",", "# phylogenetic evidence", "'OT'", ":", "None", ",", "}", "if", "abbrev", "not", "in", "eco_abbrev_map", ":", "LOG", ".", "warning", "(", "\"Evidence code for orthology (%s) not mapped\"", ",", "str", "(", "abbrev", ")", ")", "return", "eco_abbrev_map", ".", "get", "(", "abbrev", ")" ]
move to localtt & globaltt
[ "move", "to", "localtt", "&", "globltt" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L2798-L2840
train
251,230
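A brief usage sketch for the orthology evidence-code mapping above; the trimmed-down ECO_ABBREV_MAP here only illustrates the lookup-with-warning behaviour and is not the full table from the record.

import logging

LOG = logging.getLogger(__name__)

ECO_ABBREV_MAP = {
    'AA': 'ECO:0000031',  # protein sequence similarity evidence
    'NT': 'ECO:0000032',  # nucleotide sequence comparison
    'PT': 'ECO:0000080',  # phylogenetic evidence
    'NS': None,           # not specified
}

def map_orthology_evidence(abbrev):
    # unknown abbreviations are logged and fall through to None via dict.get()
    if abbrev not in ECO_ABBREV_MAP:
        LOG.warning("Evidence code for orthology (%s) not mapped", abbrev)
    return ECO_ABBREV_MAP.get(abbrev)

# map_orthology_evidence('AA') -> 'ECO:0000031'
# map_orthology_evidence('ZZ') -> None, after logging a warning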
monarch-initiative/dipper
dipper/sources/KEGG.py
KEGG._process_diseases
def _process_diseases(self, limit=None): """ This method processes the KEGG disease IDs. Triples created: <disease_id> is a class <disease_id> rdfs:label <disease_name> :param limit: :return: """ LOG.info("Processing diseases") if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 model = Model(graph) raw = '/'.join((self.rawdir, self.files['disease']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (disease_id, disease_name) = row disease_id = 'KEGG-'+disease_id.strip() if disease_id not in self.label_hash: self.label_hash[disease_id] = disease_name if self.test_mode and disease_id not in self.test_ids['disease']: continue # Add the disease as a class. # we don't get all of these from MONDO yet see: # https://github.com/monarch-initiative/human-disease-ontology/issues/3 model.addClassToGraph(disease_id, disease_name) # not typing the diseases as DOID:4 yet because # I don't want to bulk up the graph unnecessarily if not self.test_mode and ( limit is not None and line_counter > limit): break LOG.info("Done with diseases") return
python
def _process_diseases(self, limit=None): """ This method processes the KEGG disease IDs. Triples created: <disease_id> is a class <disease_id> rdfs:label <disease_name> :param limit: :return: """ LOG.info("Processing diseases") if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 model = Model(graph) raw = '/'.join((self.rawdir, self.files['disease']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (disease_id, disease_name) = row disease_id = 'KEGG-'+disease_id.strip() if disease_id not in self.label_hash: self.label_hash[disease_id] = disease_name if self.test_mode and disease_id not in self.test_ids['disease']: continue # Add the disease as a class. # we don't get all of these from MONDO yet see: # https://github.com/monarch-initiative/human-disease-ontology/issues/3 model.addClassToGraph(disease_id, disease_name) # not typing the diseases as DOID:4 yet because # I don't want to bulk up the graph unnecessarily if not self.test_mode and ( limit is not None and line_counter > limit): break LOG.info("Done with diseases") return
[ "def", "_process_diseases", "(", "self", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing diseases\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "line_counter", "=", "0", "model", "=", "Model", "(", "graph", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'disease'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "disease_id", ",", "disease_name", ")", "=", "row", "disease_id", "=", "'KEGG-'", "+", "disease_id", ".", "strip", "(", ")", "if", "disease_id", "not", "in", "self", ".", "label_hash", ":", "self", ".", "label_hash", "[", "disease_id", "]", "=", "disease_name", "if", "self", ".", "test_mode", "and", "disease_id", "not", "in", "self", ".", "test_ids", "[", "'disease'", "]", ":", "continue", "# Add the disease as a class.", "# we don't get all of these from MONDO yet see:", "# https://github.com/monarch-initiative/human-disease-ontology/issues/3", "model", ".", "addClassToGraph", "(", "disease_id", ",", "disease_name", ")", "# not typing the diseases as DOID:4 yet because", "# I don't want to bulk up the graph unnecessarily", "if", "not", "self", ".", "test_mode", "and", "(", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ")", ":", "break", "LOG", ".", "info", "(", "\"Done with diseases\"", ")", "return" ]
This method processes the KEGG disease IDs. Triples created: <disease_id> is a class <disease_id> rdfs:label <disease_name> :param limit: :return:
[ "This", "method", "processes", "the", "KEGG", "disease", "IDs", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L224-L269
train
251,231
monarch-initiative/dipper
dipper/sources/KEGG.py
KEGG._process_genes
def _process_genes(self, limit=None): """ This method processes the KEGG gene IDs. The label for the gene is pulled as the first symbol in the list of gene symbols; the rest are added as synonyms. The long-form of the gene name is added as a definition. This is hardcoded to just processes human genes. Triples created: <gene_id> is a SO:gene <gene_id> rdfs:label <gene_name> :param limit: :return: """ LOG.info("Processing genes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 family = Family(graph) geno = Genotype(graph) raw = '/'.join((self.rawdir, self.files['hsa_genes']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (gene_id, gene_name) = row gene_id = 'KEGG-'+gene_id.strip() # the gene listing has a bunch of labels # that are delimited, as: # DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT, # EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin # it looks like the list is semicolon delimited # (symbol, name, gene_class) # where the symbol is a comma-delimited list # here, we split them up. # we will take the first abbreviation and make it the symbol # then take the rest as synonyms gene_stuff = re.split('r;', gene_name) symbollist = re.split(r',', gene_stuff[0]) first_symbol = symbollist[0].strip() if gene_id not in self.label_hash: self.label_hash[gene_id] = first_symbol if self.test_mode and gene_id not in self.test_ids['genes']: continue # Add the gene as a class. geno.addGene(gene_id, first_symbol) # add the long name as the description if len(gene_stuff) > 1: description = gene_stuff[1].strip() model.addDefinition(gene_id, description) # add the rest of the symbols as synonyms for i in enumerate(symbollist, start=1): model.addSynonym(gene_id, i[1].strip()) if len(gene_stuff) > 2: ko_part = gene_stuff[2] ko_match = re.search(r'K\d+', ko_part) if ko_match is not None and len(ko_match.groups()) == 1: ko = 'KEGG-ko:'+ko_match.group(1) family.addMemberOf(gene_id, ko) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with genes") return
python
def _process_genes(self, limit=None): """ This method processes the KEGG gene IDs. The label for the gene is pulled as the first symbol in the list of gene symbols; the rest are added as synonyms. The long-form of the gene name is added as a definition. This is hardcoded to just processes human genes. Triples created: <gene_id> is a SO:gene <gene_id> rdfs:label <gene_name> :param limit: :return: """ LOG.info("Processing genes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 family = Family(graph) geno = Genotype(graph) raw = '/'.join((self.rawdir, self.files['hsa_genes']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (gene_id, gene_name) = row gene_id = 'KEGG-'+gene_id.strip() # the gene listing has a bunch of labels # that are delimited, as: # DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT, # EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin # it looks like the list is semicolon delimited # (symbol, name, gene_class) # where the symbol is a comma-delimited list # here, we split them up. # we will take the first abbreviation and make it the symbol # then take the rest as synonyms gene_stuff = re.split('r;', gene_name) symbollist = re.split(r',', gene_stuff[0]) first_symbol = symbollist[0].strip() if gene_id not in self.label_hash: self.label_hash[gene_id] = first_symbol if self.test_mode and gene_id not in self.test_ids['genes']: continue # Add the gene as a class. geno.addGene(gene_id, first_symbol) # add the long name as the description if len(gene_stuff) > 1: description = gene_stuff[1].strip() model.addDefinition(gene_id, description) # add the rest of the symbols as synonyms for i in enumerate(symbollist, start=1): model.addSynonym(gene_id, i[1].strip()) if len(gene_stuff) > 2: ko_part = gene_stuff[2] ko_match = re.search(r'K\d+', ko_part) if ko_match is not None and len(ko_match.groups()) == 1: ko = 'KEGG-ko:'+ko_match.group(1) family.addMemberOf(gene_id, ko) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with genes") return
[ "def", "_process_genes", "(", "self", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing genes\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "family", "=", "Family", "(", "graph", ")", "geno", "=", "Genotype", "(", "graph", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'hsa_genes'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "gene_id", ",", "gene_name", ")", "=", "row", "gene_id", "=", "'KEGG-'", "+", "gene_id", ".", "strip", "(", ")", "# the gene listing has a bunch of labels", "# that are delimited, as:", "# DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT,", "# EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin", "# it looks like the list is semicolon delimited", "# (symbol, name, gene_class)", "# where the symbol is a comma-delimited list", "# here, we split them up.", "# we will take the first abbreviation and make it the symbol", "# then take the rest as synonyms", "gene_stuff", "=", "re", ".", "split", "(", "'r;'", ",", "gene_name", ")", "symbollist", "=", "re", ".", "split", "(", "r','", ",", "gene_stuff", "[", "0", "]", ")", "first_symbol", "=", "symbollist", "[", "0", "]", ".", "strip", "(", ")", "if", "gene_id", "not", "in", "self", ".", "label_hash", ":", "self", ".", "label_hash", "[", "gene_id", "]", "=", "first_symbol", "if", "self", ".", "test_mode", "and", "gene_id", "not", "in", "self", ".", "test_ids", "[", "'genes'", "]", ":", "continue", "# Add the gene as a class.", "geno", ".", "addGene", "(", "gene_id", ",", "first_symbol", ")", "# add the long name as the description", "if", "len", "(", "gene_stuff", ")", ">", "1", ":", "description", "=", "gene_stuff", "[", "1", "]", ".", "strip", "(", ")", "model", ".", "addDefinition", "(", "gene_id", ",", "description", ")", "# add the rest of the symbols as synonyms", "for", "i", "in", "enumerate", "(", "symbollist", ",", "start", "=", "1", ")", ":", "model", ".", "addSynonym", "(", "gene_id", ",", "i", "[", "1", "]", ".", "strip", "(", ")", ")", "if", "len", "(", "gene_stuff", ")", ">", "2", ":", "ko_part", "=", "gene_stuff", "[", "2", "]", "ko_match", "=", "re", ".", "search", "(", "r'K\\d+'", ",", "ko_part", ")", "if", "ko_match", "is", "not", "None", "and", "len", "(", "ko_match", ".", "groups", "(", ")", ")", "==", "1", ":", "ko", "=", "'KEGG-ko:'", "+", "ko_match", ".", "group", "(", "1", ")", "family", ".", "addMemberOf", "(", "gene_id", ",", "ko", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with genes\"", ")", "return" ]
This method processes the KEGG gene IDs. The label for the gene is pulled as the first symbol in the list of gene symbols; the rest are added as synonyms. The long-form of the gene name is added as a definition. This is hardcoded to process only human genes. Triples created: <gene_id> is a SO:gene <gene_id> rdfs:label <gene_name> :param limit: :return:
[ "This", "method", "processes", "the", "KEGG", "gene", "IDs", ".", "The", "label", "for", "the", "gene", "is", "pulled", "as", "the", "first", "symbol", "in", "the", "list", "of", "gene", "symbols", ";", "the", "rest", "are", "added", "as", "synonyms", ".", "The", "long", "-", "form", "of", "the", "gene", "name", "is", "added", "as", "a", "definition", ".", "This", "is", "hardcoded", "to", "just", "processes", "human", "genes", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L271-L352
train
251,232
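A minimal sketch of parsing the delimited KEGG gene listing described in the KEGG._process_genes docstring above, assuming the listing really is semicolon delimited (r';') as the record's comments describe; the helper name parse_kegg_gene_name and the return shape are illustrative.

import re

def parse_kegg_gene_name(gene_name):
    # e.g. 'DST, BP240, BPA; dystonin; K10382 dystonin'
    gene_stuff = re.split(r';', gene_name)             # (symbols, name, gene_class)
    symbols = [s.strip() for s in re.split(r',', gene_stuff[0])]
    first_symbol = symbols[0]                          # used as the gene label
    synonyms = symbols[1:]                             # the remaining abbreviations
    description = gene_stuff[1].strip() if len(gene_stuff) > 1 else None
    ko = None
    if len(gene_stuff) > 2:
        ko_match = re.search(r'(K\d+)', gene_stuff[2])
        if ko_match is not None:
            ko = 'KEGG-ko:' + ko_match.group(1)        # KEGG orthology (gene family) id
    return first_symbol, synonyms, description, ko

# parse_kegg_gene_name('DST, BP240; dystonin; K10382 dystonin')
# -> ('DST', ['BP240'], 'dystonin', 'KEGG-ko:K10382')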
monarch-initiative/dipper
dipper/sources/KEGG.py
KEGG._process_ortholog_classes
def _process_ortholog_classes(self, limit=None): """ This method add the KEGG orthology classes to the graph. If there's an embedded enzyme commission number, that is added as an xref. Triples created: <orthology_class_id> is a class <orthology_class_id> has label <orthology_symbols> <orthology_class_id> has description <orthology_description> :param limit: :return: """ LOG.info("Processing ortholog classes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, self.files['ortholog_classes']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (orthology_class_id, orthology_class_name) = row if self.test_mode and orthology_class_id \ not in self.test_ids['orthology_classes']: continue # The orthology class is essentially a KEGG gene ID # that is species agnostic. # Add the ID and label as a gene family class other_labels = re.split(r'[;,]', orthology_class_name) # the first one is the label we'll use orthology_label = other_labels[0] orthology_class_id = 'KEGG-'+orthology_class_id.strip() orthology_type = self.globaltt['gene_family'] model.addClassToGraph( orthology_class_id, orthology_label, orthology_type) if len(other_labels) > 1: # add the rest as synonyms # todo skip the first for s in other_labels: model.addSynonym(orthology_class_id, s.strip()) # add the last one as the description d = other_labels[len(other_labels)-1] model.addDescription(orthology_class_id, d) # add the enzyme commission number (EC:1.2.99.5)as an xref # sometimes there's two, like [EC:1.3.5.1 1.3.5.4] # can also have a dash, like EC:1.10.3.- ec_matches = re.findall(r'((?:\d+|\.|-){5,7})', d) if ec_matches is not None: for ecm in ec_matches: model.addXref(orthology_class_id, 'EC:' + ecm) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with ortholog classes") return
python
def _process_ortholog_classes(self, limit=None): """ This method add the KEGG orthology classes to the graph. If there's an embedded enzyme commission number, that is added as an xref. Triples created: <orthology_class_id> is a class <orthology_class_id> has label <orthology_symbols> <orthology_class_id> has description <orthology_description> :param limit: :return: """ LOG.info("Processing ortholog classes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, self.files['ortholog_classes']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (orthology_class_id, orthology_class_name) = row if self.test_mode and orthology_class_id \ not in self.test_ids['orthology_classes']: continue # The orthology class is essentially a KEGG gene ID # that is species agnostic. # Add the ID and label as a gene family class other_labels = re.split(r'[;,]', orthology_class_name) # the first one is the label we'll use orthology_label = other_labels[0] orthology_class_id = 'KEGG-'+orthology_class_id.strip() orthology_type = self.globaltt['gene_family'] model.addClassToGraph( orthology_class_id, orthology_label, orthology_type) if len(other_labels) > 1: # add the rest as synonyms # todo skip the first for s in other_labels: model.addSynonym(orthology_class_id, s.strip()) # add the last one as the description d = other_labels[len(other_labels)-1] model.addDescription(orthology_class_id, d) # add the enzyme commission number (EC:1.2.99.5)as an xref # sometimes there's two, like [EC:1.3.5.1 1.3.5.4] # can also have a dash, like EC:1.10.3.- ec_matches = re.findall(r'((?:\d+|\.|-){5,7})', d) if ec_matches is not None: for ecm in ec_matches: model.addXref(orthology_class_id, 'EC:' + ecm) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with ortholog classes") return
[ "def", "_process_ortholog_classes", "(", "self", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing ortholog classes\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'ortholog_classes'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "orthology_class_id", ",", "orthology_class_name", ")", "=", "row", "if", "self", ".", "test_mode", "and", "orthology_class_id", "not", "in", "self", ".", "test_ids", "[", "'orthology_classes'", "]", ":", "continue", "# The orthology class is essentially a KEGG gene ID", "# that is species agnostic.", "# Add the ID and label as a gene family class", "other_labels", "=", "re", ".", "split", "(", "r'[;,]'", ",", "orthology_class_name", ")", "# the first one is the label we'll use", "orthology_label", "=", "other_labels", "[", "0", "]", "orthology_class_id", "=", "'KEGG-'", "+", "orthology_class_id", ".", "strip", "(", ")", "orthology_type", "=", "self", ".", "globaltt", "[", "'gene_family'", "]", "model", ".", "addClassToGraph", "(", "orthology_class_id", ",", "orthology_label", ",", "orthology_type", ")", "if", "len", "(", "other_labels", ")", ">", "1", ":", "# add the rest as synonyms", "# todo skip the first", "for", "s", "in", "other_labels", ":", "model", ".", "addSynonym", "(", "orthology_class_id", ",", "s", ".", "strip", "(", ")", ")", "# add the last one as the description", "d", "=", "other_labels", "[", "len", "(", "other_labels", ")", "-", "1", "]", "model", ".", "addDescription", "(", "orthology_class_id", ",", "d", ")", "# add the enzyme commission number (EC:1.2.99.5)as an xref", "# sometimes there's two, like [EC:1.3.5.1 1.3.5.4]", "# can also have a dash, like EC:1.10.3.-", "ec_matches", "=", "re", ".", "findall", "(", "r'((?:\\d+|\\.|-){5,7})'", ",", "d", ")", "if", "ec_matches", "is", "not", "None", ":", "for", "ecm", "in", "ec_matches", ":", "model", ".", "addXref", "(", "orthology_class_id", ",", "'EC:'", "+", "ecm", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with ortholog classes\"", ")", "return" ]
This method adds the KEGG orthology classes to the graph. If there's an embedded enzyme commission number, that is added as an xref. Triples created: <orthology_class_id> is a class <orthology_class_id> has label <orthology_symbols> <orthology_class_id> has description <orthology_description> :param limit: :return:
[ "This", "method", "add", "the", "KEGG", "orthology", "classes", "to", "the", "graph", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L354-L423
train
251,233
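A short sketch of the enzyme-commission-number extraction used in the KEGG._process_ortholog_classes record above, reusing the same pattern applied to the trailing description; extract_ec_xrefs is an illustrative name.

import re

EC_PATTERN = re.compile(r'((?:\d+|\.|-){5,7})')

def extract_ec_xrefs(description):
    # pulls EC numbers such as 1.2.99.5, 1.3.5.1 1.3.5.4, or 1.10.3.- out of the
    # description and turns each into an EC: cross-reference
    return ['EC:' + ecm for ecm in EC_PATTERN.findall(description)]

# extract_ec_xrefs('pyruvate dehydrogenase [EC:1.2.99.5]')    -> ['EC:1.2.99.5']
# extract_ec_xrefs('fumarate reductase [EC:1.3.5.1 1.3.5.4]') -> ['EC:1.3.5.1', 'EC:1.3.5.4']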
monarch-initiative/dipper
dipper/sources/KEGG.py
KEGG._process_orthologs
def _process_orthologs(self, raw, limit=None): """ This method maps orthologs for a species to the KEGG orthology classes. Triples created: <gene_id> is a class <orthology_class_id> is a class <assoc_id> has subject <gene_id> <assoc_id> has object <orthology_class_id> :param limit: :return: """ LOG.info("Processing orthologs") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (gene_id, orthology_class_id) = row orthology_class_id = 'KEGG:'+orthology_class_id.strip() gene_id = 'KEGG:' + gene_id.strip() # note that the panther_id references a group of orthologs, # and is not 1:1 with the rest # add the KO id as a gene-family grouping class OrthologyAssoc( graph, self.name, gene_id, None).add_gene_family_to_graph( orthology_class_id) # add gene and orthology class to graph; # assume labels will be taken care of elsewhere model.addClassToGraph(gene_id, None) model.addClassToGraph(orthology_class_id, None) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with orthologs") return
python
def _process_orthologs(self, raw, limit=None): """ This method maps orthologs for a species to the KEGG orthology classes. Triples created: <gene_id> is a class <orthology_class_id> is a class <assoc_id> has subject <gene_id> <assoc_id> has object <orthology_class_id> :param limit: :return: """ LOG.info("Processing orthologs") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (gene_id, orthology_class_id) = row orthology_class_id = 'KEGG:'+orthology_class_id.strip() gene_id = 'KEGG:' + gene_id.strip() # note that the panther_id references a group of orthologs, # and is not 1:1 with the rest # add the KO id as a gene-family grouping class OrthologyAssoc( graph, self.name, gene_id, None).add_gene_family_to_graph( orthology_class_id) # add gene and orthology class to graph; # assume labels will be taken care of elsewhere model.addClassToGraph(gene_id, None) model.addClassToGraph(orthology_class_id, None) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with orthologs") return
[ "def", "_process_orthologs", "(", "self", ",", "raw", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing orthologs\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "gene_id", ",", "orthology_class_id", ")", "=", "row", "orthology_class_id", "=", "'KEGG:'", "+", "orthology_class_id", ".", "strip", "(", ")", "gene_id", "=", "'KEGG:'", "+", "gene_id", ".", "strip", "(", ")", "# note that the panther_id references a group of orthologs,", "# and is not 1:1 with the rest", "# add the KO id as a gene-family grouping class", "OrthologyAssoc", "(", "graph", ",", "self", ".", "name", ",", "gene_id", ",", "None", ")", ".", "add_gene_family_to_graph", "(", "orthology_class_id", ")", "# add gene and orthology class to graph;", "# assume labels will be taken care of elsewhere", "model", ".", "addClassToGraph", "(", "gene_id", ",", "None", ")", "model", ".", "addClassToGraph", "(", "orthology_class_id", ",", "None", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with orthologs\"", ")", "return" ]
This method maps orthologs for a species to the KEGG orthology classes. Triples created: <gene_id> is a class <orthology_class_id> is a class <assoc_id> has subject <gene_id> <assoc_id> has object <orthology_class_id> :param limit: :return:
[ "This", "method", "maps", "orthologs", "for", "a", "species", "to", "the", "KEGG", "orthology", "classes", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L425-L473
train
251,234
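The record above reduces the KEGG ortholog file to a two-column tab-separated dump of gene id to KO (orthology class) id, with both ids re-prefixed into CURIEs before anything is written to the graph. Below is a minimal standalone sketch of that id normalization, kept free of the dipper Model/OrthologyAssoc machinery; the file path and the grouping usage are illustrative assumptions, not part of the source.

import csv

def kegg_ortholog_pairs(path, limit=None):
    """Yield (gene_curie, ko_curie) pairs from a two-column KEGG ortholog TSV."""
    with open(path, 'r', encoding='iso-8859-1') as tsv:
        for line_number, row in enumerate(csv.reader(tsv, delimiter='\t'), start=1):
            gene_id, orthology_class_id = row
            # both columns arrive as bare KEGG identifiers; turn them into CURIEs
            yield 'KEGG:' + gene_id.strip(), 'KEGG:' + orthology_class_id.strip()
            if limit is not None and line_number >= limit:
                break

# usage (hypothetical path): group genes by their KO gene-family class
# families = {}
# for gene, ko in kegg_ortholog_pairs('raw/kegg/hsa_orthology', limit=100):
#     families.setdefault(ko, set()).add(gene)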
monarch-initiative/dipper
dipper/sources/KEGG.py
KEGG._process_kegg_disease2gene
def _process_kegg_disease2gene(self, limit=None): """ This method creates an association between diseases and their associated genes. We are being conservative here, and only processing those diseases for which there is no mapping to OMIM. Triples created: <alternate_locus> is an Individual <alternate_locus> has type <variant_locus> <alternate_locus> is an allele of <gene_id> <assoc_id> has subject <disease_id> <assoc_id> has object <gene_id> :param limit: :return: """ LOG.info("Processing KEGG disease to gene") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 geno = Genotype(graph) rel = self.globaltt['is marker for'] noomimset = set() raw = '/'.join((self.rawdir, self.files['disease_gene']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (gene_id, disease_id) = row if self.test_mode and gene_id not in self.test_ids['genes']: continue gene_id = 'KEGG-' + gene_id.strip() disease_id = 'KEGG-' + disease_id.strip() # only add diseases for which # there is no omim id and not a grouping class if disease_id not in self.kegg_disease_hash: # add as a class disease_label = None if disease_id in self.label_hash: disease_label = self.label_hash[disease_id] if re.search(r'includ', str(disease_label)): # they use 'including' when it's a grouping class LOG.info( "Skipping this association because " + "it's a grouping class: %s", disease_label) continue # type this disease_id as a disease model.addClassToGraph(disease_id, disease_label) # , class_type=self.globaltt['disease']) noomimset.add(disease_id) alt_locus_id = self._make_variant_locus_id(gene_id, disease_id) alt_label = self.label_hash[alt_locus_id] model.addIndividualToGraph( alt_locus_id, alt_label, self.globaltt['variant_locus']) geno.addAffectedLocus(alt_locus_id, gene_id) model.addBlankNodeAnnotation(alt_locus_id) # Add the disease to gene relationship. assoc = G2PAssoc(graph, self.name, alt_locus_id, disease_id, rel) assoc.add_association_to_graph() if not self.test_mode and ( limit is not None and line_counter > limit): break LOG.info("Done with KEGG disease to gene") LOG.info("Found %d diseases with no omim id", len(noomimset)) return
python
def _process_kegg_disease2gene(self, limit=None): """ This method creates an association between diseases and their associated genes. We are being conservative here, and only processing those diseases for which there is no mapping to OMIM. Triples created: <alternate_locus> is an Individual <alternate_locus> has type <variant_locus> <alternate_locus> is an allele of <gene_id> <assoc_id> has subject <disease_id> <assoc_id> has object <gene_id> :param limit: :return: """ LOG.info("Processing KEGG disease to gene") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 geno = Genotype(graph) rel = self.globaltt['is marker for'] noomimset = set() raw = '/'.join((self.rawdir, self.files['disease_gene']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (gene_id, disease_id) = row if self.test_mode and gene_id not in self.test_ids['genes']: continue gene_id = 'KEGG-' + gene_id.strip() disease_id = 'KEGG-' + disease_id.strip() # only add diseases for which # there is no omim id and not a grouping class if disease_id not in self.kegg_disease_hash: # add as a class disease_label = None if disease_id in self.label_hash: disease_label = self.label_hash[disease_id] if re.search(r'includ', str(disease_label)): # they use 'including' when it's a grouping class LOG.info( "Skipping this association because " + "it's a grouping class: %s", disease_label) continue # type this disease_id as a disease model.addClassToGraph(disease_id, disease_label) # , class_type=self.globaltt['disease']) noomimset.add(disease_id) alt_locus_id = self._make_variant_locus_id(gene_id, disease_id) alt_label = self.label_hash[alt_locus_id] model.addIndividualToGraph( alt_locus_id, alt_label, self.globaltt['variant_locus']) geno.addAffectedLocus(alt_locus_id, gene_id) model.addBlankNodeAnnotation(alt_locus_id) # Add the disease to gene relationship. assoc = G2PAssoc(graph, self.name, alt_locus_id, disease_id, rel) assoc.add_association_to_graph() if not self.test_mode and ( limit is not None and line_counter > limit): break LOG.info("Done with KEGG disease to gene") LOG.info("Found %d diseases with no omim id", len(noomimset)) return
[ "def", "_process_kegg_disease2gene", "(", "self", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing KEGG disease to gene\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "geno", "=", "Genotype", "(", "graph", ")", "rel", "=", "self", ".", "globaltt", "[", "'is marker for'", "]", "noomimset", "=", "set", "(", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'disease_gene'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "gene_id", ",", "disease_id", ")", "=", "row", "if", "self", ".", "test_mode", "and", "gene_id", "not", "in", "self", ".", "test_ids", "[", "'genes'", "]", ":", "continue", "gene_id", "=", "'KEGG-'", "+", "gene_id", ".", "strip", "(", ")", "disease_id", "=", "'KEGG-'", "+", "disease_id", ".", "strip", "(", ")", "# only add diseases for which", "# there is no omim id and not a grouping class", "if", "disease_id", "not", "in", "self", ".", "kegg_disease_hash", ":", "# add as a class", "disease_label", "=", "None", "if", "disease_id", "in", "self", ".", "label_hash", ":", "disease_label", "=", "self", ".", "label_hash", "[", "disease_id", "]", "if", "re", ".", "search", "(", "r'includ'", ",", "str", "(", "disease_label", ")", ")", ":", "# they use 'including' when it's a grouping class", "LOG", ".", "info", "(", "\"Skipping this association because \"", "+", "\"it's a grouping class: %s\"", ",", "disease_label", ")", "continue", "# type this disease_id as a disease", "model", ".", "addClassToGraph", "(", "disease_id", ",", "disease_label", ")", "# , class_type=self.globaltt['disease'])", "noomimset", ".", "add", "(", "disease_id", ")", "alt_locus_id", "=", "self", ".", "_make_variant_locus_id", "(", "gene_id", ",", "disease_id", ")", "alt_label", "=", "self", ".", "label_hash", "[", "alt_locus_id", "]", "model", ".", "addIndividualToGraph", "(", "alt_locus_id", ",", "alt_label", ",", "self", ".", "globaltt", "[", "'variant_locus'", "]", ")", "geno", ".", "addAffectedLocus", "(", "alt_locus_id", ",", "gene_id", ")", "model", ".", "addBlankNodeAnnotation", "(", "alt_locus_id", ")", "# Add the disease to gene relationship.", "assoc", "=", "G2PAssoc", "(", "graph", ",", "self", ".", "name", ",", "alt_locus_id", ",", "disease_id", ",", "rel", ")", "assoc", ".", "add_association_to_graph", "(", ")", "if", "not", "self", ".", "test_mode", "and", "(", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ")", ":", "break", "LOG", ".", "info", "(", "\"Done with KEGG disease to gene\"", ")", "LOG", ".", "info", "(", "\"Found %d diseases with no omim id\"", ",", "len", "(", "noomimset", ")", ")", "return" ]
This method creates an association between diseases and their associated genes. We are being conservative here, and only processing those diseases for which there is no mapping to OMIM. Triples created: <alternate_locus> is an Individual <alternate_locus> has type <variant_locus> <alternate_locus> is an allele of <gene_id> <assoc_id> has subject <disease_id> <assoc_id> has object <gene_id> :param limit: :return:
[ "This", "method", "creates", "an", "association", "between", "diseases", "and", "their", "associated", "genes", ".", "We", "are", "being", "conservative", "here", "and", "only", "processing", "those", "diseases", "for", "which", "there", "is", "no", "mapping", "to", "OMIM", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L475-L551
train
251,235
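One detail worth pulling out of the disease-to-gene record is the guard against KEGG "grouping" diseases: any label containing "includ" (as in "including") is treated as a grouping class and skipped. A small hedged sketch of just that filter follows; the plain dict stands in for the source's label_hash, and the ids and labels are invented for illustration.

import re

GROUPING_PATTERN = re.compile(r'includ')  # KEGG labels grouping classes with "... including ..."

def is_grouping_class(disease_id, label_lookup):
    """Return True when the disease label marks a KEGG grouping class."""
    label = label_lookup.get(disease_id)
    return label is not None and GROUPING_PATTERN.search(str(label)) is not None

# toy label table (made-up ids and labels)
labels = {
    'KEGG-ds:H01234': 'Retinitis pigmentosa',
    'KEGG-ds:H05678': 'Lysosomal storage diseases, including sphingolipidosis',
}
assert not is_grouping_class('KEGG-ds:H01234', labels)
assert is_grouping_class('KEGG-ds:H05678', labels)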
monarch-initiative/dipper
dipper/sources/KEGG.py
KEGG._process_omim2gene
def _process_omim2gene(self, limit=None): """ This method maps the OMIM IDs and KEGG gene ID. Currently split based on the link_type field. Equivalent link types are mapped as gene XRefs. Reverse link types are mapped as disease to gene associations. Original link types are currently skipped. Triples created: <kegg_gene_id> is a Gene <omim_gene_id> is a Gene <kegg_gene_id>> hasXref <omim_gene_id> <assoc_id> has subject <omim_disease_id> <assoc_id> has object <kegg_gene_id> :param limit: :return: """ LOG.info("Processing OMIM to KEGG gene") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 geno = Genotype(graph) raw = '/'.join((self.rawdir, self.files['omim2gene']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (kegg_gene_id, omim_id, link_type) = row if self.test_mode and kegg_gene_id not in self.test_ids['genes']: continue kegg_gene_id = 'KEGG-' + kegg_gene_id.strip() omim_id = re.sub(r'omim', 'OMIM', omim_id) if link_type == 'equivalent': # these are genes! # so add them as a class then make equivalence model.addClassToGraph(omim_id, None) geno.addGene(kegg_gene_id, None) if not DipperUtil.is_omim_disease(omim_id): model.addEquivalentClass(kegg_gene_id, omim_id) elif link_type == 'reverse': # make an association between an OMIM ID & the KEGG gene ID # we do this with omim ids because # they are more atomic than KEGG ids alt_locus_id = self._make_variant_locus_id(kegg_gene_id, omim_id) alt_label = self.label_hash[alt_locus_id] model.addIndividualToGraph( alt_locus_id, alt_label, self.globaltt['variant_locus']) geno.addAffectedLocus(alt_locus_id, kegg_gene_id) model.addBlankNodeAnnotation(alt_locus_id) # Add the disease to gene relationship. rel = self.globaltt['is marker for'] assoc = G2PAssoc(graph, self.name, alt_locus_id, omim_id, rel) assoc.add_association_to_graph() elif link_type == 'original': # these are sometimes a gene, and sometimes a disease LOG.info( 'Unable to handle original link for %s-%s', kegg_gene_id, omim_id) else: # don't know what these are LOG.warning( 'Unhandled link type for %s-%s: %s', kegg_gene_id, omim_id, link_type) if (not self.test_mode) and ( limit is not None and line_counter > limit): break LOG.info("Done with OMIM to KEGG gene") return
python
def _process_omim2gene(self, limit=None): """ This method maps the OMIM IDs and KEGG gene ID. Currently split based on the link_type field. Equivalent link types are mapped as gene XRefs. Reverse link types are mapped as disease to gene associations. Original link types are currently skipped. Triples created: <kegg_gene_id> is a Gene <omim_gene_id> is a Gene <kegg_gene_id>> hasXref <omim_gene_id> <assoc_id> has subject <omim_disease_id> <assoc_id> has object <kegg_gene_id> :param limit: :return: """ LOG.info("Processing OMIM to KEGG gene") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 geno = Genotype(graph) raw = '/'.join((self.rawdir, self.files['omim2gene']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (kegg_gene_id, omim_id, link_type) = row if self.test_mode and kegg_gene_id not in self.test_ids['genes']: continue kegg_gene_id = 'KEGG-' + kegg_gene_id.strip() omim_id = re.sub(r'omim', 'OMIM', omim_id) if link_type == 'equivalent': # these are genes! # so add them as a class then make equivalence model.addClassToGraph(omim_id, None) geno.addGene(kegg_gene_id, None) if not DipperUtil.is_omim_disease(omim_id): model.addEquivalentClass(kegg_gene_id, omim_id) elif link_type == 'reverse': # make an association between an OMIM ID & the KEGG gene ID # we do this with omim ids because # they are more atomic than KEGG ids alt_locus_id = self._make_variant_locus_id(kegg_gene_id, omim_id) alt_label = self.label_hash[alt_locus_id] model.addIndividualToGraph( alt_locus_id, alt_label, self.globaltt['variant_locus']) geno.addAffectedLocus(alt_locus_id, kegg_gene_id) model.addBlankNodeAnnotation(alt_locus_id) # Add the disease to gene relationship. rel = self.globaltt['is marker for'] assoc = G2PAssoc(graph, self.name, alt_locus_id, omim_id, rel) assoc.add_association_to_graph() elif link_type == 'original': # these are sometimes a gene, and sometimes a disease LOG.info( 'Unable to handle original link for %s-%s', kegg_gene_id, omim_id) else: # don't know what these are LOG.warning( 'Unhandled link type for %s-%s: %s', kegg_gene_id, omim_id, link_type) if (not self.test_mode) and ( limit is not None and line_counter > limit): break LOG.info("Done with OMIM to KEGG gene") return
[ "def", "_process_omim2gene", "(", "self", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing OMIM to KEGG gene\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "geno", "=", "Genotype", "(", "graph", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'omim2gene'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "kegg_gene_id", ",", "omim_id", ",", "link_type", ")", "=", "row", "if", "self", ".", "test_mode", "and", "kegg_gene_id", "not", "in", "self", ".", "test_ids", "[", "'genes'", "]", ":", "continue", "kegg_gene_id", "=", "'KEGG-'", "+", "kegg_gene_id", ".", "strip", "(", ")", "omim_id", "=", "re", ".", "sub", "(", "r'omim'", ",", "'OMIM'", ",", "omim_id", ")", "if", "link_type", "==", "'equivalent'", ":", "# these are genes!", "# so add them as a class then make equivalence", "model", ".", "addClassToGraph", "(", "omim_id", ",", "None", ")", "geno", ".", "addGene", "(", "kegg_gene_id", ",", "None", ")", "if", "not", "DipperUtil", ".", "is_omim_disease", "(", "omim_id", ")", ":", "model", ".", "addEquivalentClass", "(", "kegg_gene_id", ",", "omim_id", ")", "elif", "link_type", "==", "'reverse'", ":", "# make an association between an OMIM ID & the KEGG gene ID", "# we do this with omim ids because", "# they are more atomic than KEGG ids", "alt_locus_id", "=", "self", ".", "_make_variant_locus_id", "(", "kegg_gene_id", ",", "omim_id", ")", "alt_label", "=", "self", ".", "label_hash", "[", "alt_locus_id", "]", "model", ".", "addIndividualToGraph", "(", "alt_locus_id", ",", "alt_label", ",", "self", ".", "globaltt", "[", "'variant_locus'", "]", ")", "geno", ".", "addAffectedLocus", "(", "alt_locus_id", ",", "kegg_gene_id", ")", "model", ".", "addBlankNodeAnnotation", "(", "alt_locus_id", ")", "# Add the disease to gene relationship.", "rel", "=", "self", ".", "globaltt", "[", "'is marker for'", "]", "assoc", "=", "G2PAssoc", "(", "graph", ",", "self", ".", "name", ",", "alt_locus_id", ",", "omim_id", ",", "rel", ")", "assoc", ".", "add_association_to_graph", "(", ")", "elif", "link_type", "==", "'original'", ":", "# these are sometimes a gene, and sometimes a disease", "LOG", ".", "info", "(", "'Unable to handle original link for %s-%s'", ",", "kegg_gene_id", ",", "omim_id", ")", "else", ":", "# don't know what these are", "LOG", ".", "warning", "(", "'Unhandled link type for %s-%s: %s'", ",", "kegg_gene_id", ",", "omim_id", ",", "link_type", ")", "if", "(", "not", "self", ".", "test_mode", ")", "and", "(", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ")", ":", "break", "LOG", ".", "info", "(", "\"Done with OMIM to KEGG gene\"", ")", "return" ]
This method maps the OMIM IDs and KEGG gene ID. Currently split based on the link_type field. Equivalent link types are mapped as gene XRefs. Reverse link types are mapped as disease to gene associations. Original link types are currently skipped. Triples created: <kegg_gene_id> is a Gene <omim_gene_id> is a Gene <kegg_gene_id>> hasXref <omim_gene_id> <assoc_id> has subject <omim_disease_id> <assoc_id> has object <kegg_gene_id> :param limit: :return:
[ "This", "method", "maps", "the", "OMIM", "IDs", "and", "KEGG", "gene", "ID", ".", "Currently", "split", "based", "on", "the", "link_type", "field", ".", "Equivalent", "link", "types", "are", "mapped", "as", "gene", "XRefs", ".", "Reverse", "link", "types", "are", "mapped", "as", "disease", "to", "gene", "associations", ".", "Original", "link", "types", "are", "currently", "skipped", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L553-L634
train
251,236
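The omim2gene record hinges on two small normalizations before the link_type dispatch: the KEGG gene id gets a 'KEGG-' prefix and the lowercase 'omim' prefix is upper-cased into an OMIM CURIE, after which 'equivalent' rows become gene equivalences, 'reverse' rows become disease-to-gene associations via an alternate locus, and 'original' rows are skipped. The sketch below re-creates only that row handling; the link-type strings are taken from the record, while the function name and action labels are illustrative.

import re

def classify_omim_link(kegg_gene_id, omim_id, link_type):
    """Normalize one omim2gene row and say how it would be modeled."""
    kegg_gene_id = 'KEGG-' + kegg_gene_id.strip()
    omim_id = re.sub(r'omim', 'OMIM', omim_id)   # e.g. 'omim:125853' -> 'OMIM:125853'
    if link_type == 'equivalent':
        action = 'gene-equivalence'   # OMIM gene record equivalent to the KEGG gene
    elif link_type == 'reverse':
        action = 'disease-to-gene'    # OMIM disease tied to an alternate locus of the gene
    elif link_type == 'original':
        action = 'skip'               # ambiguous; sometimes a gene, sometimes a disease
    else:
        action = 'unhandled'
    return kegg_gene_id, omim_id, action

print(classify_omim_link('hsa:3643', 'omim:125853', 'reverse'))
# ('KEGG-hsa:3643', 'OMIM:125853', 'disease-to-gene')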
monarch-initiative/dipper
dipper/sources/KEGG.py
KEGG._process_genes_kegg2ncbi
def _process_genes_kegg2ncbi(self, limit=None): """ This method maps the KEGG human gene IDs to the corresponding NCBI Gene IDs. Triples created: <kegg_gene_id> is a class <ncbi_gene_id> is a class <kegg_gene_id> equivalentClass <ncbi_gene_id> :param limit: :return: """ LOG.info("Processing KEGG gene IDs to NCBI gene IDs") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, self.files['ncbi']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (kegg_gene_id, ncbi_gene_id, link_type) = row if self.test_mode and kegg_gene_id not in self.test_ids['genes']: continue # Adjust the NCBI gene ID prefix. ncbi_gene_id = re.sub(r'ncbi-geneid', 'NCBIGene', ncbi_gene_id) kegg_gene_id = 'KEGG-' + kegg_gene_id # Adding the KEGG gene ID to the graph here is redundant, # unless there happens to be additional gene IDs in this table # not present in the genes table. model.addClassToGraph(kegg_gene_id, None) model.addClassToGraph(ncbi_gene_id, None) model.addEquivalentClass(kegg_gene_id, ncbi_gene_id) if not self.test_mode and ( limit is not None and line_counter > limit): break LOG.info("Done with KEGG gene IDs to NCBI gene IDs") return
python
def _process_genes_kegg2ncbi(self, limit=None): """ This method maps the KEGG human gene IDs to the corresponding NCBI Gene IDs. Triples created: <kegg_gene_id> is a class <ncbi_gene_id> is a class <kegg_gene_id> equivalentClass <ncbi_gene_id> :param limit: :return: """ LOG.info("Processing KEGG gene IDs to NCBI gene IDs") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, self.files['ncbi']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (kegg_gene_id, ncbi_gene_id, link_type) = row if self.test_mode and kegg_gene_id not in self.test_ids['genes']: continue # Adjust the NCBI gene ID prefix. ncbi_gene_id = re.sub(r'ncbi-geneid', 'NCBIGene', ncbi_gene_id) kegg_gene_id = 'KEGG-' + kegg_gene_id # Adding the KEGG gene ID to the graph here is redundant, # unless there happens to be additional gene IDs in this table # not present in the genes table. model.addClassToGraph(kegg_gene_id, None) model.addClassToGraph(ncbi_gene_id, None) model.addEquivalentClass(kegg_gene_id, ncbi_gene_id) if not self.test_mode and ( limit is not None and line_counter > limit): break LOG.info("Done with KEGG gene IDs to NCBI gene IDs") return
[ "def", "_process_genes_kegg2ncbi", "(", "self", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing KEGG gene IDs to NCBI gene IDs\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'ncbi'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "kegg_gene_id", ",", "ncbi_gene_id", ",", "link_type", ")", "=", "row", "if", "self", ".", "test_mode", "and", "kegg_gene_id", "not", "in", "self", ".", "test_ids", "[", "'genes'", "]", ":", "continue", "# Adjust the NCBI gene ID prefix.", "ncbi_gene_id", "=", "re", ".", "sub", "(", "r'ncbi-geneid'", ",", "'NCBIGene'", ",", "ncbi_gene_id", ")", "kegg_gene_id", "=", "'KEGG-'", "+", "kegg_gene_id", "# Adding the KEGG gene ID to the graph here is redundant,", "# unless there happens to be additional gene IDs in this table", "# not present in the genes table.", "model", ".", "addClassToGraph", "(", "kegg_gene_id", ",", "None", ")", "model", ".", "addClassToGraph", "(", "ncbi_gene_id", ",", "None", ")", "model", ".", "addEquivalentClass", "(", "kegg_gene_id", ",", "ncbi_gene_id", ")", "if", "not", "self", ".", "test_mode", "and", "(", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ")", ":", "break", "LOG", ".", "info", "(", "\"Done with KEGG gene IDs to NCBI gene IDs\"", ")", "return" ]
This method maps the KEGG human gene IDs to the corresponding NCBI Gene IDs. Triples created: <kegg_gene_id> is a class <ncbi_gene_id> is a class <kegg_gene_id> equivalentClass <ncbi_gene_id> :param limit: :return:
[ "This", "method", "maps", "the", "KEGG", "human", "gene", "IDs", "to", "the", "corresponding", "NCBI", "Gene", "IDs", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L706-L754
train
251,237
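The KEGG-to-NCBI record is essentially a per-row prefix rewrite ('ncbi-geneid:...' becomes an NCBIGene CURIE) followed by an equivalent-class assertion. A minimal sketch of the rewrite, without the graph machinery; the sample identifiers are invented.

import re

def kegg_ncbi_equivalents(rows):
    """Yield (kegg_curie, ncbi_curie) pairs ready for equivalentClass triples."""
    for kegg_gene_id, ncbi_gene_id, _link_type in rows:
        yield ('KEGG-' + kegg_gene_id.strip(),
               re.sub(r'ncbi-geneid', 'NCBIGene', ncbi_gene_id.strip()))

sample = [('hsa:3643', 'ncbi-geneid:3643', 'equivalent')]
print(list(kegg_ncbi_equivalents(sample)))
# [('KEGG-hsa:3643', 'NCBIGene:3643')]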
monarch-initiative/dipper
dipper/sources/KEGG.py
KEGG._process_pathway_disease
def _process_pathway_disease(self, limit): """ We make a link between the pathway identifiers, and any diseases associated with them. Since we model diseases as processes, we make a triple saying that the pathway may be causally upstream of or within the disease process. :param limit: :return: """ LOG.info("Processing KEGG pathways to disease ids") if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 raw = '/'.join((self.rawdir, self.files['pathway_disease']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (disease_id, kegg_pathway_num) = row if self.test_mode and kegg_pathway_num not in self.test_ids['pathway']: continue disease_id = 'KEGG-' + disease_id # will look like KEGG-path:map04130 or KEGG-path:hsa04130 pathway_id = 'KEGG-' + kegg_pathway_num graph.addTriple( pathway_id, self.globaltt['causally upstream of or within'], disease_id) if not self.test_mode and limit is not None and line_counter > limit: break return
python
def _process_pathway_disease(self, limit): """ We make a link between the pathway identifiers, and any diseases associated with them. Since we model diseases as processes, we make a triple saying that the pathway may be causally upstream of or within the disease process. :param limit: :return: """ LOG.info("Processing KEGG pathways to disease ids") if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 raw = '/'.join((self.rawdir, self.files['pathway_disease']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (disease_id, kegg_pathway_num) = row if self.test_mode and kegg_pathway_num not in self.test_ids['pathway']: continue disease_id = 'KEGG-' + disease_id # will look like KEGG-path:map04130 or KEGG-path:hsa04130 pathway_id = 'KEGG-' + kegg_pathway_num graph.addTriple( pathway_id, self.globaltt['causally upstream of or within'], disease_id) if not self.test_mode and limit is not None and line_counter > limit: break return
[ "def", "_process_pathway_disease", "(", "self", ",", "limit", ")", ":", "LOG", ".", "info", "(", "\"Processing KEGG pathways to disease ids\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'pathway_disease'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "disease_id", ",", "kegg_pathway_num", ")", "=", "row", "if", "self", ".", "test_mode", "and", "kegg_pathway_num", "not", "in", "self", ".", "test_ids", "[", "'pathway'", "]", ":", "continue", "disease_id", "=", "'KEGG-'", "+", "disease_id", "# will look like KEGG-path:map04130 or KEGG-path:hsa04130", "pathway_id", "=", "'KEGG-'", "+", "kegg_pathway_num", "graph", ".", "addTriple", "(", "pathway_id", ",", "self", ".", "globaltt", "[", "'causally upstream of or within'", "]", ",", "disease_id", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "return" ]
We make a link between the pathway identifiers, and any diseases associated with them. Since we model diseases as processes, we make a triple saying that the pathway may be causally upstream of or within the disease process. :param limit: :return:
[ "We", "make", "a", "link", "between", "the", "pathway", "identifiers", "and", "any", "diseases", "associated", "with", "them", ".", "Since", "we", "model", "diseases", "as", "processes", "we", "make", "a", "triple", "saying", "that", "the", "pathway", "may", "be", "causally", "upstream", "of", "or", "within", "the", "disease", "process", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L792-L832
train
251,238
monarch-initiative/dipper
dipper/sources/KEGG.py
KEGG._make_variant_locus_id
def _make_variant_locus_id(self, gene_id, disease_id): """ We actually want the association between the gene and the disease to be via an alternate locus not the "wildtype" gene itself. so we make an anonymous alternate locus, and put that in the association We also make the label for the anonymous class, and add it to the label hash :param gene_id: :param disease_id: :return: """ alt_locus_id = '_:'+re.sub( r':', '', gene_id) + '-' + re.sub(r':', '', disease_id) + 'VL' alt_label = self.label_hash.get(gene_id) disease_label = self.label_hash.get(disease_id) if alt_label is not None and alt_label != '': alt_label = 'some variant of ' + str(alt_label) if disease_label is not None and disease_label != '': alt_label += ' that is associated with ' + str(disease_label) else: alt_label = None self.label_hash[alt_locus_id] = alt_label return alt_locus_id
python
def _make_variant_locus_id(self, gene_id, disease_id): """ We actually want the association between the gene and the disease to be via an alternate locus not the "wildtype" gene itself. so we make an anonymous alternate locus, and put that in the association We also make the label for the anonymous class, and add it to the label hash :param gene_id: :param disease_id: :return: """ alt_locus_id = '_:'+re.sub( r':', '', gene_id) + '-' + re.sub(r':', '', disease_id) + 'VL' alt_label = self.label_hash.get(gene_id) disease_label = self.label_hash.get(disease_id) if alt_label is not None and alt_label != '': alt_label = 'some variant of ' + str(alt_label) if disease_label is not None and disease_label != '': alt_label += ' that is associated with ' + str(disease_label) else: alt_label = None self.label_hash[alt_locus_id] = alt_label return alt_locus_id
[ "def", "_make_variant_locus_id", "(", "self", ",", "gene_id", ",", "disease_id", ")", ":", "alt_locus_id", "=", "'_:'", "+", "re", ".", "sub", "(", "r':'", ",", "''", ",", "gene_id", ")", "+", "'-'", "+", "re", ".", "sub", "(", "r':'", ",", "''", ",", "disease_id", ")", "+", "'VL'", "alt_label", "=", "self", ".", "label_hash", ".", "get", "(", "gene_id", ")", "disease_label", "=", "self", ".", "label_hash", ".", "get", "(", "disease_id", ")", "if", "alt_label", "is", "not", "None", "and", "alt_label", "!=", "''", ":", "alt_label", "=", "'some variant of '", "+", "str", "(", "alt_label", ")", "if", "disease_label", "is", "not", "None", "and", "disease_label", "!=", "''", ":", "alt_label", "+=", "' that is associated with '", "+", "str", "(", "disease_label", ")", "else", ":", "alt_label", "=", "None", "self", ".", "label_hash", "[", "alt_locus_id", "]", "=", "alt_label", "return", "alt_locus_id" ]
We actually want the association between the gene and the disease to be via an alternate locus not the "wildtype" gene itself. so we make an anonymous alternate locus, and put that in the association We also make the label for the anonymous class, and add it to the label hash :param gene_id: :param disease_id: :return:
[ "We", "actually", "want", "the", "association", "between", "the", "gene", "and", "the", "disease", "to", "be", "via", "an", "alternate", "locus", "not", "the", "wildtype", "gene", "itself", ".", "so", "we", "make", "an", "anonymous", "alternate", "locus", "and", "put", "that", "in", "the", "association", "We", "also", "make", "the", "label", "for", "the", "anonymous", "class", "and", "add", "it", "to", "the", "label", "hash" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L906-L933
train
251,239
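The variant-locus helper above is mostly string assembly: strip the colons out of both CURIEs, glue them together under a blank-node prefix with a 'VL' suffix, and build a human-readable label from whatever gene and disease labels are on hand. Here is a hedged re-creation of that assembly outside the class; label_hash is a plain dict standing in for the source's attribute, and the sample ids and labels are illustrative.

import re

def make_variant_locus_id(gene_id, disease_id, label_hash):
    """Build a blank-node id and label for 'some variant of <gene> ... associated with <disease>'."""
    alt_locus_id = '_:' + re.sub(r':', '', gene_id) + '-' + re.sub(r':', '', disease_id) + 'VL'
    gene_label = label_hash.get(gene_id)
    disease_label = label_hash.get(disease_id)
    if gene_label:
        alt_label = 'some variant of ' + str(gene_label)
        if disease_label:
            alt_label += ' that is associated with ' + str(disease_label)
    else:
        alt_label = None
    label_hash[alt_locus_id] = alt_label   # remember the label for later graph writes
    return alt_locus_id

labels = {'KEGG-hsa:3643': 'INSR', 'OMIM:262190': 'Donohue syndrome'}
print(make_variant_locus_id('KEGG-hsa:3643', 'OMIM:262190', labels))
# _:KEGG-hsa3643-OMIM262190VL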
monarch-initiative/dipper
dipper/sources/CTD.py
CTD._fetch_disambiguating_assoc
def _fetch_disambiguating_assoc(self): """ For any of the items in the chemical-disease association file that have ambiguous association types we fetch the disambiguated associations using the batch query API, and store these in a file. Elsewhere, we can loop through the file and create the appropriate associations. :return: """ disambig_file = '/'.join( (self.rawdir, self.static_files['publications']['file'])) assoc_file = '/'.join( (self.rawdir, self.files['chemical_disease_interactions']['file'])) # check if there is a local association file, # and download if it's dated later than the original intxn file if os.path.exists(disambig_file): dfile_dt = os.stat(disambig_file) afile_dt = os.stat(assoc_file) if dfile_dt < afile_dt: LOG.info( "Local file date before chem-disease assoc file. " " Downloading...") else: LOG.info( "Local file date after chem-disease assoc file. " " Skipping download.") return all_pubs = set() dual_evidence = re.compile(r'^marker\/mechanism\|therapeutic$') # first get all the unique publications with gzip.open(assoc_file, 'rt') as tsvfile: reader = csv.reader(tsvfile, delimiter="\t") for row in reader: if re.match(r'^#', ' '.join(row)): continue self._check_list_len(row, 10) (chem_name, chem_id, cas_rn, disease_name, disease_id, direct_evidence, inferred_gene_symbol, inference_score, omim_ids, pubmed_ids) = row if direct_evidence == '' or not \ re.match(dual_evidence, direct_evidence): continue if pubmed_ids is not None and pubmed_ids != '': all_pubs.update(set(re.split(r'\|', pubmed_ids))) sorted_pubs = sorted(list(all_pubs)) # now in batches of 4000, we fetch the chemical-disease associations batch_size = 4000 params = { 'inputType': 'reference', 'report': 'diseases_curated', 'format': 'tsv', 'action': 'Download' } url = 'http://ctdbase.org/tools/batchQuery.go?q' start = 0 end = min((batch_size, len(all_pubs))) # get them in batches of 4000 with open(disambig_file, 'wb') as dmbf: while start < len(sorted_pubs): params['inputTerms'] = '|'.join(sorted_pubs[start:end]) # fetch the data from url LOG.info( 'fetching %d (%d-%d) refs: %s', len(re.split(r'\|', params['inputTerms'])), start, end, params['inputTerms']) data = urllib.parse.urlencode(params) encoding = 'utf-8' binary_data = data.encode(encoding) req = urllib.request.Request(url, binary_data) resp = urllib.request.urlopen(req) dmbf.write(resp.read()) start = end end = min((start + batch_size, len(sorted_pubs))) return
python
def _fetch_disambiguating_assoc(self): """ For any of the items in the chemical-disease association file that have ambiguous association types we fetch the disambiguated associations using the batch query API, and store these in a file. Elsewhere, we can loop through the file and create the appropriate associations. :return: """ disambig_file = '/'.join( (self.rawdir, self.static_files['publications']['file'])) assoc_file = '/'.join( (self.rawdir, self.files['chemical_disease_interactions']['file'])) # check if there is a local association file, # and download if it's dated later than the original intxn file if os.path.exists(disambig_file): dfile_dt = os.stat(disambig_file) afile_dt = os.stat(assoc_file) if dfile_dt < afile_dt: LOG.info( "Local file date before chem-disease assoc file. " " Downloading...") else: LOG.info( "Local file date after chem-disease assoc file. " " Skipping download.") return all_pubs = set() dual_evidence = re.compile(r'^marker\/mechanism\|therapeutic$') # first get all the unique publications with gzip.open(assoc_file, 'rt') as tsvfile: reader = csv.reader(tsvfile, delimiter="\t") for row in reader: if re.match(r'^#', ' '.join(row)): continue self._check_list_len(row, 10) (chem_name, chem_id, cas_rn, disease_name, disease_id, direct_evidence, inferred_gene_symbol, inference_score, omim_ids, pubmed_ids) = row if direct_evidence == '' or not \ re.match(dual_evidence, direct_evidence): continue if pubmed_ids is not None and pubmed_ids != '': all_pubs.update(set(re.split(r'\|', pubmed_ids))) sorted_pubs = sorted(list(all_pubs)) # now in batches of 4000, we fetch the chemical-disease associations batch_size = 4000 params = { 'inputType': 'reference', 'report': 'diseases_curated', 'format': 'tsv', 'action': 'Download' } url = 'http://ctdbase.org/tools/batchQuery.go?q' start = 0 end = min((batch_size, len(all_pubs))) # get them in batches of 4000 with open(disambig_file, 'wb') as dmbf: while start < len(sorted_pubs): params['inputTerms'] = '|'.join(sorted_pubs[start:end]) # fetch the data from url LOG.info( 'fetching %d (%d-%d) refs: %s', len(re.split(r'\|', params['inputTerms'])), start, end, params['inputTerms']) data = urllib.parse.urlencode(params) encoding = 'utf-8' binary_data = data.encode(encoding) req = urllib.request.Request(url, binary_data) resp = urllib.request.urlopen(req) dmbf.write(resp.read()) start = end end = min((start + batch_size, len(sorted_pubs))) return
[ "def", "_fetch_disambiguating_assoc", "(", "self", ")", ":", "disambig_file", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "static_files", "[", "'publications'", "]", "[", "'file'", "]", ")", ")", "assoc_file", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'chemical_disease_interactions'", "]", "[", "'file'", "]", ")", ")", "# check if there is a local association file,", "# and download if it's dated later than the original intxn file", "if", "os", ".", "path", ".", "exists", "(", "disambig_file", ")", ":", "dfile_dt", "=", "os", ".", "stat", "(", "disambig_file", ")", "afile_dt", "=", "os", ".", "stat", "(", "assoc_file", ")", "if", "dfile_dt", "<", "afile_dt", ":", "LOG", ".", "info", "(", "\"Local file date before chem-disease assoc file. \"", "\" Downloading...\"", ")", "else", ":", "LOG", ".", "info", "(", "\"Local file date after chem-disease assoc file. \"", "\" Skipping download.\"", ")", "return", "all_pubs", "=", "set", "(", ")", "dual_evidence", "=", "re", ".", "compile", "(", "r'^marker\\/mechanism\\|therapeutic$'", ")", "# first get all the unique publications", "with", "gzip", ".", "open", "(", "assoc_file", ",", "'rt'", ")", "as", "tsvfile", ":", "reader", "=", "csv", ".", "reader", "(", "tsvfile", ",", "delimiter", "=", "\"\\t\"", ")", "for", "row", "in", "reader", ":", "if", "re", ".", "match", "(", "r'^#'", ",", "' '", ".", "join", "(", "row", ")", ")", ":", "continue", "self", ".", "_check_list_len", "(", "row", ",", "10", ")", "(", "chem_name", ",", "chem_id", ",", "cas_rn", ",", "disease_name", ",", "disease_id", ",", "direct_evidence", ",", "inferred_gene_symbol", ",", "inference_score", ",", "omim_ids", ",", "pubmed_ids", ")", "=", "row", "if", "direct_evidence", "==", "''", "or", "not", "re", ".", "match", "(", "dual_evidence", ",", "direct_evidence", ")", ":", "continue", "if", "pubmed_ids", "is", "not", "None", "and", "pubmed_ids", "!=", "''", ":", "all_pubs", ".", "update", "(", "set", "(", "re", ".", "split", "(", "r'\\|'", ",", "pubmed_ids", ")", ")", ")", "sorted_pubs", "=", "sorted", "(", "list", "(", "all_pubs", ")", ")", "# now in batches of 4000, we fetch the chemical-disease associations", "batch_size", "=", "4000", "params", "=", "{", "'inputType'", ":", "'reference'", ",", "'report'", ":", "'diseases_curated'", ",", "'format'", ":", "'tsv'", ",", "'action'", ":", "'Download'", "}", "url", "=", "'http://ctdbase.org/tools/batchQuery.go?q'", "start", "=", "0", "end", "=", "min", "(", "(", "batch_size", ",", "len", "(", "all_pubs", ")", ")", ")", "# get them in batches of 4000", "with", "open", "(", "disambig_file", ",", "'wb'", ")", "as", "dmbf", ":", "while", "start", "<", "len", "(", "sorted_pubs", ")", ":", "params", "[", "'inputTerms'", "]", "=", "'|'", ".", "join", "(", "sorted_pubs", "[", "start", ":", "end", "]", ")", "# fetch the data from url", "LOG", ".", "info", "(", "'fetching %d (%d-%d) refs: %s'", ",", "len", "(", "re", ".", "split", "(", "r'\\|'", ",", "params", "[", "'inputTerms'", "]", ")", ")", ",", "start", ",", "end", ",", "params", "[", "'inputTerms'", "]", ")", "data", "=", "urllib", ".", "parse", ".", "urlencode", "(", "params", ")", "encoding", "=", "'utf-8'", "binary_data", "=", "data", ".", "encode", "(", "encoding", ")", "req", "=", "urllib", ".", "request", ".", "Request", "(", "url", ",", "binary_data", ")", "resp", "=", "urllib", ".", "request", ".", "urlopen", "(", "req", ")", "dmbf", ".", "write", "(", "resp", ".", "read", "(", ")", ")", "start", 
"=", "end", "end", "=", "min", "(", "(", "start", "+", "batch_size", ",", "len", "(", "sorted_pubs", ")", ")", ")", "return" ]
For any of the items in the chemical-disease association file that have ambiguous association types we fetch the disambiguated associations using the batch query API, and store these in a file. Elsewhere, we can loop through the file and create the appropriate associations. :return:
[ "For", "any", "of", "the", "items", "in", "the", "chemical", "-", "disease", "association", "file", "that", "have", "ambiguous", "association", "types", "we", "fetch", "the", "disambiguated", "associations", "using", "the", "batch", "query", "API", "and", "store", "these", "in", "a", "file", ".", "Elsewhere", "we", "can", "loop", "through", "the", "file", "and", "create", "the", "appropriate", "associations", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/CTD.py#L243-L323
train
251,240
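The heart of the disambiguation fetch above is the batch loop: sort the accumulated PubMed ids, then POST them to the batch-query endpoint 4000 at a time, sliding the start/end window forward until the list is exhausted. The windowing is the part that is easy to get wrong, so here is a stripped-down sketch of just that pagination with the network call left out; the helper name and the toy ids are placeholders, not the CTD API.

def batched(items, batch_size=4000):
    """Yield successive sorted slices of at most batch_size items."""
    ordered = sorted(items)
    start = 0
    while start < len(ordered):
        end = min(start + batch_size, len(ordered))
        yield ordered[start:end]
        start = end   # advance the window exactly as the source loop does

# usage: 9 fake ids in windows of 4 -> chunk sizes 4, 4, 1
fake_pubs = {str(n) for n in range(100, 109)}
print([len(chunk) for chunk in batched(fake_pubs, batch_size=4)])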
monarch-initiative/dipper
dipper/sources/CTD.py
CTD._make_association
def _make_association(self, subject_id, object_id, rel_id, pubmed_ids): """ Make a reified association given an array of pubmed identifiers. Args: :param subject_id id of the subject of the association (gene/chem) :param object_id id of the object of the association (disease) :param rel_id relationship id :param pubmed_ids an array of pubmed identifiers Returns: :return None """ # TODO pass in the relevant Assoc class rather than relying on G2P assoc = G2PAssoc(self.graph, self.name, subject_id, object_id, rel_id) if pubmed_ids is not None and len(pubmed_ids) > 0: for pmid in pubmed_ids: ref = Reference( self.graph, pmid, self.globaltt['journal article']) ref.addRefToGraph() assoc.add_source(pmid) assoc.add_evidence(self.globaltt['traceable author statement']) assoc.add_association_to_graph() return
python
def _make_association(self, subject_id, object_id, rel_id, pubmed_ids): """ Make a reified association given an array of pubmed identifiers. Args: :param subject_id id of the subject of the association (gene/chem) :param object_id id of the object of the association (disease) :param rel_id relationship id :param pubmed_ids an array of pubmed identifiers Returns: :return None """ # TODO pass in the relevant Assoc class rather than relying on G2P assoc = G2PAssoc(self.graph, self.name, subject_id, object_id, rel_id) if pubmed_ids is not None and len(pubmed_ids) > 0: for pmid in pubmed_ids: ref = Reference( self.graph, pmid, self.globaltt['journal article']) ref.addRefToGraph() assoc.add_source(pmid) assoc.add_evidence(self.globaltt['traceable author statement']) assoc.add_association_to_graph() return
[ "def", "_make_association", "(", "self", ",", "subject_id", ",", "object_id", ",", "rel_id", ",", "pubmed_ids", ")", ":", "# TODO pass in the relevant Assoc class rather than relying on G2P", "assoc", "=", "G2PAssoc", "(", "self", ".", "graph", ",", "self", ".", "name", ",", "subject_id", ",", "object_id", ",", "rel_id", ")", "if", "pubmed_ids", "is", "not", "None", "and", "len", "(", "pubmed_ids", ")", ">", "0", ":", "for", "pmid", "in", "pubmed_ids", ":", "ref", "=", "Reference", "(", "self", ".", "graph", ",", "pmid", ",", "self", ".", "globaltt", "[", "'journal article'", "]", ")", "ref", ".", "addRefToGraph", "(", ")", "assoc", ".", "add_source", "(", "pmid", ")", "assoc", ".", "add_evidence", "(", "self", ".", "globaltt", "[", "'traceable author statement'", "]", ")", "assoc", ".", "add_association_to_graph", "(", ")", "return" ]
Make a reified association given an array of pubmed identifiers. Args: :param subject_id id of the subject of the association (gene/chem) :param object_id id of the object of the association (disease) :param rel_id relationship id :param pubmed_ids an array of pubmed identifiers Returns: :return None
[ "Make", "a", "reified", "association", "given", "an", "array", "of", "pubmed", "identifiers", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/CTD.py#L485-L510
train
251,241
monarch-initiative/dipper
dipper/sources/Bgee.py
Bgee.checkIfRemoteIsNewer
def checkIfRemoteIsNewer(self, localfile, remote_size, remote_modify): """ Overrides checkIfRemoteIsNewer in Source class :param localfile: str file path :param remote_size: str bytes :param remote_modify: str last modify date in the form 20160705042714 :return: boolean True if remote file is newer else False """ is_remote_newer = False status = os.stat(localfile) LOG.info( "\nLocal file size: %i" "\nLocal Timestamp: %s", status[ST_SIZE], datetime.fromtimestamp(status.st_mtime)) remote_dt = Bgee._convert_ftp_time_to_iso(remote_modify) if remote_dt != datetime.fromtimestamp(status.st_mtime) or \ status[ST_SIZE] != int(remote_size): is_remote_newer = True LOG.info( "Object on server is has different size %i and/or date %s", remote_size, remote_dt) return is_remote_newer
python
def checkIfRemoteIsNewer(self, localfile, remote_size, remote_modify): """ Overrides checkIfRemoteIsNewer in Source class :param localfile: str file path :param remote_size: str bytes :param remote_modify: str last modify date in the form 20160705042714 :return: boolean True if remote file is newer else False """ is_remote_newer = False status = os.stat(localfile) LOG.info( "\nLocal file size: %i" "\nLocal Timestamp: %s", status[ST_SIZE], datetime.fromtimestamp(status.st_mtime)) remote_dt = Bgee._convert_ftp_time_to_iso(remote_modify) if remote_dt != datetime.fromtimestamp(status.st_mtime) or \ status[ST_SIZE] != int(remote_size): is_remote_newer = True LOG.info( "Object on server is has different size %i and/or date %s", remote_size, remote_dt) return is_remote_newer
[ "def", "checkIfRemoteIsNewer", "(", "self", ",", "localfile", ",", "remote_size", ",", "remote_modify", ")", ":", "is_remote_newer", "=", "False", "status", "=", "os", ".", "stat", "(", "localfile", ")", "LOG", ".", "info", "(", "\"\\nLocal file size: %i\"", "\"\\nLocal Timestamp: %s\"", ",", "status", "[", "ST_SIZE", "]", ",", "datetime", ".", "fromtimestamp", "(", "status", ".", "st_mtime", ")", ")", "remote_dt", "=", "Bgee", ".", "_convert_ftp_time_to_iso", "(", "remote_modify", ")", "if", "remote_dt", "!=", "datetime", ".", "fromtimestamp", "(", "status", ".", "st_mtime", ")", "or", "status", "[", "ST_SIZE", "]", "!=", "int", "(", "remote_size", ")", ":", "is_remote_newer", "=", "True", "LOG", ".", "info", "(", "\"Object on server is has different size %i and/or date %s\"", ",", "remote_size", ",", "remote_dt", ")", "return", "is_remote_newer" ]
Overrides checkIfRemoteIsNewer in Source class :param localfile: str file path :param remote_size: str bytes :param remote_modify: str last modify date in the form 20160705042714 :return: boolean True if remote file is newer else False
[ "Overrides", "checkIfRemoteIsNewer", "in", "Source", "class" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Bgee.py#L232-L256
train
251,242
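The freshness test in the Bgee record boils down to two comparisons against os.stat: does the remote byte count differ from the local size, or does the remote modify time differ from the local mtime. A simplified standalone version follows; in the source the remote values come from the FTP listing, here they are just parameters, and the example path is hypothetical.

import os
from datetime import datetime

def remote_is_newer(localfile, remote_size, remote_modified):
    """True when the remote copy differs in size or timestamp from the local file."""
    status = os.stat(localfile)
    local_modified = datetime.fromtimestamp(status.st_mtime)
    return int(remote_size) != status.st_size or remote_modified != local_modified

# usage (illustrative values):
# if remote_is_newer('raw/bgee/expr_calls.tsv.gz', '123456', datetime(2016, 7, 5, 4, 27, 14)):
#     print('re-downloading')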
monarch-initiative/dipper
dipper/sources/Bgee.py
Bgee._convert_ftp_time_to_iso
def _convert_ftp_time_to_iso(ftp_time): """ Convert datetime in the format 20160705042714 to a datetime object :return: datetime object """ date_time = datetime( int(ftp_time[:4]), int(ftp_time[4:6]), int(ftp_time[6:8]), int(ftp_time[8:10]), int(ftp_time[10:12]), int(ftp_time[12:14])) return date_time
python
def _convert_ftp_time_to_iso(ftp_time): """ Convert datetime in the format 20160705042714 to a datetime object :return: datetime object """ date_time = datetime( int(ftp_time[:4]), int(ftp_time[4:6]), int(ftp_time[6:8]), int(ftp_time[8:10]), int(ftp_time[10:12]), int(ftp_time[12:14])) return date_time
[ "def", "_convert_ftp_time_to_iso", "(", "ftp_time", ")", ":", "date_time", "=", "datetime", "(", "int", "(", "ftp_time", "[", ":", "4", "]", ")", ",", "int", "(", "ftp_time", "[", "4", ":", "6", "]", ")", ",", "int", "(", "ftp_time", "[", "6", ":", "8", "]", ")", ",", "int", "(", "ftp_time", "[", "8", ":", "10", "]", ")", ",", "int", "(", "ftp_time", "[", "10", ":", "12", "]", ")", ",", "int", "(", "ftp_time", "[", "12", ":", "14", "]", ")", ")", "return", "date_time" ]
Convert datetime in the format 20160705042714 to a datetime object :return: datetime object
[ "Convert", "datetime", "in", "the", "format", "20160705042714", "to", "a", "datetime", "object" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Bgee.py#L259-L268
train
251,243
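The 14-digit string the FTP server reports (e.g. 20160705042714) is just YYYYMMDDHHMMSS run together, which the record above slices by hand into the datetime constructor. An equivalent and arguably clearer conversion uses strptime; this is offered as an alternative sketch, not the source's implementation.

from datetime import datetime

def parse_mdtm(ftp_time):
    """Convert an FTP MDTM-style stamp like '20160705042714' to a datetime."""
    return datetime.strptime(ftp_time, '%Y%m%d%H%M%S')

print(parse_mdtm('20160705042714'))   # 2016-07-05 04:27:14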
monarch-initiative/dipper
dipper/sources/EOM.py
EOM.fetch
def fetch(self, is_dl_forced=False): '''connection details for DISCO''' cxn = {} cxn['host'] = 'nif-db.crbs.ucsd.edu' cxn['database'] = 'disco_crawler' cxn['port'] = '5432' cxn['user'] = config.get_config()['user']['disco'] cxn['password'] = config.get_config()['keys'][cxn['user']] self.dataset.setFileAccessUrl( 'jdbc:postgresql://'+cxn['host']+':'+cxn['port']+'/'+cxn['database'], is_object_literal=True) # process the tables # self.fetch_from_pgdb(self.tables,cxn,100) #for testing self.fetch_from_pgdb(self.tables, cxn) self.get_files(is_dl_forced) # FIXME: Everything needed for data provenance? fstat = os.stat('/'.join((self.rawdir, 'dvp.pr_nlx_157874_1'))) filedate = datetime.utcfromtimestamp(fstat[ST_CTIME]).strftime("%Y-%m-%d") self.dataset.setVersion(filedate) return
python
def fetch(self, is_dl_forced=False): '''connection details for DISCO''' cxn = {} cxn['host'] = 'nif-db.crbs.ucsd.edu' cxn['database'] = 'disco_crawler' cxn['port'] = '5432' cxn['user'] = config.get_config()['user']['disco'] cxn['password'] = config.get_config()['keys'][cxn['user']] self.dataset.setFileAccessUrl( 'jdbc:postgresql://'+cxn['host']+':'+cxn['port']+'/'+cxn['database'], is_object_literal=True) # process the tables # self.fetch_from_pgdb(self.tables,cxn,100) #for testing self.fetch_from_pgdb(self.tables, cxn) self.get_files(is_dl_forced) # FIXME: Everything needed for data provenance? fstat = os.stat('/'.join((self.rawdir, 'dvp.pr_nlx_157874_1'))) filedate = datetime.utcfromtimestamp(fstat[ST_CTIME]).strftime("%Y-%m-%d") self.dataset.setVersion(filedate) return
[ "def", "fetch", "(", "self", ",", "is_dl_forced", "=", "False", ")", ":", "cxn", "=", "{", "}", "cxn", "[", "'host'", "]", "=", "'nif-db.crbs.ucsd.edu'", "cxn", "[", "'database'", "]", "=", "'disco_crawler'", "cxn", "[", "'port'", "]", "=", "'5432'", "cxn", "[", "'user'", "]", "=", "config", ".", "get_config", "(", ")", "[", "'user'", "]", "[", "'disco'", "]", "cxn", "[", "'password'", "]", "=", "config", ".", "get_config", "(", ")", "[", "'keys'", "]", "[", "cxn", "[", "'user'", "]", "]", "self", ".", "dataset", ".", "setFileAccessUrl", "(", "'jdbc:postgresql://'", "+", "cxn", "[", "'host'", "]", "+", "':'", "+", "cxn", "[", "'port'", "]", "+", "'/'", "+", "cxn", "[", "'database'", "]", ",", "is_object_literal", "=", "True", ")", "# process the tables", "# self.fetch_from_pgdb(self.tables,cxn,100) #for testing", "self", ".", "fetch_from_pgdb", "(", "self", ".", "tables", ",", "cxn", ")", "self", ".", "get_files", "(", "is_dl_forced", ")", "# FIXME: Everything needed for data provenance?", "fstat", "=", "os", ".", "stat", "(", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'dvp.pr_nlx_157874_1'", ")", ")", ")", "filedate", "=", "datetime", ".", "utcfromtimestamp", "(", "fstat", "[", "ST_CTIME", "]", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "self", ".", "dataset", ".", "setVersion", "(", "filedate", ")", "return" ]
connection details for DISCO
[ "connection", "details", "for", "DISCO" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EOM.py#L63-L87
train
251,244
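Two small things in the EOM fetch are worth showing outside the class: the JDBC-style access URL recorded for provenance is plain string concatenation over the connection dict, and the dataset "version" is simply the creation date of one downloaded table formatted as YYYY-MM-DD. A hedged sketch of both; the host, database, and table path are taken from the record, while the helper names are made up.

import os
from datetime import datetime
from stat import ST_CTIME

def jdbc_url(cxn):
    """Assemble the postgres JDBC access URL used as dataset provenance."""
    return 'jdbc:postgresql://' + cxn['host'] + ':' + cxn['port'] + '/' + cxn['database']

def version_from_file(path):
    """Derive a YYYY-MM-DD dataset version from a downloaded table's ctime."""
    fstat = os.stat(path)
    return datetime.utcfromtimestamp(fstat[ST_CTIME]).strftime("%Y-%m-%d")

# usage (illustrative):
# print(jdbc_url({'host': 'nif-db.crbs.ucsd.edu', 'port': '5432', 'database': 'disco_crawler'}))
# print(version_from_file('raw/eom/dvp.pr_nlx_157874_1'))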
monarch-initiative/dipper
dipper/sources/EOM.py
EOM.parse
def parse(self, limit=None): ''' Over ride Source.parse inherited via PostgreSQLSource ''' if limit is not None: LOG.info("Only parsing first %s rows of each file", limit) if self.test_only: self.test_mode = True LOG.info("Parsing files...") self._process_nlx_157874_1_view( '/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')), limit) self._map_eom_terms( '/'.join((self.rawdir, self.files['map']['file'])), limit) LOG.info("Finished parsing.") # since it's so small, # we default to copying the entire graph to the test set self.testgraph = self.graph return
python
def parse(self, limit=None): ''' Over ride Source.parse inherited via PostgreSQLSource ''' if limit is not None: LOG.info("Only parsing first %s rows of each file", limit) if self.test_only: self.test_mode = True LOG.info("Parsing files...") self._process_nlx_157874_1_view( '/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')), limit) self._map_eom_terms( '/'.join((self.rawdir, self.files['map']['file'])), limit) LOG.info("Finished parsing.") # since it's so small, # we default to copying the entire graph to the test set self.testgraph = self.graph return
[ "def", "parse", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "limit", "is", "not", "None", ":", "LOG", ".", "info", "(", "\"Only parsing first %s rows of each file\"", ",", "limit", ")", "if", "self", ".", "test_only", ":", "self", ".", "test_mode", "=", "True", "LOG", ".", "info", "(", "\"Parsing files...\"", ")", "self", ".", "_process_nlx_157874_1_view", "(", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'dvp.pr_nlx_157874_1'", ")", ")", ",", "limit", ")", "self", ".", "_map_eom_terms", "(", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'map'", "]", "[", "'file'", "]", ")", ")", ",", "limit", ")", "LOG", ".", "info", "(", "\"Finished parsing.\"", ")", "# since it's so small,", "# we default to copying the entire graph to the test set", "self", ".", "testgraph", "=", "self", ".", "graph", "return" ]
Over ride Source.parse inherited via PostgreSQLSource
[ "Over", "ride", "Source", ".", "parse", "inherited", "via", "PostgreSQLSource" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EOM.py#L89-L113
train
251,245
monarch-initiative/dipper
dipper/sources/MGI.py
MGI._process_gxd_genotype_view
def _process_gxd_genotype_view(self, limit=None): """ This table indicates the relationship between a genotype and it's background strain. It leverages the Genotype class methods to do this. Makes these triples: <MGI:genotypeid> GENO:has_reference_part <MGI:strainid> <MGI:strainid> a GENO:genomic_background If the genotype id isn't in the hashmap, it adds it here (but this shouldn't happen): <MGI:genotypeid> a GENO:genotype If the strain isn't in the hashmap, it also adds it here with a monarchized identifier using the unique key of the strain, formatted like: :_mgistrainkey12345 :param limit: :return: """ line_counter = 0 if self.test_mode: graph = self.testgraph else: graph = self.graph geno = Genotype(graph) model = Model(graph) raw = '/'.join((self.rawdir, 'gxd_genotype_view')) LOG.info("getting genotypes and their backgrounds") with open(raw, 'r') as f1: f1.readline() # read the header row; skip for line in f1: line = line.rstrip("\n") line_counter += 1 (genotype_key, strain_key, strain, mgiid) = line.split('\t') if self.test_mode is True: if int(genotype_key) not in self.test_keys.get('genotype'): continue if self.idhash['genotype'].get(genotype_key) is None: # just in case we haven't seen it before, # catch and add the id mapping here self.idhash['genotype'][genotype_key] = mgiid geno.addGenotype(mgiid, None) # the label is elsewhere... # need to add the MGI label as a synonym # if it's in the hash, # assume that the individual was created elsewhere strain_id = self.idhash['strain'].get(strain_key) background_type = self.globaltt['genomic_background'] if strain_id is None or int(strain_key) < 0: if strain_id is None: # some of the strains don't have public identifiers! # so we make one up, and add it to the hash strain_id = self._makeInternalIdentifier('strain', strain_key) self.idhash['strain'].update({strain_key: strain_id}) model.addComment(strain_id, "strain_key:" + strain_key) elif int(strain_key) < 0: # these are ones that are unidentified/unknown. # so add instances of each. strain_id = self._makeInternalIdentifier( 'strain', re.sub(r':', '', str(strain_id))) strain_id += re.sub(r':', '', str(mgiid)) strain_id = re.sub(r'^_', '_:', strain_id) strain_id = re.sub(r'::', ':', strain_id) model.addDescription( strain_id, "This genomic background is unknown. " + "This is a placeholder background for " + mgiid + ".") background_type = self.globaltt[ 'unspecified_genomic_background'] # add it back to the idhash LOG.info( "adding background as internal id: %s %s: %s", strain_key, strain, strain_id) geno.addGenomicBackgroundToGenotype( strain_id, mgiid, background_type) self.label_hash[strain_id] = strain # add BG to a hash so we can build the genotype label later self.geno_bkgd[mgiid] = strain_id if not self.test_mode and limit is not None and line_counter > limit: break return
python
def _process_gxd_genotype_view(self, limit=None): """ This table indicates the relationship between a genotype and it's background strain. It leverages the Genotype class methods to do this. Makes these triples: <MGI:genotypeid> GENO:has_reference_part <MGI:strainid> <MGI:strainid> a GENO:genomic_background If the genotype id isn't in the hashmap, it adds it here (but this shouldn't happen): <MGI:genotypeid> a GENO:genotype If the strain isn't in the hashmap, it also adds it here with a monarchized identifier using the unique key of the strain, formatted like: :_mgistrainkey12345 :param limit: :return: """ line_counter = 0 if self.test_mode: graph = self.testgraph else: graph = self.graph geno = Genotype(graph) model = Model(graph) raw = '/'.join((self.rawdir, 'gxd_genotype_view')) LOG.info("getting genotypes and their backgrounds") with open(raw, 'r') as f1: f1.readline() # read the header row; skip for line in f1: line = line.rstrip("\n") line_counter += 1 (genotype_key, strain_key, strain, mgiid) = line.split('\t') if self.test_mode is True: if int(genotype_key) not in self.test_keys.get('genotype'): continue if self.idhash['genotype'].get(genotype_key) is None: # just in case we haven't seen it before, # catch and add the id mapping here self.idhash['genotype'][genotype_key] = mgiid geno.addGenotype(mgiid, None) # the label is elsewhere... # need to add the MGI label as a synonym # if it's in the hash, # assume that the individual was created elsewhere strain_id = self.idhash['strain'].get(strain_key) background_type = self.globaltt['genomic_background'] if strain_id is None or int(strain_key) < 0: if strain_id is None: # some of the strains don't have public identifiers! # so we make one up, and add it to the hash strain_id = self._makeInternalIdentifier('strain', strain_key) self.idhash['strain'].update({strain_key: strain_id}) model.addComment(strain_id, "strain_key:" + strain_key) elif int(strain_key) < 0: # these are ones that are unidentified/unknown. # so add instances of each. strain_id = self._makeInternalIdentifier( 'strain', re.sub(r':', '', str(strain_id))) strain_id += re.sub(r':', '', str(mgiid)) strain_id = re.sub(r'^_', '_:', strain_id) strain_id = re.sub(r'::', ':', strain_id) model.addDescription( strain_id, "This genomic background is unknown. " + "This is a placeholder background for " + mgiid + ".") background_type = self.globaltt[ 'unspecified_genomic_background'] # add it back to the idhash LOG.info( "adding background as internal id: %s %s: %s", strain_key, strain, strain_id) geno.addGenomicBackgroundToGenotype( strain_id, mgiid, background_type) self.label_hash[strain_id] = strain # add BG to a hash so we can build the genotype label later self.geno_bkgd[mgiid] = strain_id if not self.test_mode and limit is not None and line_counter > limit: break return
[ "def", "_process_gxd_genotype_view", "(", "self", ",", "limit", "=", "None", ")", ":", "line_counter", "=", "0", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "geno", "=", "Genotype", "(", "graph", ")", "model", "=", "Model", "(", "graph", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'gxd_genotype_view'", ")", ")", "LOG", ".", "info", "(", "\"getting genotypes and their backgrounds\"", ")", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f1", ":", "f1", ".", "readline", "(", ")", "# read the header row; skip", "for", "line", "in", "f1", ":", "line", "=", "line", ".", "rstrip", "(", "\"\\n\"", ")", "line_counter", "+=", "1", "(", "genotype_key", ",", "strain_key", ",", "strain", ",", "mgiid", ")", "=", "line", ".", "split", "(", "'\\t'", ")", "if", "self", ".", "test_mode", "is", "True", ":", "if", "int", "(", "genotype_key", ")", "not", "in", "self", ".", "test_keys", ".", "get", "(", "'genotype'", ")", ":", "continue", "if", "self", ".", "idhash", "[", "'genotype'", "]", ".", "get", "(", "genotype_key", ")", "is", "None", ":", "# just in case we haven't seen it before,", "# catch and add the id mapping here", "self", ".", "idhash", "[", "'genotype'", "]", "[", "genotype_key", "]", "=", "mgiid", "geno", ".", "addGenotype", "(", "mgiid", ",", "None", ")", "# the label is elsewhere...", "# need to add the MGI label as a synonym", "# if it's in the hash,", "# assume that the individual was created elsewhere", "strain_id", "=", "self", ".", "idhash", "[", "'strain'", "]", ".", "get", "(", "strain_key", ")", "background_type", "=", "self", ".", "globaltt", "[", "'genomic_background'", "]", "if", "strain_id", "is", "None", "or", "int", "(", "strain_key", ")", "<", "0", ":", "if", "strain_id", "is", "None", ":", "# some of the strains don't have public identifiers!", "# so we make one up, and add it to the hash", "strain_id", "=", "self", ".", "_makeInternalIdentifier", "(", "'strain'", ",", "strain_key", ")", "self", ".", "idhash", "[", "'strain'", "]", ".", "update", "(", "{", "strain_key", ":", "strain_id", "}", ")", "model", ".", "addComment", "(", "strain_id", ",", "\"strain_key:\"", "+", "strain_key", ")", "elif", "int", "(", "strain_key", ")", "<", "0", ":", "# these are ones that are unidentified/unknown.", "# so add instances of each.", "strain_id", "=", "self", ".", "_makeInternalIdentifier", "(", "'strain'", ",", "re", ".", "sub", "(", "r':'", ",", "''", ",", "str", "(", "strain_id", ")", ")", ")", "strain_id", "+=", "re", ".", "sub", "(", "r':'", ",", "''", ",", "str", "(", "mgiid", ")", ")", "strain_id", "=", "re", ".", "sub", "(", "r'^_'", ",", "'_:'", ",", "strain_id", ")", "strain_id", "=", "re", ".", "sub", "(", "r'::'", ",", "':'", ",", "strain_id", ")", "model", ".", "addDescription", "(", "strain_id", ",", "\"This genomic background is unknown. 
\"", "+", "\"This is a placeholder background for \"", "+", "mgiid", "+", "\".\"", ")", "background_type", "=", "self", ".", "globaltt", "[", "'unspecified_genomic_background'", "]", "# add it back to the idhash", "LOG", ".", "info", "(", "\"adding background as internal id: %s %s: %s\"", ",", "strain_key", ",", "strain", ",", "strain_id", ")", "geno", ".", "addGenomicBackgroundToGenotype", "(", "strain_id", ",", "mgiid", ",", "background_type", ")", "self", ".", "label_hash", "[", "strain_id", "]", "=", "strain", "# add BG to a hash so we can build the genotype label later", "self", ".", "geno_bkgd", "[", "mgiid", "]", "=", "strain_id", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "return" ]
This table indicates the relationship between a genotype and it's background strain. It leverages the Genotype class methods to do this. Makes these triples: <MGI:genotypeid> GENO:has_reference_part <MGI:strainid> <MGI:strainid> a GENO:genomic_background If the genotype id isn't in the hashmap, it adds it here (but this shouldn't happen): <MGI:genotypeid> a GENO:genotype If the strain isn't in the hashmap, it also adds it here with a monarchized identifier using the unique key of the strain, formatted like: :_mgistrainkey12345 :param limit: :return:
[ "This", "table", "indicates", "the", "relationship", "between", "a", "genotype", "and", "it", "s", "background", "strain", ".", "It", "leverages", "the", "Genotype", "class", "methods", "to", "do", "this", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MGI.py#L335-L430
train
251,246
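The MGI._process_gxd_genotype_view record above builds a blank-node style internal identifier for unknown background strains through a chain of re.sub calls. A minimal standalone sketch of just that regex chain, with an invented internal strain id and genotype curie (both values are hypothetical, and the _makeInternalIdentifier step is omitted):

import re

strain_id = '_mgistrainkey-1'    # hypothetical internal id for an unnamed strain
mgiid = 'MGI:2166359'            # hypothetical genotype curie

strain_id = re.sub(r':', '', str(strain_id))   # drop any colons from the internal id
strain_id += re.sub(r':', '', str(mgiid))      # append the colon-less genotype id
strain_id = re.sub(r'^_', '_:', strain_id)     # turn the leading underscore into a bnode prefix
strain_id = re.sub(r'::', ':', strain_id)      # collapse any accidental double colon
# strain_id is now '_:mgistrainkey-1MGI2166359'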
monarch-initiative/dipper
dipper/sources/MGI.py
MGI._process_gxd_genotype_summary_view
def _process_gxd_genotype_summary_view(self, limit=None): """ Add the genotype internal id to mgiid mapping to the idhashmap. Also, add them as individuals to the graph. We re-format the label to put the background strain in brackets after the gvc. We must pass through the file once to get the ids and aggregate the vslcs into a hashmap into the genotype Triples created: <genotype id> a GENO:intrinsic_genotype <genotype id> rdfs:label "<gvc> [bkgd]" :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 geno_hash = {} raw = '/'.join((self.rawdir, 'gxd_genotype_summary_view')) LOG.info("building labels for genotypes") with open(raw, 'r') as f: f.readline() # read the header row; skip for line in f: line = line.rstrip("\n") line_counter += 1 (object_key, preferred, mgiid, subtype, short_description) = line.split('\t') if self.test_mode is True: if int(object_key) not in self.test_keys.get('genotype'): continue # add the internal genotype to mgi mapping self.idhash['genotype'][object_key] = mgiid if preferred == '1': d = re.sub(r'\,', '/', short_description.strip()) if mgiid not in geno_hash: geno_hash[mgiid] = {'vslcs': [d], 'subtype': subtype, 'key': object_key} else: vslcs = geno_hash[mgiid].get('vslcs') vslcs.append(d) else: pass # TODO what to do with != preferred if not self.test_mode and limit is not None and line_counter > limit: break # now, loop through the hash and add the genotypes as individuals # we add the mgi genotype as a synonym # (we generate our own label later) geno = Genotype(graph) for gt in geno_hash: genotype = geno_hash.get(gt) gvc = sorted(genotype.get('vslcs')) label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']' geno.addGenotype(gt, None) model.addComment(gt, self._makeInternalIdentifier( 'genotype', genotype.get('key'))) model.addSynonym(gt, label.strip()) return
python
def _process_gxd_genotype_summary_view(self, limit=None): """ Add the genotype internal id to mgiid mapping to the idhashmap. Also, add them as individuals to the graph. We re-format the label to put the background strain in brackets after the gvc. We must pass through the file once to get the ids and aggregate the vslcs into a hashmap into the genotype Triples created: <genotype id> a GENO:intrinsic_genotype <genotype id> rdfs:label "<gvc> [bkgd]" :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 geno_hash = {} raw = '/'.join((self.rawdir, 'gxd_genotype_summary_view')) LOG.info("building labels for genotypes") with open(raw, 'r') as f: f.readline() # read the header row; skip for line in f: line = line.rstrip("\n") line_counter += 1 (object_key, preferred, mgiid, subtype, short_description) = line.split('\t') if self.test_mode is True: if int(object_key) not in self.test_keys.get('genotype'): continue # add the internal genotype to mgi mapping self.idhash['genotype'][object_key] = mgiid if preferred == '1': d = re.sub(r'\,', '/', short_description.strip()) if mgiid not in geno_hash: geno_hash[mgiid] = {'vslcs': [d], 'subtype': subtype, 'key': object_key} else: vslcs = geno_hash[mgiid].get('vslcs') vslcs.append(d) else: pass # TODO what to do with != preferred if not self.test_mode and limit is not None and line_counter > limit: break # now, loop through the hash and add the genotypes as individuals # we add the mgi genotype as a synonym # (we generate our own label later) geno = Genotype(graph) for gt in geno_hash: genotype = geno_hash.get(gt) gvc = sorted(genotype.get('vslcs')) label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']' geno.addGenotype(gt, None) model.addComment(gt, self._makeInternalIdentifier( 'genotype', genotype.get('key'))) model.addSynonym(gt, label.strip()) return
[ "def", "_process_gxd_genotype_summary_view", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "geno_hash", "=", "{", "}", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'gxd_genotype_summary_view'", ")", ")", "LOG", ".", "info", "(", "\"building labels for genotypes\"", ")", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f", ":", "f", ".", "readline", "(", ")", "# read the header row; skip", "for", "line", "in", "f", ":", "line", "=", "line", ".", "rstrip", "(", "\"\\n\"", ")", "line_counter", "+=", "1", "(", "object_key", ",", "preferred", ",", "mgiid", ",", "subtype", ",", "short_description", ")", "=", "line", ".", "split", "(", "'\\t'", ")", "if", "self", ".", "test_mode", "is", "True", ":", "if", "int", "(", "object_key", ")", "not", "in", "self", ".", "test_keys", ".", "get", "(", "'genotype'", ")", ":", "continue", "# add the internal genotype to mgi mapping", "self", ".", "idhash", "[", "'genotype'", "]", "[", "object_key", "]", "=", "mgiid", "if", "preferred", "==", "'1'", ":", "d", "=", "re", ".", "sub", "(", "r'\\,'", ",", "'/'", ",", "short_description", ".", "strip", "(", ")", ")", "if", "mgiid", "not", "in", "geno_hash", ":", "geno_hash", "[", "mgiid", "]", "=", "{", "'vslcs'", ":", "[", "d", "]", ",", "'subtype'", ":", "subtype", ",", "'key'", ":", "object_key", "}", "else", ":", "vslcs", "=", "geno_hash", "[", "mgiid", "]", ".", "get", "(", "'vslcs'", ")", "vslcs", ".", "append", "(", "d", ")", "else", ":", "pass", "# TODO what to do with != preferred", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "# now, loop through the hash and add the genotypes as individuals", "# we add the mgi genotype as a synonym", "# (we generate our own label later)", "geno", "=", "Genotype", "(", "graph", ")", "for", "gt", "in", "geno_hash", ":", "genotype", "=", "geno_hash", ".", "get", "(", "gt", ")", "gvc", "=", "sorted", "(", "genotype", ".", "get", "(", "'vslcs'", ")", ")", "label", "=", "'; '", ".", "join", "(", "gvc", ")", "+", "' ['", "+", "genotype", ".", "get", "(", "'subtype'", ")", "+", "']'", "geno", ".", "addGenotype", "(", "gt", ",", "None", ")", "model", ".", "addComment", "(", "gt", ",", "self", ".", "_makeInternalIdentifier", "(", "'genotype'", ",", "genotype", ".", "get", "(", "'key'", ")", ")", ")", "model", ".", "addSynonym", "(", "gt", ",", "label", ".", "strip", "(", ")", ")", "return" ]
Add the genotype internal id to mgiid mapping to the idhashmap. Also, add them as individuals to the graph. We re-format the label to put the background strain in brackets after the gvc. We must pass through the file once to get the ids and aggregate the vslcs into a hashmap into the genotype Triples created: <genotype id> a GENO:intrinsic_genotype <genotype id> rdfs:label "<gvc> [bkgd]" :param limit: :return:
[ "Add", "the", "genotype", "internal", "id", "to", "mgiid", "mapping", "to", "the", "idhashmap", ".", "Also", "add", "them", "as", "individuals", "to", "the", "graph", ".", "We", "re", "-", "format", "the", "label", "to", "put", "the", "background", "strain", "in", "brackets", "after", "the", "gvc", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MGI.py#L432-L503
train
251,247
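For the MGI._process_gxd_genotype_summary_view record above, a short hedged illustration of how the genotype label is assembled from the aggregated vslc strings and subtype; the vslc descriptions and background name here are invented for the example, not real MGI data:

geno = {'vslcs': ['Trp53<tm1Tyj>/Trp53<tm1Tyj>', 'Fgf9<Eks>/Fgf9<+>'],
        'subtype': 'C57BL/6'}                      # invented aggregated entry
gvc = sorted(geno['vslcs'])
label = '; '.join(gvc) + ' [' + geno['subtype'] + ']'
# label == 'Fgf9<Eks>/Fgf9<+>; Trp53<tm1Tyj>/Trp53<tm1Tyj> [C57BL/6]'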
monarch-initiative/dipper
dipper/sources/MGI.py
MGI.process_mgi_relationship_transgene_genes
def process_mgi_relationship_transgene_genes(self, limit=None): """ Here, we have the relationship between MGI transgene alleles, and the non-mouse gene ids that are part of them. We augment the allele with the transgene parts. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph LOG.info("getting transgene genes") raw = '/'.join((self.rawdir, 'mgi_relationship_transgene_genes')) geno = Genotype(graph) col = [ 'rel_key', 'allele_key', 'allele_id', 'allele_label', 'category_key', 'category_name', 'property_key', 'property_name', 'gene_num' ] with open(raw, 'r', encoding="utf8") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') header = next(filereader) if header != col: LOG.error('expected columns: %s\n\tBut got:\n%s', col, header) for row in filereader: # rel_key, allele_key = int(row[col.index('allele_key')]) allele_id = row[col.index('allele_id')] # allele_label, # category_key, # category_name, # property_key, # property_name, gene_num = int(row[col.index('gene_num')]) if self.test_mode and allele_key not in self.test_keys.get('allele')\ and gene_num not in self.test_ids: continue gene_id = 'NCBIGene:' + str(gene_num) # geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part']) seqalt_id = self.idhash['seqalt'].get(allele_key) if seqalt_id is None: seqalt_id = allele_id geno.addSequenceDerivesFrom(seqalt_id, gene_id) if not self.test_mode and limit is not None and \ filereader.line_num > limit: break return
python
def process_mgi_relationship_transgene_genes(self, limit=None): """ Here, we have the relationship between MGI transgene alleles, and the non-mouse gene ids that are part of them. We augment the allele with the transgene parts. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph LOG.info("getting transgene genes") raw = '/'.join((self.rawdir, 'mgi_relationship_transgene_genes')) geno = Genotype(graph) col = [ 'rel_key', 'allele_key', 'allele_id', 'allele_label', 'category_key', 'category_name', 'property_key', 'property_name', 'gene_num' ] with open(raw, 'r', encoding="utf8") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') header = next(filereader) if header != col: LOG.error('expected columns: %s\n\tBut got:\n%s', col, header) for row in filereader: # rel_key, allele_key = int(row[col.index('allele_key')]) allele_id = row[col.index('allele_id')] # allele_label, # category_key, # category_name, # property_key, # property_name, gene_num = int(row[col.index('gene_num')]) if self.test_mode and allele_key not in self.test_keys.get('allele')\ and gene_num not in self.test_ids: continue gene_id = 'NCBIGene:' + str(gene_num) # geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part']) seqalt_id = self.idhash['seqalt'].get(allele_key) if seqalt_id is None: seqalt_id = allele_id geno.addSequenceDerivesFrom(seqalt_id, gene_id) if not self.test_mode and limit is not None and \ filereader.line_num > limit: break return
[ "def", "process_mgi_relationship_transgene_genes", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "LOG", ".", "info", "(", "\"getting transgene genes\"", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'mgi_relationship_transgene_genes'", ")", ")", "geno", "=", "Genotype", "(", "graph", ")", "col", "=", "[", "'rel_key'", ",", "'allele_key'", ",", "'allele_id'", ",", "'allele_label'", ",", "'category_key'", ",", "'category_name'", ",", "'property_key'", ",", "'property_name'", ",", "'gene_num'", "]", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"utf8\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "header", "=", "next", "(", "filereader", ")", "if", "header", "!=", "col", ":", "LOG", ".", "error", "(", "'expected columns: %s\\n\\tBut got:\\n%s'", ",", "col", ",", "header", ")", "for", "row", "in", "filereader", ":", "# rel_key,", "allele_key", "=", "int", "(", "row", "[", "col", ".", "index", "(", "'allele_key'", ")", "]", ")", "allele_id", "=", "row", "[", "col", ".", "index", "(", "'allele_id'", ")", "]", "# allele_label,", "# category_key,", "# category_name,", "# property_key,", "# property_name,", "gene_num", "=", "int", "(", "row", "[", "col", ".", "index", "(", "'gene_num'", ")", "]", ")", "if", "self", ".", "test_mode", "and", "allele_key", "not", "in", "self", ".", "test_keys", ".", "get", "(", "'allele'", ")", "and", "gene_num", "not", "in", "self", ".", "test_ids", ":", "continue", "gene_id", "=", "'NCBIGene:'", "+", "str", "(", "gene_num", ")", "# geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part'])", "seqalt_id", "=", "self", ".", "idhash", "[", "'seqalt'", "]", ".", "get", "(", "allele_key", ")", "if", "seqalt_id", "is", "None", ":", "seqalt_id", "=", "allele_id", "geno", ".", "addSequenceDerivesFrom", "(", "seqalt_id", ",", "gene_id", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "filereader", ".", "line_num", ">", "limit", ":", "break", "return" ]
Here, we have the relationship between MGI transgene alleles, and the non-mouse gene ids that are part of them. We augment the allele with the transgene parts. :param limit: :return:
[ "Here", "we", "have", "the", "relationship", "between", "MGI", "transgene", "alleles", "and", "the", "non", "-", "mouse", "gene", "ids", "that", "are", "part", "of", "them", ".", "We", "augment", "the", "allele", "with", "the", "transgene", "parts", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MGI.py#L1891-L1944
train
251,248
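The MGI.process_mgi_relationship_transgene_genes record above uses a simple pattern: read the header row, compare it against an expected column list, then index fields by name with col.index. A stripped-down, runnable sketch of that idiom over an in-memory two-column file (the header subset, allele id, and gene number are made up for illustration):

import csv
import io

col = ['allele_id', 'gene_num']                                   # expected header (example only)
tsv = io.StringIO('allele_id\tgene_num\nMGI:3624180\t6718\n')     # invented two-column file
reader = csv.reader(tsv, delimiter='\t')
header = next(reader)
if header != col:
    print('unexpected columns:', header)
for row in reader:
    gene_id = 'NCBIGene:' + row[col.index('gene_num')]            # -> 'NCBIGene:6718'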
monarch-initiative/dipper
dipper/graph/RDFGraph.py
RDFGraph._getnode
def _getnode(self, curie): # convention is lowercase names """ This is a wrapper for creating a URIRef or Bnode object with a given a curie or iri as a string. If an id starts with an underscore, it assigns it to a BNode, otherwise it creates it with a standard URIRef. Alternatively, self.skolemize_blank_node is True, it will skolemize the blank node :param curie: str identifier formatted as curie or iri :return: node: RDFLib URIRef or BNode object """ node = None if curie[0] == '_': if self.are_bnodes_skized is True: node = self.skolemizeBlankNode(curie) else: # delete the leading underscore to make it cleaner node = BNode(re.sub(r'^_:|^_', '', curie, 1)) # Check if curie string is actually an IRI elif curie[:4] == 'http' or curie[:3] == 'ftp': node = URIRef(curie) else: iri = RDFGraph.curie_util.get_uri(curie) if iri is not None: node = URIRef(RDFGraph.curie_util.get_uri(curie)) # Bind prefix map to graph prefix = curie.split(':')[0] if prefix not in self.namespace_manager.namespaces(): mapped_iri = self.curie_map[prefix] self.bind(prefix, Namespace(mapped_iri)) else: LOG.error("couldn't make URI for %s", curie) return node
python
def _getnode(self, curie): # convention is lowercase names """ This is a wrapper for creating a URIRef or Bnode object with a given a curie or iri as a string. If an id starts with an underscore, it assigns it to a BNode, otherwise it creates it with a standard URIRef. Alternatively, self.skolemize_blank_node is True, it will skolemize the blank node :param curie: str identifier formatted as curie or iri :return: node: RDFLib URIRef or BNode object """ node = None if curie[0] == '_': if self.are_bnodes_skized is True: node = self.skolemizeBlankNode(curie) else: # delete the leading underscore to make it cleaner node = BNode(re.sub(r'^_:|^_', '', curie, 1)) # Check if curie string is actually an IRI elif curie[:4] == 'http' or curie[:3] == 'ftp': node = URIRef(curie) else: iri = RDFGraph.curie_util.get_uri(curie) if iri is not None: node = URIRef(RDFGraph.curie_util.get_uri(curie)) # Bind prefix map to graph prefix = curie.split(':')[0] if prefix not in self.namespace_manager.namespaces(): mapped_iri = self.curie_map[prefix] self.bind(prefix, Namespace(mapped_iri)) else: LOG.error("couldn't make URI for %s", curie) return node
[ "def", "_getnode", "(", "self", ",", "curie", ")", ":", "# convention is lowercase names", "node", "=", "None", "if", "curie", "[", "0", "]", "==", "'_'", ":", "if", "self", ".", "are_bnodes_skized", "is", "True", ":", "node", "=", "self", ".", "skolemizeBlankNode", "(", "curie", ")", "else", ":", "# delete the leading underscore to make it cleaner", "node", "=", "BNode", "(", "re", ".", "sub", "(", "r'^_:|^_'", ",", "''", ",", "curie", ",", "1", ")", ")", "# Check if curie string is actually an IRI", "elif", "curie", "[", ":", "4", "]", "==", "'http'", "or", "curie", "[", ":", "3", "]", "==", "'ftp'", ":", "node", "=", "URIRef", "(", "curie", ")", "else", ":", "iri", "=", "RDFGraph", ".", "curie_util", ".", "get_uri", "(", "curie", ")", "if", "iri", "is", "not", "None", ":", "node", "=", "URIRef", "(", "RDFGraph", ".", "curie_util", ".", "get_uri", "(", "curie", ")", ")", "# Bind prefix map to graph", "prefix", "=", "curie", ".", "split", "(", "':'", ")", "[", "0", "]", "if", "prefix", "not", "in", "self", ".", "namespace_manager", ".", "namespaces", "(", ")", ":", "mapped_iri", "=", "self", ".", "curie_map", "[", "prefix", "]", "self", ".", "bind", "(", "prefix", ",", "Namespace", "(", "mapped_iri", ")", ")", "else", ":", "LOG", ".", "error", "(", "\"couldn't make URI for %s\"", ",", "curie", ")", "return", "node" ]
This is a wrapper for creating a URIRef or Bnode object with a given a curie or iri as a string. If an id starts with an underscore, it assigns it to a BNode, otherwise it creates it with a standard URIRef. Alternatively, self.skolemize_blank_node is True, it will skolemize the blank node :param curie: str identifier formatted as curie or iri :return: node: RDFLib URIRef or BNode object
[ "This", "is", "a", "wrapper", "for", "creating", "a", "URIRef", "or", "Bnode", "object", "with", "a", "given", "a", "curie", "or", "iri", "as", "a", "string", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/graph/RDFGraph.py#L92-L126
train
251,249
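A hedged, standalone sketch of the node-classification logic in the RDFGraph._getnode record above, using rdflib directly. The prefix map entry is an assumption for the example (not the project's real curie map), and the error handling and skolemization branches of the original are omitted:

from rdflib import BNode, URIRef

curie_map = {'MGI': 'http://www.informatics.jax.org/accession/MGI:'}   # assumed mapping

def getnode(curie):
    if curie.startswith('_'):                    # blank node spelling, e.g. '_:abc123' or '_abc123'
        return BNode(curie.lstrip('_:'))
    if curie.startswith(('http', 'ftp')):        # already a full IRI
        return URIRef(curie)
    prefix, local = curie.split(':', 1)          # ordinary curie -> IRI via the prefix map
    return URIRef(curie_map[prefix] + local)

# getnode('MGI:2166359') -> URIRef('http://www.informatics.jax.org/accession/MGI:2166359')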
monarch-initiative/dipper
dipper/models/assoc/D2PAssoc.py
D2PAssoc.add_association_to_graph
def add_association_to_graph(self): """ The reified relationship between a disease and a phenotype is decorated with some provenance information. This makes the assumption that both the disease and phenotype are classes. :param g: :return: """ # add the basic association nodes # if rel == self.globaltt[['has disposition']: Assoc.add_association_to_graph(self) # anticipating trouble with onsets ranges that look like curies if self.onset is not None and self.onset != '': self.graph.addTriple(self.assoc_id, self.globaltt['onset'], self.onset) if self.frequency is not None and self.frequency != '': self.graph.addTriple( self.assoc_id, self.globaltt['frequency'], self.frequency) return
python
def add_association_to_graph(self): """ The reified relationship between a disease and a phenotype is decorated with some provenance information. This makes the assumption that both the disease and phenotype are classes. :param g: :return: """ # add the basic association nodes # if rel == self.globaltt[['has disposition']: Assoc.add_association_to_graph(self) # anticipating trouble with onsets ranges that look like curies if self.onset is not None and self.onset != '': self.graph.addTriple(self.assoc_id, self.globaltt['onset'], self.onset) if self.frequency is not None and self.frequency != '': self.graph.addTriple( self.assoc_id, self.globaltt['frequency'], self.frequency) return
[ "def", "add_association_to_graph", "(", "self", ")", ":", "# add the basic association nodes", "# if rel == self.globaltt[['has disposition']:", "Assoc", ".", "add_association_to_graph", "(", "self", ")", "# anticipating trouble with onsets ranges that look like curies", "if", "self", ".", "onset", "is", "not", "None", "and", "self", ".", "onset", "!=", "''", ":", "self", ".", "graph", ".", "addTriple", "(", "self", ".", "assoc_id", ",", "self", ".", "globaltt", "[", "'onset'", "]", ",", "self", ".", "onset", ")", "if", "self", ".", "frequency", "is", "not", "None", "and", "self", ".", "frequency", "!=", "''", ":", "self", ".", "graph", ".", "addTriple", "(", "self", ".", "assoc_id", ",", "self", ".", "globaltt", "[", "'frequency'", "]", ",", "self", ".", "frequency", ")", "return" ]
The reified relationship between a disease and a phenotype is decorated with some provenance information. This makes the assumption that both the disease and phenotype are classes. :param g: :return:
[ "The", "reified", "relationship", "between", "a", "disease", "and", "a", "phenotype", "is", "decorated", "with", "some", "provenance", "information", ".", "This", "makes", "the", "assumption", "that", "both", "the", "disease", "and", "phenotype", "are", "classes", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/D2PAssoc.py#L50-L75
train
251,250
monarch-initiative/dipper
dipper/sources/Monochrom.py
Monochrom.make_parent_bands
def make_parent_bands(self, band, child_bands): """ this will determine the grouping bands that it belongs to, recursively 13q21.31 ==> 13, 13q, 13q2, 13q21, 13q21.3, 13q21.31 :param band: :param child_bands: :return: """ m = re.match(r'([pq][A-H\d]+(?:\.\d+)?)', band) if len(band) > 0: if m: p = str(band[0:len(band)-1]) p = re.sub(r'\.$', '', p) if p is not None: child_bands.add(p) self.make_parent_bands(p, child_bands) else: child_bands = set() return child_bands
python
def make_parent_bands(self, band, child_bands): """ this will determine the grouping bands that it belongs to, recursively 13q21.31 ==> 13, 13q, 13q2, 13q21, 13q21.3, 13q21.31 :param band: :param child_bands: :return: """ m = re.match(r'([pq][A-H\d]+(?:\.\d+)?)', band) if len(band) > 0: if m: p = str(band[0:len(band)-1]) p = re.sub(r'\.$', '', p) if p is not None: child_bands.add(p) self.make_parent_bands(p, child_bands) else: child_bands = set() return child_bands
[ "def", "make_parent_bands", "(", "self", ",", "band", ",", "child_bands", ")", ":", "m", "=", "re", ".", "match", "(", "r'([pq][A-H\\d]+(?:\\.\\d+)?)'", ",", "band", ")", "if", "len", "(", "band", ")", ">", "0", ":", "if", "m", ":", "p", "=", "str", "(", "band", "[", "0", ":", "len", "(", "band", ")", "-", "1", "]", ")", "p", "=", "re", ".", "sub", "(", "r'\\.$'", ",", "''", ",", "p", ")", "if", "p", "is", "not", "None", ":", "child_bands", ".", "add", "(", "p", ")", "self", ".", "make_parent_bands", "(", "p", ",", "child_bands", ")", "else", ":", "child_bands", "=", "set", "(", ")", "return", "child_bands" ]
this will determine the grouping bands that it belongs to, recursively 13q21.31 ==> 13, 13q, 13q2, 13q21, 13q21.3, 13q21.31 :param band: :param child_bands: :return:
[ "this", "will", "determine", "the", "grouping", "bands", "that", "it", "belongs", "to", "recursively", "13q21", ".", "31", "==", ">", "13", "13q", "13q2", "13q21", "13q21", ".", "3", "13q21", ".", "31" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Monochrom.py#L334-L354
train
251,251
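For the Monochrom.make_parent_bands record above, a non-recursive sketch of the same idea: derive the grouping bands by trimming a cytogenetic band string one character at a time. This is a standalone illustration of the expected output, not the class method itself:

def parent_bands(band):
    parents = set()
    current = band
    while 'p' in current or 'q' in current:    # stop once only the chromosome is left
        current = current[:-1].rstrip('.')     # drop one character, tidy trailing dots
        parents.add(current)
    return parents

# parent_bands('13q21.31') -> {'13q21.3', '13q21', '13q2', '13q', '13'}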
monarch-initiative/dipper
dipper/utils/CurieUtil.py
CurieUtil.get_curie
def get_curie(self, uri): '''Get a CURIE from a URI ''' prefix = self.get_curie_prefix(uri) if prefix is not None: key = self.curie_map[prefix] return '%s:%s' % (prefix, uri[len(key):len(uri)]) return None
python
def get_curie(self, uri): '''Get a CURIE from a URI ''' prefix = self.get_curie_prefix(uri) if prefix is not None: key = self.curie_map[prefix] return '%s:%s' % (prefix, uri[len(key):len(uri)]) return None
[ "def", "get_curie", "(", "self", ",", "uri", ")", ":", "prefix", "=", "self", ".", "get_curie_prefix", "(", "uri", ")", "if", "prefix", "is", "not", "None", ":", "key", "=", "self", ".", "curie_map", "[", "prefix", "]", "return", "'%s:%s'", "%", "(", "prefix", ",", "uri", "[", "len", "(", "key", ")", ":", "len", "(", "uri", ")", "]", ")", "return", "None" ]
Get a CURIE from a URI
[ "Get", "a", "CURIE", "from", "a", "URI" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/CurieUtil.py#L31-L37
train
251,252
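A minimal standalone version of the URI-to-CURIE direction shown in the CurieUtil.get_curie record above. The single prefix expansion in the map is an assumption made for the example, not necessarily the mapping the project ships with:

curie_map = {'FlyBase': 'http://flybase.org/reports/'}   # assumed expansion

def get_curie(uri):
    for prefix, expansion in curie_map.items():
        if uri.startswith(expansion):
            return '%s:%s' % (prefix, uri[len(expansion):])
    return None

# get_curie('http://flybase.org/reports/FBgn0000490') -> 'FlyBase:FBgn0000490'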
monarch-initiative/dipper
dipper/utils/CurieUtil.py
CurieUtil.get_uri
def get_uri(self, curie): ''' Get a URI from a CURIE ''' if curie is None: return None parts = curie.split(':') if len(parts) == 1: if curie != '': LOG.error("Not a properly formed curie: \"%s\"", curie) return None prefix = parts[0] if prefix in self.curie_map: return '%s%s' % (self.curie_map.get(prefix), curie[(curie.index(':') + 1):]) LOG.error("Curie prefix not defined for %s", curie) return None
python
def get_uri(self, curie): ''' Get a URI from a CURIE ''' if curie is None: return None parts = curie.split(':') if len(parts) == 1: if curie != '': LOG.error("Not a properly formed curie: \"%s\"", curie) return None prefix = parts[0] if prefix in self.curie_map: return '%s%s' % (self.curie_map.get(prefix), curie[(curie.index(':') + 1):]) LOG.error("Curie prefix not defined for %s", curie) return None
[ "def", "get_uri", "(", "self", ",", "curie", ")", ":", "if", "curie", "is", "None", ":", "return", "None", "parts", "=", "curie", ".", "split", "(", "':'", ")", "if", "len", "(", "parts", ")", "==", "1", ":", "if", "curie", "!=", "''", ":", "LOG", ".", "error", "(", "\"Not a properly formed curie: \\\"%s\\\"\"", ",", "curie", ")", "return", "None", "prefix", "=", "parts", "[", "0", "]", "if", "prefix", "in", "self", ".", "curie_map", ":", "return", "'%s%s'", "%", "(", "self", ".", "curie_map", ".", "get", "(", "prefix", ")", ",", "curie", "[", "(", "curie", ".", "index", "(", "':'", ")", "+", "1", ")", ":", "]", ")", "LOG", ".", "error", "(", "\"Curie prefix not defined for %s\"", ",", "curie", ")", "return", "None" ]
Get a URI from a CURIE
[ "Get", "a", "URI", "from", "a", "CURIE" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/CurieUtil.py#L46-L60
train
251,253
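And the reverse direction from the CurieUtil.get_uri record above, again with an assumed prefix map and the same error behaviour (return None for malformed or unknown curies); logging is left out to keep the sketch self-contained:

curie_map = {'FlyBase': 'http://flybase.org/reports/'}   # assumed expansion

def get_uri(curie):
    if curie is None or ':' not in curie:
        return None
    prefix = curie.split(':')[0]
    if prefix not in curie_map:
        return None
    return curie_map[prefix] + curie[curie.index(':') + 1:]

# get_uri('FlyBase:FBgn0000490') -> 'http://flybase.org/reports/FBgn0000490'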
monarch-initiative/dipper
dipper/sources/Coriell.py
Coriell.fetch
def fetch(self, is_dl_forced=False): """ Here we connect to the coriell sftp server using private connection details. They dump bi-weekly files with a timestamp in the filename. For each catalog, we ping the remote site and pull the most-recently updated file, renaming it to our local latest.csv. Be sure to have pg user/password connection details in your conf.yaml file, like: dbauth : {"coriell" : { "user" : "<username>", "password" : "<password>", "host" : <host>, "private_key"=path/to/rsa_key} } :param is_dl_forced: :return: """ host = config.get_config()['dbauth']['coriell']['host'] key = config.get_config()['dbauth']['coriell']['private_key'] user = config.get_config()['user']['coriell'] passwd = config.get_config()['keys'][user] with pysftp.Connection( host, username=user, password=passwd, private_key=key) as sftp: # check to make sure each file is in there # get the remote files remote_files = sftp.listdir_attr() files_by_repo = {} for attr in remote_files: # for each catalog, get the most-recent filename mch = re.match('(NIGMS|NIA|NHGRI|NINDS)', attr.filename) if mch is not None and len(mch.groups()) > 0: # there should just be one now files_by_repo[mch.group(1)] = attr # sort each array in hash, # & get the name and time of the most-recent file for each catalog for rmt in self.files: LOG.info("Checking on %s catalog file", rmt) fname = self.files[rmt]['file'] remotef = files_by_repo[rmt] target_name = '/'.join((self.rawdir, fname)) # check if the local file is out of date, if so, download. # otherwise, skip. # we rename (for simplicity) the original file fstat = None if os.path.exists(target_name): fstat = os.stat(target_name) LOG.info( "Local file date: %s", datetime.utcfromtimestamp(fstat[stat.ST_CTIME])) if fstat is None or remotef.st_mtime > fstat[stat.ST_CTIME]: if fstat is None: LOG.info("File does not exist locally; downloading...") else: LOG.info( "New version of %s catalog available; downloading...", rmt) sftp.get(remotef.filename, target_name) LOG.info( "Fetched remote %s -> %s", remotef.filename, target_name) fstat = os.stat(target_name) filedate = datetime.utcfromtimestamp( remotef.st_mtime).strftime("%Y-%m-%d") LOG.info( "New file date: %s", datetime.utcfromtimestamp(fstat[stat.ST_CTIME])) else: LOG.info("File %s exists; using local copy", fname) filedate = datetime.utcfromtimestamp( fstat[stat.ST_CTIME]).strftime("%Y-%m-%d") self.dataset.setFileAccessUrl(remotef.filename, True) self.dataset.setVersion(filedate) return
python
def fetch(self, is_dl_forced=False): """ Here we connect to the coriell sftp server using private connection details. They dump bi-weekly files with a timestamp in the filename. For each catalog, we ping the remote site and pull the most-recently updated file, renaming it to our local latest.csv. Be sure to have pg user/password connection details in your conf.yaml file, like: dbauth : {"coriell" : { "user" : "<username>", "password" : "<password>", "host" : <host>, "private_key"=path/to/rsa_key} } :param is_dl_forced: :return: """ host = config.get_config()['dbauth']['coriell']['host'] key = config.get_config()['dbauth']['coriell']['private_key'] user = config.get_config()['user']['coriell'] passwd = config.get_config()['keys'][user] with pysftp.Connection( host, username=user, password=passwd, private_key=key) as sftp: # check to make sure each file is in there # get the remote files remote_files = sftp.listdir_attr() files_by_repo = {} for attr in remote_files: # for each catalog, get the most-recent filename mch = re.match('(NIGMS|NIA|NHGRI|NINDS)', attr.filename) if mch is not None and len(mch.groups()) > 0: # there should just be one now files_by_repo[mch.group(1)] = attr # sort each array in hash, # & get the name and time of the most-recent file for each catalog for rmt in self.files: LOG.info("Checking on %s catalog file", rmt) fname = self.files[rmt]['file'] remotef = files_by_repo[rmt] target_name = '/'.join((self.rawdir, fname)) # check if the local file is out of date, if so, download. # otherwise, skip. # we rename (for simplicity) the original file fstat = None if os.path.exists(target_name): fstat = os.stat(target_name) LOG.info( "Local file date: %s", datetime.utcfromtimestamp(fstat[stat.ST_CTIME])) if fstat is None or remotef.st_mtime > fstat[stat.ST_CTIME]: if fstat is None: LOG.info("File does not exist locally; downloading...") else: LOG.info( "New version of %s catalog available; downloading...", rmt) sftp.get(remotef.filename, target_name) LOG.info( "Fetched remote %s -> %s", remotef.filename, target_name) fstat = os.stat(target_name) filedate = datetime.utcfromtimestamp( remotef.st_mtime).strftime("%Y-%m-%d") LOG.info( "New file date: %s", datetime.utcfromtimestamp(fstat[stat.ST_CTIME])) else: LOG.info("File %s exists; using local copy", fname) filedate = datetime.utcfromtimestamp( fstat[stat.ST_CTIME]).strftime("%Y-%m-%d") self.dataset.setFileAccessUrl(remotef.filename, True) self.dataset.setVersion(filedate) return
[ "def", "fetch", "(", "self", ",", "is_dl_forced", "=", "False", ")", ":", "host", "=", "config", ".", "get_config", "(", ")", "[", "'dbauth'", "]", "[", "'coriell'", "]", "[", "'host'", "]", "key", "=", "config", ".", "get_config", "(", ")", "[", "'dbauth'", "]", "[", "'coriell'", "]", "[", "'private_key'", "]", "user", "=", "config", ".", "get_config", "(", ")", "[", "'user'", "]", "[", "'coriell'", "]", "passwd", "=", "config", ".", "get_config", "(", ")", "[", "'keys'", "]", "[", "user", "]", "with", "pysftp", ".", "Connection", "(", "host", ",", "username", "=", "user", ",", "password", "=", "passwd", ",", "private_key", "=", "key", ")", "as", "sftp", ":", "# check to make sure each file is in there", "# get the remote files", "remote_files", "=", "sftp", ".", "listdir_attr", "(", ")", "files_by_repo", "=", "{", "}", "for", "attr", "in", "remote_files", ":", "# for each catalog, get the most-recent filename", "mch", "=", "re", ".", "match", "(", "'(NIGMS|NIA|NHGRI|NINDS)'", ",", "attr", ".", "filename", ")", "if", "mch", "is", "not", "None", "and", "len", "(", "mch", ".", "groups", "(", ")", ")", ">", "0", ":", "# there should just be one now", "files_by_repo", "[", "mch", ".", "group", "(", "1", ")", "]", "=", "attr", "# sort each array in hash,", "# & get the name and time of the most-recent file for each catalog", "for", "rmt", "in", "self", ".", "files", ":", "LOG", ".", "info", "(", "\"Checking on %s catalog file\"", ",", "rmt", ")", "fname", "=", "self", ".", "files", "[", "rmt", "]", "[", "'file'", "]", "remotef", "=", "files_by_repo", "[", "rmt", "]", "target_name", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "fname", ")", ")", "# check if the local file is out of date, if so, download.", "# otherwise, skip.", "# we rename (for simplicity) the original file", "fstat", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "target_name", ")", ":", "fstat", "=", "os", ".", "stat", "(", "target_name", ")", "LOG", ".", "info", "(", "\"Local file date: %s\"", ",", "datetime", ".", "utcfromtimestamp", "(", "fstat", "[", "stat", ".", "ST_CTIME", "]", ")", ")", "if", "fstat", "is", "None", "or", "remotef", ".", "st_mtime", ">", "fstat", "[", "stat", ".", "ST_CTIME", "]", ":", "if", "fstat", "is", "None", ":", "LOG", ".", "info", "(", "\"File does not exist locally; downloading...\"", ")", "else", ":", "LOG", ".", "info", "(", "\"New version of %s catalog available; downloading...\"", ",", "rmt", ")", "sftp", ".", "get", "(", "remotef", ".", "filename", ",", "target_name", ")", "LOG", ".", "info", "(", "\"Fetched remote %s -> %s\"", ",", "remotef", ".", "filename", ",", "target_name", ")", "fstat", "=", "os", ".", "stat", "(", "target_name", ")", "filedate", "=", "datetime", ".", "utcfromtimestamp", "(", "remotef", ".", "st_mtime", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "LOG", ".", "info", "(", "\"New file date: %s\"", ",", "datetime", ".", "utcfromtimestamp", "(", "fstat", "[", "stat", ".", "ST_CTIME", "]", ")", ")", "else", ":", "LOG", ".", "info", "(", "\"File %s exists; using local copy\"", ",", "fname", ")", "filedate", "=", "datetime", ".", "utcfromtimestamp", "(", "fstat", "[", "stat", ".", "ST_CTIME", "]", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "self", ".", "dataset", ".", "setFileAccessUrl", "(", "remotef", ".", "filename", ",", "True", ")", "self", ".", "dataset", ".", "setVersion", "(", "filedate", ")", "return" ]
Here we connect to the coriell sftp server using private connection details. They dump bi-weekly files with a timestamp in the filename. For each catalog, we ping the remote site and pull the most-recently updated file, renaming it to our local latest.csv. Be sure to have pg user/password connection details in your conf.yaml file, like: dbauth : {"coriell" : { "user" : "<username>", "password" : "<password>", "host" : <host>, "private_key"=path/to/rsa_key} } :param is_dl_forced: :return:
[ "Here", "we", "connect", "to", "the", "coriell", "sftp", "server", "using", "private", "connection", "details", ".", "They", "dump", "bi", "-", "weekly", "files", "with", "a", "timestamp", "in", "the", "filename", ".", "For", "each", "catalog", "we", "ping", "the", "remote", "site", "and", "pull", "the", "most", "-", "recently", "updated", "file", "renaming", "it", "to", "our", "local", "latest", ".", "csv", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Coriell.py#L150-L224
train
251,254
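The freshness check in the Coriell.fetch record above comes down to comparing the remote file's mtime against the local copy's ctime. A hedged standalone sketch of just that decision; the path and timestamp in the usage comment are hypothetical:

import os
import stat

def needs_download(local_path, remote_mtime):
    # re-fetch when there is no local copy, or the remote dump is newer
    if not os.path.exists(local_path):
        return True
    return remote_mtime > os.stat(local_path)[stat.ST_CTIME]

# needs_download('raw/coriell/NINDS_latest.csv', 1546300800) -> True or False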
monarch-initiative/dipper
dipper/sources/Coriell.py
Coriell._process_collection
def _process_collection(self, collection_id, label, page): """ This function will process the data supplied internally about the repository from Coriell. Triples: Repository a ERO:collection rdf:label Literal(label) foaf:page Literal(page) :param collection_id: :param label: :param page: :return: """ # ############# BUILD THE CELL LINE REPOSITORY ############# for graph in [self.graph, self.testgraph]: # TODO: How to devise a label for each repository? model = Model(graph) reference = Reference(graph) repo_id = 'CoriellCollection:' + collection_id repo_label = label repo_page = page model.addIndividualToGraph( repo_id, repo_label, self.globaltt['collection']) reference.addPage(repo_id, repo_page) return
python
def _process_collection(self, collection_id, label, page): """ This function will process the data supplied internally about the repository from Coriell. Triples: Repository a ERO:collection rdf:label Literal(label) foaf:page Literal(page) :param collection_id: :param label: :param page: :return: """ # ############# BUILD THE CELL LINE REPOSITORY ############# for graph in [self.graph, self.testgraph]: # TODO: How to devise a label for each repository? model = Model(graph) reference = Reference(graph) repo_id = 'CoriellCollection:' + collection_id repo_label = label repo_page = page model.addIndividualToGraph( repo_id, repo_label, self.globaltt['collection']) reference.addPage(repo_id, repo_page) return
[ "def", "_process_collection", "(", "self", ",", "collection_id", ",", "label", ",", "page", ")", ":", "# ############# BUILD THE CELL LINE REPOSITORY #############", "for", "graph", "in", "[", "self", ".", "graph", ",", "self", ".", "testgraph", "]", ":", "# TODO: How to devise a label for each repository?", "model", "=", "Model", "(", "graph", ")", "reference", "=", "Reference", "(", "graph", ")", "repo_id", "=", "'CoriellCollection:'", "+", "collection_id", "repo_label", "=", "label", "repo_page", "=", "page", "model", ".", "addIndividualToGraph", "(", "repo_id", ",", "repo_label", ",", "self", ".", "globaltt", "[", "'collection'", "]", ")", "reference", ".", "addPage", "(", "repo_id", ",", "repo_page", ")", "return" ]
This function will process the data supplied internally about the repository from Coriell. Triples: Repository a ERO:collection rdf:label Literal(label) foaf:page Literal(page) :param collection_id: :param label: :param page: :return:
[ "This", "function", "will", "process", "the", "data", "supplied", "internally", "about", "the", "repository", "from", "Coriell", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Coriell.py#L760-L788
train
251,255
monarch-initiative/dipper
dipper/sources/FlyBase.py
FlyBase._process_genotypes
def _process_genotypes(self, limit): """ Add the genotype internal id to flybase mapping to the idhashmap. Also, add them as individuals to the graph. Triples created: <genotype id> a GENO:intrinsic_genotype <genotype id> rdfs:label "<gvc> [bkgd]" :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, 'genotype')) LOG.info("building labels for genotypes") geno = Genotype(graph) fly_tax = self.globaltt['Drosophila melanogaster'] with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 (genotype_num, uniquename, description, name) = line # if self.test_mode is True: # if int(object_key) not in self.test_keys.get('genotype'): # continue # add the internal genotype to pub mapping genotype_id = 'MONARCH:FBgeno'+str(genotype_num) self.idhash['genotype'][genotype_num] = genotype_id if description == '': description = None if not self.test_mode and limit is not None and line_counter > limit: pass else: if self.test_mode and int(genotype_num) \ not in self.test_keys['genotype']: continue model.addIndividualToGraph( genotype_id, uniquename, self.globaltt['intrinsic_genotype'], description) # we know all genotypes are in flies # FIXME we assume here they are in melanogaster, # but that isn't necessarily true!!! # TODO should the taxon be == genomic background? geno.addTaxon(fly_tax, genotype_id) genotype_iid = self._makeInternalIdentifier( 'genotype', genotype_num) model.addComment( genotype_id, genotype_iid) if name.strip() != '': model.addSynonym(genotype_id, name) return
python
def _process_genotypes(self, limit): """ Add the genotype internal id to flybase mapping to the idhashmap. Also, add them as individuals to the graph. Triples created: <genotype id> a GENO:intrinsic_genotype <genotype id> rdfs:label "<gvc> [bkgd]" :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, 'genotype')) LOG.info("building labels for genotypes") geno = Genotype(graph) fly_tax = self.globaltt['Drosophila melanogaster'] with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 (genotype_num, uniquename, description, name) = line # if self.test_mode is True: # if int(object_key) not in self.test_keys.get('genotype'): # continue # add the internal genotype to pub mapping genotype_id = 'MONARCH:FBgeno'+str(genotype_num) self.idhash['genotype'][genotype_num] = genotype_id if description == '': description = None if not self.test_mode and limit is not None and line_counter > limit: pass else: if self.test_mode and int(genotype_num) \ not in self.test_keys['genotype']: continue model.addIndividualToGraph( genotype_id, uniquename, self.globaltt['intrinsic_genotype'], description) # we know all genotypes are in flies # FIXME we assume here they are in melanogaster, # but that isn't necessarily true!!! # TODO should the taxon be == genomic background? geno.addTaxon(fly_tax, genotype_id) genotype_iid = self._makeInternalIdentifier( 'genotype', genotype_num) model.addComment( genotype_id, genotype_iid) if name.strip() != '': model.addSynonym(genotype_id, name) return
[ "def", "_process_genotypes", "(", "self", ",", "limit", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'genotype'", ")", ")", "LOG", ".", "info", "(", "\"building labels for genotypes\"", ")", "geno", "=", "Genotype", "(", "graph", ")", "fly_tax", "=", "self", ".", "globaltt", "[", "'Drosophila melanogaster'", "]", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f", ":", "f", ".", "readline", "(", ")", "# read the header row; skip", "filereader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "line", "in", "filereader", ":", "line_counter", "+=", "1", "(", "genotype_num", ",", "uniquename", ",", "description", ",", "name", ")", "=", "line", "# if self.test_mode is True:", "# if int(object_key) not in self.test_keys.get('genotype'):", "# continue", "# add the internal genotype to pub mapping", "genotype_id", "=", "'MONARCH:FBgeno'", "+", "str", "(", "genotype_num", ")", "self", ".", "idhash", "[", "'genotype'", "]", "[", "genotype_num", "]", "=", "genotype_id", "if", "description", "==", "''", ":", "description", "=", "None", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "pass", "else", ":", "if", "self", ".", "test_mode", "and", "int", "(", "genotype_num", ")", "not", "in", "self", ".", "test_keys", "[", "'genotype'", "]", ":", "continue", "model", ".", "addIndividualToGraph", "(", "genotype_id", ",", "uniquename", ",", "self", ".", "globaltt", "[", "'intrinsic_genotype'", "]", ",", "description", ")", "# we know all genotypes are in flies", "# FIXME we assume here they are in melanogaster,", "# but that isn't necessarily true!!!", "# TODO should the taxon be == genomic background?", "geno", ".", "addTaxon", "(", "fly_tax", ",", "genotype_id", ")", "genotype_iid", "=", "self", ".", "_makeInternalIdentifier", "(", "'genotype'", ",", "genotype_num", ")", "model", ".", "addComment", "(", "genotype_id", ",", "genotype_iid", ")", "if", "name", ".", "strip", "(", ")", "!=", "''", ":", "model", ".", "addSynonym", "(", "genotype_id", ",", "name", ")", "return" ]
Add the genotype internal id to flybase mapping to the idhashmap. Also, add them as individuals to the graph. Triples created: <genotype id> a GENO:intrinsic_genotype <genotype id> rdfs:label "<gvc> [bkgd]" :param limit: :return:
[ "Add", "the", "genotype", "internal", "id", "to", "flybase", "mapping", "to", "the", "idhashmap", ".", "Also", "add", "them", "as", "individuals", "to", "the", "graph", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L358-L423
train
251,256
monarch-initiative/dipper
dipper/sources/FlyBase.py
FlyBase._process_stocks
def _process_stocks(self, limit): """ Stock definitions. Here we instantiate them as instances of the given taxon. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, 'stock')) LOG.info("building labels for stocks") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 (stock_id, dbxref_id, organism_id, name, uniquename, description, type_id, is_obsolete) = line # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670 stock_num = stock_id stock_id = 'FlyBase:'+uniquename self.idhash['stock'][stock_num] = stock_id stock_label = description organism_key = organism_id taxon = self.idhash['organism'][organism_key] # from what i can tell, the dbxrefs are just more FBst, # so no added information vs uniquename if not self.test_mode and limit is not None and line_counter > limit: pass else: if self.test_mode \ and int(stock_num) not in self.test_keys['strain']: continue # tax_label = self.label_hash[taxon] # unused # add the tax in case it hasn't been already model.addClassToGraph(taxon) model.addIndividualToGraph(stock_id, stock_label, taxon) if is_obsolete == 't': model.addDeprecatedIndividual(stock_id) return
python
def _process_stocks(self, limit): """ Stock definitions. Here we instantiate them as instances of the given taxon. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, 'stock')) LOG.info("building labels for stocks") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 (stock_id, dbxref_id, organism_id, name, uniquename, description, type_id, is_obsolete) = line # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670 stock_num = stock_id stock_id = 'FlyBase:'+uniquename self.idhash['stock'][stock_num] = stock_id stock_label = description organism_key = organism_id taxon = self.idhash['organism'][organism_key] # from what i can tell, the dbxrefs are just more FBst, # so no added information vs uniquename if not self.test_mode and limit is not None and line_counter > limit: pass else: if self.test_mode \ and int(stock_num) not in self.test_keys['strain']: continue # tax_label = self.label_hash[taxon] # unused # add the tax in case it hasn't been already model.addClassToGraph(taxon) model.addIndividualToGraph(stock_id, stock_label, taxon) if is_obsolete == 't': model.addDeprecatedIndividual(stock_id) return
[ "def", "_process_stocks", "(", "self", ",", "limit", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'stock'", ")", ")", "LOG", ".", "info", "(", "\"building labels for stocks\"", ")", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f", ":", "f", ".", "readline", "(", ")", "# read the header row; skip", "filereader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "line", "in", "filereader", ":", "line_counter", "+=", "1", "(", "stock_id", ",", "dbxref_id", ",", "organism_id", ",", "name", ",", "uniquename", ",", "description", ",", "type_id", ",", "is_obsolete", ")", "=", "line", "# 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670", "stock_num", "=", "stock_id", "stock_id", "=", "'FlyBase:'", "+", "uniquename", "self", ".", "idhash", "[", "'stock'", "]", "[", "stock_num", "]", "=", "stock_id", "stock_label", "=", "description", "organism_key", "=", "organism_id", "taxon", "=", "self", ".", "idhash", "[", "'organism'", "]", "[", "organism_key", "]", "# from what i can tell, the dbxrefs are just more FBst,", "# so no added information vs uniquename", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "pass", "else", ":", "if", "self", ".", "test_mode", "and", "int", "(", "stock_num", ")", "not", "in", "self", ".", "test_keys", "[", "'strain'", "]", ":", "continue", "# tax_label = self.label_hash[taxon] # unused", "# add the tax in case it hasn't been already", "model", ".", "addClassToGraph", "(", "taxon", ")", "model", ".", "addIndividualToGraph", "(", "stock_id", ",", "stock_label", ",", "taxon", ")", "if", "is_obsolete", "==", "'t'", ":", "model", ".", "addDeprecatedIndividual", "(", "stock_id", ")", "return" ]
Stock definitions. Here we instantiate them as instances of the given taxon. :param limit: :return:
[ "Stock", "definitions", ".", "Here", "we", "instantiate", "them", "as", "instances", "of", "the", "given", "taxon", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L426-L480
train
251,257
monarch-initiative/dipper
dipper/sources/FlyBase.py
FlyBase._process_pubs
def _process_pubs(self, limit): """ Flybase publications. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, 'pub')) LOG.info("building labels for pubs") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: (pub_id, title, volumetitle, volume, series_name, issue, pyear, pages, miniref, type_id, is_obsolete, publisher, pubplace, uniquename) = line # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670 # if self.test_mode is True: # if int(object_key) not in self.test_keys.get('genotype'): # continue pub_num = pub_id pub_id = 'FlyBase:'+uniquename.strip() self.idhash['publication'][pub_num] = pub_id # TODO figure out the type of pub by type_id if not re.match(r'(FBrf|multi)', uniquename): continue line_counter += 1 reference = Reference(graph, pub_id) if title != '': reference.setTitle(title) if pyear != '': reference.setYear(str(pyear)) if miniref != '': reference.setShortCitation(miniref) if not self.test_mode and limit is not None and line_counter > limit: pass else: if self.test_mode and int(pub_num) not in self.test_keys['pub']: continue if is_obsolete == 't': model.addDeprecatedIndividual(pub_id) else: reference.addRefToGraph() return
python
def _process_pubs(self, limit): """ Flybase publications. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, 'pub')) LOG.info("building labels for pubs") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: (pub_id, title, volumetitle, volume, series_name, issue, pyear, pages, miniref, type_id, is_obsolete, publisher, pubplace, uniquename) = line # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670 # if self.test_mode is True: # if int(object_key) not in self.test_keys.get('genotype'): # continue pub_num = pub_id pub_id = 'FlyBase:'+uniquename.strip() self.idhash['publication'][pub_num] = pub_id # TODO figure out the type of pub by type_id if not re.match(r'(FBrf|multi)', uniquename): continue line_counter += 1 reference = Reference(graph, pub_id) if title != '': reference.setTitle(title) if pyear != '': reference.setYear(str(pyear)) if miniref != '': reference.setShortCitation(miniref) if not self.test_mode and limit is not None and line_counter > limit: pass else: if self.test_mode and int(pub_num) not in self.test_keys['pub']: continue if is_obsolete == 't': model.addDeprecatedIndividual(pub_id) else: reference.addRefToGraph() return
[ "def", "_process_pubs", "(", "self", ",", "limit", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'pub'", ")", ")", "LOG", ".", "info", "(", "\"building labels for pubs\"", ")", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f", ":", "f", ".", "readline", "(", ")", "# read the header row; skip", "filereader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "line", "in", "filereader", ":", "(", "pub_id", ",", "title", ",", "volumetitle", ",", "volume", ",", "series_name", ",", "issue", ",", "pyear", ",", "pages", ",", "miniref", ",", "type_id", ",", "is_obsolete", ",", "publisher", ",", "pubplace", ",", "uniquename", ")", "=", "line", "# 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670", "# if self.test_mode is True:", "# if int(object_key) not in self.test_keys.get('genotype'):", "# continue", "pub_num", "=", "pub_id", "pub_id", "=", "'FlyBase:'", "+", "uniquename", ".", "strip", "(", ")", "self", ".", "idhash", "[", "'publication'", "]", "[", "pub_num", "]", "=", "pub_id", "# TODO figure out the type of pub by type_id", "if", "not", "re", ".", "match", "(", "r'(FBrf|multi)'", ",", "uniquename", ")", ":", "continue", "line_counter", "+=", "1", "reference", "=", "Reference", "(", "graph", ",", "pub_id", ")", "if", "title", "!=", "''", ":", "reference", ".", "setTitle", "(", "title", ")", "if", "pyear", "!=", "''", ":", "reference", ".", "setYear", "(", "str", "(", "pyear", ")", ")", "if", "miniref", "!=", "''", ":", "reference", ".", "setShortCitation", "(", "miniref", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "pass", "else", ":", "if", "self", ".", "test_mode", "and", "int", "(", "pub_num", ")", "not", "in", "self", ".", "test_keys", "[", "'pub'", "]", ":", "continue", "if", "is_obsolete", "==", "'t'", ":", "model", ".", "addDeprecatedIndividual", "(", "pub_id", ")", "else", ":", "reference", ".", "addRefToGraph", "(", ")", "return" ]
Flybase publications. :param limit: :return:
[ "Flybase", "publications", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L483-L539
train
251,258
monarch-initiative/dipper
dipper/sources/FlyBase.py
FlyBase._process_environments
def _process_environments(self): """ There's only about 30 environments in which the phenotypes are recorded. There are no externally accessible identifiers for environments, so we make anonymous nodes for now. Some of the environments are comprised of >1 of the other environments; we do some simple parsing to match the strings of the environmental labels to the other atomic components. :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph raw = '/'.join((self.rawdir, 'environment')) LOG.info("building labels for environment") env_parts = {} label_map = {} env = Environment(graph) with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (environment_id, uniquename, description) = line # 22 heat sensitive | tetracycline conditional environment_num = environment_id environment_internal_id = self._makeInternalIdentifier( 'environment', environment_num) if environment_num not in self.idhash['environment']: self.idhash['environment'][environment_num] = \ environment_internal_id environment_id = self.idhash['environment'][environment_num] environment_label = uniquename if environment_label == 'unspecified': environment_label += ' environment' env.addEnvironment(environment_id, environment_label) self.label_hash[environment_id] = environment_label # split up the environment into parts # if there's parts, then add them to the hash; # we'll match the components in a second pass components = re.split(r'\|', uniquename) if len(components) > 1: env_parts[environment_id] = components else: label_map[environment_label] = environment_id # ### end loop through file # build the environmental components for eid in env_parts: eid = eid.strip() for e in env_parts[eid]: # search for the environmental component by label env_id = label_map.get(e.strip()) env.addComponentToEnvironment(eid, env_id) return
python
def _process_environments(self): """ There's only about 30 environments in which the phenotypes are recorded. There are no externally accessible identifiers for environments, so we make anonymous nodes for now. Some of the environments are comprised of >1 of the other environments; we do some simple parsing to match the strings of the environmental labels to the other atomic components. :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph raw = '/'.join((self.rawdir, 'environment')) LOG.info("building labels for environment") env_parts = {} label_map = {} env = Environment(graph) with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (environment_id, uniquename, description) = line # 22 heat sensitive | tetracycline conditional environment_num = environment_id environment_internal_id = self._makeInternalIdentifier( 'environment', environment_num) if environment_num not in self.idhash['environment']: self.idhash['environment'][environment_num] = \ environment_internal_id environment_id = self.idhash['environment'][environment_num] environment_label = uniquename if environment_label == 'unspecified': environment_label += ' environment' env.addEnvironment(environment_id, environment_label) self.label_hash[environment_id] = environment_label # split up the environment into parts # if there's parts, then add them to the hash; # we'll match the components in a second pass components = re.split(r'\|', uniquename) if len(components) > 1: env_parts[environment_id] = components else: label_map[environment_label] = environment_id # ### end loop through file # build the environmental components for eid in env_parts: eid = eid.strip() for e in env_parts[eid]: # search for the environmental component by label env_id = label_map.get(e.strip()) env.addComponentToEnvironment(eid, env_id) return
[ "def", "_process_environments", "(", "self", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'environment'", ")", ")", "LOG", ".", "info", "(", "\"building labels for environment\"", ")", "env_parts", "=", "{", "}", "label_map", "=", "{", "}", "env", "=", "Environment", "(", "graph", ")", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f", ":", "filereader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "f", ".", "readline", "(", ")", "# read the header row; skip", "for", "line", "in", "filereader", ":", "(", "environment_id", ",", "uniquename", ",", "description", ")", "=", "line", "# 22 heat sensitive | tetracycline conditional", "environment_num", "=", "environment_id", "environment_internal_id", "=", "self", ".", "_makeInternalIdentifier", "(", "'environment'", ",", "environment_num", ")", "if", "environment_num", "not", "in", "self", ".", "idhash", "[", "'environment'", "]", ":", "self", ".", "idhash", "[", "'environment'", "]", "[", "environment_num", "]", "=", "environment_internal_id", "environment_id", "=", "self", ".", "idhash", "[", "'environment'", "]", "[", "environment_num", "]", "environment_label", "=", "uniquename", "if", "environment_label", "==", "'unspecified'", ":", "environment_label", "+=", "' environment'", "env", ".", "addEnvironment", "(", "environment_id", ",", "environment_label", ")", "self", ".", "label_hash", "[", "environment_id", "]", "=", "environment_label", "# split up the environment into parts", "# if there's parts, then add them to the hash;", "# we'll match the components in a second pass", "components", "=", "re", ".", "split", "(", "r'\\|'", ",", "uniquename", ")", "if", "len", "(", "components", ")", ">", "1", ":", "env_parts", "[", "environment_id", "]", "=", "components", "else", ":", "label_map", "[", "environment_label", "]", "=", "environment_id", "# ### end loop through file", "# build the environmental components", "for", "eid", "in", "env_parts", ":", "eid", "=", "eid", ".", "strip", "(", ")", "for", "e", "in", "env_parts", "[", "eid", "]", ":", "# search for the environmental component by label", "env_id", "=", "label_map", ".", "get", "(", "e", ".", "strip", "(", ")", ")", "env", ".", "addComponentToEnvironment", "(", "eid", ",", "env_id", ")", "return" ]
There's only about 30 environments in which the phenotypes are recorded. There are no externally accessible identifiers for environments, so we make anonymous nodes for now. Some of the environments are comprised of >1 of the other environments; we do some simple parsing to match the strings of the environmental labels to the other atomic components. :return:
[ "There", "s", "only", "about", "30", "environments", "in", "which", "the", "phenotypes", "are", "recorded", ".", "There", "are", "no", "externally", "accessible", "identifiers", "for", "environments", "so", "we", "make", "anonymous", "nodes", "for", "now", ".", "Some", "of", "the", "environments", "are", "comprised", "of", ">", "1", "of", "the", "other", "environments", ";", "we", "do", "some", "simple", "parsing", "to", "match", "the", "strings", "of", "the", "environmental", "labels", "to", "the", "other", "atomic", "components", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L542-L604
train
251,259
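The environment record above resolves composite environments such as "heat sensitive | tetracycline conditional" back to their atomic parts by matching labels in a second pass. A self-contained sketch of that two-pass matching, using invented rows and an internal-id prefix modeled on the "_fb…key…" pattern mentioned elsewhere in these records (not dipper's Environment helper or its real output):

import re

# invented (environment_key, uniquename) rows; the real data is the FlyBase
# chado 'environment' table described in the record above
rows = [
    ('21', 'heat sensitive'),
    ('5', 'tetracycline conditional'),
    ('22', 'heat sensitive | tetracycline conditional'),
]

label_map = {}   # atomic environment label -> internal id
env_parts = {}   # composite environment id -> list of component labels

# first pass: register atomic environments, remember composites for later
for env_key, uniquename in rows:
    env_id = '_fbenvironmentkey' + env_key   # assumed internal-id pattern
    components = re.split(r'\|', uniquename)
    if len(components) > 1:
        env_parts[env_id] = components
    else:
        label_map[uniquename.strip()] = env_id

# second pass: link each composite to the atomic environments it names
for composite_id, components in env_parts.items():
    for label in components:
        component_id = label_map.get(label.strip())
        # stand-in for Environment.addComponentToEnvironment(...)
        print(composite_id, 'has component', component_id)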
monarch-initiative/dipper
dipper/sources/FlyBase.py
FlyBase._process_stock_genotype
def _process_stock_genotype(self, limit): """ The genotypes of the stocks. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph raw = '/'.join((self.rawdir, 'stock_genotype')) LOG.info("processing stock genotype") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (stock_genotype_id, stock_id, genotype_id) = line stock_key = stock_id stock_id = self.idhash['stock'][stock_key] genotype_key = genotype_id genotype_id = self.idhash['genotype'][genotype_key] if self.test_mode \ and int(genotype_key) not in self.test_keys['genotype']: continue graph.addTriple(stock_id, self.globaltt['has_genotype'], genotype_id) line_counter += 1 if not self.test_mode and limit is not None and line_counter > limit: break return
python
def _process_stock_genotype(self, limit): """ The genotypes of the stocks. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph raw = '/'.join((self.rawdir, 'stock_genotype')) LOG.info("processing stock genotype") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (stock_genotype_id, stock_id, genotype_id) = line stock_key = stock_id stock_id = self.idhash['stock'][stock_key] genotype_key = genotype_id genotype_id = self.idhash['genotype'][genotype_key] if self.test_mode \ and int(genotype_key) not in self.test_keys['genotype']: continue graph.addTriple(stock_id, self.globaltt['has_genotype'], genotype_id) line_counter += 1 if not self.test_mode and limit is not None and line_counter > limit: break return
[ "def", "_process_stock_genotype", "(", "self", ",", "limit", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'stock_genotype'", ")", ")", "LOG", ".", "info", "(", "\"processing stock genotype\"", ")", "line_counter", "=", "0", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f", ":", "filereader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "f", ".", "readline", "(", ")", "# read the header row; skip", "for", "line", "in", "filereader", ":", "(", "stock_genotype_id", ",", "stock_id", ",", "genotype_id", ")", "=", "line", "stock_key", "=", "stock_id", "stock_id", "=", "self", ".", "idhash", "[", "'stock'", "]", "[", "stock_key", "]", "genotype_key", "=", "genotype_id", "genotype_id", "=", "self", ".", "idhash", "[", "'genotype'", "]", "[", "genotype_key", "]", "if", "self", ".", "test_mode", "and", "int", "(", "genotype_key", ")", "not", "in", "self", ".", "test_keys", "[", "'genotype'", "]", ":", "continue", "graph", ".", "addTriple", "(", "stock_id", ",", "self", ".", "globaltt", "[", "'has_genotype'", "]", ",", "genotype_id", ")", "line_counter", "+=", "1", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "return" ]
The genotypes of the stocks. :param limit: :return:
[ "The", "genotypes", "of", "the", "stocks", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L926-L965
train
251,260
monarch-initiative/dipper
dipper/sources/FlyBase.py
FlyBase._process_dbxref
def _process_dbxref(self): """ We bring in the dbxref identifiers and store them in a hashmap for lookup in other functions. Note that some dbxrefs aren't mapped to identifiers. For example, 5004018 is mapped to a string, "endosome & imaginal disc epithelial cell | somatic clone..." In those cases, there just isn't a dbxref that's used when referencing with a cvterm; it'll just use the internal key. :return: """ raw = '/'.join((self.rawdir, 'dbxref')) LOG.info("processing dbxrefs") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (dbxref_id, db_id, accession, version, description, url) = line # dbxref_id db_id accession version description url # 1 2 SO:0000000 "" accession = accession.strip() db_id = db_id.strip() if accession != '' and db_id in self.localtt: # scrub some identifiers here mch = re.match( r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):', accession) if mch: accession = re.sub(mch.group(1)+r'\:', '', accession) elif re.match( r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)', accession): continue elif re.match(r'\:', accession): # starts with a colon accession = re.sub(r'\:', '', accession) elif re.search(r'\s', accession): # skip anything with a space # LOG.debug( # 'dbxref %s accession has a space: %s', dbxref_id, accession) continue if re.match(r'http', accession): did = accession else: prefix = self.localtt[db_id] did = ':'.join((prefix, accession)) if re.search(r'\:', accession) and prefix != 'DOI': LOG.warning('id %s may be malformed; skipping', did) self.dbxrefs[dbxref_id] = {db_id: did} elif url != '': self.dbxrefs[dbxref_id] = {db_id: url.strip()} else: continue # the following are some special cases that we scrub if int(db_id) == 2 and accession.strip() == 'transgenic_transposon': # transgenic_transposable_element self.dbxrefs[dbxref_id] = { db_id: self.globaltt['transgenic_transposable_element']} line_counter += 1 return
python
def _process_dbxref(self): """ We bring in the dbxref identifiers and store them in a hashmap for lookup in other functions. Note that some dbxrefs aren't mapped to identifiers. For example, 5004018 is mapped to a string, "endosome & imaginal disc epithelial cell | somatic clone..." In those cases, there just isn't a dbxref that's used when referencing with a cvterm; it'll just use the internal key. :return: """ raw = '/'.join((self.rawdir, 'dbxref')) LOG.info("processing dbxrefs") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (dbxref_id, db_id, accession, version, description, url) = line # dbxref_id db_id accession version description url # 1 2 SO:0000000 "" accession = accession.strip() db_id = db_id.strip() if accession != '' and db_id in self.localtt: # scrub some identifiers here mch = re.match( r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):', accession) if mch: accession = re.sub(mch.group(1)+r'\:', '', accession) elif re.match( r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)', accession): continue elif re.match(r'\:', accession): # starts with a colon accession = re.sub(r'\:', '', accession) elif re.search(r'\s', accession): # skip anything with a space # LOG.debug( # 'dbxref %s accession has a space: %s', dbxref_id, accession) continue if re.match(r'http', accession): did = accession else: prefix = self.localtt[db_id] did = ':'.join((prefix, accession)) if re.search(r'\:', accession) and prefix != 'DOI': LOG.warning('id %s may be malformed; skipping', did) self.dbxrefs[dbxref_id] = {db_id: did} elif url != '': self.dbxrefs[dbxref_id] = {db_id: url.strip()} else: continue # the following are some special cases that we scrub if int(db_id) == 2 and accession.strip() == 'transgenic_transposon': # transgenic_transposable_element self.dbxrefs[dbxref_id] = { db_id: self.globaltt['transgenic_transposable_element']} line_counter += 1 return
[ "def", "_process_dbxref", "(", "self", ")", ":", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'dbxref'", ")", ")", "LOG", ".", "info", "(", "\"processing dbxrefs\"", ")", "line_counter", "=", "0", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f", ":", "filereader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "f", ".", "readline", "(", ")", "# read the header row; skip", "for", "line", "in", "filereader", ":", "(", "dbxref_id", ",", "db_id", ",", "accession", ",", "version", ",", "description", ",", "url", ")", "=", "line", "# dbxref_id\tdb_id\taccession\tversion\tdescription\turl", "# 1\t2\tSO:0000000\t\"\"", "accession", "=", "accession", ".", "strip", "(", ")", "db_id", "=", "db_id", ".", "strip", "(", ")", "if", "accession", "!=", "''", "and", "db_id", "in", "self", ".", "localtt", ":", "# scrub some identifiers here", "mch", "=", "re", ".", "match", "(", "r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):'", ",", "accession", ")", "if", "mch", ":", "accession", "=", "re", ".", "sub", "(", "mch", ".", "group", "(", "1", ")", "+", "r'\\:'", ",", "''", ",", "accession", ")", "elif", "re", ".", "match", "(", "r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)'", ",", "accession", ")", ":", "continue", "elif", "re", ".", "match", "(", "r'\\:'", ",", "accession", ")", ":", "# starts with a colon", "accession", "=", "re", ".", "sub", "(", "r'\\:'", ",", "''", ",", "accession", ")", "elif", "re", ".", "search", "(", "r'\\s'", ",", "accession", ")", ":", "# skip anything with a space", "# LOG.debug(", "# 'dbxref %s accession has a space: %s', dbxref_id, accession)", "continue", "if", "re", ".", "match", "(", "r'http'", ",", "accession", ")", ":", "did", "=", "accession", "else", ":", "prefix", "=", "self", ".", "localtt", "[", "db_id", "]", "did", "=", "':'", ".", "join", "(", "(", "prefix", ",", "accession", ")", ")", "if", "re", ".", "search", "(", "r'\\:'", ",", "accession", ")", "and", "prefix", "!=", "'DOI'", ":", "LOG", ".", "warning", "(", "'id %s may be malformed; skipping'", ",", "did", ")", "self", ".", "dbxrefs", "[", "dbxref_id", "]", "=", "{", "db_id", ":", "did", "}", "elif", "url", "!=", "''", ":", "self", ".", "dbxrefs", "[", "dbxref_id", "]", "=", "{", "db_id", ":", "url", ".", "strip", "(", ")", "}", "else", ":", "continue", "# the following are some special cases that we scrub", "if", "int", "(", "db_id", ")", "==", "2", "and", "accession", ".", "strip", "(", ")", "==", "'transgenic_transposon'", ":", "# transgenic_transposable_element", "self", ".", "dbxrefs", "[", "dbxref_id", "]", "=", "{", "db_id", ":", "self", ".", "globaltt", "[", "'transgenic_transposable_element'", "]", "}", "line_counter", "+=", "1", "return" ]
We bring in the dbxref identifiers and store them in a hashmap for lookup in other functions. Note that some dbxrefs aren't mapped to identifiers. For example, 5004018 is mapped to a string, "endosome & imaginal disc epithelial cell | somatic clone..." In those cases, there just isn't a dbxref that's used when referencing with a cvterm; it'll just use the internal key. :return:
[ "We", "bring", "in", "the", "dbxref", "identifiers", "and", "store", "them", "in", "a", "hashmap", "for", "lookup", "in", "other", "functions", ".", "Note", "that", "some", "dbxrefs", "aren", "t", "mapped", "to", "identifiers", ".", "For", "example", "5004018", "is", "mapped", "to", "a", "string", "endosome", "&", "imaginal", "disc", "epithelial", "cell", "|", "somatic", "clone", "...", "In", "those", "cases", "there", "just", "isn", "t", "a", "dbxref", "that", "s", "used", "when", "referencing", "with", "a", "cvterm", ";", "it", "ll", "just", "use", "the", "internal", "key", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1033-L1103
train
251,261
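The dbxref record above scrubs chado accession strings and joins them to a prefix from a local translation table to form CURIEs. The simplified illustration below keeps only the core scrubbing step; the db_id-to-prefix entries and the shortened regex are assumptions made for the example, not dipper's actual localtt:

import re

# assumed db_id -> CURIE prefix entries; dipper's real mapping is its localtt
localtt = {'2': 'SO', '75': 'DOI'}

def to_curie(db_id, accession):
    """Scrub one chado dbxref accession into a CURIE (simplified sketch)."""
    accession = accession.strip()
    # drop a redundant leading ontology prefix such as 'SO:' or 'doi:'
    mch = re.match(r'(doi|SO|GO|FBcv|FBbt|FBdv|FlyBase):', accession)
    if mch:
        accession = re.sub(mch.group(1) + r'\:', '', accession)
    if re.match(r'http', accession):
        return accession                    # URLs pass through unchanged
    return ':'.join((localtt[db_id], accession))

print(to_curie('2', 'SO:0000704'))           # -> SO:0000704
print(to_curie('75', 'doi:10.1000/xyz123'))  # -> DOI:10.1000/xyz123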
monarch-initiative/dipper
dipper/sources/FlyBase.py
FlyBase._process_phenotype
def _process_phenotype(self, limit): """ Get the phenotypes, and declare the classes. If the "observable" is "unspecified", then we assign the phenotype to the "cvalue" id; otherwise we convert the phenotype into a uberpheno-style identifier, simply based on the anatomical part that's affected...that is listed as the observable_id, concatenated with the literal "PHENOTYPE" Note that some of the phenotypes no not have a dbxref to a FBcv; for these cases it will make a node with an anonymous node with an internal id like, "_fbcvtermkey100920PHENOTYPE". This is awkward, but not sure how else to construct identifiers. Maybe they should be fed back into Upheno and then leveraged by FB? Note that assay_id is the same for all current items, so we do nothing with this. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'phenotype')) LOG.info("processing phenotype") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (phenotype_id, uniquename, observable_id, attr_id, value, cvalue_id, assay_id) = line # 8505 unspecified # 20142 mesothoracic leg disc | somatic clone 87719 60468 60468 60468 # 8507 sex comb | ectopic 88877 60468 60468 60468 # 8508 tarsal segment 83664 60468 60468 60468 # 18404 oocyte | oogenesis stage S9 86769 60468 60468 60468 # for now make these as phenotypic classes # will need to dbxref at some point phenotype_key = phenotype_id phenotype_id = None phenotype_internal_id = self._makeInternalIdentifier( 'phenotype', phenotype_key) phenotype_label = None self.label_hash[phenotype_internal_id] = uniquename cvterm_id = None if observable_id != '' and int(observable_id) == 60468: # undefined - typically these are already phenotypes if cvalue_id in self.idhash['cvterm']: cvterm_id = self.idhash['cvterm'][cvalue_id] phenotype_id = self.idhash['cvterm'][cvalue_id] elif observable_id in self.idhash['cvterm']: # observations to anatomical classes cvterm_id = self.idhash['cvterm'][observable_id] phenotype_id = self.idhash['cvterm'][observable_id] + 'PHENOTYPE' if cvterm_id is not None and cvterm_id in self.label_hash: phenotype_label = self.label_hash[cvterm_id] phenotype_label += ' phenotype' self.label_hash[phenotype_id] = phenotype_label else: LOG.info('cvtermid=%s not in label_hash', cvterm_id) else: LOG.info( "No observable id or label for %s: %s", phenotype_key, uniquename) # TODO store this composite phenotype in some way # as a proper class definition? self.idhash['phenotype'][phenotype_key] = phenotype_id # assay_id is currently only "undefined" key=60468 if not self.test_mode and\ limit is not None and line_counter > limit: pass else: if phenotype_id is not None: # assume that these fit into the phenotypic uberpheno # elsewhere model.addClassToGraph(phenotype_id, phenotype_label) line_counter += 1 return
python
def _process_phenotype(self, limit): """ Get the phenotypes, and declare the classes. If the "observable" is "unspecified", then we assign the phenotype to the "cvalue" id; otherwise we convert the phenotype into a uberpheno-style identifier, simply based on the anatomical part that's affected...that is listed as the observable_id, concatenated with the literal "PHENOTYPE" Note that some of the phenotypes no not have a dbxref to a FBcv; for these cases it will make a node with an anonymous node with an internal id like, "_fbcvtermkey100920PHENOTYPE". This is awkward, but not sure how else to construct identifiers. Maybe they should be fed back into Upheno and then leveraged by FB? Note that assay_id is the same for all current items, so we do nothing with this. :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'phenotype')) LOG.info("processing phenotype") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (phenotype_id, uniquename, observable_id, attr_id, value, cvalue_id, assay_id) = line # 8505 unspecified # 20142 mesothoracic leg disc | somatic clone 87719 60468 60468 60468 # 8507 sex comb | ectopic 88877 60468 60468 60468 # 8508 tarsal segment 83664 60468 60468 60468 # 18404 oocyte | oogenesis stage S9 86769 60468 60468 60468 # for now make these as phenotypic classes # will need to dbxref at some point phenotype_key = phenotype_id phenotype_id = None phenotype_internal_id = self._makeInternalIdentifier( 'phenotype', phenotype_key) phenotype_label = None self.label_hash[phenotype_internal_id] = uniquename cvterm_id = None if observable_id != '' and int(observable_id) == 60468: # undefined - typically these are already phenotypes if cvalue_id in self.idhash['cvterm']: cvterm_id = self.idhash['cvterm'][cvalue_id] phenotype_id = self.idhash['cvterm'][cvalue_id] elif observable_id in self.idhash['cvterm']: # observations to anatomical classes cvterm_id = self.idhash['cvterm'][observable_id] phenotype_id = self.idhash['cvterm'][observable_id] + 'PHENOTYPE' if cvterm_id is not None and cvterm_id in self.label_hash: phenotype_label = self.label_hash[cvterm_id] phenotype_label += ' phenotype' self.label_hash[phenotype_id] = phenotype_label else: LOG.info('cvtermid=%s not in label_hash', cvterm_id) else: LOG.info( "No observable id or label for %s: %s", phenotype_key, uniquename) # TODO store this composite phenotype in some way # as a proper class definition? self.idhash['phenotype'][phenotype_key] = phenotype_id # assay_id is currently only "undefined" key=60468 if not self.test_mode and\ limit is not None and line_counter > limit: pass else: if phenotype_id is not None: # assume that these fit into the phenotypic uberpheno # elsewhere model.addClassToGraph(phenotype_id, phenotype_label) line_counter += 1 return
[ "def", "_process_phenotype", "(", "self", ",", "limit", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'phenotype'", ")", ")", "LOG", ".", "info", "(", "\"processing phenotype\"", ")", "line_counter", "=", "0", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f", ":", "filereader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "f", ".", "readline", "(", ")", "# read the header row; skip", "for", "line", "in", "filereader", ":", "(", "phenotype_id", ",", "uniquename", ",", "observable_id", ",", "attr_id", ",", "value", ",", "cvalue_id", ",", "assay_id", ")", "=", "line", "# 8505\tunspecified", "# 20142\tmesothoracic leg disc | somatic clone 87719 60468 60468 60468", "# 8507\tsex comb | ectopic 88877 60468 60468 60468", "# 8508\ttarsal segment\t83664 60468 60468 60468", "# 18404\toocyte | oogenesis stage S9\t86769 60468 60468 60468", "# for now make these as phenotypic classes", "# will need to dbxref at some point", "phenotype_key", "=", "phenotype_id", "phenotype_id", "=", "None", "phenotype_internal_id", "=", "self", ".", "_makeInternalIdentifier", "(", "'phenotype'", ",", "phenotype_key", ")", "phenotype_label", "=", "None", "self", ".", "label_hash", "[", "phenotype_internal_id", "]", "=", "uniquename", "cvterm_id", "=", "None", "if", "observable_id", "!=", "''", "and", "int", "(", "observable_id", ")", "==", "60468", ":", "# undefined - typically these are already phenotypes", "if", "cvalue_id", "in", "self", ".", "idhash", "[", "'cvterm'", "]", ":", "cvterm_id", "=", "self", ".", "idhash", "[", "'cvterm'", "]", "[", "cvalue_id", "]", "phenotype_id", "=", "self", ".", "idhash", "[", "'cvterm'", "]", "[", "cvalue_id", "]", "elif", "observable_id", "in", "self", ".", "idhash", "[", "'cvterm'", "]", ":", "# observations to anatomical classes", "cvterm_id", "=", "self", ".", "idhash", "[", "'cvterm'", "]", "[", "observable_id", "]", "phenotype_id", "=", "self", ".", "idhash", "[", "'cvterm'", "]", "[", "observable_id", "]", "+", "'PHENOTYPE'", "if", "cvterm_id", "is", "not", "None", "and", "cvterm_id", "in", "self", ".", "label_hash", ":", "phenotype_label", "=", "self", ".", "label_hash", "[", "cvterm_id", "]", "phenotype_label", "+=", "' phenotype'", "self", ".", "label_hash", "[", "phenotype_id", "]", "=", "phenotype_label", "else", ":", "LOG", ".", "info", "(", "'cvtermid=%s not in label_hash'", ",", "cvterm_id", ")", "else", ":", "LOG", ".", "info", "(", "\"No observable id or label for %s: %s\"", ",", "phenotype_key", ",", "uniquename", ")", "# TODO store this composite phenotype in some way", "# as a proper class definition?", "self", ".", "idhash", "[", "'phenotype'", "]", "[", "phenotype_key", "]", "=", "phenotype_id", "# assay_id is currently only \"undefined\" key=60468", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "pass", "else", ":", "if", "phenotype_id", "is", "not", "None", ":", "# assume that these fit into the phenotypic uberpheno", "# elsewhere", "model", ".", "addClassToGraph", "(", "phenotype_id", ",", "phenotype_label", ")", "line_counter", "+=", "1", "return" ]
Get the phenotypes, and declare the classes. If the "observable" is "unspecified", then we assign the phenotype to the "cvalue" id; otherwise we convert the phenotype into a uberpheno-style identifier, simply based on the anatomical part that's affected...that is listed as the observable_id, concatenated with the literal "PHENOTYPE" Note that some of the phenotypes no not have a dbxref to a FBcv; for these cases it will make a node with an anonymous node with an internal id like, "_fbcvtermkey100920PHENOTYPE". This is awkward, but not sure how else to construct identifiers. Maybe they should be fed back into Upheno and then leveraged by FB? Note that assay_id is the same for all current items, so we do nothing with this. :param limit: :return:
[ "Get", "the", "phenotypes", "and", "declare", "the", "classes", ".", "If", "the", "observable", "is", "unspecified", "then", "we", "assign", "the", "phenotype", "to", "the", "cvalue", "id", ";", "otherwise", "we", "convert", "the", "phenotype", "into", "a", "uberpheno", "-", "style", "identifier", "simply", "based", "on", "the", "anatomical", "part", "that", "s", "affected", "...", "that", "is", "listed", "as", "the", "observable_id", "concatenated", "with", "the", "literal", "PHENOTYPE" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1105-L1195
train
251,262
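The phenotype record above derives identifiers by appending the literal "PHENOTYPE" to an anatomy term id, falling back to internal keys like "_fbcvtermkey100920PHENOTYPE" (that example string comes from the record's own docstring). A toy sketch of that construction; the cvterm-to-FBbt mapping shown is a placeholder, not the real lookup:

# toy stand-ins for the idhash/label_hash lookups built from cvterm and dbxref
cvterm_by_key = {
    '87719': 'FBbt:00001234',            # placeholder anatomy term id
    '100920': '_fbcvtermkey100920',      # no dbxref -> internal key is kept
}
label_by_id = {'FBbt:00001234': 'mesothoracic leg disc'}

def phenotype_id_for(observable_key):
    """Anatomy observable -> uberpheno-style phenotype id and label (sketch)."""
    cvterm_id = cvterm_by_key[observable_key]
    phenotype_id = cvterm_id + 'PHENOTYPE'
    label = label_by_id.get(cvterm_id)
    phenotype_label = label + ' phenotype' if label is not None else None
    return phenotype_id, phenotype_label

print(phenotype_id_for('87719'))   # ('FBbt:00001234PHENOTYPE', 'mesothoracic leg disc phenotype')
print(phenotype_id_for('100920'))  # ('_fbcvtermkey100920PHENOTYPE', None)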
monarch-initiative/dipper
dipper/sources/FlyBase.py
FlyBase._process_cvterm
def _process_cvterm(self): """ CVterms are the internal identifiers for any controlled vocab or ontology term. Many are xrefd to actual ontologies. The actual external id is stored in the dbxref table, which we place into the internal hashmap for lookup with the cvterm id. The name of the external term is stored in the "name" element of this table, and we add that to the label hashmap for lookup elsewhere :return: """ line_counter = 0 raw = '/'.join((self.rawdir, 'cvterm')) LOG.info("processing cvterms") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 (cvterm_id, cv_id, definition, dbxref_id, is_obsolete, is_relationshiptype, name) = line # 316 6 1665919 0 0 rRNA_cleavage_snoRNA_primary_transcript # 28 5 1663309 0 0 synonym # 455 6 1665920 0 0 tmRNA # not sure the following is necessary # cv_prefixes = { # 6 : 'SO', # 20: 'FBcv', # 28: 'GO', # 29: 'GO', # 30: 'GO', # 31: 'FBcv', # not actually FBcv - I think FBbt. # 32: 'FBdv', # 37: 'GO', # these are relationships # 73: 'DOID' # } # if int(cv_id) not in cv_prefixes: # continue cvterm_key = cvterm_id cvterm_id = self._makeInternalIdentifier('cvterm', cvterm_key) self.label_hash[cvterm_id] = name self.idhash['cvterm'][cvterm_key] = cvterm_id # look up the dbxref_id for the cvterm # hopefully it's one-to-one dbxrefs = self.dbxrefs.get(dbxref_id) if dbxrefs is not None: if len(dbxrefs) > 1: LOG.info( ">1 dbxref for this cvterm (%s: %s): %s", str(cvterm_id), name, dbxrefs.values()) elif len(dbxrefs) == 1: # replace the cvterm with # the dbxref (external) identifier did = dbxrefs.popitem()[1] # get the value self.idhash['cvterm'][cvterm_key] = did # also add the label to the dbxref self.label_hash[did] = name return
python
def _process_cvterm(self): """ CVterms are the internal identifiers for any controlled vocab or ontology term. Many are xrefd to actual ontologies. The actual external id is stored in the dbxref table, which we place into the internal hashmap for lookup with the cvterm id. The name of the external term is stored in the "name" element of this table, and we add that to the label hashmap for lookup elsewhere :return: """ line_counter = 0 raw = '/'.join((self.rawdir, 'cvterm')) LOG.info("processing cvterms") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 (cvterm_id, cv_id, definition, dbxref_id, is_obsolete, is_relationshiptype, name) = line # 316 6 1665919 0 0 rRNA_cleavage_snoRNA_primary_transcript # 28 5 1663309 0 0 synonym # 455 6 1665920 0 0 tmRNA # not sure the following is necessary # cv_prefixes = { # 6 : 'SO', # 20: 'FBcv', # 28: 'GO', # 29: 'GO', # 30: 'GO', # 31: 'FBcv', # not actually FBcv - I think FBbt. # 32: 'FBdv', # 37: 'GO', # these are relationships # 73: 'DOID' # } # if int(cv_id) not in cv_prefixes: # continue cvterm_key = cvterm_id cvterm_id = self._makeInternalIdentifier('cvterm', cvterm_key) self.label_hash[cvterm_id] = name self.idhash['cvterm'][cvterm_key] = cvterm_id # look up the dbxref_id for the cvterm # hopefully it's one-to-one dbxrefs = self.dbxrefs.get(dbxref_id) if dbxrefs is not None: if len(dbxrefs) > 1: LOG.info( ">1 dbxref for this cvterm (%s: %s): %s", str(cvterm_id), name, dbxrefs.values()) elif len(dbxrefs) == 1: # replace the cvterm with # the dbxref (external) identifier did = dbxrefs.popitem()[1] # get the value self.idhash['cvterm'][cvterm_key] = did # also add the label to the dbxref self.label_hash[did] = name return
[ "def", "_process_cvterm", "(", "self", ")", ":", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'cvterm'", ")", ")", "LOG", ".", "info", "(", "\"processing cvterms\"", ")", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f", ":", "f", ".", "readline", "(", ")", "# read the header row; skip", "filereader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "line", "in", "filereader", ":", "line_counter", "+=", "1", "(", "cvterm_id", ",", "cv_id", ",", "definition", ",", "dbxref_id", ",", "is_obsolete", ",", "is_relationshiptype", ",", "name", ")", "=", "line", "# 316 6 1665919 0 0 rRNA_cleavage_snoRNA_primary_transcript", "# 28 5 1663309 0 0 synonym", "# 455 6 1665920 0 0 tmRNA", "# not sure the following is necessary", "# cv_prefixes = {", "# 6 : 'SO',", "# 20: 'FBcv',", "# 28: 'GO',", "# 29: 'GO',", "# 30: 'GO',", "# 31: 'FBcv', # not actually FBcv - I think FBbt.", "# 32: 'FBdv',", "# 37: 'GO', # these are relationships", "# 73: 'DOID'", "# }", "# if int(cv_id) not in cv_prefixes:", "# continue", "cvterm_key", "=", "cvterm_id", "cvterm_id", "=", "self", ".", "_makeInternalIdentifier", "(", "'cvterm'", ",", "cvterm_key", ")", "self", ".", "label_hash", "[", "cvterm_id", "]", "=", "name", "self", ".", "idhash", "[", "'cvterm'", "]", "[", "cvterm_key", "]", "=", "cvterm_id", "# look up the dbxref_id for the cvterm", "# hopefully it's one-to-one", "dbxrefs", "=", "self", ".", "dbxrefs", ".", "get", "(", "dbxref_id", ")", "if", "dbxrefs", "is", "not", "None", ":", "if", "len", "(", "dbxrefs", ")", ">", "1", ":", "LOG", ".", "info", "(", "\">1 dbxref for this cvterm (%s: %s): %s\"", ",", "str", "(", "cvterm_id", ")", ",", "name", ",", "dbxrefs", ".", "values", "(", ")", ")", "elif", "len", "(", "dbxrefs", ")", "==", "1", ":", "# replace the cvterm with", "# the dbxref (external) identifier", "did", "=", "dbxrefs", ".", "popitem", "(", ")", "[", "1", "]", "# get the value", "self", ".", "idhash", "[", "'cvterm'", "]", "[", "cvterm_key", "]", "=", "did", "# also add the label to the dbxref", "self", ".", "label_hash", "[", "did", "]", "=", "name", "return" ]
CVterms are the internal identifiers for any controlled vocab or ontology term. Many are xrefd to actual ontologies. The actual external id is stored in the dbxref table, which we place into the internal hashmap for lookup with the cvterm id. The name of the external term is stored in the "name" element of this table, and we add that to the label hashmap for lookup elsewhere :return:
[ "CVterms", "are", "the", "internal", "identifiers", "for", "any", "controlled", "vocab", "or", "ontology", "term", ".", "Many", "are", "xrefd", "to", "actual", "ontologies", ".", "The", "actual", "external", "id", "is", "stored", "in", "the", "dbxref", "table", "which", "we", "place", "into", "the", "internal", "hashmap", "for", "lookup", "with", "the", "cvterm", "id", ".", "The", "name", "of", "the", "external", "term", "is", "stored", "in", "the", "name", "element", "of", "this", "table", "and", "we", "add", "that", "to", "the", "label", "hashmap", "for", "lookup", "elsewhere" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1324-L1389
train
251,263
monarch-initiative/dipper
dipper/sources/FlyBase.py
FlyBase._process_organisms
def _process_organisms(self, limit): """ The internal identifiers for the organisms in flybase :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'organism')) LOG.info("processing organisms") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (organism_id, abbreviation, genus, species, common_name, comment) = line # 1 Dmel Drosophila melanogaster fruit fly # 2 Comp Computational result line_counter += 1 tax_internal_id = self._makeInternalIdentifier('organism', organism_id) tax_label = ' '.join((genus, species)) tax_id = tax_internal_id self.idhash['organism'][organism_id] = tax_id self.label_hash[tax_id] = tax_label # we won't actually add the organism to the graph, # unless we actually use it therefore it is added outside of # this function if self.test_mode and int(organism_id) not in self.test_keys['organism']: continue if not self.test_mode and limit is not None and line_counter > limit: pass else: model.addClassToGraph(tax_id) for s in [common_name, abbreviation]: if s is not None and s.strip() != '': model.addSynonym(tax_id, s) model.addComment(tax_id, tax_internal_id) return
python
def _process_organisms(self, limit): """ The internal identifiers for the organisms in flybase :param limit: :return: """ if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'organism')) LOG.info("processing organisms") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (organism_id, abbreviation, genus, species, common_name, comment) = line # 1 Dmel Drosophila melanogaster fruit fly # 2 Comp Computational result line_counter += 1 tax_internal_id = self._makeInternalIdentifier('organism', organism_id) tax_label = ' '.join((genus, species)) tax_id = tax_internal_id self.idhash['organism'][organism_id] = tax_id self.label_hash[tax_id] = tax_label # we won't actually add the organism to the graph, # unless we actually use it therefore it is added outside of # this function if self.test_mode and int(organism_id) not in self.test_keys['organism']: continue if not self.test_mode and limit is not None and line_counter > limit: pass else: model.addClassToGraph(tax_id) for s in [common_name, abbreviation]: if s is not None and s.strip() != '': model.addSynonym(tax_id, s) model.addComment(tax_id, tax_internal_id) return
[ "def", "_process_organisms", "(", "self", ",", "limit", ")", ":", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'organism'", ")", ")", "LOG", ".", "info", "(", "\"processing organisms\"", ")", "line_counter", "=", "0", "with", "open", "(", "raw", ",", "'r'", ")", "as", "f", ":", "filereader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "f", ".", "readline", "(", ")", "# read the header row; skip", "for", "line", "in", "filereader", ":", "(", "organism_id", ",", "abbreviation", ",", "genus", ",", "species", ",", "common_name", ",", "comment", ")", "=", "line", "# 1\tDmel\tDrosophila\tmelanogaster\tfruit fly", "# 2\tComp\tComputational\tresult", "line_counter", "+=", "1", "tax_internal_id", "=", "self", ".", "_makeInternalIdentifier", "(", "'organism'", ",", "organism_id", ")", "tax_label", "=", "' '", ".", "join", "(", "(", "genus", ",", "species", ")", ")", "tax_id", "=", "tax_internal_id", "self", ".", "idhash", "[", "'organism'", "]", "[", "organism_id", "]", "=", "tax_id", "self", ".", "label_hash", "[", "tax_id", "]", "=", "tax_label", "# we won't actually add the organism to the graph,", "# unless we actually use it therefore it is added outside of", "# this function", "if", "self", ".", "test_mode", "and", "int", "(", "organism_id", ")", "not", "in", "self", ".", "test_keys", "[", "'organism'", "]", ":", "continue", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "pass", "else", ":", "model", ".", "addClassToGraph", "(", "tax_id", ")", "for", "s", "in", "[", "common_name", ",", "abbreviation", "]", ":", "if", "s", "is", "not", "None", "and", "s", ".", "strip", "(", ")", "!=", "''", ":", "model", ".", "addSynonym", "(", "tax_id", ",", "s", ")", "model", ".", "addComment", "(", "tax_id", ",", "tax_internal_id", ")", "return" ]
The internal identifiers for the organisms in flybase :param limit: :return:
[ "The", "internal", "identifiers", "for", "the", "organisms", "in", "flybase" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1792-L1844
train
251,264
monarch-initiative/dipper
dipper/sources/NCBIGene.py
NCBIGene._add_gene_equivalencies
def _add_gene_equivalencies(self, xrefs, gene_id, taxon): """ Add equivalentClass and sameAs relationships Uses external resource map located in /resources/clique_leader.yaml to determine if an NCBITaxon ID space is a clique leader """ clique_map = self.open_and_parse_yaml(self.resources['clique_leader']) if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) filter_out = ['Vega', 'IMGT/GENE-DB', 'Araport'] # deal with the dbxrefs # MIM:614444|HGNC:HGNC:16851|Ensembl:ENSG00000136828|HPRD:11479|Vega:OTTHUMG00000020696 for dbxref in xrefs.strip().split('|'): prefix = ':'.join(dbxref.split(':')[:-1]).strip() if prefix in self.localtt: prefix = self.localtt[prefix] dbxref_curie = ':'.join((prefix, dbxref.split(':')[-1])) if dbxref_curie is not None and prefix != '': if prefix == 'HPRD': # proteins are not == genes. model.addTriple( gene_id, self.globaltt['has gene product'], dbxref_curie) continue # skip some of these for now based on curie prefix if prefix in filter_out: continue if prefix == 'ENSEMBL': model.addXref(gene_id, dbxref_curie) if prefix == 'OMIM': if DipperUtil.is_omim_disease(dbxref_curie): continue try: if self.class_or_indiv.get(gene_id) == 'C': model.addEquivalentClass(gene_id, dbxref_curie) if taxon in clique_map: if clique_map[taxon] == prefix: model.makeLeader(dbxref_curie) elif clique_map[taxon] == gene_id.split(':')[0]: model.makeLeader(gene_id) else: model.addSameIndividual(gene_id, dbxref_curie) except AssertionError as err: LOG.warning("Error parsing %s: %s", gene_id, err) return
python
def _add_gene_equivalencies(self, xrefs, gene_id, taxon): """ Add equivalentClass and sameAs relationships Uses external resource map located in /resources/clique_leader.yaml to determine if an NCBITaxon ID space is a clique leader """ clique_map = self.open_and_parse_yaml(self.resources['clique_leader']) if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) filter_out = ['Vega', 'IMGT/GENE-DB', 'Araport'] # deal with the dbxrefs # MIM:614444|HGNC:HGNC:16851|Ensembl:ENSG00000136828|HPRD:11479|Vega:OTTHUMG00000020696 for dbxref in xrefs.strip().split('|'): prefix = ':'.join(dbxref.split(':')[:-1]).strip() if prefix in self.localtt: prefix = self.localtt[prefix] dbxref_curie = ':'.join((prefix, dbxref.split(':')[-1])) if dbxref_curie is not None and prefix != '': if prefix == 'HPRD': # proteins are not == genes. model.addTriple( gene_id, self.globaltt['has gene product'], dbxref_curie) continue # skip some of these for now based on curie prefix if prefix in filter_out: continue if prefix == 'ENSEMBL': model.addXref(gene_id, dbxref_curie) if prefix == 'OMIM': if DipperUtil.is_omim_disease(dbxref_curie): continue try: if self.class_or_indiv.get(gene_id) == 'C': model.addEquivalentClass(gene_id, dbxref_curie) if taxon in clique_map: if clique_map[taxon] == prefix: model.makeLeader(dbxref_curie) elif clique_map[taxon] == gene_id.split(':')[0]: model.makeLeader(gene_id) else: model.addSameIndividual(gene_id, dbxref_curie) except AssertionError as err: LOG.warning("Error parsing %s: %s", gene_id, err) return
[ "def", "_add_gene_equivalencies", "(", "self", ",", "xrefs", ",", "gene_id", ",", "taxon", ")", ":", "clique_map", "=", "self", ".", "open_and_parse_yaml", "(", "self", ".", "resources", "[", "'clique_leader'", "]", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "filter_out", "=", "[", "'Vega'", ",", "'IMGT/GENE-DB'", ",", "'Araport'", "]", "# deal with the dbxrefs", "# MIM:614444|HGNC:HGNC:16851|Ensembl:ENSG00000136828|HPRD:11479|Vega:OTTHUMG00000020696", "for", "dbxref", "in", "xrefs", ".", "strip", "(", ")", ".", "split", "(", "'|'", ")", ":", "prefix", "=", "':'", ".", "join", "(", "dbxref", ".", "split", "(", "':'", ")", "[", ":", "-", "1", "]", ")", ".", "strip", "(", ")", "if", "prefix", "in", "self", ".", "localtt", ":", "prefix", "=", "self", ".", "localtt", "[", "prefix", "]", "dbxref_curie", "=", "':'", ".", "join", "(", "(", "prefix", ",", "dbxref", ".", "split", "(", "':'", ")", "[", "-", "1", "]", ")", ")", "if", "dbxref_curie", "is", "not", "None", "and", "prefix", "!=", "''", ":", "if", "prefix", "==", "'HPRD'", ":", "# proteins are not == genes.", "model", ".", "addTriple", "(", "gene_id", ",", "self", ".", "globaltt", "[", "'has gene product'", "]", ",", "dbxref_curie", ")", "continue", "# skip some of these for now based on curie prefix", "if", "prefix", "in", "filter_out", ":", "continue", "if", "prefix", "==", "'ENSEMBL'", ":", "model", ".", "addXref", "(", "gene_id", ",", "dbxref_curie", ")", "if", "prefix", "==", "'OMIM'", ":", "if", "DipperUtil", ".", "is_omim_disease", "(", "dbxref_curie", ")", ":", "continue", "try", ":", "if", "self", ".", "class_or_indiv", ".", "get", "(", "gene_id", ")", "==", "'C'", ":", "model", ".", "addEquivalentClass", "(", "gene_id", ",", "dbxref_curie", ")", "if", "taxon", "in", "clique_map", ":", "if", "clique_map", "[", "taxon", "]", "==", "prefix", ":", "model", ".", "makeLeader", "(", "dbxref_curie", ")", "elif", "clique_map", "[", "taxon", "]", "==", "gene_id", ".", "split", "(", "':'", ")", "[", "0", "]", ":", "model", ".", "makeLeader", "(", "gene_id", ")", "else", ":", "model", ".", "addSameIndividual", "(", "gene_id", ",", "dbxref_curie", ")", "except", "AssertionError", "as", "err", ":", "LOG", ".", "warning", "(", "\"Error parsing %s: %s\"", ",", "gene_id", ",", "err", ")", "return" ]
Add equivalentClass and sameAs relationships Uses external resource map located in /resources/clique_leader.yaml to determine if an NCBITaxon ID space is a clique leader
[ "Add", "equivalentClass", "and", "sameAs", "relationships" ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/NCBIGene.py#L377-L430
train
251,265
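The NCBIGene record above decides clique leadership by comparing a taxon-keyed entry from resources/clique_leader.yaml against CURIE prefixes. The YAML shape and the taxon/gene identifiers below are assumptions made for illustration (the real file is not reproduced in these records); only the comparison logic mirrors the code above:

import yaml  # PyYAML

# assumed shape of resources/clique_leader.yaml: taxon id -> preferred id space
clique_map = yaml.safe_load("""
'NCBITaxon:9606': 'HGNC'
'NCBITaxon:10090': 'MGI'
""")

def pick_leader(taxon, gene_id, dbxref_curie):
    """Return the equivalent id that should be flagged as clique leader, if any."""
    prefix = dbxref_curie.split(':')[0]
    if taxon not in clique_map:
        return None
    if clique_map[taxon] == prefix:
        return dbxref_curie
    if clique_map[taxon] == gene_id.split(':')[0]:
        return gene_id
    return None

# identifiers here are only for illustration
print(pick_leader('NCBITaxon:9606', 'NCBIGene:7157', 'HGNC:11998'))  # HGNC:11998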
monarch-initiative/dipper
dipper/sources/NCBIGene.py
NCBIGene._get_gene2pubmed
def _get_gene2pubmed(self, limit): """ Loops through the gene2pubmed file and adds a simple triple to say that a given publication is_about a gene. Publications are added as NamedIndividuals. These are filtered on the taxon. :param limit: :return: """ src_key = 'gene2pubmed' if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing Gene records") line_counter = 0 myfile = '/'.join((self.rawdir, self.files[src_key]['file'])) LOG.info("FILE: %s", myfile) assoc_counter = 0 col = self.files[src_key]['columns'] with gzip.open(myfile, 'rb') as tsv: row = tsv.readline().decode().strip().split('\t') row[0] = row[0][1:] # strip comment if col != row: LOG.info( '%s\nExpected Headers:\t%s\nRecived Headers:\t %s\n', src_key, col, row) for line in tsv: line_counter += 1 # skip comments row = line.decode().strip().split('\t') if row[0][0] == '#': continue # (tax_num, gene_num, pubmed_num) = line.split('\t') # ## set id_filter=None in init if you don't want to have a filter # if self.id_filter is not None: # if ((self.id_filter == 'taxids' and \ # (int(tax_num) not in self.tax_ids)) # or (self.id_filter == 'geneids' and \ # (int(gene_num) not in self.gene_ids))): # continue # #### end filter gene_num = row[col.index('GeneID')].strip() if self.test_mode and int(gene_num) not in self.gene_ids: continue tax_num = row[col.index('tax_id')].strip() if not self.test_mode and tax_num not in self.tax_ids: continue pubmed_num = row[col.index('PubMed_ID')].strip() if gene_num == '-' or pubmed_num == '-': continue gene_id = ':'.join(('NCBIGene', gene_num)) pubmed_id = ':'.join(('PMID', pubmed_num)) if self.class_or_indiv.get(gene_id) == 'C': model.addClassToGraph(gene_id, None) else: model.addIndividualToGraph(gene_id, None) # add the publication as a NamedIndividual # add type publication model.addIndividualToGraph(pubmed_id, None, None) reference = Reference( graph, pubmed_id, self.globaltt['journal article']) reference.addRefToGraph() graph.addTriple( pubmed_id, self.globaltt['is_about'], gene_id) assoc_counter += 1 if not self.test_mode and limit is not None and line_counter > limit: break LOG.info( "Processed %d pub-gene associations", assoc_counter) return
python
def _get_gene2pubmed(self, limit): """ Loops through the gene2pubmed file and adds a simple triple to say that a given publication is_about a gene. Publications are added as NamedIndividuals. These are filtered on the taxon. :param limit: :return: """ src_key = 'gene2pubmed' if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing Gene records") line_counter = 0 myfile = '/'.join((self.rawdir, self.files[src_key]['file'])) LOG.info("FILE: %s", myfile) assoc_counter = 0 col = self.files[src_key]['columns'] with gzip.open(myfile, 'rb') as tsv: row = tsv.readline().decode().strip().split('\t') row[0] = row[0][1:] # strip comment if col != row: LOG.info( '%s\nExpected Headers:\t%s\nRecived Headers:\t %s\n', src_key, col, row) for line in tsv: line_counter += 1 # skip comments row = line.decode().strip().split('\t') if row[0][0] == '#': continue # (tax_num, gene_num, pubmed_num) = line.split('\t') # ## set id_filter=None in init if you don't want to have a filter # if self.id_filter is not None: # if ((self.id_filter == 'taxids' and \ # (int(tax_num) not in self.tax_ids)) # or (self.id_filter == 'geneids' and \ # (int(gene_num) not in self.gene_ids))): # continue # #### end filter gene_num = row[col.index('GeneID')].strip() if self.test_mode and int(gene_num) not in self.gene_ids: continue tax_num = row[col.index('tax_id')].strip() if not self.test_mode and tax_num not in self.tax_ids: continue pubmed_num = row[col.index('PubMed_ID')].strip() if gene_num == '-' or pubmed_num == '-': continue gene_id = ':'.join(('NCBIGene', gene_num)) pubmed_id = ':'.join(('PMID', pubmed_num)) if self.class_or_indiv.get(gene_id) == 'C': model.addClassToGraph(gene_id, None) else: model.addIndividualToGraph(gene_id, None) # add the publication as a NamedIndividual # add type publication model.addIndividualToGraph(pubmed_id, None, None) reference = Reference( graph, pubmed_id, self.globaltt['journal article']) reference.addRefToGraph() graph.addTriple( pubmed_id, self.globaltt['is_about'], gene_id) assoc_counter += 1 if not self.test_mode and limit is not None and line_counter > limit: break LOG.info( "Processed %d pub-gene associations", assoc_counter) return
[ "def", "_get_gene2pubmed", "(", "self", ",", "limit", ")", ":", "src_key", "=", "'gene2pubmed'", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "LOG", ".", "info", "(", "\"Processing Gene records\"", ")", "line_counter", "=", "0", "myfile", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "src_key", "]", "[", "'file'", "]", ")", ")", "LOG", ".", "info", "(", "\"FILE: %s\"", ",", "myfile", ")", "assoc_counter", "=", "0", "col", "=", "self", ".", "files", "[", "src_key", "]", "[", "'columns'", "]", "with", "gzip", ".", "open", "(", "myfile", ",", "'rb'", ")", "as", "tsv", ":", "row", "=", "tsv", ".", "readline", "(", ")", ".", "decode", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "row", "[", "0", "]", "=", "row", "[", "0", "]", "[", "1", ":", "]", "# strip comment", "if", "col", "!=", "row", ":", "LOG", ".", "info", "(", "'%s\\nExpected Headers:\\t%s\\nRecived Headers:\\t %s\\n'", ",", "src_key", ",", "col", ",", "row", ")", "for", "line", "in", "tsv", ":", "line_counter", "+=", "1", "# skip comments", "row", "=", "line", ".", "decode", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "if", "row", "[", "0", "]", "[", "0", "]", "==", "'#'", ":", "continue", "# (tax_num, gene_num, pubmed_num) = line.split('\\t')", "# ## set id_filter=None in init if you don't want to have a filter", "# if self.id_filter is not None:", "# if ((self.id_filter == 'taxids' and \\", "# (int(tax_num) not in self.tax_ids))", "# or (self.id_filter == 'geneids' and \\", "# (int(gene_num) not in self.gene_ids))):", "# continue", "# #### end filter", "gene_num", "=", "row", "[", "col", ".", "index", "(", "'GeneID'", ")", "]", ".", "strip", "(", ")", "if", "self", ".", "test_mode", "and", "int", "(", "gene_num", ")", "not", "in", "self", ".", "gene_ids", ":", "continue", "tax_num", "=", "row", "[", "col", ".", "index", "(", "'tax_id'", ")", "]", ".", "strip", "(", ")", "if", "not", "self", ".", "test_mode", "and", "tax_num", "not", "in", "self", ".", "tax_ids", ":", "continue", "pubmed_num", "=", "row", "[", "col", ".", "index", "(", "'PubMed_ID'", ")", "]", ".", "strip", "(", ")", "if", "gene_num", "==", "'-'", "or", "pubmed_num", "==", "'-'", ":", "continue", "gene_id", "=", "':'", ".", "join", "(", "(", "'NCBIGene'", ",", "gene_num", ")", ")", "pubmed_id", "=", "':'", ".", "join", "(", "(", "'PMID'", ",", "pubmed_num", ")", ")", "if", "self", ".", "class_or_indiv", ".", "get", "(", "gene_id", ")", "==", "'C'", ":", "model", ".", "addClassToGraph", "(", "gene_id", ",", "None", ")", "else", ":", "model", ".", "addIndividualToGraph", "(", "gene_id", ",", "None", ")", "# add the publication as a NamedIndividual", "# add type publication", "model", ".", "addIndividualToGraph", "(", "pubmed_id", ",", "None", ",", "None", ")", "reference", "=", "Reference", "(", "graph", ",", "pubmed_id", ",", "self", ".", "globaltt", "[", "'journal article'", "]", ")", "reference", ".", "addRefToGraph", "(", ")", "graph", ".", "addTriple", "(", "pubmed_id", ",", "self", ".", "globaltt", "[", "'is_about'", "]", ",", "gene_id", ")", "assoc_counter", "+=", "1", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Processed %d pub-gene associations\"", ",", "assoc_counter", ")", "return" ]
Loops through the gene2pubmed file and adds a simple triple to say that a given publication is_about a gene. Publications are added as NamedIndividuals. These are filtered on the taxon. :param limit: :return:
[ "Loops", "through", "the", "gene2pubmed", "file", "and", "adds", "a", "simple", "triple", "to", "say", "that", "a", "given", "publication", "is_about", "a", "gene", ".", "Publications", "are", "added", "as", "NamedIndividuals", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/NCBIGene.py#L516-L598
train
251,266
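The gene2pubmed record above reduces each data row to two CURIEs joined by an is_about triple, skipping NCBI's '-' placeholder for missing values. A minimal sketch of just that id construction, using the same column names; make_ids is a local helper invented for this sketch, not part of the source:

    def make_ids(row, col):
        gene_num = row[col.index('GeneID')].strip()
        pubmed_num = row[col.index('PubMed_ID')].strip()
        if gene_num == '-' or pubmed_num == '-':
            return None  # '-' is treated as a missing value, as in the source
        return ':'.join(('NCBIGene', gene_num)), ':'.join(('PMID', pubmed_num))

    col = ['tax_id', 'GeneID', 'PubMed_ID']
    assert make_ids(['9606', '1234', '999'], col) == ('NCBIGene:1234', 'PMID:999')
    assert make_ids(['9606', '-', '999'], col) is None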
monarch-initiative/dipper
dipper/sources/OMIM.py
OMIM.process_entries
def process_entries( self, omimids, transform, included_fields=None, graph=None, limit=None, globaltt=None ): """ Given a list of omim ids, this will use the omim API to fetch the entries, according to the ```included_fields``` passed as a parameter. If a transformation function is supplied, this will iterate over each entry, and either add the results to the supplied ```graph``` or will return a set of processed entries that the calling function can further iterate. If no ```included_fields``` are provided, this will simply fetch the basic entry from omim, which includes an entry's: prefix, mimNumber, status, and titles. :param omimids: the set of omim entry ids to fetch using their API :param transform: Function to transform each omim entry when looping :param included_fields: A set of what fields are required to retrieve from the API :param graph: the graph to add the transformed data into :return: """ omimparams = {} # add the included_fields as parameters if included_fields is not None and included_fields: omimparams['include'] = ','.join(included_fields) processed_entries = list() # scrub any omim prefixes from the omimids before processing # cleanomimids = set() # for omimid in omimids: # scrubbed = str(omimid).split(':')[-1] # if re.match(r'^\d+$', str(scrubbed)): # cleanomimids.update(scrubbed) # omimids = list(cleanomimids) cleanomimids = [o.split(':')[-1] for o in omimids] diff = set(omimids) - set(cleanomimids) if diff: LOG.warning('OMIM has %i dirty bits see"\n %s', len(diff), str(diff)) omimids = cleanomimids else: cleanomimids = list() acc = 0 # for counting # note that you can only do request batches of 20 # see info about "Limits" at http://omim.org/help/api # TODO 2017 May seems a majority of many groups of 20 # are producing python None for RDF triple Objects groupsize = 20 if not self.test_mode and limit is not None: # just in case the limit is larger than the number of records, maxit = limit if limit > len(omimids): maxit = len(omimids) else: maxit = len(omimids) while acc < maxit: end = min((maxit, acc + groupsize)) # iterate through the omim ids list, # and fetch from the OMIM api in batches of 20 if self.test_mode: intersect = list( set([str(i) for i in self.test_ids]) & set(omimids[acc:end])) # some of the test ids are in the omimids if intersect: LOG.info("found test ids: %s", intersect) omimparams.update({'mimNumber': ','.join(intersect)}) else: acc += groupsize continue else: omimparams.update({'mimNumber': ','.join(omimids[acc:end])}) url = OMIMAPI + urllib.parse.urlencode(omimparams) try: req = urllib.request.urlopen(url) except HTTPError as e: # URLError? LOG.warning('fetching: %s', url) error_msg = e.read() if re.search(r'The API key: .* is invalid', str(error_msg)): msg = "API Key not valid" raise HTTPError(url, e.code, msg, e.hdrs, e.fp) LOG.error("Failed with: %s", str(error_msg)) break resp = req.read().decode() acc += groupsize myjson = json.loads(resp) # snag a copy with open('./raw/omim/_' + str(acc) + '.json', 'w') as fp: json.dump(myjson, fp) entries = myjson['omim']['entryList'] for e in entries: # apply the data transformation, and save it to the graph processed_entry = transform(e, graph, globaltt) if processed_entry is not None: processed_entries.append(processed_entry) # ### end iterating over batch of entries return processed_entries
python
def process_entries( self, omimids, transform, included_fields=None, graph=None, limit=None, globaltt=None ): """ Given a list of omim ids, this will use the omim API to fetch the entries, according to the ```included_fields``` passed as a parameter. If a transformation function is supplied, this will iterate over each entry, and either add the results to the supplied ```graph``` or will return a set of processed entries that the calling function can further iterate. If no ```included_fields``` are provided, this will simply fetch the basic entry from omim, which includes an entry's: prefix, mimNumber, status, and titles. :param omimids: the set of omim entry ids to fetch using their API :param transform: Function to transform each omim entry when looping :param included_fields: A set of what fields are required to retrieve from the API :param graph: the graph to add the transformed data into :return: """ omimparams = {} # add the included_fields as parameters if included_fields is not None and included_fields: omimparams['include'] = ','.join(included_fields) processed_entries = list() # scrub any omim prefixes from the omimids before processing # cleanomimids = set() # for omimid in omimids: # scrubbed = str(omimid).split(':')[-1] # if re.match(r'^\d+$', str(scrubbed)): # cleanomimids.update(scrubbed) # omimids = list(cleanomimids) cleanomimids = [o.split(':')[-1] for o in omimids] diff = set(omimids) - set(cleanomimids) if diff: LOG.warning('OMIM has %i dirty bits see"\n %s', len(diff), str(diff)) omimids = cleanomimids else: cleanomimids = list() acc = 0 # for counting # note that you can only do request batches of 20 # see info about "Limits" at http://omim.org/help/api # TODO 2017 May seems a majority of many groups of 20 # are producing python None for RDF triple Objects groupsize = 20 if not self.test_mode and limit is not None: # just in case the limit is larger than the number of records, maxit = limit if limit > len(omimids): maxit = len(omimids) else: maxit = len(omimids) while acc < maxit: end = min((maxit, acc + groupsize)) # iterate through the omim ids list, # and fetch from the OMIM api in batches of 20 if self.test_mode: intersect = list( set([str(i) for i in self.test_ids]) & set(omimids[acc:end])) # some of the test ids are in the omimids if intersect: LOG.info("found test ids: %s", intersect) omimparams.update({'mimNumber': ','.join(intersect)}) else: acc += groupsize continue else: omimparams.update({'mimNumber': ','.join(omimids[acc:end])}) url = OMIMAPI + urllib.parse.urlencode(omimparams) try: req = urllib.request.urlopen(url) except HTTPError as e: # URLError? LOG.warning('fetching: %s', url) error_msg = e.read() if re.search(r'The API key: .* is invalid', str(error_msg)): msg = "API Key not valid" raise HTTPError(url, e.code, msg, e.hdrs, e.fp) LOG.error("Failed with: %s", str(error_msg)) break resp = req.read().decode() acc += groupsize myjson = json.loads(resp) # snag a copy with open('./raw/omim/_' + str(acc) + '.json', 'w') as fp: json.dump(myjson, fp) entries = myjson['omim']['entryList'] for e in entries: # apply the data transformation, and save it to the graph processed_entry = transform(e, graph, globaltt) if processed_entry is not None: processed_entries.append(processed_entry) # ### end iterating over batch of entries return processed_entries
[ "def", "process_entries", "(", "self", ",", "omimids", ",", "transform", ",", "included_fields", "=", "None", ",", "graph", "=", "None", ",", "limit", "=", "None", ",", "globaltt", "=", "None", ")", ":", "omimparams", "=", "{", "}", "# add the included_fields as parameters", "if", "included_fields", "is", "not", "None", "and", "included_fields", ":", "omimparams", "[", "'include'", "]", "=", "','", ".", "join", "(", "included_fields", ")", "processed_entries", "=", "list", "(", ")", "# scrub any omim prefixes from the omimids before processing", "# cleanomimids = set()", "# for omimid in omimids:", "# scrubbed = str(omimid).split(':')[-1]", "# if re.match(r'^\\d+$', str(scrubbed)):", "# cleanomimids.update(scrubbed)", "# omimids = list(cleanomimids)", "cleanomimids", "=", "[", "o", ".", "split", "(", "':'", ")", "[", "-", "1", "]", "for", "o", "in", "omimids", "]", "diff", "=", "set", "(", "omimids", ")", "-", "set", "(", "cleanomimids", ")", "if", "diff", ":", "LOG", ".", "warning", "(", "'OMIM has %i dirty bits see\"\\n %s'", ",", "len", "(", "diff", ")", ",", "str", "(", "diff", ")", ")", "omimids", "=", "cleanomimids", "else", ":", "cleanomimids", "=", "list", "(", ")", "acc", "=", "0", "# for counting", "# note that you can only do request batches of 20", "# see info about \"Limits\" at http://omim.org/help/api", "# TODO 2017 May seems a majority of many groups of 20", "# are producing python None for RDF triple Objects", "groupsize", "=", "20", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", ":", "# just in case the limit is larger than the number of records,", "maxit", "=", "limit", "if", "limit", ">", "len", "(", "omimids", ")", ":", "maxit", "=", "len", "(", "omimids", ")", "else", ":", "maxit", "=", "len", "(", "omimids", ")", "while", "acc", "<", "maxit", ":", "end", "=", "min", "(", "(", "maxit", ",", "acc", "+", "groupsize", ")", ")", "# iterate through the omim ids list,", "# and fetch from the OMIM api in batches of 20", "if", "self", ".", "test_mode", ":", "intersect", "=", "list", "(", "set", "(", "[", "str", "(", "i", ")", "for", "i", "in", "self", ".", "test_ids", "]", ")", "&", "set", "(", "omimids", "[", "acc", ":", "end", "]", ")", ")", "# some of the test ids are in the omimids", "if", "intersect", ":", "LOG", ".", "info", "(", "\"found test ids: %s\"", ",", "intersect", ")", "omimparams", ".", "update", "(", "{", "'mimNumber'", ":", "','", ".", "join", "(", "intersect", ")", "}", ")", "else", ":", "acc", "+=", "groupsize", "continue", "else", ":", "omimparams", ".", "update", "(", "{", "'mimNumber'", ":", "','", ".", "join", "(", "omimids", "[", "acc", ":", "end", "]", ")", "}", ")", "url", "=", "OMIMAPI", "+", "urllib", ".", "parse", ".", "urlencode", "(", "omimparams", ")", "try", ":", "req", "=", "urllib", ".", "request", ".", "urlopen", "(", "url", ")", "except", "HTTPError", "as", "e", ":", "# URLError?", "LOG", ".", "warning", "(", "'fetching: %s'", ",", "url", ")", "error_msg", "=", "e", ".", "read", "(", ")", "if", "re", ".", "search", "(", "r'The API key: .* is invalid'", ",", "str", "(", "error_msg", ")", ")", ":", "msg", "=", "\"API Key not valid\"", "raise", "HTTPError", "(", "url", ",", "e", ".", "code", ",", "msg", ",", "e", ".", "hdrs", ",", "e", ".", "fp", ")", "LOG", ".", "error", "(", "\"Failed with: %s\"", ",", "str", "(", "error_msg", ")", ")", "break", "resp", "=", "req", ".", "read", "(", ")", ".", "decode", "(", ")", "acc", "+=", "groupsize", "myjson", "=", "json", ".", "loads", "(", "resp", ")", "# snag a copy", 
"with", "open", "(", "'./raw/omim/_'", "+", "str", "(", "acc", ")", "+", "'.json'", ",", "'w'", ")", "as", "fp", ":", "json", ".", "dump", "(", "myjson", ",", "fp", ")", "entries", "=", "myjson", "[", "'omim'", "]", "[", "'entryList'", "]", "for", "e", "in", "entries", ":", "# apply the data transformation, and save it to the graph", "processed_entry", "=", "transform", "(", "e", ",", "graph", ",", "globaltt", ")", "if", "processed_entry", "is", "not", "None", ":", "processed_entries", ".", "append", "(", "processed_entry", ")", "# ### end iterating over batch of entries", "return", "processed_entries" ]
Given a list of omim ids, this will use the omim API to fetch the entries, according to the ```included_fields``` passed as a parameter. If a transformation function is supplied, this will iterate over each entry, and either add the results to the supplied ```graph``` or will return a set of processed entries that the calling function can further iterate. If no ```included_fields``` are provided, this will simply fetch the basic entry from omim, which includes an entry's: prefix, mimNumber, status, and titles. :param omimids: the set of omim entry ids to fetch using their API :param transform: Function to transform each omim entry when looping :param included_fields: A set of what fields are required to retrieve from the API :param graph: the graph to add the transformed data into :return:
[ "Given", "a", "list", "of", "omim", "ids", "this", "will", "use", "the", "omim", "API", "to", "fetch", "the", "entries", "according", "to", "the", "included_fields", "passed", "as", "a", "parameter", ".", "If", "a", "transformation", "function", "is", "supplied", "this", "will", "iterate", "over", "each", "entry", "and", "either", "add", "the", "results", "to", "the", "supplied", "graph", "or", "will", "return", "a", "set", "of", "processed", "entries", "that", "the", "calling", "function", "can", "further", "iterate", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIM.py#L253-L367
train
251,267
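process_entries above pages through the id list in fixed windows of twenty, the documented OMIM API batch limit, advancing acc by groupsize and clamping the window end against maxit. The windowing arithmetic in isolation; fetch_batch is a hypothetical stand-in for the API call:

    def fetch_batch(ids):
        # stand-in for the OMIM API request; just echoes the batch back
        return list(ids)

    def process_in_windows(omimids, limit=None, groupsize=20):
        # same acc/end bookkeeping as the while-loop in process_entries
        maxit = len(omimids) if limit is None else min(limit, len(omimids))
        acc, results = 0, []
        while acc < maxit:
            end = min(maxit, acc + groupsize)
            results.extend(fetch_batch(omimids[acc:end]))
            acc += groupsize
        return results

    assert process_in_windows(list(range(45)), limit=30) == list(range(30))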
monarch-initiative/dipper
dipper/sources/OMIM.py
OMIM._process_all
def _process_all(self, limit): """ This takes the list of omim identifiers from the omim.txt.Z file, and iteratively queries the omim api for the json-formatted data. This will create OMIM classes, with the label, definition, and some synonyms. If an entry is "removed", it is added as a deprecated class. If an entry is "moved", it is deprecated and consider annotations are added. Additionally, we extract: *phenotypicSeries ids as superclasses *equivalent ids for Orphanet and UMLS If set to testMode, it will write only those items in the test_ids to the testgraph. :param limit: :return: """ omimids = self._get_omim_ids() LOG.info('Have %i omim numbers to fetch records from their API', len(omimids)) LOG.info('Have %i omim types ', len(self.omim_type)) if self.test_mode: graph = self.testgraph else: graph = self.graph geno = Genotype(graph) model = Model(graph) tax_label = 'Homo sapiens' tax_id = self.globaltt[tax_label] # add genome and taxon geno.addGenome(tax_id, tax_label) # tax label can get added elsewhere model.addClassToGraph(tax_id, None) # label added elsewhere includes = set() includes.add('all') self.process_entries( omimids, self._transform_entry, includes, graph, limit, self.globaltt)
python
def _process_all(self, limit): """ This takes the list of omim identifiers from the omim.txt.Z file, and iteratively queries the omim api for the json-formatted data. This will create OMIM classes, with the label, definition, and some synonyms. If an entry is "removed", it is added as a deprecated class. If an entry is "moved", it is deprecated and consider annotations are added. Additionally, we extract: *phenotypicSeries ids as superclasses *equivalent ids for Orphanet and UMLS If set to testMode, it will write only those items in the test_ids to the testgraph. :param limit: :return: """ omimids = self._get_omim_ids() LOG.info('Have %i omim numbers to fetch records from their API', len(omimids)) LOG.info('Have %i omim types ', len(self.omim_type)) if self.test_mode: graph = self.testgraph else: graph = self.graph geno = Genotype(graph) model = Model(graph) tax_label = 'Homo sapiens' tax_id = self.globaltt[tax_label] # add genome and taxon geno.addGenome(tax_id, tax_label) # tax label can get added elsewhere model.addClassToGraph(tax_id, None) # label added elsewhere includes = set() includes.add('all') self.process_entries( omimids, self._transform_entry, includes, graph, limit, self.globaltt)
[ "def", "_process_all", "(", "self", ",", "limit", ")", ":", "omimids", "=", "self", ".", "_get_omim_ids", "(", ")", "LOG", ".", "info", "(", "'Have %i omim numbers to fetch records from their API'", ",", "len", "(", "omimids", ")", ")", "LOG", ".", "info", "(", "'Have %i omim types '", ",", "len", "(", "self", ".", "omim_type", ")", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "geno", "=", "Genotype", "(", "graph", ")", "model", "=", "Model", "(", "graph", ")", "tax_label", "=", "'Homo sapiens'", "tax_id", "=", "self", ".", "globaltt", "[", "tax_label", "]", "# add genome and taxon", "geno", ".", "addGenome", "(", "tax_id", ",", "tax_label", ")", "# tax label can get added elsewhere", "model", ".", "addClassToGraph", "(", "tax_id", ",", "None", ")", "# label added elsewhere", "includes", "=", "set", "(", ")", "includes", ".", "add", "(", "'all'", ")", "self", ".", "process_entries", "(", "omimids", ",", "self", ".", "_transform_entry", ",", "includes", ",", "graph", ",", "limit", ",", "self", ".", "globaltt", ")" ]
This takes the list of omim identifiers from the omim.txt.Z file, and iteratively queries the omim api for the json-formatted data. This will create OMIM classes, with the label, definition, and some synonyms. If an entry is "removed", it is added as a deprecated class. If an entry is "moved", it is deprecated and consider annotations are added. Additionally, we extract: *phenotypicSeries ids as superclasses *equivalent ids for Orphanet and UMLS If set to testMode, it will write only those items in the test_ids to the testgraph. :param limit: :return:
[ "This", "takes", "the", "list", "of", "omim", "identifiers", "from", "the", "omim", ".", "txt", ".", "Z", "file", "and", "iteratively", "queries", "the", "omim", "api", "for", "the", "json", "-", "formatted", "data", ".", "This", "will", "create", "OMIM", "classes", "with", "the", "label", "definition", "and", "some", "synonyms", ".", "If", "an", "entry", "is", "removed", "it", "is", "added", "as", "a", "deprecated", "class", ".", "If", "an", "entry", "is", "moved", "it", "is", "deprecated", "and", "consider", "annotations", "are", "added", "." ]
24cc80db355bbe15776edc5c7b41e0886959ba41
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIM.py#L369-L412
train
251,268
ethereum/py-trie
trie/smt.py
SparseMerkleProof.update
def update(self, key: bytes, value: bytes, node_updates: Sequence[Hash32]): """ Merge an update for another key with the one we are tracking internally. :param key: keypath of the update we are processing :param value: value of the update we are processing :param node_updates: sequence of sibling nodes (in root->leaf order) must be at least as large as the first diverging key in the keypath """ validate_is_bytes(key) validate_length(key, self._key_size) # Path diff is the logical XOR of the updated key and this account path_diff = (to_int(self.key) ^ to_int(key)) # Same key (diff of 0), update the tracked value if path_diff == 0: self._value = value # No need to update branch else: # Find the first mismatched bit between keypaths. This is # where the branch point occurs, and we should update the # sibling node in the source branch at the branch point. # NOTE: Keys are in MSB->LSB (root->leaf) order. # Node lists are in root->leaf order. # Be sure to convert between them effectively. for bit in reversed(range(self._branch_size)): if path_diff & (1 << bit) > 0: branch_point = (self._branch_size - 1) - bit break # NOTE: node_updates only has to be as long as necessary # to obtain the update. This allows an optimization # of pruning updates to the maximum possible depth # that would be required to update, which may be # significantly smaller than the tree depth. if len(node_updates) <= branch_point: raise ValidationError("Updated node list is not deep enough") # Update sibling node in the branch where our key differs from the update self._branch[branch_point] = node_updates[branch_point]
python
def update(self, key: bytes, value: bytes, node_updates: Sequence[Hash32]): """ Merge an update for another key with the one we are tracking internally. :param key: keypath of the update we are processing :param value: value of the update we are processing :param node_updates: sequence of sibling nodes (in root->leaf order) must be at least as large as the first diverging key in the keypath """ validate_is_bytes(key) validate_length(key, self._key_size) # Path diff is the logical XOR of the updated key and this account path_diff = (to_int(self.key) ^ to_int(key)) # Same key (diff of 0), update the tracked value if path_diff == 0: self._value = value # No need to update branch else: # Find the first mismatched bit between keypaths. This is # where the branch point occurs, and we should update the # sibling node in the source branch at the branch point. # NOTE: Keys are in MSB->LSB (root->leaf) order. # Node lists are in root->leaf order. # Be sure to convert between them effectively. for bit in reversed(range(self._branch_size)): if path_diff & (1 << bit) > 0: branch_point = (self._branch_size - 1) - bit break # NOTE: node_updates only has to be as long as necessary # to obtain the update. This allows an optimization # of pruning updates to the maximum possible depth # that would be required to update, which may be # significantly smaller than the tree depth. if len(node_updates) <= branch_point: raise ValidationError("Updated node list is not deep enough") # Update sibling node in the branch where our key differs from the update self._branch[branch_point] = node_updates[branch_point]
[ "def", "update", "(", "self", ",", "key", ":", "bytes", ",", "value", ":", "bytes", ",", "node_updates", ":", "Sequence", "[", "Hash32", "]", ")", ":", "validate_is_bytes", "(", "key", ")", "validate_length", "(", "key", ",", "self", ".", "_key_size", ")", "# Path diff is the logical XOR of the updated key and this account", "path_diff", "=", "(", "to_int", "(", "self", ".", "key", ")", "^", "to_int", "(", "key", ")", ")", "# Same key (diff of 0), update the tracked value", "if", "path_diff", "==", "0", ":", "self", ".", "_value", "=", "value", "# No need to update branch", "else", ":", "# Find the first mismatched bit between keypaths. This is", "# where the branch point occurs, and we should update the", "# sibling node in the source branch at the branch point.", "# NOTE: Keys are in MSB->LSB (root->leaf) order.", "# Node lists are in root->leaf order.", "# Be sure to convert between them effectively.", "for", "bit", "in", "reversed", "(", "range", "(", "self", ".", "_branch_size", ")", ")", ":", "if", "path_diff", "&", "(", "1", "<<", "bit", ")", ">", "0", ":", "branch_point", "=", "(", "self", ".", "_branch_size", "-", "1", ")", "-", "bit", "break", "# NOTE: node_updates only has to be as long as necessary", "# to obtain the update. This allows an optimization", "# of pruning updates to the maximum possible depth", "# that would be required to update, which may be", "# significantly smaller than the tree depth.", "if", "len", "(", "node_updates", ")", "<=", "branch_point", ":", "raise", "ValidationError", "(", "\"Updated node list is not deep enough\"", ")", "# Update sibling node in the branch where our key differs from the update", "self", ".", "_branch", "[", "branch_point", "]", "=", "node_updates", "[", "branch_point", "]" ]
Merge an update for another key with the one we are tracking internally. :param key: keypath of the update we are processing :param value: value of the update we are processing :param node_updates: sequence of sibling nodes (in root->leaf order) must be at least as large as the first diverging key in the keypath
[ "Merge", "an", "update", "for", "another", "key", "with", "the", "one", "we", "are", "tracking", "internally", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L144-L186
train
251,269
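The branch-point arithmetic in SparseMerkleProof.update can be exercised on its own: XOR the two keypaths, locate the most significant differing bit, and flip it into a root->leaf index. A self-contained sketch, with int.from_bytes standing in for the library's to_int helper and 1-byte keys (branch size 8) used purely for illustration:

    def branch_point(key_a: bytes, key_b: bytes, branch_size: int) -> int:
        # logical XOR of the two keypaths, as in update()
        path_diff = int.from_bytes(key_a, 'big') ^ int.from_bytes(key_b, 'big')
        if path_diff == 0:
            raise ValueError("identical keys never diverge")
        for bit in reversed(range(branch_size)):
            if path_diff & (1 << bit):
                # keys read MSB->LSB, branches run root->leaf, so flip the index
                return (branch_size - 1) - bit

    # differing only in the lowest bit: divergence at the deepest level
    assert branch_point(b'\x00', b'\x01', 8) == 7
    # differing in the top bit: divergence at the root
    assert branch_point(b'\x00', b'\x80', 8) == 0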
ethereum/py-trie
trie/smt.py
SparseMerkleTree._get
def _get(self, key: bytes) -> Tuple[bytes, Tuple[Hash32]]: """ Returns db value and branch in root->leaf order """ validate_is_bytes(key) validate_length(key, self._key_size) branch = [] target_bit = 1 << (self.depth - 1) path = to_int(key) node_hash = self.root_hash # Append the sibling node to the branch # Iterate on the parent for _ in range(self.depth): node = self.db[node_hash] left, right = node[:32], node[32:] if path & target_bit: branch.append(left) node_hash = right else: branch.append(right) node_hash = left target_bit >>= 1 # Value is the last hash in the chain # NOTE: Didn't do exception here for testing purposes return self.db[node_hash], tuple(branch)
python
def _get(self, key: bytes) -> Tuple[bytes, Tuple[Hash32]]: """ Returns db value and branch in root->leaf order """ validate_is_bytes(key) validate_length(key, self._key_size) branch = [] target_bit = 1 << (self.depth - 1) path = to_int(key) node_hash = self.root_hash # Append the sibling node to the branch # Iterate on the parent for _ in range(self.depth): node = self.db[node_hash] left, right = node[:32], node[32:] if path & target_bit: branch.append(left) node_hash = right else: branch.append(right) node_hash = left target_bit >>= 1 # Value is the last hash in the chain # NOTE: Didn't do exception here for testing purposes return self.db[node_hash], tuple(branch)
[ "def", "_get", "(", "self", ",", "key", ":", "bytes", ")", "->", "Tuple", "[", "bytes", ",", "Tuple", "[", "Hash32", "]", "]", ":", "validate_is_bytes", "(", "key", ")", "validate_length", "(", "key", ",", "self", ".", "_key_size", ")", "branch", "=", "[", "]", "target_bit", "=", "1", "<<", "(", "self", ".", "depth", "-", "1", ")", "path", "=", "to_int", "(", "key", ")", "node_hash", "=", "self", ".", "root_hash", "# Append the sibling node to the branch", "# Iterate on the parent", "for", "_", "in", "range", "(", "self", ".", "depth", ")", ":", "node", "=", "self", ".", "db", "[", "node_hash", "]", "left", ",", "right", "=", "node", "[", ":", "32", "]", ",", "node", "[", "32", ":", "]", "if", "path", "&", "target_bit", ":", "branch", ".", "append", "(", "left", ")", "node_hash", "=", "right", "else", ":", "branch", ".", "append", "(", "right", ")", "node_hash", "=", "left", "target_bit", ">>=", "1", "# Value is the last hash in the chain", "# NOTE: Didn't do exception here for testing purposes", "return", "self", ".", "db", "[", "node_hash", "]", ",", "tuple", "(", "branch", ")" ]
Returns db value and branch in root->leaf order
[ "Returns", "db", "value", "and", "branch", "in", "root", "-", ">", "leaf", "order" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L271-L297
train
251,270
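_get above walks the key MSB-first: at each level the current bit selects which child to descend into, while the opposite child is collected as the proof sibling. A sketch of just that bit-walk, with a toy depth of 8 (the real depth comes from the tree's key size):

    def path_choices(key: bytes, depth: int):
        # 'R'/'L' per level in root->leaf order, mirroring the loop in _get
        path = int.from_bytes(key, 'big')
        target_bit = 1 << (depth - 1)
        choices = []
        for _ in range(depth):
            # a set bit descends right (and would append the left sibling), else left
            choices.append('R' if path & target_bit else 'L')
            target_bit >>= 1
        return choices

    assert path_choices(b'\xa0', 8) == ['R', 'L', 'R', 'L', 'L', 'L', 'L', 'L']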
ethereum/py-trie
trie/smt.py
SparseMerkleTree.set
def set(self, key: bytes, value: bytes) -> Tuple[Hash32]: """ Returns all updated hashes in root->leaf order """ validate_is_bytes(key) validate_length(key, self._key_size) validate_is_bytes(value) path = to_int(key) node = value _, branch = self._get(key) proof_update = [] # Keep track of proof updates target_bit = 1 # branch is in root->leaf order, so flip for sibling_node in reversed(branch): # Set node_hash = keccak(node) proof_update.append(node_hash) self.db[node_hash] = node # Update if (path & target_bit): node = sibling_node + node_hash else: node = node_hash + sibling_node target_bit <<= 1 # Finally, update root hash self.root_hash = keccak(node) self.db[self.root_hash] = node # updates need to be in root->leaf order, so flip back return tuple(reversed(proof_update))
python
def set(self, key: bytes, value: bytes) -> Tuple[Hash32]: """ Returns all updated hashes in root->leaf order """ validate_is_bytes(key) validate_length(key, self._key_size) validate_is_bytes(value) path = to_int(key) node = value _, branch = self._get(key) proof_update = [] # Keep track of proof updates target_bit = 1 # branch is in root->leaf order, so flip for sibling_node in reversed(branch): # Set node_hash = keccak(node) proof_update.append(node_hash) self.db[node_hash] = node # Update if (path & target_bit): node = sibling_node + node_hash else: node = node_hash + sibling_node target_bit <<= 1 # Finally, update root hash self.root_hash = keccak(node) self.db[self.root_hash] = node # updates need to be in root->leaf order, so flip back return tuple(reversed(proof_update))
[ "def", "set", "(", "self", ",", "key", ":", "bytes", ",", "value", ":", "bytes", ")", "->", "Tuple", "[", "Hash32", "]", ":", "validate_is_bytes", "(", "key", ")", "validate_length", "(", "key", ",", "self", ".", "_key_size", ")", "validate_is_bytes", "(", "value", ")", "path", "=", "to_int", "(", "key", ")", "node", "=", "value", "_", ",", "branch", "=", "self", ".", "_get", "(", "key", ")", "proof_update", "=", "[", "]", "# Keep track of proof updates", "target_bit", "=", "1", "# branch is in root->leaf order, so flip", "for", "sibling_node", "in", "reversed", "(", "branch", ")", ":", "# Set", "node_hash", "=", "keccak", "(", "node", ")", "proof_update", ".", "append", "(", "node_hash", ")", "self", ".", "db", "[", "node_hash", "]", "=", "node", "# Update", "if", "(", "path", "&", "target_bit", ")", ":", "node", "=", "sibling_node", "+", "node_hash", "else", ":", "node", "=", "node_hash", "+", "sibling_node", "target_bit", "<<=", "1", "# Finally, update root hash", "self", ".", "root_hash", "=", "keccak", "(", "node", ")", "self", ".", "db", "[", "self", ".", "root_hash", "]", "=", "node", "# updates need to be in root->leaf order, so flip back", "return", "tuple", "(", "reversed", "(", "proof_update", ")", ")" ]
Returns all updated hashes in root->leaf order
[ "Returns", "all", "updated", "hashes", "in", "root", "-", ">", "leaf", "order" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L299-L333
train
251,271
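set() rebuilds the authentication path leaf-first, which is why both the stored branch and the returned proof updates get reversed relative to root->leaf order. Below is a minimal root recomputation from a value plus its branch that follows the same concatenation rule; hashlib.sha256 stands in for keccak only to keep the sketch dependency-free, so the digests will not match the library's:

    import hashlib

    def hash_(data: bytes) -> bytes:
        return hashlib.sha256(data).digest()

    def recompute_root(key: bytes, value: bytes, branch) -> bytes:
        # branch is in root->leaf order, exactly as _get returns it
        path = int.from_bytes(key, 'big')
        node, target_bit = value, 1
        for sibling in reversed(branch):
            node_hash = hash_(node)
            # a set bit means this node was the right child at that level
            node = sibling + node_hash if path & target_bit else node_hash + sibling
            target_bit <<= 1
        return hash_(node)

    # with a single-level branch the rule reduces to H(sibling + H(value))
    sib = hash_(b'')
    assert recompute_root(b'\x01', b'leaf', [sib]) == hash_(sib + hash_(b'leaf'))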
ethereum/py-trie
trie/smt.py
SparseMerkleTree.delete
def delete(self, key: bytes) -> Tuple[Hash32]: """ Equals to setting the value to None Returns all updated hashes in root->leaf order """ validate_is_bytes(key) validate_length(key, self._key_size) return self.set(key, self._default)
python
def delete(self, key: bytes) -> Tuple[Hash32]: """ Equals to setting the value to None Returns all updated hashes in root->leaf order """ validate_is_bytes(key) validate_length(key, self._key_size) return self.set(key, self._default)
[ "def", "delete", "(", "self", ",", "key", ":", "bytes", ")", "->", "Tuple", "[", "Hash32", "]", ":", "validate_is_bytes", "(", "key", ")", "validate_length", "(", "key", ",", "self", ".", "_key_size", ")", "return", "self", ".", "set", "(", "key", ",", "self", ".", "_default", ")" ]
Equals to setting the value to None Returns all updated hashes in root->leaf order
[ "Equals", "to", "setting", "the", "value", "to", "None", "Returns", "all", "updated", "hashes", "in", "root", "-", ">", "leaf", "order" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L345-L353
train
251,272
ethereum/py-trie
trie/sync.py
HexaryTrieSync.next_batch
def next_batch(self, n=1): """Return the next requests that should be dispatched.""" if len(self.queue) == 0: return [] batch = list(reversed((self.queue[-n:]))) self.queue = self.queue[:-n] return batch
python
def next_batch(self, n=1): """Return the next requests that should be dispatched.""" if len(self.queue) == 0: return [] batch = list(reversed((self.queue[-n:]))) self.queue = self.queue[:-n] return batch
[ "def", "next_batch", "(", "self", ",", "n", "=", "1", ")", ":", "if", "len", "(", "self", ".", "queue", ")", "==", "0", ":", "return", "[", "]", "batch", "=", "list", "(", "reversed", "(", "(", "self", ".", "queue", "[", "-", "n", ":", "]", ")", ")", ")", "self", ".", "queue", "=", "self", ".", "queue", "[", ":", "-", "n", "]", "return", "batch" ]
Return the next requests that should be dispatched.
[ "Return", "the", "next", "requests", "that", "should", "be", "dispatched", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L120-L126
train
251,273
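next_batch above serves requests from the tail of the bisect-sorted queue and reverses the slice, so the caller receives the highest-priority pending requests first. The list mechanics in isolation, returning the shrunken queue instead of mutating an attribute:

    def next_batch(queue, n=1):
        # same slicing as HexaryTrieSync.next_batch, on a plain list
        if not queue:
            return [], queue
        batch = list(reversed(queue[-n:]))
        return batch, queue[:-n]

    batch, remaining = next_batch([1, 2, 3, 4, 5], n=2)
    assert batch == [5, 4] and remaining == [1, 2, 3]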
ethereum/py-trie
trie/sync.py
HexaryTrieSync.schedule
def schedule(self, node_key, parent, depth, leaf_callback, is_raw=False): """Schedule a request for the node with the given key.""" if node_key in self._existing_nodes: self.logger.debug("Node %s already exists in db" % encode_hex(node_key)) return if node_key in self.db: self._existing_nodes.add(node_key) self.logger.debug("Node %s already exists in db" % encode_hex(node_key)) return if parent is not None: parent.dependencies += 1 existing = self.requests.get(node_key) if existing is not None: self.logger.debug( "Already requesting %s, will just update parents list" % node_key) existing.parents.append(parent) return request = SyncRequest(node_key, parent, depth, leaf_callback, is_raw) # Requests get added to both self.queue and self.requests; the former is used to keep # track which requests should be sent next, and the latter is used to avoid scheduling a # request for a given node multiple times. self.logger.debug("Scheduling retrieval of %s" % encode_hex(request.node_key)) self.requests[request.node_key] = request bisect.insort(self.queue, request)
python
def schedule(self, node_key, parent, depth, leaf_callback, is_raw=False): """Schedule a request for the node with the given key.""" if node_key in self._existing_nodes: self.logger.debug("Node %s already exists in db" % encode_hex(node_key)) return if node_key in self.db: self._existing_nodes.add(node_key) self.logger.debug("Node %s already exists in db" % encode_hex(node_key)) return if parent is not None: parent.dependencies += 1 existing = self.requests.get(node_key) if existing is not None: self.logger.debug( "Already requesting %s, will just update parents list" % node_key) existing.parents.append(parent) return request = SyncRequest(node_key, parent, depth, leaf_callback, is_raw) # Requests get added to both self.queue and self.requests; the former is used to keep # track which requests should be sent next, and the latter is used to avoid scheduling a # request for a given node multiple times. self.logger.debug("Scheduling retrieval of %s" % encode_hex(request.node_key)) self.requests[request.node_key] = request bisect.insort(self.queue, request)
[ "def", "schedule", "(", "self", ",", "node_key", ",", "parent", ",", "depth", ",", "leaf_callback", ",", "is_raw", "=", "False", ")", ":", "if", "node_key", "in", "self", ".", "_existing_nodes", ":", "self", ".", "logger", ".", "debug", "(", "\"Node %s already exists in db\"", "%", "encode_hex", "(", "node_key", ")", ")", "return", "if", "node_key", "in", "self", ".", "db", ":", "self", ".", "_existing_nodes", ".", "add", "(", "node_key", ")", "self", ".", "logger", ".", "debug", "(", "\"Node %s already exists in db\"", "%", "encode_hex", "(", "node_key", ")", ")", "return", "if", "parent", "is", "not", "None", ":", "parent", ".", "dependencies", "+=", "1", "existing", "=", "self", ".", "requests", ".", "get", "(", "node_key", ")", "if", "existing", "is", "not", "None", ":", "self", ".", "logger", ".", "debug", "(", "\"Already requesting %s, will just update parents list\"", "%", "node_key", ")", "existing", ".", "parents", ".", "append", "(", "parent", ")", "return", "request", "=", "SyncRequest", "(", "node_key", ",", "parent", ",", "depth", ",", "leaf_callback", ",", "is_raw", ")", "# Requests get added to both self.queue and self.requests; the former is used to keep", "# track which requests should be sent next, and the latter is used to avoid scheduling a", "# request for a given node multiple times.", "self", ".", "logger", ".", "debug", "(", "\"Scheduling retrieval of %s\"", "%", "encode_hex", "(", "request", ".", "node_key", ")", ")", "self", ".", "requests", "[", "request", ".", "node_key", "]", "=", "request", "bisect", ".", "insort", "(", "self", ".", "queue", ",", "request", ")" ]
Schedule a request for the node with the given key.
[ "Schedule", "a", "request", "for", "the", "node", "with", "the", "given", "key", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L128-L155
train
251,274
ethereum/py-trie
trie/sync.py
HexaryTrieSync.get_children
def get_children(self, request): """Return all children of the node retrieved by the given request. :rtype: A two-tuple with one list containing the children that reference other nodes and another containing the leaf children. """ node = decode_node(request.data) return _get_children(node, request.depth)
python
def get_children(self, request): """Return all children of the node retrieved by the given request. :rtype: A two-tuple with one list containing the children that reference other nodes and another containing the leaf children. """ node = decode_node(request.data) return _get_children(node, request.depth)
[ "def", "get_children", "(", "self", ",", "request", ")", ":", "node", "=", "decode_node", "(", "request", ".", "data", ")", "return", "_get_children", "(", "node", ",", "request", ".", "depth", ")" ]
Return all children of the node retrieved by the given request. :rtype: A two-tuple with one list containing the children that reference other nodes and another containing the leaf children.
[ "Return", "all", "children", "of", "the", "node", "retrieved", "by", "the", "given", "request", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L157-L164
train
251,275
ethereum/py-trie
trie/sync.py
HexaryTrieSync.process
def process(self, results): """Process request results. :param results: A list of two-tuples containing the node's key and data. """ for node_key, data in results: request = self.requests.get(node_key) if request is None: # This may happen if we resend a request for a node after waiting too long, # and then eventually get two responses with it. self.logger.info( "No SyncRequest found for %s, maybe we got more than one response for it" % encode_hex(node_key)) return if request.data is not None: raise SyncRequestAlreadyProcessed("%s has been processed already" % request) request.data = data if request.is_raw: self.commit(request) continue references, leaves = self.get_children(request) for depth, ref in references: self.schedule(ref, request, depth, request.leaf_callback) if request.leaf_callback is not None: for leaf in leaves: request.leaf_callback(leaf, request) if request.dependencies == 0: self.commit(request)
python
def process(self, results): """Process request results. :param results: A list of two-tuples containing the node's key and data. """ for node_key, data in results: request = self.requests.get(node_key) if request is None: # This may happen if we resend a request for a node after waiting too long, # and then eventually get two responses with it. self.logger.info( "No SyncRequest found for %s, maybe we got more than one response for it" % encode_hex(node_key)) return if request.data is not None: raise SyncRequestAlreadyProcessed("%s has been processed already" % request) request.data = data if request.is_raw: self.commit(request) continue references, leaves = self.get_children(request) for depth, ref in references: self.schedule(ref, request, depth, request.leaf_callback) if request.leaf_callback is not None: for leaf in leaves: request.leaf_callback(leaf, request) if request.dependencies == 0: self.commit(request)
[ "def", "process", "(", "self", ",", "results", ")", ":", "for", "node_key", ",", "data", "in", "results", ":", "request", "=", "self", ".", "requests", ".", "get", "(", "node_key", ")", "if", "request", "is", "None", ":", "# This may happen if we resend a request for a node after waiting too long,", "# and then eventually get two responses with it.", "self", ".", "logger", ".", "info", "(", "\"No SyncRequest found for %s, maybe we got more than one response for it\"", "%", "encode_hex", "(", "node_key", ")", ")", "return", "if", "request", ".", "data", "is", "not", "None", ":", "raise", "SyncRequestAlreadyProcessed", "(", "\"%s has been processed already\"", "%", "request", ")", "request", ".", "data", "=", "data", "if", "request", ".", "is_raw", ":", "self", ".", "commit", "(", "request", ")", "continue", "references", ",", "leaves", "=", "self", ".", "get_children", "(", "request", ")", "for", "depth", ",", "ref", "in", "references", ":", "self", ".", "schedule", "(", "ref", ",", "request", ",", "depth", ",", "request", ".", "leaf_callback", ")", "if", "request", ".", "leaf_callback", "is", "not", "None", ":", "for", "leaf", "in", "leaves", ":", "request", ".", "leaf_callback", "(", "leaf", ",", "request", ")", "if", "request", ".", "dependencies", "==", "0", ":", "self", ".", "commit", "(", "request", ")" ]
Process request results. :param results: A list of two-tuples containing the node's key and data.
[ "Process", "request", "results", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L166-L199
train
251,276
ethereum/py-trie
trie/branches.py
check_if_branch_exist
def check_if_branch_exist(db, root_hash, key_prefix): """ Given a key prefix, return whether this prefix is the prefix of an existing key in the trie. """ validate_is_bytes(key_prefix) return _check_if_branch_exist(db, root_hash, encode_to_bin(key_prefix))
python
def check_if_branch_exist(db, root_hash, key_prefix): """ Given a key prefix, return whether this prefix is the prefix of an existing key in the trie. """ validate_is_bytes(key_prefix) return _check_if_branch_exist(db, root_hash, encode_to_bin(key_prefix))
[ "def", "check_if_branch_exist", "(", "db", ",", "root_hash", ",", "key_prefix", ")", ":", "validate_is_bytes", "(", "key_prefix", ")", "return", "_check_if_branch_exist", "(", "db", ",", "root_hash", ",", "encode_to_bin", "(", "key_prefix", ")", ")" ]
Given a key prefix, return whether this prefix is the prefix of an existing key in the trie.
[ "Given", "a", "key", "prefix", "return", "whether", "this", "prefix", "is", "the", "prefix", "of", "an", "existing", "key", "in", "the", "trie", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/branches.py#L30-L37
train
251,277
ethereum/py-trie
trie/branches.py
get_branch
def get_branch(db, root_hash, key): """ Get a long-format Merkle branch """ validate_is_bytes(key) return tuple(_get_branch(db, root_hash, encode_to_bin(key)))
python
def get_branch(db, root_hash, key): """ Get a long-format Merkle branch """ validate_is_bytes(key) return tuple(_get_branch(db, root_hash, encode_to_bin(key)))
[ "def", "get_branch", "(", "db", ",", "root_hash", ",", "key", ")", ":", "validate_is_bytes", "(", "key", ")", "return", "tuple", "(", "_get_branch", "(", "db", ",", "root_hash", ",", "encode_to_bin", "(", "key", ")", ")", ")" ]
Get a long-format Merkle branch
[ "Get", "a", "long", "-", "format", "Merkle", "branch" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/branches.py#L71-L77
train
251,278
ethereum/py-trie
trie/branches.py
get_witness_for_key_prefix
def get_witness_for_key_prefix(db, node_hash, key): """ Get all witness given a keypath prefix. Include 1. witness along the keypath and 2. witness in the subtrie of the last node in keypath """ validate_is_bytes(key) return tuple(_get_witness_for_key_prefix(db, node_hash, encode_to_bin(key)))
python
def get_witness_for_key_prefix(db, node_hash, key): """ Get all witness given a keypath prefix. Include 1. witness along the keypath and 2. witness in the subtrie of the last node in keypath """ validate_is_bytes(key) return tuple(_get_witness_for_key_prefix(db, node_hash, encode_to_bin(key)))
[ "def", "get_witness_for_key_prefix", "(", "db", ",", "node_hash", ",", "key", ")", ":", "validate_is_bytes", "(", "key", ")", "return", "tuple", "(", "_get_witness_for_key_prefix", "(", "db", ",", "node_hash", ",", "encode_to_bin", "(", "key", ")", ")", ")" ]
Get all witness given a keypath prefix. Include 1. witness along the keypath and 2. witness in the subtrie of the last node in keypath
[ "Get", "all", "witness", "given", "a", "keypath", "prefix", ".", "Include" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/branches.py#L155-L165
train
251,279
ethereum/py-trie
trie/utils/nodes.py
encode_branch_node
def encode_branch_node(left_child_node_hash, right_child_node_hash): """ Serializes a branch node """ validate_is_bytes(left_child_node_hash) validate_length(left_child_node_hash, 32) validate_is_bytes(right_child_node_hash) validate_length(right_child_node_hash, 32) return BRANCH_TYPE_PREFIX + left_child_node_hash + right_child_node_hash
python
def encode_branch_node(left_child_node_hash, right_child_node_hash): """ Serializes a branch node """ validate_is_bytes(left_child_node_hash) validate_length(left_child_node_hash, 32) validate_is_bytes(right_child_node_hash) validate_length(right_child_node_hash, 32) return BRANCH_TYPE_PREFIX + left_child_node_hash + right_child_node_hash
[ "def", "encode_branch_node", "(", "left_child_node_hash", ",", "right_child_node_hash", ")", ":", "validate_is_bytes", "(", "left_child_node_hash", ")", "validate_length", "(", "left_child_node_hash", ",", "32", ")", "validate_is_bytes", "(", "right_child_node_hash", ")", "validate_length", "(", "right_child_node_hash", ",", "32", ")", "return", "BRANCH_TYPE_PREFIX", "+", "left_child_node_hash", "+", "right_child_node_hash" ]
Serializes a branch node
[ "Serializes", "a", "branch", "node" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nodes.py#L157-L165
train
251,280
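encode_branch_node above emits a fixed-layout payload: a type prefix followed by the two 32-byte child hashes, so decoding is a matter of slicing. A round-trip sketch of that layout; the one-byte prefix value used here is an assumed placeholder, not necessarily the constant the library defines:

    BRANCH_TYPE_PREFIX = b'\x01'  # assumed placeholder, not the library constant

    def encode_branch(left: bytes, right: bytes) -> bytes:
        assert len(left) == 32 and len(right) == 32
        return BRANCH_TYPE_PREFIX + left + right

    def decode_branch(node: bytes):
        # invert the layout: drop the prefix, then split into two 32-byte hashes
        payload = node[len(BRANCH_TYPE_PREFIX):]
        return payload[:32], payload[32:]

    left, right = b'\x11' * 32, b'\x22' * 32
    assert decode_branch(encode_branch(left, right)) == (left, right)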
ethereum/py-trie
trie/utils/nodes.py
encode_leaf_node
def encode_leaf_node(value): """ Serializes a leaf node """ validate_is_bytes(value) if value is None or value == b'': raise ValidationError("Value of leaf node can not be empty") return LEAF_TYPE_PREFIX + value
python
def encode_leaf_node(value): """ Serializes a leaf node """ validate_is_bytes(value) if value is None or value == b'': raise ValidationError("Value of leaf node can not be empty") return LEAF_TYPE_PREFIX + value
[ "def", "encode_leaf_node", "(", "value", ")", ":", "validate_is_bytes", "(", "value", ")", "if", "value", "is", "None", "or", "value", "==", "b''", ":", "raise", "ValidationError", "(", "\"Value of leaf node can not be empty\"", ")", "return", "LEAF_TYPE_PREFIX", "+", "value" ]
Serializes a leaf node
[ "Serializes", "a", "leaf", "node" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nodes.py#L168-L175
train
251,281
ethereum/py-trie
trie/utils/db.py
ScratchDB.batch_commit
def batch_commit(self, *, do_deletes=False): ''' Batch and commit and end of context ''' try: yield except Exception as exc: raise exc else: for key, value in self.cache.items(): if value is not DELETED: self.wrapped_db[key] = value elif do_deletes: self.wrapped_db.pop(key, None) # if do_deletes is False, ignore deletes to underlying db finally: self.cache = {}
python
def batch_commit(self, *, do_deletes=False): ''' Batch and commit and end of context ''' try: yield except Exception as exc: raise exc else: for key, value in self.cache.items(): if value is not DELETED: self.wrapped_db[key] = value elif do_deletes: self.wrapped_db.pop(key, None) # if do_deletes is False, ignore deletes to underlying db finally: self.cache = {}
[ "def", "batch_commit", "(", "self", ",", "*", ",", "do_deletes", "=", "False", ")", ":", "try", ":", "yield", "except", "Exception", "as", "exc", ":", "raise", "exc", "else", ":", "for", "key", ",", "value", "in", "self", ".", "cache", ".", "items", "(", ")", ":", "if", "value", "is", "not", "DELETED", ":", "self", ".", "wrapped_db", "[", "key", "]", "=", "value", "elif", "do_deletes", ":", "self", ".", "wrapped_db", ".", "pop", "(", "key", ",", "None", ")", "# if do_deletes is False, ignore deletes to underlying db", "finally", ":", "self", ".", "cache", "=", "{", "}" ]
Batch and commit and end of context
[ "Batch", "and", "commit", "and", "end", "of", "context" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/db.py#L48-L64
train
251,282
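batch_commit above is a generator intended to be wrapped with contextlib.contextmanager: writes accumulate in an in-memory cache and reach the wrapped store only if the with-block exits cleanly, with deletes applied only when do_deletes is set. A standalone sketch of the same pattern; TinyScratch and its direct cache pokes are inventions of this sketch (the real class presumably routes ordinary writes into the cache for you):

    from contextlib import contextmanager

    DELETED = object()  # sentinel marking keys to drop on commit

    class TinyScratch:
        def __init__(self, wrapped):
            self.wrapped = wrapped
            self.cache = {}

        @contextmanager
        def batch_commit(self, do_deletes=False):
            try:
                yield self
            except Exception:
                raise  # on failure the cached writes are simply discarded
            else:
                for key, value in self.cache.items():
                    if value is not DELETED:
                        self.wrapped[key] = value
                    elif do_deletes:
                        self.wrapped.pop(key, None)
            finally:
                self.cache = {}

    backing = {}
    db = TinyScratch(backing)
    with db.batch_commit():
        db.cache[b'k'] = b'v'
    assert backing == {b'k': b'v'}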
ethereum/py-trie
trie/hexary.py
HexaryTrie._prune_node
def _prune_node(self, node): """ Prune the given node if context exits cleanly. """ if self.is_pruning: # node is mutable, so capture the key for later pruning now prune_key, node_body = self._node_to_db_mapping(node) should_prune = (node_body is not None) else: should_prune = False yield # Prune only if no exception is raised if should_prune: del self.db[prune_key]
python
def _prune_node(self, node): """ Prune the given node if context exits cleanly. """ if self.is_pruning: # node is mutable, so capture the key for later pruning now prune_key, node_body = self._node_to_db_mapping(node) should_prune = (node_body is not None) else: should_prune = False yield # Prune only if no exception is raised if should_prune: del self.db[prune_key]
[ "def", "_prune_node", "(", "self", ",", "node", ")", ":", "if", "self", ".", "is_pruning", ":", "# node is mutable, so capture the key for later pruning now", "prune_key", ",", "node_body", "=", "self", ".", "_node_to_db_mapping", "(", "node", ")", "should_prune", "=", "(", "node_body", "is", "not", "None", ")", "else", ":", "should_prune", "=", "False", "yield", "# Prune only if no exception is raised", "if", "should_prune", ":", "del", "self", ".", "db", "[", "prune_key", "]" ]
Prune the given node if context exits cleanly.
[ "Prune", "the", "given", "node", "if", "context", "exits", "cleanly", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/hexary.py#L231-L246
train
251,283
ethereum/py-trie
trie/hexary.py
HexaryTrie._normalize_branch_node
def _normalize_branch_node(self, node): """ A branch node which is left with only a single non-blank item should be turned into either a leaf or extension node. """ iter_node = iter(node) if any(iter_node) and any(iter_node): return node if node[16]: return [compute_leaf_key([]), node[16]] sub_node_idx, sub_node_hash = next( (idx, v) for idx, v in enumerate(node[:16]) if v ) sub_node = self.get_node(sub_node_hash) sub_node_type = get_node_type(sub_node) if sub_node_type in {NODE_TYPE_LEAF, NODE_TYPE_EXTENSION}: with self._prune_node(sub_node): new_subnode_key = encode_nibbles(tuple(itertools.chain( [sub_node_idx], decode_nibbles(sub_node[0]), ))) return [new_subnode_key, sub_node[1]] elif sub_node_type == NODE_TYPE_BRANCH: subnode_hash = self._persist_node(sub_node) return [encode_nibbles([sub_node_idx]), subnode_hash] else: raise Exception("Invariant: this code block should be unreachable")
python
def _normalize_branch_node(self, node): """ A branch node which is left with only a single non-blank item should be turned into either a leaf or extension node. """ iter_node = iter(node) if any(iter_node) and any(iter_node): return node if node[16]: return [compute_leaf_key([]), node[16]] sub_node_idx, sub_node_hash = next( (idx, v) for idx, v in enumerate(node[:16]) if v ) sub_node = self.get_node(sub_node_hash) sub_node_type = get_node_type(sub_node) if sub_node_type in {NODE_TYPE_LEAF, NODE_TYPE_EXTENSION}: with self._prune_node(sub_node): new_subnode_key = encode_nibbles(tuple(itertools.chain( [sub_node_idx], decode_nibbles(sub_node[0]), ))) return [new_subnode_key, sub_node[1]] elif sub_node_type == NODE_TYPE_BRANCH: subnode_hash = self._persist_node(sub_node) return [encode_nibbles([sub_node_idx]), subnode_hash] else: raise Exception("Invariant: this code block should be unreachable")
[ "def", "_normalize_branch_node", "(", "self", ",", "node", ")", ":", "iter_node", "=", "iter", "(", "node", ")", "if", "any", "(", "iter_node", ")", "and", "any", "(", "iter_node", ")", ":", "return", "node", "if", "node", "[", "16", "]", ":", "return", "[", "compute_leaf_key", "(", "[", "]", ")", ",", "node", "[", "16", "]", "]", "sub_node_idx", ",", "sub_node_hash", "=", "next", "(", "(", "idx", ",", "v", ")", "for", "idx", ",", "v", "in", "enumerate", "(", "node", "[", ":", "16", "]", ")", "if", "v", ")", "sub_node", "=", "self", ".", "get_node", "(", "sub_node_hash", ")", "sub_node_type", "=", "get_node_type", "(", "sub_node", ")", "if", "sub_node_type", "in", "{", "NODE_TYPE_LEAF", ",", "NODE_TYPE_EXTENSION", "}", ":", "with", "self", ".", "_prune_node", "(", "sub_node", ")", ":", "new_subnode_key", "=", "encode_nibbles", "(", "tuple", "(", "itertools", ".", "chain", "(", "[", "sub_node_idx", "]", ",", "decode_nibbles", "(", "sub_node", "[", "0", "]", ")", ",", ")", ")", ")", "return", "[", "new_subnode_key", ",", "sub_node", "[", "1", "]", "]", "elif", "sub_node_type", "==", "NODE_TYPE_BRANCH", ":", "subnode_hash", "=", "self", ".", "_persist_node", "(", "sub_node", ")", "return", "[", "encode_nibbles", "(", "[", "sub_node_idx", "]", ")", ",", "subnode_hash", "]", "else", ":", "raise", "Exception", "(", "\"Invariant: this code block should be unreachable\"", ")" ]
A branch node which is left with only a single non-blank item should be turned into either a leaf or extension node.
[ "A", "branch", "node", "which", "is", "left", "with", "only", "a", "single", "non", "-", "blank", "item", "should", "be", "turned", "into", "either", "a", "leaf", "or", "extension", "node", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/hexary.py#L324-L356
train
251,284
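A note on the `any(iter_node) and any(iter_node)` guard above: both calls consume the same iterator, so the second `any()` resumes scanning where the first stopped, and the expression is true only when the 17-item branch node still holds at least two non-blank entries. A minimal standalone sketch of the idiom, using plain lists in place of trie nodes (not py-trie's own types):

def has_at_least_two_truthy(items):
    # Both any() calls advance the same iterator, so the second call
    # only succeeds if a *second* truthy element remains after the first.
    iterator = iter(items)
    return any(iterator) and any(iterator)

# Two non-blank slots: the branch node keeps its shape.
assert has_at_least_two_truthy([b'', b'node-a', b'', b'node-b'])
# A single non-blank slot: the node would be normalized into a leaf or extension.
assert not has_at_least_two_truthy([b'', b'node-a', b'', b''])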
ethereum/py-trie
trie/hexary.py
HexaryTrie._delete_branch_node
def _delete_branch_node(self, node, trie_key):
    """
    Delete a key from inside or underneath a branch node
    """
    if not trie_key:
        node[-1] = BLANK_NODE
        return self._normalize_branch_node(node)

    node_to_delete = self.get_node(node[trie_key[0]])

    sub_node = self._delete(node_to_delete, trie_key[1:])
    encoded_sub_node = self._persist_node(sub_node)

    if encoded_sub_node == node[trie_key[0]]:
        return node

    node[trie_key[0]] = encoded_sub_node
    if encoded_sub_node == BLANK_NODE:
        return self._normalize_branch_node(node)

    return node
python
def _delete_branch_node(self, node, trie_key):
    """
    Delete a key from inside or underneath a branch node
    """
    if not trie_key:
        node[-1] = BLANK_NODE
        return self._normalize_branch_node(node)

    node_to_delete = self.get_node(node[trie_key[0]])

    sub_node = self._delete(node_to_delete, trie_key[1:])
    encoded_sub_node = self._persist_node(sub_node)

    if encoded_sub_node == node[trie_key[0]]:
        return node

    node[trie_key[0]] = encoded_sub_node
    if encoded_sub_node == BLANK_NODE:
        return self._normalize_branch_node(node)

    return node
[ "def", "_delete_branch_node", "(", "self", ",", "node", ",", "trie_key", ")", ":", "if", "not", "trie_key", ":", "node", "[", "-", "1", "]", "=", "BLANK_NODE", "return", "self", ".", "_normalize_branch_node", "(", "node", ")", "node_to_delete", "=", "self", ".", "get_node", "(", "node", "[", "trie_key", "[", "0", "]", "]", ")", "sub_node", "=", "self", ".", "_delete", "(", "node_to_delete", ",", "trie_key", "[", "1", ":", "]", ")", "encoded_sub_node", "=", "self", ".", "_persist_node", "(", "sub_node", ")", "if", "encoded_sub_node", "==", "node", "[", "trie_key", "[", "0", "]", "]", ":", "return", "node", "node", "[", "trie_key", "[", "0", "]", "]", "=", "encoded_sub_node", "if", "encoded_sub_node", "==", "BLANK_NODE", ":", "return", "self", ".", "_normalize_branch_node", "(", "node", ")", "return", "node" ]
Delete a key from inside or underneath a branch node
[ "Delete", "a", "key", "from", "inside", "or", "underneath", "a", "branch", "node" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/hexary.py#L361-L381
train
251,285
ethereum/py-trie
trie/binary.py
BinaryTrie.get
def get(self, key):
    """
    Fetches the value with a given keypath from the given node.

    Key will be encoded into binary array format first.
    """
    validate_is_bytes(key)

    return self._get(self.root_hash, encode_to_bin(key))
python
def get(self, key):
    """
    Fetches the value with a given keypath from the given node.

    Key will be encoded into binary array format first.
    """
    validate_is_bytes(key)

    return self._get(self.root_hash, encode_to_bin(key))
[ "def", "get", "(", "self", ",", "key", ")", ":", "validate_is_bytes", "(", "key", ")", "return", "self", ".", "_get", "(", "self", ".", "root_hash", ",", "encode_to_bin", "(", "key", ")", ")" ]
Fetches the value with a given keypath from the given node. Key will be encoded into binary array format first.
[ "Fetches", "the", "value", "with", "a", "given", "keypath", "from", "the", "given", "node", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L38-L46
train
251,286
ethereum/py-trie
trie/binary.py
BinaryTrie.set
def set(self, key, value):
    """
    Sets the value at the given keypath from the given node

    Key will be encoded into binary array format first.
    """
    validate_is_bytes(key)
    validate_is_bytes(value)

    self.root_hash = self._set(self.root_hash, encode_to_bin(key), value)
python
def set(self, key, value):
    """
    Sets the value at the given keypath from the given node

    Key will be encoded into binary array format first.
    """
    validate_is_bytes(key)
    validate_is_bytes(value)

    self.root_hash = self._set(self.root_hash, encode_to_bin(key), value)
[ "def", "set", "(", "self", ",", "key", ",", "value", ")", ":", "validate_is_bytes", "(", "key", ")", "validate_is_bytes", "(", "value", ")", "self", ".", "root_hash", "=", "self", ".", "_set", "(", "self", ".", "root_hash", ",", "encode_to_bin", "(", "key", ")", ",", "value", ")" ]
Sets the value at the given keypath from the given node Key will be encoded into binary array format first.
[ "Sets", "the", "value", "at", "the", "given", "keypath", "from", "the", "given", "node" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L79-L88
train
251,287
ethereum/py-trie
trie/binary.py
BinaryTrie._set
def _set(self, node_hash, keypath, value, if_delete_subtrie=False):
    """
    If if_delete_subtrie is set to True, what it will do is that it take in a keypath
    and traverse til the end of keypath, then delete the whole subtrie of that node.

    Note: keypath should be in binary array format, i.e., encoded by encode_to_bin()
    """
    # Empty trie
    if node_hash == BLANK_HASH:
        if value:
            return self._hash_and_save(
                encode_kv_node(keypath, self._hash_and_save(encode_leaf_node(value)))
            )
        else:
            return BLANK_HASH
    nodetype, left_child, right_child = parse_node(self.db[node_hash])
    # Node is a leaf node
    if nodetype == LEAF_TYPE:
        # Keypath must match, there should be no remaining keypath
        if keypath:
            raise NodeOverrideError(
                "Fail to set the value because the prefix of it's key"
                " is the same as existing key")
        if if_delete_subtrie:
            return BLANK_HASH
        return self._hash_and_save(encode_leaf_node(value)) if value else BLANK_HASH
    # node is a key-value node
    elif nodetype == KV_TYPE:
        # Keypath too short
        if not keypath:
            if if_delete_subtrie:
                return BLANK_HASH
            else:
                raise NodeOverrideError(
                    "Fail to set the value because it's key"
                    " is the prefix of other existing key")
        return self._set_kv_node(
            keypath,
            node_hash,
            nodetype,
            left_child,
            right_child,
            value,
            if_delete_subtrie
        )
    # node is a branch node
    elif nodetype == BRANCH_TYPE:
        # Keypath too short
        if not keypath:
            if if_delete_subtrie:
                return BLANK_HASH
            else:
                raise NodeOverrideError(
                    "Fail to set the value because it's key"
                    " is the prefix of other existing key")
        return self._set_branch_node(
            keypath,
            nodetype,
            left_child,
            right_child,
            value,
            if_delete_subtrie
        )
    raise Exception("Invariant: This shouldn't ever happen")
python
def _set(self, node_hash, keypath, value, if_delete_subtrie=False):
    """
    If if_delete_subtrie is set to True, what it will do is that it take in a keypath
    and traverse til the end of keypath, then delete the whole subtrie of that node.

    Note: keypath should be in binary array format, i.e., encoded by encode_to_bin()
    """
    # Empty trie
    if node_hash == BLANK_HASH:
        if value:
            return self._hash_and_save(
                encode_kv_node(keypath, self._hash_and_save(encode_leaf_node(value)))
            )
        else:
            return BLANK_HASH
    nodetype, left_child, right_child = parse_node(self.db[node_hash])
    # Node is a leaf node
    if nodetype == LEAF_TYPE:
        # Keypath must match, there should be no remaining keypath
        if keypath:
            raise NodeOverrideError(
                "Fail to set the value because the prefix of it's key"
                " is the same as existing key")
        if if_delete_subtrie:
            return BLANK_HASH
        return self._hash_and_save(encode_leaf_node(value)) if value else BLANK_HASH
    # node is a key-value node
    elif nodetype == KV_TYPE:
        # Keypath too short
        if not keypath:
            if if_delete_subtrie:
                return BLANK_HASH
            else:
                raise NodeOverrideError(
                    "Fail to set the value because it's key"
                    " is the prefix of other existing key")
        return self._set_kv_node(
            keypath,
            node_hash,
            nodetype,
            left_child,
            right_child,
            value,
            if_delete_subtrie
        )
    # node is a branch node
    elif nodetype == BRANCH_TYPE:
        # Keypath too short
        if not keypath:
            if if_delete_subtrie:
                return BLANK_HASH
            else:
                raise NodeOverrideError(
                    "Fail to set the value because it's key"
                    " is the prefix of other existing key")
        return self._set_branch_node(
            keypath,
            nodetype,
            left_child,
            right_child,
            value,
            if_delete_subtrie
        )
    raise Exception("Invariant: This shouldn't ever happen")
[ "def", "_set", "(", "self", ",", "node_hash", ",", "keypath", ",", "value", ",", "if_delete_subtrie", "=", "False", ")", ":", "# Empty trie", "if", "node_hash", "==", "BLANK_HASH", ":", "if", "value", ":", "return", "self", ".", "_hash_and_save", "(", "encode_kv_node", "(", "keypath", ",", "self", ".", "_hash_and_save", "(", "encode_leaf_node", "(", "value", ")", ")", ")", ")", "else", ":", "return", "BLANK_HASH", "nodetype", ",", "left_child", ",", "right_child", "=", "parse_node", "(", "self", ".", "db", "[", "node_hash", "]", ")", "# Node is a leaf node", "if", "nodetype", "==", "LEAF_TYPE", ":", "# Keypath must match, there should be no remaining keypath", "if", "keypath", ":", "raise", "NodeOverrideError", "(", "\"Fail to set the value because the prefix of it's key\"", "\" is the same as existing key\"", ")", "if", "if_delete_subtrie", ":", "return", "BLANK_HASH", "return", "self", ".", "_hash_and_save", "(", "encode_leaf_node", "(", "value", ")", ")", "if", "value", "else", "BLANK_HASH", "# node is a key-value node", "elif", "nodetype", "==", "KV_TYPE", ":", "# Keypath too short", "if", "not", "keypath", ":", "if", "if_delete_subtrie", ":", "return", "BLANK_HASH", "else", ":", "raise", "NodeOverrideError", "(", "\"Fail to set the value because it's key\"", "\" is the prefix of other existing key\"", ")", "return", "self", ".", "_set_kv_node", "(", "keypath", ",", "node_hash", ",", "nodetype", ",", "left_child", ",", "right_child", ",", "value", ",", "if_delete_subtrie", ")", "# node is a branch node", "elif", "nodetype", "==", "BRANCH_TYPE", ":", "# Keypath too short", "if", "not", "keypath", ":", "if", "if_delete_subtrie", ":", "return", "BLANK_HASH", "else", ":", "raise", "NodeOverrideError", "(", "\"Fail to set the value because it's key\"", "\" is the prefix of other existing key\"", ")", "return", "self", ".", "_set_branch_node", "(", "keypath", ",", "nodetype", ",", "left_child", ",", "right_child", ",", "value", ",", "if_delete_subtrie", ")", "raise", "Exception", "(", "\"Invariant: This shouldn't ever happen\"", ")" ]
If if_delete_subtrie is set to True, what it will do is that it take in a keypath and traverse til the end of keypath, then delete the whole subtrie of that node. Note: keypath should be in binary array format, i.e., encoded by encode_to_bin()
[ "If", "if_delete_subtrie", "is", "set", "to", "True", "what", "it", "will", "do", "is", "that", "it", "take", "in", "a", "keypath", "and", "traverse", "til", "the", "end", "of", "keypath", "then", "delete", "the", "whole", "subtrie", "of", "that", "node", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L90-L153
train
251,288
ethereum/py-trie
trie/binary.py
BinaryTrie.delete
def delete(self, key):
    """
    Equals to setting the value to None
    """
    validate_is_bytes(key)

    self.root_hash = self._set(self.root_hash, encode_to_bin(key), b'')
python
def delete(self, key):
    """
    Equals to setting the value to None
    """
    validate_is_bytes(key)

    self.root_hash = self._set(self.root_hash, encode_to_bin(key), b'')
[ "def", "delete", "(", "self", ",", "key", ")", ":", "validate_is_bytes", "(", "key", ")", "self", ".", "root_hash", "=", "self", ".", "_set", "(", "self", ".", "root_hash", ",", "encode_to_bin", "(", "key", ")", ",", "b''", ")" ]
Equals to setting the value to None
[ "Equals", "to", "setting", "the", "value", "to", "None" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L297-L303
train
251,289
ethereum/py-trie
trie/binary.py
BinaryTrie.delete_subtrie
def delete_subtrie(self, key):
    """
    Given a key prefix, delete the whole subtrie that starts with the key prefix.

    Key will be encoded into binary array format first.

    It will call `_set` with `if_delete_subtrie` set to True.
    """
    validate_is_bytes(key)

    self.root_hash = self._set(
        self.root_hash,
        encode_to_bin(key),
        value=b'',
        if_delete_subtrie=True,
    )
python
def delete_subtrie(self, key):
    """
    Given a key prefix, delete the whole subtrie that starts with the key prefix.

    Key will be encoded into binary array format first.

    It will call `_set` with `if_delete_subtrie` set to True.
    """
    validate_is_bytes(key)

    self.root_hash = self._set(
        self.root_hash,
        encode_to_bin(key),
        value=b'',
        if_delete_subtrie=True,
    )
[ "def", "delete_subtrie", "(", "self", ",", "key", ")", ":", "validate_is_bytes", "(", "key", ")", "self", ".", "root_hash", "=", "self", ".", "_set", "(", "self", ".", "root_hash", ",", "encode_to_bin", "(", "key", ")", ",", "value", "=", "b''", ",", "if_delete_subtrie", "=", "True", ",", ")" ]
Given a key prefix, delete the whole subtrie that starts with the key prefix. Key will be encoded into binary array format first. It will call `_set` with `if_delete_subtrie` set to True.
[ "Given", "a", "key", "prefix", "delete", "the", "whole", "subtrie", "that", "starts", "with", "the", "key", "prefix", "." ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L305-L320
train
251,290
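A hedged usage sketch of subtrie deletion; the `BinaryTrie(db={})` constructor call and the key names below are illustrative assumptions, not taken from this file:

trie = BinaryTrie(db={})
trie.set(b'key-1', b'value-1')
trie.set(b'key-2', b'value-2')
trie.set(b'other', b'value-3')

# Drop every entry whose key starts with the given prefix in one call.
trie.delete_subtrie(b'key-')

# Unrelated keys are untouched.
assert trie.get(b'other') == b'value-3'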
ethereum/py-trie
trie/binary.py
BinaryTrie._hash_and_save
def _hash_and_save(self, node):
    """
    Saves a node into the database and returns its hash
    """
    validate_is_bin_node(node)

    node_hash = keccak(node)
    self.db[node_hash] = node
    return node_hash
python
def _hash_and_save(self, node):
    """
    Saves a node into the database and returns its hash
    """
    validate_is_bin_node(node)

    node_hash = keccak(node)
    self.db[node_hash] = node
    return node_hash
[ "def", "_hash_and_save", "(", "self", ",", "node", ")", ":", "validate_is_bin_node", "(", "node", ")", "node_hash", "=", "keccak", "(", "node", ")", "self", ".", "db", "[", "node_hash", "]", "=", "node", "return", "node_hash" ]
Saves a node into the database and returns its hash
[ "Saves", "a", "node", "into", "the", "database", "and", "returns", "its", "hash" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L338-L346
train
251,291
ethereum/py-trie
trie/utils/binaries.py
decode_from_bin
def decode_from_bin(input_bin):
    """
    0100000101010111010000110100100101001001 -> ASCII
    """
    for chunk in partition_all(8, input_bin):
        yield sum(
            2**exp * bit
            for exp, bit
            in enumerate(reversed(chunk))
        )
python
def decode_from_bin(input_bin):
    """
    0100000101010111010000110100100101001001 -> ASCII
    """
    for chunk in partition_all(8, input_bin):
        yield sum(
            2**exp * bit
            for exp, bit
            in enumerate(reversed(chunk))
        )
[ "def", "decode_from_bin", "(", "input_bin", ")", ":", "for", "chunk", "in", "partition_all", "(", "8", ",", "input_bin", ")", ":", "yield", "sum", "(", "2", "**", "exp", "*", "bit", "for", "exp", ",", "bit", "in", "enumerate", "(", "reversed", "(", "chunk", ")", ")", ")" ]
0100000101010111010000110100100101001001 -> ASCII
[ "0100000101010111010000110100100101001001", "-", ">", "ASCII" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L18-L27
train
251,292
ethereum/py-trie
trie/utils/binaries.py
encode_to_bin
def encode_to_bin(value):
    """
    ASCII -> 0100000101010111010000110100100101001001
    """
    for char in value:
        for exp in EXP:
            if char & exp:
                yield True
            else:
                yield False
python
def encode_to_bin(value):
    """
    ASCII -> 0100000101010111010000110100100101001001
    """
    for char in value:
        for exp in EXP:
            if char & exp:
                yield True
            else:
                yield False
[ "def", "encode_to_bin", "(", "value", ")", ":", "for", "char", "in", "value", ":", "for", "exp", "in", "EXP", ":", "if", "char", "&", "exp", ":", "yield", "True", "else", ":", "yield", "False" ]
ASCII -> 0100000101010111010000110100100101001001
[ "ASCII", "-", ">", "0100000101010111010000110100100101001001" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L31-L40
train
251,293
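For context, the two helpers above expand bytes into one boolean per bit (most significant bit first) and pack them back again. A standalone sketch of the same transformation, written without py-trie's `EXP` table or `partition_all` helper (the real functions return generators rather than lists):

def to_bits(value):
    # Expand each byte into 8 booleans, most significant bit first.
    return [bool(byte & (1 << shift)) for byte in value for shift in range(7, -1, -1)]

def from_bits(bits):
    # Pack groups of 8 bits back into bytes (inverse of to_bits).
    packed = bytearray()
    for start in range(0, len(bits), 8):
        chunk = bits[start:start + 8]
        packed.append(sum(bit << (len(chunk) - 1 - pos) for pos, bit in enumerate(chunk)))
    return bytes(packed)

# The bit string shown in the docstrings above decodes to the ASCII text 'AWCII'.
assert to_bits(b'A')[:8] == [False, True, False, False, False, False, False, True]
assert from_bits(to_bits(b'AWCII')) == b'AWCII'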
ethereum/py-trie
trie/utils/binaries.py
encode_from_bin_keypath
def encode_from_bin_keypath(input_bin):
    """
    Encodes a sequence of 0s and 1s into tightly packed bytes
    Used in encoding key path of a KV-NODE
    """
    padded_bin = bytes((4 - len(input_bin)) % 4) + input_bin
    prefix = TWO_BITS[len(input_bin) % 4]
    if len(padded_bin) % 8 == 4:
        return decode_from_bin(PREFIX_00 + prefix + padded_bin)
    else:
        return decode_from_bin(PREFIX_100000 + prefix + padded_bin)
python
def encode_from_bin_keypath(input_bin):
    """
    Encodes a sequence of 0s and 1s into tightly packed bytes
    Used in encoding key path of a KV-NODE
    """
    padded_bin = bytes((4 - len(input_bin)) % 4) + input_bin
    prefix = TWO_BITS[len(input_bin) % 4]
    if len(padded_bin) % 8 == 4:
        return decode_from_bin(PREFIX_00 + prefix + padded_bin)
    else:
        return decode_from_bin(PREFIX_100000 + prefix + padded_bin)
[ "def", "encode_from_bin_keypath", "(", "input_bin", ")", ":", "padded_bin", "=", "bytes", "(", "(", "4", "-", "len", "(", "input_bin", ")", ")", "%", "4", ")", "+", "input_bin", "prefix", "=", "TWO_BITS", "[", "len", "(", "input_bin", ")", "%", "4", "]", "if", "len", "(", "padded_bin", ")", "%", "8", "==", "4", ":", "return", "decode_from_bin", "(", "PREFIX_00", "+", "prefix", "+", "padded_bin", ")", "else", ":", "return", "decode_from_bin", "(", "PREFIX_100000", "+", "prefix", "+", "padded_bin", ")" ]
Encodes a sequence of 0s and 1s into tightly packed bytes Used in encoding key path of a KV-NODE
[ "Encodes", "a", "sequence", "of", "0s", "and", "1s", "into", "tightly", "packed", "bytes", "Used", "in", "encoding", "key", "path", "of", "a", "KV", "-", "NODE" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L43-L53
train
251,294
ethereum/py-trie
trie/utils/binaries.py
decode_to_bin_keypath
def decode_to_bin_keypath(path):
    """
    Decodes bytes into a sequence of 0s and 1s
    Used in decoding key path of a KV-NODE
    """
    path = encode_to_bin(path)
    if path[0] == 1:
        path = path[4:]
    assert path[0:2] == PREFIX_00
    padded_len = TWO_BITS.index(path[2:4])
    return path[4+((4 - padded_len) % 4):]
python
def decode_to_bin_keypath(path):
    """
    Decodes bytes into a sequence of 0s and 1s
    Used in decoding key path of a KV-NODE
    """
    path = encode_to_bin(path)
    if path[0] == 1:
        path = path[4:]
    assert path[0:2] == PREFIX_00
    padded_len = TWO_BITS.index(path[2:4])
    return path[4+((4 - padded_len) % 4):]
[ "def", "decode_to_bin_keypath", "(", "path", ")", ":", "path", "=", "encode_to_bin", "(", "path", ")", "if", "path", "[", "0", "]", "==", "1", ":", "path", "=", "path", "[", "4", ":", "]", "assert", "path", "[", "0", ":", "2", "]", "==", "PREFIX_00", "padded_len", "=", "TWO_BITS", ".", "index", "(", "path", "[", "2", ":", "4", "]", ")", "return", "path", "[", "4", "+", "(", "(", "4", "-", "padded_len", ")", "%", "4", ")", ":", "]" ]
Decodes bytes into a sequence of 0s and 1s Used in decoding key path of a KV-NODE
[ "Decodes", "bytes", "into", "a", "sequence", "of", "0s", "and", "1s", "Used", "in", "decoding", "key", "path", "of", "a", "KV", "-", "NODE" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L56-L66
train
251,295
ethereum/py-trie
trie/utils/nibbles.py
encode_nibbles
def encode_nibbles(nibbles):
    """
    The Hex Prefix function
    """
    if is_nibbles_terminated(nibbles):
        flag = HP_FLAG_2
    else:
        flag = HP_FLAG_0

    raw_nibbles = remove_nibbles_terminator(nibbles)

    is_odd = len(raw_nibbles) % 2

    if is_odd:
        flagged_nibbles = tuple(itertools.chain(
            (flag + 1,),
            raw_nibbles,
        ))
    else:
        flagged_nibbles = tuple(itertools.chain(
            (flag, 0),
            raw_nibbles,
        ))

    prefixed_value = nibbles_to_bytes(flagged_nibbles)

    return prefixed_value
python
def encode_nibbles(nibbles):
    """
    The Hex Prefix function
    """
    if is_nibbles_terminated(nibbles):
        flag = HP_FLAG_2
    else:
        flag = HP_FLAG_0

    raw_nibbles = remove_nibbles_terminator(nibbles)

    is_odd = len(raw_nibbles) % 2

    if is_odd:
        flagged_nibbles = tuple(itertools.chain(
            (flag + 1,),
            raw_nibbles,
        ))
    else:
        flagged_nibbles = tuple(itertools.chain(
            (flag, 0),
            raw_nibbles,
        ))

    prefixed_value = nibbles_to_bytes(flagged_nibbles)

    return prefixed_value
[ "def", "encode_nibbles", "(", "nibbles", ")", ":", "if", "is_nibbles_terminated", "(", "nibbles", ")", ":", "flag", "=", "HP_FLAG_2", "else", ":", "flag", "=", "HP_FLAG_0", "raw_nibbles", "=", "remove_nibbles_terminator", "(", "nibbles", ")", "is_odd", "=", "len", "(", "raw_nibbles", ")", "%", "2", "if", "is_odd", ":", "flagged_nibbles", "=", "tuple", "(", "itertools", ".", "chain", "(", "(", "flag", "+", "1", ",", ")", ",", "raw_nibbles", ",", ")", ")", "else", ":", "flagged_nibbles", "=", "tuple", "(", "itertools", ".", "chain", "(", "(", "flag", ",", "0", ")", ",", "raw_nibbles", ",", ")", ")", "prefixed_value", "=", "nibbles_to_bytes", "(", "flagged_nibbles", ")", "return", "prefixed_value" ]
The Hex Prefix function
[ "The", "Hex", "Prefix", "function" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nibbles.py#L78-L104
train
251,296
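For readers unfamiliar with it, the hex-prefix scheme packs a nibble path plus a leaf/extension flag into bytes. Below is a standalone sketch based on the yellow-paper description, independent of py-trie's `HP_FLAG_*` constants and terminator helpers (which are assumed from context):

def hex_prefix(nibbles, is_leaf):
    # The first nibble carries the leaf flag (2) plus an odd-length flag (1);
    # even-length paths get a padding 0 nibble so the result is whole bytes.
    flag = (2 if is_leaf else 0) + (len(nibbles) % 2)
    prefixed = [flag] + ([] if len(nibbles) % 2 else [0]) + list(nibbles)
    return bytes(prefixed[i] * 16 + prefixed[i + 1] for i in range(0, len(prefixed), 2))

# Even-length extension path [1, 2, 3, 4] -> 0x00 0x12 0x34
assert hex_prefix([1, 2, 3, 4], is_leaf=False) == bytes([0x00, 0x12, 0x34])
# Odd-length leaf path [0xf, 0x1, 0xc, 0xb, 0x8] -> 0x3f 0x1c 0xb8
assert hex_prefix([0xf, 0x1, 0xc, 0xb, 0x8], is_leaf=True) == bytes([0x3f, 0x1c, 0xb8])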
ethereum/py-trie
trie/utils/nibbles.py
decode_nibbles
def decode_nibbles(value):
    """
    The inverse of the Hex Prefix function
    """
    nibbles_with_flag = bytes_to_nibbles(value)
    flag = nibbles_with_flag[0]

    needs_terminator = flag in {HP_FLAG_2, HP_FLAG_2 + 1}
    is_odd_length = flag in {HP_FLAG_0 + 1, HP_FLAG_2 + 1}

    if is_odd_length:
        raw_nibbles = nibbles_with_flag[1:]
    else:
        raw_nibbles = nibbles_with_flag[2:]

    if needs_terminator:
        nibbles = add_nibbles_terminator(raw_nibbles)
    else:
        nibbles = raw_nibbles

    return nibbles
python
def decode_nibbles(value):
    """
    The inverse of the Hex Prefix function
    """
    nibbles_with_flag = bytes_to_nibbles(value)
    flag = nibbles_with_flag[0]

    needs_terminator = flag in {HP_FLAG_2, HP_FLAG_2 + 1}
    is_odd_length = flag in {HP_FLAG_0 + 1, HP_FLAG_2 + 1}

    if is_odd_length:
        raw_nibbles = nibbles_with_flag[1:]
    else:
        raw_nibbles = nibbles_with_flag[2:]

    if needs_terminator:
        nibbles = add_nibbles_terminator(raw_nibbles)
    else:
        nibbles = raw_nibbles

    return nibbles
[ "def", "decode_nibbles", "(", "value", ")", ":", "nibbles_with_flag", "=", "bytes_to_nibbles", "(", "value", ")", "flag", "=", "nibbles_with_flag", "[", "0", "]", "needs_terminator", "=", "flag", "in", "{", "HP_FLAG_2", ",", "HP_FLAG_2", "+", "1", "}", "is_odd_length", "=", "flag", "in", "{", "HP_FLAG_0", "+", "1", ",", "HP_FLAG_2", "+", "1", "}", "if", "is_odd_length", ":", "raw_nibbles", "=", "nibbles_with_flag", "[", "1", ":", "]", "else", ":", "raw_nibbles", "=", "nibbles_with_flag", "[", "2", ":", "]", "if", "needs_terminator", ":", "nibbles", "=", "add_nibbles_terminator", "(", "raw_nibbles", ")", "else", ":", "nibbles", "=", "raw_nibbles", "return", "nibbles" ]
The inverse of the Hex Prefix function
[ "The", "inverse", "of", "the", "Hex", "Prefix", "function" ]
d33108d21b54d59ee311f61d978496c84a6f1f8b
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nibbles.py#L107-L127
train
251,297
neon-jungle/wagtailvideos
wagtailvideos/models.py
get_local_file
def get_local_file(file):
    """
    Get a local version of the file, downloading it from the remote storage if
    required. The returned value should be used as a context manager to ensure
    any temporary files are cleaned up afterwards.
    """
    try:
        with open(file.path):
            yield file.path
    except NotImplementedError:
        _, ext = os.path.splitext(file.name)
        with NamedTemporaryFile(prefix='wagtailvideo-', suffix=ext) as tmp:
            try:
                file.open('rb')
                for chunk in file.chunks():
                    tmp.write(chunk)
            finally:
                file.close()
            tmp.flush()
            yield tmp.name
python
def get_local_file(file):
    """
    Get a local version of the file, downloading it from the remote storage if
    required. The returned value should be used as a context manager to ensure
    any temporary files are cleaned up afterwards.
    """
    try:
        with open(file.path):
            yield file.path
    except NotImplementedError:
        _, ext = os.path.splitext(file.name)
        with NamedTemporaryFile(prefix='wagtailvideo-', suffix=ext) as tmp:
            try:
                file.open('rb')
                for chunk in file.chunks():
                    tmp.write(chunk)
            finally:
                file.close()
            tmp.flush()
            yield tmp.name
[ "def", "get_local_file", "(", "file", ")", ":", "try", ":", "with", "open", "(", "file", ".", "path", ")", ":", "yield", "file", ".", "path", "except", "NotImplementedError", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file", ".", "name", ")", "with", "NamedTemporaryFile", "(", "prefix", "=", "'wagtailvideo-'", ",", "suffix", "=", "ext", ")", "as", "tmp", ":", "try", ":", "file", ".", "open", "(", "'rb'", ")", "for", "chunk", "in", "file", ".", "chunks", "(", ")", ":", "tmp", ".", "write", "(", "chunk", ")", "finally", ":", "file", ".", "close", "(", ")", "tmp", ".", "flush", "(", ")", "yield", "tmp", ".", "name" ]
Get a local version of the file, downloading it from the remote storage if required. The returned value should be used as a context manager to ensure any temporary files are cleaned up afterwards.
[ "Get", "a", "local", "version", "of", "the", "file", "downloading", "it", "from", "the", "remote", "storage", "if", "required", ".", "The", "returned", "value", "should", "be", "used", "as", "a", "context", "manager", "to", "ensure", "any", "temporary", "files", "are", "cleaned", "up", "afterwards", "." ]
05a43571ac4b5e7cf07fbb89e804e53447b699c2
https://github.com/neon-jungle/wagtailvideos/blob/05a43571ac4b5e7cf07fbb89e804e53447b699c2/wagtailvideos/models.py#L292-L311
train
251,298
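Because the function yields a path, it is presumably wrapped with `contextlib.contextmanager` in the module (the decorator line sits just above the captured snippet). A hypothetical usage sketch; the `video.file` attribute and the checksum task are illustrative, not part of wagtailvideos:

import hashlib

def checksum_upload(video):
    # Local storages expose .path directly; remote storages raise
    # NotImplementedError and are streamed into a NamedTemporaryFile,
    # which is cleaned up when the context manager exits.
    with get_local_file(video.file) as local_path:
        with open(local_path, 'rb') as handle:
            return hashlib.sha256(handle.read()).hexdigest()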
getsentry/semaphore
py/semaphore/utils.py
rustcall
def rustcall(func, *args):
    """Calls rust method and does some error handling."""
    lib.semaphore_err_clear()
    rv = func(*args)
    err = lib.semaphore_err_get_last_code()
    if not err:
        return rv
    msg = lib.semaphore_err_get_last_message()
    cls = exceptions_by_code.get(err, SemaphoreError)
    exc = cls(decode_str(msg))
    backtrace = decode_str(lib.semaphore_err_get_backtrace())
    if backtrace:
        exc.rust_info = backtrace
    raise exc
python
def rustcall(func, *args):
    """Calls rust method and does some error handling."""
    lib.semaphore_err_clear()
    rv = func(*args)
    err = lib.semaphore_err_get_last_code()
    if not err:
        return rv
    msg = lib.semaphore_err_get_last_message()
    cls = exceptions_by_code.get(err, SemaphoreError)
    exc = cls(decode_str(msg))
    backtrace = decode_str(lib.semaphore_err_get_backtrace())
    if backtrace:
        exc.rust_info = backtrace
    raise exc
[ "def", "rustcall", "(", "func", ",", "*", "args", ")", ":", "lib", ".", "semaphore_err_clear", "(", ")", "rv", "=", "func", "(", "*", "args", ")", "err", "=", "lib", ".", "semaphore_err_get_last_code", "(", ")", "if", "not", "err", ":", "return", "rv", "msg", "=", "lib", ".", "semaphore_err_get_last_message", "(", ")", "cls", "=", "exceptions_by_code", ".", "get", "(", "err", ",", "SemaphoreError", ")", "exc", "=", "cls", "(", "decode_str", "(", "msg", ")", ")", "backtrace", "=", "decode_str", "(", "lib", ".", "semaphore_err_get_backtrace", "(", ")", ")", "if", "backtrace", ":", "exc", ".", "rust_info", "=", "backtrace", "raise", "exc" ]
Calls rust method and does some error handling.
[ "Calls", "rust", "method", "and", "does", "some", "error", "handling", "." ]
6f260b4092261e893b4debd9a3a7a78232f46c5e
https://github.com/getsentry/semaphore/blob/6f260b4092261e893b4debd9a3a7a78232f46c5e/py/semaphore/utils.py#L17-L30
train
251,299
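The clear-then-check pattern above generalizes to other FFI wrappers. A minimal standalone sketch of the idea; the `lib` object and its methods below are hypothetical stand-ins, not Semaphore's actual binding:

class FFIError(Exception):
    """Raised when the native library reports a non-zero error code."""

def checked_call(lib, func, *args):
    lib.clear_last_error()              # drop any stale error state before calling
    result = func(*args)
    code = lib.get_last_error_code()    # zero means the call succeeded
    if not code:
        return result
    raise FFIError("native call failed with code %d: %s"
                   % (code, lib.get_last_error_message()))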