idx (int64, 0-63k) | question (string, length 61-4.03k) | target (string, length 6-1.23k) |
---|---|---|
62,400 | def _add_gene_equivalencies ( self , xrefs , gene_id , taxon ) : clique_map = self . open_and_parse_yaml ( self . resources [ 'clique_leader' ] ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) filter_out = [ 'Vega' , 'IMGT/GENE-DB' , 'Araport' ] for dbxref in xrefs . strip ( ) . split ( '|' ) : prefix = ':' . join ( dbxref . split ( ':' ) [ : - 1 ] ) . strip ( ) if prefix in self . localtt : prefix = self . localtt [ prefix ] dbxref_curie = ':' . join ( ( prefix , dbxref . split ( ':' ) [ - 1 ] ) ) if dbxref_curie is not None and prefix != '' : if prefix == 'HPRD' : model . addTriple ( gene_id , self . globaltt [ 'has gene product' ] , dbxref_curie ) continue if prefix in filter_out : continue if prefix == 'ENSEMBL' : model . addXref ( gene_id , dbxref_curie ) if prefix == 'OMIM' : if DipperUtil . is_omim_disease ( dbxref_curie ) : continue try : if self . class_or_indiv . get ( gene_id ) == 'C' : model . addEquivalentClass ( gene_id , dbxref_curie ) if taxon in clique_map : if clique_map [ taxon ] == prefix : model . makeLeader ( dbxref_curie ) elif clique_map [ taxon ] == gene_id . split ( ':' ) [ 0 ] : model . makeLeader ( gene_id ) else : model . addSameIndividual ( gene_id , dbxref_curie ) except AssertionError as err : LOG . warning ( "Error parsing %s: %s" , gene_id , err ) return | Add equivalentClass and sameAs relationships |
62,401 | def _get_gene2pubmed ( self , limit ) : src_key = 'gene2pubmed' if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing Gene records" ) line_counter = 0 myfile = '/' . join ( ( self . rawdir , self . files [ src_key ] [ 'file' ] ) ) LOG . info ( "FILE: %s" , myfile ) assoc_counter = 0 col = self . files [ src_key ] [ 'columns' ] with gzip . open ( myfile , 'rb' ) as tsv : row = tsv . readline ( ) . decode ( ) . strip ( ) . split ( '\t' ) row [ 0 ] = row [ 0 ] [ 1 : ] if col != row : LOG . info ( '%s\nExpected Headers:\t%s\nRecived Headers:\t %s\n' , src_key , col , row ) for line in tsv : line_counter += 1 row = line . decode ( ) . strip ( ) . split ( '\t' ) if row [ 0 ] [ 0 ] == '#' : continue gene_num = row [ col . index ( 'GeneID' ) ] . strip ( ) if self . test_mode and int ( gene_num ) not in self . gene_ids : continue tax_num = row [ col . index ( 'tax_id' ) ] . strip ( ) if not self . test_mode and tax_num not in self . tax_ids : continue pubmed_num = row [ col . index ( 'PubMed_ID' ) ] . strip ( ) if gene_num == '-' or pubmed_num == '-' : continue gene_id = ':' . join ( ( 'NCBIGene' , gene_num ) ) pubmed_id = ':' . join ( ( 'PMID' , pubmed_num ) ) if self . class_or_indiv . get ( gene_id ) == 'C' : model . addClassToGraph ( gene_id , None ) else : model . addIndividualToGraph ( gene_id , None ) model . addIndividualToGraph ( pubmed_id , None , None ) reference = Reference ( graph , pubmed_id , self . globaltt [ 'journal article' ] ) reference . addRefToGraph ( ) graph . addTriple ( pubmed_id , self . globaltt [ 'is_about' ] , gene_id ) assoc_counter += 1 if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Processed %d pub-gene associations" , assoc_counter ) return | Loops through the gene2pubmed file and adds a simple triple to say that a given publication is_about a gene . Publications are added as NamedIndividuals . |
62,402 | def process_entries ( self , omimids , transform , included_fields = None , graph = None , limit = None , globaltt = None ) : omimparams = { } if included_fields is not None and included_fields : omimparams [ 'include' ] = ',' . join ( included_fields ) processed_entries = list ( ) cleanomimids = [ o . split ( ':' ) [ - 1 ] for o in omimids ] diff = set ( omimids ) - set ( cleanomimids ) if diff : LOG . warning ( 'OMIM has %i dirty bits see"\n %s' , len ( diff ) , str ( diff ) ) omimids = cleanomimids else : cleanomimids = list ( ) acc = 0 groupsize = 20 if not self . test_mode and limit is not None : maxit = limit if limit > len ( omimids ) : maxit = len ( omimids ) else : maxit = len ( omimids ) while acc < maxit : end = min ( ( maxit , acc + groupsize ) ) if self . test_mode : intersect = list ( set ( [ str ( i ) for i in self . test_ids ] ) & set ( omimids [ acc : end ] ) ) if intersect : LOG . info ( "found test ids: %s" , intersect ) omimparams . update ( { 'mimNumber' : ',' . join ( intersect ) } ) else : acc += groupsize continue else : omimparams . update ( { 'mimNumber' : ',' . join ( omimids [ acc : end ] ) } ) url = OMIMAPI + urllib . parse . urlencode ( omimparams ) try : req = urllib . request . urlopen ( url ) except HTTPError as e : LOG . warning ( 'fetching: %s' , url ) error_msg = e . read ( ) if re . search ( r'The API key: .* is invalid' , str ( error_msg ) ) : msg = "API Key not valid" raise HTTPError ( url , e . code , msg , e . hdrs , e . fp ) LOG . error ( "Failed with: %s" , str ( error_msg ) ) break resp = req . read ( ) . decode ( ) acc += groupsize myjson = json . loads ( resp ) with open ( './raw/omim/_' + str ( acc ) + '.json' , 'w' ) as fp : json . dump ( myjson , fp ) entries = myjson [ 'omim' ] [ 'entryList' ] for e in entries : processed_entry = transform ( e , graph , globaltt ) if processed_entry is not None : processed_entries . append ( processed_entry ) return processed_entries | Given a list of omim ids this will use the omim API to fetch the entries according to the included_fields passed as a parameter . If a transformation function is supplied this will iterate over each entry and either add the results to the supplied graph or will return a set of processed entries that the calling function can further iterate . |
62,403 | def _process_all ( self , limit ) : omimids = self . _get_omim_ids ( ) LOG . info ( 'Have %i omim numbers to fetch records from their API' , len ( omimids ) ) LOG . info ( 'Have %i omim types ' , len ( self . omim_type ) ) if self . test_mode : graph = self . testgraph else : graph = self . graph geno = Genotype ( graph ) model = Model ( graph ) tax_label = 'Homo sapiens' tax_id = self . globaltt [ tax_label ] geno . addGenome ( tax_id , tax_label ) model . addClassToGraph ( tax_id , None ) includes = set ( ) includes . add ( 'all' ) self . process_entries ( omimids , self . _transform_entry , includes , graph , limit , self . globaltt ) | This takes the list of omim identifiers from the omim . txt . Z file and iteratively queries the omim api for the json - formatted data . This will create OMIM classes with the label definition and some synonyms . If an entry is removed it is added as a deprecated class . If an entry is moved it is deprecated and consider annotations are added . |
62,404 | def update ( self , key : bytes , value : bytes , node_updates : Sequence [ Hash32 ] ) : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) path_diff = ( to_int ( self . key ) ^ to_int ( key ) ) if path_diff == 0 : self . _value = value else : for bit in reversed ( range ( self . _branch_size ) ) : if path_diff & ( 1 << bit ) > 0 : branch_point = ( self . _branch_size - 1 ) - bit break if len ( node_updates ) <= branch_point : raise ValidationError ( "Updated node list is not deep enough" ) self . _branch [ branch_point ] = node_updates [ branch_point ] | Merge an update for another key with the one we are tracking internally . |
62,405 | def _get ( self , key : bytes ) -> Tuple [ bytes , Tuple [ Hash32 ] ] : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) branch = [ ] target_bit = 1 << ( self . depth - 1 ) path = to_int ( key ) node_hash = self . root_hash for _ in range ( self . depth ) : node = self . db [ node_hash ] left , right = node [ : 32 ] , node [ 32 : ] if path & target_bit : branch . append ( left ) node_hash = right else : branch . append ( right ) node_hash = left target_bit >>= 1 return self . db [ node_hash ] , tuple ( branch ) | Returns db value and branch in root - > leaf order |
62,406 | def set ( self , key : bytes , value : bytes ) -> Tuple [ Hash32 ] : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) validate_is_bytes ( value ) path = to_int ( key ) node = value _ , branch = self . _get ( key ) proof_update = [ ] target_bit = 1 for sibling_node in reversed ( branch ) : node_hash = keccak ( node ) proof_update . append ( node_hash ) self . db [ node_hash ] = node if ( path & target_bit ) : node = sibling_node + node_hash else : node = node_hash + sibling_node target_bit <<= 1 self . root_hash = keccak ( node ) self . db [ self . root_hash ] = node return tuple ( reversed ( proof_update ) ) | Returns all updated hashes in root - > leaf order |
62,407 | def delete ( self , key : bytes ) -> Tuple [ Hash32 ] : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) return self . set ( key , self . _default ) | Equals to setting the value to None Returns all updated hashes in root - > leaf order |
62,408 | def next_batch ( self , n = 1 ) : if len ( self . queue ) == 0 : return [ ] batch = list ( reversed ( ( self . queue [ - n : ] ) ) ) self . queue = self . queue [ : - n ] return batch | Return the next requests that should be dispatched . |
62,409 | def schedule ( self , node_key , parent , depth , leaf_callback , is_raw = False ) : if node_key in self . _existing_nodes : self . logger . debug ( "Node %s already exists in db" % encode_hex ( node_key ) ) return if node_key in self . db : self . _existing_nodes . add ( node_key ) self . logger . debug ( "Node %s already exists in db" % encode_hex ( node_key ) ) return if parent is not None : parent . dependencies += 1 existing = self . requests . get ( node_key ) if existing is not None : self . logger . debug ( "Already requesting %s, will just update parents list" % node_key ) existing . parents . append ( parent ) return request = SyncRequest ( node_key , parent , depth , leaf_callback , is_raw ) self . logger . debug ( "Scheduling retrieval of %s" % encode_hex ( request . node_key ) ) self . requests [ request . node_key ] = request bisect . insort ( self . queue , request ) | Schedule a request for the node with the given key . |
62,410 | def get_children ( self , request ) : node = decode_node ( request . data ) return _get_children ( node , request . depth ) | Return all children of the node retrieved by the given request . |
62,411 | def process ( self , results ) : for node_key , data in results : request = self . requests . get ( node_key ) if request is None : self . logger . info ( "No SyncRequest found for %s, maybe we got more than one response for it" % encode_hex ( node_key ) ) return if request . data is not None : raise SyncRequestAlreadyProcessed ( "%s has been processed already" % request ) request . data = data if request . is_raw : self . commit ( request ) continue references , leaves = self . get_children ( request ) for depth , ref in references : self . schedule ( ref , request , depth , request . leaf_callback ) if request . leaf_callback is not None : for leaf in leaves : request . leaf_callback ( leaf , request ) if request . dependencies == 0 : self . commit ( request ) | Process request results . |
62,412 | def check_if_branch_exist ( db , root_hash , key_prefix ) : validate_is_bytes ( key_prefix ) return _check_if_branch_exist ( db , root_hash , encode_to_bin ( key_prefix ) ) | Given a key prefix return whether this prefix is the prefix of an existing key in the trie . |
62,413 | def get_branch ( db , root_hash , key ) : validate_is_bytes ( key ) return tuple ( _get_branch ( db , root_hash , encode_to_bin ( key ) ) ) | Get a long - format Merkle branch |
62,414 | def get_witness_for_key_prefix ( db , node_hash , key ) : validate_is_bytes ( key ) return tuple ( _get_witness_for_key_prefix ( db , node_hash , encode_to_bin ( key ) ) ) | Get all witness given a keypath prefix . Include |
62,415 | def encode_branch_node ( left_child_node_hash , right_child_node_hash ) : validate_is_bytes ( left_child_node_hash ) validate_length ( left_child_node_hash , 32 ) validate_is_bytes ( right_child_node_hash ) validate_length ( right_child_node_hash , 32 ) return BRANCH_TYPE_PREFIX + left_child_node_hash + right_child_node_hash | Serializes a branch node |
62,416 | def encode_leaf_node ( value ) : validate_is_bytes ( value ) if value is None or value == b'' : raise ValidationError ( "Value of leaf node can not be empty" ) return LEAF_TYPE_PREFIX + value | Serializes a leaf node |
62,417 | def batch_commit ( self , * , do_deletes = False ) : try : yield except Exception as exc : raise exc else : for key , value in self . cache . items ( ) : if value is not DELETED : self . wrapped_db [ key ] = value elif do_deletes : self . wrapped_db . pop ( key , None ) finally : self . cache = { } | Batch and commit and end of context |
62,418 | def _prune_node ( self , node ) : if self . is_pruning : prune_key , node_body = self . _node_to_db_mapping ( node ) should_prune = ( node_body is not None ) else : should_prune = False yield if should_prune : del self . db [ prune_key ] | Prune the given node if context exits cleanly . |
62,419 | def _normalize_branch_node ( self , node ) : iter_node = iter ( node ) if any ( iter_node ) and any ( iter_node ) : return node if node [ 16 ] : return [ compute_leaf_key ( [ ] ) , node [ 16 ] ] sub_node_idx , sub_node_hash = next ( ( idx , v ) for idx , v in enumerate ( node [ : 16 ] ) if v ) sub_node = self . get_node ( sub_node_hash ) sub_node_type = get_node_type ( sub_node ) if sub_node_type in { NODE_TYPE_LEAF , NODE_TYPE_EXTENSION } : with self . _prune_node ( sub_node ) : new_subnode_key = encode_nibbles ( tuple ( itertools . chain ( [ sub_node_idx ] , decode_nibbles ( sub_node [ 0 ] ) , ) ) ) return [ new_subnode_key , sub_node [ 1 ] ] elif sub_node_type == NODE_TYPE_BRANCH : subnode_hash = self . _persist_node ( sub_node ) return [ encode_nibbles ( [ sub_node_idx ] ) , subnode_hash ] else : raise Exception ( "Invariant: this code block should be unreachable" ) | A branch node which is left with only a single non - blank item should be turned into either a leaf or extension node . |
62,420 | def _delete_branch_node ( self , node , trie_key ) : if not trie_key : node [ - 1 ] = BLANK_NODE return self . _normalize_branch_node ( node ) node_to_delete = self . get_node ( node [ trie_key [ 0 ] ] ) sub_node = self . _delete ( node_to_delete , trie_key [ 1 : ] ) encoded_sub_node = self . _persist_node ( sub_node ) if encoded_sub_node == node [ trie_key [ 0 ] ] : return node node [ trie_key [ 0 ] ] = encoded_sub_node if encoded_sub_node == BLANK_NODE : return self . _normalize_branch_node ( node ) return node | Delete a key from inside or underneath a branch node |
62,421 | def get ( self , key ) : validate_is_bytes ( key ) return self . _get ( self . root_hash , encode_to_bin ( key ) ) | Fetches the value with a given keypath from the given node . |
62,422 | def set ( self , key , value ) : validate_is_bytes ( key ) validate_is_bytes ( value ) self . root_hash = self . _set ( self . root_hash , encode_to_bin ( key ) , value ) | Sets the value at the given keypath from the given node |
62,423 | def _set ( self , node_hash , keypath , value , if_delete_subtrie = False ) : if node_hash == BLANK_HASH : if value : return self . _hash_and_save ( encode_kv_node ( keypath , self . _hash_and_save ( encode_leaf_node ( value ) ) ) ) else : return BLANK_HASH nodetype , left_child , right_child = parse_node ( self . db [ node_hash ] ) if nodetype == LEAF_TYPE : if keypath : raise NodeOverrideError ( "Fail to set the value because the prefix of it's key" " is the same as existing key" ) if if_delete_subtrie : return BLANK_HASH return self . _hash_and_save ( encode_leaf_node ( value ) ) if value else BLANK_HASH elif nodetype == KV_TYPE : if not keypath : if if_delete_subtrie : return BLANK_HASH else : raise NodeOverrideError ( "Fail to set the value because it's key" " is the prefix of other existing key" ) return self . _set_kv_node ( keypath , node_hash , nodetype , left_child , right_child , value , if_delete_subtrie ) elif nodetype == BRANCH_TYPE : if not keypath : if if_delete_subtrie : return BLANK_HASH else : raise NodeOverrideError ( "Fail to set the value because it's key" " is the prefix of other existing key" ) return self . _set_branch_node ( keypath , nodetype , left_child , right_child , value , if_delete_subtrie ) raise Exception ( "Invariant: This shouldn't ever happen" ) | If if_delete_subtrie is set to True what it will do is that it take in a keypath and traverse til the end of keypath then delete the whole subtrie of that node . |
62,424 | def delete ( self , key ) : validate_is_bytes ( key ) self . root_hash = self . _set ( self . root_hash , encode_to_bin ( key ) , b'' ) | Equals to setting the value to None |
62,425 | def delete_subtrie ( self , key ) : validate_is_bytes ( key ) self . root_hash = self . _set ( self . root_hash , encode_to_bin ( key ) , value = b'' , if_delete_subtrie = True , ) | Given a key prefix delete the whole subtrie that starts with the key prefix . |
62,426 | def _hash_and_save ( self , node ) : validate_is_bin_node ( node ) node_hash = keccak ( node ) self . db [ node_hash ] = node return node_hash | Saves a node into the database and returns its hash |
62,427 | def decode_from_bin ( input_bin ) : for chunk in partition_all ( 8 , input_bin ) : yield sum ( 2 ** exp * bit for exp , bit in enumerate ( reversed ( chunk ) ) ) | 0100000101010111010000110100100101001001 - > ASCII |
62,428 | def encode_to_bin ( value ) : for char in value : for exp in EXP : if char & exp : yield True else : yield False | ASCII - > 0100000101010111010000110100100101001001 |
62,429 | def encode_from_bin_keypath ( input_bin ) : padded_bin = bytes ( ( 4 - len ( input_bin ) ) % 4 ) + input_bin prefix = TWO_BITS [ len ( input_bin ) % 4 ] if len ( padded_bin ) % 8 == 4 : return decode_from_bin ( PREFIX_00 + prefix + padded_bin ) else : return decode_from_bin ( PREFIX_100000 + prefix + padded_bin ) | Encodes a sequence of 0s and 1s into tightly packed bytes Used in encoding key path of a KV - NODE |
62,430 | def decode_to_bin_keypath ( path ) : path = encode_to_bin ( path ) if path [ 0 ] == 1 : path = path [ 4 : ] assert path [ 0 : 2 ] == PREFIX_00 padded_len = TWO_BITS . index ( path [ 2 : 4 ] ) return path [ 4 + ( ( 4 - padded_len ) % 4 ) : ] | Decodes bytes into a sequence of 0s and 1s Used in decoding key path of a KV - NODE |
62,431 | def encode_nibbles ( nibbles ) : if is_nibbles_terminated ( nibbles ) : flag = HP_FLAG_2 else : flag = HP_FLAG_0 raw_nibbles = remove_nibbles_terminator ( nibbles ) is_odd = len ( raw_nibbles ) % 2 if is_odd : flagged_nibbles = tuple ( itertools . chain ( ( flag + 1 , ) , raw_nibbles , ) ) else : flagged_nibbles = tuple ( itertools . chain ( ( flag , 0 ) , raw_nibbles , ) ) prefixed_value = nibbles_to_bytes ( flagged_nibbles ) return prefixed_value | The Hex Prefix function |
62,432 | def decode_nibbles ( value ) : nibbles_with_flag = bytes_to_nibbles ( value ) flag = nibbles_with_flag [ 0 ] needs_terminator = flag in { HP_FLAG_2 , HP_FLAG_2 + 1 } is_odd_length = flag in { HP_FLAG_0 + 1 , HP_FLAG_2 + 1 } if is_odd_length : raw_nibbles = nibbles_with_flag [ 1 : ] else : raw_nibbles = nibbles_with_flag [ 2 : ] if needs_terminator : nibbles = add_nibbles_terminator ( raw_nibbles ) else : nibbles = raw_nibbles return nibbles | The inverse of the Hex Prefix function |
62,433 | def get_local_file ( file ) : try : with open ( file . path ) : yield file . path except NotImplementedError : _ , ext = os . path . splitext ( file . name ) with NamedTemporaryFile ( prefix = 'wagtailvideo-' , suffix = ext ) as tmp : try : file . open ( 'rb' ) for chunk in file . chunks ( ) : tmp . write ( chunk ) finally : file . close ( ) tmp . flush ( ) yield tmp . name | Get a local version of the file downloading it from the remote storage if required . The returned value should be used as a context manager to ensure any temporary files are cleaned up afterwards . |
62,434 | def rustcall ( func , * args ) : lib . semaphore_err_clear ( ) rv = func ( * args ) err = lib . semaphore_err_get_last_code ( ) if not err : return rv msg = lib . semaphore_err_get_last_message ( ) cls = exceptions_by_code . get ( err , SemaphoreError ) exc = cls ( decode_str ( msg ) ) backtrace = decode_str ( lib . semaphore_err_get_backtrace ( ) ) if backtrace : exc . rust_info = backtrace raise exc | Calls rust method and does some error handling . |
62,435 | def decode_str ( s , free = False ) : try : if s . len == 0 : return u"" return ffi . unpack ( s . data , s . len ) . decode ( "utf-8" , "replace" ) finally : if free : lib . semaphore_str_free ( ffi . addressof ( s ) ) | Decodes a SymbolicStr |
62,436 | def encode_str ( s , mutable = False ) : rv = ffi . new ( "SemaphoreStr *" ) if isinstance ( s , text_type ) : s = s . encode ( "utf-8" ) if mutable : s = bytearray ( s ) rv . data = ffi . from_buffer ( s ) rv . len = len ( s ) attached_refs [ rv ] = s return rv | Encodes a SemaphoreStr |
62,437 | def decode_uuid ( value ) : return uuid . UUID ( bytes = bytes ( bytearray ( ffi . unpack ( value . data , 16 ) ) ) ) | Decodes the given uuid value . |
62,438 | def has_cargo_fmt ( ) : try : c = subprocess . Popen ( [ "cargo" , "fmt" , "--" , "--help" ] , stdout = subprocess . PIPE , stderr = subprocess . PIPE , ) return c . wait ( ) == 0 except OSError : return False | Runs a quick check to see if cargo fmt is installed . |
62,439 | def get_modified_files ( ) : c = subprocess . Popen ( [ "git" , "diff-index" , "--cached" , "--name-only" , "HEAD" ] , stdout = subprocess . PIPE ) return c . communicate ( ) [ 0 ] . splitlines ( ) | Returns a list of all modified files . |
62,440 | def _get_next_chunk ( fp , previously_read_position , chunk_size ) : seek_position , read_size = _get_what_to_read_next ( fp , previously_read_position , chunk_size ) fp . seek ( seek_position ) read_content = fp . read ( read_size ) read_position = seek_position return read_content , read_position | Return next chunk of data that we would from the file pointer . |
62,441 | def _get_what_to_read_next ( fp , previously_read_position , chunk_size ) : seek_position = max ( previously_read_position - chunk_size , 0 ) read_size = chunk_size while seek_position > 0 : fp . seek ( seek_position ) if _is_partially_read_new_line ( fp . read ( 1 ) ) : seek_position -= 1 read_size += 1 else : break read_size = min ( previously_read_position - seek_position , read_size ) return seek_position , read_size | Return information on which file pointer position to read from and how many bytes . |
62,442 | def _remove_trailing_new_line ( l ) : for n in sorted ( new_lines_bytes , key = lambda x : len ( x ) , reverse = True ) : if l . endswith ( n ) : remove_new_line = slice ( None , - len ( n ) ) return l [ remove_new_line ] return l | Remove a single instance of new line at the end of l if it exists . |
62,443 | def _find_furthest_new_line ( read_buffer ) : new_line_positions = [ read_buffer . rfind ( n ) for n in new_lines_bytes ] return max ( new_line_positions ) | Return - 1 if read_buffer does not contain new line otherwise the position of the rightmost newline . |
62,444 | def add_to_buffer ( self , content , read_position ) : self . read_position = read_position if self . read_buffer is None : self . read_buffer = content else : self . read_buffer = content + self . read_buffer | Add additional bytes content as read from the read_position . |
62,445 | def yieldable ( self ) : if self . read_buffer is None : return False t = _remove_trailing_new_line ( self . read_buffer ) n = _find_furthest_new_line ( t ) if n >= 0 : return True if self . read_position == 0 and self . read_buffer is not None : return True return False | Return True if there is a line that the buffer can return False otherwise . |
62,446 | def return_line ( self ) : assert ( self . yieldable ( ) ) t = _remove_trailing_new_line ( self . read_buffer ) i = _find_furthest_new_line ( t ) if i >= 0 : l = i + 1 after_new_line = slice ( l , None ) up_to_include_new_line = slice ( 0 , l ) r = t [ after_new_line ] self . read_buffer = t [ up_to_include_new_line ] else : r = t self . read_buffer = None return r | Return a new line if it is available . |
62,447 | def read_until_yieldable ( self ) : while not self . yieldable ( ) : read_content , read_position = _get_next_chunk ( self . fp , self . read_position , self . chunk_size ) self . add_to_buffer ( read_content , read_position ) | Read in additional chunks until it is yieldable . |
62,448 | def next ( self ) : if self . closed : raise StopIteration if self . __buf . has_returned_every_line ( ) : self . close ( ) raise StopIteration self . __buf . read_until_yieldable ( ) r = self . __buf . return_line ( ) return r . decode ( self . encoding ) | Returns unicode string from the last line until the beginning of file . |
62,449 | def home ( request , chat_channel_name = None ) : if not chat_channel_name : chat_channel_name = 'homepage' context = { 'address' : chat_channel_name , 'history' : [ ] , } if ChatMessage . objects . filter ( channel = chat_channel_name ) . exists ( ) : context [ 'history' ] = ChatMessage . objects . filter ( channel = chat_channel_name ) websocket_prefix = "ws" websocket_port = 9000 context [ 'websocket_prefix' ] = websocket_prefix context [ 'websocket_port' ] = websocket_port return render ( request , 'chat.html' , context ) | if we have a chat_channel_name kwarg have the response include that channel name so the javascript knows to subscribe to that channel ... |
62,450 | def hendrixLauncher ( action , options , with_tiempo = False ) : if options [ 'key' ] and options [ 'cert' ] and options [ 'cache' ] : from hendrix . deploy import hybrid HendrixDeploy = hybrid . HendrixDeployHybrid elif options [ 'key' ] and options [ 'cert' ] : from hendrix . deploy import tls HendrixDeploy = tls . HendrixDeployTLS elif options [ 'cache' ] : HendrixDeploy = cache . HendrixDeployCache else : HendrixDeploy = base . HendrixDeploy if with_tiempo : deploy = HendrixDeploy ( action = 'start' , options = options ) deploy . run ( ) else : deploy = HendrixDeploy ( action , options ) deploy . run ( ) | Decides which version of HendrixDeploy to use and then launches it . |
62,451 | def logReload ( options ) : event_handler = Reload ( options ) observer = Observer ( ) observer . schedule ( event_handler , path = '.' , recursive = True ) observer . start ( ) try : while True : time . sleep ( 1 ) except KeyboardInterrupt : observer . stop ( ) pid = os . getpid ( ) chalk . eraser ( ) chalk . green ( '\nHendrix successfully closed.' ) os . kill ( pid , 15 ) observer . join ( ) exit ( '\n' ) | encompasses all the logic for reloading observer . |
62,452 | def launch ( * args , ** options ) : action = args [ 0 ] if options [ 'reload' ] : logReload ( options ) else : assignDeploymentInstance ( action , options ) | launch acts on the user specified action and options by executing Hedrix . run |
62,453 | def findSettingsModule ( ) : "Find the settings module dot path within django's manage.py file" try : with open ( 'manage.py' , 'r' ) as manage : manage_contents = manage . read ( ) search = re . search ( r"([\"\'](?P<module>[a-z\.]+)[\"\'])" , manage_contents ) if search : settings_mod = search . group ( "module" ) else : search = re . search ( "\".*?\"(,\\s)??\"(?P<module>.*?)\"\\)$" , manage_contents , re . I | re . S | re . M ) settings_mod = search . group ( "module" ) os . environ . setdefault ( 'DJANGO_SETTINGS_MODULE' , settings_mod ) except IOError as e : msg = ( str ( e ) + '\nPlease ensure that you are in the same directory ' 'as django\'s "manage.py" file.' ) raise IOError ( chalk . red ( msg ) , None , sys . exc_info ( ) [ 2 ] ) except AttributeError : settings_mod = '' return settings_mod | Find the settings module dot path within django s manage . py file |
62,454 | def subprocessLaunch ( ) : if not redis_available : raise RedisException ( "can't launch this subprocess without tiempo/redis." ) try : action = 'start' options = REDIS . get ( 'worker_args' ) assignDeploymentInstance ( action = 'start' , options = options ) except Exception : chalk . red ( '\n Encountered an unhandled exception while trying to %s hendrix.\n' % action , pipe = chalk . stderr ) raise | This function is called by the hxw script . It takes no arguments and returns an instance of HendrixDeploy |
62,455 | def main ( args = None ) : "The function to execute when running hx" if args is None : args = sys . argv [ 1 : ] options , args = HendrixOptionParser . parse_args ( args ) options = vars ( options ) try : action = args [ 0 ] except IndexError : HendrixOptionParser . print_help ( ) return exposeProject ( options ) options = djangoVsWsgi ( options ) options = devFriendly ( options ) redirect = noiseControl ( options ) try : launch ( * args , ** options ) except Exception : chalk . red ( '\n Encountered an unhandled exception while trying to %s hendrix.\n' % action , pipe = chalk . stderr ) raise | The function to execute when running hx |
62,456 | def handleHeader ( self , key , value ) : "extends handleHeader to save headers to a local response object" key_lower = key . lower ( ) if key_lower == 'location' : value = self . modLocationPort ( value ) self . _response . headers [ key_lower ] = value if key_lower != 'cache-control' : proxy . ProxyClient . handleHeader ( self , key , value ) | extends handleHeader to save headers to a local response object |
62,457 | def handleStatus ( self , version , code , message ) : "extends handleStatus to instantiate a local response object" proxy . ProxyClient . handleStatus ( self , version , code , message ) self . _response = client . Response ( version , code , message , { } , None ) | extends handleStatus to instantiate a local response object |
62,458 | def modLocationPort ( self , location ) : components = urlparse . urlparse ( location ) reverse_proxy_port = self . father . getHost ( ) . port reverse_proxy_host = self . father . getHost ( ) . host _components = components . _asdict ( ) _components [ 'netloc' ] = '%s:%d' % ( reverse_proxy_host , reverse_proxy_port ) return urlparse . urlunparse ( _components . values ( ) ) | Ensures that the location port is a the given port value Used in handleHeader |
62,459 | def handleResponsePart ( self , buffer ) : self . father . write ( buffer ) self . buffer . write ( buffer ) | Sends the content to the browser and keeps a local copy of it . buffer is just a str of the content to be shown father is the intial request . |
62,460 | def getChild ( self , path , request ) : return CacheProxyResource ( self . host , self . port , self . path + '/' + urlquote ( path , safe = "" ) , self . reactor ) | This is necessary because the parent class would call proxy . ReverseProxyResource instead of CacheProxyResource |
62,461 | def getChildWithDefault ( self , path , request ) : cached_resource = self . getCachedResource ( request ) if cached_resource : reactor . callInThread ( responseInColor , request , '200 OK' , cached_resource , 'Cached' , 'underscore' ) return cached_resource if path in self . children : return self . children [ path ] return self . getChild ( path , request ) | Retrieve a static or dynamically generated child resource from me . |
62,462 | def render ( self , request ) : if self . port == 80 : host = self . host else : host = "%s:%d" % ( self . host , self . port ) request . requestHeaders . addRawHeader ( 'host' , host ) request . content . seek ( 0 , 0 ) qs = urlparse . urlparse ( request . uri ) [ 4 ] if qs : rest = self . path + '?' + qs else : rest = self . path global_self = self . getGlobalSelf ( ) clientFactory = self . proxyClientFactoryClass ( request . method , rest , request . clientproto , request . getAllHeaders ( ) , request . content . read ( ) , request , global_self ) self . reactor . connectTCP ( self . host , self . port , clientFactory ) return NOT_DONE_YET | Render a request by forwarding it to the proxied server . |
62,463 | def getGlobalSelf ( self ) : transports = self . reactor . getReaders ( ) for transport in transports : try : resource = transport . factory . resource if isinstance ( resource , self . __class__ ) and resource . port == self . port : return resource except AttributeError : pass return | This searches the reactor for the original instance of CacheProxyResource . This is necessary because with each call of getChild a new instance of CacheProxyResource is created . |
62,464 | def dataReceived ( self , data ) : try : address = self . guid data = json . loads ( data ) threads . deferToThread ( send_signal , self . dispatcher , data ) if 'hx_subscribe' in data : return self . dispatcher . subscribe ( self . transport , data ) if 'address' in data : address = data [ 'address' ] else : address = self . guid self . dispatcher . send ( address , data ) except Exception as e : raise self . dispatcher . send ( self . guid , { 'message' : data , 'error' : str ( e ) } ) | Takes data which we assume is json encoded If data has a subject_id attribute we pass that to the dispatcher as the subject_id so it will get carried through into any return communications and be identifiable to the client |
62,465 | def connectionMade ( self ) : self . transport . uid = str ( uuid . uuid1 ( ) ) self . guid = self . dispatcher . add ( self . transport ) self . dispatcher . send ( self . guid , { 'setup_connection' : self . guid } ) | establish the address of this new connection and add it to the list of sockets managed by the dispatcher |
62,466 | def generateInitd ( conf_file ) : allowed_opts = [ 'virtualenv' , 'project_path' , 'settings' , 'processes' , 'http_port' , 'cache' , 'cache_port' , 'https_port' , 'key' , 'cert' ] base_opts = [ '--daemonize' , ] options = base_opts with open ( conf_file , 'r' ) as cfg : conf = yaml . load ( cfg ) conf_specs = set ( conf . keys ( ) ) if len ( conf_specs - set ( allowed_opts ) ) : raise RuntimeError ( 'Improperly configured.' ) try : virtualenv = conf . pop ( 'virtualenv' ) project_path = conf . pop ( 'project_path' ) except : raise RuntimeError ( 'Improperly configured.' ) cache = False if 'cache' in conf : cache = conf . pop ( 'cache' ) if not cache : options . append ( '--nocache' ) workers = 0 if 'processes' in conf : processes = conf . pop ( 'processes' ) workers = int ( processes ) - 1 if workers > 0 : options += [ '--workers' , str ( workers ) ] for key , value in conf . iteritems ( ) : options += [ '--%s' % key , str ( value ) ] with open ( os . path . join ( SHARE_PATH , 'init.d.j2' ) , 'r' ) as f : TEMPLATE_FILE = f . read ( ) template = jinja2 . Template ( TEMPLATE_FILE ) initd_content = template . render ( { 'venv_path' : virtualenv , 'project_path' : project_path , 'hendrix_opts' : ' ' . join ( options ) } ) return initd_content | Helper function to generate the text content needed to create an init . d executable |
62,467 | def startResponse ( self , status , headers , excInfo = None ) : self . status = status self . headers = headers self . reactor . callInThread ( responseInColor , self . request , status , headers ) return self . write | extends startResponse to call speakerBox in a thread |
62,468 | def cacheContent ( self , request , response , buffer ) : content = buffer . getvalue ( ) code = int ( response . code ) cache_it = False uri , bust = self . processURI ( request . uri , PREFIX ) if request . method == "GET" and code / 100 == 2 and not bust : cache_control = response . headers . get ( 'cache-control' ) if cache_control : params = dict ( urlparse . parse_qsl ( cache_control ) ) if int ( params . get ( 'max-age' , '0' ) ) > 0 : cache_it = True if cache_it : content = compressBuffer ( content ) self . addResource ( content , uri , response . headers ) buffer . close ( ) | Checks if the response should be cached . Caches the content in a gzipped format given that a cache_it flag is True To be used CacheClient |
62,469 | def get_additional_services ( settings_module ) : additional_services = [ ] if hasattr ( settings_module , 'HENDRIX_SERVICES' ) : for name , module_path in settings_module . HENDRIX_SERVICES : path_to_module , service_name = module_path . rsplit ( '.' , 1 ) resource_module = importlib . import_module ( path_to_module ) additional_services . append ( ( name , getattr ( resource_module , service_name ) ) ) return additional_services | if HENDRIX_SERVICES is specified in settings_module it should be a list twisted internet services |
62,470 | def get_additional_resources ( settings_module ) : additional_resources = [ ] if hasattr ( settings_module , 'HENDRIX_CHILD_RESOURCES' ) : for module_path in settings_module . HENDRIX_CHILD_RESOURCES : path_to_module , resource_name = module_path . rsplit ( '.' , 1 ) resource_module = importlib . import_module ( path_to_module ) additional_resources . append ( getattr ( resource_module , resource_name ) ) return additional_resources | if HENDRIX_CHILD_RESOURCES is specified in settings_module it should be a list resources subclassed from hendrix . contrib . NamedResource |
62,471 | def getConf ( cls , settings , options ) : "updates the options dict to use config options in the settings module" ports = [ 'http_port' , 'https_port' , 'cache_port' ] for port_name in ports : port = getattr ( settings , port_name . upper ( ) , None ) default = getattr ( defaults , port_name . upper ( ) ) if port and options . get ( port_name ) == default : options [ port_name ] = port _opts = [ ( 'key' , 'hx_private_key' ) , ( 'cert' , 'hx_certficate' ) , ( 'wsgi' , 'wsgi_application' ) ] for opt_name , settings_name in _opts : opt = getattr ( settings , settings_name . upper ( ) , None ) if opt : options [ opt_name ] = opt if not options [ 'settings' ] : options [ 'settings' ] = environ [ 'DJANGO_SETTINGS_MODULE' ] return options | updates the options dict to use config options in the settings module |
62,472 | def addHendrix ( self ) : self . hendrix = HendrixService ( self . application , threadpool = self . getThreadPool ( ) , resources = self . resources , services = self . services , loud = self . options [ 'loud' ] ) if self . options [ "https_only" ] is not True : self . hendrix . spawn_new_server ( self . options [ 'http_port' ] , HendrixTCPService ) | Instantiates a HendrixService with this object s threadpool . It will be added as a service later . |
62,473 | def catalogServers ( self , hendrix ) : "collects a list of service names serving on TCP or SSL" for service in hendrix . services : if isinstance ( service , ( TCPServer , SSLServer ) ) : self . servers . append ( service . name ) | collects a list of service names serving on TCP or SSL |
62,474 | def run ( self ) : "sets up the desired services and runs the requested action" self . addServices ( ) self . catalogServers ( self . hendrix ) action = self . action fd = self . options [ 'fd' ] if action . startswith ( 'start' ) : chalk . blue ( self . _listening_message ( ) ) getattr ( self , action ) ( fd ) try : self . reactor . run ( ) finally : shutil . rmtree ( PID_DIR , ignore_errors = True ) elif action == 'restart' : getattr ( self , action ) ( fd = fd ) else : getattr ( self , action ) ( ) | sets up the desired services and runs the requested action |
62,475 | def setFDs ( self ) : self . childFDs = { 0 : 0 , 1 : 1 , 2 : 2 } self . fds = { } for name in self . servers : self . port = self . hendrix . get_port ( name ) fd = self . port . fileno ( ) self . childFDs [ fd ] = fd self . fds [ name ] = fd | Iterator for file descriptors . Seperated from launchworkers for clarity and readability . |
62,476 | def addSubprocess ( self , fds , name , factory ) : self . _lock . run ( self . _addSubprocess , self , fds , name , factory ) | Public method for _addSubprocess . Wraps reactor . adoptStreamConnection in a simple DeferredLock to guarantee workers play well together . |
62,477 | def disownService ( self , name ) : _service = self . hendrix . getServiceNamed ( name ) _service . disownServiceParent ( ) return _service . factory | disowns a service on hendirix by name returns a factory for use in the adoptStreamPort part of setting up multiple processes |
62,478 | def get_pid ( options ) : namespace = options [ 'settings' ] if options [ 'settings' ] else options [ 'wsgi' ] return os . path . join ( '{}' , '{}_{}.pid' ) . format ( PID_DIR , options [ 'http_port' ] , namespace . replace ( '.' , '_' ) ) | returns The default location of the pid file for process management |
62,479 | def responseInColor ( request , status , headers , prefix = 'Response' , opts = None ) : "Prints the response info in color" code , message = status . split ( None , 1 ) message = '%s [%s] => Request %s %s %s on pid %d' % ( prefix , code , str ( request . host ) , request . method , request . path , os . getpid ( ) ) signal = int ( code ) / 100 if signal == 2 : chalk . green ( message , opts = opts ) elif signal == 3 : chalk . blue ( message , opts = opts ) else : chalk . red ( message , opts = opts ) | Prints the response info in color |
62,480 | def addLocalCacheService ( self ) : "adds a CacheService to the instatiated HendrixService" _cache = self . getCacheService ( ) _cache . setName ( 'cache_proxy' ) _cache . setServiceParent ( self . hendrix ) | adds a CacheService to the instatiated HendrixService |
62,481 | def addGlobalServices ( self ) : if self . options . get ( 'global_cache' ) and self . options . get ( 'cache' ) : _cache = self . getCacheService ( ) _cache . startService ( ) | This is where we put service that we don t want to be duplicated on worker subprocesses |
62,482 | def putNamedChild ( self , res ) : try : EmptyResource = resource . Resource namespace = res . namespace parts = namespace . strip ( '/' ) . split ( '/' ) parent = self children = self . children for name in parts [ : - 1 ] : child = children . get ( name ) if not child : child = EmptyResource ( ) parent . putChild ( name , child ) parent = child children = parent . children name = parts [ - 1 ] if children . get ( name ) : self . logger . warn ( 'A resource already exists at this path. Check ' 'your resources list to ensure each path is ' 'unique. The previous resource will be overridden.' ) parent . putChild ( name , res ) except AttributeError : msg = ( '%r improperly configured. additional_resources instances must' ' have a namespace attribute' ) % resource raise AttributeError ( msg , None , sys . exc_info ( ) [ 2 ] ) | putNamedChild takes either an instance of hendrix . contrib . NamedResource or any resource . Resource with a namespace attribute as a means of allowing application level control of resource namespacing . |
62,483 | def send_json_message ( address , message , ** kwargs ) : data = { 'message' : message , } if not kwargs . get ( 'subject_id' ) : data [ 'subject_id' ] = address data . update ( kwargs ) hxdispatcher . send ( address , data ) | a shortcut for message sending |
62,484 | def send_callback_json_message ( value , * args , ** kwargs ) : if value : kwargs [ 'result' ] = value send_json_message ( args [ 0 ] , args [ 1 ] , ** kwargs ) return value | useful for sending messages from callbacks as it puts the result of the callback in the dict for serialization |
62,485 | def send ( self , message ) : for transport in self . transports . values ( ) : transport . protocol . sendMessage ( message ) | sends whatever it is to each transport |
62,486 | def remove ( self , transport ) : if transport . uid in self . transports : del ( self . transports [ transport . uid ] ) | removes a transport if a member of this group |
62,487 | def add ( self , transport , address = None ) : if not address : address = str ( uuid . uuid1 ( ) ) if address in self . recipients : self . recipients [ address ] . add ( transport ) else : self . recipients [ address ] = RecipientManager ( transport , address ) return address | add a new recipient to be addressable by this MessageDispatcher generate a new uuid address if one is not specified |
62,488 | def remove ( self , transport ) : recipients = copy . copy ( self . recipients ) for address , recManager in recipients . items ( ) : recManager . remove ( transport ) if not len ( recManager . transports ) : del self . recipients [ address ] | removes a transport from all channels to which it belongs . |
62,489 | def send ( self , address , data_dict ) : if type ( address ) == list : recipients = [ self . recipients . get ( rec ) for rec in address ] else : recipients = [ self . recipients . get ( address ) ] if recipients : for recipient in recipients : if recipient : recipient . send ( json . dumps ( data_dict ) . encode ( ) ) | address can either be a string or a list of strings |
62,490 | def subscribe ( self , transport , data ) : self . add ( transport , address = data . get ( 'hx_subscribe' ) . encode ( ) ) self . send ( data [ 'hx_subscribe' ] , { 'message' : "%r is listening" % transport } ) | adds a transport to a channel |
62,491 | def cleanOptions ( options ) : _reload = options . pop ( 'reload' ) dev = options . pop ( 'dev' ) opts = [ ] store_true = [ '--nocache' , '--global_cache' , '--quiet' , '--loud' ] store_false = [ ] for key , value in options . items ( ) : key = '--' + key if ( key in store_true and value ) or ( key in store_false and not value ) : opts += [ key , ] elif value : opts += [ key , str ( value ) ] return _reload , opts | Takes an options dict and returns a tuple containing the daemonize boolean the reload boolean and the parsed list of cleaned options as would be expected to be passed to hx |
62,492 | def options ( argv = [ ] ) : parser = HendrixOptionParser parsed_args = parser . parse_args ( argv ) return vars ( parsed_args [ 0 ] ) | A helper function that returns a dictionary of the default key - values pairs |
62,493 | def remove ( self , participant ) : for topic , participants in list ( self . _participants_by_topic . items ( ) ) : self . unsubscribe ( participant , topic ) if not participants : del self . _participants_by_topic [ topic ] | Unsubscribe this participant from all topic to which it is subscribed . |
62,494 | def addSSLService ( self ) : "adds a SSLService to the instaitated HendrixService" https_port = self . options [ 'https_port' ] self . tls_service = HendrixTCPServiceWithTLS ( https_port , self . hendrix . site , self . key , self . cert , self . context_factory , self . context_factory_kwargs ) self . tls_service . setServiceParent ( self . hendrix ) | adds a SSLService to the instaitated HendrixService |
62,495 | def addResource ( self , content , uri , headers ) : self . cache [ uri ] = CachedResource ( content , headers ) | Adds the a hendrix . contrib . cache . resource . CachedResource to the ReverseProxy cache connection |
62,496 | def decompressBuffer ( buffer ) : "complements the compressBuffer function in CacheClient" zbuf = cStringIO . StringIO ( buffer ) zfile = gzip . GzipFile ( fileobj = zbuf ) deflated = zfile . read ( ) zfile . close ( ) return deflated | complements the compressBuffer function in CacheClient |
62,497 | def getMaxAge ( self ) : "get the max-age in seconds from the saved headers data" max_age = 0 cache_control = self . headers . get ( 'cache-control' ) if cache_control : params = dict ( urlparse . parse_qsl ( cache_control ) ) max_age = int ( params . get ( 'max-age' , '0' ) ) return max_age | get the max - age in seconds from the saved headers data |
62,498 | def getLastModified ( self ) : "returns the GMT last-modified datetime or None" last_modified = self . headers . get ( 'last-modified' ) if last_modified : last_modified = self . convertTimeString ( last_modified ) return last_modified | returns the GMT last - modified datetime or None |
62,499 | def getDate ( self ) : "returns the GMT response datetime or None" date = self . headers . get ( 'date' ) if date : date = self . convertTimeString ( date ) return date | returns the GMT response datetime or None |
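
Each row above pairs a whitespace-tokenized Python function (the question column) with a short natural-language summary of its docstring (the target column). As a rough illustration of how records of this shape could be consumed, the sketch below iterates such rows and formats them into input/output pairs for a code-summarization model; the sample record values, the `to_example` helper, and the "summarize:" prompt prefix are illustrative assumptions, not something defined by this dataset page.

```python
# Hypothetical sketch: consuming records shaped like the rows above (idx, question, target).
# The example record and helper below are illustrative assumptions only.
records = [
    {
        "idx": 62400,
        "question": "def encode_leaf_node ( value ) : validate_is_bytes ( value ) ...",
        "target": "Serializes a leaf node",
    },
    # ... further rows of the same shape ...
]

def to_example(record: dict) -> tuple[str, str]:
    """Turn one record into an (input, output) pair for code summarization."""
    source = " ".join(record["question"].split())  # collapse the dataset's token-level spacing
    return f"summarize: {source}", record["target"]

for rec in records:
    model_input, expected_summary = to_example(rec)
    print(model_input[:60], "->", expected_summary)
```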