Dataset schema (each record below lists these fields in order):
  query      string, 5 to 1.23k chars (natural-language description of the code)
  positive   string, 53 to 15.2k chars (the matching code snippet)
  id_        int64, 0 to 252k (record id)
  task_name  string, 87 to 242 chars (source URL of the positive snippet)
  negative   sequence of 20 to 553 tokens (a tokenized non-matching code snippet)
Show stats when pings are done
def dump_stats(myStats):
    print("\n----%s PYTHON PING Statistics----" % (myStats.thisIP))
    if myStats.pktsSent > 0:
        myStats.fracLoss = (myStats.pktsSent - myStats.pktsRcvd) / myStats.pktsSent
    print(("%d packets transmitted, %d packets received, "
           "%0.1f%% packet loss") % (myStats.pktsSent,
                                     myStats.pktsRcvd,
                                     100.0 * myStats.fracLoss))
    if myStats.pktsRcvd > 0:
        print("round-trip (ms) min/avg/max = %d/%0.1f/%d" % (
            myStats.minTime,
            myStats.totTime / myStats.pktsRcvd,
            myStats.maxTime))
    print("")
    return
100
https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/scripts/icmp_send_msg.py#L470-L495
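For context, a minimal sketch of how a stats object like the one dump_stats expects could be driven; the PingStats container and its field values are hypothetical, not part of the snippet above.

class PingStats:
    def __init__(self, thisIP):
        self.thisIP = thisIP
        self.pktsSent = 0
        self.pktsRcvd = 0
        self.minTime = 999999999
        self.maxTime = 0
        self.totTime = 0
        self.fracLoss = 1.0

stats = PingStats("127.0.0.1")
stats.pktsSent, stats.pktsRcvd = 4, 4
stats.minTime, stats.maxTime, stats.totTime = 10, 25, 60
dump_stats(stats)  # prints counts, loss percentage, and min/avg/max RTT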
[ "def", "slice_reStructuredText", "(", "input", ",", "output", ")", ":", "LOGGER", ".", "info", "(", "\"{0} | Slicing '{1}' file!\"", ".", "format", "(", "slice_reStructuredText", ".", "__name__", ",", "input", ")", ")", "file", "=", "File", "(", "input", ")", "file", ".", "cache", "(", ")", "slices", "=", "OrderedDict", "(", ")", "for", "i", ",", "line", "in", "enumerate", "(", "file", ".", "content", ")", ":", "search", "=", "re", ".", "search", "(", "r\"^\\.\\. \\.(\\w+)\"", ",", "line", ")", "if", "search", ":", "slices", "[", "search", ".", "groups", "(", ")", "[", "0", "]", "]", "=", "i", "+", "SLICE_ATTRIBUTE_INDENT", "index", "=", "0", "for", "slice", ",", "slice_start", "in", "slices", ".", "iteritems", "(", ")", ":", "slice_file", "=", "File", "(", "os", ".", "path", ".", "join", "(", "output", ",", "\"{0}.{1}\"", ".", "format", "(", "slice", ",", "OUTPUT_FILES_EXTENSION", ")", ")", ")", "LOGGER", ".", "info", "(", "\"{0} | Outputing '{1}' file!\"", ".", "format", "(", "slice_reStructuredText", ".", "__name__", ",", "slice_file", ".", "path", ")", ")", "slice_end", "=", "index", "<", "(", "len", "(", "slices", ".", "values", "(", ")", ")", "-", "1", ")", "and", "slices", ".", "values", "(", ")", "[", "index", "+", "1", "]", "-", "SLICE_ATTRIBUTE_INDENT", "or", "len", "(", "file", ".", "content", ")", "for", "i", "in", "range", "(", "slice_start", ",", "slice_end", ")", ":", "skip_line", "=", "False", "for", "item", "in", "CONTENT_DELETION", ":", "if", "re", ".", "search", "(", "item", ",", "file", ".", "content", "[", "i", "]", ")", ":", "LOGGER", ".", "info", "(", "\"{0} | Skipping Line '{1}' with '{2}' content!\"", ".", "format", "(", "slice_reStructuredText", ".", "__name__", ",", "i", ",", "item", ")", ")", "skip_line", "=", "True", "break", "if", "skip_line", ":", "continue", "line", "=", "file", ".", "content", "[", "i", "]", "for", "pattern", ",", "value", "in", "STATEMENT_SUBSTITUTE", ".", "iteritems", "(", ")", ":", "line", "=", "re", ".", "sub", "(", "pattern", ",", "value", ",", "line", ")", "search", "=", "re", ".", "search", "(", "r\"- `[\\w ]+`_ \\(([\\w\\.]+)\\)\"", ",", "line", ")", "if", "search", ":", "LOGGER", ".", "info", "(", "\"{0} | Updating Line '{1}' link: '{2}'!\"", ".", "format", "(", "slice_reStructuredText", ".", "__name__", ",", "i", ",", "search", ".", "groups", "(", ")", "[", "0", "]", ")", ")", "line", "=", "\"- :ref:`{0}`\\n\"", ".", "format", "(", "search", ".", "groups", "(", ")", "[", "0", "]", ")", "slice_file", ".", "content", ".", "append", "(", "line", ")", "slice_file", ".", "write", "(", ")", "index", "+=", "1", "return", "True" ]
Is the bootstrap-py package updatable?
def updatable(self):
    if self.latest_version > self.current_version:
        updatable_version = self.latest_version
    else:
        updatable_version = False
    return updatable_version
101
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/update.py#L29-L35
[ "def", "delete_binding", "(", "self", ",", "vhost", ",", "exchange", ",", "queue", ",", "rt_key", ")", ":", "vhost", "=", "quote", "(", "vhost", ",", "''", ")", "exchange", "=", "quote", "(", "exchange", ",", "''", ")", "queue", "=", "quote", "(", "queue", ",", "''", ")", "body", "=", "''", "path", "=", "Client", ".", "urls", "[", "'rt_bindings_between_exch_queue'", "]", "%", "(", "vhost", ",", "exchange", ",", "queue", ",", "rt_key", ")", "return", "self", ".", "_call", "(", "path", ",", "'DELETE'", ",", "headers", "=", "Client", ".", "json_headers", ")" ]
Show update message.
def show_message(self):
    print('current version: {current_version}\n'
          'latest version : {latest_version}'.format(
              current_version=self.current_version,
              latest_version=self.latest_version))
102
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/update.py#L37-L43
[ "def", "verify", "(", "self", ")", ":", "c", "=", "self", ".", "database", ".", "cursor", "(", ")", "non_exist", "=", "set", "(", ")", "no_db_entry", "=", "set", "(", "os", ".", "listdir", "(", "self", ".", "cache_dir", ")", ")", "try", ":", "no_db_entry", ".", "remove", "(", "'file_database.db'", ")", "no_db_entry", ".", "remove", "(", "'file_database.db-journal'", ")", "except", ":", "pass", "for", "row", "in", "c", ".", "execute", "(", "\"SELECT path FROM files\"", ")", ":", "path", "=", "row", "[", "0", "]", "repo_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "cache_dir", ",", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "repo_path", ")", ":", "no_db_entry", ".", "remove", "(", "path", ")", "else", ":", "non_exist", ".", "add", "(", "path", ")", "if", "len", "(", "non_exist", ")", ">", "0", ":", "raise", "Exception", "(", "\"Found {} records in db for files that don't exist: {}\"", ".", "format", "(", "len", "(", "non_exist", ")", ",", "','", ".", "join", "(", "non_exist", ")", ")", ")", "if", "len", "(", "no_db_entry", ")", ">", "0", ":", "raise", "Exception", "(", "\"Found {} files that don't have db entries: {}\"", ".", "format", "(", "len", "(", "no_db_entry", ")", ",", "','", ".", "join", "(", "no_db_entry", ")", ")", ")" ]
Traverse the input OTU-sequence file, collect the non-unique OTU IDs, and file the sequences associated with them under the unique OTU ID as defined by the input matrix.
def condense_otus(otuF, nuniqueF):
    uniqueOTUs = set()
    nuOTUs = {}
    # parse non-unique otu matrix
    for line in nuniqueF:
        line = line.split()
        uOTU = line[0]
        for nuOTU in line[1:]:
            nuOTUs[nuOTU] = uOTU
        uniqueOTUs.add(uOTU)
    otuFilter = defaultdict(list)
    # parse otu sequence file
    for line in otuF:
        line = line.split()
        otuID, seqIDs = line[0], line[1:]
        if otuID in uniqueOTUs:
            otuFilter[otuID].extend(seqIDs)
        elif otuID in nuOTUs:
            otuFilter[nuOTUs[otuID]].extend(seqIDs)
    return otuFilter
103
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/pick_otus_condense.py#L14-L51
[ "def", "set_energy_range", "(", "self", ",", "logemin", ",", "logemax", ")", ":", "if", "logemin", "is", "None", ":", "logemin", "=", "self", ".", "log_energies", "[", "0", "]", "if", "logemax", "is", "None", ":", "logemax", "=", "self", ".", "log_energies", "[", "-", "1", "]", "imin", "=", "int", "(", "utils", ".", "val_to_edge", "(", "self", ".", "log_energies", ",", "logemin", ")", "[", "0", "]", ")", "imax", "=", "int", "(", "utils", ".", "val_to_edge", "(", "self", ".", "log_energies", ",", "logemax", ")", "[", "0", "]", ")", "if", "imin", "-", "imax", "==", "0", ":", "imin", "=", "int", "(", "len", "(", "self", ".", "log_energies", ")", "-", "1", ")", "imax", "=", "int", "(", "len", "(", "self", ".", "log_energies", ")", "-", "1", ")", "klims", "=", "self", ".", "like", ".", "logLike", ".", "klims", "(", ")", "if", "imin", "!=", "klims", "[", "0", "]", "or", "imax", "!=", "klims", "[", "1", "]", ":", "self", ".", "like", ".", "selectEbounds", "(", "imin", ",", "imax", ")", "return", "np", ".", "array", "(", "[", "self", ".", "log_energies", "[", "imin", "]", ",", "self", ".", "log_energies", "[", "imax", "]", "]", ")" ]
determine if a read overlaps with an rRNA region; if so, count the overlapping bases
def rna_bases(rna_cov, scaffold, bases, line):
    start = int(line[3])
    stop = start + bases - 1
    if scaffold not in rna_cov:
        return rna_cov
    for pos in rna_cov[scaffold][2]:
        ol = get_overlap([start, stop], pos)
        rna_cov[scaffold][0] += ol
    return rna_cov
104
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L18-L29
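rna_bases relies on a get_overlap helper that is not shown in the snippet; one plausible definition (an assumption here, not the repository's code) counts the positions shared by two inclusive ranges:

def get_overlap(a, b):
    # number of positions shared by inclusive ranges a=[start, stop] and b=[start, stop]
    return max(0, min(a[1], b[1]) - max(a[0], b[0]) + 1)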
[ "def", "_create_download_failed_message", "(", "exception", ",", "url", ")", ":", "message", "=", "'Failed to download from:\\n{}\\nwith {}:\\n{}'", ".", "format", "(", "url", ",", "exception", ".", "__class__", ".", "__name__", ",", "exception", ")", "if", "_is_temporal_problem", "(", "exception", ")", ":", "if", "isinstance", "(", "exception", ",", "requests", ".", "ConnectionError", ")", ":", "message", "+=", "'\\nPlease check your internet connection and try again.'", "else", ":", "message", "+=", "'\\nThere might be a problem in connection or the server failed to process '", "'your request. Please try again.'", "elif", "isinstance", "(", "exception", ",", "requests", ".", "HTTPError", ")", ":", "try", ":", "server_message", "=", "''", "for", "elem", "in", "decode_data", "(", "exception", ".", "response", ".", "content", ",", "MimeType", ".", "XML", ")", ":", "if", "'ServiceException'", "in", "elem", ".", "tag", "or", "'Message'", "in", "elem", ".", "tag", ":", "server_message", "+=", "elem", ".", "text", ".", "strip", "(", "'\\n\\t '", ")", "except", "ElementTree", ".", "ParseError", ":", "server_message", "=", "exception", ".", "response", ".", "text", "message", "+=", "'\\nServer response: \"{}\"'", ".", "format", "(", "server_message", ")", "return", "message" ]
parse ggKbase scaffold-to-bin mapping: scaffolds-to-bins and bins-to-scaffolds
def parse_s2bins(s2bins):
    s2b = {}
    b2s = {}
    for line in s2bins:
        line = line.strip().split()
        s, b = line[0], line[1]
        if 'UNK' in b:
            continue
        if len(line) > 2:
            g = ' '.join(line[2:])
        else:
            g = 'n/a'
        b = '%s\t%s' % (b, g)
        s2b[s] = b
        if b not in b2s:
            b2s[b] = []
        b2s[b].append(s)
    return s2b, b2s
105
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L31-L52
[ "def", "parse_stats_file", "(", "self", ",", "file_name", ")", ":", "stats", "=", "{", "}", "try", ":", "with", "open", "(", "file_name", ",", "\"r\"", ")", "as", "fhandle", ":", "fbuffer", "=", "[", "]", "save_buffer", "=", "False", "for", "line", "in", "fhandle", ":", "line", "=", "line", ".", "rstrip", "(", "\"\\n\"", ")", "line", "=", "self", ".", "_trim", "(", "line", ")", "if", "line", "==", "\"\"", "or", "line", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "elif", "line", ".", "endswith", "(", "\"{\"", ")", ":", "save_buffer", "=", "True", "fbuffer", ".", "append", "(", "line", ")", "continue", "elif", "line", ".", "endswith", "(", "\"}\"", ")", ":", "tmp_dict", "=", "self", ".", "_parse_config_buffer", "(", "fbuffer", ")", "fbuffer", "=", "None", "fbuffer", "=", "list", "(", ")", "if", "len", "(", "tmp_dict", ")", "<", "1", ":", "continue", "if", "tmp_dict", "[", "\"_type\"", "]", "==", "\"info\"", ":", "stats", "[", "\"info\"", "]", "=", "tmp_dict", "elif", "tmp_dict", "[", "\"_type\"", "]", "==", "\"programstatus\"", ":", "stats", "[", "\"programstatus\"", "]", "=", "tmp_dict", "else", ":", "entity_type", "=", "tmp_dict", "[", "\"_type\"", "]", "if", "entity_type", "not", "in", "stats", ".", "keys", "(", ")", ":", "stats", "[", "entity_type", "]", "=", "[", "]", "stats", "[", "entity_type", "]", ".", "append", "(", "tmp_dict", ")", "continue", "elif", "save_buffer", "is", "True", ":", "fbuffer", ".", "append", "(", "line", ")", "except", "Exception", "as", "exception", ":", "self", ".", "log", ".", "info", "(", "\"Caught exception: %s\"", ",", "exception", ")", "return", "stats" ]
remove any bins that don't have 16S
def filter_missing_rna(s2bins, bins2s, rna_cov):
    for bin, scaffolds in list(bins2s.items()):
        c = 0
        for s in scaffolds:
            if s in rna_cov:
                c += 1
        if c == 0:
            del bins2s[bin]
    for scaffold, bin in list(s2bins.items()):
        if bin not in bins2s:
            del s2bins[scaffold]
    return s2bins, bins2s
106
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L76-L90
[ "def", "_parse_ftp_error", "(", "error", ")", ":", "# type: (ftplib.Error) -> Tuple[Text, Text]", "code", ",", "_", ",", "message", "=", "text_type", "(", "error", ")", ".", "partition", "(", "\" \"", ")", "return", "code", ",", "message" ]
calculate bin coverage
def calc_bin_cov(scaffolds, cov):
    bases = sum([cov[i][0] for i in scaffolds if i in cov])
    length = sum([cov[i][1] for i in scaffolds if i in cov])
    if length == 0:
        return 0
    return float(float(bases) / float(length))
107
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L92-L100
[ "def", "follow", "(", "resources", ",", "*", "*", "kwargs", ")", ":", "# subscribe", "client", "=", "redis", ".", "Redis", "(", "decode_responses", "=", "True", ",", "*", "*", "kwargs", ")", "resources", "=", "resources", "if", "resources", "else", "find_resources", "(", "client", ")", "channels", "=", "[", "Keys", ".", "EXTERNAL", ".", "format", "(", "resource", ")", "for", "resource", "in", "resources", "]", "if", "resources", ":", "subscription", "=", "Subscription", "(", "client", ",", "*", "channels", ")", "# listen", "while", "resources", ":", "try", ":", "message", "=", "subscription", ".", "listen", "(", ")", "if", "message", "[", "'type'", "]", "==", "'message'", ":", "print", "(", "message", "[", "'data'", "]", ")", "except", "KeyboardInterrupt", ":", "break" ]
Make sure at least one translation has been filled in. If a default language has been specified, make sure that it exists amongst the translations.
def clean(self):
    # First make sure the super's clean method is called upon.
    super(TranslationFormSet, self).clean()
    if settings.HIDE_LANGUAGE:
        return
    if len(self.forms) > 0:
        # If a default language has been provided, make sure a translation
        # is available
        if settings.DEFAULT_LANGUAGE and not any(self.errors):
            # Don't bother validating the formset unless each form is
            # valid on its own. Reference:
            # http://docs.djangoproject.com/en/dev/topics/forms/formsets/#custom-formset-validation
            for form in self.forms:
                language_code = form.cleaned_data.get('language_code', None)
                if language_code == settings.DEFAULT_LANGUAGE:
                    # All is good, don't bother checking any further
                    return
            raise forms.ValidationError(_(
                'No translation provided for default language \'%s\'.'
            ) % settings.DEFAULT_LANGUAGE)
    else:
        raise forms.ValidationError(_(
            'At least one translation should be provided.'
        ))
108
https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/forms.py#L19-L58
[ "def", "remove_handlers_bound_to_instance", "(", "self", ",", "obj", ")", ":", "for", "handler", "in", "self", ".", "handlers", ":", "if", "handler", ".", "im_self", "==", "obj", ":", "self", "-=", "handler" ]
If a default language has been set and is still available in self.available_languages, return it and remove it from the list.
def _get_default_language(self):
    assert hasattr(self, 'available_languages'), \
        'No available languages have been generated.'
    assert len(self.available_languages) > 0, \
        'No available languages to select from.'
    if (settings.DEFAULT_LANGUAGE and
            settings.DEFAULT_LANGUAGE in self.available_languages) or \
            ('language_code' not in self.form.base_fields):
        # Default language still available
        self.available_languages.remove(settings.DEFAULT_LANGUAGE)
        return settings.DEFAULT_LANGUAGE
    else:
        # Select the first item and return it
        return self.available_languages.pop(0)
109
https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/forms.py#L68-L94
[ "def", "compute_busiest_date", "(", "feed", ":", "\"Feed\"", ",", "dates", ":", "List", "[", "str", "]", ")", "->", "str", ":", "f", "=", "feed", ".", "compute_trip_activity", "(", "dates", ")", "s", "=", "[", "(", "f", "[", "c", "]", ".", "sum", "(", ")", ",", "c", ")", "for", "c", "in", "f", ".", "columns", "if", "c", "!=", "\"trip_id\"", "]", "return", "max", "(", "s", ")", "[", "1", "]" ]
Construct the form, overriding the initial value for language_code.
def _construct_form(self, i, **kwargs):
    if not settings.HIDE_LANGUAGE:
        self._construct_available_languages()
    form = super(TranslationFormSet, self)._construct_form(i, **kwargs)
    if settings.HIDE_LANGUAGE:
        form.instance.language_code = settings.DEFAULT_LANGUAGE
    else:
        language_code = form.instance.language_code
        if language_code:
            logger.debug(
                u'Removing translation choice %s for instance %s'
                u' in form %d', language_code, form.instance, i)
            self.available_languages.remove(language_code)
        else:
            initial_language_code = self._get_default_language()
            logger.debug(
                u'Preselecting language code %s for form %d',
                initial_language_code, i)
            form.initial['language_code'] = initial_language_code
    return form
110
https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/forms.py#L96-L128
[ "def", "_count_devices", "(", "self", ")", ":", "number_of_devices", "=", "ctypes", ".", "c_uint", "(", ")", "if", "ctypes", ".", "windll", ".", "user32", ".", "GetRawInputDeviceList", "(", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_int", ")", "(", ")", ",", "ctypes", ".", "byref", "(", "number_of_devices", ")", ",", "ctypes", ".", "sizeof", "(", "RawInputDeviceList", ")", ")", "==", "-", "1", ":", "warn", "(", "\"Call to GetRawInputDeviceList was unsuccessful.\"", "\"We have no idea if a mouse or keyboard is attached.\"", ",", "RuntimeWarning", ")", "return", "devices_found", "=", "(", "RawInputDeviceList", "*", "number_of_devices", ".", "value", ")", "(", ")", "if", "ctypes", ".", "windll", ".", "user32", ".", "GetRawInputDeviceList", "(", "devices_found", ",", "ctypes", ".", "byref", "(", "number_of_devices", ")", ",", "ctypes", ".", "sizeof", "(", "RawInputDeviceList", ")", ")", "==", "-", "1", ":", "warn", "(", "\"Call to GetRawInputDeviceList was unsuccessful.\"", "\"We have no idea if a mouse or keyboard is attached.\"", ",", "RuntimeWarning", ")", "return", "for", "device", "in", "devices_found", ":", "if", "device", ".", "dwType", "==", "0", ":", "self", ".", "_raw_device_counts", "[", "'mice'", "]", "+=", "1", "elif", "device", ".", "dwType", "==", "1", ":", "self", ".", "_raw_device_counts", "[", "'keyboards'", "]", "+=", "1", "elif", "device", ".", "dwType", "==", "2", ":", "self", ".", "_raw_device_counts", "[", "'otherhid'", "]", "+=", "1", "else", ":", "self", ".", "_raw_device_counts", "[", "'unknown'", "]", "+=", "1" ]
merge separate fastq files
def fq_merge(R1, R2):
    c = itertools.cycle([1, 2, 3, 4])
    for r1, r2 in zip(R1, R2):
        n = next(c)
        if n == 1:
            pair = [[], []]
        pair[0].append(r1.strip())
        pair[1].append(r2.strip())
        if n == 4:
            yield pair
111
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/fastq_merge.py#L13-L25
[ "def", "AddSession", "(", "self", ",", "session_id", ",", "seat", ",", "uid", ",", "username", ",", "active", ")", ":", "seat_path", "=", "dbus", ".", "ObjectPath", "(", "'/org/freedesktop/login1/seat/'", "+", "seat", ")", "if", "seat_path", "not", "in", "mockobject", ".", "objects", ":", "self", ".", "AddSeat", "(", "seat", ")", "user_path", "=", "dbus", ".", "ObjectPath", "(", "'/org/freedesktop/login1/user/%i'", "%", "uid", ")", "if", "user_path", "not", "in", "mockobject", ".", "objects", ":", "self", ".", "AddUser", "(", "uid", ",", "username", ",", "active", ")", "session_path", "=", "dbus", ".", "ObjectPath", "(", "'/org/freedesktop/login1/session/'", "+", "session_id", ")", "if", "session_path", "in", "mockobject", ".", "objects", ":", "raise", "dbus", ".", "exceptions", ".", "DBusException", "(", "'Session %s already exists'", "%", "session_id", ",", "name", "=", "MOCK_IFACE", "+", "'.SessionExists'", ")", "self", ".", "AddObject", "(", "session_path", ",", "'org.freedesktop.login1.Session'", ",", "{", "'Controllers'", ":", "dbus", ".", "Array", "(", "[", "]", ",", "signature", "=", "'s'", ")", ",", "'ResetControllers'", ":", "dbus", ".", "Array", "(", "[", "]", ",", "signature", "=", "'s'", ")", ",", "'Active'", ":", "active", ",", "'IdleHint'", ":", "False", ",", "'KillProcesses'", ":", "False", ",", "'Remote'", ":", "False", ",", "'Class'", ":", "'user'", ",", "'DefaultControlGroup'", ":", "'systemd:/user/%s/%s'", "%", "(", "username", ",", "session_id", ")", ",", "'Display'", ":", "os", ".", "getenv", "(", "'DISPLAY'", ",", "''", ")", ",", "'Id'", ":", "session_id", ",", "'Name'", ":", "username", ",", "'RemoteHost'", ":", "''", ",", "'RemoteUser'", ":", "''", ",", "'Service'", ":", "'dbusmock'", ",", "'State'", ":", "(", "active", "and", "'active'", "or", "'online'", ")", ",", "'TTY'", ":", "''", ",", "'Type'", ":", "'test'", ",", "'Seat'", ":", "(", "seat", ",", "seat_path", ")", ",", "'User'", ":", "(", "dbus", ".", "UInt32", "(", "uid", ")", ",", "user_path", ")", ",", "'Audit'", ":", "dbus", ".", "UInt32", "(", "0", ")", ",", "'Leader'", ":", "dbus", ".", "UInt32", "(", "1", ")", ",", "'VTNr'", ":", "dbus", ".", "UInt32", "(", "1", ")", ",", "'IdleSinceHint'", ":", "dbus", ".", "UInt64", "(", "0", ")", ",", "'IdleSinceHintMonotonic'", ":", "dbus", ".", "UInt64", "(", "0", ")", ",", "'Timestamp'", ":", "dbus", ".", "UInt64", "(", "42", ")", ",", "'TimestampMonotonic'", ":", "dbus", ".", "UInt64", "(", "42", ")", ",", "}", ",", "[", "(", "'Activate'", ",", "''", ",", "''", ",", "''", ")", ",", "(", "'Kill'", ",", "'ss'", ",", "''", ",", "''", ")", ",", "(", "'Lock'", ",", "''", ",", "''", ",", "''", ")", ",", "(", "'SetIdleHint'", ",", "'b'", ",", "''", ",", "''", ")", ",", "(", "'Terminate'", ",", "''", ",", "''", ",", "''", ")", ",", "(", "'Unlock'", ",", "''", ",", "''", ",", "''", ")", ",", "]", ")", "# add session to seat", "obj_seat", "=", "mockobject", ".", "objects", "[", "seat_path", "]", "cur_sessions", "=", "obj_seat", ".", "Get", "(", "'org.freedesktop.login1.Seat'", ",", "'Sessions'", ")", "cur_sessions", ".", "append", "(", "(", "session_id", ",", "session_path", ")", ")", "obj_seat", ".", "Set", "(", "'org.freedesktop.login1.Seat'", ",", "'Sessions'", ",", "cur_sessions", ")", "obj_seat", ".", "Set", "(", "'org.freedesktop.login1.Seat'", ",", "'ActiveSession'", ",", "(", "session_id", ",", "session_path", ")", ")", "# add session to user", "obj_user", "=", "mockobject", ".", "objects", "[", "user_path", "]", "cur_sessions", "=", "obj_user", ".", "Get", "(", 
"'org.freedesktop.login1.User'", ",", "'Sessions'", ")", "cur_sessions", ".", "append", "(", "(", "session_id", ",", "session_path", ")", ")", "obj_user", ".", "Set", "(", "'org.freedesktop.login1.User'", ",", "'Sessions'", ",", "cur_sessions", ")", "return", "session_path" ]
Creates hash ring.
def _build_circle(self):
    total_weight = 0
    for node in self._nodes:
        total_weight += self._weights.get(node, 1)
    for node in self._nodes:
        weight = self._weights.get(node, 1)
        ks = math.floor((40 * len(self._nodes) * weight) / total_weight)
        for i in xrange(0, int(ks)):
            b_key = self._md5_digest('%s-%s-salt' % (node, i))
            for l in xrange(0, 4):
                key = ((b_key[3 + l * 4] << 24) |
                       (b_key[2 + l * 4] << 16) |
                       (b_key[1 + l * 4] << 8) |
                       b_key[l * 4])
                self._hashring[key] = node
                self._sorted_keys.append(key)
    self._sorted_keys.sort()
112
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/contrib/ketama.py#L35-L60
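The ring built by _build_circle is typically consumed by walking _sorted_keys clockwise from a key's hash; a minimal lookup sketch, assuming the _hashring/_sorted_keys structures above and the _gen_key method shown further below (the get_node name is illustrative, not confirmed by the snippet):

import bisect

def get_node(self, string_key):
    # hash the key, then take the first ring position at or after that hash,
    # wrapping around to the start of the sorted key list if necessary
    pos = bisect.bisect(self._sorted_keys, self._gen_key(string_key))
    if pos == len(self._sorted_keys):
        pos = 0
    return self._hashring[self._sorted_keys[pos]]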
[ "def", "libvlc_video_set_subtitle_file", "(", "p_mi", ",", "psz_subtitle", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_video_set_subtitle_file'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_video_set_subtitle_file'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", ")", ",", "None", ",", "ctypes", ".", "c_int", ",", "MediaPlayer", ",", "ctypes", ".", "c_char_p", ")", "return", "f", "(", "p_mi", ",", "psz_subtitle", ")" ]
Return a long integer for a given key that represents its place on the hash ring.
def _gen_key(self, key):
    b_key = self._md5_digest(key)
    return self._hashi(b_key, lambda x: x)
113
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/contrib/ketama.py#L78-L84
[ "def", "evaluate", "(", "self", ",", "verbose", "=", "False", ",", "decode", "=", "True", ",", "passes", "=", "None", ",", "num_threads", "=", "1", ",", "apply_experimental_transforms", "=", "True", ")", ":", "if", "isinstance", "(", "self", ".", "weld_expr", ",", "WeldObject", ")", ":", "old_context", "=", "dict", "(", "self", ".", "weld_expr", ".", "context", ")", "for", "key", "in", "self", ".", "weld_expr", ".", "context", ".", "keys", "(", ")", ":", "if", "LazyResult", ".", "_cache", ".", "contains", "(", "key", ")", ":", "self", ".", "weld_expr", ".", "context", "[", "key", "]", "=", "LazyResult", ".", "_cache", ".", "get", "(", "key", ")", "evaluated", "=", "self", ".", "weld_expr", ".", "evaluate", "(", "to_weld_vec", "(", "self", ".", "weld_type", ",", "self", ".", "ndim", ")", ",", "verbose", ",", "decode", ",", "passes", ",", "num_threads", ",", "apply_experimental_transforms", ")", "self", ".", "weld_expr", ".", "context", "=", "old_context", "return", "evaluated", "else", ":", "return", "self", ".", "weld_expr" ]
Returns True if there exists a custom image for app_id.
def has_custom_image(user_context, app_id):
    possible_paths = _valid_custom_image_paths(user_context, app_id)
    return any(map(os.path.exists, possible_paths))
114
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/grid.py#L32-L35
[ "def", "add_worksheet_progress_percentage", "(", "portal", ")", ":", "add_metadata", "(", "portal", ",", "CATALOG_WORKSHEET_LISTING", ",", "\"getProgressPercentage\"", ")", "logger", ".", "info", "(", "\"Reindexing Worksheets ...\"", ")", "query", "=", "dict", "(", "portal_type", "=", "\"Worksheet\"", ")", "brains", "=", "api", ".", "search", "(", "query", ",", "CATALOG_WORKSHEET_LISTING", ")", "total", "=", "len", "(", "brains", ")", "for", "num", ",", "brain", "in", "enumerate", "(", "brains", ")", ":", "if", "num", "%", "100", "==", "0", ":", "logger", ".", "info", "(", "\"Reindexing open Worksheets: {}/{}\"", ".", "format", "(", "num", ",", "total", ")", ")", "worksheet", "=", "api", ".", "get_object", "(", "brain", ")", "worksheet", ".", "reindexObject", "(", ")" ]
Returns the custom image associated with a given app. If there are multiple candidate images on disk, one is chosen arbitrarily.
def get_custom_image(user_context, app_id):
    possible_paths = _valid_custom_image_paths(user_context, app_id)
    # materialize the filter so len() also works on Python 3
    existing_images = list(filter(os.path.exists, possible_paths))
    if len(existing_images) > 0:
        return existing_images[0]
115
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/grid.py#L37-L43
[ "def", "console_wait_for_keypress", "(", "flush", ":", "bool", ")", "->", "Key", ":", "key", "=", "Key", "(", ")", "lib", ".", "TCOD_console_wait_for_keypress_wrapper", "(", "key", ".", "key_p", ",", "flush", ")", "return", "key" ]
Sets the custom image for app_id to be the image located at image_path. If there already exists a custom image for app_id, it will be deleted. Returns True if setting the image was successful.
def set_custom_image(user_context, app_id, image_path):
    if image_path is None:
        return False
    if not os.path.exists(image_path):
        return False
    (root, ext) = os.path.splitext(image_path)
    if not is_valid_extension(ext):
        # TODO: Maybe log that this happened?
        return False
    # If we don't remove the old image then theres no guarantee that Steam will
    # show our new image when it launches.
    if has_custom_image(user_context, app_id):
        img = get_custom_image(user_context, app_id)
        assert img is not None
        os.remove(img)
    # Set the new image
    parent_dir = paths.custom_images_directory(user_context)
    new_path = os.path.join(parent_dir, app_id + ext)
    shutil.copyfile(image_path, new_path)
    return True
116
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/grid.py#L45-L70
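A hedged usage sketch tying the three grid helpers together; the ctx value and file path are assumptions, since constructing a pysteam user context is outside the snippets above:

# ctx is assumed to be a valid pysteam user context for a logged-in user
if set_custom_image(ctx, "570", "/tmp/dota2_grid.png"):
    assert has_custom_image(ctx, "570")
    print("grid image now at:", get_custom_image(ctx, "570"))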
[ "def", "seconds_passed", "(", "self", ")", ":", "return", "int", "(", "(", "Date", "(", "self", ")", ".", "datetime", "-", "self", ".", "_STARTDATE", ".", "datetime", ")", ".", "total_seconds", "(", ")", ")" ]
Read an orthography profile from a metadata file or a default tab-separated profile file.
def from_file(cls, fname, form=None):
    try:
        tg = TableGroup.from_file(fname)
        opfname = None
    except JSONDecodeError:
        tg = TableGroup.fromvalue(cls.MD)
        opfname = fname
    if len(tg.tables) != 1:
        raise ValueError('profile description must contain exactly one table')
    metadata = tg.common_props
    metadata.update(fname=Path(fname), form=form)
    return cls(
        *[{k: None if (k != cls.GRAPHEME_COL and v == cls.NULL) else v
           for k, v in d.items()}
          for d in tg.tables[0].iterdicts(fname=opfname)],
        **metadata)
117
https://github.com/cldf/segments/blob/9136a4ec89555bf9b574399ffbb07f3cc9a9f45f/src/segments/profile.py#L100-L117
[ "def", "fixpointmethod", "(", "self", ",", "cfg_node", ")", ":", "JOIN", "=", "self", ".", "join", "(", "cfg_node", ")", "# Assignment check", "if", "isinstance", "(", "cfg_node", ",", "AssignmentNode", ")", ":", "arrow_result", "=", "JOIN", "# Reassignment check", "if", "cfg_node", ".", "left_hand_side", "not", "in", "cfg_node", ".", "right_hand_side_variables", ":", "# Get previous assignments of cfg_node.left_hand_side and remove them from JOIN", "arrow_result", "=", "self", ".", "arrow", "(", "JOIN", ",", "cfg_node", ".", "left_hand_side", ")", "arrow_result", "=", "arrow_result", "|", "self", ".", "lattice", ".", "el2bv", "[", "cfg_node", "]", "constraint_table", "[", "cfg_node", "]", "=", "arrow_result", "# Default case", "else", ":", "constraint_table", "[", "cfg_node", "]", "=", "JOIN" ]
Create a Profile instance from the Unicode graphemes found in text.
def from_text(cls, text, mapping='mapping'):
    graphemes = Counter(grapheme_pattern.findall(text))
    specs = [OrderedDict([
        (cls.GRAPHEME_COL, grapheme),
        ('frequency', frequency),
        (mapping, grapheme)]) for grapheme, frequency in graphemes.most_common()]
    return cls(*specs)
118
https://github.com/cldf/segments/blob/9136a4ec89555bf9b574399ffbb07f3cc9a9f45f/src/segments/profile.py#L120-L141
[ "def", "step", "(", "self", ",", "state", ",", "clamping", ")", ":", "ns", "=", "state", ".", "copy", "(", ")", "for", "var", "in", "state", ":", "if", "clamping", ".", "has_variable", "(", "var", ")", ":", "ns", "[", "var", "]", "=", "int", "(", "clamping", ".", "bool", "(", "var", ")", ")", "else", ":", "or_value", "=", "0", "for", "clause", ",", "_", "in", "self", ".", "in_edges_iter", "(", "var", ")", ":", "or_value", "=", "or_value", "or", "clause", ".", "bool", "(", "state", ")", "if", "or_value", ":", "break", "ns", "[", "var", "]", "=", "int", "(", "or_value", ")", "return", "ns" ]
split fasta file into separate fasta files based on list of scaffolds that belong to each separate file
def split_fasta(f, id2f):
    opened = {}
    for seq in parse_fasta(f):
        id = seq[0].split('>')[1].split()[0]
        if id not in id2f:
            continue
        fasta = id2f[id]
        if fasta not in opened:
            opened[fasta] = '%s.fa' % fasta
        seq[1] += '\n'
        with open(opened[fasta], 'a+') as f_out:
            f_out.write('\n'.join(seq))
119
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/name2fasta.py#L7-L22
[ "def", "get_queryset", "(", "self", ")", ":", "return", "Event", ".", "objects", ".", "filter", "(", "Q", "(", "startTime__gte", "=", "timezone", ".", "now", "(", ")", "-", "timedelta", "(", "days", "=", "90", ")", ")", "&", "(", "Q", "(", "series__isnull", "=", "False", ")", "|", "Q", "(", "publicevent__isnull", "=", "False", ")", ")", ")", ".", "annotate", "(", "count", "=", "Count", "(", "'eventregistration'", ")", ")", ".", "annotate", "(", "*", "*", "self", ".", "get_annotations", "(", ")", ")", ".", "exclude", "(", "Q", "(", "count", "=", "0", ")", "&", "Q", "(", "status__in", "=", "[", "Event", ".", "RegStatus", ".", "hidden", ",", "Event", ".", "RegStatus", ".", "regHidden", ",", "Event", ".", "RegStatus", ".", "disabled", "]", ")", ")" ]
Check whether pathname is a valid user data directory
def _is_user_directory(self, pathname):
    fullpath = os.path.join(self.userdata_location(), pathname)
    # SteamOS puts a directory named 'anonymous' in the userdata directory
    # by default. Since we assume that pathname is a userID, ignore any name
    # that can't be converted to a number
    return os.path.isdir(fullpath) and pathname.isdigit()
120
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/legacy/steam.py#L47-L58
[ "def", "pad_decr", "(", "ids", ")", ":", "if", "len", "(", "ids", ")", "<", "1", ":", "return", "list", "(", "ids", ")", "if", "not", "any", "(", "ids", ")", ":", "return", "[", "]", "# all padding.", "idx", "=", "-", "1", "while", "not", "ids", "[", "idx", "]", ":", "idx", "-=", "1", "if", "idx", "==", "-", "1", ":", "ids", "=", "ids", "else", ":", "ids", "=", "ids", "[", ":", "idx", "+", "1", "]", "return", "[", "i", "-", "1", "for", "i", "in", "ids", "]" ]
Returns an array of user ids for users on the filesystem
def local_users(self):
    # Any users on the machine will have an entry inside of the userdata
    # folder. As such, the easiest way to find a list of all users on the
    # machine is to just list the folders inside userdata
    userdirs = filter(self._is_user_directory, os.listdir(self.userdata_location()))
    # Exploits the fact that the directory is named the same as the user id
    return map(lambda userdir: user.User(self, int(userdir)), userdirs)
121
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/legacy/steam.py#L80-L87
[ "def", "change_thickness", "(", "self", ",", "element", ",", "thickness", ")", ":", "if", "element", "==", "\"header\"", ":", "self", ".", "data", ".", "append", "(", "Command", "(", "\"renewcommand\"", ",", "arguments", "=", "[", "NoEscape", "(", "r\"\\headrulewidth\"", ")", ",", "str", "(", "thickness", ")", "+", "'pt'", "]", ")", ")", "elif", "element", "==", "\"footer\"", ":", "self", ".", "data", ".", "append", "(", "Command", "(", "\"renewcommand\"", ",", "arguments", "=", "[", "NoEscape", "(", "r\"\\footrulewidth\"", ")", ",", "str", "(", "thickness", ")", "+", "'pt'", "]", ")", ")" ]
Calculates degree days starting with a series of temperature-equivalent values
def _calculate_degree_days(temperature_equivalent, base_temperature, cooling=False):
    if cooling:
        ret = temperature_equivalent - base_temperature
    else:
        ret = base_temperature - temperature_equivalent
    # degree days cannot be negative
    ret[ret < 0] = 0
    prefix = 'CDD' if cooling else 'HDD'
    ret.name = '{}_{}'.format(prefix, base_temperature)
    return ret
122
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/weather.py#L31-L59
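A small sketch of calling _calculate_degree_days on a pandas Series of daily temperature equivalents; the dates and values are made up for illustration:

import pandas as pd

temps = pd.Series([2.0, 8.5, 17.0, 20.5],
                  index=pd.date_range("2024-01-01", periods=4))
hdd = _calculate_degree_days(temps, base_temperature=16.5)
print(hdd)  # 14.5, 8.0, 0.0, 0.0 under the series name HDD_16.5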
[ "def", "undefine", "(", "self", ")", ":", "if", "lib", ".", "EnvUndefmessageHandler", "(", "self", ".", "_env", ",", "self", ".", "_cls", ",", "self", ".", "_idx", ")", "!=", "1", ":", "raise", "CLIPSError", "(", "self", ".", "_env", ")", "self", ".", "_env", "=", "None" ]
Development status.
def status(self):
    return {self._acronym_status(l): l
            for l in self.resp_text.split('\n')
            if l.startswith(self.prefix_status)}
123
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L33-L36
[ "def", "on_response", "(", "self", ",", "ch", ",", "method_frame", ",", "props", ",", "body", ")", ":", "LOGGER", ".", "debug", "(", "\"rabbitmq.Requester.on_response\"", ")", "if", "self", ".", "corr_id", "==", "props", ".", "correlation_id", ":", "self", ".", "response", "=", "{", "'props'", ":", "props", ",", "'body'", ":", "body", "}", "else", ":", "LOGGER", ".", "warn", "(", "\"rabbitmq.Requester.on_response - discarded response : \"", "+", "str", "(", "props", ".", "correlation_id", ")", ")", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "{", "'properties'", ":", "props", ",", "'body'", ":", "body", "}", ")", ")" ]
OSI Approved license.
def licenses(self):
    return {self._acronym_lic(l): l
            for l in self.resp_text.split('\n')
            if l.startswith(self.prefix_lic)}
124
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L43-L46
[ "async", "def", "logs", "(", "self", ",", "service_id", ":", "str", ",", "*", ",", "details", ":", "bool", "=", "False", ",", "follow", ":", "bool", "=", "False", ",", "stdout", ":", "bool", "=", "False", ",", "stderr", ":", "bool", "=", "False", ",", "since", ":", "int", "=", "0", ",", "timestamps", ":", "bool", "=", "False", ",", "is_tty", ":", "bool", "=", "False", ",", "tail", ":", "str", "=", "\"all\"", ")", "->", "Union", "[", "str", ",", "AsyncIterator", "[", "str", "]", "]", ":", "if", "stdout", "is", "False", "and", "stderr", "is", "False", ":", "raise", "TypeError", "(", "\"Need one of stdout or stderr\"", ")", "params", "=", "{", "\"details\"", ":", "details", ",", "\"follow\"", ":", "follow", ",", "\"stdout\"", ":", "stdout", ",", "\"stderr\"", ":", "stderr", ",", "\"since\"", ":", "since", ",", "\"timestamps\"", ":", "timestamps", ",", "\"tail\"", ":", "tail", ",", "}", "response", "=", "await", "self", ".", "docker", ".", "_query", "(", "\"services/{service_id}/logs\"", ".", "format", "(", "service_id", "=", "service_id", ")", ",", "method", "=", "\"GET\"", ",", "params", "=", "params", ",", ")", "return", "await", "multiplexed_result", "(", "response", ",", "follow", ",", "is_tty", "=", "is_tty", ")" ]
Remove prefix.
def licenses_desc(self):
    return {self._acronym_lic(l): l.split(self.prefix_lic)[1]
            for l in self.resp_text.split('\n')
            if l.startswith(self.prefix_lic)}
125
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L48-L52
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Convert a license statement to its acronym.
def _acronym_lic(self, license_statement):
    pat = re.compile(r'\(([\w+\W?\s?]+)\)')
    if pat.search(license_statement):
        lic = pat.search(license_statement).group(1)
        if lic.startswith('CNRI'):
            acronym_licence = lic[:4]
        else:
            acronym_licence = lic.replace(' ', '')
    else:
        acronym_licence = ''.join(
            [w[0] for w in license_statement.split(self.prefix_lic)[1].split()])
    return acronym_licence
126
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L54-L67
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
calc MD5 based on path
def calcMD5(path):
    # check that file exists
    if os.path.exists(path) is False:
        yield False
    else:
        command = ['md5sum', path]
        p = Popen(command, stdout=PIPE)
        for line in p.communicate()[0].splitlines():
            yield line.decode('ascii').strip().split()[0]
        p.wait()
        yield False
127
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L18-L31
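calcMD5 is a generator that yields checksums followed by a trailing False sentinel (or only False if the file is missing); a hedged consumption sketch:

for md5 in calcMD5('/etc/hostname'):
    if md5 is False:
        break  # end of output, or the file does not exist
    print('md5:', md5)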
[ "async", "def", "release_name_async", "(", "self", ",", "bus_name", ",", "error", "=", "None", ",", "timeout", "=", "DBUS", ".", "TIMEOUT_USE_DEFAULT", ")", ":", "assert", "self", ".", "loop", "!=", "None", ",", "\"no event loop to attach coroutine to\"", "return", "await", "self", ".", "connection", ".", "bus_release_name_async", "(", "bus_name", ",", "error", "=", "error", ",", "timeout", "=", "timeout", ")" ]
download files with wget
def wget(ftp, f=False, exclude=False, name=False, md5=False, tries=10):
    # file name
    if f is False:
        f = ftp.rsplit('/', 1)[-1]
    # downloaded file if it does not already exist
    # check md5s on server (optional)
    t = 0
    while md5check(f, ftp, md5, exclude) is not True:
        t += 1
        if name is not False:
            print('# downloading:', name, f)
        if exclude is False:
            command = 'wget -q --random-wait %s' % (ftp)
        else:
            command = 'wget -q --random-wait -R %s %s' % (exclude, ftp)
        p = Popen(command, shell=True)
        p.communicate()
        if t >= tries:
            print('not downloaded:', name, f)
            return [f, False]
    return [f, True]
128
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L74-L97
[ "def", "get_listing", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'listing'", ")", ":", "allEvents", "=", "self", ".", "get_allEvents", "(", ")", "openEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", "closedEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", "publicEvents", "=", "allEvents", ".", "instance_of", "(", "PublicEvent", ")", "allSeries", "=", "allEvents", ".", "instance_of", "(", "Series", ")", "self", ".", "listing", "=", "{", "'allEvents'", ":", "allEvents", ",", "'openEvents'", ":", "openEvents", ",", "'closedEvents'", ":", "closedEvents", ",", "'publicEvents'", ":", "publicEvents", ",", "'allSeries'", ":", "allSeries", ",", "'regOpenEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'categorySeparateEvents'", ":", "publicEvents", ".", "filter", "(", "publicevent__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'publicevent__category'", ")", ",", "'regOpenSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'categorySeparateSeries'", ":", "allSeries", ".", "filter", "(", "series__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'series__category'", ")", ",", "}", "return", "self", ".", "listing" ]
check that at least one of the queries is in the line
def check(line, queries):
    line = line.strip()
    spLine = line.replace('.', ' ').split()
    matches = set(spLine).intersection(queries)
    if len(matches) > 0:
        return matches, line.split('\t')
    return matches, False
129
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L99-L109
[ "def", "_OpenFile", "(", "self", ",", "path", ")", ":", "if", "not", "self", ".", "_registry_file_reader", ":", "return", "None", "return", "self", ".", "_registry_file_reader", ".", "Open", "(", "path", ",", "ascii_codepage", "=", "self", ".", "_ascii_codepage", ")" ]
search entrez using specified database and accession
def entrez(db, acc):
    c1 = ['esearch', '-db', db, '-query', acc]
    c2 = ['efetch', '-db', 'BioSample', '-format', 'docsum']
    p1 = Popen(c1, stdout=PIPE, stderr=PIPE)
    p2 = Popen(c2, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
    return p2.communicate()
130
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L111-L120
[ "def", "parse_units", "(", "units_str", ")", ":", "if", "not", "len", "(", "units_str", ")", ":", "return", "units_str", ",", "(", "None", ",", "None", ")", "if", "units_str", "[", "-", "1", "]", "==", "']'", ":", "units", ",", "lims", "=", "units_str", ".", "rsplit", "(", "'['", ")", "# type: str, str", "else", ":", "units", "=", "units_str", "lims", "=", "'?, ?]'", "lims", "=", "tuple", "(", "[", "float", "(", "x", ")", "if", "x", ".", "strip", "(", ")", "!=", "'?'", "else", "None", "for", "x", "in", "lims", ".", "strip", "(", "']'", ")", ".", "split", "(", "','", ")", "]", ")", "return", "units", ".", "strip", "(", ")", ",", "lims" ]
attempt to use NCBI Entrez to get BioSample ID
def searchAccession(acc):
    # try genbank file
    # genome database
    out, error = entrez('genome', acc)
    for line in out.splitlines():
        line = line.decode('ascii').strip()
        if 'Assembly_Accession' in line or 'BioSample' in line:
            newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
            if len(newAcc) > 0:
                return (True, acc, newAcc)
    # nucleotide database
    out, error = entrez('nucleotide', acc)
    for line in out.splitlines():
        line = line.decode('ascii').strip()
        if 'Assembly_Accession' in line or 'BioSample' in line:
            newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
            if len(newAcc) > 0:
                return (True, acc, newAcc)
    # assembly database
    out, error = entrez('assembly', acc)
    for line in out.splitlines():
        line = line.decode('ascii').strip()
        if 'Assembly_Accession' in line or 'BioSample' in line:
            newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
            if len(newAcc) > 0:
                return (True, acc, newAcc)
    for error in error.splitlines():
        error = error.decode('ascii').strip()
        if '500 Can' in error:
            return (False, acc, 'no network')
    return (False, acc, 'efetch failed')
131
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L122-L156
[ "def", "_evictStaleDevices", "(", "self", ")", ":", "while", "self", ".", "running", ":", "expiredDeviceIds", "=", "[", "key", "for", "key", ",", "value", "in", "self", ".", "devices", ".", "items", "(", ")", "if", "value", ".", "hasExpired", "(", ")", "]", "for", "key", "in", "expiredDeviceIds", ":", "logger", ".", "warning", "(", "\"Device timeout, removing \"", "+", "key", ")", "del", "self", ".", "devices", "[", "key", "]", "time", ".", "sleep", "(", "1", ")", "# TODO send reset after a device fails", "logger", ".", "warning", "(", "\"DeviceCaretaker is now shutdown\"", ")" ]
download genome info from NCBI
def getFTPs(accessions, ftp, search, exclude, convert=False,
            threads=1, attempt=1, max_attempts=2):
    info = wget(ftp)[0]
    allMatches = []
    for genome in open(info, encoding='utf8'):
        genome = str(genome)
        matches, genomeInfo = check(genome, accessions)
        if genomeInfo is not False:
            f = genomeInfo[0] + search
            Gftp = genomeInfo[19]
            Gftp = Gftp + '/' + search
            allMatches.extend(matches)
            yield (Gftp, f, exclude, matches)
    # print accessions that could not be matched
    # and whether or not they could be converted (optional)
    newAccs = []
    missing = accessions.difference(set(allMatches))
    if convert is True:
        pool = Pool(threads)
        pool = pool.imap_unordered(searchAccession, missing)
        for newAcc in tqdm(pool, total=len(missing)):
            status, accession, newAcc = newAcc
            if status is True:
                newAccs.append(newAcc)
            print('not found:', accession, '->', newAcc)
    else:
        for accession in missing:
            print('not found:', accession)
    # re-try after converting accessions (optional)
    if len(newAccs) > 0 and attempt <= max_attempts:
        print('convert accession attempt', attempt)
        attempt += 1
        for hit in getFTPs(set(newAccs), ftp, search, exclude, convert,
                           threads=1, attempt=attempt):
            yield hit
132
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L158-L195
[ "def", "start_transmit", "(", "self", ",", "blocking", "=", "False", ",", "start_packet_groups", "=", "True", ",", "*", "ports", ")", ":", "port_list", "=", "self", ".", "set_ports_list", "(", "*", "ports", ")", "if", "start_packet_groups", ":", "port_list_for_packet_groups", "=", "self", ".", "ports", ".", "values", "(", ")", "port_list_for_packet_groups", "=", "self", ".", "set_ports_list", "(", "*", "port_list_for_packet_groups", ")", "self", ".", "api", ".", "call_rc", "(", "'ixClearTimeStamp {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartPacketGroups {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartTransmit {}'", ".", "format", "(", "port_list", ")", ")", "time", ".", "sleep", "(", "0.2", ")", "if", "blocking", ":", "self", ".", "wait_transmit", "(", "*", "ports", ")" ]
download genomes from NCBI
def download(args):
    accessions, infoFTP = set(args['g']), args['i']
    search, exclude = args['s'], args['e']
    FTPs = getFTPs(accessions, infoFTP, search, exclude,
                   threads=args['t'], convert=args['convert'])
    if args['test'] is True:
        for genome in FTPs:
            print('found:', ';'.join(genome[-1]), genome[0])
        return FTPs
    pool = Pool(args['t'])
    pool = pool.imap_unordered(wgetGenome, FTPs)
    files = []
    for f in tqdm(pool, total=len(accessions)):
        files.append(f)
    return files
133
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L204-L221
[ "def", "one_cycle_scheduler", "(", "lr_max", ":", "float", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "OneCycleScheduler", ":", "return", "partial", "(", "OneCycleScheduler", ",", "lr_max", "=", "lr_max", ",", "*", "*", "kwargs", ")" ]
remove pesky characters from fasta file header
def fix_fasta(fasta):
    for seq in parse_fasta(fasta):
        seq[0] = remove_char(seq[0])
        if len(seq[1]) > 0:
            yield seq
134
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/fix_fasta.py#L18-L25
[ "def", "remove_server", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "server", "=", "self", ".", "_get_server", "(", "server_id", ")", "# Delete any instances we recorded to be cleaned up", "if", "server_id", "in", "self", ".", "_owned_subscriptions", ":", "inst_list", "=", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_filters", ":", "inst_list", "=", "self", ".", "_owned_filters", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_filters", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_destinations", ":", "inst_list", "=", "self", ".", "_owned_destinations", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_destinations", "[", "server_id", "]", "# Remove server from this listener", "del", "self", ".", "_servers", "[", "server_id", "]" ]
Compute a DataFrame summary of a Stats object.
def _calc_frames(stats):
    timings = []
    callers = []
    for key, values in iteritems(stats.stats):
        timings.append(pd.Series(key + values[:-1], index=timing_colnames))
        for caller_key, caller_values in iteritems(values[-1]):
            callers.append(pd.Series(key + caller_key + caller_values,
                                     index=caller_columns))
    timings_df = pd.DataFrame(timings)
    callers_df = pd.DataFrame(callers)
    timings_df['filename:funcname'] = (timings_df['filename'] + ':' +
                                       timings_df['funcname'])
    timings_df = timings_df.groupby('filename:funcname').sum()
    return timings_df, callers_df
135
https://github.com/ssanderson/pstats-view/blob/62148d4e01765806bc5e6bb40628cdb186482c05/pstatsviewer/viewer.py#L40-L66
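A sketch of feeding _calc_frames from a live profile; it presumes the module's timing_colnames/caller_columns index definitions (and its pd/iteritems imports) are in scope:

import cProfile
import pstats

prof = cProfile.Profile()
prof.enable()
sum(i * i for i in range(100000))  # small workload to profile
prof.disable()

timings_df, callers_df = _calc_frames(pstats.Stats(prof))
print(timings_df.head())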
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
get unmapped reads
def unmapped(sam, mates):
    for read in sam:
        if read.startswith('@') is True:
            continue
        read = read.strip().split()
        if read[2] == '*' and read[6] == '*':
            yield read
        elif mates is True:
            if read[2] == '*' or read[6] == '*':
                yield read
            for i in read:
                if i == 'YT:Z:UP':
                    yield read
136
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/unmapped.py#L11-L26
[ "def", "syzygyJD", "(", "jd", ")", ":", "sun", "=", "swe", ".", "sweObjectLon", "(", "const", ".", "SUN", ",", "jd", ")", "moon", "=", "swe", ".", "sweObjectLon", "(", "const", ".", "MOON", ",", "jd", ")", "dist", "=", "angle", ".", "distance", "(", "sun", ",", "moon", ")", "# Offset represents the Syzygy type. ", "# Zero is conjunction and 180 is opposition.", "offset", "=", "180", "if", "(", "dist", ">=", "180", ")", "else", "0", "while", "abs", "(", "dist", ")", ">", "MAX_ERROR", ":", "jd", "=", "jd", "-", "dist", "/", "13.1833", "# Moon mean daily motion", "sun", "=", "swe", ".", "sweObjectLon", "(", "const", ".", "SUN", ",", "jd", ")", "moon", "=", "swe", ".", "sweObjectLon", "(", "const", ".", "MOON", ",", "jd", ")", "dist", "=", "angle", ".", "closestdistance", "(", "sun", "-", "offset", ",", "moon", ")", "return", "jd" ]
execute jobs in processes using N threads
def parallel ( processes , threads ) : pool = multithread ( threads ) pool . map ( run_process , processes ) pool . close ( ) pool . join ( )
137
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/parallel.py#L19-L26
[ "def", "missing_any", "(", "da", ",", "freq", ",", "*", "*", "kwds", ")", ":", "c", "=", "da", ".", "notnull", "(", ")", ".", "resample", "(", "time", "=", "freq", ")", ".", "sum", "(", "dim", "=", "'time'", ")", "if", "'-'", "in", "freq", ":", "pfreq", ",", "anchor", "=", "freq", ".", "split", "(", "'-'", ")", "else", ":", "pfreq", "=", "freq", "if", "pfreq", ".", "endswith", "(", "'S'", ")", ":", "start_time", "=", "c", ".", "indexes", "[", "'time'", "]", "end_time", "=", "start_time", ".", "shift", "(", "1", ",", "freq", "=", "freq", ")", "else", ":", "end_time", "=", "c", ".", "indexes", "[", "'time'", "]", "start_time", "=", "end_time", ".", "shift", "(", "-", "1", ",", "freq", "=", "freq", ")", "n", "=", "(", "end_time", "-", "start_time", ")", ".", "days", "nda", "=", "xr", ".", "DataArray", "(", "n", ".", "values", ",", "coords", "=", "{", "'time'", ":", "c", ".", "time", "}", ",", "dims", "=", "'time'", ")", "return", "c", "!=", "nda" ]
the final log processor that structlog requires to render .
def define_log_renderer ( fmt , fpath , quiet ) : # it must accept a logger, method_name and event_dict (just like processors) # but must return the rendered string, not a dictionary. # TODO tty logic if fmt : return structlog . processors . JSONRenderer ( ) if fpath is not None : return structlog . processors . JSONRenderer ( ) if sys . stderr . isatty ( ) and not quiet : return structlog . dev . ConsoleRenderer ( ) return structlog . processors . JSONRenderer ( )
138
https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L239-L256
[ "def", "right_click_specimen_equalarea", "(", "self", ",", "event", ")", ":", "if", "event", ".", "LeftIsDown", "(", ")", "or", "event", ".", "ButtonDClick", "(", ")", ":", "return", "elif", "self", ".", "specimen_EA_setting", "==", "\"Zoom\"", ":", "self", ".", "specimen_EA_setting", "=", "\"Pan\"", "try", ":", "self", ".", "toolbar2", ".", "pan", "(", "'off'", ")", "except", "TypeError", ":", "pass", "elif", "self", ".", "specimen_EA_setting", "==", "\"Pan\"", ":", "self", ".", "specimen_EA_setting", "=", "\"Zoom\"", "try", ":", "self", ".", "toolbar2", ".", "zoom", "(", ")", "except", "TypeError", ":", "pass" ]
Add unique id , type and hostname
def _structlog_default_keys_processor ( logger_class , log_method , event ) : global HOSTNAME if 'id' not in event : event [ 'id' ] = '%s_%s' % ( datetime . utcnow ( ) . strftime ( '%Y%m%dT%H%M%S' ) , uuid . uuid1 ( ) . hex ) if 'type' not in event : event [ 'type' ] = 'log' event [ 'host' ] = HOSTNAME return event
139
https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L258-L273
[ "def", "_openResources", "(", "self", ")", ":", "try", ":", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "True", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "warning", "(", "ex", ")", "logger", ".", "warning", "(", "\"Unable to read wav with memmory mapping. Trying without now.\"", ")", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "False", ")", "self", ".", "_array", "=", "data", "self", ".", "attributes", "[", "'rate'", "]", "=", "rate" ]
log processors that structlog executes before final rendering
def define_log_processors ( ) : # these processors should accept logger, method_name and event_dict # and return a new dictionary which will be passed as event_dict to the next one. return [ structlog . processors . TimeStamper ( fmt = "iso" ) , _structlog_default_keys_processor , structlog . stdlib . PositionalArgumentsFormatter ( ) , structlog . processors . StackInfoRenderer ( ) , structlog . processors . format_exc_info , ]
140
https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L352-L364
[ "def", "new_pin", "(", "self", ",", "min_length", "=", "4", ",", "min_common", "=", "1000", ",", "timeout", "=", "20", ",", "refresh_timeout", "=", "3", ")", ":", "self", ".", "refresh", "(", "count_common", "=", "min_length", ",", "min_common", "=", "min_common", ",", "timeout", "=", "refresh_timeout", ")", "rating", "=", "self", ".", "sentence_tool", ".", "rate", "(", "self", ".", "tokens", ")", "start", "=", "time", "(", ")", "while", "time", "(", ")", "-", "start", "<", "timeout", ":", "pin", "=", "''", "for", "token", ",", "commonness", "in", "rating", ":", "if", "commonness", ">=", "min_common", ":", "key", "=", "self", ".", "mnemonic", ".", "word_to_key", "(", "'major_system'", ",", "token", ".", "lower", "(", ")", ")", "if", "key", "is", "not", "None", ":", "pin", "+=", "key", "if", "len", "(", "pin", ")", "<", "min_length", ":", "self", ".", "refresh", "(", "count_common", "=", "min_length", ",", "min_common", "=", "min_common", ",", "timeout", "=", "refresh_timeout", ")", "rating", "=", "self", ".", "sentence_tool", ".", "rate", "(", "self", ".", "tokens", ")", "else", ":", "return", "pin", ",", "list", "(", "self", ".", "overlap_pin", "(", "pin", ",", "self", ".", "tokens", ")", ")", "return", "None" ]
configures a logger when required , writing to stderr or a file
def _configure_logger ( fmt , quiet , level , fpath , pre_hooks , post_hooks , metric_grouping_interval ) : # NOTE not thread safe. Multiple BaseScripts cannot be instantiated concurrently. level = getattr ( logging , level . upper ( ) ) global _GLOBAL_LOG_CONFIGURED if _GLOBAL_LOG_CONFIGURED : return # since the hooks need to run through structlog, need to wrap them like processors def wrap_hook ( fn ) : @ wraps ( fn ) def processor ( logger , method_name , event_dict ) : fn ( event_dict ) return event_dict return processor processors = define_log_processors ( ) processors . extend ( [ wrap_hook ( h ) for h in pre_hooks ] ) if metric_grouping_interval : processors . append ( metrics_grouping_processor ) log_renderer = define_log_renderer ( fmt , fpath , quiet ) stderr_required = ( not quiet ) pretty_to_stderr = ( stderr_required and ( fmt == "pretty" or ( fmt is None and sys . stderr . isatty ( ) ) ) ) should_inject_pretty_renderer = ( pretty_to_stderr and not isinstance ( log_renderer , structlog . dev . ConsoleRenderer ) ) if should_inject_pretty_renderer : stderr_required = False processors . append ( StderrConsoleRenderer ( ) ) processors . append ( log_renderer ) processors . extend ( [ wrap_hook ( h ) for h in post_hooks ] ) streams = [ ] # we need to use a stream if we are writing to both file and stderr, and both are json if stderr_required : streams . append ( sys . stderr ) if fpath is not None : # TODO handle creating a directory for this log file ? # TODO set mode and encoding appropriately streams . append ( open ( fpath , 'a' ) ) assert len ( streams ) != 0 , "cannot configure logger for 0 streams" stream = streams [ 0 ] if len ( streams ) == 1 else Stream ( * streams ) atexit . register ( stream . close ) # a global level struct log config unless otherwise specified. structlog . configure ( processors = processors , context_class = dict , logger_factory = LevelLoggerFactory ( stream , level = level ) , wrapper_class = BoundLevelLogger , cache_logger_on_first_use = True , ) # TODO take care of removing other handlers stdlib_root_log = logging . getLogger ( ) stdlib_root_log . addHandler ( StdlibStructlogHandler ( ) ) stdlib_root_log . setLevel ( level ) _GLOBAL_LOG_CONFIGURED = True
141
https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L366-L447
[ "def", "_get_aggregated_info", "(", "self", ")", ":", "agg_results", "=", "{", "}", "for", "key", "in", "self", ".", "aggregated_info", "[", "'occurrences'", "]", ":", "agg_results", "[", "key", "]", "=", "{", "'occurrences'", ":", "self", ".", "aggregated_info", "[", "'occurrences'", "]", ".", "get", "(", "key", ")", ",", "'coverage'", ":", "(", "float", "(", "self", ".", "aggregated_info", "[", "'occurrences'", "]", ".", "get", "(", "key", ")", ")", "/", "float", "(", "self", ".", "get_metadata", "(", "'items_count'", ")", ")", ")", "*", "100", "}", "return", "agg_results" ]
Instead of using a processor , add basic information like caller filename etc . here .
def _add_base_info ( self , event_dict ) : f = sys . _getframe ( ) level_method_frame = f . f_back caller_frame = level_method_frame . f_back return event_dict
142
https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L121-L129
[ "def", "_clamp_string", "(", "self", ",", "row_item", ",", "column_index", ",", "delimiter", "=", "''", ")", ":", "width", "=", "(", "self", ".", "_table", ".", "column_widths", "[", "column_index", "]", "-", "self", ".", "_table", ".", "left_padding_widths", "[", "column_index", "]", "-", "self", ".", "_table", ".", "right_padding_widths", "[", "column_index", "]", ")", "if", "termwidth", "(", "row_item", ")", "<=", "width", ":", "return", "row_item", "else", ":", "if", "width", "-", "len", "(", "delimiter", ")", ">=", "0", ":", "clamped_string", "=", "(", "textwrap", "(", "row_item", ",", "width", "-", "len", "(", "delimiter", ")", ")", "[", "0", "]", "+", "delimiter", ")", "else", ":", "clamped_string", "=", "delimiter", "[", ":", "width", "]", "return", "clamped_string" ]
Propagate a method call to the wrapped logger .
def _proxy_to_logger ( self , method_name , event , * event_args , * * event_kw ) : if isinstance ( event , bytes ) : event = event . decode ( 'utf-8' ) if event_args : event_kw [ 'positional_args' ] = event_args return super ( BoundLevelLogger , self ) . _proxy_to_logger ( method_name , event = event , * * event_kw )
143
https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L211-L229
[ "def", "calculateOverlapCurve", "(", "sp", ",", "inputVectors", ")", ":", "columnNumber", "=", "np", ".", "prod", "(", "sp", ".", "getColumnDimensions", "(", ")", ")", "numInputVector", ",", "inputSize", "=", "inputVectors", ".", "shape", "outputColumns", "=", "np", ".", "zeros", "(", "(", "numInputVector", ",", "columnNumber", ")", ",", "dtype", "=", "uintType", ")", "outputColumnsCorrupted", "=", "np", ".", "zeros", "(", "(", "numInputVector", ",", "columnNumber", ")", ",", "dtype", "=", "uintType", ")", "noiseLevelList", "=", "np", ".", "linspace", "(", "0", ",", "1.0", ",", "21", ")", "inputOverlapScore", "=", "np", ".", "zeros", "(", "(", "numInputVector", ",", "len", "(", "noiseLevelList", ")", ")", ")", "outputOverlapScore", "=", "np", ".", "zeros", "(", "(", "numInputVector", ",", "len", "(", "noiseLevelList", ")", ")", ")", "for", "i", "in", "range", "(", "numInputVector", ")", ":", "for", "j", "in", "range", "(", "len", "(", "noiseLevelList", ")", ")", ":", "inputVectorCorrupted", "=", "copy", ".", "deepcopy", "(", "inputVectors", "[", "i", "]", "[", ":", "]", ")", "corruptSparseVector", "(", "inputVectorCorrupted", ",", "noiseLevelList", "[", "j", "]", ")", "sp", ".", "compute", "(", "inputVectors", "[", "i", "]", "[", ":", "]", ",", "False", ",", "outputColumns", "[", "i", "]", "[", ":", "]", ")", "sp", ".", "compute", "(", "inputVectorCorrupted", ",", "False", ",", "outputColumnsCorrupted", "[", "i", "]", "[", ":", "]", ")", "inputOverlapScore", "[", "i", "]", "[", "j", "]", "=", "percentOverlap", "(", "inputVectors", "[", "i", "]", "[", ":", "]", ",", "inputVectorCorrupted", ")", "outputOverlapScore", "[", "i", "]", "[", "j", "]", "=", "percentOverlap", "(", "outputColumns", "[", "i", "]", "[", ":", "]", ",", "outputColumnsCorrupted", "[", "i", "]", "[", ":", "]", ")", "return", "noiseLevelList", ",", "inputOverlapScore", ",", "outputOverlapScore" ]
Given four points of a rectangle , translate the rectangle to the specified x and y coordinates and optionally change the width .
def translate ( rect , x , y , width = 1 ) : return ( ( rect [ 0 ] [ 0 ] + x , rect [ 0 ] [ 1 ] + y ) , ( rect [ 1 ] [ 0 ] + x , rect [ 1 ] [ 1 ] + y ) , ( rect [ 2 ] [ 0 ] + x + width , rect [ 2 ] [ 1 ] + y ) , ( rect [ 3 ] [ 0 ] + x + width , rect [ 3 ] [ 1 ] + y ) )
144
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/core_overlap_plot.py#L57-L74
[ "def", "render_unregistered", "(", "error", "=", "None", ")", ":", "return", "template", "(", "read_index_template", "(", ")", ",", "registered", "=", "False", ",", "error", "=", "error", ",", "seeder_data", "=", "None", ",", "url_id", "=", "None", ",", ")" ]
remove problem characters from string
def remove_bad ( string ) : remove = [ ':' , ',' , '(' , ')' , ' ' , '|' , ';' , '\'' ] for c in remove : string = string . replace ( c , '_' ) return string
145
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L43-L50
[ "def", "getAssociation", "(", "self", ",", "server_url", ",", "handle", "=", "None", ")", ":", "if", "handle", "is", "None", ":", "handle", "=", "''", "# The filename with the empty handle is a prefix of all other", "# associations for the given server URL.", "filename", "=", "self", ".", "getAssociationFilename", "(", "server_url", ",", "handle", ")", "if", "handle", ":", "return", "self", ".", "_getAssociation", "(", "filename", ")", "else", ":", "association_files", "=", "os", ".", "listdir", "(", "self", ".", "association_dir", ")", "matching_files", "=", "[", "]", "# strip off the path to do the comparison", "name", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "for", "association_file", "in", "association_files", ":", "if", "association_file", ".", "startswith", "(", "name", ")", ":", "matching_files", ".", "append", "(", "association_file", ")", "matching_associations", "=", "[", "]", "# read the matching files and sort by time issued", "for", "name", "in", "matching_files", ":", "full_name", "=", "os", ".", "path", ".", "join", "(", "self", ".", "association_dir", ",", "name", ")", "association", "=", "self", ".", "_getAssociation", "(", "full_name", ")", "if", "association", "is", "not", "None", ":", "matching_associations", ".", "append", "(", "(", "association", ".", "issued", ",", "association", ")", ")", "matching_associations", ".", "sort", "(", ")", "# return the most recently issued one.", "if", "matching_associations", ":", "(", "_", ",", "assoc", ")", "=", "matching_associations", "[", "-", "1", "]", "return", "assoc", "else", ":", "return", "None" ]
make copy of sequences with short identifier
def get_ids ( a ) : a_id = '%s.id.fa' % ( a . rsplit ( '.' , 1 ) [ 0 ] ) a_id_lookup = '%s.id.lookup' % ( a . rsplit ( '.' , 1 ) [ 0 ] ) if check ( a_id ) is True : return a_id , a_id_lookup a_id_f = open ( a_id , 'w' ) a_id_lookup_f = open ( a_id_lookup , 'w' ) ids = [ ] for seq in parse_fasta ( open ( a ) ) : id = id_generator ( ) while id in ids : id = id_generator ( ) ids . append ( id ) header = seq [ 0 ] . split ( '>' ) [ 1 ] name = remove_bad ( header ) seq [ 0 ] = '>%s %s' % ( id , header ) print ( '\n' . join ( seq ) , file = a_id_f ) print ( '%s\t%s\t%s' % ( id , name , header ) , file = a_id_lookup_f ) return a_id , a_id_lookup
146
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L55-L76
[ "def", "face_index", "(", "vertices", ")", ":", "new_verts", "=", "[", "]", "face_indices", "=", "[", "]", "for", "wall", "in", "vertices", ":", "face_wall", "=", "[", "]", "for", "vert", "in", "wall", ":", "if", "new_verts", ":", "if", "not", "np", ".", "isclose", "(", "vert", ",", "new_verts", ")", ".", "all", "(", "axis", "=", "1", ")", ".", "any", "(", ")", ":", "new_verts", ".", "append", "(", "vert", ")", "else", ":", "new_verts", ".", "append", "(", "vert", ")", "face_index", "=", "np", ".", "where", "(", "np", ".", "isclose", "(", "vert", ",", "new_verts", ")", ".", "all", "(", "axis", "=", "1", ")", ")", "[", "0", "]", "[", "0", "]", "face_wall", ".", "append", "(", "face_index", ")", "face_indices", ".", "append", "(", "face_wall", ")", "return", "np", ".", "array", "(", "new_verts", ")", ",", "np", ".", "array", "(", "face_indices", ")" ]
convert fasta to phylip because RAxML is ridiculous
def convert2phylip ( convert ) : out = '%s.phy' % ( convert . rsplit ( '.' , 1 ) [ 0 ] ) if check ( out ) is False : convert = open ( convert , 'rU' ) out_f = open ( out , 'w' ) alignments = AlignIO . parse ( convert , "fasta" ) AlignIO . write ( alignments , out , "phylip" ) return out
147
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L78-L88
[ "def", "set_item", "(", "key", ",", "value", ")", ":", "CACHED_KEY_FILE", "=", "os", ".", "path", ".", "join", "(", "CURRENT_DIR", ",", "key", ")", "open", "(", "CACHED_KEY_FILE", ",", "\"wb\"", ")", ".", "write", "(", "json", ".", "dumps", "(", "{", "\"_\"", ":", "value", "}", ")", ".", "encode", "(", "'UTF-8'", ")", ")", "return", "value" ]
run IQ - Tree
def run_iqtree ( phy , model , threads , cluster , node ) : # set ppn based on threads if threads > 24 : ppn = 24 else : ppn = threads tree = '%s.treefile' % ( phy ) if check ( tree ) is False : if model is False : model = 'TEST' dir = os . getcwd ( ) command = 'iqtree-omp -s %s -m %s -nt %s -quiet' % ( phy , model , threads ) if cluster is False : p = Popen ( command , shell = True ) else : if node is False : node = '1' qsub = 'qsub -l nodes=%s:ppn=%s -m e -N iqtree' % ( node , ppn ) command = 'cd /tmp; mkdir iqtree; cd iqtree; cp %s/%s .; %s; mv * %s/; rm -r ../iqtree' % ( dir , phy , command , dir ) re_call = 'cd %s; %s --no-fast --iq' % ( dir . rsplit ( '/' , 1 ) [ 0 ] , ' ' . join ( sys . argv ) ) p = Popen ( 'echo "%s;%s" | %s' % ( command , re_call , qsub ) , shell = True ) p . communicate ( ) return tree
148
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L163-L190
[ "def", "private_messenger", "(", ")", ":", "while", "__websocket_server_running__", ":", "pipein", "=", "open", "(", "PRIVATE_PIPE", ",", "'r'", ")", "line", "=", "pipein", ".", "readline", "(", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "replace", "(", "'\\r'", ",", "''", ")", "if", "line", "!=", "''", ":", "message", "=", "json", ".", "loads", "(", "line", ")", "WebSocketHandler", ".", "send_private_message", "(", "user_id", "=", "message", "[", "'user_id'", "]", ",", "message", "=", "message", ")", "print", "line", "remaining_lines", "=", "pipein", ".", "read", "(", ")", "pipein", ".", "close", "(", ")", "pipeout", "=", "open", "(", "PRIVATE_PIPE", ",", "'w'", ")", "pipeout", ".", "write", "(", "remaining_lines", ")", "pipeout", ".", "close", "(", ")", "else", ":", "pipein", ".", "close", "(", ")", "time", ".", "sleep", "(", "0.05", ")" ]
get the names for sequences in the raxml tree
def fix_tree ( tree , a_id_lookup , out ) : if check ( out ) is False and check ( tree ) is True : tree = open ( tree ) . read ( ) for line in open ( a_id_lookup ) : id , name , header = line . strip ( ) . split ( '\t' ) tree = tree . replace ( id + ':' , name + ':' ) out_f = open ( out , 'w' ) print ( tree . strip ( ) , file = out_f ) return out
149
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L192-L203
[ "def", "start", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "is_running", "(", ")", ":", "self", ".", "websock_url", "=", "self", ".", "chrome", ".", "start", "(", "*", "*", "kwargs", ")", "self", ".", "websock", "=", "websocket", ".", "WebSocketApp", "(", "self", ".", "websock_url", ")", "self", ".", "websock_thread", "=", "WebsockReceiverThread", "(", "self", ".", "websock", ",", "name", "=", "'WebsockThread:%s'", "%", "self", ".", "chrome", ".", "port", ")", "self", ".", "websock_thread", ".", "start", "(", ")", "self", ".", "_wait_for", "(", "lambda", ":", "self", ".", "websock_thread", ".", "is_open", ",", "timeout", "=", "30", ")", "# tell browser to send us messages we're interested in", "self", ".", "send_to_chrome", "(", "method", "=", "'Network.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'Page.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'Console.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'Runtime.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'ServiceWorker.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'ServiceWorker.setForceUpdateOnPageLoad'", ")", "# disable google analytics", "self", ".", "send_to_chrome", "(", "method", "=", "'Network.setBlockedURLs'", ",", "params", "=", "{", "'urls'", ":", "[", "'*google-analytics.com/analytics.js'", ",", "'*google-analytics.com/ga.js'", "]", "}", ")" ]
Creates a new Nydus cluster from the given settings .
def create_cluster ( settings ) : # Pull in our client settings = copy . deepcopy ( settings ) backend = settings . pop ( 'engine' , settings . pop ( 'backend' , None ) ) if isinstance ( backend , basestring ) : Conn = import_string ( backend ) elif backend : Conn = backend else : raise KeyError ( 'backend' ) # Pull in our cluster cluster = settings . pop ( 'cluster' , None ) if not cluster : Cluster = Conn . get_cluster ( ) elif isinstance ( cluster , basestring ) : Cluster = import_string ( cluster ) else : Cluster = cluster # Pull in our router router = settings . pop ( 'router' , None ) if not router : Router = BaseRouter elif isinstance ( router , basestring ) : Router = import_string ( router ) else : Router = router # Build the connection cluster return Cluster ( router = Router , backend = Conn , * * settings )
150
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/__init__.py#L28-L82
[ "def", "libvlc_media_player_has_vout", "(", "p_mi", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_media_player_has_vout'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_player_has_vout'", ",", "(", "(", "1", ",", ")", ",", ")", ",", "None", ",", "ctypes", ".", "c_uint", ",", "MediaPlayer", ")", "return", "f", "(", "p_mi", ")" ]
Gets the translation of a specific field for a specific language code .
def _get_translation ( self , field , code ) : if not code in self . _translation_cache : translations = self . translations . select_related ( ) logger . debug ( u'Matched with field %s for language %s. Attempting lookup.' , field , code ) try : translation_obj = translations . get ( language_code = code ) except ObjectDoesNotExist : translation_obj = None self . _translation_cache [ code ] = translation_obj logger . debug ( u'Translation not found in cache.' ) else : logger . debug ( u'Translation found in cache.' ) # Get the translation from the cache translation_obj = self . _translation_cache . get ( code ) # If this is none, it means that a translation does not exist # It is important to cache this one as well if not translation_obj : raise ObjectDoesNotExist field_value = getattr ( translation_obj , field ) logger . debug ( u'Found translation object %s, returning value %s.' , translation_obj , field_value ) return field_value
151
https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/models.py#L44-L90
[ "def", "on_open", "(", "self", ",", "func", ")", ":", "self", ".", "_on_open", "=", "func", "if", "self", ".", "opened", ":", "# pragma: no cover", "self", ".", "_on_open", "(", "self", ")", "return", "func" ]
Wrapper to allow for easy unicode representation of an object by the specified property . If this wrapper is not able to find the right translation of the specified property it will return the default value instead .
def unicode_wrapper ( self , property , default = ugettext ( 'Untitled' ) ) : # TODO: Test coverage! try : value = getattr ( self , property ) except ValueError : logger . warn ( u'ValueError rendering unicode for %s object.' , self . _meta . object_name ) value = None if not value : value = default return value
152
https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/models.py#L202-L228
[ "def", "color", "(", "requestContext", ",", "seriesList", ",", "theColor", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "color", "=", "theColor", "return", "seriesList" ]
remove insertion columns from aligned fasta file
def strip_inserts ( fasta ) : for seq in parse_fasta ( fasta ) : seq [ 1 ] = '' . join ( [ b for b in seq [ 1 ] if b == '-' or b . isupper ( ) ] ) yield seq
153
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/strip_align_inserts.py#L12-L18
[ "def", "bucket", "(", "self", ",", "experiment", ",", "user_id", ",", "bucketing_id", ")", ":", "if", "not", "experiment", ":", "return", "None", "# Determine if experiment is in a mutually exclusive group", "if", "experiment", ".", "groupPolicy", "in", "GROUP_POLICIES", ":", "group", "=", "self", ".", "config", ".", "get_group", "(", "experiment", ".", "groupId", ")", "if", "not", "group", ":", "return", "None", "user_experiment_id", "=", "self", ".", "find_bucket", "(", "bucketing_id", ",", "experiment", ".", "groupId", ",", "group", ".", "trafficAllocation", ")", "if", "not", "user_experiment_id", ":", "self", ".", "config", ".", "logger", ".", "info", "(", "'User \"%s\" is in no experiment.'", "%", "user_id", ")", "return", "None", "if", "user_experiment_id", "!=", "experiment", ".", "id", ":", "self", ".", "config", ".", "logger", ".", "info", "(", "'User \"%s\" is not in experiment \"%s\" of group %s.'", "%", "(", "user_id", ",", "experiment", ".", "key", ",", "experiment", ".", "groupId", ")", ")", "return", "None", "self", ".", "config", ".", "logger", ".", "info", "(", "'User \"%s\" is in experiment %s of group %s.'", "%", "(", "user_id", ",", "experiment", ".", "key", ",", "experiment", ".", "groupId", ")", ")", "# Bucket user if not in white-list and in group (if any)", "variation_id", "=", "self", ".", "find_bucket", "(", "bucketing_id", ",", "experiment", ".", "id", ",", "experiment", ".", "trafficAllocation", ")", "if", "variation_id", ":", "variation", "=", "self", ".", "config", ".", "get_variation_from_id", "(", "experiment", ".", "key", ",", "variation_id", ")", "self", ".", "config", ".", "logger", ".", "info", "(", "'User \"%s\" is in variation \"%s\" of experiment %s.'", "%", "(", "user_id", ",", "variation", ".", "key", ",", "experiment", ".", "key", ")", ")", "return", "variation", "self", ".", "config", ".", "logger", ".", "info", "(", "'User \"%s\" is in no variation.'", "%", "user_id", ")", "return", "None" ]
Transform a string's graphemes into the mappings given in a different column in the orthography profile .
def transform ( self , word , column = Profile . GRAPHEME_COL , error = errors . replace ) : assert self . op , 'method can only be called with orthography profile.' if column != Profile . GRAPHEME_COL and column not in self . op . column_labels : raise ValueError ( "Column {0} not found in profile." . format ( column ) ) word = self . op . tree . parse ( word , error ) if column == Profile . GRAPHEME_COL : return word out = [ ] for token in word : try : target = self . op . graphemes [ token ] [ column ] except KeyError : target = self . _errors [ 'replace' ] ( token ) if target is not None : if isinstance ( target , ( tuple , list ) ) : out . extend ( target ) else : out . append ( target ) return out
154
https://github.com/cldf/segments/blob/9136a4ec89555bf9b574399ffbb07f3cc9a9f45f/src/segments/tokenizer.py#L231-L270
[ "def", "match_published_date", "(", "self", ",", "start", ",", "end", ",", "match", ")", ":", "self", ".", "_match_minimum_date_time", "(", "'publishedDate'", ",", "start", ",", "match", ")", "self", ".", "_match_maximum_date_time", "(", "'publishedDate'", ",", "end", ",", "match", ")" ]
Function to tokenize an input string and return the output str with ortho rules applied .
def rules ( self , word ) : return self . _rules . apply ( word ) if self . _rules else word
155
https://github.com/cldf/segments/blob/9136a4ec89555bf9b574399ffbb07f3cc9a9f45f/src/segments/tokenizer.py#L272-L288
[ "def", "lv_grid_generators_bus_bar", "(", "nd", ")", ":", "lv_stats", "=", "{", "}", "for", "la", "in", "nd", ".", "_mv_grid_districts", "[", "0", "]", ".", "lv_load_areas", "(", ")", ":", "for", "lvgd", "in", "la", ".", "lv_grid_districts", "(", ")", ":", "station_neighbors", "=", "list", "(", "lvgd", ".", "lv_grid", ".", "_graph", "[", "lvgd", ".", "lv_grid", ".", "_station", "]", ".", "keys", "(", ")", ")", "# check if nodes of a statio are members of list generators", "station_generators", "=", "[", "x", "for", "x", "in", "station_neighbors", "if", "x", "in", "lvgd", ".", "lv_grid", ".", "generators", "(", ")", "]", "lv_stats", "[", "repr", "(", "lvgd", ".", "lv_grid", ".", "_station", ")", "]", "=", "station_generators", "return", "lv_stats" ]
Given a string that is space - delimited on Unicode grapheme clusters , group Unicode modifier letters with their preceding base characters , deal with tie bars , etc .
def combine_modifiers ( self , graphemes ) : result = [ ] temp = "" count = len ( graphemes ) for grapheme in reversed ( graphemes ) : count -= 1 if len ( grapheme ) == 1 and unicodedata . category ( grapheme ) == "Lm" and not ord ( grapheme ) in [ 712 , 716 ] : temp = grapheme + temp # hack for the cases where a space modifier is the first character in the # string if count == 0 : result [ - 1 ] = temp + result [ - 1 ] continue # pragma: no cover # catch and repair stress marks if len ( grapheme ) == 1 and ord ( grapheme ) in [ 712 , 716 ] : result [ - 1 ] = grapheme + result [ - 1 ] temp = "" continue # combine contour tone marks (non-accents) if len ( grapheme ) == 1 and unicodedata . category ( grapheme ) == "Sk" : if len ( result ) == 0 : result . append ( grapheme ) temp = "" continue else : if unicodedata . category ( result [ - 1 ] [ 0 ] ) == "Sk" : result [ - 1 ] = grapheme + result [ - 1 ] temp = "" continue result . append ( grapheme + temp ) temp = "" # last check for tie bars segments = result [ : : - 1 ] i = 0 r = [ ] while i < len ( segments ) : # tie bars if ord ( segments [ i ] [ - 1 ] ) in [ 865 , 860 ] : r . append ( segments [ i ] + segments [ i + 1 ] ) i += 2 else : r . append ( segments [ i ] ) i += 1 return r
156
https://github.com/cldf/segments/blob/9136a4ec89555bf9b574399ffbb07f3cc9a9f45f/src/segments/tokenizer.py#L290-L349
[ "def", "save_model", "(", "self", ",", "file_name", "=", "None", ")", ":", "if", "file_name", "is", "None", ":", "logger", ".", "error", "(", "'Missing file name'", ")", "return", "pcc", ".", "model_to_owl", "(", "self", ".", "model", ",", "file_name", ")" ]
parse catalytic RNAs to gff format
def parse_catalytic ( insertion , gff ) : offset = insertion [ 'offset' ] GeneStrand = insertion [ 'strand' ] if type ( insertion [ 'intron' ] ) is not str : return gff for intron in parse_fasta ( insertion [ 'intron' ] . split ( '|' ) ) : ID , annot , strand , pos = intron [ 0 ] . split ( '>' ) [ 1 ] . split ( ) Start , End = [ int ( i ) for i in pos . split ( '-' ) ] if strand != GeneStrand : if strand == '+' : strand = '-' else : strand = '+' Start , End = End - 2 , Start - 2 Start , End = abs ( Start + offset ) - 1 , abs ( End + offset ) - 1 gff [ '#seqname' ] . append ( insertion [ 'ID' ] ) gff [ 'source' ] . append ( 'Rfam' ) gff [ 'feature' ] . append ( 'Catalytic RNA' ) gff [ 'start' ] . append ( Start ) gff [ 'end' ] . append ( End ) gff [ 'score' ] . append ( '.' ) gff [ 'strand' ] . append ( strand ) gff [ 'frame' ] . append ( '.' ) gff [ 'attribute' ] . append ( 'ID=%s; Name=%s' % ( ID , annot ) ) return gff
157
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L13-L40
[ "def", "communityvisibilitystate", "(", "self", ")", ":", "if", "self", ".", "_communityvisibilitystate", "==", "None", ":", "return", "None", "elif", "self", ".", "_communityvisibilitystate", "in", "self", ".", "VisibilityState", ":", "return", "self", ".", "VisibilityState", "[", "self", ".", "_communityvisibilitystate", "]", "else", ":", "#Invalid State", "return", "None" ]
parse ORF to gff format
def parse_orf ( insertion , gff ) : offset = insertion [ 'offset' ] if type ( insertion [ 'orf' ] ) is not str : return gff for orf in parse_fasta ( insertion [ 'orf' ] . split ( '|' ) ) : ID = orf [ 0 ] . split ( '>' ) [ 1 ] . split ( ) [ 0 ] Start , End , strand = [ int ( i ) for i in orf [ 0 ] . split ( ' # ' ) [ 1 : 4 ] ] if strand == 1 : strand = '+' else : strand = '-' GeneStrand = insertion [ 'strand' ] if strand != GeneStrand : if strand == '+' : strand = '-' else : strand = '+' Start , End = End - 2 , Start - 2 Start , End = abs ( Start + offset ) - 1 , abs ( End + offset ) - 1 annot = orf [ 0 ] . split ( ) [ 1 ] if annot == 'n/a' : annot = 'unknown' gff [ '#seqname' ] . append ( insertion [ 'ID' ] ) gff [ 'source' ] . append ( 'Prodigal and Pfam' ) gff [ 'feature' ] . append ( 'CDS' ) gff [ 'start' ] . append ( Start ) gff [ 'end' ] . append ( End ) gff [ 'score' ] . append ( '.' ) gff [ 'strand' ] . append ( strand ) gff [ 'frame' ] . append ( '.' ) gff [ 'attribute' ] . append ( 'ID=%s; Name=%s' % ( ID , annot ) ) return gff
158
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L42-L76
[ "def", "list_blobs", "(", "call", "=", "None", ",", "kwargs", "=", "None", ")", ":", "# pylint: disable=unused-argument", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "if", "'container'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A container must be specified'", ")", "storageservice", "=", "_get_block_blob_service", "(", "kwargs", ")", "ret", "=", "{", "}", "try", ":", "for", "blob", "in", "storageservice", ".", "list_blobs", "(", "kwargs", "[", "'container'", "]", ")", ".", "items", ":", "ret", "[", "blob", ".", "name", "]", "=", "{", "'blob_type'", ":", "blob", ".", "properties", ".", "blob_type", ",", "'last_modified'", ":", "blob", ".", "properties", ".", "last_modified", ".", "isoformat", "(", ")", ",", "'server_encrypted'", ":", "blob", ".", "properties", ".", "server_encrypted", ",", "}", "except", "Exception", "as", "exc", ":", "log", ".", "warning", "(", "six", ".", "text_type", "(", "exc", ")", ")", "return", "ret" ]
parse insertion to gff format
def parse_insertion ( insertion , gff ) : offset = insertion [ 'offset' ] for ins in parse_fasta ( insertion [ 'insertion sequence' ] . split ( '|' ) ) : strand = insertion [ 'strand' ] ID = ins [ 0 ] . split ( '>' ) [ 1 ] . split ( ) [ 0 ] Start , End = [ int ( i ) for i in ins [ 0 ] . split ( 'gene-pos=' , 1 ) [ 1 ] . split ( ) [ 0 ] . split ( '-' ) ] Start , End = abs ( Start + offset ) , abs ( End + offset ) if strand == '-' : Start , End = End , Start gff [ '#seqname' ] . append ( insertion [ 'ID' ] ) gff [ 'source' ] . append ( insertion [ 'source' ] ) gff [ 'feature' ] . append ( 'IVS' ) gff [ 'start' ] . append ( Start ) gff [ 'end' ] . append ( End ) gff [ 'score' ] . append ( '.' ) gff [ 'strand' ] . append ( strand ) # same as rRNA gff [ 'frame' ] . append ( '.' ) gff [ 'attribute' ] . append ( 'ID=%s' % ( ID ) ) return gff
159
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L78-L99
[ "def", "update_cluster", "(", "cluster_ref", ",", "cluster_spec", ")", ":", "cluster_name", "=", "get_managed_object_name", "(", "cluster_ref", ")", "log", ".", "trace", "(", "'Updating cluster \\'%s\\''", ",", "cluster_name", ")", "try", ":", "task", "=", "cluster_ref", ".", "ReconfigureComputeResource_Task", "(", "cluster_spec", ",", "modify", "=", "True", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "wait_for_task", "(", "task", ",", "cluster_name", ",", "'ClusterUpdateTask'", ")" ]
parse rRNA to gff format
def parse_rRNA ( insertion , seq , gff ) : offset = insertion [ 'offset' ] strand = insertion [ 'strand' ] for rRNA in parse_masked ( seq , 0 ) [ 0 ] : rRNA = '' . join ( rRNA ) Start = seq [ 1 ] . find ( rRNA ) + 1 End = Start + len ( rRNA ) - 1 if strand == '-' : Start , End = End - 2 , Start - 2 pos = ( abs ( Start + offset ) - 1 , abs ( End + offset ) - 1 ) Start , End = min ( pos ) , max ( pos ) source = insertion [ 'source' ] annot = '%s rRNA' % ( source . split ( 'from' , 1 ) [ 0 ] ) gff [ '#seqname' ] . append ( insertion [ 'ID' ] ) gff [ 'source' ] . append ( source ) gff [ 'feature' ] . append ( 'rRNA' ) gff [ 'start' ] . append ( Start ) gff [ 'end' ] . append ( End ) gff [ 'score' ] . append ( '.' ) gff [ 'strand' ] . append ( strand ) gff [ 'frame' ] . append ( '.' ) gff [ 'attribute' ] . append ( 'Name=%s' % ( annot ) ) return gff
160
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L122-L147
[ "def", "get_owned_subscriptions", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "self", ".", "_get_server", "(", "server_id", ")", "return", "list", "(", "self", ".", "_owned_subscriptions", "[", "server_id", "]", ")" ]
convert iTable to gff file
def iTable2GFF ( iTable , fa , contig = False ) : columns = [ '#seqname' , 'source' , 'feature' , 'start' , 'end' , 'score' , 'strand' , 'frame' , 'attribute' ] gff = { c : [ ] for c in columns } for insertion in iTable . iterrows ( ) : insertion = insertion [ 1 ] if insertion [ 'ID' ] not in fa : continue # rRNA strand strand = insertion [ 'sequence' ] . split ( 'strand=' , 1 ) [ 1 ] . split ( ) [ 0 ] # set rRNA positions for reporting features on contig or extracted sequence if contig is True : gene = [ int ( i ) for i in insertion [ 'sequence' ] . split ( 'pos=' , 1 ) [ 1 ] . split ( ) [ 0 ] . split ( '-' ) ] if strand == '-' : offset = - 1 * ( gene [ 1 ] ) else : offset = gene [ 0 ] else : strand = '+' gene = [ 1 , int ( insertion [ 'sequence' ] . split ( 'total-len=' , 1 ) [ 1 ] . split ( ) [ 0 ] ) ] offset = gene [ 0 ] insertion [ 'strand' ] = strand insertion [ 'offset' ] = offset # source for prediction source = insertion [ 'sequence' ] . split ( '::model' , 1 ) [ 0 ] . rsplit ( ' ' , 1 ) [ - 1 ] insertion [ 'source' ] = source # rRNA gene geneAnnot = '%s rRNA gene' % ( source . split ( 'from' , 1 ) [ 0 ] ) geneNum = insertion [ 'sequence' ] . split ( 'seq=' , 1 ) [ 1 ] . split ( ) [ 0 ] gff [ '#seqname' ] . append ( insertion [ 'ID' ] ) gff [ 'source' ] . append ( source ) gff [ 'feature' ] . append ( 'Gene' ) gff [ 'start' ] . append ( gene [ 0 ] ) gff [ 'end' ] . append ( gene [ 1 ] ) gff [ 'score' ] . append ( '.' ) gff [ 'strand' ] . append ( strand ) gff [ 'frame' ] . append ( '.' ) gff [ 'attribute' ] . append ( 'ID=%s; Name=%s' % ( geneNum , geneAnnot ) ) # rRNA gff = parse_rRNA ( insertion , fa [ insertion [ 'ID' ] ] , gff ) # insertions gff = parse_insertion ( insertion , gff ) # orfs gff = parse_orf ( insertion , gff ) # catalytic RNAs gff = parse_catalytic ( insertion , gff ) return pd . DataFrame ( gff ) [ columns ] . drop_duplicates ( )
161
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L149-L197
[ "def", "get_owned_subscriptions", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "self", ".", "_get_server", "(", "server_id", ")", "return", "list", "(", "self", ".", "_owned_subscriptions", "[", "server_id", "]", ")" ]
Given an abundance table , group the counts by every taxonomic level .
def summarize_taxa ( biom ) : tamtcounts = defaultdict ( int ) tot_seqs = 0.0 for row , col , amt in biom [ 'data' ] : tot_seqs += amt rtax = biom [ 'rows' ] [ row ] [ 'metadata' ] [ 'taxonomy' ] for i , t in enumerate ( rtax ) : t = t . strip ( ) if i == len ( rtax ) - 1 and len ( t ) > 3 and len ( rtax [ - 1 ] ) > 3 : t = 's__' + rtax [ i - 1 ] . strip ( ) . split ( '_' ) [ - 1 ] + '_' + t . split ( '_' ) [ - 1 ] tamtcounts [ t ] += amt lvlData = { lvl : levelData ( tamtcounts , tot_seqs , lvl ) for lvl in [ 'k' , 'p' , 'c' , 'o' , 'f' , 'g' , 's' ] } return tot_seqs , lvlData
162
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/biom_phyla_summary.py#L27-L46
[ "async", "def", "async_init", "(", "self", ")", "->", "None", ":", "if", "not", "self", ".", "_client_established", ":", "await", "self", ".", "request", "(", "'put'", ",", "'clients/{0}'", ".", "format", "(", "self", ".", "client_uuid", ")", ",", "data", "=", "{", "'app_id'", ":", "DEFAULT_APP_ID", ",", "'app_version'", ":", "DEFAULT_APP_VERSION", ",", "'locale'", ":", "self", ".", "_locale", "}", ")", "self", ".", "_client_established", "=", "True", "resp", "=", "await", "self", ".", "request", "(", "'post'", ",", "'clients/{0}/sessions'", ".", "format", "(", "self", ".", "client_uuid", ")", ",", "data", "=", "{", "'email'", ":", "self", ".", "_email", ",", "'password'", ":", "self", ".", "_password", "}", ")", "if", "not", "self", ".", "user_uuid", ":", "self", ".", "user_uuid", "=", "resp", "[", "'result'", "]", "[", "'user'", "]", "[", "'user_uuid'", "]", "self", ".", "_session_expiry", "=", "resp", "[", "'result'", "]", "[", "'session_expiration_timestamp'", "]", "self", ".", "tiles", "=", "Tile", "(", "self", ".", "request", ",", "self", ".", "user_uuid", ")" ]
Returns the path to the custom image set for this game or None if no image is set
def custom_image ( self , user ) : for ext in self . valid_custom_image_extensions ( ) : image_location = self . _custom_image_path ( user , ext ) if os . path . isfile ( image_location ) : return image_location return None
163
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/legacy/game.py#L41-L48
[ "def", "wncond", "(", "left", ",", "right", ",", "window", ")", ":", "assert", "isinstance", "(", "window", ",", "stypes", ".", "SpiceCell", ")", "assert", "window", ".", "dtype", "==", "1", "left", "=", "ctypes", ".", "c_double", "(", "left", ")", "right", "=", "ctypes", ".", "c_double", "(", "right", ")", "libspice", ".", "wncond_c", "(", "left", ",", "right", ",", "ctypes", ".", "byref", "(", "window", ")", ")", "return", "window" ]
Sets a custom image for the game . image_path should refer to an image file on disk
def set_image ( self , user , image_path ) : _ , ext = os . path . splitext ( image_path ) shutil . copy ( image_path , self . _custom_image_path ( user , ext ) )
164
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/legacy/game.py#L50-L54
[ "def", "_update", "(", "self", ")", ":", "self", ".", "clear", "(", ")", "self", ".", "_set_boutons_communs", "(", ")", "if", "self", ".", "interface", ":", "self", ".", "addSeparator", "(", ")", "l_actions", "=", "self", ".", "interface", ".", "get_actions_toolbar", "(", ")", "self", ".", "_set_boutons_interface", "(", "l_actions", ")" ]
get a list of mapped reads
def sam_list ( sam ) : list = [ ] for file in sam : for line in file : if line . startswith ( '@' ) is False : line = line . strip ( ) . split ( ) id , map = line [ 0 ] , int ( line [ 1 ] ) if map != 4 and map != 8 : list . append ( id ) return set ( list )
165
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/filter_fastq_sam.py#L7-L19
[ "def", "search", "(", "query", ",", "team", "=", "None", ")", ":", "if", "team", "is", "None", ":", "team", "=", "_find_logged_in_team", "(", ")", "if", "team", "is", "not", "None", ":", "session", "=", "_get_session", "(", "team", ")", "response", "=", "session", ".", "get", "(", "\"%s/api/search/\"", "%", "get_registry_url", "(", "team", ")", ",", "params", "=", "dict", "(", "q", "=", "query", ")", ")", "print", "(", "\"* Packages in team %s\"", "%", "team", ")", "packages", "=", "response", ".", "json", "(", ")", "[", "'packages'", "]", "for", "pkg", "in", "packages", ":", "print", "(", "(", "\"%s:\"", "%", "team", ")", "+", "(", "\"%(owner)s/%(name)s\"", "%", "pkg", ")", ")", "if", "len", "(", "packages", ")", "==", "0", ":", "print", "(", "\"(No results)\"", ")", "print", "(", "\"* Packages in public cloud\"", ")", "public_session", "=", "_get_session", "(", "None", ")", "response", "=", "public_session", ".", "get", "(", "\"%s/api/search/\"", "%", "get_registry_url", "(", "None", ")", ",", "params", "=", "dict", "(", "q", "=", "query", ")", ")", "packages", "=", "response", ".", "json", "(", ")", "[", "'packages'", "]", "for", "pkg", "in", "packages", ":", "print", "(", "\"%(owner)s/%(name)s\"", "%", "pkg", ")", "if", "len", "(", "packages", ")", "==", "0", ":", "print", "(", "\"(No results)\"", ")" ]
get a list of mapped reads ; require that both pairs are mapped in the sam file in order to remove the reads
def sam_list_paired ( sam ) : list = [ ] pair = [ '1' , '2' ] prev = '' for file in sam : for line in file : if line . startswith ( '@' ) is False : line = line . strip ( ) . split ( ) id , map = line [ 0 ] , int ( line [ 1 ] ) if map != 4 and map != 8 : read = id . rsplit ( '/' ) [ 0 ] if read == prev : list . append ( read ) prev = read return set ( list )
166
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/filter_fastq_sam.py#L21-L39
[ "def", "start", "(", "self", ")", ":", "self", ".", "startTime", "=", "time", ".", "time", "(", ")", "self", ".", "configure", "(", "text", "=", "'{0:<d} s'", ".", "format", "(", "0", ")", ")", "self", ".", "update", "(", ")" ]
require that both pairs are mapped in the sam file in order to remove the reads
def filter_paired ( list ) : pairs = { } filtered = [ ] for id in list : read = id . rsplit ( '/' ) [ 0 ] if read not in pairs : pairs [ read ] = [ ] pairs [ read ] . append ( id ) for read in pairs : ids = pairs [ read ] if len ( ids ) == 2 : filtered . extend ( ids ) return set ( filtered )
167
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/filter_fastq_sam.py#L41-L56
[ "def", "_retry_until_powered_on", "(", "self", ",", "power", ")", ":", "# If the system is in the same power state as", "# requested by the user, it gives the error", "# InvalidOperationForSystemState. To avoid this error", "# the power state is checked before power on", "# operation is performed.", "status", "=", "self", ".", "get_host_power_status", "(", ")", "if", "(", "status", "!=", "power", ")", ":", "self", ".", "_perform_power_op", "(", "POWER_STATE", "[", "power", "]", ")", "return", "self", ".", "get_host_power_status", "(", ")", "else", ":", "return", "status" ]
print fastq from sam
def sam2fastq ( line ) : fastq = [ ] fastq . append ( '@%s' % line [ 0 ] ) fastq . append ( line [ 9 ] ) fastq . append ( '+%s' % line [ 0 ] ) fastq . append ( line [ 10 ] ) return fastq
168
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/mapped.py#L13-L22
[ "def", "_createDatabase", "(", "self", ")", ":", "# create experiment metadata table", "command", "=", "\"\"\"\n CREATE TABLE {tn} (\n {k} INT PRIMARY KEY NOT NULL,\n START_TIME INT NOT NULL,\n END_TIME INT NOT NULL,\n ELAPSED_TIME INT NOT NULL,\n SETUP_TIME INT NOT NULL,\n EXPERIMENT_TIME INT NOT NULL,\n TEARDOWN_TIME INT NOT NULL,\n STATUS BOOLEAN NOT NULL)\n \"\"\"", "self", ".", "_connection", ".", "execute", "(", "command", ".", "format", "(", "tn", "=", "self", ".", "EXPERIMENT_METADATA_TABLE", ",", "k", "=", "self", ".", "EXPERIMENT_ID", ")", ")", "# commit the changes", "self", ".", "commit", "(", ")" ]
- check to see if the read maps with <= threshold number of mismatches - mm_option = one or both depending on whether or not one or both reads in a pair need to pass the mismatch threshold - pair can be False if read does not have a pair - make sure alignment score is not 0 which would indicate that the read was not aligned to the reference
def check_mismatches ( read , pair , mismatches , mm_option , req_map ) : # if read is not paired, make sure it is mapped and that mm <= thresh if pair is False : mm = count_mismatches ( read ) if mm is False : return False # if no threshold is supplied, return True if mismatches is False : return True # passes threshold? if mm <= mismatches : return True # paired reads r_mm = count_mismatches ( read ) p_mm = count_mismatches ( pair ) # if neither read is mapped, return False if r_mm is False and p_mm is False : return False # if no threshold, return True if mismatches is False : return True # if req_map is True, both reads have to map if req_map is True : if r_mm is False or p_mm is False : return False ## if option is 'one,' only one read has to pass threshold if mm_option == 'one' : if ( r_mm is not False and r_mm <= mismatches ) or ( p_mm is not False and p_mm <= mismatches ) : return True ## if option is 'both,' both reads have to pass threshold if mm_option == 'both' : ## if one read in pair does not map to the scaffold, ## make sure the other read passes threshold if r_mm is False : if p_mm <= mismatches : return True elif p_mm is False : if r_mm <= mismatches : return True elif ( r_mm is not False and r_mm <= mismatches ) and ( p_mm is not False and p_mm <= mismatches ) : return True return False
169
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/mapped.py#L36-L84
[ "async", "def", "chain", "(", "*", "sources", ")", ":", "for", "source", "in", "sources", ":", "async", "with", "streamcontext", "(", "source", ")", "as", "streamer", ":", "async", "for", "item", "in", "streamer", ":", "yield", "item" ]
determine whether or not reads map to specific region of scaffold
def check_region ( read , pair , region ) : if region is False : return True for mapping in read , pair : if mapping is False : continue start , length = int ( mapping [ 3 ] ) , len ( mapping [ 9 ] ) r = [ start , start + length - 1 ] if get_overlap ( r , region ) > 0 : return True return False
170
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/mapped.py#L92-L105
[ "def", "Run", "(", "self", ")", ":", "global", "DB", "# pylint: disable=global-statement", "global", "REL_DB", "# pylint: disable=global-statement", "global", "BLOBS", "# pylint: disable=global-statement", "if", "flags", ".", "FLAGS", ".", "list_storage", ":", "self", ".", "_ListStorageOptions", "(", ")", "sys", ".", "exit", "(", "0", ")", "try", ":", "cls", "=", "DataStore", ".", "GetPlugin", "(", "config", ".", "CONFIG", "[", "\"Datastore.implementation\"", "]", ")", "except", "KeyError", ":", "msg", "=", "(", "\"No Storage System %s found.\"", "%", "config", ".", "CONFIG", "[", "\"Datastore.implementation\"", "]", ")", "if", "config", ".", "CONFIG", "[", "\"Datastore.implementation\"", "]", "==", "\"SqliteDataStore\"", ":", "msg", "=", "\"The SQLite datastore is no longer supported.\"", "print", "(", "msg", ")", "print", "(", "\"Available options:\"", ")", "self", ".", "_ListStorageOptions", "(", ")", "raise", "ValueError", "(", "msg", ")", "DB", "=", "cls", "(", ")", "# pylint: disable=g-bad-name", "DB", ".", "Initialize", "(", ")", "atexit", ".", "register", "(", "DB", ".", "Flush", ")", "monitor_port", "=", "config", ".", "CONFIG", "[", "\"Monitoring.http_port\"", "]", "if", "monitor_port", "!=", "0", ":", "DB", ".", "InitializeMonitorThread", "(", ")", "# Initialize the blobstore.", "blobstore_name", "=", "config", ".", "CONFIG", ".", "Get", "(", "\"Blobstore.implementation\"", ")", "try", ":", "cls", "=", "blob_store", ".", "REGISTRY", "[", "blobstore_name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"No blob store %s found.\"", "%", "blobstore_name", ")", "BLOBS", "=", "blob_store", ".", "BlobStoreValidationWrapper", "(", "cls", "(", ")", ")", "# Initialize a relational DB if configured.", "rel_db_name", "=", "config", ".", "CONFIG", "[", "\"Database.implementation\"", "]", "if", "not", "rel_db_name", ":", "return", "try", ":", "cls", "=", "registry_init", ".", "REGISTRY", "[", "rel_db_name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Database %s not found.\"", "%", "rel_db_name", ")", "logging", ".", "info", "(", "\"Using database implementation %s\"", ",", "rel_db_name", ")", "REL_DB", "=", "db", ".", "DatabaseValidationWrapper", "(", "cls", "(", ")", ")" ]
Returns a Steam object representing the current Steam installation on the user's computer . If the user doesn't have Steam installed , returns None .
def get_steam ( ) : # Helper function which checks if the potential userdata directory exists # and returns a new Steam instance with that userdata directory if it does. # If the directory doesnt exist it returns None instead helper = lambda udd : Steam ( udd ) if os . path . exists ( udd ) else None # For both OS X and Linux, Steam stores it's userdata in a consistent # location. plat = platform . system ( ) if plat == 'Darwin' : return helper ( paths . default_osx_userdata_path ( ) ) if plat == 'Linux' : return helper ( paths . default_linux_userdata_path ( ) ) # Windows is a bit trickier. The userdata directory is stored in the Steam # installation directory, meaning that theoretically it could be anywhere. # Luckily, Valve stores the installation directory in the registry, so its # still possible for us to figure out automatically if plat == 'Windows' : possible_dir = winutils . find_userdata_directory ( ) # Unlike the others, `possible_dir` might be None (if something odd # happened with the registry) return helper ( possible_dir ) if possible_dir is not None else None # This should never be hit. Windows, OS X, and Linux should be the only # supported platforms. # TODO: Add logging here so that the user (developer) knows that something # odd happened. return None
171
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/steam.py#L12-L43
[ "def", "delete_binding", "(", "self", ",", "vhost", ",", "exchange", ",", "queue", ",", "rt_key", ")", ":", "vhost", "=", "quote", "(", "vhost", ",", "''", ")", "exchange", "=", "quote", "(", "exchange", ",", "''", ")", "queue", "=", "quote", "(", "queue", ",", "''", ")", "body", "=", "''", "path", "=", "Client", ".", "urls", "[", "'rt_bindings_between_exch_queue'", "]", "%", "(", "vhost", ",", "exchange", ",", "queue", ",", "rt_key", ")", "return", "self", ".", "_call", "(", "path", ",", "'DELETE'", ",", "headers", "=", "Client", ".", "json_headers", ")" ]
normalize from zero to one for row or table
def zero_to_one ( table , option ) : if option == 'table' : m = min ( min ( table ) ) ma = max ( max ( table ) ) t = [ ] for row in table : t_row = [ ] if option != 'table' : m , ma = min ( row ) , max ( row ) for i in row : if ma == m : t_row . append ( 0 ) else : t_row . append ( ( i - m ) / ( ma - m ) ) t . append ( t_row ) return t
172
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L18-L36
[ "def", "create_stream_subscription", "(", "self", ",", "stream", ",", "on_data", ",", "timeout", "=", "60", ")", ":", "options", "=", "rest_pb2", ".", "StreamSubscribeRequest", "(", ")", "options", ".", "stream", "=", "stream", "manager", "=", "WebSocketSubscriptionManager", "(", "self", ".", "_client", ",", "resource", "=", "'stream'", ",", "options", "=", "options", ")", "# Represent subscription as a future", "subscription", "=", "WebSocketSubscriptionFuture", "(", "manager", ")", "wrapped_callback", "=", "functools", ".", "partial", "(", "_wrap_callback_parse_stream_data", ",", "subscription", ",", "on_data", ")", "manager", ".", "open", "(", "wrapped_callback", ",", "instance", "=", "self", ".", "_instance", ")", "# Wait until a reply or exception is received", "subscription", ".", "reply", "(", "timeout", "=", "timeout", ")", "return", "subscription" ]
calculate percent of total
def pertotal ( table , option ) : if option == 'table' : total = sum ( [ i for line in table for i in line ] ) t = [ ] for row in table : t_row = [ ] if option != 'table' : total = sum ( row ) for i in row : if total == 0 : t_row . append ( 0 ) else : t_row . append ( i / total * 100 ) t . append ( t_row ) return t
173
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L38-L55
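A quick check of pertotal above; in 'table' mode every value becomes a percent of the grand total, otherwise a percent of its row total:

table = [[2, 3, 5], [1, 0, 0]]
pertotal(table, 'row')    # -> [[20.0, 30.0, 50.0], [100.0, 0.0, 0.0]]
pertotal(table, 'table')  # grand total is 11, so e.g. 2 -> 2/11*100 ~= 18.18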
[ "def", "mod_repo", "(", "repo", ",", "*", "*", "kwargs", ")", ":", "repos", "=", "list_repos", "(", ")", "found", "=", "False", "uri", "=", "''", "if", "'uri'", "in", "kwargs", ":", "uri", "=", "kwargs", "[", "'uri'", "]", "for", "repository", "in", "repos", ":", "source", "=", "repos", "[", "repository", "]", "[", "0", "]", "if", "source", "[", "'name'", "]", "==", "repo", ":", "found", "=", "True", "repostr", "=", "''", "if", "'enabled'", "in", "kwargs", "and", "not", "kwargs", "[", "'enabled'", "]", ":", "repostr", "+=", "'# '", "if", "'compressed'", "in", "kwargs", ":", "repostr", "+=", "'src/gz '", "if", "kwargs", "[", "'compressed'", "]", "else", "'src'", "else", ":", "repostr", "+=", "'src/gz'", "if", "source", "[", "'compressed'", "]", "else", "'src'", "repo_alias", "=", "kwargs", "[", "'alias'", "]", "if", "'alias'", "in", "kwargs", "else", "repo", "if", "' '", "in", "repo_alias", ":", "repostr", "+=", "' \"{0}\"'", ".", "format", "(", "repo_alias", ")", "else", ":", "repostr", "+=", "' {0}'", ".", "format", "(", "repo_alias", ")", "repostr", "+=", "' {0}'", ".", "format", "(", "kwargs", "[", "'uri'", "]", "if", "'uri'", "in", "kwargs", "else", "source", "[", "'uri'", "]", ")", "trusted", "=", "kwargs", ".", "get", "(", "'trusted'", ")", "repostr", "=", "_set_trusted_option_if_needed", "(", "repostr", ",", "trusted", ")", "if", "trusted", "is", "not", "None", "else", "_set_trusted_option_if_needed", "(", "repostr", ",", "source", ".", "get", "(", "'trusted'", ")", ")", "_mod_repo_in_file", "(", "repo", ",", "repostr", ",", "source", "[", "'file'", "]", ")", "elif", "uri", "and", "source", "[", "'uri'", "]", "==", "uri", ":", "raise", "CommandExecutionError", "(", "'Repository \\'{0}\\' already exists as \\'{1}\\'.'", ".", "format", "(", "uri", ",", "source", "[", "'name'", "]", ")", ")", "if", "not", "found", ":", "# Need to add a new repo", "if", "'uri'", "not", "in", "kwargs", ":", "raise", "CommandExecutionError", "(", "'Repository \\'{0}\\' not found and no URI passed to create one.'", ".", "format", "(", "repo", ")", ")", "properties", "=", "{", "'uri'", ":", "kwargs", "[", "'uri'", "]", "}", "# If compressed is not defined, assume True", "properties", "[", "'compressed'", "]", "=", "kwargs", "[", "'compressed'", "]", "if", "'compressed'", "in", "kwargs", "else", "True", "# If enabled is not defined, assume True", "properties", "[", "'enabled'", "]", "=", "kwargs", "[", "'enabled'", "]", "if", "'enabled'", "in", "kwargs", "else", "True", "properties", "[", "'trusted'", "]", "=", "kwargs", ".", "get", "(", "'trusted'", ")", "_add_new_repo", "(", "repo", ",", "properties", ")", "if", "'refresh'", "in", "kwargs", ":", "refresh_db", "(", ")" ]
scale table based on the column with the largest sum
def scale ( table ) : t = [ ] columns = [ [ ] for i in table [ 0 ] ] for row in table : for i , v in enumerate ( row ) : columns [ i ] . append ( v ) sums = [ float ( sum ( i ) ) for i in columns ] scale_to = float ( max ( sums ) ) scale_factor = [ scale_to / i for i in sums if i != 0 ] for row in table : t . append ( [ a * b for a , b in zip ( row , scale_factor ) ] ) return t
174
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L79-L93
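A worked example of scale above; every column is rescaled so its sum matches the largest column sum:

table = [[1, 2], [3, 4]]
# column sums are 4 and 6, so the first column is multiplied by 6/4
scale(table)  # -> [[1.5, 2.0], [4.5, 4.0]]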
[ "def", "disable_paging", "(", "self", ",", "delay_factor", "=", "1", ")", ":", "check_command", "=", "\"get system status | grep Virtual\"", "output", "=", "self", ".", "send_command_timing", "(", "check_command", ")", "self", ".", "allow_disable_global", "=", "True", "self", ".", "vdoms", "=", "False", "self", ".", "_output_mode", "=", "\"more\"", "if", "\"Virtual domain configuration: enable\"", "in", "output", ":", "self", ".", "vdoms", "=", "True", "vdom_additional_command", "=", "\"config global\"", "output", "=", "self", ".", "send_command_timing", "(", "vdom_additional_command", ",", "delay_factor", "=", "2", ")", "if", "\"Command fail\"", "in", "output", ":", "self", ".", "allow_disable_global", "=", "False", "self", ".", "remote_conn", ".", "close", "(", ")", "self", ".", "establish_connection", "(", "width", "=", "100", ",", "height", "=", "1000", ")", "new_output", "=", "\"\"", "if", "self", ".", "allow_disable_global", ":", "self", ".", "_retrieve_output_mode", "(", ")", "disable_paging_commands", "=", "[", "\"config system console\"", ",", "\"set output standard\"", ",", "\"end\"", ",", "]", "# There is an extra 'end' required if in multi-vdoms are enabled", "if", "self", ".", "vdoms", ":", "disable_paging_commands", ".", "append", "(", "\"end\"", ")", "outputlist", "=", "[", "self", ".", "send_command_timing", "(", "command", ",", "delay_factor", "=", "2", ")", "for", "command", "in", "disable_paging_commands", "]", "# Should test output is valid", "new_output", "=", "self", ".", "RETURN", ".", "join", "(", "outputlist", ")", "return", "output", "+", "new_output" ]
fit to normal distribution
def norm ( table ) : print ( '# norm dist is broken' , file = sys . stderr ) exit ( ) from matplotlib . pyplot import hist as hist t = [ ] for i in table : t . append ( np . ndarray . tolist ( hist ( i , bins = len ( i ) , normed = True ) [ 0 ] ) ) return t
175
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L95-L105
[ "def", "scan_posts", "(", "self", ",", "really", "=", "True", ",", "ignore_quit", "=", "False", ",", "quiet", "=", "True", ")", ":", "while", "(", "self", ".", "db", ".", "exists", "(", "'site:lock'", ")", "and", "int", "(", "self", ".", "db", ".", "get", "(", "'site:lock'", ")", ")", "!=", "0", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Waiting for DB lock...\"", ")", "time", ".", "sleep", "(", "0.5", ")", "self", ".", "db", ".", "incr", "(", "'site:lock'", ")", "self", ".", "logger", ".", "info", "(", "\"Lock acquired.\"", ")", "self", ".", "logger", ".", "info", "(", "\"Scanning site...\"", ")", "self", ".", "_site", ".", "scan_posts", "(", "really", ",", "ignore_quit", ",", "quiet", ")", "timeline", "=", "[", "]", "for", "post", "in", "self", ".", "_site", ".", "timeline", ":", "data", "=", "[", "post", ".", "source_path", ",", "post", ".", "folder", ",", "post", ".", "is_post", ",", "post", ".", "_template_name", ",", "post", ".", "compiler", ".", "name", "]", "timeline", ".", "append", "(", "json", ".", "dumps", "(", "data", ")", ")", "self", ".", "db", ".", "delete", "(", "'site:timeline'", ")", "if", "timeline", ":", "self", ".", "db", ".", "rpush", "(", "'site:timeline'", ",", "*", "timeline", ")", "self", ".", "_write_indexlist", "(", "'posts'", ")", "self", ".", "_write_indexlist", "(", "'all_posts'", ")", "self", ".", "_write_indexlist", "(", "'pages'", ")", "self", ".", "db", ".", "incr", "(", "'site:rev'", ")", "self", ".", "db", ".", "decr", "(", "'site:lock'", ")", "self", ".", "logger", ".", "info", "(", "\"Lock released.\"", ")", "self", ".", "logger", ".", "info", "(", "\"Site scanned.\"", ")", "self", ".", "reload_site", "(", ")" ]
log transform each value in table
def log_trans ( table ) : t = [ ] all = [ item for sublist in table for item in sublist ] if min ( all ) == 0 : scale = min ( [ i for i in all if i != 0 ] ) * 10e-10 else : scale = 0 for i in table : t . append ( np . ndarray . tolist ( np . log10 ( [ j + scale for j in i ] ) ) ) return t
176
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L107-L119
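A sketch of log_trans above; because this table contains a zero, a pseudocount of (smallest nonzero value * 10e-10) is added to every cell before taking log10:

table = [[1, 10, 100], [0, 1, 10]]
log_trans(table)  # -> approximately [[0.0, 1.0, 2.0], [-9.0, 0.0, 1.0]]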
[ "def", "update_dvportgroup", "(", "portgroup_ref", ",", "spec", ")", ":", "pg_name", "=", "get_managed_object_name", "(", "portgroup_ref", ")", "log", ".", "trace", "(", "'Updating portgrouo %s'", ",", "pg_name", ")", "try", ":", "task", "=", "portgroup_ref", ".", "ReconfigureDVPortgroup_Task", "(", "spec", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{0}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "wait_for_task", "(", "task", ",", "pg_name", ",", "six", ".", "text_type", "(", "task", ".", "__class__", ")", ")" ]
Box-Cox transform table
def box_cox ( table ) : from scipy . stats import boxcox as bc t = [ ] for i in table : if min ( i ) == 0 : scale = min ( [ j for j in i if j != 0 ] ) * 10e-10 else : scale = 0 t . append ( np . ndarray . tolist ( bc ( np . array ( [ j + scale for j in i ] ) ) [ 0 ] ) ) return t
177
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L121-L133
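A minimal use of box_cox above; it requires scipy, fits the Box-Cox lambda separately for each row, and uses the same tiny-pseudocount trick when a row contains zeros:

table = [[1.0, 2.0, 3.0, 4.0]]
box_cox(table)  # one transformed row; lambda is chosen by scipy.stats.boxcox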
[ "def", "users", "(", "self", ",", "start", "=", "1", ",", "num", "=", "10", ",", "sortField", "=", "\"fullName\"", ",", "sortOrder", "=", "\"asc\"", ",", "role", "=", "None", ")", ":", "users", "=", "[", "]", "url", "=", "self", ".", "_url", "+", "\"/users\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"start\"", ":", "start", ",", "\"num\"", ":", "num", "}", "if", "not", "role", "is", "None", ":", "params", "[", "'role'", "]", "=", "role", "if", "not", "sortField", "is", "None", ":", "params", "[", "'sortField'", "]", "=", "sortField", "if", "not", "sortOrder", "is", "None", ":", "params", "[", "'sortOrder'", "]", "=", "sortOrder", "from", ".", "_community", "import", "Community", "res", "=", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "if", "\"users\"", "in", "res", ":", "if", "len", "(", "res", "[", "'users'", "]", ")", ">", "0", ":", "parsed", "=", "urlparse", ".", "urlparse", "(", "self", ".", "_url", ")", "if", "parsed", ".", "netloc", ".", "lower", "(", ")", ".", "find", "(", "'arcgis.com'", ")", "==", "-", "1", ":", "cURL", "=", "\"%s://%s/%s/sharing/rest/community\"", "%", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ",", "parsed", ".", "path", "[", "1", ":", "]", ".", "split", "(", "'/'", ")", "[", "0", "]", ")", "else", ":", "cURL", "=", "\"%s://%s/sharing/rest/community\"", "%", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ")", "com", "=", "Community", "(", "url", "=", "cURL", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "for", "r", "in", "res", "[", "'users'", "]", ":", "users", ".", "append", "(", "com", ".", "users", ".", "user", "(", "r", "[", "\"username\"", "]", ")", ")", "res", "[", "'users'", "]", "=", "users", "return", "res" ]
inverse hyperbolic sine transformation
def inh ( table ) : t = [ ] for i in table : t . append ( np . ndarray . tolist ( np . arcsinh ( i ) ) ) return t
178
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L135-L142
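The inh transform above is np.arcsinh, i.e. ln(x + sqrt(x^2 + 1)); unlike a log transform it is defined at zero, so no pseudocount is needed:

inh([[0, 1, 10]])  # -> [[0.0, 0.8813..., 2.9982...]]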
[ "def", "_index_audio_cmu", "(", "self", ",", "basename", "=", "None", ",", "replace_already_indexed", "=", "False", ")", ":", "self", ".", "_prepare_audio", "(", "basename", "=", "basename", ",", "replace_already_indexed", "=", "replace_already_indexed", ")", "for", "staging_audio_basename", "in", "self", ".", "_list_audio_files", "(", "sub_dir", "=", "\"staging\"", ")", ":", "original_audio_name", "=", "''", ".", "join", "(", "staging_audio_basename", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "[", ":", "-", "3", "]", "pocketsphinx_command", "=", "''", ".", "join", "(", "[", "\"pocketsphinx_continuous\"", ",", "\"-infile\"", ",", "str", "(", "\"{}/staging/{}\"", ".", "format", "(", "self", ".", "src_dir", ",", "staging_audio_basename", ")", ")", ",", "\"-time\"", ",", "\"yes\"", ",", "\"-logfn\"", ",", "\"/dev/null\"", "]", ")", "try", ":", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "\"Now indexing {}\"", ".", "format", "(", "staging_audio_basename", ")", ")", "output", "=", "subprocess", ".", "check_output", "(", "[", "\"pocketsphinx_continuous\"", ",", "\"-infile\"", ",", "str", "(", "\"{}/staging/{}\"", ".", "format", "(", "self", ".", "src_dir", ",", "staging_audio_basename", ")", ")", ",", "\"-time\"", ",", "\"yes\"", ",", "\"-logfn\"", ",", "\"/dev/null\"", "]", ",", "universal_newlines", "=", "True", ")", ".", "split", "(", "'\\n'", ")", "str_timestamps_with_sil_conf", "=", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "split", "(", "\" \"", ")", ",", "filter", "(", "None", ",", "output", "[", "1", ":", "]", ")", ")", ")", "# Timestamps are putted in a list of a single element. To match", "# Watson's output.", "self", ".", "__timestamps_unregulated", "[", "original_audio_name", "+", "\".wav\"", "]", "=", "[", "(", "self", ".", "_timestamp_extractor_cmu", "(", "staging_audio_basename", ",", "str_timestamps_with_sil_conf", ")", ")", "]", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "\"Done indexing {}\"", ".", "format", "(", "staging_audio_basename", ")", ")", "except", "OSError", "as", "e", ":", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "e", ",", "\"The command was: {}\"", ".", "format", "(", "pocketsphinx_command", ")", ")", "self", ".", "__errors", "[", "(", "time", "(", ")", ",", "staging_audio_basename", ")", "]", "=", "e", "self", ".", "_timestamp_regulator", "(", ")", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "\"Finished indexing procedure\"", ")" ]
from SparCC: randomly draw from the corresponding posterior Dirichlet distribution with a uniform prior
def diri ( table ) : t = [ ] for i in table : a = [ j + 1 for j in i ] t . append ( np . ndarray . tolist ( np . random . mtrand . dirichlet ( a ) ) ) return t
179
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L144-L153
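A sketch of diri above, assuming numpy is imported as np as in the module; adding 1 to each count gives the posterior Dirichlet parameters under a uniform prior, and each returned row is one random draw summing to 1:

np.random.seed(0)  # only for a reproducible draw
diri([[5, 3, 0, 2]])  # one draw from Dirichlet(6, 4, 1, 3)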
[ "def", "_ParseKeysFromFindSpecs", "(", "self", ",", "parser_mediator", ",", "win_registry", ",", "find_specs", ")", ":", "searcher", "=", "dfwinreg_registry_searcher", ".", "WinRegistrySearcher", "(", "win_registry", ")", "for", "registry_key_path", "in", "iter", "(", "searcher", ".", "Find", "(", "find_specs", "=", "find_specs", ")", ")", ":", "if", "parser_mediator", ".", "abort", ":", "break", "registry_key", "=", "searcher", ".", "GetKeyByPath", "(", "registry_key_path", ")", "self", ".", "_ParseKey", "(", "parser_mediator", ",", "registry_key", ")" ]
Given a list of sample IDs, generate unique n-base barcodes for each. Note that only 4^n unique barcodes are possible.
def generate_barcodes ( nIds , codeLen = 12 ) : def next_code ( b , c , i ) : return c [ : i ] + b + ( c [ i + 1 : ] if i < - 1 else '' ) def rand_base ( ) : return random . choice ( [ 'A' , 'T' , 'C' , 'G' ] ) def rand_seq ( n ) : return '' . join ( [ rand_base ( ) for _ in range ( n ) ] ) # homopolymer filter regex: match if 4 identical bases in a row hpf = re . compile ( 'aaaa|cccc|gggg|tttt' , re . IGNORECASE ) while True : codes = [ rand_seq ( codeLen ) ] if ( hpf . search ( codes [ 0 ] ) is None ) : break idx = 0 while len ( codes ) < nIds : idx -= 1 if idx < - codeLen : idx = - 1 codes . append ( rand_seq ( codeLen ) ) else : nc = next_code ( rand_base ( ) , codes [ - 1 ] , idx ) if hpf . search ( nc ) is None : codes . append ( nc ) codes = list ( set ( codes ) ) return codes
180
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/sanger_qiimify.py#L94-L128
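A usage sketch of generate_barcodes above; note that duplicates are removed only at the end (list(set(codes))), so the returned list can occasionally be shorter than nIds and callers may want to re-run until enough codes survive:

codes = generate_barcodes(4, codeLen=8)
# e.g. ['ATCGGTCA', ...]; no code contains a run of four identical bases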
[ "def", "_optimize_providers", "(", "self", ",", "providers", ")", ":", "new_providers", "=", "{", "}", "provider_by_driver", "=", "{", "}", "for", "alias", ",", "driver", "in", "six", ".", "iteritems", "(", "providers", ")", ":", "for", "name", ",", "data", "in", "six", ".", "iteritems", "(", "driver", ")", ":", "if", "name", "not", "in", "provider_by_driver", ":", "provider_by_driver", "[", "name", "]", "=", "{", "}", "provider_by_driver", "[", "name", "]", "[", "alias", "]", "=", "data", "for", "driver", ",", "providers_data", "in", "six", ".", "iteritems", "(", "provider_by_driver", ")", ":", "fun", "=", "'{0}.optimize_providers'", ".", "format", "(", "driver", ")", "if", "fun", "not", "in", "self", ".", "clouds", ":", "log", ".", "debug", "(", "'The \\'%s\\' cloud driver is unable to be optimized.'", ",", "driver", ")", "for", "name", ",", "prov_data", "in", "six", ".", "iteritems", "(", "providers_data", ")", ":", "if", "name", "not", "in", "new_providers", ":", "new_providers", "[", "name", "]", "=", "{", "}", "new_providers", "[", "name", "]", "[", "driver", "]", "=", "prov_data", "continue", "new_data", "=", "self", ".", "clouds", "[", "fun", "]", "(", "providers_data", ")", "if", "new_data", ":", "for", "name", ",", "prov_data", "in", "six", ".", "iteritems", "(", "new_data", ")", ":", "if", "name", "not", "in", "new_providers", ":", "new_providers", "[", "name", "]", "=", "{", "}", "new_providers", "[", "name", "]", "[", "driver", "]", "=", "prov_data", "return", "new_providers" ]
Given a sample ID and a mapping, modify a Sanger FASTA file to include the barcode and primer in the sequence data, and change the description line as needed.
def scrobble_data_dir ( dataDir , sampleMap , outF , qualF = None , idopt = None , utf16 = False ) : seqcount = 0 outfiles = [ osp . split ( outF . name ) [ 1 ] ] if qualF : outfiles . append ( osp . split ( qualF . name ) [ 1 ] ) for item in os . listdir ( dataDir ) : if item in outfiles or not osp . isfile ( os . path . join ( dataDir , item ) ) : continue # FASTA files if osp . splitext ( item ) [ 1 ] in file_types [ 'fasta' ] : fh = open_enc ( os . path . join ( dataDir , item ) , utf16 ) records = SeqIO . parse ( fh , 'fasta' ) for record in records : if isinstance ( idopt , tuple ) : sep , field = idopt sampleID = record . id . split ( sep ) [ field - 1 ] else : sampleID = osp . splitext ( item ) [ 0 ] record . seq = ( sampleMap [ sampleID ] . barcode + sampleMap [ sampleID ] . primer + record . seq ) SeqIO . write ( record , outF , 'fasta' ) seqcount += 1 fh . close ( ) # QUAL files elif qualF and osp . splitext ( item ) [ 1 ] in file_types [ 'qual' ] : fh = open_enc ( os . path . join ( dataDir , item ) , utf16 ) records = SeqIO . parse ( fh , 'qual' ) for record in records : mi = sampleMap [ sampleMap . keys ( ) [ 0 ] ] quals = [ 40 for _ in range ( len ( mi . barcode ) + len ( mi . primer ) ) ] record . letter_annotations [ 'phred_quality' ] [ 0 : 0 ] = quals SeqIO . write ( record , qualF , 'qual' ) fh . close ( ) return seqcount
181
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/sanger_qiimify.py#L158-L199
[ "def", "num_fails", "(", "self", ")", ":", "n", "=", "len", "(", "self", ".", "failed_phase_list", ")", "if", "self", ".", "phase_stack", "[", "-", "1", "]", ".", "status", "in", "(", "SolverStatus", ".", "failed", ",", "SolverStatus", ".", "cyclic", ")", ":", "n", "+=", "1", "return", "n" ]
Uses the built-in argparse module to handle command-line options for the program.
def handle_program_options ( ) : parser = argparse . ArgumentParser ( description = "Convert Sanger-sequencing \ derived data files for use with the \ metagenomics analysis program QIIME, by \ extracting Sample ID information, adding\ barcodes and primers to the sequence \ data, and outputting a mapping file and\ single FASTA-formatted sequence file \ formed by concatenating all input data." ) parser . add_argument ( '-i' , '--input_dir' , required = True , help = "The directory containing sequence data files. \ Assumes all data files are placed in this \ directory. For files organized within folders by\ sample, use -s in addition." ) parser . add_argument ( '-m' , '--map_file' , default = 'map.txt' , help = "QIIME-formatted mapping file linking Sample IDs \ with barcodes and primers." ) parser . add_argument ( '-o' , '--output' , default = 'output.fasta' , metavar = 'OUTPUT_FILE' , help = "Single file containing all sequence data found \ in input_dir, FASTA-formatted with barcode and \ primer preprended to sequence. If the -q option \ is passed, any quality data will also be output \ to a single file of the same name with a .qual \ extension." ) parser . add_argument ( '-b' , '--barcode_length' , type = int , default = 12 , help = "Length of the generated barcode sequences. \ Default is 12 (QIIME default), minimum is 8." ) parser . add_argument ( '-q' , '--qual' , action = 'store_true' , default = False , help = "Instruct the program to look for quality \ input files" ) parser . add_argument ( '-u' , '--utf16' , action = 'store_true' , default = False , help = "UTF-16 encoded input files" ) parser . add_argument ( '-t' , '--treatment' , help = "Inserts an additional column into the mapping \ file specifying some treatment or other variable\ that separates the current set of sequences \ from any other set of seqeunces. For example:\ -t DiseaseState=healthy" ) # data input options sidGroup = parser . add_mutually_exclusive_group ( required = True ) sidGroup . add_argument ( '-d' , '--identifier_pattern' , action = ValidateIDPattern , nargs = 2 , metavar = ( 'SEPARATOR' , 'FIELD_NUMBER' ) , help = "Indicates how to extract the Sample ID from \ the description line. Specify two things: \ 1. Field separator, 2. Field number of Sample \ ID (1 or greater). If the separator is a space \ or tab, use \s or \\t respectively. \ Example: >ka-SampleID-2091, use -i - 2, \ indicating - is the separator and the Sample ID\ is field #2." ) sidGroup . add_argument ( '-f' , '--filename_sample_id' , action = 'store_true' , default = False , help = 'Specify that the program should\ the name of each fasta file as the Sample ID for use\ in the mapping file. This is meant to be used when \ all sequence data for a sample is stored in a single\ file.' ) return parser . parse_args ( )
182
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/sanger_qiimify.py#L202-L271
[ "def", "checkIsConsistent", "(", "self", ")", ":", "if", "is_an_array", "(", "self", ".", "mask", ")", "and", "self", ".", "mask", ".", "shape", "!=", "self", ".", "data", ".", "shape", ":", "raise", "ConsistencyError", "(", "\"Shape mismatch mask={}, data={}\"", ".", "format", "(", "self", ".", "mask", ".", "shape", "!=", "self", ".", "data", ".", "shape", ")", ")" ]
Applies the arcsine square root transform to the given BIOM-format table
def arcsin_sqrt ( biom_tbl ) : arcsint = lambda data , id_ , md : np . arcsin ( np . sqrt ( data ) ) tbl_relabd = relative_abd ( biom_tbl ) tbl_asin = tbl_relabd . transform ( arcsint , inplace = False ) return tbl_asin
183
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/transform_biom.py#L78-L88
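The arcsine square root transform applied by arcsin_sqrt above is a standard variance-stabilizing transform for proportions; on a single relative abundance it is simply:

import numpy as np
np.arcsin(np.sqrt(0.25))  # -> ~0.5236 rad (arcsin(0.5) = pi/6)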
[ "def", "create_reservation", "(", "self", ",", "username", ",", "domain", ",", "email", "=", "None", ")", ":", "password", "=", "self", ".", "get_random_password", "(", ")", "self", ".", "create", "(", "username", "=", "username", ",", "domain", "=", "domain", ",", "password", "=", "password", ",", "email", "=", "email", ")" ]
parse SAM file and filter records by mapping quality
def parse_sam ( sam , qual ) : for line in sam : if line . startswith ( '@' ) : continue line = line . strip ( ) . split ( ) if int ( line [ 4 ] ) == 0 or int ( line [ 4 ] ) < qual : continue yield line
184
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L23-L33
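A minimal driver for parse_sam above; the filename is hypothetical, and fields[4] is the MAPQ column of each tab-split SAM record:

with open('alignments.sam') as sam:  # hypothetical input file
    for fields in parse_sam(sam, qual=20):
        print(fields[2], fields[3])  # reference name and 1-based position of each kept record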
[ "def", "_parse_box_list", "(", "self", ",", "output", ")", ":", "# Parse box list output", "boxes", "=", "[", "]", "# initialize box values", "name", "=", "provider", "=", "version", "=", "None", "for", "timestamp", ",", "target", ",", "kind", ",", "data", "in", "self", ".", "_parse_machine_readable_output", "(", "output", ")", ":", "if", "kind", "==", "'box-name'", ":", "# finish the previous box, if any", "if", "name", "is", "not", "None", ":", "boxes", ".", "append", "(", "Box", "(", "name", "=", "name", ",", "provider", "=", "provider", ",", "version", "=", "version", ")", ")", "# start a new box", "name", "=", "data", "# box name", "provider", "=", "version", "=", "None", "elif", "kind", "==", "'box-provider'", ":", "provider", "=", "data", "elif", "kind", "==", "'box-version'", ":", "version", "=", "data", "# finish the previous box, if any", "if", "name", "is", "not", "None", ":", "boxes", ".", "append", "(", "Box", "(", "name", "=", "name", ",", "provider", "=", "provider", ",", "version", "=", "version", ")", ")", "return", "boxes" ]
reverse complement stats
def rc_stats ( stats ) : rc_nucs = { 'A' : 'T' , 'T' : 'A' , 'G' : 'C' , 'C' : 'G' , 'N' : 'N' } rcs = [ ] for pos in reversed ( stats ) : rc = { } rc [ 'reference frequency' ] = pos [ 'reference frequency' ] rc [ 'consensus frequency' ] = pos [ 'consensus frequency' ] rc [ 'In' ] = pos [ 'In' ] rc [ 'Del' ] = pos [ 'Del' ] rc [ 'ref' ] = rc_nucs [ pos [ 'ref' ] ] rc [ 'consensus' ] = ( rc_nucs [ pos [ 'consensus' ] [ 0 ] ] , pos [ 'consensus' ] [ 1 ] ) for base , stat in list ( pos . items ( ) ) : if base in rc_nucs : rc [ rc_nucs [ base ] ] = stat rcs . append ( rc ) return rcs
185
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L138-L156
[ "def", "load_projects", "(", "self", ")", ":", "server_config", "=", "Config", ".", "instance", "(", ")", ".", "get_section_config", "(", "\"Server\"", ")", "projects_path", "=", "os", ".", "path", ".", "expanduser", "(", "server_config", ".", "get", "(", "\"projects_path\"", ",", "\"~/GNS3/projects\"", ")", ")", "os", ".", "makedirs", "(", "projects_path", ",", "exist_ok", "=", "True", ")", "try", ":", "for", "project_path", "in", "os", ".", "listdir", "(", "projects_path", ")", ":", "project_dir", "=", "os", ".", "path", ".", "join", "(", "projects_path", ",", "project_path", ")", "if", "os", ".", "path", ".", "isdir", "(", "project_dir", ")", ":", "for", "file", "in", "os", ".", "listdir", "(", "project_dir", ")", ":", "if", "file", ".", "endswith", "(", "\".gns3\"", ")", ":", "try", ":", "yield", "from", "self", ".", "load_project", "(", "os", ".", "path", ".", "join", "(", "project_dir", ",", "file", ")", ",", "load", "=", "False", ")", "except", "(", "aiohttp", ".", "web_exceptions", ".", "HTTPConflict", ",", "NotImplementedError", ")", ":", "pass", "# Skip not compatible projects", "except", "OSError", "as", "e", ":", "log", ".", "error", "(", "str", "(", "e", ")", ")" ]
parse codon nucleotide positions in range start -> end, with respect to strand
def parse_codons ( ref , start , end , strand ) : codon = [ ] c = cycle ( [ 1 , 2 , 3 ] ) ref = ref [ start - 1 : end ] if strand == - 1 : ref = rc_stats ( ref ) for pos in ref : n = next ( c ) codon . append ( pos ) if n == 3 : yield codon codon = [ ]
186
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L158-L172
[ "def", "dump", "(", "self", ")", ":", "data", "=", "dict", "(", "# Sessions", "sessions_active", "=", "self", ".", "sess_active", ",", "# Connections", "connections_active", "=", "self", ".", "conn_active", ",", "connections_ps", "=", "self", ".", "conn_ps", ".", "last_average", ",", "# Packets", "packets_sent_ps", "=", "self", ".", "pack_sent_ps", ".", "last_average", ",", "packets_recv_ps", "=", "self", ".", "pack_recv_ps", ".", "last_average", ")", "for", "k", ",", "v", "in", "self", ".", "sess_transports", ".", "items", "(", ")", ":", "data", "[", "'transp_'", "+", "k", "]", "=", "v", "return", "data" ]
calculate coverage for positions in range start -> end
def calc_coverage ( ref , start , end , length , nucs ) : ref = ref [ start - 1 : end ] bases = 0 for pos in ref : for base , count in list ( pos . items ( ) ) : if base in nucs : bases += count return float ( bases ) / float ( length )
187
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L174-L184
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
parse gbk file
def parse_gbk ( gbks ) : for gbk in gbks : for record in SeqIO . parse ( open ( gbk ) , 'genbank' ) : for feature in record . features : if feature . type == 'gene' : try : locus = feature . qualifiers [ 'locus_tag' ] [ 0 ] except : continue if feature . type == 'CDS' : try : locus = feature . qualifiers [ 'locus_tag' ] [ 0 ] except : pass start = int ( feature . location . start ) + int ( feature . qualifiers [ 'codon_start' ] [ 0 ] ) end , strand = int ( feature . location . end ) , feature . location . strand if strand is None : strand = 1 else : strand = - 1 contig = record . id # contig = record.id.rsplit('.', 1)[0] yield contig , [ locus , [ start , end , strand ] , feature . qualifiers ]
188
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L186-L213
[ "def", "indication", "(", "self", ",", "apdu", ")", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\"indication %r\"", ",", "apdu", ")", "if", "self", ".", "state", "==", "IDLE", ":", "self", ".", "idle", "(", "apdu", ")", "elif", "self", ".", "state", "==", "SEGMENTED_REQUEST", ":", "self", ".", "segmented_request", "(", "apdu", ")", "elif", "self", ".", "state", "==", "AWAIT_RESPONSE", ":", "self", ".", "await_response", "(", "apdu", ")", "elif", "self", ".", "state", "==", "SEGMENTED_RESPONSE", ":", "self", ".", "segmented_response", "(", "apdu", ")", "else", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\" - invalid state\"", ")" ]
parse gene call information from Prodigal fasta output
def parse_fasta_annotations ( fastas , annot_tables , trans_table ) : if annot_tables is not False : annots = { } for table in annot_tables : for cds in open ( table ) : ID , start , end , strand = cds . strip ( ) . split ( ) annots [ ID ] = [ start , end , int ( strand ) ] for fasta in fastas : for seq in parse_fasta ( fasta ) : if ( '# ;gc_cont' not in seq [ 0 ] and '# ID=' not in seq [ 0 ] ) and annot_tables is False : print ( '# specify fasta from Prodigal or annotations table (-t)' , file = sys . stderr ) exit ( ) if 'ID=' in seq [ 0 ] : ID = seq [ 0 ] . rsplit ( 'ID=' , 1 ) [ 1 ] . split ( ';' , 1 ) [ 0 ] contig = seq [ 0 ] . split ( ) [ 0 ] . split ( '>' ) [ 1 ] . rsplit ( '_%s' % ( ID ) , 1 ) [ 0 ] else : contig = seq [ 0 ] . split ( ) [ 0 ] . split ( '>' ) [ 1 ] . rsplit ( '_' , 1 ) [ 0 ] locus = seq [ 0 ] . split ( ) [ 0 ] . split ( '>' ) [ 1 ] # annotation info from Prodigal if ( '# ;gc_cont' in seq [ 0 ] or '# ID=' in seq [ 0 ] ) : info = seq [ 0 ] . split ( ' # ' ) start , end , strand = int ( info [ 1 ] ) , int ( info [ 2 ] ) , info [ 3 ] if strand == '1' : strand = 1 else : strand = - 1 product = [ '' . join ( info [ 4 ] . split ( ) [ 1 : ] ) ] # annotation info from table else : start , end , strand = annots [ locus ] product = seq [ 0 ] . split ( ' ' , 1 ) [ 1 ] info = { 'transl_table' : [ trans_table ] , 'translation' : [ seq [ 1 ] ] , 'product' : product } yield contig , [ locus , [ start , end , strand ] , info ]
189
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L215-L252
[ "def", "initialize_communities_bucket", "(", ")", ":", "bucket_id", "=", "UUID", "(", "current_app", ".", "config", "[", "'COMMUNITIES_BUCKET_UUID'", "]", ")", "if", "Bucket", ".", "query", ".", "get", "(", "bucket_id", ")", ":", "raise", "FilesException", "(", "\"Bucket with UUID {} already exists.\"", ".", "format", "(", "bucket_id", ")", ")", "else", ":", "storage_class", "=", "current_app", ".", "config", "[", "'FILES_REST_DEFAULT_STORAGE_CLASS'", "]", "location", "=", "Location", ".", "get_default", "(", ")", "bucket", "=", "Bucket", "(", "id", "=", "bucket_id", ",", "location", "=", "location", ",", "default_storage_class", "=", "storage_class", ")", "db", ".", "session", ".", "add", "(", "bucket", ")", "db", ".", "session", ".", "commit", "(", ")" ]
parse annotations in either gbk or Prodigal fasta format
def parse_annotations ( annots , fmt , annot_tables , trans_table ) : annotations = { } # annotations[contig] = [features] # gbk format if fmt is False : for contig , feature in parse_gbk ( annots ) : if contig not in annotations : annotations [ contig ] = [ ] annotations [ contig ] . append ( feature ) # fasta format else : for contig , feature in parse_fasta_annotations ( annots , annot_tables , trans_table ) : if contig not in annotations : annotations [ contig ] = [ ] annotations [ contig ] . append ( feature ) return annotations
190
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L254-L271
[ "def", "delayed_close", "(", "self", ")", ":", "self", ".", "state", "=", "SESSION_STATE", ".", "CLOSING", "reactor", ".", "callLater", "(", "0", ",", "self", ".", "close", ")" ]
convert codon to amino acid
def codon2aa ( codon , trans_table ) : return Seq ( '' . join ( codon ) , IUPAC . ambiguous_dna ) . translate ( table = trans_table ) [ 0 ]
191
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L311-L315
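A one-liner check of codon2aa above; this relies on an older Biopython in which Bio.Alphabet/IUPAC is still available, since newer releases removed that module:

codon2aa(['A', 'T', 'G'], 11)  # -> 'M' (start codon under translation table 11)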
[ "def", "process_config", "(", "config", ",", "config_data", ")", ":", "if", "'components'", "in", "config_data", ":", "process_components_config_section", "(", "config", ",", "config_data", "[", "'components'", "]", ")", "if", "'data'", "in", "config_data", ":", "process_data_config_section", "(", "config", ",", "config_data", "[", "'data'", "]", ")", "if", "'log'", "in", "config_data", ":", "process_log_config_section", "(", "config", ",", "config_data", "[", "'log'", "]", ")", "if", "'management'", "in", "config_data", ":", "process_management_config_section", "(", "config", ",", "config_data", "[", "'management'", "]", ")", "if", "'session'", "in", "config_data", ":", "process_session_config_section", "(", "config", ",", "config_data", "[", "'session'", "]", ")" ]
find consensus base based on nucleotide frequencies
def find_consensus ( bases ) : nucs = [ 'A' , 'T' , 'G' , 'C' , 'N' ] total = sum ( [ bases [ nuc ] for nuc in nucs if nuc in bases ] ) # save most common base as consensus (random nuc if there is a tie) try : top = max ( [ bases [ nuc ] for nuc in nucs if nuc in bases ] ) except : bases [ 'consensus' ] = ( 'N' , 'n/a' ) bases [ 'consensus frequency' ] = 'n/a' bases [ 'reference frequency' ] = 'n/a' return bases top = [ ( nuc , bases [ nuc ] ) for nuc in bases if bases [ nuc ] == top ] if top [ 0 ] [ 1 ] == 0 : bases [ 'consensus' ] = ( 'n/a' , 0 ) else : bases [ 'consensus' ] = random . choice ( top ) if total == 0 : c_freq = 'n/a' ref_freq = 'n/a' else : c_freq = float ( bases [ 'consensus' ] [ 1 ] ) / float ( total ) if bases [ 'ref' ] not in bases : ref_freq = 0 else : ref_freq = float ( bases [ bases [ 'ref' ] ] ) / float ( total ) bases [ 'consensus frequency' ] = c_freq bases [ 'reference frequency' ] = ref_freq return bases
192
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L371-L402
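A small worked example of find_consensus above, with hand-built per-base counts; the 'ref' key names the reference base at that position:

bases = {'A': 8, 'T': 1, 'G': 0, 'C': 1, 'ref': 'A'}
stats = find_consensus(bases)
stats['consensus']            # -> ('A', 8)
stats['consensus frequency']  # -> 0.8 (8 of 10 observed bases)
stats['reference frequency']  # -> 0.8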
[ "def", "_create_download_failed_message", "(", "exception", ",", "url", ")", ":", "message", "=", "'Failed to download from:\\n{}\\nwith {}:\\n{}'", ".", "format", "(", "url", ",", "exception", ".", "__class__", ".", "__name__", ",", "exception", ")", "if", "_is_temporal_problem", "(", "exception", ")", ":", "if", "isinstance", "(", "exception", ",", "requests", ".", "ConnectionError", ")", ":", "message", "+=", "'\\nPlease check your internet connection and try again.'", "else", ":", "message", "+=", "'\\nThere might be a problem in connection or the server failed to process '", "'your request. Please try again.'", "elif", "isinstance", "(", "exception", ",", "requests", ".", "HTTPError", ")", ":", "try", ":", "server_message", "=", "''", "for", "elem", "in", "decode_data", "(", "exception", ".", "response", ".", "content", ",", "MimeType", ".", "XML", ")", ":", "if", "'ServiceException'", "in", "elem", ".", "tag", "or", "'Message'", "in", "elem", ".", "tag", ":", "server_message", "+=", "elem", ".", "text", ".", "strip", "(", "'\\n\\t '", ")", "except", "ElementTree", ".", "ParseError", ":", "server_message", "=", "exception", ".", "response", ".", "text", "message", "+=", "'\\nServer response: \"{}\"'", ".", "format", "(", "server_message", ")", "return", "message" ]
print consensus sequences for each genome and sample
def print_consensus ( genomes ) : # generate consensus sequences cons = { } # cons[genome][sample][contig] = consensus for genome , contigs in list ( genomes . items ( ) ) : cons [ genome ] = { } for contig , samples in list ( contigs . items ( ) ) : for sample , stats in list ( samples . items ( ) ) : if sample not in cons [ genome ] : cons [ genome ] [ sample ] = { } seq = cons [ genome ] [ sample ] [ contig ] = [ ] for pos , ps in enumerate ( stats [ 'bp_stats' ] , 1 ) : ref , consensus = ps [ 'ref' ] , ps [ 'consensus' ] [ 0 ] if consensus == 'n/a' : consensus = ref . lower ( ) seq . append ( consensus ) # print consensus sequences for genome , samples in cons . items ( ) : for sample , contigs in samples . items ( ) : fn = '%s.%s.consensus.fa' % ( genome , sample ) f = open ( fn , 'w' ) for contig , seq in contigs . items ( ) : print ( '>%s' % ( contig ) , file = f ) print ( '' . join ( seq ) , file = f ) f . close ( ) return cons
193
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L451-L478
[ "def", "delete_datapoints_in_time_range", "(", "self", ",", "start_dt", "=", "None", ",", "end_dt", "=", "None", ")", ":", "start_dt", "=", "to_none_or_dt", "(", "validate_type", "(", "start_dt", ",", "datetime", ".", "datetime", ",", "type", "(", "None", ")", ")", ")", "end_dt", "=", "to_none_or_dt", "(", "validate_type", "(", "end_dt", ",", "datetime", ".", "datetime", ",", "type", "(", "None", ")", ")", ")", "params", "=", "{", "}", "if", "start_dt", "is", "not", "None", ":", "params", "[", "'startTime'", "]", "=", "isoformat", "(", "start_dt", ")", "if", "end_dt", "is", "not", "None", ":", "params", "[", "'endTime'", "]", "=", "isoformat", "(", "end_dt", ")", "self", ".", "_conn", ".", "delete", "(", "\"/ws/DataPoint/{stream_id}{querystring}\"", ".", "format", "(", "stream_id", "=", "self", ".", "get_stream_id", "(", ")", ",", "querystring", "=", "\"?\"", "+", "urllib", ".", "parse", ".", "urlencode", "(", "params", ")", "if", "params", "else", "\"\"", ",", ")", ")" ]
calculate genome coverage from scaffold coverage table
def parse_cov ( cov_table , scaffold2genome ) : size = { } # size[genome] = genome size mapped = { } # mapped[genome][sample] = mapped bases # parse coverage files for line in open ( cov_table ) : line = line . strip ( ) . split ( '\t' ) if line [ 0 ] . startswith ( '#' ) : samples = line [ 1 : ] samples = [ i . rsplit ( '/' , 1 ) [ - 1 ] . split ( '.' , 1 ) [ 0 ] for i in samples ] continue scaffold , length = line [ 0 ] . split ( ': ' ) length = float ( length ) covs = [ float ( i ) for i in line [ 1 : ] ] bases = [ c * length for c in covs ] if scaffold not in scaffold2genome : continue genome = scaffold2genome [ scaffold ] if genome not in size : size [ genome ] = 0 mapped [ genome ] = { sample : 0 for sample in samples } # keep track of genome size size [ genome ] += length # keep track of number of mapped bases for sample , count in zip ( samples , bases ) : mapped [ genome ] [ sample ] += count # calculate coverage from base counts and genome size coverage = { 'genome' : [ ] , 'genome size (bp)' : [ ] , 'sample' : [ ] , 'coverage' : [ ] } for genome , length in size . items ( ) : for sample in samples : cov = mapped [ genome ] [ sample ] / length coverage [ 'genome' ] . append ( genome ) coverage [ 'genome size (bp)' ] . append ( length ) coverage [ 'sample' ] . append ( sample ) coverage [ 'coverage' ] . append ( cov ) return pd . DataFrame ( coverage )
194
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_coverage.py#L13-L50
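The arithmetic parse_cov above performs, in miniature: per-scaffold mapped bases are coverage times length, and genome coverage is total mapped bases over genome size:

# two scaffolds of one genome: 10x over 1,000 bp and 4x over 500 bp
(10 * 1000 + 4 * 500) / (1000 + 500)  # -> 8.0x genome coverage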
[ "def", "get_api_v1_info", "(", "api_prefix", ")", ":", "websocket_root", "=", "base_ws_uri", "(", ")", "+", "EVENTS_ENDPOINT", "docs_url", "=", "[", "'https://docs.bigchaindb.com/projects/server/en/v'", ",", "version", ".", "__version__", ",", "'/http-client-server-api.html'", ",", "]", "return", "{", "'docs'", ":", "''", ".", "join", "(", "docs_url", ")", ",", "'transactions'", ":", "'{}transactions/'", ".", "format", "(", "api_prefix", ")", ",", "'blocks'", ":", "'{}blocks/'", ".", "format", "(", "api_prefix", ")", ",", "'assets'", ":", "'{}assets/'", ".", "format", "(", "api_prefix", ")", ",", "'outputs'", ":", "'{}outputs/'", ".", "format", "(", "api_prefix", ")", ",", "'streams'", ":", "websocket_root", ",", "'metadata'", ":", "'{}metadata/'", ".", "format", "(", "api_prefix", ")", ",", "'validators'", ":", "'{}validators'", ".", "format", "(", "api_prefix", ")", ",", "}" ]
calculate genome coverage from scaffold coverage
def genome_coverage ( covs , s2b ) : COV = [ ] for cov in covs : COV . append ( parse_cov ( cov , s2b ) ) return pd . concat ( COV )
195
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_coverage.py#L52-L59
[ "def", "on_websocket_message", "(", "message", ":", "str", ")", "->", "None", ":", "msgs", "=", "json", ".", "loads", "(", "message", ")", "for", "msg", "in", "msgs", ":", "if", "not", "isinstance", "(", "msg", ",", "dict", ")", ":", "logger", ".", "error", "(", "'Invalid WS message format: {}'", ".", "format", "(", "message", ")", ")", "continue", "_type", "=", "msg", ".", "get", "(", "'type'", ")", "if", "_type", "==", "'log'", ":", "log_handler", "(", "msg", "[", "'level'", "]", ",", "msg", "[", "'message'", "]", ")", "elif", "_type", "==", "'event'", ":", "event_handler", "(", "msg", "[", "'event'", "]", ")", "elif", "_type", "==", "'response'", ":", "response_handler", "(", "msg", ")", "else", ":", "raise", "ValueError", "(", "'Unkown message type: {}'", ".", "format", "(", "message", ")", ")" ]
convert scaffold-to-bin (s2b) files to dictionary
def parse_s2bs ( s2bs ) : s2b = { } for s in s2bs : for line in open ( s ) : line = line . strip ( ) . split ( '\t' ) s , b = line [ 0 ] , line [ 1 ] s2b [ s ] = b return s2b
196
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_coverage.py#L61-L71
[ "def", "surrounding_nodes", "(", "self", ",", "position", ")", ":", "n_node_index", ",", "n_node_position", ",", "n_node_error", "=", "self", ".", "nearest_node", "(", "position", ")", "if", "n_node_error", "==", "0.0", ":", "index_mod", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "n_node_index", ")", ")", ":", "new_point", "=", "np", ".", "asarray", "(", "n_node_position", ")", "new_point", "[", "i", "]", "+=", "1.e-5", "*", "np", ".", "abs", "(", "new_point", "[", "i", "]", ")", "try", ":", "self", ".", "nearest_node", "(", "tuple", "(", "new_point", ")", ")", "index_mod", ".", "append", "(", "-", "1", ")", "except", "ValueError", ":", "index_mod", ".", "append", "(", "1", ")", "else", ":", "# Check if node_position is larger or smaller in resp. axes than position", "index_mod", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "n_node_index", ")", ")", ":", "if", "n_node_position", "[", "i", "]", ">", "position", "[", "i", "]", ":", "index_mod", ".", "append", "(", "-", "1", ")", "else", ":", "index_mod", ".", "append", "(", "1", ")", "return", "tuple", "(", "n_node_index", ")", ",", "tuple", "(", "index_mod", ")" ]
convert fastas to s2b dictionary
def fa2s2b ( fastas ) : s2b = { } for fa in fastas : for seq in parse_fasta ( fa ) : s = seq [ 0 ] . split ( '>' , 1 ) [ 1 ] . split ( ) [ 0 ] s2b [ s ] = fa . rsplit ( '/' , 1 ) [ - 1 ] . rsplit ( '.' , 1 ) [ 0 ] return s2b
197
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_coverage.py#L73-L82
[ "def", "BuildChecks", "(", "self", ",", "request", ")", ":", "result", "=", "[", "]", "if", "request", ".", "HasField", "(", "\"start_time\"", ")", "or", "request", ".", "HasField", "(", "\"end_time\"", ")", ":", "def", "FilterTimestamp", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "HasField", "(", "\"st_mtime\"", ")", "and", "(", "file_stat", ".", "st_mtime", "<", "request", ".", "start_time", "or", "file_stat", ".", "st_mtime", ">", "request", ".", "end_time", ")", "result", ".", "append", "(", "FilterTimestamp", ")", "if", "request", ".", "HasField", "(", "\"min_file_size\"", ")", "or", "request", ".", "HasField", "(", "\"max_file_size\"", ")", ":", "def", "FilterSize", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "HasField", "(", "\"st_size\"", ")", "and", "(", "file_stat", ".", "st_size", "<", "request", ".", "min_file_size", "or", "file_stat", ".", "st_size", ">", "request", ".", "max_file_size", ")", "result", ".", "append", "(", "FilterSize", ")", "if", "request", ".", "HasField", "(", "\"perm_mode\"", ")", ":", "def", "FilterPerms", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "(", "file_stat", ".", "st_mode", "&", "request", ".", "perm_mask", ")", "!=", "request", ".", "perm_mode", "result", ".", "append", "(", "FilterPerms", ")", "if", "request", ".", "HasField", "(", "\"uid\"", ")", ":", "def", "FilterUID", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "st_uid", "!=", "request", ".", "uid", "result", ".", "append", "(", "FilterUID", ")", "if", "request", ".", "HasField", "(", "\"gid\"", ")", ":", "def", "FilterGID", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "st_gid", "!=", "request", ".", "gid", "result", ".", "append", "(", "FilterGID", ")", "if", "request", ".", "HasField", "(", "\"path_regex\"", ")", ":", "regex", "=", "request", ".", "path_regex", "def", "FilterPath", "(", "file_stat", ",", "regex", "=", "regex", ")", ":", "\"\"\"Suppress any filename not matching the regular expression.\"\"\"", "return", "not", "regex", ".", "Search", "(", "file_stat", ".", "pathspec", ".", "Basename", "(", ")", ")", "result", ".", "append", "(", "FilterPath", ")", "if", "request", ".", "HasField", "(", "\"data_regex\"", ")", ":", "def", "FilterData", "(", "file_stat", ",", "*", "*", "_", ")", ":", "\"\"\"Suppress files that do not match the content.\"\"\"", "return", "not", "self", ".", "TestFileContent", "(", "file_stat", ")", "result", ".", "append", "(", "FilterData", ")", "return", "result" ]
Filters out sequences with too much ambiguity, as defined by the method parameters.
def filter_ambiguity ( records , percent = 0.5 ) : # , repeats=6) seqs = [ ] # Ns = ''.join(['N' for _ in range(repeats)]) count = 0 for record in records : if record . seq . count ( 'N' ) / float ( len ( record ) ) < percent : # pos = record.seq.find(Ns) # if pos >= 0: # record.seq = Seq(str(record.seq)[:pos]) seqs . append ( record ) count += 1 return seqs , count
198
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/filter_ambiguity.py#L16-L41
[ "def", "delete_datapoint", "(", "self", ",", "datapoint", ")", ":", "datapoint", "=", "validate_type", "(", "datapoint", ",", "DataPoint", ")", "self", ".", "_conn", ".", "delete", "(", "\"/ws/DataPoint/{stream_id}/{datapoint_id}\"", ".", "format", "(", "stream_id", "=", "self", ".", "get_stream_id", "(", ")", ",", "datapoint_id", "=", "datapoint", ".", "get_id", "(", ")", ",", ")", ")" ]
Search PyPI for the given package name; raises Conflict if the name is already registered.
def package_existent ( name ) : try : response = requests . get ( PYPI_URL . format ( name ) ) if response . ok : msg = ( '[error] "{0}" is registered already in PyPI.\n' '\tSpecify another package name.' ) . format ( name ) raise Conflict ( msg ) except ( socket . gaierror , Timeout , ConnectionError , HTTPError ) as exc : raise BackendFailure ( exc )
199
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/pypi.py#L12-L33
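A usage sketch of package_existent above, assuming the module's Conflict exception is importable alongside it; a name already taken on PyPI triggers the Conflict:

try:
    package_existent('requests')  # 'requests' is registered on PyPI
except Conflict as exc:
    print(exc)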
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]