query: stringlengths 5 to 1.23k
positive: stringlengths 53 to 15.2k
id_: int64 0 to 252k
task_name: stringlengths 87 to 242
negative: sequencelengths 20 to 553
Parse a JSON string and build an entity.
def from_json(cls, json_doc):
    try:
        d = json.load(json_doc)
    except AttributeError:  # catch the read() error
        d = json.loads(json_doc)
    return cls.from_dict(d)
600
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/entities.py#L490-L497
[ "def", "start_transmit", "(", "self", ",", "blocking", "=", "False", ",", "start_packet_groups", "=", "True", ",", "*", "ports", ")", ":", "port_list", "=", "self", ".", "set_ports_list", "(", "*", "ports", ")", "if", "start_packet_groups", ":", "port_list_for_packet_groups", "=", "self", ".", "ports", ".", "values", "(", ")", "port_list_for_packet_groups", "=", "self", ".", "set_ports_list", "(", "*", "port_list_for_packet_groups", ")", "self", ".", "api", ".", "call_rc", "(", "'ixClearTimeStamp {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartPacketGroups {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartTransmit {}'", ".", "format", "(", "port_list", ")", ")", "time", ".", "sleep", "(", "0.2", ")", "if", "blocking", ":", "self", ".", "wait_transmit", "(", "*", "ports", ")" ]
Return the multiple TypedField associated with this EntityList.
def _multiple_field(cls):
    klassdict = cls.__dict__
    try:
        # Checking for cls.entitylist_multifield would return any inherited
        # values, so we check the class __dict__ explicitly.
        return klassdict["_entitylist_multifield"][0]
    except (KeyError, IndexError, TypeError):
        from . import fields
        multifield_tuple = tuple(fields.find(cls, multiple=True))
        assert len(multifield_tuple) == 1

        # Make sure that the multiple field actually has an Entity type.
        multifield = multifield_tuple[0]
        assert issubclass(multifield.type_, Entity)

        # Store aside the multiple field. We wrap it in a tuple because
        # just doing ``cls._entitylist_multifield = multifield`` would
        # assign another TypedField descriptor to this class. We don't
        # want that.
        cls._entitylist_multifield = multifield_tuple

        # Return the multiple TypedField
        return multifield_tuple[0]
601
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/entities.py#L594-L628
[ "def", "revealjs", "(", "basedir", "=", "None", ",", "title", "=", "None", ",", "subtitle", "=", "None", ",", "description", "=", "None", ",", "github_user", "=", "None", ",", "github_repo", "=", "None", ")", ":", "basedir", "=", "basedir", "or", "query_input", "(", "'Base dir of the presentation?'", ",", "default", "=", "'~/repos/my_presi'", ")", "revealjs_repo_name", "=", "'reveal.js'", "revealjs_dir", "=", "flo", "(", "'{basedir}/{revealjs_repo_name}'", ")", "_lazy_dict", "[", "'presi_title'", "]", "=", "title", "_lazy_dict", "[", "'presi_subtitle'", "]", "=", "subtitle", "_lazy_dict", "[", "'presi_description'", "]", "=", "description", "_lazy_dict", "[", "'github_user'", "]", "=", "github_user", "_lazy_dict", "[", "'github_repo'", "]", "=", "github_repo", "question", "=", "flo", "(", "\"Base dir already contains a sub dir '{revealjs_repo_name}'.\"", "' Reset (and re-download) reveal.js codebase?'", ")", "if", "not", "exists", "(", "revealjs_dir", ")", "or", "query_yes_no", "(", "question", ",", "default", "=", "'no'", ")", ":", "run", "(", "flo", "(", "'mkdir -p {basedir}'", ")", ")", "set_up_revealjs_codebase", "(", "basedir", ",", "revealjs_repo_name", ")", "install_plugins", "(", "revealjs_dir", ")", "apply_customizations", "(", "repo_dir", "=", "revealjs_dir", ")", "if", "exists", "(", "revealjs_dir", ")", ":", "install_files_in_basedir", "(", "basedir", ",", "repo_dir", "=", "revealjs_dir", ")", "init_git_repo", "(", "basedir", ")", "create_github_remote_repo", "(", "basedir", ")", "setup_npm", "(", "revealjs_dir", ")", "else", ":", "print", "(", "'abort'", ")" ]
Returns a dictionary of namespaces to be exported with an XML document.
def _finalize_namespaces(self, ns_dict=None):
    if ns_dict:
        # Add the user's entries to our set
        for ns, alias in six.iteritems(ns_dict):
            self._collected_namespaces.add_namespace_uri(ns, alias)

    # Add the ID namespaces
    self._collected_namespaces.add_namespace_uri(
        ns_uri=idgen.get_id_namespace(),
        prefix=idgen.get_id_namespace_alias())

    # Remap the example namespace to the one expected by the APIs if the
    # sample example namespace is found.
    self._fix_example_namespace()

    # Add _input_namespaces
    for prefix, uri in six.iteritems(self._input_namespaces):
        self._collected_namespaces.add_namespace_uri(uri, prefix)

    # Add some default XML namespaces to make sure they're there.
    self._collected_namespaces.import_from(namespaces.XML_NAMESPACES)

    # python-stix's generateDS-generated binding classes can't handle
    # default namespaces. So make sure there are no preferred defaults in
    # the set. Get prefixes from the global namespace set if we have to.
    for ns_uri in self._collected_namespaces.namespace_uris:
        preferred_prefix = self._collected_namespaces.preferred_prefix_for_namespace(ns_uri)

        if preferred_prefix:
            continue

        # No preferred prefix set for namespace. Try to assign one.
        prefixes = self._collected_namespaces.get_prefixes(ns_uri)

        if prefixes:
            prefix = next(iter(prefixes))
        else:
            prefix = namespaces.lookup_name(ns_uri)

        if prefix is None:
            raise namespaces.NoPrefixesError(ns_uri)

        self._collected_namespaces.set_preferred_prefix_for_namespace(
            ns_uri=ns_uri,
            prefix=prefix,
            add_if_not_exist=True)
602
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/entities.py#L796-L856
[ "def", "ITRF_position_velocity_error", "(", "self", ",", "t", ")", ":", "rTEME", ",", "vTEME", ",", "error", "=", "self", ".", "_position_and_velocity_TEME_km", "(", "t", ")", "rTEME", "/=", "AU_KM", "vTEME", "/=", "AU_KM", "vTEME", "*=", "DAY_S", "rITRF", ",", "vITRF", "=", "TEME_to_ITRF", "(", "t", ".", "ut1", ",", "rTEME", ",", "vTEME", ")", "return", "rITRF", ",", "vITRF", ",", "error" ]
Get a list of all the communities.
def get(self, query, sort, page, size):
    urlkwargs = {
        'q': query,
        'sort': sort,
        'size': size,
    }

    communities = Community.filter_communities(query, sort)
    page = communities.paginate(page, size)
    links = default_links_pagination_factory(page, urlkwargs)

    links_headers = map(lambda key: ('link', 'ref="{0}" href="{1}"'.format(
        key, links[key])), links)

    return self.make_response(
        page,
        headers=links_headers,
        links_item_factory=default_links_item_factory,
        page=page,
        urlkwargs=urlkwargs,
        links_pagination_factory=default_links_pagination_factory,
    )
603
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/api.py#L80-L129
[ "def", "_resample_regressor", "(", "hr_regressor", ",", "hr_frame_times", ",", "frame_times", ")", ":", "from", "scipy", ".", "interpolate", "import", "interp1d", "f", "=", "interp1d", "(", "hr_frame_times", ",", "hr_regressor", ")", "return", "f", "(", "frame_times", ")", ".", "T" ]
Get the details of the specified community.
def get(self, community_id):
    community = Community.get(community_id)
    if not community:
        abort(404)
    etag = community.version_id
    self.check_etag(etag)
    response = self.make_response(
        community, links_item_factory=default_links_item_factory)
    response.set_etag(etag)
    return response
604
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/api.py#L143-L185
[ "def", "_try_free_lease", "(", "self", ",", "shard_state", ",", "slice_retry", "=", "False", ")", ":", "@", "db", ".", "transactional", "def", "_tx", "(", ")", ":", "fresh_state", "=", "model", ".", "ShardState", ".", "get_by_shard_id", "(", "shard_state", ".", "shard_id", ")", "if", "fresh_state", "and", "fresh_state", ".", "active", ":", "# Free lease.", "fresh_state", ".", "slice_start_time", "=", "None", "fresh_state", ".", "slice_request_id", "=", "None", "if", "slice_retry", ":", "fresh_state", ".", "slice_retries", "+=", "1", "fresh_state", ".", "put", "(", ")", "try", ":", "_tx", "(", ")", "# pylint: disable=broad-except", "except", "Exception", ",", "e", ":", "logging", ".", "warning", "(", "e", ")", "logging", ".", "warning", "(", "\"Release lock for shard %s failed. Wait for lease to expire.\"", ",", "shard_state", ".", "shard_id", ")" ]
Factory function for a _Phylesystem object.
def Phylesystem(repos_dict=None,
                repos_par=None,
                with_caching=True,
                repo_nexml2json=None,
                git_ssh=None,
                pkey=None,
                git_action_class=PhylesystemGitAction,
                mirror_info=None,
                new_study_prefix=None,
                infrastructure_commit_author='OpenTree API <[email protected]>'):
    if not repo_nexml2json:
        repo_nexml2json = get_config_setting('phylesystem', 'repo_nexml2json')
    global _THE_PHYLESYSTEM
    if _THE_PHYLESYSTEM is None:
        _THE_PHYLESYSTEM = _Phylesystem(repos_dict=repos_dict,
                                        repos_par=repos_par,
                                        with_caching=with_caching,
                                        repo_nexml2json=repo_nexml2json,
                                        git_ssh=git_ssh,
                                        pkey=pkey,
                                        git_action_class=git_action_class,
                                        mirror_info=mirror_info,
                                        new_study_prefix=new_study_prefix,
                                        infrastructure_commit_author=infrastructure_commit_author)
    return _THE_PHYLESYSTEM
605
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/phylesystem_umbrella.py#L273-L304
[ "def", "reward_bonus", "(", "self", ",", "assignment_id", ",", "amount", ",", "reason", ")", ":", "try", ":", "return", "self", ".", "mturkservice", ".", "grant_bonus", "(", "assignment_id", ",", "amount", ",", "reason", ")", "except", "MTurkServiceException", "as", "ex", ":", "logger", ".", "exception", "(", "str", "(", "ex", ")", ")" ]
Converts HTML5 character references within text_string to their corresponding unicode characters and returns the converted string as type str.
def convert_html_entities(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return html.unescape(text_string).replace("&quot;", "'")
    else:
        raise InputError("string not passed as argument for text_string")
606
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L33-L51
[ "def", "delete_files", "(", ")", ":", "session_token", "=", "request", ".", "headers", "[", "'session_token'", "]", "repository", "=", "request", ".", "headers", "[", "'repository'", "]", "#===", "current_user", "=", "have_authenticated_user", "(", "request", ".", "environ", "[", "'REMOTE_ADDR'", "]", ",", "repository", ",", "session_token", ")", "if", "current_user", "is", "False", ":", "return", "fail", "(", "user_auth_fail_msg", ")", "#===", "repository_path", "=", "config", "[", "'repositories'", "]", "[", "repository", "]", "[", "'path'", "]", "body_data", "=", "request", ".", "get_json", "(", ")", "def", "with_exclusive_lock", "(", ")", ":", "if", "not", "varify_user_lock", "(", "repository_path", ",", "session_token", ")", ":", "return", "fail", "(", "lock_fail_msg", ")", "try", ":", "data_store", "=", "versioned_storage", "(", "repository_path", ")", "if", "not", "data_store", ".", "have_active_commit", "(", ")", ":", "return", "fail", "(", "no_active_commit_msg", ")", "#-------------", "for", "fle", "in", "json", ".", "loads", "(", "body_data", "[", "'files'", "]", ")", ":", "data_store", ".", "fs_delete", "(", "fle", ")", "# updates the user lock expiry", "update_user_lock", "(", "repository_path", ",", "session_token", ")", "return", "success", "(", ")", "except", "Exception", ":", "return", "fail", "(", ")", "# pylint: disable=broad-except", "return", "lock_access", "(", "repository_path", ",", "with_exclusive_lock", ")" ]
Converts Latin character references within text_string to their corresponding unicode characters and returns the converted string as type str.
def convert_ligatures(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        for i in range(0, len(LIGATURES)):
            text_string = text_string.replace(LIGATURES[str(i)]["ligature"],
                                              LIGATURES[str(i)]["term"])
        return text_string
    else:
        raise InputError("none type or string not passed as an argument")
607
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L53-L73
[ "def", "detect_protocol", "(", "cls", ",", "message", ")", ":", "main", "=", "cls", ".", "_message_to_payload", "(", "message", ")", "def", "protocol_for_payload", "(", "payload", ")", ":", "if", "not", "isinstance", "(", "payload", ",", "dict", ")", ":", "return", "JSONRPCLoose", "# Will error", "# Obey an explicit \"jsonrpc\"", "version", "=", "payload", ".", "get", "(", "'jsonrpc'", ")", "if", "version", "==", "'2.0'", ":", "return", "JSONRPCv2", "if", "version", "==", "'1.0'", ":", "return", "JSONRPCv1", "# Now to decide between JSONRPCLoose and JSONRPCv1 if possible", "if", "'result'", "in", "payload", "and", "'error'", "in", "payload", ":", "return", "JSONRPCv1", "return", "JSONRPCLoose", "if", "isinstance", "(", "main", ",", "list", ")", ":", "parts", "=", "set", "(", "protocol_for_payload", "(", "payload", ")", "for", "payload", "in", "main", ")", "# If all same protocol, return it", "if", "len", "(", "parts", ")", "==", "1", ":", "return", "parts", ".", "pop", "(", ")", "# If strict protocol detected, return it, preferring JSONRPCv2.", "# This means a batch of JSONRPCv1 will fail", "for", "protocol", "in", "(", "JSONRPCv2", ",", "JSONRPCv1", ")", ":", "if", "protocol", "in", "parts", ":", "return", "protocol", "# Will error if no parts", "return", "JSONRPCLoose", "return", "protocol_for_payload", "(", "main", ")" ]
Splits the string and converts words not found within a pre-built dictionary to their most likely actual word, based on a relative probability dictionary. Returns the edited string as type str.
def correct_spelling(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        word_list = text_string.split()
        spellchecked_word_list = []
        for word in word_list:
            spellchecked_word_list.append(spellcheck.correct_word(word))
        return " ".join(spellchecked_word_list)
    else:
        raise InputError("none type or string not passed as an argument")
608
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L75-L98
[ "def", "stop", "(", "self", ")", ":", "self", ".", "_server", ".", "shutdown", "(", ")", "self", ".", "_server", ".", "server_close", "(", ")", "self", ".", "_thread", ".", "join", "(", ")", "self", ".", "running", "=", "False" ]
Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer and returns that list as type list of str.
def create_sentence_list(text_string):
    if text_string is None or text_string == "":
        return []
    elif isinstance(text_string, str):
        return SENTENCE_TOKENIZER.tokenize(text_string)
    else:
        raise InputError("non-string passed as argument for create_sentence_list")
609
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L100-L118
[ "def", "alpha_blend", "(", "self", ",", "other", ")", ":", "# get final alpha channel", "fa", "=", "self", ".", "__a", "+", "other", ".", "__a", "-", "(", "self", ".", "__a", "*", "other", ".", "__a", ")", "# get percentage of source alpha compared to final alpha", "if", "fa", "==", "0", ":", "sa", "=", "0", "else", ":", "sa", "=", "min", "(", "1.0", ",", "self", ".", "__a", "/", "other", ".", "__a", ")", "# destination percentage is just the additive inverse", "da", "=", "1.0", "-", "sa", "sr", ",", "sg", ",", "sb", "=", "[", "v", "*", "sa", "for", "v", "in", "self", ".", "__rgb", "]", "dr", ",", "dg", ",", "db", "=", "[", "v", "*", "da", "for", "v", "in", "other", ".", "__rgb", "]", "return", "Color", "(", "(", "sr", "+", "dr", ",", "sg", "+", "dg", ",", "sb", "+", "db", ")", ",", "'rgb'", ",", "fa", ",", "self", ".", "__wref", ")" ]
Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words shorter than 3 characters, and returns the new string as type str.
def keyword_tokenize(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string)
                         if word not in STOPWORDS and len(word) >= 3])
    else:
        raise InputError("string not passed as argument for text_string")
610
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L120-L138
[ "def", "switch_company", "(", "ctx", ",", "company", ")", ":", "current_company", "=", "ctx", ".", "env", ".", "user", ".", "company_id", "ctx", ".", "env", ".", "user", ".", "company_id", "=", "safe_record", "(", "ctx", ",", "company", ")", "yield", "ctx", "ctx", ".", "env", ".", "user", ".", "company_id", "=", "current_company" ]
Returns the base form of text_string using NLTK's WordNetLemmatizer as type str.
def lemmatize(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return LEMMATIZER.lemmatize(text_string)
    else:
        raise InputError("string not passed as primary argument")
611
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L140-L157
[ "def", "check_origin", "(", "self", ",", "origin", ")", ":", "mod_opts", "=", "self", ".", "application", ".", "mod_opts", "if", "mod_opts", ".", "get", "(", "'cors_origin'", ")", ":", "return", "bool", "(", "_check_cors_origin", "(", "origin", ",", "mod_opts", "[", "'cors_origin'", "]", ")", ")", "else", ":", "return", "super", "(", "AllEventsHandler", ",", "self", ")", ".", "check_origin", "(", "origin", ")" ]
Converts text_string into lowercase and returns the converted string as type str.
def lowercase(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return text_string.lower()
    else:
        raise InputError("string not passed as argument for text_string")
612
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L159-L176
[ "def", "_eval_progress", "(", "self", ",", "match", ")", ":", "_locals", "=", "{", "k", ":", "safe_float", "(", "v", ")", "for", "k", ",", "v", "in", "match", ".", "groupdict", "(", ")", ".", "items", "(", ")", "}", "if", "\"x\"", "not", "in", "_locals", ":", "_locals", "[", "\"x\"", "]", "=", "[", "safe_float", "(", "x", ")", "for", "x", "in", "match", ".", "groups", "(", ")", "]", "try", ":", "return", "int", "(", "eval", "(", "self", ".", "progress_expr", ",", "{", "}", ",", "_locals", ")", ")", "except", ":", "return", "None" ]
Applies each function within function_list, in the order given, to text_string and returns the processed string as type str.
def preprocess_text(text_string, function_list):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        if isinstance(function_list, list):
            for func in function_list:
                try:
                    text_string = func(text_string)
                except (NameError, TypeError):
                    raise FunctionError("invalid function passed as element of function_list")
                except:
                    raise
            return text_string
        else:
            raise InputError("list of functions not passed as argument for function_list")
    else:
        raise InputError("string not passed as argument for text_string")
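An illustrative chain of the preprocessing helpers shown in this dataset; the import path is an assumption based on the SpotlightData preprocessing package referenced in the task URLs, and the output comment shows the expected result, not a verified one.

# Illustrative only: chains several helpers from this dataset through preprocess_text.
from preprocessing.text import lowercase, remove_urls, remove_numbers, preprocess_text

cleaned = preprocess_text(
    "Visit http://example.com for 24 great tips!",
    [lowercase, remove_urls, remove_numbers],
)
print(cleaned)  # expected: "visit for great tips!"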
613
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L178-L208
[ "def", "watchdog_handler", "(", "self", ")", ":", "_LOGGING", ".", "debug", "(", "'%s Watchdog expired. Resetting connection.'", ",", "self", ".", "name", ")", "self", ".", "watchdog", ".", "stop", "(", ")", "self", ".", "reset_thrd", ".", "set", "(", ")" ]
Removes any escape character within text_string and returns the new string as type str.
def remove_esc_chars(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join(re.sub(r'\\\w', "", text_string).split())
    else:
        raise InputError("string not passed as argument")
614
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L210-L227
[ "def", "saveWeightsToFile", "(", "self", ",", "filename", ",", "mode", "=", "'pickle'", ",", "counter", "=", "None", ")", ":", "self", ".", "saveWeights", "(", "filename", ",", "mode", ",", "counter", ")" ]
Removes any digit value discovered within text_string and returns the new string as type str.
def remove_numbers(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
    else:
        raise InputError("string not passed as argument")
615
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L229-L246
[ "def", "_init_dataframes", "(", "self", ")", ":", "df", "=", "pd", ".", "read_sql_query", "(", "\"SELECT elapsed, epoch, scriptrun_time, custom_timers FROM result ORDER BY epoch ASC\"", ",", "db", ".", "get_conn", "(", ")", ")", "self", ".", "_get_all_timers", "(", "df", ")", "self", ".", "main_results", "=", "self", ".", "_get_processed_dataframe", "(", "df", ")", "# create all custom timers dataframes", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "self", ".", "_timers_values", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "value", ",", "columns", "=", "[", "'epoch'", ",", "'scriptrun_time'", "]", ")", "df", ".", "index", "=", "pd", ".", "to_datetime", "(", "df", "[", "'epoch'", "]", ",", "unit", "=", "'s'", ")", "timer_results", "=", "self", ".", "_get_processed_dataframe", "(", "df", ")", "self", ".", "timers_results", "[", "key", "]", "=", "timer_results", "# clear memory", "del", "self", ".", "_timers_values" ]
Removes any integer represented as a word within text_string and returns the new string as type str.
def remove_number_words(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        for word in NUMBER_WORDS:
            text_string = re.sub(r'[\S]*\b' + word + r'[\S]*', "", text_string)
        return " ".join(text_string.split())
    else:
        raise InputError("string not passed as argument")
616
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L248-L268
[ "def", "_init_dataframes", "(", "self", ")", ":", "df", "=", "pd", ".", "read_sql_query", "(", "\"SELECT elapsed, epoch, scriptrun_time, custom_timers FROM result ORDER BY epoch ASC\"", ",", "db", ".", "get_conn", "(", ")", ")", "self", ".", "_get_all_timers", "(", "df", ")", "self", ".", "main_results", "=", "self", ".", "_get_processed_dataframe", "(", "df", ")", "# create all custom timers dataframes", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "self", ".", "_timers_values", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "value", ",", "columns", "=", "[", "'epoch'", ",", "'scriptrun_time'", "]", ")", "df", ".", "index", "=", "pd", ".", "to_datetime", "(", "df", "[", "'epoch'", "]", ",", "unit", "=", "'s'", ")", "timer_results", "=", "self", ".", "_get_processed_dataframe", "(", "df", ")", "self", ".", "timers_results", "[", "key", "]", "=", "timer_results", "# clear memory", "del", "self", ".", "_timers_values" ]
Removes all URLs within text_string and returns the new string as type str.
def remove_urls(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join(re.sub(r'http\S+', "", text_string).split())
    else:
        raise InputError("string not passed as argument")
617
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L313-L330
[ "def", "create", "(", "cls", ",", "destination", ")", ":", "mdb_gz_b64", "=", "\"\"\"\\\n H4sICIenn1gC/25ldzIwMDMubWRiAO2de2wcRx3Hf7O7Pt/d3u6eLyEtVaOaqg+EkjQvuVVDwa9a\n jWXHdZxQQlCJ7fOrfp3OTpqkhVxTItFWIhVQVFBRVNIKRaColVpAUKGKRwwFqUAhKiBIpUaoVWP+\n qKgIIHL8Znb39u72znWJiWP3+9l473fzm/nNY3cdf2fmbBJEPdO9E+nebLq+fWC6vrWZOImen9D7\n 9sR+vPPNE0PZxo/TE5879mj+yNc3/OzAD2bXv3DmV9/o/8PZnxxr+/fDL2w79ulzN7e+/sS/zvzz\n w3+N1z28p3PTfQ3nfn/m2YmeFS2no89uWnvqwO5HUvd/5Phr938tes3j/zm5+qT41J8/P/iZx87/\n +qHrjgyduubG1t/+7eWB2XztTNuT+1clZt9c2/e7HRGizevWEwAAAAAAAACAhUEIwvE+PoRIO8K7\n FzT6obPPwTMBAAAAAAAAAABcfpzPXwya+Ispo1xlEO2KEEX9eaGyWnrqyKQ60tQ0AcNZRcR1RYuy\n +XZCxoqRzmaMI6cKGRJuJVrIEZUOQ9UrHStUYpyzKkdNmSPFDkM6aguhXMdVHCMuHXE2Suu4IFQJ\n l6CErNWUDouDlbdKOZIcrKLD4S5WdNhqIEodqlVaofKgVTHpiBQ6uLG0uaKsuYbf3IS8BmV1qFAm\n j1Z5Hbp06GWDKC+DTS00SRN8DFA/TXNfW6mXX3upj7+mOHWllzLAObN8du0gdSdlKO3ZcWqjMbaH\n uOQqtidViRF+P0HbOH2c3xm0lfMb1EH7uHZ5vp32c+ks+5PqfSeXS9NejjTAvZQpd7J3kuuJFqLE\n qYvuVa3Ocqk7OVXWNMFxZPRVtJ1zSXuCBrlkh+rjEF1Zlt5Dw6qN0xx5Bx3gGgbowVo56EIjkc9T\n xX9Jdd+5PKDOD6q3VQvwv7qiZ8st419cdYHlo6iuriF8X4HA590AsodXhvrsj0yMDPnAuI+ZvOrq\n 1o7K51Hdy7a8cdXNm5AedbfG5W3j3lOybxFZKb6zAgAAAAAAsNzQxAlbvnYJV3VcUU3/S2luBIKF\n ha+IlWp+wxW4IiRXRSXxKeNU1eOxUuUbSOIINbEM7WT506ZE3LASgCOeYJWCMcnCsI/u8eSsFEYR\n lnlbWa6+u0jTYqSkvuQL9G5CLFwTRBMAAAAAAAAAgMtW/79lyVdLKxW7oqDF3bXOniib0UD/m/xq\n loWqvFwt3DX/mrLNALIu3V35NkpK1JDmL+2XOmr9pf1gKiFY4I672wc0mveaf6zaenyKmljPT6t5\n hT7a6y13y0XqjFpwneJjRC0oRwvL3eUL2fHCcuyGIntjhTkDuZCd5Vc5j+HNUMyx+myYcpHW5YG5\n ZijUdbg2VFu4ZzzcHFM3seQLAAAAAAAAAMtc//9S6cm1emX97ytK1v81rHelhtfVfAFnseZXRdV9\n Ad7+dhGS5kbl3eqe/K8pU/nnYwX5X2VeoLbCZwHi7txD6aTELabnoLJ5AfPFC8JmFd3Pun+MlfM4\n q/846/4s62i5+8Dmc7EvSVN0UG2tL00p1uPXqZTt/G5QqX+5lbufz+mSctVzFce6upBrTG3Fd+cn\n pmiYrUyw8+GNfL4hn8/k83qZrVlyGzgPeqbhjcOqx7KMEZRpU/MPQ+rsldEtuYm8vExkznoMS+6b\n KC5TZRt8wVf4xEkFX4V5D/X2vYz1/EcR8yMAAAAAAACAJY0Qf/d3vLPUlb//b4Nzzv6W3Wevtl+1\n vmxts2LWTxOHErcm3jGfMUfNG0yMGQAAAAAAeJ/8rLwAMXIYRgCARFv8IIaYtKpGqCdqlN/2kupD\n /ob67qXhsi0lDh2Vp6728faO9tHuUflfWJ1wE0e6724f35XuG71r16Dr0FwH573by6rKi0N7RveN\n tnd6aTVBWrpjd3fnuJtsBMnDk90ju7zckSA5XGGtdGrK2dWhUnRcMgAAAAAAAAD4v2CIV6vqf82I\n Jusbcwsy7wkWSf/n1JQNq/Oc+uQGq/ecmsphYZ6Tn6XwRLjwxb7mTxDoakLgURUFshwAAAAAAAAA\n ljpCrHZ8W/f2/2NUAAAAAAAAAAAAhXH5RLm4IIbotqot7hbW/0MGWCp46/+pgpHwjZS3IyAlfMPy\n tgakNN+wfcPxNgukdN9I+kadt30gZfhGjW+s8I2V3s6CVNTbWZCK+Eatb3zAN1Z5mw5SMd+I+wZ+\n +QQAAAAAAAAA/K8IcdT27Zqi3/+HkQEAAAAAAAAAsGgkMQQLjSHqbQPDAAAAAAAAAAAALGuw/g8A\n AAAAAAAA4DJUqwsQI7cQDWlcLiMq1/9rcGMBAAAAAAAAAADLGuh/AAAAAAAAAAAA+h8AAAAAAAAA\n AABLHyHusDTPjtLzTtoxnRftUftqe8YatDA+AAAAAAAAAPDeqJN/KVt+et0R9PYnzz7W8PrZRv+V\n HblO6qEDNEXbaYDGqJemaYQmaYJThtnK8Gvzb1opfDRTPZmUlxUY86qgm/ZyFVkOOqCC3kLhoyEI\n qs8raBO10O0q3EYKH+uDcNq8wnVRH93D7evnYZhHG5kkB3a0OYO2ctCWV9ZR+FhT0l2HCzl6xVBz\n XZyPUvi4taTjcwRuVUF7uYW9HMy9MJspfGwMAoo5A+5Qwca8UHN2WogeU/fu0ito1vmjM+M85zzp\n fNG5zxl2djrNzk3O9+0m+yWrx2q0fpH4buJ4Yk3ig4lvmkfxx9gBAAAAAAC4OAylQfJ5h5pfSVCc\n f853gqSmWPSZux6xjUznltH2HT/flNu7++0NZ7/07cg/vnPbVu30y6d/NLvlabPh+j81v/Xc5g9l\n 1h2f+epn9+VPdN90OHHvU50fm94y/ZXvWQ/tP/yJG/NH3llz8A79tlNPG72DHSePHdzz2s3XPzVj\n vzSUvSHjVys1Rv5CSUv8pEvcEqkbV/KX35JaQ+npikmRS9o4rtYIt8RYnJa4Ou6SV6stTm+l7rcX\n q9qSy+23pCVIcgV/SZKuJj5CSRc4Y/PpkiesLJcI53J37NvFuQzv4peGL0/SypP+C+45xVAAMAEA\n \"\"\"", "pristine", "=", "StringIO", "(", ")", "pristine", ".", "write", "(", "base64", ".", "b64decode", "(", "mdb_gz_b64", ")", ")", "pristine", ".", "seek", "(", "0", ")", "pristine", "=", "gzip", ".", "GzipFile", "(", "fileobj", "=", "pristine", ",", "mode", "=", "'rb'", ")", "with", "open", "(", 
"destination", ",", "'wb'", ")", "as", "handle", ":", "shutil", ".", "copyfileobj", "(", "pristine", ",", "handle", ")", "return", "cls", "(", "destination", ")" ]
Removes all extra whitespace found within text_string and returns the new string as type str.
def remove_whitespace(text_string):
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join(text_string.split())
    else:
        raise InputError("none type or string not passed as an argument")
618
https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L332-L349
[ "def", "pull", "(", "self", ",", "arm_id", ",", "success", ",", "failure", ")", ":", "self", ".", "__beta_dist_dict", "[", "arm_id", "]", ".", "observe", "(", "success", ",", "failure", ")" ]
This is the primary method to override to ensure logging with extra options gets correctly specified.
def log(self, level, message, *args, **kwargs):
    extra = self.extras.copy()
    extra.update(kwargs.pop('extra', {}))
    kwargs['extra'] = extra
    self.logger.log(level, message, *args, **kwargs)
619
https://github.com/romaryd/python-logging-mixin/blob/8ac77df5731b607e6ff9ef762e71398cb5a892ea/loggingmixin/__init__.py#L90-L99
[ "def", "read_tables", "(", "fstream", ")", ":", "table", "=", "read_table", "(", "fstream", ")", "while", "table", "is", "not", "None", ":", "yield", "table", "table", "=", "read_table", "(", "fstream", ")" ]
Specialized warnings system. If a warning subclass is passed into the keyword arguments and raise_warnings is True, the warning will be passed to the warnings module.
def warning(self, message, *args, **kwargs):
    warncls = kwargs.pop('warning', None)
    if warncls and self.raise_warnings:
        warnings.warn(message, warncls)
    return self.log(logging.WARNING, message, *args, **kwargs)
620
https://github.com/romaryd/python-logging-mixin/blob/8ac77df5731b607e6ff9ef762e71398cb5a892ea/loggingmixin/__init__.py#L107-L117
[ "def", "rebalance_replication_groups", "(", "self", ")", ":", "# Balance replicas over replication-groups for each partition", "if", "any", "(", "b", ".", "inactive", "for", "b", "in", "six", ".", "itervalues", "(", "self", ".", "cluster_topology", ".", "brokers", ")", ")", ":", "self", ".", "log", ".", "error", "(", "\"Impossible to rebalance replication groups because of inactive \"", "\"brokers.\"", ")", "raise", "RebalanceError", "(", "\"Impossible to rebalance replication groups because of inactive \"", "\"brokers\"", ")", "# Balance replica-count over replication-groups", "self", ".", "rebalance_replicas", "(", ")", "# Balance partition-count over replication-groups", "self", ".", "_rebalance_groups_partition_cnt", "(", ")" ]
Provide current user as extra context to the logger
def log(self, level, message, *args, **kwargs):
    extra = kwargs.pop('extra', {})
    extra.update({'user': self.user})
    kwargs['extra'] = extra
    super(ServiceLogger, self).log(level, message, *args, **kwargs)
621
https://github.com/romaryd/python-logging-mixin/blob/8ac77df5731b607e6ff9ef762e71398cb5a892ea/loggingmixin/__init__.py#L147-L157
[ "def", "comparebed", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "comparebed", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "abed", ",", "bbed", "=", "args", "abed", "=", "Bed", "(", "abed", ")", "bbed", "=", "Bed", "(", "bbed", ")", "query_links", "(", "abed", ",", "bbed", ")", "query_links", "(", "bbed", ",", "abed", ")" ]
Instantiates and returns a ServiceLogger instance
def logger(self):
    if not hasattr(self, '_logger') or not self._logger:
        self._logger = ServiceLogger()
    return self._logger
622
https://github.com/romaryd/python-logging-mixin/blob/8ac77df5731b607e6ff9ef762e71398cb5a892ea/loggingmixin/__init__.py#L166-L172
[ "def", "comp_overlap_table", "(", "data", ")", ":", "headers", "=", "OrderedDict", "(", ")", "headers", "[", "'comp_rate'", "]", "=", "{", "'title'", ":", "'Compare rate'", ",", "'description'", ":", "'Ratio of known variants found in the reference set.'", ",", "'namespace'", ":", "'GATK'", ",", "'min'", ":", "0", ",", "'max'", ":", "100", ",", "'suffix'", ":", "'%'", ",", "'format'", ":", "'{:,.2f}'", ",", "'scale'", ":", "'Blues'", ",", "}", "headers", "[", "'concordant_rate'", "]", "=", "{", "'title'", ":", "'Concordant rate'", ",", "'description'", ":", "'Ratio of variants matching alleles in the reference set.'", ",", "'namespace'", ":", "'GATK'", ",", "'min'", ":", "0", ",", "'max'", ":", "100", ",", "'suffix'", ":", "'%'", ",", "'format'", ":", "'{:,.2f}'", ",", "'scale'", ":", "'Blues'", ",", "}", "headers", "[", "'eval_variants'", "]", "=", "{", "'title'", ":", "'M Evaluated variants'", ",", "'description'", ":", "'Number of called variants (millions)'", ",", "'namespace'", ":", "'GATK'", ",", "'min'", ":", "0", ",", "'modify'", ":", "lambda", "x", ":", "float", "(", "x", ")", "/", "1000000.0", "}", "headers", "[", "'known_sites'", "]", "=", "{", "'title'", ":", "'M Known sites'", ",", "'description'", ":", "'Number of known variants (millions)'", ",", "'namespace'", ":", "'GATK'", ",", "'min'", ":", "0", ",", "'modify'", ":", "lambda", "x", ":", "float", "(", "x", ")", "/", "1000000.0", "}", "headers", "[", "'novel_sites'", "]", "=", "{", "'title'", ":", "'M Novel sites'", ",", "'description'", ":", "'Number of novel variants (millions)'", ",", "'namespace'", ":", "'GATK'", ",", "'min'", ":", "0", ",", "'modify'", ":", "lambda", "x", ":", "float", "(", "x", ")", "/", "1000000.0", "}", "table_html", "=", "table", ".", "plot", "(", "data", ",", "headers", ",", "{", "'id'", ":", "'gatk_compare_overlap'", ",", "'table_title'", ":", "'GATK - Compare Overlap'", "}", ")", "return", "table_html" ]
Uses a peyotl wrapper around an Open Tree web service to get a list of studies that include a given value for a given property to be searched on.
def ot_find_studies(arg_dict, exact=True, verbose=False, oti_wrapper=None):
    if oti_wrapper is None:
        from peyotl.sugar import oti
        oti_wrapper = oti
    return oti_wrapper.find_studies(arg_dict,
                                    exact=exact,
                                    verbose=verbose,
                                    wrap_response=True)
623
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-oti-find-studies.py#L12-L25
[ "def", "decode_offset_commit_response", "(", "cls", ",", "response", ")", ":", "return", "[", "kafka", ".", "structs", ".", "OffsetCommitResponsePayload", "(", "topic", ",", "partition", ",", "error", ")", "for", "topic", ",", "partitions", "in", "response", ".", "topics", "for", "partition", ",", "error", "in", "partitions", "]" ]
This function sets up a command-line option parser and then calls print_matching_studies to do all of the real work.
def main(argv):
    import argparse
    description = ('Uses Open Tree of Life web services to try to find a tree with the value property pair specified. '
                   'setting --fuzzy will allow fuzzy matching')
    parser = argparse.ArgumentParser(prog='ot-get-tree', description=description)
    parser.add_argument('arg_dict', type=json.loads, help='name(s) for which we will try to find OTT IDs')
    parser.add_argument('--property', default=None, type=str, required=False)
    parser.add_argument('--fuzzy', action='store_true', default=False, required=False)  # exact matching and verbose not working atm...
    parser.add_argument('--verbose', action='store_true', default=False, required=False)
    try:
        args = parser.parse_args(argv)
        arg_dict = args.arg_dict
        exact = not args.fuzzy
        verbose = args.verbose
    except:
        arg_dict = {'ot:studyId': 'ot_308'}
        sys.stderr.write('Running a demonstration query with {}\n'.format(arg_dict))
        exact = True
        verbose = False
    print_matching_studies(arg_dict, exact=exact, verbose=verbose)
624
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-oti-find-studies.py#L36-L59
[ "def", "delete_blobs", "(", "self", ",", "blobs", ",", "on_error", "=", "None", ",", "client", "=", "None", ")", ":", "for", "blob", "in", "blobs", ":", "try", ":", "blob_name", "=", "blob", "if", "not", "isinstance", "(", "blob_name", ",", "six", ".", "string_types", ")", ":", "blob_name", "=", "blob", ".", "name", "self", ".", "delete_blob", "(", "blob_name", ",", "client", "=", "client", ")", "except", "NotFound", ":", "if", "on_error", "is", "not", "None", ":", "on_error", "(", "blob", ")", "else", ":", "raise" ]
This function sets up a command-line option parser and then does all of the real work.
def main(argv):
    import argparse
    import codecs
    # have to be ready to deal with utf-8 names
    out = codecs.getwriter('utf-8')(sys.stdout)

    description = '''Takes a series of at least 2 OTT ids and reports the OTT of their least inclusive taxonomic ancestor and that taxon's ancestors.'''
    parser = argparse.ArgumentParser(prog='ot-taxo-mrca-to-root', description=description)
    parser.add_argument('ids', nargs='+', type=int, help='OTT IDs')
    args = parser.parse_args(argv)
    id_list = args.ids
    last_id = id_list.pop()
    anc_list = get_taxonomic_ancestor_ids(last_id)
    common_anc = set(anc_list)
    for curr_id in id_list:
        curr_anc_set = set(get_taxonomic_ancestor_ids(curr_id))
        common_anc &= curr_anc_set
        if not common_anc:
            break
    for anc_id in anc_list:
        if anc_id in common_anc:
            out.write('{}\n'.format(anc_id))
625
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-taxo-mrca-to-root.py#L23-L46
[ "def", "saturation", "(", "self", ",", "value", ")", ":", "value", "=", "clean_float", "(", "value", ")", "if", "value", "is", "None", ":", "return", "try", ":", "unit_moisture_weight", "=", "self", ".", "unit_moist_weight", "-", "self", ".", "unit_dry_weight", "unit_moisture_volume", "=", "unit_moisture_weight", "/", "self", ".", "_pw", "saturation", "=", "unit_moisture_volume", "/", "self", ".", "_calc_unit_void_volume", "(", ")", "if", "saturation", "is", "not", "None", "and", "not", "ct", ".", "isclose", "(", "saturation", ",", "value", ",", "rel_tol", "=", "self", ".", "_tolerance", ")", ":", "raise", "ModelError", "(", "\"New saturation (%.3f) is inconsistent \"", "\"with calculated value (%.3f)\"", "%", "(", "value", ",", "saturation", ")", ")", "except", "TypeError", ":", "pass", "old_value", "=", "self", ".", "saturation", "self", ".", "_saturation", "=", "value", "try", ":", "self", ".", "recompute_all_weights_and_void", "(", ")", "self", ".", "_add_to_stack", "(", "\"saturation\"", ",", "value", ")", "except", "ModelError", "as", "e", ":", "self", ".", "_saturation", "=", "old_value", "raise", "ModelError", "(", "e", ")" ]
Determine if a value is a sequence type.
def is_sequence(value):
    return (hasattr(value, "__iter__") and
            not isinstance(value, (six.string_types, six.binary_type)))
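An illustrative check of the helper above; it only assumes six is installed, as the function itself does.

print(is_sequence([1, 2, 3]))   # True: iterable and not a string type
print(is_sequence("abc"))       # False: strings are excluded
print(is_sequence(b"abc"))      # False: bytes are excluded too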
626
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L12-L23
[ "def", "user_agent", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "indicator_obj", "=", "UserAgent", "(", "text", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_indicator", "(", "indicator_obj", ")" ]
Import the class referred to by the fully qualified class path.
def import_class(classpath):
    modname, classname = classpath.rsplit(".", 1)
    module = importlib.import_module(modname)
    klass = getattr(module, classname)
    return klass
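A quick usage sketch of the helper above; collections.OrderedDict is just an arbitrary importable class chosen for illustration.

klass = import_class("collections.OrderedDict")
d = klass(a=1, b=2)
print(d["a"])  # 1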
627
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L26-L42
[ "def", "make_random_models_table", "(", "n_sources", ",", "param_ranges", ",", "random_state", "=", "None", ")", ":", "prng", "=", "check_random_state", "(", "random_state", ")", "sources", "=", "Table", "(", ")", "for", "param_name", ",", "(", "lower", ",", "upper", ")", "in", "param_ranges", ".", "items", "(", ")", ":", "# Generate a column for every item in param_ranges, even if it", "# is not in the model (e.g. flux). However, such columns will", "# be ignored when rendering the image.", "sources", "[", "param_name", "]", "=", "prng", ".", "uniform", "(", "lower", ",", "upper", ",", "n_sources", ")", "return", "sources" ]
Attempt to return a Python class for the input class reference.
def resolve_class(classref):
    if classref is None:
        return None
    elif isinstance(classref, six.class_types):
        return classref
    elif isinstance(classref, six.string_types):
        return import_class(classref)
    else:
        raise ValueError("Unable to resolve class for '%s'" % classref)
628
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L45-L65
[ "def", "getInitialSample", "(", "self", ",", "wmg", ")", ":", "cands", "=", "range", "(", "len", "(", "wmg", ")", ")", "allPairs", "=", "itertools", ".", "combinations", "(", "cands", ",", "2", ")", "V", "=", "self", ".", "createBinaryRelation", "(", "len", "(", "cands", ")", ")", "for", "pair", "in", "allPairs", ":", "if", "wmg", "[", "pair", "[", "0", "]", "+", "1", "]", "[", "pair", "[", "1", "]", "+", "1", "]", ">", "0", ":", "V", "[", "pair", "[", "0", "]", "]", "[", "pair", "[", "1", "]", "]", "=", "1", "V", "[", "pair", "[", "1", "]", "]", "[", "pair", "[", "0", "]", "]", "=", "0", "else", ":", "V", "[", "pair", "[", "0", "]", "]", "[", "pair", "[", "1", "]", "]", "=", "0", "V", "[", "pair", "[", "1", "]", "]", "[", "pair", "[", "0", "]", "]", "=", "1", "return", "V" ]
Function decorator which checks that the decorated function is called with a set of required kwargs.
def needkwargs(*argnames):
    required = set(argnames)

    def decorator(func):
        def inner(*args, **kwargs):
            missing = required - set(kwargs)
            if missing:
                err = "%s kwargs are missing." % list(missing)
                raise ValueError(err)
            return func(*args, **kwargs)
        return inner
    return decorator
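A minimal usage sketch of the decorator above; the connect function and its keyword names are made up for illustration.

@needkwargs("host", "port")
def connect(**kwargs):
    # hypothetical function; the decorator only checks the kwargs are present
    return "connecting to %(host)s:%(port)s" % kwargs

print(connect(host="localhost", port=3551))
connect(host="localhost")  # raises ValueError: ['port'] kwargs are missing.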
629
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L83-L104
[ "def", "_dd", "(", "self", ",", "file_name", ",", "offset", ",", "size", ",", "extension", ",", "output_file_name", "=", "None", ")", ":", "total_size", "=", "0", "# Default extracted file name is <displayed hex offset>.<extension>", "default_bname", "=", "\"%X\"", "%", "(", "offset", "+", "self", ".", "config", ".", "base", ")", "# Make sure the output file name is a string", "if", "output_file_name", "is", "not", "None", ":", "output_file_name", "=", "str", "(", "output_file_name", ")", "if", "self", ".", "max_size", "and", "size", ">", "self", ".", "max_size", ":", "size", "=", "self", ".", "max_size", "if", "not", "output_file_name", "or", "output_file_name", "is", "None", ":", "bname", "=", "default_bname", "else", ":", "# Strip the output file name of invalid/dangerous characters (like file paths)", "bname", "=", "os", ".", "path", ".", "basename", "(", "output_file_name", ")", "fname", "=", "unique_file_name", "(", "bname", ",", "extension", ")", "try", ":", "# If byte swapping is enabled, we need to start reading at a swap-size", "# aligned offset, then index in to the read data appropriately.", "if", "self", ".", "config", ".", "swap_size", ":", "adjust", "=", "offset", "%", "self", ".", "config", ".", "swap_size", "else", ":", "adjust", "=", "0", "offset", "-=", "adjust", "# Open the target file and seek to the offset", "fdin", "=", "self", ".", "config", ".", "open_file", "(", "file_name", ")", "fdin", ".", "seek", "(", "offset", ")", "# Open the output file", "try", ":", "fdout", "=", "BlockFile", "(", "fname", ",", "'w'", ")", "except", "KeyboardInterrupt", "as", "e", ":", "raise", "e", "except", "Exception", "as", "e", ":", "# Fall back to the default name if the requested name fails", "fname", "=", "unique_file_name", "(", "default_bname", ",", "extension", ")", "fdout", "=", "BlockFile", "(", "fname", ",", "'w'", ")", "while", "total_size", "<", "size", ":", "(", "data", ",", "dlen", ")", "=", "fdin", ".", "read_block", "(", ")", "if", "dlen", "<", "1", ":", "break", "else", ":", "total_size", "+=", "(", "dlen", "-", "adjust", ")", "if", "total_size", ">", "size", ":", "dlen", "-=", "(", "total_size", "-", "size", ")", "fdout", ".", "write", "(", "str2bytes", "(", "data", "[", "adjust", ":", "dlen", "]", ")", ")", "adjust", "=", "0", "# Cleanup", "fdout", ".", "close", "(", ")", "fdin", ".", "close", "(", ")", "except", "KeyboardInterrupt", "as", "e", ":", "raise", "e", "except", "Exception", "as", "e", ":", "raise", "Exception", "(", "\"Extractor.dd failed to extract data from '%s' to '%s': %s\"", "%", "(", "file_name", ",", "fname", ",", "str", "(", "e", ")", ")", ")", "binwalk", ".", "core", ".", "common", ".", "debug", "(", "\"Carved data block 0x%X - 0x%X from '%s' to '%s'\"", "%", "(", "offset", ",", "offset", "+", "size", ",", "file_name", ",", "fname", ")", ")", "return", "fname" ]
Connect to the APCUPSd NIS and request its status.
def get(host="localhost", port=3551, timeout=30):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    sock.connect((host, port))
    sock.send(CMD_STATUS)
    buffr = ""
    while not buffr.endswith(EOF):
        buffr += sock.recv(BUFFER_SIZE).decode()
    sock.close()
    return buffr
630
https://github.com/flyte/apcaccess/blob/0c8a5d5e4ba1c07110e411b4ffea4ddccef4829a/apcaccess/status.py#L31-L43
[ "def", "bucket", "(", "self", ",", "experiment", ",", "user_id", ",", "bucketing_id", ")", ":", "if", "not", "experiment", ":", "return", "None", "# Determine if experiment is in a mutually exclusive group", "if", "experiment", ".", "groupPolicy", "in", "GROUP_POLICIES", ":", "group", "=", "self", ".", "config", ".", "get_group", "(", "experiment", ".", "groupId", ")", "if", "not", "group", ":", "return", "None", "user_experiment_id", "=", "self", ".", "find_bucket", "(", "bucketing_id", ",", "experiment", ".", "groupId", ",", "group", ".", "trafficAllocation", ")", "if", "not", "user_experiment_id", ":", "self", ".", "config", ".", "logger", ".", "info", "(", "'User \"%s\" is in no experiment.'", "%", "user_id", ")", "return", "None", "if", "user_experiment_id", "!=", "experiment", ".", "id", ":", "self", ".", "config", ".", "logger", ".", "info", "(", "'User \"%s\" is not in experiment \"%s\" of group %s.'", "%", "(", "user_id", ",", "experiment", ".", "key", ",", "experiment", ".", "groupId", ")", ")", "return", "None", "self", ".", "config", ".", "logger", ".", "info", "(", "'User \"%s\" is in experiment %s of group %s.'", "%", "(", "user_id", ",", "experiment", ".", "key", ",", "experiment", ".", "groupId", ")", ")", "# Bucket user if not in white-list and in group (if any)", "variation_id", "=", "self", ".", "find_bucket", "(", "bucketing_id", ",", "experiment", ".", "id", ",", "experiment", ".", "trafficAllocation", ")", "if", "variation_id", ":", "variation", "=", "self", ".", "config", ".", "get_variation_from_id", "(", "experiment", ".", "key", ",", "variation_id", ")", "self", ".", "config", ".", "logger", ".", "info", "(", "'User \"%s\" is in variation \"%s\" of experiment %s.'", "%", "(", "user_id", ",", "variation", ".", "key", ",", "experiment", ".", "key", ")", ")", "return", "variation", "self", ".", "config", ".", "logger", ".", "info", "(", "'User \"%s\" is in no variation.'", "%", "user_id", ")", "return", "None" ]
Removes all units from the ends of the lines.
def strip_units_from_lines(lines):
    for line in lines:
        for unit in ALL_UNITS:
            if line.endswith(" %s" % unit):
                line = line[:-1 - len(unit)]
        yield line
631
https://github.com/flyte/apcaccess/blob/0c8a5d5e4ba1c07110e411b4ffea4ddccef4829a/apcaccess/status.py#L69-L77
[ "def", "_validate_slices_form_uniform_grid", "(", "slice_datasets", ")", ":", "invariant_properties", "=", "[", "'Modality'", ",", "'SOPClassUID'", ",", "'SeriesInstanceUID'", ",", "'Rows'", ",", "'Columns'", ",", "'PixelSpacing'", ",", "'PixelRepresentation'", ",", "'BitsAllocated'", ",", "'BitsStored'", ",", "'HighBit'", ",", "]", "for", "property_name", "in", "invariant_properties", ":", "_slice_attribute_equal", "(", "slice_datasets", ",", "property_name", ")", "_validate_image_orientation", "(", "slice_datasets", "[", "0", "]", ".", "ImageOrientationPatient", ")", "_slice_ndarray_attribute_almost_equal", "(", "slice_datasets", ",", "'ImageOrientationPatient'", ",", "1e-5", ")", "slice_positions", "=", "_slice_positions", "(", "slice_datasets", ")", "_check_for_missing_slices", "(", "slice_positions", ")" ]
Print the status to stdout in the same format as the original apcaccess.
def print_status(raw_status, strip_units=False):
    lines = split(raw_status)
    if strip_units:
        lines = strip_units_from_lines(lines)
    for line in lines:
        print(line)
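A usage sketch tying together get and print_status above; it assumes the apcaccess.status module from the flyte/apcaccess repository is importable and that an apcupsd NIS daemon is reachable at the default host and port.

from apcaccess import status

raw = status.get(host="localhost", port=3551)  # query the NIS for the raw status text
status.print_status(raw, strip_units=True)     # print it like the stock apcaccess tool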
632
https://github.com/flyte/apcaccess/blob/0c8a5d5e4ba1c07110e411b4ffea4ddccef4829a/apcaccess/status.py#L80-L88
[ "def", "get_mandatory_sections", "(", "self", ")", ":", "return", "[", "s", "for", "s", "in", "self", ".", "opt", "if", "s", "not", "in", "self", ".", "optional_sections", "and", "s", "not", "in", "self", ".", "excluded_sections", "]" ]
If the taxa are being cached, this call will create the lineage spike for the taxon child_taxon.
def get_cached_parent_for_taxon(self, child_taxon):
    if self._ott_id2taxon is None:
        resp = child_taxon._taxonomic_lineage[0]
        tl = child_taxon._taxonomic_lineage[1:]
        assert 'taxonomic_lineage' not in resp
        resp['taxonomic_lineage'] = tl
        return TaxonWrapper(taxonomy=child_taxon.taxonomy,
                            taxomachine_wrapper=self._wr,
                            prop_dict=resp)  # TODO recursive (indirectly)
    else:
        anc = []
        prev = None
        for resp in reversed(child_taxon._taxonomic_lineage):
            ott_id = resp['ot:ottId']
            curr = self._ott_id2taxon.get(ott_id)
            if curr is None:
                assert 'taxonomic_lineage' not in resp
                assert 'parent' not in resp
                resp['parent'] = prev
                resp['taxonomic_lineage'] = anc
                curr = TaxonWrapper(taxonomy=child_taxon.taxonomy,
                                    taxomachine_wrapper=self._wr,
                                    prop_dict=resp)
            elif curr._parent is None and prev is not None:
                curr._parent = prev
            prev = curr
            anc.insert(0, curr)
        return prev
633
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/api/taxomachine.py#L373-L405
[ "def", "_get_request_param", "(", "self", ",", "request", ")", ":", "params", "=", "{", "}", "try", ":", "params", "=", "request", ".", "POST", ".", "copy", "(", ")", "if", "not", "params", ":", "params", "=", "json", ".", "loads", "(", "request", ".", "body", ")", "except", "Exception", ":", "pass", "for", "key", "in", "params", ":", "# replace a value to a masked characters", "if", "key", "in", "self", ".", "mask_fields", ":", "params", "[", "key", "]", "=", "'*'", "*", "8", "# when a file uploaded (E.g create image)", "files", "=", "request", ".", "FILES", ".", "values", "(", ")", "if", "list", "(", "files", ")", ":", "filenames", "=", "', '", ".", "join", "(", "[", "up_file", ".", "name", "for", "up_file", "in", "files", "]", ")", "params", "[", "'file_name'", "]", "=", "filenames", "try", ":", "return", "json", ".", "dumps", "(", "params", ",", "ensure_ascii", "=", "False", ")", "except", "Exception", ":", "return", "'Unserializable Object'" ]
Updates the fields of info about an OTU that might not be filled in by a match_names or taxon call.
def update_empty_fields(self, **kwargs):
    if self._is_deprecated is None:
        self._is_deprecated = kwargs.get('is_deprecated')
    if self._is_dubious is None:
        self._is_dubious = kwargs.get('is_dubious')
    if self._is_synonym is None:
        self._is_synonym = kwargs.get('is_synonym')
    if self._synonyms is _EMPTY_TUPLE:
        self._synonyms = kwargs.get('synonyms')
        if self._synonyms is None:
            self._synonyms = _EMPTY_TUPLE
    if self.rank is None:
        self._rank = kwargs.get('rank')
    if self._nomenclature_code:
        self._nomenclature_code = kwargs.get('nomenclature_code')
    if not self._unique_name:
        self._unique_name = kwargs.get('unique_name')
    if self._taxonomic_lineage is None:
        self._taxonomic_lineage = kwargs.get('taxonomic_lineage')
    if self._parent is None:
        self._parent = kwargs.get('parent')
    if self._parent is None and self._taxomachine_wrapper is not None and self._taxonomic_lineage:
        self._fill_parent_attr()
634
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/api/taxon.py#L47-L70
[ "def", "disconnect", "(", "self", ",", "key", "=", "None", ",", "func", "=", "None", ")", ":", "if", "key", "is", "None", ":", "for", "key", ",", "connections", "in", "self", ".", "_connections", ".", "items", "(", ")", ":", "for", "conn", "in", "connections", "[", ":", "]", ":", "if", "func", "is", "None", "or", "conn", "is", "func", ":", "connections", ".", "remove", "(", "conn", ")", "else", ":", "connections", "=", "self", ".", "_connections", "[", "key", "]", "for", "conn", "in", "connections", "[", ":", "]", ":", "if", "func", "is", "None", "or", "conn", "is", "func", ":", "connections", ".", "remove", "(", "conn", ")" ]
Verifies that ebt is the inverse of the edgeBySourceId data member of tree.
def _check_rev_dict(tree, ebt):
    ebs = defaultdict(dict)
    for edge in ebt.values():
        source_id = edge['@source']
        edge_id = edge['@id']
        ebs[source_id][edge_id] = edge
    assert ebs == tree['edgeBySourceId']
635
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L42-L49
[ "def", "_prompt_username", "(", "prompt", "=", "\"Username: \"", ",", "prefill", "=", "None", ")", ":", "if", "prefill", ":", "readline", ".", "set_startup_hook", "(", "lambda", ":", "readline", ".", "insert_text", "(", "prefill", ")", ")", "try", ":", "return", "input", "(", "prompt", ")", ".", "strip", "(", ")", "except", "EOFError", ":", "print", "(", ")", "finally", ":", "readline", ".", "set_startup_hook", "(", ")" ]
Creates an edge_by_target dict with the same edge objects as edge_by_source. Also adds an '@id' key to each edge.
def _create_edge_by_target(self):
    ebt = {}
    for edge_dict in self._edge_by_source.values():
        for edge_id, edge in edge_dict.items():
            target_id = edge['@target']
            edge['@id'] = edge_id
            assert target_id not in ebt
            ebt[target_id] = edge
    # _check_rev_dict(self._tree, ebt)
    return ebt
636
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L86-L97
[ "def", "get_image_format", "(", "filename", ")", ":", "image", "=", "None", "bad_image", "=", "1", "image_format", "=", "NONE_FORMAT", "sequenced", "=", "False", "try", ":", "bad_image", "=", "Image", ".", "open", "(", "filename", ")", ".", "verify", "(", ")", "image", "=", "Image", ".", "open", "(", "filename", ")", "image_format", "=", "image", ".", "format", "sequenced", "=", "_is_image_sequenced", "(", "image", ")", "except", "(", "OSError", ",", "IOError", ",", "AttributeError", ")", ":", "pass", "if", "sequenced", ":", "image_format", "=", "gif", ".", "SEQUENCED_TEMPLATE", ".", "format", "(", "image_format", ")", "elif", "image", "is", "None", "or", "bad_image", "or", "image_format", "==", "NONE_FORMAT", ":", "image_format", "=", "ERROR_FORMAT", "comic_format", "=", "comic", ".", "get_comic_format", "(", "filename", ")", "if", "comic_format", ":", "image_format", "=", "comic_format", "if", "(", "Settings", ".", "verbose", ">", "1", ")", "and", "image_format", "==", "ERROR_FORMAT", "and", "(", "not", "Settings", ".", "list_only", ")", ":", "print", "(", "filename", ",", "\"doesn't look like an image or comic archive.\"", ")", "return", "image_format" ]
Remove nodes and edges from tree if they are not the ingroup or a descendant of it.
def prune_to_ingroup(self):
    # Prune to just the ingroup
    if not self._ingroup_node_id:
        _LOG.debug('No ingroup node was specified.')
        self._ingroup_node_id = self.root_node_id
    elif self._ingroup_node_id != self.root_node_id:
        self._do_prune_to_ingroup()
        self.root_node_id = self._ingroup_node_id
    else:
        _LOG.debug('Ingroup node is root.')
    return self.root_node_id
637
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L119-L130
[ "def", "_build_session", "(", "self", ",", "auth_class", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "session", "=", "requests", ".", "session", "(", ")", "session", ".", "auth", "=", "auth_class", "(", "*", "args", ",", "*", "*", "kwargs", ")", "session", ".", "headers", ".", "update", "(", "{", "'CB-VERSION'", ":", "self", ".", "API_VERSION", ",", "'Accept'", ":", "'application/json'", ",", "'Content-Type'", ":", "'application/json'", ",", "'User-Agent'", ":", "'coinbase/python/2.0'", "}", ")", "return", "session" ]
Prune node_id and the edges and nodes that are tipward of it . Caller must delete the edge to node_id .
def prune_clade ( self , node_id ) : to_del_nodes = [ node_id ] while bool ( to_del_nodes ) : node_id = to_del_nodes . pop ( 0 ) self . _flag_node_as_del_and_del_in_by_target ( node_id ) ebsd = self . _edge_by_source . get ( node_id ) if ebsd is not None : child_edges = list ( ebsd . values ( ) ) to_del_nodes . extend ( [ i [ '@target' ] for i in child_edges ] ) del self . _edge_by_source [ node_id ]
638
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L147-L159
[ "def", "generateHOSequence", "(", "sequence", ",", "symbolsPerSequence", ",", "numColumns", ",", "sparsity", ")", ":", "sequenceHO", "=", "[", "]", "sparseCols", "=", "int", "(", "numColumns", "*", "sparsity", ")", "for", "symbol", "in", "range", "(", "symbolsPerSequence", ")", ":", "if", "symbol", "==", "0", "or", "symbol", "==", "(", "symbolsPerSequence", "-", "1", ")", ":", "sequenceHO", ".", "append", "(", "generateRandomSymbol", "(", "numColumns", ",", "sparseCols", ")", ")", "else", ":", "sequenceHO", ".", "append", "(", "sequence", "[", "symbol", "]", ")", "return", "sequenceHO" ]
Deletes to_par_edge and nd_id . To be used when nd_id is an out - degree = 1 node
def suppress_deg_one_node ( self , to_par_edge , nd_id , to_child_edge ) : # circumvent the node with nd_id to_child_edge_id = to_child_edge [ '@id' ] par = to_par_edge [ '@source' ] self . _edge_by_source [ par ] [ to_child_edge_id ] = to_child_edge to_child_edge [ '@source' ] = par # make it a tip... del self . _edge_by_source [ nd_id ] # delete it self . _del_tip ( nd_id )
639
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L278-L288
[ "def", "_update_config_file", "(", "username", ",", "password", ",", "email", ",", "url", ",", "config_path", ")", ":", "try", ":", "# read the existing config", "config", "=", "json", ".", "load", "(", "open", "(", "config_path", ",", "\"r\"", ")", ")", "except", "ValueError", ":", "config", "=", "dict", "(", ")", "if", "not", "config", ".", "get", "(", "'auths'", ")", ":", "config", "[", "'auths'", "]", "=", "dict", "(", ")", "if", "not", "config", "[", "'auths'", "]", ".", "get", "(", "url", ")", ":", "config", "[", "'auths'", "]", "[", "url", "]", "=", "dict", "(", ")", "encoded_credentials", "=", "dict", "(", "auth", "=", "base64", ".", "b64encode", "(", "username", "+", "b':'", "+", "password", ")", ",", "email", "=", "email", ")", "config", "[", "'auths'", "]", "[", "url", "]", "=", "encoded_credentials", "try", ":", "json", ".", "dump", "(", "config", ",", "open", "(", "config_path", ",", "\"w\"", ")", ",", "indent", "=", "5", ",", "sort_keys", "=", "True", ")", "except", "Exception", "as", "exc", ":", "raise", "exceptions", ".", "AnsibleContainerConductorException", "(", "u\"Failed to write registry config to {0} - {1}\"", ".", "format", "(", "config_path", ",", "exc", ")", ")" ]
Describes the method .
def describe ( self ) : return { "name" : self . name , "params" : self . params , "returns" : self . returns , "description" : self . description , }
640
https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L36-L47
[ "def", "compose", "(", "list_of_files", ",", "destination_file", ",", "files_metadata", "=", "None", ",", "content_type", "=", "None", ",", "retry_params", "=", "None", ",", "_account_id", "=", "None", ")", ":", "api", "=", "storage_api", ".", "_get_storage_api", "(", "retry_params", "=", "retry_params", ",", "account_id", "=", "_account_id", ")", "if", "os", ".", "getenv", "(", "'SERVER_SOFTWARE'", ")", ".", "startswith", "(", "'Dev'", ")", ":", "def", "_temp_func", "(", "file_list", ",", "destination_file", ",", "content_type", ")", ":", "bucket", "=", "'/'", "+", "destination_file", ".", "split", "(", "'/'", ")", "[", "1", "]", "+", "'/'", "with", "open", "(", "destination_file", ",", "'w'", ",", "content_type", "=", "content_type", ")", "as", "gcs_merge", ":", "for", "source_file", "in", "file_list", ":", "with", "open", "(", "bucket", "+", "source_file", "[", "'Name'", "]", ",", "'r'", ")", "as", "gcs_source", ":", "gcs_merge", ".", "write", "(", "gcs_source", ".", "read", "(", ")", ")", "compose_object", "=", "_temp_func", "else", ":", "compose_object", "=", "api", ".", "compose_object", "file_list", ",", "_", "=", "_validate_compose_list", "(", "destination_file", ",", "list_of_files", ",", "files_metadata", ",", "32", ")", "compose_object", "(", "file_list", ",", "destination_file", ",", "content_type", ")" ]
The parameters for this method in a JSON - compatible format
def params ( self ) : return [ { "name" : p_name , "type" : p_type . __name__ } for ( p_name , p_type ) in self . signature . parameter_types ]
641
https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L50-L56
[ "def", "future_set_exception_unless_cancelled", "(", "future", ":", "\"Union[futures.Future[_T], Future[_T]]\"", ",", "exc", ":", "BaseException", ")", "->", "None", ":", "if", "not", "future", ".", "cancelled", "(", ")", ":", "future", ".", "set_exception", "(", "exc", ")", "else", ":", "app_log", ".", "error", "(", "\"Exception after Future was cancelled\"", ",", "exc_info", "=", "exc", ")" ]
The return type for this method in a JSON - compatible format .
def returns ( self ) : return_type = self . signature . return_type none_type = type ( None ) if return_type is not None and return_type is not none_type : return return_type . __name__
642
https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L59-L69
[ "def", "purge_catalogue", "(", "self", ",", "flag_vector", ")", ":", "id0", "=", "np", ".", "where", "(", "flag_vector", ")", "[", "0", "]", "self", ".", "select_catalogue_events", "(", "id0", ")", "self", ".", "get_number_events", "(", ")" ]
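Taken together, describe, params and returns produce a JSON-compatible summary of a registered method. A rough sketch of the shape of that output, assuming a hypothetical method add(x: int, y: int) -> int with a one-line docstring; the concrete values below are illustrative, not taken from the library.

{
    "name": "example.add",
    "params": [{"name": "x", "type": "int"}, {"name": "y", "type": "int"}],
    "returns": "int",
    "description": "Add two integers."
}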
Returns a signature object ensuring order of parameter names and types .
def create ( parameter_names , parameter_types , return_type ) : ordered_pairs = [ ( name , parameter_types [ name ] ) for name in parameter_names ] return MethodSignature ( ordered_pairs , return_type )
643
https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L90-L102
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
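A small usage sketch of the factory above, assuming it is exposed as a static constructor on MethodSignature as the source layout suggests; parameter_names fixes the ordering and parameter_types supplies the lookup, so the pairs come out in declaration order (names and types here are invented).

names = ["x", "y"]
types = {"y": str, "x": int}
sig = MethodSignature.create(names, types, bool)
# sig.parameter_types == [("x", int), ("y", str)] -- ordered by parameter_names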
Indirect recursion through _gen_hbf_el
def _hbf_handle_child_elements ( self , obj , ntl ) : # accumulate a list of the children names in ko, and # the a dictionary of tag to xml elements. # repetition of a tag means that it will map to a list of # xml elements cd = { } ko = [ ] ks = set ( ) for child in ntl : k = child . nodeName if k == 'meta' and ( not self . _badgerfish_style_conversion ) : matk , matv = self . _transform_meta_key_value ( child ) if matk is not None : _add_value_to_dict_bf ( obj , matk , matv ) else : if k not in ks : ko . append ( k ) ks . add ( k ) _add_value_to_dict_bf ( cd , k , child ) # Converts the child XML elements to dicts by recursion and # adds these to the dict. for k in ko : v = _index_list_of_values ( cd , k ) dcl = [ ] ct = None for xc in v : ct , dc = self . _gen_hbf_el ( xc ) dcl . append ( dc ) # this assertion will trip is the hacky stripping of namespaces # results in a name clash among the tags of the children assert ct not in obj obj [ ct ] = dcl # delete redundant about attributes that are used in XML, but not JSON (last rule of HoneyBadgerFish) _cull_redundant_about ( obj ) return obj
644
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/nexml2nexson.py#L169-L208
[ "def", "load_toml_rest_api_config", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "LOGGER", ".", "info", "(", "\"Skipping rest api loading from non-existent config file: %s\"", ",", "filename", ")", "return", "RestApiConfig", "(", ")", "LOGGER", ".", "info", "(", "\"Loading rest api information from config: %s\"", ",", "filename", ")", "try", ":", "with", "open", "(", "filename", ")", "as", "fd", ":", "raw_config", "=", "fd", ".", "read", "(", ")", "except", "IOError", "as", "e", ":", "raise", "RestApiConfigurationError", "(", "\"Unable to load rest api configuration file: {}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "toml_config", "=", "toml", ".", "loads", "(", "raw_config", ")", "invalid_keys", "=", "set", "(", "toml_config", ".", "keys", "(", ")", ")", ".", "difference", "(", "[", "'bind'", ",", "'connect'", ",", "'timeout'", ",", "'opentsdb_db'", ",", "'opentsdb_url'", ",", "'opentsdb_username'", ",", "'opentsdb_password'", ",", "'client_max_size'", "]", ")", "if", "invalid_keys", ":", "raise", "RestApiConfigurationError", "(", "\"Invalid keys in rest api config: {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "sorted", "(", "list", "(", "invalid_keys", ")", ")", ")", ")", ")", "config", "=", "RestApiConfig", "(", "bind", "=", "toml_config", ".", "get", "(", "\"bind\"", ",", "None", ")", ",", "connect", "=", "toml_config", ".", "get", "(", "'connect'", ",", "None", ")", ",", "timeout", "=", "toml_config", ".", "get", "(", "'timeout'", ",", "None", ")", ",", "opentsdb_url", "=", "toml_config", ".", "get", "(", "'opentsdb_url'", ",", "None", ")", ",", "opentsdb_db", "=", "toml_config", ".", "get", "(", "'opentsdb_db'", ",", "None", ")", ",", "opentsdb_username", "=", "toml_config", ".", "get", "(", "'opentsdb_username'", ",", "None", ")", ",", "opentsdb_password", "=", "toml_config", ".", "get", "(", "'opentsdb_password'", ",", "None", ")", ",", "client_max_size", "=", "toml_config", ".", "get", "(", "'client_max_size'", ",", "None", ")", ")", "return", "config" ]
Returns an etree . ETCompatXMLParser instance .
def get_xml_parser ( encoding = None ) : parser = etree . ETCompatXMLParser ( huge_tree = True , remove_comments = True , strip_cdata = False , remove_blank_text = True , resolve_entities = False , encoding = encoding ) return parser
645
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/xml.py#L34-L45
[ "def", "get_bars", "(", "self", ",", "assets", ",", "data_frequency", ",", "bar_count", "=", "500", ")", ":", "assets_is_scalar", "=", "not", "isinstance", "(", "assets", ",", "(", "list", ",", "set", ",", "tuple", ")", ")", "is_daily", "=", "'d'", "in", "data_frequency", "# 'daily' or '1d'", "if", "assets_is_scalar", ":", "symbols", "=", "[", "assets", ".", "symbol", "]", "else", ":", "symbols", "=", "[", "asset", ".", "symbol", "for", "asset", "in", "assets", "]", "symbol_bars", "=", "self", ".", "_symbol_bars", "(", "symbols", ",", "'day'", "if", "is_daily", "else", "'minute'", ",", "limit", "=", "bar_count", ")", "if", "is_daily", ":", "intra_bars", "=", "{", "}", "symbol_bars_minute", "=", "self", ".", "_symbol_bars", "(", "symbols", ",", "'minute'", ",", "limit", "=", "1000", ")", "for", "symbol", ",", "df", "in", "symbol_bars_minute", ".", "items", "(", ")", ":", "agged", "=", "df", ".", "resample", "(", "'1D'", ")", ".", "agg", "(", "dict", "(", "open", "=", "'first'", ",", "high", "=", "'max'", ",", "low", "=", "'min'", ",", "close", "=", "'last'", ",", "volume", "=", "'sum'", ",", ")", ")", ".", "dropna", "(", ")", "intra_bars", "[", "symbol", "]", "=", "agged", "dfs", "=", "[", "]", "for", "asset", "in", "assets", "if", "not", "assets_is_scalar", "else", "[", "assets", "]", ":", "symbol", "=", "asset", ".", "symbol", "df", "=", "symbol_bars", ".", "get", "(", "symbol", ")", "if", "df", "is", "None", ":", "dfs", ".", "append", "(", "pd", ".", "DataFrame", "(", "[", "]", ",", "columns", "=", "[", "'open'", ",", "'high'", ",", "'low'", ",", "'close'", ",", "'volume'", "]", ")", ")", "continue", "if", "is_daily", ":", "agged", "=", "intra_bars", ".", "get", "(", "symbol", ")", "if", "agged", "is", "not", "None", "and", "len", "(", "agged", ".", "index", ")", ">", "0", "and", "agged", ".", "index", "[", "-", "1", "]", "not", "in", "df", ".", "index", ":", "if", "not", "(", "agged", ".", "index", "[", "-", "1", "]", ">", "df", ".", "index", "[", "-", "1", "]", ")", ":", "log", ".", "warn", "(", "(", "'agged.index[-1] = {}, df.index[-1] = {} '", "'for {}'", ")", ".", "format", "(", "agged", ".", "index", "[", "-", "1", "]", ",", "df", ".", "index", "[", "-", "1", "]", ",", "symbol", ")", ")", "df", "=", "df", ".", "append", "(", "agged", ".", "iloc", "[", "-", "1", "]", ")", "df", ".", "columns", "=", "pd", ".", "MultiIndex", ".", "from_product", "(", "[", "[", "asset", ",", "]", ",", "df", ".", "columns", "]", ")", "dfs", ".", "append", "(", "df", ")", "return", "pd", ".", "concat", "(", "dfs", ",", "axis", "=", "1", ")" ]
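A brief usage sketch: the parser returned above can be handed straight to lxml's parsing entry points, for example to parse an in-memory document while the configured options strip comments and blank text (the sample XML is made up).

from lxml import etree

parser = get_xml_parser(encoding="utf-8")
root = etree.fromstring(b"<root><!-- dropped --><child/></root>", parser)
print(len(root))   # 1 -- the comment was removed by the parser settings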
Returns an instance of lxml . etree . _Element for the given doc input .
def get_etree_root ( doc , encoding = None ) : tree = get_etree ( doc , encoding ) root = tree . getroot ( ) return root
646
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/xml.py#L58-L80
[ "def", "_calculateEncodingKey", "(", "comparator", ")", ":", "encodingName", "=", "None", "for", "k", ",", "v", "in", "list", "(", "_encodings", ".", "items", "(", ")", ")", ":", "if", "v", "==", "comparator", ":", "encodingName", "=", "k", "break", "return", "encodingName" ]
Removes all CDATA blocks from text if it contains them .
def strip_cdata ( text ) : if not is_cdata ( text ) : return text xml = "<e>{0}</e>" . format ( text ) node = etree . fromstring ( xml ) return node . text
647
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/xml.py#L114-L133
[ "def", "update", "(", "self", ",", "records", ",", "*", "*", "kw", ")", ":", "# ignore unknown fields\r", "kw", "=", "dict", "(", "[", "(", "k", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "kw", ".", "iteritems", "(", ")", "if", "k", "in", "self", ".", "fields", "]", ")", "if", "isinstance", "(", "records", ",", "dict", ")", ":", "records", "=", "[", "records", "]", "# update indices\r", "for", "indx", "in", "set", "(", "self", ".", "indices", ".", "keys", "(", ")", ")", "&", "set", "(", "kw", ".", "keys", "(", ")", ")", ":", "for", "record", "in", "records", ":", "if", "record", "[", "indx", "]", "==", "kw", "[", "indx", "]", ":", "continue", "_id", "=", "record", "[", "\"__id__\"", "]", "# remove id for the old value\r", "old_pos", "=", "bisect", ".", "bisect", "(", "self", ".", "indices", "[", "indx", "]", "[", "record", "[", "indx", "]", "]", ",", "_id", ")", "-", "1", "del", "self", ".", "indices", "[", "indx", "]", "[", "record", "[", "indx", "]", "]", "[", "old_pos", "]", "if", "not", "self", ".", "indices", "[", "indx", "]", "[", "record", "[", "indx", "]", "]", ":", "del", "self", ".", "indices", "[", "indx", "]", "[", "record", "[", "indx", "]", "]", "# insert new value\r", "bisect", ".", "insort", "(", "self", ".", "indices", "[", "indx", "]", ".", "setdefault", "(", "kw", "[", "indx", "]", ",", "[", "]", ")", ",", "_id", ")", "for", "record", "in", "records", ":", "# update record values\r", "record", ".", "update", "(", "kw", ")", "# increment version number\r", "record", "[", "\"__version__\"", "]", "+=", "1" ]
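A quick illustration of the behaviour, assuming the companion is_cdata helper (defined elsewhere in the module) simply detects the CDATA marker:

strip_cdata("<![CDATA[<b>bold</b>]]>")   # -> '<b>bold</b>'
strip_cdata("plain text")                # -> 'plain text' (returned unchanged)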
Return True if the input value is valid for insertion into the inner list .
def _is_valid ( self , value ) : # Entities have an istypeof method that can perform more sophisticated # type checking. if hasattr ( self . _type , "istypeof" ) : return self . _type . istypeof ( value ) else : return isinstance ( value , self . _type )
648
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/typedlist.py#L40-L53
[ "def", "stop", "(", "self", ")", ":", "self", ".", "_flush", "(", ")", "filesize", "=", "self", ".", "file", ".", "tell", "(", ")", "super", "(", "BLFWriter", ",", "self", ")", ".", "stop", "(", ")", "# Write header in the beginning of the file", "header", "=", "[", "b\"LOGG\"", ",", "FILE_HEADER_SIZE", ",", "APPLICATION_ID", ",", "0", ",", "0", ",", "0", ",", "2", ",", "6", ",", "8", ",", "1", "]", "# The meaning of \"count of objects read\" is unknown", "header", ".", "extend", "(", "[", "filesize", ",", "self", ".", "uncompressed_size", ",", "self", ".", "count_of_objects", ",", "0", "]", ")", "header", ".", "extend", "(", "timestamp_to_systemtime", "(", "self", ".", "start_timestamp", ")", ")", "header", ".", "extend", "(", "timestamp_to_systemtime", "(", "self", ".", "stop_timestamp", ")", ")", "with", "open", "(", "self", ".", "file", ".", "name", ",", "\"r+b\"", ")", "as", "f", ":", "f", ".", "write", "(", "FILE_HEADER_STRUCT", ".", "pack", "(", "*", "header", ")", ")" ]
Attempt to coerce value into the correct type .
def _fix_value ( self , value ) : try : return self . _castfunc ( value ) except : error = "Can't put '{0}' ({1}) into a {2}. Expected a {3} object." error = error . format ( value , # Input value type ( value ) , # Type of input value type ( self ) , # Type of collection self . _type # Expected type of input value ) six . reraise ( TypeError , TypeError ( error ) , sys . exc_info ( ) [ - 1 ] )
649
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/typedlist.py#L55-L70
[ "def", "remove_stale_indexes_from_bika_catalog", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale indexes and metadata from bika_catalog ...\"", ")", "cat_id", "=", "\"bika_catalog\"", "indexes_to_remove", "=", "[", "\"getAnalyst\"", ",", "\"getAnalysts\"", ",", "\"getAnalysisService\"", ",", "\"getClientOrderNumber\"", ",", "\"getClientReference\"", ",", "\"getClientSampleID\"", ",", "\"getContactTitle\"", ",", "\"getDateDisposed\"", ",", "\"getDateExpired\"", ",", "\"getDateOpened\"", ",", "\"getDatePublished\"", ",", "\"getInvoiced\"", ",", "\"getPreserver\"", ",", "\"getSamplePointTitle\"", ",", "\"getSamplePointUID\"", ",", "\"getSampler\"", ",", "\"getScheduledSamplingSampler\"", ",", "\"getSamplingDate\"", ",", "\"getWorksheetTemplateTitle\"", ",", "\"BatchUID\"", ",", "]", "metadata_to_remove", "=", "[", "\"getAnalysts\"", ",", "\"getClientOrderNumber\"", ",", "\"getClientReference\"", ",", "\"getClientSampleID\"", ",", "\"getContactTitle\"", ",", "\"getSamplePointTitle\"", ",", "\"getAnalysisService\"", ",", "\"getDatePublished\"", ",", "]", "for", "index", "in", "indexes_to_remove", ":", "del_index", "(", "portal", ",", "cat_id", ",", "index", ")", "for", "metadata", "in", "metadata_to_remove", ":", "del_metadata", "(", "portal", ",", "cat_id", ",", "metadata", ")", "commit_transaction", "(", "portal", ")" ]
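These two hooks carry the whole typed-list idea: _is_valid gates what may be stored and _fix_value coerces everything else. A condensed, purely illustrative stand-in showing how a container can wire the same pattern into append; mixbox's real TypedList has more machinery than this.

class TinyTypedList(list):
    """Illustrative stand-in, not the mixbox implementation."""
    def __init__(self, type_, castfunc=None):
        super(TinyTypedList, self).__init__()
        self._type = type_
        self._castfunc = castfunc or type_

    def append(self, value):
        if not isinstance(value, self._type):   # stand-in for _is_valid
            value = self._castfunc(value)       # stand-in for _fix_value
        super(TinyTypedList, self).append(value)

ints = TinyTypedList(int)
ints.append("42")
print(ints)   # [42]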
Generate a list of ( quoted raw name , signature type ) entries for this pairdef , recursively traversing reference types
def members_entries ( self , all_are_optional : Optional [ bool ] = False ) -> List [ Tuple [ str , str ] ] : if self . _type_reference : rval : List [ Tuple [ str , str ] ] = [ ] for n , t in self . _context . reference ( self . _type_reference ) . members_entries ( all_are_optional ) : rval . append ( ( n , self . _ebnf . signature_cardinality ( t , all_are_optional ) . format ( name = n ) ) ) return rval else : sig = self . _ebnf . signature_cardinality ( self . _typ . reference_type ( ) , all_are_optional ) return [ ( name , sig . format ( name = name ) ) for name in self . _names ]
650
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_pairdef_parser.py#L43-L57
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "request", ".", "session", ".", "set_test_cookie", "(", ")", "if", "not", "self", ".", "request", ".", "session", ".", "test_cookie_worked", "(", ")", ":", "messages", ".", "add_message", "(", "self", ".", "request", ",", "messages", ".", "ERROR", ",", "\"Please enable cookies.\"", ")", "self", ".", "request", ".", "session", ".", "delete_test_cookie", "(", ")", "return", "super", "(", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")" ]
Create an initializer entry for the entry
def _initializer_for ( self , raw_name : str , cooked_name : str , prefix : Optional [ str ] ) -> List [ str ] : mt_val = self . _ebnf . mt_value ( self . _typ ) rval = [ ] if is_valid_python ( raw_name ) : if prefix : # If a prefix exists, the input has already been processed - no if clause is necessary rval . append ( f"self.{raw_name} = {prefix}.{raw_name}" ) else : cons = raw_name rval . append ( f"self.{raw_name} = {cons}" ) elif is_valid_python ( cooked_name ) : if prefix : rval . append ( f"setattr(self, '{raw_name}', getattr({prefix}, '{raw_name}')" ) else : cons = f"{cooked_name} if {cooked_name} is not {mt_val} else _kwargs.get('{raw_name}', {mt_val})" rval . append ( f"setattr(self, '{raw_name}', {cons})" ) else : getter = f"_kwargs.get('{raw_name}', {mt_val})" if prefix : rval . append ( f"setattr(self, '{raw_name}', getattr({prefix}, '{getter}')" ) else : rval . append ( f"setattr(self, '{raw_name}', {getter})" ) return rval
651
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_pairdef_parser.py#L96-L129
[ "def", "parse_tophat_log", "(", "self", ",", "raw_data", ")", ":", "if", "'Aligned pairs'", "in", "raw_data", ":", "# Paired end data", "regexes", "=", "{", "'overall_aligned_percent'", ":", "r\"([\\d\\.]+)% overall read mapping rate.\"", ",", "'concordant_aligned_percent'", ":", "r\"([\\d\\.]+)% concordant pair alignment rate.\"", ",", "'aligned_total'", ":", "r\"Aligned pairs:\\s+(\\d+)\"", ",", "'aligned_multimap'", ":", "r\"Aligned pairs:\\s+\\d+\\n\\s+of these:\\s+(\\d+)\"", ",", "'aligned_discordant'", ":", "r\"(\\d+) \\([\\s\\d\\.]+%\\) are discordant alignments\"", ",", "'total_reads'", ":", "r\"[Rr]eads:\\n\\s+Input\\s*:\\s+(\\d+)\"", ",", "}", "else", ":", "# Single end data", "regexes", "=", "{", "'total_reads'", ":", "r\"[Rr]eads:\\n\\s+Input\\s*:\\s+(\\d+)\"", ",", "'aligned_total'", ":", "r\"Mapped\\s*:\\s+(\\d+)\"", ",", "'aligned_multimap'", ":", "r\"of these\\s*:\\s+(\\d+)\"", ",", "'overall_aligned_percent'", ":", "r\"([\\d\\.]+)% overall read mapping rate.\"", ",", "}", "parsed_data", "=", "{", "}", "for", "k", ",", "r", "in", "regexes", ".", "items", "(", ")", ":", "r_search", "=", "re", ".", "search", "(", "r", ",", "raw_data", ",", "re", ".", "MULTILINE", ")", "if", "r_search", ":", "parsed_data", "[", "k", "]", "=", "float", "(", "r_search", ".", "group", "(", "1", ")", ")", "if", "len", "(", "parsed_data", ")", "==", "0", ":", "return", "None", "parsed_data", "[", "'concordant_aligned_percent'", "]", "=", "parsed_data", ".", "get", "(", "'concordant_aligned_percent'", ",", "0", ")", "parsed_data", "[", "'aligned_total'", "]", "=", "parsed_data", ".", "get", "(", "'aligned_total'", ",", "0", ")", "parsed_data", "[", "'aligned_multimap'", "]", "=", "parsed_data", ".", "get", "(", "'aligned_multimap'", ",", "0", ")", "parsed_data", "[", "'aligned_discordant'", "]", "=", "parsed_data", ".", "get", "(", "'aligned_discordant'", ",", "0", ")", "parsed_data", "[", "'unaligned_total'", "]", "=", "parsed_data", "[", "'total_reads'", "]", "-", "parsed_data", "[", "'aligned_total'", "]", "parsed_data", "[", "'aligned_not_multimapped_discordant'", "]", "=", "parsed_data", "[", "'aligned_total'", "]", "-", "parsed_data", "[", "'aligned_multimap'", "]", "-", "parsed_data", "[", "'aligned_discordant'", "]", "return", "parsed_data" ]
Raise AbsentLinkSecret if link secret is not set .
def _assert_link_secret ( self , action : str ) : if self . _link_secret is None : LOGGER . debug ( 'HolderProver._assert_link_secret: action %s requires link secret but it is not set' , action ) raise AbsentLinkSecret ( 'Action {} requires link secret but it is not set' . format ( action ) )
652
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L90-L99
[ "def", "init_recorder", "(", "self", ",", "recorder_config", ")", ":", "if", "not", "recorder_config", ":", "self", ".", "recorder", "=", "None", "self", ".", "recorder_path", "=", "None", "return", "if", "isinstance", "(", "recorder_config", ",", "str", ")", ":", "recorder_coll", "=", "recorder_config", "recorder_config", "=", "{", "}", "else", ":", "recorder_coll", "=", "recorder_config", "[", "'source_coll'", "]", "# TODO: support dedup", "dedup_index", "=", "None", "warc_writer", "=", "MultiFileWARCWriter", "(", "self", ".", "warcserver", ".", "archive_paths", ",", "max_size", "=", "int", "(", "recorder_config", ".", "get", "(", "'rollover_size'", ",", "1000000000", ")", ")", ",", "max_idle_secs", "=", "int", "(", "recorder_config", ".", "get", "(", "'rollover_idle_secs'", ",", "600", ")", ")", ",", "filename_template", "=", "recorder_config", ".", "get", "(", "'filename_template'", ")", ",", "dedup_index", "=", "dedup_index", ")", "self", ".", "recorder", "=", "RecorderApp", "(", "self", ".", "RECORD_SERVER", "%", "str", "(", "self", ".", "warcserver_server", ".", "port", ")", ",", "warc_writer", ",", "accept_colls", "=", "recorder_config", ".", "get", "(", "'source_filter'", ")", ")", "recorder_server", "=", "GeventServer", "(", "self", ".", "recorder", ",", "port", "=", "0", ")", "self", ".", "recorder_path", "=", "self", ".", "RECORD_API", "%", "(", "recorder_server", ".", "port", ",", "recorder_coll", ")" ]
Return list of revocation registry identifiers for which HolderProver has tails files .
def rev_regs ( self ) -> list : LOGGER . debug ( 'HolderProver.rev_regs >>>' ) rv = [ basename ( f ) for f in Tails . links ( self . _dir_tails ) ] LOGGER . debug ( 'HolderProver.rev_regs <<< %s' , rv ) return rv
653
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L539-L550
[ "def", "get_group", "(", "self", ")", ":", "if", "self", ".", "group", "is", "None", ":", "self", ".", "group", "=", "self", ".", "get_field", "(", "'group'", ")", "if", "self", ".", "group", "is", "not", "None", ":", "# group data from LightGBM is boundaries data, need to convert to group size", "self", ".", "group", "=", "np", ".", "diff", "(", "self", ".", "group", ")", "return", "self", ".", "group" ]
Create credential request as HolderProver and store in wallet ; return credential json and metadata json .
async def create_cred_req ( self , cred_offer_json : str , cd_id : str ) -> ( str , str ) : LOGGER . debug ( 'HolderProver.create_cred_req >>> cred_offer_json: %s, cd_id: %s' , cred_offer_json , cd_id ) self . _assert_link_secret ( 'create_cred_req' ) # Check that ledger has schema on ledger where cred def expects - in case of pool reset with extant wallet cred_def_json = await self . get_cred_def ( cd_id ) schema_seq_no = int ( json . loads ( cred_def_json ) [ 'schemaId' ] ) schema_json = await self . get_schema ( schema_seq_no ) schema = json . loads ( schema_json ) if not schema : LOGGER . debug ( 'HolderProver.create_cred_req: <!< absent schema@#%s, cred req may be for another ledger' , schema_seq_no ) raise AbsentSchema ( 'Absent schema@#{}, cred req may be for another ledger' . format ( schema_seq_no ) ) ( cred_req_json , cred_req_metadata_json ) = await anoncreds . prover_create_credential_req ( self . wallet . handle , self . did , cred_offer_json , cred_def_json , self . _link_secret ) rv = ( cred_req_json , cred_req_metadata_json ) LOGGER . debug ( 'HolderProver.create_cred_req <<< %s' , rv ) return rv
654
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L578-L612
[ "def", "mremove", "(", "self", ",", "class_name", ",", "names", ")", ":", "if", "class_name", "not", "in", "self", ".", "components", ":", "logger", ".", "error", "(", "\"Component class {} not found\"", ".", "format", "(", "class_name", ")", ")", "return", "None", "if", "not", "isinstance", "(", "names", ",", "pd", ".", "Index", ")", ":", "names", "=", "pd", ".", "Index", "(", "names", ")", "cls_df", "=", "self", ".", "df", "(", "class_name", ")", "cls_df", ".", "drop", "(", "names", ",", "inplace", "=", "True", ")", "pnl", "=", "self", ".", "pnl", "(", "class_name", ")", "for", "df", "in", "itervalues", "(", "pnl", ")", ":", "df", ".", "drop", "(", "df", ".", "columns", ".", "intersection", "(", "names", ")", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")" ]
Load caches and archive enough to go offline and be able to generate proof on all credentials in wallet .
async def load_cache ( self , archive : bool = False ) -> int : LOGGER . debug ( 'HolderProver.load_cache >>> archive: %s' , archive ) rv = int ( time ( ) ) box_ids = json . loads ( await self . get_box_ids_json ( ) ) for s_id in box_ids [ 'schema_id' ] : with SCHEMA_CACHE . lock : await self . get_schema ( s_id ) for cd_id in box_ids [ 'cred_def_id' ] : with CRED_DEF_CACHE . lock : await self . get_cred_def ( cd_id ) for rr_id in box_ids [ 'rev_reg_id' ] : await self . _get_rev_reg_def ( rr_id ) with REVO_CACHE . lock : revo_cache_entry = REVO_CACHE . get ( rr_id , None ) if revo_cache_entry : try : await revo_cache_entry . get_delta_json ( self . _build_rr_delta_json , rv , rv ) except ClosedPool : LOGGER . warning ( 'Holder-Prover %s is offline from pool %s, cannot update revo cache reg delta for %s to %s' , self . wallet . name , self . pool . name , rr_id , rv ) if archive : Caches . archive ( self . dir_cache ) LOGGER . debug ( 'HolderProver.load_cache <<< %s' , rv ) return rv
655
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L649-L688
[ "def", "color_lerp", "(", "c1", ":", "Tuple", "[", "int", ",", "int", ",", "int", "]", ",", "c2", ":", "Tuple", "[", "int", ",", "int", ",", "int", "]", ",", "a", ":", "float", ")", "->", "Color", ":", "return", "Color", ".", "_new_from_cdata", "(", "lib", ".", "TCOD_color_lerp", "(", "c1", ",", "c2", ",", "a", ")", ")" ]
Get credentials from HolderProver wallet corresponding to proof request and filter criteria ; return credential identifiers from wallet and credentials json . Return empty set and empty production for no such credentials .
async def get_creds ( self , proof_req_json : str , filt : dict = None , filt_dflt_incl : bool = False ) -> ( Set [ str ] , str ) : LOGGER . debug ( 'HolderProver.get_creds >>> proof_req_json: %s, filt: %s' , proof_req_json , filt ) if filt is None : filt = { } rv = None creds_json = await anoncreds . prover_get_credentials_for_proof_req ( self . wallet . handle , proof_req_json ) creds = json . loads ( creds_json ) cred_ids = set ( ) if filt : for cd_id in filt : try : json . loads ( await self . get_cred_def ( cd_id ) ) except AbsentCredDef : LOGGER . warning ( 'HolderProver.get_creds: ignoring filter criterion, no cred def on %s' , cd_id ) filt . pop ( cd_id ) for inner_creds in { * * creds [ 'attrs' ] , * * creds [ 'predicates' ] } . values ( ) : for cred in inner_creds : # cred is a dict in a list of dicts cred_info = cred [ 'cred_info' ] if filt : cred_cd_id = cred_info [ 'cred_def_id' ] if cred_cd_id not in filt : if filt_dflt_incl : cred_ids . add ( cred_info [ 'referent' ] ) continue if 'attr-match' in ( filt [ cred_cd_id ] or { } ) : # maybe filt[cred_cd_id]: None if not { k : str ( filt [ cred_cd_id ] . get ( 'attr-match' , { } ) [ k ] ) for k in filt [ cred_cd_id ] . get ( 'attr-match' , { } ) } . items ( ) <= cred_info [ 'attrs' ] . items ( ) : continue if 'minima' in ( filt [ cred_cd_id ] or { } ) : # maybe filt[cred_cd_id]: None minima = filt [ cred_cd_id ] . get ( 'minima' , { } ) try : if any ( ( attr not in cred_info [ 'attrs' ] ) or ( int ( cred_info [ 'attrs' ] [ attr ] ) < int ( minima [ attr ] ) ) for attr in minima ) : continue except ValueError : continue # int conversion failed - reject candidate cred_ids . add ( cred_info [ 'referent' ] ) else : cred_ids . add ( cred_info [ 'referent' ] ) if filt : creds = json . loads ( prune_creds_json ( creds , cred_ids ) ) rv = ( cred_ids , json . dumps ( creds ) ) LOGGER . debug ( 'HolderProver.get_creds <<< %s' , rv ) return rv
656
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L784-L937
[ "def", "_check_array", "(", "self", ",", "X", ")", ":", "if", "isinstance", "(", "X", ",", "da", ".", "Array", ")", ":", "if", "X", ".", "ndim", "==", "2", "and", "X", ".", "numblocks", "[", "1", "]", ">", "1", ":", "logger", ".", "debug", "(", "\"auto-rechunking 'X'\"", ")", "if", "not", "np", ".", "isnan", "(", "X", ".", "chunks", "[", "0", "]", ")", ".", "any", "(", ")", ":", "X", "=", "X", ".", "rechunk", "(", "{", "0", ":", "\"auto\"", ",", "1", ":", "-", "1", "}", ")", "else", ":", "X", "=", "X", ".", "rechunk", "(", "{", "1", ":", "-", "1", "}", ")", "return", "X" ]
Get creds structure from HolderProver wallet by credential identifiers .
async def get_creds_by_id ( self , proof_req_json : str , cred_ids : set ) -> str : LOGGER . debug ( 'HolderProver.get_creds_by_id >>> proof_req_json: %s, cred_ids: %s' , proof_req_json , cred_ids ) creds_json = await anoncreds . prover_get_credentials_for_proof_req ( self . wallet . handle , proof_req_json ) # retain only creds of interest: find corresponding referents rv_json = prune_creds_json ( json . loads ( creds_json ) , cred_ids ) LOGGER . debug ( 'HolderProver.get_cred_by_referent <<< %s' , rv_json ) return rv_json
657
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L939-L955
[ "def", "update", "(", "self", ",", "other", ",", "join", "=", "'left'", ",", "overwrite", "=", "True", ",", "filter_func", "=", "None", ",", "errors", "=", "'ignore'", ")", ":", "import", "pandas", ".", "core", ".", "computation", ".", "expressions", "as", "expressions", "# TODO: Support other joins", "if", "join", "!=", "'left'", ":", "# pragma: no cover", "raise", "NotImplementedError", "(", "\"Only left join is supported\"", ")", "if", "errors", "not", "in", "[", "'ignore'", ",", "'raise'", "]", ":", "raise", "ValueError", "(", "\"The parameter errors must be either \"", "\"'ignore' or 'raise'\"", ")", "if", "not", "isinstance", "(", "other", ",", "DataFrame", ")", ":", "other", "=", "DataFrame", "(", "other", ")", "other", "=", "other", ".", "reindex_like", "(", "self", ")", "for", "col", "in", "self", ".", "columns", ":", "this", "=", "self", "[", "col", "]", ".", "_values", "that", "=", "other", "[", "col", "]", ".", "_values", "if", "filter_func", "is", "not", "None", ":", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "mask", "=", "~", "filter_func", "(", "this", ")", "|", "isna", "(", "that", ")", "else", ":", "if", "errors", "==", "'raise'", ":", "mask_this", "=", "notna", "(", "that", ")", "mask_that", "=", "notna", "(", "this", ")", "if", "any", "(", "mask_this", "&", "mask_that", ")", ":", "raise", "ValueError", "(", "\"Data overlaps.\"", ")", "if", "overwrite", ":", "mask", "=", "isna", "(", "that", ")", "else", ":", "mask", "=", "notna", "(", "this", ")", "# don't overwrite columns unecessarily", "if", "mask", ".", "all", "(", ")", ":", "continue", "self", "[", "col", "]", "=", "expressions", ".", "where", "(", "mask", ",", "this", ",", "that", ")" ]
Returns a histogram of your data .
def histogram ( data ) : ret = { } for datum in data : if datum in ret : ret [ datum ] += 1 else : ret [ datum ] = 1 return ret
658
https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/contrib/multi-module-example/typedjsonrpc_example/valid.py#L22-L36
[ "def", "write_lockfile", "(", "self", ",", "content", ")", ":", "s", "=", "self", ".", "_lockfile_encoder", ".", "encode", "(", "content", ")", "open_kwargs", "=", "{", "\"newline\"", ":", "self", ".", "_lockfile_newlines", ",", "\"encoding\"", ":", "\"utf-8\"", "}", "with", "vistir", ".", "contextmanagers", ".", "atomic_open_for_write", "(", "self", ".", "lockfile_location", ",", "*", "*", "open_kwargs", ")", "as", "f", ":", "f", ".", "write", "(", "s", ")", "# Write newline at end of document. GH-319.", "# Only need '\\n' here; the file object handles the rest.", "if", "not", "s", ".", "endswith", "(", "u\"\\n\"", ")", ":", "f", ".", "write", "(", "u\"\\n\"", ")" ]
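A one-line usage example of the function above:

histogram(["a", "b", "a", "c", "a"])   # -> {"a": 3, "b": 1, "c": 1}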
Prints object key - value pairs in a custom format
def print_data ( data ) : print ( ", " . join ( [ "{}=>{}" . format ( key , value ) for key , value in data ] ) )
659
https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/contrib/multi-module-example/typedjsonrpc_example/valid.py#L65-L72
[ "def", "_update_dPrxy", "(", "self", ")", ":", "super", "(", "ExpCM_empirical_phi_divpressure", ",", "self", ")", ".", "_update_dPrxy", "(", ")", "if", "'omega2'", "in", "self", ".", "freeparams", ":", "with", "scipy", ".", "errstate", "(", "divide", "=", "'raise'", ",", "under", "=", "'raise'", ",", "over", "=", "'raise'", ",", "invalid", "=", "'ignore'", ")", ":", "scipy", ".", "copyto", "(", "self", ".", "dPrxy", "[", "'omega2'", "]", ",", "-", "self", ".", "ln_piAx_piAy_beta", "*", "self", ".", "Qxy", "*", "self", ".", "omega", "/", "(", "1", "-", "self", ".", "piAx_piAy_beta", ")", ",", "where", "=", "CODON_NONSYN", ")", "scipy", ".", "copyto", "(", "self", ".", "dPrxy", "[", "'omega2'", "]", ",", "self", ".", "Qxy", "*", "self", ".", "omega", ",", "where", "=", "scipy", ".", "logical_and", "(", "CODON_NONSYN", ",", "scipy", ".", "fabs", "(", "1", "-", "self", ".", "piAx_piAy_beta", ")", "<", "ALMOST_ZERO", ")", ")", "for", "r", "in", "range", "(", "self", ".", "nsites", ")", ":", "self", ".", "dPrxy", "[", "'omega2'", "]", "[", "r", "]", "*=", "self", ".", "deltar", "[", "r", "]", "_fill_diagonals", "(", "self", ".", "dPrxy", "[", "'omega2'", "]", ",", "self", ".", "_diag_indices", ")" ]
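A usage example; data can be any iterable of (key, value) pairs:

print_data([("host", "localhost"), ("port", 8080)])
# prints: host=>localhost, port=>8080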
Find all files in a subdirectory and return paths relative to dir
def subdir_findall ( dir , subdir ) : strip_n = len ( dir . split ( '/' ) ) path = '/' . join ( ( dir , subdir ) ) return [ '/' . join ( s . split ( '/' ) [ strip_n : ] ) for s in setuptools . findall ( path ) ]
660
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/setup.py#L82-L91
[ "def", "set_runtime_value_int", "(", "self", ",", "ihcid", ":", "int", ",", "value", ":", "int", ")", "->", "bool", ":", "if", "self", ".", "client", ".", "set_runtime_value_int", "(", "ihcid", ",", "value", ")", ":", "return", "True", "self", ".", "re_authenticate", "(", ")", "return", "self", ".", "client", ".", "set_runtime_value_int", "(", "ihcid", ",", "value", ")" ]
For a list of packages find the package_data
def find_package_data ( packages ) : package_data = { } for package in packages : package_data [ package ] = [ ] for subdir in find_subdirectories ( package ) : if '.' . join ( ( package , subdir ) ) in packages : # skip submodules logging . debug ( "skipping submodule %s/%s" % ( package , subdir ) ) continue if skip_tests and ( subdir == 'tests' ) : # skip tests logging . debug ( "skipping tests %s/%s" % ( package , subdir ) ) continue package_data [ package ] += subdir_findall ( package_to_path ( package ) , subdir ) return package_data
661
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/setup.py#L93-L114
[ "def", "convert_prot", "(", "prot", ")", ":", "# https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786(v=vs.85).aspx", "if", "prot", "&", "0x10", ":", "return", "4", "if", "prot", "&", "0x20", ":", "return", "5", "if", "prot", "&", "0x40", ":", "return", "7", "if", "prot", "&", "0x80", ":", "return", "7", "if", "prot", "&", "0x01", ":", "return", "0", "if", "prot", "&", "0x02", ":", "return", "1", "if", "prot", "&", "0x04", ":", "return", "3", "if", "prot", "&", "0x08", ":", "return", "3", "raise", "angr", ".", "errors", ".", "SimValueError", "(", "\"Unknown windows memory protection constant: %#x\"", "%", "prot", ")" ]
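A sketch of how the helper above is typically consumed from a setup.py, assuming the usual setuptools flow and that the module's other helpers (find_subdirectories, package_to_path, skip_tests) are in scope; the metadata values are placeholders.

import setuptools

packages = setuptools.find_packages()
setuptools.setup(
    name="example-package",
    packages=packages,
    package_data=find_package_data(packages),   # per-package lists of data-file paths
)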
Main routine for metrics .
def process_file_metrics ( context , file_processors ) : file_metrics = OrderedDict ( ) # TODO make available the includes and excludes feature gitignore = [ ] if os . path . isfile ( '.gitignore' ) : with open ( '.gitignore' , 'r' ) as ifile : gitignore = ifile . read ( ) . splitlines ( ) in_files = glob_files ( context [ 'root_dir' ] , context [ 'in_file_names' ] , gitignore = gitignore ) # main loop for in_file , key in in_files : # print 'file %i: %s' % (i, in_file) try : with open ( in_file , 'rb' ) as ifile : code = ifile . read ( ) # lookup lexicographical scanner to use for this run try : lex = guess_lexer_for_filename ( in_file , code , encoding = 'guess' ) # encoding is 'guess', chardet', 'utf-8' except : pass else : token_list = lex . get_tokens ( code ) # parse code file_metrics [ key ] = OrderedDict ( ) file_metrics [ key ] . update ( compute_file_metrics ( file_processors , lex . name , key , token_list ) ) file_metrics [ key ] [ 'language' ] = lex . name except IOError as e : sys . stderr . writelines ( str ( e ) + " -- Skipping input file.\n\n" ) return file_metrics
662
https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/metrics_utils.py#L117-L150
[ "def", "menu_item_remove_libraries_or_root_clicked", "(", "self", ",", "menu_item", ")", ":", "menu_item_text", "=", "self", ".", "get_menu_item_text", "(", "menu_item", ")", "logger", ".", "info", "(", "\"Delete item '{0}' pressed.\"", ".", "format", "(", "menu_item_text", ")", ")", "model", ",", "path", "=", "self", ".", "view", ".", "get_selection", "(", ")", ".", "get_selected", "(", ")", "if", "path", ":", "# Second confirmation to delete library", "tree_m_row", "=", "self", ".", "tree_store", "[", "path", "]", "library_os_path", ",", "library_path", ",", "library_name", ",", "item_key", "=", "self", ".", "extract_library_properties_from_selected_row", "(", ")", "# assert isinstance(tree_m_row[self.ITEM_STORAGE_ID], str)", "library_file_system_path", "=", "library_os_path", "if", "\"root\"", "in", "menu_item_text", ":", "button_texts", "=", "[", "menu_item_text", "+", "\"from tree and config\"", ",", "\"Cancel\"", "]", "partial_message", "=", "\"This will remove the library root from your configuration (config.yaml).\"", "else", ":", "button_texts", "=", "[", "menu_item_text", ",", "\"Cancel\"", "]", "partial_message", "=", "\"This folder will be removed from hard drive! You really wanna do that?\"", "message_string", "=", "\"You choose to {2} with \"", "\"\\n\\nlibrary tree path: {0}\"", "\"\\n\\nphysical path: {1}.\\n\\n\\n\"", "\"{3}\"", "\"\"", ".", "format", "(", "os", ".", "path", ".", "join", "(", "self", ".", "convert_if_human_readable", "(", "tree_m_row", "[", "self", ".", "LIB_PATH_STORAGE_ID", "]", ")", ",", "item_key", ")", ",", "library_file_system_path", ",", "menu_item_text", ".", "lower", "(", ")", ",", "partial_message", ")", "width", "=", "8", "*", "len", "(", "\"physical path: \"", "+", "library_file_system_path", ")", "dialog", "=", "RAFCONButtonDialog", "(", "message_string", ",", "button_texts", ",", "message_type", "=", "Gtk", ".", "MessageType", ".", "QUESTION", ",", "parent", "=", "self", ".", "get_root_window", "(", ")", ",", "width", "=", "min", "(", "width", ",", "1400", ")", ")", "response_id", "=", "dialog", ".", "run", "(", ")", "dialog", ".", "destroy", "(", ")", "if", "response_id", "==", "1", ":", "if", "\"root\"", "in", "menu_item_text", ":", "logger", ".", "info", "(", "\"Remove library root key '{0}' from config.\"", ".", "format", "(", "item_key", ")", ")", "from", "rafcon", ".", "gui", ".", "singleton", "import", "global_config", "library_paths", "=", "global_config", ".", "get_config_value", "(", "'LIBRARY_PATHS'", ")", "del", "library_paths", "[", "tree_m_row", "[", "self", ".", "LIB_KEY_STORAGE_ID", "]", "]", "global_config", ".", "save_configuration", "(", ")", "self", ".", "model", ".", "library_manager", ".", "refresh_libraries", "(", ")", "elif", "\"libraries\"", "in", "menu_item_text", ":", "logger", ".", "debug", "(", "\"Remove of all libraries in {} is triggered.\"", ".", "format", "(", "library_os_path", ")", ")", "import", "shutil", "shutil", ".", "rmtree", "(", "library_os_path", ")", "self", ".", "model", ".", "library_manager", ".", "refresh_libraries", "(", ")", "else", ":", "logger", ".", "debug", "(", "\"Remove of Library {} is triggered.\"", ".", "format", "(", "library_os_path", ")", ")", "self", ".", "model", ".", "library_manager", ".", "remove_library_from_file_system", "(", "library_path", ",", "library_name", ")", "elif", "response_id", "in", "[", "2", ",", "-", "4", "]", ":", "pass", "else", ":", "logger", ".", "warning", "(", "\"Response id: {} is not considered\"", ".", "format", "(", "response_id", ")", ")", 
"return", "True", "return", "False" ]
Use processors to collect build metrics .
def process_build_metrics ( context , build_processors ) : build_metrics = OrderedDict ( ) # reset all processors for p in build_processors : p . reset ( ) # collect metrics from all processors for p in build_processors : build_metrics . update ( p . build_metrics ) return build_metrics
663
https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/metrics_utils.py#L153-L165
[ "def", "_update_dPrxy", "(", "self", ")", ":", "super", "(", "ExpCM_empirical_phi_divpressure", ",", "self", ")", ".", "_update_dPrxy", "(", ")", "if", "'omega2'", "in", "self", ".", "freeparams", ":", "with", "scipy", ".", "errstate", "(", "divide", "=", "'raise'", ",", "under", "=", "'raise'", ",", "over", "=", "'raise'", ",", "invalid", "=", "'ignore'", ")", ":", "scipy", ".", "copyto", "(", "self", ".", "dPrxy", "[", "'omega2'", "]", ",", "-", "self", ".", "ln_piAx_piAy_beta", "*", "self", ".", "Qxy", "*", "self", ".", "omega", "/", "(", "1", "-", "self", ".", "piAx_piAy_beta", ")", ",", "where", "=", "CODON_NONSYN", ")", "scipy", ".", "copyto", "(", "self", ".", "dPrxy", "[", "'omega2'", "]", ",", "self", ".", "Qxy", "*", "self", ".", "omega", ",", "where", "=", "scipy", ".", "logical_and", "(", "CODON_NONSYN", ",", "scipy", ".", "fabs", "(", "1", "-", "self", ".", "piAx_piAy_beta", ")", "<", "ALMOST_ZERO", ")", ")", "for", "r", "in", "range", "(", "self", ".", "nsites", ")", ":", "self", ".", "dPrxy", "[", "'omega2'", "]", "[", "r", "]", "*=", "self", ".", "deltar", "[", "r", "]", "_fill_diagonals", "(", "self", ".", "dPrxy", "[", "'omega2'", "]", ",", "self", ".", "_diag_indices", ")" ]
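The loop above only relies on each processor exposing reset() and a build_metrics mapping. A minimal, purely hypothetical processor that satisfies that contract; its process_file hook is invented for illustration and is not part of the real interface.

from collections import OrderedDict

class FileCountProcessor(object):
    """Illustrative processor: counts files it has been shown."""
    def __init__(self):
        self.reset()

    def reset(self):
        self._seen = 0

    def process_file(self, key):          # hypothetical per-file hook
        self._seen += 1

    @property
    def build_metrics(self):
        return OrderedDict([("files_processed", self._seen)])

print(process_build_metrics({}, [FileCountProcessor()]))
# OrderedDict([('files_processed', 0)]) -- reset() zeroes the count before collection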
Print the summary
def summary ( processors , metrics , context ) : # display aggregated metric values on language level def display_header ( processors , before = '' , after = '' ) : """Display the header for the summary results.""" print ( before , end = ' ' ) for processor in processors : processor . display_header ( ) print ( after ) def display_separator ( processors , before = '' , after = '' ) : """Display the header for the summary results.""" print ( before , end = ' ' ) for processor in processors : processor . display_separator ( ) print ( after ) def display_metrics ( processors , before = '' , after = '' , metrics = [ ] ) : """Display the header for the summary results.""" print ( before , end = ' ' ) for processor in processors : processor . display_metrics ( metrics ) print ( after ) summary = { } for m in metrics : lang = metrics [ m ] [ 'language' ] has_key = lang in summary if not has_key : summary [ lang ] = { 'file_count' : 0 , 'language' : lang } summary [ lang ] [ 'file_count' ] += 1 for i in metrics [ m ] : if i not in [ 'sloc' , 'comments' , 'mccabe' ] : # include metrics to be used continue if not has_key : summary [ lang ] [ i ] = 0 summary [ lang ] [ i ] += metrics [ m ] [ i ] total = { 'language' : 'Total' } for m in summary : for i in summary [ m ] : if i == 'language' : continue if i not in total : total [ i ] = 0 total [ i ] += summary [ m ] [ i ] print ( 'Metrics Summary:' ) display_header ( processors , 'Files' , '' ) display_separator ( processors , '-' * 5 , '' ) for k in sorted ( summary . keys ( ) , key = str . lower ) : display_metrics ( processors , '%5d' % summary [ k ] [ 'file_count' ] , '' , summary [ k ] ) display_separator ( processors , '-' * 5 , '' ) display_metrics ( processors , '%5d' % total [ 'file_count' ] , '' , total )
664
https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/metrics_utils.py#L168-L224
[ "def", "datetime", "(", "self", ",", "field", "=", "None", ",", "val", "=", "None", ")", ":", "if", "val", "is", "None", ":", "def", "source", "(", ")", ":", "tzinfo", "=", "get_default_timezone", "(", ")", "if", "settings", ".", "USE_TZ", "else", "None", "return", "datetime", ".", "fromtimestamp", "(", "randrange", "(", "1", ",", "2100000000", ")", ",", "tzinfo", ")", "else", ":", "def", "source", "(", ")", ":", "tzinfo", "=", "get_default_timezone", "(", ")", "if", "settings", ".", "USE_TZ", "else", "None", "return", "datetime", ".", "fromtimestamp", "(", "int", "(", "val", ".", "strftime", "(", "\"%s\"", ")", ")", "+", "randrange", "(", "-", "365", "*", "24", "*", "3600", "*", "2", ",", "365", "*", "24", "*", "3600", "*", "2", ")", ",", "tzinfo", ")", "return", "self", ".", "get_allowed_value", "(", "source", ",", "field", ")" ]
Returns portfolios with U12 and U20 generators removed and generators of the same type at the same bus aggregated .
def get_portfolios3 ( ) : g1 = [ 0 ] g2 = [ 1 ] g7 = [ 2 ] g13 = [ 3 ] g14 = [ 4 ] # sync cond g15 = [ 5 ] g16 = [ 6 ] g18 = [ 7 ] g21 = [ 8 ] g22 = [ 9 ] g23 = [ 10 , 11 ] portfolios = [ g1 + g15 + g18 , g2 + g16 + g21 , g13 + g22 , g7 + g23 ] passive = g14 # sync_cond return portfolios , passive
665
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/ex6_1.py#L95-L118
[ "def", "duration", "(", "self", ")", ":", "if", "self", ".", "completion_ts", ":", "end", "=", "self", ".", "completed", "else", ":", "end", "=", "datetime", ".", "utcnow", "(", ")", "return", "end", "-", "self", ".", "started" ]
Convenience method for calling methods with walker .
def call ( self , tag_name : str , * args , * * kwargs ) : if hasattr ( self , tag_name ) : getattr ( self , tag_name ) ( * args , * * kwargs )
666
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L85-L88
[ "def", "view_portfolio_loss", "(", "token", ",", "dstore", ")", ":", "data", "=", "portfolio_loss", "(", "dstore", ")", "# shape (R, L)", "loss_types", "=", "list", "(", "dstore", "[", "'oqparam'", "]", ".", "loss_dt", "(", ")", ".", "names", ")", "header", "=", "[", "'portfolio_loss'", "]", "+", "loss_types", "mean", "=", "[", "'mean'", "]", "+", "[", "row", ".", "mean", "(", ")", "for", "row", "in", "data", ".", "T", "]", "stddev", "=", "[", "'stddev'", "]", "+", "[", "row", ".", "std", "(", "ddof", "=", "1", ")", "for", "row", "in", "data", ".", "T", "]", "return", "rst_table", "(", "[", "mean", ",", "stddev", "]", ",", "header", ")" ]
Get the derivative of the variable ; create it if it doesn't exist .
def der ( self , x : Sym ) : name = 'der({:s})' . format ( x . name ( ) ) if name not in self . scope [ 'dvar' ] . keys ( ) : self . scope [ 'dvar' ] [ name ] = self . sym . sym ( name , * x . shape ) self . scope [ 'states' ] . append ( x . name ( ) ) return self . scope [ 'dvar' ] [ name ]
667
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L94-L100
[ "def", "toGeoCoordinateString", "(", "self", ",", "sr", ",", "coordinates", ",", "conversionType", ",", "conversionMode", "=", "\"mgrsDefault\"", ",", "numOfDigits", "=", "None", ",", "rounding", "=", "True", ",", "addSpaces", "=", "True", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"sr\"", ":", "sr", ",", "\"coordinates\"", ":", "coordinates", ",", "\"conversionType\"", ":", "conversionType", "}", "url", "=", "self", ".", "_url", "+", "\"/toGeoCoordinateString\"", "if", "not", "conversionMode", "is", "None", ":", "params", "[", "'conversionMode'", "]", "=", "conversionMode", "if", "isinstance", "(", "numOfDigits", ",", "int", ")", ":", "params", "[", "'numOfDigits'", "]", "=", "numOfDigits", "if", "isinstance", "(", "rounding", ",", "int", ")", ":", "params", "[", "'rounding'", "]", "=", "rounding", "if", "isinstance", "(", "addSpaces", ",", "bool", ")", ":", "params", "[", "'addSpaces'", "]", "=", "addSpaces", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "securityHandler", "=", "self", ".", "_securityHandler", ")" ]
Create a gaussian noise variable
def noise_gaussian ( self , mean , std ) : assert std > 0 ng = self . sym . sym ( 'ng_{:d}' . format ( len ( self . scope [ 'ng' ] ) ) ) self . scope [ 'ng' ] . append ( ng ) return mean + std * ng
668
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L126-L131
[ "def", "in_", "(", "this", ",", "that", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "this", "=", "as_index", "(", "this", ",", "axis", "=", "axis", ",", "lex_as_struct", "=", "True", ",", "base", "=", "True", ")", "that", "=", "as_index", "(", "that", ",", "axis", "=", "axis", ",", "lex_as_struct", "=", "True", ")", "left", "=", "np", ".", "searchsorted", "(", "that", ".", "_keys", ",", "this", ".", "_keys", ",", "sorter", "=", "that", ".", "sorter", ",", "side", "=", "'left'", ")", "right", "=", "np", ".", "searchsorted", "(", "that", ".", "_keys", ",", "this", ".", "_keys", ",", "sorter", "=", "that", ".", "sorter", ",", "side", "=", "'right'", ")", "return", "left", "!=", "right" ]
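A stand-in sketch of what the helper returns, using sympy symbols in place of whatever symbolic factory self.sym wraps in the real parser (casadi-style, judging by the sym(name, *shape) calls): each call mints a fresh noise symbol, records it in the scope, and returns an affine expression in it.

import sympy

ng_symbols = []   # plays the role of scope['ng']

def noise_gaussian(mean, std):
    assert std > 0
    ng = sympy.Symbol('ng_{:d}'.format(len(ng_symbols)))
    ng_symbols.append(ng)
    return mean + std * ng

print(noise_gaussian(0.0, 0.1))   # 0.1*ng_0
print(noise_gaussian(2.0, 0.5))   # 0.5*ng_1 + 2.0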
Create a uniform noise variable
def noise_uniform ( self , lower_bound , upper_bound ) : assert upper_bound > lower_bound nu = self . sym . sym ( 'nu_{:d}' . format ( len ( self . scope [ 'nu' ] ) ) ) self . scope [ 'nu' ] . append ( nu ) return lower_bound + nu * ( upper_bound - lower_bound )
669
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L133-L138
[ "def", "_findProteinClusters", "(", "protToPeps", ",", "pepToProts", ")", ":", "clusters", "=", "list", "(", ")", "resolvingProteins", "=", "set", "(", "protToPeps", ")", "while", "resolvingProteins", ":", "protein", "=", "resolvingProteins", ".", "pop", "(", ")", "proteinCluster", "=", "set", "(", "[", "protein", "]", ")", "peptides", "=", "set", "(", "protToPeps", "[", "protein", "]", ")", "parsedPeptides", "=", "set", "(", ")", "while", "len", "(", "peptides", ")", "!=", "len", "(", "parsedPeptides", ")", ":", "for", "peptide", "in", "peptides", ":", "proteinCluster", ".", "update", "(", "pepToProts", "[", "peptide", "]", ")", "parsedPeptides", ".", "update", "(", "peptides", ")", "for", "protein", "in", "proteinCluster", ":", "peptides", ".", "update", "(", "protToPeps", "[", "protein", "]", ")", "clusters", ".", "append", "(", "proteinCluster", ")", "resolvingProteins", "=", "resolvingProteins", ".", "difference", "(", "proteinCluster", ")", "return", "clusters" ]
Convenience function for printing indented debug output .
def log ( self , * args , * * kwargs ) : if self . verbose : print ( ' ' * self . depth , * args , * * kwargs )
670
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L158-L161
[ "def", "convert_row", "(", "self", ",", "row", ",", "schema", ",", "fallbacks", ")", ":", "for", "index", ",", "field", "in", "enumerate", "(", "schema", ".", "fields", ")", ":", "value", "=", "row", "[", "index", "]", "if", "index", "in", "fallbacks", ":", "value", "=", "_uncast_value", "(", "value", ",", "field", "=", "field", ")", "else", ":", "value", "=", "field", ".", "cast_value", "(", "value", ")", "row", "[", "index", "]", "=", "value", "return", "row" ]
Returns the 6 bus case from Wood & Wollenberg PG&C .
def get_case6ww ( ) : path = os . path . dirname ( pylon . __file__ ) path = os . path . join ( path , "test" , "data" ) path = os . path . join ( path , "case6ww" , "case6ww.pkl" ) case = pylon . Case . load ( path ) case . generators [ 0 ] . p_cost = ( 0.0 , 4.0 , 200.0 ) case . generators [ 1 ] . p_cost = ( 0.0 , 3.0 , 200.0 ) # case.generators[0].p_cost = (0.0, 5.1, 200.0) # 10% # case.generators[1].p_cost = (0.0, 4.5, 200.0) # 30% case . generators [ 2 ] . p_cost = ( 0.0 , 6.0 , 200.0 ) # passive # case.generators[0].c_shutdown = 100.0 # case.generators[1].c_shutdown = 100.0 # case.generators[2].c_shutdown = 100.0 case . generators [ 0 ] . p_min = 0.0 # TODO: Unit-decommitment. case . generators [ 1 ] . p_min = 0.0 case . generators [ 2 ] . p_min = 0.0 case . generators [ 0 ] . p_max = 110.0 case . generators [ 1 ] . p_max = 110.0 case . generators [ 2 ] . p_max = 220.0 # passive # FIXME: Correct generator naming order. for g in case . generators : g . name #pyreto.util.plotGenCost(case.generators) return case
671
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L36-L70
[ "def", "reset", "(", "self", ")", ":", "self", ".", "minimum", "=", "None", "self", ".", "maximum", "=", "None", "self", ".", "start_time", "=", "None", "# datetime, absolute start time", "self", ".", "idx_current", "=", "None", "self", ".", "idx_markers", "=", "[", "]", "self", ".", "idx_annot", "=", "[", "]", "if", "self", ".", "scene", "is", "not", "None", ":", "self", ".", "scene", ".", "clear", "(", ")", "self", ".", "scene", "=", "None" ]
Returns the 24 bus IEEE Reliability Test System .
def get_case24_ieee_rts ( ) : path = os . path . dirname ( pylon . __file__ ) path = os . path . join ( path , "test" , "data" ) path = os . path . join ( path , "case24_ieee_rts" , "case24_ieee_rts.pkl" ) case = pylon . Case . load ( path ) # FIXME: Correct generator naming order. for g in case . generators : g . name return case
672
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L107-L120
[ "def", "_export_with_html", "(", "self", ")", ":", "# pragma: no cover", "self", ".", "export_success", "=", "False", "try", ":", "tstamp", "=", "time", ".", "strftime", "(", "self", ".", "timestamp_format", ",", "self", ".", "_timestamp", ")", "substitutions", "=", "{", "}", "for", "(", "basename", ",", "ext", ")", ",", "entry", "in", "self", ".", "_files", ".", "items", "(", ")", ":", "(", "_", ",", "info", ")", "=", "entry", "html_key", "=", "self", ".", "_replacements", ".", "get", "(", "(", "basename", ",", "ext", ")", ",", "None", ")", "if", "html_key", "is", "None", ":", "continue", "filename", "=", "self", ".", "_format", "(", "basename", ",", "{", "'timestamp'", ":", "tstamp", ",", "'notebook'", ":", "self", ".", "notebook_name", "}", ")", "fpath", "=", "filename", "+", "(", "(", "'.%s'", "%", "ext", ")", "if", "ext", "else", "''", ")", "info", "=", "{", "'src'", ":", "fpath", ",", "'mime_type'", ":", "info", "[", "'mime_type'", "]", "}", "# No mime type", "if", "'mime_type'", "not", "in", "info", ":", "pass", "# Not displayable in an HTML tag", "elif", "info", "[", "'mime_type'", "]", "not", "in", "self", ".", "_tags", ":", "pass", "else", ":", "basename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fpath", ")", "truncated", "=", "self", ".", "_truncate_name", "(", "basename", ",", "ext", "[", "1", ":", "]", ")", "link_html", "=", "self", ".", "_format", "(", "self", ".", "_tags", "[", "info", "[", "'mime_type'", "]", "]", ",", "{", "'src'", ":", "truncated", ",", "'mime_type'", ":", "info", "[", "'mime_type'", "]", ",", "'css'", ":", "''", "}", ")", "substitutions", "[", "html_key", "]", "=", "(", "link_html", ",", "truncated", ")", "node", "=", "self", ".", "_get_notebook_node", "(", ")", "html", "=", "self", ".", "_generate_html", "(", "node", ",", "substitutions", ")", "export_filename", "=", "self", ".", "snapshot_name", "# Add the html snapshot", "super", "(", "NotebookArchive", ",", "self", ")", ".", "add", "(", "filename", "=", "export_filename", ",", "data", "=", "html", ",", "info", "=", "{", "'file-ext'", ":", "'html'", ",", "'mime_type'", ":", "'text/html'", ",", "'notebook'", ":", "self", ".", "notebook_name", "}", ")", "# Add cleared notebook", "cleared", "=", "self", ".", "_clear_notebook", "(", "node", ")", "super", "(", "NotebookArchive", ",", "self", ")", ".", "add", "(", "filename", "=", "export_filename", ",", "data", "=", "cleared", ",", "info", "=", "{", "'file-ext'", ":", "'ipynb'", ",", "'mime_type'", ":", "'text/json'", ",", "'notebook'", ":", "self", ".", "notebook_name", "}", ")", "# If store cleared_notebook... save here", "super", "(", "NotebookArchive", ",", "self", ")", ".", "export", "(", "timestamp", "=", "self", ".", "_timestamp", ",", "info", "=", "{", "'notebook'", ":", "self", ".", "notebook_name", "}", ")", "except", ":", "self", ".", "traceback", "=", "traceback", ".", "format_exc", "(", ")", "else", ":", "self", ".", "export_success", "=", "True" ]
Returns a tuple of task and agent for the given learner .
def get_discrete_task_agent ( generators , market , nStates , nOffer , markups , withholds , maxSteps , learner , Pd0 = None , Pd_min = 0.0 ) : env = pyreto . discrete . MarketEnvironment ( generators , market , numStates = nStates , numOffbids = nOffer , markups = markups , withholds = withholds , Pd0 = Pd0 , Pd_min = Pd_min ) task = pyreto . discrete . ProfitTask ( env , maxSteps = maxSteps ) nActions = len ( env . _allActions ) module = ActionValueTable ( numStates = nStates , numActions = nActions ) agent = LearningAgent ( module , learner ) return task , agent
673
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L166-L184
[ "def", "random_rotation", "(", ")", ":", "rand_seed", "=", "np", ".", "random", ".", "rand", "(", "3", ",", "3", ")", "U", ",", "S", ",", "V", "=", "np", ".", "linalg", ".", "svd", "(", "rand_seed", ")", "return", "U" ]
Returns a task - agent tuple whose action is always zero .
def get_zero_task_agent ( generators , market , nOffer , maxSteps ) : env = pyreto . discrete . MarketEnvironment ( generators , market , nOffer ) task = pyreto . discrete . ProfitTask ( env , maxSteps = maxSteps ) agent = pyreto . util . ZeroAgent ( env . outdim , env . indim ) return task , agent
674
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L211-L217
[ "def", "imsave", "(", "filename", ",", "data", ",", "maxval", "=", "None", ",", "pam", "=", "False", ")", ":", "try", ":", "netpbm", "=", "NetpbmFile", "(", "data", ",", "maxval", "=", "maxval", ")", "netpbm", ".", "write", "(", "filename", ",", "pam", "=", "pam", ")", "finally", ":", "netpbm", ".", "close", "(", ")" ]
Returns a task - agent tuple whose action is always minus one .
def get_neg_one_task_agent ( generators , market , nOffer , maxSteps ) : env = pyreto . discrete . MarketEnvironment ( generators , market , nOffer ) task = pyreto . discrete . ProfitTask ( env , maxSteps = maxSteps ) agent = pyreto . util . NegOneAgent ( env . outdim , env . indim ) return task , agent
675
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L220-L226
[ "def", "imsave", "(", "filename", ",", "data", ",", "maxval", "=", "None", ",", "pam", "=", "False", ")", ":", "try", ":", "netpbm", "=", "NetpbmFile", "(", "data", ",", "maxval", "=", "maxval", ")", "netpbm", ".", "write", "(", "filename", ",", "pam", "=", "pam", ")", "finally", ":", "netpbm", ".", "close", "(", ")" ]
Runs the given experiment and returns the results .
def run_experiment ( experiment , roleouts , episodes , in_cloud = False , dynProfile = None ) : def run ( ) : if dynProfile is None : maxsteps = len ( experiment . profile ) # episode length else : maxsteps = dynProfile . shape [ 1 ] na = len ( experiment . agents ) ni = roleouts * episodes * maxsteps all_action = zeros ( ( na , 0 ) ) all_reward = zeros ( ( na , 0 ) ) epsilon = zeros ( ( na , ni ) ) # exploration rate # Converts to action vector in percentage markup values. vmarkup = vectorize ( get_markup ) for roleout in range ( roleouts ) : if dynProfile is not None : # Apply new load profile before each roleout (week). i = roleout * episodes # index of first profile value experiment . profile = dynProfile [ i : i + episodes , : ] # print "PROFILE:", experiment.profile, episodes experiment . doEpisodes ( episodes ) # number of samples per learning step nei = episodes * maxsteps # num interactions per role epi_action = zeros ( ( 0 , nei ) ) epi_reward = zeros ( ( 0 , nei ) ) for i , ( task , agent ) in enumerate ( zip ( experiment . tasks , experiment . agents ) ) : action = copy ( agent . history [ "action" ] ) reward = copy ( agent . history [ "reward" ] ) for j in range ( nei ) : if isinstance ( agent . learner , DirectSearchLearner ) : action [ j , : ] = task . denormalize ( action [ j , : ] ) k = nei * roleout epsilon [ i , k : k + nei ] = agent . learner . explorer . sigma [ 0 ] elif isinstance ( agent . learner , ValueBasedLearner ) : action [ j , : ] = vmarkup ( action [ j , : ] , task ) k = nei * roleout epsilon [ i , k : k + nei ] = agent . learner . explorer . epsilon else : action = vmarkup ( action , task ) # FIXME: Only stores action[0] for all interactions. epi_action = c_ [ epi_action . T , action [ : , 0 ] . flatten ( ) ] . T epi_reward = c_ [ epi_reward . T , reward . flatten ( ) ] . T if hasattr ( agent , "module" ) : print "PARAMS:" , agent . module . params agent . learn ( ) agent . reset ( ) all_action = c_ [ all_action , epi_action ] all_reward = c_ [ all_reward , epi_reward ] return all_action , all_reward , epsilon if in_cloud : import cloud job_id = cloud . call ( run , _high_cpu = False ) result = cloud . result ( job_id ) all_action , all_reward , epsilon = result else : all_action , all_reward , epsilon = run ( ) return all_action , all_reward , epsilon
676
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L241-L314
[ "def", "scale_rows", "(", "A", ",", "v", ",", "copy", "=", "True", ")", ":", "v", "=", "np", ".", "ravel", "(", "v", ")", "M", ",", "N", "=", "A", ".", "shape", "if", "not", "isspmatrix", "(", "A", ")", ":", "raise", "ValueError", "(", "'scale rows needs a sparse matrix'", ")", "if", "M", "!=", "len", "(", "v", ")", ":", "raise", "ValueError", "(", "'scale vector has incompatible shape'", ")", "if", "copy", ":", "A", "=", "A", ".", "copy", "(", ")", "A", ".", "data", "=", "np", ".", "asarray", "(", "A", ".", "data", ",", "dtype", "=", "upcast", "(", "A", ".", "dtype", ",", "v", ".", "dtype", ")", ")", "else", ":", "v", "=", "np", ".", "asarray", "(", "v", ",", "dtype", "=", "A", ".", "dtype", ")", "if", "isspmatrix_csr", "(", "A", ")", ":", "csr_scale_rows", "(", "M", ",", "N", ",", "A", ".", "indptr", ",", "A", ".", "indices", ",", "A", ".", "data", ",", "v", ")", "elif", "isspmatrix_bsr", "(", "A", ")", ":", "R", ",", "C", "=", "A", ".", "blocksize", "bsr_scale_rows", "(", "int", "(", "M", "/", "R", ")", ",", "int", "(", "N", "/", "C", ")", ",", "R", ",", "C", ",", "A", ".", "indptr", ",", "A", ".", "indices", ",", "np", ".", "ravel", "(", "A", ".", "data", ")", ",", "v", ")", "elif", "isspmatrix_csc", "(", "A", ")", ":", "pyamg", ".", "amg_core", ".", "csc_scale_rows", "(", "M", ",", "N", ",", "A", ".", "indptr", ",", "A", ".", "indices", ",", "A", ".", "data", ",", "v", ")", "else", ":", "fmt", "=", "A", ".", "format", "A", "=", "scale_rows", "(", "csr_matrix", "(", "A", ")", ",", "v", ")", ".", "asformat", "(", "fmt", ")", "return", "A" ]
Returns percentages of peak load for all hours of the year .
def get_full_year ( ) : weekly = get_weekly ( ) daily = get_daily ( ) hourly_winter_wkdy , hourly_winter_wknd = get_winter_hourly ( ) hourly_summer_wkdy , hourly_summer_wknd = get_summer_hourly ( ) hourly_spring_autumn_wkdy , hourly_spring_autumn_wknd = get_spring_autumn_hourly ( ) fullyear = zeros ( 364 * 24 ) c = 0 l = [ ( 0 , 7 , hourly_winter_wkdy , hourly_winter_wknd ) , ( 8 , 16 , hourly_spring_autumn_wkdy , hourly_spring_autumn_wknd ) , ( 17 , 29 , hourly_summer_wkdy , hourly_summer_wknd ) , ( 30 , 42 , hourly_spring_autumn_wkdy , hourly_spring_autumn_wknd ) , ( 43 , 51 , hourly_winter_wkdy , hourly_winter_wknd ) ] for start , end , wkdy , wknd in l : for w in weekly [ start : end + 1 ] : for d in daily [ : 5 ] : for h in wkdy : fullyear [ c ] = w * ( d / 100.0 ) * ( h / 100.0 ) c += 1 for d in daily [ 5 : ] : for h in wknd : fullyear [ c ] = w * ( d / 100.0 ) * ( h / 100.0 ) c += 1 return fullyear
677
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L426-L457
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Returns percentages of peak load for all days of the year . Data from the IEEE RTS .
def get_all_days ( ) : weekly = get_weekly ( ) daily = get_daily ( ) return [ w * ( d / 100.0 ) for w in weekly for d in daily ]
678
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L460-L467
[ "def", "_begin", "(", "self", ",", "retry_id", "=", "None", ")", ":", "if", "self", ".", "in_progress", ":", "msg", "=", "_CANT_BEGIN", ".", "format", "(", "self", ".", "_id", ")", "raise", "ValueError", "(", "msg", ")", "transaction_response", "=", "self", ".", "_client", ".", "_firestore_api", ".", "begin_transaction", "(", "self", ".", "_client", ".", "_database_string", ",", "options_", "=", "self", ".", "_options_protobuf", "(", "retry_id", ")", ",", "metadata", "=", "self", ".", "_client", ".", "_rpc_metadata", ",", ")", "self", ".", "_id", "=", "transaction_response", ".", "transaction" ]
Returns an experiment that uses Q - learning .
def get_q_experiment ( case , minor = 1 ) : gen = case . generators profile = array ( [ 1.0 ] ) maxSteps = len ( profile ) if minor == 1 : alpha = 0.3 # Learning rate. gamma = 0.99 # Discount factor # The closer epsilon gets to 0, the more greedy and less explorative. epsilon = 0.9 decay = 0.97 tau = 150.0 # Boltzmann temperature. qlambda = 0.9 elif minor == 2 : alpha = 0.1 # Learning rate. gamma = 0.99 # Discount factor # The closer epsilon gets to 0, the more greedy and less explorative. epsilon = 0.9 decay = 0.99 tau = 150.0 # Boltzmann temperature. qlambda = 0.9 else : raise ValueError market = pyreto . SmartMarket ( case , priceCap = cap , decommit = decommit , auctionType = auctionType ) experiment = pyreto . continuous . MarketExperiment ( [ ] , [ ] , market , profile ) for g in gen [ 0 : 2 ] : learner = Q ( alpha , gamma ) # learner = QLambda(alpha, gamma, qlambda) # learner = SARSA(alpha, gamma) learner . explorer . epsilon = epsilon learner . explorer . decay = decay # learner.explorer = BoltzmannExplorer(tau, decay) task , agent = get_discrete_task_agent ( [ g ] , market , nStates , nOffer , markups , withholds , maxSteps , learner ) experiment . tasks . append ( task ) experiment . agents . append ( agent ) # Passive agent. task , agent = get_zero_task_agent ( gen [ 2 : 3 ] , market , nOffer , maxSteps ) experiment . tasks . append ( task ) experiment . agents . append ( agent ) return experiment
679
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/ex5_1.py#L79-L133
[ "def", "subscribe_commit", "(", "self", ",", "repo_name", ",", "branch", ",", "from_commit_id", "=", "None", ")", ":", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", "req", "=", "proto", ".", "SubscribeCommitRequest", "(", "repo", "=", "repo", ",", "branch", "=", "branch", ")", "if", "from_commit_id", "is", "not", "None", ":", "getattr", "(", "req", ",", "'from'", ")", ".", "CopyFrom", "(", "proto", ".", "Commit", "(", "repo", "=", "repo", ",", "id", "=", "from_commit_id", ")", ")", "res", "=", "self", ".", "stub", ".", "SubscribeCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
Is the machine at its limit of reactive power?
def q_limited ( self ) : if ( self . q >= self . q_max ) or ( self . q <= self . q_min ) : return True else : return False
680
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L156-L162
[ "def", "_read", "(", "self", ",", "directory", ",", "filename", ",", "session", ",", "path", ",", "name", ",", "extension", ",", "spatial", ",", "spatialReferenceID", ",", "replaceParamFile", ",", "force_relative", "=", "True", ")", ":", "self", ".", "project_directory", "=", "directory", "with", "tmp_chdir", "(", "directory", ")", ":", "# Headers to ignore", "HEADERS", "=", "(", "'GSSHAPROJECT'", ",", ")", "# WMS Cards to include (don't discount as comments)", "WMS_CARDS", "=", "(", "'#INDEXGRID_GUID'", ",", "'#PROJECTION_FILE'", ",", "'#LandSoil'", ",", "'#CHANNEL_POINT_INPUT_WMS'", ")", "GSSHAPY_CARDS", "=", "(", "'#GSSHAPY_EVENT_YML'", ",", ")", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "not", "line", ".", "strip", "(", ")", ":", "# Skip empty lines", "continue", "elif", "'#'", "in", "line", ".", "split", "(", ")", "[", "0", "]", "and", "line", ".", "split", "(", ")", "[", "0", "]", "not", "in", "WMS_CARDS", "+", "GSSHAPY_CARDS", ":", "# Skip comments designated by the hash symbol", "# (with the exception of WMS_CARDS and GSSHAPY_CARDS)", "continue", "try", ":", "card", "=", "self", ".", "_extractCard", "(", "line", ",", "force_relative", ")", "except", ":", "card", "=", "self", ".", "_extractDirectoryCard", "(", "line", ",", "force_relative", ")", "# Now that the cardName and cardValue are separated", "# load them into the gsshapy objects", "if", "card", "[", "'name'", "]", "not", "in", "HEADERS", ":", "# Create GSSHAPY Project Card object", "prjCard", "=", "ProjectCard", "(", "name", "=", "card", "[", "'name'", "]", ",", "value", "=", "card", "[", "'value'", "]", ")", "# Associate ProjectCard with ProjectFile", "prjCard", ".", "projectFile", "=", "self", "# Extract MAP_TYPE card value for convenience working", "# with output maps", "if", "card", "[", "'name'", "]", "==", "'MAP_TYPE'", ":", "self", ".", "mapType", "=", "int", "(", "card", "[", "'value'", "]", ")", "# Assign properties", "self", ".", "srid", "=", "spatialReferenceID", "self", ".", "name", "=", "name", "self", ".", "fileExtension", "=", "extension" ]
Computes total cost for the generator at the given output level .
def total_cost ( self , p = None , p_cost = None , pcost_model = None ) : p = self . p if p is None else p p_cost = self . p_cost if p_cost is None else p_cost pcost_model = self . pcost_model if pcost_model is None else pcost_model p = 0.0 if not self . online else p if pcost_model == PW_LINEAR : n_segments = len ( p_cost ) - 1 # Iterate over the piece-wise linear segments. for i in range ( n_segments ) : x1 , y1 = p_cost [ i ] x2 , y2 = p_cost [ i + 1 ] m = ( y2 - y1 ) / ( x2 - x1 ) c = y1 - m * x1 if x1 <= p <= x2 : result = m * p + c break else : # print "TOTC:", self.name, p, self.p_max, p_cost # raise ValueError, "Value [%f] outwith pwl cost curve." % p # Use the last segment for values outwith the cost curve. logger . error ( "Value [%f] outside pwl cost curve [%s]." % ( p , p_cost [ - 1 ] [ 0 ] ) ) result = m * p + c elif pcost_model == POLYNOMIAL : # result = p_cost[-1] # for i in range(1, len(p_cost)): # result += p_cost[-(i + 1)] * p**i result = polyval ( p_cost , p ) else : raise ValueError if self . is_load : return - result else : return result
681
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L180-L220
[ "def", "_encode_wiki_sections", "(", "sections", ",", "vocab", ")", ":", "ids", "=", "[", "]", "section_boundaries", "=", "[", "]", "for", "i", ",", "section", "in", "enumerate", "(", "sections", ")", ":", "if", "i", ">", "0", ":", "# Skip including article title", "ids", ".", "extend", "(", "vocab", ".", "encode", "(", "_format_title", "(", "_normalize_text", "(", "section", ".", "title", ")", ")", ")", ")", "ids", ".", "extend", "(", "vocab", ".", "encode", "(", "_normalize_text", "(", "section", ".", "text", ")", ")", ")", "section_boundaries", ".", "append", "(", "len", "(", "ids", ")", ")", "return", "ids", ",", "section_boundaries" ]
Sets the piece - wise linear cost attribute converting the polynomial cost variable by evaluating at zero and then at n_points evenly spaced points between p_min and p_max .
def poly_to_pwl ( self , n_points = 4 ) : assert self . pcost_model == POLYNOMIAL p_min = self . p_min p_max = self . p_max p_cost = [ ] if p_min > 0.0 : # Make the first segment go from the origin to p_min. step = ( p_max - p_min ) / ( n_points - 2 ) y0 = self . total_cost ( 0.0 ) p_cost . append ( ( 0.0 , y0 ) ) x = p_min n_points -= 1 else : step = ( p_max - p_min ) / ( n_points - 1 ) x = 0.0 for _ in range ( n_points ) : y = self . total_cost ( x ) p_cost . append ( ( x , y ) ) x += step # Change the cost model and set the new cost. self . pcost_model = PW_LINEAR self . p_cost = p_cost
682
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L278-L308
[ "def", "get_translator", "(", "domain", ",", "directory", ",", "languages", "=", "None", ",", "translatorklass", "=", "Translator", ",", "fallback", "=", "False", ",", "fallbackklass", "=", "NullTranslator", ")", ":", "translator", "=", "gettext", ".", "translation", "(", "domain", ",", "localedir", "=", "directory", ",", "languages", "=", "languages", ",", "class_", "=", "translatorklass", ",", "fallback", "=", "fallback", ")", "if", "not", "isinstance", "(", "translator", ",", "gettext", ".", "GNUTranslations", ")", "and", "fallbackklass", ":", "translator", "=", "fallbackklass", "(", ")", "return", "translator" ]
Returns quantity and price offers created from the cost function .
def get_offers ( self , n_points = 6 ) : from pyreto . smart_market import Offer qtyprc = self . _get_qtyprc ( n_points ) return [ Offer ( self , qty , prc ) for qty , prc in qtyprc ]
683
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L311-L317
[ "def", "embedManifestDllCheck", "(", "target", ",", "source", ",", "env", ")", ":", "if", "env", ".", "get", "(", "'WINDOWS_EMBED_MANIFEST'", ",", "0", ")", ":", "manifestSrc", "=", "target", "[", "0", "]", ".", "get_abspath", "(", ")", "+", "'.manifest'", "if", "os", ".", "path", ".", "exists", "(", "manifestSrc", ")", ":", "ret", "=", "(", "embedManifestDllAction", ")", "(", "[", "target", "[", "0", "]", "]", ",", "None", ",", "env", ")", "if", "ret", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "\"Unable to embed manifest into %s\"", "%", "(", "target", "[", "0", "]", ")", ")", "return", "ret", "else", ":", "print", "(", "'(embed: no %s.manifest found; not embedding.)'", "%", "str", "(", "target", "[", "0", "]", ")", ")", "return", "0" ]
Returns quantity and price bids created from the cost function .
def get_bids ( self , n_points = 6 ) : from pyreto . smart_market import Bid qtyprc = self . _get_qtyprc ( n_points ) return [ Bid ( self , qty , prc ) for qty , prc in qtyprc ]
684
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L320-L326
[ "def", "parse_from_file", "(", "filename", ",", "nodata", "=", "False", ")", ":", "header", "=", "None", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "file", ":", "header", "=", "read_machine_header", "(", "file", ")", "meta_raw", "=", "file", ".", "read", "(", "header", "[", "'meta_len'", "]", ")", "meta", "=", "__parse_meta", "(", "meta_raw", ",", "header", ")", "data", "=", "b''", "if", "not", "nodata", ":", "data", "=", "__decompress", "(", "meta", ",", "file", ".", "read", "(", "header", "[", "'data_len'", "]", ")", ")", "return", "header", ",", "meta", ",", "data" ]
Updates the piece - wise linear total cost function using the given offer blocks .
def offers_to_pwl ( self , offers ) : assert not self . is_load # Only apply offers associated with this generator. g_offers = [ offer for offer in offers if offer . generator == self ] # Fliter out zero quantity offers. gt_zero = [ offr for offr in g_offers if round ( offr . quantity , 4 ) > 0.0 ] # Ignore withheld offers. valid = [ offer for offer in gt_zero if not offer . withheld ] p_offers = [ v for v in valid if not v . reactive ] q_offers = [ v for v in valid if v . reactive ] if p_offers : self . p_cost = self . _offbids_to_points ( p_offers ) self . pcost_model = PW_LINEAR self . online = True else : self . p_cost = [ ( 0.0 , 0.0 ) , ( self . p_max , 0.0 ) ] self . pcost_model = PW_LINEAR if q_offers : # Dispatch at zero real power without shutting down # if capacity offered for reactive power. self . p_min = 0.0 self . p_max = 0.0 self . online = True else : self . online = False if q_offers : self . q_cost = self . _offbids_to_points ( q_offers ) self . qcost_model = PW_LINEAR else : self . q_cost = None #[(0.0, 0.0), (self.q_max, 0.0)] self . qcost_model = PW_LINEAR if not len ( p_offers ) and not len ( q_offers ) : logger . info ( "No valid offers for generator [%s], shutting down." % self . name ) self . online = False self . _adjust_limits ( )
685
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L420-L466
[ "def", "to_struct", "(", "self", ")", ":", "structobj", "=", "self", ".", "struct_type", "(", ")", "for", "k", "in", "structobj", ".", "attributes", "(", ")", ":", "self", ".", "log", ".", "info", "(", "\"Setting attribute %s to %r\"", "%", "(", "k", ",", "getattr", "(", "self", ",", "k", ")", ")", ")", "setattr", "(", "structobj", ",", "k", ",", "getattr", "(", "self", ",", "k", ")", ")", "return", "structobj" ]
Updates the piece - wise linear total cost function using the given bid blocks .
def bids_to_pwl ( self , bids ) : assert self . is_load # Apply only those bids associated with this dispatchable load. vl_bids = [ bid for bid in bids if bid . vLoad == self ] # Filter out zero quantity bids. gt_zero = [ bid for bid in vl_bids if round ( bid . quantity , 4 ) > 0.0 ] # Ignore withheld offers. valid_bids = [ bid for bid in gt_zero if not bid . withheld ] p_bids = [ v for v in valid_bids if not v . reactive ] q_bids = [ v for v in valid_bids if v . reactive ] if p_bids : self . p_cost = self . _offbids_to_points ( p_bids , True ) self . pcost_model = PW_LINEAR self . online = True else : self . p_cost = [ ( 0.0 , 0.0 ) , ( self . p_max , 0.0 ) ] self . pcost_model = PW_LINEAR logger . info ( "No valid active power bids for dispatchable load " "[%s], shutting down." % self . name ) self . online = False if q_bids : self . q_cost = self . _offbids_to_points ( q_bids , True ) self . qcost_model = PW_LINEAR self . online = True else : self . q_cost = [ ( self . q_min , 0.0 ) , ( 0.0 , 0.0 ) , ( self . q_max , 0.0 ) ] self . qcost_model = PW_LINEAR # logger.info("No valid bids for dispatchable load, shutting down.") # self.online = False self . _adjust_limits ( )
686
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L469-L508
[ "def", "update_repodata", "(", "self", ",", "channels", "=", "None", ")", ":", "norm_channels", "=", "self", ".", "conda_get_condarc_channels", "(", "channels", "=", "channels", ",", "normalize", "=", "True", ")", "repodata_urls", "=", "self", ".", "_set_repo_urls_from_channels", "(", "norm_channels", ")", "self", ".", "_check_repos", "(", "repodata_urls", ")" ]
Sets the active power limits p_max and p_min according to the pwl cost function points .
def _adjust_limits ( self ) : if not self . is_load : # self.p_min = min([point[0] for point in self.p_cost]) self . p_max = max ( [ point [ 0 ] for point in self . p_cost ] ) else : p_min = min ( [ point [ 0 ] for point in self . p_cost ] ) self . p_max = 0.0 self . q_min = self . q_min * p_min / self . p_min self . q_max = self . q_max * p_min / self . p_min self . p_min = p_min
687
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L537-L549
[ "def", "AddEnumDescriptor", "(", "self", ",", "name", ",", "description", ",", "enum_values", ",", "enum_descriptions", ")", ":", "message", "=", "extended_descriptor", ".", "ExtendedEnumDescriptor", "(", ")", "message", ".", "name", "=", "self", ".", "__names", ".", "ClassName", "(", "name", ")", "message", ".", "description", "=", "util", ".", "CleanDescription", "(", "description", ")", "self", ".", "__DeclareDescriptor", "(", "message", ".", "name", ")", "for", "index", ",", "(", "enum_name", ",", "enum_description", ")", "in", "enumerate", "(", "zip", "(", "enum_values", ",", "enum_descriptions", ")", ")", ":", "enum_value", "=", "extended_descriptor", ".", "ExtendedEnumValueDescriptor", "(", ")", "enum_value", ".", "name", "=", "self", ".", "__names", ".", "NormalizeEnumName", "(", "enum_name", ")", "if", "enum_value", ".", "name", "!=", "enum_name", ":", "message", ".", "enum_mappings", ".", "append", "(", "extended_descriptor", ".", "ExtendedEnumDescriptor", ".", "JsonEnumMapping", "(", "python_name", "=", "enum_value", ".", "name", ",", "json_name", "=", "enum_name", ")", ")", "self", ".", "__AddImport", "(", "'from %s import encoding'", "%", "self", ".", "__base_files_package", ")", "enum_value", ".", "number", "=", "index", "enum_value", ".", "description", "=", "util", ".", "CleanDescription", "(", "enum_description", "or", "'<no description>'", ")", "message", ".", "values", ".", "append", "(", "enum_value", ")", "self", ".", "__RegisterDescriptor", "(", "message", ")" ]
The number of action values that the environment accepts .
def indim ( self ) : indim = self . numOffbids * len ( self . generators ) if self . maxWithhold is not None : return indim * 2 else : return indim
688
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/continuous/environment.py#L103-L111
[ "def", "install", "(", "self", ",", "ref", ",", "table_name", "=", "None", ",", "index_columns", "=", "None", ",", "logger", "=", "None", ")", ":", "try", ":", "obj_number", "=", "ObjectNumber", ".", "parse", "(", "ref", ")", "if", "isinstance", "(", "obj_number", ",", "TableNumber", ")", ":", "table", "=", "self", ".", "_library", ".", "table", "(", "ref", ")", "connection", "=", "self", ".", "_backend", ".", "_get_connection", "(", ")", "return", "self", ".", "_backend", ".", "install_table", "(", "connection", ",", "table", ",", "logger", "=", "logger", ")", "else", ":", "# assume partition", "raise", "NotObjectNumberError", "except", "NotObjectNumberError", ":", "# assume partition.", "partition", "=", "self", ".", "_library", ".", "partition", "(", "ref", ")", "connection", "=", "self", ".", "_backend", ".", "_get_connection", "(", ")", "return", "self", ".", "_backend", ".", "install", "(", "connection", ",", "partition", ",", "table_name", "=", "table_name", ",", "index_columns", "=", "index_columns", ",", "logger", "=", "logger", ")" ]
Returns an array of length nb where each value is the sum of the Lagrangian multipliers on the upper and the negative of the Lagrangian multipliers on the lower voltage limits .
def _getBusVoltageLambdaSensor ( self ) : muVmin = array ( [ b . mu_vmin for b in self . market . case . connected_buses ] ) muVmax = array ( [ b . mu_vmax for b in self . market . case . connected_buses ] ) muVmin = - 1.0 * muVmin diff = muVmin + muVmax return diff
689
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/continuous/environment.py#L177-L185
[ "def", "to_json", "(", "self", ")", ":", "result", "=", "super", "(", "FieldsResource", ",", "self", ")", ".", "to_json", "(", ")", "result", "[", "'fields'", "]", "=", "self", ".", "fields_with_locales", "(", ")", "return", "result" ]
Parse a Doxygen source file and return a dictionary of all the values . Values will be strings and lists of strings .
def DoxyfileParse ( file_contents ) : data = { } import shlex lex = shlex . shlex ( instream = file_contents , posix = True ) lex . wordchars += "*+./-:" lex . whitespace = lex . whitespace . replace ( "\n" , "" ) lex . escape = "" lineno = lex . lineno token = lex . get_token ( ) key = token # the first token should be a key last_token = "" key_token = False next_key = False new_data = True def append_data ( data , key , new_data , token ) : if new_data or len ( data [ key ] ) == 0 : data [ key ] . append ( token ) else : data [ key ] [ - 1 ] += token while token : if token in [ '\n' ] : if last_token not in [ '\\' ] : key_token = True elif token in [ '\\' ] : pass elif key_token : key = token key_token = False else : if token == "+=" : if not data . has_key ( key ) : data [ key ] = list ( ) elif token == "=" : if key == "TAGFILES" and data . has_key ( key ) : append_data ( data , key , False , "=" ) new_data = False else : data [ key ] = list ( ) else : append_data ( data , key , new_data , token ) new_data = True last_token = token token = lex . get_token ( ) if last_token == '\\' and token != '\n' : new_data = False append_data ( data , key , new_data , '\\' ) # compress lists of len 1 into single strings for ( k , v ) in data . items ( ) : if len ( v ) == 0 : data . pop ( k ) # items in the following list will be kept as lists and not converted to strings if k in [ "INPUT" , "FILE_PATTERNS" , "EXCLUDE_PATTERNS" , "TAGFILES" ] : continue if len ( v ) == 1 : data [ k ] = v [ 0 ] return data
690
https://github.com/melizalab/libtfr/blob/9f7e7705793d258a0b205f185b20e3bbcda473da/site_scons/site_tools/doxygen.py#L28-L97
[ "def", "pw_compare_class_sets", "(", "self", ",", "cset1", ":", "Set", "[", "ClassId", "]", ",", "cset2", ":", "Set", "[", "ClassId", "]", ")", "->", "Tuple", "[", "ICValue", ",", "ICValue", ",", "ICValue", "]", ":", "pairs", "=", "self", ".", "mica_ic_df", ".", "loc", "[", "cset1", ",", "cset2", "]", "max0", "=", "pairs", ".", "max", "(", "axis", "=", "0", ")", "max1", "=", "pairs", ".", "max", "(", "axis", "=", "1", ")", "idxmax0", "=", "pairs", ".", "idxmax", "(", "axis", "=", "0", ")", "idxmax1", "=", "pairs", ".", "idxmax", "(", "axis", "=", "1", ")", "mean0", "=", "max0", ".", "mean", "(", ")", "mean1", "=", "max1", ".", "mean", "(", ")", "return", "(", "mean0", "+", "mean1", ")", "/", "2", ",", "mean0", ",", "mean1" ]
Doxygen Doxyfile source scanner . This should scan the Doxygen file and add any files used to generate docs to the list of source files .
def DoxySourceScan ( node , env , path ) : default_file_patterns = [ '*.c' , '*.cc' , '*.cxx' , '*.cpp' , '*.c++' , '*.java' , '*.ii' , '*.ixx' , '*.ipp' , '*.i++' , '*.inl' , '*.h' , '*.hh ' , '*.hxx' , '*.hpp' , '*.h++' , '*.idl' , '*.odl' , '*.cs' , '*.php' , '*.php3' , '*.inc' , '*.m' , '*.mm' , '*.py' , ] default_exclude_patterns = [ '*~' , ] sources = [ ] data = DoxyfileParse ( node . get_contents ( ) ) if data . get ( "RECURSIVE" , "NO" ) == "YES" : recursive = True else : recursive = False file_patterns = data . get ( "FILE_PATTERNS" , default_file_patterns ) exclude_patterns = data . get ( "EXCLUDE_PATTERNS" , default_exclude_patterns ) # We're running in the top-level directory, but the doxygen # configuration file is in the same directory as node; this means # that relative pathnames in node must be adjusted before they can # go onto the sources list conf_dir = os . path . dirname ( str ( node ) ) for node in data . get ( "INPUT" , [ ] ) : if not os . path . isabs ( node ) : node = os . path . join ( conf_dir , node ) if os . path . isfile ( node ) : sources . append ( node ) elif os . path . isdir ( node ) : if recursive : for root , dirs , files in os . walk ( node ) : for f in files : filename = os . path . join ( root , f ) pattern_check = reduce ( lambda x , y : x or bool ( fnmatch ( filename , y ) ) , file_patterns , False ) exclude_check = reduce ( lambda x , y : x and fnmatch ( filename , y ) , exclude_patterns , True ) if pattern_check and not exclude_check : sources . append ( filename ) else : for pattern in file_patterns : sources . extend ( glob . glob ( "/" . join ( [ node , pattern ] ) ) ) # Add tagfiles to the list of source files: for node in data . get ( "TAGFILES" , [ ] ) : file = node . split ( "=" ) [ 0 ] if not os . path . isabs ( file ) : file = os . path . join ( conf_dir , file ) sources . append ( file ) # Add additional files to the list of source files: def append_additional_source ( option ) : file = data . get ( option , "" ) if file != "" : if not os . path . isabs ( file ) : file = os . path . join ( conf_dir , file ) if os . path . isfile ( file ) : sources . append ( file ) append_additional_source ( "HTML_STYLESHEET" ) append_additional_source ( "HTML_HEADER" ) append_additional_source ( "HTML_FOOTER" ) sources = map ( lambda path : env . File ( path ) , sources ) return sources
691
https://github.com/melizalab/libtfr/blob/9f7e7705793d258a0b205f185b20e3bbcda473da/site_scons/site_tools/doxygen.py#L99-L174
[ "def", "detach_volume", "(", "volume_id", ",", "profile", ",", "*", "*", "libcloud_kwargs", ")", ":", "conn", "=", "_get_driver", "(", "profile", "=", "profile", ")", "libcloud_kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", "*", "libcloud_kwargs", ")", "volume", "=", "_get_by_id", "(", "conn", ".", "list_volumes", "(", ")", ",", "volume_id", ")", "return", "conn", ".", "detach_volume", "(", "volume", ",", "*", "*", "libcloud_kwargs", ")" ]
Doxygen Doxyfile emitter
def DoxyEmitter ( source , target , env ) : # possible output formats and their default values and output locations output_formats = { "HTML" : ( "YES" , "html" ) , "LATEX" : ( "YES" , "latex" ) , "RTF" : ( "NO" , "rtf" ) , "MAN" : ( "YES" , "man" ) , "XML" : ( "NO" , "xml" ) , } data = DoxyfileParse ( source [ 0 ] . get_contents ( ) ) targets = [ ] out_dir = data . get ( "OUTPUT_DIRECTORY" , "." ) if not os . path . isabs ( out_dir ) : conf_dir = os . path . dirname ( str ( source [ 0 ] ) ) out_dir = os . path . join ( conf_dir , out_dir ) # add our output locations for ( k , v ) in output_formats . items ( ) : if data . get ( "GENERATE_" + k , v [ 0 ] ) == "YES" : targets . append ( env . Dir ( os . path . join ( out_dir , data . get ( k + "_OUTPUT" , v [ 1 ] ) ) ) ) # add the tag file if neccessary: tagfile = data . get ( "GENERATE_TAGFILE" , "" ) if tagfile != "" : if not os . path . isabs ( tagfile ) : conf_dir = os . path . dirname ( str ( source [ 0 ] ) ) tagfile = os . path . join ( conf_dir , tagfile ) targets . append ( env . File ( tagfile ) ) # don't clobber targets for node in targets : env . Precious ( node ) # set up cleaning stuff for node in targets : env . Clean ( node , node ) return ( targets , source )
692
https://github.com/melizalab/libtfr/blob/9f7e7705793d258a0b205f185b20e3bbcda473da/site_scons/site_tools/doxygen.py#L181-L221
[ "def", "indication", "(", "self", ",", "apdu", ")", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\"indication %r\"", ",", "apdu", ")", "if", "self", ".", "state", "==", "IDLE", ":", "self", ".", "idle", "(", "apdu", ")", "elif", "self", ".", "state", "==", "SEGMENTED_REQUEST", ":", "self", ".", "segmented_request", "(", "apdu", ")", "elif", "self", ".", "state", "==", "AWAIT_RESPONSE", ":", "self", ".", "await_response", "(", "apdu", ")", "elif", "self", ".", "state", "==", "SEGMENTED_RESPONSE", ":", "self", ".", "segmented_response", "(", "apdu", ")", "else", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\" - invalid state\"", ")" ]
Add builders and construction variables for the Doxygen tool . This is currently for Doxygen 1 . 4 . 6 .
def generate ( env ) : doxyfile_scanner = env . Scanner ( DoxySourceScan , "DoxySourceScan" , scan_check = DoxySourceScanCheck , ) import SCons . Builder doxyfile_builder = SCons . Builder . Builder ( action = "cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file}" , emitter = DoxyEmitter , target_factory = env . fs . Entry , single_source = True , source_scanner = doxyfile_scanner , ) env . Append ( BUILDERS = { 'Doxygen' : doxyfile_builder , } ) env . AppendUnique ( DOXYGEN = 'doxygen' , )
693
https://github.com/melizalab/libtfr/blob/9f7e7705793d258a0b205f185b20e3bbcda473da/site_scons/site_tools/doxygen.py#L223-L249
[ "def", "not_storable", "(", "_type", ")", ":", "return", "Storable", "(", "_type", ",", "handlers", "=", "StorableHandler", "(", "poke", "=", "fake_poke", ",", "peek", "=", "fail_peek", "(", "_type", ")", ")", ")" ]
Reset metric counter .
def reset ( self ) : self . _positions = [ ] self . _line = 1 self . _curr = None # current scope we are analyzing self . _scope = 0 self . language = None
694
https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/position.py#L87-L93
[ "def", "union", "(", "self", ",", "*", "dstreams", ")", ":", "if", "not", "dstreams", ":", "raise", "ValueError", "(", "\"should have at least one DStream to union\"", ")", "if", "len", "(", "dstreams", ")", "==", "1", ":", "return", "dstreams", "[", "0", "]", "if", "len", "(", "set", "(", "s", ".", "_jrdd_deserializer", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same serializer\"", ")", "if", "len", "(", "set", "(", "s", ".", "_slideDuration", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same slide duration\"", ")", "cls", "=", "SparkContext", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "streaming", ".", "api", ".", "java", ".", "JavaDStream", "jdstreams", "=", "SparkContext", ".", "_gateway", ".", "new_array", "(", "cls", ",", "len", "(", "dstreams", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "dstreams", ")", ")", ":", "jdstreams", "[", "i", "]", "=", "dstreams", "[", "i", "]", ".", "_jdstream", "return", "DStream", "(", "self", ".", "_jssc", ".", "union", "(", "jdstreams", ")", ",", "self", ",", "dstreams", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
we identified a scope and add it to positions .
def add_scope ( self , scope_type , scope_name , scope_start , is_method = False ) : if self . _curr is not None : self . _curr [ 'end' ] = scope_start - 1 # close last scope self . _curr = { 'type' : scope_type , 'name' : scope_name , 'start' : scope_start , 'end' : scope_start } if is_method and self . _positions : last = self . _positions [ - 1 ] if not 'methods' in last : last [ 'methods' ] = [ ] last [ 'methods' ] . append ( self . _curr ) else : self . _positions . append ( self . _curr )
695
https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/position.py#L95-L110
[ "def", "delete_topic_groups", "(", "self", ",", "group_id", ",", "topic_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"group_id\"", "]", "=", "group_id", "# REQUIRED - PATH - topic_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"topic_id\"", "]", "=", "topic_id", "self", ".", "logger", ".", "debug", "(", "\"DELETE /api/v1/groups/{group_id}/discussion_topics/{topic_id} with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"DELETE\"", ",", "\"/api/v1/groups/{group_id}/discussion_topics/{topic_id}\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "no_data", "=", "True", ")" ]
Count lines and track the positions of classes and functions
def process_token ( self , tok ) : if tok [ 0 ] == Token . Text : count = tok [ 1 ] . count ( '\n' ) if count : self . _line += count # adjust linecount if self . _detector . process ( tok ) : pass # works been completed in the detector elif tok [ 0 ] == Token . Punctuation : if tok [ 0 ] == Token . Punctuation and tok [ 1 ] == '{' : self . _scope += 1 if tok [ 0 ] == Token . Punctuation and tok [ 1 ] == '}' : self . _scope += - 1 if self . _scope == 0 and self . _curr is not None : self . _curr [ 'end' ] = self . _line # close last scope self . _curr = None elif tok [ 0 ] == Token . Name . Class and self . _scope == 0 : self . add_scope ( 'Class' , tok [ 1 ] , self . _line ) elif tok [ 0 ] == Token . Name . Function and self . _scope in [ 0 , 1 ] : self . add_scope ( 'Function' , tok [ 1 ] , self . _line , self . _scope == 1 )
696
https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/position.py#L112-L132
[ "def", "process_schema", "(", "doc", ",", "resource", ",", "df", ")", ":", "from", "rowgenerators", "import", "SourceError", "from", "requests", ".", "exceptions", "import", "ConnectionError", "from", "metapack", ".", "cli", ".", "core", "import", "extract_path_name", ",", "alt_col_name", ",", "type_map", "from", "tableintuit", "import", "TypeIntuiter", "from", "rowgenerators", ".", "generator", ".", "python", "import", "PandasDataframeSource", "from", "appurl", "import", "parse_app_url", "try", ":", "doc", "[", "'Schema'", "]", "except", "KeyError", ":", "doc", ".", "new_section", "(", "'Schema'", ",", "[", "'DataType'", ",", "'Altname'", ",", "'Description'", "]", ")", "schema_name", "=", "resource", ".", "get_value", "(", "'schema'", ",", "resource", ".", "get_value", "(", "'name'", ")", ")", "schema_term", "=", "doc", ".", "find_first", "(", "term", "=", "'Table'", ",", "value", "=", "schema_name", ",", "section", "=", "'Schema'", ")", "if", "schema_term", ":", "logger", ".", "info", "(", "\"Found table for '{}'; skipping\"", ".", "format", "(", "schema_name", ")", ")", "return", "path", ",", "name", "=", "extract_path_name", "(", "resource", ".", "url", ")", "logger", ".", "info", "(", "\"Processing {}\"", ".", "format", "(", "resource", ".", "url", ")", ")", "si", "=", "PandasDataframeSource", "(", "parse_app_url", "(", "resource", ".", "url", ")", ",", "df", ",", "cache", "=", "doc", ".", "_cache", ",", ")", "try", ":", "ti", "=", "TypeIntuiter", "(", ")", ".", "run", "(", "si", ")", "except", "SourceError", "as", "e", ":", "logger", ".", "warn", "(", "\"Failed to process '{}'; {}\"", ".", "format", "(", "path", ",", "e", ")", ")", "return", "except", "ConnectionError", "as", "e", ":", "logger", ".", "warn", "(", "\"Failed to download '{}'; {}\"", ".", "format", "(", "path", ",", "e", ")", ")", "return", "table", "=", "doc", "[", "'Schema'", "]", ".", "new_term", "(", "'Table'", ",", "schema_name", ")", "logger", ".", "info", "(", "\"Adding table '{}' to metatab schema\"", ".", "format", "(", "schema_name", ")", ")", "for", "i", ",", "c", "in", "enumerate", "(", "ti", ".", "to_rows", "(", ")", ")", ":", "raw_alt_name", "=", "alt_col_name", "(", "c", "[", "'header'", "]", ",", "i", ")", "alt_name", "=", "raw_alt_name", "if", "raw_alt_name", "!=", "c", "[", "'header'", "]", "else", "''", "t", "=", "table", ".", "new_child", "(", "'Column'", ",", "c", "[", "'header'", "]", ",", "datatype", "=", "type_map", ".", "get", "(", "c", "[", "'resolved_type'", "]", ",", "c", "[", "'resolved_type'", "]", ")", ",", "altname", "=", "alt_name", ",", "description", "=", "df", "[", "c", "[", "'header'", "]", "]", ".", "description", "if", "hasattr", "(", "df", ",", "'description'", ")", "and", "df", "[", "c", "[", "'header'", "]", "]", ".", "description", "else", "''", ")", "return", "table" ]
Returns data from the OPF model .
def _unpack_model ( self , om ) : buses = om . case . connected_buses branches = om . case . online_branches gens = om . case . online_generators cp = om . get_cost_params ( ) # Bf = om._Bf # Pfinj = om._Pfinj return buses , branches , gens , cp
697
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/solver.py#L78-L90
[ "def", "append_to_multiple", "(", "self", ",", "d", ",", "value", ",", "selector", ",", "data_columns", "=", "None", ",", "axes", "=", "None", ",", "dropna", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "axes", "is", "not", "None", ":", "raise", "TypeError", "(", "\"axes is currently not accepted as a parameter to\"", "\" append_to_multiple; you can create the \"", "\"tables independently instead\"", ")", "if", "not", "isinstance", "(", "d", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"append_to_multiple must have a dictionary specified as the \"", "\"way to split the value\"", ")", "if", "selector", "not", "in", "d", ":", "raise", "ValueError", "(", "\"append_to_multiple requires a selector that is in passed dict\"", ")", "# figure out the splitting axis (the non_index_axis)", "axis", "=", "list", "(", "set", "(", "range", "(", "value", ".", "ndim", ")", ")", "-", "set", "(", "_AXES_MAP", "[", "type", "(", "value", ")", "]", ")", ")", "[", "0", "]", "# figure out how to split the value", "remain_key", "=", "None", "remain_values", "=", "[", "]", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "v", "is", "None", ":", "if", "remain_key", "is", "not", "None", ":", "raise", "ValueError", "(", "\"append_to_multiple can only have one value in d that \"", "\"is None\"", ")", "remain_key", "=", "k", "else", ":", "remain_values", ".", "extend", "(", "v", ")", "if", "remain_key", "is", "not", "None", ":", "ordered", "=", "value", ".", "axes", "[", "axis", "]", "ordd", "=", "ordered", ".", "difference", "(", "Index", "(", "remain_values", ")", ")", "ordd", "=", "sorted", "(", "ordered", ".", "get_indexer", "(", "ordd", ")", ")", "d", "[", "remain_key", "]", "=", "ordered", ".", "take", "(", "ordd", ")", "# data_columns", "if", "data_columns", "is", "None", ":", "data_columns", "=", "d", "[", "selector", "]", "# ensure rows are synchronized across the tables", "if", "dropna", ":", "idxs", "=", "(", "value", "[", "cols", "]", ".", "dropna", "(", "how", "=", "'all'", ")", ".", "index", "for", "cols", "in", "d", ".", "values", "(", ")", ")", "valid_index", "=", "next", "(", "idxs", ")", "for", "index", "in", "idxs", ":", "valid_index", "=", "valid_index", ".", "intersection", "(", "index", ")", "value", "=", "value", ".", "loc", "[", "valid_index", "]", "# append", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "dc", "=", "data_columns", "if", "k", "==", "selector", "else", "None", "# compute the val", "val", "=", "value", ".", "reindex", "(", "v", ",", "axis", "=", "axis", ")", "self", ".", "append", "(", "k", ",", "val", ",", "data_columns", "=", "dc", ",", "*", "*", "kwargs", ")" ]
Returns the problem dimensions .
def _dimension_data ( self , buses , branches , generators ) : ipol = [ i for i , g in enumerate ( generators ) if g . pcost_model == POLYNOMIAL ] ipwl = [ i for i , g in enumerate ( generators ) if g . pcost_model == PW_LINEAR ] nb = len ( buses ) nl = len ( branches ) # Number of general cost vars, w. nw = self . om . cost_N # Number of piece-wise linear costs. if "y" in [ v . name for v in self . om . vars ] : ny = self . om . get_var_N ( "y" ) else : ny = 0 # Total number of control variables of all types. nxyz = self . om . var_N return ipol , ipwl , nb , nl , nw , ny , nxyz
698
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/solver.py#L93-L112
[ "def", "ttl", "(", "self", ",", "value", ")", ":", "# get timer", "timer", "=", "getattr", "(", "self", ",", "Annotation", ".", "__TIMER", ",", "None", ")", "# if timer is running, stop the timer", "if", "timer", "is", "not", "None", ":", "timer", ".", "cancel", "(", ")", "# initialize timestamp", "timestamp", "=", "None", "# if value is None", "if", "value", "is", "None", ":", "# nonify timer", "timer", "=", "None", "else", ":", "# else, renew a timer", "# get timestamp", "timestamp", "=", "time", "(", ")", "+", "value", "# start a new timer", "timer", "=", "Timer", "(", "value", ",", "self", ".", "__del__", ")", "timer", ".", "start", "(", ")", "# set/update attributes", "setattr", "(", "self", ",", "Annotation", ".", "__TIMER", ",", "timer", ")", "setattr", "(", "self", ",", "Annotation", ".", "__TS", ",", "timestamp", ")" ]
Returns the linear problem constraints .
def _linear_constraints ( self , om ) : A , l , u = om . linear_constraints ( ) # l <= A*x <= u # Indexes for equality, greater than (unbounded above), less than # (unbounded below) and doubly-bounded box constraints. # ieq = flatnonzero( abs(u - l) <= EPS ) # igt = flatnonzero( (u >= 1e10) & (l > -1e10) ) # ilt = flatnonzero( (l <= -1e10) & (u < 1e10) ) # ibx = flatnonzero( (abs(u - l) > EPS) & (u < 1e10) & (l > -1e10) ) # Zero-sized sparse matrices not supported. Assume equality # constraints exist. ## AA = A[ieq, :] ## if len(ilt) > 0: ## AA = vstack([AA, A[ilt, :]], "csr") ## if len(igt) > 0: ## AA = vstack([AA, -A[igt, :]], "csr") ## if len(ibx) > 0: ## AA = vstack([AA, A[ibx, :], -A[ibx, :]], "csr") # # if len(ieq) or len(igt) or len(ilt) or len(ibx): # sig_idx = [(1, ieq), (1, ilt), (-1, igt), (1, ibx), (-1, ibx)] # AA = vstack([sig * A[idx, :] for sig, idx in sig_idx if len(idx)]) # else: # AA = None # # bb = r_[u[ieq, :], u[ilt], -l[igt], u[ibx], -l[ibx]] # # self._nieq = ieq.shape[0] # # return AA, bb return A , l , u
699
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/solver.py#L115-L149
[ "def", "join_time_series", "(", "serieses", ",", "ignore_year", "=", "False", ",", "T_s", "=", "None", ",", "aggregator", "=", "'mean'", ")", ":", "if", "ignore_year", ":", "df", "=", "pd", ".", "DataFrame", "(", ")", "for", "name", ",", "ts", "in", "serieses", ".", "iteritems", "(", ")", ":", "# FIXME: deal with leap years", "sod", "=", "np", ".", "array", "(", "map", "(", "lambda", "x", ":", "(", "x", ".", "hour", "*", "3600", "+", "x", ".", "minute", "*", "60", "+", "x", ".", "second", ")", ",", "ts", ".", "index", ".", "time", ")", ")", "# Coerce soy to an integer so that merge/join operations identify same values", "# (floats don't equal!?)", "soy", "=", "(", "ts", ".", "index", ".", "dayofyear", "+", "366", "*", "(", "ts", ".", "index", ".", "year", "-", "ts", ".", "index", ".", "year", "[", "0", "]", ")", ")", "*", "3600", "*", "24", "+", "sod", "ts2", "=", "pd", ".", "Series", "(", "ts", ".", "values", ",", "index", "=", "soy", ")", "ts2", "=", "ts2", ".", "dropna", "(", ")", "ts2", "=", "ts2", ".", "sort_index", "(", ")", "df2", "=", "pd", ".", "DataFrame", "(", "{", "name", ":", "ts2", ".", "values", "}", ",", "index", "=", "soy", ")", "df", "=", "df", ".", "join", "(", "df2", ",", "how", "=", "'outer'", ")", "if", "T_s", "and", "aggregator", ":", "df", "=", "df", ".", "groupby", "(", "lambda", "x", ":", "int", "(", "x", "/", "float", "(", "T_s", ")", ")", ")", ".", "aggregate", "(", "dict", "(", "(", "name", ",", "aggregator", ")", "for", "name", "in", "df", ".", "columns", ")", ")", "else", ":", "df", "=", "pd", ".", "DataFrame", "(", "serieses", ")", "if", "T_s", "and", "aggregator", ":", "x0", "=", "df", ".", "index", "[", "0", "]", "df", "=", "df", ".", "groupby", "(", "lambda", "x", ":", "int", "(", "(", "x", "-", "x0", ")", ".", "total_seconds", "(", ")", "/", "float", "(", "T_s", ")", ")", ")", ".", "aggregate", "(", "dict", "(", "(", "name", ",", "aggregator", ")", "for", "name", "in", "df", ".", "columns", ")", ")", "# FIXME: convert seconds since begninning of first year back into Timestamp instances", "return", "df" ]