Columns: idx (int64, 0 to 63k), question (string, length 61 to 4.03k), target (string, length 6 to 1.23k)
62,200
def startInventory ( self , proto = None , force_regen_rospec = False ) : if self . state == LLRPClient . STATE_INVENTORYING : logger . warn ( 'ignoring startInventory() while already inventorying' ) return None rospec = self . getROSpec ( force_new = force_regen_rospec ) [ 'ROSpec' ] logger . info ( 'starting inventory' ) enabled_rospec = defer . Deferred ( ) enabled_rospec . addCallback ( self . _setState_wrapper , LLRPClient . STATE_INVENTORYING ) enabled_rospec . addErrback ( self . panic , 'ENABLE_ROSPEC failed' ) logger . debug ( 'made enabled_rospec' ) added_rospec = defer . Deferred ( ) added_rospec . addCallback ( self . send_ENABLE_ROSPEC , rospec , onCompletion = enabled_rospec ) added_rospec . addErrback ( self . panic , 'ADD_ROSPEC failed' ) logger . debug ( 'made added_rospec' ) self . send_ADD_ROSPEC ( rospec , onCompletion = added_rospec )
Add a ROSpec to the reader and enable it .
62,201
def stopPolitely ( self , disconnect = False ) : logger . info ( 'stopping politely' ) if disconnect : logger . info ( 'will disconnect when stopped' ) self . disconnecting = True self . sendMessage ( { 'DELETE_ACCESSSPEC' : { 'Ver' : 1 , 'Type' : 41 , 'ID' : 0 , 'AccessSpecID' : 0 } } ) self . setState ( LLRPClient . STATE_SENT_DELETE_ACCESSSPEC ) d = defer . Deferred ( ) d . addCallback ( self . stopAllROSpecs ) d . addErrback ( self . panic , 'DELETE_ACCESSSPEC failed' ) self . _deferreds [ 'DELETE_ACCESSSPEC_RESPONSE' ] . append ( d ) return d
Delete all active ROSpecs . Return a Deferred that will be called when the DELETE_ROSPEC_RESPONSE comes back .
62,202
def parsePowerTable ( uhfbandcap ) : bandtbl = { k : v for k , v in uhfbandcap . items ( ) if k . startswith ( 'TransmitPowerLevelTableEntry' ) } tx_power_table = [ 0 ] * ( len ( bandtbl ) + 1 ) for k , v in bandtbl . items ( ) : idx = v [ 'Index' ] tx_power_table [ idx ] = int ( v [ 'TransmitPowerValue' ] ) / 100.0 return tx_power_table
Parse the transmit power table
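For reference, a minimal sketch of the capability dict parsePowerTable expects, assuming TransmitPowerValue is reported in hundredths of a dBm as the division by 100.0 suggests (the entry contents below are invented):

uhfbandcap = {
    'TransmitPowerLevelTableEntry1': {'Index': 1, 'TransmitPowerValue': 1000},  # 10.00 dBm
    'TransmitPowerLevelTableEntry2': {'Index': 2, 'TransmitPowerValue': 1500},  # 15.00 dBm
    'FrequencyInformation': {},  # ignored: key does not start with 'TransmitPowerLevelTableEntry'
}
# parsePowerTable(uhfbandcap) -> [0, 10.0, 15.0]
# Slot 0 is a padding entry; slot N holds the dBm value for table entry N.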
62,203
def get_tx_power ( self , tx_power ) : if not self . tx_power_table : logger . warn ( 'get_tx_power(): tx_power_table is empty!' ) return { } logger . debug ( 'requested tx_power: %s' , tx_power ) min_power = self . tx_power_table . index ( min ( self . tx_power_table ) ) max_power = self . tx_power_table . index ( max ( self . tx_power_table ) ) ret = { } for antid , tx_power in tx_power . items ( ) : if tx_power == 0 : max_power_dbm = max ( self . tx_power_table ) tx_power = self . tx_power_table . index ( max_power_dbm ) ret [ antid ] = ( tx_power , max_power_dbm ) try : power_dbm = self . tx_power_table [ tx_power ] ret [ antid ] = ( tx_power , power_dbm ) except IndexError : raise LLRPError ( 'Invalid tx_power for antenna {}: ' 'requested={}, min_available={}, ' 'max_available={}' . format ( antid , self . tx_power , min_power , max_power ) ) return ret
Validates tx_power against self . tx_power_table
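A hedged example of the shapes involved, assuming the instance's tx_power_table looks like the list produced by parsePowerTable above and that tx_power maps antenna id to an index into that table (0 meaning maximum power); the numbers are invented:

tx_power_table = [0, 10.0, 15.0, 20.0]   # index -> dBm
requested = {1: 2, 2: 0}
# get_tx_power(requested) -> {1: (2, 15.0), 2: (3, 20.0)}
# Antenna 2 asked for 0, so it is mapped to the highest entry in the table.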
62,204
def setTxPower ( self , tx_power ) : tx_pow_validated = self . get_tx_power ( tx_power ) logger . debug ( 'tx_pow_validated: %s' , tx_pow_validated ) needs_update = False for ant , ( tx_pow_idx , tx_pow_dbm ) in tx_pow_validated . items ( ) : if self . tx_power [ ant ] != tx_pow_idx : self . tx_power [ ant ] = tx_pow_idx needs_update = True logger . debug ( 'tx_power for antenna %s: %s (%s dBm)' , ant , tx_pow_idx , tx_pow_dbm ) if needs_update and self . state == LLRPClient . STATE_INVENTORYING : logger . debug ( 'changing tx power; will stop politely, then resume' ) d = self . stopPolitely ( ) d . addCallback ( self . startInventory , force_regen_rospec = True )
Set the transmission power for one or more antennas .
62,205
def pause ( self , duration_seconds = 0 , force = False , force_regen_rospec = False ) : logger . debug ( 'pause(%s)' , duration_seconds ) if self . state != LLRPClient . STATE_INVENTORYING : if not force : logger . info ( 'ignoring pause(); not inventorying (state==%s)' , self . getStateName ( self . state ) ) return None else : logger . info ( 'forcing pause()' ) if duration_seconds : logger . info ( 'pausing for %s seconds' , duration_seconds ) rospec = self . getROSpec ( force_new = force_regen_rospec ) [ 'ROSpec' ] self . sendMessage ( { 'DISABLE_ROSPEC' : { 'Ver' : 1 , 'Type' : 25 , 'ID' : 0 , 'ROSpecID' : rospec [ 'ROSpecID' ] } } ) self . setState ( LLRPClient . STATE_PAUSING ) d = defer . Deferred ( ) d . addCallback ( self . _setState_wrapper , LLRPClient . STATE_PAUSED ) d . addErrback ( self . complain , 'pause() failed' ) self . _deferreds [ 'DISABLE_ROSPEC_RESPONSE' ] . append ( d ) if duration_seconds > 0 : startAgain = task . deferLater ( reactor , duration_seconds , lambda : None ) startAgain . addCallback ( lambda _ : self . resume ( ) ) return d
Pause an inventory operation for a set amount of time .
62,206
def sendMessage ( self , msg_dict ) : sent_ids = [ ] for name in msg_dict : self . last_msg_id += 1 msg_dict [ name ] [ 'ID' ] = self . last_msg_id sent_ids . append ( ( name , self . last_msg_id ) ) llrp_msg = LLRPMessage ( msgdict = msg_dict ) assert llrp_msg . msgbytes , "LLRPMessage is empty" self . transport . write ( llrp_msg . msgbytes ) return sent_ids
Serialize and send a dict LLRP Message
62,207
def buildProtocol ( self , addr ) : self . resetDelay ( ) clargs = self . client_args . copy ( ) hostport = '{}:{}' . format ( addr . host , addr . port ) logger . debug ( 'Building protocol for %s' , hostport ) if hostport in self . antenna_dict : clargs [ 'antennas' ] = [ int ( x ) for x in self . antenna_dict [ hostport ] . keys ( ) ] elif addr . host in self . antenna_dict : clargs [ 'antennas' ] = [ int ( x ) for x in self . antenna_dict [ addr . host ] . keys ( ) ] logger . debug ( 'Antennas in buildProtocol: %s' , clargs . get ( 'antennas' ) ) logger . debug ( '%s start_inventory: %s' , hostport , clargs . get ( 'start_inventory' ) ) if self . start_first and not self . protocols : clargs [ 'start_inventory' ] = True proto = LLRPClient ( factory = self , ** clargs ) for state , cbs in self . _state_callbacks . items ( ) : for cb in cbs : proto . addStateCallback ( state , cb ) for msg_type , cbs in self . _message_callbacks . items ( ) : for cb in cbs : proto . addMessageCallback ( msg_type , cb ) return proto
Get a new LLRP client protocol object .
62,208
def setTxPower ( self , tx_power , peername = None ) : if peername : protocols = [ p for p in self . protocols if p . peername [ 0 ] == peername ] else : protocols = self . protocols for proto in protocols : proto . setTxPower ( tx_power )
Set the transmit power on one or all readers
62,209
def politeShutdown ( self ) : protoDeferreds = [ ] for proto in self . protocols : protoDeferreds . append ( proto . stopPolitely ( disconnect = True ) ) return defer . DeferredList ( protoDeferreds )
Stop inventory on all connected readers .
62,210
def parse_sgtin_96 ( sgtin_96 ) : if not sgtin_96 : raise Exception ( 'Pass in a value.' ) if not sgtin_96 . startswith ( "30" ) : raise Exception ( 'Not SGTIN-96.' ) binary = "{0:020b}" . format ( int ( sgtin_96 , 16 ) ) . zfill ( 96 ) header = int ( binary [ : 8 ] , 2 ) tag_filter = int ( binary [ 8 : 11 ] , 2 ) partition = binary [ 11 : 14 ] partition_value = int ( partition , 2 ) m , l , n , k = SGTIN_96_PARTITION_MAP [ partition_value ] company_start = 8 + 3 + 3 company_end = company_start + m company_data = int ( binary [ company_start : company_end ] , 2 ) if company_data > pow ( 10 , l ) : raise Exception ( 'Company value is too large' ) company_prefix = str ( company_data ) . zfill ( l ) item_start = company_end item_end = item_start + n item_data = binary [ item_start : item_end ] item_number = int ( item_data , 2 ) item_reference = str ( item_number ) . zfill ( k ) serial = int ( binary [ - 38 : ] , 2 ) return { "header" : header , "filter" : tag_filter , "partition" : partition , "company_prefix" : company_prefix , "item_reference" : item_reference , "serial" : serial }
Given an SGTIN-96 hex string, parse each segment. Returns a dictionary of the segments.
62,211
def decode_param ( data ) : logger . debug ( 'decode_param data: %r' , data ) header_len = struct . calcsize ( '!HH' ) partype , parlen = struct . unpack ( '!HH' , data [ : header_len ] ) pardata = data [ header_len : parlen ] logger . debug ( 'decode_param pardata: %r' , pardata ) ret = { 'Type' : partype , } if partype == 1023 : vsfmt = '!II' vendor , subtype = struct . unpack ( vsfmt , pardata [ : struct . calcsize ( vsfmt ) ] ) ret [ 'Vendor' ] = vendor ret [ 'Subtype' ] = subtype ret [ 'Data' ] = pardata [ struct . calcsize ( vsfmt ) : ] else : ret [ 'Data' ] = pardata return ret , data [ parlen : ]
Decode a single parameter from a byte sequence. Returns the decoded parameter dict and the remaining bytes.
62,212
def download_files ( file_list ) : for _ , source_data_file in file_list : sql_gz_name = source_data_file [ 'name' ] . split ( '/' ) [ - 1 ] msg = 'Downloading: %s' % ( sql_gz_name ) log . debug ( msg ) new_data = objectstore . get_object ( handelsregister_conn , source_data_file , 'handelsregister' ) with open ( 'data/{}' . format ( sql_gz_name ) , 'wb' ) as outputzip : outputzip . write ( new_data )
Download the latest data .
62,213
def get_connection ( store_settings : dict = { } ) -> Connection : store = store_settings if not store_settings : store = make_config_from_env ( ) os_options = { 'tenant_id' : store [ 'TENANT_ID' ] , 'region_name' : store [ 'REGION_NAME' ] , } use_internal = os . getenv ( 'OBJECTSTORE_LOCAL' , '' ) if use_internal : os_options [ 'endpoint_type' ] = 'internalURL' connection = Connection ( authurl = store [ 'AUTHURL' ] , user = store [ 'USER' ] , key = store [ 'PASSWORD' ] , tenant_name = store [ 'TENANT_NAME' ] , auth_version = store [ 'VERSION' ] , os_options = os_options ) return connection
Get an objectstore connection.
62,214
def get_object ( connection , object_meta_data : dict , dirname : str ) : return connection . get_object ( dirname , object_meta_data [ 'name' ] ) [ 1 ]
Download an object from the objectstore. object_meta_data is an object returned by get_full_container_list.
62,215
def put_object ( connection , container : str , object_name : str , contents , content_type : str ) -> None : connection . put_object ( container , object_name , contents = contents , content_type = content_type )
Put file to objectstore
62,216
def delete_object ( connection , container : str , object_meta_data : dict ) -> None : connection . delete_object ( container , object_meta_data [ 'name' ] )
Delete single object from objectstore
62,217
def return_file_objects ( connection , container , prefix = 'database' ) : options = [ ] meta_data = objectstore . get_full_container_list ( connection , container , prefix = 'database' ) env = ENV . upper ( ) for o_info in meta_data : expected_file = f'database.{ENV}' if o_info [ 'name' ] . startswith ( expected_file ) : dt = dateparser . parse ( o_info [ 'last_modified' ] ) now = datetime . datetime . now ( ) delta = now - dt LOG . debug ( 'AGE: %d %s' , delta . days , expected_file ) options . append ( ( dt , o_info ) ) options . sort ( ) return options
Given a connection and container, find database dumps.
62,218
def remove_old_dumps ( connection , container : str , days = None ) : if not days : return if days < 20 : LOG . error ( 'A minimum of 20 backups is stored' ) return options = return_file_objects ( connection , container ) for dt , o_info in options : now = datetime . datetime . now ( ) delta = now - dt if delta . days > days : LOG . info ( 'Deleting %s' , o_info [ 'name' ] ) objectstore . delete_object ( connection , container , o_info )
Remove dumps older than x days
62,219
def download_database ( connection , container : str , target : str = "" ) : meta_data = objectstore . get_full_container_list ( connection , container , prefix = 'database' ) options = return_file_objects ( connection , container ) for o_info in meta_data : expected_file = f'database.{ENV}' LOG . info ( o_info [ 'name' ] ) if o_info [ 'name' ] . startswith ( expected_file ) : dt = dateparser . parse ( o_info [ 'last_modified' ] ) options . append ( ( dt , o_info ) ) options . sort ( ) if not options : LOG . error ( 'Dumps missing? ENVIRONMENT wrong? (acceptance / production' ) LOG . error ( 'Environtment {ENV}' ) sys . exit ( 1 ) newest = options [ - 1 ] [ 1 ] LOG . debug ( 'Downloading: %s' , ( newest [ 'name' ] ) ) target_file = os . path . join ( target , expected_file ) LOG . info ( 'TARGET: %s' , target_file ) if os . path . exists ( target_file ) : LOG . info ( 'Already downloaded' ) return LOG . error ( 'TARGET does not exists downloading...' ) new_data = objectstore . get_object ( connection , newest , container ) with open ( target_file , 'wb' ) as outputzip : outputzip . write ( new_data )
Download database dump
62,220
def remember ( self , user_name ) : log . debug ( 'Repoze OAuth remember' ) environ = toolkit . request . environ rememberer = self . _get_rememberer ( environ ) identity = { 'repoze.who.userid' : user_name } headers = rememberer . remember ( environ , identity ) for header , value in headers : toolkit . response . headers . add ( header , value )
Remember the authenticated identity .
62,221
def redirect_from_callback ( self ) : state = toolkit . request . params . get ( 'state' ) came_from = get_came_from ( state ) toolkit . response . status = 302 toolkit . response . location = came_from
Redirect to the callback URL after a successful authentication .
62,222
def can_share_folder ( self , user , folder ) : return folder . parent_id is None and folder . author_id == user . id
Return True if user can share folder .
62,223
def storage_color ( self , user_storage ) : p = user_storage . percentage if p >= 0 and p < 60 : return "success" if p >= 60 and p < 90 : return "warning" if p >= 90 and p <= 100 : return "danger" raise ValueError ( "percentage out of range" )
Return labels indicating amount of storage used .
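A standalone restatement of the thresholds, handy for seeing the boundaries at a glance (this free function is an illustrative stand-in for the method above):

def storage_color(percentage):
    # Standalone restatement of the thresholds in the method above.
    if 0 <= percentage < 60:
        return "success"
    if 60 <= percentage < 90:
        return "warning"
    if 90 <= percentage <= 100:
        return "danger"
    raise ValueError("percentage out of range")

assert storage_color(10) == "success"
assert storage_color(75) == "warning"
assert storage_color(95) == "danger"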
62,224
def folder_created_message ( self , request , folder ) : messages . success ( request , _ ( "Folder {} was created" . format ( folder ) ) )
Send messages . success message after successful folder creation .
62,225
def document_created_message ( self , request , document ) : messages . success ( request , _ ( "Document {} was created" . format ( document ) ) )
Send messages . success message after successful document creation .
62,226
def folder_shared_message ( self , request , user , folder ) : messages . success ( request , _ ( "Folder {} is now shared with {}" . format ( folder , user ) ) )
Send messages . success message after successful share .
62,227
def folder_pre_delete ( self , request , folder ) : for m in folder . members ( ) : if m . __class__ == folder . __class__ : self . folder_pre_delete ( request , m ) m . delete ( )
Perform folder operations prior to deletion, for example deleting all contents.
62,228
def file_upload_to ( self , instance , filename ) : ext = filename . split ( "." ) [ - 1 ] filename = "{}.{}" . format ( uuid . uuid4 ( ) , ext ) return os . path . join ( "document" , filename )
Callable passed to the FileField's upload_to kwarg on Document.file.
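A standalone sketch of the same naming scheme (example_upload_to is an illustrative stand-in for the method above; the filename is invented):

import os
import uuid

def example_upload_to(filename):
    # Keep the extension, replace the stem with a random uuid4, store under "document/".
    ext = filename.split(".")[-1]
    return os.path.join("document", "{}.{}".format(uuid.uuid4(), ext))

print(example_upload_to("report.pdf"))   # e.g. document/6f1c2b3a-....pdf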
62,229
def for_user ( self , user ) : qs = SharedMemberQuerySet ( model = self . model , using = self . _db , user = user ) qs = qs . filter ( Q ( author = user ) | Q ( foldershareduser__user = user ) ) return qs . distinct ( ) & self . distinct ( )
All folders the given user can do something with .
62,230
def _parse_list ( cls , args ) : argparser = ArgumentParser ( prog = "cluster list" ) group = argparser . add_mutually_exclusive_group ( ) group . add_argument ( "--id" , dest = "cluster_id" , help = "show cluster with this id" ) group . add_argument ( "--label" , dest = "label" , help = "show cluster with this label" ) group . add_argument ( "--state" , dest = "state" , action = "store" , choices = [ 'up' , 'down' , 'pending' , 'terminating' ] , help = "list only clusters in the given state" ) pagination_group = group . add_argument_group ( ) pagination_group . add_argument ( "--page" , dest = "page" , action = "store" , type = int , help = "page number" ) pagination_group . add_argument ( "--per-page" , dest = "per_page" , action = "store" , type = int , help = "number of clusters to be retrieved per page" ) arguments = argparser . parse_args ( args ) return vars ( arguments )
Parse command line arguments to construct a dictionary of cluster parameters that can be used to determine which clusters to list .
62,231
def _parse_cluster_manage_command ( cls , args , action ) : argparser = ArgumentParser ( prog = "cluster_manage_command" ) group = argparser . add_mutually_exclusive_group ( required = True ) group . add_argument ( "--id" , dest = "cluster_id" , help = "execute on cluster with this id" ) group . add_argument ( "--label" , dest = "label" , help = "execute on cluster with this label" ) if action == "remove" or action == "update" : argparser . add_argument ( "--private_dns" , help = "the private_dns of the machine to be updated/removed" , required = True ) if action == "update" : argparser . add_argument ( "--command" , help = "the update command to be executed" , required = True , choices = [ "replace" ] ) arguments = argparser . parse_args ( args ) return arguments
Parse command line arguments for cluster manage commands .
62,232
def _parse_reassign_label ( cls , args ) : argparser = ArgumentParser ( prog = "cluster reassign_label" ) argparser . add_argument ( "destination_cluster" , metavar = "destination_cluster_id_label" , help = "id/label of the cluster to move the label to" ) argparser . add_argument ( "label" , help = "label to be moved from the source cluster" ) arguments = argparser . parse_args ( args ) return arguments
Parse command line arguments for reassigning label .
62,233
def reassign_label ( cls , destination_cluster , label ) : conn = Qubole . agent ( version = Cluster . api_version ) data = { "destination_cluster" : destination_cluster , "label" : label } return conn . put ( cls . rest_entity_path + "/reassign-label" , data )
Reassign a label from one cluster to another .
62,234
def _parse_snapshot_restore_command ( cls , args , action ) : argparser = ArgumentParser ( prog = "cluster %s" % action ) group = argparser . add_mutually_exclusive_group ( required = True ) group . add_argument ( "--id" , dest = "cluster_id" , help = "execute on cluster with this id" ) group . add_argument ( "--label" , dest = "label" , help = "execute on cluster with this label" ) argparser . add_argument ( "--s3_location" , help = "s3_location where backup is stored" , required = True ) if action == "snapshot" : argparser . add_argument ( "--backup_type" , help = "backup_type: full/incremental, default is full" ) elif action == "restore_point" : argparser . add_argument ( "--backup_id" , help = "back_id from which restoration will be done" , required = True ) argparser . add_argument ( "--table_names" , help = "table(s) which are to be restored" , required = True ) argparser . add_argument ( "--no-overwrite" , action = "store_false" , help = "With this option, restore overwrites to the existing table if theres any in restore target" ) argparser . add_argument ( "--no-automatic" , action = "store_false" , help = "With this option, all the dependencies are automatically restored together with this backup image following the correct order" ) arguments = argparser . parse_args ( args ) return arguments
Parse command line arguments for the snapshot and restore_point commands.
62,235
def restore_point ( cls , cluster_id_label , s3_location , backup_id , table_names , overwrite = True , automatic = True ) : conn = Qubole . agent ( version = Cluster . api_version ) parameters = { } parameters [ 's3_location' ] = s3_location parameters [ 'backup_id' ] = backup_id parameters [ 'table_names' ] = table_names parameters [ 'overwrite' ] = overwrite parameters [ 'automatic' ] = automatic return conn . post ( cls . element_path ( cluster_id_label ) + "/restore_point" , data = parameters )
Restore a cluster from a given HBase snapshot ID.
62,236
def update_snapshot_schedule ( cls , cluster_id_label , s3_location = None , frequency_unit = None , frequency_num = None , status = None ) : conn = Qubole . agent ( version = Cluster . api_version ) data = { } if s3_location is not None : data [ "s3_location" ] = s3_location if frequency_unit is not None : data [ "frequency_unit" ] = frequency_unit if frequency_num is not None : data [ "frequency_num" ] = frequency_num if status is not None : data [ "status" ] = status return conn . put ( cls . element_path ( cluster_id_label ) + "/snapshot_schedule" , data )
Update the snapshot schedule of a cluster.
62,237
def set_spot_instance_settings ( self , maximum_bid_price_percentage = None , timeout_for_request = None , maximum_spot_instance_percentage = None ) : self . hadoop_settings [ 'spot_instance_settings' ] = { 'maximum_bid_price_percentage' : maximum_bid_price_percentage , 'timeout_for_request' : timeout_for_request , 'maximum_spot_instance_percentage' : maximum_spot_instance_percentage }
Purchase options for spot instances . Valid only when slave_request_type is hybrid or spot .
62,238
def set_stable_spot_instance_settings ( self , maximum_bid_price_percentage = None , timeout_for_request = None , allow_fallback = True ) : self . hadoop_settings [ 'stable_spot_instance_settings' ] = { 'maximum_bid_price_percentage' : maximum_bid_price_percentage , 'timeout_for_request' : timeout_for_request , 'allow_fallback' : allow_fallback }
Purchase options for stable spot instances .
62,239
def minimal_payload ( self ) : payload_dict = self . __dict__ payload_dict . pop ( "api_version" , None ) return util . _make_minimal ( payload_dict )
This method can be used to create the payload which is sent while creating or updating a cluster .
62,240
def _handle_error ( response ) : code = response . status_code if 200 <= code < 400 : return if code == 400 : sys . stderr . write ( response . text + "\n" ) raise BadRequest ( response ) elif code == 401 : sys . stderr . write ( response . text + "\n" ) raise UnauthorizedAccess ( response ) elif code == 403 : sys . stderr . write ( response . text + "\n" ) raise ForbiddenAccess ( response ) elif code == 404 : sys . stderr . write ( response . text + "\n" ) raise ResourceNotFound ( response ) elif code == 405 : sys . stderr . write ( response . text + "\n" ) raise MethodNotAllowed ( response ) elif code == 409 : sys . stderr . write ( response . text + "\n" ) raise ResourceConflict ( response ) elif code == 422 : sys . stderr . write ( response . text + "\n" ) raise ResourceInvalid ( response ) elif code in ( 449 , 502 , 503 , 504 ) : sys . stderr . write ( response . text + "\n" ) raise RetryWithDelay ( response ) elif 401 <= code < 500 : sys . stderr . write ( response . text + "\n" ) raise ClientError ( response ) elif 500 <= code < 600 : sys . stderr . write ( response . text + "\n" ) raise ServerError ( response ) else : raise ConnectionError ( response )
Raise exceptions in response to any http errors
62,241
def createTemplate ( data ) : conn = Qubole . agent ( ) return conn . post ( Template . rest_entity_path , data )
Create a new template .
62,242
def editTemplate ( id , data ) : conn = Qubole . agent ( ) return conn . put ( Template . element_path ( id ) , data )
Edit an existing template .
62,243
def viewTemplate ( id ) : conn = Qubole . agent ( ) return conn . get ( Template . element_path ( id ) )
View the details of an existing Template.
62,244
def submitTemplate ( id , data = { } ) : conn = Qubole . agent ( ) path = str ( id ) + "/run" return conn . post ( Template . element_path ( path ) , data )
Submit an existing Template .
62,245
def runTemplate ( id , data = { } ) : conn = Qubole . agent ( ) path = str ( id ) + "/run" res = conn . post ( Template . element_path ( path ) , data ) cmdType = res [ 'command_type' ] cmdId = res [ 'id' ] cmdClass = eval ( cmdType ) cmd = cmdClass . find ( cmdId ) while not Command . is_done ( cmd . status ) : time . sleep ( Qubole . poll_interval ) cmd = cmdClass . find ( cmd . id ) return Template . getResult ( cmdClass , cmd )
Run an existing Template and wait for the result. Prints the result to stdout.
62,246
def listTemplates ( data = { } ) : conn = Qubole . agent ( ) url_path = Template . rest_entity_path page_attr = [ ] if "page" in data and data [ "page" ] is not None : page_attr . append ( "page=%s" % data [ "page" ] ) if "per_page" in data and data [ "per_page" ] is not None : page_attr . append ( "per_page=%s" % data [ "per_page" ] ) if page_attr : url_path = "%s?%s" % ( url_path , "&" . join ( page_attr ) ) return conn . get ( url_path )
Fetch details of existing Templates.
62,247
def edit ( args ) : tap = DbTap . find ( args . id ) options = { } if not args . name is None : options [ "db_name" ] = args . name if args . host is not None : options [ "db_host" ] = args . host if args . user is not None : options [ "db_user" ] = args . user if args . password is not None : options [ "db_passwd" ] = args . password if args . type is not None : options [ "db_type" ] = args . type if args . location is not None : options [ "db_location" ] = args . location if args . port is not None : options [ "port" ] = args . port tap = tap . edit ( ** options ) return json . dumps ( tap . attributes , sort_keys = True , indent = 4 )
Carefully build a dict of edit options from the parsed arguments and apply them to the DbTap.
62,248
def create ( cls , name , config = None , kind = "spark" ) : conn = Qubole . agent ( ) return conn . post ( cls . rest_entity_path , data = { 'name' : name , 'config' : config , 'kind' : kind } )
Create a new app .
62,249
def configure ( cls , api_token , api_url = "https://api.qubole.com/api/" , version = "v1.2" , poll_interval = 5 , skip_ssl_cert_check = False , cloud_name = "AWS" ) : cls . _auth = QuboleAuth ( api_token ) cls . api_token = api_token cls . version = version cls . baseurl = api_url if poll_interval < Qubole . MIN_POLL_INTERVAL : log . warn ( "Poll interval cannot be less than %s seconds. Setting it to %s seconds.\n" % ( Qubole . MIN_POLL_INTERVAL , Qubole . MIN_POLL_INTERVAL ) ) cls . poll_interval = Qubole . MIN_POLL_INTERVAL else : cls . poll_interval = poll_interval cls . skip_ssl_cert_check = skip_ssl_cert_check cls . cloud_name = cloud_name . lower ( ) cls . cached_agent = None
Set parameters governing interaction with QDS
62,250
def get_cluster_request_parameters ( cluster_info , cloud_config , engine_config ) : cluster_request = { } cloud_config = util . _make_minimal ( cloud_config . __dict__ ) if bool ( cloud_config ) : cluster_request [ 'cloud_config' ] = cloud_config engine_config = util . _make_minimal ( engine_config . __dict__ ) if bool ( engine_config ) : cluster_request [ 'engine_config' ] = engine_config cluster_request . update ( util . _make_minimal ( cluster_info . __dict__ ) ) return cluster_request
Use this to build the final minimal request from the cluster_info, cloud_config, and engine_config objects. Alternatively, call util._make_minimal directly if only one object needs to be converted.
62,251
def create ( cls , cluster_info ) : conn = Qubole . agent ( version = "v2" ) return conn . post ( cls . rest_entity_path , data = cluster_info )
Create a new cluster using information provided in cluster_info .
62,252
def _download_to_local ( boto_conn , s3_path , fp , num_result_dir , delim = None ) : def _callback ( downloaded , total ) : if ( total is 0 ) or ( downloaded == total ) : return progress = downloaded * 100 / total sys . stderr . write ( '\r[{0}] {1}%' . format ( '#' * progress , progress ) ) sys . stderr . flush ( ) m = _URI_RE . match ( s3_path ) bucket_name = m . group ( 1 ) bucket = boto_conn . get_bucket ( bucket_name ) retries = 6 if s3_path . endswith ( '/' ) is False : key_name = m . group ( 2 ) key_instance = bucket . get_key ( key_name ) while key_instance is None and retries > 0 : retries = retries - 1 log . info ( "Results file is not available on s3. Retry: " + str ( 6 - retries ) ) time . sleep ( 10 ) key_instance = bucket . get_key ( key_name ) if key_instance is None : raise Exception ( "Results file not available on s3 yet. This can be because of s3 eventual consistency issues." ) log . info ( "Downloading file from %s" % s3_path ) if delim is None : try : key_instance . get_contents_to_file ( fp ) except boto . exception . S3ResponseError as e : if ( e . status == 403 ) : log . warn ( "Access denied while fetching the s3 object. Retrying without specifying the version...." ) key_instance . open ( ) fp . write ( key_instance . read ( ) ) key_instance . close ( ) else : raise else : _read_iteratively ( key_instance , fp , delim = delim ) else : key_prefix = m . group ( 2 ) bucket_paths = bucket . list ( key_prefix ) for one_path in bucket_paths : name = one_path . name if name . endswith ( '$folder$' ) : continue log . info ( "Downloading file from %s" % name ) if delim is None : one_path . get_contents_to_file ( fp ) else : _read_iteratively ( one_path , fp , delim = delim )
Downloads the contents of all objects in s3_path into fp
62,253
def cancel_id ( cls , id ) : conn = Qubole . agent ( ) data = { "status" : "kill" } return conn . put ( cls . element_path ( id ) , data )
Cancels command denoted by this id
62,254
def get_log_id ( cls , id ) : conn = Qubole . agent ( ) r = conn . get_raw ( cls . element_path ( id ) + "/logs" ) return r . text
Fetches log for the command represented by this id
62,255
def get_log ( self ) : log_path = self . meta_data [ 'logs_resource' ] conn = Qubole . agent ( ) r = conn . get_raw ( log_path ) return r . text
Fetches log for the command represented by this object
62,256
def get_results ( self , fp = sys . stdout , inline = True , delim = None , fetch = True , qlog = None , arguments = [ ] ) : result_path = self . meta_data [ 'results_resource' ] conn = Qubole . agent ( ) include_header = "false" if len ( arguments ) == 1 : include_header = arguments . pop ( 0 ) if include_header not in ( 'true' , 'false' ) : raise ParseError ( "incude_header can be either true or false" ) r = conn . get ( result_path , { 'inline' : inline , 'include_headers' : include_header } ) if r . get ( 'inline' ) : raw_results = r [ 'results' ] encoded_results = raw_results . encode ( 'utf8' ) if sys . version_info < ( 3 , 0 , 0 ) : fp . write ( encoded_results ) else : import io if isinstance ( fp , io . TextIOBase ) : if hasattr ( fp , 'buffer' ) : fp . buffer . write ( encoded_results ) else : fp . write ( raw_results ) elif isinstance ( fp , io . BufferedIOBase ) or isinstance ( fp , io . RawIOBase ) : fp . write ( encoded_results ) else : pass else : if fetch : storage_credentials = conn . get ( Account . credentials_rest_entity_path ) if storage_credentials [ 'region_endpoint' ] is not None : boto_conn = boto . connect_s3 ( aws_access_key_id = storage_credentials [ 'storage_access_key' ] , aws_secret_access_key = storage_credentials [ 'storage_secret_key' ] , security_token = storage_credentials [ 'session_token' ] , host = storage_credentials [ 'region_endpoint' ] ) else : boto_conn = boto . connect_s3 ( aws_access_key_id = storage_credentials [ 'storage_access_key' ] , aws_secret_access_key = storage_credentials [ 'storage_secret_key' ] , security_token = storage_credentials [ 'session_token' ] ) log . info ( "Starting download from result locations: [%s]" % "," . join ( r [ 'result_location' ] ) ) num_result_dir = Command . find ( self . id ) . num_result_dir if include_header . lower ( ) == "true" and qlog is not None : write_headers ( qlog , fp ) for s3_path in r [ 'result_location' ] : _download_to_local ( boto_conn , s3_path , fp , num_result_dir , delim = delim ) else : fp . write ( "," . join ( r [ 'result_location' ] ) )
Fetches the result for the command represented by this object
62,257
def pluralize ( singular ) : if singular in UNCOUNTABLES : return singular for i in IRREGULAR : if i [ 0 ] == singular : return i [ 1 ] for i in PLURALIZE_PATTERNS : if re . search ( i [ 0 ] , singular ) : return re . sub ( i [ 0 ] , i [ 1 ] , singular )
Convert singular word to its plural form .
62,258
def singularize ( plural ) : if plural in UNCOUNTABLES : return plural for i in IRREGULAR : if i [ 1 ] == plural : return i [ 0 ] for i in SINGULARIZE_PATTERNS : if re . search ( i [ 0 ] , plural ) : return re . sub ( i [ 0 ] , i [ 1 ] , plural ) return plural
Convert plural word to its singular form .
62,259
def camelize ( word ) : return '' . join ( w [ 0 ] . upper ( ) + w [ 1 : ] for w in re . sub ( '[^A-Z^a-z^0-9^:]+' , ' ' , word ) . split ( ' ' ) )
Convert a word from lower_with_underscores to CamelCase .
62,260
def _make_minimal ( dictionary ) : new_dict = { } for key , value in dictionary . items ( ) : if value is not None : if isinstance ( value , dict ) : new_value = _make_minimal ( value ) if new_value : new_dict [ key ] = new_value else : new_dict [ key ] = value return new_dict
This function removes all the keys whose value is either None or an empty dictionary .
62,261
def upload_profiler_report ( url , filename , config ) : try : logger . debug ( "Uploading profiler report to IOpipe" ) with open ( filename , "rb" ) as data : response = requests . put ( url , data = data , timeout = config [ "network_timeout" ] ) response . raise_for_status ( ) except Exception as e : logger . debug ( "Error while uploading profiler report: %s" , e ) if hasattr ( e , "response" ) : logger . debug ( e . response . content ) else : logger . debug ( "Profiler report uploaded successfully" ) finally : if os . path . isfile ( filename ) : os . remove ( filename )
Uploads a profiler report to IOpipe
62,262
def read_pid_stat ( pid ) : return { "utime" : random . randint ( 0 , 999999999 ) , "stime" : random . randint ( 0 , 999999999 ) , "cutime" : random . randint ( 0 , 999999999 ) , "cstime" : random . randint ( 0 , 999999999 ) , }
Mocks read_pid_stat as this is a Linux - specific operation .
62,263
def read_stat ( ) : return [ { "times" : { "user" : random . randint ( 0 , 999999999 ) , "nice" : random . randint ( 0 , 999999999 ) , "sys" : random . randint ( 0 , 999999999 ) , "idle" : random . randint ( 0 , 999999999 ) , "irq" : random . randint ( 0 , 999999999 ) , } } ]
Mocks read_stat as this is a Linux - specific operation .
62,264
def load_plugins ( self , plugins ) : def instantiate ( plugin ) : return plugin ( ) if inspect . isclass ( plugin ) else plugin loaded_plugins = [ ] plugins_seen = [ ] for plugin in reversed ( plugins ) : if not is_plugin ( plugin ) or plugin . name in plugins_seen : continue loaded_plugins . insert ( 0 , instantiate ( plugin ) ) plugins_seen . append ( plugin . name ) return loaded_plugins
Loads plugins that implement the Plugin interface, instantiating them if needed and skipping duplicates.
62,265
def run_hooks ( self , name , event = None , context = None ) : hooks = { "pre:setup" : lambda p : p . pre_setup ( self ) , "post:setup" : lambda p : p . post_setup ( self ) , "pre:invoke" : lambda p : p . pre_invoke ( event , context ) , "post:invoke" : lambda p : p . post_invoke ( event , context ) , "pre:report" : lambda p : p . pre_report ( self . report ) , "post:report" : lambda p : p . post_report ( self . report ) , } if name in hooks : for p in self . plugins : if p . enabled : try : hooks [ name ] ( p ) except Exception as e : logger . error ( "IOpipe plugin %s hook raised error: %s" % ( name , str ( e ) ) ) logger . exception ( e )
Runs plugin hooks for each registered plugin .
62,266
def wait_for_futures ( self ) : [ future for future in futures . as_completed ( self . futures ) ] self . futures = [ ]
Wait for all futures to complete. This should be done at the end of an invocation.
62,267
def validate_context ( self , context ) : return all ( [ hasattr ( context , attr ) for attr in [ "aws_request_id" , "function_name" , "function_version" , "get_remaining_time_in_millis" , "invoked_function_arn" , "log_group_name" , "log_stream_name" , "memory_limit_in_mb" , ] ] ) and callable ( context . get_remaining_time_in_millis )
Checks to see if we're working with a valid Lambda context object.
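A hedged sketch of a context object that would satisfy validate_context; the attribute names come from the check above, the values are invented:

from types import SimpleNamespace

fake_context = SimpleNamespace(
    aws_request_id="req-123",
    function_name="my-function",
    function_version="$LATEST",
    get_remaining_time_in_millis=lambda: 30000,
    invoked_function_arn="arn:aws:lambda:us-east-1:0:function:my-function",
    log_group_name="/aws/lambda/my-function",
    log_stream_name="2019/01/01/[$LATEST]abcdef",
    memory_limit_in_mb="128",
)
# validate_context(fake_context) would return True.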
62,268
def patch_session_send ( context , http_filter ) : if Session is None : return def send ( self , * args , ** kwargs ) : id = ensure_utf8 ( str ( uuid . uuid4 ( ) ) ) with context . iopipe . mark ( id ) : response = original_session_send ( self , * args , ** kwargs ) trace = context . iopipe . mark . measure ( id ) context . iopipe . mark . delete ( id ) collect_metrics_for_response ( response , context , trace , http_filter ) return response Session . send = send
Monkey patches requests Session class if available . Overloads the send method to add tracing and metrics collection .
62,269
def patch_botocore_session_send ( context , http_filter ) : if BotocoreSession is None : return def send ( self , * args , ** kwargs ) : id = str ( uuid . uuid4 ( ) ) with context . iopipe . mark ( id ) : response = original_botocore_session_send ( self , * args , ** kwargs ) trace = context . iopipe . mark . measure ( id ) context . iopipe . mark . delete ( id ) collect_metrics_for_response ( response , context , trace , http_filter ) return response BotocoreSession . send = send
Monkey patches botocore's vendored requests if available. Overloads the Session class's send method to add tracing and metric collection.
62,270
def collect_metrics_for_response ( http_response , context , trace , http_filter ) : http_response = copy . deepcopy ( http_response ) if http_filter is not None and callable ( http_filter ) : http_response = http_filter ( http_response ) if http_response is False : return request = None if hasattr ( http_response , "request" ) : parsed_url = None if hasattr ( http_response . request , "url" ) : parsed_url = urlparse ( http_response . request . url ) request_headers = [ ] if hasattr ( http_response . request , "headers" ) : request_headers = [ { "key" : ensure_utf8 ( k ) , "string" : ensure_utf8 ( v ) } for k , v in http_response . request . headers . items ( ) if k . lower ( ) in INCLUDE_HEADERS ] request = Request ( hash = ensure_utf8 ( getattr ( parsed_url , "fragment" , None ) ) , headers = request_headers , hostname = ensure_utf8 ( getattr ( parsed_url , "hostname" , None ) ) , method = ensure_utf8 ( getattr ( http_response . request , "method" , None ) ) , path = ensure_utf8 ( getattr ( parsed_url , "path" , None ) ) , pathname = ensure_utf8 ( getattr ( parsed_url , "path" , None ) ) , port = ensure_utf8 ( getattr ( parsed_url , "port" , None ) ) , protocol = ensure_utf8 ( getattr ( parsed_url , "scheme" , None ) ) , query = ensure_utf8 ( getattr ( parsed_url , "query" , None ) ) , url = ensure_utf8 ( getattr ( http_response . request , "url" , None ) ) , ) response_headers = [ ] if hasattr ( http_response , "headers" ) : response_headers = [ { "key" : ensure_utf8 ( k ) , "string" : ensure_utf8 ( v ) } for k , v in http_response . headers . items ( ) if k . lower ( ) in INCLUDE_HEADERS ] response = Response ( headers = response_headers , statusCode = ensure_utf8 ( getattr ( http_response , "status_code" , None ) ) , statusMessage = None , ) context . iopipe . mark . http_trace ( trace , request , response )
Collects relevant metrics from a requests Response object and adds them to the IOpipe context .
62,271
def get_plugin_meta ( plugins ) : return [ { "name" : p . name , "version" : p . version , "homepage" : p . homepage , "enabled" : p . enabled , } for p in plugins if is_plugin ( p ) ]
Returns metadata about plugins.
62,272
def is_plugin ( plugin ) : try : return isinstance ( plugin , Plugin ) or issubclass ( plugin , Plugin ) except TypeError : return False
Returns true if the plugin implements the Plugin interface .
62,273
def with_metaclass ( meta , * bases ) : class metaclass ( meta ) : def __new__ ( cls , name , this_bases , d ) : return meta ( name , bases , d ) return type . __new__ ( metaclass , "temporary_class" , ( ) , { } )
Python 2 and 3 compatible way to define metaclasses.
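A self-contained usage sketch; Meta, Base, and MyClass are illustrative names:

def with_metaclass(meta, *bases):
    # Same helper as in the snippet above, reformatted for readability.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(metaclass, "temporary_class", (), {})

class Meta(type):
    pass

class Base(object):
    pass

class MyClass(with_metaclass(Meta, Base)):
    pass

assert type(MyClass) is Meta          # created by the real metaclass
assert issubclass(MyClass, Base)      # and derived from the intended base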
62,274
def extract_context_data ( self ) : data = { } for k , v in { "functionName" : "function_name" , "functionVersion" : "function_version" , "memoryLimitInMB" : "memory_limit_in_mb" , "invokedFunctionArn" : "invoked_function_arn" , "awsRequestId" : "aws_request_id" , "logGroupName" : "log_group_name" , "logStreamName" : "log_stream_name" , } . items ( ) : if hasattr ( self . context , v ) : data [ k ] = getattr ( self . context , v ) if ( hasattr ( self . context , "invoked_function_arn" ) and "AWS_SAM_LOCAL" in os . environ ) : data [ "invokedFunctionArn" ] = ( "arn:aws:lambda:local:0:function:%s" % data . get ( "functionName" , "unknown" ) ) if hasattr ( self . context , "get_remaining_time_in_millis" ) and callable ( self . context . get_remaining_time_in_millis ) : data [ "getRemainingTimeInMillis" ] = self . context . get_remaining_time_in_millis ( ) data [ "traceId" ] = os . getenv ( "_X_AMZN_TRACE_ID" , "" ) return data
Returns the contents of an AWS Lambda context.
62,275
def retain_error ( self , error , frame = None ) : if frame is None : stack = traceback . format_exc ( ) self . labels . add ( "@iopipe/error" ) else : stack = "\n" . join ( traceback . format_stack ( frame ) ) self . labels . add ( "@iopipe/timeout" ) details = { "name" : type ( error ) . __name__ , "message" : "{}" . format ( error ) , "stack" : stack , } self . report [ "errors" ] = details
Adds details of an error to the report .
62,276
def prepare ( self , error = None , frame = None ) : if error : self . retain_error ( error , frame ) self . report [ "environment" ] [ "host" ] [ "boot_id" ] = system . read_bootid ( ) self . report [ "labels" ] = list ( self . labels ) meminfo = system . read_meminfo ( ) self . report . update ( { "aws" : self . extract_context_data ( ) , "timestampEnd" : int ( time . time ( ) * 1000 ) , } ) self . report [ "environment" ] [ "os" ] . update ( { "cpus" : system . read_stat ( ) , "freemem" : meminfo [ "MemFree" ] , "hostname" : system . read_hostname ( ) , "totalmem" : meminfo [ "MemTotal" ] , "usedmem" : meminfo [ "MemTotal" ] - meminfo [ "MemFree" ] , } ) self . report [ "environment" ] [ "os" ] [ "linux" ] [ "pid" ] = { "self" : { "stat" : system . read_pid_stat ( "self" ) , "stat_start" : self . stat_start , "status" : system . read_pid_status ( "self" ) , } } self . report [ "disk" ] = system . read_disk ( ) self . report [ "duration" ] = int ( ( monotonic ( ) - self . start_time ) * 1e9 )
Prepare the report to be sent to IOpipe .
62,277
def send ( self ) : if self . sent is True : return self . sent = True logger . debug ( "Sending report to IOpipe:" ) logger . debug ( json . dumps ( self . report , indent = 2 , sort_keys = True ) ) self . client . submit_future ( send_report , copy . deepcopy ( self . report ) , self . config )
Sends the report to IOpipe .
62,278
def send_report ( report , config ) : headers = { "Authorization" : "Bearer {}" . format ( config [ "token" ] ) } url = "https://{host}{path}" . format ( ** config ) try : response = session . post ( url , json = report , headers = headers , timeout = config [ "network_timeout" ] ) response . raise_for_status ( ) except Exception as e : logger . debug ( "Error sending report to IOpipe: %s" % e ) else : logger . debug ( "Report sent to IOpipe successfully" )
Sends the report to IOpipe's collector.
62,279
def upload_log_data ( url , stream_or_file , config ) : try : logger . debug ( "Uploading log data to IOpipe" ) if isinstance ( stream_or_file , StringIO ) : stream_or_file . seek ( 0 ) response = requests . put ( url , data = stream_or_file , timeout = config [ "network_timeout" ] ) else : with open ( stream_or_file , "rb" ) as data : response = requests . put ( url , data = data , timeout = config [ "network_timeout" ] ) response . raise_for_status ( ) except Exception as e : logger . debug ( "Error while uploading log data: %s" , e ) logger . exception ( e ) if hasattr ( e , "response" ) and hasattr ( e . response , "content" ) : logger . debug ( e . response . content ) else : logger . debug ( "Log data uploaded successfully" ) finally : if isinstance ( stream_or_file , str ) and os . path . exists ( stream_or_file ) : os . remove ( stream_or_file )
Uploads log data to IOpipe .
62,280
def get_signer_hostname ( ) : region = os . getenv ( "AWS_REGION" , "" ) region = region if region and region in SUPPORTED_REGIONS else "us-west-2" return "signer.{region}.iopipe.com" . format ( region = region )
Returns the IOpipe signer hostname for a region
62,281
def get_signed_request ( config , context , extension ) : url = "https://{hostname}/" . format ( hostname = get_signer_hostname ( ) ) try : logger . debug ( "Requesting signed request URL from %s" , url ) response = requests . post ( url , json = { "arn" : context . invoked_function_arn , "requestId" : context . aws_request_id , "timestamp" : int ( time . time ( ) * 1000 ) , "extension" : extension , } , headers = { "Authorization" : config [ "token" ] } , timeout = config [ "network_timeout" ] , ) response . raise_for_status ( ) except Exception as e : logger . debug ( "Error requesting signed request URL: %s" , e ) if hasattr ( e , "response" ) : logger . debug ( e . response . content ) else : response = response . json ( ) logger . debug ( "Signed request URL received for %s" , response [ "url" ] ) return response
Returns a signed request URL from IOpipe
62,282
def handler ( event , context ) : try : ip = requests . get ( "http://checkip.amazonaws.com/" ) except requests . RequestException as e : print ( e ) raise e return { "statusCode" : 200 , "body" : json . dumps ( { "message" : "hello world" , "location" : ip . text . replace ( "\n" , "" ) } ) , }
Sample pure Lambda function
62,283
def read_meminfo ( ) : data = { } with open ( "/proc/meminfo" , "rb" ) as meminfo_file : for row in meminfo_file : fields = row . split ( ) data [ fields [ 0 ] . decode ( "ascii" ) [ : - 1 ] ] = int ( fields [ 1 ] ) * 1024 return data
Returns system memory usage information .
62,284
def read_pid_stat ( pid = "self" ) : with open ( "/proc/%s/stat" % ( pid , ) , "rb" ) as f : stat = f . readline ( ) . split ( ) return { "utime" : int ( stat [ 13 ] ) , "stime" : int ( stat [ 14 ] ) , "cutime" : int ( stat [ 15 ] ) , "cstime" : int ( stat [ 16 ] ) , }
Returns system process stat information .
62,285
def read_pid_status ( pid = "self" ) : data = { } with open ( "/proc/%s/status" % ( pid , ) , "rb" ) as status_file : for row in status_file : fields = row . split ( ) if fields and fields [ 0 ] in [ b"VmRSS:" , b"Threads:" , b"FDSize:" ] : try : data [ fields [ 0 ] . decode ( "ascii" ) [ : - 1 ] ] = int ( fields [ 1 ] ) except ValueError : data [ fields [ 0 ] . decode ( "ascii" ) [ : - 1 ] ] = fields [ 1 ] . decode ( "ascii" ) return data
Returns the system process status.
62,286
def read_stat ( ) : data = [ ] with open ( "/proc/stat" , "rb" ) as stat_file : for line in stat_file : cpu_stat = line . split ( ) if cpu_stat [ 0 ] [ : 3 ] != b"cpu" : break if len ( cpu_stat [ 0 ] ) == 3 : continue data . append ( { "times" : { "user" : int ( cpu_stat [ 1 ] ) , "nice" : int ( cpu_stat [ 2 ] ) , "sys" : int ( cpu_stat [ 3 ] ) , "idle" : int ( cpu_stat [ 4 ] ) , "irq" : int ( cpu_stat [ 6 ] ) , } } ) return data
Returns the system stat information .
62,287
def set_config ( ** config ) : config . setdefault ( "debug" , bool ( strtobool ( os . getenv ( "IOPIPE_DEBUG" , "false" ) ) ) ) config . setdefault ( "enabled" , bool ( strtobool ( os . getenv ( "IOPIPE_ENABLED" , "true" ) ) ) ) config . setdefault ( "host" , get_hostname ( ) ) config . setdefault ( "install_method" , os . getenv ( "IOPIPE_INSTALL_METHOD" , "manual" ) ) config . setdefault ( "network_timeout" , os . getenv ( "IOPIPE_NETWORK_TIMEOUT" , 5000 ) ) config . setdefault ( "path" , get_collector_path ( ) ) config . setdefault ( "plugins" , [ ] ) config . setdefault ( "sync_http" , False ) config . setdefault ( "timeout_window" , os . getenv ( "IOPIPE_TIMEOUT_WINDOW" , 500 ) ) config . setdefault ( "token" , os . getenv ( "IOPIPE_TOKEN" ) or os . getenv ( "IOPIPE_CLIENTID" ) or "" ) if "client_id" in config : config [ "token" ] = config . pop ( "client_id" ) if "url" in config : url = config . pop ( "url" ) config [ "host" ] = get_hostname ( url ) config [ "path" ] = get_collector_path ( url ) if "." in str ( config [ "network_timeout" ] ) : warnings . warn ( "IOpipe's 'network_timeout' is now in milliseconds, expressed as an integer" ) try : config [ "debug" ] = bool ( config [ "debug" ] ) except ValueError : config [ "debug" ] = False try : config [ "network_timeout" ] = int ( config [ "network_timeout" ] ) / 1000.0 except ValueError : config [ "network_timeout" ] = 5.0 if "." in str ( config [ "timeout_window" ] ) : warnings . warn ( "IOpipe's 'timeout_window' is now in milliseconds, expressed as an integer" ) try : config [ "timeout_window" ] = int ( config [ "timeout_window" ] ) / 1000.0 except ValueError : config [ "timeout_window" ] = 0.5 return config
Returns IOpipe configuration options setting defaults as necessary .
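A hedged example of the unit handling above, assuming the module's get_hostname and get_collector_path helpers are available; the token value is invented:

config = set_config(token="example-token", network_timeout=10000, timeout_window=250)
assert config["network_timeout"] == 10.0   # milliseconds converted to seconds
assert config["timeout_window"] == 0.25    # milliseconds converted to seconds
assert config["token"] == "example-token"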
62,288
def b ( s ) : return s if isinstance ( s , bytes ) else s . encode ( locale . getpreferredencoding ( ) )
Encodes Unicode strings to byte strings if necessary .
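A small check of the behavior, assuming the b() helper above (and its locale import) is in scope:

assert b(b"already-bytes") == b"already-bytes"   # bytes pass through unchanged
assert isinstance(b("some text"), bytes)         # str is encoded to bytes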
62,289
def LogMsg ( msg ) : global headerlogged if headerlogged == 0 : print ( "{0:<8} {1:<90} {2}" . format ( "Time" , "MainThread" , "UpdateSNMPObjsThread" ) ) print ( "{0:-^120}" . format ( "-" ) ) headerlogged = 1 threadname = threading . currentThread ( ) . name funcname = sys . _getframe ( 1 ) . f_code . co_name if funcname == "<module>" : funcname = "Main code path" elif funcname == "LogNetSnmpMsg" : funcname = "net-snmp code" else : funcname = "{0}()" . format ( funcname ) if threadname == "MainThread" : logmsg = "{0} {1:<112.112}" . format ( time . strftime ( "%T" , time . localtime ( time . time ( ) ) ) , "{0}: {1}" . format ( funcname , msg ) ) else : logmsg = "{0} {1:>112.112}" . format ( time . strftime ( "%T" , time . localtime ( time . time ( ) ) ) , "{0}: {1}" . format ( funcname , msg ) ) print ( logmsg )
Writes a formatted log message with a timestamp to stdout .
62,290
def UpdateSNMPObjs ( ) : global threadingString LogMsg ( "Beginning data update." ) data = "" LogMsg ( "Calling external command \"sleep 5; date\"." ) proc = subprocess . Popen ( "sleep 5; date" , shell = True , env = { "LANG" : "C" } , stdout = subprocess . PIPE , stderr = subprocess . STDOUT ) output = proc . communicate ( ) [ 0 ] . splitlines ( ) [ 0 ] rc = proc . poll ( ) if rc != 0 : LogMsg ( "An error occured executing the command: {0}" . format ( output ) ) return msg = "Updating \"threadingString\" object with data \"{0}\"." LogMsg ( msg . format ( output ) ) threadingString . update ( output ) LogMsg ( "Data update done, exiting thread." )
Function that does the actual data update .
62,291
def getRegistered ( self , context = "" ) : myobjs = { } try : objs_iterator = self . _objs [ context ] . iteritems ( ) except AttributeError : objs_iterator = self . _objs [ context ] . items ( ) for oidstr , snmpobj in objs_iterator : myobjs [ oidstr ] = { "type" : type ( snmpobj ) . __name__ , "value" : snmpobj . value ( ) } return dict ( myobjs )
Returns a dictionary with the currently registered SNMP objects .
62,292
def start ( self ) : if self . _status != netsnmpAgentStatus . CONNECTED and self . _status != netsnmpAgentStatus . RECONNECTING : self . _status = netsnmpAgentStatus . FIRSTCONNECT libnsa . init_snmp ( b ( self . AgentName ) ) if self . _status == netsnmpAgentStatus . CONNECTFAILED : msg = "Error connecting to snmpd instance at \"{0}\" -- " "incorrect \"MasterSocket\" or snmpd not running?" msg = msg . format ( self . MasterSocket ) raise netsnmpAgentException ( msg )
Starts the agent . Among other things this means connecting to the master agent if configured that way .
62,293
def _adjust_trim_top ( self , canv , size ) : action = self . _scroll_action self . _scroll_action = None maxcol , maxrow = size trim_top = self . _trim_top canv_rows = canv . rows ( ) if trim_top < 0 : trim_top = canv_rows - maxrow + trim_top + 1 if canv_rows <= maxrow : self . _trim_top = 0 return def ensure_bounds ( new_trim_top ) : return max ( 0 , min ( canv_rows - maxrow , new_trim_top ) ) if action == SCROLL_LINE_UP : self . _trim_top = ensure_bounds ( trim_top - 1 ) elif action == SCROLL_LINE_DOWN : self . _trim_top = ensure_bounds ( trim_top + 1 ) elif action == SCROLL_PAGE_UP : self . _trim_top = ensure_bounds ( trim_top - maxrow + 1 ) elif action == SCROLL_PAGE_DOWN : self . _trim_top = ensure_bounds ( trim_top + maxrow - 1 ) elif action == SCROLL_TO_TOP : self . _trim_top = 0 elif action == SCROLL_TO_END : self . _trim_top = canv_rows - maxrow else : self . _trim_top = ensure_bounds ( trim_top ) if self . _old_cursor_coords is not None and self . _old_cursor_coords != canv . cursor : self . _old_cursor_coords = None curscol , cursrow = canv . cursor if cursrow < self . _trim_top : self . _trim_top = cursrow elif cursrow >= self . _trim_top + maxrow : self . _trim_top = max ( 0 , cursrow - maxrow + 1 )
Adjust self . _trim_top according to self . _scroll_action
62,294
def rows_max ( self , size = None , focus = False ) : if size is not None : ow = self . _original_widget ow_size = self . _get_original_widget_size ( size ) sizing = ow . sizing ( ) if FIXED in sizing : self . _rows_max_cached = ow . pack ( ow_size , focus ) [ 1 ] elif FLOW in sizing : self . _rows_max_cached = ow . rows ( ow_size , focus ) else : raise RuntimeError ( 'Not a flow/box widget: %r' % self . _original_widget ) return self . _rows_max_cached
Return the maximum number of rows for the given size.
62,295
def scrolling_base_widget ( self ) : def orig_iter ( w ) : while hasattr ( w , 'original_widget' ) : w = w . original_widget yield w yield w def is_scrolling_widget ( w ) : return hasattr ( w , 'get_scrollpos' ) and hasattr ( w , 'rows_max' ) for w in orig_iter ( self ) : if is_scrolling_widget ( w ) : return w raise ValueError ( 'Not compatible to be wrapped by ScrollBar: %r' % w )
Nearest original_widget that is compatible with the scrolling API
62,296
def ignore_after ( seconds , coro = None , * args , timeout_result = None ) : if coro : return _ignore_after_func ( seconds , False , coro , args , timeout_result ) return TimeoutAfter ( seconds , ignore = True )
Execute the specified coroutine and return its result . Issue a cancellation request after seconds have elapsed . When a timeout occurs no exception is raised . Instead timeout_result is returned .
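A hedged usage sketch based only on the signature above; fetch_data and long_running_operation are invented, and the context-manager form assumes TimeoutAfter supports async with:

async def caller():
    # Coroutine-function form: positional args after the callable are passed through.
    result = await ignore_after(5, fetch_data, "https://example.com", timeout_result=None)

    # Context-manager form: the block is cancelled after 5 seconds and no exception is raised.
    async with ignore_after(5):
        await long_running_operation()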
62,297
def _add_task ( self , task ) : if hasattr ( task , '_task_group' ) : raise RuntimeError ( 'task is already part of a group' ) if self . _closed : raise RuntimeError ( 'task group is closed' ) task . _task_group = self if task . done ( ) : self . _done . append ( task ) else : self . _pending . add ( task ) task . add_done_callback ( self . _on_done )
Add an already existing task to the task group .
62,298
async def next_done ( self ) : if not self . _done and self . _pending : self . _done_event . clear ( ) await self . _done_event . wait ( ) if self . _done : return self . _done . popleft ( ) return None
Returns the next completed task . Returns None if no more tasks remain . A TaskGroup may also be used as an asynchronous iterator .
62,299
async def join ( self ) : def errored ( task ) : return not task . cancelled ( ) and task . exception ( ) try : if self . _wait in ( all , object ) : while True : task = await self . next_done ( ) if task is None : return if errored ( task ) : break if self . _wait is object : if task . cancelled ( ) or task . result ( ) is not None : return else : task = await self . next_done ( ) if task is None or not errored ( task ) : return finally : await self . cancel_remaining ( ) if errored ( task ) : raise task . exception ( )
Wait for tasks in the group to terminate according to the wait policy for the group .