Unnamed: 0
int64
0
10k
repository_name
stringlengths
7
54
func_path_in_repository
stringlengths
5
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
100
30.3k
language
stringclasses
1 value
func_code_string
stringlengths
100
30.3k
func_code_tokens
stringlengths
138
33.2k
func_documentation_string
stringlengths
1
15k
func_documentation_tokens
stringlengths
5
5.14k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
9,300
senaite/senaite.api
src/senaite/api/__init__.py
get_cancellation_status
def get_cancellation_status(brain_or_object, default="active"): """Get the `cancellation_state` of an object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Value of the review_status variable :rtype: String """ if is_brain(brain_or_object): return getattr(brain_or_object, "cancellation_state", default) workflows = get_workflows_for(brain_or_object) if 'bika_cancellation_workflow' not in workflows: return default return get_workflow_status_of(brain_or_object, 'cancellation_state')
python
def get_cancellation_status(brain_or_object, default="active"): """Get the `cancellation_state` of an object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Value of the review_status variable :rtype: String """ if is_brain(brain_or_object): return getattr(brain_or_object, "cancellation_state", default) workflows = get_workflows_for(brain_or_object) if 'bika_cancellation_workflow' not in workflows: return default return get_workflow_status_of(brain_or_object, 'cancellation_state')
['def', 'get_cancellation_status', '(', 'brain_or_object', ',', 'default', '=', '"active"', ')', ':', 'if', 'is_brain', '(', 'brain_or_object', ')', ':', 'return', 'getattr', '(', 'brain_or_object', ',', '"cancellation_state"', ',', 'default', ')', 'workflows', '=', 'get_workflows_for', '(', 'brain_or_object', ')', 'if', "'bika_cancellation_workflow'", 'not', 'in', 'workflows', ':', 'return', 'default', 'return', 'get_workflow_status_of', '(', 'brain_or_object', ',', "'cancellation_state'", ')']
Get the `cancellation_state` of an object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Value of the review_status variable :rtype: String
['Get', 'the', 'cancellation_state', 'of', 'an', 'object']
train
https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L759-L772
9,301
guaix-ucm/pyemir
emirdrp/processing/bardetect.py
overlap
def overlap(intv1, intv2): """Overlaping of two intervals""" return max(0, min(intv1[1], intv2[1]) - max(intv1[0], intv2[0]))
python
def overlap(intv1, intv2): """Overlaping of two intervals""" return max(0, min(intv1[1], intv2[1]) - max(intv1[0], intv2[0]))
['def', 'overlap', '(', 'intv1', ',', 'intv2', ')', ':', 'return', 'max', '(', '0', ',', 'min', '(', 'intv1', '[', '1', ']', ',', 'intv2', '[', '1', ']', ')', '-', 'max', '(', 'intv1', '[', '0', ']', ',', 'intv2', '[', '0', ']', ')', ')']
Overlaping of two intervals
['Overlaping', 'of', 'two', 'intervals']
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/bardetect.py#L430-L432
9,302
piglei/uwsgi-sloth
uwsgi_sloth/tailer.py
Tailer.follow
def follow(self, delay=1.0): """\ Iterator generator that returns lines as data is added to the file. Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035 """ # TODO: Handle log file rotation self.trailing = True unchanged_stats = 0 while not self.should_stop_follow: where = self.file.tell() line = self.file.readline() if line: if self.trailing and line in self.line_terminators: # This is just the line terminator added to the end of the file # before a new line, ignore. self.trailing = False continue if line[-1] in self.line_terminators: line = line[:-1] if line[-1:] == '\r\n' and '\r\n' in self.line_terminators: # found crlf line = line[:-1] self.trailing = False unchanged_stats = 0 yield line else: self.trailing = True self.seek(where) yield no_new_line # Try to catch up rotated log file unchanged_stats += 1 if unchanged_stats >= self.MAX_UNCHANGED_STATS and \ where != os.stat(self.file.name).st_size: logger.info('Reopen log file because file may has been rotated.') self.reopen_file() time.sleep(delay)
python
def follow(self, delay=1.0): """\ Iterator generator that returns lines as data is added to the file. Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035 """ # TODO: Handle log file rotation self.trailing = True unchanged_stats = 0 while not self.should_stop_follow: where = self.file.tell() line = self.file.readline() if line: if self.trailing and line in self.line_terminators: # This is just the line terminator added to the end of the file # before a new line, ignore. self.trailing = False continue if line[-1] in self.line_terminators: line = line[:-1] if line[-1:] == '\r\n' and '\r\n' in self.line_terminators: # found crlf line = line[:-1] self.trailing = False unchanged_stats = 0 yield line else: self.trailing = True self.seek(where) yield no_new_line # Try to catch up rotated log file unchanged_stats += 1 if unchanged_stats >= self.MAX_UNCHANGED_STATS and \ where != os.stat(self.file.name).st_size: logger.info('Reopen log file because file may has been rotated.') self.reopen_file() time.sleep(delay)
['def', 'follow', '(', 'self', ',', 'delay', '=', '1.0', ')', ':', '# TODO: Handle log file rotation', 'self', '.', 'trailing', '=', 'True', 'unchanged_stats', '=', '0', 'while', 'not', 'self', '.', 'should_stop_follow', ':', 'where', '=', 'self', '.', 'file', '.', 'tell', '(', ')', 'line', '=', 'self', '.', 'file', '.', 'readline', '(', ')', 'if', 'line', ':', 'if', 'self', '.', 'trailing', 'and', 'line', 'in', 'self', '.', 'line_terminators', ':', '# This is just the line terminator added to the end of the file', '# before a new line, ignore.', 'self', '.', 'trailing', '=', 'False', 'continue', 'if', 'line', '[', '-', '1', ']', 'in', 'self', '.', 'line_terminators', ':', 'line', '=', 'line', '[', ':', '-', '1', ']', 'if', 'line', '[', '-', '1', ':', ']', '==', "'\\r\\n'", 'and', "'\\r\\n'", 'in', 'self', '.', 'line_terminators', ':', '# found crlf', 'line', '=', 'line', '[', ':', '-', '1', ']', 'self', '.', 'trailing', '=', 'False', 'unchanged_stats', '=', '0', 'yield', 'line', 'else', ':', 'self', '.', 'trailing', '=', 'True', 'self', '.', 'seek', '(', 'where', ')', 'yield', 'no_new_line', '# Try to catch up rotated log file', 'unchanged_stats', '+=', '1', 'if', 'unchanged_stats', '>=', 'self', '.', 'MAX_UNCHANGED_STATS', 'and', 'where', '!=', 'os', '.', 'stat', '(', 'self', '.', 'file', '.', 'name', ')', '.', 'st_size', ':', 'logger', '.', 'info', '(', "'Reopen log file because file may has been rotated.'", ')', 'self', '.', 'reopen_file', '(', ')', 'time', '.', 'sleep', '(', 'delay', ')']
\ Iterator generator that returns lines as data is added to the file. Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
['\\', 'Iterator', 'generator', 'that', 'returns', 'lines', 'as', 'data', 'is', 'added', 'to', 'the', 'file', '.']
train
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/tailer.py#L167-L207
9,303
exxeleron/qPython
qpython/qconnection.py
QConnection.receive
def receive(self, data_only = True, **options): '''Reads and (optionally) parses the response from a q service. Retrieves query result along with meta-information: >>> q.query(qconnection.MessageType.SYNC,'{x}', 10) >>> print(q.receive(data_only = False, raw = False)) QMessage: message type: 2, data size: 13, is_compressed: False, data: 10 Retrieves parsed query result: >>> q.query(qconnection.MessageType.SYNC,'{x}', 10) >>> print(q.receive(data_only = True, raw = False)) 10 Retrieves not-parsed (raw) query result: >>> from binascii import hexlify >>> q.query(qconnection.MessageType.SYNC,'{x}', 10) >>> print(hexlify(q.receive(data_only = True, raw = True))) fa0a000000 :Parameters: - `data_only` (`boolean`) - if ``True`` returns only data part of the message, otherwise returns data and message meta-information encapsulated in :class:`.QMessage` instance :Options: - `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed data, **Default**: ``False`` - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are backed by raw q representation (:class:`.QTemporalList`, :class:`.QTemporal`) instances, otherwise are represented as `numpy datetime64`/`timedelta64` arrays and atoms, **Default**: ``False`` :returns: depending on parameter flags: :class:`.QMessage` instance, parsed message, raw data :raises: :class:`.QReaderException` ''' result = self._reader.read(**self._options.union_dict(**options)) return result.data if data_only else result
python
def receive(self, data_only = True, **options): '''Reads and (optionally) parses the response from a q service. Retrieves query result along with meta-information: >>> q.query(qconnection.MessageType.SYNC,'{x}', 10) >>> print(q.receive(data_only = False, raw = False)) QMessage: message type: 2, data size: 13, is_compressed: False, data: 10 Retrieves parsed query result: >>> q.query(qconnection.MessageType.SYNC,'{x}', 10) >>> print(q.receive(data_only = True, raw = False)) 10 Retrieves not-parsed (raw) query result: >>> from binascii import hexlify >>> q.query(qconnection.MessageType.SYNC,'{x}', 10) >>> print(hexlify(q.receive(data_only = True, raw = True))) fa0a000000 :Parameters: - `data_only` (`boolean`) - if ``True`` returns only data part of the message, otherwise returns data and message meta-information encapsulated in :class:`.QMessage` instance :Options: - `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed data, **Default**: ``False`` - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are backed by raw q representation (:class:`.QTemporalList`, :class:`.QTemporal`) instances, otherwise are represented as `numpy datetime64`/`timedelta64` arrays and atoms, **Default**: ``False`` :returns: depending on parameter flags: :class:`.QMessage` instance, parsed message, raw data :raises: :class:`.QReaderException` ''' result = self._reader.read(**self._options.union_dict(**options)) return result.data if data_only else result
['def', 'receive', '(', 'self', ',', 'data_only', '=', 'True', ',', '*', '*', 'options', ')', ':', 'result', '=', 'self', '.', '_reader', '.', 'read', '(', '*', '*', 'self', '.', '_options', '.', 'union_dict', '(', '*', '*', 'options', ')', ')', 'return', 'result', '.', 'data', 'if', 'data_only', 'else', 'result']
Reads and (optionally) parses the response from a q service. Retrieves query result along with meta-information: >>> q.query(qconnection.MessageType.SYNC,'{x}', 10) >>> print(q.receive(data_only = False, raw = False)) QMessage: message type: 2, data size: 13, is_compressed: False, data: 10 Retrieves parsed query result: >>> q.query(qconnection.MessageType.SYNC,'{x}', 10) >>> print(q.receive(data_only = True, raw = False)) 10 Retrieves not-parsed (raw) query result: >>> from binascii import hexlify >>> q.query(qconnection.MessageType.SYNC,'{x}', 10) >>> print(hexlify(q.receive(data_only = True, raw = True))) fa0a000000 :Parameters: - `data_only` (`boolean`) - if ``True`` returns only data part of the message, otherwise returns data and message meta-information encapsulated in :class:`.QMessage` instance :Options: - `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed data, **Default**: ``False`` - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are backed by raw q representation (:class:`.QTemporalList`, :class:`.QTemporal`) instances, otherwise are represented as `numpy datetime64`/`timedelta64` arrays and atoms, **Default**: ``False`` :returns: depending on parameter flags: :class:`.QMessage` instance, parsed message, raw data :raises: :class:`.QReaderException`
['Reads', 'and', '(', 'optionally', ')', 'parses', 'the', 'response', 'from', 'a', 'q', 'service', '.', 'Retrieves', 'query', 'result', 'along', 'with', 'meta', '-', 'information', ':', '>>>', 'q', '.', 'query', '(', 'qconnection', '.', 'MessageType', '.', 'SYNC', '{', 'x', '}', '10', ')', '>>>', 'print', '(', 'q', '.', 'receive', '(', 'data_only', '=', 'False', 'raw', '=', 'False', '))', 'QMessage', ':', 'message', 'type', ':', '2', 'data', 'size', ':', '13', 'is_compressed', ':', 'False', 'data', ':', '10']
train
https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L341-L381
9,304
CityOfZion/neo-python-core
neocore/IO/BinaryReader.py
BinaryReader.ReadByte
def ReadByte(self, do_ord=True): """ Read a single byte. Args: do_ord (bool): (default True) convert the byte to an ordinal first. Returns: bytes: a single byte if successful. 0 (int) if an exception occurred. """ try: if do_ord: return ord(self.stream.read(1)) return self.stream.read(1) except Exception as e: logger.error("ord expected character but got none") return 0
python
def ReadByte(self, do_ord=True): """ Read a single byte. Args: do_ord (bool): (default True) convert the byte to an ordinal first. Returns: bytes: a single byte if successful. 0 (int) if an exception occurred. """ try: if do_ord: return ord(self.stream.read(1)) return self.stream.read(1) except Exception as e: logger.error("ord expected character but got none") return 0
['def', 'ReadByte', '(', 'self', ',', 'do_ord', '=', 'True', ')', ':', 'try', ':', 'if', 'do_ord', ':', 'return', 'ord', '(', 'self', '.', 'stream', '.', 'read', '(', '1', ')', ')', 'return', 'self', '.', 'stream', '.', 'read', '(', '1', ')', 'except', 'Exception', 'as', 'e', ':', 'logger', '.', 'error', '(', '"ord expected character but got none"', ')', 'return', '0']
Read a single byte. Args: do_ord (bool): (default True) convert the byte to an ordinal first. Returns: bytes: a single byte if successful. 0 (int) if an exception occurred.
['Read', 'a', 'single', 'byte', '.']
train
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/IO/BinaryReader.py#L46-L62
9,305
Shinichi-Nakagawa/pitchpx
pitchpx/mlbam.py
MlbAm.scrape
def scrape(cls, start, end, output): """ Scrape a MLBAM Data :param start: Start Day(YYYYMMDD) :param end: End Day(YYYYMMDD) :param output: Output directory """ # Logger setting logging.basicConfig( level=logging.INFO, format="time:%(asctime)s.%(msecs)03d" + "\tmessage:%(message)s", datefmt="%Y-%m-%d %H:%M:%S" ) # validate for param_day in ({'name': 'Start Day', 'value': start}, {'name': 'End Day', 'value': end}): try: cls._validate_datetime(param_day['value']) except (validators.Invalid, MlbAmException) as e: raise MlbAmException('{msg} a {name}.'.format(name=param_day['name'], msg=e.msg)) cls._validate_datetime_from_to(start, end) # Download logging.info('->- MLBAM dataset download start') mlb = MlbAm(os.path.dirname(os.path.abspath(__file__)), output, cls._days(start, end)) mlb.download() logging.info('-<- MLBAM dataset download end')
python
def scrape(cls, start, end, output): """ Scrape a MLBAM Data :param start: Start Day(YYYYMMDD) :param end: End Day(YYYYMMDD) :param output: Output directory """ # Logger setting logging.basicConfig( level=logging.INFO, format="time:%(asctime)s.%(msecs)03d" + "\tmessage:%(message)s", datefmt="%Y-%m-%d %H:%M:%S" ) # validate for param_day in ({'name': 'Start Day', 'value': start}, {'name': 'End Day', 'value': end}): try: cls._validate_datetime(param_day['value']) except (validators.Invalid, MlbAmException) as e: raise MlbAmException('{msg} a {name}.'.format(name=param_day['name'], msg=e.msg)) cls._validate_datetime_from_to(start, end) # Download logging.info('->- MLBAM dataset download start') mlb = MlbAm(os.path.dirname(os.path.abspath(__file__)), output, cls._days(start, end)) mlb.download() logging.info('-<- MLBAM dataset download end')
['def', 'scrape', '(', 'cls', ',', 'start', ',', 'end', ',', 'output', ')', ':', '# Logger setting', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ',', 'format', '=', '"time:%(asctime)s.%(msecs)03d"', '+', '"\\tmessage:%(message)s"', ',', 'datefmt', '=', '"%Y-%m-%d %H:%M:%S"', ')', '# validate', 'for', 'param_day', 'in', '(', '{', "'name'", ':', "'Start Day'", ',', "'value'", ':', 'start', '}', ',', '{', "'name'", ':', "'End Day'", ',', "'value'", ':', 'end', '}', ')', ':', 'try', ':', 'cls', '.', '_validate_datetime', '(', 'param_day', '[', "'value'", ']', ')', 'except', '(', 'validators', '.', 'Invalid', ',', 'MlbAmException', ')', 'as', 'e', ':', 'raise', 'MlbAmException', '(', "'{msg} a {name}.'", '.', 'format', '(', 'name', '=', 'param_day', '[', "'name'", ']', ',', 'msg', '=', 'e', '.', 'msg', ')', ')', 'cls', '.', '_validate_datetime_from_to', '(', 'start', ',', 'end', ')', '# Download', 'logging', '.', 'info', '(', "'->- MLBAM dataset download start'", ')', 'mlb', '=', 'MlbAm', '(', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'abspath', '(', '__file__', ')', ')', ',', 'output', ',', 'cls', '.', '_days', '(', 'start', ',', 'end', ')', ')', 'mlb', '.', 'download', '(', ')', 'logging', '.', 'info', '(', "'-<- MLBAM dataset download end'", ')']
Scrape a MLBAM Data :param start: Start Day(YYYYMMDD) :param end: End Day(YYYYMMDD) :param output: Output directory
['Scrape', 'a', 'MLBAM', 'Data', ':', 'param', 'start', ':', 'Start', 'Day', '(', 'YYYYMMDD', ')', ':', 'param', 'end', ':', 'End', 'Day', '(', 'YYYYMMDD', ')', ':', 'param', 'output', ':', 'Output', 'directory']
train
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/mlbam.py#L186-L212
9,306
proycon/clam
clam/common/data.py
InputTemplate.json
def json(self): """Produce a JSON representation for the web interface""" d = { 'id': self.id, 'format': self.formatclass.__name__,'label': self.label, 'mimetype': self.formatclass.mimetype, 'schema': self.formatclass.schema } if self.unique: d['unique'] = True if self.filename: d['filename'] = self.filename if self.extension: d['extension'] = self.extension if self.acceptarchive: d['acceptarchive'] = self.acceptarchive #d['parameters'] = {} #The actual parameters are included as XML, and transformed by clam.js using XSLT (parameter.xsl) to generate the forms parametersxml = '' for parameter in self.parameters: parametersxml += parameter.xml() d['parametersxml'] = '<?xml version="1.0" encoding="utf-8" ?><parameters>' + parametersxml + '</parameters>' d['converters'] = [ {'id':x.id, 'label':x.label} for x in self.converters ] d['inputsources'] = [ {'id':x.id, 'label':x.label} for x in self.inputsources ] return json.dumps(d)
python
def json(self): """Produce a JSON representation for the web interface""" d = { 'id': self.id, 'format': self.formatclass.__name__,'label': self.label, 'mimetype': self.formatclass.mimetype, 'schema': self.formatclass.schema } if self.unique: d['unique'] = True if self.filename: d['filename'] = self.filename if self.extension: d['extension'] = self.extension if self.acceptarchive: d['acceptarchive'] = self.acceptarchive #d['parameters'] = {} #The actual parameters are included as XML, and transformed by clam.js using XSLT (parameter.xsl) to generate the forms parametersxml = '' for parameter in self.parameters: parametersxml += parameter.xml() d['parametersxml'] = '<?xml version="1.0" encoding="utf-8" ?><parameters>' + parametersxml + '</parameters>' d['converters'] = [ {'id':x.id, 'label':x.label} for x in self.converters ] d['inputsources'] = [ {'id':x.id, 'label':x.label} for x in self.inputsources ] return json.dumps(d)
['def', 'json', '(', 'self', ')', ':', 'd', '=', '{', "'id'", ':', 'self', '.', 'id', ',', "'format'", ':', 'self', '.', 'formatclass', '.', '__name__', ',', "'label'", ':', 'self', '.', 'label', ',', "'mimetype'", ':', 'self', '.', 'formatclass', '.', 'mimetype', ',', "'schema'", ':', 'self', '.', 'formatclass', '.', 'schema', '}', 'if', 'self', '.', 'unique', ':', 'd', '[', "'unique'", ']', '=', 'True', 'if', 'self', '.', 'filename', ':', 'd', '[', "'filename'", ']', '=', 'self', '.', 'filename', 'if', 'self', '.', 'extension', ':', 'd', '[', "'extension'", ']', '=', 'self', '.', 'extension', 'if', 'self', '.', 'acceptarchive', ':', 'd', '[', "'acceptarchive'", ']', '=', 'self', '.', 'acceptarchive', "#d['parameters'] = {}", '#The actual parameters are included as XML, and transformed by clam.js using XSLT (parameter.xsl) to generate the forms', 'parametersxml', '=', "''", 'for', 'parameter', 'in', 'self', '.', 'parameters', ':', 'parametersxml', '+=', 'parameter', '.', 'xml', '(', ')', 'd', '[', "'parametersxml'", ']', '=', '\'<?xml version="1.0" encoding="utf-8" ?><parameters>\'', '+', 'parametersxml', '+', "'</parameters>'", 'd', '[', "'converters'", ']', '=', '[', '{', "'id'", ':', 'x', '.', 'id', ',', "'label'", ':', 'x', '.', 'label', '}', 'for', 'x', 'in', 'self', '.', 'converters', ']', 'd', '[', "'inputsources'", ']', '=', '[', '{', "'id'", ':', 'x', '.', 'id', ',', "'label'", ':', 'x', '.', 'label', '}', 'for', 'x', 'in', 'self', '.', 'inputsources', ']', 'return', 'json', '.', 'dumps', '(', 'd', ')']
Produce a JSON representation for the web interface
['Produce', 'a', 'JSON', 'representation', 'for', 'the', 'web', 'interface']
train
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/data.py#L1524-L1545
9,307
ponty/psidialogs
psidialogs/__init__.py
ask_folder
def ask_folder(message='Select folder.', default='', title=''): """ A dialog to get a directory name. Returns the name of a directory, or None if user chose to cancel. If the "default" argument specifies a directory name, and that directory exists, then the dialog box will start with that directory. :param message: message to be displayed. :param title: window title :param default: default folder path :rtype: None or string """ return backend_api.opendialog("ask_folder", dict(message=message, default=default, title=title))
python
def ask_folder(message='Select folder.', default='', title=''): """ A dialog to get a directory name. Returns the name of a directory, or None if user chose to cancel. If the "default" argument specifies a directory name, and that directory exists, then the dialog box will start with that directory. :param message: message to be displayed. :param title: window title :param default: default folder path :rtype: None or string """ return backend_api.opendialog("ask_folder", dict(message=message, default=default, title=title))
['def', 'ask_folder', '(', 'message', '=', "'Select folder.'", ',', 'default', '=', "''", ',', 'title', '=', "''", ')', ':', 'return', 'backend_api', '.', 'opendialog', '(', '"ask_folder"', ',', 'dict', '(', 'message', '=', 'message', ',', 'default', '=', 'default', ',', 'title', '=', 'title', ')', ')']
A dialog to get a directory name. Returns the name of a directory, or None if user chose to cancel. If the "default" argument specifies a directory name, and that directory exists, then the dialog box will start with that directory. :param message: message to be displayed. :param title: window title :param default: default folder path :rtype: None or string
['A', 'dialog', 'to', 'get', 'a', 'directory', 'name', '.', 'Returns', 'the', 'name', 'of', 'a', 'directory', 'or', 'None', 'if', 'user', 'chose', 'to', 'cancel', '.', 'If', 'the', 'default', 'argument', 'specifies', 'a', 'directory', 'name', 'and', 'that', 'directory', 'exists', 'then', 'the', 'dialog', 'box', 'will', 'start', 'with', 'that', 'directory', '.']
train
https://github.com/ponty/psidialogs/blob/e385ab6b48cb43af52b810a1bf76a8135f4585b8/psidialogs/__init__.py#L101-L113
9,308
llllllllll/codetransformer
codetransformer/utils/pretty.py
walk_code
def walk_code(co, _prefix=''): """ Traverse a code object, finding all consts which are also code objects. Yields pairs of (name, code object). """ name = _prefix + co.co_name yield name, co yield from chain.from_iterable( walk_code(c, _prefix=_extend_name(name, co)) for c in co.co_consts if isinstance(c, CodeType) )
python
def walk_code(co, _prefix=''): """ Traverse a code object, finding all consts which are also code objects. Yields pairs of (name, code object). """ name = _prefix + co.co_name yield name, co yield from chain.from_iterable( walk_code(c, _prefix=_extend_name(name, co)) for c in co.co_consts if isinstance(c, CodeType) )
['def', 'walk_code', '(', 'co', ',', '_prefix', '=', "''", ')', ':', 'name', '=', '_prefix', '+', 'co', '.', 'co_name', 'yield', 'name', ',', 'co', 'yield', 'from', 'chain', '.', 'from_iterable', '(', 'walk_code', '(', 'c', ',', '_prefix', '=', '_extend_name', '(', 'name', ',', 'co', ')', ')', 'for', 'c', 'in', 'co', '.', 'co_consts', 'if', 'isinstance', '(', 'c', ',', 'CodeType', ')', ')']
Traverse a code object, finding all consts which are also code objects. Yields pairs of (name, code object).
['Traverse', 'a', 'code', 'object', 'finding', 'all', 'consts', 'which', 'are', 'also', 'code', 'objects', '.']
train
https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/utils/pretty.py#L149-L161
9,309
DataBiosphere/toil
src/toil/resource.py
ModuleDescriptor._resourcePath
def _resourcePath(self): """ The path to the directory that should be used when shipping this module and its siblings around as a resource. """ if self.fromVirtualEnv: return self.dirPath elif '.' in self.name: return os.path.join(self.dirPath, self._rootPackage()) else: initName = self._initModuleName(self.dirPath) if initName: raise ResourceException( "Toil does not support loading a user script from a package directory. You " "may want to remove %s from %s or invoke the user script as a module via " "'PYTHONPATH=\"%s\" python -m %s.%s'." % tuple(concat(initName, self.dirPath, os.path.split(self.dirPath), self.name))) return self.dirPath
python
def _resourcePath(self): """ The path to the directory that should be used when shipping this module and its siblings around as a resource. """ if self.fromVirtualEnv: return self.dirPath elif '.' in self.name: return os.path.join(self.dirPath, self._rootPackage()) else: initName = self._initModuleName(self.dirPath) if initName: raise ResourceException( "Toil does not support loading a user script from a package directory. You " "may want to remove %s from %s or invoke the user script as a module via " "'PYTHONPATH=\"%s\" python -m %s.%s'." % tuple(concat(initName, self.dirPath, os.path.split(self.dirPath), self.name))) return self.dirPath
['def', '_resourcePath', '(', 'self', ')', ':', 'if', 'self', '.', 'fromVirtualEnv', ':', 'return', 'self', '.', 'dirPath', 'elif', "'.'", 'in', 'self', '.', 'name', ':', 'return', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'dirPath', ',', 'self', '.', '_rootPackage', '(', ')', ')', 'else', ':', 'initName', '=', 'self', '.', '_initModuleName', '(', 'self', '.', 'dirPath', ')', 'if', 'initName', ':', 'raise', 'ResourceException', '(', '"Toil does not support loading a user script from a package directory. You "', '"may want to remove %s from %s or invoke the user script as a module via "', '"\'PYTHONPATH=\\"%s\\" python -m %s.%s\'."', '%', 'tuple', '(', 'concat', '(', 'initName', ',', 'self', '.', 'dirPath', ',', 'os', '.', 'path', '.', 'split', '(', 'self', '.', 'dirPath', ')', ',', 'self', '.', 'name', ')', ')', ')', 'return', 'self', '.', 'dirPath']
The path to the directory that should be used when shipping this module and its siblings around as a resource.
['The', 'path', 'to', 'the', 'directory', 'that', 'should', 'be', 'used', 'when', 'shipping', 'this', 'module', 'and', 'its', 'siblings', 'around', 'as', 'a', 'resource', '.']
train
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/resource.py#L543-L560
9,310
iotile/coretools
iotilecore/iotile/core/hw/update/records/reflash_tile.py
_parse_target
def _parse_target(target): """Parse a binary targeting information structure. This function only supports extracting the slot number or controller from the target and will raise an ArgumentError if more complicated targeting is desired. Args: target (bytes): The binary targeting data blob. Returns: dict: The parsed targeting data """ if len(target) != 8: raise ArgumentError("Invalid targeting data length", expected=8, length=len(target)) slot, match_op = struct.unpack("<B6xB", target) if match_op == _MATCH_CONTROLLER: return {'controller': True, 'slot': 0} elif match_op == _MATCH_SLOT: return {'controller': False, 'slot': slot} raise ArgumentError("Unsupported complex targeting specified", match_op=match_op)
python
def _parse_target(target): """Parse a binary targeting information structure. This function only supports extracting the slot number or controller from the target and will raise an ArgumentError if more complicated targeting is desired. Args: target (bytes): The binary targeting data blob. Returns: dict: The parsed targeting data """ if len(target) != 8: raise ArgumentError("Invalid targeting data length", expected=8, length=len(target)) slot, match_op = struct.unpack("<B6xB", target) if match_op == _MATCH_CONTROLLER: return {'controller': True, 'slot': 0} elif match_op == _MATCH_SLOT: return {'controller': False, 'slot': slot} raise ArgumentError("Unsupported complex targeting specified", match_op=match_op)
['def', '_parse_target', '(', 'target', ')', ':', 'if', 'len', '(', 'target', ')', '!=', '8', ':', 'raise', 'ArgumentError', '(', '"Invalid targeting data length"', ',', 'expected', '=', '8', ',', 'length', '=', 'len', '(', 'target', ')', ')', 'slot', ',', 'match_op', '=', 'struct', '.', 'unpack', '(', '"<B6xB"', ',', 'target', ')', 'if', 'match_op', '==', '_MATCH_CONTROLLER', ':', 'return', '{', "'controller'", ':', 'True', ',', "'slot'", ':', '0', '}', 'elif', 'match_op', '==', '_MATCH_SLOT', ':', 'return', '{', "'controller'", ':', 'False', ',', "'slot'", ':', 'slot', '}', 'raise', 'ArgumentError', '(', '"Unsupported complex targeting specified"', ',', 'match_op', '=', 'match_op', ')']
Parse a binary targeting information structure. This function only supports extracting the slot number or controller from the target and will raise an ArgumentError if more complicated targeting is desired. Args: target (bytes): The binary targeting data blob. Returns: dict: The parsed targeting data
['Parse', 'a', 'binary', 'targeting', 'information', 'structure', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/update/records/reflash_tile.py#L165-L188
9,311
danilobellini/audiolazy
audiolazy/lazy_stream.py
Streamix.add
def add(self, delta, data): """ Adds (enqueues) an iterable event to the mixer. Parameters ---------- delta : Time in samples since last added event. This can be zero and can be float. Use "s" object from sHz for time conversion. data : Iterable (e.g. a list, a tuple, a Stream) to be "played" by the mixer at the given time delta. See Also -------- sHz : Time in seconds (s) and frequency in hertz (Hz) constants from sample rate in samples/second. """ if delta < 0: raise ValueError("Delta time should be always positive") self._not_playing.append((delta, iter(data)))
python
def add(self, delta, data): """ Adds (enqueues) an iterable event to the mixer. Parameters ---------- delta : Time in samples since last added event. This can be zero and can be float. Use "s" object from sHz for time conversion. data : Iterable (e.g. a list, a tuple, a Stream) to be "played" by the mixer at the given time delta. See Also -------- sHz : Time in seconds (s) and frequency in hertz (Hz) constants from sample rate in samples/second. """ if delta < 0: raise ValueError("Delta time should be always positive") self._not_playing.append((delta, iter(data)))
['def', 'add', '(', 'self', ',', 'delta', ',', 'data', ')', ':', 'if', 'delta', '<', '0', ':', 'raise', 'ValueError', '(', '"Delta time should be always positive"', ')', 'self', '.', '_not_playing', '.', 'append', '(', '(', 'delta', ',', 'iter', '(', 'data', ')', ')', ')']
Adds (enqueues) an iterable event to the mixer. Parameters ---------- delta : Time in samples since last added event. This can be zero and can be float. Use "s" object from sHz for time conversion. data : Iterable (e.g. a list, a tuple, a Stream) to be "played" by the mixer at the given time delta. See Also -------- sHz : Time in seconds (s) and frequency in hertz (Hz) constants from sample rate in samples/second.
['Adds', '(', 'enqueues', ')', 'an', 'iterable', 'event', 'to', 'the', 'mixer', '.']
train
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_stream.py#L720-L742
9,312
laginha/django-mobileesp
src/django_mobileesp/mdetect.py
UAgentInfo.detectMeegoPhone
def detectMeegoPhone(self): """Return detection of a Meego phone Detects a phone running the Meego OS. """ return UAgentInfo.deviceMeego in self.__userAgent \ and UAgentInfo.mobi in self.__userAgent
python
def detectMeegoPhone(self): """Return detection of a Meego phone Detects a phone running the Meego OS. """ return UAgentInfo.deviceMeego in self.__userAgent \ and UAgentInfo.mobi in self.__userAgent
['def', 'detectMeegoPhone', '(', 'self', ')', ':', 'return', 'UAgentInfo', '.', 'deviceMeego', 'in', 'self', '.', '__userAgent', 'and', 'UAgentInfo', '.', 'mobi', 'in', 'self', '.', '__userAgent']
Return detection of a Meego phone Detects a phone running the Meego OS.
['Return', 'detection', 'of', 'a', 'Meego', 'phone']
train
https://github.com/laginha/django-mobileesp/blob/91d4babb2343b992970bdb076508d380680c8b7e/src/django_mobileesp/mdetect.py#L626-L632
9,313
monim67/django-bootstrap-datepicker-plus
bootstrap_datepicker_plus/_helpers.py
get_base_input
def get_base_input(test=False): """ Return DateTimeBaseInput class from django.forms.widgets module Return _compatibility.DateTimeBaseInput class for older django versions. """ from django.forms.widgets import DateTimeBaseInput if 'get_context' in dir(DateTimeBaseInput) and not test: # django version 1.11 and above base_input = DateTimeBaseInput else: # django version below 1.11 from bootstrap_datepicker_plus._compatibility import ( CompatibleDateTimeBaseInput ) base_input = CompatibleDateTimeBaseInput return base_input
python
def get_base_input(test=False): """ Return DateTimeBaseInput class from django.forms.widgets module Return _compatibility.DateTimeBaseInput class for older django versions. """ from django.forms.widgets import DateTimeBaseInput if 'get_context' in dir(DateTimeBaseInput) and not test: # django version 1.11 and above base_input = DateTimeBaseInput else: # django version below 1.11 from bootstrap_datepicker_plus._compatibility import ( CompatibleDateTimeBaseInput ) base_input = CompatibleDateTimeBaseInput return base_input
['def', 'get_base_input', '(', 'test', '=', 'False', ')', ':', 'from', 'django', '.', 'forms', '.', 'widgets', 'import', 'DateTimeBaseInput', 'if', "'get_context'", 'in', 'dir', '(', 'DateTimeBaseInput', ')', 'and', 'not', 'test', ':', '# django version 1.11 and above', 'base_input', '=', 'DateTimeBaseInput', 'else', ':', '# django version below 1.11', 'from', 'bootstrap_datepicker_plus', '.', '_compatibility', 'import', '(', 'CompatibleDateTimeBaseInput', ')', 'base_input', '=', 'CompatibleDateTimeBaseInput', 'return', 'base_input']
Return DateTimeBaseInput class from django.forms.widgets module Return _compatibility.DateTimeBaseInput class for older django versions.
['Return', 'DateTimeBaseInput', 'class', 'from', 'django', '.', 'forms', '.', 'widgets', 'module']
train
https://github.com/monim67/django-bootstrap-datepicker-plus/blob/55819bf12507c98dba91c702e224afd9bae3ef9a/bootstrap_datepicker_plus/_helpers.py#L4-L20
9,314
inveniosoftware/invenio-records-rest
invenio_records_rest/schemas/fields/datetime.py
DateString._serialize
def _serialize(self, value, attr, obj): """Serialize an ISO8601-formatted date.""" try: return super(DateString, self)._serialize( arrow.get(value).date(), attr, obj) except ParserError: return missing
python
def _serialize(self, value, attr, obj): """Serialize an ISO8601-formatted date.""" try: return super(DateString, self)._serialize( arrow.get(value).date(), attr, obj) except ParserError: return missing
['def', '_serialize', '(', 'self', ',', 'value', ',', 'attr', ',', 'obj', ')', ':', 'try', ':', 'return', 'super', '(', 'DateString', ',', 'self', ')', '.', '_serialize', '(', 'arrow', '.', 'get', '(', 'value', ')', '.', 'date', '(', ')', ',', 'attr', ',', 'obj', ')', 'except', 'ParserError', ':', 'return', 'missing']
Serialize an ISO8601-formatted date.
['Serialize', 'an', 'ISO8601', '-', 'formatted', 'date', '.']
train
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/schemas/fields/datetime.py#L21-L27
9,315
mabuchilab/QNET
src/qnet/utils/properties_for_args.py
properties_for_args
def properties_for_args(cls, arg_names='_arg_names'): """For a class with an attribute `arg_names` containing a list of names, add a property for every name in that list. It is assumed that there is an instance attribute ``self._<arg_name>``, which is returned by the `arg_name` property. The decorator also adds a class attribute :attr:`_has_properties_for_args` that may be used to ensure that a class is decorated. """ from qnet.algebra.core.scalar_algebra import Scalar scalar_args = False if hasattr(cls, '_scalar_args'): scalar_args = cls._scalar_args for arg_name in getattr(cls, arg_names): def get_arg(self, name): val = getattr(self, "_%s" % name) if scalar_args: assert isinstance(val, Scalar) return val prop = property(partial(get_arg, name=arg_name)) doc = "The `%s` argument" % arg_name if scalar_args: doc += ", as a :class:`.Scalar` instance." else: doc += "." prop.__doc__ = doc setattr(cls, arg_name, prop) cls._has_properties_for_args = True return cls
python
def properties_for_args(cls, arg_names='_arg_names'): """For a class with an attribute `arg_names` containing a list of names, add a property for every name in that list. It is assumed that there is an instance attribute ``self._<arg_name>``, which is returned by the `arg_name` property. The decorator also adds a class attribute :attr:`_has_properties_for_args` that may be used to ensure that a class is decorated. """ from qnet.algebra.core.scalar_algebra import Scalar scalar_args = False if hasattr(cls, '_scalar_args'): scalar_args = cls._scalar_args for arg_name in getattr(cls, arg_names): def get_arg(self, name): val = getattr(self, "_%s" % name) if scalar_args: assert isinstance(val, Scalar) return val prop = property(partial(get_arg, name=arg_name)) doc = "The `%s` argument" % arg_name if scalar_args: doc += ", as a :class:`.Scalar` instance." else: doc += "." prop.__doc__ = doc setattr(cls, arg_name, prop) cls._has_properties_for_args = True return cls
['def', 'properties_for_args', '(', 'cls', ',', 'arg_names', '=', "'_arg_names'", ')', ':', 'from', 'qnet', '.', 'algebra', '.', 'core', '.', 'scalar_algebra', 'import', 'Scalar', 'scalar_args', '=', 'False', 'if', 'hasattr', '(', 'cls', ',', "'_scalar_args'", ')', ':', 'scalar_args', '=', 'cls', '.', '_scalar_args', 'for', 'arg_name', 'in', 'getattr', '(', 'cls', ',', 'arg_names', ')', ':', 'def', 'get_arg', '(', 'self', ',', 'name', ')', ':', 'val', '=', 'getattr', '(', 'self', ',', '"_%s"', '%', 'name', ')', 'if', 'scalar_args', ':', 'assert', 'isinstance', '(', 'val', ',', 'Scalar', ')', 'return', 'val', 'prop', '=', 'property', '(', 'partial', '(', 'get_arg', ',', 'name', '=', 'arg_name', ')', ')', 'doc', '=', '"The `%s` argument"', '%', 'arg_name', 'if', 'scalar_args', ':', 'doc', '+=', '", as a :class:`.Scalar` instance."', 'else', ':', 'doc', '+=', '"."', 'prop', '.', '__doc__', '=', 'doc', 'setattr', '(', 'cls', ',', 'arg_name', ',', 'prop', ')', 'cls', '.', '_has_properties_for_args', '=', 'True', 'return', 'cls']
For a class with an attribute `arg_names` containing a list of names, add a property for every name in that list. It is assumed that there is an instance attribute ``self._<arg_name>``, which is returned by the `arg_name` property. The decorator also adds a class attribute :attr:`_has_properties_for_args` that may be used to ensure that a class is decorated.
['For', 'a', 'class', 'with', 'an', 'attribute', 'arg_names', 'containing', 'a', 'list', 'of', 'names', 'add', 'a', 'property', 'for', 'every', 'name', 'in', 'that', 'list', '.']
train
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/utils/properties_for_args.py#L8-L36
9,316
erijo/tellcore-py
tellcore/telldus.py
Device.set_parameter
def set_parameter(self, name, value): """Set a parameter.""" self.lib.tdSetDeviceParameter(self.id, name, str(value))
python
def set_parameter(self, name, value): """Set a parameter.""" self.lib.tdSetDeviceParameter(self.id, name, str(value))
['def', 'set_parameter', '(', 'self', ',', 'name', ',', 'value', ')', ':', 'self', '.', 'lib', '.', 'tdSetDeviceParameter', '(', 'self', '.', 'id', ',', 'name', ',', 'str', '(', 'value', ')', ')']
Set a parameter.
['Set', 'a', 'parameter', '.']
train
https://github.com/erijo/tellcore-py/blob/7a1eb53e12ef039a2350933e502633df7560f6a8/tellcore/telldus.py#L341-L343
9,317
BlueBrain/NeuroM
neurom/io/datawrapper.py
DataWrapper.soma_points
def soma_points(self): '''Get the soma points''' db = self.data_block return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
python
def soma_points(self): '''Get the soma points''' db = self.data_block return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
['def', 'soma_points', '(', 'self', ')', ':', 'db', '=', 'self', '.', 'data_block', 'return', 'db', '[', 'db', '[', ':', ',', 'COLS', '.', 'TYPE', ']', '==', 'POINT_TYPE', '.', 'SOMA', ']']
Get the soma points
['Get', 'the', 'soma', 'points']
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L83-L86
9,318
SCIP-Interfaces/PySCIPOpt
examples/finished/read_tsplib.py
read_atsplib
def read_atsplib(filename): "basic function for reading a ATSP problem on the TSPLIB format" "NOTE: only works for explicit matrices" if filename[-3:] == ".gz": f = gzip.open(filename, 'r') data = f.readlines() else: f = open(filename, 'r') data = f.readlines() for line in data: if line.find("DIMENSION") >= 0: n = int(line.split()[1]) break else: raise IOError("'DIMENSION' keyword not found in file '%s'" % filename) for line in data: if line.find("EDGE_WEIGHT_TYPE") >= 0: if line.split()[1] == "EXPLICIT": break else: raise IOError("'EDGE_WEIGHT_TYPE' is not 'EXPLICIT' in file '%s'" % filename) for k,line in enumerate(data): if line.find("EDGE_WEIGHT_SECTION") >= 0: break else: raise IOError("'EDGE_WEIGHT_SECTION' not found in file '%s'" % filename) c = {} # flatten list of distances dist = [] for line in data[k+1:]: if line.find("EOF") >= 0: break for val in line.split(): dist.append(int(val)) k = 0 for i in range(n): for j in range(n): c[i+1,j+1] = dist[k] k += 1 return n,c
python
def read_atsplib(filename): "basic function for reading a ATSP problem on the TSPLIB format" "NOTE: only works for explicit matrices" if filename[-3:] == ".gz": f = gzip.open(filename, 'r') data = f.readlines() else: f = open(filename, 'r') data = f.readlines() for line in data: if line.find("DIMENSION") >= 0: n = int(line.split()[1]) break else: raise IOError("'DIMENSION' keyword not found in file '%s'" % filename) for line in data: if line.find("EDGE_WEIGHT_TYPE") >= 0: if line.split()[1] == "EXPLICIT": break else: raise IOError("'EDGE_WEIGHT_TYPE' is not 'EXPLICIT' in file '%s'" % filename) for k,line in enumerate(data): if line.find("EDGE_WEIGHT_SECTION") >= 0: break else: raise IOError("'EDGE_WEIGHT_SECTION' not found in file '%s'" % filename) c = {} # flatten list of distances dist = [] for line in data[k+1:]: if line.find("EOF") >= 0: break for val in line.split(): dist.append(int(val)) k = 0 for i in range(n): for j in range(n): c[i+1,j+1] = dist[k] k += 1 return n,c
['def', 'read_atsplib', '(', 'filename', ')', ':', '"NOTE: only works for explicit matrices"', 'if', 'filename', '[', '-', '3', ':', ']', '==', '".gz"', ':', 'f', '=', 'gzip', '.', 'open', '(', 'filename', ',', "'r'", ')', 'data', '=', 'f', '.', 'readlines', '(', ')', 'else', ':', 'f', '=', 'open', '(', 'filename', ',', "'r'", ')', 'data', '=', 'f', '.', 'readlines', '(', ')', 'for', 'line', 'in', 'data', ':', 'if', 'line', '.', 'find', '(', '"DIMENSION"', ')', '>=', '0', ':', 'n', '=', 'int', '(', 'line', '.', 'split', '(', ')', '[', '1', ']', ')', 'break', 'else', ':', 'raise', 'IOError', '(', '"\'DIMENSION\' keyword not found in file \'%s\'"', '%', 'filename', ')', 'for', 'line', 'in', 'data', ':', 'if', 'line', '.', 'find', '(', '"EDGE_WEIGHT_TYPE"', ')', '>=', '0', ':', 'if', 'line', '.', 'split', '(', ')', '[', '1', ']', '==', '"EXPLICIT"', ':', 'break', 'else', ':', 'raise', 'IOError', '(', '"\'EDGE_WEIGHT_TYPE\' is not \'EXPLICIT\' in file \'%s\'"', '%', 'filename', ')', 'for', 'k', ',', 'line', 'in', 'enumerate', '(', 'data', ')', ':', 'if', 'line', '.', 'find', '(', '"EDGE_WEIGHT_SECTION"', ')', '>=', '0', ':', 'break', 'else', ':', 'raise', 'IOError', '(', '"\'EDGE_WEIGHT_SECTION\' not found in file \'%s\'"', '%', 'filename', ')', 'c', '=', '{', '}', '# flatten list of distances', 'dist', '=', '[', ']', 'for', 'line', 'in', 'data', '[', 'k', '+', '1', ':', ']', ':', 'if', 'line', '.', 'find', '(', '"EOF"', ')', '>=', '0', ':', 'break', 'for', 'val', 'in', 'line', '.', 'split', '(', ')', ':', 'dist', '.', 'append', '(', 'int', '(', 'val', ')', ')', 'k', '=', '0', 'for', 'i', 'in', 'range', '(', 'n', ')', ':', 'for', 'j', 'in', 'range', '(', 'n', ')', ':', 'c', '[', 'i', '+', '1', ',', 'j', '+', '1', ']', '=', 'dist', '[', 'k', ']', 'k', '+=', '1', 'return', 'n', ',', 'c']
basic function for reading a ATSP problem on the TSPLIB format
['basic', 'function', 'for', 'reading', 'a', 'ATSP', 'problem', 'on', 'the', 'TSPLIB', 'format']
train
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/finished/read_tsplib.py#L216-L262
9,319
MolSSI-BSE/basis_set_exchange
basis_set_exchange/curate/compare.py
_reldiff
def _reldiff(a, b): """ Computes the relative difference of two floating-point numbers rel = abs(a-b)/min(abs(a), abs(b)) If a == 0 and b == 0, then 0.0 is returned Otherwise if a or b is 0.0, inf is returned. """ a = float(a) b = float(b) aa = abs(a) ba = abs(b) if a == 0.0 and b == 0.0: return 0.0 elif a == 0 or b == 0.0: return float('inf') return abs(a - b) / min(aa, ba)
python
def _reldiff(a, b): """ Computes the relative difference of two floating-point numbers rel = abs(a-b)/min(abs(a), abs(b)) If a == 0 and b == 0, then 0.0 is returned Otherwise if a or b is 0.0, inf is returned. """ a = float(a) b = float(b) aa = abs(a) ba = abs(b) if a == 0.0 and b == 0.0: return 0.0 elif a == 0 or b == 0.0: return float('inf') return abs(a - b) / min(aa, ba)
['def', '_reldiff', '(', 'a', ',', 'b', ')', ':', 'a', '=', 'float', '(', 'a', ')', 'b', '=', 'float', '(', 'b', ')', 'aa', '=', 'abs', '(', 'a', ')', 'ba', '=', 'abs', '(', 'b', ')', 'if', 'a', '==', '0.0', 'and', 'b', '==', '0.0', ':', 'return', '0.0', 'elif', 'a', '==', '0', 'or', 'b', '==', '0.0', ':', 'return', 'float', '(', "'inf'", ')', 'return', 'abs', '(', 'a', '-', 'b', ')', '/', 'min', '(', 'aa', ',', 'ba', ')']
Computes the relative difference of two floating-point numbers rel = abs(a-b)/min(abs(a), abs(b)) If a == 0 and b == 0, then 0.0 is returned Otherwise if a or b is 0.0, inf is returned.
['Computes', 'the', 'relative', 'difference', 'of', 'two', 'floating', '-', 'point', 'numbers']
train
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/curate/compare.py#L9-L29
9,320
ucsb-cs-education/hairball
hairball/plugins/__init__.py
HairballPlugin.get_broadcast_events
def get_broadcast_events(cls, script): """Return a Counter of event-names that were broadcast. The Count will contain the key True if any of the broadcast blocks contain a parameter that is a variable. """ events = Counter() for name, _, block in cls.iter_blocks(script): if 'broadcast %s' in name: if isinstance(block.args[0], kurt.Block): events[True] += 1 else: events[block.args[0].lower()] += 1 return events
python
def get_broadcast_events(cls, script): """Return a Counter of event-names that were broadcast. The Count will contain the key True if any of the broadcast blocks contain a parameter that is a variable. """ events = Counter() for name, _, block in cls.iter_blocks(script): if 'broadcast %s' in name: if isinstance(block.args[0], kurt.Block): events[True] += 1 else: events[block.args[0].lower()] += 1 return events
['def', 'get_broadcast_events', '(', 'cls', ',', 'script', ')', ':', 'events', '=', 'Counter', '(', ')', 'for', 'name', ',', '_', ',', 'block', 'in', 'cls', '.', 'iter_blocks', '(', 'script', ')', ':', 'if', "'broadcast %s'", 'in', 'name', ':', 'if', 'isinstance', '(', 'block', '.', 'args', '[', '0', ']', ',', 'kurt', '.', 'Block', ')', ':', 'events', '[', 'True', ']', '+=', '1', 'else', ':', 'events', '[', 'block', '.', 'args', '[', '0', ']', '.', 'lower', '(', ')', ']', '+=', '1', 'return', 'events']
Return a Counter of event-names that were broadcast. The Count will contain the key True if any of the broadcast blocks contain a parameter that is a variable.
['Return', 'a', 'Counter', 'of', 'event', '-', 'names', 'that', 'were', 'broadcast', '.']
train
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/__init__.py#L120-L134
9,321
yjzhang/uncurl_python
uncurl/nb_clustering.py
log_ncr
def log_ncr(a, b): """ Returns log(nCr(a,b)), given that b<a. Does not assume that a and b are integers (uses log-gamma). """ val = gammaln(a+1) - gammaln(a-b+1) - gammaln(b+1) return val
python
def log_ncr(a, b): """ Returns log(nCr(a,b)), given that b<a. Does not assume that a and b are integers (uses log-gamma). """ val = gammaln(a+1) - gammaln(a-b+1) - gammaln(b+1) return val
['def', 'log_ncr', '(', 'a', ',', 'b', ')', ':', 'val', '=', 'gammaln', '(', 'a', '+', '1', ')', '-', 'gammaln', '(', 'a', '-', 'b', '+', '1', ')', '-', 'gammaln', '(', 'b', '+', '1', ')', 'return', 'val']
Returns log(nCr(a,b)), given that b<a. Does not assume that a and b are integers (uses log-gamma).
['Returns', 'log', '(', 'nCr', '(', 'a', 'b', '))', 'given', 'that', 'b<a', '.', 'Does', 'not', 'assume', 'that', 'a', 'and', 'b', 'are', 'integers', '(', 'uses', 'log', '-', 'gamma', ')', '.']
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L22-L28
9,322
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/service_hooks/service_hooks_client.py
ServiceHooksClient.list_subscriptions
def list_subscriptions(self, publisher_id=None, event_type=None, consumer_id=None, consumer_action_id=None): """ListSubscriptions. Get a list of subscriptions. :param str publisher_id: ID for a subscription. :param str event_type: Maximum number of notifications to return. Default is 100. :param str consumer_id: ID for a consumer. :param str consumer_action_id: ID for a consumerActionId. :rtype: [Subscription] """ query_parameters = {} if publisher_id is not None: query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str') if event_type is not None: query_parameters['eventType'] = self._serialize.query('event_type', event_type, 'str') if consumer_id is not None: query_parameters['consumerId'] = self._serialize.query('consumer_id', consumer_id, 'str') if consumer_action_id is not None: query_parameters['consumerActionId'] = self._serialize.query('consumer_action_id', consumer_action_id, 'str') response = self._send(http_method='GET', location_id='fc50d02a-849f-41fb-8af1-0a5216103269', version='5.0', query_parameters=query_parameters) return self._deserialize('[Subscription]', self._unwrap_collection(response))
python
def list_subscriptions(self, publisher_id=None, event_type=None, consumer_id=None, consumer_action_id=None): """ListSubscriptions. Get a list of subscriptions. :param str publisher_id: ID for a subscription. :param str event_type: Maximum number of notifications to return. Default is 100. :param str consumer_id: ID for a consumer. :param str consumer_action_id: ID for a consumerActionId. :rtype: [Subscription] """ query_parameters = {} if publisher_id is not None: query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str') if event_type is not None: query_parameters['eventType'] = self._serialize.query('event_type', event_type, 'str') if consumer_id is not None: query_parameters['consumerId'] = self._serialize.query('consumer_id', consumer_id, 'str') if consumer_action_id is not None: query_parameters['consumerActionId'] = self._serialize.query('consumer_action_id', consumer_action_id, 'str') response = self._send(http_method='GET', location_id='fc50d02a-849f-41fb-8af1-0a5216103269', version='5.0', query_parameters=query_parameters) return self._deserialize('[Subscription]', self._unwrap_collection(response))
['def', 'list_subscriptions', '(', 'self', ',', 'publisher_id', '=', 'None', ',', 'event_type', '=', 'None', ',', 'consumer_id', '=', 'None', ',', 'consumer_action_id', '=', 'None', ')', ':', 'query_parameters', '=', '{', '}', 'if', 'publisher_id', 'is', 'not', 'None', ':', 'query_parameters', '[', "'publisherId'", ']', '=', 'self', '.', '_serialize', '.', 'query', '(', "'publisher_id'", ',', 'publisher_id', ',', "'str'", ')', 'if', 'event_type', 'is', 'not', 'None', ':', 'query_parameters', '[', "'eventType'", ']', '=', 'self', '.', '_serialize', '.', 'query', '(', "'event_type'", ',', 'event_type', ',', "'str'", ')', 'if', 'consumer_id', 'is', 'not', 'None', ':', 'query_parameters', '[', "'consumerId'", ']', '=', 'self', '.', '_serialize', '.', 'query', '(', "'consumer_id'", ',', 'consumer_id', ',', "'str'", ')', 'if', 'consumer_action_id', 'is', 'not', 'None', ':', 'query_parameters', '[', "'consumerActionId'", ']', '=', 'self', '.', '_serialize', '.', 'query', '(', "'consumer_action_id'", ',', 'consumer_action_id', ',', "'str'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'GET'", ',', 'location_id', '=', "'fc50d02a-849f-41fb-8af1-0a5216103269'", ',', 'version', '=', "'5.0'", ',', 'query_parameters', '=', 'query_parameters', ')', 'return', 'self', '.', '_deserialize', '(', "'[Subscription]'", ',', 'self', '.', '_unwrap_collection', '(', 'response', ')', ')']
ListSubscriptions. Get a list of subscriptions. :param str publisher_id: ID for a subscription. :param str event_type: Maximum number of notifications to return. Default is 100. :param str consumer_id: ID for a consumer. :param str consumer_action_id: ID for a consumerActionId. :rtype: [Subscription]
['ListSubscriptions', '.', 'Get', 'a', 'list', 'of', 'subscriptions', '.', ':', 'param', 'str', 'publisher_id', ':', 'ID', 'for', 'a', 'subscription', '.', ':', 'param', 'str', 'event_type', ':', 'Maximum', 'number', 'of', 'notifications', 'to', 'return', '.', 'Default', 'is', '100', '.', ':', 'param', 'str', 'consumer_id', ':', 'ID', 'for', 'a', 'consumer', '.', ':', 'param', 'str', 'consumer_action_id', ':', 'ID', 'for', 'a', 'consumerActionId', '.', ':', 'rtype', ':', '[', 'Subscription', ']']
train
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/service_hooks/service_hooks_client.py#L325-L347
9,323
pywbem/pywbem
pywbem/cim_operations.py
WBEMConnection.IterReferenceInstancePaths
def IterReferenceInstancePaths(self, InstanceName, ResultClass=None, Role=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_ITER_MAXOBJECTCOUNT, **extra): # pylint: disable=invalid-name """ Retrieve the instance paths of the association instances that reference a source instance, using the Python :term:`py:generator` idiom to return the result. *New in pywbem 0.10 as experimental and finalized in 0.12.* This method uses the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. This method is a generator function that retrieves instance paths from the WBEM server and returns them one by one (using :keyword:`yield`) when the caller iterates through the returned generator object. The number of instance paths that are retrieved from the WBEM server in one request (and thus need to be materialized in this method) is up to the `MaxObjectCount` parameter if the corresponding pull operations are used, or the complete result set all at once if the corresponding traditional operation is used. By default, this method attempts to perform the corresponding pull operations (:meth:`~pywbem.WBEMConnection.OpenReferenceInstancePaths` and :meth:`~pywbem.WBEMConnection.PullInstancePaths`). If these pull operations are not supported by the WBEM server, this method falls back to using the corresponding traditional operation (:meth:`~pywbem.WBEMConnection.ReferenceNames`). Whether the WBEM server supports these pull operations is remembered in the :class:`~pywbem.WBEMConnection` object (by operation type), and avoids unnecessary attempts to try these pull operations on that connection in the future. The `use_pull_operations` init parameter of :class:`~pywbem.WBEMConnection` can be used to control the preference for always using pull operations, always using traditional operations, or using pull operations if supported by the WBEM server (the default). This method provides all of the controls of the corresponding pull operations except for the ability to set different response sizes on each request; the response size (defined by the `MaxObjectCount` parameter) is the same for all pull operations in the enumeration session. In addition, some functionality is only available if the corresponding pull operations are used by this method: * Filtering is not supported for the corresponding traditional operation so that setting the `FilterQuery` or `FilterQueryLanguage` parameters will be rejected if the corresponding traditional operation is used by this method. Note that this limitation is not a disadvantage compared to using the corresponding pull operations directly, because in both cases, the WBEM server must support the pull operations and their filtering capability in order for the filtering to work. * Setting the `ContinueOnError` parameter to `True` will be rejected if the corresponding traditional operation is used by this method. The enumeration session that is opened with the WBEM server when using pull operations is closed automatically when the returned generator object is exhausted, or when the generator object is closed using its :meth:`~py:generator.close` method (which may also be called before the generator is exhausted). Parameters: InstanceName (:class:`~pywbem.CIMInstanceName`): The instance path of the source instance. If this object does not specify a namespace, the default namespace of the connection is used. Its `host` attribute will be ignored. ResultClass (:term:`string` or :class:`~pywbem.CIMClassName`): Class name of an association class (case independent), to filter the result to include only traversals of that association class (or subclasses). `None` means that no such filtering is peformed. Role (:term:`string`): Role name (= property name) of the source end (case independent), to filter the result to include only traversals from that source role. `None` means that no such filtering is peformed. FilterQueryLanguage (:term:`string`): The name of the filter query language used for the `FilterQuery` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". If this parameter is not `None` and the traditional operation is used by this method, :exc:`~py:exceptions.ValueError` will be raised. Not all WBEM servers support filtering for this operation because it returns instance paths and the act of the server filtering requires that it generate instances just for that purpose and then discard them. FilterQuery (:term:`string`): The filter query in the query language defined by the `FilterQueryLanguage` parameter. If this parameter is not `None` and the traditional operation is used by this method, :exc:`~py:exceptions.ValueError` will be raised. OperationTimeout (:class:`~pywbem.Uint32`): Minimum time in seconds the WBEM Server shall maintain an open enumeration session after a previous Open or Pull request is sent to the client. Once this timeout time has expired, the WBEM server may close the enumeration session. * If not `None`, this parameter is sent to the WBEM server as the proposed timeout for the enumeration session. A value of 0 indicates that the server is expected to never time out. The server may reject the proposed value, causing a :class:`~pywbem.CIMError` to be raised with status code :attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default timeout to be used. ContinueOnError (:class:`py:bool`): Indicates to the WBEM server to continue sending responses after an error response has been sent. * If `True`, the server is to continue sending responses after sending an error response. Not all servers support continuation on error; a server that does not support it must send an error response if `True` was specified, causing :class:`~pywbem.CIMError` to be raised with status code :attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`. If the corresponding traditional operation is used by this method, :exc:`~py:exceptions.ValueError` will be raised. * If `False`, the server is requested to close the enumeration after sending an error response. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default behaviour to be used. :term:`DSP0200` defines that the server-implemented default is `False`. MaxObjectCount (:class:`~pywbem.Uint32`) Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. * If positive, the WBEM server is to return no more than the specified number of instances. * Zero is not allowed; it would mean that zero instances are to be returned for open and all pull requests issued to the server. * The default is defined as a system config variable. * `None` is not allowed. **extra : Additional keyword arguments are passed as additional operation parameters to the WBEM server. Note that :term:`DSP0200` does not define any additional parameters for this operation. Returns: :term:`py:generator` iterating :class:`~pywbem.CIMInstanceName`: A generator object that iterates the resulting CIM instance paths. These instance paths have their host and namespace components set. Raises: Exceptions described in :class:`~pywbem.WBEMConnection`. Example:: paths_generator = conn.IterReferenceInstancePaths('CIM_Blah') for path in paths_generator: print('path {0}'.format(path)) """ _validateIterCommonParams(MaxObjectCount, OperationTimeout) # Common variable for pull result tuple used by pulls and finally: pull_result = None try: # try / finally block to allow iter.close() if (self._use_ref_path_pull_operations is None or self._use_ref_path_pull_operations): try: # Open operation try block pull_result = self.OpenReferenceInstancePaths( InstanceName, ResultClass=ResultClass, Role=Role, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount, **extra) # Open operation succeeded; set use_pull flag self._use_ref_path_pull_operations = True for inst in pull_result.paths: yield inst # Loop to pull while more while eos not returned. while not pull_result.eos: pull_result = self.PullInstancePaths( pull_result.context, MaxObjectCount=MaxObjectCount) for inst in pull_result.paths: yield inst pull_result = None # clear the pull_result return # If NOT_SUPPORTED and first request, set flag and try # alternative request operation. # If use_pull_operations is True, always raise the exception except CIMError as ce: if (self._use_ref_path_pull_operations is None and ce.status_code == CIM_ERR_NOT_SUPPORTED): self._use_ref_path_pull_operations = False else: raise # Alternate request if Pull not implemented. This does not allow # the FilterQuery or ContinueOnError assert self._use_ref_path_pull_operations is False if FilterQuery is not None or FilterQueryLanguage is not None: raise ValueError('ReferenceInstanceNnames does not support' ' FilterQuery.') if ContinueOnError is not None: raise ValueError('ReferenceInstanceNames does not support ' 'ContinueOnError.') enum_rslt = self.ReferenceNames( InstanceName, ResultClass=ResultClass, Role=Role, **extra) for inst in enum_rslt: yield inst # Cleanup if caller closess the iterator before exhausting it finally: # Cleanup only required if the pull context is open and not complete if pull_result is not None and not pull_result.eos: self.CloseEnumeration(pull_result.context) pull_result = None
python
def IterReferenceInstancePaths(self, InstanceName, ResultClass=None, Role=None, FilterQueryLanguage=None, FilterQuery=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_ITER_MAXOBJECTCOUNT, **extra): # pylint: disable=invalid-name """ Retrieve the instance paths of the association instances that reference a source instance, using the Python :term:`py:generator` idiom to return the result. *New in pywbem 0.10 as experimental and finalized in 0.12.* This method uses the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. This method is a generator function that retrieves instance paths from the WBEM server and returns them one by one (using :keyword:`yield`) when the caller iterates through the returned generator object. The number of instance paths that are retrieved from the WBEM server in one request (and thus need to be materialized in this method) is up to the `MaxObjectCount` parameter if the corresponding pull operations are used, or the complete result set all at once if the corresponding traditional operation is used. By default, this method attempts to perform the corresponding pull operations (:meth:`~pywbem.WBEMConnection.OpenReferenceInstancePaths` and :meth:`~pywbem.WBEMConnection.PullInstancePaths`). If these pull operations are not supported by the WBEM server, this method falls back to using the corresponding traditional operation (:meth:`~pywbem.WBEMConnection.ReferenceNames`). Whether the WBEM server supports these pull operations is remembered in the :class:`~pywbem.WBEMConnection` object (by operation type), and avoids unnecessary attempts to try these pull operations on that connection in the future. The `use_pull_operations` init parameter of :class:`~pywbem.WBEMConnection` can be used to control the preference for always using pull operations, always using traditional operations, or using pull operations if supported by the WBEM server (the default). This method provides all of the controls of the corresponding pull operations except for the ability to set different response sizes on each request; the response size (defined by the `MaxObjectCount` parameter) is the same for all pull operations in the enumeration session. In addition, some functionality is only available if the corresponding pull operations are used by this method: * Filtering is not supported for the corresponding traditional operation so that setting the `FilterQuery` or `FilterQueryLanguage` parameters will be rejected if the corresponding traditional operation is used by this method. Note that this limitation is not a disadvantage compared to using the corresponding pull operations directly, because in both cases, the WBEM server must support the pull operations and their filtering capability in order for the filtering to work. * Setting the `ContinueOnError` parameter to `True` will be rejected if the corresponding traditional operation is used by this method. The enumeration session that is opened with the WBEM server when using pull operations is closed automatically when the returned generator object is exhausted, or when the generator object is closed using its :meth:`~py:generator.close` method (which may also be called before the generator is exhausted). Parameters: InstanceName (:class:`~pywbem.CIMInstanceName`): The instance path of the source instance. If this object does not specify a namespace, the default namespace of the connection is used. Its `host` attribute will be ignored. ResultClass (:term:`string` or :class:`~pywbem.CIMClassName`): Class name of an association class (case independent), to filter the result to include only traversals of that association class (or subclasses). `None` means that no such filtering is peformed. Role (:term:`string`): Role name (= property name) of the source end (case independent), to filter the result to include only traversals from that source role. `None` means that no such filtering is peformed. FilterQueryLanguage (:term:`string`): The name of the filter query language used for the `FilterQuery` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". If this parameter is not `None` and the traditional operation is used by this method, :exc:`~py:exceptions.ValueError` will be raised. Not all WBEM servers support filtering for this operation because it returns instance paths and the act of the server filtering requires that it generate instances just for that purpose and then discard them. FilterQuery (:term:`string`): The filter query in the query language defined by the `FilterQueryLanguage` parameter. If this parameter is not `None` and the traditional operation is used by this method, :exc:`~py:exceptions.ValueError` will be raised. OperationTimeout (:class:`~pywbem.Uint32`): Minimum time in seconds the WBEM Server shall maintain an open enumeration session after a previous Open or Pull request is sent to the client. Once this timeout time has expired, the WBEM server may close the enumeration session. * If not `None`, this parameter is sent to the WBEM server as the proposed timeout for the enumeration session. A value of 0 indicates that the server is expected to never time out. The server may reject the proposed value, causing a :class:`~pywbem.CIMError` to be raised with status code :attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default timeout to be used. ContinueOnError (:class:`py:bool`): Indicates to the WBEM server to continue sending responses after an error response has been sent. * If `True`, the server is to continue sending responses after sending an error response. Not all servers support continuation on error; a server that does not support it must send an error response if `True` was specified, causing :class:`~pywbem.CIMError` to be raised with status code :attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`. If the corresponding traditional operation is used by this method, :exc:`~py:exceptions.ValueError` will be raised. * If `False`, the server is requested to close the enumeration after sending an error response. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default behaviour to be used. :term:`DSP0200` defines that the server-implemented default is `False`. MaxObjectCount (:class:`~pywbem.Uint32`) Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. * If positive, the WBEM server is to return no more than the specified number of instances. * Zero is not allowed; it would mean that zero instances are to be returned for open and all pull requests issued to the server. * The default is defined as a system config variable. * `None` is not allowed. **extra : Additional keyword arguments are passed as additional operation parameters to the WBEM server. Note that :term:`DSP0200` does not define any additional parameters for this operation. Returns: :term:`py:generator` iterating :class:`~pywbem.CIMInstanceName`: A generator object that iterates the resulting CIM instance paths. These instance paths have their host and namespace components set. Raises: Exceptions described in :class:`~pywbem.WBEMConnection`. Example:: paths_generator = conn.IterReferenceInstancePaths('CIM_Blah') for path in paths_generator: print('path {0}'.format(path)) """ _validateIterCommonParams(MaxObjectCount, OperationTimeout) # Common variable for pull result tuple used by pulls and finally: pull_result = None try: # try / finally block to allow iter.close() if (self._use_ref_path_pull_operations is None or self._use_ref_path_pull_operations): try: # Open operation try block pull_result = self.OpenReferenceInstancePaths( InstanceName, ResultClass=ResultClass, Role=Role, FilterQueryLanguage=FilterQueryLanguage, FilterQuery=FilterQuery, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount, **extra) # Open operation succeeded; set use_pull flag self._use_ref_path_pull_operations = True for inst in pull_result.paths: yield inst # Loop to pull while more while eos not returned. while not pull_result.eos: pull_result = self.PullInstancePaths( pull_result.context, MaxObjectCount=MaxObjectCount) for inst in pull_result.paths: yield inst pull_result = None # clear the pull_result return # If NOT_SUPPORTED and first request, set flag and try # alternative request operation. # If use_pull_operations is True, always raise the exception except CIMError as ce: if (self._use_ref_path_pull_operations is None and ce.status_code == CIM_ERR_NOT_SUPPORTED): self._use_ref_path_pull_operations = False else: raise # Alternate request if Pull not implemented. This does not allow # the FilterQuery or ContinueOnError assert self._use_ref_path_pull_operations is False if FilterQuery is not None or FilterQueryLanguage is not None: raise ValueError('ReferenceInstanceNnames does not support' ' FilterQuery.') if ContinueOnError is not None: raise ValueError('ReferenceInstanceNames does not support ' 'ContinueOnError.') enum_rslt = self.ReferenceNames( InstanceName, ResultClass=ResultClass, Role=Role, **extra) for inst in enum_rslt: yield inst # Cleanup if caller closess the iterator before exhausting it finally: # Cleanup only required if the pull context is open and not complete if pull_result is not None and not pull_result.eos: self.CloseEnumeration(pull_result.context) pull_result = None
['def', 'IterReferenceInstancePaths', '(', 'self', ',', 'InstanceName', ',', 'ResultClass', '=', 'None', ',', 'Role', '=', 'None', ',', 'FilterQueryLanguage', '=', 'None', ',', 'FilterQuery', '=', 'None', ',', 'OperationTimeout', '=', 'None', ',', 'ContinueOnError', '=', 'None', ',', 'MaxObjectCount', '=', 'DEFAULT_ITER_MAXOBJECTCOUNT', ',', '*', '*', 'extra', ')', ':', '# pylint: disable=invalid-name', '_validateIterCommonParams', '(', 'MaxObjectCount', ',', 'OperationTimeout', ')', '# Common variable for pull result tuple used by pulls and finally:', 'pull_result', '=', 'None', 'try', ':', '# try / finally block to allow iter.close()', 'if', '(', 'self', '.', '_use_ref_path_pull_operations', 'is', 'None', 'or', 'self', '.', '_use_ref_path_pull_operations', ')', ':', 'try', ':', '# Open operation try block', 'pull_result', '=', 'self', '.', 'OpenReferenceInstancePaths', '(', 'InstanceName', ',', 'ResultClass', '=', 'ResultClass', ',', 'Role', '=', 'Role', ',', 'FilterQueryLanguage', '=', 'FilterQueryLanguage', ',', 'FilterQuery', '=', 'FilterQuery', ',', 'OperationTimeout', '=', 'OperationTimeout', ',', 'ContinueOnError', '=', 'ContinueOnError', ',', 'MaxObjectCount', '=', 'MaxObjectCount', ',', '*', '*', 'extra', ')', '# Open operation succeeded; set use_pull flag', 'self', '.', '_use_ref_path_pull_operations', '=', 'True', 'for', 'inst', 'in', 'pull_result', '.', 'paths', ':', 'yield', 'inst', '# Loop to pull while more while eos not returned.', 'while', 'not', 'pull_result', '.', 'eos', ':', 'pull_result', '=', 'self', '.', 'PullInstancePaths', '(', 'pull_result', '.', 'context', ',', 'MaxObjectCount', '=', 'MaxObjectCount', ')', 'for', 'inst', 'in', 'pull_result', '.', 'paths', ':', 'yield', 'inst', 'pull_result', '=', 'None', '# clear the pull_result', 'return', '# If NOT_SUPPORTED and first request, set flag and try', '# alternative request operation.', '# If use_pull_operations is True, always raise the exception', 'except', 'CIMError', 'as', 'ce', ':', 'if', '(', 'self', '.', '_use_ref_path_pull_operations', 'is', 'None', 'and', 'ce', '.', 'status_code', '==', 'CIM_ERR_NOT_SUPPORTED', ')', ':', 'self', '.', '_use_ref_path_pull_operations', '=', 'False', 'else', ':', 'raise', '# Alternate request if Pull not implemented. This does not allow', '# the FilterQuery or ContinueOnError', 'assert', 'self', '.', '_use_ref_path_pull_operations', 'is', 'False', 'if', 'FilterQuery', 'is', 'not', 'None', 'or', 'FilterQueryLanguage', 'is', 'not', 'None', ':', 'raise', 'ValueError', '(', "'ReferenceInstanceNnames does not support'", "' FilterQuery.'", ')', 'if', 'ContinueOnError', 'is', 'not', 'None', ':', 'raise', 'ValueError', '(', "'ReferenceInstanceNames does not support '", "'ContinueOnError.'", ')', 'enum_rslt', '=', 'self', '.', 'ReferenceNames', '(', 'InstanceName', ',', 'ResultClass', '=', 'ResultClass', ',', 'Role', '=', 'Role', ',', '*', '*', 'extra', ')', 'for', 'inst', 'in', 'enum_rslt', ':', 'yield', 'inst', '# Cleanup if caller closess the iterator before exhausting it', 'finally', ':', '# Cleanup only required if the pull context is open and not complete', 'if', 'pull_result', 'is', 'not', 'None', 'and', 'not', 'pull_result', '.', 'eos', ':', 'self', '.', 'CloseEnumeration', '(', 'pull_result', '.', 'context', ')', 'pull_result', '=', 'None']
Retrieve the instance paths of the association instances that reference a source instance, using the Python :term:`py:generator` idiom to return the result. *New in pywbem 0.10 as experimental and finalized in 0.12.* This method uses the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. This method is a generator function that retrieves instance paths from the WBEM server and returns them one by one (using :keyword:`yield`) when the caller iterates through the returned generator object. The number of instance paths that are retrieved from the WBEM server in one request (and thus need to be materialized in this method) is up to the `MaxObjectCount` parameter if the corresponding pull operations are used, or the complete result set all at once if the corresponding traditional operation is used. By default, this method attempts to perform the corresponding pull operations (:meth:`~pywbem.WBEMConnection.OpenReferenceInstancePaths` and :meth:`~pywbem.WBEMConnection.PullInstancePaths`). If these pull operations are not supported by the WBEM server, this method falls back to using the corresponding traditional operation (:meth:`~pywbem.WBEMConnection.ReferenceNames`). Whether the WBEM server supports these pull operations is remembered in the :class:`~pywbem.WBEMConnection` object (by operation type), and avoids unnecessary attempts to try these pull operations on that connection in the future. The `use_pull_operations` init parameter of :class:`~pywbem.WBEMConnection` can be used to control the preference for always using pull operations, always using traditional operations, or using pull operations if supported by the WBEM server (the default). This method provides all of the controls of the corresponding pull operations except for the ability to set different response sizes on each request; the response size (defined by the `MaxObjectCount` parameter) is the same for all pull operations in the enumeration session. In addition, some functionality is only available if the corresponding pull operations are used by this method: * Filtering is not supported for the corresponding traditional operation so that setting the `FilterQuery` or `FilterQueryLanguage` parameters will be rejected if the corresponding traditional operation is used by this method. Note that this limitation is not a disadvantage compared to using the corresponding pull operations directly, because in both cases, the WBEM server must support the pull operations and their filtering capability in order for the filtering to work. * Setting the `ContinueOnError` parameter to `True` will be rejected if the corresponding traditional operation is used by this method. The enumeration session that is opened with the WBEM server when using pull operations is closed automatically when the returned generator object is exhausted, or when the generator object is closed using its :meth:`~py:generator.close` method (which may also be called before the generator is exhausted). Parameters: InstanceName (:class:`~pywbem.CIMInstanceName`): The instance path of the source instance. If this object does not specify a namespace, the default namespace of the connection is used. Its `host` attribute will be ignored. ResultClass (:term:`string` or :class:`~pywbem.CIMClassName`): Class name of an association class (case independent), to filter the result to include only traversals of that association class (or subclasses). `None` means that no such filtering is peformed. Role (:term:`string`): Role name (= property name) of the source end (case independent), to filter the result to include only traversals from that source role. `None` means that no such filtering is peformed. FilterQueryLanguage (:term:`string`): The name of the filter query language used for the `FilterQuery` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". If this parameter is not `None` and the traditional operation is used by this method, :exc:`~py:exceptions.ValueError` will be raised. Not all WBEM servers support filtering for this operation because it returns instance paths and the act of the server filtering requires that it generate instances just for that purpose and then discard them. FilterQuery (:term:`string`): The filter query in the query language defined by the `FilterQueryLanguage` parameter. If this parameter is not `None` and the traditional operation is used by this method, :exc:`~py:exceptions.ValueError` will be raised. OperationTimeout (:class:`~pywbem.Uint32`): Minimum time in seconds the WBEM Server shall maintain an open enumeration session after a previous Open or Pull request is sent to the client. Once this timeout time has expired, the WBEM server may close the enumeration session. * If not `None`, this parameter is sent to the WBEM server as the proposed timeout for the enumeration session. A value of 0 indicates that the server is expected to never time out. The server may reject the proposed value, causing a :class:`~pywbem.CIMError` to be raised with status code :attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default timeout to be used. ContinueOnError (:class:`py:bool`): Indicates to the WBEM server to continue sending responses after an error response has been sent. * If `True`, the server is to continue sending responses after sending an error response. Not all servers support continuation on error; a server that does not support it must send an error response if `True` was specified, causing :class:`~pywbem.CIMError` to be raised with status code :attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`. If the corresponding traditional operation is used by this method, :exc:`~py:exceptions.ValueError` will be raised. * If `False`, the server is requested to close the enumeration after sending an error response. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default behaviour to be used. :term:`DSP0200` defines that the server-implemented default is `False`. MaxObjectCount (:class:`~pywbem.Uint32`) Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. * If positive, the WBEM server is to return no more than the specified number of instances. * Zero is not allowed; it would mean that zero instances are to be returned for open and all pull requests issued to the server. * The default is defined as a system config variable. * `None` is not allowed. **extra : Additional keyword arguments are passed as additional operation parameters to the WBEM server. Note that :term:`DSP0200` does not define any additional parameters for this operation. Returns: :term:`py:generator` iterating :class:`~pywbem.CIMInstanceName`: A generator object that iterates the resulting CIM instance paths. These instance paths have their host and namespace components set. Raises: Exceptions described in :class:`~pywbem.WBEMConnection`. Example:: paths_generator = conn.IterReferenceInstancePaths('CIM_Blah') for path in paths_generator: print('path {0}'.format(path))
['Retrieve', 'the', 'instance', 'paths', 'of', 'the', 'association', 'instances', 'that', 'reference', 'a', 'source', 'instance', 'using', 'the', 'Python', ':', 'term', ':', 'py', ':', 'generator', 'idiom', 'to', 'return', 'the', 'result', '.']
train
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_operations.py#L5768-L6026
9,324
FNNDSC/pfmisc
pfmisc/C_snode.py
C_snode.printPre
def printPre(self, *args): """ get/set the str_pre string. """ if len(args): self.b_printPre = args[0] else: return self.b_printPre
python
def printPre(self, *args): """ get/set the str_pre string. """ if len(args): self.b_printPre = args[0] else: return self.b_printPre
['def', 'printPre', '(', 'self', ',', '*', 'args', ')', ':', 'if', 'len', '(', 'args', ')', ':', 'self', '.', 'b_printPre', '=', 'args', '[', '0', ']', 'else', ':', 'return', 'self', '.', 'b_printPre']
get/set the str_pre string.
['get', '/', 'set', 'the', 'str_pre', 'string', '.']
train
https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L222-L229
9,325
iotile/coretools
transport_plugins/native_ble/iotile_transport_native_ble/virtual_ble.py
NativeBLEVirtualInterface._send_rpc_response
def _send_rpc_response(self, *packets): """Send an RPC response. It is executed in the baBLE working thread: should not be blocking. The RPC response is notified in one or two packets depending on whether or not response data is included. If there is a temporary error sending one of the packets it is retried automatically. If there is a permanent error, it is logged and the response is abandoned. """ if len(packets) == 0: return handle, payload = packets[0] try: self._send_notification(handle, payload) except bable_interface.BaBLEException as err: if err.packet.status == 'Rejected': # If we are streaming too fast, back off and try again time.sleep(0.05) self._defer(self._send_rpc_response, list(packets)) else: self._audit('ErrorSendingRPCResponse') self._logger.exception("Error while sending RPC response, handle=%s, payload=%s", handle, payload) return if len(packets) > 1: self._defer(self._send_rpc_response, list(packets[1:]))
python
def _send_rpc_response(self, *packets): """Send an RPC response. It is executed in the baBLE working thread: should not be blocking. The RPC response is notified in one or two packets depending on whether or not response data is included. If there is a temporary error sending one of the packets it is retried automatically. If there is a permanent error, it is logged and the response is abandoned. """ if len(packets) == 0: return handle, payload = packets[0] try: self._send_notification(handle, payload) except bable_interface.BaBLEException as err: if err.packet.status == 'Rejected': # If we are streaming too fast, back off and try again time.sleep(0.05) self._defer(self._send_rpc_response, list(packets)) else: self._audit('ErrorSendingRPCResponse') self._logger.exception("Error while sending RPC response, handle=%s, payload=%s", handle, payload) return if len(packets) > 1: self._defer(self._send_rpc_response, list(packets[1:]))
['def', '_send_rpc_response', '(', 'self', ',', '*', 'packets', ')', ':', 'if', 'len', '(', 'packets', ')', '==', '0', ':', 'return', 'handle', ',', 'payload', '=', 'packets', '[', '0', ']', 'try', ':', 'self', '.', '_send_notification', '(', 'handle', ',', 'payload', ')', 'except', 'bable_interface', '.', 'BaBLEException', 'as', 'err', ':', 'if', 'err', '.', 'packet', '.', 'status', '==', "'Rejected'", ':', '# If we are streaming too fast, back off and try again', 'time', '.', 'sleep', '(', '0.05', ')', 'self', '.', '_defer', '(', 'self', '.', '_send_rpc_response', ',', 'list', '(', 'packets', ')', ')', 'else', ':', 'self', '.', '_audit', '(', "'ErrorSendingRPCResponse'", ')', 'self', '.', '_logger', '.', 'exception', '(', '"Error while sending RPC response, handle=%s, payload=%s"', ',', 'handle', ',', 'payload', ')', 'return', 'if', 'len', '(', 'packets', ')', '>', '1', ':', 'self', '.', '_defer', '(', 'self', '.', '_send_rpc_response', ',', 'list', '(', 'packets', '[', '1', ':', ']', ')', ')']
Send an RPC response. It is executed in the baBLE working thread: should not be blocking. The RPC response is notified in one or two packets depending on whether or not response data is included. If there is a temporary error sending one of the packets it is retried automatically. If there is a permanent error, it is logged and the response is abandoned.
['Send', 'an', 'RPC', 'response', '.', 'It', 'is', 'executed', 'in', 'the', 'baBLE', 'working', 'thread', ':', 'should', 'not', 'be', 'blocking', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/native_ble/iotile_transport_native_ble/virtual_ble.py#L416-L444
9,326
alephdata/memorious
memorious/logic/check.py
ContextCheck.match_regexp
def match_regexp(self, value, q, strict=False): """if value matches a regexp q""" value = stringify(value) mr = re.compile(q) if value is not None: if mr.match(value): return self.shout('%r not matching the regexp %r', strict, value, q)
python
def match_regexp(self, value, q, strict=False): """if value matches a regexp q""" value = stringify(value) mr = re.compile(q) if value is not None: if mr.match(value): return self.shout('%r not matching the regexp %r', strict, value, q)
['def', 'match_regexp', '(', 'self', ',', 'value', ',', 'q', ',', 'strict', '=', 'False', ')', ':', 'value', '=', 'stringify', '(', 'value', ')', 'mr', '=', 're', '.', 'compile', '(', 'q', ')', 'if', 'value', 'is', 'not', 'None', ':', 'if', 'mr', '.', 'match', '(', 'value', ')', ':', 'return', 'self', '.', 'shout', '(', "'%r not matching the regexp %r'", ',', 'strict', ',', 'value', ',', 'q', ')']
if value matches a regexp q
['if', 'value', 'matches', 'a', 'regexp', 'q']
train
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/check.py#L51-L58
9,327
sourceperl/pyModbusTCP
pyModbusTCP/client.py
ModbusClient.write_single_register
def write_single_register(self, reg_addr, reg_value): """Modbus function WRITE_SINGLE_REGISTER (0x06) :param reg_addr: register address (0 to 65535) :type reg_addr: int :param reg_value: register value to write :type reg_value: int :returns: True if write ok or None if fail :rtype: bool or None """ # check params if not (0 <= int(reg_addr) <= 65535): self.__debug_msg('write_single_register(): reg_addr out of range') return None if not (0 <= int(reg_value) <= 65535): self.__debug_msg('write_single_register(): reg_value out of range') return None # build frame tx_buffer = self._mbus_frame(const.WRITE_SINGLE_REGISTER, struct.pack('>HH', reg_addr, reg_value)) # send request s_send = self._send_mbus(tx_buffer) # check error if not s_send: return None # receive f_body = self._recv_mbus() # check error if not f_body: return None # check fix frame size if len(f_body) != 4: self.__last_error = const.MB_RECV_ERR self.__debug_msg('write_single_register(): rx frame size error') self.close() return None # register extract rx_reg_addr, rx_reg_value = struct.unpack('>HH', f_body) # check register write is_ok = (rx_reg_addr == reg_addr) and (rx_reg_value == reg_value) return True if is_ok else None
python
def write_single_register(self, reg_addr, reg_value): """Modbus function WRITE_SINGLE_REGISTER (0x06) :param reg_addr: register address (0 to 65535) :type reg_addr: int :param reg_value: register value to write :type reg_value: int :returns: True if write ok or None if fail :rtype: bool or None """ # check params if not (0 <= int(reg_addr) <= 65535): self.__debug_msg('write_single_register(): reg_addr out of range') return None if not (0 <= int(reg_value) <= 65535): self.__debug_msg('write_single_register(): reg_value out of range') return None # build frame tx_buffer = self._mbus_frame(const.WRITE_SINGLE_REGISTER, struct.pack('>HH', reg_addr, reg_value)) # send request s_send = self._send_mbus(tx_buffer) # check error if not s_send: return None # receive f_body = self._recv_mbus() # check error if not f_body: return None # check fix frame size if len(f_body) != 4: self.__last_error = const.MB_RECV_ERR self.__debug_msg('write_single_register(): rx frame size error') self.close() return None # register extract rx_reg_addr, rx_reg_value = struct.unpack('>HH', f_body) # check register write is_ok = (rx_reg_addr == reg_addr) and (rx_reg_value == reg_value) return True if is_ok else None
['def', 'write_single_register', '(', 'self', ',', 'reg_addr', ',', 'reg_value', ')', ':', '# check params', 'if', 'not', '(', '0', '<=', 'int', '(', 'reg_addr', ')', '<=', '65535', ')', ':', 'self', '.', '__debug_msg', '(', "'write_single_register(): reg_addr out of range'", ')', 'return', 'None', 'if', 'not', '(', '0', '<=', 'int', '(', 'reg_value', ')', '<=', '65535', ')', ':', 'self', '.', '__debug_msg', '(', "'write_single_register(): reg_value out of range'", ')', 'return', 'None', '# build frame', 'tx_buffer', '=', 'self', '.', '_mbus_frame', '(', 'const', '.', 'WRITE_SINGLE_REGISTER', ',', 'struct', '.', 'pack', '(', "'>HH'", ',', 'reg_addr', ',', 'reg_value', ')', ')', '# send request', 's_send', '=', 'self', '.', '_send_mbus', '(', 'tx_buffer', ')', '# check error', 'if', 'not', 's_send', ':', 'return', 'None', '# receive', 'f_body', '=', 'self', '.', '_recv_mbus', '(', ')', '# check error', 'if', 'not', 'f_body', ':', 'return', 'None', '# check fix frame size', 'if', 'len', '(', 'f_body', ')', '!=', '4', ':', 'self', '.', '__last_error', '=', 'const', '.', 'MB_RECV_ERR', 'self', '.', '__debug_msg', '(', "'write_single_register(): rx frame size error'", ')', 'self', '.', 'close', '(', ')', 'return', 'None', '# register extract', 'rx_reg_addr', ',', 'rx_reg_value', '=', 'struct', '.', 'unpack', '(', "'>HH'", ',', 'f_body', ')', '# check register write', 'is_ok', '=', '(', 'rx_reg_addr', '==', 'reg_addr', ')', 'and', '(', 'rx_reg_value', '==', 'reg_value', ')', 'return', 'True', 'if', 'is_ok', 'else', 'None']
Modbus function WRITE_SINGLE_REGISTER (0x06) :param reg_addr: register address (0 to 65535) :type reg_addr: int :param reg_value: register value to write :type reg_value: int :returns: True if write ok or None if fail :rtype: bool or None
['Modbus', 'function', 'WRITE_SINGLE_REGISTER', '(', '0x06', ')']
train
https://github.com/sourceperl/pyModbusTCP/blob/993f6e2f5ab52eba164be049e42cea560c3751a5/pyModbusTCP/client.py#L577-L617
9,328
pydata/xarray
xarray/core/utils.py
decode_numpy_dict_values
def decode_numpy_dict_values(attrs: Mapping[K, V]) -> Dict[K, V]: """Convert attribute values from numpy objects to native Python objects, for use in to_dict """ attrs = dict(attrs) for k, v in attrs.items(): if isinstance(v, np.ndarray): attrs[k] = v.tolist() elif isinstance(v, np.generic): attrs[k] = v.item() return attrs
python
def decode_numpy_dict_values(attrs: Mapping[K, V]) -> Dict[K, V]: """Convert attribute values from numpy objects to native Python objects, for use in to_dict """ attrs = dict(attrs) for k, v in attrs.items(): if isinstance(v, np.ndarray): attrs[k] = v.tolist() elif isinstance(v, np.generic): attrs[k] = v.item() return attrs
['def', 'decode_numpy_dict_values', '(', 'attrs', ':', 'Mapping', '[', 'K', ',', 'V', ']', ')', '->', 'Dict', '[', 'K', ',', 'V', ']', ':', 'attrs', '=', 'dict', '(', 'attrs', ')', 'for', 'k', ',', 'v', 'in', 'attrs', '.', 'items', '(', ')', ':', 'if', 'isinstance', '(', 'v', ',', 'np', '.', 'ndarray', ')', ':', 'attrs', '[', 'k', ']', '=', 'v', '.', 'tolist', '(', ')', 'elif', 'isinstance', '(', 'v', ',', 'np', '.', 'generic', ')', ':', 'attrs', '[', 'k', ']', '=', 'v', '.', 'item', '(', ')', 'return', 'attrs']
Convert attribute values from numpy objects to native Python objects, for use in to_dict
['Convert', 'attribute', 'values', 'from', 'numpy', 'objects', 'to', 'native', 'Python', 'objects', 'for', 'use', 'in', 'to_dict']
train
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/utils.py#L538-L548
9,329
Spinmob/spinmob
_pylab_colormap.py
colormap.set_image
def set_image(self, image='auto'): """ Set which pylab image to tweak. """ if image=="auto": image = _pylab.gca().images[0] self._image=image self.update_image()
python
def set_image(self, image='auto'): """ Set which pylab image to tweak. """ if image=="auto": image = _pylab.gca().images[0] self._image=image self.update_image()
['def', 'set_image', '(', 'self', ',', 'image', '=', "'auto'", ')', ':', 'if', 'image', '==', '"auto"', ':', 'image', '=', '_pylab', '.', 'gca', '(', ')', '.', 'images', '[', '0', ']', 'self', '.', '_image', '=', 'image', 'self', '.', 'update_image', '(', ')']
Set which pylab image to tweak.
['Set', 'which', 'pylab', 'image', 'to', 'tweak', '.']
train
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_pylab_colormap.py#L154-L160
9,330
mosdef-hub/mbuild
mbuild/lib/recipes/tiled_compound.py
TiledCompound._add_tile
def _add_tile(self, new_tile, ijk): """Add a tile with a label indicating its tiling position. """ tile_label = "{0}_{1}".format(self.name, '-'.join(str(d) for d in ijk)) self.add(new_tile, label=tile_label, inherit_periodicity=False)
python
def _add_tile(self, new_tile, ijk): """Add a tile with a label indicating its tiling position. """ tile_label = "{0}_{1}".format(self.name, '-'.join(str(d) for d in ijk)) self.add(new_tile, label=tile_label, inherit_periodicity=False)
['def', '_add_tile', '(', 'self', ',', 'new_tile', ',', 'ijk', ')', ':', 'tile_label', '=', '"{0}_{1}"', '.', 'format', '(', 'self', '.', 'name', ',', "'-'", '.', 'join', '(', 'str', '(', 'd', ')', 'for', 'd', 'in', 'ijk', ')', ')', 'self', '.', 'add', '(', 'new_tile', ',', 'label', '=', 'tile_label', ',', 'inherit_periodicity', '=', 'False', ')']
Add a tile with a label indicating its tiling position.
['Add', 'a', 'tile', 'with', 'a', 'label', 'indicating', 'its', 'tiling', 'position', '.']
train
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/lib/recipes/tiled_compound.py#L117-L120
9,331
timothycrosley/isort
isort/isort.py
_SortImports._strip_top_comments
def _strip_top_comments(lines: Sequence[str], line_separator: str) -> str: """Strips # comments that exist at the top of the given lines""" lines = copy.copy(lines) while lines and lines[0].startswith("#"): lines = lines[1:] return line_separator.join(lines)
python
def _strip_top_comments(lines: Sequence[str], line_separator: str) -> str: """Strips # comments that exist at the top of the given lines""" lines = copy.copy(lines) while lines and lines[0].startswith("#"): lines = lines[1:] return line_separator.join(lines)
['def', '_strip_top_comments', '(', 'lines', ':', 'Sequence', '[', 'str', ']', ',', 'line_separator', ':', 'str', ')', '->', 'str', ':', 'lines', '=', 'copy', '.', 'copy', '(', 'lines', ')', 'while', 'lines', 'and', 'lines', '[', '0', ']', '.', 'startswith', '(', '"#"', ')', ':', 'lines', '=', 'lines', '[', '1', ':', ']', 'return', 'line_separator', '.', 'join', '(', 'lines', ')']
Strips # comments that exist at the top of the given lines
['Strips', '#', 'comments', 'that', 'exist', 'at', 'the', 'top', 'of', 'the', 'given', 'lines']
train
https://github.com/timothycrosley/isort/blob/493c02a1a000fe782cec56f1f43262bacb316381/isort/isort.py#L124-L129
9,332
zhmcclient/python-zhmcclient
zhmcclient/_user.py
User.remove_user_role
def remove_user_role(self, user_role): """ Remove the specified User Role from this User. This User must not be a system-defined or pattern-based user. Authorization requirements: * Task permission to the "Manage Users" task to modify a standard user or the "Manage User Templates" task to modify a template user. Parameters: user_role (:class:`~zhmcclient.UserRole`): User Role to be removed. Must not be `None`. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ body = { 'user-role-uri': user_role.uri } self.manager.session.post( self.uri + '/operations/remove-user-role', body=body)
python
def remove_user_role(self, user_role): """ Remove the specified User Role from this User. This User must not be a system-defined or pattern-based user. Authorization requirements: * Task permission to the "Manage Users" task to modify a standard user or the "Manage User Templates" task to modify a template user. Parameters: user_role (:class:`~zhmcclient.UserRole`): User Role to be removed. Must not be `None`. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ body = { 'user-role-uri': user_role.uri } self.manager.session.post( self.uri + '/operations/remove-user-role', body=body)
['def', 'remove_user_role', '(', 'self', ',', 'user_role', ')', ':', 'body', '=', '{', "'user-role-uri'", ':', 'user_role', '.', 'uri', '}', 'self', '.', 'manager', '.', 'session', '.', 'post', '(', 'self', '.', 'uri', '+', "'/operations/remove-user-role'", ',', 'body', '=', 'body', ')']
Remove the specified User Role from this User. This User must not be a system-defined or pattern-based user. Authorization requirements: * Task permission to the "Manage Users" task to modify a standard user or the "Manage User Templates" task to modify a template user. Parameters: user_role (:class:`~zhmcclient.UserRole`): User Role to be removed. Must not be `None`. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
['Remove', 'the', 'specified', 'User', 'Role', 'from', 'this', 'User', '.']
train
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_user.py#L301-L329
9,333
log2timeline/plaso
plaso/parsers/mediator.py
ParserMediator.ProduceExtractionWarning
def ProduceExtractionWarning(self, message, path_spec=None): """Produces an extraction warning. Args: message (str): message of the warning. path_spec (Optional[dfvfs.PathSpec]): path specification, where None will use the path specification of current file entry set in the mediator. Raises: RuntimeError: when storage writer is not set. """ if not self._storage_writer: raise RuntimeError('Storage writer not set.') if not path_spec and self._file_entry: path_spec = self._file_entry.path_spec parser_chain = self.GetParserChain() warning = warnings.ExtractionWarning( message=message, parser_chain=parser_chain, path_spec=path_spec) self._storage_writer.AddWarning(warning) self._number_of_warnings += 1 self.last_activity_timestamp = time.time()
python
def ProduceExtractionWarning(self, message, path_spec=None): """Produces an extraction warning. Args: message (str): message of the warning. path_spec (Optional[dfvfs.PathSpec]): path specification, where None will use the path specification of current file entry set in the mediator. Raises: RuntimeError: when storage writer is not set. """ if not self._storage_writer: raise RuntimeError('Storage writer not set.') if not path_spec and self._file_entry: path_spec = self._file_entry.path_spec parser_chain = self.GetParserChain() warning = warnings.ExtractionWarning( message=message, parser_chain=parser_chain, path_spec=path_spec) self._storage_writer.AddWarning(warning) self._number_of_warnings += 1 self.last_activity_timestamp = time.time()
['def', 'ProduceExtractionWarning', '(', 'self', ',', 'message', ',', 'path_spec', '=', 'None', ')', ':', 'if', 'not', 'self', '.', '_storage_writer', ':', 'raise', 'RuntimeError', '(', "'Storage writer not set.'", ')', 'if', 'not', 'path_spec', 'and', 'self', '.', '_file_entry', ':', 'path_spec', '=', 'self', '.', '_file_entry', '.', 'path_spec', 'parser_chain', '=', 'self', '.', 'GetParserChain', '(', ')', 'warning', '=', 'warnings', '.', 'ExtractionWarning', '(', 'message', '=', 'message', ',', 'parser_chain', '=', 'parser_chain', ',', 'path_spec', '=', 'path_spec', ')', 'self', '.', '_storage_writer', '.', 'AddWarning', '(', 'warning', ')', 'self', '.', '_number_of_warnings', '+=', '1', 'self', '.', 'last_activity_timestamp', '=', 'time', '.', 'time', '(', ')']
Produces an extraction warning. Args: message (str): message of the warning. path_spec (Optional[dfvfs.PathSpec]): path specification, where None will use the path specification of current file entry set in the mediator. Raises: RuntimeError: when storage writer is not set.
['Produces', 'an', 'extraction', 'warning', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/mediator.py#L524-L548
9,334
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_http_redirect.py
brocade_http_redirect.set_http_application_url_output_status_string
def set_http_application_url_output_status_string(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") set_http_application_url = ET.Element("set_http_application_url") config = set_http_application_url output = ET.SubElement(set_http_application_url, "output") status_string = ET.SubElement(output, "status-string") status_string.text = kwargs.pop('status_string') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def set_http_application_url_output_status_string(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") set_http_application_url = ET.Element("set_http_application_url") config = set_http_application_url output = ET.SubElement(set_http_application_url, "output") status_string = ET.SubElement(output, "status-string") status_string.text = kwargs.pop('status_string') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'set_http_application_url_output_status_string', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'set_http_application_url', '=', 'ET', '.', 'Element', '(', '"set_http_application_url"', ')', 'config', '=', 'set_http_application_url', 'output', '=', 'ET', '.', 'SubElement', '(', 'set_http_application_url', ',', '"output"', ')', 'status_string', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"status-string"', ')', 'status_string', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'status_string'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_http_redirect.py#L53-L64
9,335
batiste/django-page-cms
pages/managers.py
PageManager.navigation
def navigation(self): """Creates a :class:`QuerySet` of the published root pages.""" return self.on_site().filter( status=self.model.PUBLISHED).filter(parent__isnull=True)
python
def navigation(self): """Creates a :class:`QuerySet` of the published root pages.""" return self.on_site().filter( status=self.model.PUBLISHED).filter(parent__isnull=True)
['def', 'navigation', '(', 'self', ')', ':', 'return', 'self', '.', 'on_site', '(', ')', '.', 'filter', '(', 'status', '=', 'self', '.', 'model', '.', 'PUBLISHED', ')', '.', 'filter', '(', 'parent__isnull', '=', 'True', ')']
Creates a :class:`QuerySet` of the published root pages.
['Creates', 'a', ':', 'class', ':', 'QuerySet', 'of', 'the', 'published', 'root', 'pages', '.']
train
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/managers.py#L64-L67
9,336
serkanyersen/underscore.py
src/underscore.py
underscore.memoize
def memoize(self, hasher=None): """ Memoize an expensive function by storing its results. """ ns = self.Namespace() ns.memo = {} if hasher is None: hasher = lambda x: x def memoized(*args, **kwargs): key = hasher(*args) if key not in ns.memo: ns.memo[key] = self.obj(*args, **kwargs) return ns.memo[key] return self._wrap(memoized)
python
def memoize(self, hasher=None): """ Memoize an expensive function by storing its results. """ ns = self.Namespace() ns.memo = {} if hasher is None: hasher = lambda x: x def memoized(*args, **kwargs): key = hasher(*args) if key not in ns.memo: ns.memo[key] = self.obj(*args, **kwargs) return ns.memo[key] return self._wrap(memoized)
['def', 'memoize', '(', 'self', ',', 'hasher', '=', 'None', ')', ':', 'ns', '=', 'self', '.', 'Namespace', '(', ')', 'ns', '.', 'memo', '=', '{', '}', 'if', 'hasher', 'is', 'None', ':', 'hasher', '=', 'lambda', 'x', ':', 'x', 'def', 'memoized', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'key', '=', 'hasher', '(', '*', 'args', ')', 'if', 'key', 'not', 'in', 'ns', '.', 'memo', ':', 'ns', '.', 'memo', '[', 'key', ']', '=', 'self', '.', 'obj', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'ns', '.', 'memo', '[', 'key', ']', 'return', 'self', '.', '_wrap', '(', 'memoized', ')']
Memoize an expensive function by storing its results.
['Memoize', 'an', 'expensive', 'function', 'by', 'storing', 'its', 'results', '.']
train
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L773-L787
9,337
googleapis/google-cloud-python
talent/google/cloud/talent_v4beta1/gapic/company_service_client.py
CompanyServiceClient.company_path
def company_path(cls, project, company): """Return a fully-qualified company string.""" return google.api_core.path_template.expand( "projects/{project}/companies/{company}", project=project, company=company )
python
def company_path(cls, project, company): """Return a fully-qualified company string.""" return google.api_core.path_template.expand( "projects/{project}/companies/{company}", project=project, company=company )
['def', 'company_path', '(', 'cls', ',', 'project', ',', 'company', ')', ':', 'return', 'google', '.', 'api_core', '.', 'path_template', '.', 'expand', '(', '"projects/{project}/companies/{company}"', ',', 'project', '=', 'project', ',', 'company', '=', 'company', ')']
Return a fully-qualified company string.
['Return', 'a', 'fully', '-', 'qualified', 'company', 'string', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/talent/google/cloud/talent_v4beta1/gapic/company_service_client.py#L81-L85
9,338
liamw9534/bt-manager
bt_manager/interface.py
BTInterface.get_property
def get_property(self, name=None): """ Helper to get a property value by name or all properties as a dictionary. See also :py:meth:`set_property` :param str name: defaults to None which means all properties in the object's dictionary are returned as a dict. Otherwise, the property name key is used and its value is returned. :return: Property value by property key, or a dictionary of all properties :raises KeyError: if the property key is not found in the object's dictionary :raises dbus.Exception: org.bluez.Error.DoesNotExist :raises dbus.Exception: org.bluez.Error.InvalidArguments """ if (name): return self._interface.GetProperties()[name] else: return self._interface.GetProperties()
python
def get_property(self, name=None): """ Helper to get a property value by name or all properties as a dictionary. See also :py:meth:`set_property` :param str name: defaults to None which means all properties in the object's dictionary are returned as a dict. Otherwise, the property name key is used and its value is returned. :return: Property value by property key, or a dictionary of all properties :raises KeyError: if the property key is not found in the object's dictionary :raises dbus.Exception: org.bluez.Error.DoesNotExist :raises dbus.Exception: org.bluez.Error.InvalidArguments """ if (name): return self._interface.GetProperties()[name] else: return self._interface.GetProperties()
['def', 'get_property', '(', 'self', ',', 'name', '=', 'None', ')', ':', 'if', '(', 'name', ')', ':', 'return', 'self', '.', '_interface', '.', 'GetProperties', '(', ')', '[', 'name', ']', 'else', ':', 'return', 'self', '.', '_interface', '.', 'GetProperties', '(', ')']
Helper to get a property value by name or all properties as a dictionary. See also :py:meth:`set_property` :param str name: defaults to None which means all properties in the object's dictionary are returned as a dict. Otherwise, the property name key is used and its value is returned. :return: Property value by property key, or a dictionary of all properties :raises KeyError: if the property key is not found in the object's dictionary :raises dbus.Exception: org.bluez.Error.DoesNotExist :raises dbus.Exception: org.bluez.Error.InvalidArguments
['Helper', 'to', 'get', 'a', 'property', 'value', 'by', 'name', 'or', 'all', 'properties', 'as', 'a', 'dictionary', '.']
train
https://github.com/liamw9534/bt-manager/blob/51be2919394ce8134c698359649bfad09eedf4ec/bt_manager/interface.py#L170-L191
9,339
napalm-automation/napalm
napalm/nxos_ssh/nxos_ssh.py
NXOSSSHDriver.get_interfaces_ip
def get_interfaces_ip(self): """ Get interface IP details. Returns a dictionary of dictionaries. Sample output: { "Ethernet2/3": { "ipv4": { "4.4.4.4": { "prefix_length": 16 } }, "ipv6": { "2001:db8::1": { "prefix_length": 10 }, "fe80::2ec2:60ff:fe4f:feb2": { "prefix_length": "128" } } }, "Ethernet2/2": { "ipv4": { "2.2.2.2": { "prefix_length": 27 } } } } """ interfaces_ip = {} ipv4_command = "show ip interface vrf all" ipv6_command = "show ipv6 interface vrf all" output_v4 = self._send_command(ipv4_command) output_v6 = self._send_command(ipv6_command) v4_interfaces = {} for line in output_v4.splitlines(): # Ethernet2/2, Interface status: protocol-up/link-up/admin-up, iod: 38, # IP address: 2.2.2.2, IP subnet: 2.2.2.0/27 route-preference: 0, tag: 0 # IP address: 3.3.3.3, IP subnet: 3.3.3.0/25 secondary route-preference: 0, tag: 0 if "Interface status" in line: interface = line.split(",")[0] continue if "IP address" in line: ip_address = line.split(",")[0].split()[2] try: prefix_len = int(line.split()[5].split("/")[1]) except ValueError: prefix_len = "N/A" val = {"prefix_length": prefix_len} v4_interfaces.setdefault(interface, {})[ip_address] = val v6_interfaces = {} for line in output_v6.splitlines(): # Ethernet2/4, Interface status: protocol-up/link-up/admin-up, iod: 40 # IPv6 address: # 2001:11:2233::a1/24 [VALID] # 2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2/64 [VALID] # IPv6 subnet: 2001::/24 # IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID] # IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID] if "Interface status" in line: interface = line.split(",")[0] continue if "VALID" in line: line = line.strip() if "link-local address" in line: # match the following format: # IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID] ip_address = line.split()[3] prefix_len = "64" elif "IPv6 address" in line: # match the following format: # IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID] ip_address = line.split()[2] prefix_len = "64" else: ip_address, prefix_len = line.split()[0].split("/") prefix_len = int(prefix_len) val = {"prefix_length": prefix_len} v6_interfaces.setdefault(interface, {})[ip_address] = val # Join data from intermediate dictionaries. for interface, data in v4_interfaces.items(): interfaces_ip.setdefault(interface, {"ipv4": {}})["ipv4"] = data for interface, data in v6_interfaces.items(): interfaces_ip.setdefault(interface, {"ipv6": {}})["ipv6"] = data return interfaces_ip
python
def get_interfaces_ip(self): """ Get interface IP details. Returns a dictionary of dictionaries. Sample output: { "Ethernet2/3": { "ipv4": { "4.4.4.4": { "prefix_length": 16 } }, "ipv6": { "2001:db8::1": { "prefix_length": 10 }, "fe80::2ec2:60ff:fe4f:feb2": { "prefix_length": "128" } } }, "Ethernet2/2": { "ipv4": { "2.2.2.2": { "prefix_length": 27 } } } } """ interfaces_ip = {} ipv4_command = "show ip interface vrf all" ipv6_command = "show ipv6 interface vrf all" output_v4 = self._send_command(ipv4_command) output_v6 = self._send_command(ipv6_command) v4_interfaces = {} for line in output_v4.splitlines(): # Ethernet2/2, Interface status: protocol-up/link-up/admin-up, iod: 38, # IP address: 2.2.2.2, IP subnet: 2.2.2.0/27 route-preference: 0, tag: 0 # IP address: 3.3.3.3, IP subnet: 3.3.3.0/25 secondary route-preference: 0, tag: 0 if "Interface status" in line: interface = line.split(",")[0] continue if "IP address" in line: ip_address = line.split(",")[0].split()[2] try: prefix_len = int(line.split()[5].split("/")[1]) except ValueError: prefix_len = "N/A" val = {"prefix_length": prefix_len} v4_interfaces.setdefault(interface, {})[ip_address] = val v6_interfaces = {} for line in output_v6.splitlines(): # Ethernet2/4, Interface status: protocol-up/link-up/admin-up, iod: 40 # IPv6 address: # 2001:11:2233::a1/24 [VALID] # 2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2/64 [VALID] # IPv6 subnet: 2001::/24 # IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID] # IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID] if "Interface status" in line: interface = line.split(",")[0] continue if "VALID" in line: line = line.strip() if "link-local address" in line: # match the following format: # IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID] ip_address = line.split()[3] prefix_len = "64" elif "IPv6 address" in line: # match the following format: # IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID] ip_address = line.split()[2] prefix_len = "64" else: ip_address, prefix_len = line.split()[0].split("/") prefix_len = int(prefix_len) val = {"prefix_length": prefix_len} v6_interfaces.setdefault(interface, {})[ip_address] = val # Join data from intermediate dictionaries. for interface, data in v4_interfaces.items(): interfaces_ip.setdefault(interface, {"ipv4": {}})["ipv4"] = data for interface, data in v6_interfaces.items(): interfaces_ip.setdefault(interface, {"ipv6": {}})["ipv6"] = data return interfaces_ip
['def', 'get_interfaces_ip', '(', 'self', ')', ':', 'interfaces_ip', '=', '{', '}', 'ipv4_command', '=', '"show ip interface vrf all"', 'ipv6_command', '=', '"show ipv6 interface vrf all"', 'output_v4', '=', 'self', '.', '_send_command', '(', 'ipv4_command', ')', 'output_v6', '=', 'self', '.', '_send_command', '(', 'ipv6_command', ')', 'v4_interfaces', '=', '{', '}', 'for', 'line', 'in', 'output_v4', '.', 'splitlines', '(', ')', ':', '# Ethernet2/2, Interface status: protocol-up/link-up/admin-up, iod: 38,', '# IP address: 2.2.2.2, IP subnet: 2.2.2.0/27 route-preference: 0, tag: 0', '# IP address: 3.3.3.3, IP subnet: 3.3.3.0/25 secondary route-preference: 0, tag: 0', 'if', '"Interface status"', 'in', 'line', ':', 'interface', '=', 'line', '.', 'split', '(', '","', ')', '[', '0', ']', 'continue', 'if', '"IP address"', 'in', 'line', ':', 'ip_address', '=', 'line', '.', 'split', '(', '","', ')', '[', '0', ']', '.', 'split', '(', ')', '[', '2', ']', 'try', ':', 'prefix_len', '=', 'int', '(', 'line', '.', 'split', '(', ')', '[', '5', ']', '.', 'split', '(', '"/"', ')', '[', '1', ']', ')', 'except', 'ValueError', ':', 'prefix_len', '=', '"N/A"', 'val', '=', '{', '"prefix_length"', ':', 'prefix_len', '}', 'v4_interfaces', '.', 'setdefault', '(', 'interface', ',', '{', '}', ')', '[', 'ip_address', ']', '=', 'val', 'v6_interfaces', '=', '{', '}', 'for', 'line', 'in', 'output_v6', '.', 'splitlines', '(', ')', ':', '# Ethernet2/4, Interface status: protocol-up/link-up/admin-up, iod: 40', '# IPv6 address:', '# 2001:11:2233::a1/24 [VALID]', '# 2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2/64 [VALID]', '# IPv6 subnet: 2001::/24', '# IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID]', '# IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID]', 'if', '"Interface status"', 'in', 'line', ':', 'interface', '=', 'line', '.', 'split', '(', '","', ')', '[', '0', ']', 'continue', 'if', '"VALID"', 'in', 'line', ':', 'line', '=', 'line', '.', 'strip', '(', ')', 'if', '"link-local address"', 'in', 'line', ':', '# match the following format:', '# IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID]', 'ip_address', '=', 'line', '.', 'split', '(', ')', '[', '3', ']', 'prefix_len', '=', '"64"', 'elif', '"IPv6 address"', 'in', 'line', ':', '# match the following format:', '# IPv6 address: fe80::a293:51ff:fe5f:5ce9 [VALID]', 'ip_address', '=', 'line', '.', 'split', '(', ')', '[', '2', ']', 'prefix_len', '=', '"64"', 'else', ':', 'ip_address', ',', 'prefix_len', '=', 'line', '.', 'split', '(', ')', '[', '0', ']', '.', 'split', '(', '"/"', ')', 'prefix_len', '=', 'int', '(', 'prefix_len', ')', 'val', '=', '{', '"prefix_length"', ':', 'prefix_len', '}', 'v6_interfaces', '.', 'setdefault', '(', 'interface', ',', '{', '}', ')', '[', 'ip_address', ']', '=', 'val', '# Join data from intermediate dictionaries.', 'for', 'interface', ',', 'data', 'in', 'v4_interfaces', '.', 'items', '(', ')', ':', 'interfaces_ip', '.', 'setdefault', '(', 'interface', ',', '{', '"ipv4"', ':', '{', '}', '}', ')', '[', '"ipv4"', ']', '=', 'data', 'for', 'interface', ',', 'data', 'in', 'v6_interfaces', '.', 'items', '(', ')', ':', 'interfaces_ip', '.', 'setdefault', '(', 'interface', ',', '{', '"ipv6"', ':', '{', '}', '}', ')', '[', '"ipv6"', ']', '=', 'data', 'return', 'interfaces_ip']
Get interface IP details. Returns a dictionary of dictionaries. Sample output: { "Ethernet2/3": { "ipv4": { "4.4.4.4": { "prefix_length": 16 } }, "ipv6": { "2001:db8::1": { "prefix_length": 10 }, "fe80::2ec2:60ff:fe4f:feb2": { "prefix_length": "128" } } }, "Ethernet2/2": { "ipv4": { "2.2.2.2": { "prefix_length": 27 } } } }
['Get', 'interface', 'IP', 'details', '.', 'Returns', 'a', 'dictionary', 'of', 'dictionaries', '.']
train
https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/nxos_ssh/nxos_ssh.py#L934-L1024
9,340
mikekatz04/BOWIE
bowie/plotutils/plottypes.py
Ratio.make_plot
def make_plot(self): """Creates the ratio plot. """ # sets colormap for ratio comparison plot cmap = getattr(cm, self.colormap) # set values of ratio comparison contour normval = 2.0 num_contours = 40 # must be even levels = np.linspace(-normval, normval, num_contours) norm = colors.Normalize(-normval, normval) # find Loss/Gain contour and Ratio contour self.set_comparison() diff_out, loss_gain_contour = self.find_difference_contour() cmap.set_bad(color='white', alpha=0.001) # plot ratio contours sc = self.axis.contourf(self.xvals[0], self.yvals[0], diff_out, levels=levels, norm=norm, extend='both', cmap=cmap) self.colorbar.setup_colorbars(sc) # toggle line contours of orders of magnitude of ratio comparisons if self.order_contour_lines: self.axis.contour(self.xvals[0], self.yvals[0], diff_out, np.array( [-2.0, -1.0, 1.0, 2.0]), colors='black', linewidths=1.0) # plot loss gain contour if self.loss_gain_status is True: # if there is no loss/gain contours, this will produce an error, # so we catch the exception. try: # make hatching cs = self.axis.contourf(self.xvals[0], self.yvals[0], loss_gain_contour, levels=[-2, -0.5, 0.5, 2], colors='none', hatches=['x', None, '+']) # make loss/gain contour outline self.axis.contour(self.xvals[0], self.yvals[0], loss_gain_contour, 3, colors='black', linewidths=2) except ValueError: pass if self.add_legend: loss_patch = Patch(fill=None, label='Loss', hatch='x', linestyle='--', linewidth=2) gain_patch = Patch(fill=None, label='Gain', hatch='+', linestyle='-', linewidth=2) legend = self.axis.legend(handles=[loss_patch, gain_patch], **self.legend_kwargs) return
python
def make_plot(self): """Creates the ratio plot. """ # sets colormap for ratio comparison plot cmap = getattr(cm, self.colormap) # set values of ratio comparison contour normval = 2.0 num_contours = 40 # must be even levels = np.linspace(-normval, normval, num_contours) norm = colors.Normalize(-normval, normval) # find Loss/Gain contour and Ratio contour self.set_comparison() diff_out, loss_gain_contour = self.find_difference_contour() cmap.set_bad(color='white', alpha=0.001) # plot ratio contours sc = self.axis.contourf(self.xvals[0], self.yvals[0], diff_out, levels=levels, norm=norm, extend='both', cmap=cmap) self.colorbar.setup_colorbars(sc) # toggle line contours of orders of magnitude of ratio comparisons if self.order_contour_lines: self.axis.contour(self.xvals[0], self.yvals[0], diff_out, np.array( [-2.0, -1.0, 1.0, 2.0]), colors='black', linewidths=1.0) # plot loss gain contour if self.loss_gain_status is True: # if there is no loss/gain contours, this will produce an error, # so we catch the exception. try: # make hatching cs = self.axis.contourf(self.xvals[0], self.yvals[0], loss_gain_contour, levels=[-2, -0.5, 0.5, 2], colors='none', hatches=['x', None, '+']) # make loss/gain contour outline self.axis.contour(self.xvals[0], self.yvals[0], loss_gain_contour, 3, colors='black', linewidths=2) except ValueError: pass if self.add_legend: loss_patch = Patch(fill=None, label='Loss', hatch='x', linestyle='--', linewidth=2) gain_patch = Patch(fill=None, label='Gain', hatch='+', linestyle='-', linewidth=2) legend = self.axis.legend(handles=[loss_patch, gain_patch], **self.legend_kwargs) return
['def', 'make_plot', '(', 'self', ')', ':', '# sets colormap for ratio comparison plot', 'cmap', '=', 'getattr', '(', 'cm', ',', 'self', '.', 'colormap', ')', '# set values of ratio comparison contour', 'normval', '=', '2.0', 'num_contours', '=', '40', '# must be even', 'levels', '=', 'np', '.', 'linspace', '(', '-', 'normval', ',', 'normval', ',', 'num_contours', ')', 'norm', '=', 'colors', '.', 'Normalize', '(', '-', 'normval', ',', 'normval', ')', '# find Loss/Gain contour and Ratio contour', 'self', '.', 'set_comparison', '(', ')', 'diff_out', ',', 'loss_gain_contour', '=', 'self', '.', 'find_difference_contour', '(', ')', 'cmap', '.', 'set_bad', '(', 'color', '=', "'white'", ',', 'alpha', '=', '0.001', ')', '# plot ratio contours', 'sc', '=', 'self', '.', 'axis', '.', 'contourf', '(', 'self', '.', 'xvals', '[', '0', ']', ',', 'self', '.', 'yvals', '[', '0', ']', ',', 'diff_out', ',', 'levels', '=', 'levels', ',', 'norm', '=', 'norm', ',', 'extend', '=', "'both'", ',', 'cmap', '=', 'cmap', ')', 'self', '.', 'colorbar', '.', 'setup_colorbars', '(', 'sc', ')', '# toggle line contours of orders of magnitude of ratio comparisons', 'if', 'self', '.', 'order_contour_lines', ':', 'self', '.', 'axis', '.', 'contour', '(', 'self', '.', 'xvals', '[', '0', ']', ',', 'self', '.', 'yvals', '[', '0', ']', ',', 'diff_out', ',', 'np', '.', 'array', '(', '[', '-', '2.0', ',', '-', '1.0', ',', '1.0', ',', '2.0', ']', ')', ',', 'colors', '=', "'black'", ',', 'linewidths', '=', '1.0', ')', '# plot loss gain contour', 'if', 'self', '.', 'loss_gain_status', 'is', 'True', ':', '# if there is no loss/gain contours, this will produce an error,', '# so we catch the exception.', 'try', ':', '# make hatching', 'cs', '=', 'self', '.', 'axis', '.', 'contourf', '(', 'self', '.', 'xvals', '[', '0', ']', ',', 'self', '.', 'yvals', '[', '0', ']', ',', 'loss_gain_contour', ',', 'levels', '=', '[', '-', '2', ',', '-', '0.5', ',', '0.5', ',', '2', ']', ',', 'colors', '=', "'none'", ',', 'hatches', '=', '[', "'x'", ',', 'None', ',', "'+'", ']', ')', '# make loss/gain contour outline', 'self', '.', 'axis', '.', 'contour', '(', 'self', '.', 'xvals', '[', '0', ']', ',', 'self', '.', 'yvals', '[', '0', ']', ',', 'loss_gain_contour', ',', '3', ',', 'colors', '=', "'black'", ',', 'linewidths', '=', '2', ')', 'except', 'ValueError', ':', 'pass', 'if', 'self', '.', 'add_legend', ':', 'loss_patch', '=', 'Patch', '(', 'fill', '=', 'None', ',', 'label', '=', "'Loss'", ',', 'hatch', '=', "'x'", ',', 'linestyle', '=', "'--'", ',', 'linewidth', '=', '2', ')', 'gain_patch', '=', 'Patch', '(', 'fill', '=', 'None', ',', 'label', '=', "'Gain'", ',', 'hatch', '=', "'+'", ',', 'linestyle', '=', "'-'", ',', 'linewidth', '=', '2', ')', 'legend', '=', 'self', '.', 'axis', '.', 'legend', '(', 'handles', '=', '[', 'loss_patch', ',', 'gain_patch', ']', ',', '*', '*', 'self', '.', 'legend_kwargs', ')', 'return']
Creates the ratio plot.
['Creates', 'the', 'ratio', 'plot', '.']
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/plottypes.py#L71-L123
9,341
openid/JWTConnect-Python-CryptoJWT
src/cryptojwt/jwe/jwekey.py
JWEKey.enc_setup
def enc_setup(self, enc_alg, msg, auth_data=b'', key=None, iv=""): """ Encrypt JWE content. :param enc_alg: The JWE "enc" value specifying the encryption algorithm :param msg: The plain text message :param auth_data: Additional authenticated data :param key: Key (CEK) :return: Tuple (ciphertext, tag), both as bytes """ iv = self._generate_iv(enc_alg, iv) if enc_alg in ["A192GCM", "A128GCM", "A256GCM"]: aes = AES_GCMEncrypter(key=key) ctx, tag = split_ctx_and_tag(aes.encrypt(msg, iv, auth_data)) elif enc_alg in ["A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512"]: aes = AES_CBCEncrypter(key=key) ctx, tag = aes.encrypt(msg, iv, auth_data) else: raise NotSupportedAlgorithm(enc_alg) return ctx, tag, aes.key
python
def enc_setup(self, enc_alg, msg, auth_data=b'', key=None, iv=""): """ Encrypt JWE content. :param enc_alg: The JWE "enc" value specifying the encryption algorithm :param msg: The plain text message :param auth_data: Additional authenticated data :param key: Key (CEK) :return: Tuple (ciphertext, tag), both as bytes """ iv = self._generate_iv(enc_alg, iv) if enc_alg in ["A192GCM", "A128GCM", "A256GCM"]: aes = AES_GCMEncrypter(key=key) ctx, tag = split_ctx_and_tag(aes.encrypt(msg, iv, auth_data)) elif enc_alg in ["A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512"]: aes = AES_CBCEncrypter(key=key) ctx, tag = aes.encrypt(msg, iv, auth_data) else: raise NotSupportedAlgorithm(enc_alg) return ctx, tag, aes.key
['def', 'enc_setup', '(', 'self', ',', 'enc_alg', ',', 'msg', ',', 'auth_data', '=', "b''", ',', 'key', '=', 'None', ',', 'iv', '=', '""', ')', ':', 'iv', '=', 'self', '.', '_generate_iv', '(', 'enc_alg', ',', 'iv', ')', 'if', 'enc_alg', 'in', '[', '"A192GCM"', ',', '"A128GCM"', ',', '"A256GCM"', ']', ':', 'aes', '=', 'AES_GCMEncrypter', '(', 'key', '=', 'key', ')', 'ctx', ',', 'tag', '=', 'split_ctx_and_tag', '(', 'aes', '.', 'encrypt', '(', 'msg', ',', 'iv', ',', 'auth_data', ')', ')', 'elif', 'enc_alg', 'in', '[', '"A128CBC-HS256"', ',', '"A192CBC-HS384"', ',', '"A256CBC-HS512"', ']', ':', 'aes', '=', 'AES_CBCEncrypter', '(', 'key', '=', 'key', ')', 'ctx', ',', 'tag', '=', 'aes', '.', 'encrypt', '(', 'msg', ',', 'iv', ',', 'auth_data', ')', 'else', ':', 'raise', 'NotSupportedAlgorithm', '(', 'enc_alg', ')', 'return', 'ctx', ',', 'tag', ',', 'aes', '.', 'key']
Encrypt JWE content. :param enc_alg: The JWE "enc" value specifying the encryption algorithm :param msg: The plain text message :param auth_data: Additional authenticated data :param key: Key (CEK) :return: Tuple (ciphertext, tag), both as bytes
['Encrypt', 'JWE', 'content', '.']
train
https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/jwe/jwekey.py#L42-L63
9,342
gwastro/pycbc
pycbc/strain/lines.py
calibration_lines
def calibration_lines(freqs, data, tref=None): """ Extract the calibration lines from strain data. Parameters ---------- freqs: list List containing the frequencies of the calibration lines. data: pycbc.types.TimeSeries Strain data to extract the calibration lines from. tref: {None, float}, optional Reference time for the line. If None, will use data.start_time. Returns ------- data: pycbc.types.TimeSeries The strain data with the calibration lines removed. """ if tref is None: tref = float(data.start_time) for freq in freqs: measured_line = matching_line(freq, data, tref, bin_size=data.duration) data -= measured_line.data.real return data
python
def calibration_lines(freqs, data, tref=None): """ Extract the calibration lines from strain data. Parameters ---------- freqs: list List containing the frequencies of the calibration lines. data: pycbc.types.TimeSeries Strain data to extract the calibration lines from. tref: {None, float}, optional Reference time for the line. If None, will use data.start_time. Returns ------- data: pycbc.types.TimeSeries The strain data with the calibration lines removed. """ if tref is None: tref = float(data.start_time) for freq in freqs: measured_line = matching_line(freq, data, tref, bin_size=data.duration) data -= measured_line.data.real return data
['def', 'calibration_lines', '(', 'freqs', ',', 'data', ',', 'tref', '=', 'None', ')', ':', 'if', 'tref', 'is', 'None', ':', 'tref', '=', 'float', '(', 'data', '.', 'start_time', ')', 'for', 'freq', 'in', 'freqs', ':', 'measured_line', '=', 'matching_line', '(', 'freq', ',', 'data', ',', 'tref', ',', 'bin_size', '=', 'data', '.', 'duration', ')', 'data', '-=', 'measured_line', '.', 'data', '.', 'real', 'return', 'data']
Extract the calibration lines from strain data. Parameters ---------- freqs: list List containing the frequencies of the calibration lines. data: pycbc.types.TimeSeries Strain data to extract the calibration lines from. tref: {None, float}, optional Reference time for the line. If None, will use data.start_time. Returns ------- data: pycbc.types.TimeSeries The strain data with the calibration lines removed.
['Extract', 'the', 'calibration', 'lines', 'from', 'strain', 'data', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/lines.py#L138-L162
9,343
hydpy-dev/hydpy
hydpy/auxs/xmltools.py
XSDWriter.get_itemtypesinsertion
def get_itemtypesinsertion(cls, itemgroup, indent) -> str: """Return a string defining the required types for the given exchange item group. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_itemtypesinsertion( ... 'setitems', 1)) # doctest: +ELLIPSIS <complexType name="arma_v1_setitemsType"> ... </complexType> <BLANKLINE> <complexType name="dam_v001_setitemsType"> ... <complexType name="nodes_setitemsType"> ... """ subs = [] for modelname in cls.get_modelnames(): subs.append(cls.get_itemtypeinsertion(itemgroup, modelname, indent)) subs.append(cls.get_nodesitemtypeinsertion(itemgroup, indent)) return '\n'.join(subs)
python
def get_itemtypesinsertion(cls, itemgroup, indent) -> str: """Return a string defining the required types for the given exchange item group. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_itemtypesinsertion( ... 'setitems', 1)) # doctest: +ELLIPSIS <complexType name="arma_v1_setitemsType"> ... </complexType> <BLANKLINE> <complexType name="dam_v001_setitemsType"> ... <complexType name="nodes_setitemsType"> ... """ subs = [] for modelname in cls.get_modelnames(): subs.append(cls.get_itemtypeinsertion(itemgroup, modelname, indent)) subs.append(cls.get_nodesitemtypeinsertion(itemgroup, indent)) return '\n'.join(subs)
['def', 'get_itemtypesinsertion', '(', 'cls', ',', 'itemgroup', ',', 'indent', ')', '->', 'str', ':', 'subs', '=', '[', ']', 'for', 'modelname', 'in', 'cls', '.', 'get_modelnames', '(', ')', ':', 'subs', '.', 'append', '(', 'cls', '.', 'get_itemtypeinsertion', '(', 'itemgroup', ',', 'modelname', ',', 'indent', ')', ')', 'subs', '.', 'append', '(', 'cls', '.', 'get_nodesitemtypeinsertion', '(', 'itemgroup', ',', 'indent', ')', ')', 'return', "'\\n'", '.', 'join', '(', 'subs', ')']
Return a string defining the required types for the given exchange item group. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_itemtypesinsertion( ... 'setitems', 1)) # doctest: +ELLIPSIS <complexType name="arma_v1_setitemsType"> ... </complexType> <BLANKLINE> <complexType name="dam_v001_setitemsType"> ... <complexType name="nodes_setitemsType"> ...
['Return', 'a', 'string', 'defining', 'the', 'required', 'types', 'for', 'the', 'given', 'exchange', 'item', 'group', '.']
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/xmltools.py#L1887-L1907
9,344
google/grr
grr/server/grr_response_server/signed_binary_utils.py
StreamSignedBinaryContents
def StreamSignedBinaryContents(blob_iterator, chunk_size = 1024 ): """Yields the contents of the given binary in chunks of the given size. Args: blob_iterator: An Iterator over all the binary's blobs. chunk_size: Size, in bytes, of the chunks to yield. """ all_blobs_read = False byte_buffer = io.BytesIO() while not all_blobs_read or byte_buffer.getvalue(): while not all_blobs_read and byte_buffer.tell() < chunk_size: try: blob = next(blob_iterator) except StopIteration: all_blobs_read = True break byte_buffer.write(blob.data) if byte_buffer.tell() > 0: # Yield a chunk of the signed binary and reset the buffer to contain # only data that hasn't been sent yet. byte_buffer.seek(0) yield byte_buffer.read(chunk_size) byte_buffer = io.BytesIO(byte_buffer.read()) byte_buffer.seek(0, io.SEEK_END)
python
def StreamSignedBinaryContents(blob_iterator, chunk_size = 1024 ): """Yields the contents of the given binary in chunks of the given size. Args: blob_iterator: An Iterator over all the binary's blobs. chunk_size: Size, in bytes, of the chunks to yield. """ all_blobs_read = False byte_buffer = io.BytesIO() while not all_blobs_read or byte_buffer.getvalue(): while not all_blobs_read and byte_buffer.tell() < chunk_size: try: blob = next(blob_iterator) except StopIteration: all_blobs_read = True break byte_buffer.write(blob.data) if byte_buffer.tell() > 0: # Yield a chunk of the signed binary and reset the buffer to contain # only data that hasn't been sent yet. byte_buffer.seek(0) yield byte_buffer.read(chunk_size) byte_buffer = io.BytesIO(byte_buffer.read()) byte_buffer.seek(0, io.SEEK_END)
['def', 'StreamSignedBinaryContents', '(', 'blob_iterator', ',', 'chunk_size', '=', '1024', ')', ':', 'all_blobs_read', '=', 'False', 'byte_buffer', '=', 'io', '.', 'BytesIO', '(', ')', 'while', 'not', 'all_blobs_read', 'or', 'byte_buffer', '.', 'getvalue', '(', ')', ':', 'while', 'not', 'all_blobs_read', 'and', 'byte_buffer', '.', 'tell', '(', ')', '<', 'chunk_size', ':', 'try', ':', 'blob', '=', 'next', '(', 'blob_iterator', ')', 'except', 'StopIteration', ':', 'all_blobs_read', '=', 'True', 'break', 'byte_buffer', '.', 'write', '(', 'blob', '.', 'data', ')', 'if', 'byte_buffer', '.', 'tell', '(', ')', '>', '0', ':', '# Yield a chunk of the signed binary and reset the buffer to contain', "# only data that hasn't been sent yet.", 'byte_buffer', '.', 'seek', '(', '0', ')', 'yield', 'byte_buffer', '.', 'read', '(', 'chunk_size', ')', 'byte_buffer', '=', 'io', '.', 'BytesIO', '(', 'byte_buffer', '.', 'read', '(', ')', ')', 'byte_buffer', '.', 'seek', '(', '0', ',', 'io', '.', 'SEEK_END', ')']
Yields the contents of the given binary in chunks of the given size. Args: blob_iterator: An Iterator over all the binary's blobs. chunk_size: Size, in bytes, of the chunks to yield.
['Yields', 'the', 'contents', 'of', 'the', 'given', 'binary', 'in', 'chunks', 'of', 'the', 'given', 'size', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/signed_binary_utils.py#L278-L303
9,345
maximtrp/scikit-posthocs
scikit_posthocs/_plotting.py
sign_array
def sign_array(p_values, alpha=0.05): """ Significance array Converts an array with p values to a significance array where 0 is False (not significant), 1 is True (significant), and -1 is for diagonal elements. Parameters ---------- p_values : array_like or ndarray An array, any object exposing the array interface, containing p values. alpha : float, optional Significance level. Default is 0.05. Returns ------- Numpy array where 0 is False (not significant), 1 is True (significant), and -1 is for diagonal elements. Examples -------- >>> p_values = np.array([[ 0. , 0.00119517, 0.00278329], [ 0.00119517, 0. , 0.18672227], [ 0.00278329, 0.18672227, 0. ]]) >>> ph.sign_array(p_values) array([[-1, 1, 1], [ 1, -1, 0], [ 1, 0, -1]]) """ p_values = np.array(p_values) p_values[p_values > alpha] = 0 p_values[(p_values < alpha) & (p_values > 0)] = 1 np.fill_diagonal(p_values, -1) return p_values
python
def sign_array(p_values, alpha=0.05): """ Significance array Converts an array with p values to a significance array where 0 is False (not significant), 1 is True (significant), and -1 is for diagonal elements. Parameters ---------- p_values : array_like or ndarray An array, any object exposing the array interface, containing p values. alpha : float, optional Significance level. Default is 0.05. Returns ------- Numpy array where 0 is False (not significant), 1 is True (significant), and -1 is for diagonal elements. Examples -------- >>> p_values = np.array([[ 0. , 0.00119517, 0.00278329], [ 0.00119517, 0. , 0.18672227], [ 0.00278329, 0.18672227, 0. ]]) >>> ph.sign_array(p_values) array([[-1, 1, 1], [ 1, -1, 0], [ 1, 0, -1]]) """ p_values = np.array(p_values) p_values[p_values > alpha] = 0 p_values[(p_values < alpha) & (p_values > 0)] = 1 np.fill_diagonal(p_values, -1) return p_values
['def', 'sign_array', '(', 'p_values', ',', 'alpha', '=', '0.05', ')', ':', 'p_values', '=', 'np', '.', 'array', '(', 'p_values', ')', 'p_values', '[', 'p_values', '>', 'alpha', ']', '=', '0', 'p_values', '[', '(', 'p_values', '<', 'alpha', ')', '&', '(', 'p_values', '>', '0', ')', ']', '=', '1', 'np', '.', 'fill_diagonal', '(', 'p_values', ',', '-', '1', ')', 'return', 'p_values']
Significance array Converts an array with p values to a significance array where 0 is False (not significant), 1 is True (significant), and -1 is for diagonal elements. Parameters ---------- p_values : array_like or ndarray An array, any object exposing the array interface, containing p values. alpha : float, optional Significance level. Default is 0.05. Returns ------- Numpy array where 0 is False (not significant), 1 is True (significant), and -1 is for diagonal elements. Examples -------- >>> p_values = np.array([[ 0. , 0.00119517, 0.00278329], [ 0.00119517, 0. , 0.18672227], [ 0.00278329, 0.18672227, 0. ]]) >>> ph.sign_array(p_values) array([[-1, 1, 1], [ 1, -1, 0], [ 1, 0, -1]])
['Significance', 'array']
train
https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_plotting.py#L8-L47
9,346
dslackw/slpkg
slpkg/sbo/network.py
SBoNetwork.choice_info
def choice_info(self): """View .info file """ info = ReadSBo(self.sbo_url).info(self.name, ".info") fill = self.fill_pager(info) self.pager(info + fill)
python
def choice_info(self): """View .info file """ info = ReadSBo(self.sbo_url).info(self.name, ".info") fill = self.fill_pager(info) self.pager(info + fill)
['def', 'choice_info', '(', 'self', ')', ':', 'info', '=', 'ReadSBo', '(', 'self', '.', 'sbo_url', ')', '.', 'info', '(', 'self', '.', 'name', ',', '".info"', ')', 'fill', '=', 'self', '.', 'fill_pager', '(', 'info', ')', 'self', '.', 'pager', '(', 'info', '+', 'fill', ')']
View .info file
['View', '.', 'info', 'file']
train
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/sbo/network.py#L201-L206
9,347
googleapis/google-cloud-python
bigquery_storage/google/cloud/bigquery_storage_v1beta1/reader.py
ReadRowsPage._parse_block
def _parse_block(self): """Parse metadata and rows from the block only once.""" if self._iter_rows is not None: return rows = _avro_rows(self._block, self._avro_schema) self._num_items = self._block.avro_rows.row_count self._remaining = self._block.avro_rows.row_count self._iter_rows = iter(rows)
python
def _parse_block(self): """Parse metadata and rows from the block only once.""" if self._iter_rows is not None: return rows = _avro_rows(self._block, self._avro_schema) self._num_items = self._block.avro_rows.row_count self._remaining = self._block.avro_rows.row_count self._iter_rows = iter(rows)
['def', '_parse_block', '(', 'self', ')', ':', 'if', 'self', '.', '_iter_rows', 'is', 'not', 'None', ':', 'return', 'rows', '=', '_avro_rows', '(', 'self', '.', '_block', ',', 'self', '.', '_avro_schema', ')', 'self', '.', '_num_items', '=', 'self', '.', '_block', '.', 'avro_rows', '.', 'row_count', 'self', '.', '_remaining', '=', 'self', '.', '_block', '.', 'avro_rows', '.', 'row_count', 'self', '.', '_iter_rows', '=', 'iter', '(', 'rows', ')']
Parse metadata and rows from the block only once.
['Parse', 'metadata', 'and', 'rows', 'from', 'the', 'block', 'only', 'once', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery_storage/google/cloud/bigquery_storage_v1beta1/reader.py#L301-L309
9,348
CentOS/python-cicoclient
cicoclient/utils.py
get_dict_properties
def get_dict_properties(item, fields, mixed_case_fields=[], formatters={}): """Return a tuple containing the item properties. :param item: a single dict resource :param fields: tuple of strings with the desired field names :param mixed_case_fields: tuple of field names to preserve case :param formatters: dictionary mapping field names to callables to format the values """ row = [] for field in fields: if field in mixed_case_fields: field_name = field.replace(' ', '_') else: field_name = field.lower().replace(' ', '_') data = item[field_name] if field_name in item else '' if field in formatters: row.append(formatters[field](data)) else: row.append(data) return tuple(row)
python
def get_dict_properties(item, fields, mixed_case_fields=[], formatters={}): """Return a tuple containing the item properties. :param item: a single dict resource :param fields: tuple of strings with the desired field names :param mixed_case_fields: tuple of field names to preserve case :param formatters: dictionary mapping field names to callables to format the values """ row = [] for field in fields: if field in mixed_case_fields: field_name = field.replace(' ', '_') else: field_name = field.lower().replace(' ', '_') data = item[field_name] if field_name in item else '' if field in formatters: row.append(formatters[field](data)) else: row.append(data) return tuple(row)
['def', 'get_dict_properties', '(', 'item', ',', 'fields', ',', 'mixed_case_fields', '=', '[', ']', ',', 'formatters', '=', '{', '}', ')', ':', 'row', '=', '[', ']', 'for', 'field', 'in', 'fields', ':', 'if', 'field', 'in', 'mixed_case_fields', ':', 'field_name', '=', 'field', '.', 'replace', '(', "' '", ',', "'_'", ')', 'else', ':', 'field_name', '=', 'field', '.', 'lower', '(', ')', '.', 'replace', '(', "' '", ',', "'_'", ')', 'data', '=', 'item', '[', 'field_name', ']', 'if', 'field_name', 'in', 'item', 'else', "''", 'if', 'field', 'in', 'formatters', ':', 'row', '.', 'append', '(', 'formatters', '[', 'field', ']', '(', 'data', ')', ')', 'else', ':', 'row', '.', 'append', '(', 'data', ')', 'return', 'tuple', '(', 'row', ')']
Return a tuple containing the item properties. :param item: a single dict resource :param fields: tuple of strings with the desired field names :param mixed_case_fields: tuple of field names to preserve case :param formatters: dictionary mapping field names to callables to format the values
['Return', 'a', 'tuple', 'containing', 'the', 'item', 'properties', '.', ':', 'param', 'item', ':', 'a', 'single', 'dict', 'resource', ':', 'param', 'fields', ':', 'tuple', 'of', 'strings', 'with', 'the', 'desired', 'field', 'names', ':', 'param', 'mixed_case_fields', ':', 'tuple', 'of', 'field', 'names', 'to', 'preserve', 'case', ':', 'param', 'formatters', ':', 'dictionary', 'mapping', 'field', 'names', 'to', 'callables', 'to', 'format', 'the', 'values']
train
https://github.com/CentOS/python-cicoclient/blob/ffee34f446ceb25348b13a500d5c545df202c182/cicoclient/utils.py#L21-L41
9,349
datosgobar/pydatajson
pydatajson/federation.py
restore_organization_to_ckan
def restore_organization_to_ckan(catalog, owner_org, portal_url, apikey, dataset_list=None, download_strategy=None, generate_new_access_url=None): """Restaura los datasets de la organización de un catálogo al portal pasado por parámetro. Si hay temas presentes en el DataJson que no están en el portal de CKAN, los genera. Args: catalog (DataJson): El catálogo de origen que se restaura. portal_url (str): La URL del portal CKAN de destino. apikey (str): La apikey de un usuario con los permisos que le permitan crear o actualizar el dataset. dataset_list(list(str)): Los ids de los datasets a restaurar. Si no se pasa una lista, todos los datasests se restauran. owner_org (str): La organización a la cual pertencen los datasets. download_strategy(callable): Una función (catálogo, distribución)-> bool. Sobre las distribuciones que evalúa True, descarga el recurso en el downloadURL y lo sube al portal de destino. Por default no sube ninguna distribución. generate_new_access_url(list): Se pasan los ids de las distribuciones cuyo accessURL se regenerar en el portal de destino. Para el resto, el portal debe mantiene el valor pasado en el DataJson. Returns: list(str): La lista de ids de datasets subidos. """ push_new_themes(catalog, portal_url, apikey) restored = [] if dataset_list is None: try: dataset_list = [ds['identifier'] for ds in catalog.datasets] except KeyError: logger.exception('Hay datasets sin identificadores') return restored for dataset_id in dataset_list: try: restored_id = restore_dataset_to_ckan(catalog, owner_org, dataset_id, portal_url, apikey, download_strategy, generate_new_access_url) restored.append(restored_id) except (CKANAPIError, KeyError, AttributeError) as e: logger.exception('Ocurrió un error restaurando el dataset {}: {}' .format(dataset_id, str(e))) return restored
python
def restore_organization_to_ckan(catalog, owner_org, portal_url, apikey, dataset_list=None, download_strategy=None, generate_new_access_url=None): """Restaura los datasets de la organización de un catálogo al portal pasado por parámetro. Si hay temas presentes en el DataJson que no están en el portal de CKAN, los genera. Args: catalog (DataJson): El catálogo de origen que se restaura. portal_url (str): La URL del portal CKAN de destino. apikey (str): La apikey de un usuario con los permisos que le permitan crear o actualizar el dataset. dataset_list(list(str)): Los ids de los datasets a restaurar. Si no se pasa una lista, todos los datasests se restauran. owner_org (str): La organización a la cual pertencen los datasets. download_strategy(callable): Una función (catálogo, distribución)-> bool. Sobre las distribuciones que evalúa True, descarga el recurso en el downloadURL y lo sube al portal de destino. Por default no sube ninguna distribución. generate_new_access_url(list): Se pasan los ids de las distribuciones cuyo accessURL se regenerar en el portal de destino. Para el resto, el portal debe mantiene el valor pasado en el DataJson. Returns: list(str): La lista de ids de datasets subidos. """ push_new_themes(catalog, portal_url, apikey) restored = [] if dataset_list is None: try: dataset_list = [ds['identifier'] for ds in catalog.datasets] except KeyError: logger.exception('Hay datasets sin identificadores') return restored for dataset_id in dataset_list: try: restored_id = restore_dataset_to_ckan(catalog, owner_org, dataset_id, portal_url, apikey, download_strategy, generate_new_access_url) restored.append(restored_id) except (CKANAPIError, KeyError, AttributeError) as e: logger.exception('Ocurrió un error restaurando el dataset {}: {}' .format(dataset_id, str(e))) return restored
['def', 'restore_organization_to_ckan', '(', 'catalog', ',', 'owner_org', ',', 'portal_url', ',', 'apikey', ',', 'dataset_list', '=', 'None', ',', 'download_strategy', '=', 'None', ',', 'generate_new_access_url', '=', 'None', ')', ':', 'push_new_themes', '(', 'catalog', ',', 'portal_url', ',', 'apikey', ')', 'restored', '=', '[', ']', 'if', 'dataset_list', 'is', 'None', ':', 'try', ':', 'dataset_list', '=', '[', 'ds', '[', "'identifier'", ']', 'for', 'ds', 'in', 'catalog', '.', 'datasets', ']', 'except', 'KeyError', ':', 'logger', '.', 'exception', '(', "'Hay datasets sin identificadores'", ')', 'return', 'restored', 'for', 'dataset_id', 'in', 'dataset_list', ':', 'try', ':', 'restored_id', '=', 'restore_dataset_to_ckan', '(', 'catalog', ',', 'owner_org', ',', 'dataset_id', ',', 'portal_url', ',', 'apikey', ',', 'download_strategy', ',', 'generate_new_access_url', ')', 'restored', '.', 'append', '(', 'restored_id', ')', 'except', '(', 'CKANAPIError', ',', 'KeyError', ',', 'AttributeError', ')', 'as', 'e', ':', 'logger', '.', 'exception', '(', "'Ocurrió un error restaurando el dataset {}: {}'", '.', 'format', '(', 'dataset_id', ',', 'str', '(', 'e', ')', ')', ')', 'return', 'restored']
Restaura los datasets de la organización de un catálogo al portal pasado por parámetro. Si hay temas presentes en el DataJson que no están en el portal de CKAN, los genera. Args: catalog (DataJson): El catálogo de origen que se restaura. portal_url (str): La URL del portal CKAN de destino. apikey (str): La apikey de un usuario con los permisos que le permitan crear o actualizar el dataset. dataset_list(list(str)): Los ids de los datasets a restaurar. Si no se pasa una lista, todos los datasests se restauran. owner_org (str): La organización a la cual pertencen los datasets. download_strategy(callable): Una función (catálogo, distribución)-> bool. Sobre las distribuciones que evalúa True, descarga el recurso en el downloadURL y lo sube al portal de destino. Por default no sube ninguna distribución. generate_new_access_url(list): Se pasan los ids de las distribuciones cuyo accessURL se regenerar en el portal de destino. Para el resto, el portal debe mantiene el valor pasado en el DataJson. Returns: list(str): La lista de ids de datasets subidos.
['Restaura', 'los', 'datasets', 'de', 'la', 'organización', 'de', 'un', 'catálogo', 'al', 'portal', 'pasado', 'por', 'parámetro', '.', 'Si', 'hay', 'temas', 'presentes', 'en', 'el', 'DataJson', 'que', 'no', 'están', 'en', 'el', 'portal', 'de', 'CKAN', 'los', 'genera', '.']
train
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/federation.py#L496-L541
9,350
orbingol/NURBS-Python
geomdl/exchange.py
import_vmesh
def import_vmesh(file): """ Imports NURBS volume(s) from volume mesh (vmesh) file(s). :param file: path to a directory containing mesh files or a single mesh file :type file: str :return: list of NURBS volumes :rtype: list :raises GeomdlException: an error occurred reading the file """ imported_elements = [] if os.path.isfile(file): imported_elements.append(exch.import_vol_mesh(file)) elif os.path.isdir(file): files = sorted([os.path.join(file, f) for f in os.listdir(file)]) for f in files: imported_elements.append(exch.import_vol_mesh(f)) else: raise exch.GeomdlException("Input is not a file or a directory") return imported_elements
python
def import_vmesh(file): """ Imports NURBS volume(s) from volume mesh (vmesh) file(s). :param file: path to a directory containing mesh files or a single mesh file :type file: str :return: list of NURBS volumes :rtype: list :raises GeomdlException: an error occurred reading the file """ imported_elements = [] if os.path.isfile(file): imported_elements.append(exch.import_vol_mesh(file)) elif os.path.isdir(file): files = sorted([os.path.join(file, f) for f in os.listdir(file)]) for f in files: imported_elements.append(exch.import_vol_mesh(f)) else: raise exch.GeomdlException("Input is not a file or a directory") return imported_elements
['def', 'import_vmesh', '(', 'file', ')', ':', 'imported_elements', '=', '[', ']', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'file', ')', ':', 'imported_elements', '.', 'append', '(', 'exch', '.', 'import_vol_mesh', '(', 'file', ')', ')', 'elif', 'os', '.', 'path', '.', 'isdir', '(', 'file', ')', ':', 'files', '=', 'sorted', '(', '[', 'os', '.', 'path', '.', 'join', '(', 'file', ',', 'f', ')', 'for', 'f', 'in', 'os', '.', 'listdir', '(', 'file', ')', ']', ')', 'for', 'f', 'in', 'files', ':', 'imported_elements', '.', 'append', '(', 'exch', '.', 'import_vol_mesh', '(', 'f', ')', ')', 'else', ':', 'raise', 'exch', '.', 'GeomdlException', '(', '"Input is not a file or a directory"', ')', 'return', 'imported_elements']
Imports NURBS volume(s) from volume mesh (vmesh) file(s). :param file: path to a directory containing mesh files or a single mesh file :type file: str :return: list of NURBS volumes :rtype: list :raises GeomdlException: an error occurred reading the file
['Imports', 'NURBS', 'volume', '(', 's', ')', 'from', 'volume', 'mesh', '(', 'vmesh', ')', 'file', '(', 's', ')', '.']
train
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/exchange.py#L826-L844
9,351
mitsei/dlkit
dlkit/services/resource.py
Bin._set_bin_view
def _set_bin_view(self, session): """Sets the underlying bin view to match current view""" if self._bin_view == FEDERATED: try: session.use_federated_bin_view() except AttributeError: pass else: try: session.use_isolated_bin_view() except AttributeError: pass
python
def _set_bin_view(self, session): """Sets the underlying bin view to match current view""" if self._bin_view == FEDERATED: try: session.use_federated_bin_view() except AttributeError: pass else: try: session.use_isolated_bin_view() except AttributeError: pass
['def', '_set_bin_view', '(', 'self', ',', 'session', ')', ':', 'if', 'self', '.', '_bin_view', '==', 'FEDERATED', ':', 'try', ':', 'session', '.', 'use_federated_bin_view', '(', ')', 'except', 'AttributeError', ':', 'pass', 'else', ':', 'try', ':', 'session', '.', 'use_isolated_bin_view', '(', ')', 'except', 'AttributeError', ':', 'pass']
Sets the underlying bin view to match current view
['Sets', 'the', 'underlying', 'bin', 'view', 'to', 'match', 'current', 'view']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/resource.py#L1062-L1073
9,352
ff0000/scarlet
scarlet/cms/sites.py
AdminSite.login
def login(self, request, extra_context=None): """ Displays the login form for the given HttpRequest. """ from django.contrib.auth.views import login context = { 'title': _('Log in'), 'app_path': request.get_full_path(), REDIRECT_FIELD_NAME: request.get_full_path(), } context.update(extra_context or {}) defaults = { 'extra_context': context, 'authentication_form': self.login_form or AdminAuthenticationForm, 'template_name': self.login_template or 'cms/login.html', } return login(request, **defaults)
python
def login(self, request, extra_context=None): """ Displays the login form for the given HttpRequest. """ from django.contrib.auth.views import login context = { 'title': _('Log in'), 'app_path': request.get_full_path(), REDIRECT_FIELD_NAME: request.get_full_path(), } context.update(extra_context or {}) defaults = { 'extra_context': context, 'authentication_form': self.login_form or AdminAuthenticationForm, 'template_name': self.login_template or 'cms/login.html', } return login(request, **defaults)
['def', 'login', '(', 'self', ',', 'request', ',', 'extra_context', '=', 'None', ')', ':', 'from', 'django', '.', 'contrib', '.', 'auth', '.', 'views', 'import', 'login', 'context', '=', '{', "'title'", ':', '_', '(', "'Log in'", ')', ',', "'app_path'", ':', 'request', '.', 'get_full_path', '(', ')', ',', 'REDIRECT_FIELD_NAME', ':', 'request', '.', 'get_full_path', '(', ')', ',', '}', 'context', '.', 'update', '(', 'extra_context', 'or', '{', '}', ')', 'defaults', '=', '{', "'extra_context'", ':', 'context', ',', "'authentication_form'", ':', 'self', '.', 'login_form', 'or', 'AdminAuthenticationForm', ',', "'template_name'", ':', 'self', '.', 'login_template', 'or', "'cms/login.html'", ',', '}', 'return', 'login', '(', 'request', ',', '*', '*', 'defaults', ')']
Displays the login form for the given HttpRequest.
['Displays', 'the', 'login', 'form', 'for', 'the', 'given', 'HttpRequest', '.']
train
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/sites.py#L251-L267
9,353
Holzhaus/python-cmuclmtk
cmuclmtk/__init__.py
mergeidngram
def mergeidngram(output_file, input_files, n=3, ascii_input=False, ascii_output=False): """ Takes a set of id n-gram files (in either binary (by default) or ASCII (if specified) format - note that they should all be in the same format, however) and outputs a merged id N-gram. Notes : This function can also be used to convert id n-gram files between ascii and binary formats. """ cmd = ['mergeidngram'] if n: cmd.extend(['-n', n]) if ascii_input: cmd.append('-ascii_input') if ascii_output: cmd.append('-ascii_output') if len(input_file) > 1: raise MergeError("mergeidngram needs at least 1 input file") cmd.extend(input_files) # Ensure that every parameter is of type 'str' cmd = [str(x) for x in cmd] with open(output_file,'w+') as output_f: with output_to_debuglogger() as err_f: exitcode = subprocess.call(cmd, stdout=output_f, stderr=err_f) logger = logging.getLogger(__name__) logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode)) if exitcode != 0: raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode))
python
def mergeidngram(output_file, input_files, n=3, ascii_input=False, ascii_output=False): """ Takes a set of id n-gram files (in either binary (by default) or ASCII (if specified) format - note that they should all be in the same format, however) and outputs a merged id N-gram. Notes : This function can also be used to convert id n-gram files between ascii and binary formats. """ cmd = ['mergeidngram'] if n: cmd.extend(['-n', n]) if ascii_input: cmd.append('-ascii_input') if ascii_output: cmd.append('-ascii_output') if len(input_file) > 1: raise MergeError("mergeidngram needs at least 1 input file") cmd.extend(input_files) # Ensure that every parameter is of type 'str' cmd = [str(x) for x in cmd] with open(output_file,'w+') as output_f: with output_to_debuglogger() as err_f: exitcode = subprocess.call(cmd, stdout=output_f, stderr=err_f) logger = logging.getLogger(__name__) logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode)) if exitcode != 0: raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode))
['def', 'mergeidngram', '(', 'output_file', ',', 'input_files', ',', 'n', '=', '3', ',', 'ascii_input', '=', 'False', ',', 'ascii_output', '=', 'False', ')', ':', 'cmd', '=', '[', "'mergeidngram'", ']', 'if', 'n', ':', 'cmd', '.', 'extend', '(', '[', "'-n'", ',', 'n', ']', ')', 'if', 'ascii_input', ':', 'cmd', '.', 'append', '(', "'-ascii_input'", ')', 'if', 'ascii_output', ':', 'cmd', '.', 'append', '(', "'-ascii_output'", ')', 'if', 'len', '(', 'input_file', ')', '>', '1', ':', 'raise', 'MergeError', '(', '"mergeidngram needs at least 1 input file"', ')', 'cmd', '.', 'extend', '(', 'input_files', ')', "# Ensure that every parameter is of type 'str'", 'cmd', '=', '[', 'str', '(', 'x', ')', 'for', 'x', 'in', 'cmd', ']', 'with', 'open', '(', 'output_file', ',', "'w+'", ')', 'as', 'output_f', ':', 'with', 'output_to_debuglogger', '(', ')', 'as', 'err_f', ':', 'exitcode', '=', 'subprocess', '.', 'call', '(', 'cmd', ',', 'stdout', '=', 'output_f', ',', 'stderr', '=', 'err_f', ')', 'logger', '=', 'logging', '.', 'getLogger', '(', '__name__', ')', 'logger', '.', 'debug', '(', '"Command \'%s\' returned with exit code \'%d\'."', '%', '(', "' '", '.', 'join', '(', 'cmd', ')', ',', 'exitcode', ')', ')', 'if', 'exitcode', '!=', '0', ':', 'raise', 'ConversionError', '(', '"\'%s\' returned with non-zero exit status \'%s\'"', '%', '(', 'cmd', '[', '0', ']', ',', 'exitcode', ')', ')']
Takes a set of id n-gram files (in either binary (by default) or ASCII (if specified) format - note that they should all be in the same format, however) and outputs a merged id N-gram. Notes : This function can also be used to convert id n-gram files between ascii and binary formats.
['Takes', 'a', 'set', 'of', 'id', 'n', '-', 'gram', 'files', '(', 'in', 'either', 'binary', '(', 'by', 'default', ')', 'or', 'ASCII', '(', 'if', 'specified', ')', 'format', '-', 'note', 'that', 'they', 'should', 'all', 'be', 'in', 'the', 'same', 'format', 'however', ')', 'and', 'outputs', 'a', 'merged', 'id', 'N', '-', 'gram', '.']
train
https://github.com/Holzhaus/python-cmuclmtk/blob/67a5c6713c497ca644ea1c697a70e8d930c9d4b4/cmuclmtk/__init__.py#L360-L392
9,354
orb-framework/orb
orb/core/query.py
Query.inverted
def inverted(self): """ Returns an inverted copy of this query. :return <orb.Query> """ out = self.copy() out.setInverted(not self.isInverted()) return out
python
def inverted(self): """ Returns an inverted copy of this query. :return <orb.Query> """ out = self.copy() out.setInverted(not self.isInverted()) return out
['def', 'inverted', '(', 'self', ')', ':', 'out', '=', 'self', '.', 'copy', '(', ')', 'out', '.', 'setInverted', '(', 'not', 'self', '.', 'isInverted', '(', ')', ')', 'return', 'out']
Returns an inverted copy of this query. :return <orb.Query>
['Returns', 'an', 'inverted', 'copy', 'of', 'this', 'query', '.']
train
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/query.py#L894-L902
9,355
SuperCowPowers/workbench
workbench/server/data_store.py
DataStore.has_sample
def has_sample(self, md5): """Checks if data store has this sample. Args: md5: The md5 digest of the required sample. Returns: True if sample with this md5 is present, else False. """ # The easiest thing is to simply get the sample and if that # succeeds than return True, else return False sample = self.get_sample(md5) return True if sample else False
python
def has_sample(self, md5): """Checks if data store has this sample. Args: md5: The md5 digest of the required sample. Returns: True if sample with this md5 is present, else False. """ # The easiest thing is to simply get the sample and if that # succeeds than return True, else return False sample = self.get_sample(md5) return True if sample else False
['def', 'has_sample', '(', 'self', ',', 'md5', ')', ':', '# The easiest thing is to simply get the sample and if that', '# succeeds than return True, else return False', 'sample', '=', 'self', '.', 'get_sample', '(', 'md5', ')', 'return', 'True', 'if', 'sample', 'else', 'False']
Checks if data store has this sample. Args: md5: The md5 digest of the required sample. Returns: True if sample with this md5 is present, else False.
['Checks', 'if', 'data', 'store', 'has', 'this', 'sample', '.']
train
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/server/data_store.py#L265-L278
9,356
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
ScatterGather.clean_jobs
def clean_jobs(self, recursive=False): """Clean up all the jobs associated with this object. If recursive is True this also clean jobs dispatch by this object.""" self._interface.clean_jobs(self.scatter_link, clean_all=recursive)
python
def clean_jobs(self, recursive=False): """Clean up all the jobs associated with this object. If recursive is True this also clean jobs dispatch by this object.""" self._interface.clean_jobs(self.scatter_link, clean_all=recursive)
['def', 'clean_jobs', '(', 'self', ',', 'recursive', '=', 'False', ')', ':', 'self', '.', '_interface', '.', 'clean_jobs', '(', 'self', '.', 'scatter_link', ',', 'clean_all', '=', 'recursive', ')']
Clean up all the jobs associated with this object. If recursive is True this also clean jobs dispatch by this object.
['Clean', 'up', 'all', 'the', 'jobs', 'associated', 'with', 'this', 'object', '.']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L511-L517
9,357
tensorflow/mesh
mesh_tensorflow/ops.py
slicewise
def slicewise(tf_fn, xs, output_shape=None, output_dtype=None, splittable_dims=None, grad_function=None, name=None): """Slice-wise call to any tensorflow function. The output shape and dtype default to those of the first input. splittable_dims is a list of Dimensions which can be split while keeping the computation valid. Args: tf_fn: a function taking n tf.Tensors and returning a tf.Tensor xs: a list of n Tensors output_shape: a Shape (or list of shapes) output_dtype: a dtype (or list of dtypes) splittable_dims: a list of Dimensions which are ok to split grad_function: an optional gradients function. If None, use tf gradient. name: an optional string Returns: a Tensor (or a tuple of Tensors) """ multiple_outputs = isinstance(output_dtype, list) output_shapes = output_shape if multiple_outputs else [output_shape] output_dtypes = output_dtype if multiple_outputs else [output_dtype] op = SlicewiseOperation( tf_fn, xs, [convert_to_shape(shape) or xs[0].shape for shape in output_shapes], [dtype or xs[0].dtype for dtype in output_dtypes], splittable_dims, grad_function, name=name) return tuple(op.outputs) if multiple_outputs else op.outputs[0]
python
def slicewise(tf_fn, xs, output_shape=None, output_dtype=None, splittable_dims=None, grad_function=None, name=None): """Slice-wise call to any tensorflow function. The output shape and dtype default to those of the first input. splittable_dims is a list of Dimensions which can be split while keeping the computation valid. Args: tf_fn: a function taking n tf.Tensors and returning a tf.Tensor xs: a list of n Tensors output_shape: a Shape (or list of shapes) output_dtype: a dtype (or list of dtypes) splittable_dims: a list of Dimensions which are ok to split grad_function: an optional gradients function. If None, use tf gradient. name: an optional string Returns: a Tensor (or a tuple of Tensors) """ multiple_outputs = isinstance(output_dtype, list) output_shapes = output_shape if multiple_outputs else [output_shape] output_dtypes = output_dtype if multiple_outputs else [output_dtype] op = SlicewiseOperation( tf_fn, xs, [convert_to_shape(shape) or xs[0].shape for shape in output_shapes], [dtype or xs[0].dtype for dtype in output_dtypes], splittable_dims, grad_function, name=name) return tuple(op.outputs) if multiple_outputs else op.outputs[0]
['def', 'slicewise', '(', 'tf_fn', ',', 'xs', ',', 'output_shape', '=', 'None', ',', 'output_dtype', '=', 'None', ',', 'splittable_dims', '=', 'None', ',', 'grad_function', '=', 'None', ',', 'name', '=', 'None', ')', ':', 'multiple_outputs', '=', 'isinstance', '(', 'output_dtype', ',', 'list', ')', 'output_shapes', '=', 'output_shape', 'if', 'multiple_outputs', 'else', '[', 'output_shape', ']', 'output_dtypes', '=', 'output_dtype', 'if', 'multiple_outputs', 'else', '[', 'output_dtype', ']', 'op', '=', 'SlicewiseOperation', '(', 'tf_fn', ',', 'xs', ',', '[', 'convert_to_shape', '(', 'shape', ')', 'or', 'xs', '[', '0', ']', '.', 'shape', 'for', 'shape', 'in', 'output_shapes', ']', ',', '[', 'dtype', 'or', 'xs', '[', '0', ']', '.', 'dtype', 'for', 'dtype', 'in', 'output_dtypes', ']', ',', 'splittable_dims', ',', 'grad_function', ',', 'name', '=', 'name', ')', 'return', 'tuple', '(', 'op', '.', 'outputs', ')', 'if', 'multiple_outputs', 'else', 'op', '.', 'outputs', '[', '0', ']']
Slice-wise call to any tensorflow function. The output shape and dtype default to those of the first input. splittable_dims is a list of Dimensions which can be split while keeping the computation valid. Args: tf_fn: a function taking n tf.Tensors and returning a tf.Tensor xs: a list of n Tensors output_shape: a Shape (or list of shapes) output_dtype: a dtype (or list of dtypes) splittable_dims: a list of Dimensions which are ok to split grad_function: an optional gradients function. If None, use tf gradient. name: an optional string Returns: a Tensor (or a tuple of Tensors)
['Slice', '-', 'wise', 'call', 'to', 'any', 'tensorflow', 'function', '.']
train
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1568-L1605
9,358
pyapi-gitlab/pyapi-gitlab
gitlab/base.py
Base.get
def get(self, uri, default_response=None, **kwargs): """ Call GET on the Gitlab server >>> gitlab = Gitlab(host='http://localhost:10080', verify_ssl=False) >>> gitlab.login(user='root', password='5iveL!fe') >>> gitlab.get('/users/5') :param uri: String with the URI for the endpoint to GET from :param default_response: Return value if JSONDecodeError :param kwargs: Key word arguments to use as GET arguments :return: Dictionary containing response data :raise: HttpError: If invalid response returned """ url = self.api_url + uri response = requests.get(url, params=kwargs, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout) return self.success_or_raise(response, default_response=default_response)
python
def get(self, uri, default_response=None, **kwargs): """ Call GET on the Gitlab server >>> gitlab = Gitlab(host='http://localhost:10080', verify_ssl=False) >>> gitlab.login(user='root', password='5iveL!fe') >>> gitlab.get('/users/5') :param uri: String with the URI for the endpoint to GET from :param default_response: Return value if JSONDecodeError :param kwargs: Key word arguments to use as GET arguments :return: Dictionary containing response data :raise: HttpError: If invalid response returned """ url = self.api_url + uri response = requests.get(url, params=kwargs, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout) return self.success_or_raise(response, default_response=default_response)
['def', 'get', '(', 'self', ',', 'uri', ',', 'default_response', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'url', '=', 'self', '.', 'api_url', '+', 'uri', 'response', '=', 'requests', '.', 'get', '(', 'url', ',', 'params', '=', 'kwargs', ',', 'headers', '=', 'self', '.', 'headers', ',', 'verify', '=', 'self', '.', 'verify_ssl', ',', 'auth', '=', 'self', '.', 'auth', ',', 'timeout', '=', 'self', '.', 'timeout', ')', 'return', 'self', '.', 'success_or_raise', '(', 'response', ',', 'default_response', '=', 'default_response', ')']
Call GET on the Gitlab server >>> gitlab = Gitlab(host='http://localhost:10080', verify_ssl=False) >>> gitlab.login(user='root', password='5iveL!fe') >>> gitlab.get('/users/5') :param uri: String with the URI for the endpoint to GET from :param default_response: Return value if JSONDecodeError :param kwargs: Key word arguments to use as GET arguments :return: Dictionary containing response data :raise: HttpError: If invalid response returned
['Call', 'GET', 'on', 'the', 'Gitlab', 'server']
train
https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/base.py#L55-L74
9,359
gtzampanakis/downloader
downloader.py
Downloader.open_url
def open_url(self, url, stale_after, parse_as_html = True, **kwargs): """ Download or retrieve from cache. url -- The URL to be downloaded, as a string. stale_after -- A network request for the url will be performed if the cached copy does not exist or if it exists but its age (in days) is larger or equal to the stale_after value. A non-positive value will force re-download. parse_as_html -- Parse the resource downloaded as HTML. This uses the lxml.html package to parse the resource leniently, thus it will not fail even for reasonably invalid HTML. This argument also decides the return type of this method; if True, then the return type is an ElementTree.Element root object; if False, the content of the resource is returned as a bytestring. Exceptions raised: BannedException -- If does_show_ban returns True. HTTPCodeNotOKError -- If the returned HTTP status code is not equal to 200. """ _LOGGER.info('open_url() received url: %s', url) today = datetime.date.today() threshold_date = today - datetime.timedelta(stale_after) downloaded = False with self._get_conn() as conn: rs = conn.execute(''' select content from cache where url = ? and date > ? ''', (url, _date_to_sqlite_str(threshold_date)) ) row = rs.fetchone() retry_run = kwargs.get('retry_run', False) assert (not retry_run) or (retry_run and row is None) if row is None: file_obj = self._download(url).get_file_obj() downloaded = True else: file_obj = cStringIO.StringIO(zlib.decompress(row[0])) if parse_as_html: tree = lxml.html.parse(file_obj) tree.getroot().url = url appears_to_be_banned = False if self.does_show_ban(tree.getroot()): appears_to_be_banned = True if downloaded: message = ('Function {f} claims we have been banned, ' 'it was called with an element parsed from url ' '(downloaded, not from cache): {u}' .format(f = self.does_show_ban, u = url)) _LOGGER.error(message) _LOGGER.info('Deleting url %s from the cache (if it exists) ' 'because it triggered ban page cache poisoning ' 'exception', url) with self._get_conn() as conn: conn.execute('delete from cache where url = ?', [str(url)]) if downloaded: raise BannedException(message) else: return self.open_url(url, stale_after, retry_run = True) else: tree = file_obj.read() if downloaded: # make_links_absolute should only be called when the document has a base_url # attribute, which it has not when it has been loaded from the database. So, # this "if" is needed: if parse_as_html: tree.getroot().make_links_absolute(tree.getroot().base_url) to_store = lxml.html.tostring( tree, pretty_print = True, encoding = 'utf-8' ) else: to_store = tree to_store = zlib.compress(to_store, 8) with self._get_conn() as conn: conn.execute(''' insert or replace into cache (url, date, content) values (?, ?, ?) ''', ( str(url), _date_to_sqlite_str(today), sqlite3.Binary(to_store) ) ) return tree
python
def open_url(self, url, stale_after, parse_as_html = True, **kwargs): """ Download or retrieve from cache. url -- The URL to be downloaded, as a string. stale_after -- A network request for the url will be performed if the cached copy does not exist or if it exists but its age (in days) is larger or equal to the stale_after value. A non-positive value will force re-download. parse_as_html -- Parse the resource downloaded as HTML. This uses the lxml.html package to parse the resource leniently, thus it will not fail even for reasonably invalid HTML. This argument also decides the return type of this method; if True, then the return type is an ElementTree.Element root object; if False, the content of the resource is returned as a bytestring. Exceptions raised: BannedException -- If does_show_ban returns True. HTTPCodeNotOKError -- If the returned HTTP status code is not equal to 200. """ _LOGGER.info('open_url() received url: %s', url) today = datetime.date.today() threshold_date = today - datetime.timedelta(stale_after) downloaded = False with self._get_conn() as conn: rs = conn.execute(''' select content from cache where url = ? and date > ? ''', (url, _date_to_sqlite_str(threshold_date)) ) row = rs.fetchone() retry_run = kwargs.get('retry_run', False) assert (not retry_run) or (retry_run and row is None) if row is None: file_obj = self._download(url).get_file_obj() downloaded = True else: file_obj = cStringIO.StringIO(zlib.decompress(row[0])) if parse_as_html: tree = lxml.html.parse(file_obj) tree.getroot().url = url appears_to_be_banned = False if self.does_show_ban(tree.getroot()): appears_to_be_banned = True if downloaded: message = ('Function {f} claims we have been banned, ' 'it was called with an element parsed from url ' '(downloaded, not from cache): {u}' .format(f = self.does_show_ban, u = url)) _LOGGER.error(message) _LOGGER.info('Deleting url %s from the cache (if it exists) ' 'because it triggered ban page cache poisoning ' 'exception', url) with self._get_conn() as conn: conn.execute('delete from cache where url = ?', [str(url)]) if downloaded: raise BannedException(message) else: return self.open_url(url, stale_after, retry_run = True) else: tree = file_obj.read() if downloaded: # make_links_absolute should only be called when the document has a base_url # attribute, which it has not when it has been loaded from the database. So, # this "if" is needed: if parse_as_html: tree.getroot().make_links_absolute(tree.getroot().base_url) to_store = lxml.html.tostring( tree, pretty_print = True, encoding = 'utf-8' ) else: to_store = tree to_store = zlib.compress(to_store, 8) with self._get_conn() as conn: conn.execute(''' insert or replace into cache (url, date, content) values (?, ?, ?) ''', ( str(url), _date_to_sqlite_str(today), sqlite3.Binary(to_store) ) ) return tree
['def', 'open_url', '(', 'self', ',', 'url', ',', 'stale_after', ',', 'parse_as_html', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', '_LOGGER', '.', 'info', '(', "'open_url() received url: %s'", ',', 'url', ')', 'today', '=', 'datetime', '.', 'date', '.', 'today', '(', ')', 'threshold_date', '=', 'today', '-', 'datetime', '.', 'timedelta', '(', 'stale_after', ')', 'downloaded', '=', 'False', 'with', 'self', '.', '_get_conn', '(', ')', 'as', 'conn', ':', 'rs', '=', 'conn', '.', 'execute', '(', "'''\r\n\t\t\t\tselect content\r\n\t\t\t\tfrom cache\r\n\t\t\t\twhere url = ?\r\n\t\t\t\tand date > ?\r\n\t\t\t\t'''", ',', '(', 'url', ',', '_date_to_sqlite_str', '(', 'threshold_date', ')', ')', ')', 'row', '=', 'rs', '.', 'fetchone', '(', ')', 'retry_run', '=', 'kwargs', '.', 'get', '(', "'retry_run'", ',', 'False', ')', 'assert', '(', 'not', 'retry_run', ')', 'or', '(', 'retry_run', 'and', 'row', 'is', 'None', ')', 'if', 'row', 'is', 'None', ':', 'file_obj', '=', 'self', '.', '_download', '(', 'url', ')', '.', 'get_file_obj', '(', ')', 'downloaded', '=', 'True', 'else', ':', 'file_obj', '=', 'cStringIO', '.', 'StringIO', '(', 'zlib', '.', 'decompress', '(', 'row', '[', '0', ']', ')', ')', 'if', 'parse_as_html', ':', 'tree', '=', 'lxml', '.', 'html', '.', 'parse', '(', 'file_obj', ')', 'tree', '.', 'getroot', '(', ')', '.', 'url', '=', 'url', 'appears_to_be_banned', '=', 'False', 'if', 'self', '.', 'does_show_ban', '(', 'tree', '.', 'getroot', '(', ')', ')', ':', 'appears_to_be_banned', '=', 'True', 'if', 'downloaded', ':', 'message', '=', '(', "'Function {f} claims we have been banned, '", "'it was called with an element parsed from url '", "'(downloaded, not from cache): {u}'", '.', 'format', '(', 'f', '=', 'self', '.', 'does_show_ban', ',', 'u', '=', 'url', ')', ')', '_LOGGER', '.', 'error', '(', 'message', ')', '_LOGGER', '.', 'info', '(', "'Deleting url %s from the cache (if it exists) '", "'because it triggered ban page cache poisoning '", "'exception'", ',', 'url', ')', 'with', 'self', '.', '_get_conn', '(', ')', 'as', 'conn', ':', 'conn', '.', 'execute', '(', "'delete from cache where url = ?'", ',', '[', 'str', '(', 'url', ')', ']', ')', 'if', 'downloaded', ':', 'raise', 'BannedException', '(', 'message', ')', 'else', ':', 'return', 'self', '.', 'open_url', '(', 'url', ',', 'stale_after', ',', 'retry_run', '=', 'True', ')', 'else', ':', 'tree', '=', 'file_obj', '.', 'read', '(', ')', 'if', 'downloaded', ':', '# make_links_absolute should only be called when the document has a base_url\r', '# attribute, which it has not when it has been loaded from the database. So,\r', '# this "if" is needed:\r', 'if', 'parse_as_html', ':', 'tree', '.', 'getroot', '(', ')', '.', 'make_links_absolute', '(', 'tree', '.', 'getroot', '(', ')', '.', 'base_url', ')', 'to_store', '=', 'lxml', '.', 'html', '.', 'tostring', '(', 'tree', ',', 'pretty_print', '=', 'True', ',', 'encoding', '=', "'utf-8'", ')', 'else', ':', 'to_store', '=', 'tree', 'to_store', '=', 'zlib', '.', 'compress', '(', 'to_store', ',', '8', ')', 'with', 'self', '.', '_get_conn', '(', ')', 'as', 'conn', ':', 'conn', '.', 'execute', '(', "'''\r\n\t\t\t\t\tinsert or replace \r\n\t\t\t\t\tinto cache\r\n\t\t\t\t\t(url, date, content)\r\n\t\t\t\t\tvalues\r\n\t\t\t\t\t(?, ?, ?)\r\n\t\t\t\t\t'''", ',', '(', 'str', '(', 'url', ')', ',', '_date_to_sqlite_str', '(', 'today', ')', ',', 'sqlite3', '.', 'Binary', '(', 'to_store', ')', ')', ')', 'return', 'tree']
Download or retrieve from cache. url -- The URL to be downloaded, as a string. stale_after -- A network request for the url will be performed if the cached copy does not exist or if it exists but its age (in days) is larger or equal to the stale_after value. A non-positive value will force re-download. parse_as_html -- Parse the resource downloaded as HTML. This uses the lxml.html package to parse the resource leniently, thus it will not fail even for reasonably invalid HTML. This argument also decides the return type of this method; if True, then the return type is an ElementTree.Element root object; if False, the content of the resource is returned as a bytestring. Exceptions raised: BannedException -- If does_show_ban returns True. HTTPCodeNotOKError -- If the returned HTTP status code is not equal to 200.
['Download', 'or', 'retrieve', 'from', 'cache', '.', 'url', '--', 'The', 'URL', 'to', 'be', 'downloaded', 'as', 'a', 'string', '.', 'stale_after', '--', 'A', 'network', 'request', 'for', 'the', 'url', 'will', 'be', 'performed', 'if', 'the', 'cached', 'copy', 'does', 'not', 'exist', 'or', 'if', 'it', 'exists', 'but', 'its', 'age', '(', 'in', 'days', ')', 'is', 'larger', 'or', 'equal', 'to', 'the', 'stale_after', 'value', '.', 'A', 'non', '-', 'positive', 'value', 'will', 'force', 're', '-', 'download', '.', 'parse_as_html', '--', 'Parse', 'the', 'resource', 'downloaded', 'as', 'HTML', '.', 'This', 'uses', 'the', 'lxml', '.', 'html', 'package', 'to', 'parse', 'the', 'resource', 'leniently', 'thus', 'it', 'will', 'not', 'fail', 'even', 'for', 'reasonably', 'invalid', 'HTML', '.', 'This', 'argument', 'also', 'decides', 'the', 'return', 'type', 'of', 'this', 'method', ';', 'if', 'True', 'then', 'the', 'return', 'type', 'is', 'an', 'ElementTree', '.', 'Element', 'root', 'object', ';', 'if', 'False', 'the', 'content', 'of', 'the', 'resource', 'is', 'returned', 'as', 'a', 'bytestring', '.', 'Exceptions', 'raised', ':', 'BannedException', '--', 'If', 'does_show_ban', 'returns', 'True', '.', 'HTTPCodeNotOKError', '--', 'If', 'the', 'returned', 'HTTP', 'status', 'code', 'is', 'not', 'equal', 'to', '200', '.']
train
https://github.com/gtzampanakis/downloader/blob/7354f68adc72f2bfc472f41596af6ee8b3e6ea88/downloader.py#L192-L297
9,360
Crunch-io/crunch-cube
src/cr/cube/measures/pairwise_significance.py
PairwiseSignificance.summary_pairwise_indices
def summary_pairwise_indices(self): """ndarray containing tuples of pairwise indices for the column summary.""" summary_pairwise_indices = np.empty( self.values[0].t_stats.shape[1], dtype=object ) summary_pairwise_indices[:] = [ sig.summary_pairwise_indices for sig in self.values ] return summary_pairwise_indices
python
def summary_pairwise_indices(self): """ndarray containing tuples of pairwise indices for the column summary.""" summary_pairwise_indices = np.empty( self.values[0].t_stats.shape[1], dtype=object ) summary_pairwise_indices[:] = [ sig.summary_pairwise_indices for sig in self.values ] return summary_pairwise_indices
['def', 'summary_pairwise_indices', '(', 'self', ')', ':', 'summary_pairwise_indices', '=', 'np', '.', 'empty', '(', 'self', '.', 'values', '[', '0', ']', '.', 't_stats', '.', 'shape', '[', '1', ']', ',', 'dtype', '=', 'object', ')', 'summary_pairwise_indices', '[', ':', ']', '=', '[', 'sig', '.', 'summary_pairwise_indices', 'for', 'sig', 'in', 'self', '.', 'values', ']', 'return', 'summary_pairwise_indices']
ndarray containing tuples of pairwise indices for the column summary.
['ndarray', 'containing', 'tuples', 'of', 'pairwise', 'indices', 'for', 'the', 'column', 'summary', '.']
train
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/measures/pairwise_significance.py#L60-L68
9,361
juiceinc/recipe
recipe/ingredients.py
Dimension.make_column_suffixes
def make_column_suffixes(self): """ Make sure we have the right column suffixes. These will be appended to `id` when generating the query. """ if self.column_suffixes: return self.column_suffixes if len(self.columns) == 0: return () elif len(self.columns) == 1: if self.formatters: return '_raw', else: return '', elif len(self.columns) == 2: if self.formatters: return '_id', '_raw', else: return '_id', '', else: raise BadIngredient( 'column_suffixes must be supplied if there is ' 'more than one column' )
python
def make_column_suffixes(self): """ Make sure we have the right column suffixes. These will be appended to `id` when generating the query. """ if self.column_suffixes: return self.column_suffixes if len(self.columns) == 0: return () elif len(self.columns) == 1: if self.formatters: return '_raw', else: return '', elif len(self.columns) == 2: if self.formatters: return '_id', '_raw', else: return '_id', '', else: raise BadIngredient( 'column_suffixes must be supplied if there is ' 'more than one column' )
['def', 'make_column_suffixes', '(', 'self', ')', ':', 'if', 'self', '.', 'column_suffixes', ':', 'return', 'self', '.', 'column_suffixes', 'if', 'len', '(', 'self', '.', 'columns', ')', '==', '0', ':', 'return', '(', ')', 'elif', 'len', '(', 'self', '.', 'columns', ')', '==', '1', ':', 'if', 'self', '.', 'formatters', ':', 'return', "'_raw'", ',', 'else', ':', 'return', "''", ',', 'elif', 'len', '(', 'self', '.', 'columns', ')', '==', '2', ':', 'if', 'self', '.', 'formatters', ':', 'return', "'_id'", ',', "'_raw'", ',', 'else', ':', 'return', "'_id'", ',', "''", ',', 'else', ':', 'raise', 'BadIngredient', '(', "'column_suffixes must be supplied if there is '", "'more than one column'", ')']
Make sure we have the right column suffixes. These will be appended to `id` when generating the query.
['Make', 'sure', 'we', 'have', 'the', 'right', 'column', 'suffixes', '.', 'These', 'will', 'be', 'appended', 'to', 'id', 'when', 'generating', 'the', 'query', '.']
train
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/ingredients.py#L294-L319
9,362
serge-sans-paille/pythran
pythran/analyses/aliases.py
Aliases.visit_FunctionDef
def visit_FunctionDef(self, node): ''' Initialise aliasing default value before visiting. Add aliasing values for : - Pythonic - globals declarations - current function arguments ''' self.aliases = IntrinsicAliases.copy() self.aliases.update((f.name, {f}) for f in self.global_declarations.values()) self.aliases.update((arg.id, {arg}) for arg in node.args.args) self.generic_visit(node) if Aliases.RetId in self.aliases: # parametrize the expression def parametrize(exp): # constant(?) or global -> no change if isinstance(exp, (ast.Index, Intrinsic, ast.FunctionDef)): return lambda _: {exp} elif isinstance(exp, ContainerOf): pcontainee = parametrize(exp.containee) index = exp.index return lambda args: { ContainerOf(pc, index) for pc in pcontainee(args) } elif isinstance(exp, ast.Name): try: w = node.args.args.index(exp) def return_alias(args): if w < len(args): return {args[w]} else: return {node.args.defaults[w - len(args)]} return return_alias except ValueError: return lambda _: self.get_unbound_value_set() elif isinstance(exp, ast.Subscript): values = parametrize(exp.value) slices = parametrize(exp.slice) return lambda args: { ast.Subscript(value, slice, ast.Load()) for value in values(args) for slice in slices(args)} else: return lambda _: self.get_unbound_value_set() # this is a little tricky: for each returned alias, # parametrize builds a function that, given a list of args, # returns the alias # then as we may have multiple returned alias, we compute the union # of these returned aliases return_aliases = [parametrize(ret_alias) for ret_alias in self.aliases[Aliases.RetId]] def merge_return_aliases(args): merged_return_aliases = set() for return_alias in return_aliases: merged_return_aliases.update(return_alias(args)) return merged_return_aliases node.return_alias = merge_return_aliases
python
def visit_FunctionDef(self, node): ''' Initialise aliasing default value before visiting. Add aliasing values for : - Pythonic - globals declarations - current function arguments ''' self.aliases = IntrinsicAliases.copy() self.aliases.update((f.name, {f}) for f in self.global_declarations.values()) self.aliases.update((arg.id, {arg}) for arg in node.args.args) self.generic_visit(node) if Aliases.RetId in self.aliases: # parametrize the expression def parametrize(exp): # constant(?) or global -> no change if isinstance(exp, (ast.Index, Intrinsic, ast.FunctionDef)): return lambda _: {exp} elif isinstance(exp, ContainerOf): pcontainee = parametrize(exp.containee) index = exp.index return lambda args: { ContainerOf(pc, index) for pc in pcontainee(args) } elif isinstance(exp, ast.Name): try: w = node.args.args.index(exp) def return_alias(args): if w < len(args): return {args[w]} else: return {node.args.defaults[w - len(args)]} return return_alias except ValueError: return lambda _: self.get_unbound_value_set() elif isinstance(exp, ast.Subscript): values = parametrize(exp.value) slices = parametrize(exp.slice) return lambda args: { ast.Subscript(value, slice, ast.Load()) for value in values(args) for slice in slices(args)} else: return lambda _: self.get_unbound_value_set() # this is a little tricky: for each returned alias, # parametrize builds a function that, given a list of args, # returns the alias # then as we may have multiple returned alias, we compute the union # of these returned aliases return_aliases = [parametrize(ret_alias) for ret_alias in self.aliases[Aliases.RetId]] def merge_return_aliases(args): merged_return_aliases = set() for return_alias in return_aliases: merged_return_aliases.update(return_alias(args)) return merged_return_aliases node.return_alias = merge_return_aliases
['def', 'visit_FunctionDef', '(', 'self', ',', 'node', ')', ':', 'self', '.', 'aliases', '=', 'IntrinsicAliases', '.', 'copy', '(', ')', 'self', '.', 'aliases', '.', 'update', '(', '(', 'f', '.', 'name', ',', '{', 'f', '}', ')', 'for', 'f', 'in', 'self', '.', 'global_declarations', '.', 'values', '(', ')', ')', 'self', '.', 'aliases', '.', 'update', '(', '(', 'arg', '.', 'id', ',', '{', 'arg', '}', ')', 'for', 'arg', 'in', 'node', '.', 'args', '.', 'args', ')', 'self', '.', 'generic_visit', '(', 'node', ')', 'if', 'Aliases', '.', 'RetId', 'in', 'self', '.', 'aliases', ':', '# parametrize the expression', 'def', 'parametrize', '(', 'exp', ')', ':', '# constant(?) or global -> no change', 'if', 'isinstance', '(', 'exp', ',', '(', 'ast', '.', 'Index', ',', 'Intrinsic', ',', 'ast', '.', 'FunctionDef', ')', ')', ':', 'return', 'lambda', '_', ':', '{', 'exp', '}', 'elif', 'isinstance', '(', 'exp', ',', 'ContainerOf', ')', ':', 'pcontainee', '=', 'parametrize', '(', 'exp', '.', 'containee', ')', 'index', '=', 'exp', '.', 'index', 'return', 'lambda', 'args', ':', '{', 'ContainerOf', '(', 'pc', ',', 'index', ')', 'for', 'pc', 'in', 'pcontainee', '(', 'args', ')', '}', 'elif', 'isinstance', '(', 'exp', ',', 'ast', '.', 'Name', ')', ':', 'try', ':', 'w', '=', 'node', '.', 'args', '.', 'args', '.', 'index', '(', 'exp', ')', 'def', 'return_alias', '(', 'args', ')', ':', 'if', 'w', '<', 'len', '(', 'args', ')', ':', 'return', '{', 'args', '[', 'w', ']', '}', 'else', ':', 'return', '{', 'node', '.', 'args', '.', 'defaults', '[', 'w', '-', 'len', '(', 'args', ')', ']', '}', 'return', 'return_alias', 'except', 'ValueError', ':', 'return', 'lambda', '_', ':', 'self', '.', 'get_unbound_value_set', '(', ')', 'elif', 'isinstance', '(', 'exp', ',', 'ast', '.', 'Subscript', ')', ':', 'values', '=', 'parametrize', '(', 'exp', '.', 'value', ')', 'slices', '=', 'parametrize', '(', 'exp', '.', 'slice', ')', 'return', 'lambda', 'args', ':', '{', 'ast', '.', 'Subscript', '(', 'value', ',', 'slice', ',', 'ast', '.', 'Load', '(', ')', ')', 'for', 'value', 'in', 'values', '(', 'args', ')', 'for', 'slice', 'in', 'slices', '(', 'args', ')', '}', 'else', ':', 'return', 'lambda', '_', ':', 'self', '.', 'get_unbound_value_set', '(', ')', '# this is a little tricky: for each returned alias,', '# parametrize builds a function that, given a list of args,', '# returns the alias', '# then as we may have multiple returned alias, we compute the union', '# of these returned aliases', 'return_aliases', '=', '[', 'parametrize', '(', 'ret_alias', ')', 'for', 'ret_alias', 'in', 'self', '.', 'aliases', '[', 'Aliases', '.', 'RetId', ']', ']', 'def', 'merge_return_aliases', '(', 'args', ')', ':', 'merged_return_aliases', '=', 'set', '(', ')', 'for', 'return_alias', 'in', 'return_aliases', ':', 'merged_return_aliases', '.', 'update', '(', 'return_alias', '(', 'args', ')', ')', 'return', 'merged_return_aliases', 'node', '.', 'return_alias', '=', 'merge_return_aliases']
Initialise aliasing default value before visiting. Add aliasing values for : - Pythonic - globals declarations - current function arguments
['Initialise', 'aliasing', 'default', 'value', 'before', 'visiting', '.']
train
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/analyses/aliases.py#L532-L600
9,363
Toilal/rebulk
rebulk/rebulk.py
Rebulk.regex
def regex(self, *pattern, **kwargs): """ Add re pattern :param pattern: :type pattern: :return: self :rtype: Rebulk """ self.pattern(self.build_re(*pattern, **kwargs)) return self
python
def regex(self, *pattern, **kwargs): """ Add re pattern :param pattern: :type pattern: :return: self :rtype: Rebulk """ self.pattern(self.build_re(*pattern, **kwargs)) return self
['def', 'regex', '(', 'self', ',', '*', 'pattern', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'pattern', '(', 'self', '.', 'build_re', '(', '*', 'pattern', ',', '*', '*', 'kwargs', ')', ')', 'return', 'self']
Add re pattern :param pattern: :type pattern: :return: self :rtype: Rebulk
['Add', 're', 'pattern']
train
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/rebulk.py#L108-L118
9,364
tomduck/pandoc-xnos
pandocxnos/core.py
_join_strings
def _join_strings(x): """Joins adjacent Str elements found in the element list 'x'.""" for i in range(len(x)-1): # Process successive pairs of elements if x[i]['t'] == 'Str' and x[i+1]['t'] == 'Str': x[i]['c'] += x[i+1]['c'] del x[i+1] # In-place deletion of element from list return None # Forces processing to repeat return True
python
def _join_strings(x): """Joins adjacent Str elements found in the element list 'x'.""" for i in range(len(x)-1): # Process successive pairs of elements if x[i]['t'] == 'Str' and x[i+1]['t'] == 'Str': x[i]['c'] += x[i+1]['c'] del x[i+1] # In-place deletion of element from list return None # Forces processing to repeat return True
['def', '_join_strings', '(', 'x', ')', ':', 'for', 'i', 'in', 'range', '(', 'len', '(', 'x', ')', '-', '1', ')', ':', '# Process successive pairs of elements', 'if', 'x', '[', 'i', ']', '[', "'t'", ']', '==', "'Str'", 'and', 'x', '[', 'i', '+', '1', ']', '[', "'t'", ']', '==', "'Str'", ':', 'x', '[', 'i', ']', '[', "'c'", ']', '+=', 'x', '[', 'i', '+', '1', ']', '[', "'c'", ']', 'del', 'x', '[', 'i', '+', '1', ']', '# In-place deletion of element from list', 'return', 'None', '# Forces processing to repeat', 'return', 'True']
Joins adjacent Str elements found in the element list 'x'.
['Joins', 'adjacent', 'Str', 'elements', 'found', 'in', 'the', 'element', 'list', 'x', '.']
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/core.py#L436-L443
9,365
rbuffat/pyepw
pyepw/epw.py
DataPeriod.data_period_start_day_of_week
def data_period_start_day_of_week(self, value=None): """Corresponds to IDD Field `data_period_start_day_of_week` Args: value (str): value for IDD Field `data_period_start_day_of_week` Accepted values are: - Sunday - Monday - Tuesday - Wednesday - Thursday - Friday - Saturday if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = str(value) except ValueError: raise ValueError( 'value {} need to be of type str ' 'for field `data_period_start_day_of_week`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `data_period_start_day_of_week`') vals = set() vals.add("Sunday") vals.add("Monday") vals.add("Tuesday") vals.add("Wednesday") vals.add("Thursday") vals.add("Friday") vals.add("Saturday") if value not in vals: raise ValueError( 'value {} is not an accepted value for ' 'field `data_period_start_day_of_week`'.format(value)) self._data_period_start_day_of_week = value
python
def data_period_start_day_of_week(self, value=None): """Corresponds to IDD Field `data_period_start_day_of_week` Args: value (str): value for IDD Field `data_period_start_day_of_week` Accepted values are: - Sunday - Monday - Tuesday - Wednesday - Thursday - Friday - Saturday if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = str(value) except ValueError: raise ValueError( 'value {} need to be of type str ' 'for field `data_period_start_day_of_week`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `data_period_start_day_of_week`') vals = set() vals.add("Sunday") vals.add("Monday") vals.add("Tuesday") vals.add("Wednesday") vals.add("Thursday") vals.add("Friday") vals.add("Saturday") if value not in vals: raise ValueError( 'value {} is not an accepted value for ' 'field `data_period_start_day_of_week`'.format(value)) self._data_period_start_day_of_week = value
['def', 'data_period_start_day_of_week', '(', 'self', ',', 'value', '=', 'None', ')', ':', 'if', 'value', 'is', 'not', 'None', ':', 'try', ':', 'value', '=', 'str', '(', 'value', ')', 'except', 'ValueError', ':', 'raise', 'ValueError', '(', "'value {} need to be of type str '", "'for field `data_period_start_day_of_week`'", '.', 'format', '(', 'value', ')', ')', 'if', "','", 'in', 'value', ':', 'raise', 'ValueError', '(', "'value should not contain a comma '", "'for field `data_period_start_day_of_week`'", ')', 'vals', '=', 'set', '(', ')', 'vals', '.', 'add', '(', '"Sunday"', ')', 'vals', '.', 'add', '(', '"Monday"', ')', 'vals', '.', 'add', '(', '"Tuesday"', ')', 'vals', '.', 'add', '(', '"Wednesday"', ')', 'vals', '.', 'add', '(', '"Thursday"', ')', 'vals', '.', 'add', '(', '"Friday"', ')', 'vals', '.', 'add', '(', '"Saturday"', ')', 'if', 'value', 'not', 'in', 'vals', ':', 'raise', 'ValueError', '(', "'value {} is not an accepted value for '", "'field `data_period_start_day_of_week`'", '.', 'format', '(', 'value', ')', ')', 'self', '.', '_data_period_start_day_of_week', '=', 'value']
Corresponds to IDD Field `data_period_start_day_of_week` Args: value (str): value for IDD Field `data_period_start_day_of_week` Accepted values are: - Sunday - Monday - Tuesday - Wednesday - Thursday - Friday - Saturday if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
['Corresponds', 'to', 'IDD', 'Field', 'data_period_start_day_of_week']
train
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L5214-L5257
9,366
pyvisa/pyvisa-sim
pyvisa-sim/devices.py
Device._match_registers
def _match_registers(self, query): """Tries to match in status registers :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if query in self._status_registers: register = self._status_registers[query] response = register.value logger.debug('Found response in status register: %s', repr(response)) register.clear() return response
python
def _match_registers(self, query): """Tries to match in status registers :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if query in self._status_registers: register = self._status_registers[query] response = register.value logger.debug('Found response in status register: %s', repr(response)) register.clear() return response
['def', '_match_registers', '(', 'self', ',', 'query', ')', ':', 'if', 'query', 'in', 'self', '.', '_status_registers', ':', 'register', '=', 'self', '.', '_status_registers', '[', 'query', ']', 'response', '=', 'register', '.', 'value', 'logger', '.', 'debug', '(', "'Found response in status register: %s'", ',', 'repr', '(', 'response', ')', ')', 'register', '.', 'clear', '(', ')', 'return', 'response']
Tries to match in status registers :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None
['Tries', 'to', 'match', 'in', 'status', 'registers']
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L298-L313
9,367
adafruit/Adafruit_Python_PlatformDetect
adafruit_platformdetect/chip.py
Chip.id
def id(self): # pylint: disable=invalid-name,too-many-branches,too-many-return-statements """Return a unique id for the detected chip, if any.""" # There are some times we want to trick the platform detection # say if a raspberry pi doesn't have the right ID, or for testing try: return os.environ['BLINKA_FORCECHIP'] except KeyError: # no forced chip, continue with testing! pass # Special case, if we have an environment var set, we could use FT232H try: if os.environ['BLINKA_FT232H']: # we can't have ftdi1 as a dependency cause its wierd # to install, sigh. import ftdi1 as ftdi # pylint: disable=import-error try: ctx = None ctx = ftdi.new() # Create a libftdi context. # Enumerate FTDI devices. count, _ = ftdi.usb_find_all(ctx, 0, 0) if count < 0: raise RuntimeError('ftdi_usb_find_all returned error %d : %s' % count, ftdi.get_error_string(self._ctx)) if count == 0: raise RuntimeError('BLINKA_FT232H environment variable' + \ 'set, but no FT232H device found') finally: # Make sure to clean up list and context when done. if ctx is not None: ftdi.free(ctx) return FT232H except KeyError: # no FT232H environment var pass platform = sys.platform if platform == "linux" or platform == "linux2": return self._linux_id() if platform == "esp8266": return ESP8266 if platform == "samd21": return SAMD21 if platform == "pyboard": return STM32 # nothing found! return None
python
def id(self): # pylint: disable=invalid-name,too-many-branches,too-many-return-statements """Return a unique id for the detected chip, if any.""" # There are some times we want to trick the platform detection # say if a raspberry pi doesn't have the right ID, or for testing try: return os.environ['BLINKA_FORCECHIP'] except KeyError: # no forced chip, continue with testing! pass # Special case, if we have an environment var set, we could use FT232H try: if os.environ['BLINKA_FT232H']: # we can't have ftdi1 as a dependency cause its wierd # to install, sigh. import ftdi1 as ftdi # pylint: disable=import-error try: ctx = None ctx = ftdi.new() # Create a libftdi context. # Enumerate FTDI devices. count, _ = ftdi.usb_find_all(ctx, 0, 0) if count < 0: raise RuntimeError('ftdi_usb_find_all returned error %d : %s' % count, ftdi.get_error_string(self._ctx)) if count == 0: raise RuntimeError('BLINKA_FT232H environment variable' + \ 'set, but no FT232H device found') finally: # Make sure to clean up list and context when done. if ctx is not None: ftdi.free(ctx) return FT232H except KeyError: # no FT232H environment var pass platform = sys.platform if platform == "linux" or platform == "linux2": return self._linux_id() if platform == "esp8266": return ESP8266 if platform == "samd21": return SAMD21 if platform == "pyboard": return STM32 # nothing found! return None
['def', 'id', '(', 'self', ')', ':', '# pylint: disable=invalid-name,too-many-branches,too-many-return-statements', '# There are some times we want to trick the platform detection', "# say if a raspberry pi doesn't have the right ID, or for testing", 'try', ':', 'return', 'os', '.', 'environ', '[', "'BLINKA_FORCECHIP'", ']', 'except', 'KeyError', ':', '# no forced chip, continue with testing!', 'pass', '# Special case, if we have an environment var set, we could use FT232H', 'try', ':', 'if', 'os', '.', 'environ', '[', "'BLINKA_FT232H'", ']', ':', "# we can't have ftdi1 as a dependency cause its wierd", '# to install, sigh.', 'import', 'ftdi1', 'as', 'ftdi', '# pylint: disable=import-error', 'try', ':', 'ctx', '=', 'None', 'ctx', '=', 'ftdi', '.', 'new', '(', ')', '# Create a libftdi context.', '# Enumerate FTDI devices.', 'count', ',', '_', '=', 'ftdi', '.', 'usb_find_all', '(', 'ctx', ',', '0', ',', '0', ')', 'if', 'count', '<', '0', ':', 'raise', 'RuntimeError', '(', "'ftdi_usb_find_all returned error %d : %s'", '%', 'count', ',', 'ftdi', '.', 'get_error_string', '(', 'self', '.', '_ctx', ')', ')', 'if', 'count', '==', '0', ':', 'raise', 'RuntimeError', '(', "'BLINKA_FT232H environment variable'", '+', "'set, but no FT232H device found'", ')', 'finally', ':', '# Make sure to clean up list and context when done.', 'if', 'ctx', 'is', 'not', 'None', ':', 'ftdi', '.', 'free', '(', 'ctx', ')', 'return', 'FT232H', 'except', 'KeyError', ':', '# no FT232H environment var', 'pass', 'platform', '=', 'sys', '.', 'platform', 'if', 'platform', '==', '"linux"', 'or', 'platform', '==', '"linux2"', ':', 'return', 'self', '.', '_linux_id', '(', ')', 'if', 'platform', '==', '"esp8266"', ':', 'return', 'ESP8266', 'if', 'platform', '==', '"samd21"', ':', 'return', 'SAMD21', 'if', 'platform', '==', '"pyboard"', ':', 'return', 'STM32', '# nothing found!', 'return', 'None']
Return a unique id for the detected chip, if any.
['Return', 'a', 'unique', 'id', 'for', 'the', 'detected', 'chip', 'if', 'any', '.']
train
https://github.com/adafruit/Adafruit_Python_PlatformDetect/blob/cddd4d47e530026778dc4e3c3ccabad14e6eac46/adafruit_platformdetect/chip.py#L26-L70
9,368
cloudsmith-io/cloudsmith-cli
cloudsmith_cli/cli/config.py
Options.error_retry_codes
def error_retry_codes(self, value): """Set value for error_retry_codes.""" if isinstance(value, six.string_types): value = [int(x) for x in value.split(",")] self._set_option("error_retry_codes", value)
python
def error_retry_codes(self, value): """Set value for error_retry_codes.""" if isinstance(value, six.string_types): value = [int(x) for x in value.split(",")] self._set_option("error_retry_codes", value)
['def', 'error_retry_codes', '(', 'self', ',', 'value', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'six', '.', 'string_types', ')', ':', 'value', '=', '[', 'int', '(', 'x', ')', 'for', 'x', 'in', 'value', '.', 'split', '(', '","', ')', ']', 'self', '.', '_set_option', '(', '"error_retry_codes"', ',', 'value', ')']
Set value for error_retry_codes.
['Set', 'value', 'for', 'error_retry_codes', '.']
train
https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/cli/config.py#L340-L344
9,369
mikedh/trimesh
trimesh/scene/cameras.py
Camera.focal
def focal(self): """ Get the focal length in pixels for the camera. Returns ------------ focal : (2,) float Focal length in pixels """ if self._focal is None: # calculate focal length from FOV focal = [(px / 2.0) / np.tan(np.radians(fov / 2.0)) for px, fov in zip(self._resolution, self.fov)] # store as correct dtype self._focal = np.asanyarray(focal, dtype=np.float64) return self._focal
python
def focal(self): """ Get the focal length in pixels for the camera. Returns ------------ focal : (2,) float Focal length in pixels """ if self._focal is None: # calculate focal length from FOV focal = [(px / 2.0) / np.tan(np.radians(fov / 2.0)) for px, fov in zip(self._resolution, self.fov)] # store as correct dtype self._focal = np.asanyarray(focal, dtype=np.float64) return self._focal
['def', 'focal', '(', 'self', ')', ':', 'if', 'self', '.', '_focal', 'is', 'None', ':', '# calculate focal length from FOV', 'focal', '=', '[', '(', 'px', '/', '2.0', ')', '/', 'np', '.', 'tan', '(', 'np', '.', 'radians', '(', 'fov', '/', '2.0', ')', ')', 'for', 'px', ',', 'fov', 'in', 'zip', '(', 'self', '.', '_resolution', ',', 'self', '.', 'fov', ')', ']', '# store as correct dtype', 'self', '.', '_focal', '=', 'np', '.', 'asanyarray', '(', 'focal', ',', 'dtype', '=', 'np', '.', 'float64', ')', 'return', 'self', '.', '_focal']
Get the focal length in pixels for the camera. Returns ------------ focal : (2,) float Focal length in pixels
['Get', 'the', 'focal', 'length', 'in', 'pixels', 'for', 'the', 'camera', '.']
train
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/scene/cameras.py#L180-L196
9,370
matthiask/django-cte-forest
cte_forest/models.py
CTENodeManager.leaves
def leaves(self): """ Returns a :class:`QuerySet` of all leaf nodes (nodes with no children). :return: A :class:`QuerySet` of all leaf nodes (nodes with no children). """ # We need to read the _cte_node_children attribute, so ensure it exists. self._ensure_parameters() return self.exclude( **{"%s__id__in" % self.model._cte_node_children: self.all()} )
python
def leaves(self): """ Returns a :class:`QuerySet` of all leaf nodes (nodes with no children). :return: A :class:`QuerySet` of all leaf nodes (nodes with no children). """ # We need to read the _cte_node_children attribute, so ensure it exists. self._ensure_parameters() return self.exclude( **{"%s__id__in" % self.model._cte_node_children: self.all()} )
['def', 'leaves', '(', 'self', ')', ':', '# We need to read the _cte_node_children attribute, so ensure it exists.', 'self', '.', '_ensure_parameters', '(', ')', 'return', 'self', '.', 'exclude', '(', '*', '*', '{', '"%s__id__in"', '%', 'self', '.', 'model', '.', '_cte_node_children', ':', 'self', '.', 'all', '(', ')', '}', ')']
Returns a :class:`QuerySet` of all leaf nodes (nodes with no children). :return: A :class:`QuerySet` of all leaf nodes (nodes with no children).
['Returns', 'a', ':', 'class', ':', 'QuerySet', 'of', 'all', 'leaf', 'nodes', '(', 'nodes', 'with', 'no', 'children', ')', '.']
train
https://github.com/matthiask/django-cte-forest/blob/7bff29d69eddfcf214e9cf61647c91d28655619c/cte_forest/models.py#L322-L333
9,371
AdvancedClimateSystems/uModbus
scripts/examples/simple_rtu_client.py
get_serial_port
def get_serial_port(): """ Return serial.Serial instance, ready to use for RS485.""" port = Serial(port='/dev/ttyS1', baudrate=9600, parity=PARITY_NONE, stopbits=1, bytesize=8, timeout=1) fh = port.fileno() # A struct with configuration for serial port. serial_rs485 = struct.pack('hhhhhhhh', 1, 0, 0, 0, 0, 0, 0, 0) fcntl.ioctl(fh, 0x542F, serial_rs485) return port
python
def get_serial_port(): """ Return serial.Serial instance, ready to use for RS485.""" port = Serial(port='/dev/ttyS1', baudrate=9600, parity=PARITY_NONE, stopbits=1, bytesize=8, timeout=1) fh = port.fileno() # A struct with configuration for serial port. serial_rs485 = struct.pack('hhhhhhhh', 1, 0, 0, 0, 0, 0, 0, 0) fcntl.ioctl(fh, 0x542F, serial_rs485) return port
['def', 'get_serial_port', '(', ')', ':', 'port', '=', 'Serial', '(', 'port', '=', "'/dev/ttyS1'", ',', 'baudrate', '=', '9600', ',', 'parity', '=', 'PARITY_NONE', ',', 'stopbits', '=', '1', ',', 'bytesize', '=', '8', ',', 'timeout', '=', '1', ')', 'fh', '=', 'port', '.', 'fileno', '(', ')', '# A struct with configuration for serial port.', 'serial_rs485', '=', 'struct', '.', 'pack', '(', "'hhhhhhhh'", ',', '1', ',', '0', ',', '0', ',', '0', ',', '0', ',', '0', ',', '0', ',', '0', ')', 'fcntl', '.', 'ioctl', '(', 'fh', ',', '0x542F', ',', 'serial_rs485', ')', 'return', 'port']
Return serial.Serial instance, ready to use for RS485.
['Return', 'serial', '.', 'Serial', 'instance', 'ready', 'to', 'use', 'for', 'RS485', '.']
train
https://github.com/AdvancedClimateSystems/uModbus/blob/0560a42308003f4072d988f28042b8d55b694ad4/scripts/examples/simple_rtu_client.py#L10-L21
9,372
databio/pypiper
pypiper/utils.py
add_pypiper_args
def add_pypiper_args(parser, groups=("pypiper", ), args=None, required=None, all_args=False): """ Use this to add standardized pypiper arguments to your python pipeline. There are two ways to use `add_pypiper_args`: by specifying argument groups, or by specifying individual arguments. Specifying argument groups will add multiple arguments to your parser; these convenient argument groupings make it easy to add arguments to certain types of pipeline. For example, to make a looper-compatible pipeline, use `groups = ["pypiper", "looper"]`. :param argparse.ArgumentParser parser: ArgumentParser object from a pipeline :param str | Iterable[str] groups: Adds arguments belong to specified group of args. Options: pypiper, config, looper, resources, common, ngs, all. :param str | Iterable[str] args: You may specify a list of specific arguments one by one. :param Iterable[str] required: Arguments to be flagged as 'required' by argparse. :param bool all_args: Whether to include all of pypiper's arguments defined here. :return argparse.ArgumentParser: A new ArgumentParser object, with selected pypiper arguments added """ args_to_add = _determine_args( argument_groups=groups, arguments=args, use_all_args=all_args) parser = _add_args(parser, args_to_add, required) return parser
python
def add_pypiper_args(parser, groups=("pypiper", ), args=None, required=None, all_args=False): """ Use this to add standardized pypiper arguments to your python pipeline. There are two ways to use `add_pypiper_args`: by specifying argument groups, or by specifying individual arguments. Specifying argument groups will add multiple arguments to your parser; these convenient argument groupings make it easy to add arguments to certain types of pipeline. For example, to make a looper-compatible pipeline, use `groups = ["pypiper", "looper"]`. :param argparse.ArgumentParser parser: ArgumentParser object from a pipeline :param str | Iterable[str] groups: Adds arguments belong to specified group of args. Options: pypiper, config, looper, resources, common, ngs, all. :param str | Iterable[str] args: You may specify a list of specific arguments one by one. :param Iterable[str] required: Arguments to be flagged as 'required' by argparse. :param bool all_args: Whether to include all of pypiper's arguments defined here. :return argparse.ArgumentParser: A new ArgumentParser object, with selected pypiper arguments added """ args_to_add = _determine_args( argument_groups=groups, arguments=args, use_all_args=all_args) parser = _add_args(parser, args_to_add, required) return parser
['def', 'add_pypiper_args', '(', 'parser', ',', 'groups', '=', '(', '"pypiper"', ',', ')', ',', 'args', '=', 'None', ',', 'required', '=', 'None', ',', 'all_args', '=', 'False', ')', ':', 'args_to_add', '=', '_determine_args', '(', 'argument_groups', '=', 'groups', ',', 'arguments', '=', 'args', ',', 'use_all_args', '=', 'all_args', ')', 'parser', '=', '_add_args', '(', 'parser', ',', 'args_to_add', ',', 'required', ')', 'return', 'parser']
Use this to add standardized pypiper arguments to your python pipeline. There are two ways to use `add_pypiper_args`: by specifying argument groups, or by specifying individual arguments. Specifying argument groups will add multiple arguments to your parser; these convenient argument groupings make it easy to add arguments to certain types of pipeline. For example, to make a looper-compatible pipeline, use `groups = ["pypiper", "looper"]`. :param argparse.ArgumentParser parser: ArgumentParser object from a pipeline :param str | Iterable[str] groups: Adds arguments belong to specified group of args. Options: pypiper, config, looper, resources, common, ngs, all. :param str | Iterable[str] args: You may specify a list of specific arguments one by one. :param Iterable[str] required: Arguments to be flagged as 'required' by argparse. :param bool all_args: Whether to include all of pypiper's arguments defined here. :return argparse.ArgumentParser: A new ArgumentParser object, with selected pypiper arguments added
['Use', 'this', 'to', 'add', 'standardized', 'pypiper', 'arguments', 'to', 'your', 'python', 'pipeline', '.']
train
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/utils.py#L31-L54
9,373
saltstack/salt
salt/modules/modjk.py
worker_edit
def worker_edit(worker, lbn, settings, profile='default'): ''' Edit the worker settings Note: http://tomcat.apache.org/connectors-doc/reference/status.html Data Parameters for the standard Update Action CLI Examples: .. code-block:: bash salt '*' modjk.worker_edit node1 loadbalancer1 "{'vwf': 500, 'vwd': 60}" salt '*' modjk.worker_edit node1 loadbalancer1 "{'vwf': 500, 'vwd': 60}" other-profile ''' settings['cmd'] = 'update' settings['mime'] = 'prop' settings['w'] = lbn settings['sw'] = worker return _do_http(settings, profile)['worker.result.type'] == 'OK'
python
def worker_edit(worker, lbn, settings, profile='default'): ''' Edit the worker settings Note: http://tomcat.apache.org/connectors-doc/reference/status.html Data Parameters for the standard Update Action CLI Examples: .. code-block:: bash salt '*' modjk.worker_edit node1 loadbalancer1 "{'vwf': 500, 'vwd': 60}" salt '*' modjk.worker_edit node1 loadbalancer1 "{'vwf': 500, 'vwd': 60}" other-profile ''' settings['cmd'] = 'update' settings['mime'] = 'prop' settings['w'] = lbn settings['sw'] = worker return _do_http(settings, profile)['worker.result.type'] == 'OK'
['def', 'worker_edit', '(', 'worker', ',', 'lbn', ',', 'settings', ',', 'profile', '=', "'default'", ')', ':', 'settings', '[', "'cmd'", ']', '=', "'update'", 'settings', '[', "'mime'", ']', '=', "'prop'", 'settings', '[', "'w'", ']', '=', 'lbn', 'settings', '[', "'sw'", ']', '=', 'worker', 'return', '_do_http', '(', 'settings', ',', 'profile', ')', '[', "'worker.result.type'", ']', '==', "'OK'"]
Edit the worker settings Note: http://tomcat.apache.org/connectors-doc/reference/status.html Data Parameters for the standard Update Action CLI Examples: .. code-block:: bash salt '*' modjk.worker_edit node1 loadbalancer1 "{'vwf': 500, 'vwd': 60}" salt '*' modjk.worker_edit node1 loadbalancer1 "{'vwf': 500, 'vwd': 60}" other-profile
['Edit', 'the', 'worker', 'settings']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/modjk.py#L507-L527
9,374
jart/fabulous
fabulous/text.py
get_font_files
def get_font_files(): """Returns a list of all font files we could find Returned as a list of dir/files tuples:: get_font_files() -> {'FontName': '/abs/FontName.ttf', ...] For example:: >>> fonts = get_font_files() >>> 'NotoSans-Bold' in fonts True >>> fonts['NotoSans-Bold'].endswith('/NotoSans-Bold.ttf') True """ roots = [ '/usr/share/fonts/truetype', # where ubuntu puts fonts '/usr/share/fonts', # where fedora puts fonts os.path.expanduser('~/.fonts'), # custom user fonts os.path.abspath(os.path.join(os.path.dirname(__file__), 'fonts')), ] result = {} for root in roots: for path, dirs, names in os.walk(root): for name in names: if name.endswith(('.ttf', '.otf')): result[name[:-4]] = os.path.join(path, name) return result
python
def get_font_files(): """Returns a list of all font files we could find Returned as a list of dir/files tuples:: get_font_files() -> {'FontName': '/abs/FontName.ttf', ...] For example:: >>> fonts = get_font_files() >>> 'NotoSans-Bold' in fonts True >>> fonts['NotoSans-Bold'].endswith('/NotoSans-Bold.ttf') True """ roots = [ '/usr/share/fonts/truetype', # where ubuntu puts fonts '/usr/share/fonts', # where fedora puts fonts os.path.expanduser('~/.fonts'), # custom user fonts os.path.abspath(os.path.join(os.path.dirname(__file__), 'fonts')), ] result = {} for root in roots: for path, dirs, names in os.walk(root): for name in names: if name.endswith(('.ttf', '.otf')): result[name[:-4]] = os.path.join(path, name) return result
['def', 'get_font_files', '(', ')', ':', 'roots', '=', '[', "'/usr/share/fonts/truetype'", ',', '# where ubuntu puts fonts', "'/usr/share/fonts'", ',', '# where fedora puts fonts', 'os', '.', 'path', '.', 'expanduser', '(', "'~/.fonts'", ')', ',', '# custom user fonts', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'dirname', '(', '__file__', ')', ',', "'fonts'", ')', ')', ',', ']', 'result', '=', '{', '}', 'for', 'root', 'in', 'roots', ':', 'for', 'path', ',', 'dirs', ',', 'names', 'in', 'os', '.', 'walk', '(', 'root', ')', ':', 'for', 'name', 'in', 'names', ':', 'if', 'name', '.', 'endswith', '(', '(', "'.ttf'", ',', "'.otf'", ')', ')', ':', 'result', '[', 'name', '[', ':', '-', '4', ']', ']', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'name', ')', 'return', 'result']
Returns a list of all font files we could find Returned as a list of dir/files tuples:: get_font_files() -> {'FontName': '/abs/FontName.ttf', ...] For example:: >>> fonts = get_font_files() >>> 'NotoSans-Bold' in fonts True >>> fonts['NotoSans-Bold'].endswith('/NotoSans-Bold.ttf') True
['Returns', 'a', 'list', 'of', 'all', 'font', 'files', 'we', 'could', 'find']
train
https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/text.py#L180-L208
9,375
spacetelescope/drizzlepac
drizzlepac/catalogs.py
generateCatalog
def generateCatalog(wcs, mode='automatic', catalog=None, src_find_filters=None, **kwargs): """ Function which determines what type of catalog object needs to be instantiated based on what type of source selection algorithm the user specified. Parameters ---------- wcs : obj WCS object generated by STWCS or PyWCS catalog : str or ndarray Filename of existing catalog or ndarray of image for generation of source catalog. kwargs : dict Parameters needed to interpret source catalog from input catalog with `findmode` being required. Returns ------- catalog : obj A Catalog-based class instance for keeping track of WCS and associated source catalog """ if not isinstance(catalog,Catalog): if mode == 'automatic': # if an array is provided as the source # Create a new catalog directly from the image catalog = ImageCatalog(wcs,catalog,src_find_filters,**kwargs) else: # a catalog file was provided as the catalog source catalog = UserCatalog(wcs,catalog,**kwargs) return catalog
python
def generateCatalog(wcs, mode='automatic', catalog=None, src_find_filters=None, **kwargs): """ Function which determines what type of catalog object needs to be instantiated based on what type of source selection algorithm the user specified. Parameters ---------- wcs : obj WCS object generated by STWCS or PyWCS catalog : str or ndarray Filename of existing catalog or ndarray of image for generation of source catalog. kwargs : dict Parameters needed to interpret source catalog from input catalog with `findmode` being required. Returns ------- catalog : obj A Catalog-based class instance for keeping track of WCS and associated source catalog """ if not isinstance(catalog,Catalog): if mode == 'automatic': # if an array is provided as the source # Create a new catalog directly from the image catalog = ImageCatalog(wcs,catalog,src_find_filters,**kwargs) else: # a catalog file was provided as the catalog source catalog = UserCatalog(wcs,catalog,**kwargs) return catalog
['def', 'generateCatalog', '(', 'wcs', ',', 'mode', '=', "'automatic'", ',', 'catalog', '=', 'None', ',', 'src_find_filters', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'isinstance', '(', 'catalog', ',', 'Catalog', ')', ':', 'if', 'mode', '==', "'automatic'", ':', '# if an array is provided as the source', '# Create a new catalog directly from the image', 'catalog', '=', 'ImageCatalog', '(', 'wcs', ',', 'catalog', ',', 'src_find_filters', ',', '*', '*', 'kwargs', ')', 'else', ':', '# a catalog file was provided as the catalog source', 'catalog', '=', 'UserCatalog', '(', 'wcs', ',', 'catalog', ',', '*', '*', 'kwargs', ')', 'return', 'catalog']
Function which determines what type of catalog object needs to be instantiated based on what type of source selection algorithm the user specified. Parameters ---------- wcs : obj WCS object generated by STWCS or PyWCS catalog : str or ndarray Filename of existing catalog or ndarray of image for generation of source catalog. kwargs : dict Parameters needed to interpret source catalog from input catalog with `findmode` being required. Returns ------- catalog : obj A Catalog-based class instance for keeping track of WCS and associated source catalog
['Function', 'which', 'determines', 'what', 'type', 'of', 'catalog', 'object', 'needs', 'to', 'be', 'instantiated', 'based', 'on', 'what', 'type', 'of', 'source', 'selection', 'algorithm', 'the', 'user', 'specified', '.']
train
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/catalogs.py#L43-L76
9,376
dpkp/kafka-python
kafka/metrics/stats/sensor.py
Sensor._check_quotas
def _check_quotas(self, time_ms): """ Check if we have violated our quota for any metric that has a configured quota """ for metric in self._metrics: if metric.config and metric.config.quota: value = metric.value(time_ms) if not metric.config.quota.is_acceptable(value): raise QuotaViolationError("'%s' violated quota. Actual: " "%d, Threshold: %d" % (metric.metric_name, value, metric.config.quota.bound))
python
def _check_quotas(self, time_ms): """ Check if we have violated our quota for any metric that has a configured quota """ for metric in self._metrics: if metric.config and metric.config.quota: value = metric.value(time_ms) if not metric.config.quota.is_acceptable(value): raise QuotaViolationError("'%s' violated quota. Actual: " "%d, Threshold: %d" % (metric.metric_name, value, metric.config.quota.bound))
['def', '_check_quotas', '(', 'self', ',', 'time_ms', ')', ':', 'for', 'metric', 'in', 'self', '.', '_metrics', ':', 'if', 'metric', '.', 'config', 'and', 'metric', '.', 'config', '.', 'quota', ':', 'value', '=', 'metric', '.', 'value', '(', 'time_ms', ')', 'if', 'not', 'metric', '.', 'config', '.', 'quota', '.', 'is_acceptable', '(', 'value', ')', ':', 'raise', 'QuotaViolationError', '(', '"\'%s\' violated quota. Actual: "', '"%d, Threshold: %d"', '%', '(', 'metric', '.', 'metric_name', ',', 'value', ',', 'metric', '.', 'config', '.', 'quota', '.', 'bound', ')', ')']
Check if we have violated our quota for any metric that has a configured quota
['Check', 'if', 'we', 'have', 'violated', 'our', 'quota', 'for', 'any', 'metric', 'that', 'has', 'a', 'configured', 'quota']
train
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/metrics/stats/sensor.py#L78-L91
9,377
anomaly/vishnu
vishnu/util.py
google_app_engine_ndb_delete_expired_sessions
def google_app_engine_ndb_delete_expired_sessions(dormant_for=86400, limit=500): """ Deletes expired sessions A session is expired if it expires date is set and has passed or if it has not been accessed for a given period of time. :param dormant_for: seconds since last access to delete sessions, defaults to 24 hours. :type dormant_for: int :param limit: amount to delete in one call of the method, the maximum and default for this is the NDB fetch limit of 500 :type limit: int """ from vishnu.backend.client.google_app_engine_ndb import VishnuSession from google.appengine.ext import ndb from datetime import datetime from datetime import timedelta now = datetime.utcnow() last_accessed = now - timedelta(seconds=dormant_for) query = VishnuSession.query(ndb.OR( ndb.AND(VishnuSession.expires <= now, VishnuSession.expires != None), VishnuSession.last_accessed <= last_accessed )) results = query.fetch(keys_only=True, limit=limit) ndb.delete_multi(results) return len(results) < limit
python
def google_app_engine_ndb_delete_expired_sessions(dormant_for=86400, limit=500): """ Deletes expired sessions A session is expired if it expires date is set and has passed or if it has not been accessed for a given period of time. :param dormant_for: seconds since last access to delete sessions, defaults to 24 hours. :type dormant_for: int :param limit: amount to delete in one call of the method, the maximum and default for this is the NDB fetch limit of 500 :type limit: int """ from vishnu.backend.client.google_app_engine_ndb import VishnuSession from google.appengine.ext import ndb from datetime import datetime from datetime import timedelta now = datetime.utcnow() last_accessed = now - timedelta(seconds=dormant_for) query = VishnuSession.query(ndb.OR( ndb.AND(VishnuSession.expires <= now, VishnuSession.expires != None), VishnuSession.last_accessed <= last_accessed )) results = query.fetch(keys_only=True, limit=limit) ndb.delete_multi(results) return len(results) < limit
['def', 'google_app_engine_ndb_delete_expired_sessions', '(', 'dormant_for', '=', '86400', ',', 'limit', '=', '500', ')', ':', 'from', 'vishnu', '.', 'backend', '.', 'client', '.', 'google_app_engine_ndb', 'import', 'VishnuSession', 'from', 'google', '.', 'appengine', '.', 'ext', 'import', 'ndb', 'from', 'datetime', 'import', 'datetime', 'from', 'datetime', 'import', 'timedelta', 'now', '=', 'datetime', '.', 'utcnow', '(', ')', 'last_accessed', '=', 'now', '-', 'timedelta', '(', 'seconds', '=', 'dormant_for', ')', 'query', '=', 'VishnuSession', '.', 'query', '(', 'ndb', '.', 'OR', '(', 'ndb', '.', 'AND', '(', 'VishnuSession', '.', 'expires', '<=', 'now', ',', 'VishnuSession', '.', 'expires', '!=', 'None', ')', ',', 'VishnuSession', '.', 'last_accessed', '<=', 'last_accessed', ')', ')', 'results', '=', 'query', '.', 'fetch', '(', 'keys_only', '=', 'True', ',', 'limit', '=', 'limit', ')', 'ndb', '.', 'delete_multi', '(', 'results', ')', 'return', 'len', '(', 'results', ')', '<', 'limit']
Deletes expired sessions A session is expired if it expires date is set and has passed or if it has not been accessed for a given period of time. :param dormant_for: seconds since last access to delete sessions, defaults to 24 hours. :type dormant_for: int :param limit: amount to delete in one call of the method, the maximum and default for this is the NDB fetch limit of 500 :type limit: int
['Deletes', 'expired', 'sessions', 'A', 'session', 'is', 'expired', 'if', 'it', 'expires', 'date', 'is', 'set', 'and', 'has', 'passed', 'or', 'if', 'it', 'has', 'not', 'been', 'accessed', 'for', 'a', 'given', 'period', 'of', 'time', '.']
train
https://github.com/anomaly/vishnu/blob/5b3a6a69beedc8554cc506ddfab273760d61dc65/vishnu/util.py#L3-L30
9,378
python-diamond/Diamond
src/collectors/onewire/onewire.py
OneWireCollector.get_default_config
def get_default_config(self): """ Returns the default collector settings """ config = super(OneWireCollector, self).get_default_config() config.update({ 'path': 'owfs', 'owfs': '/mnt/1wire', # 'scan': {'temperature': 't'}, # 'id:24.BB000000': {'file_with_value': 'alias'}, }) return config
python
def get_default_config(self): """ Returns the default collector settings """ config = super(OneWireCollector, self).get_default_config() config.update({ 'path': 'owfs', 'owfs': '/mnt/1wire', # 'scan': {'temperature': 't'}, # 'id:24.BB000000': {'file_with_value': 'alias'}, }) return config
['def', 'get_default_config', '(', 'self', ')', ':', 'config', '=', 'super', '(', 'OneWireCollector', ',', 'self', ')', '.', 'get_default_config', '(', ')', 'config', '.', 'update', '(', '{', "'path'", ':', "'owfs'", ',', "'owfs'", ':', "'/mnt/1wire'", ',', "# 'scan': {'temperature': 't'},", "# 'id:24.BB000000': {'file_with_value': 'alias'},", '}', ')', 'return', 'config']
Returns the default collector settings
['Returns', 'the', 'default', 'collector', 'settings']
train
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/onewire/onewire.py#L36-L47
9,379
googleads/googleads-python-lib
examples/adwords/v201809/advanced_operations/add_ad_customizer.py
RestrictFeedItemToAdGroup
def RestrictFeedItemToAdGroup(client, feed_item, adgroup_id): """Restricts the feed item to an ad group. Args: client: an AdWordsClient instance. feed_item: The feed item. adgroup_id: The ad group ID. """ # Get the FeedItemTargetService feed_item_target_service = client.GetService( 'FeedItemTargetService', 'v201809') # Optional: Restrict the first feed item to only serve with ads for the # specified ad group ID. ad_group_target = { 'xsi_type': 'FeedItemAdGroupTarget', 'feedId': feed_item['feedId'], 'feedItemId': feed_item['feedItemId'], 'adGroupId': adgroup_id } operation = {'operator': 'ADD', 'operand': ad_group_target} response = feed_item_target_service.mutate([operation]) new_ad_group_target = response['value'][0] print('Feed item target for feed ID %s and feed item ID %s was created to ' 'restrict serving to ad group ID %s' % (new_ad_group_target['feedId'], new_ad_group_target['feedItemId'], new_ad_group_target['adGroupId']))
python
def RestrictFeedItemToAdGroup(client, feed_item, adgroup_id): """Restricts the feed item to an ad group. Args: client: an AdWordsClient instance. feed_item: The feed item. adgroup_id: The ad group ID. """ # Get the FeedItemTargetService feed_item_target_service = client.GetService( 'FeedItemTargetService', 'v201809') # Optional: Restrict the first feed item to only serve with ads for the # specified ad group ID. ad_group_target = { 'xsi_type': 'FeedItemAdGroupTarget', 'feedId': feed_item['feedId'], 'feedItemId': feed_item['feedItemId'], 'adGroupId': adgroup_id } operation = {'operator': 'ADD', 'operand': ad_group_target} response = feed_item_target_service.mutate([operation]) new_ad_group_target = response['value'][0] print('Feed item target for feed ID %s and feed item ID %s was created to ' 'restrict serving to ad group ID %s' % (new_ad_group_target['feedId'], new_ad_group_target['feedItemId'], new_ad_group_target['adGroupId']))
['def', 'RestrictFeedItemToAdGroup', '(', 'client', ',', 'feed_item', ',', 'adgroup_id', ')', ':', '# Get the FeedItemTargetService', 'feed_item_target_service', '=', 'client', '.', 'GetService', '(', "'FeedItemTargetService'", ',', "'v201809'", ')', '# Optional: Restrict the first feed item to only serve with ads for the', '# specified ad group ID.', 'ad_group_target', '=', '{', "'xsi_type'", ':', "'FeedItemAdGroupTarget'", ',', "'feedId'", ':', 'feed_item', '[', "'feedId'", ']', ',', "'feedItemId'", ':', 'feed_item', '[', "'feedItemId'", ']', ',', "'adGroupId'", ':', 'adgroup_id', '}', 'operation', '=', '{', "'operator'", ':', "'ADD'", ',', "'operand'", ':', 'ad_group_target', '}', 'response', '=', 'feed_item_target_service', '.', 'mutate', '(', '[', 'operation', ']', ')', 'new_ad_group_target', '=', 'response', '[', "'value'", ']', '[', '0', ']', 'print', '(', "'Feed item target for feed ID %s and feed item ID %s was created to '", "'restrict serving to ad group ID %s'", '%', '(', 'new_ad_group_target', '[', "'feedId'", ']', ',', 'new_ad_group_target', '[', "'feedItemId'", ']', ',', 'new_ad_group_target', '[', "'adGroupId'", ']', ')', ')']
Restricts the feed item to an ad group. Args: client: an AdWordsClient instance. feed_item: The feed item. adgroup_id: The ad group ID.
['Restricts', 'the', 'feed', 'item', 'to', 'an', 'ad', 'group', '.']
train
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/examples/adwords/v201809/advanced_operations/add_ad_customizer.py#L128-L158
9,380
mryellow/maze_explorer
mazeexp/engine/world.py
WorldLayer.update_visited
def update_visited(self): """ Updates exploration map visited status """ assert isinstance(self.player.cshape.center, eu.Vector2) pos = self.player.cshape.center # Helper function def set_visited(layer, cell): if cell and not cell.properties.get('visited') and cell.tile and cell.tile.id > 0: cell.properties['visited'] = True self.reward_explore() # TODO: Decouple into view rendering # Change colour of visited cells key = layer.get_key_at_pixel(cell.x, cell.y) #layer.set_cell_color(key[0], key[1], [155,155,155]) layer.set_cell_opacity(key[0], key[1], 255*0.8) # End Helper # Get the current tile under player current = self.visit_layer.get_at_pixel(pos.x, pos.y) if current: # In spawn square if current == self.visit_layer.get_at_pixel(self.spawn.x, self.spawn.y): self.reward_goal() # Only record/reward exploration when battery is above 50% #if self.player.stats['battery'] > 50: set_visited(self.visit_layer, current) neighbours = self.visit_layer.get_neighbors(current) for cell in neighbours: neighbour = neighbours[cell] set_visited(self.visit_layer, neighbour)
python
def update_visited(self): """ Updates exploration map visited status """ assert isinstance(self.player.cshape.center, eu.Vector2) pos = self.player.cshape.center # Helper function def set_visited(layer, cell): if cell and not cell.properties.get('visited') and cell.tile and cell.tile.id > 0: cell.properties['visited'] = True self.reward_explore() # TODO: Decouple into view rendering # Change colour of visited cells key = layer.get_key_at_pixel(cell.x, cell.y) #layer.set_cell_color(key[0], key[1], [155,155,155]) layer.set_cell_opacity(key[0], key[1], 255*0.8) # End Helper # Get the current tile under player current = self.visit_layer.get_at_pixel(pos.x, pos.y) if current: # In spawn square if current == self.visit_layer.get_at_pixel(self.spawn.x, self.spawn.y): self.reward_goal() # Only record/reward exploration when battery is above 50% #if self.player.stats['battery'] > 50: set_visited(self.visit_layer, current) neighbours = self.visit_layer.get_neighbors(current) for cell in neighbours: neighbour = neighbours[cell] set_visited(self.visit_layer, neighbour)
['def', 'update_visited', '(', 'self', ')', ':', 'assert', 'isinstance', '(', 'self', '.', 'player', '.', 'cshape', '.', 'center', ',', 'eu', '.', 'Vector2', ')', 'pos', '=', 'self', '.', 'player', '.', 'cshape', '.', 'center', '# Helper function', 'def', 'set_visited', '(', 'layer', ',', 'cell', ')', ':', 'if', 'cell', 'and', 'not', 'cell', '.', 'properties', '.', 'get', '(', "'visited'", ')', 'and', 'cell', '.', 'tile', 'and', 'cell', '.', 'tile', '.', 'id', '>', '0', ':', 'cell', '.', 'properties', '[', "'visited'", ']', '=', 'True', 'self', '.', 'reward_explore', '(', ')', '# TODO: Decouple into view rendering', '# Change colour of visited cells', 'key', '=', 'layer', '.', 'get_key_at_pixel', '(', 'cell', '.', 'x', ',', 'cell', '.', 'y', ')', '#layer.set_cell_color(key[0], key[1], [155,155,155])', 'layer', '.', 'set_cell_opacity', '(', 'key', '[', '0', ']', ',', 'key', '[', '1', ']', ',', '255', '*', '0.8', ')', '# End Helper', '# Get the current tile under player', 'current', '=', 'self', '.', 'visit_layer', '.', 'get_at_pixel', '(', 'pos', '.', 'x', ',', 'pos', '.', 'y', ')', 'if', 'current', ':', '# In spawn square', 'if', 'current', '==', 'self', '.', 'visit_layer', '.', 'get_at_pixel', '(', 'self', '.', 'spawn', '.', 'x', ',', 'self', '.', 'spawn', '.', 'y', ')', ':', 'self', '.', 'reward_goal', '(', ')', '# Only record/reward exploration when battery is above 50%', "#if self.player.stats['battery'] > 50:", 'set_visited', '(', 'self', '.', 'visit_layer', ',', 'current', ')', 'neighbours', '=', 'self', '.', 'visit_layer', '.', 'get_neighbors', '(', 'current', ')', 'for', 'cell', 'in', 'neighbours', ':', 'neighbour', '=', 'neighbours', '[', 'cell', ']', 'set_visited', '(', 'self', '.', 'visit_layer', ',', 'neighbour', ')']
Updates exploration map visited status
['Updates', 'exploration', 'map', 'visited', 'status']
train
https://github.com/mryellow/maze_explorer/blob/ab8a25ccd05105d2fe57e0213d690cfc07e45827/mazeexp/engine/world.py#L318-L353
9,381
JamesPHoughton/pysd
pysd/pysd.py
read_vensim
def read_vensim(mdl_file): """ Construct a model from Vensim `.mdl` file. Parameters ---------- mdl_file : <string> The relative path filename for a raw Vensim `.mdl` file Returns ------- model: a PySD class object Elements from the python model are loaded into the PySD class and ready to run Examples -------- >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl') """ from .py_backend.vensim.vensim2py import translate_vensim from .py_backend import functions py_model_file = translate_vensim(mdl_file) model = functions.Model(py_model_file) model.mdl_file = mdl_file return model
python
def read_vensim(mdl_file): """ Construct a model from Vensim `.mdl` file. Parameters ---------- mdl_file : <string> The relative path filename for a raw Vensim `.mdl` file Returns ------- model: a PySD class object Elements from the python model are loaded into the PySD class and ready to run Examples -------- >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl') """ from .py_backend.vensim.vensim2py import translate_vensim from .py_backend import functions py_model_file = translate_vensim(mdl_file) model = functions.Model(py_model_file) model.mdl_file = mdl_file return model
['def', 'read_vensim', '(', 'mdl_file', ')', ':', 'from', '.', 'py_backend', '.', 'vensim', '.', 'vensim2py', 'import', 'translate_vensim', 'from', '.', 'py_backend', 'import', 'functions', 'py_model_file', '=', 'translate_vensim', '(', 'mdl_file', ')', 'model', '=', 'functions', '.', 'Model', '(', 'py_model_file', ')', 'model', '.', 'mdl_file', '=', 'mdl_file', 'return', 'model']
Construct a model from Vensim `.mdl` file. Parameters ---------- mdl_file : <string> The relative path filename for a raw Vensim `.mdl` file Returns ------- model: a PySD class object Elements from the python model are loaded into the PySD class and ready to run Examples -------- >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl')
['Construct', 'a', 'model', 'from', 'Vensim', '.', 'mdl', 'file', '.']
train
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/pysd.py#L25-L49
9,382
fermiPy/fermipy
fermipy/diffuse/source_factory.py
make_mapcube_source
def make_mapcube_source(name, Spatial_Filename, spectrum): """Construct and return a `fermipy.roi_model.MapCubeSource` object """ data = dict(Spatial_Filename=Spatial_Filename) if spectrum is not None: data.update(spectrum) return roi_model.MapCubeSource(name, data)
python
def make_mapcube_source(name, Spatial_Filename, spectrum): """Construct and return a `fermipy.roi_model.MapCubeSource` object """ data = dict(Spatial_Filename=Spatial_Filename) if spectrum is not None: data.update(spectrum) return roi_model.MapCubeSource(name, data)
['def', 'make_mapcube_source', '(', 'name', ',', 'Spatial_Filename', ',', 'spectrum', ')', ':', 'data', '=', 'dict', '(', 'Spatial_Filename', '=', 'Spatial_Filename', ')', 'if', 'spectrum', 'is', 'not', 'None', ':', 'data', '.', 'update', '(', 'spectrum', ')', 'return', 'roi_model', '.', 'MapCubeSource', '(', 'name', ',', 'data', ')']
Construct and return a `fermipy.roi_model.MapCubeSource` object
['Construct', 'and', 'return', 'a', 'fermipy', '.', 'roi_model', '.', 'MapCubeSource', 'object']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/source_factory.py#L34-L41
9,383
mdsol/rwslib
rwslib/builders/metadata.py
TranslatedText.build
def build(self, builder): """Build XML by appending to builder""" params = {} if self.lang is not None: params["xml:lang"] = self.lang builder.start("TranslatedText", params) builder.data(self.text) builder.end("TranslatedText")
python
def build(self, builder): """Build XML by appending to builder""" params = {} if self.lang is not None: params["xml:lang"] = self.lang builder.start("TranslatedText", params) builder.data(self.text) builder.end("TranslatedText")
['def', 'build', '(', 'self', ',', 'builder', ')', ':', 'params', '=', '{', '}', 'if', 'self', '.', 'lang', 'is', 'not', 'None', ':', 'params', '[', '"xml:lang"', ']', '=', 'self', '.', 'lang', 'builder', '.', 'start', '(', '"TranslatedText"', ',', 'params', ')', 'builder', '.', 'data', '(', 'self', '.', 'text', ')', 'builder', '.', 'end', '(', '"TranslatedText"', ')']
Build XML by appending to builder
['Build', 'XML', 'by', 'appending', 'to', 'builder']
train
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L250-L257
9,384
spacetelescope/synphot_refactor
synphot/units.py
spectral_density_vega
def spectral_density_vega(wav, vegaflux): """Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies. """ vega_photlam = vegaflux.to( PHOTLAM, equivalencies=u.spectral_density(wav)).value def converter(x): """Set nan/inf to -99 mag.""" val = -2.5 * np.log10(x / vega_photlam) result = np.zeros(val.shape, dtype=np.float64) - 99 mask = np.isfinite(val) if result.ndim > 0: result[mask] = val[mask] elif mask: result = np.asarray(val) return result def iconverter(x): return vega_photlam * 10**(-0.4 * x) return [(PHOTLAM, VEGAMAG, converter, iconverter)]
python
def spectral_density_vega(wav, vegaflux): """Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies. """ vega_photlam = vegaflux.to( PHOTLAM, equivalencies=u.spectral_density(wav)).value def converter(x): """Set nan/inf to -99 mag.""" val = -2.5 * np.log10(x / vega_photlam) result = np.zeros(val.shape, dtype=np.float64) - 99 mask = np.isfinite(val) if result.ndim > 0: result[mask] = val[mask] elif mask: result = np.asarray(val) return result def iconverter(x): return vega_photlam * 10**(-0.4 * x) return [(PHOTLAM, VEGAMAG, converter, iconverter)]
['def', 'spectral_density_vega', '(', 'wav', ',', 'vegaflux', ')', ':', 'vega_photlam', '=', 'vegaflux', '.', 'to', '(', 'PHOTLAM', ',', 'equivalencies', '=', 'u', '.', 'spectral_density', '(', 'wav', ')', ')', '.', 'value', 'def', 'converter', '(', 'x', ')', ':', '"""Set nan/inf to -99 mag."""', 'val', '=', '-', '2.5', '*', 'np', '.', 'log10', '(', 'x', '/', 'vega_photlam', ')', 'result', '=', 'np', '.', 'zeros', '(', 'val', '.', 'shape', ',', 'dtype', '=', 'np', '.', 'float64', ')', '-', '99', 'mask', '=', 'np', '.', 'isfinite', '(', 'val', ')', 'if', 'result', '.', 'ndim', '>', '0', ':', 'result', '[', 'mask', ']', '=', 'val', '[', 'mask', ']', 'elif', 'mask', ':', 'result', '=', 'np', '.', 'asarray', '(', 'val', ')', 'return', 'result', 'def', 'iconverter', '(', 'x', ')', ':', 'return', 'vega_photlam', '*', '10', '**', '(', '-', '0.4', '*', 'x', ')', 'return', '[', '(', 'PHOTLAM', ',', 'VEGAMAG', ',', 'converter', ',', 'iconverter', ')', ']']
Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies.
['Flux', 'equivalencies', 'between', 'PHOTLAM', 'and', 'VEGAMAG', '.']
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L64-L99
9,385
hazelcast/hazelcast-python-client
hazelcast/connection.py
ConnectionManager.get_or_connect
def get_or_connect(self, address, authenticator=None): """ Gets the existing connection for a given address. If it does not exist, the system will try to connect asynchronously. In this case, it returns a Future. When the connection is established at some point in time, it can be retrieved by using the get_connection(:class:`~hazelcast.core.Address`) or from Future. :param address: (:class:`~hazelcast.core.Address`), the address to connect to. :param authenticator: (Function), function to be used for authentication (optional). :return: (:class:`~hazelcast.connection.Connection`), the existing connection or it returns a Future which includes asynchronously. """ if address in self.connections: return ImmediateFuture(self.connections[address]) else: with self._new_connection_mutex: if address in self._pending_connections: return self._pending_connections[address] else: authenticator = authenticator or self._cluster_authenticator try: translated_address = self._address_translator.translate(address) if translated_address is None: raise ValueError("Address translator could not translate address: {}".format(address)) connection = self._new_connection_func(translated_address, self._client.config.network_config.connection_timeout, self._client.config.network_config.socket_options, connection_closed_callback=self._connection_closed, message_callback=self._client.invoker._handle_client_message, network_config=self._client.config.network_config) except IOError: return ImmediateExceptionFuture(sys.exc_info()[1], sys.exc_info()[2]) future = authenticator(connection).continue_with(self.on_auth, connection, address) if not future.done(): self._pending_connections[address] = future return future
python
def get_or_connect(self, address, authenticator=None): """ Gets the existing connection for a given address. If it does not exist, the system will try to connect asynchronously. In this case, it returns a Future. When the connection is established at some point in time, it can be retrieved by using the get_connection(:class:`~hazelcast.core.Address`) or from Future. :param address: (:class:`~hazelcast.core.Address`), the address to connect to. :param authenticator: (Function), function to be used for authentication (optional). :return: (:class:`~hazelcast.connection.Connection`), the existing connection or it returns a Future which includes asynchronously. """ if address in self.connections: return ImmediateFuture(self.connections[address]) else: with self._new_connection_mutex: if address in self._pending_connections: return self._pending_connections[address] else: authenticator = authenticator or self._cluster_authenticator try: translated_address = self._address_translator.translate(address) if translated_address is None: raise ValueError("Address translator could not translate address: {}".format(address)) connection = self._new_connection_func(translated_address, self._client.config.network_config.connection_timeout, self._client.config.network_config.socket_options, connection_closed_callback=self._connection_closed, message_callback=self._client.invoker._handle_client_message, network_config=self._client.config.network_config) except IOError: return ImmediateExceptionFuture(sys.exc_info()[1], sys.exc_info()[2]) future = authenticator(connection).continue_with(self.on_auth, connection, address) if not future.done(): self._pending_connections[address] = future return future
['def', 'get_or_connect', '(', 'self', ',', 'address', ',', 'authenticator', '=', 'None', ')', ':', 'if', 'address', 'in', 'self', '.', 'connections', ':', 'return', 'ImmediateFuture', '(', 'self', '.', 'connections', '[', 'address', ']', ')', 'else', ':', 'with', 'self', '.', '_new_connection_mutex', ':', 'if', 'address', 'in', 'self', '.', '_pending_connections', ':', 'return', 'self', '.', '_pending_connections', '[', 'address', ']', 'else', ':', 'authenticator', '=', 'authenticator', 'or', 'self', '.', '_cluster_authenticator', 'try', ':', 'translated_address', '=', 'self', '.', '_address_translator', '.', 'translate', '(', 'address', ')', 'if', 'translated_address', 'is', 'None', ':', 'raise', 'ValueError', '(', '"Address translator could not translate address: {}"', '.', 'format', '(', 'address', ')', ')', 'connection', '=', 'self', '.', '_new_connection_func', '(', 'translated_address', ',', 'self', '.', '_client', '.', 'config', '.', 'network_config', '.', 'connection_timeout', ',', 'self', '.', '_client', '.', 'config', '.', 'network_config', '.', 'socket_options', ',', 'connection_closed_callback', '=', 'self', '.', '_connection_closed', ',', 'message_callback', '=', 'self', '.', '_client', '.', 'invoker', '.', '_handle_client_message', ',', 'network_config', '=', 'self', '.', '_client', '.', 'config', '.', 'network_config', ')', 'except', 'IOError', ':', 'return', 'ImmediateExceptionFuture', '(', 'sys', '.', 'exc_info', '(', ')', '[', '1', ']', ',', 'sys', '.', 'exc_info', '(', ')', '[', '2', ']', ')', 'future', '=', 'authenticator', '(', 'connection', ')', '.', 'continue_with', '(', 'self', '.', 'on_auth', ',', 'connection', ',', 'address', ')', 'if', 'not', 'future', '.', 'done', '(', ')', ':', 'self', '.', '_pending_connections', '[', 'address', ']', '=', 'future', 'return', 'future']
Gets the existing connection for a given address. If it does not exist, the system will try to connect asynchronously. In this case, it returns a Future. When the connection is established at some point in time, it can be retrieved by using the get_connection(:class:`~hazelcast.core.Address`) or from Future. :param address: (:class:`~hazelcast.core.Address`), the address to connect to. :param authenticator: (Function), function to be used for authentication (optional). :return: (:class:`~hazelcast.connection.Connection`), the existing connection or it returns a Future which includes asynchronously.
['Gets', 'the', 'existing', 'connection', 'for', 'a', 'given', 'address', '.', 'If', 'it', 'does', 'not', 'exist', 'the', 'system', 'will', 'try', 'to', 'connect', 'asynchronously', '.', 'In', 'this', 'case', 'it', 'returns', 'a', 'Future', '.', 'When', 'the', 'connection', 'is', 'established', 'at', 'some', 'point', 'in', 'time', 'it', 'can', 'be', 'retrieved', 'by', 'using', 'the', 'get_connection', '(', ':', 'class', ':', '~hazelcast', '.', 'core', '.', 'Address', ')', 'or', 'from', 'Future', '.']
train
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/connection.py#L87-L121
9,386
heronotears/lazyxml
lazyxml/builder.py
Builder.check_structure
def check_structure(self, keys): r"""Check structure availability by ``attrkey`` and ``valuekey`` option. """ return set(keys) <= set([self.__options['attrkey'], self.__options['valuekey']])
python
def check_structure(self, keys): r"""Check structure availability by ``attrkey`` and ``valuekey`` option. """ return set(keys) <= set([self.__options['attrkey'], self.__options['valuekey']])
['def', 'check_structure', '(', 'self', ',', 'keys', ')', ':', 'return', 'set', '(', 'keys', ')', '<=', 'set', '(', '[', 'self', '.', '__options', '[', "'attrkey'", ']', ',', 'self', '.', '__options', '[', "'valuekey'", ']', ']', ')']
r"""Check structure availability by ``attrkey`` and ``valuekey`` option.
['r', 'Check', 'structure', 'availability', 'by', 'attrkey', 'and', 'valuekey', 'option', '.']
train
https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/builder.py#L130-L133
9,387
materialsproject/pymatgen
pymatgen/entries/entry_tools.py
EntrySet.from_csv
def from_csv(cls, filename: str): """ Imports PDEntries from a csv. Args: filename: Filename to import from. Returns: List of Elements, List of PDEntries """ with open(filename, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter=unicode2str(","), quotechar=unicode2str("\""), quoting=csv.QUOTE_MINIMAL) entries = list() header_read = False elements = None for row in reader: if not header_read: elements = row[1:(len(row) - 1)] header_read = True else: name = row[0] energy = float(row[-1]) comp = dict() for ind in range(1, len(row) - 1): if float(row[ind]) > 0: comp[Element(elements[ind - 1])] = float(row[ind]) entries.append(PDEntry(Composition(comp), energy, name)) return cls(entries)
python
def from_csv(cls, filename: str): """ Imports PDEntries from a csv. Args: filename: Filename to import from. Returns: List of Elements, List of PDEntries """ with open(filename, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter=unicode2str(","), quotechar=unicode2str("\""), quoting=csv.QUOTE_MINIMAL) entries = list() header_read = False elements = None for row in reader: if not header_read: elements = row[1:(len(row) - 1)] header_read = True else: name = row[0] energy = float(row[-1]) comp = dict() for ind in range(1, len(row) - 1): if float(row[ind]) > 0: comp[Element(elements[ind - 1])] = float(row[ind]) entries.append(PDEntry(Composition(comp), energy, name)) return cls(entries)
['def', 'from_csv', '(', 'cls', ',', 'filename', ':', 'str', ')', ':', 'with', 'open', '(', 'filename', ',', '"r"', ',', 'encoding', '=', '"utf-8"', ')', 'as', 'f', ':', 'reader', '=', 'csv', '.', 'reader', '(', 'f', ',', 'delimiter', '=', 'unicode2str', '(', '","', ')', ',', 'quotechar', '=', 'unicode2str', '(', '"\\""', ')', ',', 'quoting', '=', 'csv', '.', 'QUOTE_MINIMAL', ')', 'entries', '=', 'list', '(', ')', 'header_read', '=', 'False', 'elements', '=', 'None', 'for', 'row', 'in', 'reader', ':', 'if', 'not', 'header_read', ':', 'elements', '=', 'row', '[', '1', ':', '(', 'len', '(', 'row', ')', '-', '1', ')', ']', 'header_read', '=', 'True', 'else', ':', 'name', '=', 'row', '[', '0', ']', 'energy', '=', 'float', '(', 'row', '[', '-', '1', ']', ')', 'comp', '=', 'dict', '(', ')', 'for', 'ind', 'in', 'range', '(', '1', ',', 'len', '(', 'row', ')', '-', '1', ')', ':', 'if', 'float', '(', 'row', '[', 'ind', ']', ')', '>', '0', ':', 'comp', '[', 'Element', '(', 'elements', '[', 'ind', '-', '1', ']', ')', ']', '=', 'float', '(', 'row', '[', 'ind', ']', ')', 'entries', '.', 'append', '(', 'PDEntry', '(', 'Composition', '(', 'comp', ')', ',', 'energy', ',', 'name', ')', ')', 'return', 'cls', '(', 'entries', ')']
Imports PDEntries from a csv. Args: filename: Filename to import from. Returns: List of Elements, List of PDEntries
['Imports', 'PDEntries', 'from', 'a', 'csv', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/entries/entry_tools.py#L259-L288
9,388
sassoo/goldman
goldman/deserializers/comma_sep.py
Parser._parse_keys
def _parse_keys(row, line_num): """ Perform some sanity checks on they keys Each key in the row should not be named None cause (that's an overrun). A key named `type` MUST be present on the row & have a string value. :param row: dict :param line_num: int """ link = 'tools.ietf.org/html/rfc4180#section-2' none_keys = [key for key in row.keys() if key is None] if none_keys: fail('You have more fields defined on row number {} ' 'than field headers in your CSV data. Please fix ' 'your request body.'.format(line_num), link) elif not row.get('type'): fail('Row number {} does not have a type value defined. ' 'Please fix your request body.'.format(line_num), link)
python
def _parse_keys(row, line_num): """ Perform some sanity checks on they keys Each key in the row should not be named None cause (that's an overrun). A key named `type` MUST be present on the row & have a string value. :param row: dict :param line_num: int """ link = 'tools.ietf.org/html/rfc4180#section-2' none_keys = [key for key in row.keys() if key is None] if none_keys: fail('You have more fields defined on row number {} ' 'than field headers in your CSV data. Please fix ' 'your request body.'.format(line_num), link) elif not row.get('type'): fail('Row number {} does not have a type value defined. ' 'Please fix your request body.'.format(line_num), link)
['def', '_parse_keys', '(', 'row', ',', 'line_num', ')', ':', 'link', '=', "'tools.ietf.org/html/rfc4180#section-2'", 'none_keys', '=', '[', 'key', 'for', 'key', 'in', 'row', '.', 'keys', '(', ')', 'if', 'key', 'is', 'None', ']', 'if', 'none_keys', ':', 'fail', '(', "'You have more fields defined on row number {} '", "'than field headers in your CSV data. Please fix '", "'your request body.'", '.', 'format', '(', 'line_num', ')', ',', 'link', ')', 'elif', 'not', 'row', '.', 'get', '(', "'type'", ')', ':', 'fail', '(', "'Row number {} does not have a type value defined. '", "'Please fix your request body.'", '.', 'format', '(', 'line_num', ')', ',', 'link', ')']
Perform some sanity checks on they keys Each key in the row should not be named None cause (that's an overrun). A key named `type` MUST be present on the row & have a string value. :param row: dict :param line_num: int
['Perform', 'some', 'sanity', 'checks', 'on', 'they', 'keys']
train
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/comma_sep.py#L51-L73
9,389
FutunnOpen/futuquant
futuquant/common/sys_config.py
SysConfig.set_client_info
def set_client_info(cls, client_id, client_ver): """ .. py:function:: set_client_info(cls, client_id, client_ver) 设置调用api的客户端信息, 非必调接口 :param client_id: str, 客户端标识 :param client_ver: int, 客户端版本号 :return: None :example: .. code:: python from futuquant import * SysConfig.set_client_info("MyFutuQuant", 0) quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111) quote_ctx.close() """ SysConfig.CLINET_ID = client_id SysConfig.CLIENT_VER = client_ver
python
def set_client_info(cls, client_id, client_ver): """ .. py:function:: set_client_info(cls, client_id, client_ver) 设置调用api的客户端信息, 非必调接口 :param client_id: str, 客户端标识 :param client_ver: int, 客户端版本号 :return: None :example: .. code:: python from futuquant import * SysConfig.set_client_info("MyFutuQuant", 0) quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111) quote_ctx.close() """ SysConfig.CLINET_ID = client_id SysConfig.CLIENT_VER = client_ver
['def', 'set_client_info', '(', 'cls', ',', 'client_id', ',', 'client_ver', ')', ':', 'SysConfig', '.', 'CLINET_ID', '=', 'client_id', 'SysConfig', '.', 'CLIENT_VER', '=', 'client_ver']
.. py:function:: set_client_info(cls, client_id, client_ver) 设置调用api的客户端信息, 非必调接口 :param client_id: str, 客户端标识 :param client_ver: int, 客户端版本号 :return: None :example: .. code:: python from futuquant import * SysConfig.set_client_info("MyFutuQuant", 0) quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111) quote_ctx.close()
['..', 'py', ':', 'function', '::', 'set_client_info', '(', 'cls', 'client_id', 'client_ver', ')']
train
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/common/sys_config.py#L23-L45
9,390
dw/mitogen
mitogen/core.py
fire
def fire(obj, name, *args, **kwargs): """ Arrange for `func(*args, **kwargs)` to be invoked for every function registered for the named signal on `obj`. """ signals = vars(obj).get('_signals', {}) for func in signals.get(name, ()): func(*args, **kwargs)
python
def fire(obj, name, *args, **kwargs): """ Arrange for `func(*args, **kwargs)` to be invoked for every function registered for the named signal on `obj`. """ signals = vars(obj).get('_signals', {}) for func in signals.get(name, ()): func(*args, **kwargs)
['def', 'fire', '(', 'obj', ',', 'name', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'signals', '=', 'vars', '(', 'obj', ')', '.', 'get', '(', "'_signals'", ',', '{', '}', ')', 'for', 'func', 'in', 'signals', '.', 'get', '(', 'name', ',', '(', ')', ')', ':', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')']
Arrange for `func(*args, **kwargs)` to be invoked for every function registered for the named signal on `obj`.
['Arrange', 'for', 'func', '(', '*', 'args', '**', 'kwargs', ')', 'to', 'be', 'invoked', 'for', 'every', 'function', 'registered', 'for', 'the', 'named', 'signal', 'on', 'obj', '.']
train
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L400-L407
9,391
jonathf/chaospy
chaospy/distributions/operators/multiply.py
Mul._bnd
def _bnd(self, xloc, left, right, cache): """ Distribution bounds. Example: >>> print(chaospy.Uniform().range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [1. 1. 1. 1.]] >>> print(Mul(chaospy.Uniform(), 2).range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [2. 2. 2. 2.]] >>> print(Mul(2, chaospy.Uniform()).range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [2. 2. 2. 2.]] >>> print(Mul(2, 2).range([-2, 0, 2, 4])) [[4. 4. 4. 4.] [4. 4. 4. 4.]] >>> dist = chaospy.Mul(chaospy.Iid(chaospy.Uniform(), 2), [1, 2]) >>> print(dist.range([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]])) [[[0. 0. 0.] [0. 0. 0.]] <BLANKLINE> [[1. 1. 1.] [2. 2. 2.]]] >>> dist = chaospy.Mul([2, 1], chaospy.Iid(chaospy.Uniform(), 2)) >>> print(dist.range([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]])) [[[0. 0. 0.] [0. 0. 0.]] <BLANKLINE> [[2. 2. 2.] [1. 1. 1.]]] >>> dist = chaospy.Mul(chaospy.Iid(chaospy.Uniform(), 2), [1, 2]) >>> print(dist.range([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]])) [[[0. 0. 0.] [0. 0. 0.]] <BLANKLINE> [[1. 1. 1.] [2. 2. 2.]]] """ left = evaluation.get_forward_cache(left, cache) right = evaluation.get_forward_cache(right, cache) if isinstance(left, Dist): if isinstance(right, Dist): raise evaluation.DependencyError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): return numpy.dot(left, right), numpy.dot(left, right) else: left = numpy.asfarray(left) if self.matrix: Ci = numpy.linalg.inv(left) xloc = numpy.dot(Ci, xloc) assert len(xloc) == len(right) elif len(left.shape) == 3: left_ = numpy.mean(left, 0) valids = left_ != 0 xloc.T[valids.T] = xloc.T[valids.T]/left_.T[valids.T] else: left = (left.T+numpy.zeros(xloc.shape).T).T valids = left != 0 xloc.T[valids.T] = xloc.T[valids.T]/left.T[valids.T] assert len(xloc) == len(right) lower, upper = evaluation.evaluate_bound(right, xloc, cache=cache) if self.matrix: lower = numpy.dot(lower.T, left.T).T upper = numpy.dot(upper.T, left.T).T elif len(left.shape) == 3: lower = numpy.where(left[0]*lower > 0, left[0]*lower, left[1]*lower) upper = numpy.where(left[1]*upper > 0, left[1]*upper, left[0]*upper) lower, upper = ( numpy.where(lower < upper, lower, upper), numpy.where(lower < upper, upper, lower), ) lower[(left[0] < 0) & (lower > 0)] = 0. assert len(lower) == len(right) else: lower *= left upper *= left lower, upper = ( numpy.where(lower < upper, lower, upper), numpy.where(lower < upper, upper, lower), ) return lower, upper right = numpy.asfarray(right) if self.matrix: Ci = numpy.linalg.inv(right) xloc = numpy.dot(xloc.T, Ci.T).T assert len(left) == len(xloc) elif len(right.shape) == 3: right_ = numpy.mean(right, 0) valids = right_ != 0 xloc.T[valids.T] = xloc.T[valids.T]/right_.T[valids.T] else: right = (right.T+numpy.zeros(xloc.shape).T).T valids = right != 0 xloc.T[valids.T] = xloc.T[valids.T]/right.T[valids.T] assert len(left) == len(xloc) lower, upper = evaluation.evaluate_bound(left, xloc, cache=cache) if self.matrix: lower = numpy.dot(lower.T, right.T).T upper = numpy.dot(upper.T, right.T).T elif len(right.shape) == 3: lower = numpy.where(right[0]*lower > 0, right[0]*lower, right[1]*lower) upper = numpy.where(right[1]*upper > 0, right[1]*upper, right[0]*upper) lower, upper = ( numpy.where(lower < upper, lower, upper), numpy.where(lower < upper, upper, lower), ) lower[(right[0] < 0) & (lower > 0)] = 0. else: lower *= right upper *= right lower, upper = ( numpy.where(lower < upper, lower, upper), numpy.where(lower < upper, upper, lower), ) assert lower.shape == xloc.shape assert upper.shape == xloc.shape return lower, upper
python
def _bnd(self, xloc, left, right, cache): """ Distribution bounds. Example: >>> print(chaospy.Uniform().range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [1. 1. 1. 1.]] >>> print(Mul(chaospy.Uniform(), 2).range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [2. 2. 2. 2.]] >>> print(Mul(2, chaospy.Uniform()).range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [2. 2. 2. 2.]] >>> print(Mul(2, 2).range([-2, 0, 2, 4])) [[4. 4. 4. 4.] [4. 4. 4. 4.]] >>> dist = chaospy.Mul(chaospy.Iid(chaospy.Uniform(), 2), [1, 2]) >>> print(dist.range([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]])) [[[0. 0. 0.] [0. 0. 0.]] <BLANKLINE> [[1. 1. 1.] [2. 2. 2.]]] >>> dist = chaospy.Mul([2, 1], chaospy.Iid(chaospy.Uniform(), 2)) >>> print(dist.range([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]])) [[[0. 0. 0.] [0. 0. 0.]] <BLANKLINE> [[2. 2. 2.] [1. 1. 1.]]] >>> dist = chaospy.Mul(chaospy.Iid(chaospy.Uniform(), 2), [1, 2]) >>> print(dist.range([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]])) [[[0. 0. 0.] [0. 0. 0.]] <BLANKLINE> [[1. 1. 1.] [2. 2. 2.]]] """ left = evaluation.get_forward_cache(left, cache) right = evaluation.get_forward_cache(right, cache) if isinstance(left, Dist): if isinstance(right, Dist): raise evaluation.DependencyError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): return numpy.dot(left, right), numpy.dot(left, right) else: left = numpy.asfarray(left) if self.matrix: Ci = numpy.linalg.inv(left) xloc = numpy.dot(Ci, xloc) assert len(xloc) == len(right) elif len(left.shape) == 3: left_ = numpy.mean(left, 0) valids = left_ != 0 xloc.T[valids.T] = xloc.T[valids.T]/left_.T[valids.T] else: left = (left.T+numpy.zeros(xloc.shape).T).T valids = left != 0 xloc.T[valids.T] = xloc.T[valids.T]/left.T[valids.T] assert len(xloc) == len(right) lower, upper = evaluation.evaluate_bound(right, xloc, cache=cache) if self.matrix: lower = numpy.dot(lower.T, left.T).T upper = numpy.dot(upper.T, left.T).T elif len(left.shape) == 3: lower = numpy.where(left[0]*lower > 0, left[0]*lower, left[1]*lower) upper = numpy.where(left[1]*upper > 0, left[1]*upper, left[0]*upper) lower, upper = ( numpy.where(lower < upper, lower, upper), numpy.where(lower < upper, upper, lower), ) lower[(left[0] < 0) & (lower > 0)] = 0. assert len(lower) == len(right) else: lower *= left upper *= left lower, upper = ( numpy.where(lower < upper, lower, upper), numpy.where(lower < upper, upper, lower), ) return lower, upper right = numpy.asfarray(right) if self.matrix: Ci = numpy.linalg.inv(right) xloc = numpy.dot(xloc.T, Ci.T).T assert len(left) == len(xloc) elif len(right.shape) == 3: right_ = numpy.mean(right, 0) valids = right_ != 0 xloc.T[valids.T] = xloc.T[valids.T]/right_.T[valids.T] else: right = (right.T+numpy.zeros(xloc.shape).T).T valids = right != 0 xloc.T[valids.T] = xloc.T[valids.T]/right.T[valids.T] assert len(left) == len(xloc) lower, upper = evaluation.evaluate_bound(left, xloc, cache=cache) if self.matrix: lower = numpy.dot(lower.T, right.T).T upper = numpy.dot(upper.T, right.T).T elif len(right.shape) == 3: lower = numpy.where(right[0]*lower > 0, right[0]*lower, right[1]*lower) upper = numpy.where(right[1]*upper > 0, right[1]*upper, right[0]*upper) lower, upper = ( numpy.where(lower < upper, lower, upper), numpy.where(lower < upper, upper, lower), ) lower[(right[0] < 0) & (lower > 0)] = 0. else: lower *= right upper *= right lower, upper = ( numpy.where(lower < upper, lower, upper), numpy.where(lower < upper, upper, lower), ) assert lower.shape == xloc.shape assert upper.shape == xloc.shape return lower, upper
['def', '_bnd', '(', 'self', ',', 'xloc', ',', 'left', ',', 'right', ',', 'cache', ')', ':', 'left', '=', 'evaluation', '.', 'get_forward_cache', '(', 'left', ',', 'cache', ')', 'right', '=', 'evaluation', '.', 'get_forward_cache', '(', 'right', ',', 'cache', ')', 'if', 'isinstance', '(', 'left', ',', 'Dist', ')', ':', 'if', 'isinstance', '(', 'right', ',', 'Dist', ')', ':', 'raise', 'evaluation', '.', 'DependencyError', '(', '"under-defined distribution {} or {}"', '.', 'format', '(', 'left', ',', 'right', ')', ')', 'elif', 'not', 'isinstance', '(', 'right', ',', 'Dist', ')', ':', 'return', 'numpy', '.', 'dot', '(', 'left', ',', 'right', ')', ',', 'numpy', '.', 'dot', '(', 'left', ',', 'right', ')', 'else', ':', 'left', '=', 'numpy', '.', 'asfarray', '(', 'left', ')', 'if', 'self', '.', 'matrix', ':', 'Ci', '=', 'numpy', '.', 'linalg', '.', 'inv', '(', 'left', ')', 'xloc', '=', 'numpy', '.', 'dot', '(', 'Ci', ',', 'xloc', ')', 'assert', 'len', '(', 'xloc', ')', '==', 'len', '(', 'right', ')', 'elif', 'len', '(', 'left', '.', 'shape', ')', '==', '3', ':', 'left_', '=', 'numpy', '.', 'mean', '(', 'left', ',', '0', ')', 'valids', '=', 'left_', '!=', '0', 'xloc', '.', 'T', '[', 'valids', '.', 'T', ']', '=', 'xloc', '.', 'T', '[', 'valids', '.', 'T', ']', '/', 'left_', '.', 'T', '[', 'valids', '.', 'T', ']', 'else', ':', 'left', '=', '(', 'left', '.', 'T', '+', 'numpy', '.', 'zeros', '(', 'xloc', '.', 'shape', ')', '.', 'T', ')', '.', 'T', 'valids', '=', 'left', '!=', '0', 'xloc', '.', 'T', '[', 'valids', '.', 'T', ']', '=', 'xloc', '.', 'T', '[', 'valids', '.', 'T', ']', '/', 'left', '.', 'T', '[', 'valids', '.', 'T', ']', 'assert', 'len', '(', 'xloc', ')', '==', 'len', '(', 'right', ')', 'lower', ',', 'upper', '=', 'evaluation', '.', 'evaluate_bound', '(', 'right', ',', 'xloc', ',', 'cache', '=', 'cache', ')', 'if', 'self', '.', 'matrix', ':', 'lower', '=', 'numpy', '.', 'dot', '(', 'lower', '.', 'T', ',', 'left', '.', 'T', ')', '.', 'T', 'upper', '=', 'numpy', '.', 'dot', '(', 'upper', '.', 'T', ',', 'left', '.', 'T', ')', '.', 'T', 'elif', 'len', '(', 'left', '.', 'shape', ')', '==', '3', ':', 'lower', '=', 'numpy', '.', 'where', '(', 'left', '[', '0', ']', '*', 'lower', '>', '0', ',', 'left', '[', '0', ']', '*', 'lower', ',', 'left', '[', '1', ']', '*', 'lower', ')', 'upper', '=', 'numpy', '.', 'where', '(', 'left', '[', '1', ']', '*', 'upper', '>', '0', ',', 'left', '[', '1', ']', '*', 'upper', ',', 'left', '[', '0', ']', '*', 'upper', ')', 'lower', ',', 'upper', '=', '(', 'numpy', '.', 'where', '(', 'lower', '<', 'upper', ',', 'lower', ',', 'upper', ')', ',', 'numpy', '.', 'where', '(', 'lower', '<', 'upper', ',', 'upper', ',', 'lower', ')', ',', ')', 'lower', '[', '(', 'left', '[', '0', ']', '<', '0', ')', '&', '(', 'lower', '>', '0', ')', ']', '=', '0.', 'assert', 'len', '(', 'lower', ')', '==', 'len', '(', 'right', ')', 'else', ':', 'lower', '*=', 'left', 'upper', '*=', 'left', 'lower', ',', 'upper', '=', '(', 'numpy', '.', 'where', '(', 'lower', '<', 'upper', ',', 'lower', ',', 'upper', ')', ',', 'numpy', '.', 'where', '(', 'lower', '<', 'upper', ',', 'upper', ',', 'lower', ')', ',', ')', 'return', 'lower', ',', 'upper', 'right', '=', 'numpy', '.', 'asfarray', '(', 'right', ')', 'if', 'self', '.', 'matrix', ':', 'Ci', '=', 'numpy', '.', 'linalg', '.', 'inv', '(', 'right', ')', 'xloc', '=', 'numpy', '.', 'dot', '(', 'xloc', '.', 'T', ',', 'Ci', '.', 'T', ')', '.', 'T', 'assert', 'len', '(', 'left', ')', '==', 'len', '(', 'xloc', ')', 'elif', 'len', '(', 'right', '.', 'shape', ')', '==', '3', ':', 'right_', '=', 'numpy', '.', 'mean', '(', 'right', ',', '0', ')', 'valids', '=', 'right_', '!=', '0', 'xloc', '.', 'T', '[', 'valids', '.', 'T', ']', '=', 'xloc', '.', 'T', '[', 'valids', '.', 'T', ']', '/', 'right_', '.', 'T', '[', 'valids', '.', 'T', ']', 'else', ':', 'right', '=', '(', 'right', '.', 'T', '+', 'numpy', '.', 'zeros', '(', 'xloc', '.', 'shape', ')', '.', 'T', ')', '.', 'T', 'valids', '=', 'right', '!=', '0', 'xloc', '.', 'T', '[', 'valids', '.', 'T', ']', '=', 'xloc', '.', 'T', '[', 'valids', '.', 'T', ']', '/', 'right', '.', 'T', '[', 'valids', '.', 'T', ']', 'assert', 'len', '(', 'left', ')', '==', 'len', '(', 'xloc', ')', 'lower', ',', 'upper', '=', 'evaluation', '.', 'evaluate_bound', '(', 'left', ',', 'xloc', ',', 'cache', '=', 'cache', ')', 'if', 'self', '.', 'matrix', ':', 'lower', '=', 'numpy', '.', 'dot', '(', 'lower', '.', 'T', ',', 'right', '.', 'T', ')', '.', 'T', 'upper', '=', 'numpy', '.', 'dot', '(', 'upper', '.', 'T', ',', 'right', '.', 'T', ')', '.', 'T', 'elif', 'len', '(', 'right', '.', 'shape', ')', '==', '3', ':', 'lower', '=', 'numpy', '.', 'where', '(', 'right', '[', '0', ']', '*', 'lower', '>', '0', ',', 'right', '[', '0', ']', '*', 'lower', ',', 'right', '[', '1', ']', '*', 'lower', ')', 'upper', '=', 'numpy', '.', 'where', '(', 'right', '[', '1', ']', '*', 'upper', '>', '0', ',', 'right', '[', '1', ']', '*', 'upper', ',', 'right', '[', '0', ']', '*', 'upper', ')', 'lower', ',', 'upper', '=', '(', 'numpy', '.', 'where', '(', 'lower', '<', 'upper', ',', 'lower', ',', 'upper', ')', ',', 'numpy', '.', 'where', '(', 'lower', '<', 'upper', ',', 'upper', ',', 'lower', ')', ',', ')', 'lower', '[', '(', 'right', '[', '0', ']', '<', '0', ')', '&', '(', 'lower', '>', '0', ')', ']', '=', '0.', 'else', ':', 'lower', '*=', 'right', 'upper', '*=', 'right', 'lower', ',', 'upper', '=', '(', 'numpy', '.', 'where', '(', 'lower', '<', 'upper', ',', 'lower', ',', 'upper', ')', ',', 'numpy', '.', 'where', '(', 'lower', '<', 'upper', ',', 'upper', ',', 'lower', ')', ',', ')', 'assert', 'lower', '.', 'shape', '==', 'xloc', '.', 'shape', 'assert', 'upper', '.', 'shape', '==', 'xloc', '.', 'shape', 'return', 'lower', ',', 'upper']
Distribution bounds. Example: >>> print(chaospy.Uniform().range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [1. 1. 1. 1.]] >>> print(Mul(chaospy.Uniform(), 2).range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [2. 2. 2. 2.]] >>> print(Mul(2, chaospy.Uniform()).range([-2, 0, 2, 4])) [[0. 0. 0. 0.] [2. 2. 2. 2.]] >>> print(Mul(2, 2).range([-2, 0, 2, 4])) [[4. 4. 4. 4.] [4. 4. 4. 4.]] >>> dist = chaospy.Mul(chaospy.Iid(chaospy.Uniform(), 2), [1, 2]) >>> print(dist.range([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]])) [[[0. 0. 0.] [0. 0. 0.]] <BLANKLINE> [[1. 1. 1.] [2. 2. 2.]]] >>> dist = chaospy.Mul([2, 1], chaospy.Iid(chaospy.Uniform(), 2)) >>> print(dist.range([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]])) [[[0. 0. 0.] [0. 0. 0.]] <BLANKLINE> [[2. 2. 2.] [1. 1. 1.]]] >>> dist = chaospy.Mul(chaospy.Iid(chaospy.Uniform(), 2), [1, 2]) >>> print(dist.range([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]])) [[[0. 0. 0.] [0. 0. 0.]] <BLANKLINE> [[1. 1. 1.] [2. 2. 2.]]]
['Distribution', 'bounds', '.']
train
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/operators/multiply.py#L110-L242
9,392
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
unit_targeting
def unit_targeting(w, k): """Unit-level magnitude pruning.""" k = tf.to_int32(k) w_shape = shape_list(w) size = tf.to_int32(tf.reduce_prod(w_shape[:-1])) w = tf.reshape(w, [size, w_shape[-1]]) norm = tf.norm(w, axis=0) thres = tf.contrib.framework.sort(norm, axis=0)[k] mask = to_float(thres >= norm)[None, :] mask = tf.tile(mask, [size, 1]) return tf.reshape(mask, w_shape)
python
def unit_targeting(w, k): """Unit-level magnitude pruning.""" k = tf.to_int32(k) w_shape = shape_list(w) size = tf.to_int32(tf.reduce_prod(w_shape[:-1])) w = tf.reshape(w, [size, w_shape[-1]]) norm = tf.norm(w, axis=0) thres = tf.contrib.framework.sort(norm, axis=0)[k] mask = to_float(thres >= norm)[None, :] mask = tf.tile(mask, [size, 1]) return tf.reshape(mask, w_shape)
['def', 'unit_targeting', '(', 'w', ',', 'k', ')', ':', 'k', '=', 'tf', '.', 'to_int32', '(', 'k', ')', 'w_shape', '=', 'shape_list', '(', 'w', ')', 'size', '=', 'tf', '.', 'to_int32', '(', 'tf', '.', 'reduce_prod', '(', 'w_shape', '[', ':', '-', '1', ']', ')', ')', 'w', '=', 'tf', '.', 'reshape', '(', 'w', ',', '[', 'size', ',', 'w_shape', '[', '-', '1', ']', ']', ')', 'norm', '=', 'tf', '.', 'norm', '(', 'w', ',', 'axis', '=', '0', ')', 'thres', '=', 'tf', '.', 'contrib', '.', 'framework', '.', 'sort', '(', 'norm', ',', 'axis', '=', '0', ')', '[', 'k', ']', 'mask', '=', 'to_float', '(', 'thres', '>=', 'norm', ')', '[', 'None', ',', ':', ']', 'mask', '=', 'tf', '.', 'tile', '(', 'mask', ',', '[', 'size', ',', '1', ']', ')', 'return', 'tf', '.', 'reshape', '(', 'mask', ',', 'w_shape', ')']
Unit-level magnitude pruning.
['Unit', '-', 'level', 'magnitude', 'pruning', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3858-L3870
9,393
mitsei/dlkit
dlkit/json_/repository/sessions.py
CompositionAdminSession.get_composition_form_for_create
def get_composition_form_for_create(self, composition_record_types): """Gets the composition form for creating new compositions. A new form should be requested for each create transaction. arg: composition_record_types (osid.type.Type[]): array of composition record types return: (osid.repository.CompositionForm) - the composition form raise: NullArgument - ``composition_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.get_resource_form_for_create_template for arg in composition_record_types: if not isinstance(arg, ABCType): raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') if composition_record_types == []: obj_form = objects.CompositionForm( repository_id=self._catalog_id, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) else: obj_form = objects.CompositionForm( repository_id=self._catalog_id, record_types=composition_record_types, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) self._forms[obj_form.get_id().get_identifier()] = not CREATED return obj_form
python
def get_composition_form_for_create(self, composition_record_types): """Gets the composition form for creating new compositions. A new form should be requested for each create transaction. arg: composition_record_types (osid.type.Type[]): array of composition record types return: (osid.repository.CompositionForm) - the composition form raise: NullArgument - ``composition_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.get_resource_form_for_create_template for arg in composition_record_types: if not isinstance(arg, ABCType): raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') if composition_record_types == []: obj_form = objects.CompositionForm( repository_id=self._catalog_id, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) else: obj_form = objects.CompositionForm( repository_id=self._catalog_id, record_types=composition_record_types, runtime=self._runtime, effective_agent_id=self.get_effective_agent_id(), proxy=self._proxy) self._forms[obj_form.get_id().get_identifier()] = not CREATED return obj_form
['def', 'get_composition_form_for_create', '(', 'self', ',', 'composition_record_types', ')', ':', '# Implemented from template for', '# osid.resource.ResourceAdminSession.get_resource_form_for_create_template', 'for', 'arg', 'in', 'composition_record_types', ':', 'if', 'not', 'isinstance', '(', 'arg', ',', 'ABCType', ')', ':', 'raise', 'errors', '.', 'InvalidArgument', '(', "'one or more argument array elements is not a valid OSID Type'", ')', 'if', 'composition_record_types', '==', '[', ']', ':', 'obj_form', '=', 'objects', '.', 'CompositionForm', '(', 'repository_id', '=', 'self', '.', '_catalog_id', ',', 'runtime', '=', 'self', '.', '_runtime', ',', 'effective_agent_id', '=', 'self', '.', 'get_effective_agent_id', '(', ')', ',', 'proxy', '=', 'self', '.', '_proxy', ')', 'else', ':', 'obj_form', '=', 'objects', '.', 'CompositionForm', '(', 'repository_id', '=', 'self', '.', '_catalog_id', ',', 'record_types', '=', 'composition_record_types', ',', 'runtime', '=', 'self', '.', '_runtime', ',', 'effective_agent_id', '=', 'self', '.', 'get_effective_agent_id', '(', ')', ',', 'proxy', '=', 'self', '.', '_proxy', ')', 'self', '.', '_forms', '[', 'obj_form', '.', 'get_id', '(', ')', '.', 'get_identifier', '(', ')', ']', '=', 'not', 'CREATED', 'return', 'obj_form']
Gets the composition form for creating new compositions. A new form should be requested for each create transaction. arg: composition_record_types (osid.type.Type[]): array of composition record types return: (osid.repository.CompositionForm) - the composition form raise: NullArgument - ``composition_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.*
['Gets', 'the', 'composition', 'form', 'for', 'creating', 'new', 'compositions', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L3848-L3883
9,394
qacafe/cdrouter.py
cdrouter/tags.py
TagsService.get
def get(self, name): """Get a tag. :param name: Tag name as string. :return: :class:`tags.Tag <tags.Tag>` object :rtype: tags.Tag """ schema = TagSchema() resp = self.service.get_id(self.base, name) return self.service.decode(schema, resp)
python
def get(self, name): """Get a tag. :param name: Tag name as string. :return: :class:`tags.Tag <tags.Tag>` object :rtype: tags.Tag """ schema = TagSchema() resp = self.service.get_id(self.base, name) return self.service.decode(schema, resp)
['def', 'get', '(', 'self', ',', 'name', ')', ':', 'schema', '=', 'TagSchema', '(', ')', 'resp', '=', 'self', '.', 'service', '.', 'get_id', '(', 'self', '.', 'base', ',', 'name', ')', 'return', 'self', '.', 'service', '.', 'decode', '(', 'schema', ',', 'resp', ')']
Get a tag. :param name: Tag name as string. :return: :class:`tags.Tag <tags.Tag>` object :rtype: tags.Tag
['Get', 'a', 'tag', '.']
train
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/tags.py#L93-L102
9,395
Julian/jsonschema
jsonschema/_format.py
FormatChecker.checks
def checks(self, format, raises=()): """ Register a decorated function as validating a new format. Arguments: format (str): The format that the decorated function will check. raises (Exception): The exception(s) raised by the decorated function when an invalid instance is found. The exception object will be accessible as the `jsonschema.exceptions.ValidationError.cause` attribute of the resulting validation error. """ def _checks(func): self.checkers[format] = (func, raises) return func return _checks
python
def checks(self, format, raises=()): """ Register a decorated function as validating a new format. Arguments: format (str): The format that the decorated function will check. raises (Exception): The exception(s) raised by the decorated function when an invalid instance is found. The exception object will be accessible as the `jsonschema.exceptions.ValidationError.cause` attribute of the resulting validation error. """ def _checks(func): self.checkers[format] = (func, raises) return func return _checks
['def', 'checks', '(', 'self', ',', 'format', ',', 'raises', '=', '(', ')', ')', ':', 'def', '_checks', '(', 'func', ')', ':', 'self', '.', 'checkers', '[', 'format', ']', '=', '(', 'func', ',', 'raises', ')', 'return', 'func', 'return', '_checks']
Register a decorated function as validating a new format. Arguments: format (str): The format that the decorated function will check. raises (Exception): The exception(s) raised by the decorated function when an invalid instance is found. The exception object will be accessible as the `jsonschema.exceptions.ValidationError.cause` attribute of the resulting validation error.
['Register', 'a', 'decorated', 'function', 'as', 'validating', 'a', 'new', 'format', '.']
train
https://github.com/Julian/jsonschema/blob/a72332004cdc3ba456de7918bc32059822b2f69a/jsonschema/_format.py#L42-L66
9,396
ZELLMECHANIK-DRESDEN/dclab
dclab/features/volume.py
get_volume
def get_volume(cont, pos_x, pos_y, pix): """Calculate the volume of a polygon revolved around an axis The volume estimation assumes rotational symmetry. Green`s theorem and the Gaussian divergence theorem allow to formulate the volume as a line integral. Parameters ---------- cont: ndarray or list of ndarrays of shape (N,2) A 2D array that holds the contour of an event [px] e.g. obtained using `mm.contour` where `mm` is an instance of `RTDCBase`. The first and second columns of `cont` correspond to the x- and y-coordinates of the contour. pos_x: float or ndarray of length N The x coordinate(s) of the centroid of the event(s) [µm] e.g. obtained using `mm.pos_x` pos_y: float or ndarray of length N The y coordinate(s) of the centroid of the event(s) [µm] e.g. obtained using `mm.pos_y` px_um: float The detector pixel size in µm. e.g. obtained using: `mm.config["image"]["pix size"]` Returns ------- volume: float or ndarray volume in um^3 Notes ----- The computation of the volume is based on a full rotation of the upper and the lower halves of the contour from which the average is then used. The volume is computed radially from the the center position given by (`pos_x`, `pos_y`). For sufficiently smooth contours, such as densely sampled ellipses, the center position does not play an important role. For contours that are given on a coarse grid, as is the case for RT-DC, the center position must be given. References ---------- - Halpern et al. :cite:`Halpern2002`, chapter 5, Section 5.4 - This is a translation from a `Matlab script <http://de.mathworks.com/matlabcentral/fileexchange/36525-volrevolve>`_ by Geoff Olynyk. """ if np.isscalar(pos_x): cont = [cont] ret_list = False else: ret_list = True # Convert input to 1D arrays pos_x = np.atleast_1d(pos_x) pos_y = np.atleast_1d(pos_y) if pos_x.size != pos_y.size: raise ValueError("Size of `pos_x` and `pos_y` must match!") if pos_x.size > 1 and len(cont) <= 1: raise ValueError("Number of given contours too small!") # results are stored in a separate array initialized with nans v_avg = np.zeros_like(pos_x, dtype=float)*np.nan # v_avg has the shape of `pos_x`. We are iterating over the smallest # length for `cont` and `pos_x`. for ii in range(min(len(cont), pos_x.shape[0])): # If the contour has less than 4 pixels, the computation will fail. # In that case, the value np.nan is already assigned. cc = cont[ii] if cc.shape[0] >= 4: # Center contour coordinates with given centroid contour_x = cc[:, 0] - pos_x[ii] / pix contour_y = cc[:, 1] - pos_y[ii] / pix # Make sure contour is counter-clockwise contour_x, contour_y = counter_clockwise(contour_x, contour_y) # Which points are below the x-axis? (y<0)? ind_low = np.where(contour_y < 0) # These points will be shifted up to y=0 to build an x-axis # (wont contribute to lower volume). contour_y_low = np.copy(contour_y) contour_y_low[ind_low] = 0 # Which points are above the x-axis? (y>0)? ind_upp = np.where(contour_y > 0) # These points will be shifted down to y=0 to build an x-axis # (wont contribute to upper volume). contour_y_upp = np.copy(contour_y) contour_y_upp[ind_upp] = 0 # Move the contour to the left Z = contour_x # Last point of the contour has to overlap with the first point Z = np.hstack([Z, Z[0]]) Zp = Z[0:-1] dZ = Z[1:]-Zp # Last point of the contour has to overlap with the first point contour_y_low = np.hstack([contour_y_low, contour_y_low[0]]) contour_y_upp = np.hstack([contour_y_upp, contour_y_upp[0]]) vol_low = _vol_helper(contour_y_low, Z, Zp, dZ, pix) vol_upp = _vol_helper(contour_y_upp, Z, Zp, dZ, pix) v_avg[ii] = (vol_low + vol_upp) / 2 if not ret_list: # Do not return a list if the input contour was not in a list v_avg = v_avg[0] return v_avg
python
def get_volume(cont, pos_x, pos_y, pix): """Calculate the volume of a polygon revolved around an axis The volume estimation assumes rotational symmetry. Green`s theorem and the Gaussian divergence theorem allow to formulate the volume as a line integral. Parameters ---------- cont: ndarray or list of ndarrays of shape (N,2) A 2D array that holds the contour of an event [px] e.g. obtained using `mm.contour` where `mm` is an instance of `RTDCBase`. The first and second columns of `cont` correspond to the x- and y-coordinates of the contour. pos_x: float or ndarray of length N The x coordinate(s) of the centroid of the event(s) [µm] e.g. obtained using `mm.pos_x` pos_y: float or ndarray of length N The y coordinate(s) of the centroid of the event(s) [µm] e.g. obtained using `mm.pos_y` px_um: float The detector pixel size in µm. e.g. obtained using: `mm.config["image"]["pix size"]` Returns ------- volume: float or ndarray volume in um^3 Notes ----- The computation of the volume is based on a full rotation of the upper and the lower halves of the contour from which the average is then used. The volume is computed radially from the the center position given by (`pos_x`, `pos_y`). For sufficiently smooth contours, such as densely sampled ellipses, the center position does not play an important role. For contours that are given on a coarse grid, as is the case for RT-DC, the center position must be given. References ---------- - Halpern et al. :cite:`Halpern2002`, chapter 5, Section 5.4 - This is a translation from a `Matlab script <http://de.mathworks.com/matlabcentral/fileexchange/36525-volrevolve>`_ by Geoff Olynyk. """ if np.isscalar(pos_x): cont = [cont] ret_list = False else: ret_list = True # Convert input to 1D arrays pos_x = np.atleast_1d(pos_x) pos_y = np.atleast_1d(pos_y) if pos_x.size != pos_y.size: raise ValueError("Size of `pos_x` and `pos_y` must match!") if pos_x.size > 1 and len(cont) <= 1: raise ValueError("Number of given contours too small!") # results are stored in a separate array initialized with nans v_avg = np.zeros_like(pos_x, dtype=float)*np.nan # v_avg has the shape of `pos_x`. We are iterating over the smallest # length for `cont` and `pos_x`. for ii in range(min(len(cont), pos_x.shape[0])): # If the contour has less than 4 pixels, the computation will fail. # In that case, the value np.nan is already assigned. cc = cont[ii] if cc.shape[0] >= 4: # Center contour coordinates with given centroid contour_x = cc[:, 0] - pos_x[ii] / pix contour_y = cc[:, 1] - pos_y[ii] / pix # Make sure contour is counter-clockwise contour_x, contour_y = counter_clockwise(contour_x, contour_y) # Which points are below the x-axis? (y<0)? ind_low = np.where(contour_y < 0) # These points will be shifted up to y=0 to build an x-axis # (wont contribute to lower volume). contour_y_low = np.copy(contour_y) contour_y_low[ind_low] = 0 # Which points are above the x-axis? (y>0)? ind_upp = np.where(contour_y > 0) # These points will be shifted down to y=0 to build an x-axis # (wont contribute to upper volume). contour_y_upp = np.copy(contour_y) contour_y_upp[ind_upp] = 0 # Move the contour to the left Z = contour_x # Last point of the contour has to overlap with the first point Z = np.hstack([Z, Z[0]]) Zp = Z[0:-1] dZ = Z[1:]-Zp # Last point of the contour has to overlap with the first point contour_y_low = np.hstack([contour_y_low, contour_y_low[0]]) contour_y_upp = np.hstack([contour_y_upp, contour_y_upp[0]]) vol_low = _vol_helper(contour_y_low, Z, Zp, dZ, pix) vol_upp = _vol_helper(contour_y_upp, Z, Zp, dZ, pix) v_avg[ii] = (vol_low + vol_upp) / 2 if not ret_list: # Do not return a list if the input contour was not in a list v_avg = v_avg[0] return v_avg
['def', 'get_volume', '(', 'cont', ',', 'pos_x', ',', 'pos_y', ',', 'pix', ')', ':', 'if', 'np', '.', 'isscalar', '(', 'pos_x', ')', ':', 'cont', '=', '[', 'cont', ']', 'ret_list', '=', 'False', 'else', ':', 'ret_list', '=', 'True', '# Convert input to 1D arrays', 'pos_x', '=', 'np', '.', 'atleast_1d', '(', 'pos_x', ')', 'pos_y', '=', 'np', '.', 'atleast_1d', '(', 'pos_y', ')', 'if', 'pos_x', '.', 'size', '!=', 'pos_y', '.', 'size', ':', 'raise', 'ValueError', '(', '"Size of `pos_x` and `pos_y` must match!"', ')', 'if', 'pos_x', '.', 'size', '>', '1', 'and', 'len', '(', 'cont', ')', '<=', '1', ':', 'raise', 'ValueError', '(', '"Number of given contours too small!"', ')', '# results are stored in a separate array initialized with nans', 'v_avg', '=', 'np', '.', 'zeros_like', '(', 'pos_x', ',', 'dtype', '=', 'float', ')', '*', 'np', '.', 'nan', '# v_avg has the shape of `pos_x`. We are iterating over the smallest', '# length for `cont` and `pos_x`.', 'for', 'ii', 'in', 'range', '(', 'min', '(', 'len', '(', 'cont', ')', ',', 'pos_x', '.', 'shape', '[', '0', ']', ')', ')', ':', '# If the contour has less than 4 pixels, the computation will fail.', '# In that case, the value np.nan is already assigned.', 'cc', '=', 'cont', '[', 'ii', ']', 'if', 'cc', '.', 'shape', '[', '0', ']', '>=', '4', ':', '# Center contour coordinates with given centroid', 'contour_x', '=', 'cc', '[', ':', ',', '0', ']', '-', 'pos_x', '[', 'ii', ']', '/', 'pix', 'contour_y', '=', 'cc', '[', ':', ',', '1', ']', '-', 'pos_y', '[', 'ii', ']', '/', 'pix', '# Make sure contour is counter-clockwise', 'contour_x', ',', 'contour_y', '=', 'counter_clockwise', '(', 'contour_x', ',', 'contour_y', ')', '# Which points are below the x-axis? (y<0)?', 'ind_low', '=', 'np', '.', 'where', '(', 'contour_y', '<', '0', ')', '# These points will be shifted up to y=0 to build an x-axis', '# (wont contribute to lower volume).', 'contour_y_low', '=', 'np', '.', 'copy', '(', 'contour_y', ')', 'contour_y_low', '[', 'ind_low', ']', '=', '0', '# Which points are above the x-axis? (y>0)?', 'ind_upp', '=', 'np', '.', 'where', '(', 'contour_y', '>', '0', ')', '# These points will be shifted down to y=0 to build an x-axis', '# (wont contribute to upper volume).', 'contour_y_upp', '=', 'np', '.', 'copy', '(', 'contour_y', ')', 'contour_y_upp', '[', 'ind_upp', ']', '=', '0', '# Move the contour to the left', 'Z', '=', 'contour_x', '# Last point of the contour has to overlap with the first point', 'Z', '=', 'np', '.', 'hstack', '(', '[', 'Z', ',', 'Z', '[', '0', ']', ']', ')', 'Zp', '=', 'Z', '[', '0', ':', '-', '1', ']', 'dZ', '=', 'Z', '[', '1', ':', ']', '-', 'Zp', '# Last point of the contour has to overlap with the first point', 'contour_y_low', '=', 'np', '.', 'hstack', '(', '[', 'contour_y_low', ',', 'contour_y_low', '[', '0', ']', ']', ')', 'contour_y_upp', '=', 'np', '.', 'hstack', '(', '[', 'contour_y_upp', ',', 'contour_y_upp', '[', '0', ']', ']', ')', 'vol_low', '=', '_vol_helper', '(', 'contour_y_low', ',', 'Z', ',', 'Zp', ',', 'dZ', ',', 'pix', ')', 'vol_upp', '=', '_vol_helper', '(', 'contour_y_upp', ',', 'Z', ',', 'Zp', ',', 'dZ', ',', 'pix', ')', 'v_avg', '[', 'ii', ']', '=', '(', 'vol_low', '+', 'vol_upp', ')', '/', '2', 'if', 'not', 'ret_list', ':', '# Do not return a list if the input contour was not in a list', 'v_avg', '=', 'v_avg', '[', '0', ']', 'return', 'v_avg']
Calculate the volume of a polygon revolved around an axis The volume estimation assumes rotational symmetry. Green`s theorem and the Gaussian divergence theorem allow to formulate the volume as a line integral. Parameters ---------- cont: ndarray or list of ndarrays of shape (N,2) A 2D array that holds the contour of an event [px] e.g. obtained using `mm.contour` where `mm` is an instance of `RTDCBase`. The first and second columns of `cont` correspond to the x- and y-coordinates of the contour. pos_x: float or ndarray of length N The x coordinate(s) of the centroid of the event(s) [µm] e.g. obtained using `mm.pos_x` pos_y: float or ndarray of length N The y coordinate(s) of the centroid of the event(s) [µm] e.g. obtained using `mm.pos_y` px_um: float The detector pixel size in µm. e.g. obtained using: `mm.config["image"]["pix size"]` Returns ------- volume: float or ndarray volume in um^3 Notes ----- The computation of the volume is based on a full rotation of the upper and the lower halves of the contour from which the average is then used. The volume is computed radially from the the center position given by (`pos_x`, `pos_y`). For sufficiently smooth contours, such as densely sampled ellipses, the center position does not play an important role. For contours that are given on a coarse grid, as is the case for RT-DC, the center position must be given. References ---------- - Halpern et al. :cite:`Halpern2002`, chapter 5, Section 5.4 - This is a translation from a `Matlab script <http://de.mathworks.com/matlabcentral/fileexchange/36525-volrevolve>`_ by Geoff Olynyk.
['Calculate', 'the', 'volume', 'of', 'a', 'polygon', 'revolved', 'around', 'an', 'axis']
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/volume.py#L9-L121
9,397
pkkid/python-plexapi
plexapi/library.py
Library.search
def search(self, title=None, libtype=None, **kwargs): """ Searching within a library section is much more powerful. It seems certain attributes on the media objects can be targeted to filter this search down a bit, but I havent found the documentation for it. Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other items such as actor=<id> seem to work, but require you already know the id of the actor. TLDR: This is untested but seems to work. Use library section search when you can. """ args = {} if title: args['title'] = title if libtype: args['type'] = utils.searchType(libtype) for attr, value in kwargs.items(): args[attr] = value key = '/library/all%s' % utils.joinArgs(args) return self.fetchItems(key)
python
def search(self, title=None, libtype=None, **kwargs): """ Searching within a library section is much more powerful. It seems certain attributes on the media objects can be targeted to filter this search down a bit, but I havent found the documentation for it. Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other items such as actor=<id> seem to work, but require you already know the id of the actor. TLDR: This is untested but seems to work. Use library section search when you can. """ args = {} if title: args['title'] = title if libtype: args['type'] = utils.searchType(libtype) for attr, value in kwargs.items(): args[attr] = value key = '/library/all%s' % utils.joinArgs(args) return self.fetchItems(key)
['def', 'search', '(', 'self', ',', 'title', '=', 'None', ',', 'libtype', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'args', '=', '{', '}', 'if', 'title', ':', 'args', '[', "'title'", ']', '=', 'title', 'if', 'libtype', ':', 'args', '[', "'type'", ']', '=', 'utils', '.', 'searchType', '(', 'libtype', ')', 'for', 'attr', ',', 'value', 'in', 'kwargs', '.', 'items', '(', ')', ':', 'args', '[', 'attr', ']', '=', 'value', 'key', '=', "'/library/all%s'", '%', 'utils', '.', 'joinArgs', '(', 'args', ')', 'return', 'self', '.', 'fetchItems', '(', 'key', ')']
Searching within a library section is much more powerful. It seems certain attributes on the media objects can be targeted to filter this search down a bit, but I havent found the documentation for it. Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other items such as actor=<id> seem to work, but require you already know the id of the actor. TLDR: This is untested but seems to work. Use library section search when you can.
['Searching', 'within', 'a', 'library', 'section', 'is', 'much', 'more', 'powerful', '.', 'It', 'seems', 'certain', 'attributes', 'on', 'the', 'media', 'objects', 'can', 'be', 'targeted', 'to', 'filter', 'this', 'search', 'down', 'a', 'bit', 'but', 'I', 'havent', 'found', 'the', 'documentation', 'for', 'it', '.']
train
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/library.py#L85-L102
9,398
chrisspen/dtree
dtree.py
Data.validate_row
def validate_row(self, row): """ Ensure each element in the row matches the schema. """ clean_row = {} if isinstance(row, (tuple, list)): assert self.header_order, "No attribute order specified." assert len(row) == len(self.header_order), \ "Row length does not match header length." itr = zip(self.header_order, row) else: assert isinstance(row, dict) itr = iteritems(row) for el_name, el_value in itr: if self.header_types[el_name] == ATTR_TYPE_DISCRETE: clean_row[el_name] = int(el_value) elif self.header_types[el_name] == ATTR_TYPE_CONTINUOUS: clean_row[el_name] = float(el_value) else: clean_row[el_name] = el_value return clean_row
python
def validate_row(self, row): """ Ensure each element in the row matches the schema. """ clean_row = {} if isinstance(row, (tuple, list)): assert self.header_order, "No attribute order specified." assert len(row) == len(self.header_order), \ "Row length does not match header length." itr = zip(self.header_order, row) else: assert isinstance(row, dict) itr = iteritems(row) for el_name, el_value in itr: if self.header_types[el_name] == ATTR_TYPE_DISCRETE: clean_row[el_name] = int(el_value) elif self.header_types[el_name] == ATTR_TYPE_CONTINUOUS: clean_row[el_name] = float(el_value) else: clean_row[el_name] = el_value return clean_row
['def', 'validate_row', '(', 'self', ',', 'row', ')', ':', 'clean_row', '=', '{', '}', 'if', 'isinstance', '(', 'row', ',', '(', 'tuple', ',', 'list', ')', ')', ':', 'assert', 'self', '.', 'header_order', ',', '"No attribute order specified."', 'assert', 'len', '(', 'row', ')', '==', 'len', '(', 'self', '.', 'header_order', ')', ',', '"Row length does not match header length."', 'itr', '=', 'zip', '(', 'self', '.', 'header_order', ',', 'row', ')', 'else', ':', 'assert', 'isinstance', '(', 'row', ',', 'dict', ')', 'itr', '=', 'iteritems', '(', 'row', ')', 'for', 'el_name', ',', 'el_value', 'in', 'itr', ':', 'if', 'self', '.', 'header_types', '[', 'el_name', ']', '==', 'ATTR_TYPE_DISCRETE', ':', 'clean_row', '[', 'el_name', ']', '=', 'int', '(', 'el_value', ')', 'elif', 'self', '.', 'header_types', '[', 'el_name', ']', '==', 'ATTR_TYPE_CONTINUOUS', ':', 'clean_row', '[', 'el_name', ']', '=', 'float', '(', 'el_value', ')', 'else', ':', 'clean_row', '[', 'el_name', ']', '=', 'el_value', 'return', 'clean_row']
Ensure each element in the row matches the schema.
['Ensure', 'each', 'element', 'in', 'the', 'row', 'matches', 'the', 'schema', '.']
train
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L755-L775
9,399
ThreshingFloor/libtf
libtf/logparsers/tf_generic_log.py
TFGenericLog._extract_features
def _extract_features(self): """ Get the feature data from the log file necessary for a reduction """ for parsed_line in self.parsed_lines: result = {'raw': parsed_line} if 'ip' in parsed_line: result['ip'] = parsed_line['ip'] if result['ip'] not in self.features['ips']: self.features['ips'].append(result['ip'])
python
def _extract_features(self): """ Get the feature data from the log file necessary for a reduction """ for parsed_line in self.parsed_lines: result = {'raw': parsed_line} if 'ip' in parsed_line: result['ip'] = parsed_line['ip'] if result['ip'] not in self.features['ips']: self.features['ips'].append(result['ip'])
['def', '_extract_features', '(', 'self', ')', ':', 'for', 'parsed_line', 'in', 'self', '.', 'parsed_lines', ':', 'result', '=', '{', "'raw'", ':', 'parsed_line', '}', 'if', "'ip'", 'in', 'parsed_line', ':', 'result', '[', "'ip'", ']', '=', 'parsed_line', '[', "'ip'", ']', 'if', 'result', '[', "'ip'", ']', 'not', 'in', 'self', '.', 'features', '[', "'ips'", ']', ':', 'self', '.', 'features', '[', "'ips'", ']', '.', 'append', '(', 'result', '[', "'ip'", ']', ')']
Get the feature data from the log file necessary for a reduction
['Get', 'the', 'feature', 'data', 'from', 'the', 'log', 'file', 'necessary', 'for', 'a', 'reduction']
train
https://github.com/ThreshingFloor/libtf/blob/f1a8710f750639c9b9e2a468ece0d2923bf8c3df/libtf/logparsers/tf_generic_log.py#L45-L55