Columns:
  text: string, lengths 75 to 104k
  code_tokens: list
  avg_line_len: float64, 7.91 to 980
  score: float64, 0 to 0.18
def get_case_groups(adapter, total_cases, institute_id=None, slice_query=None):
    """Return the information about case groups

    Args:
        adapter(adapter.MongoAdapter)
        total_cases(int): Total number of cases
        institute_id(str): Institute to restrict the cases to
        slice_query(str): Query to filter cases to obtain statistics for.

    Returns:
        cases(list(dict)): Number and percentage of cases per status
    """
    # Create a group with all cases in the database
    cases = [{'status': 'all', 'count': total_cases, 'percent': 1}]

    # Group the cases based on their status
    pipeline = []
    group = {'$group' : {'_id': '$status', 'count': {'$sum': 1}}}

    subquery = {}
    if institute_id and slice_query:
        subquery = adapter.cases(owner=institute_id, name_query=slice_query,
                                 yield_query=True)
    elif institute_id:
        subquery = adapter.cases(owner=institute_id, yield_query=True)
    elif slice_query:
        subquery = adapter.cases(name_query=slice_query, yield_query=True)

    query = {'$match': subquery} if subquery else {}

    if query:
        pipeline.append(query)
    pipeline.append(group)

    res = adapter.case_collection.aggregate(pipeline)
    for status_group in res:
        cases.append({'status': status_group['_id'],
                      'count': status_group['count'],
                      'percent': status_group['count'] / total_cases})
    return cases
[ "def", "get_case_groups", "(", "adapter", ",", "total_cases", ",", "institute_id", "=", "None", ",", "slice_query", "=", "None", ")", ":", "# Create a group with all cases in the database", "cases", "=", "[", "{", "'status'", ":", "'all'", ",", "'count'", ":", "total_cases", ",", "'percent'", ":", "1", "}", "]", "# Group the cases based on their status", "pipeline", "=", "[", "]", "group", "=", "{", "'$group'", ":", "{", "'_id'", ":", "'$status'", ",", "'count'", ":", "{", "'$sum'", ":", "1", "}", "}", "}", "subquery", "=", "{", "}", "if", "institute_id", "and", "slice_query", ":", "subquery", "=", "adapter", ".", "cases", "(", "owner", "=", "institute_id", ",", "name_query", "=", "slice_query", ",", "yield_query", "=", "True", ")", "elif", "institute_id", ":", "subquery", "=", "adapter", ".", "cases", "(", "owner", "=", "institute_id", ",", "yield_query", "=", "True", ")", "elif", "slice_query", ":", "subquery", "=", "adapter", ".", "cases", "(", "name_query", "=", "slice_query", ",", "yield_query", "=", "True", ")", "query", "=", "{", "'$match'", ":", "subquery", "}", "if", "subquery", "else", "{", "}", "if", "query", ":", "pipeline", ".", "append", "(", "query", ")", "pipeline", ".", "append", "(", "group", ")", "res", "=", "adapter", ".", "case_collection", ".", "aggregate", "(", "pipeline", ")", "for", "status_group", "in", "res", ":", "cases", ".", "append", "(", "{", "'status'", ":", "status_group", "[", "'_id'", "]", ",", "'count'", ":", "status_group", "[", "'count'", "]", ",", "'percent'", ":", "status_group", "[", "'count'", "]", "/", "total_cases", "}", ")", "return", "cases" ]
33.2
0.002195
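For reference, the `$match` + `$group` pipeline assembled above reduces to a two-stage aggregation; a minimal standalone sketch, assuming a local MongoDB reachable via pymongo and a hypothetical `case` collection whose documents carry `status` and `owner` fields:

from pymongo import MongoClient

client = MongoClient()      # assumes a local MongoDB instance
db = client['scout_demo']   # hypothetical database name

pipeline = [
    {'$match': {'owner': 'cust000'}},  # optional institute filter (hypothetical value)
    {'$group': {'_id': '$status', 'count': {'$sum': 1}}},
]
for status_group in db.case.aggregate(pipeline):
    print(status_group['_id'], status_group['count'])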
def list_backups(path, limit=None):
    '''
    .. versionadded:: 0.17.0

    Lists the previous versions of a file backed up using Salt's :ref:`file
    state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    limit
        Limit the number of results to the most recent N backups

    CLI Example:

    .. code-block:: bash

        salt '*' file.list_backups /foo/bar/baz.txt
    '''
    path = os.path.expanduser(path)

    try:
        limit = int(limit)
    except TypeError:
        pass
    except ValueError:
        log.error('file.list_backups: \'limit\' value must be numeric')
        limit = None

    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    if salt.utils.platform.is_windows():
        # ':' is an illegal filesystem path character on Windows
        src_dir = parent_dir.replace(':', '_')
    else:
        src_dir = parent_dir[1:]
    # Figure out full path of location of backup file in minion cache
    bkdir = os.path.join(bkroot, src_dir)

    if not os.path.isdir(bkdir):
        return {}

    files = {}
    for fname in [x for x in os.listdir(bkdir)
                  if os.path.isfile(os.path.join(bkdir, x))]:
        if salt.utils.platform.is_windows():
            # ':' is an illegal filesystem path character on Windows
            strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename)
        else:
            strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
        try:
            timestamp = datetime.datetime.strptime(fname, strpfmt)
        except ValueError:
            # File didn't match the strp format string, so it's not a backup
            # for this file. Move on to the next one.
            continue
        if salt.utils.platform.is_windows():
            str_format = '%a %b %d %Y %H-%M-%S.%f'
        else:
            str_format = '%a %b %d %Y %H:%M:%S.%f'
        files.setdefault(timestamp, {})['Backup Time'] = \
            timestamp.strftime(str_format)
        location = os.path.join(bkdir, fname)
        files[timestamp]['Size'] = os.stat(location).st_size
        files[timestamp]['Location'] = location
    return dict(list(zip(
        list(range(len(files))),
        [files[x] for x in sorted(files, reverse=True)[:limit]]
    )))
[ "def", "list_backups", "(", "path", ",", "limit", "=", "None", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "try", ":", "limit", "=", "int", "(", "limit", ")", "except", "TypeError", ":", "pass", "except", "ValueError", ":", "log", ".", "error", "(", "'file.list_backups: \\'limit\\' value must be numeric'", ")", "limit", "=", "None", "bkroot", "=", "_get_bkroot", "(", ")", "parent_dir", ",", "basename", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "# ':' is an illegal filesystem path character on Windows", "src_dir", "=", "parent_dir", ".", "replace", "(", "':'", ",", "'_'", ")", "else", ":", "src_dir", "=", "parent_dir", "[", "1", ":", "]", "# Figure out full path of location of backup file in minion cache", "bkdir", "=", "os", ".", "path", ".", "join", "(", "bkroot", ",", "src_dir", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "bkdir", ")", ":", "return", "{", "}", "files", "=", "{", "}", "for", "fname", "in", "[", "x", "for", "x", "in", "os", ".", "listdir", "(", "bkdir", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "bkdir", ",", "x", ")", ")", "]", ":", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "# ':' is an illegal filesystem path character on Windows", "strpfmt", "=", "'{0}_%a_%b_%d_%H-%M-%S_%f_%Y'", ".", "format", "(", "basename", ")", "else", ":", "strpfmt", "=", "'{0}_%a_%b_%d_%H:%M:%S_%f_%Y'", ".", "format", "(", "basename", ")", "try", ":", "timestamp", "=", "datetime", ".", "datetime", ".", "strptime", "(", "fname", ",", "strpfmt", ")", "except", "ValueError", ":", "# File didn't match the strp format string, so it's not a backup", "# for this file. Move on to the next one.", "continue", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "str_format", "=", "'%a %b %d %Y %H-%M-%S.%f'", "else", ":", "str_format", "=", "'%a %b %d %Y %H:%M:%S.%f'", "files", ".", "setdefault", "(", "timestamp", ",", "{", "}", ")", "[", "'Backup Time'", "]", "=", "timestamp", ".", "strftime", "(", "str_format", ")", "location", "=", "os", ".", "path", ".", "join", "(", "bkdir", ",", "fname", ")", "files", "[", "timestamp", "]", "[", "'Size'", "]", "=", "os", ".", "stat", "(", "location", ")", ".", "st_size", "files", "[", "timestamp", "]", "[", "'Location'", "]", "=", "location", "return", "dict", "(", "list", "(", "zip", "(", "list", "(", "range", "(", "len", "(", "files", ")", ")", ")", ",", "[", "files", "[", "x", "]", "for", "x", "in", "sorted", "(", "files", ",", "reverse", "=", "True", ")", "[", ":", "limit", "]", "]", ")", ")", ")" ]
32.304348
0.000435
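The backup filenames parsed above embed their timestamp directly; a small round-trip sketch with a hypothetical basename `baz.txt` in the non-Windows format:

import datetime

# Hypothetical backup filename in the minion-cache naming scheme (non-Windows).
fname = 'baz.txt_Mon_Jan_01_12:00:00_000000_2024'
strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format('baz.txt')
timestamp = datetime.datetime.strptime(fname, strpfmt)
print(timestamp.strftime('%a %b %d %Y %H:%M:%S.%f'))
# Mon Jan 01 2024 12:00:00.000000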
def read_api_service_status(self, name, **kwargs):  # noqa: E501
    """read_api_service_status  # noqa: E501

    read status of the specified APIService  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_api_service_status(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the APIService (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1beta1APIService
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.read_api_service_status_with_http_info(name, **kwargs)  # noqa: E501
    else:
        (data) = self.read_api_service_status_with_http_info(name, **kwargs)  # noqa: E501
        return data
[ "def", "read_api_service_status", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "read_api_service_status_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "read_api_service_status_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
46.363636
0.001921
def _parse_ip_addr_show(raw_result):
    """
    Parse the 'ip addr list dev' command raw output.

    :param str raw_result: os raw result string.
    :rtype: dict
    :return: The parsed result of the show interface command in a \
        dictionary of the form:

     ::

        {
            'os_index' : '0',
            'dev' : 'eth0',
            'falgs_str': 'BROADCAST,MULTICAST,UP,LOWER_UP',
            'mtu': 1500,
            'state': 'down',
            'link_type': 'ether',
            'mac_address': '00:50:56:01:2e:f6',
            'inet': '20.1.1.2',
            'inet_mask': '24',
            'inet6': 'fe80::42:acff:fe11:2',
            'inet6_mask': '64'
        }
    """
    # does link exist?
    show_re = (
        r'"(?P<dev>\S+)"\s+does not exist'
    )
    re_result = search(show_re, raw_result)
    result = None

    if not (re_result):
        # match top two lines for several 'always there' variables
        show_re = (
            r'\s*(?P<os_index>\d+):\s+(?P<dev>\S+):\s+<(?P<falgs_str>.*)?>.*?'
            r'mtu\s+(?P<mtu>\d+).+?state\s+(?P<state>\w+).*'
            r'\s*link/(?P<link_type>\w+)\s+(?P<mac_address>\S+)'
        )

        re_result = search(show_re, raw_result, DOTALL)
        result = re_result.groupdict()

        # seek inet if it's there
        show_re = (
            r'((inet )\s*(?P<inet>[^/]+)/(?P<inet_mask>\d{1,2}))'
        )
        re_result = search(show_re, raw_result)
        if (re_result):
            result.update(re_result.groupdict())

        # seek inet6 if it's there
        show_re = (
            r'((?<=inet6 )(?P<inet6>[^/]+)/(?P<inet6_mask>\d{1,2}))'
        )
        re_result = search(show_re, raw_result)
        if (re_result):
            result.update(re_result.groupdict())

        # cleanup dictionary before returning
        for key, value in result.items():
            if value is not None:
                if value.isdigit():
                    result[key] = int(value)

    return result
[ "def", "_parse_ip_addr_show", "(", "raw_result", ")", ":", "# does link exist?", "show_re", "=", "(", "r'\"(?P<dev>\\S+)\"\\s+does not exist'", ")", "re_result", "=", "search", "(", "show_re", ",", "raw_result", ")", "result", "=", "None", "if", "not", "(", "re_result", ")", ":", "# match top two lines for serveral 'always there' variables", "show_re", "=", "(", "r'\\s*(?P<os_index>\\d+):\\s+(?P<dev>\\S+):\\s+<(?P<falgs_str>.*)?>.*?'", "r'mtu\\s+(?P<mtu>\\d+).+?state\\s+(?P<state>\\w+).*'", "r'\\s*link/(?P<link_type>\\w+)\\s+(?P<mac_address>\\S+)'", ")", "re_result", "=", "search", "(", "show_re", ",", "raw_result", ",", "DOTALL", ")", "result", "=", "re_result", ".", "groupdict", "(", ")", "# seek inet if its there", "show_re", "=", "(", "r'((inet )\\s*(?P<inet>[^/]+)/(?P<inet_mask>\\d{1,2}))'", ")", "re_result", "=", "search", "(", "show_re", ",", "raw_result", ")", "if", "(", "re_result", ")", ":", "result", ".", "update", "(", "re_result", ".", "groupdict", "(", ")", ")", "# seek inet6 if its there", "show_re", "=", "(", "r'((?<=inet6 )(?P<inet6>[^/]+)/(?P<inet6_mask>\\d{1,2}))'", ")", "re_result", "=", "search", "(", "show_re", ",", "raw_result", ")", "if", "(", "re_result", ")", ":", "result", ".", "update", "(", "re_result", ".", "groupdict", "(", ")", ")", "# cleanup dictionary before returning", "for", "key", ",", "value", "in", "result", ".", "items", "(", ")", ":", "if", "value", "is", "not", "None", ":", "if", "value", ".", "isdigit", "(", ")", ":", "result", "[", "key", "]", "=", "int", "(", "value", ")", "return", "result" ]
29.515152
0.000497
def get_work_kind(self):
    """
    We'll have a kind_slug like 'movies'.
    We need to translate that into a work `kind` like 'movie'.
    """
    slugs_to_kinds = {v: k for k, v in Work.KIND_SLUGS.items()}
    return slugs_to_kinds.get(self.kind_slug, None)
[ "def", "get_work_kind", "(", "self", ")", ":", "slugs_to_kinds", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "Work", ".", "KIND_SLUGS", ".", "items", "(", ")", "}", "return", "slugs_to_kinds", ".", "get", "(", "self", ".", "kind_slug", ",", "None", ")" ]
39.571429
0.014134
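The dict-inversion trick above stands alone; a tiny illustration, assuming `Work.KIND_SLUGS` maps kinds to slugs roughly like this (hypothetical values):

KIND_SLUGS = {'movie': 'movies', 'book': 'books'}  # hypothetical mapping

slugs_to_kinds = {v: k for k, v in KIND_SLUGS.items()}
assert slugs_to_kinds.get('movies') == 'movie'
assert slugs_to_kinds.get('nope') is None  # unknown slugs fall back to None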
def textMerge(self, second):
    """Merge two text nodes into one """
    if second is None:
        second__o = None
    else:
        second__o = second._o
    ret = libxml2mod.xmlTextMerge(self._o, second__o)
    if ret is None:
        raise treeError('xmlTextMerge() failed')
    __tmp = xmlNode(_obj=ret)
    return __tmp
[ "def", "textMerge", "(", "self", ",", "second", ")", ":", "if", "second", "is", "None", ":", "second__o", "=", "None", "else", ":", "second__o", "=", "second", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlTextMerge", "(", "self", ".", "_o", ",", "second__o", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlTextMerge() failed'", ")", "__tmp", "=", "xmlNode", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
40.375
0.018182
def list_group_members(self, group_url, max_results=0):
    ''' a method to retrieve a list of members for a meetup group

    :param group_url: string with meetup urlname for group
    :param max_results: [optional] integer with number of members to include
    :return: dictionary with list of member details inside [json] key

        member_details = self._reconstruct_member({})
    '''
    # https://www.meetup.com/meetup_api/docs/:urlname/members/#list

    title = '%s.list_group_members' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'group_url': group_url,
        'max_results': max_results
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # construct request fields
    url = '%s/%s/members' % (self.endpoint, group_url)
    params = {
        'fields': 'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats'
    }
    if max_results:
        params['page'] = str(max_results)

    # send request
    response_details = self._get_request(url, params=params)

    # reconstruct method output
    group_members = {
        'json': []
    }
    for key, value in response_details.items():
        if key != 'json':
            group_members[key] = value
    for member in response_details['json']:
        group_members['json'].append(self._reconstruct_member(member))

    return group_members
[ "def", "list_group_members", "(", "self", ",", "group_url", ",", "max_results", "=", "0", ")", ":", "# https://www.meetup.com/meetup_api/docs/:urlname/members/#list\r", "title", "=", "'%s.list_group_members'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs\r", "input_fields", "=", "{", "'group_url'", ":", "group_url", ",", "'max_results'", ":", "max_results", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# construct request fields\r", "url", "=", "'%s/%s/members'", "%", "(", "self", ".", "endpoint", ",", "group_url", ")", "params", "=", "{", "'fields'", ":", "'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats'", "}", "if", "max_results", ":", "params", "[", "'page'", "]", "=", "str", "(", "max_results", ")", "# send request\r", "response_details", "=", "self", ".", "_get_request", "(", "url", ",", "params", "=", "params", ")", "# reconstruct method output\r", "group_members", "=", "{", "'json'", ":", "[", "]", "}", "for", "key", ",", "value", "in", "response_details", ".", "items", "(", ")", ":", "if", "key", "!=", "'json'", ":", "group_members", "[", "key", "]", "=", "value", "for", "member", "in", "response_details", "[", "'json'", "]", ":", "group_members", "[", "'json'", "]", ".", "append", "(", "self", ".", "_reconstruct_member", "(", "member", ")", ")", "return", "group_members" ]
35.06383
0.002361
def mean_squared_logarithmic_error(pred:Tensor, targ:Tensor)->Rank0Tensor:
    "Mean squared logarithmic error between `pred` and `targ`."
    pred,targ = flatten_check(pred,targ)
    return F.mse_loss(torch.log(1 + pred), torch.log(1 + targ))
[ "def", "mean_squared_logarithmic_error", "(", "pred", ":", "Tensor", ",", "targ", ":", "Tensor", ")", "->", "Rank0Tensor", ":", "pred", ",", "targ", "=", "flatten_check", "(", "pred", ",", "targ", ")", "return", "F", ".", "mse_loss", "(", "torch", ".", "log", "(", "1", "+", "pred", ")", ",", "torch", ".", "log", "(", "1", "+", "targ", ")", ")" ]
60
0.024691
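Numerically this computes mean((log(1 + pred) - log(1 + targ))**2); a standalone check in plain PyTorch, independent of the fastai `Tensor`/`Rank0Tensor` aliases:

import torch
import torch.nn.functional as F

pred = torch.tensor([2.0, 7.0])
targ = torch.tensor([3.0, 7.0])
msle = F.mse_loss(torch.log(1 + pred), torch.log(1 + targ))
# equals ((log(3) - log(4))**2 + 0) / 2
print(msle.item())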
def _parse_qstat_state(qstat_out, job_id):
    """Parse "state" column from `qstat` output for given job_id

    Returns state for the *first* job matching job_id. Returns 'u' if
    `qstat` output is empty or job_id is not found.
    """
    if qstat_out.strip() == '':
        return 'u'
    lines = qstat_out.split('\n')

    # skip past header
    while not lines.pop(0).startswith('---'):
        pass

    for line in lines:
        if line:
            job, prior, name, user, state = line.strip().split()[0:5]
            if int(job) == int(job_id):
                return state
    return 'u'
[ "def", "_parse_qstat_state", "(", "qstat_out", ",", "job_id", ")", ":", "if", "qstat_out", ".", "strip", "(", ")", "==", "''", ":", "return", "'u'", "lines", "=", "qstat_out", ".", "split", "(", "'\\n'", ")", "# skip past header", "while", "not", "lines", ".", "pop", "(", "0", ")", ".", "startswith", "(", "'---'", ")", ":", "pass", "for", "line", "in", "lines", ":", "if", "line", ":", "job", ",", "prior", ",", "name", ",", "user", ",", "state", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "[", "0", ":", "5", "]", "if", "int", "(", "job", ")", "==", "int", "(", "job_id", ")", ":", "return", "state", "return", "'u'" ]
30.631579
0.001667
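A quick sanity check against a hypothetical Grid Engine `qstat` listing; the parser only relies on the dashed header rule and the first five columns:

qstat_out = """\
job-ID  prior    name   user   state  submit/start at
------------------------------------------------------
    42  0.55500  myjob  alice  r      01/01/2024 12:00:00
"""
assert _parse_qstat_state(qstat_out, 42) == 'r'
assert _parse_qstat_state(qstat_out, 99) == 'u'  # unknown job
assert _parse_qstat_state('', 42) == 'u'         # empty output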
def integrate_scanpy(adatas, **kwargs):
    """Integrate a list of `scanpy.api.AnnData`.

    Parameters
    ----------
    adatas : `list` of `scanpy.api.AnnData`
        Data sets to integrate.
    kwargs : `dict`
        See documentation for the `integrate()` method for a full list of
        parameters to use for batch correction.

    Returns
    -------
    integrated
        Returns a list of `np.ndarray` with integrated low-dimensional
        embeddings.
    """
    datasets_dimred, genes = integrate(
        [adata.X for adata in adatas],
        [adata.var_names.values for adata in adatas],
        **kwargs
    )
    return datasets_dimred
[ "def", "integrate_scanpy", "(", "adatas", ",", "*", "*", "kwargs", ")", ":", "datasets_dimred", ",", "genes", "=", "integrate", "(", "[", "adata", ".", "X", "for", "adata", "in", "adatas", "]", ",", "[", "adata", ".", "var_names", ".", "values", "for", "adata", "in", "adatas", "]", ",", "*", "*", "kwargs", ")", "return", "datasets_dimred" ]
26.541667
0.001515
def _read2(self, length=None, use_compression=None, project=None, **kwargs):
    '''
    :param length: Maximum number of bytes to be read
    :type length: integer
    :param project: project to use as context for this download (may affect
        which billing account is billed for this download). If specified,
        must be a project in which this file exists. If not specified, the
        project ID specified in the handler is used for the download, IF it
        contains this file. If set to DXFile.NO_PROJECT_HINT, no project ID
        is supplied for the download, even if the handler specifies a
        project ID.
    :type project: str or None
    :rtype: string
    :raises: :exc:`~dxpy.exceptions.ResourceNotFound` if *project* is
        supplied and it does not contain this file

    Returns the next *length* bytes, or all the bytes until the end of file
    (if no *length* is given or there are fewer than *length* bytes left in
    the file).

    .. note:: After the first call to read(), the project arg and
       passthrough kwargs are not respected while using the same response
       iterator (i.e. until next seek).
    '''
    if self._file_length == None:
        desc = self.describe(**kwargs)
        if desc["state"] != "closed":
            raise DXFileError("Cannot read from file until it is in the closed state")
        self._file_length = int(desc["size"])

    # If running on a worker, wait for the first file download chunk
    # to come back before issuing any more requests. This ensures
    # that all subsequent requests can take advantage of caching,
    # rather than having all of the first DXFILE_HTTP_THREADS
    # requests simultaneously hit a cold cache. Enforce a minimum
    # size for this heuristic so we don't incur the overhead for
    # tiny files (which wouldn't contribute as much to the load
    # anyway).
    get_first_chunk_sequentially = (self._file_length > 128 * 1024 and self._pos == 0 and dxpy.JOB_ID)

    if self._pos == self._file_length:
        return b""

    if length == None or length > self._file_length - self._pos:
        length = self._file_length - self._pos

    buf = self._read_buf
    buf_remaining_bytes = dxpy.utils.string_buffer_length(buf) - buf.tell()
    if length <= buf_remaining_bytes:
        self._pos += length
        return buf.read(length)
    else:
        orig_buf_pos = buf.tell()
        orig_file_pos = self._pos
        buf.seek(0, os.SEEK_END)
        self._pos += buf_remaining_bytes
        while self._pos < orig_file_pos + length:
            remaining_len = orig_file_pos + length - self._pos

            if self._response_iterator is None:
                self._request_iterator = self._generate_read_requests(
                    start_pos=self._pos, project=project, **kwargs)

            content = self._next_response_content(get_first_chunk_sequentially=get_first_chunk_sequentially)

            if len(content) < remaining_len:
                buf.write(content)
                self._pos += len(content)
            else:  # response goes beyond requested length
                buf.write(content[:remaining_len])
                self._pos += remaining_len
                self._read_buf = BytesIO()
                self._read_buf.write(content[remaining_len:])
                self._read_buf.seek(0)

        buf.seek(orig_buf_pos)
        return buf.read()
[ "def", "_read2", "(", "self", ",", "length", "=", "None", ",", "use_compression", "=", "None", ",", "project", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_file_length", "==", "None", ":", "desc", "=", "self", ".", "describe", "(", "*", "*", "kwargs", ")", "if", "desc", "[", "\"state\"", "]", "!=", "\"closed\"", ":", "raise", "DXFileError", "(", "\"Cannot read from file until it is in the closed state\"", ")", "self", ".", "_file_length", "=", "int", "(", "desc", "[", "\"size\"", "]", ")", "# If running on a worker, wait for the first file download chunk", "# to come back before issuing any more requests. This ensures", "# that all subsequent requests can take advantage of caching,", "# rather than having all of the first DXFILE_HTTP_THREADS", "# requests simultaneously hit a cold cache. Enforce a minimum", "# size for this heuristic so we don't incur the overhead for", "# tiny files (which wouldn't contribute as much to the load", "# anyway).", "get_first_chunk_sequentially", "=", "(", "self", ".", "_file_length", ">", "128", "*", "1024", "and", "self", ".", "_pos", "==", "0", "and", "dxpy", ".", "JOB_ID", ")", "if", "self", ".", "_pos", "==", "self", ".", "_file_length", ":", "return", "b\"\"", "if", "length", "==", "None", "or", "length", ">", "self", ".", "_file_length", "-", "self", ".", "_pos", ":", "length", "=", "self", ".", "_file_length", "-", "self", ".", "_pos", "buf", "=", "self", ".", "_read_buf", "buf_remaining_bytes", "=", "dxpy", ".", "utils", ".", "string_buffer_length", "(", "buf", ")", "-", "buf", ".", "tell", "(", ")", "if", "length", "<=", "buf_remaining_bytes", ":", "self", ".", "_pos", "+=", "length", "return", "buf", ".", "read", "(", "length", ")", "else", ":", "orig_buf_pos", "=", "buf", ".", "tell", "(", ")", "orig_file_pos", "=", "self", ".", "_pos", "buf", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "self", ".", "_pos", "+=", "buf_remaining_bytes", "while", "self", ".", "_pos", "<", "orig_file_pos", "+", "length", ":", "remaining_len", "=", "orig_file_pos", "+", "length", "-", "self", ".", "_pos", "if", "self", ".", "_response_iterator", "is", "None", ":", "self", ".", "_request_iterator", "=", "self", ".", "_generate_read_requests", "(", "start_pos", "=", "self", ".", "_pos", ",", "project", "=", "project", ",", "*", "*", "kwargs", ")", "content", "=", "self", ".", "_next_response_content", "(", "get_first_chunk_sequentially", "=", "get_first_chunk_sequentially", ")", "if", "len", "(", "content", ")", "<", "remaining_len", ":", "buf", ".", "write", "(", "content", ")", "self", ".", "_pos", "+=", "len", "(", "content", ")", "else", ":", "# response goes beyond requested length", "buf", ".", "write", "(", "content", "[", ":", "remaining_len", "]", ")", "self", ".", "_pos", "+=", "remaining_len", "self", ".", "_read_buf", "=", "BytesIO", "(", ")", "self", ".", "_read_buf", ".", "write", "(", "content", "[", "remaining_len", ":", "]", ")", "self", ".", "_read_buf", ".", "seek", "(", "0", ")", "buf", ".", "seek", "(", "orig_buf_pos", ")", "return", "buf", ".", "read", "(", ")" ]
46.480519
0.002462
def _filter_ignored(self, entries, selector=None):
    """Given an opaque entry list, filter any ignored entries.

    :param entries: A list or generator that produces entries to filter.
    :param selector: A function that computes a path for an entry relative
      to the root of the ProjectTree, or None to use identity.
    """
    selector = selector or (lambda x: x)
    prefixed_entries = [(self._append_slash_if_dir_path(selector(entry)), entry)
                        for entry in entries]
    ignored_paths = set(self.ignore.match_files(path for path, _ in prefixed_entries))
    return [entry for path, entry in prefixed_entries if path not in ignored_paths]
[ "def", "_filter_ignored", "(", "self", ",", "entries", ",", "selector", "=", "None", ")", ":", "selector", "=", "selector", "or", "(", "lambda", "x", ":", "x", ")", "prefixed_entries", "=", "[", "(", "self", ".", "_append_slash_if_dir_path", "(", "selector", "(", "entry", ")", ")", ",", "entry", ")", "for", "entry", "in", "entries", "]", "ignored_paths", "=", "set", "(", "self", ".", "ignore", ".", "match_files", "(", "path", "for", "path", ",", "_", "in", "prefixed_entries", ")", ")", "return", "[", "entry", "for", "path", ",", "entry", "in", "prefixed_entries", "if", "path", "not", "in", "ignored_paths", "]" ]
55.25
0.008902
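`self.ignore` here is expected to expose a `match_files` method in the style of `pathspec.PathSpec`; a minimal sketch of that matching step on its own, assuming gitignore-style patterns:

import pathspec

ignore = pathspec.PathSpec.from_lines('gitwildmatch', ['*.pyc', 'build/'])
paths = ['a.py', 'a.pyc', 'build/out.txt']
ignored = set(ignore.match_files(paths))
kept = [p for p in paths if p not in ignored]
print(kept)  # ['a.py']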
def create_state_multi_precision(self, index, weight):
    """Creates auxiliary state for a given weight, including FP32 high
    precision copy if original weight is FP16.

    This method is provided to perform automatic mixed precision training
    for optimizers that do not support it themselves.

    Parameters
    ----------
    index : int
        An unique index to identify the weight.
    weight : NDArray
        The weight.

    Returns
    -------
    state : any obj
        The state associated with the weight.
    """
    weight_master_copy = None
    if self.multi_precision and weight.dtype == numpy.float16:
        weight_master_copy = weight.astype(numpy.float32)
        return (weight_master_copy,) + (self.create_state(index, weight_master_copy),)
    if weight.dtype == numpy.float16 and not self.multi_precision:
        warnings.warn("Accumulating with float16 in optimizer can lead to "
                      "poor accuracy or slow convergence. "
                      "Consider using multi_precision=True option of the "
                      "optimizer")
    return self.create_state(index, weight)
[ "def", "create_state_multi_precision", "(", "self", ",", "index", ",", "weight", ")", ":", "weight_master_copy", "=", "None", "if", "self", ".", "multi_precision", "and", "weight", ".", "dtype", "==", "numpy", ".", "float16", ":", "weight_master_copy", "=", "weight", ".", "astype", "(", "numpy", ".", "float32", ")", "return", "(", "weight_master_copy", ",", ")", "+", "(", "self", ".", "create_state", "(", "index", ",", "weight_master_copy", ")", ",", ")", "if", "weight", ".", "dtype", "==", "numpy", ".", "float16", "and", "not", "self", ".", "multi_precision", ":", "warnings", ".", "warn", "(", "\"Accumulating with float16 in optimizer can lead to \"", "\"poor accuracy or slow convergence. \"", "\"Consider using multi_precision=True option of the \"", "\"optimizer\"", ")", "return", "self", ".", "create_state", "(", "index", ",", "weight", ")" ]
41.482759
0.002437
def line_oriented(cls, line_oriented_options, console):
    """Given Goal.Options and a Console, yields functions for writing to
    stdout and stderr, respectively.

    The passed options instance will generally be the `Goal.Options` of a
    `LineOriented` `Goal`.
    """
    if type(line_oriented_options) != cls.Options:
        raise AssertionError(
            'Expected Options for `{}`, got: {}'.format(cls.__name__, line_oriented_options))

    output_file = line_oriented_options.values.output_file
    sep = line_oriented_options.values.sep.encode('utf-8').decode('unicode_escape')

    stdout, stderr = console.stdout, console.stderr
    if output_file:
        stdout = open(output_file, 'w')

    try:
        print_stdout = lambda msg: print(msg, file=stdout, end=sep)
        print_stderr = lambda msg: print(msg, file=stderr)
        yield print_stdout, print_stderr
    finally:
        if output_file:
            stdout.close()
        else:
            stdout.flush()
        stderr.flush()
[ "def", "line_oriented", "(", "cls", ",", "line_oriented_options", ",", "console", ")", ":", "if", "type", "(", "line_oriented_options", ")", "!=", "cls", ".", "Options", ":", "raise", "AssertionError", "(", "'Expected Options for `{}`, got: {}'", ".", "format", "(", "cls", ".", "__name__", ",", "line_oriented_options", ")", ")", "output_file", "=", "line_oriented_options", ".", "values", ".", "output_file", "sep", "=", "line_oriented_options", ".", "values", ".", "sep", ".", "encode", "(", "'utf-8'", ")", ".", "decode", "(", "'unicode_escape'", ")", "stdout", ",", "stderr", "=", "console", ".", "stdout", ",", "console", ".", "stderr", "if", "output_file", ":", "stdout", "=", "open", "(", "output_file", ",", "'w'", ")", "try", ":", "print_stdout", "=", "lambda", "msg", ":", "print", "(", "msg", ",", "file", "=", "stdout", ",", "end", "=", "sep", ")", "print_stderr", "=", "lambda", "msg", ":", "print", "(", "msg", ",", "file", "=", "stderr", ")", "yield", "print_stdout", ",", "print_stderr", "finally", ":", "if", "output_file", ":", "stdout", ".", "close", "(", ")", "else", ":", "stdout", ".", "flush", "(", ")", "stderr", ".", "flush", "(", ")" ]
36.692308
0.015322
def stats(self, node_id=None, params=None):
    """
    The Cluster Stats API allows you to retrieve statistics from a cluster
    wide perspective. The API returns basic index metrics and information
    about the current nodes that form the cluster.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html>`_

    :arg node_id: A comma-separated list of node IDs or names to limit the
        returned information; use `_local` to return information from the
        node you're connecting to, leave empty to get information from all
        nodes
    :arg flat_settings: Return settings in flat format (default: false)
    :arg timeout: Explicit operation timeout
    """
    url = '/_cluster/stats'
    if node_id:
        url = _make_path('_cluster/stats/nodes', node_id)
    return self.transport.perform_request('GET', url, params=params)
[ "def", "stats", "(", "self", ",", "node_id", "=", "None", ",", "params", "=", "None", ")", ":", "url", "=", "'/_cluster/stats'", "if", "node_id", ":", "url", "=", "_make_path", "(", "'_cluster/stats/nodes'", ",", "node_id", ")", "return", "self", ".", "transport", ".", "perform_request", "(", "'GET'", ",", "url", ",", "params", "=", "params", ")" ]
51.111111
0.002134
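Typical client-side usage, as a sketch; assumes a reachable cluster and a pre-8.x `elasticsearch-py` where `Elasticsearch()` defaults to localhost:9200:

from elasticsearch import Elasticsearch

es = Elasticsearch()                              # defaults to localhost:9200
whole_cluster = es.cluster.stats()                # all nodes
local_only = es.cluster.stats(node_id='_local')   # just the connected node
print(whole_cluster['nodes']['count'])            # node count breakdown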
def remove(self, interval):
    """
    Returns self after removing the interval and balancing.
    If interval is not present, raise ValueError.
    """
    # since this is a list, called methods can set this to [1],
    # making it true
    done = []
    return self.remove_interval_helper(interval, done, should_raise_error=True)
[ "def", "remove", "(", "self", ",", "interval", ")", ":", "# since this is a list, called methods can set this to [1],", "# making it true", "done", "=", "[", "]", "return", "self", ".", "remove_interval_helper", "(", "interval", ",", "done", ",", "should_raise_error", "=", "True", ")" ]
35.6
0.008219
def where(self, custom_restrictions=[], **restrictions):
    """
    Analog to SQL "WHERE". Does not perform a query until `select` is
    called. Returns a repo object. Options selected through keyword
    arguments are assumed to use == unless the value is a list, tuple, or
    dictionary. List or tuple values translate to an SQL `IN` over those
    values, and a dictionary looks up under a different table when joined.

    ex)
        >>> Repo("foos").where(id=11).select("*")
        SELECT foos.* FROM foos WHERE foos.id == 11
        >>> Repo("foos").where([("id > ?", 12)]).select("*")
        SELECT foos.* FROM foos WHERE foos.id > 12
        >>> Repo("foos").where(id=[1,2,3]).select("*")
        SELECT foos.* FROM foos WHERE foos.id IN (1, 2, 3)
    """
    # Generate the SQL pieces and the relevant values
    standard_names, standard_values = self._standard_items(restrictions)
    custom_names, custom_values = self._custom_items(custom_restrictions)
    in_names, in_values = self._in_items(restrictions)
    query_names = standard_names + custom_names + in_names
    # Stitch them into a clause with values
    if query_names:
        self.where_values = standard_values + custom_values + in_values
        self.where_clause = "where {query} ".format(
            query=" and ".join(query_names))
    return self
[ "def", "where", "(", "self", ",", "custom_restrictions", "=", "[", "]", ",", "*", "*", "restrictions", ")", ":", "# Generate the SQL pieces and the relevant values", "standard_names", ",", "standard_values", "=", "self", ".", "_standard_items", "(", "restrictions", ")", "custom_names", ",", "custom_values", "=", "self", ".", "_custom_items", "(", "custom_restrictions", ")", "in_names", ",", "in_values", "=", "self", ".", "_in_items", "(", "restrictions", ")", "query_names", "=", "standard_names", "+", "custom_names", "+", "in_names", "# Stitch them into a clause with values", "if", "query_names", ":", "self", ".", "where_values", "=", "standard_values", "+", "custom_values", "+", "in_values", "self", ".", "where_clause", "=", "\"where {query} \"", ".", "format", "(", "query", "=", "\" and \"", ".", "join", "(", "query_names", ")", ")", "return", "self" ]
49.392857
0.001418
def convert(input_file_name, **kwargs):
    """Convert CSV file to HTML table"""
    delimiter = kwargs["delimiter"] or ","
    quotechar = kwargs["quotechar"] or "|"

    if six.PY2:
        delimiter = delimiter.encode("utf-8")
        quotechar = quotechar.encode("utf-8")

    # Read CSV and form a header and rows list
    with open(input_file_name, "rb") as input_file:
        reader = csv.reader(input_file,
                            encoding="utf-8",
                            delimiter=delimiter,
                            quotechar=quotechar)

        csv_headers = []
        if not kwargs.get("no_header"):
            # Read header from first line
            csv_headers = next(reader)

        csv_rows = [row for row in reader if row]

        # Set default column name if header is not present
        if not csv_headers and len(csv_rows) > 0:
            end = len(csv_rows[0]) + 1
            csv_headers = ["Column {}".format(n) for n in range(1, end)]

    # Render csv to HTML
    html = render_template(csv_headers, csv_rows, **kwargs)

    # Freeze all JS files in template
    return freeze_js(html)
[ "def", "convert", "(", "input_file_name", ",", "*", "*", "kwargs", ")", ":", "delimiter", "=", "kwargs", "[", "\"delimiter\"", "]", "or", "\",\"", "quotechar", "=", "kwargs", "[", "\"quotechar\"", "]", "or", "\"|\"", "if", "six", ".", "PY2", ":", "delimiter", "=", "delimiter", ".", "encode", "(", "\"utf-8\"", ")", "quotechar", "=", "quotechar", ".", "encode", "(", "\"utf-8\"", ")", "# Read CSV and form a header and rows list", "with", "open", "(", "input_file_name", ",", "\"rb\"", ")", "as", "input_file", ":", "reader", "=", "csv", ".", "reader", "(", "input_file", ",", "encoding", "=", "\"utf-8\"", ",", "delimiter", "=", "delimiter", ",", "quotechar", "=", "quotechar", ")", "csv_headers", "=", "[", "]", "if", "not", "kwargs", ".", "get", "(", "\"no_header\"", ")", ":", "# Read header from first line", "csv_headers", "=", "next", "(", "reader", ")", "csv_rows", "=", "[", "row", "for", "row", "in", "reader", "if", "row", "]", "# Set default column name if header is not present", "if", "not", "csv_headers", "and", "len", "(", "csv_rows", ")", ">", "0", ":", "end", "=", "len", "(", "csv_rows", "[", "0", "]", ")", "+", "1", "csv_headers", "=", "[", "\"Column {}\"", ".", "format", "(", "n", ")", "for", "n", "in", "range", "(", "1", ",", "end", ")", "]", "# Render csv to HTML", "html", "=", "render_template", "(", "csv_headers", ",", "csv_rows", ",", "*", "*", "kwargs", ")", "# Freeze all JS files in template", "return", "freeze_js", "(", "html", ")" ]
33.30303
0.000884
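The header fallback above synthesizes names from the width of the first data row; in isolation (hypothetical rows):

csv_rows = [['1', 'foo'], ['2', 'bar']]
end = len(csv_rows[0]) + 1
csv_headers = ['Column {}'.format(n) for n in range(1, end)]
print(csv_headers)  # ['Column 1', 'Column 2']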
def plot_one_month(x, y, xlabel=None, ylabel=None, title=None, ylim=None):
    """Plot data whose time span is one month.

    major tick = every day
    """
    plt.close("all")
    fig = plt.figure(figsize=(20, 10))
    ax = fig.add_subplot(111)
    ax.plot(x, y)

    days = DayLocator(range(365))
    daysFmt = DateFormatter("%Y-%m-%d")
    ax.xaxis.set_major_locator(days)
    ax.xaxis.set_major_formatter(daysFmt)
    ax.autoscale_view()
    ax.grid()
    plt.setp(ax.xaxis.get_majorticklabels(), rotation=90)

    if xlabel:
        plt.xlabel(xlabel)
    else:
        plt.xlabel("Time")

    if ylabel:
        plt.ylabel(ylabel)
    else:
        plt.ylabel("Value")

    if title:
        plt.title(title)
    else:
        plt.title("%s to %s" % (str(x[0]), str(x[-1])))

    if ylim:
        plt.ylim(ylim)
    else:
        plt.ylim([min(y) - (max(y) - min(y)) * 0.05,
                  max(y) + (max(y) - min(y)) * 0.05])

    return plt, ax
[ "def", "plot_one_month", "(", "x", ",", "y", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "title", "=", "None", ",", "ylim", "=", "None", ")", ":", "plt", ".", "close", "(", "\"all\"", ")", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "20", ",", "10", ")", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "ax", ".", "plot", "(", "x", ",", "y", ")", "days", "=", "DayLocator", "(", "range", "(", "365", ")", ")", "daysFmt", "=", "DateFormatter", "(", "\"%Y-%m-%d\"", ")", "ax", ".", "xaxis", ".", "set_major_locator", "(", "days", ")", "ax", ".", "xaxis", ".", "set_major_formatter", "(", "daysFmt", ")", "ax", ".", "autoscale_view", "(", ")", "ax", ".", "grid", "(", ")", "plt", ".", "setp", "(", "ax", ".", "xaxis", ".", "get_majorticklabels", "(", ")", ",", "rotation", "=", "90", ")", "if", "xlabel", ":", "plt", ".", "xlabel", "(", "xlabel", ")", "else", ":", "plt", ".", "xlabel", "(", "\"Time\"", ")", "if", "ylabel", ":", "plt", ".", "ylabel", "(", "ylabel", ")", "else", ":", "plt", ".", "ylabel", "(", "\"Value\"", ")", "if", "title", ":", "plt", ".", "title", "(", "title", ")", "else", ":", "plt", ".", "title", "(", "\"%s to %s\"", "%", "(", "str", "(", "x", "[", "0", "]", ")", ",", "str", "(", "x", "[", "-", "1", "]", ")", ")", ")", "if", "ylim", ":", "plt", ".", "ylim", "(", "ylim", ")", "else", ":", "plt", ".", "ylim", "(", "[", "min", "(", "y", ")", "-", "(", "max", "(", "y", ")", "-", "min", "(", "y", ")", ")", "*", "0.05", ",", "max", "(", "y", ")", "+", "(", "max", "(", "y", ")", "-", "min", "(", "y", ")", ")", "*", "0.05", "]", ")", "return", "plt", ",", "ax" ]
21.227273
0.018424
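When `ylim` is not given, the plot pads the data range by 5% on each side; the arithmetic in isolation (hypothetical data):

y = [3.0, 7.0, 5.0]
pad = (max(y) - min(y)) * 0.05
ylim = [min(y) - pad, max(y) + pad]
print(ylim)  # [2.8, 7.2]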
def abort(http_status_code, exc=None, **kwargs):
    """Raise an HTTPException for the given http_status_code. Attach any
    keyword arguments to the exception for later processing.

    From Flask-Restful. See NOTICE file for license information.
    """
    try:
        sanic.exceptions.abort(http_status_code, exc)
    except sanic.exceptions.SanicException as err:
        err.data = kwargs
        err.exc = exc
        raise err
[ "def", "abort", "(", "http_status_code", ",", "exc", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "sanic", ".", "exceptions", ".", "abort", "(", "http_status_code", ",", "exc", ")", "except", "sanic", ".", "exceptions", ".", "SanicException", "as", "err", ":", "err", ".", "data", "=", "kwargs", "err", ".", "exc", "=", "exc", "raise", "err" ]
35.333333
0.002299
def enable_glut(self, app=None):
    """ Enable event loop integration with GLUT.

    Parameters
    ----------

    app : ignored
       Ignored, it's only a placeholder to keep the call signature of all
       gui activation methods consistent, which simplifies the logic of
       supporting magics.

    Notes
    -----

    This method sets the PyOS_InputHook for GLUT, which allows GLUT to
    integrate with terminal based applications like IPython. Due to GLUT
    limitations, it is currently not possible to start the event loop
    without first creating a window. You should thus not create another
    window but use instead the created one. See 'gui-glut.py' in the
    docs/examples/lib directory.

    The default screen mode is set to:
    glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
    """
    import OpenGL.GLUT as glut  # @UnresolvedImport
    from pydev_ipython.inputhookglut import glut_display_mode, \
        glut_close, glut_display, \
        glut_idle, inputhook_glut

    if GUI_GLUT not in self._apps:
        glut.glutInit(sys.argv)
        glut.glutInitDisplayMode(glut_display_mode)
        # This is specific to freeglut
        if bool(glut.glutSetOption):
            glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
                               glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
        glut.glutCreateWindow(sys.argv[0])
        glut.glutReshapeWindow(1, 1)
        glut.glutHideWindow()
        glut.glutWMCloseFunc(glut_close)
        glut.glutDisplayFunc(glut_display)
        glut.glutIdleFunc(glut_idle)
    else:
        glut.glutWMCloseFunc(glut_close)
        glut.glutDisplayFunc(glut_display)
        glut.glutIdleFunc(glut_idle)

    self.set_inputhook(inputhook_glut)
    self._current_gui = GUI_GLUT
    self._apps[GUI_GLUT] = True
[ "def", "enable_glut", "(", "self", ",", "app", "=", "None", ")", ":", "import", "OpenGL", ".", "GLUT", "as", "glut", "# @UnresolvedImport", "from", "pydev_ipython", ".", "inputhookglut", "import", "glut_display_mode", ",", "glut_close", ",", "glut_display", ",", "glut_idle", ",", "inputhook_glut", "if", "GUI_GLUT", "not", "in", "self", ".", "_apps", ":", "glut", ".", "glutInit", "(", "sys", ".", "argv", ")", "glut", ".", "glutInitDisplayMode", "(", "glut_display_mode", ")", "# This is specific to freeglut", "if", "bool", "(", "glut", ".", "glutSetOption", ")", ":", "glut", ".", "glutSetOption", "(", "glut", ".", "GLUT_ACTION_ON_WINDOW_CLOSE", ",", "glut", ".", "GLUT_ACTION_GLUTMAINLOOP_RETURNS", ")", "glut", ".", "glutCreateWindow", "(", "sys", ".", "argv", "[", "0", "]", ")", "glut", ".", "glutReshapeWindow", "(", "1", ",", "1", ")", "glut", ".", "glutHideWindow", "(", ")", "glut", ".", "glutWMCloseFunc", "(", "glut_close", ")", "glut", ".", "glutDisplayFunc", "(", "glut_display", ")", "glut", ".", "glutIdleFunc", "(", "glut_idle", ")", "else", ":", "glut", ".", "glutWMCloseFunc", "(", "glut_close", ")", "glut", ".", "glutDisplayFunc", "(", "glut_display", ")", "glut", ".", "glutIdleFunc", "(", "glut_idle", ")", "self", ".", "set_inputhook", "(", "inputhook_glut", ")", "self", ".", "_current_gui", "=", "GUI_GLUT", "self", ".", "_apps", "[", "GUI_GLUT", "]", "=", "True" ]
39.96
0.002443
def isns_isns_vrf_isns_discovery_domain_isns_discovery_domain_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    isns = ET.SubElement(config, "isns", xmlns="urn:brocade.com:mgmt:brocade-isns")
    isns_vrf = ET.SubElement(isns, "isns-vrf")
    isns_vrf_instance_key = ET.SubElement(isns_vrf, "isns-vrf-instance")
    isns_vrf_instance_key.text = kwargs.pop('isns_vrf_instance')
    isns_discovery_domain = ET.SubElement(isns_vrf, "isns-discovery-domain")
    isns_discovery_domain_name = ET.SubElement(isns_discovery_domain, "isns-discovery-domain-name")
    isns_discovery_domain_name.text = kwargs.pop('isns_discovery_domain_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "isns_isns_vrf_isns_discovery_domain_isns_discovery_domain_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "isns", "=", "ET", ".", "SubElement", "(", "config", ",", "\"isns\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-isns\"", ")", "isns_vrf", "=", "ET", ".", "SubElement", "(", "isns", ",", "\"isns-vrf\"", ")", "isns_vrf_instance_key", "=", "ET", ".", "SubElement", "(", "isns_vrf", ",", "\"isns-vrf-instance\"", ")", "isns_vrf_instance_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'isns_vrf_instance'", ")", "isns_discovery_domain", "=", "ET", ".", "SubElement", "(", "isns_vrf", ",", "\"isns-discovery-domain\"", ")", "isns_discovery_domain_name", "=", "ET", ".", "SubElement", "(", "isns_discovery_domain", ",", "\"isns-discovery-domain-name\"", ")", "isns_discovery_domain_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'isns_discovery_domain_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
56.785714
0.008663
def from_computed_structure_entry(entry, miller_index, label=None,
                                  adsorbates=None, clean_entry=None, **kwargs):
    """
    Returns SlabEntry from a ComputedStructureEntry
    """
    return SlabEntry(entry.structure, entry.energy, miller_index,
                     label=label, adsorbates=adsorbates,
                     clean_entry=clean_entry, **kwargs)
[ "def", "from_computed_structure_entry", "(", "entry", ",", "miller_index", ",", "label", "=", "None", ",", "adsorbates", "=", "None", ",", "clean_entry", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "SlabEntry", "(", "entry", ".", "structure", ",", "entry", ".", "energy", ",", "miller_index", ",", "label", "=", "label", ",", "adsorbates", "=", "adsorbates", ",", "clean_entry", "=", "clean_entry", ",", "*", "*", "kwargs", ")" ]
55.714286
0.015152
def handle_json_wrapper_GET(self, handler, parsed_params):
    """Call handler and output the return value in JSON."""
    schedule = self.server.schedule
    result = handler(parsed_params)
    content = ResultEncoder().encode(result)
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.send_header('Content-Length', str(len(content)))
    self.end_headers()
    self.wfile.write(content)
[ "def", "handle_json_wrapper_GET", "(", "self", ",", "handler", ",", "parsed_params", ")", ":", "schedule", "=", "self", ".", "server", ".", "schedule", "result", "=", "handler", "(", "parsed_params", ")", "content", "=", "ResultEncoder", "(", ")", ".", "encode", "(", "result", ")", "self", ".", "send_response", "(", "200", ")", "self", ".", "send_header", "(", "'Content-Type'", ",", "'text/plain'", ")", "self", ".", "send_header", "(", "'Content-Length'", ",", "str", "(", "len", "(", "content", ")", ")", ")", "self", ".", "end_headers", "(", ")", "self", ".", "wfile", ".", "write", "(", "content", ")" ]
41.6
0.002353
def draw_box(cb, x0, y0, w, h, fg=colors.default_fg, bg=colors.default_bg,
             h_seps=[], v_seps=[]):
    """
    Draws a box in the given terminal.

    :type cb: cursebox.CurseBox
    """
    w -= 1
    h -= 1
    corners = [(x0, y0), (x0 + w, y0), (x0, y0 + h), (x0 + w, y0 + h)]

    fg = fg()
    bg = bg()

    for i, c in enumerate(corners):
        cb.put(c[0], c[1], BOX_CORNERS[i], fg, bg)

    for s in h_seps + [0, h]:
        cb.put(x0 + 1, y0 + s, symbols["BOX_HORIZONTAL"] * (w - 1), fg, bg)

    for y in range(1, h):
        for s in v_seps + [0, w]:
            cb.put(x0 + s, y0 + y, symbols["BOX_VERTICAL"], fg, bg)

    for s in h_seps:
        cb.put(x0, y0 + s, symbols["BOX_X_LEFT"], fg, bg)
        cb.put(x0 + w, y0 + s, symbols["BOX_X_RIGHT"], fg, bg)

    for s in v_seps:
        cb.put(x0 + s, y0, symbols["BOX_X_TOP"], fg, bg)
        cb.put(x0 + s, y0 + h, symbols["BOX_X_BOTTOM"], fg, bg)
[ "def", "draw_box", "(", "cb", ",", "x0", ",", "y0", ",", "w", ",", "h", ",", "fg", "=", "colors", ".", "default_fg", ",", "bg", "=", "colors", ".", "default_bg", ",", "h_seps", "=", "[", "]", ",", "v_seps", "=", "[", "]", ")", ":", "w", "-=", "1", "h", "-=", "1", "corners", "=", "[", "(", "x0", ",", "y0", ")", ",", "(", "x0", "+", "w", ",", "y0", ")", ",", "(", "x0", ",", "y0", "+", "h", ")", ",", "(", "x0", "+", "w", ",", "y0", "+", "h", ")", "]", "fg", "=", "fg", "(", ")", "bg", "=", "bg", "(", ")", "for", "i", ",", "c", "in", "enumerate", "(", "corners", ")", ":", "cb", ".", "put", "(", "c", "[", "0", "]", ",", "c", "[", "1", "]", ",", "BOX_CORNERS", "[", "i", "]", ",", "fg", ",", "bg", ")", "for", "s", "in", "h_seps", "+", "[", "0", ",", "h", "]", ":", "cb", ".", "put", "(", "x0", "+", "1", ",", "y0", "+", "s", ",", "symbols", "[", "\"BOX_HORIZONTAL\"", "]", "*", "(", "w", "-", "1", ")", ",", "fg", ",", "bg", ")", "for", "y", "in", "range", "(", "1", ",", "h", ")", ":", "for", "s", "in", "v_seps", "+", "[", "0", ",", "w", "]", ":", "cb", ".", "put", "(", "x0", "+", "s", ",", "y0", "+", "y", ",", "symbols", "[", "\"BOX_VERTICAL\"", "]", ",", "fg", ",", "bg", ")", "for", "s", "in", "h_seps", ":", "cb", ".", "put", "(", "x0", ",", "y0", "+", "s", ",", "symbols", "[", "\"BOX_X_LEFT\"", "]", ",", "fg", ",", "bg", ")", "cb", ".", "put", "(", "x0", "+", "w", ",", "y0", "+", "s", ",", "symbols", "[", "\"BOX_X_RIGHT\"", "]", ",", "fg", ",", "bg", ")", "for", "s", "in", "v_seps", ":", "cb", ".", "put", "(", "x0", "+", "s", ",", "y0", ",", "symbols", "[", "\"BOX_X_TOP\"", "]", ",", "fg", ",", "bg", ")", "cb", ".", "put", "(", "x0", "+", "s", ",", "y0", "+", "h", ",", "symbols", "[", "\"BOX_X_BOTTOM\"", "]", ",", "fg", ",", "bg", ")" ]
31.034483
0.002155
def _add_parser_arguments_git(self, subparsers):
    """Create sub-parsers for git subcommands.
    """
    subparsers.add_parser(
        "git-clone",
        help="Clone all defined data repositories if they don't exist.")
    subparsers.add_parser(
        "git-push",
        help="Add all files to data repositories, commit, and push.")
    subparsers.add_parser(
        "git-pull",
        help="'Pull' all data repositories.")
    subparsers.add_parser(
        "git-reset-local",
        help="Hard reset all data repositories using local 'HEAD'.")
    subparsers.add_parser(
        "git-reset-origin",
        help="Hard reset all data repositories using 'origin/master'.")
    subparsers.add_parser(
        "git-status",
        help="Get the 'git status' of all data repositories.")
    return
[ "def", "_add_parser_arguments_git", "(", "self", ",", "subparsers", ")", ":", "subparsers", ".", "add_parser", "(", "\"git-clone\"", ",", "help", "=", "\"Clone all defined data repositories if they dont exist.\"", ")", "subparsers", ".", "add_parser", "(", "\"git-push\"", ",", "help", "=", "\"Add all files to data repositories, commit, and push.\"", ")", "subparsers", ".", "add_parser", "(", "\"git-pull\"", ",", "help", "=", "\"'Pull' all data repositories.\"", ")", "subparsers", ".", "add_parser", "(", "\"git-reset-local\"", ",", "help", "=", "\"Hard reset all data repositories using local 'HEAD'.\"", ")", "subparsers", ".", "add_parser", "(", "\"git-reset-origin\"", ",", "help", "=", "\"Hard reset all data repositories using 'origin/master'.\"", ")", "subparsers", ".", "add_parser", "(", "\"git-status\"", ",", "help", "=", "\"Get the 'git status' of all data repositories.\"", ")", "return" ]
31.107143
0.002227
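For context, these `add_parser` calls follow the standard `argparse` sub-command pattern; a minimal self-contained equivalent:

import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
subparsers.add_parser(
    "git-status",
    help="Get the 'git status' of all data repositories.")
args = parser.parse_args(['git-status'])
print(args.command)  # git-status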
def parse(self, fd):
    """very simple parser - but why would we want it to be complex?"""

    def resolve_args(args):
        # FIXME break this out, it's in common with the templating stuff elsewhere
        root = self.sections[0]
        val_dict = dict(('<' + t + '>', u) for (t, u) in root.get_variables().items())
        resolved_args = []
        for arg in args:
            for subst, value in val_dict.items():
                arg = arg.replace(subst, value)
            resolved_args.append(arg)
        return resolved_args

    def handle_section_defn(keyword, parts):
        if keyword == '@HostAttrs':
            if len(parts) != 1:
                raise ParserException('usage: @HostAttrs <hostname>')
            if self.sections[0].has_pending_with():
                raise ParserException('@with not supported with @HostAttrs')
            self.sections.append(HostAttrs(parts[0]))
            return True
        if keyword == 'Host':
            if len(parts) != 1:
                raise ParserException('usage: Host <hostname>')
            self.sections.append(Host(parts[0], self.sections[0].pop_pending_with()))
            return True

    def handle_vardef(root, keyword, parts):
        if keyword == '@with':
            root.add_pending_with(parts)
            return True

    def handle_set_args(_, parts):
        if len(parts) == 0:
            raise ParserException('usage: @args arg-name ...')
        if not self.is_include():
            return
        if self._args is None or len(self._args) != len(parts):
            raise ParserException('required arguments not passed to include {url} ({parts})'.format(
                url=self._url, parts=', '.join(parts)))
        root = self.sections[0]
        for key, value in zip(parts, self._args):
            root.set_value(key, value)

    def handle_set_value(_, parts):
        if len(parts) != 2:
            raise ParserException('usage: @set <key> <value>')
        root = self.sections[0]
        root.set_value(*resolve_args(parts))

    def handle_add_type(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @is <HostAttrName>')
        section.add_type(parts[0])

    def handle_via(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @via <Hostname>')
        section.add_line(
            'ProxyCommand',
            ('ssh {args} nc %h %p 2> /dev/null'.format(args=pipes.quote(resolve_args(parts)[0])), )
        )

    def handle_identity(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @identity <name>')
        section.add_identity(resolve_args(parts)[0])

    def handle_include(_, parts):
        if len(parts) == 0:
            raise ParserException('usage: @include <https://...|/path/to/file.sedge> [arg ...]')
        url = parts[0]
        parsed_url = urllib.parse.urlparse(url)
        if parsed_url.scheme == 'https':
            req = requests.get(url, verify=self._verify_ssl)
            text = req.text
        elif parsed_url.scheme == 'file':
            with open(parsed_url.path) as fd:
                text = fd.read()
        elif parsed_url.scheme == '':
            path = os.path.expanduser(url)
            with open(path) as fd:
                text = fd.read()
        else:
            raise SecurityException('error: @includes may only use paths or https:// or file:// URLs')
        subconfig = SedgeEngine(
            self._key_library,
            StringIO(text),
            self._verify_ssl,
            url=url,
            args=resolve_args(parts[1:]),
            parent_keydefs=self.keydefs,
            via_include=True)
        self.includes.append((url, subconfig))

    def handle_keydef(_, parts):
        if len(parts) < 2:
            raise ParserException('usage: @key <name> [fingerprint]...')
        name = parts[0]
        fingerprints = parts[1:]
        self.keydefs[name] = fingerprints

    def handle_keyword(section, keyword, parts):
        handlers = {
            '@set': handle_set_value,
            '@args': handle_set_args,
            '@is': handle_add_type,
            '@via': handle_via,
            '@include': handle_include,
            '@key': handle_keydef,
            '@identity': handle_identity
        }
        if keyword in handlers:
            handlers[keyword](section, parts)
            return True

    for line in (t.strip() for t in fd):
        if line.startswith('#') or line == '':
            continue
        keyword, parts = SedgeEngine.parse_config_line(line)
        if handle_section_defn(keyword, parts):
            continue
        if handle_vardef(self.sections[0], keyword, parts):
            continue
        current_section = self.sections[-1]
        if handle_keyword(current_section, keyword, parts):
            continue
        if keyword.startswith('@'):
            raise ParserException("unknown expansion keyword {}".format(keyword))
        # use other rather than parts to avoid messing up user
        # whitespace; we don't handle quotes in here as we don't
        # need to
        current_section.add_line(keyword, parts)
[ "def", "parse", "(", "self", ",", "fd", ")", ":", "def", "resolve_args", "(", "args", ")", ":", "# FIXME break this out, it's in common with the templating stuff elsewhere", "root", "=", "self", ".", "sections", "[", "0", "]", "val_dict", "=", "dict", "(", "(", "'<'", "+", "t", "+", "'>'", ",", "u", ")", "for", "(", "t", ",", "u", ")", "in", "root", ".", "get_variables", "(", ")", ".", "items", "(", ")", ")", "resolved_args", "=", "[", "]", "for", "arg", "in", "args", ":", "for", "subst", ",", "value", "in", "val_dict", ".", "items", "(", ")", ":", "arg", "=", "arg", ".", "replace", "(", "subst", ",", "value", ")", "resolved_args", ".", "append", "(", "arg", ")", "return", "resolved_args", "def", "handle_section_defn", "(", "keyword", ",", "parts", ")", ":", "if", "keyword", "==", "'@HostAttrs'", ":", "if", "len", "(", "parts", ")", "!=", "1", ":", "raise", "ParserException", "(", "'usage: @HostAttrs <hostname>'", ")", "if", "self", ".", "sections", "[", "0", "]", ".", "has_pending_with", "(", ")", ":", "raise", "ParserException", "(", "'@with not supported with @HostAttrs'", ")", "self", ".", "sections", ".", "append", "(", "HostAttrs", "(", "parts", "[", "0", "]", ")", ")", "return", "True", "if", "keyword", "==", "'Host'", ":", "if", "len", "(", "parts", ")", "!=", "1", ":", "raise", "ParserException", "(", "'usage: Host <hostname>'", ")", "self", ".", "sections", ".", "append", "(", "Host", "(", "parts", "[", "0", "]", ",", "self", ".", "sections", "[", "0", "]", ".", "pop_pending_with", "(", ")", ")", ")", "return", "True", "def", "handle_vardef", "(", "root", ",", "keyword", ",", "parts", ")", ":", "if", "keyword", "==", "'@with'", ":", "root", ".", "add_pending_with", "(", "parts", ")", "return", "True", "def", "handle_set_args", "(", "_", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "==", "0", ":", "raise", "ParserException", "(", "'usage: @args arg-name ...'", ")", "if", "not", "self", ".", "is_include", "(", ")", ":", "return", "if", "self", ".", "_args", "is", "None", "or", "len", "(", "self", ".", "_args", ")", "!=", "len", "(", "parts", ")", ":", "raise", "ParserException", "(", "'required arguments not passed to include {url} ({parts})'", ".", "format", "(", "url", "=", "self", ".", "_url", ",", "parts", "=", "', '", ".", "join", "(", "parts", ")", ")", ")", "root", "=", "self", ".", "sections", "[", "0", "]", "for", "key", ",", "value", "in", "zip", "(", "parts", ",", "self", ".", "_args", ")", ":", "root", ".", "set_value", "(", "key", ",", "value", ")", "def", "handle_set_value", "(", "_", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "!=", "2", ":", "raise", "ParserException", "(", "'usage: @set <key> <value>'", ")", "root", "=", "self", ".", "sections", "[", "0", "]", "root", ".", "set_value", "(", "*", "resolve_args", "(", "parts", ")", ")", "def", "handle_add_type", "(", "section", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "!=", "1", ":", "raise", "ParserException", "(", "'usage: @is <HostAttrName>'", ")", "section", ".", "add_type", "(", "parts", "[", "0", "]", ")", "def", "handle_via", "(", "section", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "!=", "1", ":", "raise", "ParserException", "(", "'usage: @via <Hostname>'", ")", "section", ".", "add_line", "(", "'ProxyCommand'", ",", "(", "'ssh {args} nc %h %p 2> /dev/null'", ".", "format", "(", "args", "=", "pipes", ".", "quote", "(", "resolve_args", "(", "parts", ")", "[", "0", "]", ")", ")", ",", ")", ")", "def", "handle_identity", "(", "section", ",", "parts", ")", 
":", "if", "len", "(", "parts", ")", "!=", "1", ":", "raise", "ParserException", "(", "'usage: @identity <name>'", ")", "section", ".", "add_identity", "(", "resolve_args", "(", "parts", ")", "[", "0", "]", ")", "def", "handle_include", "(", "_", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "==", "0", ":", "raise", "ParserException", "(", "'usage: @include <https://...|/path/to/file.sedge> [arg ...]'", ")", "url", "=", "parts", "[", "0", "]", "parsed_url", "=", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", "if", "parsed_url", ".", "scheme", "==", "'https'", ":", "req", "=", "requests", ".", "get", "(", "url", ",", "verify", "=", "self", ".", "_verify_ssl", ")", "text", "=", "req", ".", "text", "elif", "parsed_url", ".", "scheme", "==", "'file'", ":", "with", "open", "(", "parsed_url", ".", "path", ")", "as", "fd", ":", "text", "=", "fd", ".", "read", "(", ")", "elif", "parsed_url", ".", "scheme", "==", "''", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "url", ")", "with", "open", "(", "path", ")", "as", "fd", ":", "text", "=", "fd", ".", "read", "(", ")", "else", ":", "raise", "SecurityException", "(", "'error: @includes may only use paths or https:// or file:// URLs'", ")", "subconfig", "=", "SedgeEngine", "(", "self", ".", "_key_library", ",", "StringIO", "(", "text", ")", ",", "self", ".", "_verify_ssl", ",", "url", "=", "url", ",", "args", "=", "resolve_args", "(", "parts", "[", "1", ":", "]", ")", ",", "parent_keydefs", "=", "self", ".", "keydefs", ",", "via_include", "=", "True", ")", "self", ".", "includes", ".", "append", "(", "(", "url", ",", "subconfig", ")", ")", "def", "handle_keydef", "(", "_", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "<", "2", ":", "raise", "ParserException", "(", "'usage: @key <name> [fingerprint]...'", ")", "name", "=", "parts", "[", "0", "]", "fingerprints", "=", "parts", "[", "1", ":", "]", "self", ".", "keydefs", "[", "name", "]", "=", "fingerprints", "def", "handle_keyword", "(", "section", ",", "keyword", ",", "parts", ")", ":", "handlers", "=", "{", "'@set'", ":", "handle_set_value", ",", "'@args'", ":", "handle_set_args", ",", "'@is'", ":", "handle_add_type", ",", "'@via'", ":", "handle_via", ",", "'@include'", ":", "handle_include", ",", "'@key'", ":", "handle_keydef", ",", "'@identity'", ":", "handle_identity", "}", "if", "keyword", "in", "handlers", ":", "handlers", "[", "keyword", "]", "(", "section", ",", "parts", ")", "return", "True", "for", "line", "in", "(", "t", ".", "strip", "(", ")", "for", "t", "in", "fd", ")", ":", "if", "line", ".", "startswith", "(", "'#'", ")", "or", "line", "==", "''", ":", "continue", "keyword", ",", "parts", "=", "SedgeEngine", ".", "parse_config_line", "(", "line", ")", "if", "handle_section_defn", "(", "keyword", ",", "parts", ")", ":", "continue", "if", "handle_vardef", "(", "self", ".", "sections", "[", "0", "]", ",", "keyword", ",", "parts", ")", ":", "continue", "current_section", "=", "self", ".", "sections", "[", "-", "1", "]", "if", "handle_keyword", "(", "current_section", ",", "keyword", ",", "parts", ")", ":", "continue", "if", "keyword", ".", "startswith", "(", "'@'", ")", ":", "raise", "ParserException", "(", "\"unknown expansion keyword {}\"", ".", "format", "(", "keyword", ")", ")", "# use other rather than parts to avoid messing up user", "# whitespace; we don't handle quotes in here as we don't", "# need to", "current_section", ".", "add_line", "(", "keyword", ",", "parts", ")" ]
40.49635
0.001935
def flair(self, name, text, css_class):
        """Sets flair for the user ``name`` in this subreddit (POST).  Calls :meth:`narwal.Reddit.flair`.

        :param name: name of the user
        :param text: flair text to assign
        :param css_class: CSS class to assign to flair text
        """
        return self._reddit.flair(self.display_name, name, text, css_class)
[ "def", "flair", "(", "self", ",", "name", ",", "text", ",", "css_class", ")", ":", "return", "self", ".", "_reddit", ".", "flair", "(", "self", ".", "display_name", ",", "name", ",", "text", ",", "css_class", ")" ]
46
0.010667
def forget(self, key):
        """
        Remove an item from the cache.

        :param key: The cache key
        :type key: str

        :rtype: bool
        """
        return bool(self._redis.delete(self._prefix + key))
[ "def", "forget", "(", "self", ",", "key", ")", ":", "return", "bool", "(", "self", ".", "_redis", ".", "delete", "(", "self", ".", "_prefix", "+", "key", ")", ")" ]
21.6
0.008889
def fit(self, X=None, u=None, s=None):
        """Fit X into an embedded space.
        Inputs
        ----------
        X : array, shape (n_samples, n_features)
        u,s,v : svd decomposition of X (optional)

        Assigns
        ----------
        embedding : array-like, shape (n_samples, n_components)
            Stores the embedding vectors.
        u,sv,v : singular value decomposition of data S, potentially with smoothing
        isort1 : sorting along first dimension of matrix
        isort2 : sorting along second dimension of matrix (if n_Y > 0)
        cmap: correlation of each item with all locations in the embedding map (before upsampling)
        A: PC coefficients of each Fourier mode
        """
        X = X.copy()
        if self.mode == 'parallel':
            Xall = X.copy()
            X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
        #X -= X.mean(axis=-1)[:,np.newaxis]
        if u is None:
            # compute svd and keep iPC's of data
            nmin = min([X.shape[0], X.shape[1]])
            nmin = np.minimum(nmin-1, self.nPC)
            u, sv, v = svdecon(np.float64(X), k=nmin)
            u = u * sv

        NN, self.nPC = u.shape

        # first smooth in Y (if n_Y > 0)
        self.u = u

        if self.mode == 'parallel':
            NN = Xall.shape[1]
            X = np.zeros((2, NN, u.shape[1]), 'float64')
            for j in range(2):
                Xall[j] -= Xall[j].mean(axis=-1)[:, np.newaxis]
                X[j] = Xall[j] @ self.v

        utu = np.sum(u**2, axis=1)
        ikeep = np.argmax(utu)
        #ikeep = int(NN/2)
        #ikeep = np.random.randint(0, NN)
        ccu = u @ u[ikeep, :]
        cmax = np.maximum(0, ccu)**2 / utu
        ikeep = np.argsort(cmax)[::-1]
        ikeep = ikeep[:int(NN/10)]
        ikeep = np.sort(ikeep)

        if self.init == 'pca':
            U = svdecon(u[ikeep, :], k=2)[0]
            #U = u[ikeep, :2]
            usort = U * np.sign(skew(U, axis=0))
            init_sort = np.argsort(usort[:, :self.n_components], axis=0)
        elif self.init == 'random':
            init_sort = np.random.permutation(len(ikeep))[:, np.newaxis]
            for j in range(1, self.n_components):
                init_sort = np.concatenate((init_sort,
                                            np.random.permutation(len(ikeep))[:, np.newaxis]),
                                           axis=-1)
        else:
            init_sort = self.init

        if self.n_components == 1 and init_sort.ndim == 1:
            init_sort = init_sort[:, np.newaxis]

        # now sort in X
        isort1, iclustup = self._map(u.copy(), self.n_components, self.n_X,
                                     init_sort, ikeep, s)
        self.isort = isort1
        self.embedding = iclustup
        return self
[ "def", "fit", "(", "self", ",", "X", "=", "None", ",", "u", "=", "None", ",", "s", "=", "None", ")", ":", "X", "=", "X", ".", "copy", "(", ")", "if", "self", ".", "mode", "is", "'parallel'", ":", "Xall", "=", "X", ".", "copy", "(", ")", "X", "=", "np", ".", "reshape", "(", "Xall", ".", "copy", "(", ")", ",", "(", "-", "1", ",", "Xall", ".", "shape", "[", "-", "1", "]", ")", ")", "#X -= X.mean(axis=-1)[:,np.newaxis]", "if", "(", "(", "u", "is", "None", ")", ")", ":", "# compute svd and keep iPC's of data", "nmin", "=", "min", "(", "[", "X", ".", "shape", "[", "0", "]", ",", "X", ".", "shape", "[", "1", "]", "]", ")", "nmin", "=", "np", ".", "minimum", "(", "nmin", "-", "1", ",", "self", ".", "nPC", ")", "u", ",", "sv", ",", "v", "=", "svdecon", "(", "np", ".", "float64", "(", "X", ")", ",", "k", "=", "nmin", ")", "u", "=", "u", "*", "sv", "NN", ",", "self", ".", "nPC", "=", "u", ".", "shape", "# first smooth in Y (if n_Y > 0)", "self", ".", "u", "=", "u", "if", "self", ".", "mode", "is", "'parallel'", ":", "NN", "=", "Xall", ".", "shape", "[", "1", "]", "X", "=", "np", ".", "zeros", "(", "(", "2", ",", "NN", ",", "u", ".", "shape", "[", "1", "]", ")", ",", "'float64'", ")", "for", "j", "in", "range", "(", "2", ")", ":", "Xall", "[", "j", "]", "-=", "Xall", "[", "j", "]", ".", "mean", "(", "axis", "=", "-", "1", ")", "[", ":", ",", "np", ".", "newaxis", "]", "X", "[", "j", "]", "=", "Xall", "[", "j", "]", "@", "self", ".", "v", "utu", "=", "np", ".", "sum", "(", "u", "**", "2", ",", "axis", "=", "1", ")", "ikeep", "=", "np", ".", "argmax", "(", "utu", ")", "#ikeep = int(NN/2)", "#ikeep = np.random.randint(0, NN)", "ccu", "=", "u", "@", "u", "[", "ikeep", ",", ":", "]", "cmax", "=", "np", ".", "maximum", "(", "0", ",", "ccu", ")", "**", "2", "/", "utu", "ikeep", "=", "np", ".", "argsort", "(", "cmax", ")", "[", ":", ":", "-", "1", "]", "ikeep", "=", "ikeep", "[", ":", "int", "(", "NN", "/", "10", ")", "]", "ikeep", "=", "np", ".", "sort", "(", "ikeep", ")", "if", "self", ".", "init", "==", "'pca'", ":", "U", "=", "svdecon", "(", "u", "[", "ikeep", ",", ":", "]", ",", "k", "=", "2", ")", "[", "0", "]", "#U = u[ikeep, :2]", "usort", "=", "U", "*", "np", ".", "sign", "(", "skew", "(", "U", ",", "axis", "=", "0", ")", ")", "init_sort", "=", "np", ".", "argsort", "(", "usort", "[", ":", ",", ":", "self", ".", "n_components", "]", ",", "axis", "=", "0", ")", "elif", "self", ".", "init", "==", "'random'", ":", "init_sort", "=", "np", ".", "random", ".", "permutation", "(", "len", "(", "ikeep", ")", ")", "[", ":", ",", "np", ".", "newaxis", "]", "for", "j", "in", "range", "(", "1", ",", "self", ".", "n_components", ")", ":", "init_sort", "=", "np", ".", "concatenate", "(", "(", "init_sort", ",", "np", ".", "random", ".", "permutation", "(", "len", "(", "ikeep", ")", ")", "[", ":", ",", "np", ".", "newaxis", "]", ")", ",", "axis", "=", "-", "1", ")", "else", ":", "init_sort", "=", "self", ".", "init", "if", "self", ".", "n_components", "==", "1", "and", "init_sort", ".", "ndim", "==", "1", ":", "init_sort", "=", "init_sort", "[", ":", ",", "np", ".", "newaxis", "]", "# now sort in X", "isort1", ",", "iclustup", "=", "self", ".", "_map", "(", "u", ".", "copy", "(", ")", ",", "self", ".", "n_components", ",", "self", ".", "n_X", ",", "init_sort", ",", "ikeep", ",", "s", ")", "self", ".", "isort", "=", "isort1", "self", ".", "embedding", "=", "iclustup", "return", "self" ]
37.4
0.008188
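A minimal usage sketch for the fit method above. The class name Rastermap is an assumption; the snippet relies only on what the code guarantees: fit returns self and sets the .isort and .embedding attributes.

>>> import numpy as np
>>> X = np.random.rand(200, 50)           # (n_samples, n_features)
>>> model = Rastermap(n_components=1)     # hypothetical constructor name
>>> emb = model.fit(X).embedding          # fit returns self
>>> order = model.isort                   # sorting along the first dimension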
def set_dict_value(dictionary, keys, value):
    """
    Set a value in a (nested) dictionary by defining a list of keys.

    .. note:: Side-effects
              This function does not make a copy of dictionary, but directly
              edits it.

    Parameters
    ----------
    dictionary : dict
    keys : List[Any]
    value : object

    Returns
    -------
    dictionary : dict

    Examples
    --------
    >>> d = {'a': {'b': 'c', 'd': 'e'}}
    >>> expected = {'a': {'b': 'foobar', 'd': 'e'}}
    >>> set_dict_value(d, ['a', 'b'], 'foobar') == expected
    True
    """
    orig = dictionary
    for key in keys[:-1]:
        dictionary = dictionary.setdefault(key, {})
    dictionary[keys[-1]] = value
    return orig
[ "def", "set_dict_value", "(", "dictionary", ",", "keys", ",", "value", ")", ":", "orig", "=", "dictionary", "for", "key", "in", "keys", "[", ":", "-", "1", "]", ":", "dictionary", "=", "dictionary", ".", "setdefault", "(", "key", ",", "{", "}", ")", "dictionary", "[", "keys", "[", "-", "1", "]", "]", "=", "value", "return", "orig" ]
23.533333
0.001361
def norm_vector(vector):
    """!
    @brief Calculates norm of an input vector that is known as a vector length.

    @param[in] vector (list): The input vector whose length is calculated.

    @return (double) vector norm known as vector length.

    """
    length = 0.0
    for component in vector:
        length += component * component

    length = length ** 0.5
    return length
[ "def", "norm_vector", "(", "vector", ")", ":", "length", "=", "0.0", "for", "component", "in", "vector", ":", "length", "+=", "component", "*", "component", "length", "=", "length", "**", "0.5", "return", "length" ]
24.352941
0.016279
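A quick check of norm_vector on a 3-4-5 triangle:

>>> norm_vector([3.0, 4.0])
5.0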
def inputtemplates(self):
        """Return all input templates as a list (of InputTemplate instances)"""
        l = []
        for profile in self.profiles:
            l += profile.input
        return l
[ "def", "inputtemplates", "(", "self", ")", ":", "l", "=", "[", "]", "for", "profile", "in", "self", ".", "profiles", ":", "l", "+=", "profile", ".", "input", "return", "l" ]
33.5
0.019417
def xorg(name):
    '''
    Set the keyboard layout for XOrg

    layout
        The keyboard layout to use
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if __salt__['keyboard.get_x']() == name:
        ret['result'] = True
        ret['comment'] = 'XOrg layout {0} already set'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'XOrg layout {0} needs to be set'.format(name)
        return ret

    if __salt__['keyboard.set_x'](name):
        ret['changes'] = {'layout': name}
        ret['result'] = True
        ret['comment'] = 'Set XOrg keyboard layout {0}'.format(name)
        return ret
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to set XOrg keyboard layout'
        return ret
[ "def", "xorg", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "if", "__salt__", "[", "'keyboard.get_x'", "]", "(", ")", "==", "name", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'XOrg layout {0} already set'", ".", "format", "(", "name", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'XOrg layout {0} needs to be set'", ".", "format", "(", "name", ")", "return", "ret", "if", "__salt__", "[", "'keyboard.set_x'", "]", "(", "name", ")", ":", "ret", "[", "'changes'", "]", "=", "{", "'layout'", ":", "name", "}", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Set XOrg keyboard layout {0}'", ".", "format", "(", "name", ")", "return", "ret", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to set XOrg keyboard layout'", "return", "ret" ]
29.296296
0.001224
def walk(self, pattern=None, errors='strict'):
        """ D.walk() -> iterator over files and subdirs, recursively.

        The iterator yields path objects naming each child item of
        this directory and its descendants.  This requires that
        D.isdir().

        This performs a depth-first traversal of the directory tree.
        Each directory is returned just before all its children.

        The ``errors`` keyword argument controls behavior when an
        error occurs.  The default is ``'strict'``, which causes an
        exception.  The other allowed values are ``'warn'``, which
        reports the error via ``warnings.warn()``, and ``'ignore'``.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")

        try:
            childList = self.listdir()
        except Exception:
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise

        for child in childList:
            if pattern is None or child.fnmatch(pattern):
                yield child
            try:
                isdir = child.isdir()
            except Exception:
                if errors == 'ignore':
                    isdir = False
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
                        TreeWalkWarning)
                    isdir = False
                else:
                    raise

            if isdir:
                for item in child.walk(pattern, errors):
                    yield item
[ "def", "walk", "(", "self", ",", "pattern", "=", "None", ",", "errors", "=", "'strict'", ")", ":", "if", "errors", "not", "in", "(", "'strict'", ",", "'warn'", ",", "'ignore'", ")", ":", "raise", "ValueError", "(", "\"invalid errors parameter\"", ")", "try", ":", "childList", "=", "self", ".", "listdir", "(", ")", "except", "Exception", ":", "if", "errors", "==", "'ignore'", ":", "return", "elif", "errors", "==", "'warn'", ":", "warnings", ".", "warn", "(", "\"Unable to list directory '%s': %s\"", "%", "(", "self", ",", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", ",", "TreeWalkWarning", ")", "return", "else", ":", "raise", "for", "child", "in", "childList", ":", "if", "pattern", "is", "None", "or", "child", ".", "fnmatch", "(", "pattern", ")", ":", "yield", "child", "try", ":", "isdir", "=", "child", ".", "isdir", "(", ")", "except", "Exception", ":", "if", "errors", "==", "'ignore'", ":", "isdir", "=", "False", "elif", "errors", "==", "'warn'", ":", "warnings", ".", "warn", "(", "\"Unable to access '%s': %s\"", "%", "(", "child", ",", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", ",", "TreeWalkWarning", ")", "isdir", "=", "False", "else", ":", "raise", "if", "isdir", ":", "for", "item", "in", "child", ".", "walk", "(", "pattern", ",", "errors", ")", ":", "yield", "item" ]
35.326923
0.001059
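Usage sketch for the walk method above, assuming it lives on a path.py-style Path class (the class name is an assumption; the keyword arguments are exactly those in the signature shown):

>>> d = Path('/tmp')
>>> for py_file in d.walk(pattern='*.py', errors='warn'):
...     print(py_file)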
def links(self, base_link, current_page) -> dict:
        """ Return JSON paginate links """
        max_pages = self.max_pages - 1 if \
            self.max_pages > 0 else self.max_pages

        base_link = '/%s' % (base_link.strip("/"))

        self_page = current_page

        prev = current_page - 1 if current_page != 0 else None
        prev_link = '%s/page/%s/%s' % (base_link, prev, self.limit) if \
            prev is not None else None

        next = current_page + 1 if current_page < max_pages else None
        next_link = '%s/page/%s/%s' % (base_link, next, self.limit) if \
            next is not None else None

        first = 0
        last = max_pages

        return {
            'self': '%s/page/%s/%s' % (base_link, self_page, self.limit),
            'prev': prev_link,
            'next': next_link,
            'first': '%s/page/%s/%s' % (base_link, first, self.limit),
            'last': '%s/page/%s/%s' % (base_link, last, self.limit),
        }
[ "def", "links", "(", "self", ",", "base_link", ",", "current_page", ")", "->", "dict", ":", "max_pages", "=", "self", ".", "max_pages", "-", "1", "if", "self", ".", "max_pages", ">", "0", "else", "self", ".", "max_pages", "base_link", "=", "'/%s'", "%", "(", "base_link", ".", "strip", "(", "\"/\"", ")", ")", "self_page", "=", "current_page", "prev", "=", "current_page", "-", "1", "if", "current_page", "is", "not", "0", "else", "None", "prev_link", "=", "'%s/page/%s/%s'", "%", "(", "base_link", ",", "prev", ",", "self", ".", "limit", ")", "if", "prev", "is", "not", "None", "else", "None", "next", "=", "current_page", "+", "1", "if", "current_page", "<", "max_pages", "else", "None", "next_link", "=", "'%s/page/%s/%s'", "%", "(", "base_link", ",", "next", ",", "self", ".", "limit", ")", "if", "next", "is", "not", "None", "else", "None", "first", "=", "0", "last", "=", "max_pages", "return", "{", "'self'", ":", "'%s/page/%s/%s'", "%", "(", "base_link", ",", "self_page", ",", "self", ".", "limit", ")", ",", "'prev'", ":", "prev_link", ",", "'next'", ":", "next_link", ",", "'first'", ":", "'%s/page/%s/%s'", "%", "(", "base_link", ",", "first", ",", "self", ".", "limit", ")", ",", "'last'", ":", "'%s/page/%s/%s'", "%", "(", "base_link", ",", "last", ",", "self", ".", "limit", ")", ",", "}" ]
46.571429
0.002004
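A worked example of the links method. The constructor shown is hypothetical; only the limit and max_pages attributes read by the code above are assumed. With limit=10, max_pages=5 and current_page=2, the local page cap becomes 4, so next resolves to page 3:

>>> p = Paginator()              # hypothetical constructor
>>> p.limit, p.max_pages = 10, 5
>>> links = p.links('items', 2)
>>> links['next']
'/items/page/3/10'
>>> links['last']
'/items/page/4/10'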
def _implicit_solver(self):
        """Inverts and solves the matrix problem for diffusion matrix
        and temperature T.

        The method is called by the
        :func:`~climlab.process.implicit.ImplicitProcess._compute()` function
        of the :class:`~climlab.process.implicit.ImplicitProcess` class and
        solves the matrix problem

        .. math::

            A \\cdot T_{\\textrm{new}} = T_{\\textrm{old}}

        for diffusion matrix A and corresponding temperatures.
        :math:`T_{\\textrm{old}}` is in this case the current state variable
        which already has been adjusted by the explicit processes.
        :math:`T_{\\textrm{new}}` is the new state of the variable. To derive
        the temperature tendency of the diffusion process the adjustment has
        to be calculated and multiplied with the timestep which is done by
        the :func:`~climlab.process.implicit.ImplicitProcess._compute()`
        function of the :class:`~climlab.process.implicit.ImplicitProcess`
        class.

        This method calculates the matrix inversion for every state variable
        and calls either :func:`solve_implicit_banded()` or
        :py:func:`numpy.linalg.solve()` dependent on the flag
        ``self.use_banded_solver``.

        :ivar dict state:               method uses current state variables
                                        but does not modify them
        :ivar bool use_banded_solver:   input flag whether to use
                                        :func:`_solve_implicit_banded()` or
                                        :py:func:`numpy.linalg.solve()` to
                                        do the matrix inversion
        :ivar array _diffTriDiag:       the diffusion matrix which is given
                                        with the current state variable to
                                        the method solving the matrix problem

        """
        #if self.update_diffusivity:
        # Time-stepping the diffusion is just inverting this matrix problem:
        newstate = {}
        for varname, value in self.state.items():
            if self.use_banded_solver:
                newvar = _solve_implicit_banded(value, self._diffTriDiag)
            else:
                newvar = np.linalg.solve(self._diffTriDiag, value)
            newstate[varname] = newvar
        return newstate
[ "def", "_implicit_solver", "(", "self", ")", ":", "#if self.update_diffusivity:", "# Time-stepping the diffusion is just inverting this matrix problem:", "newstate", "=", "{", "}", "for", "varname", ",", "value", "in", "self", ".", "state", ".", "items", "(", ")", ":", "if", "self", ".", "use_banded_solver", ":", "newvar", "=", "_solve_implicit_banded", "(", "value", ",", "self", ".", "_diffTriDiag", ")", "else", ":", "newvar", "=", "np", ".", "linalg", ".", "solve", "(", "self", ".", "_diffTriDiag", ",", "value", ")", "newstate", "[", "varname", "]", "=", "newvar", "return", "newstate" ]
46.64
0.00126
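The dense branch of the solver is plain numpy; here is a standalone sketch of the A . T_new = T_old inversion it performs, with a toy diffusion matrix:

>>> import numpy as np
>>> A = np.array([[1.2, -0.2], [-0.2, 1.2]])   # toy tridiagonal diffusion matrix
>>> T_old = np.array([250.0, 300.0])           # state after explicit processes
>>> T_new = np.linalg.solve(A, T_old)
>>> np.allclose(A @ T_new, T_old)
True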
def identify_denonavr_receivers():
    """
    Identify DenonAVR using SSDP and SCPD queries.

    Returns a list of dictionaries which includes all discovered Denon AVR
    devices with keys "host", "modelName", "friendlyName", "presentationURL".
    """
    # Sending SSDP broadcast message to get devices
    devices = send_ssdp_broadcast()

    # Check which responding device is a DenonAVR device and prepare output
    receivers = []
    for device in devices:
        try:
            receiver = evaluate_scpd_xml(device["URL"])
        except ConnectionError:
            continue
        if receiver:
            receivers.append(receiver)

    return receivers
[ "def", "identify_denonavr_receivers", "(", ")", ":", "# Sending SSDP broadcast message to get devices", "devices", "=", "send_ssdp_broadcast", "(", ")", "# Check which responding device is a DenonAVR device and prepare output", "receivers", "=", "[", "]", "for", "device", "in", "devices", ":", "try", ":", "receiver", "=", "evaluate_scpd_xml", "(", "device", "[", "\"URL\"", "]", ")", "except", "ConnectionError", ":", "continue", "if", "receiver", ":", "receivers", ".", "append", "(", "receiver", ")", "return", "receivers" ]
30.952381
0.001493
def get_weather(test=False):
    """
    Returns weather reports from the dataset.
    """
    if _Constants._TEST or test:
        rows = _Constants._DATABASE.execute(
            "SELECT data FROM weather LIMIT {hardware}".format(
                hardware=_Constants._HARDWARE))
        data = [r[0] for r in rows]
        data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
        return _Auxiliary._byteify(data)
    else:
        rows = _Constants._DATABASE.execute("SELECT data FROM weather")
        data = [r[0] for r in rows]
        data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
        return _Auxiliary._byteify(data)
[ "def", "get_weather", "(", "test", "=", "False", ")", ":", "if", "_Constants", ".", "_TEST", "or", "test", ":", "rows", "=", "_Constants", ".", "_DATABASE", ".", "execute", "(", "\"SELECT data FROM weather LIMIT {hardware}\"", ".", "format", "(", "hardware", "=", "_Constants", ".", "_HARDWARE", ")", ")", "data", "=", "[", "r", "[", "0", "]", "for", "r", "in", "rows", "]", "data", "=", "[", "_Auxiliary", ".", "_byteify", "(", "_json", ".", "loads", "(", "r", ")", ")", "for", "r", "in", "data", "]", "return", "_Auxiliary", ".", "_byteify", "(", "data", ")", "else", ":", "rows", "=", "_Constants", ".", "_DATABASE", ".", "execute", "(", "\"SELECT data FROM weather\"", ".", "format", "(", "hardware", "=", "_Constants", ".", "_HARDWARE", ")", ")", "data", "=", "[", "r", "[", "0", "]", "for", "r", "in", "rows", "]", "data", "=", "[", "_Auxiliary", ".", "_byteify", "(", "_json", ".", "loads", "(", "r", ")", ")", "for", "r", "in", "data", "]", "return", "_Auxiliary", ".", "_byteify", "(", "data", ")" ]
34.85
0.00838
def p_duration_duration_unit(self, p):
        'duration : DURATION_UNIT'
        logger.debug('duration = 1 of duration unit %s', p[1])
        p[0] = Duration.from_quantity_unit(1, p[1])
[ "def", "p_duration_duration_unit", "(", "self", ",", "p", ")", ":", "logger", ".", "debug", "(", "'duration = 1 of duration unit %s'", ",", "p", "[", "1", "]", ")", "p", "[", "0", "]", "=", "Duration", ".", "from_quantity_unit", "(", "1", ",", "p", "[", "1", "]", ")" ]
46.25
0.010638
async def close_wallet(handle: int) -> None:
    """
    Closes opened wallet and frees allocated resources.

    :param handle: wallet handle returned by indy_open_wallet.
    :return: Error code
    """

    logger = logging.getLogger(__name__)
    logger.debug("close_wallet: >>> handle: %i", handle)

    if not hasattr(close_wallet, "cb"):
        logger.debug("close_wallet: Creating callback")
        close_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))

    c_handle = c_int32(handle)

    await do_call('indy_close_wallet',
                  c_handle,
                  close_wallet.cb)

    logger.debug("close_wallet: <<<")
[ "async", "def", "close_wallet", "(", "handle", ":", "int", ")", "->", "None", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"close_wallet: >>> handle: %i\"", ",", "handle", ")", "if", "not", "hasattr", "(", "close_wallet", ",", "\"cb\"", ")", ":", "logger", ".", "debug", "(", "\"close_wallet: Creating callback\"", ")", "close_wallet", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_int32", ",", "c_int32", ")", ")", "c_handle", "=", "c_int32", "(", "handle", ")", "await", "do_call", "(", "'indy_close_wallet'", ",", "c_handle", ",", "close_wallet", ".", "cb", ")", "logger", ".", "debug", "(", "\"close_wallet: <<<\"", ")" ]
28.363636
0.00155
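Usage sketch for the coroutine above; the wallet handle is assumed to come from a prior open_wallet call:

>>> import asyncio
>>> async def shutdown(handle):
...     await close_wallet(handle)   # handle from a prior open_wallet
>>> # asyncio.get_event_loop().run_until_complete(shutdown(handle))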
def build_not_found(cls, errors=None):
        """Utility method to build an HTTP 404 Resource Error response"""
        errors = [errors] if not isinstance(errors, list) else errors
        return cls(Status.NOT_FOUND, errors)
[ "def", "build_not_found", "(", "cls", ",", "errors", "=", "None", ")", ":", "errors", "=", "[", "errors", "]", "if", "not", "isinstance", "(", "errors", ",", "list", ")", "else", "errors", "return", "cls", "(", "Status", ".", "NOT_FOUND", ",", "errors", ")" ]
55.75
0.00885
def term_from_uri(uri):
    """Removes prepended URI information from terms."""
    if uri is None:
        return None
    # This ensures that if we get a Literal with an integer value (as we
    # do for modification positions), it will get converted to a string,
    # not an integer.
    if isinstance(uri, rdflib.Literal):
        uri = str(uri.toPython())
    # This is to handle URIs like
    # http://www.openbel.org/bel/namespace//MAPK%20Erk1/3%20Family
    # or
    # http://www.openbel.org/bel/namespace/MAPK%20Erk1/3%20Family
    # In the current implementation, the order of the patterns
    # matters.
    patterns = ['http://www.openbel.org/bel/namespace//(.*)',
                'http://www.openbel.org/vocabulary//(.*)',
                'http://www.openbel.org/bel//(.*)',
                'http://www.openbel.org/bel/namespace/(.*)',
                'http://www.openbel.org/vocabulary/(.*)',
                'http://www.openbel.org/bel/(.*)']
    for pr in patterns:
        match = re.match(pr, uri)
        if match is not None:
            term = match.groups()[0]
            term = unquote(term)
            return term
    # If none of the patterns match then the URI is actually a simple term
    # for instance a site: "341" or a substitution: "sub(V,600,E)"
    return uri
[ "def", "term_from_uri", "(", "uri", ")", ":", "if", "uri", "is", "None", ":", "return", "None", "# This insures that if we get a Literal with an integer value (as we", "# do for modification positions), it will get converted to a string,", "# not an integer.", "if", "isinstance", "(", "uri", ",", "rdflib", ".", "Literal", ")", ":", "uri", "=", "str", "(", "uri", ".", "toPython", "(", ")", ")", "# This is to handle URIs like", "# http://www.openbel.org/bel/namespace//MAPK%20Erk1/3%20Family", "# or", "# http://www.openbel.org/bel/namespace/MAPK%20Erk1/3%20Family", "# In the current implementation, the order of the patterns", "# matters.", "patterns", "=", "[", "'http://www.openbel.org/bel/namespace//(.*)'", ",", "'http://www.openbel.org/vocabulary//(.*)'", ",", "'http://www.openbel.org/bel//(.*)'", ",", "'http://www.openbel.org/bel/namespace/(.*)'", ",", "'http://www.openbel.org/vocabulary/(.*)'", ",", "'http://www.openbel.org/bel/(.*)'", "]", "for", "pr", "in", "patterns", ":", "match", "=", "re", ".", "match", "(", "pr", ",", "uri", ")", "if", "match", "is", "not", "None", ":", "term", "=", "match", ".", "groups", "(", ")", "[", "0", "]", "term", "=", "unquote", "(", "term", ")", "return", "term", "# If none of the patterns match then the URI is actually a simple term", "# for instance a site: \"341\" or a substitution: \"sub(V,600,E)\"", "return", "uri" ]
42.266667
0.000771
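Worked examples of the pattern stripping (the single-slash namespace pattern matches, then unquote decodes the %20s):

>>> term_from_uri('http://www.openbel.org/bel/namespace/MAPK%20Erk1/3%20Family')
'MAPK Erk1/3 Family'
>>> term_from_uri('341')   # no pattern matches, returned unchanged
'341'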
def cumsum(self, axis=0, *args, **kwargs):
        """
        Cumulative sum of non-NA/null values.

        When performing the cumulative summation, any NA/null values will
        be skipped. The resulting SparseArray will preserve the locations of
        NaN values, but the fill value will be `np.nan` regardless.

        Parameters
        ----------
        axis : int or None
            Axis over which to perform the cumulative summation. If None,
            perform cumulative summation over flattened array.

        Returns
        -------
        cumsum : SparseArray
        """
        nv.validate_cumsum(args, kwargs)

        if axis is not None and axis >= self.ndim:
            # Mimic ndarray behaviour.
            raise ValueError("axis(={axis}) out of bounds".format(axis=axis))

        if not self._null_fill_value:
            return SparseArray(self.to_dense()).cumsum()

        return SparseArray(self.sp_values.cumsum(),
                           sparse_index=self.sp_index,
                           fill_value=self.fill_value)
[ "def", "cumsum", "(", "self", ",", "axis", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_cumsum", "(", "args", ",", "kwargs", ")", "if", "axis", "is", "not", "None", "and", "axis", ">=", "self", ".", "ndim", ":", "# Mimic ndarray behaviour.", "raise", "ValueError", "(", "\"axis(={axis}) out of bounds\"", ".", "format", "(", "axis", "=", "axis", ")", ")", "if", "not", "self", ".", "_null_fill_value", ":", "return", "SparseArray", "(", "self", ".", "to_dense", "(", ")", ")", ".", "cumsum", "(", ")", "return", "SparseArray", "(", "self", ".", "sp_values", ".", "cumsum", "(", ")", ",", "sparse_index", "=", "self", ".", "sp_index", ",", "fill_value", "=", "self", ".", "fill_value", ")" ]
35.892857
0.001938
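The same behavior through the public pandas API (a sketch; the exact repr formatting varies by pandas version): the NaN position is preserved and skipped in the running sum:

>>> import numpy as np
>>> import pandas as pd
>>> arr = pd.arrays.SparseArray([1.0, np.nan, 2.0])
>>> arr.cumsum()   # values become [1.0, nan, 3.0]; fill_value stays nan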
def children(self):
        """
        Children matches.
        """
        if self._children is None:
            self._children = Matches(None, self.input_string)
        return self._children
[ "def", "children", "(", "self", ")", ":", "if", "self", ".", "_children", "is", "None", ":", "self", ".", "_children", "=", "Matches", "(", "None", ",", "self", ".", "input_string", ")", "return", "self", ".", "_children" ]
27.142857
0.010204
def add_volume(self, colorchange=True, column=None, name='', str='{name}', **kwargs):
        """
        Add 'volume' study to QuantFigure.studies

        Parameters:
            colorchange : bool
                If True then each volume bar will have a fill color
                depending on if 'base' had a positive or negative
                change compared to the previous value.
                If False then each volume bar will have a fill color
                depending on if the volume data itself had a positive
                or negative change compared to the previous value.
            column : string
                Defines the data column name that contains the
                volume data.
                Default: 'volume'
            name : string
                Name given to the study
            str : string
                Label factory for studies
                The following wildcards can be used:
                    {name} : Name of the column
                    {study} : Name of the study
                    {period} : Period used
                Examples:
                    'study: {study} - period: {period}'
            kwargs :
                base : string
                    Defines the column which will define the
                    positive/negative changes (if colorchange=True).
                    Default = 'close'
                up_color : string
                    Color for positive bars
                down_color : string
                    Color for negative bars
        """
        if not column:
            column = self._d['volume']
        up_color = kwargs.pop('up_color', self.theme['up_color'])
        down_color = kwargs.pop('down_color', self.theme['down_color'])
        study = {'kind': 'volume',
                 'name': name,
                 'params': {'colorchange': colorchange, 'base': 'close',
                            'column': column, 'str': None},
                 'display': utils.merge_dict({'up_color': up_color,
                                              'down_color': down_color},
                                             kwargs)}
        self._add_study(study)
[ "def", "add_volume", "(", "self", ",", "colorchange", "=", "True", ",", "column", "=", "None", ",", "name", "=", "''", ",", "str", "=", "'{name}'", ",", "*", "*", "kwargs", ")", ":", "if", "not", "column", ":", "column", "=", "self", ".", "_d", "[", "'volume'", "]", "up_color", "=", "kwargs", ".", "pop", "(", "'up_color'", ",", "self", ".", "theme", "[", "'up_color'", "]", ")", "down_color", "=", "kwargs", ".", "pop", "(", "'down_color'", ",", "self", ".", "theme", "[", "'down_color'", "]", ")", "study", "=", "{", "'kind'", ":", "'volume'", ",", "'name'", ":", "name", ",", "'params'", ":", "{", "'colorchange'", ":", "colorchange", ",", "'base'", ":", "'close'", ",", "'column'", ":", "column", ",", "'str'", ":", "None", "}", ",", "'display'", ":", "utils", ".", "merge_dict", "(", "{", "'up_color'", ":", "up_color", ",", "'down_color'", ":", "down_color", "}", ",", "kwargs", ")", "}", "self", ".", "_add_study", "(", "study", ")" ]
32.73913
0.055448
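Usage sketch with cufflinks, assuming df is an OHLCV DataFrame containing a 'volume' column:

>>> import cufflinks as cf
>>> qf = cf.QuantFig(df)                               # df assumed OHLCV
>>> qf.add_volume(up_color='green', down_color='red')
>>> qf.iplot()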
def create_command(
    principal, permissions, endpoint_plus_path, notify_email, notify_message
):
    """
    Executor for `globus endpoint permission create`
    """
    if not principal:
        raise click.UsageError("A security principal is required for this command")

    endpoint_id, path = endpoint_plus_path
    principal_type, principal_val = principal

    client = get_client()

    if principal_type == "identity":
        principal_val = maybe_lookup_identity_id(principal_val)
        if not principal_val:
            raise click.UsageError(
                "Identity does not exist. "
                "Use --provision-identity to auto-provision an identity."
            )
    elif principal_type == "provision-identity":
        principal_val = maybe_lookup_identity_id(principal_val, provision=True)
        principal_type = "identity"

    if not notify_email:
        notify_message = None

    rule_data = assemble_generic_doc(
        "access",
        permissions=permissions,
        principal=principal_val,
        principal_type=principal_type,
        path=path,
        notify_email=notify_email,
        notify_message=notify_message,
    )

    res = client.add_endpoint_acl_rule(endpoint_id, rule_data)
    formatted_print(
        res,
        text_format=FORMAT_TEXT_RECORD,
        fields=[("Message", "message"), ("Rule ID", "access_id")],
    )
[ "def", "create_command", "(", "principal", ",", "permissions", ",", "endpoint_plus_path", ",", "notify_email", ",", "notify_message", ")", ":", "if", "not", "principal", ":", "raise", "click", ".", "UsageError", "(", "\"A security principal is required for this command\"", ")", "endpoint_id", ",", "path", "=", "endpoint_plus_path", "principal_type", ",", "principal_val", "=", "principal", "client", "=", "get_client", "(", ")", "if", "principal_type", "==", "\"identity\"", ":", "principal_val", "=", "maybe_lookup_identity_id", "(", "principal_val", ")", "if", "not", "principal_val", ":", "raise", "click", ".", "UsageError", "(", "\"Identity does not exist. \"", "\"Use --provision-identity to auto-provision an identity.\"", ")", "elif", "principal_type", "==", "\"provision-identity\"", ":", "principal_val", "=", "maybe_lookup_identity_id", "(", "principal_val", ",", "provision", "=", "True", ")", "principal_type", "=", "\"identity\"", "if", "not", "notify_email", ":", "notify_message", "=", "None", "rule_data", "=", "assemble_generic_doc", "(", "\"access\"", ",", "permissions", "=", "permissions", ",", "principal", "=", "principal_val", ",", "principal_type", "=", "principal_type", ",", "path", "=", "path", ",", "notify_email", "=", "notify_email", ",", "notify_message", "=", "notify_message", ",", ")", "res", "=", "client", ".", "add_endpoint_acl_rule", "(", "endpoint_id", ",", "rule_data", ")", "formatted_print", "(", "res", ",", "text_format", "=", "FORMAT_TEXT_RECORD", ",", "fields", "=", "[", "(", "\"Message\"", ",", "\"message\"", ")", ",", "(", "\"Rule ID\"", ",", "\"access_id\"", ")", "]", ",", ")" ]
30.477273
0.001445
def create(cls, term, *ranges):
        """Instantiate the indexed sum while applying simplification rules"""
        if not isinstance(term, Scalar):
            term = ScalarValue.create(term)
        return super().create(term, *ranges)
[ "def", "create", "(", "cls", ",", "term", ",", "*", "ranges", ")", ":", "if", "not", "isinstance", "(", "term", ",", "Scalar", ")", ":", "term", "=", "ScalarValue", ".", "create", "(", "term", ")", "return", "super", "(", ")", ".", "create", "(", "term", ",", "*", "ranges", ")" ]
47
0.008368
def get_weather(self, time, max_hour=6):
        """Get the current weather data from met.no."""
        if self.data is None:
            return {}

        ordered_entries = []
        for time_entry in self.data['product']['time']:
            valid_from = parse_datetime(time_entry['@from'])
            valid_to = parse_datetime(time_entry['@to'])
            if time > valid_to:
                # Has already passed. Never select this.
                continue

            average_dist = (abs((valid_to - time).total_seconds()) +
                            abs((valid_from - time).total_seconds()))
            if average_dist > max_hour * 3600:
                continue

            ordered_entries.append((average_dist, time_entry))

        if not ordered_entries:
            return {}

        ordered_entries.sort(key=lambda item: item[0])

        res = dict()
        res['datetime'] = time
        res['temperature'] = get_data('temperature', ordered_entries)
        res['condition'] = CONDITIONS.get(get_data('symbol', ordered_entries))
        res['pressure'] = get_data('pressure', ordered_entries)
        res['humidity'] = get_data('humidity', ordered_entries)
        res['wind_speed'] = get_data('windSpeed', ordered_entries)
        res['wind_bearing'] = get_data('windDirection', ordered_entries)
        return res
[ "def", "get_weather", "(", "self", ",", "time", ",", "max_hour", "=", "6", ")", ":", "if", "self", ".", "data", "is", "None", ":", "return", "{", "}", "ordered_entries", "=", "[", "]", "for", "time_entry", "in", "self", ".", "data", "[", "'product'", "]", "[", "'time'", "]", ":", "valid_from", "=", "parse_datetime", "(", "time_entry", "[", "'@from'", "]", ")", "valid_to", "=", "parse_datetime", "(", "time_entry", "[", "'@to'", "]", ")", "if", "time", ">", "valid_to", ":", "# Has already passed. Never select this.", "continue", "average_dist", "=", "(", "abs", "(", "(", "valid_to", "-", "time", ")", ".", "total_seconds", "(", ")", ")", "+", "abs", "(", "(", "valid_from", "-", "time", ")", ".", "total_seconds", "(", ")", ")", ")", "if", "average_dist", ">", "max_hour", "*", "3600", ":", "continue", "ordered_entries", ".", "append", "(", "(", "average_dist", ",", "time_entry", ")", ")", "if", "not", "ordered_entries", ":", "return", "{", "}", "ordered_entries", ".", "sort", "(", "key", "=", "lambda", "item", ":", "item", "[", "0", "]", ")", "res", "=", "dict", "(", ")", "res", "[", "'datetime'", "]", "=", "time", "res", "[", "'temperature'", "]", "=", "get_data", "(", "'temperature'", ",", "ordered_entries", ")", "res", "[", "'condition'", "]", "=", "CONDITIONS", ".", "get", "(", "get_data", "(", "'symbol'", ",", "ordered_entries", ")", ")", "res", "[", "'pressure'", "]", "=", "get_data", "(", "'pressure'", ",", "ordered_entries", ")", "res", "[", "'humidity'", "]", "=", "get_data", "(", "'humidity'", ",", "ordered_entries", ")", "res", "[", "'wind_speed'", "]", "=", "get_data", "(", "'windSpeed'", ",", "ordered_entries", ")", "res", "[", "'wind_bearing'", "]", "=", "get_data", "(", "'windDirection'", ",", "ordered_entries", ")", "return", "res" ]
39.666667
0.001491
def get_user(request):
    """
    Returns the user model instance associated with the given request session.
    If no user is retrieved an instance of `MojAnonymousUser` is returned.
    """
    user = None
    try:
        user_id = request.session[SESSION_KEY]
        token = request.session[AUTH_TOKEN_SESSION_KEY]
        user_data = request.session[USER_DATA_SESSION_KEY]
        backend_path = request.session[BACKEND_SESSION_KEY]
    except KeyError:
        pass
    else:
        if backend_path in settings.AUTHENTICATION_BACKENDS:
            backend = load_backend(backend_path)
            user = backend.get_user(user_id, token, user_data)
            # Verify the session
            if hasattr(user, 'get_session_auth_hash'):
                session_hash = request.session.get(HASH_SESSION_KEY)
                session_hash_verified = session_hash and constant_time_compare(
                    session_hash,
                    user.get_session_auth_hash()
                )
                if not session_hash_verified:
                    request.session.flush()
                    user = None

    return user or MojAnonymousUser()
[ "def", "get_user", "(", "request", ")", ":", "user", "=", "None", "try", ":", "user_id", "=", "request", ".", "session", "[", "SESSION_KEY", "]", "token", "=", "request", ".", "session", "[", "AUTH_TOKEN_SESSION_KEY", "]", "user_data", "=", "request", ".", "session", "[", "USER_DATA_SESSION_KEY", "]", "backend_path", "=", "request", ".", "session", "[", "BACKEND_SESSION_KEY", "]", "except", "KeyError", ":", "pass", "else", ":", "if", "backend_path", "in", "settings", ".", "AUTHENTICATION_BACKENDS", ":", "backend", "=", "load_backend", "(", "backend_path", ")", "user", "=", "backend", ".", "get_user", "(", "user_id", ",", "token", ",", "user_data", ")", "# Verify the session", "if", "hasattr", "(", "user", ",", "'get_session_auth_hash'", ")", ":", "session_hash", "=", "request", ".", "session", ".", "get", "(", "HASH_SESSION_KEY", ")", "session_hash_verified", "=", "session_hash", "and", "constant_time_compare", "(", "session_hash", ",", "user", ".", "get_session_auth_hash", "(", ")", ")", "if", "not", "session_hash_verified", ":", "request", ".", "session", ".", "flush", "(", ")", "user", "=", "None", "return", "user", "or", "MojAnonymousUser", "(", ")" ]
38.862069
0.000866
def promote(self):
        """
        Mark object as alive, so it won't be collected during next
        run of the garbage collector.
        """
        if self.expiry is not None:
            self.promoted = self.time_module.time() + self.expiry
[ "def", "promote", "(", "self", ")", ":", "if", "self", ".", "expiry", "is", "not", "None", ":", "self", ".", "promoted", "=", "self", ".", "time_module", ".", "time", "(", ")", "+", "self", ".", "expiry" ]
39.333333
0.008299
def delete_database(self, server_name, name):
        '''
        Deletes an Azure SQL Database.

        server_name:
            Name of the server where the database is located.
        name:
            Name of the database to delete.
        '''
        return self._perform_delete(self._get_databases_path(server_name, name))
[ "def", "delete_database", "(", "self", ",", "server_name", ",", "name", ")", ":", "return", "self", ".", "_perform_delete", "(", "self", ".", "_get_databases_path", "(", "server_name", ",", "name", ")", ")" ]
32.2
0.009063
def add_xtalographic_info(data_api, struct_inflator):
    """Add the crystallographic data to the structure.

    :param data_api: the interface to the decoded data
    :param struct_inflator: the interface to put the data into the client object"""
    if data_api.unit_cell is None and data_api.space_group is not None:
        struct_inflator.set_xtal_info(data_api.space_group,
                                      constants.UNKNOWN_UNIT_CELL)
    elif data_api.unit_cell is not None and data_api.space_group is None:
        struct_inflator.set_xtal_info(constants.UNKNOWN_SPACE_GROUP,
                                      data_api.unit_cell)
    elif data_api.unit_cell is None and data_api.space_group is None:
        struct_inflator.set_xtal_info(constants.UNKNOWN_SPACE_GROUP,
                                      constants.UNKNOWN_UNIT_CELL)
    else:
        struct_inflator.set_xtal_info(data_api.space_group,
                                      data_api.unit_cell)
[ "def", "add_xtalographic_info", "(", "data_api", ",", "struct_inflator", ")", ":", "if", "data_api", ".", "unit_cell", "==", "None", "and", "data_api", ".", "space_group", "is", "not", "None", ":", "struct_inflator", ".", "set_xtal_info", "(", "data_api", ".", "space_group", ",", "constants", ".", "UNKNOWN_UNIT_CELL", ")", "elif", "data_api", ".", "unit_cell", "is", "not", "None", "and", "data_api", ".", "space_group", "is", "None", ":", "struct_inflator", ".", "set_xtal_info", "(", "constants", ".", "UNKNOWN_SPACE_GROUP", ",", "data_api", ".", "unit_cell", ")", "elif", "data_api", ".", "unit_cell", "is", "None", "and", "data_api", ".", "space_group", "is", "None", ":", "struct_inflator", ".", "set_xtal_info", "(", "constants", ".", "UNKNOWN_SPACE_GROUP", ",", "constants", ".", "UNKNOWN_UNIT_CELL", ")", "else", ":", "struct_inflator", ".", "set_xtal_info", "(", "data_api", ".", "space_group", ",", "data_api", ".", "unit_cell", ")" ]
60.125
0.008188
def tonnetz(y=None, sr=22050, chroma=None):
    '''Computes the tonal centroid features (tonnetz), following the method of
    [1]_.

    .. [1] Harte, C., Sandler, M., & Gasser, M. (2006). "Detecting Harmonic
           Change in Musical Audio." In Proceedings of the 1st ACM Workshop
           on Audio and Music Computing Multimedia (pp. 21-26).
           Santa Barbara, CA, USA: ACM Press. doi:10.1145/1178723.1178727.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        Audio time series.

    sr : number > 0 [scalar]
        sampling rate of `y`

    chroma : np.ndarray [shape=(n_chroma, t)] or None
        Normalized energy for each chroma bin at each frame.

        If `None`, a cqt chromagram is performed.

    Returns
    -------
    tonnetz : np.ndarray [shape(6, t)]
        Tonal centroid features for each frame.

        Tonnetz dimensions:
            - 0: Fifth x-axis
            - 1: Fifth y-axis
            - 2: Minor x-axis
            - 3: Minor y-axis
            - 4: Major x-axis
            - 5: Major y-axis

    See Also
    --------
    chroma_cqt
        Compute a chromagram from a constant-Q transform.

    chroma_stft
        Compute a chromagram from an STFT spectrogram or waveform.

    Examples
    --------
    Compute tonnetz features from the harmonic component of a song

    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> y = librosa.effects.harmonic(y)
    >>> tonnetz = librosa.feature.tonnetz(y=y, sr=sr)
    >>> tonnetz
    array([[-0.073, -0.053, ..., -0.054, -0.073],
           [ 0.001,  0.001, ..., -0.054, -0.062],
           ...,
           [ 0.039,  0.034, ...,  0.044,  0.064],
           [ 0.005,  0.002, ...,  0.011,  0.017]])

    Compare the tonnetz features to `chroma_cqt`

    >>> import matplotlib.pyplot as plt
    >>> plt.subplot(2, 1, 1)
    >>> librosa.display.specshow(tonnetz, y_axis='tonnetz')
    >>> plt.colorbar()
    >>> plt.title('Tonal Centroids (Tonnetz)')
    >>> plt.subplot(2, 1, 2)
    >>> librosa.display.specshow(librosa.feature.chroma_cqt(y, sr=sr),
    ...                          y_axis='chroma', x_axis='time')
    >>> plt.colorbar()
    >>> plt.title('Chroma')
    >>> plt.tight_layout()
    '''
    if y is None and chroma is None:
        raise ParameterError('Either the audio samples or the chromagram must be '
                             'passed as an argument.')

    if chroma is None:
        chroma = chroma_cqt(y=y, sr=sr)

    # Generate Transformation matrix
    dim_map = np.linspace(0, 12, num=chroma.shape[0], endpoint=False)

    scale = np.asarray([7. / 6, 7. / 6,
                        3. / 2, 3. / 2,
                        2. / 3, 2. / 3])

    V = np.multiply.outer(scale, dim_map)

    # Even rows compute sin()
    V[::2] -= 0.5

    R = np.array([1, 1,         # Fifths
                  1, 1,         # Minor
                  0.5, 0.5])    # Major

    phi = R[:, np.newaxis] * np.cos(np.pi * V)

    # Do the transform to tonnetz
    return phi.dot(util.normalize(chroma, norm=1, axis=0))
[ "def", "tonnetz", "(", "y", "=", "None", ",", "sr", "=", "22050", ",", "chroma", "=", "None", ")", ":", "if", "y", "is", "None", "and", "chroma", "is", "None", ":", "raise", "ParameterError", "(", "'Either the audio samples or the chromagram must be '", "'passed as an argument.'", ")", "if", "chroma", "is", "None", ":", "chroma", "=", "chroma_cqt", "(", "y", "=", "y", ",", "sr", "=", "sr", ")", "# Generate Transformation matrix", "dim_map", "=", "np", ".", "linspace", "(", "0", ",", "12", ",", "num", "=", "chroma", ".", "shape", "[", "0", "]", ",", "endpoint", "=", "False", ")", "scale", "=", "np", ".", "asarray", "(", "[", "7.", "/", "6", ",", "7.", "/", "6", ",", "3.", "/", "2", ",", "3.", "/", "2", ",", "2.", "/", "3", ",", "2.", "/", "3", "]", ")", "V", "=", "np", ".", "multiply", ".", "outer", "(", "scale", ",", "dim_map", ")", "# Even rows compute sin()", "V", "[", ":", ":", "2", "]", "-=", "0.5", "R", "=", "np", ".", "array", "(", "[", "1", ",", "1", ",", "# Fifths", "1", ",", "1", ",", "# Minor", "0.5", ",", "0.5", "]", ")", "# Major", "phi", "=", "R", "[", ":", ",", "np", ".", "newaxis", "]", "*", "np", ".", "cos", "(", "np", ".", "pi", "*", "V", ")", "# Do the transform to tonnetz", "return", "phi", ".", "dot", "(", "util", ".", "normalize", "(", "chroma", ",", "norm", "=", "1", ",", "axis", "=", "0", ")", ")" ]
29.68
0.000652
def spharm_lm(l, m, theta, phi, normalization='4pi', kind='real', csphase=1,
              degrees=True):
    """
    Compute the spherical harmonic function for a specific degree and order.

    Usage
    -----
    ylm = spharm_lm (l, m, theta, phi, [normalization, kind, csphase, degrees])

    Returns
    -------
    ylm : float or complex
        The spherical harmonic function ylm, where l and m are the spherical
        harmonic degree and order, respectively.

    Parameters
    ----------
    l : integer
        The spherical harmonic degree.
    m : integer
        The spherical harmonic order.
    theta : float
        The colatitude in degrees.
    phi : float
        The longitude in degrees.
    normalization : str, optional, default = '4pi'
        '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
        orthonormalized, Schmidt semi-normalized, or unnormalized spherical
        harmonic functions, respectively.
    kind : str, optional, default = 'real'
        'real' or 'complex' spherical harmonic coefficients.
    csphase : optional, integer, default = 1
        If 1 (default), the Condon-Shortley phase will be excluded. If -1,
        the Condon-Shortley phase of (-1)^m will be appended to the spherical
        harmonic functions.
    degrees : optional, bool, default = True
        If True, colat and phi are expressed in degrees.

    Description
    -----------
    spharm_lm will calculate the spherical harmonic function for a specific
    degree l and order m, and for a given colatitude theta and longitude phi.
    Three parameters determine how the spherical harmonic functions are
    defined. normalization can be either '4pi' (default), 'ortho', 'schmidt',
    or 'unnorm' for 4pi normalized, orthonormalized, Schmidt semi-normalized,
    or unnormalized spherical harmonic functions, respectively. kind can be
    either 'real' or 'complex', and csphase determines whether to include or
    exclude (default) the Condon-Shortley phase factor.

    The spherical harmonic functions are calculated using the standard
    three-term recursion formula, and in order to prevent overflows, the
    scaling approach of Holmes and Featherstone (2002) is utilized. The
    resulting functions are accurate to about degree 2800. See Wieczorek and
    Meschede (2018) for exact definitions on how the spherical harmonic
    functions are defined.

    References
    ----------
    Holmes, S. A., and W. E. Featherstone, A unified approach to the Clenshaw
    summation and the recursive computation of very high degree and order
    normalised associated Legendre functions, J. Geodesy, 76, 279-299,
    doi:10.1007/s00190-002-0216-2, 2002.

    Wieczorek, M. A., and M. Meschede. SHTools — Tools for working with
    spherical harmonics, Geochem., Geophys., Geosyst., 19, 2574-2592,
    doi:10.1029/2018GC007529, 2018.
    """
    if l < 0:
        raise ValueError(
            "The degree l must be greater than or equal to 0. "
            "Input value was {:s}.".format(repr(l))
        )

    if m > l:
        raise ValueError(
            "The order m must be less than or equal to the degree l. " +
            "Input values were l={:s} and m={:s}.".format(repr(l), repr(m))
        )

    if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
        raise ValueError(
            "The normalization must be '4pi', 'ortho', 'schmidt', " +
            "or 'unnorm'. Input value was {:s}."
            .format(repr(normalization))
        )

    if kind.lower() not in ('real', 'complex'):
        raise ValueError(
            "kind must be 'real' or 'complex'. " +
            "Input value was {:s}.".format(repr(kind))
        )

    if csphase != 1 and csphase != -1:
        raise ValueError(
            "csphase must be either 1 or -1. Input value was {:s}."
            .format(repr(csphase))
        )

    if normalization.lower() == 'unnorm' and l > 85:
        _warnings.warn("Calculations using unnormalized coefficients " +
                       "are stable only for degrees less than or equal " +
                       "to 85. The degree l will be set to " +
                       "85. Input value was {:d}.".format(l),
                       category=RuntimeWarning)
        l = 85

    ind = (l*(l+1))//2 + abs(m)

    if degrees is True:
        theta = _np.deg2rad(theta)
        phi = _np.deg2rad(phi)

    if kind.lower() == 'real':
        p = _legendre(l, _np.cos(theta), normalization=normalization,
                      csphase=csphase, cnorm=0, packed=True)
        if m >= 0:
            ylm = p[ind] * _np.cos(m*phi)
        else:
            ylm = p[ind] * _np.sin(abs(m)*phi)
    else:
        p = _legendre(l, _np.cos(theta), normalization=normalization,
                      csphase=csphase, cnorm=1, packed=True)
        ylm = p[ind] * (_np.cos(m*phi) + 1j * _np.sin(abs(m)*phi))  # Yl|m|
        if m < 0:
            ylm = ylm.conj()
            if _np.mod(m, 2) == 1:
                ylm = -ylm

    return ylm
[ "def", "spharm_lm", "(", "l", ",", "m", ",", "theta", ",", "phi", ",", "normalization", "=", "'4pi'", ",", "kind", "=", "'real'", ",", "csphase", "=", "1", ",", "degrees", "=", "True", ")", ":", "if", "l", "<", "0", ":", "raise", "ValueError", "(", "\"The degree l must be greater or equal than 0. Input value was {:s}.\"", ".", "format", "(", "repr", "(", "l", ")", ")", ")", "if", "m", ">", "l", ":", "raise", "ValueError", "(", "\"The order m must be less than or equal to the degree l. \"", "+", "\"Input values were l={:s} and m={:s}.\"", ".", "format", "(", "repr", "(", "l", ")", ",", "repr", "(", "m", ")", ")", ")", "if", "normalization", ".", "lower", "(", ")", "not", "in", "(", "'4pi'", ",", "'ortho'", ",", "'schmidt'", ",", "'unnorm'", ")", ":", "raise", "ValueError", "(", "\"The normalization must be '4pi', 'ortho', 'schmidt', \"", "+", "\"or 'unnorm'. Input value was {:s}.\"", ".", "format", "(", "repr", "(", "normalization", ")", ")", ")", "if", "kind", ".", "lower", "(", ")", "not", "in", "(", "'real'", ",", "'complex'", ")", ":", "raise", "ValueError", "(", "\"kind must be 'real' or 'complex'. \"", "+", "\"Input value was {:s}.\"", ".", "format", "(", "repr", "(", "kind", ")", ")", ")", "if", "csphase", "!=", "1", "and", "csphase", "!=", "-", "1", ":", "raise", "ValueError", "(", "\"csphase must be either 1 or -1. Input value was {:s}.\"", ".", "format", "(", "repr", "(", "csphase", ")", ")", ")", "if", "normalization", ".", "lower", "(", ")", "==", "'unnorm'", "and", "lmax", ">", "85", ":", "_warnings", ".", "warn", "(", "\"Calculations using unnormalized coefficients \"", "+", "\"are stable only for degrees less than or equal \"", "+", "\"to 85. lmax for the coefficients will be set to \"", "+", "\"85. Input value was {:d}.\"", ".", "format", "(", "lmax", ")", ",", "category", "=", "RuntimeWarning", ")", "lmax", "=", "85", "ind", "=", "(", "l", "*", "(", "l", "+", "1", ")", ")", "//", "2", "+", "abs", "(", "m", ")", "if", "degrees", "is", "True", ":", "theta", "=", "_np", ".", "deg2rad", "(", "theta", ")", "phi", "=", "_np", ".", "deg2rad", "(", "phi", ")", "if", "kind", ".", "lower", "(", ")", "==", "'real'", ":", "p", "=", "_legendre", "(", "l", ",", "_np", ".", "cos", "(", "theta", ")", ",", "normalization", "=", "normalization", ",", "csphase", "=", "csphase", ",", "cnorm", "=", "0", ",", "packed", "=", "True", ")", "if", "m", ">=", "0", ":", "ylm", "=", "p", "[", "ind", "]", "*", "_np", ".", "cos", "(", "m", "*", "phi", ")", "else", ":", "ylm", "=", "p", "[", "ind", "]", "*", "_np", ".", "sin", "(", "abs", "(", "m", ")", "*", "phi", ")", "else", ":", "p", "=", "_legendre", "(", "l", ",", "_np", ".", "cos", "(", "theta", ")", ",", "normalization", "=", "normalization", ",", "csphase", "=", "csphase", ",", "cnorm", "=", "1", ",", "packed", "=", "True", ")", "ylm", "=", "p", "[", "ind", "]", "*", "(", "_np", ".", "cos", "(", "m", "*", "phi", ")", "+", "1j", "*", "_np", ".", "sin", "(", "abs", "(", "m", ")", "*", "phi", ")", ")", "# Yl|m|", "if", "m", "<", "0", ":", "ylm", "=", "ylm", ".", "conj", "(", ")", "if", "_np", ".", "mod", "(", "m", ",", "2", ")", "==", "1", ":", "ylm", "=", "-", "ylm", "return", "ylm" ]
37.732824
0.000591
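Usage sketch: for the default 4pi normalization, Y_10 at the north pole equals sqrt(3), since the normalized Legendre function P_10 evaluated at cos(0) is sqrt(3):

>>> import numpy as np
>>> ylm = spharm_lm(1, 0, 0.0, 0.0)   # 4pi-normalized Y_10 at the north pole
>>> np.isclose(ylm, np.sqrt(3.0))
True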
def weighted_choice(seq, cdf):
    """
    Select a random element from a sequence, given cumulative probabilities
    of selection.

    See ``compute_fitness_cdf`` function for obtaining cumulative
    probabilities.

    seq: sequence to select from
    cdf: sequence with 1 cumulative probability value in [0, 1] for each
         element in ``seq``
    return: randomly selected element
    """
    assert len(seq) == len(cdf)
    rand = random.random()
    for i, e in enumerate(seq):
        cp = cdf[i]
        assert 0 <= cp <= 1
        if rand < cp:
            return e
[ "def", "weighted_choice", "(", "seq", ",", "cdf", ")", ":", "assert", "len", "(", "seq", ")", "==", "len", "(", "cdf", ")", "rand", "=", "random", ".", "random", "(", ")", "for", "i", ",", "e", "in", "enumerate", "(", "seq", ")", ":", "cp", "=", "cdf", "[", "i", "]", "assert", "0", "<=", "cp", "<=", "1", "if", "rand", "<", "cp", ":", "return", "e" ]
28.75
0.015152
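Usage sketch; note the last CDF entry should be 1.0 so that a choice is always made:

>>> import random
>>> random.seed(0)
>>> seq = ['a', 'b', 'c']
>>> cdf = [0.2, 0.5, 1.0]      # 'a' 20%, 'b' 30%, 'c' 50%
>>> weighted_choice(seq, cdf)
'c'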
def _union_in_blocks(contours, block_size):
    """
    Generator which yields a valid shape for each block_size multiple of
    input contours. This merges together the contours for each block before
    yielding them.
    """

    n_contours = len(contours)
    for i in range(0, n_contours, block_size):
        j = min(i + block_size, n_contours)

        inners = []
        for c in contours[i:j]:
            p = _contour_to_poly(c)
            if p.type == 'Polygon':
                inners.append(p)
            elif p.type == 'MultiPolygon':
                inners.extend(p.geoms)

        holes = unary_union(inners)
        assert holes.is_valid

        yield holes
[ "def", "_union_in_blocks", "(", "contours", ",", "block_size", ")", ":", "n_contours", "=", "len", "(", "contours", ")", "for", "i", "in", "range", "(", "0", ",", "n_contours", ",", "block_size", ")", ":", "j", "=", "min", "(", "i", "+", "block_size", ",", "n_contours", ")", "inners", "=", "[", "]", "for", "c", "in", "contours", "[", "i", ":", "j", "]", ":", "p", "=", "_contour_to_poly", "(", "c", ")", "if", "p", ".", "type", "==", "'Polygon'", ":", "inners", ".", "append", "(", "p", ")", "elif", "p", ".", "type", "==", "'MultiPolygon'", ":", "inners", ".", "extend", "(", "p", ".", "geoms", ")", "holes", "=", "unary_union", "(", "inners", ")", "assert", "holes", ".", "is_valid", "yield", "holes" ]
29.818182
0.001477
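Sketch of consuming the generator. The contours input and _contour_to_poly are module internals assumed to exist as used above; the per-block results can themselves be unioned down to a single shape:

>>> from shapely.ops import unary_union
>>> blocks = list(_union_in_blocks(contours, 256))   # contours from upstream
>>> merged = unary_union(blocks)                     # single valid geometry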
def parse_date_range(date, alt_end_date=None):
    """
    Parse input `date` string in free-text format for four-digit long groups.

    Args:
        date (str): Input containing years.
        alt_end_date (str, default None): Used instead of the ``"9999"``
            placeholder when the range has no end year.

    Returns:
        tuple: ``(from, to)`` as four-digit strings.
    """
    NOT_ENDED = "9999"

    all_years = re.findall(r"\d{4}", date)

    if alt_end_date:
        NOT_ENDED = alt_end_date

    if not all_years:
        return "****", NOT_ENDED
    elif len(all_years) == 1:
        return all_years[0], NOT_ENDED

    return all_years[0], all_years[1]
[ "def", "parse_date_range", "(", "date", ",", "alt_end_date", "=", "None", ")", ":", "NOT_ENDED", "=", "\"9999\"", "all_years", "=", "re", ".", "findall", "(", "r\"\\d{4}\"", ",", "date", ")", "if", "alt_end_date", ":", "NOT_ENDED", "=", "alt_end_date", "if", "not", "all_years", ":", "return", "\"****\"", ",", "NOT_ENDED", "elif", "len", "(", "all_years", ")", "==", "1", ":", "return", "all_years", "[", "0", "]", ",", "NOT_ENDED", "return", "all_years", "[", "0", "]", ",", "all_years", "[", "1", "]" ]
22.869565
0.001825
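Example calls, assuming parse_date_range above is in scope (it needs only the stdlib re module):

import re  # dependency of parse_date_range above

print(parse_date_range("Published 1987, reprinted 2004"))    # ('1987', '2004')
print(parse_date_range("since 1993"))                        # ('1993', '9999')
print(parse_date_range("undated manuscript"))                # ('****', '9999')
print(parse_date_range("since 1993", alt_end_date="2019"))   # ('1993', '2019')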
def sync(self):
    """Retrieve outputs from ElkM1"""
    self.elk.send(cs_encode())
    self.get_descriptions(TextDescriptions.OUTPUT.value)
[ "def", "sync", "(", "self", ")", ":", "self", ".", "elk", ".", "send", "(", "cs_encode", "(", ")", ")", "self", ".", "get_descriptions", "(", "TextDescriptions", ".", "OUTPUT", ".", "value", ")" ]
37
0.013245
def random_split(self, weights):
    """
    Randomly split the ImageFrame according to weights
    :param weights: split fractions, one per resulting ImageFrame
    :return: a list of ImageFrames
    """
    jvalues = self.image_frame.random_split(weights)
    return [ImageFrame(jvalue) for jvalue in jvalues]
[ "def", "random_split", "(", "self", ",", "weights", ")", ":", "jvalues", "=", "self", ".", "image_frame", ".", "random_split", "(", "weights", ")", "return", "[", "ImageFrame", "(", "jvalue", ")", "for", "jvalue", "in", "jvalues", "]" ]
36.125
0.013514
def main():
    """Entry point for the application script"""
    if len(sys.argv) >= 2:
        cmd = sys.argv[1]
        if cmd == "help":
            print_usage()
        elif cmd == "mensa":
            print_menu("Mensa")
        elif cmd == "bistro":
            print_menu("Bistro")
        elif cmd == "cafeteriab":
            print_menu("CB")
        elif cmd == "west":
            print_menu("West")
        elif cmd == "hochschule":
            print_menu("Prittwitzstr")
        elif cmd == "westside":
            print_menu("Diner", True)
        elif cmd == "burgerbar":
            print_menu("Burgerbar", True)
        else:
            print("[ERROR]: No valid place given")
            print_usage()
    else:
        print("[ERROR]: No argument given")
        print_usage()
[ "def", "main", "(", ")", ":", "if", "len", "(", "sys", ".", "argv", ")", ">=", "2", ":", "cmd", "=", "sys", ".", "argv", "[", "1", "]", "if", "cmd", "==", "\"help\"", ":", "print_usage", "(", ")", "else", ":", "if", "cmd", "==", "\"mensa\"", ":", "print_menu", "(", "\"Mensa\"", ")", "elif", "cmd", "==", "\"bistro\"", ":", "print_menu", "(", "\"Bistro\"", ")", "elif", "cmd", "==", "\"cafeteriab\"", ":", "print_menu", "(", "\"CB\"", ")", "elif", "cmd", "==", "\"west\"", ":", "print_menu", "(", "\"West\"", ")", "elif", "cmd", "==", "\"hochschule\"", ":", "print_menu", "(", "\"Prittwitzstr\"", ")", "elif", "cmd", "==", "\"westside\"", ":", "print_menu", "(", "\"Diner\"", ",", "True", ")", "elif", "cmd", "==", "\"burgerbar\"", ":", "print_menu", "(", "\"Burgerbar\"", ",", "True", ")", "else", ":", "print", "(", "\"[ERROR]: No valid place given\"", ")", "print_usage", "(", ")", "else", ":", "print", "(", "\"[ERROR]: No argument given\"", ")", "print_usage", "(", ")" ]
31.481481
0.001142
def append(self, record):
    """
    Adds the passed +record+ to satisfy the query.

    Only intended to be used in conjunction with associations (i.e. do not
    use if self.record is None).

    Intended use case (DO THIS):

        post.comments.append(comment)

    NOT THIS:

        Query(Post).where(content="foo").append(post)
    """
    if self.record:
        self._validate_record(record)
        if self.join_args:
            # As always, the related record is created when the primary
            # record is saved
            build_args = dict(self.where_query)
            # The +final_join+ is what connects the record chain to the
            # passed +record+
            final_join = self.join_args[-2]
            # don't need to worry about one-to-many through because
            # there is not enough information to find or create the
            # joining record
            # i.e. in the Forum -> Thread -> Post example
            # forum.posts.append(post) doesn't make sense since there
            # is no information about what thread it will be attached to
            # Thus, this only makes sense on many-to-many. BUT we still
            # have to consider the case where there is a one-many-many
            # To make that work, we need to treat this the same way as
            # when building a record
            joining_relation = getattr(self.record, final_join['table'])
            # Uses the lookup info in the join to figure out what ids to
            # set, and where to get the id value from
            joining_args = {final_join['on'][0]:
                            getattr(record, final_join['on'][1])}
            build_args.update(joining_args)
            joining_record = joining_relation.build(**build_args)
            self.record._related_records.append(joining_record)
        else:
            # Add our id to their foreign key so that the relationship is
            # created
            setattr(record, foreign_key(record, self.record), self.record.id)
            # Add to the list of related records so that it is saved when
            # the primary record is saved
            self.record._related_records.append(record)
[ "def", "append", "(", "self", ",", "record", ")", ":", "if", "self", ".", "record", ":", "self", ".", "_validate_record", "(", "record", ")", "if", "self", ".", "join_args", ":", "# As always, the related record is created when the primary", "# record is saved", "build_args", "=", "dict", "(", "self", ".", "where_query", ")", "# The +final_join+ is what connects the record chain to the", "# passed +record+", "final_join", "=", "self", ".", "join_args", "[", "-", "2", "]", "# don't need to worry about one-to-many through because", "# there is not enough information to find or create the", "# joining record", "# i.e. in the Forum -> Thread -> Post example", "# forum.posts.append(post) doesn't make sense since there", "# is no information about what thread it will be attached to", "# Thus, this only makes sense on many-to-many. BUT we still", "# have to consider the case where there is a one-many-many", "# To make that work, we need to treat this like when doing", "# building", "joining_relation", "=", "getattr", "(", "self", ".", "record", ",", "final_join", "[", "'table'", "]", ")", "# Uses the lookup info in the join to figure out what ids to", "# set, and where to get the id value from", "joining_args", "=", "{", "final_join", "[", "'on'", "]", "[", "0", "]", ":", "getattr", "(", "record", ",", "final_join", "[", "'on'", "]", "[", "1", "]", ")", "}", "build_args", ".", "update", "(", "joining_args", ")", "joining_record", "=", "joining_relation", ".", "build", "(", "*", "*", "build_args", ")", "self", ".", "record", ".", "_related_records", ".", "append", "(", "joining_record", ")", "else", ":", "# Add our id to their foreign key so that the relationship is", "# created", "setattr", "(", "record", ",", "foreign_key", "(", "record", ",", "self", ".", "record", ")", ",", "self", ".", "record", ".", "id", ")", "# Add to the list of related records so that it is saved when", "# we are", "self", ".", "record", ".", "_related_records", ".", "append", "(", "record", ")" ]
46.36
0.000845
def join_wrapped_lines(lines): """ Join one or multiple lines that wrapped. Returns the reconstructed line. Takes into account proper spacing between the lines (see STRIP_SPACE_CHARS). """ if len(lines) == 1: return lines[0] joined = lines[0] for line in lines[1:]: if joined and joined[-1] in STRIP_SPACE_CHARS: joined += line else: joined += ' ' joined += line return joined
[ "def", "join_wrapped_lines", "(", "lines", ")", ":", "if", "len", "(", "lines", ")", "==", "1", ":", "return", "lines", "[", "0", "]", "joined", "=", "lines", "[", "0", "]", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "if", "joined", "and", "joined", "[", "-", "1", "]", "in", "STRIP_SPACE_CHARS", ":", "joined", "+=", "line", "else", ":", "joined", "+=", "' '", "joined", "+=", "line", "return", "joined" ]
25.388889
0.00211
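A short demo; STRIP_SPACE_CHARS is defined elsewhere in the source module, so the hyphen-only value below is an assumption for illustration.

STRIP_SPACE_CHARS = '-'   # assumed value; the real constant lives in the module

print(join_wrapped_lines(['single line']))       # 'single line'
print(join_wrapped_lines(['foo', 'bar']))        # 'foo bar'
print(join_wrapped_lines(['hyphen-', 'ated']))   # 'hyphen-ated'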
def sphrec(r, colat, lon): """ Convert from spherical coordinates to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sphrec_c.html :param r: Distance of a point from the origin. :type r: float :param colat: Angle of the point from the positive Z-axis. :type colat: float :param lon: Angle of the point from the XZ plane in radians. :type lon: float :return: Rectangular coordinates of the point. :rtype: 3-Element Array of floats """ r = ctypes.c_double(r) colat = ctypes.c_double(colat) lon = ctypes.c_double(lon) rectan = stypes.emptyDoubleVector(3) libspice.sphrec_c(r, colat, lon, rectan) return stypes.cVectorToPython(rectan)
[ "def", "sphrec", "(", "r", ",", "colat", ",", "lon", ")", ":", "r", "=", "ctypes", ".", "c_double", "(", "r", ")", "colat", "=", "ctypes", ".", "c_double", "(", "colat", ")", "lon", "=", "ctypes", ".", "c_double", "(", "lon", ")", "rectan", "=", "stypes", ".", "emptyDoubleVector", "(", "3", ")", "libspice", ".", "sphrec_c", "(", "r", ",", "colat", ",", "lon", ",", "rectan", ")", "return", "stypes", ".", "cVectorToPython", "(", "rectan", ")" ]
34.142857
0.001357
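The conversion itself is elementary trigonometry; a pure-Python cross-check of the CSPICE call, using the same conventions (colat measured from +Z, lon measured from the XZ plane), is sketched below.

import math

def sphrec_py(r, colat, lon):
    # x = r*sin(colat)*cos(lon), y = r*sin(colat)*sin(lon), z = r*cos(colat)
    s = math.sin(colat)
    return [r * s * math.cos(lon), r * s * math.sin(lon), r * math.cos(colat)]

print(sphrec_py(1.0, math.pi / 2, 0.0))   # ~[1, 0, 0]: on the +X axis
print(sphrec_py(2.0, 0.0, 0.0))           # ~[0, 0, 2]: the +Z pole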
def uniform_binned(self, name=None): """ Return a new histogram with constant width bins along all axes by using the bin indices as the bin edges of the new histogram. """ if self.GetDimension() == 1: new_hist = Hist( self.GetNbinsX(), 0, self.GetNbinsX(), name=name, type=self.TYPE) elif self.GetDimension() == 2: new_hist = Hist2D( self.GetNbinsX(), 0, self.GetNbinsX(), self.GetNbinsY(), 0, self.GetNbinsY(), name=name, type=self.TYPE) else: new_hist = Hist3D( self.GetNbinsX(), 0, self.GetNbinsX(), self.GetNbinsY(), 0, self.GetNbinsY(), self.GetNbinsZ(), 0, self.GetNbinsZ(), name=name, type=self.TYPE) # copy over the bin contents and errors for outbin, inbin in zip(new_hist.bins(), self.bins()): outbin.value = inbin.value outbin.error = inbin.error new_hist.decorate(self) new_hist.entries = self.entries return new_hist
[ "def", "uniform_binned", "(", "self", ",", "name", "=", "None", ")", ":", "if", "self", ".", "GetDimension", "(", ")", "==", "1", ":", "new_hist", "=", "Hist", "(", "self", ".", "GetNbinsX", "(", ")", ",", "0", ",", "self", ".", "GetNbinsX", "(", ")", ",", "name", "=", "name", ",", "type", "=", "self", ".", "TYPE", ")", "elif", "self", ".", "GetDimension", "(", ")", "==", "2", ":", "new_hist", "=", "Hist2D", "(", "self", ".", "GetNbinsX", "(", ")", ",", "0", ",", "self", ".", "GetNbinsX", "(", ")", ",", "self", ".", "GetNbinsY", "(", ")", ",", "0", ",", "self", ".", "GetNbinsY", "(", ")", ",", "name", "=", "name", ",", "type", "=", "self", ".", "TYPE", ")", "else", ":", "new_hist", "=", "Hist3D", "(", "self", ".", "GetNbinsX", "(", ")", ",", "0", ",", "self", ".", "GetNbinsX", "(", ")", ",", "self", ".", "GetNbinsY", "(", ")", ",", "0", ",", "self", ".", "GetNbinsY", "(", ")", ",", "self", ".", "GetNbinsZ", "(", ")", ",", "0", ",", "self", ".", "GetNbinsZ", "(", ")", ",", "name", "=", "name", ",", "type", "=", "self", ".", "TYPE", ")", "# copy over the bin contents and errors", "for", "outbin", ",", "inbin", "in", "zip", "(", "new_hist", ".", "bins", "(", ")", ",", "self", ".", "bins", "(", ")", ")", ":", "outbin", ".", "value", "=", "inbin", ".", "value", "outbin", ".", "error", "=", "inbin", ".", "error", "new_hist", ".", "decorate", "(", "self", ")", "new_hist", ".", "entries", "=", "self", ".", "entries", "return", "new_hist" ]
40.851852
0.001771
def cluster_two_diamonds():
    "Start with the wrong number of clusters."
    start_centers = [[0.8, 0.2]]
    template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)
    template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)
[ "def", "cluster_two_diamonds", "(", ")", ":", "start_centers", "=", "[", "[", "0.8", ",", "0.2", "]", "]", "template_clustering", "(", "start_centers", ",", "FCPS_SAMPLES", ".", "SAMPLE_TWO_DIAMONDS", ",", "criterion", "=", "splitting_type", ".", "BAYESIAN_INFORMATION_CRITERION", ")", "template_clustering", "(", "start_centers", ",", "FCPS_SAMPLES", ".", "SAMPLE_TWO_DIAMONDS", ",", "criterion", "=", "splitting_type", ".", "MINIMUM_NOISELESS_DESCRIPTION_LENGTH", ")" ]
74.6
0.018568
def principal_axis_system(self):
    """
    Returns a chemical shielding tensor aligned to the principal axis system
    so that only the 3 diagonal components are non-zero
    """
    return ChemicalShielding(np.diag(np.sort(np.linalg.eigvals(self))))
[ "def", "principal_axis_system", "(", "self", ")", ":", "return", "ChemicalShielding", "(", "np", ".", "diag", "(", "np", ".", "sort", "(", "np", ".", "linalg", ".", "eigvals", "(", "self", ")", ")", ")", ")" ]
44.5
0.011029
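A minimal numpy illustration of the same diagonalization on a symmetric example tensor; for symmetric input, np.linalg.eigvalsh would be the numerically safer choice.

import numpy as np

sigma = np.array([[10.0, 2.0, 0.0],
                  [ 2.0, 5.0, 1.0],
                  [ 0.0, 1.0, 3.0]])   # symmetric example tensor

pas = np.diag(np.sort(np.linalg.eigvals(sigma)))
print(np.round(pas, 3))   # diagonal matrix of the sorted eigenvalues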
def doVerify(self):
    """Process the form submission, initiating OpenID verification.
    """

    # First, make sure that the user entered something
    openid_url = self.query.get('openid_identifier')
    if not openid_url:
        self.render(
            'Enter an OpenID Identifier to verify.',
            css_class='error',
            form_contents=openid_url)
        return

    immediate = 'immediate' in self.query
    use_sreg = 'use_sreg' in self.query
    use_pape = 'use_pape' in self.query
    use_stateless = 'use_stateless' in self.query

    oidconsumer = self.getConsumer(stateless=use_stateless)
    try:
        request = oidconsumer.begin(openid_url)
    except consumer.DiscoveryFailure as exc:
        fetch_error_string = 'Error in discovery: %s' % (
            cgi.escape(str(exc)))
        self.render(
            fetch_error_string,
            css_class='error',
            form_contents=openid_url)
    else:
        if request is None:
            msg = 'No OpenID services found for <code>%s</code>' % (
                cgi.escape(openid_url), )
            self.render(msg, css_class='error', form_contents=openid_url)
        else:
            # Then, ask the library to begin the authorization.
            # Here we find out the identity server that will verify the
            # user's identity, and get a token that allows us to
            # communicate securely with the identity server.
            if use_sreg:
                self.requestRegistrationData(request)

            if use_pape:
                self.requestPAPEDetails(request)

            trust_root = self.server.base_url
            return_to = self.buildURL('process')
            if request.shouldSendRedirect():
                redirect_url = request.redirectURL(
                    trust_root, return_to, immediate=immediate)
                self.send_response(302)
                self.send_header('Location', redirect_url)
                self.writeUserHeader()
                self.end_headers()
            else:
                form_html = request.htmlMarkup(
                    trust_root,
                    return_to,
                    form_tag_attrs={'id': 'openid_message'},
                    immediate=immediate)

                self.wfile.write(bytes(form_html, 'utf-8'))
[ "def", "doVerify", "(", "self", ")", ":", "# First, make sure that the user entered something", "openid_url", "=", "self", ".", "query", ".", "get", "(", "'openid_identifier'", ")", "if", "not", "openid_url", ":", "self", ".", "render", "(", "'Enter an OpenID Identifier to verify.'", ",", "css_class", "=", "'error'", ",", "form_contents", "=", "openid_url", ")", "return", "immediate", "=", "'immediate'", "in", "self", ".", "query", "use_sreg", "=", "'use_sreg'", "in", "self", ".", "query", "use_pape", "=", "'use_pape'", "in", "self", ".", "query", "use_stateless", "=", "'use_stateless'", "in", "self", ".", "query", "oidconsumer", "=", "self", ".", "getConsumer", "(", "stateless", "=", "use_stateless", ")", "try", ":", "request", "=", "oidconsumer", ".", "begin", "(", "openid_url", ")", "except", "consumer", ".", "DiscoveryFailure", "as", "exc", ":", "fetch_error_string", "=", "'Error in discovery: %s'", "%", "(", "cgi", ".", "escape", "(", "str", "(", "exc", ")", ")", ")", "self", ".", "render", "(", "fetch_error_string", ",", "css_class", "=", "'error'", ",", "form_contents", "=", "openid_url", ")", "else", ":", "if", "request", "is", "None", ":", "msg", "=", "'No OpenID services found for <code>%s</code>'", "%", "(", "cgi", ".", "escape", "(", "openid_url", ")", ",", ")", "self", ".", "render", "(", "msg", ",", "css_class", "=", "'error'", ",", "form_contents", "=", "openid_url", ")", "else", ":", "# Then, ask the library to begin the authorization.", "# Here we find out the identity server that will verify the", "# user's identity, and get a token that allows us to", "# communicate securely with the identity server.", "if", "use_sreg", ":", "self", ".", "requestRegistrationData", "(", "request", ")", "if", "use_pape", ":", "self", ".", "requestPAPEDetails", "(", "request", ")", "trust_root", "=", "self", ".", "server", ".", "base_url", "return_to", "=", "self", ".", "buildURL", "(", "'process'", ")", "if", "request", ".", "shouldSendRedirect", "(", ")", ":", "redirect_url", "=", "request", ".", "redirectURL", "(", "trust_root", ",", "return_to", ",", "immediate", "=", "immediate", ")", "self", ".", "send_response", "(", "302", ")", "self", ".", "send_header", "(", "'Location'", ",", "redirect_url", ")", "self", ".", "writeUserHeader", "(", ")", "self", ".", "end_headers", "(", ")", "else", ":", "form_html", "=", "request", ".", "htmlMarkup", "(", "trust_root", ",", "return_to", ",", "form_tag_attrs", "=", "{", "'id'", ":", "'openid_message'", "}", ",", "immediate", "=", "immediate", ")", "self", ".", "wfile", ".", "write", "(", "bytes", "(", "form_html", ",", "'utf-8'", ")", ")" ]
41.083333
0.000792
def setCheckedRecords( self, records ):
    """
    Sets the checked off records to the list of given records.

    :param      records | [<orb.Table>, ..]
    """
    QApplication.sendPostedEvents(self, -1)
    indexes = []

    for i in range(self.count()):
        record = self.recordAt(i)
        if record is not None and record in records:
            indexes.append(i)

    self.setCheckedIndexes(indexes)
[ "def", "setCheckedRecords", "(", "self", ",", "records", ")", ":", "QApplication", ".", "sendPostedEvents", "(", "self", ",", "-", "1", ")", "indexes", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "count", "(", ")", ")", ":", "record", "=", "self", ".", "recordAt", "(", "i", ")", "if", "record", "is", "not", "None", "and", "record", "in", "records", ":", "indexes", ".", "append", "(", "i", ")", "self", ".", "setCheckedIndexes", "(", "indexes", ")" ]
31.666667
0.014315
def reset(self): """ Drops index table. """ query = """ DROP TABLE identifier_index; """ self.backend.library.database.connection.execute(query)
[ "def", "reset", "(", "self", ")", ":", "query", "=", "\"\"\"\n DROP TABLE identifier_index;\n \"\"\"", "self", ".", "backend", ".", "library", ".", "database", ".", "connection", ".", "execute", "(", "query", ")" ]
30.5
0.010638
def _parse_samples_header(self, io_bytes): """ _parse_samples_header: binary data in XBee IO data format -> (int, [int ...], [int ...], int, int) _parse_samples_header will read the first three bytes of the binary data given and will return the number of samples which follow, a list of enabled digital inputs, a list of enabled analog inputs, the dio_mask, and the size of the header in bytes """ header_size = 3 # number of samples (always 1?) is the first byte sample_count = byteToInt(io_bytes[0]) # part of byte 1 and byte 2 are the DIO mask ( 9 bits ) dio_mask = (byteToInt(io_bytes[1]) << 8 | byteToInt(io_bytes[2])) \ & 0x01FF # upper 7 bits of byte 1 is the AIO mask aio_mask = (byteToInt(io_bytes[1]) & 0xFE) >> 1 # sorted lists of enabled channels; value is position of bit in mask dio_chans = [] aio_chans = [] for i in range(0, 9): if dio_mask & (1 << i): dio_chans.append(i) dio_chans.sort() for i in range(0, 7): if aio_mask & (1 << i): aio_chans.append(i) aio_chans.sort() return (sample_count, dio_chans, aio_chans, dio_mask, header_size)
[ "def", "_parse_samples_header", "(", "self", ",", "io_bytes", ")", ":", "header_size", "=", "3", "# number of samples (always 1?) is the first byte", "sample_count", "=", "byteToInt", "(", "io_bytes", "[", "0", "]", ")", "# part of byte 1 and byte 2 are the DIO mask ( 9 bits )", "dio_mask", "=", "(", "byteToInt", "(", "io_bytes", "[", "1", "]", ")", "<<", "8", "|", "byteToInt", "(", "io_bytes", "[", "2", "]", ")", ")", "&", "0x01FF", "# upper 7 bits of byte 1 is the AIO mask", "aio_mask", "=", "(", "byteToInt", "(", "io_bytes", "[", "1", "]", ")", "&", "0xFE", ")", ">>", "1", "# sorted lists of enabled channels; value is position of bit in mask", "dio_chans", "=", "[", "]", "aio_chans", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "9", ")", ":", "if", "dio_mask", "&", "(", "1", "<<", "i", ")", ":", "dio_chans", ".", "append", "(", "i", ")", "dio_chans", ".", "sort", "(", ")", "for", "i", "in", "range", "(", "0", ",", "7", ")", ":", "if", "aio_mask", "&", "(", "1", "<<", "i", ")", ":", "aio_chans", ".", "append", "(", "i", ")", "aio_chans", ".", "sort", "(", ")", "return", "(", "sample_count", ",", "dio_chans", ",", "aio_chans", ",", "dio_mask", ",", "header_size", ")" ]
33.205128
0.0015
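A Python 3 re-expression of the same bit layout with a worked example (bytes() indexing already yields ints, so the byteToInt helper from the original is unnecessary here):

def parse_samples_header(io_bytes):
    sample_count = io_bytes[0]                               # byte 0
    dio_mask = ((io_bytes[1] << 8) | io_bytes[2]) & 0x01FF   # 9 digital bits
    aio_mask = (io_bytes[1] & 0xFE) >> 1                     # 7 analog bits
    dio_chans = [i for i in range(9) if dio_mask & (1 << i)]
    aio_chans = [i for i in range(7) if aio_mask & (1 << i)]
    return sample_count, dio_chans, aio_chans, dio_mask, 3

print(parse_samples_header(bytes([0x01, 0x05, 0x0C])))
# (1, [2, 3, 8], [1], 268, 3)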
def portal_touch_up(self, touch): """Try to create a portal between the spots the user chose.""" try: # If the touch ended upon a spot, and there isn't # already a portal between the origin and this # destination, create one. destspot = next(self.spots_at(*touch.pos)) orig = self.origspot.proxy dest = destspot.proxy if not( orig.name in self.character.portal and dest.name in self.character.portal[orig.name] ): port = self.character.new_portal( orig.name, dest.name ) self.arrowlayout.add_widget( self.make_arrow(port) ) # And another in the opposite direction if needed if ( hasattr(self, 'protoportal2') and not( orig.name in self.character.preportal and dest.name in self.character.preportal[orig.name] ) ): deport = self.character.new_portal( dest.name, orig.name ) self.arrowlayout.add_widget( self.make_arrow(deport) ) except StopIteration: pass self.remove_widget(self.protoportal) if hasattr(self, 'protoportal2'): self.remove_widget(self.protoportal2) del self.protoportal2 self.remove_widget(self.protodest) del self.protoportal del self.protodest
[ "def", "portal_touch_up", "(", "self", ",", "touch", ")", ":", "try", ":", "# If the touch ended upon a spot, and there isn't", "# already a portal between the origin and this", "# destination, create one.", "destspot", "=", "next", "(", "self", ".", "spots_at", "(", "*", "touch", ".", "pos", ")", ")", "orig", "=", "self", ".", "origspot", ".", "proxy", "dest", "=", "destspot", ".", "proxy", "if", "not", "(", "orig", ".", "name", "in", "self", ".", "character", ".", "portal", "and", "dest", ".", "name", "in", "self", ".", "character", ".", "portal", "[", "orig", ".", "name", "]", ")", ":", "port", "=", "self", ".", "character", ".", "new_portal", "(", "orig", ".", "name", ",", "dest", ".", "name", ")", "self", ".", "arrowlayout", ".", "add_widget", "(", "self", ".", "make_arrow", "(", "port", ")", ")", "# And another in the opposite direction if needed", "if", "(", "hasattr", "(", "self", ",", "'protoportal2'", ")", "and", "not", "(", "orig", ".", "name", "in", "self", ".", "character", ".", "preportal", "and", "dest", ".", "name", "in", "self", ".", "character", ".", "preportal", "[", "orig", ".", "name", "]", ")", ")", ":", "deport", "=", "self", ".", "character", ".", "new_portal", "(", "dest", ".", "name", ",", "orig", ".", "name", ")", "self", ".", "arrowlayout", ".", "add_widget", "(", "self", ".", "make_arrow", "(", "deport", ")", ")", "except", "StopIteration", ":", "pass", "self", ".", "remove_widget", "(", "self", ".", "protoportal", ")", "if", "hasattr", "(", "self", ",", "'protoportal2'", ")", ":", "self", ".", "remove_widget", "(", "self", ".", "protoportal2", ")", "del", "self", ".", "protoportal2", "self", ".", "remove_widget", "(", "self", ".", "protodest", ")", "del", "self", ".", "protoportal", "del", "self", ".", "protodest" ]
38.744186
0.001171
def tree(height=3, is_perfect=False): """Generate a random binary tree and return its root node. :param height: Height of the tree (default: 3, range: 0 - 9 inclusive). :type height: int :param is_perfect: If set to True (default: False), a perfect binary tree with all levels filled is returned. If set to False, a perfect binary tree may still be generated by chance. :type is_perfect: bool :return: Root node of the binary tree. :rtype: binarytree.Node :raise binarytree.exceptions.TreeHeightError: If height is invalid. **Example**: .. doctest:: >>> from binarytree import tree >>> >>> root = tree() >>> >>> root.height 3 .. doctest:: >>> from binarytree import tree >>> >>> root = tree(height=5, is_perfect=True) >>> >>> root.height 5 >>> root.is_perfect True .. doctest:: >>> from binarytree import tree >>> >>> root = tree(height=20) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TreeHeightError: height must be an int between 0 - 9 """ _validate_tree_height(height) values = _generate_random_node_values(height) if is_perfect: return build(values) leaf_count = _generate_random_leaf_count(height) root = Node(values.pop(0)) leaves = set() for value in values: node = root depth = 0 inserted = False while depth < height and not inserted: attr = random.choice(('left', 'right')) if getattr(node, attr) is None: setattr(node, attr, Node(value)) inserted = True node = getattr(node, attr) depth += 1 if inserted and depth == height: leaves.add(node) if len(leaves) == leaf_count: break return root
[ "def", "tree", "(", "height", "=", "3", ",", "is_perfect", "=", "False", ")", ":", "_validate_tree_height", "(", "height", ")", "values", "=", "_generate_random_node_values", "(", "height", ")", "if", "is_perfect", ":", "return", "build", "(", "values", ")", "leaf_count", "=", "_generate_random_leaf_count", "(", "height", ")", "root", "=", "Node", "(", "values", ".", "pop", "(", "0", ")", ")", "leaves", "=", "set", "(", ")", "for", "value", "in", "values", ":", "node", "=", "root", "depth", "=", "0", "inserted", "=", "False", "while", "depth", "<", "height", "and", "not", "inserted", ":", "attr", "=", "random", ".", "choice", "(", "(", "'left'", ",", "'right'", ")", ")", "if", "getattr", "(", "node", ",", "attr", ")", "is", "None", ":", "setattr", "(", "node", ",", "attr", ",", "Node", "(", "value", ")", ")", "inserted", "=", "True", "node", "=", "getattr", "(", "node", ",", "attr", ")", "depth", "+=", "1", "if", "inserted", "and", "depth", "==", "height", ":", "leaves", ".", "add", "(", "node", ")", "if", "len", "(", "leaves", ")", "==", "leaf_count", ":", "break", "return", "root" ]
26.319444
0.000509
def set_mode(path, mode): ''' Set the mode of a file This just calls get_mode, which returns None because we don't use mode on Windows Args: path: The path to the file or directory mode: The mode (not used) Returns: None CLI Example: .. code-block:: bash salt '*' file.set_mode /etc/passwd 0644 ''' func_name = '{0}.set_mode'.format(__virtualname__) if __opts__.get('fun', '') == func_name: log.info('The function %s should not be used on Windows systems; ' 'see function docs for details. The value returned is ' 'always None. Use set_perms instead.', func_name) return get_mode(path)
[ "def", "set_mode", "(", "path", ",", "mode", ")", ":", "func_name", "=", "'{0}.set_mode'", ".", "format", "(", "__virtualname__", ")", "if", "__opts__", ".", "get", "(", "'fun'", ",", "''", ")", "==", "func_name", ":", "log", ".", "info", "(", "'The function %s should not be used on Windows systems; '", "'see function docs for details. The value returned is '", "'always None. Use set_perms instead.'", ",", "func_name", ")", "return", "get_mode", "(", "path", ")" ]
25.481481
0.001401
def query_status(self):
    '''Query the hub for the status of this command'''
    try:
        data = self.api_iface._api_get(self.link)
        self._update_details(data)
    except APIError as e:
        print("API error: ")
        for key, value in e.data.items():
            print(str(key) + ": " + str(value))
[ "def", "query_status", "(", "self", ")", ":", "try", ":", "data", "=", "self", ".", "api_iface", ".", "_api_get", "(", "self", ".", "link", ")", "self", ".", "_update_details", "(", "data", ")", "except", "APIError", "as", "e", ":", "print", "(", "\"API error: \"", ")", "for", "key", ",", "value", "in", "e", ".", "data", ".", "iteritems", ":", "print", "(", "str", "(", "key", ")", "+", "\": \"", "+", "str", "(", "value", ")", ")" ]
38
0.008571
def setup_app_scope(name, scope):
    """activate plugins according to the config"""
    # load plugins
    plugins = []
    for plugin_name, active in get('settings').get('rw.plugins', {}).items():
        plugin = __import__(plugin_name)
        plugin_path = plugin_name.split('.')[1:] + ['plugin']
        for sub in plugin_path:
            plugin = getattr(plugin, sub)
        plugins.append(scope.activate(plugin))
    yield plugins
    raise rw.gen.Return(scope['settings'])
[ "def", "setup_app_scope", "(", "name", ",", "scope", ")", ":", "# load plugins", "plugins", "=", "[", "]", "for", "plugin_name", ",", "active", "in", "get", "(", "'settings'", ")", ".", "get", "(", "'rw.plugins'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "plugin", "=", "__import__", "(", "plugin_name", ")", "plugin_path", "=", "plugin_name", ".", "split", "(", "'.'", ")", "[", "1", ":", "]", "+", "[", "'plugin'", "]", "for", "sub", "in", "plugin_path", ":", "plugin", "=", "getattr", "(", "plugin", ",", "sub", ")", "plugins", ".", "append", "(", "scope", ".", "activate", "(", "plugin", ")", ")", "yield", "plugins", "raise", "rw", ".", "gen", ".", "Return", "(", "scope", "[", "'settings'", "]", ")" ]
33.571429
0.00207
def _dataset_info(dataset): """Return information about dataset as a dict.""" info = {} info["uri"] = dataset.uri info["uuid"] = dataset.uuid # Computer and human readable size of dataset. tot_size = sum([dataset.item_properties(i)["size_in_bytes"] for i in dataset.identifiers]) info["size_int"] = tot_size info["size_str"] = sizeof_fmt(tot_size) info["creator"] = dataset._admin_metadata["creator_username"] info["name"] = dataset._admin_metadata["name"] info["date"] = date_fmt(dataset._admin_metadata["frozen_at"]) info["num_items"] = len(dataset.identifiers) info["readme_content"] = dataset.get_readme_content() return info
[ "def", "_dataset_info", "(", "dataset", ")", ":", "info", "=", "{", "}", "info", "[", "\"uri\"", "]", "=", "dataset", ".", "uri", "info", "[", "\"uuid\"", "]", "=", "dataset", ".", "uuid", "# Computer and human readable size of dataset.", "tot_size", "=", "sum", "(", "[", "dataset", ".", "item_properties", "(", "i", ")", "[", "\"size_in_bytes\"", "]", "for", "i", "in", "dataset", ".", "identifiers", "]", ")", "info", "[", "\"size_int\"", "]", "=", "tot_size", "info", "[", "\"size_str\"", "]", "=", "sizeof_fmt", "(", "tot_size", ")", "info", "[", "\"creator\"", "]", "=", "dataset", ".", "_admin_metadata", "[", "\"creator_username\"", "]", "info", "[", "\"name\"", "]", "=", "dataset", ".", "_admin_metadata", "[", "\"name\"", "]", "info", "[", "\"date\"", "]", "=", "date_fmt", "(", "dataset", ".", "_admin_metadata", "[", "\"frozen_at\"", "]", ")", "info", "[", "\"num_items\"", "]", "=", "len", "(", "dataset", ".", "identifiers", ")", "info", "[", "\"readme_content\"", "]", "=", "dataset", ".", "get_readme_content", "(", ")", "return", "info" ]
30
0.001404
def write_membership(self,filename): """ Write a catalog file of the likelihood region including membership properties. Parameters: ----------- filename : output filename Returns: -------- None """ # Column names name_objid = self.config['catalog']['objid_field'] name_mag_1 = self.config['catalog']['mag_1_field'] name_mag_2 = self.config['catalog']['mag_2_field'] name_mag_err_1 = self.config['catalog']['mag_err_1_field'] name_mag_err_2 = self.config['catalog']['mag_err_2_field'] # Coordinate conversion #ra,dec = gal2cel(self.catalog.lon,self.catalog.lat) glon,glat = self.catalog.glon_glat ra,dec = self.catalog.ra_dec # Angular and isochrone separations sep = angsep(self.source.lon,self.source.lat, self.catalog.lon,self.catalog.lat) isosep = self.isochrone.separation(self.catalog.mag_1,self.catalog.mag_2) # If size becomes an issue we can make everything float32 data = odict() data[name_objid] = self.catalog.objid data['GLON'] = glon data['GLAT'] = glat data['RA'] = ra data['DEC'] = dec data[name_mag_1] = self.catalog.mag_1 data[name_mag_err_1] = self.catalog.mag_err_1 data[name_mag_2] = self.catalog.mag_2 data[name_mag_err_2] = self.catalog.mag_err_2 data['COLOR'] = self.catalog.color data['ANGSEP'] = sep.astype(np.float32) data['ISOSEP'] = isosep.astype(np.float32) data['PROB'] = self.p.astype(np.float32) # HIERARCH allows header keywords longer than 8 characters header = [] for param,value in self.source.params.items(): card = dict(name='HIERARCH %s'%param.upper(), value=value.value, comment=param) header.append(card) card = dict(name='HIERARCH %s'%'TS',value=self.ts(), comment='test statistic') header.append(card) card = dict(name='HIERARCH %s'%'TIMESTAMP',value=time.asctime(), comment='creation time') header.append(card) fitsio.write(filename,data,header=header,clobber=True)
[ "def", "write_membership", "(", "self", ",", "filename", ")", ":", "# Column names", "name_objid", "=", "self", ".", "config", "[", "'catalog'", "]", "[", "'objid_field'", "]", "name_mag_1", "=", "self", ".", "config", "[", "'catalog'", "]", "[", "'mag_1_field'", "]", "name_mag_2", "=", "self", ".", "config", "[", "'catalog'", "]", "[", "'mag_2_field'", "]", "name_mag_err_1", "=", "self", ".", "config", "[", "'catalog'", "]", "[", "'mag_err_1_field'", "]", "name_mag_err_2", "=", "self", ".", "config", "[", "'catalog'", "]", "[", "'mag_err_2_field'", "]", "# Coordinate conversion", "#ra,dec = gal2cel(self.catalog.lon,self.catalog.lat)", "glon", ",", "glat", "=", "self", ".", "catalog", ".", "glon_glat", "ra", ",", "dec", "=", "self", ".", "catalog", ".", "ra_dec", "# Angular and isochrone separations", "sep", "=", "angsep", "(", "self", ".", "source", ".", "lon", ",", "self", ".", "source", ".", "lat", ",", "self", ".", "catalog", ".", "lon", ",", "self", ".", "catalog", ".", "lat", ")", "isosep", "=", "self", ".", "isochrone", ".", "separation", "(", "self", ".", "catalog", ".", "mag_1", ",", "self", ".", "catalog", ".", "mag_2", ")", "# If size becomes an issue we can make everything float32", "data", "=", "odict", "(", ")", "data", "[", "name_objid", "]", "=", "self", ".", "catalog", ".", "objid", "data", "[", "'GLON'", "]", "=", "glon", "data", "[", "'GLAT'", "]", "=", "glat", "data", "[", "'RA'", "]", "=", "ra", "data", "[", "'DEC'", "]", "=", "dec", "data", "[", "name_mag_1", "]", "=", "self", ".", "catalog", ".", "mag_1", "data", "[", "name_mag_err_1", "]", "=", "self", ".", "catalog", ".", "mag_err_1", "data", "[", "name_mag_2", "]", "=", "self", ".", "catalog", ".", "mag_2", "data", "[", "name_mag_err_2", "]", "=", "self", ".", "catalog", ".", "mag_err_2", "data", "[", "'COLOR'", "]", "=", "self", ".", "catalog", ".", "color", "data", "[", "'ANGSEP'", "]", "=", "sep", ".", "astype", "(", "np", ".", "float32", ")", "data", "[", "'ISOSEP'", "]", "=", "isosep", ".", "astype", "(", "np", ".", "float32", ")", "data", "[", "'PROB'", "]", "=", "self", ".", "p", ".", "astype", "(", "np", ".", "float32", ")", "# HIERARCH allows header keywords longer than 8 characters", "header", "=", "[", "]", "for", "param", ",", "value", "in", "self", ".", "source", ".", "params", ".", "items", "(", ")", ":", "card", "=", "dict", "(", "name", "=", "'HIERARCH %s'", "%", "param", ".", "upper", "(", ")", ",", "value", "=", "value", ".", "value", ",", "comment", "=", "param", ")", "header", ".", "append", "(", "card", ")", "card", "=", "dict", "(", "name", "=", "'HIERARCH %s'", "%", "'TS'", ",", "value", "=", "self", ".", "ts", "(", ")", ",", "comment", "=", "'test statistic'", ")", "header", ".", "append", "(", "card", ")", "card", "=", "dict", "(", "name", "=", "'HIERARCH %s'", "%", "'TIMESTAMP'", ",", "value", "=", "time", ".", "asctime", "(", ")", ",", "comment", "=", "'creation time'", ")", "header", ".", "append", "(", "card", ")", "fitsio", ".", "write", "(", "filename", ",", "data", ",", "header", "=", "header", ",", "clobber", "=", "True", ")" ]
39.116667
0.013716
def Preserve(self): """This tells the XML Reader to preserve the current node. The caller must also use xmlTextReaderCurrentDoc() to keep an handle on the resulting document once parsing has finished """ ret = libxml2mod.xmlTextReaderPreserve(self._o) if ret is None:raise treeError('xmlTextReaderPreserve() failed') __tmp = xmlNode(_obj=ret) return __tmp
[ "def", "Preserve", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlTextReaderPreserve", "(", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlTextReaderPreserve() failed'", ")", "__tmp", "=", "xmlNode", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
51.125
0.009615
def cmd(send, msg, _): """Generates a meaning for the specified acronym. Syntax: {command} <acronym> """ if not msg: send("What acronym?") return words = get_list() letters = [c for c in msg.lower() if c in string.ascii_lowercase] output = " ".join([choice(words[c]) for c in letters]) if output: send('%s: %s' % (msg, output.title())) else: send("No acronym found for %s" % msg)
[ "def", "cmd", "(", "send", ",", "msg", ",", "_", ")", ":", "if", "not", "msg", ":", "send", "(", "\"What acronym?\"", ")", "return", "words", "=", "get_list", "(", ")", "letters", "=", "[", "c", "for", "c", "in", "msg", ".", "lower", "(", ")", "if", "c", "in", "string", ".", "ascii_lowercase", "]", "output", "=", "\" \"", ".", "join", "(", "[", "choice", "(", "words", "[", "c", "]", ")", "for", "c", "in", "letters", "]", ")", "if", "output", ":", "send", "(", "'%s: %s'", "%", "(", "msg", ",", "output", ".", "title", "(", ")", ")", ")", "else", ":", "send", "(", "\"No acronym found for %s\"", "%", "msg", ")" ]
27.125
0.002227
def get_tag_context(name, state): """ Given a tag name, return its associated value as defined in the current context stack. """ new_contexts = 0 ctm = None while True: try: ctx_key, name = name.split('.', 1) ctm = state.context.get(ctx_key) except ValueError: break if not ctm: break else: state.context.push(ctm) new_contexts += 1 ctm = state.context.get(name) return new_contexts, ctm
[ "def", "get_tag_context", "(", "name", ",", "state", ")", ":", "new_contexts", "=", "0", "ctm", "=", "None", "while", "True", ":", "try", ":", "ctx_key", ",", "name", "=", "name", ".", "split", "(", "'.'", ",", "1", ")", "ctm", "=", "state", ".", "context", ".", "get", "(", "ctx_key", ")", "except", "ValueError", ":", "break", "if", "not", "ctm", ":", "break", "else", ":", "state", ".", "context", ".", "push", "(", "ctm", ")", "new_contexts", "+=", "1", "ctm", "=", "state", ".", "context", ".", "get", "(", "name", ")", "return", "new_contexts", ",", "ctm" ]
23.090909
0.00189
def make_file(self, host):
    """Create the file"""
    url = self.file_url(host)
    body = ','.join([status['ctx'] for status in self.blockStatus])
    self.upload_progress_recorder.delete_upload_record(self.file_name, self.key)
    return self.post(url, body)
[ "def", "make_file", "(", "self", ",", "host", ")", ":", "url", "=", "self", ".", "file_url", "(", "host", ")", "body", "=", "','", ".", "join", "(", "[", "status", "[", "'ctx'", "]", "for", "status", "in", "self", ".", "blockStatus", "]", ")", "self", ".", "upload_progress_recorder", ".", "delete_upload_record", "(", "self", ".", "file_name", ",", "self", ".", "key", ")", "return", "self", ".", "post", "(", "url", ",", "body", ")" ]
44.5
0.011029
def read_output_config (self): """Read configuration options in section "output".""" section = "output" from ..logger import LoggerClasses for c in LoggerClasses: key = c.LoggerName if self.has_section(key): for opt in self.options(key): self.config[key][opt] = self.get(key, opt) if self.has_option(key, 'parts'): val = self.get(key, 'parts') parts = [f.strip().lower() for f in val.split(',')] self.config[key]['parts'] = parts self.read_boolean_option(section, "warnings") if self.has_option(section, "verbose"): if self.getboolean(section, "verbose"): self.config["verbose"] = True self.config["warnings"] = True if self.has_option(section, "quiet"): if self.getboolean(section, "quiet"): self.config['output'] = 'none' self.config['quiet'] = True if self.has_option(section, "debug"): val = self.get(section, "debug") parts = [f.strip().lower() for f in val.split(',')] logconf.set_debug(parts) self.read_boolean_option(section, "status") if self.has_option(section, "log"): val = self.get(section, "log").strip().lower() self.config['output'] = val if self.has_option(section, "fileoutput"): loggers = self.get(section, "fileoutput").split(",") # strip names from whitespace loggers = (x.strip().lower() for x in loggers) # no file output for the blacklist and none Logger from ..logger import LoggerNames loggers = (x for x in loggers if x in LoggerNames and x not in ("blacklist", "none")) for val in loggers: output = self.config.logger_new(val, fileoutput=1) self.config['fileoutput'].append(output)
[ "def", "read_output_config", "(", "self", ")", ":", "section", "=", "\"output\"", "from", ".", ".", "logger", "import", "LoggerClasses", "for", "c", "in", "LoggerClasses", ":", "key", "=", "c", ".", "LoggerName", "if", "self", ".", "has_section", "(", "key", ")", ":", "for", "opt", "in", "self", ".", "options", "(", "key", ")", ":", "self", ".", "config", "[", "key", "]", "[", "opt", "]", "=", "self", ".", "get", "(", "key", ",", "opt", ")", "if", "self", ".", "has_option", "(", "key", ",", "'parts'", ")", ":", "val", "=", "self", ".", "get", "(", "key", ",", "'parts'", ")", "parts", "=", "[", "f", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "f", "in", "val", ".", "split", "(", "','", ")", "]", "self", ".", "config", "[", "key", "]", "[", "'parts'", "]", "=", "parts", "self", ".", "read_boolean_option", "(", "section", ",", "\"warnings\"", ")", "if", "self", ".", "has_option", "(", "section", ",", "\"verbose\"", ")", ":", "if", "self", ".", "getboolean", "(", "section", ",", "\"verbose\"", ")", ":", "self", ".", "config", "[", "\"verbose\"", "]", "=", "True", "self", ".", "config", "[", "\"warnings\"", "]", "=", "True", "if", "self", ".", "has_option", "(", "section", ",", "\"quiet\"", ")", ":", "if", "self", ".", "getboolean", "(", "section", ",", "\"quiet\"", ")", ":", "self", ".", "config", "[", "'output'", "]", "=", "'none'", "self", ".", "config", "[", "'quiet'", "]", "=", "True", "if", "self", ".", "has_option", "(", "section", ",", "\"debug\"", ")", ":", "val", "=", "self", ".", "get", "(", "section", ",", "\"debug\"", ")", "parts", "=", "[", "f", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "f", "in", "val", ".", "split", "(", "','", ")", "]", "logconf", ".", "set_debug", "(", "parts", ")", "self", ".", "read_boolean_option", "(", "section", ",", "\"status\"", ")", "if", "self", ".", "has_option", "(", "section", ",", "\"log\"", ")", ":", "val", "=", "self", ".", "get", "(", "section", ",", "\"log\"", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "self", ".", "config", "[", "'output'", "]", "=", "val", "if", "self", ".", "has_option", "(", "section", ",", "\"fileoutput\"", ")", ":", "loggers", "=", "self", ".", "get", "(", "section", ",", "\"fileoutput\"", ")", ".", "split", "(", "\",\"", ")", "# strip names from whitespace", "loggers", "=", "(", "x", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "x", "in", "loggers", ")", "# no file output for the blacklist and none Logger", "from", ".", ".", "logger", "import", "LoggerNames", "loggers", "=", "(", "x", "for", "x", "in", "loggers", "if", "x", "in", "LoggerNames", "and", "x", "not", "in", "(", "\"blacklist\"", ",", "\"none\"", ")", ")", "for", "val", "in", "loggers", ":", "output", "=", "self", ".", "config", ".", "logger_new", "(", "val", ",", "fileoutput", "=", "1", ")", "self", ".", "config", "[", "'fileoutput'", "]", ".", "append", "(", "output", ")" ]
48.292683
0.001485
def iter(self, offset=0, count=None, pagesize=None, **kwargs): """Iterates over the collection. This method is equivalent to the :meth:`list` method, but it returns an iterator and can load a certain number of entities at a time from the server. :param offset: The index of the first entity to return (optional). :type offset: ``integer`` :param count: The maximum number of entities to return (optional). :type count: ``integer`` :param pagesize: The number of entities to load (optional). :type pagesize: ``integer`` :param kwargs: Additional arguments (optional): - "search" (``string``): The search query to filter responses. - "sort_dir" (``string``): The direction to sort returned items: "asc" or "desc". - "sort_key" (``string``): The field to use for sorting (optional). - "sort_mode" (``string``): The collating sequence for sorting returned items: "auto", "alpha", "alpha_case", or "num". :type kwargs: ``dict`` **Example**:: import splunklib.client as client s = client.connect(...) for saved_search in s.saved_searches.iter(pagesize=10): # Loads 10 saved searches at a time from the # server. ... """ assert pagesize is None or pagesize > 0 if count is None: count = self.null_count fetched = 0 while count == self.null_count or fetched < count: response = self.get(count=pagesize or count, offset=offset, **kwargs) items = self._load_list(response) N = len(items) fetched += N for item in items: yield item if pagesize is None or N < pagesize: break offset += N logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
[ "def", "iter", "(", "self", ",", "offset", "=", "0", ",", "count", "=", "None", ",", "pagesize", "=", "None", ",", "*", "*", "kwargs", ")", ":", "assert", "pagesize", "is", "None", "or", "pagesize", ">", "0", "if", "count", "is", "None", ":", "count", "=", "self", ".", "null_count", "fetched", "=", "0", "while", "count", "==", "self", ".", "null_count", "or", "fetched", "<", "count", ":", "response", "=", "self", ".", "get", "(", "count", "=", "pagesize", "or", "count", ",", "offset", "=", "offset", ",", "*", "*", "kwargs", ")", "items", "=", "self", ".", "_load_list", "(", "response", ")", "N", "=", "len", "(", "items", ")", "fetched", "+=", "N", "for", "item", "in", "items", ":", "yield", "item", "if", "pagesize", "is", "None", "or", "N", "<", "pagesize", ":", "break", "offset", "+=", "N", "logging", ".", "debug", "(", "\"pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s\"", ",", "pagesize", ",", "fetched", ",", "offset", ",", "N", ",", "kwargs", ")" ]
38.921569
0.001966
def get_field_description(f): """Get the type description of a GRPC Message field.""" type_name = get_field_type(f) if type_name == 'MESSAGE' and \ {sf.name for sf in f.message_type.fields} == {'key', 'value'}: return 'map<string, string>' elif type_name == 'MESSAGE': return f.message_type.full_name elif type_name == 'ENUM': return f.enum_type.full_name else: return type_name.lower()
[ "def", "get_field_description", "(", "f", ")", ":", "type_name", "=", "get_field_type", "(", "f", ")", "if", "type_name", "==", "'MESSAGE'", "and", "{", "sf", ".", "name", "for", "sf", "in", "f", ".", "message_type", ".", "fields", "}", "==", "{", "'key'", ",", "'value'", "}", ":", "return", "'map<string, string>'", "elif", "type_name", "==", "'MESSAGE'", ":", "return", "f", ".", "message_type", ".", "full_name", "elif", "type_name", "==", "'ENUM'", ":", "return", "f", ".", "enum_type", ".", "full_name", "else", ":", "return", "type_name", ".", "lower", "(", ")" ]
36.916667
0.002203
def save(self): """ Saves the settings contents """ content = self.dumps() fileutils.save_text_to_file(content, self.file_path)
[ "def", "save", "(", "self", ")", ":", "content", "=", "self", ".", "dumps", "(", ")", "fileutils", ".", "save_text_to_file", "(", "content", ",", "self", ".", "file_path", ")" ]
37
0.013245
def users_request_create(self, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/users#request-user-create" api_path = "/api/v2/users/request_create.json" return self.call(api_path, method="POST", data=data, **kwargs)
[ "def", "users_request_create", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/users/request_create.json\"", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"POST\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
63.75
0.011628
def _get_service_account_info(self): """Retrieve json dict from service account file.""" with open(self.service_account_file, 'r') as f: info = json.load(f) self.service_account_email = info.get('client_email') if not self.service_account_email: raise GCECloudException( 'Service account JSON file is invalid for GCE. ' 'client_email key is expected. See getting started ' 'docs for information on GCE configuration.' ) self.service_account_project = info.get('project_id') if not self.service_account_project: raise GCECloudException( 'Service account JSON file is invalid for GCE. ' 'project_id key is expected. See getting started ' 'docs for information on GCE configuration.' )
[ "def", "_get_service_account_info", "(", "self", ")", ":", "with", "open", "(", "self", ".", "service_account_file", ",", "'r'", ")", "as", "f", ":", "info", "=", "json", ".", "load", "(", "f", ")", "self", ".", "service_account_email", "=", "info", ".", "get", "(", "'client_email'", ")", "if", "not", "self", ".", "service_account_email", ":", "raise", "GCECloudException", "(", "'Service account JSON file is invalid for GCE. '", "'client_email key is expected. See getting started '", "'docs for information on GCE configuration.'", ")", "self", ".", "service_account_project", "=", "info", ".", "get", "(", "'project_id'", ")", "if", "not", "self", ".", "service_account_project", ":", "raise", "GCECloudException", "(", "'Service account JSON file is invalid for GCE. '", "'project_id key is expected. See getting started '", "'docs for information on GCE configuration.'", ")" ]
43.45
0.002252
def next(self): """ Next CapitainsCtsPassage (Interactive CapitainsCtsPassage) """ if self.nextId is not None: return super(CapitainsCtsPassage, self).getTextualNode(subreference=self.nextId)
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "nextId", "is", "not", "None", ":", "return", "super", "(", "CapitainsCtsPassage", ",", "self", ")", ".", "getTextualNode", "(", "subreference", "=", "self", ".", "nextId", ")" ]
44.6
0.013216
def plot_vxz(self, colorbar=True, cb_orientation='vertical',
             cb_label=None, ax=None, show=True, fname=None, **kwargs):
    """
    Plot the Vxz component of the tensor.

    Usage
    -----
    x.plot_vxz([tick_interval, xlabel, ylabel, ax, colorbar,
                cb_orientation, cb_label, show, fname])

    Parameters
    ----------
    tick_interval : list or tuple, optional, default = [30, 30]
        Intervals to use when plotting the x and y ticks. If set to None,
        ticks will not be plotted.
    xlabel : str, optional, default = 'longitude'
        Label for the longitude axis.
    ylabel : str, optional, default = 'latitude'
        Label for the latitude axis.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    colorbar : bool, optional, default = True
        If True, plot a colorbar.
    cb_orientation : str, optional, default = 'vertical'
        Orientation of the colorbar: either 'vertical' or 'horizontal'.
    cb_label : str, optional, default = '$V_{xz}$'
        Text label for the colorbar.
    show : bool, optional, default = True
        If True, plot the image to the screen.
    fname : str, optional, default = None
        If present, and if axes is not specified, save the image to the
        specified file.
    kwargs : optional
        Keyword arguments that will be sent to the SHGrid.plot()
        and plt.imshow() methods.
    """
    if cb_label is None:
        cb_label = self._vxz_label

    if ax is None:
        fig, axes = self.vxz.plot(colorbar=colorbar,
                                  cb_orientation=cb_orientation,
                                  cb_label=cb_label, show=False, **kwargs)
        if show:
            fig.show()

        if fname is not None:
            fig.savefig(fname)
        return fig, axes

    else:
        self.vxz.plot(colorbar=colorbar, cb_orientation=cb_orientation,
                      cb_label=cb_label, ax=ax, **kwargs)
[ "def", "plot_vxz", "(", "self", ",", "colorbar", "=", "True", ",", "cb_orientation", "=", "'vertical'", ",", "cb_label", "=", "None", ",", "ax", "=", "None", ",", "show", "=", "True", ",", "fname", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "cb_label", "is", "None", ":", "cb_label", "=", "self", ".", "_vxz_label", "if", "ax", "is", "None", ":", "fig", ",", "axes", "=", "self", ".", "vxz", ".", "plot", "(", "colorbar", "=", "colorbar", ",", "cb_orientation", "=", "cb_orientation", ",", "cb_label", "=", "cb_label", ",", "show", "=", "False", ",", "*", "*", "kwargs", ")", "if", "show", ":", "fig", ".", "show", "(", ")", "if", "fname", "is", "not", "None", ":", "fig", ".", "savefig", "(", "fname", ")", "return", "fig", ",", "axes", "else", ":", "self", ".", "vxz", ".", "plot", "(", "colorbar", "=", "colorbar", ",", "cb_orientation", "=", "cb_orientation", ",", "cb_label", "=", "cb_label", ",", "ax", "=", "ax", ",", "*", "*", "kwargs", ")" ]
40.641509
0.00136
def _getextra(self): ''' Get the extra data of this struct. ''' current = self while hasattr(current, '_sub'): current = current._sub return getattr(current, '_extra', None)
[ "def", "_getextra", "(", "self", ")", ":", "current", "=", "self", "while", "hasattr", "(", "current", ",", "'_sub'", ")", ":", "current", "=", "current", ".", "_sub", "return", "getattr", "(", "current", ",", "'_extra'", ",", "None", ")" ]
28.25
0.008584
def _lemmatize_token(self, token, best_guess=True, return_frequencies=False):
    """Lemmatize a single token. If best_guess is true, then take the most
    frequent lemma when a form has multiple possible lemmatizations. If
    the form is not found, just return it. If best_guess is false, then
    always return the full set of possible lemmas, or an empty list if
    none found.
    """
    lemmas = self.lemma_dict.get(token.lower(), None)

    if best_guess:
        if lemmas is None:
            lemma = token
        elif len(lemmas) > 1:
            counts = [self.type_counts[word] for word in lemmas]
            lemma = lemmas[argmax(counts)]
        else:
            lemma = lemmas[0]

        if return_frequencies:
            lemma = (lemma, self._relative_frequency(lemma))
    else:
        lemma = [] if lemmas is None else lemmas
        if return_frequencies:
            lemma = [(word, self._relative_frequency(word)) for word in lemma]

    return (token, lemma)
[ "def", "_lemmatize_token", "(", "self", ",", "token", ",", "best_guess", "=", "True", ",", "return_frequencies", "=", "False", ")", ":", "lemmas", "=", "self", ".", "lemma_dict", ".", "get", "(", "token", ".", "lower", "(", ")", ",", "None", ")", "if", "best_guess", "==", "True", ":", "if", "lemmas", "==", "None", ":", "lemma", "=", "token", "elif", "len", "(", "lemmas", ")", ">", "1", ":", "counts", "=", "[", "self", ".", "type_counts", "[", "word", "]", "for", "word", "in", "lemmas", "]", "lemma", "=", "lemmas", "[", "argmax", "(", "counts", ")", "]", "else", ":", "lemma", "=", "lemmas", "[", "0", "]", "if", "return_frequencies", "==", "True", ":", "lemma", "=", "(", "lemma", ",", "self", ".", "_relative_frequency", "(", "lemma", ")", ")", "else", ":", "lemma", "=", "[", "]", "if", "lemmas", "==", "None", "else", "lemmas", "if", "return_frequencies", "==", "True", ":", "lemma", "=", "[", "(", "word", ",", "self", ".", "_relative_frequency", "(", "word", ")", ")", "for", "word", "in", "lemma", "]", "return", "(", "token", ",", "lemma", ")" ]
36.2
0.033369
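The frequency-based disambiguation reduces to an argmax over corpus counts; a self-contained sketch with toy data (the dictionaries are invented for illustration):

from numpy import argmax

lemma_dict = {'saw': ['see', 'saw'], 'dogs': ['dog']}   # toy data
type_counts = {'see': 120, 'saw': 30, 'dog': 75}

def best_lemma(token):
    lemmas = lemma_dict.get(token.lower())
    if not lemmas:
        return token                      # unknown form: return it unchanged
    counts = [type_counts[w] for w in lemmas]
    return lemmas[argmax(counts)]         # most frequent candidate wins

print(best_lemma('saw'))   # 'see' (count 120 beats 30)
print(best_lemma('ran'))   # 'ran' (not in the dictionary)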
def render(self, context, instance, placeholder): ''' Allows this plugin to use templates designed for a list of locations. ''' context = super(LocationPlugin,self).render(context,instance,placeholder) context['location_list'] = [instance.location,] return context
[ "def", "render", "(", "self", ",", "context", ",", "instance", ",", "placeholder", ")", ":", "context", "=", "super", "(", "LocationPlugin", ",", "self", ")", ".", "render", "(", "context", ",", "instance", ",", "placeholder", ")", "context", "[", "'location_list'", "]", "=", "[", "instance", ".", "location", ",", "]", "return", "context" ]
59.2
0.026667
def tune_pair(self, pair): """Tune a pair of images.""" self._save_bm_state() self.pair = pair self.update_disparity_map()
[ "def", "tune_pair", "(", "self", ",", "pair", ")", ":", "self", ".", "_save_bm_state", "(", ")", "self", ".", "pair", "=", "pair", "self", ".", "update_disparity_map", "(", ")" ]
30
0.012987
def start(self, n): """Start n engines by profile or profile_dir.""" self.n = n return super(MPIEngineSetLauncher, self).start(n)
[ "def", "start", "(", "self", ",", "n", ")", ":", "self", ".", "n", "=", "n", "return", "super", "(", "MPIEngineSetLauncher", ",", "self", ")", ".", "start", "(", "n", ")" ]
37.5
0.013072
def get_cache_key(user_or_username, size, prefix):
    """
    Returns a cache key consisting of a username and image size.
    """
    if isinstance(user_or_username, get_user_model()):
        user_or_username = get_username(user_or_username)
    key = six.u('%s_%s_%s') % (prefix, user_or_username, size)
    return six.u('%s_%s') % (slugify(key)[:100],
                             hashlib.md5(force_bytes(key)).hexdigest())
[ "def", "get_cache_key", "(", "user_or_username", ",", "size", ",", "prefix", ")", ":", "if", "isinstance", "(", "user_or_username", ",", "get_user_model", "(", ")", ")", ":", "user_or_username", "=", "get_username", "(", "user_or_username", ")", "key", "=", "six", ".", "u", "(", "'%s_%s_%s'", ")", "%", "(", "prefix", ",", "user_or_username", ",", "size", ")", "return", "six", ".", "u", "(", "'%s_%s'", ")", "%", "(", "slugify", "(", "key", ")", "[", ":", "100", "]", ",", "hashlib", ".", "md5", "(", "force_bytes", "(", "key", ")", ")", ".", "hexdigest", "(", ")", ")" ]
46.555556
0.002342
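The key scheme is a truncated slug plus an md5 digest of the full key, which keeps keys unique even after the 100-character truncation; a framework-free sketch with a simplified stand-in for Django's slugify:

import hashlib
import re

def slugify_ish(value):
    # Simplified stand-in for django.utils.text.slugify.
    return re.sub(r'[^a-z0-9]+', '-', value.lower()).strip('-')

def cache_key(username, size, prefix):
    key = '%s_%s_%s' % (prefix, username, size)
    return '%s_%s' % (slugify_ish(key)[:100],
                      hashlib.md5(key.encode('utf-8')).hexdigest())

print(cache_key('alice', 80, 'avatar'))   # 'avatar-alice-80_<32-hex-digest>'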
async def send_help(self, *args): """send_help(entity=<bot>) |coro| Shows the help command for the specified entity if given. The entity can be a command or a cog. If no entity is given, then it'll show help for the entire bot. If the entity is a string, then it looks up whether it's a :class:`Cog` or a :class:`Command`. .. note:: Due to the way this function works, instead of returning something similar to :meth:`~.commands.HelpCommand.command_not_found` this returns :class:`None` on bad input or no help command. Parameters ------------ entity: Optional[Union[:class:`Command`, :class:`Cog`, :class:`str`]] The entity to show help for. Returns -------- Any The result of the help command, if any. """ from .core import Group, Command bot = self.bot cmd = bot.help_command if cmd is None: return None cmd = cmd.copy() cmd.context = self if len(args) == 0: await cmd.prepare_help_command(self, None) mapping = cmd.get_bot_mapping() return await cmd.send_bot_help(mapping) entity = args[0] if entity is None: return None if isinstance(entity, str): entity = bot.get_cog(entity) or bot.get_command(entity) try: qualified_name = entity.qualified_name except AttributeError: # if we're here then it's not a cog, group, or command. return None await cmd.prepare_help_command(self, entity.qualified_name) if hasattr(entity, '__cog_commands__'): return await cmd.send_cog_help(entity) elif isinstance(entity, Group): return await cmd.send_group_help(entity) elif isinstance(entity, Command): return await cmd.send_command_help(entity) else: return None
[ "async", "def", "send_help", "(", "self", ",", "*", "args", ")", ":", "from", ".", "core", "import", "Group", ",", "Command", "bot", "=", "self", ".", "bot", "cmd", "=", "bot", ".", "help_command", "if", "cmd", "is", "None", ":", "return", "None", "cmd", "=", "cmd", ".", "copy", "(", ")", "cmd", ".", "context", "=", "self", "if", "len", "(", "args", ")", "==", "0", ":", "await", "cmd", ".", "prepare_help_command", "(", "self", ",", "None", ")", "mapping", "=", "cmd", ".", "get_bot_mapping", "(", ")", "return", "await", "cmd", ".", "send_bot_help", "(", "mapping", ")", "entity", "=", "args", "[", "0", "]", "if", "entity", "is", "None", ":", "return", "None", "if", "isinstance", "(", "entity", ",", "str", ")", ":", "entity", "=", "bot", ".", "get_cog", "(", "entity", ")", "or", "bot", ".", "get_command", "(", "entity", ")", "try", ":", "qualified_name", "=", "entity", ".", "qualified_name", "except", "AttributeError", ":", "# if we're here then it's not a cog, group, or command.", "return", "None", "await", "cmd", ".", "prepare_help_command", "(", "self", ",", "entity", ".", "qualified_name", ")", "if", "hasattr", "(", "entity", ",", "'__cog_commands__'", ")", ":", "return", "await", "cmd", ".", "send_cog_help", "(", "entity", ")", "elif", "isinstance", "(", "entity", ",", "Group", ")", ":", "return", "await", "cmd", ".", "send_group_help", "(", "entity", ")", "elif", "isinstance", "(", "entity", ",", "Command", ")", ":", "return", "await", "cmd", ".", "send_command_help", "(", "entity", ")", "else", ":", "return", "None" ]
29.102941
0.001466
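A hedged example of driving send_help from a discord.py command; the bot and command names are assumptions, not part of the record.

@bot.command()
async def support(ctx):
    # No argument: help for the entire bot.
    await ctx.send_help()
    # A string is resolved to a Cog or Command; bad input yields None.
    result = await ctx.send_help('support')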
async def set_digital_latch(self, command):
    """
    This method sets a digital latch for a given digital pin,
    the threshold type, and latching threshold.

    :param command: {"method": "set_digital_latch", "params": [PIN, THRESHOLD (0 or 1)]}

    :returns: {"method": "digital_latch_data_reply", "params": [PIN, DATA_VALUE_LATCHED, TIMESTAMP_STRING]}
    """
    pin = int(command[0])
    threshold_value = int(command[1])
    await self.core.set_digital_latch(pin, threshold_value, self.digital_latch_callback)
[ "async", "def", "set_digital_latch", "(", "self", ",", "command", ")", ":", "pin", "=", "int", "(", "command", "[", "0", "]", ")", "threshold_value", "=", "int", "(", "command", "[", "1", "]", ")", "await", "self", ".", "core", ".", "set_digital_latch", "(", "pin", ",", "threshold_value", ",", "self", ".", "digital_latch_callback", ")" ]
60
0.010949
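A sketch of invoking the handler with the command shape the docstring documents; 'handler' stands in for the owning server object and is an assumption.

# Latch pin 12 when its value crosses to 1 (HIGH); params are
# [PIN, THRESHOLD] per the docstring.
await handler.set_digital_latch([12, 1])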
def command_repo_list(self):
    """Repositories list
    """
    if len(self.args) == 1 and self.args[0] == "repo-list":
        RepoList().repos()
    else:
        usage("")
[ "def", "command_repo_list", "(", "self", ")", ":", "if", "len", "(", "self", ".", "args", ")", "==", "1", "and", "self", ".", "args", "[", "0", "]", "==", "\"repo-list\"", ":", "RepoList", "(", ")", ".", "repos", "(", ")", "else", ":", "usage", "(", "\"\"", ")" ]
27.714286
0.01
def redo(self, channel, image):
    """Add an entry with image modification info."""
    chname = channel.name

    if image is None:
        # shouldn't happen, but let's play it safe
        return

    imname = image.get('name', 'none')
    iminfo = channel.get_image_info(imname)
    timestamp = iminfo.time_modified

    if timestamp is None:
        reason = iminfo.get('reason_modified', None)
        if reason is not None:
            self.fv.show_error(
                "{0} invoked 'modified' callback to ChangeHistory with a "
                "reason but without a timestamp. The plugin invoking the "
                "callback is no longer compatible with Ginga. "
                "Please contact plugin developer to update the plugin "
                "to use self.fv.update_image_info() like Mosaic "
                "plugin.".format(imname))

        # Image somehow lost its history
        self.remove_image_info_cb(self.fv, channel, iminfo)
        return

    self.add_entry(chname, iminfo)
[ "def", "redo", "(", "self", ",", "channel", ",", "image", ")", ":", "chname", "=", "channel", ".", "name", "if", "image", "is", "None", ":", "# shouldn't happen, but let's play it safe", "return", "imname", "=", "image", ".", "get", "(", "'name'", ",", "'none'", ")", "iminfo", "=", "channel", ".", "get_image_info", "(", "imname", ")", "timestamp", "=", "iminfo", ".", "time_modified", "if", "timestamp", "is", "None", ":", "reason", "=", "iminfo", ".", "get", "(", "'reason_modified'", ",", "None", ")", "if", "reason", "is", "not", "None", ":", "self", ".", "fv", ".", "show_error", "(", "\"{0} invoked 'modified' callback to ChangeHistory with a \"", "\"reason but without a timestamp. The plugin invoking the \"", "\"callback is no longer be compatible with Ginga. \"", "\"Please contact plugin developer to update the plugin \"", "\"to use self.fv.update_image_info() like Mosaic \"", "\"plugin.\"", ".", "format", "(", "imname", ")", ")", "# Image somehow lost its history", "self", ".", "remove_image_info_cb", "(", "self", ".", "fv", ",", "channel", ",", "iminfo", ")", "return", "self", ".", "add_entry", "(", "chname", ",", "iminfo", ")" ]
39.777778
0.001818
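A speculative wiring sketch for the redo callback above; the 'modified' callback name appears in the method's own error message, but the registration call itself is an assumption about Ginga's callback API.

# Assumed registration: have the channel call redo() whenever an
# image in it is modified.
channel.add_callback('modified', plugin.redo)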