Dataset columns:

column        dtype     min    max
text          string    75     104k   (string length)
code_tokens   sequence  -      -
avg_line_len  float64   7.91   980
score         float64   0      0.18

Each record below lists these four fields in order: the raw function text, its token sequence, the average line length, and the score.
def _add_vector(self, hash_name, bucket_key, v, data, redis_object):
    '''
    Store vector and JSON-serializable data in bucket with specified key.
    '''
    redis_key = self._format_redis_key(hash_name, bucket_key)
    val_dict = {}

    # Depending on type (sparse or not) fill value dict
    if scipy.sparse.issparse(v):
        # Make sure that we are using COO format (easy to handle)
        if not scipy.sparse.isspmatrix_coo(v):
            v = scipy.sparse.coo_matrix(v)

        # Construct list of [index, value] items,
        # one for each non-zero element of the sparse vector
        encoded_values = []
        for k in range(v.data.size):
            row_index = v.row[k]
            value = v.data[k]
            encoded_values.append([int(row_index), value])

        val_dict['sparse'] = 1
        val_dict['nonzeros'] = encoded_values
        val_dict['dim'] = v.shape[0]
    else:
        # Make sure it is a 1d vector
        v = numpy.reshape(v, v.shape[0])
        val_dict['vector'] = v.tostring()
        val_dict['dtype'] = v.dtype.name

    # Add data if set
    if data is not None:
        val_dict['data'] = data

    # Push JSON representation of dict to end of bucket list
    self.redis_object.rpush(redis_key, pickle.dumps(val_dict, protocol=2))
[ "def", "_add_vector", "(", "self", ",", "hash_name", ",", "bucket_key", ",", "v", ",", "data", ",", "redis_object", ")", ":", "redis_key", "=", "self", ".", "_format_redis_key", "(", "hash_name", ",", "bucket_key", ")", "val_dict", "=", "{", "}", "# Depending on type (sparse or not) fill value dict", "if", "scipy", ".", "sparse", ".", "issparse", "(", "v", ")", ":", "# Make sure that we are using COO format (easy to handle)", "if", "not", "scipy", ".", "sparse", ".", "isspmatrix_coo", "(", "v", ")", ":", "v", "=", "scipy", ".", "sparse", ".", "coo_matrix", "(", "v", ")", "# Construct list of [index, value] items,", "# one for each non-zero element of the sparse vector", "encoded_values", "=", "[", "]", "for", "k", "in", "range", "(", "v", ".", "data", ".", "size", ")", ":", "row_index", "=", "v", ".", "row", "[", "k", "]", "value", "=", "v", ".", "data", "[", "k", "]", "encoded_values", ".", "append", "(", "[", "int", "(", "row_index", ")", ",", "value", "]", ")", "val_dict", "[", "'sparse'", "]", "=", "1", "val_dict", "[", "'nonzeros'", "]", "=", "encoded_values", "val_dict", "[", "'dim'", "]", "=", "v", ".", "shape", "[", "0", "]", "else", ":", "# Make sure it is a 1d vector", "v", "=", "numpy", ".", "reshape", "(", "v", ",", "v", ".", "shape", "[", "0", "]", ")", "val_dict", "[", "'vector'", "]", "=", "v", ".", "tostring", "(", ")", "val_dict", "[", "'dtype'", "]", "=", "v", ".", "dtype", ".", "name", "# Add data if set", "if", "data", "is", "not", "None", ":", "val_dict", "[", "'data'", "]", "=", "data", "# Push JSON representation of dict to end of bucket list", "self", ".", "redis_object", ".", "rpush", "(", "redis_key", ",", "pickle", ".", "dumps", "(", "val_dict", ",", "protocol", "=", "2", ")", ")" ]
35.076923
0.001422
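For context, a minimal standalone sketch of the sparse-encoding step used by `_add_vector` above; only scipy is needed, the Redis plumbing is omitted, and the variable names are illustrative:

import scipy.sparse

# Encode the non-zeros of a sparse vector as [index, value] pairs,
# mirroring the 'nonzeros' payload built in _add_vector above.
v = scipy.sparse.coo_matrix([[0.0], [2.5], [0.0], [1.0]])  # 4-dim sparse vector
encoded_values = [[int(i), val] for i, val in zip(v.row, v.data)]
print(encoded_values)  # [[1, 2.5], [3, 1.0]]
print(v.shape[0])      # 4, stored under 'dim'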
def remove_child(self, idx=None, *, name=None, node=None):
    """Remove a child node from the current node instance.

    :param idx: Index of child node to be removed.
    :type idx: int
    :param name: The first child node found with «name» will be removed.
    :type name: str
    :param node: Child node to be removed.
    :type node: Node
    :returns: The node that has been removed, or False if not successful.
    :rtype: Node or False
    """
    if (idx and isinstance(idx, int)
            and -len(self.childs) <= idx < len(self.childs)):
        return self.childs.pop(idx)
    if name and isinstance(name, str):
        found_node = None
        for _n in self.childs:
            if _n.name == name:
                found_node = _n
                break
        if found_node:
            self.childs.remove(found_node)
            return found_node
    if node and node in self.childs:
        self.childs.remove(node)
        return node
    return False
[ "def", "remove_child", "(", "self", ",", "idx", "=", "None", ",", "*", ",", "name", "=", "None", ",", "node", "=", "None", ")", ":", "if", "(", "idx", "and", "isinstance", "(", "idx", ",", "int", ")", "and", "-", "len", "(", "self", ".", "childs", ")", "<=", "idx", "<", "len", "(", "self", ".", "childs", ")", ")", ":", "return", "self", ".", "childs", ".", "pop", "(", "idx", ")", "if", "name", "and", "isinstance", "(", "name", ",", "str", ")", ":", "found_node", "=", "None", "for", "_n", "in", "self", ".", "childs", ":", "if", "_n", ".", "name", "==", "name", ":", "found_node", "=", "_n", "break", "if", "found_node", ":", "self", ".", "childs", ".", "remove", "(", "found_node", ")", "return", "found_node", "if", "node", "and", "node", "in", "self", ".", "childs", ":", "self", ".", "childs", ".", "remove", "(", "node", ")", "return", "node", "return", "False" ]
37.714286
0.009234
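A usage sketch for the record above; the Node class here is a hypothetical stand-in (only `name` and `childs` are assumed), and `remove_child` is attached as a plain function assumed to be in scope:

class Node:  # hypothetical minimal stand-in for the real Node class
    def __init__(self, name):
        self.name = name
        self.childs = []

Node.remove_child = remove_child  # attach the function from the record above

root = Node('root')
root.childs = [Node('a'), Node('b')]
print(root.remove_child(name='b').name)  # 'b'
print(root.remove_child(idx=5))          # False: index out of range
# Note: because `idx` is truth-tested, index 0 cannot be removed via idx.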
def compare_password(expected, actual):
    """Compare two 64-byte encoded passwords."""
    if expected == actual:
        return True, "OK"
    msg = []
    ver_exp = expected[-8:].rstrip()
    ver_act = actual[-8:].rstrip()
    if expected[:-8] != actual[:-8]:
        msg.append("Password mismatch")
    if ver_exp != ver_act:
        msg.append("asterisk_mbox version mismatch. Client: '" + ver_act +
                   "', Server: '" + ver_exp + "'")
    return False, ". ".join(msg)
[ "def", "compare_password", "(", "expected", ",", "actual", ")", ":", "if", "expected", "==", "actual", ":", "return", "True", ",", "\"OK\"", "msg", "=", "[", "]", "ver_exp", "=", "expected", "[", "-", "8", ":", "]", ".", "rstrip", "(", ")", "ver_act", "=", "actual", "[", "-", "8", ":", "]", ".", "rstrip", "(", ")", "if", "expected", "[", ":", "-", "8", "]", "!=", "actual", "[", ":", "-", "8", "]", ":", "msg", ".", "append", "(", "\"Password mismatch\"", ")", "if", "ver_exp", "!=", "ver_act", ":", "msg", ".", "append", "(", "\"asterisk_mbox version mismatch. Client: '\"", "+", "ver_act", "+", "\"', Server: '\"", "+", "ver_exp", "+", "\"'\"", ")", "return", "False", ",", "\". \"", ".", "join", "(", "msg", ")" ]
34.142857
0.002037
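The function treats the last 8 bytes of each 64-byte blob as a space-padded version tag and everything before that as the password hash proper. A small illustration, assuming the function above is in scope (the values are made up):

expected = 'x' * 56 + '0.5.0   '   # hash + server version tag
actual   = 'x' * 56 + '0.4.9   '   # same hash, older client version
print(compare_password(expected, actual))
# (False, "asterisk_mbox version mismatch. Client: '0.4.9', Server: '0.5.0'")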
def parse_include(self, node):
    """
    Parses <Include>

    @param node: Node containing the <Include> element
    @type node: xml.etree.Element

    @raise ParseError: Raised when the file to be included is not specified.
    """
    if not self.include_includes:
        if self.model.debug:
            print("Ignoring included LEMS file: %s"%node.lattrib['file'])
    else:
        #TODO: remove this hard coding for reading NeuroML includes...
        if 'file' not in node.lattrib:
            if 'href' in node.lattrib:
                self.model.include_file(node.lattrib['href'], self.include_dirs)
                return
            else:
                self.raise_error('<Include> must specify the file to be included.')
        self.model.include_file(node.lattrib['file'], self.include_dirs)
[ "def", "parse_include", "(", "self", ",", "node", ")", ":", "if", "not", "self", ".", "include_includes", ":", "if", "self", ".", "model", ".", "debug", ":", "print", "(", "\"Ignoring included LEMS file: %s\"", "%", "node", ".", "lattrib", "[", "'file'", "]", ")", "else", ":", "#TODO: remove this hard coding for reading NeuroML includes...", "if", "'file'", "not", "in", "node", ".", "lattrib", ":", "if", "'href'", "in", "node", ".", "lattrib", ":", "self", ".", "model", ".", "include_file", "(", "node", ".", "lattrib", "[", "'href'", "]", ",", "self", ".", "include_dirs", ")", "return", "else", ":", "self", ".", "raise_error", "(", "'<Include> must specify the file to be included.'", ")", "self", ".", "model", ".", "include_file", "(", "node", ".", "lattrib", "[", "'file'", "]", ",", "self", ".", "include_dirs", ")" ]
38.545455
0.011507
def server_systems(self):
    """ Retrieve a list of available systems. """
    response = self._post(self.apiurl + "/v2/server/systems",
                          data={'apikey': self.apikey})
    return self._raise_or_extract(response)
[ "def", "server_systems", "(", "self", ")", ":", "response", "=", "self", ".", "_post", "(", "self", ".", "apiurl", "+", "\"/v2/server/systems\"", ",", "data", "=", "{", "'apikey'", ":", "self", ".", "apikey", "}", ")", "return", "self", ".", "_raise_or_extract", "(", "response", ")" ]
33.428571
0.0125
def _check_region_for_parsing(number, default_region):
    """Checks to see that the region code used is valid, or if it is not
    valid, that the number to parse starts with a + symbol so that we can
    attempt to infer the region from the number.

    Returns False if it cannot use the region provided and the region cannot
    be inferred.
    """
    if not _is_valid_region_code(default_region):
        # If the number is None or empty, we can't infer the region.
        if number is None or len(number) == 0:
            return False
        match = _PLUS_CHARS_PATTERN.match(number)
        if match is None:
            return False
    return True
[ "def", "_check_region_for_parsing", "(", "number", ",", "default_region", ")", ":", "if", "not", "_is_valid_region_code", "(", "default_region", ")", ":", "# If the number is None or empty, we can't infer the region.", "if", "number", "is", "None", "or", "len", "(", "number", ")", "==", "0", ":", "return", "False", "match", "=", "_PLUS_CHARS_PATTERN", ".", "match", "(", "number", ")", "if", "match", "is", "None", ":", "return", "False", "return", "True" ]
46
0.001522
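The record depends on two module-level helpers from its library (python-phonenumbers). A sketch with hypothetical stand-ins so the branches can be exercised; the real pattern and region table differ:

import re

_PLUS_CHARS_PATTERN = re.compile(r'\+')  # stand-in; the real pattern also covers fullwidth '＋'

def _is_valid_region_code(code):         # stand-in with a toy region table
    return code in {'US', 'GB'}

print(_check_region_for_parsing('+14155550100', None))  # True: leading '+'
print(_check_region_for_parsing('4155550100', None))    # False: no region, no '+'
print(_check_region_for_parsing('4155550100', 'US'))    # True: valid region given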
def add_symbol(self, name, string=None):
    """
    Add a symbol with key `name` to `scipy_data_fitting.Model.symbols`.

    Optionally, specify an alternative `string` to pass to [`sympy.Symbol`][1],
    otherwise `name` is used.

    [1]: http://docs.sympy.org/dev/modules/core.html#id4
    """
    if not string:
        string = name
    self.symbols[name] = sympy.Symbol(string)
[ "def", "add_symbol", "(", "self", ",", "name", ",", "string", "=", "None", ")", ":", "if", "not", "string", ":", "string", "=", "name", "self", ".", "symbols", "[", "name", "]", "=", "sympy", ".", "Symbol", "(", "string", ")" ]
39.8
0.009828
def tfmer_clas_split(model:nn.Module) -> List[nn.Module]:
    "Split an RNN `model` in groups for differential learning rates."
    encoder = model[0].module
    n = len(encoder.layers)//3
    groups = [[encoder.encoder], list(encoder.layers[:n]),
              list(encoder.layers[n:2*n]), list(encoder.layers[2*n:])]
    return groups + [[model[1]]]
[ "def", "tfmer_clas_split", "(", "model", ":", "nn", ".", "Module", ")", "->", "List", "[", "nn", ".", "Module", "]", ":", "encoder", "=", "model", "[", "0", "]", ".", "module", "n", "=", "len", "(", "encoder", ".", "layers", ")", "//", "3", "groups", "=", "[", "[", "encoder", ".", "encoder", "]", ",", "list", "(", "encoder", ".", "layers", "[", ":", "n", "]", ")", ",", "list", "(", "encoder", ".", "layers", "[", "n", ":", "2", "*", "n", "]", ")", ",", "list", "(", "encoder", ".", "layers", "[", "2", "*", "n", ":", "]", ")", "]", "return", "groups", "+", "[", "[", "model", "[", "1", "]", "]", "]" ]
55.166667
0.008929
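A sketch of the splitter on a dummy module mirroring the structure it expects (`model[0].module` exposing `.encoder` and `.layers`, `model[1]` as the head); both classes here are hypothetical stand-ins, and the function above is assumed to be in scope:

import torch.nn as nn

class Enc(nn.Module):  # hypothetical encoder with 6 layers
    def __init__(self):
        super().__init__()
        self.encoder = nn.Embedding(10, 8)
        self.layers = nn.ModuleList(nn.Linear(8, 8) for _ in range(6))

class Wrap(nn.Module):  # mimics the .module wrapper the splitter unwraps
    def __init__(self, m):
        super().__init__()
        self.module = m

model = nn.Sequential(Wrap(Enc()), nn.Linear(8, 2))
print(len(tfmer_clas_split(model)))  # 5 groups: embedding, 3 layer thirds, head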
def dispatch_hook(key, hooks, hook_data, **kwargs):
    """Dispatches a hook dictionary on a given piece of data."""
    hooks = hooks or {}
    hooks = hooks.get(key)
    if hooks:
        if hasattr(hooks, '__call__'):
            hooks = [hooks]
        for hook in hooks:
            _hook_data = hook(hook_data, **kwargs)
            if _hook_data is not None:
                hook_data = _hook_data
    return hook_data
[ "def", "dispatch_hook", "(", "key", ",", "hooks", ",", "hook_data", ",", "*", "*", "kwargs", ")", ":", "hooks", "=", "hooks", "or", "{", "}", "hooks", "=", "hooks", ".", "get", "(", "key", ")", "if", "hooks", ":", "if", "hasattr", "(", "hooks", ",", "'__call__'", ")", ":", "hooks", "=", "[", "hooks", "]", "for", "hook", "in", "hooks", ":", "_hook_data", "=", "hook", "(", "hook_data", ",", "*", "*", "kwargs", ")", "if", "_hook_data", "is", "not", "None", ":", "hook_data", "=", "_hook_data", "return", "hook_data" ]
34.5
0.002353
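A quick usage sketch, assuming `dispatch_hook` above is in scope; hooks may be a single callable or a list, and each may return a replacement value:

hooks = {'response': [lambda data, **kw: data.upper(),
                      lambda data, **kw: data + '!']}
print(dispatch_hook('response', hooks, 'ok'))  # 'OK!' (hooks applied in order)
print(dispatch_hook('missing', hooks, 'ok'))   # 'ok'  (no hooks for that key)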
def run_cli_options(args):
    """
    Quick implementation of Python interpreter's -m, -c and file execution.
    The resulting dictionary is imported into global namespace, just in case
    someone is using interactive mode. We try to keep argument order as to
    pass them correctly to the subcommands.
    """
    if _interactive_mode(args.interactive):
        os.environ['PYTHONINSPECT'] = '1'
    if in_ipython():
        return
    exclusive_choices = [[None, args.command], ['-c', args.string],
                         ['-m', args.module]]
    for flag_choice in exclusive_choices:
        try:
            a = sys.argv.index(flag_choice[0] or flag_choice[1])
        except ValueError:
            a = 1000
        flag_choice.append(a)
    exclusive_choices.sort(key=lambda v: v[2])
    for i, (flag, choice, _) in enumerate(exclusive_choices):
        if not choice:
            continue
        sys.argv = [choice] + sys.argv[sys.argv.index(choice)+1:]
        if not flag:
            if choice == 'ipython':
                launch_ipython(argv=sys.argv[1:])
            elif choice == 'notebook':
                launch_notebook()
            else:
                globals().update(runpy.run_path(choice, run_name="__main__"))
        elif flag == '-m':
            if '--' in sys.argv[1:2]:  # -m syntax needs '--' for extra args
                sys.argv.pop(1)
            globals().update(runpy.run_module(choice, run_name="__main__"))
        elif flag == '-c':
            exec choice in globals(), locals()  # workaround
        else:
            continue
        break
[ "def", "run_cli_options", "(", "args", ")", ":", "if", "_interactive_mode", "(", "args", ".", "interactive", ")", ":", "os", ".", "environ", "[", "'PYTHONINSPECT'", "]", "=", "'1'", "if", "in_ipython", "(", ")", ":", "return", "exclusive_choices", "=", "[", "[", "None", ",", "args", ".", "command", "]", ",", "[", "'-c'", ",", "args", ".", "string", "]", ",", "[", "'-m'", ",", "args", ".", "module", "]", "]", "for", "flag_choice", "in", "exclusive_choices", ":", "try", ":", "a", "=", "sys", ".", "argv", ".", "index", "(", "flag_choice", "[", "0", "]", "or", "flag_choice", "[", "1", "]", ")", "except", "ValueError", ":", "a", "=", "1000", "flag_choice", ".", "append", "(", "a", ")", "exclusive_choices", ".", "sort", "(", "key", "=", "lambda", "v", ":", "v", "[", "2", "]", ")", "for", "i", ",", "(", "flag", ",", "choice", ",", "_", ")", "in", "enumerate", "(", "exclusive_choices", ")", ":", "if", "not", "choice", ":", "continue", "sys", ".", "argv", "=", "[", "choice", "]", "+", "sys", ".", "argv", "[", "sys", ".", "argv", ".", "index", "(", "choice", ")", "+", "1", ":", "]", "if", "not", "flag", ":", "if", "choice", "==", "'ipython'", ":", "launch_ipython", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", "elif", "choice", "==", "'notebook'", ":", "launch_notebook", "(", ")", "else", ":", "globals", "(", ")", ".", "update", "(", "runpy", ".", "run_path", "(", "choice", ",", "run_name", "=", "\"__main__\"", ")", ")", "elif", "flag", "==", "'-m'", ":", "if", "'--'", "in", "sys", ".", "argv", "[", "1", ":", "2", "]", ":", "# -m syntax needs '--' for extra args", "sys", ".", "argv", ".", "pop", "(", "1", ")", "globals", "(", ")", ".", "update", "(", "runpy", ".", "run_module", "(", "choice", ",", "run_name", "=", "\"__main__\"", ")", ")", "elif", "flag", "==", "'-c'", ":", "exec", "choice", "in", "globals", "(", ")", ",", "locals", "(", ")", "# workaround", "else", ":", "continue", "break" ]
38.25
0.001912
def _validate_file_roots(file_roots):
    '''
    If the file_roots option has a key that is None then we will error out,
    just replace it with an empty list
    '''
    if not isinstance(file_roots, dict):
        log.warning('The file_roots parameter is not properly formatted,'
                    ' using defaults')
        return {'base': _expand_glob_path([salt.syspaths.BASE_FILE_ROOTS_DIR])}
    return _normalize_roots(file_roots)
[ "def", "_validate_file_roots", "(", "file_roots", ")", ":", "if", "not", "isinstance", "(", "file_roots", ",", "dict", ")", ":", "log", ".", "warning", "(", "'The file_roots parameter is not properly formatted,'", "' using defaults'", ")", "return", "{", "'base'", ":", "_expand_glob_path", "(", "[", "salt", ".", "syspaths", ".", "BASE_FILE_ROOTS_DIR", "]", ")", "}", "return", "_normalize_roots", "(", "file_roots", ")" ]
43.3
0.002262
def get_args(get_item):
    """Parse env, key, default out of input dict.

    Args:
        get_item: dict. contains keys env/key/default

    Returns:
        (env, key, has_default, default) tuple, where
            env: str. env var name.
            key: str. save env value to this context key.
            has_default: bool. True if default specified.
            default: the value of default, if specified.

    Raises:
        ContextError: envGet is not a list of dicts.
        KeyNotInContextError: If env or key not found in get_config.
    """
    if not isinstance(get_item, dict):
        raise ContextError('envGet must contain a list of dicts.')

    env = get_item.get('env', None)

    if not env:
        raise KeyNotInContextError(
            'context envGet[env] must exist in context for envGet.')

    key = get_item.get('key', None)

    if not key:
        raise KeyNotInContextError(
            'context envGet[key] must exist in context for envGet.')

    if 'default' in get_item:
        has_default = True
        default = get_item['default']
    else:
        has_default = False
        default = None

    return (env, key, has_default, default)
[ "def", "get_args", "(", "get_item", ")", ":", "if", "not", "isinstance", "(", "get_item", ",", "dict", ")", ":", "raise", "ContextError", "(", "'envGet must contain a list of dicts.'", ")", "env", "=", "get_item", ".", "get", "(", "'env'", ",", "None", ")", "if", "not", "env", ":", "raise", "KeyNotInContextError", "(", "'context envGet[env] must exist in context for envGet.'", ")", "key", "=", "get_item", ".", "get", "(", "'key'", ",", "None", ")", "if", "not", "key", ":", "raise", "KeyNotInContextError", "(", "'context envGet[key] must exist in context for envGet.'", ")", "if", "'default'", "in", "get_item", ":", "has_default", "=", "True", "default", "=", "get_item", "[", "'default'", "]", "else", ":", "has_default", "=", "False", "default", "=", "None", "return", "(", "env", ",", "key", ",", "has_default", ",", "default", ")" ]
27.926829
0.000844
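A usage sketch; the error classes come from pypyr, and the import path below is an assumption:

from pypyr.errors import ContextError, KeyNotInContextError  # assumed import path

get_item = {'env': 'HOME', 'key': 'home_dir', 'default': '/tmp'}
print(get_args(get_item))      # ('HOME', 'home_dir', True, '/tmp')
get_args({'key': 'home_dir'})  # raises KeyNotInContextError: 'env' missing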
def rsem_calculate_expression(bam_file, rsem_genome_dir, samplename, build,
                              out_dir, cores=1):
    """
    works only in unstranded mode for now (--forward-prob 0.5)
    """
    if not utils.which("rsem-calculate-expression"):
        logger.info("Skipping RSEM because rsem-calculate-expression could "
                    "not be found.")
        return None
    sentinel_file = os.path.join(out_dir, samplename + "Test.genes.results")
    if utils.file_exists(sentinel_file):
        return out_dir
    paired_flag = "--paired" if bam.is_paired(bam_file) else ""
    core_flag = "-p {cores}".format(cores=cores)
    command = CALCULATE_EXP.format(core_flag=core_flag,
                                   paired_flag=paired_flag,
                                   bam_file=bam_file,
                                   rsem_genome_dir=rsem_genome_dir,
                                   build=build, samplename=samplename)
    message = "Calculating transcript expression of {bam_file} using RSEM."
    with transaction.file_transaction(out_dir) as tx_out_dir:
        utils.safe_makedir(tx_out_dir)
        with utils.chdir(tx_out_dir):
            do.run(command, message.format(bam_file=bam_file))
    return out_dir
[ "def", "rsem_calculate_expression", "(", "bam_file", ",", "rsem_genome_dir", ",", "samplename", ",", "build", ",", "out_dir", ",", "cores", "=", "1", ")", ":", "if", "not", "utils", ".", "which", "(", "\"rsem-calculate-expression\"", ")", ":", "logger", ".", "info", "(", "\"Skipping RSEM because rsem-calculate-expression could \"", "\"not be found.\"", ")", "return", "None", "sentinel_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "samplename", "+", "\"Test.genes.results\"", ")", "if", "utils", ".", "file_exists", "(", "sentinel_file", ")", ":", "return", "out_dir", "paired_flag", "=", "\"--paired\"", "if", "bam", ".", "is_paired", "(", "bam_file", ")", "else", "\"\"", "core_flag", "=", "\"-p {cores}\"", ".", "format", "(", "cores", "=", "cores", ")", "command", "=", "CALCULATE_EXP", ".", "format", "(", "core_flag", "=", "core_flag", ",", "paired_flag", "=", "paired_flag", ",", "bam_file", "=", "bam_file", ",", "rsem_genome_dir", "=", "rsem_genome_dir", ",", "build", "=", "build", ",", "samplename", "=", "samplename", ")", "message", "=", "\"Calculating transcript expression of {bam_file} using RSEM.\"", "with", "transaction", ".", "file_transaction", "(", "out_dir", ")", "as", "tx_out_dir", ":", "utils", ".", "safe_makedir", "(", "tx_out_dir", ")", "with", "utils", ".", "chdir", "(", "tx_out_dir", ")", ":", "do", ".", "run", "(", "command", ",", "message", ".", "format", "(", "bam_file", "=", "bam_file", ")", ")", "return", "out_dir" ]
42.5
0.000885
def authenticated_request(self, endpoint, method='GET', params=None, data=None):
    '''
    Send a request to the given Wunderlist API with 'X-Access-Token' and
    'X-Client-ID' headers and ensure the response code is as expected given
    the request type

    Params:
    endpoint -- API endpoint to send request to

    Keyword Args:
    method -- GET, PUT, PATCH, DELETE, etc.
    params -- parameters to encode in the request
    data -- data to send with the request
    '''
    headers = {
        'X-Access-Token': self.access_token,
        'X-Client-ID': self.client_id
    }
    return self.api.request(endpoint, method=method, headers=headers,
                            params=params, data=data)
[ "def", "authenticated_request", "(", "self", ",", "endpoint", ",", "method", "=", "'GET'", ",", "params", "=", "None", ",", "data", "=", "None", ")", ":", "headers", "=", "{", "'X-Access-Token'", ":", "self", ".", "access_token", ",", "'X-Client-ID'", ":", "self", ".", "client_id", "}", "return", "self", ".", "api", ".", "request", "(", "endpoint", ",", "method", "=", "method", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "data", "=", "data", ")" ]
43.117647
0.009346
def calc_zscale(self, data, contrast=0.25, num_points=1000,
                num_per_row=None):
    """
    From the IRAF documentation:

    The zscale algorithm is designed to display the image values near the
    median image value without the time consuming process of computing a
    full image histogram.  This is particularly useful for astronomical
    images which generally have a very peaked histogram corresponding to
    the background sky in direct imaging or the continuum in a two
    dimensional spectrum.

    The sample of pixels, specified by values greater than zero in the
    sample mask zmask or by an image section, is selected up to a maximum
    of nsample pixels.  If a bad pixel mask is specified by the bpmask
    parameter then any pixels with mask values which are greater than zero
    are not counted in the sample.  Only the first pixels up to the limit
    are selected where the order is by line beginning from the first line.
    If no mask is specified then a grid of pixels with even spacing along
    lines and columns that make up a number less than or equal to the
    maximum sample size is used.

    If a contrast of zero is specified (or the zrange flag is used and the
    image does not have a valid minimum/maximum value) then the minimum
    and maximum of the sample is used for the intensity mapping range.

    If the contrast is not zero the sample pixels are ranked in brightness
    to form the function I(i), where i is the rank of the pixel and I is
    its value.  Generally the midpoint of this function (the median) is
    very near the peak of the image histogram and there is a well defined
    slope about the midpoint which is related to the width of the
    histogram.  At the ends of the I(i) function there are a few very
    bright and dark pixels due to objects and defects in the field.  To
    determine the slope a linear function is fit with iterative rejection;

        I(i) = intercept + slope * (i - midpoint)

    If more than half of the points are rejected then there is no well
    defined slope and the full range of the sample defines z1 and z2.
    Otherwise the endpoints of the linear function are used (provided they
    are within the original range of the sample):

        z1 = I(midpoint) + (slope / contrast) * (1 - midpoint)
        z2 = I(midpoint) + (slope / contrast) * (npoints - midpoint)

    As can be seen, the parameter contrast may be used to adjust the
    contrast produced by this algorithm.
    """
    assert len(data.shape) >= 2, \
        AutoCutsError("input data should be 2D or greater")
    ht, wd = data.shape[:2]

    assert (0.0 < contrast <= 1.0), \
        AutoCutsError("contrast (%.2f) not in range 0 < c <= 1" % (
            contrast))

    # calculate num_points parameter, if omitted
    total_points = np.size(data)
    if num_points is None:
        num_points = max(int(total_points * 0.0002), 600)
    num_points = min(num_points, total_points)

    assert (0 < num_points <= total_points), \
        AutoCutsError("num_points not in range 0-%d" % (total_points))

    # calculate num_per_row parameter, if omitted
    if num_per_row is None:
        num_per_row = max(int(0.015 * num_points), 1)
    self.logger.debug("contrast=%.4f num_points=%d num_per_row=%d" % (
        contrast, num_points, num_per_row))

    # sample the data
    num_rows = num_points // num_per_row

    xmax = wd - 1
    xskip = max(xmax // num_per_row, 1)
    ymax = ht - 1
    yskip = max(ymax // num_rows, 1)
    # evenly spaced sampling over rows and cols
    ## xskip = int(max(1.0, np.sqrt(xmax * ymax / float(num_points))))
    ## yskip = xskip

    cutout = data[0:ymax:yskip, 0:xmax:xskip]
    # flatten and trim off excess
    cutout = cutout.flat[0:num_points]

    # actual number of points selected
    num_pix = len(cutout)
    assert num_pix <= num_points, \
        AutoCutsError("Actual number of points (%d) exceeds calculated "
                      "number (%d)" % (num_pix, num_points))

    # sort the data by value
    cutout = np.sort(cutout)

    # flat distribution?
    data_min = np.nanmin(cutout)
    data_max = np.nanmax(cutout)
    if (data_min == data_max) or (contrast == 0.0):
        return (data_min, data_max)

    # compute the midpoint and median
    midpoint = (num_pix // 2)
    if num_pix % 2 != 0:
        median = cutout[midpoint]
    else:
        median = 0.5 * (cutout[midpoint - 1] + cutout[midpoint])
    self.logger.debug("num_pix=%d midpoint=%d median=%.4f" % (
        num_pix, midpoint, median))

    ## # Remove outliers to aid fitting
    ## threshold = np.std(cutout) * 2.5
    ## cutout = cutout[np.where(np.fabs(cutout - median) > threshold)]
    ## num_pix = len(cutout)

    # zscale fitting function:
    # I(x) = slope * (x - midpoint) + intercept
    def fitting(x, slope, intercept):
        y = slope * (x - midpoint) + intercept
        return y

    # compute a least squares fit
    X = np.arange(num_pix)
    Y = cutout
    sigma = np.array([1.0] * num_pix)
    guess = np.array([0.0, 0.0])

    # Curve fit
    with _lock:
        # NOTE: without this mutex, optimize.curvefit causes a fatal error
        # sometimes--it appears not to be thread safe.
        # The error is:
        # "SystemError: null argument to internal routine"
        # "Fatal Python error: GC object already tracked"
        try:
            p, cov = optimize.curve_fit(fitting, X, Y, guess, sigma)

        except Exception as e:
            self.logger.debug("curve fitting failed: %s" % (str(e)))
            cov = None

    if cov is None:
        self.logger.debug("curve fitting failed")
        return (float(data_min), float(data_max))

    slope, intercept = p
    ## num_chosen = 0
    self.logger.debug("intercept=%f slope=%f" % (
        intercept, slope))

    ## if num_chosen < (num_pix // 2):
    ##     self.logger.debug("more than half pixels rejected--falling back to min/max of sample")
    ##     return (data_min, data_max)

    # finally, compute the range
    falloff = slope / contrast
    z1 = median - midpoint * falloff
    z2 = median + (num_pix - midpoint) * falloff

    # final sanity check on cut levels
    locut = max(z1, data_min)
    hicut = min(z2, data_max)
    if locut >= hicut:
        locut = data_min
        hicut = data_max

    return (float(locut), float(hicut))
[ "def", "calc_zscale", "(", "self", ",", "data", ",", "contrast", "=", "0.25", ",", "num_points", "=", "1000", ",", "num_per_row", "=", "None", ")", ":", "assert", "len", "(", "data", ".", "shape", ")", ">=", "2", ",", "AutoCutsError", "(", "\"input data should be 2D or greater\"", ")", "ht", ",", "wd", "=", "data", ".", "shape", "[", ":", "2", "]", "assert", "(", "0.0", "<", "contrast", "<=", "1.0", ")", ",", "AutoCutsError", "(", "\"contrast (%.2f) not in range 0 < c <= 1\"", "%", "(", "contrast", ")", ")", "# calculate num_points parameter, if omitted", "total_points", "=", "np", ".", "size", "(", "data", ")", "if", "num_points", "is", "None", ":", "num_points", "=", "max", "(", "int", "(", "total_points", "*", "0.0002", ")", ",", "600", ")", "num_points", "=", "min", "(", "num_points", ",", "total_points", ")", "assert", "(", "0", "<", "num_points", "<=", "total_points", ")", ",", "AutoCutsError", "(", "\"num_points not in range 0-%d\"", "%", "(", "total_points", ")", ")", "# calculate num_per_row parameter, if omitted", "if", "num_per_row", "is", "None", ":", "num_per_row", "=", "max", "(", "int", "(", "0.015", "*", "num_points", ")", ",", "1", ")", "self", ".", "logger", ".", "debug", "(", "\"contrast=%.4f num_points=%d num_per_row=%d\"", "%", "(", "contrast", ",", "num_points", ",", "num_per_row", ")", ")", "# sample the data", "num_rows", "=", "num_points", "//", "num_per_row", "xmax", "=", "wd", "-", "1", "xskip", "=", "max", "(", "xmax", "//", "num_per_row", ",", "1", ")", "ymax", "=", "ht", "-", "1", "yskip", "=", "max", "(", "ymax", "//", "num_rows", ",", "1", ")", "# evenly spaced sampling over rows and cols", "## xskip = int(max(1.0, np.sqrt(xmax * ymax / float(num_points))))", "## yskip = xskip", "cutout", "=", "data", "[", "0", ":", "ymax", ":", "yskip", ",", "0", ":", "xmax", ":", "xskip", "]", "# flatten and trim off excess", "cutout", "=", "cutout", ".", "flat", "[", "0", ":", "num_points", "]", "# actual number of points selected", "num_pix", "=", "len", "(", "cutout", ")", "assert", "num_pix", "<=", "num_points", ",", "AutoCutsError", "(", "\"Actual number of points (%d) exceeds calculated \"", "\"number (%d)\"", "%", "(", "num_pix", ",", "num_points", ")", ")", "# sort the data by value", "cutout", "=", "np", ".", "sort", "(", "cutout", ")", "# flat distribution?", "data_min", "=", "np", ".", "nanmin", "(", "cutout", ")", "data_max", "=", "np", ".", "nanmax", "(", "cutout", ")", "if", "(", "data_min", "==", "data_max", ")", "or", "(", "contrast", "==", "0.0", ")", ":", "return", "(", "data_min", ",", "data_max", ")", "# compute the midpoint and median", "midpoint", "=", "(", "num_pix", "//", "2", ")", "if", "num_pix", "%", "2", "!=", "0", ":", "median", "=", "cutout", "[", "midpoint", "]", "else", ":", "median", "=", "0.5", "*", "(", "cutout", "[", "midpoint", "-", "1", "]", "+", "cutout", "[", "midpoint", "]", ")", "self", ".", "logger", ".", "debug", "(", "\"num_pix=%d midpoint=%d median=%.4f\"", "%", "(", "num_pix", ",", "midpoint", ",", "median", ")", ")", "## # Remove outliers to aid fitting", "## threshold = np.std(cutout) * 2.5", "## cutout = cutout[np.where(np.fabs(cutout - median) > threshold)]", "## num_pix = len(cutout)", "# zscale fitting function:", "# I(x) = slope * (x - midpoint) + intercept", "def", "fitting", "(", "x", ",", "slope", ",", "intercept", ")", ":", "y", "=", "slope", "*", "(", "x", "-", "midpoint", ")", "+", "intercept", "return", "y", "# compute a least squares fit", "X", "=", "np", ".", "arange", "(", "num_pix", ")", "Y", "=", "cutout", 
"sigma", "=", "np", ".", "array", "(", "[", "1.0", "]", "*", "num_pix", ")", "guess", "=", "np", ".", "array", "(", "[", "0.0", ",", "0.0", "]", ")", "# Curve fit", "with", "_lock", ":", "# NOTE: without this mutex, optimize.curvefit causes a fatal error", "# sometimes--it appears not to be thread safe.", "# The error is:", "# \"SystemError: null argument to internal routine\"", "# \"Fatal Python error: GC object already tracked\"", "try", ":", "p", ",", "cov", "=", "optimize", ".", "curve_fit", "(", "fitting", ",", "X", ",", "Y", ",", "guess", ",", "sigma", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "debug", "(", "\"curve fitting failed: %s\"", "%", "(", "str", "(", "e", ")", ")", ")", "cov", "=", "None", "if", "cov", "is", "None", ":", "self", ".", "logger", ".", "debug", "(", "\"curve fitting failed\"", ")", "return", "(", "float", "(", "data_min", ")", ",", "float", "(", "data_max", ")", ")", "slope", ",", "intercept", "=", "p", "## num_chosen = 0", "self", ".", "logger", ".", "debug", "(", "\"intercept=%f slope=%f\"", "%", "(", "intercept", ",", "slope", ")", ")", "## if num_chosen < (num_pix // 2):", "## self.logger.debug(\"more than half pixels rejected--falling back to min/max of sample\")", "## return (data_min, data_max)", "# finally, compute the range", "falloff", "=", "slope", "/", "contrast", "z1", "=", "median", "-", "midpoint", "*", "falloff", "z2", "=", "median", "+", "(", "num_pix", "-", "midpoint", ")", "*", "falloff", "# final sanity check on cut levels", "locut", "=", "max", "(", "z1", ",", "data_min", ")", "hicut", "=", "min", "(", "z2", ",", "data_max", ")", "if", "locut", ">=", "hicut", ":", "locut", "=", "data_min", "hicut", "=", "data_max", "return", "(", "float", "(", "locut", ")", ",", "float", "(", "hicut", ")", ")" ]
40.502959
0.001996
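A compressed, numpy-only sketch of the core zscale idea described in the docstring (sample, sort, fit a line through the ranked values, stretch the slope by 1/contrast); it drops the sampling grid, thread lock, and curve_fit fallback of the full method above, with np.polyfit standing in for the iterative-rejection fit:

import numpy as np

def zscale_sketch(data, contrast=0.25, num_points=600):
    samples = np.sort(np.ravel(data)[:num_points].astype(float))  # ranked I(i)
    n = len(samples)
    if samples[0] == samples[-1] or contrast == 0.0:
        return float(samples[0]), float(samples[-1])
    midpoint = n // 2
    median = float(np.median(samples))
    # Fit I(i) = intercept + slope * (i - midpoint); polyfit stands in for
    # the iterative-rejection fit of the real algorithm.
    slope = np.polyfit(np.arange(n) - midpoint, samples, 1)[0]
    z1 = median - midpoint * (slope / contrast)
    z2 = median + (n - midpoint) * (slope / contrast)
    return max(z1, samples[0]), min(z2, samples[-1])

lo, hi = zscale_sketch(np.random.default_rng(0).normal(100.0, 5.0, (64, 64)))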
def limits(self,x1,x2,y1,y2):
    """Set the coordinate boundaries of plot"""
    import math
    self.x1 = x1
    self.x2 = x2
    self.y1 = y1
    self.y2 = y2
    self.xscale = (self.cx2 - self.cx1) / (self.x2 - self.x1)
    self.yscale = (self.cy2 - self.cy1) / (self.y2 - self.y1)
    ra1 = self.x1
    ra2 = self.x2
    dec1 = self.y1
    dec2 = self.y2
    (sx1, sy2) = self.p2c((ra1, dec1))
    (sx2, sy1) = self.p2c((ra2, dec2))
    self.config(scrollregion=(sx1 - self.lgutter, sy1 + self.bgutter,
                              sx2 + self.rgutter, sy2 - self.tgutter))
[ "def", "limits", "(", "self", ",", "x1", ",", "x2", ",", "y1", ",", "y2", ")", ":", "import", "math", "self", ".", "x1", "=", "x1", "self", ".", "x2", "=", "x2", "self", ".", "y1", "=", "y1", "self", ".", "y2", "=", "y2", "self", ".", "xscale", "=", "(", "self", ".", "cx2", "-", "self", ".", "cx1", ")", "/", "(", "self", ".", "x2", "-", "self", ".", "x1", ")", "self", ".", "yscale", "=", "(", "self", ".", "cy2", "-", "self", ".", "cy1", ")", "/", "(", "self", ".", "y2", "-", "self", ".", "y1", ")", "ra1", "=", "self", ".", "x1", "ra2", "=", "self", ".", "x2", "dec1", "=", "self", ".", "y1", "dec2", "=", "self", ".", "y2", "(", "sx1", ",", "sy2", ")", "=", "self", ".", "p2c", "(", "(", "ra1", ",", "dec1", ")", ")", "(", "sx2", ",", "sy1", ")", "=", "self", ".", "p2c", "(", "(", "ra2", ",", "dec2", ")", ")", "self", ".", "config", "(", "scrollregion", "=", "(", "sx1", "-", "self", ".", "lgutter", ",", "sy1", "+", "self", ".", "bgutter", ",", "sx2", "+", "self", ".", "rgutter", ",", "sy2", "-", "self", ".", "tgutter", ")", ")" ]
27.526316
0.07024
def backup_location(src, loc=None):
    '''
    Writes Backups of locations

    :param src: The source file/folder to backup
    :param loc: The target folder to backup into

    The backup will be called `src` + :func:`util.system.get_timestamp`.

    * If `loc` left to none, the backup gets written in the same \
      folder like `src` resides in
    * Otherwise the specified path will be used.
    '''
    from photon.util.system import get_timestamp

    src = _path.realpath(src)
    if not loc or not loc.startswith(_sep):
        loc = _path.dirname(src)
    pth = _path.join(_path.basename(src), _path.realpath(loc))
    out = '%s_backup_%s' % (_path.basename(src), get_timestamp())
    change_location(src, search_location(out, create_in=pth))
[ "def", "backup_location", "(", "src", ",", "loc", "=", "None", ")", ":", "from", "photon", ".", "util", ".", "system", "import", "get_timestamp", "src", "=", "_path", ".", "realpath", "(", "src", ")", "if", "not", "loc", "or", "not", "loc", ".", "startswith", "(", "_sep", ")", ":", "loc", "=", "_path", ".", "dirname", "(", "src", ")", "pth", "=", "_path", ".", "join", "(", "_path", ".", "basename", "(", "src", ")", ",", "_path", ".", "realpath", "(", "loc", ")", ")", "out", "=", "'%s_backup_%s'", "%", "(", "_path", ".", "basename", "(", "src", ")", ",", "get_timestamp", "(", ")", ")", "change_location", "(", "src", ",", "search_location", "(", "out", ",", "create_in", "=", "pth", ")", ")" ]
29.384615
0.001267
def all_equal(left, right, cache=None):
    """Check whether two objects `left` and `right` are equal.

    Parameters
    ----------
    left : Union[object, Expr, Node]
    right : Union[object, Expr, Node]
    cache : Optional[Dict[Tuple[Node, Node], bool]]
        A dictionary indicating whether two Nodes are equal
    """
    if cache is None:
        cache = {}

    if util.is_iterable(left):
        # check that left and right are equal length iterables and that all
        # of their elements are equal
        return (
            util.is_iterable(right)
            and len(left) == len(right)
            and all(
                itertools.starmap(
                    functools.partial(all_equal, cache=cache),
                    zip(left, right)
                )
            )
        )

    if hasattr(left, 'equals'):
        return left.equals(right, cache=cache)
    return left == right
[ "def", "all_equal", "(", "left", ",", "right", ",", "cache", "=", "None", ")", ":", "if", "cache", "is", "None", ":", "cache", "=", "{", "}", "if", "util", ".", "is_iterable", "(", "left", ")", ":", "# check that left and right are equal length iterables and that all", "# of their elements are equal", "return", "(", "util", ".", "is_iterable", "(", "right", ")", "and", "len", "(", "left", ")", "==", "len", "(", "right", ")", "and", "all", "(", "itertools", ".", "starmap", "(", "functools", ".", "partial", "(", "all_equal", ",", "cache", "=", "cache", ")", ",", "zip", "(", "left", ",", "right", ")", ")", ")", ")", "if", "hasattr", "(", "left", ",", "'equals'", ")", ":", "return", "left", ".", "equals", "(", "right", ",", "cache", "=", "cache", ")", "return", "left", "==", "right" ]
29.758621
0.001122
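The record leans on ibis's `util.is_iterable` plus stdlib `itertools`/`functools`. A sketch with a hypothetical stand-in for the helper, assuming `all_equal` above shares a namespace with these names:

import functools
import itertools

class util:  # hypothetical stand-in for ibis's util module
    @staticmethod
    def is_iterable(o):
        return not isinstance(o, (str, bytes)) and hasattr(o, '__iter__')

print(all_equal([1, [2, 3]], [1, [2, 3]]))  # True: recurses elementwise
print(all_equal([1, 2], [1, 2, 3]))         # False: length mismatch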
def sentence_texts(self):
    """The list of texts representing ``sentences`` layer elements."""
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    return self.texts(SENTENCES)
[ "def", "sentence_texts", "(", "self", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "SENTENCES", ")", ":", "self", ".", "tokenize_sentences", "(", ")", "return", "self", ".", "texts", "(", "SENTENCES", ")" ]
42.6
0.009217
def max(self):
    """
    The maximum integer value of a value-set. It is only defined when there
    is exactly one region.

    :return: An integer that represents the maximum integer value of this
             value-set.
    :rtype: int
    """
    if len(self.regions) != 1:
        raise ClaripyVSAOperationError("'max()' onlly works on single-region value-sets.")

    return self.get_si(next(iter(self.regions))).max
[ "def", "max", "(", "self", ")", ":", "if", "len", "(", "self", ".", "regions", ")", "!=", "1", ":", "raise", "ClaripyVSAOperationError", "(", "\"'max()' onlly works on single-region value-sets.\"", ")", "return", "self", ".", "get_si", "(", "next", "(", "iter", "(", "self", ".", "regions", ")", ")", ")", ".", "max" ]
35.75
0.011364
def _get_baremetal_switches(self, port):
    """Get switch ip addresses from baremetal transaction.

    This method is used to extract switch information
    from the transaction where VNIC_TYPE is baremetal.

    :param port: Received port transaction
    :returns: list of all switches
    :returns: list of only switches which are active
    """
    all_switches = set()
    active_switches = set()
    all_link_info = port[bc.portbindings.PROFILE]['local_link_information']
    for link_info in all_link_info:
        switch_info = self._get_baremetal_switch_info(link_info)
        if not switch_info:
            continue
        switch_ip = switch_info['switch_ip']

        # If not for Nexus
        if not self._switch_defined(switch_ip):
            continue
        all_switches.add(switch_ip)
        if self.is_switch_active(switch_ip):
            active_switches.add(switch_ip)

    return list(all_switches), list(active_switches)
[ "def", "_get_baremetal_switches", "(", "self", ",", "port", ")", ":", "all_switches", "=", "set", "(", ")", "active_switches", "=", "set", "(", ")", "all_link_info", "=", "port", "[", "bc", ".", "portbindings", ".", "PROFILE", "]", "[", "'local_link_information'", "]", "for", "link_info", "in", "all_link_info", ":", "switch_info", "=", "self", ".", "_get_baremetal_switch_info", "(", "link_info", ")", "if", "not", "switch_info", ":", "continue", "switch_ip", "=", "switch_info", "[", "'switch_ip'", "]", "# If not for Nexus", "if", "not", "self", ".", "_switch_defined", "(", "switch_ip", ")", ":", "continue", "all_switches", ".", "add", "(", "switch_ip", ")", "if", "self", ".", "is_switch_active", "(", "switch_ip", ")", ":", "active_switches", ".", "add", "(", "switch_ip", ")", "return", "list", "(", "all_switches", ")", ",", "list", "(", "active_switches", ")" ]
34.827586
0.001927
def check_permission(self, identifiers, permission_s, logical_operator):
    """
    like Yosai's authentication process, the authorization process will
    raise an Exception to halt further authz checking once Yosai determines
    that a Subject is unauthorized to receive the requested permission

    :param identifiers: a collection of identifiers
    :type identifiers: subject_abcs.IdentifierCollection

    :param permission_s: a collection of 1..N permissions
    :type permission_s: List of Permission objects or Strings

    :param logical_operator: indicates whether all or at least one
                             permission check is true (any)
    :type: any OR all (from python standard library)

    :raises UnauthorizedException: if any permission is unauthorized
    """
    self.assert_realms_configured()
    permitted = self.is_permitted_collective(identifiers,
                                             permission_s,
                                             logical_operator)
    if not permitted:
        msg = "Subject lacks permission(s) to satisfy logical operation"
        raise UnauthorizedException(msg)
[ "def", "check_permission", "(", "self", ",", "identifiers", ",", "permission_s", ",", "logical_operator", ")", ":", "self", ".", "assert_realms_configured", "(", ")", "permitted", "=", "self", ".", "is_permitted_collective", "(", "identifiers", ",", "permission_s", ",", "logical_operator", ")", "if", "not", "permitted", ":", "msg", "=", "\"Subject lacks permission(s) to satisfy logical operation\"", "raise", "UnauthorizedException", "(", "msg", ")" ]
48
0.001634
def adsb_vehicle_encode(self, ICAO_address, lat, lon, altitude_type,
                        altitude, heading, hor_velocity, ver_velocity,
                        callsign, emitter_type, tslc, flags, squawk):
    '''
    The location and information of an ADSB vehicle

    ICAO_address  : ICAO address (uint32_t)
    lat           : Latitude, expressed as degrees * 1E7 (int32_t)
    lon           : Longitude, expressed as degrees * 1E7 (int32_t)
    altitude_type : Type from ADSB_ALTITUDE_TYPE enum (uint8_t)
    altitude      : Altitude(ASL) in millimeters (int32_t)
    heading       : Course over ground in centidegrees (uint16_t)
    hor_velocity  : The horizontal velocity in centimeters/second (uint16_t)
    ver_velocity  : The vertical velocity in centimeters/second, positive is up (int16_t)
    callsign      : The callsign, 8+null (char)
    emitter_type  : Type from ADSB_EMITTER_TYPE enum (uint8_t)
    tslc          : Time since last communication in seconds (uint8_t)
    flags         : Flags to indicate various statuses including valid data fields (uint16_t)
    squawk        : Squawk code (uint16_t)
    '''
    return MAVLink_adsb_vehicle_message(ICAO_address, lat, lon, altitude_type,
                                        altitude, heading, hor_velocity,
                                        ver_velocity, callsign, emitter_type,
                                        tslc, flags, squawk)
[ "def", "adsb_vehicle_encode", "(", "self", ",", "ICAO_address", ",", "lat", ",", "lon", ",", "altitude_type", ",", "altitude", ",", "heading", ",", "hor_velocity", ",", "ver_velocity", ",", "callsign", ",", "emitter_type", ",", "tslc", ",", "flags", ",", "squawk", ")", ":", "return", "MAVLink_adsb_vehicle_message", "(", "ICAO_address", ",", "lat", ",", "lon", ",", "altitude_type", ",", "altitude", ",", "heading", ",", "hor_velocity", ",", "ver_velocity", ",", "callsign", ",", "emitter_type", ",", "tslc", ",", "flags", ",", "squawk", ")" ]
79.85
0.008663
def inode(self):
    """Return the inode number of the entry."""
    if self._inode is None:
        self.stat(follow_symlinks=False)
    return self._inode
[ "def", "inode", "(", "self", ")", ":", "if", "self", ".", "_inode", "is", "None", ":", "self", ".", "stat", "(", "follow_symlinks", "=", "False", ")", "return", "self", ".", "_inode" ]
33.6
0.011628
def fetch_withdrawals(self, limit: int) -> List[Withdrawal]:
    """Fetch latest withdrawals, must provide a limit."""
    return self._transactions(self._withdrawals, 'withdrawals', limit)
[ "def", "fetch_withdrawals", "(", "self", ",", "limit", ":", "int", ")", "->", "List", "[", "Withdrawal", "]", ":", "return", "self", ".", "_transactions", "(", "self", ".", "_withdrawals", ",", "'withdrawals'", ",", "limit", ")" ]
65
0.010152
def match_bitap(self, text, pattern, loc):
  """Locate the best instance of 'pattern' in 'text' near 'loc' using the
  Bitap algorithm.

  Args:
    text: The text to search.
    pattern: The pattern to search for.
    loc: The location to search around.

  Returns:
    Best match index or -1.
  """
  # Python doesn't have a maxint limit, so ignore this check.
  #if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
  #  raise ValueError("Pattern too long for this application.")

  # Initialise the alphabet.
  s = self.match_alphabet(pattern)

  def match_bitapScore(e, x):
    """Compute and return the score for a match with e errors and x location.
    Accesses loc and pattern through being a closure.

    Args:
      e: Number of errors in match.
      x: Location of match.

    Returns:
      Overall score for match (0.0 = good, 1.0 = bad).
    """
    accuracy = float(e) / len(pattern)
    proximity = abs(loc - x)
    if not self.Match_Distance:
      # Dodge divide by zero error.
      return proximity and 1.0 or accuracy
    return accuracy + (proximity / float(self.Match_Distance))

  # Highest score beyond which we give up.
  score_threshold = self.Match_Threshold
  # Is there a nearby exact match? (speedup)
  best_loc = text.find(pattern, loc)
  if best_loc != -1:
    score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
    # What about in the other direction? (speedup)
    best_loc = text.rfind(pattern, loc + len(pattern))
    if best_loc != -1:
      score_threshold = min(match_bitapScore(0, best_loc), score_threshold)

  # Initialise the bit arrays.
  matchmask = 1 << (len(pattern) - 1)
  best_loc = -1

  bin_max = len(pattern) + len(text)
  # Empty initialization added to appease pychecker.
  last_rd = None
  for d in range(len(pattern)):
    # Scan for the best match each iteration allows for one more error.
    # Run a binary search to determine how far from 'loc' we can stray at
    # this error level.
    bin_min = 0
    bin_mid = bin_max
    while bin_min < bin_mid:
      if match_bitapScore(d, loc + bin_mid) <= score_threshold:
        bin_min = bin_mid
      else:
        bin_max = bin_mid
      bin_mid = (bin_max - bin_min) // 2 + bin_min
    # Use the result from this iteration as the maximum for the next.
    bin_max = bin_mid
    start = max(1, loc - bin_mid + 1)
    finish = min(loc + bin_mid, len(text)) + len(pattern)

    rd = [0] * (finish + 2)
    rd[finish + 1] = (1 << d) - 1
    for j in range(finish, start - 1, -1):
      if len(text) <= j - 1:
        # Out of range.
        charMatch = 0
      else:
        charMatch = s.get(text[j - 1], 0)
      if d == 0:  # First pass: exact match.
        rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
      else:  # Subsequent passes: fuzzy match.
        rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | (
            ((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1]
      if rd[j] & matchmask:
        score = match_bitapScore(d, j - 1)
        # This match will almost certainly be better than any existing match.
        # But check anyway.
        if score <= score_threshold:
          # Told you so.
          score_threshold = score
          best_loc = j - 1
          if best_loc > loc:
            # When passing loc, don't exceed our current distance from loc.
            start = max(1, 2 * loc - best_loc)
          else:
            # Already passed loc, downhill from here on in.
            break
    # No hope for a (better) match at greater error levels.
    if match_bitapScore(d + 1, loc) > score_threshold:
      break
    last_rd = rd
  return best_loc
[ "def", "match_bitap", "(", "self", ",", "text", ",", "pattern", ",", "loc", ")", ":", "# Python doesn't have a maxint limit, so ignore this check.", "#if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:", "# raise ValueError(\"Pattern too long for this application.\")", "# Initialise the alphabet.", "s", "=", "self", ".", "match_alphabet", "(", "pattern", ")", "def", "match_bitapScore", "(", "e", ",", "x", ")", ":", "\"\"\"Compute and return the score for a match with e errors and x location.\n Accesses loc and pattern through being a closure.\n\n Args:\n e: Number of errors in match.\n x: Location of match.\n\n Returns:\n Overall score for match (0.0 = good, 1.0 = bad).\n \"\"\"", "accuracy", "=", "float", "(", "e", ")", "/", "len", "(", "pattern", ")", "proximity", "=", "abs", "(", "loc", "-", "x", ")", "if", "not", "self", ".", "Match_Distance", ":", "# Dodge divide by zero error.", "return", "proximity", "and", "1.0", "or", "accuracy", "return", "accuracy", "+", "(", "proximity", "/", "float", "(", "self", ".", "Match_Distance", ")", ")", "# Highest score beyond which we give up.", "score_threshold", "=", "self", ".", "Match_Threshold", "# Is there a nearby exact match? (speedup)", "best_loc", "=", "text", ".", "find", "(", "pattern", ",", "loc", ")", "if", "best_loc", "!=", "-", "1", ":", "score_threshold", "=", "min", "(", "match_bitapScore", "(", "0", ",", "best_loc", ")", ",", "score_threshold", ")", "# What about in the other direction? (speedup)", "best_loc", "=", "text", ".", "rfind", "(", "pattern", ",", "loc", "+", "len", "(", "pattern", ")", ")", "if", "best_loc", "!=", "-", "1", ":", "score_threshold", "=", "min", "(", "match_bitapScore", "(", "0", ",", "best_loc", ")", ",", "score_threshold", ")", "# Initialise the bit arrays.", "matchmask", "=", "1", "<<", "(", "len", "(", "pattern", ")", "-", "1", ")", "best_loc", "=", "-", "1", "bin_max", "=", "len", "(", "pattern", ")", "+", "len", "(", "text", ")", "# Empty initialization added to appease pychecker.", "last_rd", "=", "None", "for", "d", "in", "range", "(", "len", "(", "pattern", ")", ")", ":", "# Scan for the best match each iteration allows for one more error.", "# Run a binary search to determine how far from 'loc' we can stray at", "# this error level.", "bin_min", "=", "0", "bin_mid", "=", "bin_max", "while", "bin_min", "<", "bin_mid", ":", "if", "match_bitapScore", "(", "d", ",", "loc", "+", "bin_mid", ")", "<=", "score_threshold", ":", "bin_min", "=", "bin_mid", "else", ":", "bin_max", "=", "bin_mid", "bin_mid", "=", "(", "bin_max", "-", "bin_min", ")", "//", "2", "+", "bin_min", "# Use the result from this iteration as the maximum for the next.", "bin_max", "=", "bin_mid", "start", "=", "max", "(", "1", ",", "loc", "-", "bin_mid", "+", "1", ")", "finish", "=", "min", "(", "loc", "+", "bin_mid", ",", "len", "(", "text", ")", ")", "+", "len", "(", "pattern", ")", "rd", "=", "[", "0", "]", "*", "(", "finish", "+", "2", ")", "rd", "[", "finish", "+", "1", "]", "=", "(", "1", "<<", "d", ")", "-", "1", "for", "j", "in", "range", "(", "finish", ",", "start", "-", "1", ",", "-", "1", ")", ":", "if", "len", "(", "text", ")", "<=", "j", "-", "1", ":", "# Out of range.", "charMatch", "=", "0", "else", ":", "charMatch", "=", "s", ".", "get", "(", "text", "[", "j", "-", "1", "]", ",", "0", ")", "if", "d", "==", "0", ":", "# First pass: exact match.", "rd", "[", "j", "]", "=", "(", "(", "rd", "[", "j", "+", "1", "]", "<<", "1", ")", "|", "1", ")", "&", "charMatch", "else", ":", "# Subsequent passes: fuzzy 
match.", "rd", "[", "j", "]", "=", "(", "(", "(", "rd", "[", "j", "+", "1", "]", "<<", "1", ")", "|", "1", ")", "&", "charMatch", ")", "|", "(", "(", "(", "last_rd", "[", "j", "+", "1", "]", "|", "last_rd", "[", "j", "]", ")", "<<", "1", ")", "|", "1", ")", "|", "last_rd", "[", "j", "+", "1", "]", "if", "rd", "[", "j", "]", "&", "matchmask", ":", "score", "=", "match_bitapScore", "(", "d", ",", "j", "-", "1", ")", "# This match will almost certainly be better than any existing match.", "# But check anyway.", "if", "score", "<=", "score_threshold", ":", "# Told you so.", "score_threshold", "=", "score", "best_loc", "=", "j", "-", "1", "if", "best_loc", ">", "loc", ":", "# When passing loc, don't exceed our current distance from loc.", "start", "=", "max", "(", "1", ",", "2", "*", "loc", "-", "best_loc", ")", "else", ":", "# Already passed loc, downhill from here on in.", "break", "# No hope for a (better) match at greater error levels.", "if", "match_bitapScore", "(", "d", "+", "1", ",", "loc", ")", ">", "score_threshold", ":", "break", "last_rd", "=", "rd", "return", "best_loc" ]
35.342857
0.011009
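This is the private workhorse behind diff-match-patch's public fuzzy matcher. A usage sketch via the published package; the result of the fuzzy call depends on the default Match_Threshold/Match_Distance settings:

from diff_match_patch import diff_match_patch  # pip install diff-match-patch

dmp = diff_match_patch()
print(dmp.match_main('the quick brown fox', 'brown', 10))  # 10: exact match at loc
loc = dmp.match_main('the quick brown fox', 'quikc', 4)    # fuzzy match near index 4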
def _sprite_map(g, **kwargs):
    """
    Generates a sprite map from the files matching the glob pattern.
    Uses the keyword-style arguments passed in to control the placement.
    """
    g = StringValue(g).value

    if not Image:
        raise Exception("Images manipulation require PIL")

    if g in sprite_maps:
        sprite_maps[glob]['*'] = datetime.datetime.now()
    elif '..' not in g:  # Protect against going to prohibited places...
        vertical = (kwargs.get('direction', 'vertical') == 'vertical')
        offset_x = NumberValue(kwargs.get('offset_x', 0))
        offset_y = NumberValue(kwargs.get('offset_y', 0))
        repeat = StringValue(kwargs.get('repeat', 'no-repeat'))
        position = NumberValue(kwargs.get('position', 0))
        dst_color = kwargs.get('dst_color')
        src_color = kwargs.get('src_color')
        if position and position > -1 and position < 1:
            position.units = {'%': _units_weights.get('%', 1), '_': '%'}

        spacing = kwargs.get('spacing', 0)
        if isinstance(spacing, ListValue):
            spacing = [int(NumberValue(v).value) for n, v in spacing.items()]
        else:
            spacing = [int(NumberValue(spacing).value)]
        spacing = (spacing * 4)[:4]

        if callable(STATIC_ROOT):
            rfiles = files = sorted(STATIC_ROOT(g))
        else:
            glob_path = os.path.join(STATIC_ROOT, g)
            files = glob.glob(glob_path)
            files = sorted((f, None) for f in files)
            rfiles = [(f[len(STATIC_ROOT):], s) for f, s in files]

        if not files:
            log.error("Nothing found at '%s'", glob_path)
            return StringValue(None)

        times = []
        for file, storage in files:
            try:
                d_obj = storage.modified_time(file)
                times.append(int(time.mktime(d_obj.timetuple())))
            except:
                times.append(int(os.path.getmtime(file)))

        map_name = os.path.normpath(
            os.path.dirname(g)).replace('\\', '_').replace('/', '_')
        key = list(zip(*files)[0]) + times + [repr(kwargs)]
        key = map_name + '-' + base64.urlsafe_b64encode(
            hashlib.md5(repr(key)).digest()).rstrip('=').replace('-', '_')
        asset_file = key + '.png'
        asset_path = os.path.join(ASSETS_ROOT, asset_file)

        if os.path.exists(asset_path + '.cache'):
            asset, map, sizes = pickle.load(open(asset_path + '.cache'))
            sprite_maps[asset] = map
        else:
            images = tuple(Image.open(storage.open(file))
                           if storage is not None else Image.open(file)
                           for file, storage in files)
            names = tuple(os.path.splitext(
                os.path.basename(file))[0] for file, storage in files)
            positions = []
            spacings = []
            tot_spacings = []
            for name in names:
                name = name.replace('-', '_')

                _position = kwargs.get(name + '_position')
                if _position is None:
                    _position = position
                else:
                    _position = NumberValue(_position)
                    if _position and _position > -1 and _position < 1:
                        _position.units = {'%': _units_weights.get('%', 1), '_': '%'}
                positions.append(_position)

                _spacing = kwargs.get(name + '_spacing')
                if _spacing is None:
                    _spacing = spacing
                else:
                    if isinstance(_spacing, ListValue):
                        _spacing = [int(NumberValue(v).value) for n, v in _spacing.items()]
                    else:
                        _spacing = [int(NumberValue(_spacing).value)]
                    _spacing = (_spacing * 4)[:4]
                spacings.append(_spacing)

                if _position and _position.unit != '%':
                    if vertical:
                        if _position > 0:
                            tot_spacings.append((_spacing[0], _spacing[1],
                                                 _spacing[2], _spacing[3] + _position))
                    else:
                        if _position > 0:
                            tot_spacings.append((_spacing[0] + _position, _spacing[1],
                                                 _spacing[2], _spacing[3]))
                else:
                    tot_spacings.append(_spacing)

            sizes = tuple(image.size for image in images)

            _spacings = zip(*tot_spacings)
            if vertical:
                width = max(zip(*sizes)[0]) + max(_spacings[1]) + max(_spacings[3])
                height = sum(zip(*sizes)[1]) + sum(_spacings[0]) + sum(_spacings[2])
            else:
                width = sum(zip(*sizes)[0]) + sum(_spacings[1]) + sum(_spacings[3])
                height = max(zip(*sizes)[1]) + max(_spacings[0]) + max(_spacings[2])

            new_image = Image.new(
                mode='RGBA',
                size=(width, height),
                color=(0, 0, 0, 0)
            )

            offsets_x = []
            offsets_y = []
            offset = 0
            for i, image in enumerate(images):
                spacing = spacings[i]
                position = positions[i]
                if vertical:
                    if position and position.unit == '%':
                        x = width * position.value - \
                            (spacing[3] + sizes[i][1] + spacing[1])
                    elif position.value < 0:
                        x = width + position.value - \
                            (spacing[3] + sizes[i][1] + spacing[1])
                    else:
                        x = position.value
                    offset += spacing[0]
                    new_image.paste(image, (int(x + spacing[3]), offset))
                    offsets_x.append(x)
                    offsets_y.append(offset - spacing[0])
                    offset += sizes[i][1] + spacing[2]
                else:
                    if position and position.unit == '%':
                        y = height * position.value - \
                            (spacing[0] + sizes[i][1] + spacing[2])
                    elif position.value < 0:
                        y = height + position.value - \
                            (spacing[0] + sizes[i][1] + spacing[2])
                    else:
                        y = position.value
                    offset += spacing[3]
                    new_image.paste(image, (offset, int(y + spacing[0])))
                    offsets_x.append(offset - spacing[3])
                    offsets_y.append(y)
                    offset += sizes[i][0] + spacing[1]

            if dst_color:
                src_color = ColorValue(src_color).value[:3] if src_color else (0, 0, 0)
                dst_color = list(ColorValue(dst_color).value[:3])
                pixdata = new_image.load()
                for y in xrange(new_image.size[1]):
                    for x in xrange(new_image.size[0]):
                        if pixdata[x, y][:3] == src_color:
                            pixdata[x, y] = tuple(dst_color + [pixdata[x, y][3]])

            try:
                new_image.save(asset_path)
            except IOError:
                log.exception("Error while saving image")
            filetime = int(time.mktime(datetime.datetime.now().timetuple()))

            url = '%s%s?_=%s' % (ASSETS_URL, asset_file, filetime)
            asset = 'url("%s") %s' % (escape(url), repeat)

            # Use the sorted list to remove older elements (keep only 500
            # objects):
            if len(sprite_maps) > 1000:
                for a in sorted(sprite_maps, key=lambda a: sprite_maps[a]['*'],
                                reverse=True)[500:]:
                    del sprite_maps[a]

            # Add the new object:
            map = dict(zip(names, zip(sizes, rfiles, offsets_x, offsets_y)))
            map['*'] = datetime.datetime.now()
            map['*f*'] = asset_file
            map['*k*'] = key
            map['*n*'] = map_name
            map['*t*'] = filetime
            pickle.dump((asset, map, zip(files, sizes)),
                        open(asset_path + '.cache', 'w'))
            sprite_maps[asset] = map
        for file, size in sizes:
            sprite_images[file] = size
    ret = StringValue(asset)
    return ret
[ "def", "_sprite_map", "(", "g", ",", "*", "*", "kwargs", ")", ":", "g", "=", "StringValue", "(", "g", ")", ".", "value", "if", "not", "Image", ":", "raise", "Exception", "(", "\"Images manipulation require PIL\"", ")", "if", "g", "in", "sprite_maps", ":", "sprite_maps", "[", "glob", "]", "[", "'*'", "]", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "elif", "'..'", "not", "in", "g", ":", "# Protect against going to prohibited places...", "vertical", "=", "(", "kwargs", ".", "get", "(", "'direction'", ",", "'vertical'", ")", "==", "'vertical'", ")", "offset_x", "=", "NumberValue", "(", "kwargs", ".", "get", "(", "'offset_x'", ",", "0", ")", ")", "offset_y", "=", "NumberValue", "(", "kwargs", ".", "get", "(", "'offset_y'", ",", "0", ")", ")", "repeat", "=", "StringValue", "(", "kwargs", ".", "get", "(", "'repeat'", ",", "'no-repeat'", ")", ")", "position", "=", "NumberValue", "(", "kwargs", ".", "get", "(", "'position'", ",", "0", ")", ")", "dst_color", "=", "kwargs", ".", "get", "(", "'dst_color'", ")", "src_color", "=", "kwargs", ".", "get", "(", "'src_color'", ")", "if", "position", "and", "position", ">", "-", "1", "and", "position", "<", "1", ":", "position", ".", "units", "=", "{", "'%'", ":", "_units_weights", ".", "get", "(", "'%'", ",", "1", ")", ",", "'_'", ":", "'%'", "}", "spacing", "=", "kwargs", ".", "get", "(", "'spacing'", ",", "0", ")", "if", "isinstance", "(", "spacing", ",", "ListValue", ")", ":", "spacing", "=", "[", "int", "(", "NumberValue", "(", "v", ")", ".", "value", ")", "for", "n", ",", "v", "in", "spacing", ".", "items", "(", ")", "]", "else", ":", "spacing", "=", "[", "int", "(", "NumberValue", "(", "spacing", ")", ".", "value", ")", "]", "spacing", "=", "(", "spacing", "*", "4", ")", "[", ":", "4", "]", "if", "callable", "(", "STATIC_ROOT", ")", ":", "rfiles", "=", "files", "=", "sorted", "(", "STATIC_ROOT", "(", "g", ")", ")", "else", ":", "glob_path", "=", "os", ".", "path", ".", "join", "(", "STATIC_ROOT", ",", "g", ")", "files", "=", "glob", ".", "glob", "(", "glob_path", ")", "files", "=", "sorted", "(", "(", "f", ",", "None", ")", "for", "f", "in", "files", ")", "rfiles", "=", "[", "(", "f", "[", "len", "(", "STATIC_ROOT", ")", ":", "]", ",", "s", ")", "for", "f", ",", "s", "in", "files", "]", "if", "not", "files", ":", "log", ".", "error", "(", "\"Nothing found at '%s'\"", ",", "glob_path", ")", "return", "StringValue", "(", "None", ")", "times", "=", "[", "]", "for", "file", ",", "storage", "in", "files", ":", "try", ":", "d_obj", "=", "storage", ".", "modified_time", "(", "file", ")", "times", ".", "append", "(", "int", "(", "time", ".", "mktime", "(", "d_obj", ".", "timetuple", "(", ")", ")", ")", ")", "except", ":", "times", ".", "append", "(", "int", "(", "os", ".", "path", ".", "getmtime", "(", "file", ")", ")", ")", "map_name", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "dirname", "(", "g", ")", ")", ".", "replace", "(", "'\\\\'", ",", "'_'", ")", ".", "replace", "(", "'/'", ",", "'_'", ")", "key", "=", "list", "(", "zip", "(", "*", "files", ")", "[", "0", "]", ")", "+", "times", "+", "[", "repr", "(", "kwargs", ")", "]", "key", "=", "map_name", "+", "'-'", "+", "base64", ".", "urlsafe_b64encode", "(", "hashlib", ".", "md5", "(", "repr", "(", "key", ")", ")", ".", "digest", "(", ")", ")", ".", "rstrip", "(", "'='", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", "asset_file", "=", "key", "+", "'.png'", "asset_path", "=", "os", ".", "path", ".", "join", "(", "ASSETS_ROOT", ",", "asset_file", ")", "if", "os", 
".", "path", ".", "exists", "(", "asset_path", "+", "'.cache'", ")", ":", "asset", ",", "map", ",", "sizes", "=", "pickle", ".", "load", "(", "open", "(", "asset_path", "+", "'.cache'", ")", ")", "sprite_maps", "[", "asset", "]", "=", "map", "else", ":", "images", "=", "tuple", "(", "Image", ".", "open", "(", "storage", ".", "open", "(", "file", ")", ")", "if", "storage", "is", "not", "None", "else", "Image", ".", "open", "(", "file", ")", "for", "file", ",", "storage", "in", "files", ")", "names", "=", "tuple", "(", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "file", ")", ")", "[", "0", "]", "for", "file", ",", "storage", "in", "files", ")", "positions", "=", "[", "]", "spacings", "=", "[", "]", "tot_spacings", "=", "[", "]", "for", "name", "in", "names", ":", "name", "=", "name", ".", "replace", "(", "'-'", ",", "'_'", ")", "_position", "=", "kwargs", ".", "get", "(", "name", "+", "'_position'", ")", "if", "_position", "is", "None", ":", "_position", "=", "position", "else", ":", "_position", "=", "NumberValue", "(", "_position", ")", "if", "_position", "and", "_position", ">", "-", "1", "and", "_position", "<", "1", ":", "_position", ".", "units", "=", "{", "'%'", ":", "_units_weights", ".", "get", "(", "'%'", ",", "1", ")", ",", "'_'", ":", "'%'", "}", "positions", ".", "append", "(", "_position", ")", "_spacing", "=", "kwargs", ".", "get", "(", "name", "+", "'_spacing'", ")", "if", "_spacing", "is", "None", ":", "_spacing", "=", "spacing", "else", ":", "if", "isinstance", "(", "_spacing", ",", "ListValue", ")", ":", "_spacing", "=", "[", "int", "(", "NumberValue", "(", "v", ")", ".", "value", ")", "for", "n", ",", "v", "in", "_spacing", ".", "items", "(", ")", "]", "else", ":", "_spacing", "=", "[", "int", "(", "NumberValue", "(", "_spacing", ")", ".", "value", ")", "]", "_spacing", "=", "(", "_spacing", "*", "4", ")", "[", ":", "4", "]", "spacings", ".", "append", "(", "_spacing", ")", "if", "_position", "and", "_position", ".", "unit", "!=", "'%'", ":", "if", "vertical", ":", "if", "_position", ">", "0", ":", "tot_spacings", ".", "append", "(", "(", "_spacing", "[", "0", "]", ",", "_spacing", "[", "1", "]", ",", "_spacing", "[", "2", "]", ",", "_spacing", "[", "3", "]", "+", "_position", ")", ")", "else", ":", "if", "_position", ">", "0", ":", "tot_spacings", ".", "append", "(", "(", "_spacing", "[", "0", "]", "+", "_position", ",", "_spacing", "[", "1", "]", ",", "_spacing", "[", "2", "]", ",", "_spacing", "[", "3", "]", ")", ")", "else", ":", "tot_spacings", ".", "append", "(", "_spacing", ")", "sizes", "=", "tuple", "(", "image", ".", "size", "for", "image", "in", "images", ")", "_spacings", "=", "zip", "(", "*", "tot_spacings", ")", "if", "vertical", ":", "width", "=", "max", "(", "zip", "(", "*", "sizes", ")", "[", "0", "]", ")", "+", "max", "(", "_spacings", "[", "1", "]", ")", "+", "max", "(", "_spacings", "[", "3", "]", ")", "height", "=", "sum", "(", "zip", "(", "*", "sizes", ")", "[", "1", "]", ")", "+", "sum", "(", "_spacings", "[", "0", "]", ")", "+", "sum", "(", "_spacings", "[", "2", "]", ")", "else", ":", "width", "=", "sum", "(", "zip", "(", "*", "sizes", ")", "[", "0", "]", ")", "+", "sum", "(", "_spacings", "[", "1", "]", ")", "+", "sum", "(", "_spacings", "[", "3", "]", ")", "height", "=", "max", "(", "zip", "(", "*", "sizes", ")", "[", "1", "]", ")", "+", "max", "(", "_spacings", "[", "0", "]", ")", "+", "max", "(", "_spacings", "[", "2", "]", ")", "new_image", "=", "Image", ".", "new", "(", "mode", "=", "'RGBA'", ",", 
"size", "=", "(", "width", ",", "height", ")", ",", "color", "=", "(", "0", ",", "0", ",", "0", ",", "0", ")", ")", "offsets_x", "=", "[", "]", "offsets_y", "=", "[", "]", "offset", "=", "0", "for", "i", ",", "image", "in", "enumerate", "(", "images", ")", ":", "spacing", "=", "spacings", "[", "i", "]", "position", "=", "positions", "[", "i", "]", "if", "vertical", ":", "if", "position", "and", "position", ".", "unit", "==", "'%'", ":", "x", "=", "width", "*", "position", ".", "value", "-", "(", "spacing", "[", "3", "]", "+", "sizes", "[", "i", "]", "[", "1", "]", "+", "spacing", "[", "1", "]", ")", "elif", "position", ".", "value", "<", "0", ":", "x", "=", "width", "+", "position", ".", "value", "-", "(", "spacing", "[", "3", "]", "+", "sizes", "[", "i", "]", "[", "1", "]", "+", "spacing", "[", "1", "]", ")", "else", ":", "x", "=", "position", ".", "value", "offset", "+=", "spacing", "[", "0", "]", "new_image", ".", "paste", "(", "image", ",", "(", "int", "(", "x", "+", "spacing", "[", "3", "]", ")", ",", "offset", ")", ")", "offsets_x", ".", "append", "(", "x", ")", "offsets_y", ".", "append", "(", "offset", "-", "spacing", "[", "0", "]", ")", "offset", "+=", "sizes", "[", "i", "]", "[", "1", "]", "+", "spacing", "[", "2", "]", "else", ":", "if", "position", "and", "position", ".", "unit", "==", "'%'", ":", "y", "=", "height", "*", "position", ".", "value", "-", "(", "spacing", "[", "0", "]", "+", "sizes", "[", "i", "]", "[", "1", "]", "+", "spacing", "[", "2", "]", ")", "elif", "position", ".", "value", "<", "0", ":", "y", "=", "height", "+", "position", ".", "value", "-", "(", "spacing", "[", "0", "]", "+", "sizes", "[", "i", "]", "[", "1", "]", "+", "spacing", "[", "2", "]", ")", "else", ":", "y", "=", "position", ".", "value", "offset", "+=", "spacing", "[", "3", "]", "new_image", ".", "paste", "(", "image", ",", "(", "offset", ",", "int", "(", "y", "+", "spacing", "[", "0", "]", ")", ")", ")", "offsets_x", ".", "append", "(", "offset", "-", "spacing", "[", "3", "]", ")", "offsets_y", ".", "append", "(", "y", ")", "offset", "+=", "sizes", "[", "i", "]", "[", "0", "]", "+", "spacing", "[", "1", "]", "if", "dst_color", ":", "src_color", "=", "ColorValue", "(", "src_color", ")", ".", "value", "[", ":", "3", "]", "if", "src_color", "else", "(", "0", ",", "0", ",", "0", ")", "dst_color", "=", "list", "(", "ColorValue", "(", "dst_color", ")", ".", "value", "[", ":", "3", "]", ")", "pixdata", "=", "new_image", ".", "load", "(", ")", "for", "y", "in", "xrange", "(", "new_image", ".", "size", "[", "1", "]", ")", ":", "for", "x", "in", "xrange", "(", "new_image", ".", "size", "[", "0", "]", ")", ":", "if", "pixdata", "[", "x", ",", "y", "]", "[", ":", "3", "]", "==", "src_color", ":", "pixdata", "[", "x", ",", "y", "]", "=", "tuple", "(", "dst_color", "+", "[", "pixdata", "[", "x", ",", "y", "]", "[", "3", "]", "]", ")", "try", ":", "new_image", ".", "save", "(", "asset_path", ")", "except", "IOError", ":", "log", ".", "exception", "(", "\"Error while saving image\"", ")", "filetime", "=", "int", "(", "time", ".", "mktime", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timetuple", "(", ")", ")", ")", "url", "=", "'%s%s?_=%s'", "%", "(", "ASSETS_URL", ",", "asset_file", ",", "filetime", ")", "asset", "=", "'url(\"%s\") %s'", "%", "(", "escape", "(", "url", ")", ",", "repeat", ")", "# Use the sorted list to remove older elements (keep only 500", "# objects):", "if", "len", "(", "sprite_maps", ")", ">", "1000", ":", "for", "a", "in", "sorted", "(", "sprite_maps", ",", "key", "=", 
"lambda", "a", ":", "sprite_maps", "[", "a", "]", "[", "'*'", "]", ",", "reverse", "=", "True", ")", "[", "500", ":", "]", ":", "del", "sprite_maps", "[", "a", "]", "# Add the new object:", "map", "=", "dict", "(", "zip", "(", "names", ",", "zip", "(", "sizes", ",", "rfiles", ",", "offsets_x", ",", "offsets_y", ")", ")", ")", "map", "[", "'*'", "]", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "map", "[", "'*f*'", "]", "=", "asset_file", "map", "[", "'*k*'", "]", "=", "key", "map", "[", "'*n*'", "]", "=", "map_name", "map", "[", "'*t*'", "]", "=", "filetime", "pickle", ".", "dump", "(", "(", "asset", ",", "map", ",", "zip", "(", "files", ",", "sizes", ")", ")", ",", "open", "(", "asset_path", "+", "'.cache'", ",", "'w'", ")", ")", "sprite_maps", "[", "asset", "]", "=", "map", "for", "file", ",", "size", "in", "sizes", ":", "sprite_images", "[", "file", "]", "=", "size", "ret", "=", "StringValue", "(", "asset", ")", "return", "ret" ]
43.248705
0.000703
def get_cluster_health( self, nodes_health_state_filter=0, applications_health_state_filter=0, events_health_state_filter=0, exclude_health_statistics=False, include_system_application_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): """Gets the health of a Service Fabric cluster. Use EventsHealthStateFilter to filter the collection of health events reported on the cluster based on the health state. Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter to filter the collection of nodes and applications returned based on their aggregated health state. :param nodes_health_state_filter: Allows filtering of the node health state objects returned in the result of cluster health query based on their health state. The possible values for this parameter include integer value of one of the following health states. Only nodes that match the filter are returned. All nodes are used to evaluate the aggregated health state. If not specified, all entries are returned. The state values are flag-based enumeration, so the value could be a combination of these values obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health state of nodes with HealthState value of OK (2) and Warning (4) are returned. - Default - Default value. Matches any HealthState. The value is zero. - None - Filter that doesn't match any HealthState value. Used in order to return no results on a given collection of states. The value is 1. - Ok - Filter that matches input with HealthState value Ok. The value is 2. - Warning - Filter that matches input with HealthState value Warning. The value is 4. - Error - Filter that matches input with HealthState value Error. The value is 8. - All - Filter that matches input with any HealthState value. The value is 65535. :type nodes_health_state_filter: int :param applications_health_state_filter: Allows filtering of the application health state objects returned in the result of cluster health query based on their health state. The possible values for this parameter include integer value obtained from members or bitwise operations on members of HealthStateFilter enumeration. Only applications that match the filter are returned. All applications are used to evaluate the aggregated health state. If not specified, all entries are returned. The state values are flag-based enumeration, so the value could be a combination of these values obtained using bitwise 'OR' operator. For example, if the provided value is 6 then health state of applications with HealthState value of OK (2) and Warning (4) are returned. - Default - Default value. Matches any HealthState. The value is zero. - None - Filter that doesn't match any HealthState value. Used in order to return no results on a given collection of states. The value is 1. - Ok - Filter that matches input with HealthState value Ok. The value is 2. - Warning - Filter that matches input with HealthState value Warning. The value is 4. - Error - Filter that matches input with HealthState value Error. The value is 8. - All - Filter that matches input with any HealthState value. The value is 65535. :type applications_health_state_filter: int :param events_health_state_filter: Allows filtering the collection of HealthEvent objects returned based on health state. The possible values for this parameter include integer value of one of the following health states. Only events that match the filter are returned. All events are used to evaluate the aggregated health state. 
If not specified, all entries are returned. The state values are flag-based enumeration, so the value could be a combination of these values, obtained using the bitwise 'OR' operator. For example, If the provided value is 6 then all of the events with HealthState value of OK (2) and Warning (4) are returned. - Default - Default value. Matches any HealthState. The value is zero. - None - Filter that doesn't match any HealthState value. Used in order to return no results on a given collection of states. The value is 1. - Ok - Filter that matches input with HealthState value Ok. The value is 2. - Warning - Filter that matches input with HealthState value Warning. The value is 4. - Error - Filter that matches input with HealthState value Error. The value is 8. - All - Filter that matches input with any HealthState value. The value is 65535. :type events_health_state_filter: int :param exclude_health_statistics: Indicates whether the health statistics should be returned as part of the query result. False by default. The statistics show the number of children entities in health state Ok, Warning, and Error. :type exclude_health_statistics: bool :param include_system_application_health_statistics: Indicates whether the health statistics should include the fabric:/System application health statistics. False by default. If IncludeSystemApplicationHealthStatistics is set to true, the health statistics include the entities that belong to the fabric:/System application. Otherwise, the query result includes health statistics only for user applications. The health statistics must be included in the query result for this parameter to be applied. :type include_system_application_health_statistics: bool :param timeout: The server timeout for performing the operation in seconds. This timeout specifies the time duration that the client is willing to wait for the requested operation to complete. The default value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: ClusterHealth or ClientRawResponse if raw=true :rtype: ~azure.servicefabric.models.ClusterHealth or ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>` """ api_version = "6.0" # Construct URL url = self.get_cluster_health.metadata['url'] # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if nodes_health_state_filter is not None: query_parameters['NodesHealthStateFilter'] = self._serialize.query("nodes_health_state_filter", nodes_health_state_filter, 'int') if applications_health_state_filter is not None: query_parameters['ApplicationsHealthStateFilter'] = self._serialize.query("applications_health_state_filter", applications_health_state_filter, 'int') if events_health_state_filter is not None: query_parameters['EventsHealthStateFilter'] = self._serialize.query("events_health_state_filter", events_health_state_filter, 'int') if exclude_health_statistics is not None: query_parameters['ExcludeHealthStatistics'] = self._serialize.query("exclude_health_statistics", exclude_health_statistics, 'bool') if include_system_application_health_statistics is not None: query_parameters['IncludeSystemApplicationHealthStatistics'] = self._serialize.query("include_system_application_health_statistics", include_system_application_health_statistics, 'bool') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if custom_headers: header_parameters.update(custom_headers) # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.FabricErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('ClusterHealth', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
[ "def", "get_cluster_health", "(", "self", ",", "nodes_health_state_filter", "=", "0", ",", "applications_health_state_filter", "=", "0", ",", "events_health_state_filter", "=", "0", ",", "exclude_health_statistics", "=", "False", ",", "include_system_application_health_statistics", "=", "False", ",", "timeout", "=", "60", ",", "custom_headers", "=", "None", ",", "raw", "=", "False", ",", "*", "*", "operation_config", ")", ":", "api_version", "=", "\"6.0\"", "# Construct URL", "url", "=", "self", ".", "get_cluster_health", ".", "metadata", "[", "'url'", "]", "# Construct parameters", "query_parameters", "=", "{", "}", "query_parameters", "[", "'api-version'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"api_version\"", ",", "api_version", ",", "'str'", ")", "if", "nodes_health_state_filter", "is", "not", "None", ":", "query_parameters", "[", "'NodesHealthStateFilter'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"nodes_health_state_filter\"", ",", "nodes_health_state_filter", ",", "'int'", ")", "if", "applications_health_state_filter", "is", "not", "None", ":", "query_parameters", "[", "'ApplicationsHealthStateFilter'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"applications_health_state_filter\"", ",", "applications_health_state_filter", ",", "'int'", ")", "if", "events_health_state_filter", "is", "not", "None", ":", "query_parameters", "[", "'EventsHealthStateFilter'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"events_health_state_filter\"", ",", "events_health_state_filter", ",", "'int'", ")", "if", "exclude_health_statistics", "is", "not", "None", ":", "query_parameters", "[", "'ExcludeHealthStatistics'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"exclude_health_statistics\"", ",", "exclude_health_statistics", ",", "'bool'", ")", "if", "include_system_application_health_statistics", "is", "not", "None", ":", "query_parameters", "[", "'IncludeSystemApplicationHealthStatistics'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"include_system_application_health_statistics\"", ",", "include_system_application_health_statistics", ",", "'bool'", ")", "if", "timeout", "is", "not", "None", ":", "query_parameters", "[", "'timeout'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"timeout\"", ",", "timeout", ",", "'long'", ",", "maximum", "=", "4294967295", ",", "minimum", "=", "1", ")", "# Construct headers", "header_parameters", "=", "{", "}", "header_parameters", "[", "'Accept'", "]", "=", "'application/json'", "if", "custom_headers", ":", "header_parameters", ".", "update", "(", "custom_headers", ")", "# Construct and send request", "request", "=", "self", ".", "_client", ".", "get", "(", "url", ",", "query_parameters", ",", "header_parameters", ")", "response", "=", "self", ".", "_client", ".", "send", "(", "request", ",", "stream", "=", "False", ",", "*", "*", "operation_config", ")", "if", "response", ".", "status_code", "not", "in", "[", "200", "]", ":", "raise", "models", ".", "FabricErrorException", "(", "self", ".", "_deserialize", ",", "response", ")", "deserialized", "=", "None", "if", "response", ".", "status_code", "==", "200", ":", "deserialized", "=", "self", ".", "_deserialize", "(", "'ClusterHealth'", ",", "response", ")", "if", "raw", ":", "client_raw_response", "=", "ClientRawResponse", "(", "deserialized", ",", "response", ")", "return", "client_raw_response", "return", "deserialized" ]
56.306748
0.001071
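A hedged usage sketch showing how the flag-based filters combine; `client` stands for an instance of the generated Service Fabric client (construction elided), and the result attribute name is an assumption about the ClusterHealth model:

    OK, WARNING, ERROR = 2, 4, 8  # HealthStateFilter flag values from the docstring
    health = client.get_cluster_health(
        nodes_health_state_filter=OK | WARNING,  # 6: return only Ok and Warning nodes
        events_health_state_filter=ERROR,        # Error events only
        timeout=120)
    print(health.aggregated_health_state)        # attribute name assumed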
def export_data_json(self, return_response_object, chunk_size=1024, path=None, data_type_name=None,
                     date_range=None, delimiter=None, start_date_time=None, end_date_time=None,
                     omit_fields=None, only_fields=None, campaign_id=None):
    """
    Custom Keyword arguments:

    1. return_response_object: if set to True, the 'r' response object will be returned so that
    you can manipulate the data in any way you want. If set to False, the response is written to
    a file where each Iterable activity you're exporting is a single-line JSON object.

    2. chunk_size: used as the parameter in the r.iter_content(chunk_size) method that controls
    how big the response chunks are (in bytes). The best value may vary with the device making
    the request. Default is 1024 bytes.

    3. path: the directory the file is downloaded into, e.g. "/Users/username/Desktop/".
    If not set, the file downloads into the current directory.
    """
    call = "/api/export/data.json"
    # make sure correct ranges are being used
    date_ranges = ["Today", "Yesterday", "BeforeToday", "All"]
    if not isinstance(return_response_object, bool):
        raise ValueError("'return_response_object' parameter must be a boolean")
    if not isinstance(chunk_size, int):
        raise ValueError("'chunk_size' parameter must be an integer")
    payload = {}
    if data_type_name is not None:
        payload["dataTypeName"] = data_type_name
    if date_range is not None and date_range in date_ranges:
        payload["range"] = date_range
    if start_date_time is not None:
        payload["startDateTime"] = start_date_time
    if end_date_time is not None:
        payload["endDateTime"] = end_date_time
    if omit_fields is not None:
        payload["omitFields"] = omit_fields
    if only_fields is not None and isinstance(only_fields, list):
        payload["onlyFields"] = only_fields
    if campaign_id is not None:
        payload["campaignId"] = campaign_id
    return self.export_data_api(call=call, chunk_size=chunk_size, params=payload, path=path,
                                return_response_object=return_response_object)
[ "def", "export_data_json", "(", "self", ",", "return_response_object", ",", "chunk_size", "=", "1024", ",", "path", "=", "None", ",", "data_type_name", "=", "None", ",", "date_range", "=", "None", ",", "delimiter", "=", "None", ",", "start_date_time", "=", "None", ",", "end_date_time", "=", "None", ",", "omit_fields", "=", "None", ",", "only_fields", "=", "None", ",", "campaign_id", "=", "None", ")", ":", "call", "=", "\"/api/export/data.json\"", "# make sure correct ranges are being used", "date_ranges", "=", "[", "\"Today\"", ",", "\"Yesterday\"", ",", "\"BeforeToday\"", ",", "\"All\"", "]", "if", "isinstance", "(", "return_response_object", ",", "bool", ")", "is", "False", ":", "raise", "ValueError", "(", "\"'return_iterator_object'parameter must be a boolean\"", ")", "if", "chunk_size", "is", "not", "None", "and", "isinstance", "(", "chunk_size", ",", "int", ")", ":", "pass", "else", ":", "raise", "ValueError", "(", "\"'chunk_size' parameter must be a integer\"", ")", "payload", "=", "{", "}", "if", "data_type_name", "is", "not", "None", ":", "payload", "[", "\"dataTypeName\"", "]", "=", "data_type_name", "if", "date_range", "is", "not", "None", "and", "date_range", "in", "date_ranges", ":", "payload", "[", "\"range\"", "]", "=", "date_range", "if", "start_date_time", "is", "not", "None", ":", "payload", "[", "\"startDateTime\"", "]", "=", "start_date_time", "if", "end_date_time", "is", "not", "None", ":", "payload", "[", "\"endDateTime\"", "]", "=", "end_date_time", "if", "omit_fields", "is", "not", "None", ":", "payload", "[", "\"omitFields\"", "]", "=", "omit_fields", "if", "only_fields", "is", "not", "None", "and", "isinstance", "(", "only_fields", ",", "list", ")", ":", "payload", "[", "\"onlyFields\"", "]", "=", "only_fields", "if", "campaign_id", "is", "not", "None", ":", "payload", "[", "\"campaignId\"", "]", "=", "campaign_id", "return", "self", ".", "export_data_api", "(", "call", "=", "call", ",", "chunk_size", "=", "chunk_size", ",", "params", "=", "payload", ",", "path", "=", "path", ",", "return_response_object", "=", "return_response_object", ")" ]
33.045455
0.036509
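A minimal sketch of streaming the export manually, assuming `api` is an instance of the wrapper class exposing this method and the data type name is hypothetical:

    r = api.export_data_json(return_response_object=True,
                             data_type_name="emailSend",   # hypothetical Iterable data type
                             date_range="Yesterday")
    with open("export.json", "wb") as f:
        for chunk in r.iter_content(chunk_size=1024):
            f.write(chunk)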
def add_zone(self, spatial_unit, container_id, name='', description='', visible=True, reuse=0, drop_behavior_type=None): """container_id is a targetId that the zone belongs to """ if not isinstance(spatial_unit, abc_mapping_primitives.SpatialUnit): raise InvalidArgument('zone is not a SpatialUnit') # if not isinstance(name, DisplayText): # raise InvalidArgument('name is not a DisplayText object') if not isinstance(reuse, int): raise InvalidArgument('reuse must be an integer') if reuse < 0: raise InvalidArgument('reuse must be >= 0') if not isinstance(name, DisplayText): # if default '' name = self._str_display_text(name) if not isinstance(description, DisplayText): # if default '' description = self._str_display_text(description) zone = { 'id': str(ObjectId()), 'spatialUnit': spatial_unit.get_spatial_unit_map(), 'containerId': container_id, 'names': [self._dict_display_text(name)], 'descriptions': [self._dict_display_text(description)], 'visible': visible, 'reuse': reuse, 'dropBehaviorType': str(drop_behavior_type) } self.my_osid_object_form._my_map['zones'].append(zone) return zone
[ "def", "add_zone", "(", "self", ",", "spatial_unit", ",", "container_id", ",", "name", "=", "''", ",", "description", "=", "''", ",", "visible", "=", "True", ",", "reuse", "=", "0", ",", "drop_behavior_type", "=", "None", ")", ":", "if", "not", "isinstance", "(", "spatial_unit", ",", "abc_mapping_primitives", ".", "SpatialUnit", ")", ":", "raise", "InvalidArgument", "(", "'zone is not a SpatialUnit'", ")", "# if not isinstance(name, DisplayText):", "# raise InvalidArgument('name is not a DisplayText object')", "if", "not", "isinstance", "(", "reuse", ",", "int", ")", ":", "raise", "InvalidArgument", "(", "'reuse must be an integer'", ")", "if", "reuse", "<", "0", ":", "raise", "InvalidArgument", "(", "'reuse must be >= 0'", ")", "if", "not", "isinstance", "(", "name", ",", "DisplayText", ")", ":", "# if default ''", "name", "=", "self", ".", "_str_display_text", "(", "name", ")", "if", "not", "isinstance", "(", "description", ",", "DisplayText", ")", ":", "# if default ''", "description", "=", "self", ".", "_str_display_text", "(", "description", ")", "zone", "=", "{", "'id'", ":", "str", "(", "ObjectId", "(", ")", ")", ",", "'spatialUnit'", ":", "spatial_unit", ".", "get_spatial_unit_map", "(", ")", ",", "'containerId'", ":", "container_id", ",", "'names'", ":", "[", "self", ".", "_dict_display_text", "(", "name", ")", "]", ",", "'descriptions'", ":", "[", "self", ".", "_dict_display_text", "(", "description", ")", "]", ",", "'visible'", ":", "visible", ",", "'reuse'", ":", "reuse", ",", "'dropBehaviorType'", ":", "str", "(", "drop_behavior_type", ")", "}", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'zones'", "]", ".", "append", "(", "zone", ")", "return", "zone" ]
46.862069
0.002163
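A hedged usage sketch; `form` is assumed to be the authoring form exposing this method and `unit` a valid SpatialUnit:

    zone = form.add_zone(spatial_unit=unit,
                         container_id='target-1',   # hypothetical target id
                         name='Drop zone A',
                         description='Accepts droppable A',
                         reuse=2)
    print(zone['id'])  # freshly minted ObjectId string for the new zone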
def sectionWalker(section, callback, *args, walkTrace=tuple(), **kwargs):
    """
    callback needs to be a function that handles the different Section elements appropriately
    walkTrace needs to be a tuple indicating the route to the section, e.g. (1,2,0)
    """
    callback(section, *args, walkTrace=walkTrace, case='sectionmain', **kwargs)
    for f in section.figs.items():
        callback(section, *args, walkTrace=walkTrace, case='figure', element=f, **kwargs)
    for t in section.tabs.items():
        callback(section, *args, walkTrace=walkTrace, case='table', element=t, **kwargs)
    c = count(1)  # numbers the subsections, starting from 1
    for s in section.subs:
        Section.sectionWalker(s, callback, *args, walkTrace=walkTrace + (next(c),), **kwargs)
[ "def", "sectionWalker", "(", "section", ",", "callback", ",", "*", "args", ",", "walkTrace", "=", "tuple", "(", ")", ",", "*", "*", "kwargs", ")", ":", "callback", "(", "section", ",", "*", "args", ",", "walkTrace", "=", "walkTrace", ",", "case", "=", "'sectionmain'", ",", "*", "*", "kwargs", ")", "c", "=", "count", "(", "1", ")", "for", "f", "in", "section", ".", "figs", ".", "items", "(", ")", ":", "callback", "(", "section", ",", "*", "args", ",", "walkTrace", "=", "walkTrace", ",", "case", "=", "'figure'", ",", "element", "=", "f", ",", "*", "*", "kwargs", ")", "c", "=", "count", "(", "1", ")", "for", "t", "in", "section", ".", "tabs", ".", "items", "(", ")", ":", "callback", "(", "section", ",", "*", "args", ",", "walkTrace", "=", "walkTrace", ",", "case", "=", "'table'", ",", "element", "=", "t", ",", "*", "*", "kwargs", ")", "c", "=", "count", "(", "1", ")", "for", "s", "in", "section", ".", "subs", ":", "Section", ".", "sectionWalker", "(", "s", ",", "callback", ",", "*", "args", ",", "walkTrace", "=", "walkTrace", "+", "(", "next", "(", "c", ")", ",", ")", ",", "*", "*", "kwargs", ")" ]
49.3125
0.03607
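A minimal sketch of a compatible callback that prints the walk; the `report` object is hypothetical:

    def printer(section, walkTrace=(), case=None, element=None):
        # every case receives the route tuple; only figures/tables get an element
        if case == 'sectionmain':
            print('.'.join(map(str, walkTrace)) or 'root')

    sectionWalker(report, printer)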
def initialize(): """IMPORTANT: Call this function at the beginning of your program.""" try: requests.get('http://localhost:4242/poll') state['connected_to_bot'] = True except requests.exceptions.ConnectionError: state['connected_to_bot'] = False # set up turtle state['window'] = turtle.Screen() state['window'].setup(width=AXI_WIDTH, height=AXI_HEIGHT) state['turtle'] = turtle.Turtle() state['turtle'].width(2) state['turtle'].speed(0) point_in_direction(0) pen_up()
[ "def", "initialize", "(", ")", ":", "try", ":", "requests", ".", "get", "(", "'http://localhost:4242/poll'", ")", "state", "[", "'connected_to_bot'", "]", "=", "True", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "state", "[", "'connected_to_bot'", "]", "=", "False", "# set up turtle", "state", "[", "'window'", "]", "=", "turtle", ".", "Screen", "(", ")", "state", "[", "'window'", "]", ".", "setup", "(", "width", "=", "AXI_WIDTH", ",", "height", "=", "AXI_HEIGHT", ")", "state", "[", "'turtle'", "]", "=", "turtle", ".", "Turtle", "(", ")", "state", "[", "'turtle'", "]", ".", "width", "(", "2", ")", "state", "[", "'turtle'", "]", ".", "speed", "(", "0", ")", "point_in_direction", "(", "0", ")", "pen_up", "(", ")" ]
30.764706
0.001855
def _get_children_path_interval(cls, path): """:returns: An interval of all possible children paths for a node.""" return (path + cls.alphabet[0] * cls.steplen, path + cls.alphabet[-1] * cls.steplen)
[ "def", "_get_children_path_interval", "(", "cls", ",", "path", ")", ":", "return", "(", "path", "+", "cls", ".", "alphabet", "[", "0", "]", "*", "cls", ".", "steplen", ",", "path", "+", "cls", ".", "alphabet", "[", "-", "1", "]", "*", "cls", ".", "steplen", ")" ]
57
0.008658
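Illustratively, for a materialized-path tree with a 4-character step over an alphabet running from '0' to 'Z' (values assumed):

    # _get_children_path_interval(Node, '0001') -> ('00010000', '0001ZZZZ')
    # Any descendant path of '0001' sorts lexicographically inside this interval,
    # so a single BETWEEN clause covers the subtree; callers typically add a
    # depth filter to restrict the result to direct children.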
def com_google_fonts_check_metadata_valid_copyright(font_metadata): """Copyright notices match canonical pattern in METADATA.pb""" import re string = font_metadata.copyright does_match = re.search(r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)', string) if does_match: yield PASS, "METADATA.pb copyright string is good" else: yield FAIL, ("METADATA.pb: Copyright notices should match" " a pattern similar to:" " 'Copyright 2017 The Familyname" " Project Authors (git url)'\n" "But instead we have got:" " '{}'").format(string)
[ "def", "com_google_fonts_check_metadata_valid_copyright", "(", "font_metadata", ")", ":", "import", "re", "string", "=", "font_metadata", ".", "copyright", "does_match", "=", "re", ".", "search", "(", "r'Copyright [0-9]{4} The .* Project Authors \\([^\\@]*\\)'", ",", "string", ")", "if", "does_match", ":", "yield", "PASS", ",", "\"METADATA.pb copyright string is good\"", "else", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb: Copyright notices should match\"", "\" a pattern similar to:\"", "\" 'Copyright 2017 The Familyname\"", "\" Project Authors (git url)'\\n\"", "\"But instead we have got:\"", "\" '{}'\"", ")", ".", "format", "(", "string", ")" ]
43.466667
0.013514
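For reference, a quick check of what the pattern accepts (strings hypothetical):

    import re
    pat = r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)'
    good = 'Copyright 2017 The Roboto Project Authors (https://github.com/google/roboto)'
    bad = 'Copyright 2017 The Roboto Project Authors (team@example.com)'
    assert re.search(pat, good)
    assert not re.search(pat, bad)  # an '@' inside the parentheses is rejected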
def segment_allocation_find(context, lock_mode=False, **filters):
    """Query for segment allocations."""
    range_ids = filters.pop("segment_allocation_range_ids", None)
    query = context.session.query(models.SegmentAllocation)
    if lock_mode:
        query = query.with_lockmode("update")
    query = query.filter_by(**filters)
    # Optionally filter by given list of range ids; the filtered query must be
    # reassigned, since SQLAlchemy's filter() returns a new query object
    if range_ids:
        query = query.filter(
            models.SegmentAllocation.segment_allocation_range_id.in_(range_ids))
    return query
[ "def", "segment_allocation_find", "(", "context", ",", "lock_mode", "=", "False", ",", "*", "*", "filters", ")", ":", "range_ids", "=", "filters", ".", "pop", "(", "\"segment_allocation_range_ids\"", ",", "None", ")", "query", "=", "context", ".", "session", ".", "query", "(", "models", ".", "SegmentAllocation", ")", "if", "lock_mode", ":", "query", "=", "query", ".", "with_lockmode", "(", "\"update\"", ")", "query", "=", "query", ".", "filter_by", "(", "*", "*", "filters", ")", "# Optionally filter by given list of range ids", "if", "range_ids", ":", "query", ".", "filter", "(", "models", ".", "SegmentAllocation", ".", "segment_allocation_range_id", ".", "in_", "(", "range_ids", ")", ")", "return", "query" ]
33.0625
0.001838
def add_node(self, node): """ Add a node to this network, let the node know which network it's on. """ if _debug: Network._debug("add_node %r", node) self.nodes.append(node) node.lan = self # update the node name if not node.name: node.name = '%s:%s' % (self.name, node.address)
[ "def", "add_node", "(", "self", ",", "node", ")", ":", "if", "_debug", ":", "Network", ".", "_debug", "(", "\"add_node %r\"", ",", "node", ")", "self", ".", "nodes", ".", "append", "(", "node", ")", "node", ".", "lan", "=", "self", "# update the node name", "if", "not", "node", ".", "name", ":", "node", ".", "name", "=", "'%s:%s'", "%", "(", "self", ".", "name", ",", "node", ".", "address", ")" ]
33.1
0.011765
def expect_regex(self, pattern, timeout=3, regex_options=0):
    """Wait for a match to the regex in *pattern* to appear on the stream.

    Waits for input matching the regex *pattern* for up to *timeout*
    seconds. If a match is found, a :class:`RegexMatch` result is
    returned. If no match is found within *timeout* seconds, an
    :class:`ExpectTimeout` exception is raised.

    :param pattern: The pattern to search for, as a single compiled regex
        or a string that will be processed as a regex.
    :param float timeout: Timeout in seconds.
    :param regex_options: Options passed to the regex engine.
    :return: :class:`RegexMatch` once a match is found.
    :raises ExpectTimeout: if no match is found within *timeout* seconds.
    """
    return self.expect(RegexSearcher(pattern, regex_options), timeout)
[ "def", "expect_regex", "(", "self", ",", "pattern", ",", "timeout", "=", "3", ",", "regex_options", "=", "0", ")", ":", "return", "self", ".", "expect", "(", "RegexSearcher", "(", "pattern", ",", "regex_options", ")", ",", "timeout", ")" ]
53.533333
0.002448
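A hedged usage sketch; the `stream` object is an assumption and the exception path follows the docstring:

    try:
        match = stream.expect_regex(r'login: (\w+)', timeout=5)
        print('matched:', match)   # a RegexMatch describing where the pattern hit
    except ExpectTimeout:
        print('no login prompt within 5 seconds')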
def delete(self, id=None, q=None, commit=None, softCommit=False, waitFlush=None, waitSearcher=None, handler='update'):  # NOQA: A002
    """
    Deletes documents.

    Requires *either* ``id`` or ``q``. ``id`` is if you know the specific
    document id to remove. Note that ``id`` can also be a list of document
    ids to be deleted. ``q`` is a Lucene-style query indicating a collection
    of documents to delete.

    Optionally accepts ``commit``. Default is ``True``.

    Optionally accepts ``softCommit``. Default is ``False``.

    Optionally accepts ``waitFlush``. Default is ``None``.

    Optionally accepts ``waitSearcher``. Default is ``None``.

    Usage::

        solr.delete(id='doc_12')
        solr.delete(id=['doc_1', 'doc_3'])
        solr.delete(q='*:*')

    """
    if id is None and q is None:
        raise ValueError('You must specify "id" or "q".')
    elif id is not None and q is not None:
        raise ValueError('You may only specify "id" OR "q", not both.')
    elif id is not None:
        if not isinstance(id, (list, set, tuple)):
            doc_id = [id]
        else:
            doc_id = list(filter(None, id))
        if doc_id:
            m = '<delete>%s</delete>' % ''.join('<id>%s</id>' % i for i in doc_id)
        else:
            raise ValueError('The list of documents to delete was empty.')
    elif q is not None:
        m = '<delete><query>%s</query></delete>' % q

    return self._update(m, commit=commit, softCommit=softCommit,
                        waitFlush=waitFlush, waitSearcher=waitSearcher,
                        handler=handler)
[ "def", "delete", "(", "self", ",", "id", "=", "None", ",", "q", "=", "None", ",", "commit", "=", "None", ",", "softCommit", "=", "False", ",", "waitFlush", "=", "None", ",", "waitSearcher", "=", "None", ",", "handler", "=", "'update'", ")", ":", "# NOQA: A002", "if", "id", "is", "None", "and", "q", "is", "None", ":", "raise", "ValueError", "(", "'You must specify \"id\" or \"q\".'", ")", "elif", "id", "is", "not", "None", "and", "q", "is", "not", "None", ":", "raise", "ValueError", "(", "'You many only specify \"id\" OR \"q\", not both.'", ")", "elif", "id", "is", "not", "None", ":", "if", "not", "isinstance", "(", "id", ",", "(", "list", ",", "set", ",", "tuple", ")", ")", ":", "doc_id", "=", "[", "id", "]", "else", ":", "doc_id", "=", "list", "(", "filter", "(", "None", ",", "id", ")", ")", "if", "doc_id", ":", "m", "=", "'<delete>%s</delete>'", "%", "''", ".", "join", "(", "'<id>%s</id>'", "%", "i", "for", "i", "in", "doc_id", ")", "else", ":", "raise", "ValueError", "(", "'The list of documents to delete was empty.'", ")", "elif", "q", "is", "not", "None", ":", "m", "=", "'<delete><query>%s</query></delete>'", "%", "q", "return", "self", ".", "_update", "(", "m", ",", "commit", "=", "commit", ",", "softCommit", "=", "softCommit", ",", "waitFlush", "=", "waitFlush", ",", "waitSearcher", "=", "waitSearcher", ",", "handler", "=", "handler", ")" ]
40.146341
0.002372
def add_row_group(self, tables, collapsed=True): """ Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default) """ self.__groups.append((tables, collapsed))
[ "def", "add_row_group", "(", "self", ",", "tables", ",", "collapsed", "=", "True", ")", ":", "self", ".", "__groups", ".", "append", "(", "(", "tables", ",", "collapsed", ")", ")" ]
47.714286
0.008824
def sine(x):
    '''
    sine(x) is equivalent to sin(x) except that it also works on sparse arrays.
    '''
    if sps.issparse(x):
        x = x.copy()
        x.data = np.sin(x.data)  # numpy has no np.sine; apply sin to the stored entries
        return x
    else:
        return np.sin(x)
[ "def", "sine", "(", "x", ")", ":", "if", "sps", ".", "issparse", "(", "x", ")", ":", "x", "=", "x", ".", "copy", "(", ")", "x", ".", "data", "=", "np", ".", "sine", "(", "x", ".", "data", ")", "return", "x", "else", ":", "return", "np", ".", "sin", "(", "x", ")" ]
24.666667
0.008696
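A quick check of both the dense and the sparse paths (numpy/scipy aliased as in the function body):

    import numpy as np
    import scipy.sparse as sps

    dense = np.array([0.0, np.pi / 2])
    print(np.allclose(sine(dense), [0.0, 1.0]))               # True
    sparse = sps.csr_matrix(dense)
    print(np.allclose(sine(sparse).toarray(), [[0.0, 1.0]]))  # True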
def trans(ele, standard=False):
    """Translates esprima syntax tree to python by delegating to the appropriate translating node"""
    try:
        node = globals().get(ele['type'])
        if not node:
            raise NotImplementedError('%s is not supported!' % ele['type'])
        if standard:
            node = node.__dict__['standard'] if 'standard' in node.__dict__ else node
        return node(**ele)
    except Exception:
        # re-raise unchanged; the handler exists only as a debugging hook
        raise
[ "def", "trans", "(", "ele", ",", "standard", "=", "False", ")", ":", "try", ":", "node", "=", "globals", "(", ")", ".", "get", "(", "ele", "[", "'type'", "]", ")", "if", "not", "node", ":", "raise", "NotImplementedError", "(", "'%s is not supported!'", "%", "ele", "[", "'type'", "]", ")", "if", "standard", ":", "node", "=", "node", ".", "__dict__", "[", "'standard'", "]", "if", "'standard'", "in", "node", ".", "__dict__", "else", "node", "return", "node", "(", "*", "*", "ele", ")", "except", ":", "#print ele", "raise" ]
35.384615
0.008475
def macshim(): """Shim to run 32-bit on 64-bit mac as a sub-process""" import subprocess, sys subprocess.call([ sys.argv[0] + '32' ]+sys.argv[1:], env={"VERSIONER_PYTHON_PREFER_32_BIT":"yes"} )
[ "def", "macshim", "(", ")", ":", "import", "subprocess", ",", "sys", "subprocess", ".", "call", "(", "[", "sys", ".", "argv", "[", "0", "]", "+", "'32'", "]", "+", "sys", ".", "argv", "[", "1", ":", "]", ",", "env", "=", "{", "\"VERSIONER_PYTHON_PREFER_32_BIT\"", ":", "\"yes\"", "}", ")" ]
27.875
0.017391
def generate_timestamped_string(subject="test", number_of_random_chars=4):
    """
    Generate time-stamped string. Format as follows...

    `2013-01-31_14:12:23_SubjectString_a3Zg`

    Kwargs:
        subject (str): String to use as subject.
        number_of_random_chars (int): Number of random characters to append.

    This method is helpful for creating unique names with timestamps in them so
    when you have to troubleshoot an issue, the name is easier to find.::

        self.project_name = generate_timestamped_string("project")
        new_project_page.create_project(self.project_name)

    """
    random_str = generate_random_string(number_of_random_chars)
    timestamp = generate_timestamp()
    return u"{timestamp}_{subject}_{random_str}".format(timestamp=timestamp,
                                                        subject=subject,
                                                        random_str=random_str)
[ "def", "generate_timestamped_string", "(", "subject", "=", "\"test\"", ",", "number_of_random_chars", "=", "4", ")", ":", "random_str", "=", "generate_random_string", "(", "number_of_random_chars", ")", "timestamp", "=", "generate_timestamp", "(", ")", "return", "u\"{timestamp}_{subject}_{random_str}\"", ".", "format", "(", "timestamp", "=", "timestamp", ",", "subject", "=", "subject", ",", "random_str", "=", "random_str", ")" ]
38.291667
0.002123
def cardinality(self):
    '''
    Obtain the cardinality string.

    Example:
        '1C' for a conditional link with a single instance [0..1]
        'MC' for a conditional link with any number of instances [0..*]
        'M'  for a link with more than one instance [1..*]
        '1'  for a link with exactly one instance [1]
    '''
    if self.many:
        s = 'M'
    else:
        s = '1'

    if self.conditional:
        s += 'C'

    return s
[ "def", "cardinality", "(", "self", ")", ":", "if", "self", ".", "many", ":", "s", "=", "'M'", "else", ":", "s", "=", "'1'", "if", "self", ".", "conditional", ":", "s", "+=", "'C'", "return", "s" ]
28.388889
0.00947
def normalize_name(s): """Convert a string into a valid python attribute name. This function is called to convert ASCII strings to something that can pass as python attribute name, to be used with namedtuples. >>> str(normalize_name('class')) 'class_' >>> str(normalize_name('a-name')) 'a_name' >>> str(normalize_name('a n\u00e4me')) 'a_name' >>> str(normalize_name('Name')) 'Name' >>> str(normalize_name('')) '_' >>> str(normalize_name('1')) '_1' """ s = s.replace('-', '_').replace('.', '_').replace(' ', '_') if s in keyword.kwlist: return s + '_' s = '_'.join(slug(ss, lowercase=False) for ss in s.split('_')) if not s: s = '_' if s[0] not in string.ascii_letters + '_': s = '_' + s return s
[ "def", "normalize_name", "(", "s", ")", ":", "s", "=", "s", ".", "replace", "(", "'-'", ",", "'_'", ")", ".", "replace", "(", "'.'", ",", "'_'", ")", ".", "replace", "(", "' '", ",", "'_'", ")", "if", "s", "in", "keyword", ".", "kwlist", ":", "return", "s", "+", "'_'", "s", "=", "'_'", ".", "join", "(", "slug", "(", "ss", ",", "lowercase", "=", "False", ")", "for", "ss", "in", "s", ".", "split", "(", "'_'", ")", ")", "if", "not", "s", ":", "s", "=", "'_'", "if", "s", "[", "0", "]", "not", "in", "string", ".", "ascii_letters", "+", "'_'", ":", "s", "=", "'_'", "+", "s", "return", "s" ]
28.962963
0.002475
def validate(self, result, spec): # noqa Yes, it's too complex. """Validate that the result has the correct structure.""" if spec is None: # None matches anything. return if isinstance(spec, dict): if not isinstance(result, dict): raise ValueError('Dictionary expected, but %r found.' % result) if spec: spec_value = next(iter(spec.values())) # Yay Python 3! for value in result.values(): self.validate(value, spec_value) spec_key = next(iter(spec.keys())) for key in result: self.validate(key, spec_key) if isinstance(spec, list): if not isinstance(result, list): raise ValueError('List expected, but %r found.' % result) if spec: for value in result: self.validate(value, spec[0]) if isinstance(spec, tuple): if not isinstance(result, tuple): raise ValueError('Tuple expected, but %r found.' % result) if len(result) != len(spec): raise ValueError('Expected %d elements in tuple %r.' % (len(spec), result)) for s, value in zip(spec, result): self.validate(value, s) if isinstance(spec, six.string_types): if not isinstance(result, six.string_types): raise ValueError('String expected, but %r found.' % result) if isinstance(spec, int): if not isinstance(result, int): raise ValueError('Integer expected, but %r found.' % result) if isinstance(spec, bool): if not isinstance(result, bool): raise ValueError('Boolean expected, but %r found.' % result)
[ "def", "validate", "(", "self", ",", "result", ",", "spec", ")", ":", "# noqa Yes, it's too complex.", "if", "spec", "is", "None", ":", "# None matches anything.", "return", "if", "isinstance", "(", "spec", ",", "dict", ")", ":", "if", "not", "isinstance", "(", "result", ",", "dict", ")", ":", "raise", "ValueError", "(", "'Dictionary expected, but %r found.'", "%", "result", ")", "if", "spec", ":", "spec_value", "=", "next", "(", "iter", "(", "spec", ".", "values", "(", ")", ")", ")", "# Yay Python 3!", "for", "value", "in", "result", ".", "values", "(", ")", ":", "self", ".", "validate", "(", "value", ",", "spec_value", ")", "spec_key", "=", "next", "(", "iter", "(", "spec", ".", "keys", "(", ")", ")", ")", "for", "key", "in", "result", ":", "self", ".", "validate", "(", "key", ",", "spec_key", ")", "if", "isinstance", "(", "spec", ",", "list", ")", ":", "if", "not", "isinstance", "(", "result", ",", "list", ")", ":", "raise", "ValueError", "(", "'List expected, but %r found.'", "%", "result", ")", "if", "spec", ":", "for", "value", "in", "result", ":", "self", ".", "validate", "(", "value", ",", "spec", "[", "0", "]", ")", "if", "isinstance", "(", "spec", ",", "tuple", ")", ":", "if", "not", "isinstance", "(", "result", ",", "tuple", ")", ":", "raise", "ValueError", "(", "'Tuple expected, but %r found.'", "%", "result", ")", "if", "len", "(", "result", ")", "!=", "len", "(", "spec", ")", ":", "raise", "ValueError", "(", "'Expected %d elements in tuple %r.'", "%", "(", "len", "(", "spec", ")", ",", "result", ")", ")", "for", "s", ",", "value", "in", "zip", "(", "spec", ",", "result", ")", ":", "self", ".", "validate", "(", "value", ",", "s", ")", "if", "isinstance", "(", "spec", ",", "six", ".", "string_types", ")", ":", "if", "not", "isinstance", "(", "result", ",", "six", ".", "string_types", ")", ":", "raise", "ValueError", "(", "'String expected, but %r found.'", "%", "result", ")", "if", "isinstance", "(", "spec", ",", "int", ")", ":", "if", "not", "isinstance", "(", "result", ",", "int", ")", ":", "raise", "ValueError", "(", "'Integer expected, but %r found.'", "%", "result", ")", "if", "isinstance", "(", "spec", ",", "bool", ")", ":", "if", "not", "isinstance", "(", "result", ",", "bool", ")", ":", "raise", "ValueError", "(", "'Boolean expected, but %r found.'", "%", "result", ")" ]
48.026316
0.001074
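A sketch of the spec semantics, assuming `checker` is an instance of the enclosing class:

    checker.validate({'a': 1, 'b': 2}, {'key': 0})   # ok: str keys, int values
    checker.validate([('x', 1)], [('', 0)])          # ok: a list of (str, int) pairs
    checker.validate({'a': 'oops'}, {'key': 0})      # ValueError: Integer expected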
def rnow( format='%Y-%m-%d %H:%M:%S', in_utc=False): """rnow Get right now as a string formatted datetime :param format: string output format for datetime :param in_utc: bool timezone in utc or local time """ if in_utc: return datetime.datetime.utcnow().strftime( format) else: return datetime.datetime.now().strftime( format)
[ "def", "rnow", "(", "format", "=", "'%Y-%m-%d %H:%M:%S'", ",", "in_utc", "=", "False", ")", ":", "if", "in_utc", ":", "return", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "format", ")", "else", ":", "return", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "format", ")" ]
23.352941
0.002421
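Example output shapes (actual values depend on the clock):

    rnow()                        # '2019-05-01 13:37:00'  (local time)
    rnow('%Y%m%d', in_utc=True)   # '20190501'             (UTC)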
def log_trial(args):
    '''get trial log path'''
    trial_id_path_dict = {}
    nni_config = Config(get_config_filename(args))
    rest_port = nni_config.get_config('restServerPort')
    rest_pid = nni_config.get_config('restServerPid')
    if not detect_process(rest_pid):
        print_error('Experiment is not running...')
        return
    running, response = check_rest_server_quick(rest_port)
    if running:
        response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT)
        if response and check_response(response):
            content = json.loads(response.text)
            for trial in content:
                trial_id_path_dict[trial['id']] = trial['logPath']
    else:
        print_error('Restful server is not running...')
        exit(1)
    if args.id:
        if args.trial_id:
            if trial_id_path_dict.get(args.trial_id):
                print_normal('id:' + args.trial_id + ' path:' + trial_id_path_dict[args.trial_id])
            else:
                print_error('trial id is not valid!')
                exit(1)
        else:
            print_error('please specify the trial id!')
            exit(1)
    else:
        for key in trial_id_path_dict:
            print('id:' + key + ' path:' + trial_id_path_dict[key])
[ "def", "log_trial", "(", "args", ")", ":", "trial_id_path_dict", "=", "{", "}", "nni_config", "=", "Config", "(", "get_config_filename", "(", "args", ")", ")", "rest_port", "=", "nni_config", ".", "get_config", "(", "'restServerPort'", ")", "rest_pid", "=", "nni_config", ".", "get_config", "(", "'restServerPid'", ")", "if", "not", "detect_process", "(", "rest_pid", ")", ":", "print_error", "(", "'Experiment is not running...'", ")", "return", "running", ",", "response", "=", "check_rest_server_quick", "(", "rest_port", ")", "if", "running", ":", "response", "=", "rest_get", "(", "trial_jobs_url", "(", "rest_port", ")", ",", "REST_TIME_OUT", ")", "if", "response", "and", "check_response", "(", "response", ")", ":", "content", "=", "json", ".", "loads", "(", "response", ".", "text", ")", "for", "trial", "in", "content", ":", "trial_id_path_dict", "[", "trial", "[", "'id'", "]", "]", "=", "trial", "[", "'logPath'", "]", "else", ":", "print_error", "(", "'Restful server is not running...'", ")", "exit", "(", "1", ")", "if", "args", ".", "id", ":", "if", "args", ".", "trial_id", ":", "if", "trial_id_path_dict", ".", "get", "(", "args", ".", "trial_id", ")", ":", "print_normal", "(", "'id:'", "+", "args", ".", "trial_id", "+", "' path:'", "+", "trial_id_path_dict", "[", "args", ".", "trial_id", "]", ")", "else", ":", "print_error", "(", "'trial id is not valid!'", ")", "exit", "(", "1", ")", "else", ":", "print_error", "(", "'please specific the trial id!'", ")", "exit", "(", "1", ")", "else", ":", "for", "key", "in", "trial_id_path_dict", ":", "print", "(", "'id:'", "+", "key", "+", "' path:'", "+", "trial_id_path_dict", "[", "key", "]", ")" ]
38.65625
0.001577
async def pong(self, message: bytes=b'') -> None: """Send pong message.""" if isinstance(message, str): message = message.encode('utf-8') await self._send_frame(message, WSMsgType.PONG)
[ "async", "def", "pong", "(", "self", ",", "message", ":", "bytes", "=", "b''", ")", "->", "None", ":", "if", "isinstance", "(", "message", ",", "str", ")", ":", "message", "=", "message", ".", "encode", "(", "'utf-8'", ")", "await", "self", ".", "_send_frame", "(", "message", ",", "WSMsgType", ".", "PONG", ")" ]
43.4
0.0181
def write_json(json_obj, filename, mode="w", print_pretty=True):
    '''write_json will (optionally, pretty print) a json object to file

    Parameters
    ==========
    json_obj: the dict to print to json
    filename: the output file to write to
    print_pretty: if True, will use nicer formatting
    '''
    with open(filename, mode) as filey:
        if print_pretty:
            filey.writelines(print_json(json_obj))
        else:
            filey.writelines(json.dumps(json_obj))
    return filename
[ "def", "write_json", "(", "json_obj", ",", "filename", ",", "mode", "=", "\"w\"", ",", "print_pretty", "=", "True", ")", ":", "with", "open", "(", "filename", ",", "mode", ")", "as", "filey", ":", "if", "print_pretty", ":", "filey", ".", "writelines", "(", "print_json", "(", "json_obj", ")", ")", "else", ":", "filey", ".", "writelines", "(", "json", ".", "dumps", "(", "json_obj", ")", ")", "return", "filename" ]
34.066667
0.001905
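Usage sketch (print_json and json come from the enclosing module):

    write_json({'name': 'demo', 'ok': True}, '/tmp/demo.json')                       # pretty-printed
    write_json({'name': 'demo', 'ok': True}, '/tmp/demo.json', print_pretty=False)   # compact dump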
def query(self, query): '''Returns objects matching criteria expressed in `query`. Follows links.''' results = super(SymlinkDatastore, self).query(query) return self._follow_link_gen(results)
[ "def", "query", "(", "self", ",", "query", ")", ":", "results", "=", "super", "(", "SymlinkDatastore", ",", "self", ")", ".", "query", "(", "query", ")", "return", "self", ".", "_follow_link_gen", "(", "results", ")" ]
50
0.009852
def Fold(seglist1, seglist2): """ An iterator that generates the results of taking the intersection of seglist1 with each segment in seglist2 in turn. In each result, the segment start and stop values are adjusted to be with respect to the start of the corresponding segment in seglist2. See also the segmentlist_range() function. This has use in applications that wish to convert ranges of values to ranges relative to epoch boundaries. Below, a list of time intervals in hours is converted to a sequence of daily interval lists with times relative to midnight. Example: >>> from pycbc_glue.segments import * >>> x = segmentlist([segment(0, 13), segment(14, 20), segment(22, 36)]) >>> for y in Fold(x, segmentlist_range(0, 48, 24)): print y ... [segment(0, 13), segment(14, 20), segment(22, 24)] [segment(0, 12)] """ for seg in seglist2: yield (seglist1 & segments.segmentlist([seg])).shift(-seg[0])
[ "def", "Fold", "(", "seglist1", ",", "seglist2", ")", ":", "for", "seg", "in", "seglist2", ":", "yield", "(", "seglist1", "&", "segments", ".", "segmentlist", "(", "[", "seg", "]", ")", ")", ".", "shift", "(", "-", "seg", "[", "0", "]", ")" ]
37.666667
0.022654
def FindUniqueId(dic): """Return a string not used as a key in the dictionary dic""" name = str(len(dic)) while name in dic: # Use bigger numbers so it is obvious when an id is picked randomly. name = str(random.randint(1000000, 999999999)) return name
[ "def", "FindUniqueId", "(", "dic", ")", ":", "name", "=", "str", "(", "len", "(", "dic", ")", ")", "while", "name", "in", "dic", ":", "# Use bigger numbers so it is obvious when an id is picked randomly.", "name", "=", "str", "(", "random", ".", "randint", "(", "1000000", ",", "999999999", ")", ")", "return", "name" ]
37.428571
0.018657
def hicpro_capture_chart (self): """ Generate Capture Hi-C plot""" keys = OrderedDict() keys['valid_pairs_on_target_cap_cap'] = { 'color': '#0039e6', 'name': 'Capture-Capture interactions' } keys['valid_pairs_on_target_cap_rep'] = { 'color': '#809fff', 'name': 'Capture-Reporter interactions' } keys['valid_pairs_off_target'] = { 'color': '#cccccc', 'name': 'Off-target valid pairs' } # Check capture info are available num_samples = 0 for s_name in self.hicpro_data: for k in keys: num_samples += sum([1 if k in self.hicpro_data[s_name] else 0]) if num_samples == 0: return False # Config for the plot config = { 'id': 'hicpro_cap_plot', 'title': 'HiC-Pro: Capture Statistics', 'ylab': '# Pairs', 'cpswitch_counts_label': 'Number of Pairs' } return bargraph.plot(self.hicpro_data, keys, config)
[ "def", "hicpro_capture_chart", "(", "self", ")", ":", "keys", "=", "OrderedDict", "(", ")", "keys", "[", "'valid_pairs_on_target_cap_cap'", "]", "=", "{", "'color'", ":", "'#0039e6'", ",", "'name'", ":", "'Capture-Capture interactions'", "}", "keys", "[", "'valid_pairs_on_target_cap_rep'", "]", "=", "{", "'color'", ":", "'#809fff'", ",", "'name'", ":", "'Capture-Reporter interactions'", "}", "keys", "[", "'valid_pairs_off_target'", "]", "=", "{", "'color'", ":", "'#cccccc'", ",", "'name'", ":", "'Off-target valid pairs'", "}", "# Check capture info are available", "num_samples", "=", "0", "for", "s_name", "in", "self", ".", "hicpro_data", ":", "for", "k", "in", "keys", ":", "num_samples", "+=", "sum", "(", "[", "1", "if", "k", "in", "self", ".", "hicpro_data", "[", "s_name", "]", "else", "0", "]", ")", "if", "num_samples", "==", "0", ":", "return", "False", "# Config for the plot", "config", "=", "{", "'id'", ":", "'hicpro_cap_plot'", ",", "'title'", ":", "'HiC-Pro: Capture Statistics'", ",", "'ylab'", ":", "'# Pairs'", ",", "'cpswitch_counts_label'", ":", "'Number of Pairs'", "}", "return", "bargraph", ".", "plot", "(", "self", ".", "hicpro_data", ",", "keys", ",", "config", ")" ]
38.8
0.014085
def deprecated(use_instead=None): """This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.""" def wrapped(func): @wraps(func) def new_func(*args, **kwargs): message = "Call to deprecated function %s." % func.__name__ if use_instead: message += " Use %s instead." % use_instead if not DISABLE_WARNINGS: warn(message, stacklevel=3) return func(*args, **kwargs) return new_func return wrapped
[ "def", "deprecated", "(", "use_instead", "=", "None", ")", ":", "def", "wrapped", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "new_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "message", "=", "\"Call to deprecated function %s.\"", "%", "func", ".", "__name__", "if", "use_instead", ":", "message", "+=", "\" Use %s instead.\"", "%", "use_instead", "if", "not", "DISABLE_WARNINGS", ":", "warn", "(", "message", ",", "stacklevel", "=", "3", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "new_func", "return", "wrapped" ]
36.5625
0.001667
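Usage sketch:

    @deprecated(use_instead='new_frobnicate')
    def frobnicate():
        pass

    frobnicate()  # warns: "Call to deprecated function frobnicate. Use new_frobnicate instead."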
def iterargs(self): """ uses the singular name as key """ iterargs = OrderedDict() for name in self._iterargs: plural = self._profile.iterargs[name] iterargs[name] = tuple(self._values[plural]) return iterargs
[ "def", "iterargs", "(", "self", ")", ":", "iterargs", "=", "OrderedDict", "(", ")", "for", "name", "in", "self", ".", "_iterargs", ":", "plural", "=", "self", ".", "_profile", ".", "iterargs", "[", "name", "]", "iterargs", "[", "name", "]", "=", "tuple", "(", "self", ".", "_values", "[", "plural", "]", ")", "return", "iterargs" ]
33
0.012658
def get(self, default=None, callback=None): u"""Returns leaf's value.""" value = self._xml.text if self._xml.text else default return callback(value) if callback else value
[ "def", "get", "(", "self", ",", "default", "=", "None", ",", "callback", "=", "None", ")", ":", "value", "=", "self", ".", "_xml", ".", "text", "if", "self", ".", "_xml", ".", "text", "else", "default", "return", "callback", "(", "value", ")", "if", "callback", "else", "value" ]
48.25
0.010204
def get_charset(content_type):
    """Function used to retrieve the charset from a content-type. If there is
    no charset in the content type then the charset defined in
    DEFAULT_CHARSET will be returned.

    :param content_type: A string containing a Content-Type header
    :returns: A string containing the charset
    """
    if not content_type:
        return DEFAULT_CHARSET

    matched = _get_charset_re.search(content_type)
    if matched:
        # Extract the charset and strip its double quotes
        return matched.group('charset').replace('"', '')
    return DEFAULT_CHARSET
[ "def", "get_charset", "(", "content_type", ")", ":", "if", "not", "content_type", ":", "return", "DEFAULT_CHARSET", "matched", "=", "_get_charset_re", ".", "search", "(", "content_type", ")", "if", "matched", ":", "# Extract the charset and strip its double quotes", "return", "matched", ".", "group", "(", "'charset'", ")", ".", "replace", "(", "'\"'", ",", "''", ")", "return", "DEFAULT_CHARSET" ]
37.3125
0.001634
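A behavior sketch for get_charset above, assuming DEFAULT_CHARSET = 'utf-8' and that the module's _get_charset_re regex captures a named group 'charset':

    get_charset('text/html; charset="iso-8859-1"')  # -> 'iso-8859-1'
    get_charset('application/json')                 # -> 'utf-8' (no charset present)
    get_charset(None)                               # -> 'utf-8'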
def generate_form(model, form=None, fields=False, exclude=False):
    """
    Generate a form from a model.

    :param model: A Django model.
    :param form: A Django form.
    :param fields: A list of fields to include in this form.
    :param exclude: A list of fields to exclude from this form.
    """
    _model, _fields, _exclude = model, fields, exclude

    class Form(form or forms.ModelForm):
        class Meta:
            model = _model
            if _fields is not False:
                fields = _fields
            if _exclude is not False:
                exclude = _exclude

    return Form
[ "def", "generate_form", "(", "model", ",", "form", "=", "None", ",", "fields", "=", "False", ",", "exclude", "=", "False", ")", ":", "_model", ",", "_fields", ",", "_exclude", "=", "model", ",", "fields", ",", "exclude", "class", "Form", "(", "form", "or", "forms", ".", "ModelForm", ")", ":", "class", "Meta", ":", "model", "=", "_model", "if", "_fields", "is", "not", "False", ":", "fields", "=", "_fields", "if", "_exclude", "is", "not", "False", ":", "exclude", "=", "_exclude", "return", "Form" ]
26.818182
0.001637
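A usage sketch for generate_form above, assuming a Django model named Article with title and body fields (Article is invented; forms is django.forms as used by the module):

    ArticleForm = generate_form(Article, fields=['title', 'body'])
    form = ArticleForm(data={'title': 'Hello', 'body': 'World'})
    form.is_valid()  # runs the usual ModelForm validation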
def get_sequence_rules_for_assessment_part(self, assessment_part_id): """Gets a ``SequenceRuleList`` for the given source assessment part. arg: assessment_part_id (osid.id.Id): an assessment part ``Id`` return: (osid.assessment.authoring.SequenceRuleList) - the returned ``SequenceRule`` list raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.learning.ActivityLookupSession.get_activities_for_objective_template # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('assessment_authoring', collection='SequenceRule', runtime=self._runtime) result = collection.find( dict({'assessmentPartId': str(assessment_part_id)}, **self._view_filter())) return objects.SequenceRuleList(result, runtime=self._runtime)
[ "def", "get_sequence_rules_for_assessment_part", "(", "self", ",", "assessment_part_id", ")", ":", "# Implemented from template for", "# osid.learning.ActivityLookupSession.get_activities_for_objective_template", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONClientValidated", "(", "'assessment_authoring'", ",", "collection", "=", "'SequenceRule'", ",", "runtime", "=", "self", ".", "_runtime", ")", "result", "=", "collection", ".", "find", "(", "dict", "(", "{", "'assessmentPartId'", ":", "str", "(", "assessment_part_id", ")", "}", ",", "*", "*", "self", ".", "_view_filter", "(", ")", ")", ")", "return", "objects", ".", "SequenceRuleList", "(", "result", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
51.869565
0.001646
def flatten_list(lobj): """ Recursively flattens a list. :param lobj: List to flatten :type lobj: list :rtype: list For example: >>> import pmisc >>> pmisc.flatten_list([1, [2, 3, [4, 5, 6]], 7]) [1, 2, 3, 4, 5, 6, 7] """ ret = [] for item in lobj: if isinstance(item, list): for sub_item in flatten_list(item): ret.append(sub_item) else: ret.append(item) return ret
[ "def", "flatten_list", "(", "lobj", ")", ":", "ret", "=", "[", "]", "for", "item", "in", "lobj", ":", "if", "isinstance", "(", "item", ",", "list", ")", ":", "for", "sub_item", "in", "flatten_list", "(", "item", ")", ":", "ret", ".", "append", "(", "sub_item", ")", "else", ":", "ret", ".", "append", "(", "item", ")", "return", "ret" ]
20.391304
0.002037
def write_catalog(filename, catalog, fmt=None, meta=None, prefix=None): """ Write a catalog (list of sources) to a file with format determined by extension. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. Parameters ---------- filename : str Base name for file to write. `_simp`, `_comp`, or `_isle` will be added to differentiate the different types of sources that are being written. catalog : list A list of source objects. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. fmt : str The file format extension. prefix : str Prepend each column name with "prefix_". Default is to prepend nothing. meta : dict A dictionary to be used as metadata for some file types (fits, VOTable). Returns ------- None """ if meta is None: meta = {} if prefix is None: pre='' else: pre = prefix + '_' def writer(filename, catalog, fmt=None): """ construct a dict of the data this method preserves the data types in the VOTable """ tab_dict = {} name_list = [] for name in catalog[0].names: col_name = name if catalog[0].galactic: if name.startswith('ra'): col_name = 'lon'+name[2:] elif name.endswith('ra'): col_name = name[:-2] + 'lon' elif name.startswith('dec'): col_name = 'lat'+name[3:] elif name.endswith('dec'): col_name = name[:-3] + 'lat' col_name = pre + col_name tab_dict[col_name] = [getattr(c, name, None) for c in catalog] name_list.append(col_name) t = Table(tab_dict, meta=meta) # re-order the columns t = t[[n for n in name_list]] if fmt is not None: if fmt in ["vot", "vo", "xml"]: vot = from_table(t) # description of this votable vot.description = repr(meta) writetoVO(vot, filename) elif fmt in ['hdf5']: t.write(filename, path='data', overwrite=True) elif fmt in ['fits']: writeFITSTable(filename, t) else: ascii.write(t, filename, fmt, overwrite=True) else: ascii.write(t, filename, overwrite=True) return # sort the sources into types and then write them out individually components, islands, simples = classify_catalog(catalog) if len(components) > 0: new_name = "{1}{0}{2}".format('_comp', *os.path.splitext(filename)) writer(new_name, components, fmt) log.info("wrote {0}".format(new_name)) if len(islands) > 0: new_name = "{1}{0}{2}".format('_isle', *os.path.splitext(filename)) writer(new_name, islands, fmt) log.info("wrote {0}".format(new_name)) if len(simples) > 0: new_name = "{1}{0}{2}".format('_simp', *os.path.splitext(filename)) writer(new_name, simples, fmt) log.info("wrote {0}".format(new_name)) return
[ "def", "write_catalog", "(", "filename", ",", "catalog", ",", "fmt", "=", "None", ",", "meta", "=", "None", ",", "prefix", "=", "None", ")", ":", "if", "meta", "is", "None", ":", "meta", "=", "{", "}", "if", "prefix", "is", "None", ":", "pre", "=", "''", "else", ":", "pre", "=", "prefix", "+", "'_'", "def", "writer", "(", "filename", ",", "catalog", ",", "fmt", "=", "None", ")", ":", "\"\"\"\n construct a dict of the data\n this method preserves the data types in the VOTable\n \"\"\"", "tab_dict", "=", "{", "}", "name_list", "=", "[", "]", "for", "name", "in", "catalog", "[", "0", "]", ".", "names", ":", "col_name", "=", "name", "if", "catalog", "[", "0", "]", ".", "galactic", ":", "if", "name", ".", "startswith", "(", "'ra'", ")", ":", "col_name", "=", "'lon'", "+", "name", "[", "2", ":", "]", "elif", "name", ".", "endswith", "(", "'ra'", ")", ":", "col_name", "=", "name", "[", ":", "-", "2", "]", "+", "'lon'", "elif", "name", ".", "startswith", "(", "'dec'", ")", ":", "col_name", "=", "'lat'", "+", "name", "[", "3", ":", "]", "elif", "name", ".", "endswith", "(", "'dec'", ")", ":", "col_name", "=", "name", "[", ":", "-", "3", "]", "+", "'lat'", "col_name", "=", "pre", "+", "col_name", "tab_dict", "[", "col_name", "]", "=", "[", "getattr", "(", "c", ",", "name", ",", "None", ")", "for", "c", "in", "catalog", "]", "name_list", ".", "append", "(", "col_name", ")", "t", "=", "Table", "(", "tab_dict", ",", "meta", "=", "meta", ")", "# re-order the columns", "t", "=", "t", "[", "[", "n", "for", "n", "in", "name_list", "]", "]", "if", "fmt", "is", "not", "None", ":", "if", "fmt", "in", "[", "\"vot\"", ",", "\"vo\"", ",", "\"xml\"", "]", ":", "vot", "=", "from_table", "(", "t", ")", "# description of this votable", "vot", ".", "description", "=", "repr", "(", "meta", ")", "writetoVO", "(", "vot", ",", "filename", ")", "elif", "fmt", "in", "[", "'hdf5'", "]", ":", "t", ".", "write", "(", "filename", ",", "path", "=", "'data'", ",", "overwrite", "=", "True", ")", "elif", "fmt", "in", "[", "'fits'", "]", ":", "writeFITSTable", "(", "filename", ",", "t", ")", "else", ":", "ascii", ".", "write", "(", "t", ",", "filename", ",", "fmt", ",", "overwrite", "=", "True", ")", "else", ":", "ascii", ".", "write", "(", "t", ",", "filename", ",", "overwrite", "=", "True", ")", "return", "# sort the sources into types and then write them out individually", "components", ",", "islands", ",", "simples", "=", "classify_catalog", "(", "catalog", ")", "if", "len", "(", "components", ")", ">", "0", ":", "new_name", "=", "\"{1}{0}{2}\"", ".", "format", "(", "'_comp'", ",", "*", "os", ".", "path", ".", "splitext", "(", "filename", ")", ")", "writer", "(", "new_name", ",", "components", ",", "fmt", ")", "log", ".", "info", "(", "\"wrote {0}\"", ".", "format", "(", "new_name", ")", ")", "if", "len", "(", "islands", ")", ">", "0", ":", "new_name", "=", "\"{1}{0}{2}\"", ".", "format", "(", "'_isle'", ",", "*", "os", ".", "path", ".", "splitext", "(", "filename", ")", ")", "writer", "(", "new_name", ",", "islands", ",", "fmt", ")", "log", ".", "info", "(", "\"wrote {0}\"", ".", "format", "(", "new_name", ")", ")", "if", "len", "(", "simples", ")", ">", "0", ":", "new_name", "=", "\"{1}{0}{2}\"", ".", "format", "(", "'_simp'", ",", "*", "os", ".", "path", ".", "splitext", "(", "filename", ")", ")", "writer", "(", "new_name", ",", "simples", ",", "fmt", ")", "log", ".", "info", "(", "\"wrote {0}\"", ".", "format", "(", "new_name", ")", ")", "return" ]
34.589474
0.002367
def encode(data: Union[str, bytes]) -> str: """ Return Base58 string from data :param data: Bytes or string data """ return ensure_str(base58.b58encode(ensure_bytes(data)))
[ "def", "encode", "(", "data", ":", "Union", "[", "str", ",", "bytes", "]", ")", "->", "str", ":", "return", "ensure_str", "(", "base58", ".", "b58encode", "(", "ensure_bytes", "(", "data", ")", ")", ")" ]
29.571429
0.00939
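A usage sketch for encode above; ensure_str/ensure_bytes are helpers from the snippet's own module and base58.b58encode comes from the base58 package:

    encode('hello')   # -> 'Cn8eVZg'
    encode(b'hello')  # -> 'Cn8eVZg' (str and bytes inputs agree)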
def install_integration(self, id, **kwargs): # noqa: E501 """Installs a Wavefront integration # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.install_integration(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :return: ResponseContainerIntegrationStatus If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.install_integration_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.install_integration_with_http_info(id, **kwargs) # noqa: E501 return data
[ "def", "install_integration", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "install_integration_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "install_integration_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
42.190476
0.002208
def create_big_thumbnail(self, token, bitstream_id, item_id, width=575): """ Create a big thumbnail for the given bitstream with the given width. It is used as the main image of the given item and shown in the item view page. :param token: A valid token for the user in question. :type token: string :param bitstream_id: The bitstream from which to create the thumbnail. :type bitstream_id: int | long :param item_id: The item on which to set the thumbnail. :type item_id: int | long :param width: (optional) The width in pixels to which to resize (aspect ratio will be preserved). Defaults to 575. :type width: int | long :returns: The ItemthumbnailDao object that was created. :rtype: dict """ parameters = dict() parameters['token'] = token parameters['bitstreamId'] = bitstream_id parameters['itemId'] = item_id parameters['width'] = width response = self.request('midas.thumbnailcreator.create.big.thumbnail', parameters) return response
[ "def", "create_big_thumbnail", "(", "self", ",", "token", ",", "bitstream_id", ",", "item_id", ",", "width", "=", "575", ")", ":", "parameters", "=", "dict", "(", ")", "parameters", "[", "'token'", "]", "=", "token", "parameters", "[", "'bitstreamId'", "]", "=", "bitstream_id", "parameters", "[", "'itemId'", "]", "=", "item_id", "parameters", "[", "'width'", "]", "=", "width", "response", "=", "self", ".", "request", "(", "'midas.thumbnailcreator.create.big.thumbnail'", ",", "parameters", ")", "return", "response" ]
43.769231
0.00172
def stop(name, lbn, target, profile='default', tgt_type='glob'): ''' .. versionchanged:: 2017.7.0 The ``expr_form`` argument has been renamed to ``tgt_type``, earlier releases must use ``expr_form``. Stop the named worker from the lbn load balancers at the targeted minions The worker won't get any traffic from the lbn Example: .. code-block:: yaml disable-before-deploy: modjk_worker.stop: - name: {{ grains['id'] }} - lbn: application - target: 'roles:balancer' - tgt_type: grain ''' return _talk2modjk(name, lbn, target, 'worker_stop', profile, tgt_type)
[ "def", "stop", "(", "name", ",", "lbn", ",", "target", ",", "profile", "=", "'default'", ",", "tgt_type", "=", "'glob'", ")", ":", "return", "_talk2modjk", "(", "name", ",", "lbn", ",", "target", ",", "'worker_stop'", ",", "profile", ",", "tgt_type", ")" ]
31.238095
0.001479
def containsSettingsGroup(groupName, settings=None):
    """ Returns True if the settings contain a group with the name groupName.
        Works recursively when the groupName is a slash separated path.
    """
    def _containsPath(path, settings):
        "Aux function for containsSettingsGroup. Does the actual recursive search."
        if len(path) == 0:
            return True
        else:
            head = path[0]
            tail = path[1:]
            if head not in settings.childGroups():
                return False
            else:
                settings.beginGroup(head)
                try:
                    return _containsPath(tail, settings)
                finally:
                    settings.endGroup()

    # Body starts here
    # Split the slash separated path; childGroups() only lists direct children.
    path = [part for part in groupName.split('/') if part]
    logger.debug("Looking for path: {}".format(path))
    settings = QtCore.QSettings() if settings is None else settings
    return _containsPath(path, settings)
[ "def", "containsSettingsGroup", "(", "groupName", ",", "settings", "=", "None", ")", ":", "def", "_containsPath", "(", "path", ",", "settings", ")", ":", "\"Aux function for containsSettingsGroup. Does the actual recursive search.\"", "if", "len", "(", "path", ")", "==", "0", ":", "return", "True", "else", ":", "head", "=", "path", "[", "0", "]", "tail", "=", "path", "[", "1", ":", "]", "if", "head", "not", "in", "settings", ".", "childGroups", "(", ")", ":", "return", "False", "else", ":", "settings", ".", "beginGroup", "(", "head", ")", "try", ":", "return", "_containsPath", "(", "tail", ",", "settings", ")", "finally", ":", "settings", ".", "endGroup", "(", ")", "# Body starts here", "path", "=", "os", ".", "path", ".", "split", "(", "groupName", ")", "logger", ".", "debug", "(", "\"Looking for path: {}\"", ".", "format", "(", "path", ")", ")", "settings", "=", "QtCore", ".", "QSettings", "(", ")", "if", "settings", "is", "None", "else", "settings", "return", "_containsPath", "(", "path", ",", "settings", ")" ]
37.4
0.002086
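A hypothetical usage of containsSettingsGroup above; 'plugins/exporters/csv' is an invented group path and QtCore comes from the same Qt binding the module imports:

    settings = QtCore.QSettings('MyOrg', 'MyApp')
    if containsSettingsGroup('plugins/exporters/csv', settings=settings):
        print('exporter settings present')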
def read_pot_status(self): """Read the status of the digital pot. Firmware v18+ only. The return value is a dictionary containing the following as unsigned 8-bit integers: FanON, LaserON, FanDACVal, LaserDACVal. :rtype: dict :Example: >>> alpha.read_pot_status() { 'LaserDACVal': 230, 'FanDACVal': 255, 'FanON': 0, 'LaserON': 0 } """ # Send the command byte and wait 10 ms a = self.cnxn.xfer([0x13])[0] sleep(10e-3) # Build an array of the results res = [] for i in range(4): res.append(self.cnxn.xfer([0x00])[0]) sleep(0.1) return { 'FanON': res[0], 'LaserON': res[1], 'FanDACVal': res[2], 'LaserDACVal': res[3] }
[ "def", "read_pot_status", "(", "self", ")", ":", "# Send the command byte and wait 10 ms", "a", "=", "self", ".", "cnxn", ".", "xfer", "(", "[", "0x13", "]", ")", "[", "0", "]", "sleep", "(", "10e-3", ")", "# Build an array of the results", "res", "=", "[", "]", "for", "i", "in", "range", "(", "4", ")", ":", "res", ".", "append", "(", "self", ".", "cnxn", ".", "xfer", "(", "[", "0x00", "]", ")", "[", "0", "]", ")", "sleep", "(", "0.1", ")", "return", "{", "'FanON'", ":", "res", "[", "0", "]", ",", "'LaserON'", ":", "res", "[", "1", "]", ",", "'FanDACVal'", ":", "res", "[", "2", "]", ",", "'LaserDACVal'", ":", "res", "[", "3", "]", "}" ]
24.542857
0.00224
def _fit_RSA_marginalized_null(self, Y, X_base, scan_onsets):
    """ The marginalized version of the null model for Bayesian RSA.
        The null model assumes no task-related response to the
        design matrix.
        Note that there is a naming change of variable. X in fit()
        is changed to Y here. This is because we follow the tradition
        that Y corresponds to data. However, in wrapper function fit(),
        we follow the naming routine of scikit-learn.
    """
    # Because there is nothing to learn that is shared across
    # participants, we can run each subject in serial.
    # The only fitting required is to re-estimate X0 after
    # each iteration
    n_subj = len(Y)
    t_start = time.time()
    logger.info('Starting to fit the model. Maximum iteration: '
                '{}.'.format(self.n_iter))

    rho_grids, rho_weights = self._set_rho_grids()
    logger.info('The grids of rho used to do numerical integration '
                'is {}.'.format(rho_grids))
    n_grid = self.rho_bins
    log_weights = np.log(rho_weights)
    rho_post = [None] * n_subj
    sigma_post = [None] * n_subj
    beta0_post = [None] * n_subj
    X0 = [None] * n_subj
    LL_null = np.zeros(n_subj)

    for subj in range(n_subj):
        logger.debug('Running on subject {}.'.format(subj))
        [n_T, n_V] = np.shape(Y[subj])
        D, F, run_TRs, n_run = self._prepare_DF(
            n_T, scan_onsets=scan_onsets[subj])
        YTY_diag = np.sum(Y[subj] * Y[subj], axis=0)
        YTDY_diag = np.sum(Y[subj] * np.dot(D, Y[subj]), axis=0)
        YTFY_diag = np.sum(Y[subj] * np.dot(F, Y[subj]), axis=0)

        # Add DC components capturing run-specific baselines.
        X_DC = self._gen_X_DC(run_TRs)
        X_DC, X_base[subj], idx_DC = self._merge_DC_to_base(
            X_DC, X_base[subj], no_DC=False)
        X_res = np.empty((n_T, 0))
        for it in range(0, self.n_iter):
            X0[subj] = np.concatenate(
                (X_base[subj], X_res), axis=1)
            n_X0 = X0[subj].shape[1]
            X0TX0, X0TDX0, X0TFX0 = self._make_templates(
                D, F, X0[subj], X0[subj])
            X0TY, X0TDY, X0TFY = self._make_templates(
                D, F, X0[subj], Y[subj])

            YTAY_diag = YTY_diag - rho_grids[:, None] * YTDY_diag \
                + rho_grids[:, None]**2 * YTFY_diag
            # dimension: #rho*space,
            # A/sigma2 is the inverse of noise covariance matrix.
            # YTAY means Y'AY

            X0TAX0 = X0TX0[None, :, :] \
                - rho_grids[:, None, None] \
                * X0TDX0[None, :, :] \
                + rho_grids[:, None, None]**2 \
                * X0TFX0[None, :, :]
            # dimension: #rho*#baseline*#baseline
            X0TAY = X0TY - rho_grids[:, None, None] * X0TDY \
                + rho_grids[:, None, None]**2 * X0TFY
            # dimension: #rho*#baseline*space
            X0TAX0_i = np.linalg.solve(
                X0TAX0, np.identity(n_X0)[None, :, :])
            # dimension: #rho*#baseline*#baseline

            YTAcorrY_diag = np.empty(np.shape(YTAY_diag))
            for i_r in range(np.size(rho_grids)):
                YTAcorrY_diag[i_r, :] = YTAY_diag[i_r, :] \
                    - np.sum(X0TAY[i_r, :, :] * np.dot(
                        X0TAX0_i[i_r, :, :], X0TAY[i_r, :, :]),
                        axis=0)

            log_fixed_terms = - (n_T - n_X0) / 2 * np.log(2 * np.pi)\
                + n_run / 2 * np.log(1 - rho_grids**2) \
                + scipy.special.gammaln((n_T - n_X0 - 2) / 2) \
                + (n_T - n_X0 - 2) / 2 * np.log(2)
            # These are terms in the log likelihood that do not
            # depend on L. Notice that the last term comes from
            # the term of marginalizing sigma. We take the 2 in
            # the denominator out. Accordingly, the "denominator"
            # variable in the _raw_loglike_grids() function is not
            # divided by 2

            half_log_det_X0TAX0 = self._half_log_det(X0TAX0)

            LL_raw = -half_log_det_X0TAX0[:, None] \
                - (n_T - n_X0 - 2) / 2 * np.log(YTAcorrY_diag) \
                + log_weights[:, None] + log_fixed_terms[:, None]
            # dimension: n_grid * space
            # The log likelihood at each pair of values of rho1.
            # half_log_det_X0TAX0 is 0.5*log(det(X0TAX0)) with the size of
            # number of parameter grids. So is the size of log_weights

            result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw)
            weight_post = result_exp / result_sum
            rho_post[subj] = np.sum(rho_grids[:, None] * weight_post,
                                    axis=0)
            # Mean-posterior estimate of rho.
            sigma_means = YTAcorrY_diag ** 0.5 \
                * (np.exp(scipy.special.gammaln((n_T - n_X0 - 3) / 2)
                          - scipy.special.gammaln((n_T - n_X0 - 2) / 2))
                   / 2**0.5)
            sigma_post[subj] = np.sum(sigma_means * weight_post, axis=0)
            beta0_post[subj] = np.zeros((n_X0, n_V))
            for grid in range(n_grid):
                beta0_post[subj] += weight_post[grid, :] * np.dot(
                    X0TAX0_i[grid, :, :], X0TAY[grid, :, :])
            if self.auto_nuisance:
                residuals = Y[subj] - np.dot(
                    X_base[subj],
                    beta0_post[subj][:np.size(X_base[subj], 1), :])
                X_res_new = self.nureg_method(
                    self.n_nureg_[subj]).fit_transform(
                        self.preprocess_residual(residuals))
                if it >= 1:
                    if np.max(np.abs(X_res_new - X_res)) <= self.tol:
                        logger.info('The change of X_res is '
                                    'smaller than the tolerance value {}.'
                                    'Fitting is finished after {} '
                                    'iterations'.format(self.tol, it + 1))
                        break
                X_res = X_res_new
        if idx_DC.size > 1:
            collapsed_DC = np.sum(X0[subj][:, idx_DC], axis=1)
            X0[subj] = np.insert(np.delete(X0[subj], idx_DC, axis=1),
                                 0, collapsed_DC, axis=1)
            collapsed_beta0 = np.mean(beta0_post[subj][idx_DC, :],
                                      axis=0)
            beta0_post[subj] = np.insert(
                np.delete(beta0_post[subj], idx_DC, axis=0),
                0, collapsed_beta0, axis=0)
        LL_null[subj] = np.sum(np.log(result_sum) + max_value)
    t_finish = time.time()
    logger.info(
        'total time of fitting: {} seconds'.format(t_finish - t_start))
    return beta0_post, sigma_post, rho_post, X0, LL_null
[ "def", "_fit_RSA_marginalized_null", "(", "self", ",", "Y", ",", "X_base", ",", "scan_onsets", ")", ":", "# Because there is nothing to learn that is shared across", "# participants, we can run each subject in serial.", "# The only fitting required is to re-estimate X0 after", "# each iteration", "n_subj", "=", "len", "(", "Y", ")", "t_start", "=", "time", ".", "time", "(", ")", "logger", ".", "info", "(", "'Starting to fit the model. Maximum iteration: '", "'{}.'", ".", "format", "(", "self", ".", "n_iter", ")", ")", "rho_grids", ",", "rho_weights", "=", "self", ".", "_set_rho_grids", "(", ")", "logger", ".", "info", "(", "'The grids of rho used to do numerical integration '", "'is {}.'", ".", "format", "(", "rho_grids", ")", ")", "n_grid", "=", "self", ".", "rho_bins", "log_weights", "=", "np", ".", "log", "(", "rho_weights", ")", "rho_post", "=", "[", "None", "]", "*", "n_subj", "sigma_post", "=", "[", "None", "]", "*", "n_subj", "beta0_post", "=", "[", "None", "]", "*", "n_subj", "X0", "=", "[", "None", "]", "*", "n_subj", "LL_null", "=", "np", ".", "zeros", "(", "n_subj", ")", "for", "subj", "in", "range", "(", "n_subj", ")", ":", "logger", ".", "debug", "(", "'Running on subject {}.'", ".", "format", "(", "subj", ")", ")", "[", "n_T", ",", "n_V", "]", "=", "np", ".", "shape", "(", "Y", "[", "subj", "]", ")", "D", ",", "F", ",", "run_TRs", ",", "n_run", "=", "self", ".", "_prepare_DF", "(", "n_T", ",", "scan_onsets", "=", "scan_onsets", "[", "subj", "]", ")", "YTY_diag", "=", "np", ".", "sum", "(", "Y", "[", "subj", "]", "*", "Y", "[", "subj", "]", ",", "axis", "=", "0", ")", "YTDY_diag", "=", "np", ".", "sum", "(", "Y", "[", "subj", "]", "*", "np", ".", "dot", "(", "D", ",", "Y", "[", "subj", "]", ")", ",", "axis", "=", "0", ")", "YTFY_diag", "=", "np", ".", "sum", "(", "Y", "[", "subj", "]", "*", "np", ".", "dot", "(", "F", ",", "Y", "[", "subj", "]", ")", ",", "axis", "=", "0", ")", "# Add DC components capturing run-specific baselines.", "X_DC", "=", "self", ".", "_gen_X_DC", "(", "run_TRs", ")", "X_DC", ",", "X_base", "[", "subj", "]", ",", "idx_DC", "=", "self", ".", "_merge_DC_to_base", "(", "X_DC", ",", "X_base", "[", "subj", "]", ",", "no_DC", "=", "False", ")", "X_res", "=", "np", ".", "empty", "(", "(", "n_T", ",", "0", ")", ")", "for", "it", "in", "range", "(", "0", ",", "self", ".", "n_iter", ")", ":", "X0", "[", "subj", "]", "=", "np", ".", "concatenate", "(", "(", "X_base", "[", "subj", "]", ",", "X_res", ")", ",", "axis", "=", "1", ")", "n_X0", "=", "X0", "[", "subj", "]", ".", "shape", "[", "1", "]", "X0TX0", ",", "X0TDX0", ",", "X0TFX0", "=", "self", ".", "_make_templates", "(", "D", ",", "F", ",", "X0", "[", "subj", "]", ",", "X0", "[", "subj", "]", ")", "X0TY", ",", "X0TDY", ",", "X0TFY", "=", "self", ".", "_make_templates", "(", "D", ",", "F", ",", "X0", "[", "subj", "]", ",", "Y", "[", "subj", "]", ")", "YTAY_diag", "=", "YTY_diag", "-", "rho_grids", "[", ":", ",", "None", "]", "*", "YTDY_diag", "+", "rho_grids", "[", ":", ",", "None", "]", "**", "2", "*", "YTFY_diag", "# dimension: #rho*space,", "# A/sigma2 is the inverse of noise covariance matrix.", "# YTAY means Y'AY", "X0TAX0", "=", "X0TX0", "[", "None", ",", ":", ",", ":", "]", "-", "rho_grids", "[", ":", ",", "None", ",", "None", "]", "*", "X0TDX0", "[", "None", ",", ":", ",", ":", "]", "+", "rho_grids", "[", ":", ",", "None", ",", "None", "]", "**", "2", "*", "X0TFX0", "[", "None", ",", ":", ",", ":", "]", "# dimension: #rho*#baseline*#baseline", "X0TAY", "=", "X0TY", "-", "rho_grids", "[", ":", ",", 
"None", ",", "None", "]", "*", "X0TDY", "+", "rho_grids", "[", ":", ",", "None", ",", "None", "]", "**", "2", "*", "X0TFY", "# dimension: #rho*#baseline*space", "X0TAX0_i", "=", "np", ".", "linalg", ".", "solve", "(", "X0TAX0", ",", "np", ".", "identity", "(", "n_X0", ")", "[", "None", ",", ":", ",", ":", "]", ")", "# dimension: #rho*#baseline*#baseline", "YTAcorrY_diag", "=", "np", ".", "empty", "(", "np", ".", "shape", "(", "YTAY_diag", ")", ")", "for", "i_r", "in", "range", "(", "np", ".", "size", "(", "rho_grids", ")", ")", ":", "YTAcorrY_diag", "[", "i_r", ",", ":", "]", "=", "YTAY_diag", "[", "i_r", ",", ":", "]", "-", "np", ".", "sum", "(", "X0TAY", "[", "i_r", ",", ":", ",", ":", "]", "*", "np", ".", "dot", "(", "X0TAX0_i", "[", "i_r", ",", ":", ",", ":", "]", ",", "X0TAY", "[", "i_r", ",", ":", ",", ":", "]", ")", ",", "axis", "=", "0", ")", "log_fixed_terms", "=", "-", "(", "n_T", "-", "n_X0", ")", "/", "2", "*", "np", ".", "log", "(", "2", "*", "np", ".", "pi", ")", "+", "n_run", "/", "2", "*", "np", ".", "log", "(", "1", "-", "rho_grids", "**", "2", ")", "+", "scipy", ".", "special", ".", "gammaln", "(", "(", "n_T", "-", "n_X0", "-", "2", ")", "/", "2", ")", "+", "(", "n_T", "-", "n_X0", "-", "2", ")", "/", "2", "*", "np", ".", "log", "(", "2", ")", "# These are terms in the log likelihood that do not", "# depend on L. Notice that the last term comes from", "# ther term of marginalizing sigma. We take the 2 in", "# the denominator out. Accordingly, the \"denominator\"", "# variable in the _raw_loglike_grids() function is not", "# divided by 2", "half_log_det_X0TAX0", "=", "self", ".", "_half_log_det", "(", "X0TAX0", ")", "LL_raw", "=", "-", "half_log_det_X0TAX0", "[", ":", ",", "None", "]", "-", "(", "n_T", "-", "n_X0", "-", "2", ")", "/", "2", "*", "np", ".", "log", "(", "YTAcorrY_diag", ")", "+", "log_weights", "[", ":", ",", "None", "]", "+", "log_fixed_terms", "[", ":", ",", "None", "]", "# dimension: n_grid * space", "# The log likelihood at each pair of values of rho1.", "# half_log_det_X0TAX0 is 0.5*log(det(X0TAX0)) with the size of", "# number of parameter grids. 
So is the size of log_weights", "result_sum", ",", "max_value", ",", "result_exp", "=", "utils", ".", "sumexp_stable", "(", "LL_raw", ")", "weight_post", "=", "result_exp", "/", "result_sum", "rho_post", "[", "subj", "]", "=", "np", ".", "sum", "(", "rho_grids", "[", ":", ",", "None", "]", "*", "weight_post", ",", "axis", "=", "0", ")", "# Mean-posterior estimate of rho.", "sigma_means", "=", "YTAcorrY_diag", "**", "0.5", "*", "(", "np", ".", "exp", "(", "scipy", ".", "special", ".", "gammaln", "(", "(", "n_T", "-", "n_X0", "-", "3", ")", "/", "2", ")", "-", "scipy", ".", "special", ".", "gammaln", "(", "(", "n_T", "-", "n_X0", "-", "2", ")", "/", "2", ")", ")", "/", "2", "**", "0.5", ")", "sigma_post", "[", "subj", "]", "=", "np", ".", "sum", "(", "sigma_means", "*", "weight_post", ",", "axis", "=", "0", ")", "beta0_post", "[", "subj", "]", "=", "np", ".", "zeros", "(", "(", "n_X0", ",", "n_V", ")", ")", "for", "grid", "in", "range", "(", "n_grid", ")", ":", "beta0_post", "[", "subj", "]", "+=", "weight_post", "[", "grid", ",", ":", "]", "*", "np", ".", "dot", "(", "X0TAX0_i", "[", "grid", ",", ":", ",", ":", "]", ",", "X0TAY", "[", "grid", ",", ":", ",", ":", "]", ")", "if", "self", ".", "auto_nuisance", ":", "residuals", "=", "Y", "[", "subj", "]", "-", "np", ".", "dot", "(", "X_base", "[", "subj", "]", ",", "beta0_post", "[", "subj", "]", "[", ":", "np", ".", "size", "(", "X_base", "[", "subj", "]", ",", "1", ")", ",", ":", "]", ")", "X_res_new", "=", "self", ".", "nureg_method", "(", "self", ".", "n_nureg_", "[", "subj", "]", ")", ".", "fit_transform", "(", "self", ".", "preprocess_residual", "(", "residuals", ")", ")", "if", "it", ">=", "1", ":", "if", "np", ".", "max", "(", "np", ".", "abs", "(", "X_res_new", "-", "X_res", ")", ")", "<=", "self", ".", "tol", ":", "logger", ".", "info", "(", "'The change of X_res is '", "'smaller than the tolerance value {}.'", "'Fitting is finished after {} '", "'iterations'", ".", "format", "(", "self", ".", "tol", ",", "it", "+", "1", ")", ")", "break", "X_res", "=", "X_res_new", "if", "idx_DC", ".", "size", ">", "1", ":", "collapsed_DC", "=", "np", ".", "sum", "(", "X0", "[", "subj", "]", "[", ":", ",", "idx_DC", "]", ",", "axis", "=", "1", ")", "X0", "[", "subj", "]", "=", "np", ".", "insert", "(", "np", ".", "delete", "(", "X0", "[", "subj", "]", ",", "idx_DC", ",", "axis", "=", "1", ")", ",", "0", ",", "collapsed_DC", ",", "axis", "=", "1", ")", "collapsed_beta0", "=", "np", ".", "mean", "(", "beta0_post", "[", "subj", "]", "[", "idx_DC", ",", ":", "]", ",", "axis", "=", "0", ")", "beta0_post", "[", "subj", "]", "=", "np", ".", "insert", "(", "np", ".", "delete", "(", "beta0_post", "[", "subj", "]", ",", "idx_DC", ",", "axis", "=", "0", ")", ",", "0", ",", "collapsed_beta0", ",", "axis", "=", "0", ")", "LL_null", "[", "subj", "]", "=", "np", ".", "sum", "(", "np", ".", "log", "(", "result_sum", ")", "+", "max_value", ")", "t_finish", "=", "time", ".", "time", "(", ")", "logger", ".", "info", "(", "'total time of fitting: {} seconds'", ".", "format", "(", "t_finish", "-", "t_start", ")", ")", "return", "beta0_post", ",", "sigma_post", ",", "rho_post", ",", "X0", ",", "LL_null" ]
50.829787
0.000411
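Reading off the in-line comments and variable names in the row above, LL_raw is the per-voxel log of the rho-marginalized null likelihood. As a reconstruction (not quoted from any reference), with $A_g = I - \rho_g D + \rho_g^2 F$ at grid point $\rho_g$ with weight $w_g$:

$$\mathrm{LL}_{g,v} = -\tfrac{1}{2}\log\det\!\big(X_0^\top A_g X_0\big) \;-\; \tfrac{n_T - n_{X_0} - 2}{2}\,\log\!\big(Y_v^\top A_g^{\mathrm{corr}} Y_v\big) \;+\; \log w_g \;+\; c_g,$$

$$c_g = -\tfrac{n_T - n_{X_0}}{2}\log 2\pi \;+\; \tfrac{n_{\mathrm{run}}}{2}\log\!\big(1 - \rho_g^2\big) \;+\; \log\Gamma\!\Big(\tfrac{n_T - n_{X_0} - 2}{2}\Big) \;+\; \tfrac{n_T - n_{X_0} - 2}{2}\log 2,$$

and LL_null[subj] equals $\sum_v \log\sum_g e^{\mathrm{LL}_{g,v}}$, computed stably by utils.sumexp_stable.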
def ConsultarPuerto(self, sep="||"):
    "Query the enabled ports (Consulta de Puertos habilitados)"
    ret = self.client.puertoConsultar(
        auth={
            'token': self.Token, 'sign': self.Sign,
            'cuit': self.Cuit, },
    )['puertoReturn']
    self.__analizar_errores(ret)
    array = ret.get('puertos', [])
    return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
            (it['codigoDescripcion']['codigo'],
             it['codigoDescripcion']['descripcion'])
            for it in array]
[ "def", "ConsultarPuerto", "(", "self", ",", "sep", "=", "\"||\"", ")", ":", "ret", "=", "self", ".", "client", ".", "puertoConsultar", "(", "auth", "=", "{", "'token'", ":", "self", ".", "Token", ",", "'sign'", ":", "self", ".", "Sign", ",", "'cuit'", ":", "self", ".", "Cuit", ",", "}", ",", ")", "[", "'puertoReturn'", "]", "self", ".", "__analizar_errores", "(", "ret", ")", "array", "=", "ret", ".", "get", "(", "'puertos'", ",", "[", "]", ")", "return", "[", "(", "\"%s %%s %s %%s %s\"", "%", "(", "sep", ",", "sep", ",", "sep", ")", ")", "%", "(", "it", "[", "'codigoDescripcion'", "]", "[", "'codigo'", "]", ",", "it", "[", "'codigoDescripcion'", "]", "[", "'descripcion'", "]", ")", "for", "it", "in", "array", "]" ]
45.230769
0.01
def register(style, func=None):
    """Register a pinyin style implementation

    ::

        @register('echo')
        def echo(pinyin, **kwargs):
            return pinyin

        # or
        register('echo', echo)
    """
    if func is not None:
        _registry[style] = func
        return

    def decorator(func):
        _registry[style] = func

        @wraps(func)
        def wrapper(pinyin, **kwargs):
            return func(pinyin, **kwargs)

        return wrapper
    return decorator
[ "def", "register", "(", "style", ",", "func", "=", "None", ")", ":", "if", "func", "is", "not", "None", ":", "_registry", "[", "style", "]", "=", "func", "return", "def", "decorator", "(", "func", ")", ":", "_registry", "[", "style", "]", "=", "func", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "pinyin", ",", "*", "*", "kwargs", ")", ":", "return", "func", "(", "pinyin", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
18.12
0.002096
def get_loglevel(level): """ return logging level object corresponding to a given level passed as a string @str level: name of a syslog log level @rtype: logging, logging level from logging module """ if level == 'debug': return logging.DEBUG elif level == 'notice': return logging.INFO elif level == 'info': return logging.INFO elif level == 'warning' or level == 'warn': return logging.WARNING elif level == 'error' or level == 'err': return logging.ERROR elif level == 'critical' or level == 'crit': return logging.CRITICAL elif level == 'alert': return logging.CRITICAL elif level == 'emergency' or level == 'emerg': return logging.CRITICAL else: return logging.INFO
[ "def", "get_loglevel", "(", "level", ")", ":", "if", "level", "==", "'debug'", ":", "return", "logging", ".", "DEBUG", "elif", "level", "==", "'notice'", ":", "return", "logging", ".", "INFO", "elif", "level", "==", "'info'", ":", "return", "logging", ".", "INFO", "elif", "level", "==", "'warning'", "or", "level", "==", "'warn'", ":", "return", "logging", ".", "WARNING", "elif", "level", "==", "'error'", "or", "level", "==", "'err'", ":", "return", "logging", ".", "ERROR", "elif", "level", "==", "'critical'", "or", "level", "==", "'crit'", ":", "return", "logging", ".", "CRITICAL", "elif", "level", "==", "'alert'", ":", "return", "logging", ".", "CRITICAL", "elif", "level", "==", "'emergency'", "or", "level", "==", "'emerg'", ":", "return", "logging", ".", "CRITICAL", "else", ":", "return", "logging", ".", "INFO" ]
31.12
0.001247
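The same mapping in table-driven form, as a sketch rather than the module's own code; behavior matches the if/elif chain above, including the INFO fallback for unknown names:

    import logging

    _SYSLOG_TO_LOGGING = {
        'debug': logging.DEBUG,
        'notice': logging.INFO, 'info': logging.INFO,
        'warning': logging.WARNING, 'warn': logging.WARNING,
        'error': logging.ERROR, 'err': logging.ERROR,
        'critical': logging.CRITICAL, 'crit': logging.CRITICAL,
        'alert': logging.CRITICAL,
        'emergency': logging.CRITICAL, 'emerg': logging.CRITICAL,
    }

    def get_loglevel_table(level):
        # Unknown names fall back to INFO, like the original.
        return _SYSLOG_TO_LOGGING.get(level, logging.INFO)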
def minter(record_uuid, data, pid_type, key): """Mint PIDs for a record.""" pid = PersistentIdentifier.create( pid_type, data[key], object_type='rec', object_uuid=record_uuid, status=PIDStatus.REGISTERED ) for scheme, identifier in data['identifiers'].items(): if identifier: PersistentIdentifier.create( scheme, identifier, object_type='rec', object_uuid=record_uuid, status=PIDStatus.REGISTERED ) return pid
[ "def", "minter", "(", "record_uuid", ",", "data", ",", "pid_type", ",", "key", ")", ":", "pid", "=", "PersistentIdentifier", ".", "create", "(", "pid_type", ",", "data", "[", "key", "]", ",", "object_type", "=", "'rec'", ",", "object_uuid", "=", "record_uuid", ",", "status", "=", "PIDStatus", ".", "REGISTERED", ")", "for", "scheme", ",", "identifier", "in", "data", "[", "'identifiers'", "]", ".", "items", "(", ")", ":", "if", "identifier", ":", "PersistentIdentifier", ".", "create", "(", "scheme", ",", "identifier", ",", "object_type", "=", "'rec'", ",", "object_uuid", "=", "record_uuid", ",", "status", "=", "PIDStatus", ".", "REGISTERED", ")", "return", "pid" ]
29.631579
0.001721
def subcellular_locations(self): """Distinct subcellular locations (``location`` in :class:`.models.SubcellularLocation`) :return: all distinct subcellular locations :rtype: list[str] """ return [x[0] for x in self.session.query(models.SubcellularLocation.location).all()]
[ "def", "subcellular_locations", "(", "self", ")", ":", "return", "[", "x", "[", "0", "]", "for", "x", "in", "self", ".", "session", ".", "query", "(", "models", ".", "SubcellularLocation", ".", "location", ")", ".", "all", "(", ")", "]" ]
43.857143
0.01278
def get_short_annotations(annotations):
    """
    Converts full GATK annotation names to their shortened versions

    :param annotations: iterable of full GATK annotation names
    :return: list of annotation names, shortened where a short form exists
    """
    # Annotations need to match VCF header
    short_name = {'QualByDepth': 'QD',
                  'FisherStrand': 'FS',
                  'StrandOddsRatio': 'SOR',
                  'ReadPosRankSumTest': 'ReadPosRankSum',
                  'MappingQualityRankSumTest': 'MQRankSum',
                  'RMSMappingQuality': 'MQ',
                  'InbreedingCoeff': 'ID'}

    short_annotations = []
    for annotation in annotations:
        if annotation in short_name:
            annotation = short_name[annotation]
        short_annotations.append(annotation)

    return short_annotations
[ "def", "get_short_annotations", "(", "annotations", ")", ":", "# Annotations need to match VCF header", "short_name", "=", "{", "'QualByDepth'", ":", "'QD'", ",", "'FisherStrand'", ":", "'FS'", ",", "'StrandOddsRatio'", ":", "'SOR'", ",", "'ReadPosRankSumTest'", ":", "'ReadPosRankSum'", ",", "'MappingQualityRankSumTest'", ":", "'MQRankSum'", ",", "'RMSMappingQuality'", ":", "'MQ'", ",", "'InbreedingCoeff'", ":", "'ID'", "}", "short_annotations", "=", "[", "]", "for", "annotation", "in", "annotations", ":", "if", "annotation", "in", "short_name", ":", "annotation", "=", "short_name", "[", "annotation", "]", "short_annotations", ".", "append", "(", "annotation", ")", "return", "short_annotations" ]
34.761905
0.001333
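For example, names without a short form pass through unchanged:

    get_short_annotations(['QualByDepth', 'FisherStrand', 'DP'])
    # -> ['QD', 'FS', 'DP']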
def fetch_ticker(self) -> Ticker: """Fetch the market ticker.""" return self._fetch('ticker', self.market.code)(self._ticker)()
[ "def", "fetch_ticker", "(", "self", ")", "->", "Ticker", ":", "return", "self", ".", "_fetch", "(", "'ticker'", ",", "self", ".", "market", ".", "code", ")", "(", "self", ".", "_ticker", ")", "(", ")" ]
47
0.013986
def QPSK_rx(fc,N_symb,Rs,EsN0=100,fs=125,lfsr_len=10,phase=0,pulse='src'):
    """
    This function generates a received QPSK signal for receiver testing:
    a baseband QPSK waveform of N_symb symbols at rate Rs is created,
    complex AWGN is added at the requested Es/N0, and the result is mixed
    up to the carrier frequency fc with the given phase offset.
    """
    Ns = int(np.round(fs/Rs))
    print('Ns = ', Ns)
    print('Rs = ', fs/float(Ns))
    print('EsN0 = ', EsN0, 'dB')
    print('phase = ', phase, 'degrees')
    print('pulse = ', pulse)
    x, b, data = QPSK_bb(N_symb,Ns,lfsr_len,pulse)
    # Add AWGN to x
    x = cpx_AWGN(x,EsN0,Ns)
    n = np.arange(len(x))
    xc = x*np.exp(1j*2*np.pi*fc/float(fs)*n) * np.exp(1j*phase)
    return xc, b, data
[ "def", "QPSK_rx", "(", "fc", ",", "N_symb", ",", "Rs", ",", "EsN0", "=", "100", ",", "fs", "=", "125", ",", "lfsr_len", "=", "10", ",", "phase", "=", "0", ",", "pulse", "=", "'src'", ")", ":", "Ns", "=", "int", "(", "np", ".", "round", "(", "fs", "/", "Rs", ")", ")", "print", "(", "'Ns = '", ",", "Ns", ")", "print", "(", "'Rs = '", ",", "fs", "/", "float", "(", "Ns", ")", ")", "print", "(", "'EsN0 = '", ",", "EsN0", ",", "'dB'", ")", "print", "(", "'phase = '", ",", "phase", ",", "'degrees'", ")", "print", "(", "'pulse = '", ",", "pulse", ")", "x", ",", "b", ",", "data", "=", "QPSK_bb", "(", "N_symb", ",", "Ns", ",", "lfsr_len", ",", "pulse", ")", "# Add AWGN to x", "x", "=", "cpx_AWGN", "(", "x", ",", "EsN0", ",", "Ns", ")", "n", "=", "np", ".", "arange", "(", "len", "(", "x", ")", ")", "xc", "=", "x", "*", "np", ".", "exp", "(", "1j", "*", "2", "*", "np", ".", "pi", "*", "fc", "/", "float", "(", "fs", ")", "*", "n", ")", "*", "np", ".", "exp", "(", "1j", "*", "phase", ")", "return", "xc", ",", "b", ",", "data" ]
31.4375
0.025097
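The closing lines of QPSK_rx implement the complex upconversion. As a formula, with $\tilde{x}[n]$ the AWGN-corrupted baseband signal returned by cpx_AWGN and $N_s = \operatorname{round}(f_s/R_s)$ samples per symbol:

$$x_c[n] = \tilde{x}[n]\; e^{\,j 2\pi (f_c/f_s)\, n}\; e^{\,j\phi}.$$

Note that the code applies phase directly in radians even though the printout labels it in degrees.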
async def send_animation(self, chat_id: typing.Union[base.Integer, base.String], animation: typing.Union[base.InputFile, base.String], duration: typing.Union[base.Integer, None] = None, width: typing.Union[base.Integer, None] = None, height: typing.Union[base.Integer, None] = None, thumb: typing.Union[typing.Union[base.InputFile, base.String], None] = None, caption: typing.Union[base.String, None] = None, parse_mode: typing.Union[base.String, None] = None, disable_notification: typing.Union[base.Boolean, None] = None, reply_to_message_id: typing.Union[base.Integer, None] = None, reply_markup: typing.Union[typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply], None] = None ) -> types.Message: """ Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound). On success, the sent Message is returned. Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future. Source https://core.telegram.org/bots/api#sendanimation :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param animation: Animation to send. Pass a file_id as String to send an animation that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an animation from the Internet, or upload a new animation using multipart/form-data :type animation: :obj:`typing.Union[base.InputFile, base.String]` :param duration: Duration of sent animation in seconds :type duration: :obj:`typing.Union[base.Integer, None]` :param width: Animation width :type width: :obj:`typing.Union[base.Integer, None]` :param height: Animation height :type height: :obj:`typing.Union[base.Integer, None]` :param thumb: Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail‘s width and height should not exceed 90. :type thumb: :obj:`typing.Union[typing.Union[base.InputFile, base.String], None]` :param caption: Animation caption (may also be used when resending animation by file_id), 0-1024 characters :type caption: :obj:`typing.Union[base.String, None]` :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption :type parse_mode: :obj:`typing.Union[base.String, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]` :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user :type reply_markup: :obj:`typing.Union[typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply], None]` :return: On success, the sent Message is returned :rtype: :obj:`types.Message` """ reply_markup = prepare_arg(reply_markup) payload = generate_payload(**locals(), exclude=["animation", "thumb"]) files = {} prepare_file(payload, files, 'animation', animation) prepare_attachment(payload, files, 'thumb', thumb) result = await self.request(api.Methods.SEND_ANIMATION, payload, files) return types.Message(**result)
[ "async", "def", "send_animation", "(", "self", ",", "chat_id", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "base", ".", "String", "]", ",", "animation", ":", "typing", ".", "Union", "[", "base", ".", "InputFile", ",", "base", ".", "String", "]", ",", "duration", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", "=", "None", ",", "width", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", "=", "None", ",", "height", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", "=", "None", ",", "thumb", ":", "typing", ".", "Union", "[", "typing", ".", "Union", "[", "base", ".", "InputFile", ",", "base", ".", "String", "]", ",", "None", "]", "=", "None", ",", "caption", ":", "typing", ".", "Union", "[", "base", ".", "String", ",", "None", "]", "=", "None", ",", "parse_mode", ":", "typing", ".", "Union", "[", "base", ".", "String", ",", "None", "]", "=", "None", ",", "disable_notification", ":", "typing", ".", "Union", "[", "base", ".", "Boolean", ",", "None", "]", "=", "None", ",", "reply_to_message_id", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", "=", "None", ",", "reply_markup", ":", "typing", ".", "Union", "[", "typing", ".", "Union", "[", "types", ".", "InlineKeyboardMarkup", ",", "types", ".", "ReplyKeyboardMarkup", ",", "types", ".", "ReplyKeyboardRemove", ",", "types", ".", "ForceReply", "]", ",", "None", "]", "=", "None", ")", "->", "types", ".", "Message", ":", "reply_markup", "=", "prepare_arg", "(", "reply_markup", ")", "payload", "=", "generate_payload", "(", "*", "*", "locals", "(", ")", ",", "exclude", "=", "[", "\"animation\"", ",", "\"thumb\"", "]", ")", "files", "=", "{", "}", "prepare_file", "(", "payload", ",", "files", ",", "'animation'", ",", "animation", ")", "prepare_attachment", "(", "payload", ",", "files", ",", "'thumb'", ",", "thumb", ")", "result", "=", "await", "self", ".", "request", "(", "api", ".", "Methods", ".", "SEND_ANIMATION", ",", "payload", ",", "files", ")", "return", "types", ".", "Message", "(", "*", "*", "result", ")" ]
68.046154
0.008469
def insert_json(table=None, bulk_size=1000, concurrency=25, hosts=None, output_fmt=None): """Insert JSON lines fed into stdin into a Crate cluster. If no hosts are specified the statements will be printed. Args: table: Target table name. bulk_size: Bulk size of the insert statements. concurrency: Number of operations to run concurrently. hosts: hostname:port pairs of the Crate nodes """ if not hosts: return print_only(table) queries = (to_insert(table, d) for d in dicts_from_stdin()) bulk_queries = as_bulk_queries(queries, bulk_size) print('Executing inserts: bulk_size={} concurrency={}'.format( bulk_size, concurrency), file=sys.stderr) stats = Stats() with clients.client(hosts, concurrency=concurrency) as client: f = partial(aio.measure, stats, client.execute_many) try: aio.run_many(f, bulk_queries, concurrency) except clients.SqlException as e: raise SystemExit(str(e)) try: print(format_stats(stats.get(), output_fmt)) except KeyError: if not stats.sampler.values: raise SystemExit('No data received via stdin') raise
[ "def", "insert_json", "(", "table", "=", "None", ",", "bulk_size", "=", "1000", ",", "concurrency", "=", "25", ",", "hosts", "=", "None", ",", "output_fmt", "=", "None", ")", ":", "if", "not", "hosts", ":", "return", "print_only", "(", "table", ")", "queries", "=", "(", "to_insert", "(", "table", ",", "d", ")", "for", "d", "in", "dicts_from_stdin", "(", ")", ")", "bulk_queries", "=", "as_bulk_queries", "(", "queries", ",", "bulk_size", ")", "print", "(", "'Executing inserts: bulk_size={} concurrency={}'", ".", "format", "(", "bulk_size", ",", "concurrency", ")", ",", "file", "=", "sys", ".", "stderr", ")", "stats", "=", "Stats", "(", ")", "with", "clients", ".", "client", "(", "hosts", ",", "concurrency", "=", "concurrency", ")", "as", "client", ":", "f", "=", "partial", "(", "aio", ".", "measure", ",", "stats", ",", "client", ".", "execute_many", ")", "try", ":", "aio", ".", "run_many", "(", "f", ",", "bulk_queries", ",", "concurrency", ")", "except", "clients", ".", "SqlException", "as", "e", ":", "raise", "SystemExit", "(", "str", "(", "e", ")", ")", "try", ":", "print", "(", "format_stats", "(", "stats", ".", "get", "(", ")", ",", "output_fmt", ")", ")", "except", "KeyError", ":", "if", "not", "stats", ".", "sampler", ".", "values", ":", "raise", "SystemExit", "(", "'No data received via stdin'", ")", "raise" ]
34.583333
0.000781
def _cmp_by_asn(local_asn, path1, path2):
    """Select the path based on source (iBGP/eBGP) peer.

    eBGP path is preferred over iBGP. If both paths are from the same kind
    of peers, return None.
    """
    def get_path_source_asn(path):
        asn = None
        if path.source is None:
            asn = local_asn
        else:
            asn = path.source.remote_as
        return asn

    p1_asn = get_path_source_asn(path1)
    p2_asn = get_path_source_asn(path2)
    # If path1 is from ibgp peer and path2 is from ebgp peer.
    if (p1_asn == local_asn) and (p2_asn != local_asn):
        return path2
    # If path2 is from ibgp peer and path1 is from ebgp peer.
    if (p2_asn == local_asn) and (p1_asn != local_asn):
        return path1

    # If both paths are from ebgp or ibgp peers, we cannot decide.
    return None
[ "def", "_cmp_by_asn", "(", "local_asn", ",", "path1", ",", "path2", ")", ":", "def", "get_path_source_asn", "(", "path", ")", ":", "asn", "=", "None", "if", "path", ".", "source", "is", "None", ":", "asn", "=", "local_asn", "else", ":", "asn", "=", "path", ".", "source", ".", "remote_as", "return", "asn", "p1_asn", "=", "get_path_source_asn", "(", "path1", ")", "p2_asn", "=", "get_path_source_asn", "(", "path2", ")", "# If path1 is from ibgp peer and path2 is from ebgp peer.", "if", "(", "p1_asn", "==", "local_asn", ")", "and", "(", "p2_asn", "!=", "local_asn", ")", ":", "return", "path2", "# If path2 is from ibgp peer and path1 is from ebgp peer,", "if", "(", "p2_asn", "==", "local_asn", ")", "and", "(", "p1_asn", "!=", "local_asn", ")", ":", "return", "path1", "# If both paths are from ebgp or ibpg peers, we cannot decide.", "return", "None" ]
31.192308
0.001196
def c_handle_array(objs): """Create ctypes const void ** from a list of MXNet objects with handles. Parameters ---------- objs : list of NDArray/Symbol. MXNet objects. Returns ------- (ctypes.c_void_p * len(objs)) A void ** pointer that can be passed to C API. """ arr = (ctypes.c_void_p * len(objs))() arr[:] = [o.handle for o in objs] return arr
[ "def", "c_handle_array", "(", "objs", ")", ":", "arr", "=", "(", "ctypes", ".", "c_void_p", "*", "len", "(", "objs", ")", ")", "(", ")", "arr", "[", ":", "]", "=", "[", "o", ".", "handle", "for", "o", "in", "objs", "]", "return", "arr" ]
24.625
0.002445
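A usage sketch for c_handle_array above (the mx.nd.array calls are illustrative):

    import mxnet as mx

    objs = [mx.nd.array([1.0, 2.0]), mx.nd.array([3.0])]
    handles = c_handle_array(objs)
    assert len(handles) == 2
    # `handles` can now be passed wherever the C API expects NDArrayHandle*.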
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'language') and self.language is not None: _dict['language'] = self.language if hasattr(self, 'analyzed_text') and self.analyzed_text is not None: _dict['analyzed_text'] = self.analyzed_text if hasattr(self, 'retrieved_url') and self.retrieved_url is not None: _dict['retrieved_url'] = self.retrieved_url if hasattr(self, 'usage') and self.usage is not None: _dict['usage'] = self.usage._to_dict() if hasattr(self, 'concepts') and self.concepts is not None: _dict['concepts'] = [x._to_dict() for x in self.concepts] if hasattr(self, 'entities') and self.entities is not None: _dict['entities'] = [x._to_dict() for x in self.entities] if hasattr(self, 'keywords') and self.keywords is not None: _dict['keywords'] = [x._to_dict() for x in self.keywords] if hasattr(self, 'categories') and self.categories is not None: _dict['categories'] = [x._to_dict() for x in self.categories] if hasattr(self, 'emotion') and self.emotion is not None: _dict['emotion'] = self.emotion._to_dict() if hasattr(self, 'metadata') and self.metadata is not None: _dict['metadata'] = self.metadata._to_dict() if hasattr(self, 'relations') and self.relations is not None: _dict['relations'] = [x._to_dict() for x in self.relations] if hasattr(self, 'semantic_roles') and self.semantic_roles is not None: _dict['semantic_roles'] = [ x._to_dict() for x in self.semantic_roles ] if hasattr(self, 'sentiment') and self.sentiment is not None: _dict['sentiment'] = self.sentiment._to_dict() if hasattr(self, 'syntax') and self.syntax is not None: _dict['syntax'] = self.syntax._to_dict() return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'language'", ")", "and", "self", ".", "language", "is", "not", "None", ":", "_dict", "[", "'language'", "]", "=", "self", ".", "language", "if", "hasattr", "(", "self", ",", "'analyzed_text'", ")", "and", "self", ".", "analyzed_text", "is", "not", "None", ":", "_dict", "[", "'analyzed_text'", "]", "=", "self", ".", "analyzed_text", "if", "hasattr", "(", "self", ",", "'retrieved_url'", ")", "and", "self", ".", "retrieved_url", "is", "not", "None", ":", "_dict", "[", "'retrieved_url'", "]", "=", "self", ".", "retrieved_url", "if", "hasattr", "(", "self", ",", "'usage'", ")", "and", "self", ".", "usage", "is", "not", "None", ":", "_dict", "[", "'usage'", "]", "=", "self", ".", "usage", ".", "_to_dict", "(", ")", "if", "hasattr", "(", "self", ",", "'concepts'", ")", "and", "self", ".", "concepts", "is", "not", "None", ":", "_dict", "[", "'concepts'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "concepts", "]", "if", "hasattr", "(", "self", ",", "'entities'", ")", "and", "self", ".", "entities", "is", "not", "None", ":", "_dict", "[", "'entities'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "entities", "]", "if", "hasattr", "(", "self", ",", "'keywords'", ")", "and", "self", ".", "keywords", "is", "not", "None", ":", "_dict", "[", "'keywords'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "keywords", "]", "if", "hasattr", "(", "self", ",", "'categories'", ")", "and", "self", ".", "categories", "is", "not", "None", ":", "_dict", "[", "'categories'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "categories", "]", "if", "hasattr", "(", "self", ",", "'emotion'", ")", "and", "self", ".", "emotion", "is", "not", "None", ":", "_dict", "[", "'emotion'", "]", "=", "self", ".", "emotion", ".", "_to_dict", "(", ")", "if", "hasattr", "(", "self", ",", "'metadata'", ")", "and", "self", ".", "metadata", "is", "not", "None", ":", "_dict", "[", "'metadata'", "]", "=", "self", ".", "metadata", ".", "_to_dict", "(", ")", "if", "hasattr", "(", "self", ",", "'relations'", ")", "and", "self", ".", "relations", "is", "not", "None", ":", "_dict", "[", "'relations'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "relations", "]", "if", "hasattr", "(", "self", ",", "'semantic_roles'", ")", "and", "self", ".", "semantic_roles", "is", "not", "None", ":", "_dict", "[", "'semantic_roles'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "semantic_roles", "]", "if", "hasattr", "(", "self", ",", "'sentiment'", ")", "and", "self", ".", "sentiment", "is", "not", "None", ":", "_dict", "[", "'sentiment'", "]", "=", "self", ".", "sentiment", ".", "_to_dict", "(", ")", "if", "hasattr", "(", "self", ",", "'syntax'", ")", "and", "self", ".", "syntax", "is", "not", "None", ":", "_dict", "[", "'syntax'", "]", "=", "self", ".", "syntax", ".", "_to_dict", "(", ")", "return", "_dict" ]
57.970588
0.000998
def fit_quantile(self, X, y, quantile, max_iter=20, tol=0.01, weights=None): """fit ExpectileGAM to a desired quantile via binary search Parameters ---------- X : array-like, shape (n_samples, m_features) Training vectors, where n_samples is the number of samples and m_features is the number of features. y : array-like, shape (n_samples,) Target values (integers in classification, real numbers in regression) For classification, labels must correspond to classes. quantile : float on (0, 1) desired quantile to fit. max_iter : int, default: 20 maximum number of binary search iterations to perform tol : float > 0, default: 0.01 maximum distance between desired quantile and fitted quantile weights : array-like shape (n_samples,) or None, default: None containing sample weights if None, defaults to array of ones Returns ------- self : fitted GAM object """ def _within_tol(a, b, tol): return np.abs(a - b) <= tol # validate arguments if quantile <= 0 or quantile >= 1: raise ValueError('quantile must be on (0, 1), but found {}'.format(quantile)) if tol <= 0: raise ValueError('tol must be float > 0 {}'.format(tol)) if max_iter <= 0: raise ValueError('max_iter must be int > 0 {}'.format(max_iter)) # perform a first fit if necessary if not self._is_fitted: self.fit(X, y, weights=weights) # do binary search max_ = 1.0 min_ = 0.0 n_iter = 0 while n_iter < max_iter: ratio = self._get_quantile_ratio(X, y) if _within_tol(ratio, quantile, tol): break if ratio < quantile: min_ = self.expectile else: max_ = self.expectile expectile = (max_ + min_) / 2. self.set_params(expectile=expectile) self.fit(X, y, weights=weights) n_iter += 1 # print diagnostics if not _within_tol(ratio, quantile, tol) and self.verbose: warnings.warn('maximum iterations reached') return self
[ "def", "fit_quantile", "(", "self", ",", "X", ",", "y", ",", "quantile", ",", "max_iter", "=", "20", ",", "tol", "=", "0.01", ",", "weights", "=", "None", ")", ":", "def", "_within_tol", "(", "a", ",", "b", ",", "tol", ")", ":", "return", "np", ".", "abs", "(", "a", "-", "b", ")", "<=", "tol", "# validate arguments", "if", "quantile", "<=", "0", "or", "quantile", ">=", "1", ":", "raise", "ValueError", "(", "'quantile must be on (0, 1), but found {}'", ".", "format", "(", "quantile", ")", ")", "if", "tol", "<=", "0", ":", "raise", "ValueError", "(", "'tol must be float > 0 {}'", ".", "format", "(", "tol", ")", ")", "if", "max_iter", "<=", "0", ":", "raise", "ValueError", "(", "'max_iter must be int > 0 {}'", ".", "format", "(", "max_iter", ")", ")", "# perform a first fit if necessary", "if", "not", "self", ".", "_is_fitted", ":", "self", ".", "fit", "(", "X", ",", "y", ",", "weights", "=", "weights", ")", "# do binary search", "max_", "=", "1.0", "min_", "=", "0.0", "n_iter", "=", "0", "while", "n_iter", "<", "max_iter", ":", "ratio", "=", "self", ".", "_get_quantile_ratio", "(", "X", ",", "y", ")", "if", "_within_tol", "(", "ratio", ",", "quantile", ",", "tol", ")", ":", "break", "if", "ratio", "<", "quantile", ":", "min_", "=", "self", ".", "expectile", "else", ":", "max_", "=", "self", ".", "expectile", "expectile", "=", "(", "max_", "+", "min_", ")", "/", "2.", "self", ".", "set_params", "(", "expectile", "=", "expectile", ")", "self", ".", "fit", "(", "X", ",", "y", ",", "weights", "=", "weights", ")", "n_iter", "+=", "1", "# print diagnostics", "if", "not", "_within_tol", "(", "ratio", ",", "quantile", ",", "tol", ")", "and", "self", ".", "verbose", ":", "warnings", ".", "warn", "(", "'maximum iterations reached'", ")", "return", "self" ]
33.028986
0.001278
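The fit_quantile record above is a general calibration pattern: binary-search a single model parameter (here the expectile) until an observed ratio matches a target quantile. A minimal, self-contained sketch of that pattern, independent of pygam — refit is a hypothetical callable that refits the model at a parameter value and returns the observed ratio:

def binary_search_param(refit, target, lo=0.0, hi=1.0, max_iter=20, tol=0.01):
    """Search (lo, hi) for a parameter whose observed ratio is within tol of target."""
    for _ in range(max_iter):
        param = (lo + hi) / 2.0
        ratio = refit(param)          # refit the model, observe the resulting ratio
        if abs(ratio - target) <= tol:
            return param
        if ratio < target:
            lo = param                # observed ratio too low: move the window up
        else:
            hi = param
    return param                      # best effort after max_iter halvings

# toy usage: a "model" whose observed ratio is the parameter itself
print(binary_search_param(lambda p: p, target=0.25))  # -> 0.25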
def get_content_html(request): """Retrieve content as HTML using the ident-hash (uuid@version).""" result = _get_content_json() media_type = result['mediaType'] if media_type == COLLECTION_MIMETYPE: content = tree_to_html(result['tree']) else: content = result['content'] resp = request.response resp.body = content resp.status = "200 OK" resp.content_type = 'application/xhtml+xml' return result, resp
[ "def", "get_content_html", "(", "request", ")", ":", "result", "=", "_get_content_json", "(", ")", "media_type", "=", "result", "[", "'mediaType'", "]", "if", "media_type", "==", "COLLECTION_MIMETYPE", ":", "content", "=", "tree_to_html", "(", "result", "[", "'tree'", "]", ")", "else", ":", "content", "=", "result", "[", "'content'", "]", "resp", "=", "request", ".", "response", "resp", ".", "body", "=", "content", "resp", ".", "status", "=", "\"200 OK\"", "resp", ".", "content_type", "=", "'application/xhtml+xml'", "return", "result", ",", "resp" ]
29.733333
0.002174
def action_set(method_name): """ Creates a setter that will call the action method with the context's key as first parameter and the value as second parameter. @param method_name: the name of a method belonging to the action. @type method_name: str """ def action_set(value, context, **_params): method = getattr(context["action"], method_name) return _set(method, context["key"], value, (), {}) return action_set
[ "def", "action_set", "(", "method_name", ")", ":", "def", "action_set", "(", "value", ",", "context", ",", "*", "*", "_params", ")", ":", "method", "=", "getattr", "(", "context", "[", "\"action\"", "]", ",", "method_name", ")", "return", "_set", "(", "method", ",", "context", "[", "\"key\"", "]", ",", "value", ",", "(", ")", ",", "{", "}", ")", "return", "action_set" ]
34.692308
0.00216
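action_set above is a closure factory: it captures a method name once and returns a setter that resolves that method on the context's action at call time. A self-contained sketch of the same pattern — the Action class and the _set helper are hypothetical stand-ins, since the original _set is not shown in this record:

def _set(method, key, value, args, kwargs):
    # minimal stand-in for the original _set helper
    return method(key, value, *args, **kwargs)

def action_set(method_name):
    def setter(value, context, **_params):
        method = getattr(context["action"], method_name)
        return _set(method, context["key"], value, (), {})
    return setter

class Action:
    def set_field(self, key, value):
        print("set {}={!r}".format(key, value))

setter = action_set("set_field")
setter("widget", {"action": Action(), "key": "name"})  # prints: set name='widget'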
def resource_for_link(link, includes, resources=None, locale=None): """Returns the resource that matches the link""" if resources is not None: cache_key = "{0}:{1}:{2}".format( link['sys']['linkType'], link['sys']['id'], locale ) if cache_key in resources: return resources[cache_key] for i in includes: if (i['sys']['id'] == link['sys']['id'] and i['sys']['type'] == link['sys']['linkType']): return i return None
[ "def", "resource_for_link", "(", "link", ",", "includes", ",", "resources", "=", "None", ",", "locale", "=", "None", ")", ":", "if", "resources", "is", "not", "None", ":", "cache_key", "=", "\"{0}:{1}:{2}\"", ".", "format", "(", "link", "[", "'sys'", "]", "[", "'linkType'", "]", ",", "link", "[", "'sys'", "]", "[", "'id'", "]", ",", "locale", ")", "if", "cache_key", "in", "resources", ":", "return", "resources", "[", "cache_key", "]", "for", "i", "in", "includes", ":", "if", "(", "i", "[", "'sys'", "]", "[", "'id'", "]", "==", "link", "[", "'sys'", "]", "[", "'id'", "]", "and", "i", "[", "'sys'", "]", "[", "'type'", "]", "==", "link", "[", "'sys'", "]", "[", "'linkType'", "]", ")", ":", "return", "i", "return", "None" ]
30.823529
0.001852
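resource_for_link tries an O(1) cache lookup keyed as linkType:id:locale, then falls back to a linear scan over the includes array. A toy run with plain dicts, mirroring the Contentful-style sys blocks used in the record above:

link = {'sys': {'type': 'Link', 'linkType': 'Entry', 'id': 'cat'}}
includes = [
    {'sys': {'type': 'Entry', 'id': 'dog'}},
    {'sys': {'type': 'Entry', 'id': 'cat'}},  # matches on id and on type == linkType
]
resources = {'Entry:cat:en-US': includes[1]}

# cache-hit path
cache_key = "{0}:{1}:{2}".format(link['sys']['linkType'], link['sys']['id'], 'en-US')
assert resources[cache_key] is includes[1]

# fallback scan path (what runs when resources is None)
match = next(i for i in includes
             if i['sys']['id'] == link['sys']['id']
             and i['sys']['type'] == link['sys']['linkType'])
assert match is includes[1]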
def _calc(self, y, w): '''Helper to estimate spatial lag conditioned Markov transition probability matrices based on maximum likelihood techniques. ''' if self.discrete: self.lclass_ids = weights.lag_categorical(w, self.class_ids, ties="tryself") else: ly = weights.lag_spatial(w, y) self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify( ly, self.m, self.lag_cutoffs) self.lclasses = np.arange(self.m) T = np.zeros((self.m, self.k, self.k)) n, t = y.shape for t1 in range(t - 1): t2 = t1 + 1 for i in range(n): T[self.lclass_ids[i, t1], self.class_ids[i, t1], self.class_ids[i, t2]] += 1 P = np.zeros_like(T) for i, mat in enumerate(T): row_sum = mat.sum(axis=1) row_sum = row_sum + (row_sum == 0) p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat)) P[i] = p_i return T, P
[ "def", "_calc", "(", "self", ",", "y", ",", "w", ")", ":", "if", "self", ".", "discrete", ":", "self", ".", "lclass_ids", "=", "weights", ".", "lag_categorical", "(", "w", ",", "self", ".", "class_ids", ",", "ties", "=", "\"tryself\"", ")", "else", ":", "ly", "=", "weights", ".", "lag_spatial", "(", "w", ",", "y", ")", "self", ".", "lclass_ids", ",", "self", ".", "lag_cutoffs", ",", "self", ".", "m", "=", "self", ".", "_maybe_classify", "(", "ly", ",", "self", ".", "m", ",", "self", ".", "lag_cutoffs", ")", "self", ".", "lclasses", "=", "np", ".", "arange", "(", "self", ".", "m", ")", "T", "=", "np", ".", "zeros", "(", "(", "self", ".", "m", ",", "self", ".", "k", ",", "self", ".", "k", ")", ")", "n", ",", "t", "=", "y", ".", "shape", "for", "t1", "in", "range", "(", "t", "-", "1", ")", ":", "t2", "=", "t1", "+", "1", "for", "i", "in", "range", "(", "n", ")", ":", "T", "[", "self", ".", "lclass_ids", "[", "i", ",", "t1", "]", ",", "self", ".", "class_ids", "[", "i", ",", "t1", "]", ",", "self", ".", "class_ids", "[", "i", ",", "t2", "]", "]", "+=", "1", "P", "=", "np", ".", "zeros_like", "(", "T", ")", "for", "i", ",", "mat", "in", "enumerate", "(", "T", ")", ":", "row_sum", "=", "mat", ".", "sum", "(", "axis", "=", "1", ")", "row_sum", "=", "row_sum", "+", "(", "row_sum", "==", "0", ")", "p_i", "=", "np", ".", "matrix", "(", "np", ".", "diag", "(", "1.", "/", "row_sum", ")", "*", "np", ".", "matrix", "(", "mat", ")", ")", "P", "[", "i", "]", "=", "p_i", "return", "T", ",", "P" ]
37.137931
0.00181
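The row normalization in _calc relies on a guard worth spelling out: row_sum + (row_sum == 0) turns empty rows (sum 0) into 1, so the division is safe and a never-observed origin class simply keeps an all-zero row. A standalone numpy sketch of the same normalization, using broadcasting instead of the deprecated np.matrix:

import numpy as np

T = np.array([[4., 1., 0.],
              [0., 0., 0.],   # an origin class never observed in the data
              [2., 2., 6.]])
row_sum = T.sum(axis=1)
row_sum = row_sum + (row_sum == 0)   # 0 -> 1, so the empty row divides to 0, not 0/0
P = T / row_sum[:, None]             # row-stochastic wherever data exists
print(P)                             # row 1 stays all zeros; other rows sum to 1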
def relation(self, table, origin_field, search_field, destination_field=None, id_field="id"):
    """
    Add a column to the main dataframe from a foreign key relation
    """
    df = self._relation(table, origin_field, search_field, destination_field, id_field)
    self.df = df
[ "def", "relation", "(", "self", ",", "table", ",", "origin_field", ",", "search_field", ",", "destination_field", "=", "None", ",", "id_field", "=", "\"id\"", ")", ":", "df", "=", "self", ".", "_relation", "(", "table", ",", "origin_field", ",", "search_field", ",", "destination_field", ",", "id_field", ")", "self", ".", "df", "=", "df" ]
42.5
0.011527
def modify(self, dn: str, mod_list: dict) -> None:
    """ Modify a DN in the LDAP database; see the ldap module. Doesn't return a result if transactions are enabled. """
    _debug("modify", self, dn, mod_list)

    # need to work out how to reverse changes in mod_list; result in revlist
    revlist = {}

    # get the current cached attributes
    result = self._cache_get_for_dn(dn)

    # find out how to reverse mod_list (for rollback) and put the result in
    # revlist. Also simulate actions on cache.
    for mod_type, l in six.iteritems(mod_list):
        for mod_op, mod_vals in l:
            _debug("attribute:", mod_type)
            if mod_type in result:
                _debug("attribute cache:", result[mod_type])
            else:
                _debug("attribute cache is empty")
            _debug("attribute modify:", (mod_op, mod_vals))

            if mod_vals is not None:
                if not isinstance(mod_vals, list):
                    mod_vals = [mod_vals]

            if mod_op == ldap3.MODIFY_ADD:
                # reverse of MODIFY_ADD is MODIFY_DELETE
                reverse = (ldap3.MODIFY_DELETE, mod_vals)

            elif mod_op == ldap3.MODIFY_DELETE and len(mod_vals) > 0:
                # Reverse of MODIFY_DELETE is MODIFY_ADD, but only if a value
                # is given; if mod_vals is None, all values were deleted.
                reverse = (ldap3.MODIFY_ADD, mod_vals)

            elif mod_op == ldap3.MODIFY_DELETE \
                    or mod_op == ldap3.MODIFY_REPLACE:
                if mod_type in result:
                    # If MODIFY_DELETE with no values or MODIFY_REPLACE
                    # then we have to replace all attributes with cached
                    # state
                    reverse = (
                        ldap3.MODIFY_REPLACE,
                        tldap.modlist.escape_list(result[mod_type])
                    )
                else:
                    # except if we have no cached state for this DN, in
                    # which case we delete it.
                    reverse = (ldap3.MODIFY_DELETE, [])

            else:
                raise RuntimeError("mod_op of %s not supported" % mod_op)

            reverse = [reverse]
            _debug("attribute reverse:", reverse)
            if mod_type in result:
                _debug("attribute cache:", result[mod_type])
            else:
                _debug("attribute cache is empty")

            revlist[mod_type] = reverse

    _debug("--")
    _debug("mod_list:", mod_list)
    _debug("revlist:", revlist)
    _debug("--")

    # now the hard stuff is over, we get to the easy stuff

    def on_commit(obj):
        obj.modify(dn, mod_list)

    def on_rollback(obj):
        obj.modify(dn, revlist)

    return self._process(on_commit, on_rollback)
[ "def", "modify", "(", "self", ",", "dn", ":", "str", ",", "mod_list", ":", "dict", ")", "->", "None", ":", "_debug", "(", "\"modify\"", ",", "self", ",", "dn", ",", "mod_list", ")", "# need to work out how to reverse changes in mod_list; result in revlist", "revlist", "=", "{", "}", "# get the current cached attributes", "result", "=", "self", ".", "_cache_get_for_dn", "(", "dn", ")", "# find the how to reverse mod_list (for rollback) and put result in", "# revlist. Also simulate actions on cache.", "for", "mod_type", ",", "l", "in", "six", ".", "iteritems", "(", "mod_list", ")", ":", "for", "mod_op", ",", "mod_vals", "in", "l", ":", "_debug", "(", "\"attribute:\"", ",", "mod_type", ")", "if", "mod_type", "in", "result", ":", "_debug", "(", "\"attribute cache:\"", ",", "result", "[", "mod_type", "]", ")", "else", ":", "_debug", "(", "\"attribute cache is empty\"", ")", "_debug", "(", "\"attribute modify:\"", ",", "(", "mod_op", ",", "mod_vals", ")", ")", "if", "mod_vals", "is", "not", "None", ":", "if", "not", "isinstance", "(", "mod_vals", ",", "list", ")", ":", "mod_vals", "=", "[", "mod_vals", "]", "if", "mod_op", "==", "ldap3", ".", "MODIFY_ADD", ":", "# reverse of MODIFY_ADD is MODIFY_DELETE", "reverse", "=", "(", "ldap3", ".", "MODIFY_DELETE", ",", "mod_vals", ")", "elif", "mod_op", "==", "ldap3", ".", "MODIFY_DELETE", "and", "len", "(", "mod_vals", ")", ">", "0", ":", "# Reverse of MODIFY_DELETE is MODIFY_ADD, but only if value", "# is given if mod_vals is None, this means all values where", "# deleted.", "reverse", "=", "(", "ldap3", ".", "MODIFY_ADD", ",", "mod_vals", ")", "elif", "mod_op", "==", "ldap3", ".", "MODIFY_DELETE", "or", "mod_op", "==", "ldap3", ".", "MODIFY_REPLACE", ":", "if", "mod_type", "in", "result", ":", "# If MODIFY_DELETE with no values or MODIFY_REPLACE", "# then we have to replace all attributes with cached", "# state", "reverse", "=", "(", "ldap3", ".", "MODIFY_REPLACE", ",", "tldap", ".", "modlist", ".", "escape_list", "(", "result", "[", "mod_type", "]", ")", ")", "else", ":", "# except if we have no cached state for this DN, in", "# which case we delete it.", "reverse", "=", "(", "ldap3", ".", "MODIFY_DELETE", ",", "[", "]", ")", "else", ":", "raise", "RuntimeError", "(", "\"mod_op of %d not supported\"", "%", "mod_op", ")", "reverse", "=", "[", "reverse", "]", "_debug", "(", "\"attribute reverse:\"", ",", "reverse", ")", "if", "mod_type", "in", "result", ":", "_debug", "(", "\"attribute cache:\"", ",", "result", "[", "mod_type", "]", ")", "else", ":", "_debug", "(", "\"attribute cache is empty\"", ")", "revlist", "[", "mod_type", "]", "=", "reverse", "_debug", "(", "\"--\"", ")", "_debug", "(", "\"mod_list:\"", ",", "mod_list", ")", "_debug", "(", "\"revlist:\"", ",", "revlist", ")", "_debug", "(", "\"--\"", ")", "# now the hard stuff is over, we get to the easy stuff", "def", "on_commit", "(", "obj", ")", ":", "obj", ".", "modify", "(", "dn", ",", "mod_list", ")", "def", "on_rollback", "(", "obj", ")", ":", "obj", ".", "modify", "(", "dn", ",", "revlist", ")", "return", "self", ".", "_process", "(", "on_commit", ",", "on_rollback", ")" ]
37.7875
0.000967
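The heart of modify is computing a reverse modlist before applying anything, so a rollback can replay the inverse operations: MODIFY_ADD inverts to MODIFY_DELETE, a value-specific MODIFY_DELETE inverts to MODIFY_ADD, and MODIFY_REPLACE (or a delete-all) inverts to a MODIFY_REPLACE from cached state. A self-contained sketch with plain strings standing in for the ldap3 constants:

ADD, DELETE, REPLACE = "MODIFY_ADD", "MODIFY_DELETE", "MODIFY_REPLACE"

def reverse_op(op, values, cached):
    """Return the operation that undoes (op, values), given the cached values."""
    if op == ADD:
        return (DELETE, values)
    if op == DELETE and values:
        return (ADD, values)
    # delete-all or replace: restore whatever was cached, or delete if nothing was
    return (REPLACE, cached) if cached else (DELETE, [])

cached = {"mail": ["old@example.com"]}
print(reverse_op(ADD, ["new@example.com"], cached.get("mail")))
# -> ('MODIFY_DELETE', ['new@example.com'])
print(reverse_op(REPLACE, ["x@example.com"], cached.get("cn")))
# -> ('MODIFY_DELETE', [])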
def grep(prev, pattern, *args, **kw):
    """The pipe greps the data passed from the previous generator according to the given regular expression.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern which is used to filter out data.
    :type pattern: str|unicode|re pattern object
    :param inv: If true, invert the match condition.
    :type inv: boolean
    :param kw: Extra keyword arguments; 'inv' is extracted here, the rest are passed to re.compile.
    :type kw: dict
    :returns: generator
    """
    inv = False if 'inv' not in kw else kw.pop('inv')
    pattern_obj = re.compile(pattern, *args, **kw)
    for data in prev:
        if bool(inv) ^ bool(pattern_obj.match(data)):
            yield data
[ "def", "grep", "(", "prev", ",", "pattern", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "inv", "=", "False", "if", "'inv'", "not", "in", "kw", "else", "kw", ".", "pop", "(", "'inv'", ")", "pattern_obj", "=", "re", ".", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kw", ")", "for", "data", "in", "prev", ":", "if", "bool", "(", "inv", ")", "^", "bool", "(", "pattern_obj", ".", "match", "(", "data", ")", ")", ":", "yield", "data" ]
32.6
0.00149
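The inv flag in grep works through an XOR: bool(inv) ^ bool(match) yields matching items normally and non-matching items when inverted. Re-stated standalone so the generator can be run directly:

import re

def grep(prev, pattern, *args, **kw):
    inv = kw.pop('inv', False)                 # same effect as the record's conditional pop
    pattern_obj = re.compile(pattern, *args, **kw)
    for data in prev:
        if bool(inv) ^ bool(pattern_obj.match(data)):
            yield data

lines = ['error: disk full', 'ok', 'error: timeout']
print(list(grep(lines, r'error')))             # ['error: disk full', 'error: timeout']
print(list(grep(lines, r'error', inv=True)))   # ['ok']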
def sequence_quality_plot (self): """ Create the HTML for the phred quality score plot """ data = dict() for s_name in self.fastqc_data: try: data[s_name] = {self.avg_bp_from_range(d['base']): d['mean'] for d in self.fastqc_data[s_name]['per_base_sequence_quality']} except KeyError: pass if len(data) == 0: log.debug('sequence_quality not found in FastQC reports') return None pconfig = { 'id': 'fastqc_per_base_sequence_quality_plot', 'title': 'FastQC: Mean Quality Scores', 'ylab': 'Phred Score', 'xlab': 'Position (bp)', 'ymin': 0, 'xDecimals': False, 'tt_label': '<b>Base {point.x}</b>: {point.y:.2f}', 'colors': self.get_status_cols('per_base_sequence_quality'), 'yPlotBands': [ {'from': 28, 'to': 100, 'color': '#c3e6c3'}, {'from': 20, 'to': 28, 'color': '#e6dcc3'}, {'from': 0, 'to': 20, 'color': '#e6c3c3'}, ] } self.add_section ( name = 'Sequence Quality Histograms', anchor = 'fastqc_per_base_sequence_quality', description = 'The mean quality value across each base position in the read.', helptext = ''' To enable multiple samples to be plotted on the same graph, only the mean quality scores are plotted (unlike the box plots seen in FastQC reports). Taken from the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/2%20Per%20Base%20Sequence%20Quality.html): _The y-axis on the graph shows the quality scores. The higher the score, the better the base call. The background of the graph divides the y axis into very good quality calls (green), calls of reasonable quality (orange), and calls of poor quality (red). The quality of calls on most platforms will degrade as the run progresses, so it is common to see base calls falling into the orange area towards the end of a read._ ''', plot = linegraph.plot(data, pconfig) )
[ "def", "sequence_quality_plot", "(", "self", ")", ":", "data", "=", "dict", "(", ")", "for", "s_name", "in", "self", ".", "fastqc_data", ":", "try", ":", "data", "[", "s_name", "]", "=", "{", "self", ".", "avg_bp_from_range", "(", "d", "[", "'base'", "]", ")", ":", "d", "[", "'mean'", "]", "for", "d", "in", "self", ".", "fastqc_data", "[", "s_name", "]", "[", "'per_base_sequence_quality'", "]", "}", "except", "KeyError", ":", "pass", "if", "len", "(", "data", ")", "==", "0", ":", "log", ".", "debug", "(", "'sequence_quality not found in FastQC reports'", ")", "return", "None", "pconfig", "=", "{", "'id'", ":", "'fastqc_per_base_sequence_quality_plot'", ",", "'title'", ":", "'FastQC: Mean Quality Scores'", ",", "'ylab'", ":", "'Phred Score'", ",", "'xlab'", ":", "'Position (bp)'", ",", "'ymin'", ":", "0", ",", "'xDecimals'", ":", "False", ",", "'tt_label'", ":", "'<b>Base {point.x}</b>: {point.y:.2f}'", ",", "'colors'", ":", "self", ".", "get_status_cols", "(", "'per_base_sequence_quality'", ")", ",", "'yPlotBands'", ":", "[", "{", "'from'", ":", "28", ",", "'to'", ":", "100", ",", "'color'", ":", "'#c3e6c3'", "}", ",", "{", "'from'", ":", "20", ",", "'to'", ":", "28", ",", "'color'", ":", "'#e6dcc3'", "}", ",", "{", "'from'", ":", "0", ",", "'to'", ":", "20", ",", "'color'", ":", "'#e6c3c3'", "}", ",", "]", "}", "self", ".", "add_section", "(", "name", "=", "'Sequence Quality Histograms'", ",", "anchor", "=", "'fastqc_per_base_sequence_quality'", ",", "description", "=", "'The mean quality value across each base position in the read.'", ",", "helptext", "=", "'''\n To enable multiple samples to be plotted on the same graph, only the mean quality\n scores are plotted (unlike the box plots seen in FastQC reports).\n\n Taken from the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/2%20Per%20Base%20Sequence%20Quality.html):\n\n _The y-axis on the graph shows the quality scores. The higher the score, the better\n the base call. The background of the graph divides the y axis into very good quality\n calls (green), calls of reasonable quality (orange), and calls of poor quality (red).\n The quality of calls on most platforms will degrade as the run progresses, so it is\n common to see base calls falling into the orange area towards the end of a read._\n '''", ",", "plot", "=", "linegraph", ".", "plot", "(", "data", ",", "pconfig", ")", ")" ]
48.369565
0.010132
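The dict comprehension in sequence_quality_plot keys each mean quality by a single base position. FastQC writes positions either as single bases ('5') or ranges ('10-14'), so avg_bp_from_range — not shown in this record — presumably collapses a range to a midpoint; a sketch under that assumption:

def avg_bp_from_range(base):
    # assumed behaviour: '10-14' -> 12, '5' -> 5 (the real helper may average differently)
    if '-' in str(base):
        lo, hi = str(base).split('-')
        return (int(lo) + int(hi)) // 2
    return int(base)

record = [{'base': '1', 'mean': 32.1}, {'base': '10-14', 'mean': 30.4}]
print({avg_bp_from_range(d['base']): d['mean'] for d in record})
# {1: 32.1, 12: 30.4}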
def create_jail(name, arch, version="9.0-RELEASE"):
    '''
    Creates a new poudriere jail if one does not exist

    *NOTE* creating a new jail will take some time; the master is not hanging

    CLI Example:

    .. code-block:: bash

        salt '*' poudriere.create_jail 90amd64 amd64
    '''
    # Config file must be on system to create a poudriere jail
    _check_config_exists()

    # Check if the jail is there
    if is_jail(name):
        return '{0} already exists'.format(name)

    cmd = 'poudriere jails -c -j {0} -v {1} -a {2}'.format(name, version, arch)
    __salt__['cmd.run'](cmd)

    # Make jail pkgng aware
    make_pkgng_aware(name)

    # Make sure the jail was created
    if is_jail(name):
        return 'Created jail {0}'.format(name)
    return 'Issue creating jail {0}'.format(name)
[ "def", "create_jail", "(", "name", ",", "arch", ",", "version", "=", "\"9.0-RELEASE\"", ")", ":", "# Config file must be on system to create a poudriere jail", "_check_config_exists", "(", ")", "# Check if the jail is there", "if", "is_jail", "(", "name", ")", ":", "return", "'{0} already exists'", ".", "format", "(", "name", ")", "cmd", "=", "'poudriere jails -c -j {0} -v {1} -a {2}'", ".", "format", "(", "name", ",", "version", ",", "arch", ")", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", "# Make jail pkgng aware", "make_pkgng_aware", "(", "name", ")", "# Make sure the jail was created", "if", "is_jail", "(", "name", ")", ":", "return", "'Created jail {0}'", ".", "format", "(", "name", ")", "return", "'Issue creating jail {0}'", ".", "format", "(", "name", ")" ]
26.266667
0.001224
def create_virtualenv(venv=VENV): """Creates the virtual environment and installs PIP only into the virtual environment """ print 'Creating venv...', run_command(['virtualenv', '-q', '--no-site-packages', VENV]) print 'done.' print 'Installing pip in virtualenv...', if not run_command([WITH_VENV, 'easy_install', 'pip']).strip(): die("Failed to install pip.") print 'done.' print 'Installing distribute in virtualenv...' pip_install('distribute>=0.6.24') print 'done.'
[ "def", "create_virtualenv", "(", "venv", "=", "VENV", ")", ":", "print", "'Creating venv...'", ",", "run_command", "(", "[", "'virtualenv'", ",", "'-q'", ",", "'--no-site-packages'", ",", "VENV", "]", ")", "print", "'done.'", "print", "'Installing pip in virtualenv...'", ",", "if", "not", "run_command", "(", "[", "WITH_VENV", ",", "'easy_install'", ",", "'pip'", "]", ")", ".", "strip", "(", ")", ":", "die", "(", "\"Failed to install pip.\"", ")", "print", "'done.'", "print", "'Installing distribute in virtualenv...'", "pip_install", "(", "'distribute>=0.6.24'", ")", "print", "'done.'" ]
36.571429
0.001905
def tag(self, tagname, message=None, force=True): """Create an annotated tag.""" return git_tag(self.repo_dir, tagname, message=message, force=force)
[ "def", "tag", "(", "self", ",", "tagname", ",", "message", "=", "None", ",", "force", "=", "True", ")", ":", "return", "git_tag", "(", "self", ".", "repo_dir", ",", "tagname", ",", "message", "=", "message", ",", "force", "=", "force", ")" ]
54.333333
0.012121
def insert_sequences_into_tree(aln, moltype, params={}, write_log=True):
    """Returns a tree from Alignment object aln.

    aln: an xxx.Alignment object, or data that can be used to build one.

    moltype: cogent.core.moltype.MolType object

    params: dict of parameters to pass in to the pplacer app controller.

    The result will be a tree object, or None if tree insertion fails.
    """
    # convert aln to phy since seq_names need to be fixed to run through pplacer
    new_aln = get_align_for_phylip(StringIO(aln))

    # convert aln to fasta in case it is not already a fasta file
    aln2 = Alignment(new_aln)
    seqs = aln2.toFasta()

    ih = '_input_as_multiline_string'

    pplacer_app = Pplacer(params=params,
                          InputHandler=ih,
                          WorkingDir=None,
                          SuppressStderr=False,
                          SuppressStdout=False)

    pplacer_result = pplacer_app(seqs)

    # write a log file
    if write_log:
        log_fp = join(params["--out-dir"], 'log_pplacer_' +
                      split(get_tmp_filename())[-1])
        log_file = open(log_fp, 'w')
        log_file.write(pplacer_result['StdOut'].read())
        log_file.close()

    # use guppy to convert json file into a placement tree
    guppy_params = {'tog': None}

    new_tree = build_tree_from_json_using_params(pplacer_result['json'].name,
                                                 output_dir=params['--out-dir'],
                                                 params=guppy_params)

    pplacer_result.cleanUp()

    return new_tree
[ "def", "insert_sequences_into_tree", "(", "aln", ",", "moltype", ",", "params", "=", "{", "}", ",", "write_log", "=", "True", ")", ":", "# convert aln to phy since seq_names need fixed to run through pplacer", "new_aln", "=", "get_align_for_phylip", "(", "StringIO", "(", "aln", ")", ")", "# convert aln to fasta in case it is not already a fasta file", "aln2", "=", "Alignment", "(", "new_aln", ")", "seqs", "=", "aln2", ".", "toFasta", "(", ")", "ih", "=", "'_input_as_multiline_string'", "pplacer_app", "=", "Pplacer", "(", "params", "=", "params", ",", "InputHandler", "=", "ih", ",", "WorkingDir", "=", "None", ",", "SuppressStderr", "=", "False", ",", "SuppressStdout", "=", "False", ")", "pplacer_result", "=", "pplacer_app", "(", "seqs", ")", "# write a log file", "if", "write_log", ":", "log_fp", "=", "join", "(", "params", "[", "\"--out-dir\"", "]", ",", "'log_pplacer_'", "+", "split", "(", "get_tmp_filename", "(", ")", ")", "[", "-", "1", "]", ")", "log_file", "=", "open", "(", "log_fp", ",", "'w'", ")", "log_file", ".", "write", "(", "pplacer_result", "[", "'StdOut'", "]", ".", "read", "(", ")", ")", "log_file", ".", "close", "(", ")", "# use guppy to convert json file into a placement tree", "guppy_params", "=", "{", "'tog'", ":", "None", "}", "new_tree", "=", "build_tree_from_json_using_params", "(", "pplacer_result", "[", "'json'", "]", ".", "name", ",", "output_dir", "=", "params", "[", "'--out-dir'", "]", ",", "params", "=", "guppy_params", ")", "pplacer_result", ".", "cleanUp", "(", ")", "return", "new_tree" ]
31.959184
0.010533
def filter(array, predicates, ty=None):
    """
    Returns a new array containing only the elements of the input array
    for which the corresponding predicate is true.

    Args:
        array (WeldObject / Numpy.ndarray): Input array
        predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
        ty (WeldType): Type of each element in the input array

    Returns:
        A WeldObject representing this computation
    """
    weld_obj = WeldObject(encoder_, decoder_)

    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array

    predicates_var = weld_obj.update(predicates)
    if isinstance(predicates, WeldObject):
        predicates_var = predicates.obj_id
        weld_obj.dependencies[predicates_var] = predicates

    weld_template = """
       result(
         for(
           zip(%(array)s, %(predicates)s),
           appender,
           |b, i, e| if (e.$1, merge(b, e.$0), b)
         )
       )
    """

    weld_obj.weld_code = weld_template % {
        "array": array_var,
        "predicates": predicates_var}

    return weld_obj
[ "def", "filter", "(", "array", ",", "predicates", ",", "ty", "=", "None", ")", ":", "weld_obj", "=", "WeldObject", "(", "encoder_", ",", "decoder_", ")", "array_var", "=", "weld_obj", ".", "update", "(", "array", ")", "if", "isinstance", "(", "array", ",", "WeldObject", ")", ":", "array_var", "=", "array", ".", "obj_id", "weld_obj", ".", "dependencies", "[", "array_var", "]", "=", "array", "predicates_var", "=", "weld_obj", ".", "update", "(", "predicates", ")", "if", "isinstance", "(", "predicates", ",", "WeldObject", ")", ":", "predicates_var", "=", "predicates", ".", "obj_id", "weld_obj", ".", "dependencies", "[", "predicates_var", "]", "=", "predicates", "weld_template", "=", "\"\"\"\n result(\n for(\n zip(%(array)s, %(predicates)s),\n appender,\n |b, i, e| if (e.$1, merge(b, e.$0), b)\n )\n )\n \"\"\"", "weld_obj", ".", "weld_code", "=", "weld_template", "%", "{", "\"array\"", ":", "array_var", ",", "\"predicates\"", ":", "predicates_var", "}", "return", "weld_obj" ]
29
0.000855
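The Weld template zips the data with the predicate vector and merges only the elements whose flag is true — the same semantics as a numpy boolean mask, which makes a convenient reference implementation for checking the generated program:

import numpy as np

array = np.array([3, 1, 4, 1, 5])
predicates = np.array([True, False, True, False, True])

# what the generated Weld loop computes, element by element
expected = np.array([e for e, keep in zip(array, predicates) if keep])

# numpy's boolean indexing gives the same result in one step
assert np.array_equal(array[predicates], expected)
print(array[predicates])  # [3 4 5]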
def from_dict(document):
    """Create a data type definition from its JSON-like object representation.

    Parameters
    ----------
    document : dict
        JSON-like object representation

    Returns
    -------
    AttributeType
    """
    # Get the type name from the document
    type_name = document['name']
    if type_name == ATTR_TYPE_INT:
        return IntType()
    elif type_name == ATTR_TYPE_FLOAT:
        return FloatType()
    elif type_name == ATTR_TYPE_ENUM:
        return EnumType(document['values'])
    elif type_name == ATTR_TYPE_DICT:
        return DictType()
    elif type_name == ATTR_TYPE_LIST:
        return ListType()
    else:
        raise ValueError('invalid attribute type: ' + str(type_name))
[ "def", "from_dict", "(", "document", ")", ":", "# Get the type name from the document", "type_name", "=", "document", "[", "'name'", "]", "if", "type_name", "==", "ATTR_TYPE_INT", ":", "return", "IntType", "(", ")", "elif", "type_name", "==", "ATTR_TYPE_FLOAT", ":", "return", "FloatType", "(", ")", "elif", "type_name", "==", "ATTR_TYPE_ENUM", ":", "return", "EnumType", "(", "document", "[", "'values'", "]", ")", "elif", "type_name", "==", "ATTR_TYPE_DICT", ":", "return", "DictType", "(", ")", "elif", "type_name", "==", "ATTR_TYPE_LIST", ":", "return", "ListType", "(", ")", "else", ":", "raise", "ValueError", "(", "'invalid attribute type: '", "+", "str", "(", "type_name", ")", ")" ]
30.576923
0.002439
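The if/elif chain in from_dict is a classic factory dispatch; once the branch count grows, a lookup table keeps it flat. A sketch of the same dispatch with hypothetical stand-ins for the constants and classes (EnumType is left out because it needs the extra values argument):

ATTR_TYPE_INT, ATTR_TYPE_FLOAT = 'int', 'float'
ATTR_TYPE_DICT, ATTR_TYPE_LIST = 'dict', 'list'

class IntType: pass
class FloatType: pass
class DictType: pass
class ListType: pass

_FACTORIES = {
    ATTR_TYPE_INT: IntType,
    ATTR_TYPE_FLOAT: FloatType,
    ATTR_TYPE_DICT: DictType,
    ATTR_TYPE_LIST: ListType,
}

def from_dict(document):
    type_name = document['name']
    try:
        return _FACTORIES[type_name]()
    except KeyError:
        raise ValueError('invalid attribute type: ' + str(type_name))

print(type(from_dict({'name': 'int'})).__name__)  # IntType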
def create_order(email,
                 request,
                 addresses=None,
                 shipping_address=None,
                 billing_address=None,
                 shipping_option=None,
                 capture_payment=False):
    """
    Create an order from a basket and customer information
    """
    basket_items, _ = get_basket_items(request)
    if addresses:
        # Longclaw < 0.2 used 'shipping_name', longclaw > 0.2 uses a consistent
        # prefix (shipping_address_xxxx)
        try:
            shipping_name = addresses['shipping_name']
        except KeyError:
            shipping_name = addresses['shipping_address_name']
        shipping_country = addresses['shipping_address_country']
        if not shipping_country:
            shipping_country = None
        shipping_address, _ = Address.objects.get_or_create(name=shipping_name,
                                                            line_1=addresses['shipping_address_line1'],
                                                            city=addresses['shipping_address_city'],
                                                            postcode=addresses['shipping_address_zip'],
                                                            country=shipping_country)
        shipping_address.save()
        try:
            billing_name = addresses['billing_name']
        except KeyError:
            billing_name = addresses['billing_address_name']
        billing_country = addresses['shipping_address_country']
        if not billing_country:
            billing_country = None
        billing_address, _ = Address.objects.get_or_create(name=billing_name,
                                                           line_1=addresses['billing_address_line1'],
                                                           city=addresses['billing_address_city'],
                                                           postcode=addresses['billing_address_zip'],
                                                           country=billing_country)
        billing_address.save()
    else:
        shipping_country = shipping_address.country
    ip_address = get_real_ip(request)
    if shipping_country and shipping_option:
        site_settings = Configuration.for_site(request.site)
        shipping_rate = get_shipping_cost(site_settings,
                                          shipping_address.country.pk,
                                          shipping_option)['rate']
    else:
        shipping_rate = Decimal(0)

    order = Order(
        email=email,
        ip_address=ip_address,
        shipping_address=shipping_address,
        billing_address=billing_address,
        shipping_rate=shipping_rate
    )
    order.save()

    # Create the order items & compute total
    total = 0
    for item in basket_items:
        total += item.total()
        order_item = OrderItem(
            product=item.variant,
            quantity=item.quantity,
            order=order
        )
        order_item.save()

    if capture_payment:
        desc = 'Payment from {} for order id #{}'.format(email, order.id)
        try:
            transaction_id = GATEWAY.create_payment(request,
                                                    total + shipping_rate,
                                                    description=desc)
            order.payment_date = timezone.now()
            order.transaction_id = transaction_id
            # Once the order has been successfully taken, we can empty the basket
            destroy_basket(request)
        except PaymentError:
            order.status = order.FAILURE
            order.save()

    return order
[ "def", "create_order", "(", "email", ",", "request", ",", "addresses", "=", "None", ",", "shipping_address", "=", "None", ",", "billing_address", "=", "None", ",", "shipping_option", "=", "None", ",", "capture_payment", "=", "False", ")", ":", "basket_items", ",", "_", "=", "get_basket_items", "(", "request", ")", "if", "addresses", ":", "# Longclaw < 0.2 used 'shipping_name', longclaw > 0.2 uses a consistent", "# prefix (shipping_address_xxxx)", "try", ":", "shipping_name", "=", "addresses", "[", "'shipping_name'", "]", "except", "KeyError", ":", "shipping_name", "=", "addresses", "[", "'shipping_address_name'", "]", "shipping_country", "=", "addresses", "[", "'shipping_address_country'", "]", "if", "not", "shipping_country", ":", "shipping_country", "=", "None", "shipping_address", ",", "_", "=", "Address", ".", "objects", ".", "get_or_create", "(", "name", "=", "shipping_name", ",", "line_1", "=", "addresses", "[", "'shipping_address_line1'", "]", ",", "city", "=", "addresses", "[", "'shipping_address_city'", "]", ",", "postcode", "=", "addresses", "[", "'shipping_address_zip'", "]", ",", "country", "=", "shipping_country", ")", "shipping_address", ".", "save", "(", ")", "try", ":", "billing_name", "=", "addresses", "[", "'billing_name'", "]", "except", "KeyError", ":", "billing_name", "=", "addresses", "[", "'billing_address_name'", "]", "billing_country", "=", "addresses", "[", "'shipping_address_country'", "]", "if", "not", "billing_country", ":", "billing_country", "=", "None", "billing_address", ",", "_", "=", "Address", ".", "objects", ".", "get_or_create", "(", "name", "=", "billing_name", ",", "line_1", "=", "addresses", "[", "'billing_address_line1'", "]", ",", "city", "=", "addresses", "[", "'billing_address_city'", "]", ",", "postcode", "=", "addresses", "[", "'billing_address_zip'", "]", ",", "country", "=", "billing_country", ")", "billing_address", ".", "save", "(", ")", "else", ":", "shipping_country", "=", "shipping_address", ".", "country", "ip_address", "=", "get_real_ip", "(", "request", ")", "if", "shipping_country", "and", "shipping_option", ":", "site_settings", "=", "Configuration", ".", "for_site", "(", "request", ".", "site", ")", "shipping_rate", "=", "get_shipping_cost", "(", "site_settings", ",", "shipping_address", ".", "country", ".", "pk", ",", "shipping_option", ")", "[", "'rate'", "]", "else", ":", "shipping_rate", "=", "Decimal", "(", "0", ")", "order", "=", "Order", "(", "email", "=", "email", ",", "ip_address", "=", "ip_address", ",", "shipping_address", "=", "shipping_address", ",", "billing_address", "=", "billing_address", ",", "shipping_rate", "=", "shipping_rate", ")", "order", ".", "save", "(", ")", "# Create the order items & compute total", "total", "=", "0", "for", "item", "in", "basket_items", ":", "total", "+=", "item", ".", "total", "(", ")", "order_item", "=", "OrderItem", "(", "product", "=", "item", ".", "variant", ",", "quantity", "=", "item", ".", "quantity", ",", "order", "=", "order", ")", "order_item", ".", "save", "(", ")", "if", "capture_payment", ":", "desc", "=", "'Payment from {} for order id #{}'", ".", "format", "(", "email", ",", "order", ".", "id", ")", "try", ":", "transaction_id", "=", "GATEWAY", ".", "create_payment", "(", "request", ",", "total", "+", "shipping_rate", ",", "description", "=", "desc", ")", "order", ".", "payment_date", "=", "timezone", ".", "now", "(", ")", "order", ".", "transaction_id", "=", "transaction_id", "# Once the order has been successfully taken, we can empty the basket", "destroy_basket", 
"(", "request", ")", "except", "PaymentError", ":", "order", ".", "status", "=", "order", ".", "FAILURE", "order", ".", "save", "(", ")", "return", "order" ]
40.75
0.002496
def parse_URL(cls, url, timeout=None, resolve=True, required=False, unresolved_value=DEFAULT_SUBSTITUTION):
    """Parse URL

    :param url: url to parse
    :type url: basestring
    :param timeout: socket timeout in seconds; if None, the global default timeout is used
    :type timeout: int or float
    :param resolve: if true, resolve substitutions
    :type resolve: boolean
    :param unresolved_value: value assigned to unresolved substitutions.
        If overridden with a default value, it will replace all unresolved values with the default value.
        If set to pyhocon.STR_SUBSTITUTION, it will replace each value by its substitution expression (e.g., ${x})
    :type unresolved_value: boolean
    :return: Config object or []
    :type return: Config or list
    """
    socket_timeout = socket._GLOBAL_DEFAULT_TIMEOUT if timeout is None else timeout
    try:
        with contextlib.closing(urlopen(url, timeout=socket_timeout)) as fd:
            content = fd.read() if use_urllib2 else fd.read().decode('utf-8')
            return cls.parse_string(content, os.path.dirname(url), resolve, unresolved_value)
    except (HTTPError, URLError) as e:
        logger.warn('Cannot include url %s. Resource is inaccessible.', url)
        if required:
            raise e
        else:
            return []
[ "def", "parse_URL", "(", "cls", ",", "url", ",", "timeout", "=", "None", ",", "resolve", "=", "True", ",", "required", "=", "False", ",", "unresolved_value", "=", "DEFAULT_SUBSTITUTION", ")", ":", "socket_timeout", "=", "socket", ".", "_GLOBAL_DEFAULT_TIMEOUT", "if", "timeout", "is", "None", "else", "timeout", "try", ":", "with", "contextlib", ".", "closing", "(", "urlopen", "(", "url", ",", "timeout", "=", "socket_timeout", ")", ")", "as", "fd", ":", "content", "=", "fd", ".", "read", "(", ")", "if", "use_urllib2", "else", "fd", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "return", "cls", ".", "parse_string", "(", "content", ",", "os", ".", "path", ".", "dirname", "(", "url", ")", ",", "resolve", ",", "unresolved_value", ")", "except", "(", "HTTPError", ",", "URLError", ")", "as", "e", ":", "logger", ".", "warn", "(", "'Cannot include url %s. Resource is inaccessible.'", ",", "url", ")", "if", "required", ":", "raise", "e", "else", ":", "return", "[", "]" ]
48.846154
0.008494
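Stripped of the HOCON specifics, the fetch in parse_URL is: open the URL with an explicit socket timeout, close the handle deterministically via contextlib.closing, and decode bytes to text on Python 3. That core, isolated (Python 3 urllib; the fetch_text name is ours):

import contextlib
from urllib.request import urlopen
from urllib.error import HTTPError, URLError

def fetch_text(url, timeout=5.0):
    """Return the body of url as str, or None if the resource is inaccessible."""
    try:
        with contextlib.closing(urlopen(url, timeout=timeout)) as fd:
            return fd.read().decode('utf-8')
    except (HTTPError, URLError):
        return None

# usage: body = fetch_text('http://example.com/app.conf')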
def dashboard(request): """Dashboard page""" user = None if request.user.is_authenticated(): user = User.objects.get(username=request.user) latest_results, count_types = get_collaboration_data(user) latest_results.sort(key=lambda elem: elem.modified, reverse=True) context = { 'type_count': count_types, 'latest_results': latest_results[:6], } return render(request, 'home.html', context)
[ "def", "dashboard", "(", "request", ")", ":", "user", "=", "None", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "username", "=", "request", ".", "user", ")", "latest_results", ",", "count_types", "=", "get_collaboration_data", "(", "user", ")", "latest_results", ".", "sort", "(", "key", "=", "lambda", "elem", ":", "elem", ".", "modified", ",", "reverse", "=", "True", ")", "context", "=", "{", "'type_count'", ":", "count_types", ",", "'latest_results'", ":", "latest_results", "[", ":", "6", "]", ",", "}", "return", "render", "(", "request", ",", "'home.html'", ",", "context", ")" ]
28.866667
0.002237
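One small idiom from the dashboard record: sorting model instances in Python (rather than in the database) by a timestamp attribute, newest first. operator.attrgetter does the same job as the lambda and reads a touch cleaner:

from operator import attrgetter
from types import SimpleNamespace

results = [SimpleNamespace(name='a', modified=3), SimpleNamespace(name='b', modified=7)]
results.sort(key=attrgetter('modified'), reverse=True)   # newest first
print([r.name for r in results])  # ['b', 'a']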