Column summary:
  text           string     lengths 75 – 104k
  code_tokens    sequence
  avg_line_len   float64    7.91 – 980
  score          float64    0 – 0.18
def roundClosestValid(val, res, decimals=None):
    """ round to closest resolution """
    if decimals is None and "." in str(res):
        decimals = len(str(res).split('.')[1])
    return round(round(val / res) * res, decimals)
[ "def", "roundClosestValid", "(", "val", ",", "res", ",", "decimals", "=", "None", ")", ":", "if", "decimals", "is", "None", "and", "\".\"", "in", "str", "(", "res", ")", ":", "decimals", "=", "len", "(", "str", "(", "res", ")", ".", "split", "(", "'.'", ")", "[", "1", "]", ")", "return", "round", "(", "round", "(", "val", "/", "res", ")", "*", "res", ",", "decimals", ")" ]
40.333333
0.008097
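For illustration, a quick check of roundClosestValid's behavior; the inputs are made up, and the outputs follow directly from the round-to-multiple logic:

    # Round to the nearest multiple of the resolution; the decimal count
    # is inferred from the resolution's string form (0.25 -> 2 decimals).
    print(roundClosestValid(7.77, 0.25))   # 7.75
    print(roundClosestValid(0.123, 0.05))  # 0.1  (2.46 rounds to 2 steps of 0.05)
    print(roundClosestValid(103, 5))       # 105  (integer resolution, decimals stays None)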
def get_locales(prefix=None, normalize=True,
                locale_getter=_default_locale_getter):
    """
    Get all the locales that are available on the system.

    Parameters
    ----------
    prefix : str
        If not ``None`` then return only those locales with the prefix
        provided. For example to get all English language locales (those that
        start with ``"en"``), pass ``prefix="en"``.
    normalize : bool
        Call ``locale.normalize`` on the resulting list of available locales.
        If ``True``, only locales that can be set without throwing an
        ``Exception`` are returned.
    locale_getter : callable
        The function to use to retrieve the current locales. This should
        return a string with each locale separated by a newline character.

    Returns
    -------
    locales : list of strings
        A list of locale strings that can be set with ``locale.setlocale()``.
        For example::

            locale.setlocale(locale.LC_ALL, locale_string)

    On error will return None (no locale available, e.g. Windows)
    """
    try:
        raw_locales = locale_getter()
    except Exception:
        return None

    # Initialized up front so a TypeError below cannot leave it unbound.
    out_locales = []
    try:
        # raw_locales is "\n" separated list of locales
        # it may contain non-decodable parts, so split
        # extract what we can and then rejoin.
        raw_locales = raw_locales.split(b'\n')
        for x in raw_locales:
            out_locales.append(str(
                x, encoding=options.display.encoding))
    except TypeError:
        pass

    if prefix is None:
        return _valid_locales(out_locales, normalize)

    pattern = re.compile('{prefix}.*'.format(prefix=prefix))
    found = pattern.findall('\n'.join(out_locales))
    return _valid_locales(found, normalize)
[ "def", "get_locales", "(", "prefix", "=", "None", ",", "normalize", "=", "True", ",", "locale_getter", "=", "_default_locale_getter", ")", ":", "try", ":", "raw_locales", "=", "locale_getter", "(", ")", "except", "Exception", ":", "return", "None", "try", ":", "# raw_locales is \"\\n\" separated list of locales", "# it may contain non-decodable parts, so split", "# extract what we can and then rejoin.", "raw_locales", "=", "raw_locales", ".", "split", "(", "b'\\n'", ")", "out_locales", "=", "[", "]", "for", "x", "in", "raw_locales", ":", "out_locales", ".", "append", "(", "str", "(", "x", ",", "encoding", "=", "options", ".", "display", ".", "encoding", ")", ")", "except", "TypeError", ":", "pass", "if", "prefix", "is", "None", ":", "return", "_valid_locales", "(", "out_locales", ",", "normalize", ")", "pattern", "=", "re", ".", "compile", "(", "'{prefix}.*'", ".", "format", "(", "prefix", "=", "prefix", ")", ")", "found", "=", "pattern", ".", "findall", "(", "'\\n'", ".", "join", "(", "out_locales", ")", ")", "return", "_valid_locales", "(", "found", ",", "normalize", ")" ]
32.518519
0.000553
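A hedged usage sketch for get_locales; the result depends entirely on the locales installed on the host, and _default_locale_getter comes from the surrounding module:

    # All English-language locales the system can actually set,
    # e.g. ['en_US.UTF-8', 'en_GB.UTF-8', ...]; None on platforms
    # where no locale list is available (e.g. Windows).
    english = get_locales(prefix='en')
    print(english if english is not None else 'no locales available')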
def object_to_markdownpage(obj_name, obj, s=''):
    """Generate the markdown documentation of a Python object.

    Parameters
    ----------
    obj_name : str
        Name of the Python object.
    obj : object
        Python object (class, method, function, ...)
    s : str (default: '')
        A string to which the documentation will be appended.

    Returns
    -------
    s : str
        The markdown page.

    """
    # header
    s += '## %s\n' % obj_name

    # function/class/method signature
    sig = str(inspect.signature(obj)).replace('(self, ', '(')
    s += '\n*%s%s*\n\n' % (obj_name, sig)

    # docstring body
    doc = str(inspect.getdoc(obj))
    ds = docstring_to_markdown(doc)
    s += '\n'.join(ds)

    # document methods
    if inspect.isclass(obj):
        methods, properties = '\n\n### Methods', '\n\n### Properties'
        members = inspect.getmembers(obj)
        for m in members:
            if not m[0].startswith('_') and len(m) >= 2:
                if isinstance(m[1], property):
                    properties += '\n\n<hr>\n\n*%s*\n\n' % m[0]
                    m_doc = docstring_to_markdown(str(inspect.getdoc(m[1])))
                    properties += '\n'.join(m_doc)
                else:
                    # strip the bound 'self' parameter from the signature
                    sig = str(inspect.signature(m[1]))
                    sig = sig.replace('(self, ', '(').replace('(self)', '()')
                    methods += '\n\n<hr>\n\n*%s%s*\n\n' % (m[0], sig)
                    m_doc = docstring_to_markdown(str(inspect.getdoc(m[1])))
                    methods += '\n'.join(m_doc)
        s += methods
        s += properties
    return s + '\n\n'
[ "def", "object_to_markdownpage", "(", "obj_name", ",", "obj", ",", "s", "=", "''", ")", ":", "# header", "s", "+=", "'## %s\\n'", "%", "obj_name", "# function/class/method signature", "sig", "=", "str", "(", "inspect", ".", "signature", "(", "obj", ")", ")", ".", "replace", "(", "'(self, '", ",", "'('", ")", "s", "+=", "'\\n*%s%s*\\n\\n'", "%", "(", "obj_name", ",", "sig", ")", "# docstring body", "doc", "=", "str", "(", "inspect", ".", "getdoc", "(", "obj", ")", ")", "ds", "=", "docstring_to_markdown", "(", "doc", ")", "s", "+=", "'\\n'", ".", "join", "(", "ds", ")", "# document methods", "if", "inspect", ".", "isclass", "(", "obj", ")", ":", "methods", ",", "properties", "=", "'\\n\\n### Methods'", ",", "'\\n\\n### Properties'", "members", "=", "inspect", ".", "getmembers", "(", "obj", ")", "for", "m", "in", "members", ":", "if", "not", "m", "[", "0", "]", ".", "startswith", "(", "'_'", ")", "and", "len", "(", "m", ")", ">=", "2", ":", "if", "isinstance", "(", "m", "[", "1", "]", ",", "property", ")", ":", "properties", "+=", "'\\n\\n<hr>\\n\\n*%s*\\n\\n'", "%", "m", "[", "0", "]", "m_doc", "=", "docstring_to_markdown", "(", "str", "(", "inspect", ".", "getdoc", "(", "m", "[", "1", "]", ")", ")", ")", "properties", "+=", "'\\n'", ".", "join", "(", "m_doc", ")", "else", ":", "sig", "=", "str", "(", "inspect", ".", "signature", "(", "m", "[", "1", "]", ")", ")", "sig", "=", "sig", ".", "replace", "(", "'(self, '", ",", "'('", ")", ".", "replace", "(", "'(self)'", ",", "'()'", ")", "sig", "=", "sig", ".", "replace", "(", "'(self)'", ",", "'()'", ")", "methods", "+=", "'\\n\\n<hr>\\n\\n*%s%s*\\n\\n'", "%", "(", "m", "[", "0", "]", ",", "sig", ")", "m_doc", "=", "docstring_to_markdown", "(", "str", "(", "inspect", ".", "getdoc", "(", "m", "[", "1", "]", ")", ")", ")", "methods", "+=", "'\\n'", ".", "join", "(", "m_doc", ")", "s", "+=", "methods", "s", "+=", "properties", "return", "s", "+", "'\\n\\n'" ]
32.86
0.000591
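A minimal sketch of driving object_to_markdownpage, assuming the surrounding module (docstring_to_markdown and the inspect import) is available; greet is a made-up target function:

    def greet(name, excited=False):
        """Return a greeting for *name*."""
        return 'Hello, {}{}'.format(name, '!' if excited else '.')

    page = object_to_markdownpage('greet', greet)
    print(page.splitlines()[0])   # '## greet'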
def extractPrintSaveIntermittens():
    """
    This function will print out the intermittents onto the screen for
    casual viewing.  It will also print out where the giant summary
    dictionary is going to be stored.

    :return: None
    """
    # extract intermittents from collected failed tests
    global g_summary_dict_intermittents
    localtz = time.tzname[0]
    for ind in range(len(g_summary_dict_all["TestName"])):
        if g_summary_dict_all["TestInfo"][ind]["FailureCount"] >= g_threshold_failure:
            addFailedTests(g_summary_dict_intermittents, g_summary_dict_all, ind)

    # save dict in file
    if len(g_summary_dict_intermittents["TestName"]) > 0:
        # use a context manager so the JSON file handle is closed promptly
        with open(g_summary_dict_name, 'w') as dictFile:
            json.dump(g_summary_dict_intermittents, dictFile)
        with open(g_summary_csv_filename, 'w') as summaryFile:
            for ind in range(len(g_summary_dict_intermittents["TestName"])):
                testName = g_summary_dict_intermittents["TestName"][ind]
                numberFailure = g_summary_dict_intermittents["TestInfo"][ind]["FailureCount"]
                firstFailedTS = parser.parse(
                    time.ctime(min(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"])) + ' ' + localtz)
                firstFailedStr = firstFailedTS.strftime("%a %b %d %H:%M:%S %Y %Z")
                recentFail = parser.parse(
                    time.ctime(max(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"])) + ' ' + localtz)
                recentFailStr = recentFail.strftime("%a %b %d %H:%M:%S %Y %Z")
                eachTest = "{0}, {1}, {2}, {3}\n".format(
                    testName, recentFailStr, numberFailure,
                    g_summary_dict_intermittents["TestInfo"][ind]["TestCategory"][0])
                summaryFile.write(eachTest)
                print("Intermittent: {0}, Last failed: {1}, Failed {2} times since "
                      "{3}".format(testName, recentFailStr, numberFailure, firstFailedStr))
[ "def", "extractPrintSaveIntermittens", "(", ")", ":", "# extract intermittents from collected failed tests", "global", "g_summary_dict_intermittents", "localtz", "=", "time", ".", "tzname", "[", "0", "]", "for", "ind", "in", "range", "(", "len", "(", "g_summary_dict_all", "[", "\"TestName\"", "]", ")", ")", ":", "if", "g_summary_dict_all", "[", "\"TestInfo\"", "]", "[", "ind", "]", "[", "\"FailureCount\"", "]", ">=", "g_threshold_failure", ":", "addFailedTests", "(", "g_summary_dict_intermittents", ",", "g_summary_dict_all", ",", "ind", ")", "# save dict in file", "if", "len", "(", "g_summary_dict_intermittents", "[", "\"TestName\"", "]", ")", ">", "0", ":", "json", ".", "dump", "(", "g_summary_dict_intermittents", ",", "open", "(", "g_summary_dict_name", ",", "'w'", ")", ")", "with", "open", "(", "g_summary_csv_filename", ",", "'w'", ")", "as", "summaryFile", ":", "for", "ind", "in", "range", "(", "len", "(", "g_summary_dict_intermittents", "[", "\"TestName\"", "]", ")", ")", ":", "testName", "=", "g_summary_dict_intermittents", "[", "\"TestName\"", "]", "[", "ind", "]", "numberFailure", "=", "g_summary_dict_intermittents", "[", "\"TestInfo\"", "]", "[", "ind", "]", "[", "\"FailureCount\"", "]", "firstFailedTS", "=", "parser", ".", "parse", "(", "time", ".", "ctime", "(", "min", "(", "g_summary_dict_intermittents", "[", "\"TestInfo\"", "]", "[", "ind", "]", "[", "\"Timestamp\"", "]", ")", ")", "+", "' '", "+", "localtz", ")", "firstFailedStr", "=", "firstFailedTS", ".", "strftime", "(", "\"%a %b %d %H:%M:%S %Y %Z\"", ")", "recentFail", "=", "parser", ".", "parse", "(", "time", ".", "ctime", "(", "max", "(", "g_summary_dict_intermittents", "[", "\"TestInfo\"", "]", "[", "ind", "]", "[", "\"Timestamp\"", "]", ")", ")", "+", "' '", "+", "localtz", ")", "recentFailStr", "=", "recentFail", ".", "strftime", "(", "\"%a %b %d %H:%M:%S %Y %Z\"", ")", "eachTest", "=", "\"{0}, {1}, {2}, {3}\\n\"", ".", "format", "(", "testName", ",", "recentFailStr", ",", "numberFailure", ",", "g_summary_dict_intermittents", "[", "\"TestInfo\"", "]", "[", "ind", "]", "[", "\"TestCategory\"", "]", "[", "0", "]", ")", "summaryFile", ".", "write", "(", "eachTest", ")", "print", "(", "\"Intermittent: {0}, Last failed: {1}, Failed {2} times since \"", "\"{3}\"", ".", "format", "(", "testName", ",", "recentFailStr", ",", "numberFailure", ",", "firstFailedStr", ")", ")" ]
55.333333
0.008387
def load_features(paths: List[str], expected_shape: Optional[tuple] = None) -> List[np.ndarray]:
    """
    Load features specified with absolute paths.

    :param paths: List of files specified with paths.
    :param expected_shape: Optional expected shape.
    :return: A list of loaded images (numpy arrays).
    """
    data = []  # type: List[np.ndarray]
    for path in paths:
        data.append(load_feature(path, expected_shape))
    return data
[ "def", "load_features", "(", "paths", ":", "List", "[", "str", "]", ",", "expected_shape", ":", "Optional", "[", "tuple", "]", "=", "None", ")", "->", "List", "[", "np", ".", "ndarray", "]", ":", "data", "=", "[", "]", "# type: List[np.ndarray]", "for", "path", "in", "paths", ":", "data", ".", "append", "(", "load_feature", "(", "path", ",", "expected_shape", ")", ")", "return", "data" ]
35.538462
0.00211
def make_scrape_request(session, url, mode='get', data=None):
    """Make a request to URL."""
    try:
        html = session.request(mode, url, data=data)
    except RequestException:
        raise VooblyError('failed to connect')
    if SCRAPE_FETCH_ERROR in html.text:
        raise VooblyError('not logged in')
    if html.status_code != 200 or SCRAPE_PAGE_NOT_FOUND in html.text:
        raise VooblyError('page not found')
    return bs4.BeautifulSoup(html.text, features='lxml')
[ "def", "make_scrape_request", "(", "session", ",", "url", ",", "mode", "=", "'get'", ",", "data", "=", "None", ")", ":", "try", ":", "html", "=", "session", ".", "request", "(", "mode", ",", "url", ",", "data", "=", "data", ")", "except", "RequestException", ":", "raise", "VooblyError", "(", "'failed to connect'", ")", "if", "SCRAPE_FETCH_ERROR", "in", "html", ".", "text", ":", "raise", "VooblyError", "(", "'not logged in'", ")", "if", "html", ".", "status_code", "!=", "200", "or", "SCRAPE_PAGE_NOT_FOUND", "in", "html", ".", "text", ":", "raise", "VooblyError", "(", "'page not found'", ")", "return", "bs4", ".", "BeautifulSoup", "(", "html", ".", "text", ",", "features", "=", "'lxml'", ")" ]
43.272727
0.002058
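A hedged sketch of calling make_scrape_request; VooblyError and the SCRAPE_* markers come from the surrounding module, and the URL is a placeholder:

    import requests

    session = requests.Session()
    try:
        soup = make_scrape_request(session, 'https://www.voobly.com/profile/view/123')
        print(soup.title.string if soup.title else 'no title')
    except VooblyError as err:
        print('scrape failed:', err)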
def copy(self):
    """Make a deep copy of this object.

    Example::

        >>> c2 = c.copy()

    """
    vec1 = np.copy(self.scoef1._vec)
    vec2 = np.copy(self.scoef2._vec)
    return VectorCoefs(vec1, vec2, self.nmax, self.mmax)
[ "def", "copy", "(", "self", ")", ":", "vec1", "=", "np", ".", "copy", "(", "self", ".", "scoef1", ".", "_vec", ")", "vec2", "=", "np", ".", "copy", "(", "self", ".", "scoef2", ".", "_vec", ")", "return", "VectorCoefs", "(", "vec1", ",", "vec2", ",", "self", ".", "nmax", ",", "self", ".", "mmax", ")" ]
25.545455
0.013746
def __create_list_bidir_connections(self):
    """!
    @brief Creates network as bidirectional list.
    @details Each oscillator may be connected with two neighbors in line with
             classical list structure: right, left.

    """
    if (self._conn_represent == conn_represent.MATRIX):
        for index in range(0, self._num_osc, 1):
            self._osc_conn.append([0] * self._num_osc)
            self._osc_conn[index][index] = False
            if (index > 0):
                self._osc_conn[index][index - 1] = True
            if (index < (self._num_osc - 1)):
                self._osc_conn[index][index + 1] = True
    elif (self._conn_represent == conn_represent.LIST):
        for index in range(self._num_osc):
            self._osc_conn.append([])
            if (index > 0):
                self._osc_conn[index].append(index - 1)
            if (index < (self._num_osc - 1)):
                self._osc_conn[index].append(index + 1)
[ "def", "__create_list_bidir_connections", "(", "self", ")", ":", "if", "(", "self", ".", "_conn_represent", "==", "conn_represent", ".", "MATRIX", ")", ":", "for", "index", "in", "range", "(", "0", ",", "self", ".", "_num_osc", ",", "1", ")", ":", "self", ".", "_osc_conn", ".", "append", "(", "[", "0", "]", "*", "self", ".", "_num_osc", ")", "self", ".", "_osc_conn", "[", "index", "]", "[", "index", "]", "=", "False", "if", "(", "index", ">", "0", ")", ":", "self", ".", "_osc_conn", "[", "index", "]", "[", "index", "-", "1", "]", "=", "True", "if", "(", "index", "<", "(", "self", ".", "_num_osc", "-", "1", ")", ")", ":", "self", ".", "_osc_conn", "[", "index", "]", "[", "index", "+", "1", "]", "=", "True", "elif", "(", "self", ".", "_conn_represent", "==", "conn_represent", ".", "LIST", ")", ":", "for", "index", "in", "range", "(", "self", ".", "_num_osc", ")", ":", "self", ".", "_osc_conn", ".", "append", "(", "[", "]", ")", "if", "(", "index", ">", "0", ")", ":", "self", ".", "_osc_conn", "[", "index", "]", ".", "append", "(", "index", "-", "1", ")", "if", "(", "index", "<", "(", "self", ".", "_num_osc", "-", "1", ")", ")", ":", "self", ".", "_osc_conn", "[", "index", "]", ".", "append", "(", "index", "+", "1", ")" ]
43.64
0.013453
def evaluate_net(net, path_imgrec, num_classes, num_batch,
                 mean_pixels, data_shape, model_prefix, epoch,
                 ctx=mx.cpu(), batch_size=32, path_imglist="",
                 nms_thresh=0.45, force_nms=False, ovp_thresh=0.5,
                 use_difficult=False, class_names=None, voc07_metric=False):
    """
    Evaluate network given validation record file

    Parameters
    ----------
    net : str or None
        Network name or use None to load from json without modifying
    path_imgrec : str
        path to the record validation file
    path_imglist : str
        path to the list file to replace labels in record file, optional
    num_classes : int
        number of classes, not including background
    mean_pixels : tuple
        (mean_r, mean_g, mean_b)
    data_shape : tuple or int
        (3, height, width) or height/width
    model_prefix : str
        model prefix of saved checkpoint
    epoch : int
        load model epoch
    ctx : mx.ctx
        mx.gpu() or mx.cpu()
    batch_size : int
        validation batch size
    nms_thresh : float
        non-maximum suppression threshold
    force_nms : boolean
        whether suppress different class objects
    ovp_thresh : float
        AP overlap threshold for true/false positives
    use_difficult : boolean
        whether to use difficult objects in evaluation if applicable
    class_names : comma separated str
        class names in string, must correspond to num_classes if set
    voc07_metric : boolean
        whether to use 11-point evaluation as in VOC07 competition
    """
    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # args
    if isinstance(data_shape, int):
        data_shape = (3, data_shape, data_shape)
    assert len(data_shape) == 3 and data_shape[0] == 3
    model_prefix += '_' + str(data_shape[1])

    # iterator
    eval_iter = DetRecordIter(path_imgrec, batch_size, data_shape,
                              mean_pixels=mean_pixels,
                              path_imglist=path_imglist, **cfg.valid)
    # model params
    load_net, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
    # network
    if net is None:
        net = load_net
    else:
        net = get_symbol(net, data_shape[1], num_classes=num_classes,
                         nms_thresh=nms_thresh, force_suppress=force_nms)
    if not 'label' in net.list_arguments():
        label = mx.sym.Variable(name='label')
        net = mx.sym.Group([net, label])

    # init module
    mod = mx.mod.Module(net, label_names=('label',), logger=logger,
                        context=ctx, fixed_param_names=net.list_arguments())
    mod.bind(data_shapes=eval_iter.provide_data,
             label_shapes=eval_iter.provide_label)
    mod.set_params(args, auxs, allow_missing=False, force_init=True)

    # run evaluation
    if voc07_metric:
        metric = VOC07MApMetric(ovp_thresh, use_difficult, class_names)
    else:
        metric = MApMetric(ovp_thresh, use_difficult, class_names)

    num = num_batch * batch_size
    data = [mx.random.uniform(-1.0, 1.0, shape=shape, ctx=ctx)
            for _, shape in mod.data_shapes]
    batch = mx.io.DataBatch(data, [])  # empty label

    dry_run = 5  # use 5 iterations to warm up
    for i in range(dry_run):
        mod.forward(batch, is_train=False)
        for output in mod.get_outputs():
            output.wait_to_read()

    tic = time.time()
    results = mod.score(eval_iter, metric, num_batch=num_batch)
    speed = num / (time.time() - tic)
    if logger is not None:
        logger.info('Finished inference with %d images' % num)
        logger.info('Finished with %f images per second', speed)

    for k, v in results:
        print("{}: {}".format(k, v))
[ "def", "evaluate_net", "(", "net", ",", "path_imgrec", ",", "num_classes", ",", "num_batch", ",", "mean_pixels", ",", "data_shape", ",", "model_prefix", ",", "epoch", ",", "ctx", "=", "mx", ".", "cpu", "(", ")", ",", "batch_size", "=", "32", ",", "path_imglist", "=", "\"\"", ",", "nms_thresh", "=", "0.45", ",", "force_nms", "=", "False", ",", "ovp_thresh", "=", "0.5", ",", "use_difficult", "=", "False", ",", "class_names", "=", "None", ",", "voc07_metric", "=", "False", ")", ":", "# set up logger", "logging", ".", "basicConfig", "(", ")", "logger", "=", "logging", ".", "getLogger", "(", ")", "logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "# args", "if", "isinstance", "(", "data_shape", ",", "int", ")", ":", "data_shape", "=", "(", "3", ",", "data_shape", ",", "data_shape", ")", "assert", "len", "(", "data_shape", ")", "==", "3", "and", "data_shape", "[", "0", "]", "==", "3", "model_prefix", "+=", "'_'", "+", "str", "(", "data_shape", "[", "1", "]", ")", "# iterator", "eval_iter", "=", "DetRecordIter", "(", "path_imgrec", ",", "batch_size", ",", "data_shape", ",", "mean_pixels", "=", "mean_pixels", ",", "path_imglist", "=", "path_imglist", ",", "*", "*", "cfg", ".", "valid", ")", "# model params", "load_net", ",", "args", ",", "auxs", "=", "mx", ".", "model", ".", "load_checkpoint", "(", "model_prefix", ",", "epoch", ")", "# network", "if", "net", "is", "None", ":", "net", "=", "load_net", "else", ":", "net", "=", "get_symbol", "(", "net", ",", "data_shape", "[", "1", "]", ",", "num_classes", "=", "num_classes", ",", "nms_thresh", "=", "nms_thresh", ",", "force_suppress", "=", "force_nms", ")", "if", "not", "'label'", "in", "net", ".", "list_arguments", "(", ")", ":", "label", "=", "mx", ".", "sym", ".", "Variable", "(", "name", "=", "'label'", ")", "net", "=", "mx", ".", "sym", ".", "Group", "(", "[", "net", ",", "label", "]", ")", "# init module", "mod", "=", "mx", ".", "mod", ".", "Module", "(", "net", ",", "label_names", "=", "(", "'label'", ",", ")", ",", "logger", "=", "logger", ",", "context", "=", "ctx", ",", "fixed_param_names", "=", "net", ".", "list_arguments", "(", ")", ")", "mod", ".", "bind", "(", "data_shapes", "=", "eval_iter", ".", "provide_data", ",", "label_shapes", "=", "eval_iter", ".", "provide_label", ")", "mod", ".", "set_params", "(", "args", ",", "auxs", ",", "allow_missing", "=", "False", ",", "force_init", "=", "True", ")", "# run evaluation", "if", "voc07_metric", ":", "metric", "=", "VOC07MApMetric", "(", "ovp_thresh", ",", "use_difficult", ",", "class_names", ")", "else", ":", "metric", "=", "MApMetric", "(", "ovp_thresh", ",", "use_difficult", ",", "class_names", ")", "num", "=", "num_batch", "*", "batch_size", "data", "=", "[", "mx", ".", "random", ".", "uniform", "(", "-", "1.0", ",", "1.0", ",", "shape", "=", "shape", ",", "ctx", "=", "ctx", ")", "for", "_", ",", "shape", "in", "mod", ".", "data_shapes", "]", "batch", "=", "mx", ".", "io", ".", "DataBatch", "(", "data", ",", "[", "]", ")", "# empty label", "dry_run", "=", "5", "# use 5 iterations to warm up", "for", "i", "in", "range", "(", "dry_run", ")", ":", "mod", ".", "forward", "(", "batch", ",", "is_train", "=", "False", ")", "for", "output", "in", "mod", ".", "get_outputs", "(", ")", ":", "output", ".", "wait_to_read", "(", ")", "tic", "=", "time", ".", "time", "(", ")", "results", "=", "mod", ".", "score", "(", "eval_iter", ",", "metric", ",", "num_batch", "=", "num_batch", ")", "speed", "=", "num", "/", "(", "time", ".", "time", "(", ")", "-", "tic", ")", "if", "logger", "is", "not", "None", ":", "logger", ".", "info", "(", "'Finished inference with %d images'", "%", "num", ")", "logger", ".", "info", "(", "'Finished with %f images per second'", ",", "speed", ")", "for", "k", ",", "v", "in", "results", ":", "print", "(", "\"{}: {}\"", ".", "format", "(", "k", ",", "v", ")", ")" ]
36.36
0.00241
def clean(outputdir, drivers=None):
    """Remove driver executables from the specified outputdir.

    drivers can be a list of drivers to filter which executables
    to remove. Specify a version using an equal sign i.e.: 'chrome=2.2'
    """
    if drivers:
        # Generate a list of tuples: [(driver_name, requested_version)]
        # If driver string does not contain a version, the second element
        # of the tuple is None.
        # Example:
        # [('driver_a', '2.2'), ('driver_b', None)]
        drivers_split = [helpers.split_driver_name_and_version(x)
                         for x in drivers]
        file_data = [(helpers.normalize_driver_name(x[0]), x[1])
                     for x in drivers_split]
    else:
        file_data = [(x, None) for x in config.ALL_DRIVERS]

    files = [file for file in os.listdir(outputdir)
             if os.path.isfile(os.path.join(outputdir, file))]
    for file in files:
        for data in file_data:
            prefix, version = data
            starts_with = file.startswith(prefix)
            version_match = 'N/A'
            if version is not None:
                file_version = helpers.extract_version_from_filename(file)
                if file_version == version:
                    version_match = True
                else:
                    version_match = False
            if starts_with and version_match in [True, 'N/A']:
                filepath = os.path.join(outputdir, file)
                try:
                    os.remove(filepath)
                except OSError:
                    pass
                else:
                    # log only when the file was actually removed
                    logger.info('removed {}'.format(file))
                break
[ "def", "clean", "(", "outputdir", ",", "drivers", "=", "None", ")", ":", "if", "drivers", ":", "# Generate a list of tuples: [(driver_name, requested_version)]", "# If driver string does not contain a version, the second element", "# of the tuple is None.", "# Example:", "# [('driver_a', '2.2'), ('driver_b', None)]", "drivers_split", "=", "[", "helpers", ".", "split_driver_name_and_version", "(", "x", ")", "for", "x", "in", "drivers", "]", "file_data", "=", "[", "(", "helpers", ".", "normalize_driver_name", "(", "x", "[", "0", "]", ")", ",", "x", "[", "1", "]", ")", "for", "x", "in", "drivers_split", "]", "else", ":", "file_data", "=", "[", "(", "x", ",", "None", ")", "for", "x", "in", "config", ".", "ALL_DRIVERS", "]", "files", "=", "[", "file", "for", "file", "in", "os", ".", "listdir", "(", "outputdir", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "outputdir", ",", "file", ")", ")", "]", "for", "file", "in", "files", ":", "for", "data", "in", "file_data", ":", "prefix", ",", "version", "=", "data", "starts_with", "=", "file", ".", "startswith", "(", "prefix", ")", "version_match", "=", "'N/A'", "if", "version", "is", "not", "None", ":", "file_version", "=", "helpers", ".", "extract_version_from_filename", "(", "file", ")", "if", "file_version", "==", "version", ":", "version_match", "=", "True", "else", ":", "version_match", "=", "False", "if", "starts_with", "and", "version_match", "in", "[", "True", ",", "'N/A'", "]", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "outputdir", ",", "file", ")", "try", ":", "os", ".", "remove", "(", "filepath", ")", "except", "OSError", ":", "pass", "finally", ":", "logger", ".", "info", "(", "'removed {}'", ".", "format", "(", "file", ")", ")", "break" ]
40.365854
0.00059
def template_inheritance(obj):
    '''
    Generator that iterates the template and its ancestors.
    The order is from most specialized (furthest descendant) to
    most general (furthest ancestor).

    obj can be either:
        1. Mako Template object
        2. Mako `self` object (available within a rendering template)
    '''
    if isinstance(obj, MakoTemplate):
        obj = create_mako_context(obj)['self']
    elif isinstance(obj, MakoContext):
        obj = obj['self']
    while obj is not None:
        yield obj.template
        obj = obj.inherits
[ "def", "template_inheritance", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "MakoTemplate", ")", ":", "obj", "=", "create_mako_context", "(", "obj", ")", "[", "'self'", "]", "elif", "isinstance", "(", "obj", ",", "MakoContext", ")", ":", "obj", "=", "obj", "[", "'self'", "]", "while", "obj", "is", "not", "None", ":", "yield", "obj", ".", "template", "obj", "=", "obj", ".", "inherits" ]
32.294118
0.00177
def sim(self, src, tar):
    """Return the length similarity of two strings.

    Length similarity is the ratio of the length of the shorter string to
    the longer.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    float
        Length similarity

    Examples
    --------
    >>> cmp = Length()
    >>> cmp.sim('cat', 'hat')
    1.0
    >>> cmp.sim('Niall', 'Neil')
    0.8
    >>> cmp.sim('aluminum', 'Catalan')
    0.875
    >>> cmp.sim('ATCG', 'TAGC')
    1.0

    """
    if src == tar:
        return 1.0
    if not src or not tar:
        return 0.0
    return (
        len(src) / len(tar) if len(src) < len(tar) else len(tar) / len(src)
    )
[ "def", "sim", "(", "self", ",", "src", ",", "tar", ")", ":", "if", "src", "==", "tar", ":", "return", "1.0", "if", "not", "src", "or", "not", "tar", ":", "return", "0.0", "return", "(", "len", "(", "src", ")", "/", "len", "(", "tar", ")", "if", "len", "(", "src", ")", "<", "len", "(", "tar", ")", "else", "len", "(", "tar", ")", "/", "len", "(", "src", ")", ")" ]
22.578947
0.002235
def check_signature(signature, key, data):
    """Compute the HMAC signature and test against a given hash."""
    if isinstance(key, type(u'')):
        key = key.encode()

    digest = 'sha1=' + hmac.new(key, data, hashlib.sha1).hexdigest()

    # Convert everything to byte sequences
    if isinstance(digest, type(u'')):
        digest = digest.encode()
    if isinstance(signature, type(u'')):
        signature = signature.encode()

    return werkzeug.security.safe_str_cmp(digest, signature)
[ "def", "check_signature", "(", "signature", ",", "key", ",", "data", ")", ":", "if", "isinstance", "(", "key", ",", "type", "(", "u''", ")", ")", ":", "key", "=", "key", ".", "encode", "(", ")", "digest", "=", "'sha1='", "+", "hmac", ".", "new", "(", "key", ",", "data", ",", "hashlib", ".", "sha1", ")", ".", "hexdigest", "(", ")", "# Covert everything to byte sequences", "if", "isinstance", "(", "digest", ",", "type", "(", "u''", ")", ")", ":", "digest", "=", "digest", ".", "encode", "(", ")", "if", "isinstance", "(", "signature", ",", "type", "(", "u''", ")", ")", ":", "signature", "=", "signature", ".", "encode", "(", ")", "return", "werkzeug", ".", "security", ".", "safe_str_cmp", "(", "digest", ",", "signature", ")" ]
34.642857
0.002008
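A sketch of verifying a GitHub-style X-Hub-Signature header with check_signature; the secret and payload are made up:

    import hashlib
    import hmac

    secret = b'webhook-secret'
    payload = b'{"action": "opened"}'
    # A well-behaved sender computes the same 'sha1=<hexdigest>' value.
    header = 'sha1=' + hmac.new(secret, payload, hashlib.sha1).hexdigest()
    print(check_signature(header, secret, payload))      # True
    print(check_signature('sha1=bad', secret, payload))  # False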
def get_map_location(target_device, fallback_device='cpu'):
    """Determine the location to map loaded data (e.g., weights)
    for a given target device (e.g. 'cuda').
    """
    map_location = torch.device(target_device)

    # The user wants to use CUDA but there is no CUDA device
    # available, thus fall back to CPU.
    if map_location.type == 'cuda' and not torch.cuda.is_available():
        warnings.warn(
            'Requested to load data to CUDA but no CUDA devices '
            'are available. Loading on device "{}" instead.'.format(
                fallback_device,
            ), DeviceWarning)
        map_location = torch.device(fallback_device)

    return map_location
[ "def", "get_map_location", "(", "target_device", ",", "fallback_device", "=", "'cpu'", ")", ":", "map_location", "=", "torch", ".", "device", "(", "target_device", ")", "# The user wants to use CUDA but there is no CUDA device", "# available, thus fall back to CPU.", "if", "map_location", ".", "type", "==", "'cuda'", "and", "not", "torch", ".", "cuda", ".", "is_available", "(", ")", ":", "warnings", ".", "warn", "(", "'Requested to load data to CUDA but no CUDA devices '", "'are available. Loading on device \"{}\" instead.'", ".", "format", "(", "fallback_device", ",", ")", ",", "DeviceWarning", ")", "map_location", "=", "torch", ".", "device", "(", "fallback_device", ")", "return", "map_location" ]
42.4375
0.001441
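Typical use is feeding the result to torch.load; a brief sketch, assuming a checkpoint file 'model.pt' exists:

    import torch

    # Falls back to CPU (with a DeviceWarning) when CUDA is unavailable.
    map_location = get_map_location('cuda')
    state_dict = torch.load('model.pt', map_location=map_location)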
def split_args(args):
    """
    Split a list of argument strings into a dictionary where each key is an
    argument name.

    An argument looks like ``force_ssl=True``.
    """
    if not args:
        return {}

    # Handle the old comma separated argument format.
    if len(args) == 1 and not REGEXP_ARGS.search(args[0]):
        args = args[0].split(',')

    # Separate out the key and value for each argument.
    args_dict = {}
    for arg in args:
        split_arg = arg.split('=', 1)
        # Ternary instead of the old and/or idiom, so an explicit empty
        # value ('key=') is preserved rather than collapsed to None.
        value = split_arg[1] if len(split_arg) > 1 else None
        args_dict[split_arg[0]] = value

    return args_dict
[ "def", "split_args", "(", "args", ")", ":", "if", "not", "args", ":", "return", "{", "}", "# Handle the old comma separated argument format.", "if", "len", "(", "args", ")", "==", "1", "and", "not", "REGEXP_ARGS", ".", "search", "(", "args", "[", "0", "]", ")", ":", "args", "=", "args", "[", "0", "]", ".", "split", "(", "','", ")", "# Separate out the key and value for each argument.", "args_dict", "=", "{", "}", "for", "arg", "in", "args", ":", "split_arg", "=", "arg", ".", "split", "(", "'='", ",", "1", ")", "value", "=", "len", "(", "split_arg", ")", ">", "1", "and", "split_arg", "[", "1", "]", "or", "None", "args_dict", "[", "split_arg", "[", "0", "]", "]", "=", "value", "return", "args_dict" ]
27.227273
0.001613
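A quick illustration of the two argument formats split_args accepts (the argument names are invented):

    print(split_args(['force_ssl=True', 'debug']))
    # {'force_ssl': 'True', 'debug': None}

    # Old comma-separated form: a single string without '=' is split on commas.
    print(split_args(['alpha,beta']))
    # {'alpha': None, 'beta': None}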
def queryWorkitems(self, query_str, projectarea_id=None,
                   projectarea_name=None, returned_properties=None,
                   archived=False):
    """Query workitems with the query string in a certain
    :class:`rtcclient.project_area.ProjectArea`

    At least either of `projectarea_id` and `projectarea_name` is given

    :param query_str: a valid query string
    :param projectarea_id: the :class:`rtcclient.project_area.ProjectArea` id
    :param projectarea_name: the :class:`rtcclient.project_area.ProjectArea`
        name
    :param returned_properties: the returned properties that you want.
        Refer to :class:`rtcclient.client.RTCClient` for more explanations
    :param archived: (default is False) whether the
        :class:`rtcclient.workitem.Workitem` is archived
    :return: a :class:`list` that contains the queried
        :class:`rtcclient.workitem.Workitem` objects
    :rtype: list
    """
    pa_id = (self.rtc_obj
                 ._pre_get_resource(projectarea_id=projectarea_id,
                                    projectarea_name=projectarea_name))
    self.log.info("Start to query workitems with query string: %s",
                  query_str)
    query_str = urlquote(query_str)
    rp = returned_properties
    return (self.rtc_obj
                ._get_paged_resources("Query",
                                      projectarea_id=pa_id,
                                      customized_attr=query_str,
                                      page_size="100",
                                      returned_properties=rp,
                                      archived=archived))
[ "def", "queryWorkitems", "(", "self", ",", "query_str", ",", "projectarea_id", "=", "None", ",", "projectarea_name", "=", "None", ",", "returned_properties", "=", "None", ",", "archived", "=", "False", ")", ":", "pa_id", "=", "(", "self", ".", "rtc_obj", ".", "_pre_get_resource", "(", "projectarea_id", "=", "projectarea_id", ",", "projectarea_name", "=", "projectarea_name", ")", ")", "self", ".", "log", ".", "info", "(", "\"Start to query workitems with query string: %s\"", ",", "query_str", ")", "query_str", "=", "urlquote", "(", "query_str", ")", "rp", "=", "returned_properties", "return", "(", "self", ".", "rtc_obj", ".", "_get_paged_resources", "(", "\"Query\"", ",", "projectarea_id", "=", "pa_id", ",", "customized_attr", "=", "query_str", ",", "page_size", "=", "\"100\"", ",", "returned_properties", "=", "rp", ",", "archived", "=", "archived", ")", ")" ]
45.789474
0.002251
def start(self, wait_for_completion=True, operation_timeout=None):
    """
    Start this CPC, using the HMC operation "Start CPC".

    Authorization requirements:

    * Object-access permission to this CPC.
    * Task permission for the "Start (start a single DPM system)" task.

    Parameters:

      wait_for_completion (bool):
        Boolean controlling whether this method should wait for completion
        of the requested asynchronous HMC operation, as follows:

        * If `True`, this method will wait for completion of the
          asynchronous job performing the operation.

        * If `False`, this method will return immediately once the HMC has
          accepted the request to perform the operation.

      operation_timeout (:term:`number`):
        Timeout in seconds, for waiting for completion of the asynchronous
        job performing the operation. The special value 0 means that no
        timeout is set. `None` means that the default async operation
        timeout of the session is used. If the timeout expires when
        `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout`
        is raised.

    Returns:

      `None` or :class:`~zhmcclient.Job`:

        If `wait_for_completion` is `True`, returns `None`.

        If `wait_for_completion` is `False`, returns a
        :class:`~zhmcclient.Job` object representing the asynchronously
        executing job on the HMC.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
      :exc:`~zhmcclient.OperationTimeout`: The timeout expired while
        waiting for completion of the operation.
    """
    result = self.manager.session.post(
        self.uri + '/operations/start',
        wait_for_completion=wait_for_completion,
        operation_timeout=operation_timeout)
    return result
[ "def", "start", "(", "self", ",", "wait_for_completion", "=", "True", ",", "operation_timeout", "=", "None", ")", ":", "result", "=", "self", ".", "manager", ".", "session", ".", "post", "(", "self", ".", "uri", "+", "'/operations/start'", ",", "wait_for_completion", "=", "wait_for_completion", ",", "operation_timeout", "=", "operation_timeout", ")", "return", "result" ]
37.90566
0.00097
def save_figures(image_path, fig_count, gallery_conf):
    """Save all open matplotlib figures of the example code-block

    Parameters
    ----------
    image_path : str
        Path where plots are saved (format string which accepts figure number)
    fig_count : int
        Previous figure number count. Figure numbering starts from this number

    Returns
    -------
    list of strings containing the full path to each figure
    """
    figure_list = []

    fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
    for fig_mngr in fig_managers:
        # Set the fig_num figure as the current figure as we can't
        # save a figure that's not the current figure.
        fig = plt.figure(fig_mngr.num)
        kwargs = {}
        to_rgba = matplotlib.colors.colorConverter.to_rgba
        for attr in ['facecolor', 'edgecolor']:
            fig_attr = getattr(fig, 'get_' + attr)()
            default_attr = matplotlib.rcParams['figure.' + attr]
            if to_rgba(fig_attr) != to_rgba(default_attr):
                kwargs[attr] = fig_attr

        current_fig = image_path.format(fig_count + fig_mngr.num)
        fig.savefig(current_fig, **kwargs)
        figure_list.append(current_fig)

    if gallery_conf.get('find_mayavi_figures', False):
        from mayavi import mlab
        e = mlab.get_engine()
        last_matplotlib_fig_num = len(figure_list)
        total_fig_num = last_matplotlib_fig_num + len(e.scenes)
        mayavi_fig_nums = range(last_matplotlib_fig_num, total_fig_num)

        for scene, mayavi_fig_num in zip(e.scenes, mayavi_fig_nums):
            current_fig = image_path.format(mayavi_fig_num)
            mlab.savefig(current_fig, figure=scene)
            # make sure the image is not too large
            scale_image(current_fig, current_fig, 850, 999)
            figure_list.append(current_fig)
        mlab.close(all=True)

    return figure_list
[ "def", "save_figures", "(", "image_path", ",", "fig_count", ",", "gallery_conf", ")", ":", "figure_list", "=", "[", "]", "fig_managers", "=", "matplotlib", ".", "_pylab_helpers", ".", "Gcf", ".", "get_all_fig_managers", "(", ")", "for", "fig_mngr", "in", "fig_managers", ":", "# Set the fig_num figure as the current figure as we can't", "# save a figure that's not the current figure.", "fig", "=", "plt", ".", "figure", "(", "fig_mngr", ".", "num", ")", "kwargs", "=", "{", "}", "to_rgba", "=", "matplotlib", ".", "colors", ".", "colorConverter", ".", "to_rgba", "for", "attr", "in", "[", "'facecolor'", ",", "'edgecolor'", "]", ":", "fig_attr", "=", "getattr", "(", "fig", ",", "'get_'", "+", "attr", ")", "(", ")", "default_attr", "=", "matplotlib", ".", "rcParams", "[", "'figure.'", "+", "attr", "]", "if", "to_rgba", "(", "fig_attr", ")", "!=", "to_rgba", "(", "default_attr", ")", ":", "kwargs", "[", "attr", "]", "=", "fig_attr", "current_fig", "=", "image_path", ".", "format", "(", "fig_count", "+", "fig_mngr", ".", "num", ")", "fig", ".", "savefig", "(", "current_fig", ",", "*", "*", "kwargs", ")", "figure_list", ".", "append", "(", "current_fig", ")", "if", "gallery_conf", ".", "get", "(", "'find_mayavi_figures'", ",", "False", ")", ":", "from", "mayavi", "import", "mlab", "e", "=", "mlab", ".", "get_engine", "(", ")", "last_matplotlib_fig_num", "=", "len", "(", "figure_list", ")", "total_fig_num", "=", "last_matplotlib_fig_num", "+", "len", "(", "e", ".", "scenes", ")", "mayavi_fig_nums", "=", "range", "(", "last_matplotlib_fig_num", ",", "total_fig_num", ")", "for", "scene", ",", "mayavi_fig_num", "in", "zip", "(", "e", ".", "scenes", ",", "mayavi_fig_nums", ")", ":", "current_fig", "=", "image_path", ".", "format", "(", "mayavi_fig_num", ")", "mlab", ".", "savefig", "(", "current_fig", ",", "figure", "=", "scene", ")", "# make sure the image is not too large", "scale_image", "(", "current_fig", ",", "current_fig", ",", "850", ",", "999", ")", "figure_list", ".", "append", "(", "current_fig", ")", "mlab", ".", "close", "(", "all", "=", "True", ")", "return", "figure_list" ]
38.102041
0.000522
def render_debug_img(
    file_name,
    page_num,
    elems,
    nodes=None,
    scaler=1,
    print_segments=False,
    print_curves=True,
    print_table_bbox=True,
    print_text_as_rect=True,
):
    """
    Shows an image rendering of the pdf page along with debugging info printed
    """
    if nodes is None:  # avoid a shared mutable default argument
        nodes = []
    # For debugging show the boolean pixels in black white grayscale
    height = scaler * int(elems.layout.height)
    width = scaler * int(elems.layout.width)

    debug_img, draw = create_img((0, 0, width, height))
    font = lazy_load_font()
    large_font = lazy_load_font(24)

    if print_curves:
        for i, c in enumerate(elems.curves):
            if len(c.pts) > 1:
                draw.polygon(c.pts, outline=blue)
            draw.rectangle(c.bbox, fill=blue)

    # for fig in elems.figures:
    #     draw.rectangle(fig.bbox, fill = blue)

    for i, m in enumerate(elems.mentions):
        if isinstance(m, LTAnno):
            continue
        if print_text_as_rect:
            fill = "pink" if hasattr(m, "feats") and m.feats["is_cell"] else green
            # fill = green
            draw.rectangle(m.bbox, fill=fill)
            # draw.text(center(m.bbox), str(i), black, font = font)  # Draw id
            draw.text(
                m.bbox[:2], m.get_text(), black, font=font
            )  # Draw mention content
        else:
            draw.text(m.bbox[:2], m.get_text(), "black", font=font)

    if print_segments:
        # draw skeleton for all segments
        for i, s in enumerate(elems.segments):
            draw.line(s.bbox, fill="black")

    if print_table_bbox:
        for node in nodes:
            is_table = node.is_table()
            color = "red" if is_table else "green"
            draw.rectangle(node.bbox, outline=color)
            if is_table:
                # text = 'Borderless' if node.is_borderless() else 'Bordered'
                text = "Table"
                draw.rectangle(node.bbox, outline=color)
                draw.text(node.bbox[:2], text, red, font=large_font)

    # Water mark with file name so we can identify among multiple images
    if file_name and page_num is not None:
        water_mark = (
            file_name + ":page " + str(page_num + 1) + "@%dx%d" % (width, height)
        )
        draw.text((10, 10), water_mark, black, font=font)
    debug_img.show()
    return debug_img
[ "def", "render_debug_img", "(", "file_name", ",", "page_num", ",", "elems", ",", "nodes", "=", "[", "]", ",", "scaler", "=", "1", ",", "print_segments", "=", "False", ",", "print_curves", "=", "True", ",", "print_table_bbox", "=", "True", ",", "print_text_as_rect", "=", "True", ",", ")", ":", "# For debugging show the boolean pixels in black white grayscale", "height", "=", "scaler", "*", "int", "(", "elems", ".", "layout", ".", "height", ")", "width", "=", "scaler", "*", "int", "(", "elems", ".", "layout", ".", "width", ")", "debug_img", ",", "draw", "=", "create_img", "(", "(", "0", ",", "0", ",", "width", ",", "height", ")", ")", "font", "=", "lazy_load_font", "(", ")", "large_font", "=", "lazy_load_font", "(", "24", ")", "if", "print_curves", ":", "for", "i", ",", "c", "in", "enumerate", "(", "elems", ".", "curves", ")", ":", "if", "len", "(", "c", ".", "pts", ")", ">", "1", ":", "draw", ".", "polygon", "(", "c", ".", "pts", ",", "outline", "=", "blue", ")", "draw", ".", "rectangle", "(", "c", ".", "bbox", ",", "fill", "=", "blue", ")", "# for fig in elems.figures:", "# draw.rectangle(fig.bbox, fill = blue)", "for", "i", ",", "m", "in", "enumerate", "(", "elems", ".", "mentions", ")", ":", "if", "isinstance", "(", "m", ",", "LTAnno", ")", ":", "continue", "if", "print_text_as_rect", ":", "fill", "=", "\"pink\"", "if", "hasattr", "(", "m", ",", "\"feats\"", ")", "and", "m", ".", "feats", "[", "\"is_cell\"", "]", "else", "green", "# fill = green", "draw", ".", "rectangle", "(", "m", ".", "bbox", ",", "fill", "=", "fill", ")", "# draw.text(center(m.bbox), str(i), black, font = font) # Draw id", "draw", ".", "text", "(", "m", ".", "bbox", "[", ":", "2", "]", ",", "m", ".", "get_text", "(", ")", ",", "black", ",", "font", "=", "font", ")", "# Draw mention content", "else", ":", "draw", ".", "text", "(", "m", ".", "bbox", "[", ":", "2", "]", ",", "m", ".", "get_text", "(", ")", ",", "\"black\"", ",", "font", "=", "font", ")", "if", "print_segments", ":", "# draw skeleton for all segments", "for", "i", ",", "s", "in", "enumerate", "(", "elems", ".", "segments", ")", ":", "draw", ".", "line", "(", "s", ".", "bbox", ",", "fill", "=", "\"black\"", ")", "if", "print_table_bbox", ":", "for", "node", "in", "nodes", ":", "is_table", "=", "node", ".", "is_table", "(", ")", "color", "=", "\"red\"", "if", "is_table", "else", "\"green\"", "draw", ".", "rectangle", "(", "node", ".", "bbox", ",", "outline", "=", "color", ")", "if", "is_table", ":", "# text = 'Borderless' if node.is_borderless() else 'Bordered'", "text", "=", "\"Table\"", "draw", ".", "rectangle", "(", "node", ".", "bbox", ",", "outline", "=", "color", ")", "draw", ".", "text", "(", "node", ".", "bbox", "[", ":", "2", "]", ",", "text", ",", "red", ",", "font", "=", "large_font", ")", "# Water mark with file name so we can identify among multiple images", "if", "file_name", "and", "page_num", "is", "not", "None", ":", "water_mark", "=", "(", "file_name", "+", "\":page \"", "+", "str", "(", "page_num", "+", "1", ")", "+", "\"@%dx%d\"", "%", "(", "width", ",", "height", ")", ")", "draw", ".", "text", "(", "(", "10", ",", "10", ")", ",", "water_mark", ",", "black", ",", "font", "=", "font", ")", "debug_img", ".", "show", "(", ")", "return", "debug_img" ]
34.147059
0.001256
async def wait_for_group(self, container, networkid, timeout=120):
    """
    Wait for a VXLAN group to be created
    """
    if networkid in self._current_groups:
        return self._current_groups[networkid]
    else:
        if not self._connection.connected:
            raise ConnectionResetException
        groupchanged = VXLANGroupChanged.createMatcher(
            self._connection, networkid, VXLANGroupChanged.UPDATED)
        conn_down = self._connection.protocol.statematcher(self._connection)
        timeout_, ev, m = await container.wait_with_timeout(
            timeout, groupchanged, conn_down)
        if timeout_:
            raise ValueError('VXLAN group is still not created after a long time')
        elif m is conn_down:
            raise ConnectionResetException
        else:
            return ev.physicalportid
[ "async", "def", "wait_for_group", "(", "self", ",", "container", ",", "networkid", ",", "timeout", "=", "120", ")", ":", "if", "networkid", "in", "self", ".", "_current_groups", ":", "return", "self", ".", "_current_groups", "[", "networkid", "]", "else", ":", "if", "not", "self", ".", "_connection", ".", "connected", ":", "raise", "ConnectionResetException", "groupchanged", "=", "VXLANGroupChanged", ".", "createMatcher", "(", "self", ".", "_connection", ",", "networkid", ",", "VXLANGroupChanged", ".", "UPDATED", ")", "conn_down", "=", "self", ".", "_connection", ".", "protocol", ".", "statematcher", "(", "self", ".", "_connection", ")", "timeout_", ",", "ev", ",", "m", "=", "await", "container", ".", "wait_with_timeout", "(", "timeout", ",", "groupchanged", ",", "conn_down", ")", "if", "timeout_", ":", "raise", "ValueError", "(", "'VXLAN group is still not created after a long time'", ")", "elif", "m", "is", "conn_down", ":", "raise", "ConnectionResetException", "else", ":", "return", "ev", ".", "physicalportid" ]
48.333333
0.009019
def fields(depth, Rp, Rm, Gam, lrec, lsrc, zsrc, TM):
    r"""Calculate Pu+, Pu-, Pd+, Pd-.

    This is a modified version of empymod.kernel.fields(). See the original
    version for more information.
    """
    # Booleans if src in first or last layer; swapped if up=True
    first_layer = lsrc == 0
    last_layer = lsrc == depth.size-1

    # Depths; dp and dm are swapped if up=True
    if lsrc != depth.size-1:
        ds = depth[lsrc+1]-depth[lsrc]
        dp = depth[lsrc+1]-zsrc
    dm = zsrc-depth[lsrc]

    # Rm and Rp; swapped if up=True
    Rmp = Rm
    Rpm = Rp

    # Boolean if plus or minus has to be calculated
    if TM:
        plus = False
    else:
        plus = True

    # Sign-switches
    pm = 1  # + if plus=True, - if plus=False
    if not plus:
        pm = -1

    # Calculate down- and up-going fields
    for up in [False, True]:

        # No upgoing field if rec is in last layer or below src
        if up and (lrec == depth.size-1 or lrec > lsrc):
            Puu = np.full_like(Gam[:, :, lsrc, :], 0+0j)
            Pud = np.full_like(Gam[:, :, lsrc, :], 0+0j)
            continue

        # No downgoing field if rec is in first layer or above src
        if not up and (lrec == 0 or lrec < lsrc):
            Pdu = np.full_like(Gam[:, :, lsrc, :], 0+0j)
            Pdd = np.full_like(Gam[:, :, lsrc, :], 0+0j)
            continue

        # Swaps if up=True
        if up:
            dp, dm = dm, dp
            Rmp, Rpm = Rpm, Rmp
            first_layer, last_layer = last_layer, first_layer

        # Calculate Pu+, Pu-, Pd+, Pd-; rec in src layer; Eqs 81/82, A-8/A-9
        iGam = Gam[:, :, lsrc, :]
        if last_layer:  # If src/rec are in top (up) or bottom (down) layer
            Pd = Rmp*np.exp(-iGam*dm)
            Pu = np.full_like(Gam[:, :, lsrc, :], 0+0j)
        else:           # If src and rec are in any layer in between
            Ms = 1 - Rmp*Rpm*np.exp(-2*iGam*ds)
            Pd = Rmp/Ms*np.exp(-iGam*dm)
            Pu = Rmp/Ms*pm*Rpm*np.exp(-iGam*(ds+dp))

        # Store P's
        if up:
            Puu = Pu
            Pud = Pd
        else:
            Pdu = Pd
            Pdd = Pu

    # Return fields (up- and downgoing)
    return Puu, Pud, Pdu, Pdd
[ "def", "fields", "(", "depth", ",", "Rp", ",", "Rm", ",", "Gam", ",", "lrec", ",", "lsrc", ",", "zsrc", ",", "TM", ")", ":", "# Booleans if src in first or last layer; swapped if up=True", "first_layer", "=", "lsrc", "==", "0", "last_layer", "=", "lsrc", "==", "depth", ".", "size", "-", "1", "# Depths; dp and dm are swapped if up=True", "if", "lsrc", "!=", "depth", ".", "size", "-", "1", ":", "ds", "=", "depth", "[", "lsrc", "+", "1", "]", "-", "depth", "[", "lsrc", "]", "dp", "=", "depth", "[", "lsrc", "+", "1", "]", "-", "zsrc", "dm", "=", "zsrc", "-", "depth", "[", "lsrc", "]", "# Rm and Rp; swapped if up=True", "Rmp", "=", "Rm", "Rpm", "=", "Rp", "# Boolean if plus or minus has to be calculated", "if", "TM", ":", "plus", "=", "False", "else", ":", "plus", "=", "True", "# Sign-switches", "pm", "=", "1", "# + if plus=True, - if plus=False", "if", "not", "plus", ":", "pm", "=", "-", "1", "# Calculate down- and up-going fields", "for", "up", "in", "[", "False", ",", "True", "]", ":", "# No upgoing field if rec is in last layer or below src", "if", "up", "and", "(", "lrec", "==", "depth", ".", "size", "-", "1", "or", "lrec", ">", "lsrc", ")", ":", "Puu", "=", "np", ".", "full_like", "(", "Gam", "[", ":", ",", ":", ",", "lsrc", ",", ":", "]", ",", "0", "+", "0j", ")", "Pud", "=", "np", ".", "full_like", "(", "Gam", "[", ":", ",", ":", ",", "lsrc", ",", ":", "]", ",", "0", "+", "0j", ")", "continue", "# No downgoing field if rec is in first layer or above src", "if", "not", "up", "and", "(", "lrec", "==", "0", "or", "lrec", "<", "lsrc", ")", ":", "Pdu", "=", "np", ".", "full_like", "(", "Gam", "[", ":", ",", ":", ",", "lsrc", ",", ":", "]", ",", "0", "+", "0j", ")", "Pdd", "=", "np", ".", "full_like", "(", "Gam", "[", ":", ",", ":", ",", "lsrc", ",", ":", "]", ",", "0", "+", "0j", ")", "continue", "# Swaps if up=True", "if", "up", ":", "dp", ",", "dm", "=", "dm", ",", "dp", "Rmp", ",", "Rpm", "=", "Rpm", ",", "Rmp", "first_layer", ",", "last_layer", "=", "last_layer", ",", "first_layer", "# Calculate Pu+, Pu-, Pd+, Pd-; rec in src layer; Eqs 81/82, A-8/A-9", "iGam", "=", "Gam", "[", ":", ",", ":", ",", "lsrc", ",", ":", "]", "if", "last_layer", ":", "# If src/rec are in top (up) or bottom (down) layer", "Pd", "=", "Rmp", "*", "np", ".", "exp", "(", "-", "iGam", "*", "dm", ")", "Pu", "=", "np", ".", "full_like", "(", "Gam", "[", ":", ",", ":", ",", "lsrc", ",", ":", "]", ",", "0", "+", "0j", ")", "else", ":", "# If src and rec are in any layer in between", "Ms", "=", "1", "-", "Rmp", "*", "Rpm", "*", "np", ".", "exp", "(", "-", "2", "*", "iGam", "*", "ds", ")", "Pd", "=", "Rmp", "/", "Ms", "*", "np", ".", "exp", "(", "-", "iGam", "*", "dm", ")", "Pu", "=", "Rmp", "/", "Ms", "*", "pm", "*", "Rpm", "*", "np", ".", "exp", "(", "-", "iGam", "*", "(", "ds", "+", "dp", ")", ")", "# Store P's", "if", "up", ":", "Puu", "=", "Pu", "Pud", "=", "Pd", "else", ":", "Pdu", "=", "Pd", "Pdd", "=", "Pu", "# Return fields (up- and downgoing)", "return", "Puu", ",", "Pud", ",", "Pdu", ",", "Pdd" ]
30.152778
0.000446
def _conflict_bail(VC_err, version):
    """
    Setuptools was imported prior to invocation, so it is
    unsafe to unload it. Bail out.
    """
    conflict_tmpl = textwrap.dedent("""
        The required version of setuptools (>={version}) is not available,
        and can't be installed while this script is running. Please
        install a more recent version first, using
        'easy_install -U setuptools'.

        (Currently using {VC_err.args[0]!r})
        """)
    msg = conflict_tmpl.format(**locals())
    sys.stderr.write(msg)
    sys.exit(2)
[ "def", "_conflict_bail", "(", "VC_err", ",", "version", ")", ":", "conflict_tmpl", "=", "textwrap", ".", "dedent", "(", "\"\"\"\n The required version of setuptools (>={version}) is not available,\n and can't be installed while this script is running. Please\n install a more recent version first, using\n 'easy_install -U setuptools'.\n\n (Currently using {VC_err.args[0]!r})\n \"\"\"", ")", "msg", "=", "conflict_tmpl", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "sys", ".", "stderr", ".", "write", "(", "msg", ")", "sys", ".", "exit", "(", "2", ")" ]
34.125
0.001783
def circular(cls, shape, pixel_scale, radius_arcsec, centre=(0., 0.), invert=False):
    """Setup a mask where unmasked pixels are within a circle of an input
    arc second radius and centre.

    Parameters
    ----------
    shape: (int, int)
        The (y,x) shape of the mask in units of pixels.
    pixel_scale: float
        The arc-second to pixel conversion factor of each pixel.
    radius_arcsec : float
        The radius (in arc seconds) of the circle within which pixels are
        unmasked.
    centre: (float, float)
        The centre of the circle used to mask pixels.
    """
    mask = mask_util.mask_circular_from_shape_pixel_scale_and_radius(
        shape, pixel_scale, radius_arcsec, centre)
    if invert:
        mask = np.invert(mask)
    return cls(array=mask.astype('bool'), pixel_scale=pixel_scale)
[ "def", "circular", "(", "cls", ",", "shape", ",", "pixel_scale", ",", "radius_arcsec", ",", "centre", "=", "(", "0.", ",", "0.", ")", ",", "invert", "=", "False", ")", ":", "mask", "=", "mask_util", ".", "mask_circular_from_shape_pixel_scale_and_radius", "(", "shape", ",", "pixel_scale", ",", "radius_arcsec", ",", "centre", ")", "if", "invert", ":", "mask", "=", "np", ".", "invert", "(", "mask", ")", "return", "cls", "(", "array", "=", "mask", ".", "astype", "(", "'bool'", ")", ",", "pixel_scale", "=", "pixel_scale", ")" ]
50.722222
0.008602
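A sketch of building a circular mask with the classmethod above, assuming the enclosing class is named Mask (the shape and scales are arbitrary):

    # 5x5 pixel grid at 0.1"/pixel; pixels inside a 0.2" radius are unmasked.
    mask = Mask.circular(shape=(5, 5), pixel_scale=0.1, radius_arcsec=0.2)
    print(mask)   # boolean array; unmasked entries fall inside the circle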
def distance_correlation_sqr(x, y, **kwargs):
    """
    distance_correlation_sqr(x, y, *, exponent=1)

    Computes the usual (biased) estimator for the squared distance
    correlation between two random vectors.

    Parameters
    ----------
    x: array_like
        First random vector. The columns correspond with the individual
        random variables while the rows are individual instances of the
        random vector.
    y: array_like
        Second random vector. The columns correspond with the individual
        random variables while the rows are individual instances of the
        random vector.
    exponent: float
        Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
        Equivalently, it is twice the Hurst parameter of fractional
        Brownian motion.

    Returns
    -------
    numpy scalar
        Value of the biased estimator of the squared distance correlation.

    See Also
    --------
    distance_correlation
    u_distance_correlation_sqr

    Notes
    -----
    The algorithm uses the fast distance covariance algorithm proposed in
    :cite:`b-fast_distance_correlation` when possible.

    Examples
    --------
    >>> import numpy as np
    >>> import dcor
    >>> a = np.array([[1, 2, 3, 4],
    ...               [5, 6, 7, 8],
    ...               [9, 10, 11, 12],
    ...               [13, 14, 15, 16]])
    >>> b = np.array([[1], [0], [0], [1]])
    >>> dcor.distance_correlation_sqr(a, a)
    1.0
    >>> dcor.distance_correlation_sqr(a, b) # doctest: +ELLIPSIS
    0.2773500...
    >>> dcor.distance_correlation_sqr(b, b)
    1.0
    >>> dcor.distance_correlation_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
    0.4493308...

    """
    if _can_use_fast_algorithm(x, y, **kwargs):
        return _distance_correlation_sqr_fast(x, y)
    else:
        return _distance_correlation_sqr_naive(x, y, **kwargs)
[ "def", "distance_correlation_sqr", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "if", "_can_use_fast_algorithm", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "return", "_distance_correlation_sqr_fast", "(", "x", ",", "y", ")", "else", ":", "return", "_distance_correlation_sqr_naive", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")" ]
31.258621
0.000535
def create_key_file(path):
    """
    Creates a new encryption key in the path provided and sets the file
    permissions.

    Setting the file permissions currently does not work on Windows
    platforms because of the differences in how file permissions are read
    and modified.
    """
    iv = "{}{}".format(os.urandom(32), time.time())
    new_key = generate_key(ensure_bytes(iv))
    with open(path, "wb") as f:
        f.write(base64.b64encode(new_key))
    os.chmod(path, 0o400)
[ "def", "create_key_file", "(", "path", ")", ":", "iv", "=", "\"{}{}\"", ".", "format", "(", "os", ".", "urandom", "(", "32", ")", ",", "time", ".", "time", "(", ")", ")", "new_key", "=", "generate_key", "(", "ensure_bytes", "(", "iv", ")", ")", "with", "open", "(", "path", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "base64", ".", "b64encode", "(", "new_key", ")", ")", "os", ".", "chmod", "(", "path", ",", "0o400", ")" ]
39.583333
0.002058
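A short usage sketch; the path is a placeholder, and reading back assumes the base64 layout written above:

    import base64

    create_key_file('/tmp/app.key')        # writes a base64-encoded key, mode 0o400
    with open('/tmp/app.key', 'rb') as f:
        key = base64.b64decode(f.read())   # recover the raw key bytes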
def simplify_specifiers(spec):
    """Try to simplify a SpecifierSet by combining redundant specifiers."""
    def key(s):
        return (s.version, 1 if s.operator in ['>=', '<'] else 2)

    def in_bounds(v, lo, hi):
        if lo and v not in lo:
            return False
        if hi and v not in hi:
            return False
        return True

    def err(reason='inconsistent'):
        return ValueError('{} specifier set {}'.format(reason, spec))

    gt = None
    lt = None
    eq = None
    ne = []
    for i in spec:
        if i.operator == '==':
            if eq is None:
                eq = i
            elif eq != i:  # pragma: no branch
                raise err()
        elif i.operator == '!=':
            ne.append(i)
        elif i.operator in ['>', '>=']:
            gt = i if gt is None else max(gt, i, key=key)
        elif i.operator in ['<', '<=']:
            lt = i if lt is None else min(lt, i, key=key)
        else:
            raise err('invalid')

    ne = [i for i in ne if in_bounds(i.version, gt, lt)]
    if eq:
        if (
                any(i.version in eq for i in ne)
                or not in_bounds(eq.version, gt, lt)):
            raise err()
        return SpecifierSet(str(eq))
    if lt and gt:
        if lt.version not in gt or gt.version not in lt:
            raise err()
        if (
                gt.version == lt.version
                and gt.operator == '>='
                and lt.operator == '<='):
            return SpecifierSet('=={}'.format(gt.version))
    return SpecifierSet(
        ','.join(str(i) for i in chain(iterate(gt), iterate(lt), ne))
    )
[ "def", "simplify_specifiers", "(", "spec", ")", ":", "def", "key", "(", "s", ")", ":", "return", "(", "s", ".", "version", ",", "1", "if", "s", ".", "operator", "in", "[", "'>='", ",", "'<'", "]", "else", "2", ")", "def", "in_bounds", "(", "v", ",", "lo", ",", "hi", ")", ":", "if", "lo", "and", "v", "not", "in", "lo", ":", "return", "False", "if", "hi", "and", "v", "not", "in", "hi", ":", "return", "False", "return", "True", "def", "err", "(", "reason", "=", "'inconsistent'", ")", ":", "return", "ValueError", "(", "'{} specifier set {}'", ".", "format", "(", "reason", ",", "spec", ")", ")", "gt", "=", "None", "lt", "=", "None", "eq", "=", "None", "ne", "=", "[", "]", "for", "i", "in", "spec", ":", "if", "i", ".", "operator", "==", "'=='", ":", "if", "eq", "is", "None", ":", "eq", "=", "i", "elif", "eq", "!=", "i", ":", "# pragma: no branch", "raise", "err", "(", ")", "elif", "i", ".", "operator", "==", "'!='", ":", "ne", ".", "append", "(", "i", ")", "elif", "i", ".", "operator", "in", "[", "'>'", ",", "'>='", "]", ":", "gt", "=", "i", "if", "gt", "is", "None", "else", "max", "(", "gt", ",", "i", ",", "key", "=", "key", ")", "elif", "i", ".", "operator", "in", "[", "'<'", ",", "'<='", "]", ":", "lt", "=", "i", "if", "lt", "is", "None", "else", "min", "(", "lt", ",", "i", ",", "key", "=", "key", ")", "else", ":", "raise", "err", "(", "'invalid'", ")", "ne", "=", "[", "i", "for", "i", "in", "ne", "if", "in_bounds", "(", "i", ".", "version", ",", "gt", ",", "lt", ")", "]", "if", "eq", ":", "if", "(", "any", "(", "i", ".", "version", "in", "eq", "for", "i", "in", "ne", ")", "or", "not", "in_bounds", "(", "eq", ".", "version", ",", "gt", ",", "lt", ")", ")", ":", "raise", "err", "(", ")", "return", "SpecifierSet", "(", "str", "(", "eq", ")", ")", "if", "lt", "and", "gt", ":", "if", "lt", ".", "version", "not", "in", "gt", "or", "gt", ".", "version", "not", "in", "lt", ":", "raise", "err", "(", ")", "if", "(", "gt", ".", "version", "==", "lt", ".", "version", "and", "gt", ".", "operator", "==", "'>='", "and", "lt", ".", "operator", "==", "'<='", ")", ":", "return", "SpecifierSet", "(", "'=={}'", ".", "format", "(", "gt", ".", "version", ")", ")", "return", "SpecifierSet", "(", "','", ".", "join", "(", "str", "(", "i", ")", "for", "i", "in", "chain", "(", "iterate", "(", "gt", ")", ",", "iterate", "(", "lt", ")", ",", "ne", ")", ")", ")" ]
29.403846
0.001899
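A minimal usage sketch for simplify_specifiers, assuming SpecifierSet comes from packaging.specifiers and that the helpers chain (itertools) and iterate (yield the value when it is not None) are in scope as in the module above:

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet('>=1.0,>=1.2,<2.0,!=0.9')
    # >=1.0 is dominated by >=1.2, and !=0.9 falls outside the bounds,
    # so both are dropped.
    print(simplify_specifiers(spec))  # equivalent to <2.0,>=1.2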
def addItem(self, child, href): """Add a new item (a catalogue or resource) as a child of this catalogue.""" assert isinstance(child, Base), "child must be a hypercat Catalogue or Resource" child.setHref(href) for item in self.items: assert item.href != href, "All items in a catalogue must have unique hrefs : "+href self.items += [child] # Add new return
[ "def", "addItem", "(", "self", ",", "child", ",", "href", ")", ":", "assert", "isinstance", "(", "child", ",", "Base", ")", ",", "\"child must be a hypercat Catalogue or Resource\"", "child", ".", "setHref", "(", "href", ")", "for", "item", "in", "self", ".", "items", ":", "assert", "item", ".", "href", "!=", "href", ",", "\"All items in a catalogue must have unique hrefs : \"", "+", "href", "self", ".", "items", "+=", "[", "child", "]", "# Add new", "return" ]
52.375
0.011737
async def aiter(*args): """Return an iterator object. Args: obj: An object that implements the __iter__ or __aiter__ method. sentinel: An optional sentinel value to look for while iterating. Return: iterable: Some iterable that provides a __anext__ method. Raises: TypeError: If only the object is given and it is not iterable. TypeError: If two arguments are given and the first is not an async callable. This function behaves very differently based on the number of arguments given. If only the first argument is present the method will return an async iterable that implements the __anext__ method by calling the given object's __aiter__. If the object does not define __aiter__ but does define __iter__ then the result will be an AsyncIterWrapper that contains the original iterable. This form of the function can be used to coerce all iterables, async or not, into async iterables for interoperability. If the second argument is given then the first argument _must_ be an async callable. The returned value will still be an iterable implementing the __aiter__ method, but each call to that method will call the underlying async callable. If the value returned from the async callable matches the sentinel value then StopAsyncIteration is raised. Otherwise the value is returned. """ if not args: raise TypeError('aiter() expected at least 1 arguments, got 0') if len(args) > 2: raise TypeError( 'aiter() expected at most 2 arguments, got {}'.format(len(args)) ) if len(args) == 2: func, sentinel = args if not isinstance(func, types.CoroutineType): raise TypeError('aiter(v, w): v must be async callable') # TODO: repeating call thing raise NotImplementedError() obj = args[0] if hasattr(obj, '__anext__'): return obj if hasattr(obj, '__aiter__'): return (await obj.__aiter__()) if hasattr(obj, '__iter__') or hasattr(obj, '__next__'): return AsyncIterWrapper(iter(obj)) raise TypeError("'{}' object is not iterable".format(type(args[0])))
[ "async", "def", "aiter", "(", "*", "args", ")", ":", "if", "not", "args", ":", "raise", "TypeError", "(", "'aiter() expected at least 1 arguments, got 0'", ")", "if", "len", "(", "args", ")", ">", "2", ":", "raise", "TypeError", "(", "'aiter() expected at most 2 arguments, got {}'", ".", "format", "(", "len", "(", "args", ")", ")", ")", "if", "len", "(", "args", ")", "==", "2", ":", "func", ",", "sentinel", "=", "args", "if", "not", "isinstance", "(", "func", ",", "types", ".", "CoroutineType", ")", ":", "raise", "TypeError", "(", "'aiter(v, w): v must be async callable'", ")", "# TODO: repeating call thing", "raise", "NotImplementedError", "(", ")", "obj", "=", "args", "[", "0", "]", "if", "hasattr", "(", "obj", ",", "'__anext__'", ")", ":", "return", "obj", "if", "hasattr", "(", "obj", ",", "'__aiter__'", ")", ":", "return", "(", "await", "obj", ".", "__aiter__", "(", ")", ")", "if", "hasattr", "(", "obj", ",", "'__iter__'", ")", "or", "hasattr", "(", "obj", ",", "'__next__'", ")", ":", "return", "AsyncIterWrapper", "(", "iter", "(", "obj", ")", ")", "raise", "TypeError", "(", "\"'{}' object is not iterable\"", ".", "format", "(", "type", "(", "args", "[", "0", "]", ")", ")", ")" ]
34.253968
0.00045
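A usage sketch for the single-argument form of aiter, coercing a plain list into async iteration; AsyncIterWrapper is the wrapper class the implementation above relies on:

    import asyncio

    async def consume():
        it = await aiter([1, 2, 3])  # plain iterable -> AsyncIterWrapper
        while True:
            try:
                value = await it.__anext__()
            except StopAsyncIteration:
                break
            print(value)

    asyncio.get_event_loop().run_until_complete(consume())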
def delete_vpcid_for_switch(vpc_id, switch_ip): """Removes unused vpcid for a switch. :param vpc_id: vpc id to remove :param switch_ip: ip address of the switch """ LOG.debug("delete_vpcid_for_switch called") session = bc.get_writer_session() vpc = _lookup_one_vpc_allocs(vpc_id=vpc_id, switch_ip=switch_ip, active=False) session.delete(vpc) session.flush()
[ "def", "delete_vpcid_for_switch", "(", "vpc_id", ",", "switch_ip", ")", ":", "LOG", ".", "debug", "(", "\"delete_vpcid_for_switch called\"", ")", "session", "=", "bc", ".", "get_writer_session", "(", ")", "vpc", "=", "_lookup_one_vpc_allocs", "(", "vpc_id", "=", "vpc_id", ",", "switch_ip", "=", "switch_ip", ",", "active", "=", "False", ")", "session", ".", "delete", "(", "vpc", ")", "session", ".", "flush", "(", ")" ]
29.866667
0.002165
def _setup(self): """Prepare for code generation by setting up root clock nodes. These nodes are subsequently used as the basis for all clock operations. """ # Create a root system ticks and user configurable ticks systick = self.allocator.allocate_stream(DataStream.CounterType, attach=True) fasttick = self.allocator.allocate_stream(DataStream.CounterType, attach=True) user1tick = self.allocator.allocate_stream(DataStream.CounterType, attach=True) user2tick = self.allocator.allocate_stream(DataStream.CounterType, attach=True) self.sensor_graph.add_node("({} always) => {} using copy_all_a".format(system_tick, systick)) self.sensor_graph.add_node("({} always) => {} using copy_all_a".format(fast_tick, fasttick)) self.sensor_graph.add_config(SlotIdentifier.FromString('controller'), config_fast_tick_secs, 'uint32_t', 1) self.sensor_graph.add_node("({} always) => {} using copy_all_a".format(tick_1, user1tick)) self.sensor_graph.add_node("({} always) => {} using copy_all_a".format(tick_2, user2tick)) self.system_tick = systick self.fast_tick = fasttick self.user1_tick = user1tick self.user2_tick = user2tick
[ "def", "_setup", "(", "self", ")", ":", "# Create a root system ticks and user configurable ticks", "systick", "=", "self", ".", "allocator", ".", "allocate_stream", "(", "DataStream", ".", "CounterType", ",", "attach", "=", "True", ")", "fasttick", "=", "self", ".", "allocator", ".", "allocate_stream", "(", "DataStream", ".", "CounterType", ",", "attach", "=", "True", ")", "user1tick", "=", "self", ".", "allocator", ".", "allocate_stream", "(", "DataStream", ".", "CounterType", ",", "attach", "=", "True", ")", "user2tick", "=", "self", ".", "allocator", ".", "allocate_stream", "(", "DataStream", ".", "CounterType", ",", "attach", "=", "True", ")", "self", ".", "sensor_graph", ".", "add_node", "(", "\"({} always) => {} using copy_all_a\"", ".", "format", "(", "system_tick", ",", "systick", ")", ")", "self", ".", "sensor_graph", ".", "add_node", "(", "\"({} always) => {} using copy_all_a\"", ".", "format", "(", "fast_tick", ",", "fasttick", ")", ")", "self", ".", "sensor_graph", ".", "add_config", "(", "SlotIdentifier", ".", "FromString", "(", "'controller'", ")", ",", "config_fast_tick_secs", ",", "'uint32_t'", ",", "1", ")", "self", ".", "sensor_graph", ".", "add_node", "(", "\"({} always) => {} using copy_all_a\"", ".", "format", "(", "tick_1", ",", "user1tick", ")", ")", "self", ".", "sensor_graph", ".", "add_node", "(", "\"({} always) => {} using copy_all_a\"", ".", "format", "(", "tick_2", ",", "user2tick", ")", ")", "self", ".", "system_tick", "=", "systick", "self", ".", "fast_tick", "=", "fasttick", "self", ".", "user1_tick", "=", "user1tick", "self", ".", "user2_tick", "=", "user2tick" ]
56.181818
0.009547
def sample_with_temperature(x, dim, temperature=1.0, dtype=tf.int32, name=None): """Either argmax or random sampling. Args: x: a Tensor. dim: a Dimension in x.shape.dims temperature: a float 0.0=argmax 1.0=random dtype: a tf.dtype (for the output) name: an optional string Returns: a Tensor with type dtype. """ dim = convert_to_dimension(dim) with tf.name_scope(name, default_name="sample_with_temperature"): if temperature != 0.0: # gumbel trick. # Note: we don't want to generate 0 or 1 because: # * -log(-log(0)) is -infinity # * -log(-log(1)) is +infinity. # np.finfo(x.dtype.as_numpy_dtype).tiny doesn't work on bfloat16 tiny_val = 1e-9 g = -log(-log( random_uniform( x.mesh, x.shape, minval=tiny_val, maxval=1., dtype=x.dtype))) x += g * temperature return argmax(x, dim, dtype, name)
[ "def", "sample_with_temperature", "(", "x", ",", "dim", ",", "temperature", "=", "1.0", ",", "dtype", "=", "tf", ".", "int32", ",", "name", "=", "None", ")", ":", "dim", "=", "convert_to_dimension", "(", "dim", ")", "with", "tf", ".", "name_scope", "(", "name", ",", "default_name", "=", "\"sample_with_temperature\"", ")", ":", "if", "temperature", "!=", "0.0", ":", "# gumbel trick.", "# Note: we don't want to generate 0 or 1 because:", "# * -log(-log(0)) is -infinity", "# * -log(-log(1)) is +infinity.", "# np.finfo(x.dtype.as_numpy_dtype).tiny doesn't work on bfloat16", "tiny_val", "=", "1e-9", "g", "=", "-", "log", "(", "-", "log", "(", "random_uniform", "(", "x", ".", "mesh", ",", "x", ".", "shape", ",", "minval", "=", "tiny_val", ",", "maxval", "=", "1.", ",", "dtype", "=", "x", ".", "dtype", ")", ")", ")", "x", "+=", "g", "*", "temperature", "return", "argmax", "(", "x", ",", "dim", ",", "dtype", ",", "name", ")" ]
30.193548
0.013458
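For intuition, the same Gumbel-max trick in plain NumPy (illustrative only; the function above operates on mesh-tensorflow tensors and dimensions, not arrays):

    import numpy as np

    def np_sample_with_temperature(logits, temperature=1.0):
        if temperature == 0.0:
            return np.argmax(logits, axis=-1)
        tiny_val = 1e-9  # avoid log(0) at either end of the uniform draw
        g = -np.log(-np.log(np.random.uniform(tiny_val, 1.0, size=logits.shape)))
        return np.argmax(logits + g * temperature, axis=-1)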
def train(model, X_train=None, Y_train=None, save=False, predictions_adv=None, evaluate=None, args=None, rng=None, var_list=None, attack=None, attack_args=None): """ Train a TF Eager model :param model: cleverhans.model.Model :param X_train: numpy array with training inputs :param Y_train: numpy array with training outputs :param save: boolean controlling the save operation :param predictions_adv: if set with the adversarial example tensor, will run adversarial training :param evaluate: function that is run after each training iteration (typically to display the test/validation accuracy). :param args: dict or argparse `Namespace` object. Should contain `nb_epochs`, `learning_rate`, `batch_size` If save is True, should also contain 'train_dir' and 'filename' :param rng: Instance of numpy.random.RandomState :param var_list: List of variables to train. :param attack: Instance of the class cleverhans.attacks.attacks_eager :param attack_args: Parameters required for the attack. :return: True if model trained """ assert isinstance(model, Model) args = _ArgsWrapper(args or {}) if ((attack is None) != (attack_args is None)): raise ValueError("attack and attack_args must be " "passed together.") if X_train is None or Y_train is None: raise ValueError("X_train argument and Y_train argument " "must be supplied.") # Check that necessary arguments were given (see doc above) assert args.nb_epochs, "Number of epochs was not given in args dict" assert args.learning_rate, "Learning rate was not given in args dict" assert args.batch_size, "Batch size was not given in args dict" if save: assert args.train_dir, "Directory for save was not given in args dict" assert args.filename, "Filename for save was not given in args dict" if rng is None: rng = np.random.RandomState() # Optimizer tfe = tf.contrib.eager optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate) batch_x = tfe.Variable(X_train[0:args.batch_size], dtype=tf.float32) batch_y = tfe.Variable(Y_train[0:args.batch_size], dtype=tf.float32) # One epoch of training. for epoch in xrange(args.nb_epochs): # Compute number of batches nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size)) assert nb_batches * args.batch_size >= len(X_train) # Indices to shuffle training set index_shuf = list(range(len(X_train))) rng.shuffle(index_shuf) prev = time.time() for batch in range(nb_batches): # Compute batch start and end indices start, end = batch_indices( batch, len(X_train), args.batch_size) # Perform one training step tf.assign(batch_x, X_train[index_shuf[start:end]]) tf.assign(batch_y, Y_train[index_shuf[start:end]]) # Compute grads with tf.GradientTape() as tape: # Define loss loss_clean_obj = LossCrossEntropy(model, smoothing=0.) loss_clean = loss_clean_obj.fprop(x=batch_x, y=batch_y) loss = loss_clean # Adversarial training if attack is not None: batch_adv_x = attack.generate(batch_x, **attack_args) loss_adv_obj = LossCrossEntropy(model, smoothing=0.) loss_adv = loss_adv_obj.fprop(x=batch_adv_x, y=batch_y) loss = (loss_clean + loss_adv) / 2.0 # Apply grads model_variables = model.get_params() grads = tape.gradient(loss, model_variables) optimizer.apply_gradients(zip(grads, model_variables)) assert end >= len(X_train) # Check that all examples were used cur = time.time() _logger.info("Epoch " + str(epoch) + " took " + str(cur - prev) + " seconds") if evaluate is not None: evaluate() if save: save_path = os.path.join(args.train_dir, args.filename) saver = tf.train.Saver() saver.save(save_path, model_variables) _logger.info("Completed model training and saved at: " + str(save_path)) else: _logger.info("Completed model training.") return True
[ "def", "train", "(", "model", ",", "X_train", "=", "None", ",", "Y_train", "=", "None", ",", "save", "=", "False", ",", "predictions_adv", "=", "None", ",", "evaluate", "=", "None", ",", "args", "=", "None", ",", "rng", "=", "None", ",", "var_list", "=", "None", ",", "attack", "=", "None", ",", "attack_args", "=", "None", ")", ":", "assert", "isinstance", "(", "model", ",", "Model", ")", "args", "=", "_ArgsWrapper", "(", "args", "or", "{", "}", ")", "if", "(", "(", "attack", "is", "None", ")", "!=", "(", "attack_args", "is", "None", ")", ")", ":", "raise", "ValueError", "(", "\"attack and attack_args must be \"", "\"passed together.\"", ")", "if", "X_train", "is", "None", "or", "Y_train", "is", "None", ":", "raise", "ValueError", "(", "\"X_train argument and Y_train argument \"", "\"must be supplied.\"", ")", "# Check that necessary arguments were given (see doc above)", "assert", "args", ".", "nb_epochs", ",", "\"Number of epochs was not given in args dict\"", "assert", "args", ".", "learning_rate", ",", "\"Learning rate was not given in args dict\"", "assert", "args", ".", "batch_size", ",", "\"Batch size was not given in args dict\"", "if", "save", ":", "assert", "args", ".", "train_dir", ",", "\"Directory for save was not given in args dict\"", "assert", "args", ".", "filename", ",", "\"Filename for save was not given in args dict\"", "if", "rng", "is", "None", ":", "rng", "=", "np", ".", "random", ".", "RandomState", "(", ")", "# Optimizer", "tfe", "=", "tf", ".", "contrib", ".", "eager", "optimizer", "=", "tf", ".", "train", ".", "AdamOptimizer", "(", "learning_rate", "=", "args", ".", "learning_rate", ")", "batch_x", "=", "tfe", ".", "Variable", "(", "X_train", "[", "0", ":", "args", ".", "batch_size", "]", ",", "dtype", "=", "tf", ".", "float32", ")", "batch_y", "=", "tfe", ".", "Variable", "(", "Y_train", "[", "0", ":", "args", ".", "batch_size", "]", ",", "dtype", "=", "tf", ".", "float32", ")", "# One epoch of training.", "for", "epoch", "in", "xrange", "(", "args", ".", "nb_epochs", ")", ":", "# Compute number of batches", "nb_batches", "=", "int", "(", "math", ".", "ceil", "(", "float", "(", "len", "(", "X_train", ")", ")", "/", "args", ".", "batch_size", ")", ")", "assert", "nb_batches", "*", "args", ".", "batch_size", ">=", "len", "(", "X_train", ")", "# Indices to shuffle training set", "index_shuf", "=", "list", "(", "range", "(", "len", "(", "X_train", ")", ")", ")", "rng", ".", "shuffle", "(", "index_shuf", ")", "prev", "=", "time", ".", "time", "(", ")", "for", "batch", "in", "range", "(", "nb_batches", ")", ":", "# Compute batch start and end indices", "start", ",", "end", "=", "batch_indices", "(", "batch", ",", "len", "(", "X_train", ")", ",", "args", ".", "batch_size", ")", "# Perform one training step", "tf", ".", "assign", "(", "batch_x", ",", "X_train", "[", "index_shuf", "[", "start", ":", "end", "]", "]", ")", "tf", ".", "assign", "(", "batch_y", ",", "Y_train", "[", "index_shuf", "[", "start", ":", "end", "]", "]", ")", "# Compute grads", "with", "tf", ".", "GradientTape", "(", ")", "as", "tape", ":", "# Define loss", "loss_clean_obj", "=", "LossCrossEntropy", "(", "model", ",", "smoothing", "=", "0.", ")", "loss_clean", "=", "loss_clean_obj", ".", "fprop", "(", "x", "=", "batch_x", ",", "y", "=", "batch_y", ")", "loss", "=", "loss_clean", "# Adversarial training", "if", "attack", "is", "not", "None", ":", "batch_adv_x", "=", "attack", ".", "generate", "(", "batch_x", ",", "*", "*", "attack_args", ")", "loss_adv_obj", "=", "LossCrossEntropy", "(", "model", 
",", "smoothing", "=", "0.", ")", "loss_adv", "=", "loss_adv_obj", ".", "fprop", "(", "x", "=", "batch_adv_x", ",", "y", "=", "batch_y", ")", "loss", "=", "(", "loss_clean", "+", "loss_adv", ")", "/", "2.0", "# Apply grads", "model_variables", "=", "model", ".", "get_params", "(", ")", "grads", "=", "tape", ".", "gradient", "(", "loss", ",", "model_variables", ")", "optimizer", ".", "apply_gradients", "(", "zip", "(", "grads", ",", "model_variables", ")", ")", "assert", "end", ">=", "len", "(", "X_train", ")", "# Check that all examples were used", "cur", "=", "time", ".", "time", "(", ")", "_logger", ".", "info", "(", "\"Epoch \"", "+", "str", "(", "epoch", ")", "+", "\" took \"", "+", "str", "(", "cur", "-", "prev", ")", "+", "\" seconds\"", ")", "if", "evaluate", "is", "not", "None", ":", "evaluate", "(", ")", "if", "save", ":", "save_path", "=", "os", ".", "path", ".", "join", "(", "args", ".", "train_dir", ",", "args", ".", "filename", ")", "saver", "=", "tf", ".", "train", ".", "Saver", "(", ")", "saver", ".", "save", "(", "save_path", ",", "model_variables", ")", "_logger", ".", "info", "(", "\"Completed model training and saved at: \"", "+", "str", "(", "save_path", ")", ")", "else", ":", "_logger", ".", "info", "(", "\"Completed model training.\"", ")", "return", "True" ]
39.104762
0.009026
def __update(self): """ This is called each time an attribute is accessed, to be sure all params are updated, because of callbacks. """ # I can not set the size attr because it is my property, so I set the width and height separately width, height = self.size super(BaseWidget, self).__setattr__("width", width) super(BaseWidget, self).__setattr__("height", height) super(BaseWidget, self).__setattr__(self.anchor, self.pos)
[ "def", "__update", "(", "self", ")", ":", "# I can not set the size attr because it is my property, so I set the width and height separately", "width", ",", "height", "=", "self", ".", "size", "super", "(", "BaseWidget", ",", "self", ")", ".", "__setattr__", "(", "\"width\"", ",", "width", ")", "super", "(", "BaseWidget", ",", "self", ")", ".", "__setattr__", "(", "\"height\"", ",", "height", ")", "super", "(", "BaseWidget", ",", "self", ")", ".", "__setattr__", "(", "self", ".", "anchor", ",", "self", ".", "pos", ")" ]
48
0.00818
def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer, parse_record_fn, num_epochs=1, num_gpus=None, examples_per_epoch=None, dtype=tf.float32): """Given a Dataset with raw records, return an iterator over the records. Args: dataset: A Dataset representing raw records is_training: A boolean denoting whether the input is for training. batch_size: The number of samples per batch. shuffle_buffer: The buffer size to use when shuffling records. A larger value results in better randomness, but smaller values reduce startup time and use less memory. parse_record_fn: A function that takes a raw record and returns the corresponding (image, label) pair. num_epochs: The number of epochs to repeat the dataset. num_gpus: The number of gpus used for training. examples_per_epoch: The number of examples in an epoch. dtype: Data type to use for images/features. Returns: Dataset of (image, label) pairs ready for iteration. """ # We prefetch a batch at a time, This can help smooth out the time taken to # load input files as we go through shuffling and processing. dataset = dataset.prefetch(buffer_size=batch_size) if is_training: # Shuffle the records. Note that we shuffle before repeating to ensure # that the shuffling respects epoch boundaries. mlperf_log.resnet_print(key=mlperf_log.INPUT_ORDER) dataset = dataset.shuffle(buffer_size=shuffle_buffer) # If we are training over multiple epochs before evaluating, repeat the # dataset for the appropriate number of epochs. dataset = dataset.repeat(num_epochs) # Parse the raw records into images and labels. Testing has shown that setting # num_parallel_batches > 1 produces no improvement in throughput, since # batch_size is almost always much greater than the number of CPU cores. dataset = dataset.apply( tf.contrib.data.map_and_batch( lambda value: parse_record_fn(value, is_training, dtype), batch_size=batch_size, num_parallel_batches=1)) # Operations between the final prefetch and the get_next call to the iterator # will happen synchronously during run time. We prefetch here again to # background all of the above processing work and keep it out of the # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE # allows DistributionStrategies to adjust how many batches to fetch based # on how many devices are present. dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) return dataset
[ "def", "process_record_dataset", "(", "dataset", ",", "is_training", ",", "batch_size", ",", "shuffle_buffer", ",", "parse_record_fn", ",", "num_epochs", "=", "1", ",", "num_gpus", "=", "None", ",", "examples_per_epoch", "=", "None", ",", "dtype", "=", "tf", ".", "float32", ")", ":", "# We prefetch a batch at a time, This can help smooth out the time taken to", "# load input files as we go through shuffling and processing.", "dataset", "=", "dataset", ".", "prefetch", "(", "buffer_size", "=", "batch_size", ")", "if", "is_training", ":", "# Shuffle the records. Note that we shuffle before repeating to ensure", "# that the shuffling respects epoch boundaries.", "mlperf_log", ".", "resnet_print", "(", "key", "=", "mlperf_log", ".", "INPUT_ORDER", ")", "dataset", "=", "dataset", ".", "shuffle", "(", "buffer_size", "=", "shuffle_buffer", ")", "# If we are training over multiple epochs before evaluating, repeat the", "# dataset for the appropriate number of epochs.", "dataset", "=", "dataset", ".", "repeat", "(", "num_epochs", ")", "# Parse the raw records into images and labels. Testing has shown that setting", "# num_parallel_batches > 1 produces no improvement in throughput, since", "# batch_size is almost always much greater than the number of CPU cores.", "dataset", "=", "dataset", ".", "apply", "(", "tf", ".", "contrib", ".", "data", ".", "map_and_batch", "(", "lambda", "value", ":", "parse_record_fn", "(", "value", ",", "is_training", ",", "dtype", ")", ",", "batch_size", "=", "batch_size", ",", "num_parallel_batches", "=", "1", ")", ")", "# Operations between the final prefetch and the get_next call to the iterator", "# will happen synchronously during run time. We prefetch here again to", "# background all of the above processing work and keep it out of the", "# critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE", "# allows DistributionStrategies to adjust how many batches to fetch based", "# on how many devices are present.", "dataset", "=", "dataset", ".", "prefetch", "(", "buffer_size", "=", "tf", ".", "contrib", ".", "data", ".", "AUTOTUNE", ")", "return", "dataset" ]
47.444444
0.008413
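A hedged usage sketch: the shard filename, parse function, and constants below are placeholders, not part of the function above.

    raw = tf.data.TFRecordDataset(['train-00000-of-01024'])  # hypothetical shard
    ds = process_record_dataset(raw,
                                is_training=True,
                                batch_size=128,
                                shuffle_buffer=10000,
                                parse_record_fn=parse_record,  # user-supplied
                                num_epochs=90)
    images, labels = ds.make_one_shot_iterator().get_next()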
def configure_nodes(self, info): """ Handles display of the nodes editor. """ if info.initialized: self.model.edit_traits(parent=info.ui.control, kind="live", view=nodes_view)
[ "def", "configure_nodes", "(", "self", ",", "info", ")", ":", "if", "info", ".", "initialized", ":", "self", ".", "model", ".", "edit_traits", "(", "parent", "=", "info", ".", "ui", ".", "control", ",", "kind", "=", "\"live\"", ",", "view", "=", "nodes_view", ")" ]
37
0.013216
def setup(app): ''' Required Sphinx extension setup function. ''' app.add_node(bokeh_palette_group, html=(html_visit_bokeh_palette_group, None)) app.add_directive('bokeh-palette-group', BokehPaletteGroupDirective)
[ "def", "setup", "(", "app", ")", ":", "app", ".", "add_node", "(", "bokeh_palette_group", ",", "html", "=", "(", "html_visit_bokeh_palette_group", ",", "None", ")", ")", "app", ".", "add_directive", "(", "'bokeh-palette-group'", ",", "BokehPaletteGroupDirective", ")" ]
55.5
0.008889
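In a Sphinx project, the extension would be enabled from conf.py by dotted module path (the path below is illustrative):

    extensions = [
        'bokeh.sphinxext.bokeh_palette_group',  # assumed module path
    ]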
def MessageSetItemSizer(field_number): """Returns a sizer for extensions of MessageSet. The message set message looks like this: message MessageSet { repeated group Item = 1 { required int32 type_id = 2; required string message = 3; } } """ static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) + _TagSize(3)) local_VarintSize = _VarintSize def FieldSize(value): l = value.ByteSize() return static_size + local_VarintSize(l) + l return FieldSize
[ "def", "MessageSetItemSizer", "(", "field_number", ")", ":", "static_size", "=", "(", "_TagSize", "(", "1", ")", "*", "2", "+", "_TagSize", "(", "2", ")", "+", "_VarintSize", "(", "field_number", ")", "+", "_TagSize", "(", "3", ")", ")", "local_VarintSize", "=", "_VarintSize", "def", "FieldSize", "(", "value", ")", ":", "l", "=", "value", ".", "ByteSize", "(", ")", "return", "static_size", "+", "local_VarintSize", "(", "l", ")", "+", "l", "return", "FieldSize" ]
26.15
0.012915
def build_overviews(source_file, factors=None, minsize=256, external=False, blocksize=256, interleave='pixel', compress='lzw', resampling=Resampling.gauss, **kwargs): """Build overviews at one or more decimation factors for all bands of the dataset. Parameters ------------ source_file : str, file object or pathlib.Path object Source file. factors : list, optional A list of integral overview levels to build. minsize : int, optional Maximum width or height of the smallest overview level. Only taken into account if explicit factors are not specified. Defaults to `256`. external : bool, optional Can be set to `True` to force external overviews in the GeoTIFF (.ovr) format. Default is False. blocksize : int, optional The block size (tile width and height) used for overviews. Should be a power-of-two value between 64 and 4096. Default value is `256`. interleave : str, optional Interleaving. Default value is `pixel`. compress : str, optional Set the compression to use. Default is `lzw`. resampling : rasterio.enums.Resampling Resampling method. Default is `gauss`. kwargs : optional Additional arguments passed to rasterio.Env. Returns --------- out: None Original file is altered or external .ovr can be created. """ with rasterio.open(source_file, 'r+') as dst: if factors is None: factors = _calc_overviews_factors( SimpleNamespace(width=dst.width, height=dst.height), minsize) with rasterio.Env( GDAL_TIFF_OVR_BLOCKSIZE=blocksize, INTERLEAVE_OVERVIEW=interleave, COMPRESS_OVERVIEW=compress, TIFF_USE_OVR=external, **kwargs ): dst.build_overviews(factors, resampling)
[ "def", "build_overviews", "(", "source_file", ",", "factors", "=", "None", ",", "minsize", "=", "256", ",", "external", "=", "False", ",", "blocksize", "=", "256", ",", "interleave", "=", "'pixel'", ",", "compress", "=", "'lzw'", ",", "resampling", "=", "Resampling", ".", "gauss", ",", "*", "*", "kwargs", ")", ":", "with", "rasterio", ".", "open", "(", "source_file", ",", "'r+'", ")", "as", "dst", ":", "if", "factors", "is", "None", ":", "factors", "=", "_calc_overviews_factors", "(", "SimpleNamespace", "(", "width", "=", "dst", ".", "width", ",", "height", "=", "dst", ".", "height", ")", ",", "minsize", ")", "with", "rasterio", ".", "Env", "(", "GDAL_TIFF_OVR_BLOCKSIZE", "=", "blocksize", ",", "INTERLEAVE_OVERVIEW", "=", "interleave", ",", "COMPRESS_OVERVIEW", "=", "compress", ",", "TIFF_USE_OVR", "=", "external", ",", "*", "*", "kwargs", ")", ":", "dst", ".", "build_overviews", "(", "factors", ",", "resampling", ")" ]
38.244898
0.001561
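A usage sketch building three decimation levels with average resampling on a hypothetical GeoTIFF:

    from rasterio.enums import Resampling

    build_overviews('example.tif',            # hypothetical file
                    factors=[2, 4, 8],
                    compress='deflate',
                    resampling=Resampling.average)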
def validate(self, value, messages=None, prefix=None): """validate(value[, messages[, prefix]]) -> True | False Validates the given value according to this PrimitiveType definition. Validation error messages are appended to an optional messages array, each with the optional message prefix. """ valid = False def log(msg): if messages is not None: if prefix is not None: tok = msg.split() msg = prefix + ' ' + tok[0].lower() + " " + " ".join(tok[1:]) messages.append(msg) if self.string: valid = type(value) is str else: if type(value) is str: log("String '%s' cannot be represented as a number." % value) elif type(value) not in (int, long, float): log("Value '%s' is not a primitive type." % str(value)) elif type(value) is float and not self.float: log("Float '%g' cannot be represented as an integer." % value) else: if value < self.min or value > self.max: args = (str(value), self.min, self.max) log("Value '%s' out of range [%d, %d]." % args) else: valid = True return valid
[ "def", "validate", "(", "self", ",", "value", ",", "messages", "=", "None", ",", "prefix", "=", "None", ")", ":", "valid", "=", "False", "def", "log", "(", "msg", ")", ":", "if", "messages", "is", "not", "None", ":", "if", "prefix", "is", "not", "None", ":", "tok", "=", "msg", ".", "split", "(", ")", "msg", "=", "prefix", "+", "' '", "+", "tok", "[", "0", "]", ".", "lower", "(", ")", "+", "\" \"", "+", "\" \"", ".", "join", "(", "tok", "[", "1", ":", "]", ")", "messages", ".", "append", "(", "msg", ")", "if", "self", ".", "string", ":", "valid", "=", "type", "(", "value", ")", "is", "str", "else", ":", "if", "type", "(", "value", ")", "is", "str", ":", "log", "(", "\"String '%s' cannot be represented as a number.\"", "%", "value", ")", "elif", "type", "(", "value", ")", "not", "in", "(", "int", ",", "long", ",", "float", ")", ":", "log", "(", "\"Value '%s' is not a primitive type.\"", "%", "str", "(", "value", ")", ")", "elif", "type", "(", "value", ")", "is", "float", "and", "not", "self", ".", "float", ":", "log", "(", "\"Float '%g' cannot be represented as an integer.\"", "%", "value", ")", "else", ":", "if", "value", "<", "self", ".", "min", "or", "value", ">", "self", ".", "max", ":", "args", "=", "(", "str", "(", "value", ")", ",", "self", ".", "min", ",", "self", ".", "max", ")", "log", "(", "\"Value '%s' out of range [%d, %d].\"", "%", "args", ")", "else", ":", "valid", "=", "True", "return", "valid" ]
39.939394
0.002222
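A usage sketch, assuming dtype is an instance of the PrimitiveType this method belongs to, with a hypothetical [0, 255] range:

    messages = []
    if not dtype.validate(300, messages, prefix='Field "speed":'):
        for msg in messages:
            print(msg)  # e.g. Field "speed": value '300' out of range [0, 255].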
def set_icon_file(self, filename, rel="icon"): """ Allows defining an icon for the App Args: filename (str): the resource file name (e.g. "/res:myicon.png") rel (str): leave it unchanged (standard "icon") """ mimetype, encoding = mimetypes.guess_type(filename) self.add_child("favicon", '<link rel="%s" href="%s" type="%s" />'%(rel, filename, mimetype))
[ "def", "set_icon_file", "(", "self", ",", "filename", ",", "rel", "=", "\"icon\"", ")", ":", "mimetype", ",", "encoding", "=", "mimetypes", ".", "guess_type", "(", "filename", ")", "self", ".", "add_child", "(", "\"favicon\"", ",", "'<link rel=\"%s\" href=\"%s\" type=\"%s\" />'", "%", "(", "rel", ",", "filename", ",", "mimetype", ")", ")" ]
46.888889
0.009302
def bitonic_sort(arr, reverse=False): """ Bitonic sort is a sorting algorithm designed to run across multiple processes, but this implementation does not contain any parallel code. It can only sort arrays whose size is a power of 2. It can sort an array in increasing order (reverse=False) or decreasing order (reverse=True). Worst-case in parallel: O(log(n)^2) Worst-case in non-parallel: O(nlog(n)^2) reference: https://en.wikipedia.org/wiki/Bitonic_sorter """ def compare(arr, reverse): n = len(arr)//2 for i in range(n): if reverse != (arr[i] > arr[i+n]): arr[i], arr[i+n] = arr[i+n], arr[i] return arr def bitonic_merge(arr, reverse): n = len(arr) if n <= 1: return arr arr = compare(arr, reverse) left = bitonic_merge(arr[:n // 2], reverse) right = bitonic_merge(arr[n // 2:], reverse) return left + right #end of function(compare and bitionic_merge) definition n = len(arr) if n <= 1: return arr # checks if n is power of two if not (n and (not(n & (n - 1))) ): raise ValueError("the size of input should be power of two") left = bitonic_sort(arr[:n // 2], True) right = bitonic_sort(arr[n // 2:], False) arr = bitonic_merge(left + right, reverse) return arr
[ "def", "bitonic_sort", "(", "arr", ",", "reverse", "=", "False", ")", ":", "def", "compare", "(", "arr", ",", "reverse", ")", ":", "n", "=", "len", "(", "arr", ")", "//", "2", "for", "i", "in", "range", "(", "n", ")", ":", "if", "reverse", "!=", "(", "arr", "[", "i", "]", ">", "arr", "[", "i", "+", "n", "]", ")", ":", "arr", "[", "i", "]", ",", "arr", "[", "i", "+", "n", "]", "=", "arr", "[", "i", "+", "n", "]", ",", "arr", "[", "i", "]", "return", "arr", "def", "bitonic_merge", "(", "arr", ",", "reverse", ")", ":", "n", "=", "len", "(", "arr", ")", "if", "n", "<=", "1", ":", "return", "arr", "arr", "=", "compare", "(", "arr", ",", "reverse", ")", "left", "=", "bitonic_merge", "(", "arr", "[", ":", "n", "//", "2", "]", ",", "reverse", ")", "right", "=", "bitonic_merge", "(", "arr", "[", "n", "//", "2", ":", "]", ",", "reverse", ")", "return", "left", "+", "right", "#end of function(compare and bitionic_merge) definition", "n", "=", "len", "(", "arr", ")", "if", "n", "<=", "1", ":", "return", "arr", "# checks if n is power of two", "if", "not", "(", "n", "and", "(", "not", "(", "n", "&", "(", "n", "-", "1", ")", ")", ")", ")", ":", "raise", "ValueError", "(", "\"the size of input should be power of two\"", ")", "left", "=", "bitonic_sort", "(", "arr", "[", ":", "n", "//", "2", "]", ",", "True", ")", "right", "=", "bitonic_sort", "(", "arr", "[", "n", "//", "2", ":", "]", ",", "False", ")", "arr", "=", "bitonic_merge", "(", "left", "+", "right", ",", "reverse", ")", "return", "arr" ]
31.72093
0.008535
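A quick check on a power-of-two input:

    print(bitonic_sort([3, 7, 4, 8, 6, 2, 1, 5]))        # [1, 2, 3, 4, 5, 6, 7, 8]
    print(bitonic_sort([3, 7, 4, 8, 6, 2, 1, 5], True))  # [8, 7, 6, 5, 4, 3, 2, 1]
    # bitonic_sort([1, 2, 3]) raises ValueError: the size of input should be power of two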
def _run_markdownlint(matched_filenames, show_lint_files): """Run markdownlint on matched_filenames.""" from prospector.message import Message, Location for filename in matched_filenames: _debug_linter_status("mdl", filename, show_lint_files) try: proc = subprocess.Popen(["mdl"] + matched_filenames, stdout=subprocess.PIPE, stderr=subprocess.PIPE) lines = proc.communicate()[0].decode().splitlines() except OSError as error: if error.errno == errno.ENOENT: return [] lines = [ re.match(r"([\w\-.\/\\ ]+)\:([0-9]+)\: (\w+) (.+)", l).groups(1) for l in lines ] return_dict = dict() for filename, lineno, code, msg in lines: key = _Key(filename, int(lineno), code) loc = Location(filename, None, None, int(lineno), 0) return_dict[key] = Message("markdownlint", code, loc, msg) return return_dict
[ "def", "_run_markdownlint", "(", "matched_filenames", ",", "show_lint_files", ")", ":", "from", "prospector", ".", "message", "import", "Message", ",", "Location", "for", "filename", "in", "matched_filenames", ":", "_debug_linter_status", "(", "\"mdl\"", ",", "filename", ",", "show_lint_files", ")", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "[", "\"mdl\"", "]", "+", "matched_filenames", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "lines", "=", "proc", ".", "communicate", "(", ")", "[", "0", "]", ".", "decode", "(", ")", ".", "splitlines", "(", ")", "except", "OSError", "as", "error", ":", "if", "error", ".", "errno", "==", "errno", ".", "ENOENT", ":", "return", "[", "]", "lines", "=", "[", "re", ".", "match", "(", "r\"([\\w\\-.\\/\\\\ ]+)\\:([0-9]+)\\: (\\w+) (.+)\"", ",", "l", ")", ".", "groups", "(", "1", ")", "for", "l", "in", "lines", "]", "return_dict", "=", "dict", "(", ")", "for", "filename", ",", "lineno", ",", "code", ",", "msg", "in", "lines", ":", "key", "=", "_Key", "(", "filename", ",", "int", "(", "lineno", ")", ",", "code", ")", "loc", "=", "Location", "(", "filename", ",", "None", ",", "None", ",", "int", "(", "lineno", ")", ",", "0", ")", "return_dict", "[", "key", "]", "=", "Message", "(", "\"markdownlint\"", ",", "code", ",", "loc", ",", "msg", ")", "return", "return_dict" ]
35.518519
0.00203
def get_url_shortener(): """ Return the selected URL shortener backend. """ try: backend_module = import_module(URL_SHORTENER_BACKEND) backend = getattr(backend_module, 'backend') except (ImportError, AttributeError): warnings.warn('%s backend cannot be imported' % URL_SHORTENER_BACKEND, RuntimeWarning) backend = default_backend except ImproperlyConfigured as e: warnings.warn(str(e), RuntimeWarning) backend = default_backend return backend
[ "def", "get_url_shortener", "(", ")", ":", "try", ":", "backend_module", "=", "import_module", "(", "URL_SHORTENER_BACKEND", ")", "backend", "=", "getattr", "(", "backend_module", ",", "'backend'", ")", "except", "(", "ImportError", ",", "AttributeError", ")", ":", "warnings", ".", "warn", "(", "'%s backend cannot be imported'", "%", "URL_SHORTENER_BACKEND", ",", "RuntimeWarning", ")", "backend", "=", "default_backend", "except", "ImproperlyConfigured", "as", "e", ":", "warnings", ".", "warn", "(", "str", "(", "e", ")", ",", "RuntimeWarning", ")", "backend", "=", "default_backend", "return", "backend" ]
32.9375
0.001845
def compute_average_oxidation_state(site): """ Calculates the average oxidation state of a site Args: site: Site to compute average oxidation state Returns: Average oxidation state of site. """ try: avg_oxi = sum([sp.oxi_state * occu for sp, occu in site.species.items() if sp is not None]) return avg_oxi except AttributeError: pass try: return site.charge except AttributeError: raise ValueError("Ewald summation can only be performed on structures " "that are either oxidation state decorated or have " "site charges.")
[ "def", "compute_average_oxidation_state", "(", "site", ")", ":", "try", ":", "avg_oxi", "=", "sum", "(", "[", "sp", ".", "oxi_state", "*", "occu", "for", "sp", ",", "occu", "in", "site", ".", "species", ".", "items", "(", ")", "if", "sp", "is", "not", "None", "]", ")", "return", "avg_oxi", "except", "AttributeError", ":", "pass", "try", ":", "return", "site", ".", "charge", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"Ewald summation can only be performed on structures \"", "\"that are either oxidation state decorated or have \"", "\"site charges.\"", ")" ]
30
0.001404
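A usage sketch with an oxidation-state-decorated pymatgen structure (import paths may vary between pymatgen versions):

    from pymatgen.core import Lattice, Structure

    s = Structure(Lattice.cubic(4.2), ['Na+', 'Cl-'],
                  [[0, 0, 0], [0.5, 0.5, 0.5]])
    print(compute_average_oxidation_state(s[0]))  # 1.0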
def build_db_tables(db): """Build Seshet's basic database schema. Requires one parameter, `db`, a `pydal.DAL` instance. """ if not isinstance(db, DAL) or not db._uri: raise Exception("Need valid DAL object to define tables") # event log - self-explanatory, logs all events db.define_table('event_log', Field('event_type'), Field('event_time', 'datetime'), Field('source'), Field('target'), Field('message', 'text'), Field('host'), Field('params', 'list:string'), ) db.define_table('modules', Field('name', notnull=True, unique=True, length=256), Field('enabled', 'boolean'), Field('event_types', 'list:string'), Field('description', 'text'), Field('echannels', 'list:string'), Field('dchannels', 'list:string'), Field('enicks', 'list:string'), Field('dnicks', 'list:string'), Field('whitelist', 'list:string'), Field('blacklist', 'list:string'), Field('cmd_prefix', length=1, default='!', notnull=True), Field('acl', 'json'), Field('rate_limit', 'json'), )
[ "def", "build_db_tables", "(", "db", ")", ":", "if", "not", "isinstance", "(", "db", ",", "DAL", ")", "or", "not", "db", ".", "_uri", ":", "raise", "Exception", "(", "\"Need valid DAL object to define tables\"", ")", "# event log - self-explanatory, logs all events", "db", ".", "define_table", "(", "'event_log'", ",", "Field", "(", "'event_type'", ")", ",", "Field", "(", "'event_time'", ",", "'datetime'", ")", ",", "Field", "(", "'source'", ")", ",", "Field", "(", "'target'", ")", ",", "Field", "(", "'message'", ",", "'text'", ")", ",", "Field", "(", "'host'", ")", ",", "Field", "(", "'params'", ",", "'list:string'", ")", ",", ")", "db", ".", "define_table", "(", "'modules'", ",", "Field", "(", "'name'", ",", "notnull", "=", "True", ",", "unique", "=", "True", ",", "length", "=", "256", ")", ",", "Field", "(", "'enabled'", ",", "'boolean'", ")", ",", "Field", "(", "'event_types'", ",", "'list:string'", ")", ",", "Field", "(", "'description'", ",", "'text'", ")", ",", "Field", "(", "'echannels'", ",", "'list:string'", ")", ",", "Field", "(", "'dchannels'", ",", "'list:string'", ")", ",", "Field", "(", "'enicks'", ",", "'list:string'", ")", ",", "Field", "(", "'dnicks'", ",", "'list:string'", ")", ",", "Field", "(", "'whitelist'", ",", "'list:string'", ")", ",", "Field", "(", "'blacklist'", ",", "'list:string'", ")", ",", "Field", "(", "'cmd_prefix'", ",", "length", "=", "1", ",", "default", "=", "'!'", ",", "notnull", "=", "True", ")", ",", "Field", "(", "'acl'", ",", "'json'", ")", ",", "Field", "(", "'rate_limit'", ",", "'json'", ")", ",", ")" ]
42.606061
0.001391
def wait_for_prepare_to_finish( self, prepare_id, sec_to_sleep=5.0, max_retries=100000): """wait_for_prepare_to_finish :param prepare_id: MLPrepare.id to wait on :param sec_to_sleep: seconds to sleep during polling :param max_retries: max retries before stopping """ not_done = True retry_attempt = 1 while not_done: if self.debug: log.info(("PREPSTATUS getting prepare.id={} details") .format( prepare_id)) response = self.get_prepare_by_id(prepare_id) if self.debug: log.info(("PREPSTATUS got prepare.id={} response={}") .format( prepare_id, response)) if response["status"] != SUCCESS: log.error(("PREPSTATUS failed to get prepare.id={} " "with error={}") .format( prepare_id, response["error"])) return self.build_response( status=ERROR, error=response["error"], data=response["data"]) # stop if this failed getting the prepare details prepare_data = response.get( "data", None) if not prepare_data: return self.build_response( status=ERROR, error="failed to find prepare dictionary in response", data=response["data"]) prepare_status = prepare_data["status"] if prepare_status == "finished" \ or prepare_status == "completed": not_done = False return self.build_response( status=SUCCESS, error="", data=prepare_data) else: retry_attempt += 1 if retry_attempt > max_retries: err_msg = ("failed waiting " "for prepare.id={} to finish").format( prepare_id) log.error(err_msg) return self.build_response( status=ERROR, error=err_msg) else: if self.verbose: if retry_attempt % 100 == 0: log.info(("waiting on prepare.id={} retry={}") .format( prepare_id, retry_attempt)) # if logging just to show this is running time.sleep(sec_to_sleep)
[ "def", "wait_for_prepare_to_finish", "(", "self", ",", "prepare_id", ",", "sec_to_sleep", "=", "5.0", ",", "max_retries", "=", "100000", ")", ":", "not_done", "=", "True", "retry_attempt", "=", "1", "while", "not_done", ":", "if", "self", ".", "debug", ":", "log", ".", "info", "(", "(", "\"PREPSTATUS getting prepare.id={} details\"", ")", ".", "format", "(", "prepare_id", ")", ")", "response", "=", "self", ".", "get_prepare_by_id", "(", "prepare_id", ")", "if", "self", ".", "debug", ":", "log", ".", "info", "(", "(", "\"PREPSTATUS got prepare.id={} response={}\"", ")", ".", "format", "(", "prepare_id", ",", "response", ")", ")", "if", "response", "[", "\"status\"", "]", "!=", "SUCCESS", ":", "log", ".", "error", "(", "(", "\"PREPSTATUS failed to get prepare.id={} \"", "\"with error={}\"", ")", ".", "format", "(", "prepare_id", ",", "response", "[", "\"error\"", "]", ")", ")", "return", "self", ".", "build_response", "(", "status", "=", "ERROR", ",", "error", "=", "response", "[", "\"error\"", "]", ",", "data", "=", "response", "[", "\"data\"", "]", ")", "# stop if this failed getting the prepare details", "prepare_data", "=", "response", ".", "get", "(", "\"data\"", ",", "None", ")", "if", "not", "prepare_data", ":", "return", "self", ".", "build_response", "(", "status", "=", "ERROR", ",", "error", "=", "\"failed to find prepare dictionary in response\"", ",", "data", "=", "response", "[", "\"data\"", "]", ")", "prepare_status", "=", "prepare_data", "[", "\"status\"", "]", "if", "prepare_status", "==", "\"finished\"", "or", "prepare_status", "==", "\"completed\"", ":", "not_done", "=", "False", "return", "self", ".", "build_response", "(", "status", "=", "SUCCESS", ",", "error", "=", "\"\"", ",", "data", "=", "prepare_data", ")", "else", ":", "retry_attempt", "+=", "1", "if", "retry_attempt", ">", "max_retries", ":", "err_msg", "=", "(", "\"failed waiting \"", "\"for prepare.id={} to finish\"", ")", ".", "format", "(", "prepare_id", ")", "log", ".", "error", "(", "err_msg", ")", "return", "self", ".", "build_response", "(", "status", "=", "ERROR", ",", "error", "=", "err_msg", ")", "else", ":", "if", "self", ".", "verbose", ":", "if", "retry_attempt", "%", "100", "==", "0", ":", "log", ".", "info", "(", "(", "\"waiting on prepare.id={} retry={}\"", ")", ".", "format", "(", "prepare_id", ",", "retry_attempt", ")", ")", "# if logging just to show this is running", "time", ".", "sleep", "(", "sec_to_sleep", ")" ]
34.975309
0.000687
def split_url(self, url): """Parse an IIIF API URL path into components. Will parse a URL or URL path that accords with either the parametrized or info API forms. Will raise an IIIFRequestError on failure. If self.identifier is set then url is assumed not to include the identifier. """ # clear data first identifier = self.identifier self.clear() # url must start with baseurl if set (including slash) if (self.baseurl is not None): (path, num) = re.subn('^' + self.baseurl, '', url, 1) if (num != 1): raise IIIFRequestError( text="Request URL does not start with base URL") url = path # Break up by path segments, count to decide format segs = url.split('/') if (identifier is not None): segs.insert(0, identifier) elif (self.allow_slashes_in_identifier): segs = self._allow_slashes_in_identifier_munger(segs) # Now have segments with identifier as first if (len(segs) > 5): raise IIIFRequestPathError( text="Request URL (%s) has too many path segments" % url) elif (len(segs) == 5): self.identifier = urlunquote(segs[0]) self.region = urlunquote(segs[1]) self.size = urlunquote(segs[2]) self.rotation = urlunquote(segs[3]) self.quality = self.strip_format(urlunquote(segs[4])) self.info = False elif (len(segs) == 2): self.identifier = urlunquote(segs[0]) info_name = self.strip_format(urlunquote(segs[1])) if (info_name != "info"): raise IIIFRequestError( text="Bad name for Image Information") if (self.api_version == '1.0'): if (self.format not in ['json', 'xml']): raise IIIFRequestError( text="Invalid format for Image Information (json and xml allowed)") elif (self.format != 'json'): raise IIIFRequestError( text="Invalid format for Image Information (only json allowed)") self.info = True elif (len(segs) == 1): self.identifier = urlunquote(segs[0]) raise IIIFRequestBaseURI() else: raise IIIFRequestPathError( text="Bad number of path segments in request") return(self)
[ "def", "split_url", "(", "self", ",", "url", ")", ":", "# clear data first", "identifier", "=", "self", ".", "identifier", "self", ".", "clear", "(", ")", "# url must start with baseurl if set (including slash)", "if", "(", "self", ".", "baseurl", "is", "not", "None", ")", ":", "(", "path", ",", "num", ")", "=", "re", ".", "subn", "(", "'^'", "+", "self", ".", "baseurl", ",", "''", ",", "url", ",", "1", ")", "if", "(", "num", "!=", "1", ")", ":", "raise", "IIIFRequestError", "(", "text", "=", "\"Request URL does not start with base URL\"", ")", "url", "=", "path", "# Break up by path segments, count to decide format", "segs", "=", "url", ".", "split", "(", "'/'", ")", "if", "(", "identifier", "is", "not", "None", ")", ":", "segs", ".", "insert", "(", "0", ",", "identifier", ")", "elif", "(", "self", ".", "allow_slashes_in_identifier", ")", ":", "segs", "=", "self", ".", "_allow_slashes_in_identifier_munger", "(", "segs", ")", "# Now have segments with identifier as first", "if", "(", "len", "(", "segs", ")", ">", "5", ")", ":", "raise", "IIIFRequestPathError", "(", "text", "=", "\"Request URL (%s) has too many path segments\"", "%", "url", ")", "elif", "(", "len", "(", "segs", ")", "==", "5", ")", ":", "self", ".", "identifier", "=", "urlunquote", "(", "segs", "[", "0", "]", ")", "self", ".", "region", "=", "urlunquote", "(", "segs", "[", "1", "]", ")", "self", ".", "size", "=", "urlunquote", "(", "segs", "[", "2", "]", ")", "self", ".", "rotation", "=", "urlunquote", "(", "segs", "[", "3", "]", ")", "self", ".", "quality", "=", "self", ".", "strip_format", "(", "urlunquote", "(", "segs", "[", "4", "]", ")", ")", "self", ".", "info", "=", "False", "elif", "(", "len", "(", "segs", ")", "==", "2", ")", ":", "self", ".", "identifier", "=", "urlunquote", "(", "segs", "[", "0", "]", ")", "info_name", "=", "self", ".", "strip_format", "(", "urlunquote", "(", "segs", "[", "1", "]", ")", ")", "if", "(", "info_name", "!=", "\"info\"", ")", ":", "raise", "IIIFRequestError", "(", "text", "=", "\"Bad name for Image Information\"", ")", "if", "(", "self", ".", "api_version", "==", "'1.0'", ")", ":", "if", "(", "self", ".", "format", "not", "in", "[", "'json'", ",", "'xml'", "]", ")", ":", "raise", "IIIFRequestError", "(", "text", "=", "\"Invalid format for Image Information (json and xml allowed)\"", ")", "elif", "(", "self", ".", "format", "!=", "'json'", ")", ":", "raise", "IIIFRequestError", "(", "text", "=", "\"Invalid format for Image Information (only json allowed)\"", ")", "self", ".", "info", "=", "True", "elif", "(", "len", "(", "segs", ")", "==", "1", ")", ":", "self", ".", "identifier", "=", "urlunquote", "(", "segs", "[", "0", "]", ")", "raise", "IIIFRequestBaseURI", "(", ")", "else", ":", "raise", "IIIFRequestPathError", "(", "text", "=", "\"Bad number of path segments in request\"", ")", "return", "(", "self", ")" ]
42.448276
0.001588
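A usage sketch parsing a parametrized Image API path; the class name and constructor arguments are assumed from the surrounding library:

    r = IIIFRequest(api_version='2.0')
    r.split_url('identifier/full/full/0/default.jpg')
    print(r.identifier, r.region, r.size, r.rotation, r.quality, r.format)
    # identifier full full 0 default jpg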
def _linearEOM(y,t,pot): """ NAME: linearEOM PURPOSE: the one-dimensional equation-of-motion INPUT: y - current phase-space position t - current time pot - (list of) linearPotential instance(s) OUTPUT: dy/dt HISTORY: 2010-07-13 - Bovy (NYU) """ return [y[1],_evaluatelinearForces(pot,y[0],t=t)]
[ "def", "_linearEOM", "(", "y", ",", "t", ",", "pot", ")", ":", "return", "[", "y", "[", "1", "]", ",", "_evaluatelinearForces", "(", "pot", ",", "y", "[", "0", "]", ",", "t", "=", "t", ")", "]" ]
22.5
0.016
def EncryptPassword(password, key): """ Encrypts the password using the given key. """ from time import time from array import array import hmac import sha import os import base64 H = UcsUtils.GetShaHash uhash = H(','.join(str(x) for x in [`time()`, `os.getpid()`, `len(password)`, password, key]))[:16] k_enc, k_auth = H('enc' + key + uhash), H('auth' + key + uhash) n = len(password) passwordStream = array('L', password + '0000'[n & 3:]) xkey = UcsUtils.Expandkey(k_enc, n + 4) for i in xrange(len(passwordStream)): passwordStream[i] = passwordStream[i] ^ xkey[i] ct = uhash + passwordStream.tostring()[:n] auth = hmac.new(ct, k_auth, sha).digest() encryptStr = ct + auth[:8] encodedStr = base64.encodestring(encryptStr) encryptedPassword = encodedStr.rstrip('\n') return encryptedPassword
[ "def", "EncryptPassword", "(", "password", ",", "key", ")", ":", "from", "time", "import", "time", "from", "array", "import", "array", "import", "hmac", "import", "sha", "import", "os", "import", "base64", "H", "=", "UcsUtils", ".", "GetShaHash", "uhash", "=", "H", "(", "','", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "[", "`time()`", ",", "`os.getpid()`", ",", "`len(password)`", ",", "password", ",", "key", "]", ")", ")", "[", ":", "16", "]", "k_enc", ",", "k_auth", "=", "H", "(", "'enc'", "+", "key", "+", "uhash", ")", ",", "H", "(", "'auth'", "+", "key", "+", "uhash", ")", "n", "=", "len", "(", "password", ")", "passwordStream", "=", "array", "(", "'L'", ",", "password", "+", "'0000'", "[", "n", "&", "3", ":", "]", ")", "xkey", "=", "UcsUtils", ".", "Expandkey", "(", "k_enc", ",", "n", "+", "4", ")", "for", "i", "in", "xrange", "(", "len", "(", "passwordStream", ")", ")", ":", "passwordStream", "[", "i", "]", "=", "passwordStream", "[", "i", "]", "^", "xkey", "[", "i", "]", "ct", "=", "uhash", "+", "passwordStream", ".", "tostring", "(", ")", "[", ":", "n", "]", "auth", "=", "hmac", ".", "new", "(", "ct", ",", "k_auth", ",", "sha", ")", ".", "digest", "(", ")", "encryptStr", "=", "ct", "+", "auth", "[", ":", "8", "]", "encodedStr", "=", "base64", ".", "encodestring", "(", "encryptStr", ")", "encryptedPassword", "=", "encodedStr", ".", "rstrip", "(", "'\\n'", ")", "return", "encryptedPassword" ]
29.071429
0.029727
def table_output(data): '''Get a table representation of a dictionary or a list of (heading, values) pairs.''' if type(data) == DictType: data = data.items() headings = [ item[0] for item in data ] rows = [ item[1] for item in data ] columns = zip(*rows) if len(columns): widths = [ max([ len(str(y)) for y in row ]) for row in rows ] else: widths = [ 0 for c in headings ] for c, heading in enumerate(headings): widths[c] = max(widths[c], len(heading)) column_count = range(len(rows)) table = [ ' '.join([ headings[c].ljust(widths[c]) for c in column_count ]) ] table.append(' '.join([ '=' * widths[c] for c in column_count ])) for column in columns: table.append(' '.join([ str(column[c]).ljust(widths[c]) for c in column_count ])) return '\n'.join(table)
[ "def", "table_output", "(", "data", ")", ":", "if", "type", "(", "data", ")", "==", "DictType", ":", "data", "=", "data", ".", "items", "(", ")", "headings", "=", "[", "item", "[", "0", "]", "for", "item", "in", "data", "]", "rows", "=", "[", "item", "[", "1", "]", "for", "item", "in", "data", "]", "columns", "=", "zip", "(", "*", "rows", ")", "if", "len", "(", "columns", ")", ":", "widths", "=", "[", "max", "(", "[", "len", "(", "str", "(", "y", ")", ")", "for", "y", "in", "row", "]", ")", "for", "row", "in", "rows", "]", "else", ":", "widths", "=", "[", "0", "for", "c", "in", "headings", "]", "for", "c", ",", "heading", "in", "enumerate", "(", "headings", ")", ":", "widths", "[", "c", "]", "=", "max", "(", "widths", "[", "c", "]", ",", "len", "(", "heading", ")", ")", "column_count", "=", "range", "(", "len", "(", "rows", ")", ")", "table", "=", "[", "' '", ".", "join", "(", "[", "headings", "[", "c", "]", ".", "ljust", "(", "widths", "[", "c", "]", ")", "for", "c", "in", "column_count", "]", ")", "]", "table", ".", "append", "(", "' '", ".", "join", "(", "[", "'='", "*", "widths", "[", "c", "]", "for", "c", "in", "column_count", "]", ")", ")", "for", "column", "in", "columns", ":", "table", ".", "append", "(", "' '", ".", "join", "(", "[", "str", "(", "column", "[", "c", "]", ")", ".", "ljust", "(", "widths", "[", "c", "]", ")", "for", "c", "in", "column_count", "]", ")", ")", "return", "'\\n'", ".", "join", "(", "table", ")" ]
41.789474
0.025862
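A usage sketch: each pair supplies a heading and that heading's column of values, which the function zips into printed rows (a list of pairs avoids relying on dict ordering):

    print(table_output([('name', ('alpha', 'beta')),
                        ('size', (1, 22))]))
    # name  size
    # ===== ====
    # alpha 1
    # beta  22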
def addDelta(self, location, aMathObject, deltaName = None, punch=False, axisOnly=True): """ Add a delta at this location. * location: a Location object * aMathObject: a math-sensitive object * deltaName: optional string/token * punch: * True: store the difference between aMathObject and the instance value at that location * False: store the delta as given. """ #location = self._bender(location) if punch: r = self.getInstance(location, axisOnly=axisOnly) if r is not None: self[location.asTuple()] = aMathObject-r, deltaName else: raise MutatorError("Could not get instance.") else: self[location.asTuple()] = aMathObject, deltaName
[ "def", "addDelta", "(", "self", ",", "location", ",", "aMathObject", ",", "deltaName", "=", "None", ",", "punch", "=", "False", ",", "axisOnly", "=", "True", ")", ":", "#location = self._bender(location)", "if", "punch", ":", "r", "=", "self", ".", "getInstance", "(", "location", ",", "axisOnly", "=", "axisOnly", ")", "if", "r", "is", "not", "None", ":", "self", "[", "location", ".", "asTuple", "(", ")", "]", "=", "aMathObject", "-", "r", ",", "deltaName", "else", ":", "raise", "MutatorError", "(", "\"Could not get instance.\"", ")", "else", ":", "self", "[", "location", ".", "asTuple", "(", ")", "]", "=", "aMathObject", ",", "deltaName" ]
45.555556
0.008363
def qubit_adjacent_lifted_gate(i, matrix, n_qubits): """ Lifts input k-qubit gate on adjacent qubits starting from qubit i to complete Hilbert space of dimension 2 ** num_qubits. Ex: 1-qubit gate, lifts from qubit i Ex: 2-qubit gate, lifts from qubits (i+1, i) Ex: 3-qubit gate, lifts from qubits (i+2, i+1, i), operating in that order In general, this takes a k-qubit gate (2D matrix 2^k x 2^k) and lifts it to the complete Hilbert space of dim 2^num_qubits, as defined by the right-to-left tensor product (1) in arXiv:1608.03355. Developer note: Quil and the QVM like qubits to be ordered such that qubit 0 is on the right. Therefore, in ``qubit_adjacent_lifted_gate``, ``lifted_pauli``, and ``lifted_state_operator``, we build up the lifted matrix by performing the kronecker product from right to left. Note that while the qubits are addressed in decreasing order, starting with num_qubit - 1 on the left and ending with qubit 0 on the right (in a little-endian fashion), gates are still lifted to apply on qubits in increasing index (right-to-left) order. :param int i: starting qubit to lift matrix from (incr. index order) :param np.array matrix: the matrix to be lifted :param int n_qubits: number of overall qubits present in space :return: matrix representation of operator acting on the complete Hilbert space of all num_qubits. :rtype: sparse_array """ n_rows, n_cols = matrix.shape assert n_rows == n_cols, 'Matrix must be square' gate_size = np.log2(n_rows) assert gate_size == int(gate_size), 'Matrix must be 2^n by 2^n' gate_size = int(gate_size) # Outer-product to lift gate to complete Hilbert space # bottom: i qubits below target bottom_matrix = np.eye(2 ** i, dtype=np.complex128) # top: Nq - i (bottom) - gate_size (gate) qubits above target top_qubits = n_qubits - i - gate_size top_matrix = np.eye(2 ** top_qubits, dtype=np.complex128) return np.kron(top_matrix, np.kron(matrix, bottom_matrix))
[ "def", "qubit_adjacent_lifted_gate", "(", "i", ",", "matrix", ",", "n_qubits", ")", ":", "n_rows", ",", "n_cols", "=", "matrix", ".", "shape", "assert", "n_rows", "==", "n_cols", ",", "'Matrix must be square'", "gate_size", "=", "np", ".", "log2", "(", "n_rows", ")", "assert", "gate_size", "==", "int", "(", "gate_size", ")", ",", "'Matrix must be 2^n by 2^n'", "gate_size", "=", "int", "(", "gate_size", ")", "# Outer-product to lift gate to complete Hilbert space", "# bottom: i qubits below target", "bottom_matrix", "=", "np", ".", "eye", "(", "2", "**", "i", ",", "dtype", "=", "np", ".", "complex128", ")", "# top: Nq - i (bottom) - gate_size (gate) qubits above target", "top_qubits", "=", "n_qubits", "-", "i", "-", "gate_size", "top_matrix", "=", "np", ".", "eye", "(", "2", "**", "top_qubits", ",", "dtype", "=", "np", ".", "complex128", ")", "return", "np", ".", "kron", "(", "top_matrix", ",", "np", ".", "kron", "(", "matrix", ",", "bottom_matrix", ")", ")" ]
45.044444
0.001931
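To make the right-to-left tensor ordering concrete, here is a minimal sketch; lift_gate is a hypothetical stripped-down rewrite of the function above, assuming only numpy:

import numpy as np

def lift_gate(i, gate, n_qubits):
    # Embed a k-qubit gate acting on adjacent qubits starting at qubit i
    # (little-endian) into the full 2**n_qubits space: top (x) gate (x) bottom.
    k = int(np.log2(gate.shape[0]))
    bottom = np.eye(2 ** i)                 # identity on qubits below i
    top = np.eye(2 ** (n_qubits - i - k))   # identity on qubits above
    return np.kron(top, np.kron(gate, bottom))

X = np.array([[0, 1], [1, 0]])
# X on qubit 0 of a 2-qubit system is I (x) X in this ordering.
assert np.allclose(lift_gate(0, X, 2), np.kron(np.eye(2), X))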
def fft_frequencies(sr=22050, n_fft=2048): '''Alternative implementation of `np.fft.fftfreq` Parameters ---------- sr : number > 0 [scalar] Audio sampling rate n_fft : int > 0 [scalar] FFT window size Returns ------- freqs : np.ndarray [shape=(1 + n_fft/2,)] Frequencies `(0, sr/n_fft, 2*sr/n_fft, ..., sr/2)` Examples -------- >>> librosa.fft_frequencies(sr=22050, n_fft=16) array([ 0. , 1378.125, 2756.25 , 4134.375, 5512.5 , 6890.625, 8268.75 , 9646.875, 11025. ]) ''' return np.linspace(0, float(sr) / 2, int(1 + n_fft//2), endpoint=True)
[ "def", "fft_frequencies", "(", "sr", "=", "22050", ",", "n_fft", "=", "2048", ")", ":", "return", "np", ".", "linspace", "(", "0", ",", "float", "(", "sr", ")", "/", "2", ",", "int", "(", "1", "+", "n_fft", "//", "2", ")", ",", "endpoint", "=", "True", ")" ]
23.5
0.001362
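For even n_fft the function above agrees with numpy's built-in helper, which makes for a quick sanity check (the equivalence does not hold at the endpoint for odd n_fft):

import numpy as np
# values are k * sr / n_fft for k = 0 .. n_fft // 2
assert np.allclose(fft_frequencies(sr=22050, n_fft=16),
                   np.fft.rfftfreq(16, d=1.0 / 22050))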
def read_mutating_webhook_configuration(self, name, **kwargs): # noqa: E501 """read_mutating_webhook_configuration # noqa: E501 read the specified MutatingWebhookConfiguration # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_mutating_webhook_configuration(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the MutatingWebhookConfiguration (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1MutatingWebhookConfiguration If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501 else: (data) = self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501 return data
[ "def", "read_mutating_webhook_configuration", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "read_mutating_webhook_configuration_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "read_mutating_webhook_configuration_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
56.375
0.001453
def generate_sql_markdown(jvm, path): """ Generates a markdown file after listing the function information. The output file is created in `path`. Expected output: ### NAME USAGE **Arguments:** ARGUMENTS **Examples:** ``` EXAMPLES ``` **Note:** NOTE **Since:** SINCE **Deprecated:** DEPRECATED <br/> """ with open(path, 'w') as mdfile: for info in _list_function_infos(jvm): name = info.name usage = _make_pretty_usage(info.usage) arguments = _make_pretty_arguments(info.arguments) examples = _make_pretty_examples(info.examples) note = _make_pretty_note(info.note) since = info.since deprecated = _make_pretty_deprecated(info.deprecated) mdfile.write("### %s\n\n" % name) if usage is not None: mdfile.write("%s\n\n" % usage.strip()) if arguments is not None: mdfile.write(arguments) if examples is not None: mdfile.write(examples) if note is not None: mdfile.write(note) if since is not None and since != "": mdfile.write("**Since:** %s\n\n" % since.strip()) if deprecated is not None: mdfile.write(deprecated) mdfile.write("<br/>\n\n")
[ "def", "generate_sql_markdown", "(", "jvm", ",", "path", ")", ":", "with", "open", "(", "path", ",", "'w'", ")", "as", "mdfile", ":", "for", "info", "in", "_list_function_infos", "(", "jvm", ")", ":", "name", "=", "info", ".", "name", "usage", "=", "_make_pretty_usage", "(", "info", ".", "usage", ")", "arguments", "=", "_make_pretty_arguments", "(", "info", ".", "arguments", ")", "examples", "=", "_make_pretty_examples", "(", "info", ".", "examples", ")", "note", "=", "_make_pretty_note", "(", "info", ".", "note", ")", "since", "=", "info", ".", "since", "deprecated", "=", "_make_pretty_deprecated", "(", "info", ".", "deprecated", ")", "mdfile", ".", "write", "(", "\"### %s\\n\\n\"", "%", "name", ")", "if", "usage", "is", "not", "None", ":", "mdfile", ".", "write", "(", "\"%s\\n\\n\"", "%", "usage", ".", "strip", "(", ")", ")", "if", "arguments", "is", "not", "None", ":", "mdfile", ".", "write", "(", "arguments", ")", "if", "examples", "is", "not", "None", ":", "mdfile", ".", "write", "(", "examples", ")", "if", "note", "is", "not", "None", ":", "mdfile", ".", "write", "(", "note", ")", "if", "since", "is", "not", "None", "and", "since", "!=", "\"\"", ":", "mdfile", ".", "write", "(", "\"**Since:** %s\\n\\n\"", "%", "since", ".", "strip", "(", ")", ")", "if", "deprecated", "is", "not", "None", ":", "mdfile", ".", "write", "(", "deprecated", ")", "mdfile", ".", "write", "(", "\"<br/>\\n\\n\"", ")" ]
23.413793
0.001413
def visit(self, node, abort=abort_visit): """Visit a node.""" method = 'visit_' + node.__class__.__name__ visitor = getattr(self, method, abort) return visitor(node)
[ "def", "visit", "(", "self", ",", "node", ",", "abort", "=", "abort_visit", ")", ":", "method", "=", "'visit_'", "+", "node", ".", "__class__", ".", "__name__", "visitor", "=", "getattr", "(", "self", ",", "method", ",", "abort", ")", "return", "visitor", "(", "node", ")" ]
38.6
0.010152
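The getattr dispatch above is the classic visitor pattern: handlers are looked up by node class name, with `abort` as the fallback. A self-contained illustration, where Num and Printer are hypothetical stand-ins:

class Num:
    def __init__(self, value):
        self.value = value

class Printer:
    def visit(self, node, abort=lambda node: print('no handler')):
        method = 'visit_' + node.__class__.__name__
        return getattr(self, method, abort)(node)

    def visit_Num(self, node):
        print('Num:', node.value)

Printer().visit(Num(42))   # -> Num: 42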
def v1_fc_put(request, response, visid_to_dbid, store, cid): '''Store a single feature collection. The route for this endpoint is: ``PUT /dossier/v1/feature-collections/<content_id>``. ``content_id`` is the id to associate with the given feature collection. The feature collection should be in the request body serialized as JSON. This endpoint returns status ``201`` upon successful storage. An existing feature collection with id ``content_id`` is overwritten. ''' fc = FeatureCollection.from_dict(json.load(request.body)) store.put([(visid_to_dbid(cid), fc)]) response.status = 201
[ "def", "v1_fc_put", "(", "request", ",", "response", ",", "visid_to_dbid", ",", "store", ",", "cid", ")", ":", "fc", "=", "FeatureCollection", ".", "from_dict", "(", "json", ".", "load", "(", "request", ".", "body", ")", ")", "store", ".", "put", "(", "[", "(", "visid_to_dbid", "(", "cid", ")", ",", "fc", ")", "]", ")", "response", ".", "status", "=", "201" ]
36.647059
0.001565
def items_get(self, **kwargs):
        '''taobao.increment.items.get: Get item-change notifications

        Apps that have enabled the proactive-notification service can use this
        interface to retrieve item-change notification messages. The
        recommended interval between incremental fetches is half an hour.'''
        request = TOPRequest('taobao.increment.items.get')
        for k, v in kwargs.iteritems():
            if k not in ('status', 'nick', 'start_modified', 'end_modified', 'page_no', 'page_size') and v==None:
                continue
            request[k] = v
        self.create(self.execute(request), fields=['notify_items', 'total_results'], models={'notify_items':NotifyItem})
        return self.notify_items
[ "def", "items_get", "(", "self", ",", "*", "*", "kwargs", ")", ":", "request", "=", "TOPRequest", "(", "'taobao.increment.items.get'", ")", "for", "k", ",", "v", "in", "kwargs", ".", "iteritems", "(", ")", ":", "if", "k", "not", "in", "(", "'status'", ",", "'nick'", ",", "'start_modified'", ",", "'end_modified'", ",", "'page_no'", ",", "'page_size'", ")", "and", "v", "==", "None", ":", "continue", "request", "[", "k", "]", "=", "v", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ")", ",", "fields", "=", "[", "'notify_items'", ",", "'total_results'", "]", ",", "models", "=", "{", "'notify_items'", ":", "NotifyItem", "}", ")", "return", "self", ".", "notify_items" ]
54.3
0.016304
def parse_type_reference(lexer: Lexer) -> TypeNode: """Type: NamedType or ListType or NonNullType""" start = lexer.token if expect_optional_token(lexer, TokenKind.BRACKET_L): type_ = parse_type_reference(lexer) expect_token(lexer, TokenKind.BRACKET_R) type_ = ListTypeNode(type=type_, loc=loc(lexer, start)) else: type_ = parse_named_type(lexer) if expect_optional_token(lexer, TokenKind.BANG): return NonNullTypeNode(type=type_, loc=loc(lexer, start)) return type_
[ "def", "parse_type_reference", "(", "lexer", ":", "Lexer", ")", "->", "TypeNode", ":", "start", "=", "lexer", ".", "token", "if", "expect_optional_token", "(", "lexer", ",", "TokenKind", ".", "BRACKET_L", ")", ":", "type_", "=", "parse_type_reference", "(", "lexer", ")", "expect_token", "(", "lexer", ",", "TokenKind", ".", "BRACKET_R", ")", "type_", "=", "ListTypeNode", "(", "type", "=", "type_", ",", "loc", "=", "loc", "(", "lexer", ",", "start", ")", ")", "else", ":", "type_", "=", "parse_named_type", "(", "lexer", ")", "if", "expect_optional_token", "(", "lexer", ",", "TokenKind", ".", "BANG", ")", ":", "return", "NonNullTypeNode", "(", "type", "=", "type_", ",", "loc", "=", "loc", "(", "lexer", ",", "start", ")", ")", "return", "type_" ]
43.166667
0.00189
def _new_stream(self, idx): '''Randomly select and create a stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # instantiate if self.rate is not None: n_stream = 1 + self.rng.poisson(lam=self.rate) else: n_stream = None # If we're sampling without replacement, zero this one out # This effectively disables this stream as soon as it is chosen, # preventing it from being chosen again (unless it is revived) if not self.with_replacement: self.distribution_[idx] = 0.0 # Correct the distribution if (self.distribution_ > 0).any(): self.distribution_[:] /= np.sum(self.distribution_) return (self.streamers[idx].iterate(max_iter=n_stream), self.weights[idx])
[ "def", "_new_stream", "(", "self", ",", "idx", ")", ":", "# instantiate", "if", "self", ".", "rate", "is", "not", "None", ":", "n_stream", "=", "1", "+", "self", ".", "rng", ".", "poisson", "(", "lam", "=", "self", ".", "rate", ")", "else", ":", "n_stream", "=", "None", "# If we're sampling without replacement, zero this one out", "# This effectively disables this stream as soon as it is chosen,", "# preventing it from being chosen again (unless it is revived)", "if", "not", "self", ".", "with_replacement", ":", "self", ".", "distribution_", "[", "idx", "]", "=", "0.0", "# Correct the distribution", "if", "(", "self", ".", "distribution_", ">", "0", ")", ".", "any", "(", ")", ":", "self", ".", "distribution_", "[", ":", "]", "/=", "np", ".", "sum", "(", "self", ".", "distribution_", ")", "return", "(", "self", ".", "streamers", "[", "idx", "]", ".", "iterate", "(", "max_iter", "=", "n_stream", ")", ",", "self", ".", "weights", "[", "idx", "]", ")" ]
33.923077
0.002205
def load_entry_point_group(self, entry_point_group): """Load actions from an entry point group.""" for ep in pkg_resources.iter_entry_points(group=entry_point_group): self.register(ep.name, ep.load())
[ "def", "load_entry_point_group", "(", "self", ",", "entry_point_group", ")", ":", "for", "ep", "in", "pkg_resources", ".", "iter_entry_points", "(", "group", "=", "entry_point_group", ")", ":", "self", ".", "register", "(", "ep", ".", "name", ",", "ep", ".", "load", "(", ")", ")" ]
56.25
0.008772
def parse(bin_payload, recipient, update_hash ): """ # NOTE: first three bytes were stripped """ fqn = bin_payload if not is_name_valid( fqn ): log.warning("Name '%s' is invalid" % fqn) return None return { 'opcode': 'NAME_IMPORT', 'name': fqn, 'recipient': recipient, 'value_hash': update_hash }
[ "def", "parse", "(", "bin_payload", ",", "recipient", ",", "update_hash", ")", ":", "fqn", "=", "bin_payload", "if", "not", "is_name_valid", "(", "fqn", ")", ":", "log", ".", "warning", "(", "\"Name '%s' is invalid\"", "%", "fqn", ")", "return", "None", "return", "{", "'opcode'", ":", "'NAME_IMPORT'", ",", "'name'", ":", "fqn", ",", "'recipient'", ":", "recipient", ",", "'value_hash'", ":", "update_hash", "}" ]
22.8125
0.018421
def _makeHttpRequest(self, method, route, payload): """ Make an HTTP Request for the API endpoint. This method wraps the logic about doing failure retry and passes off the actual work of doing an HTTP request to another method.""" url = self._constructUrl(route) log.debug('Full URL used is: %s', url) hawkExt = self.makeHawkExt() # Serialize payload if given if payload is not None: payload = utils.dumpJson(payload) # Do a loop of retries retry = -1 # we plus first in the loop, and attempt 1 is retry 0 retries = self.options['maxRetries'] while retry < retries: retry += 1 # if this isn't the first retry then we sleep if retry > 0: time.sleep(utils.calculateSleepTime(retry)) # Construct header if self._hasCredentials(): sender = mohawk.Sender( credentials={ 'id': self.options['credentials']['clientId'], 'key': self.options['credentials']['accessToken'], 'algorithm': 'sha256', }, ext=hawkExt if hawkExt else {}, url=url, content=payload if payload else '', content_type='application/json' if payload else '', method=method, ) headers = {'Authorization': sender.request_header} else: log.debug('Not using hawk!') headers = {} if payload: # Set header for JSON if payload is given, note that we serialize # outside this loop. headers['Content-Type'] = 'application/json' log.debug('Making attempt %d', retry) try: response = utils.makeSingleHttpRequest(method, url, payload, headers) except requests.exceptions.RequestException as rerr: if retry < retries: log.warn('Retrying because of: %s' % rerr) continue # raise a connection exception raise exceptions.TaskclusterConnectionError( "Failed to establish connection", superExc=rerr ) # Handle non 2xx status code and retry if possible status = response.status_code if status == 204: return None # Catch retryable errors and go to the beginning of the loop # to do the retry if 500 <= status and status < 600 and retry < retries: log.warn('Retrying because of a %s status code' % status) continue # Throw errors for non-retryable errors if status < 200 or status >= 300: data = {} try: data = response.json() except Exception: pass # Ignore JSON errors in error messages # Find error message message = "Unknown Server Error" if isinstance(data, dict): message = data.get('message') else: if status == 401: message = "Authentication Error" elif status == 500: message = "Internal Server Error" # Raise TaskclusterAuthFailure if this is an auth issue if status == 401: raise exceptions.TaskclusterAuthFailure( message, status_code=status, body=data, superExc=None ) # Raise TaskclusterRestFailure for all other issues raise exceptions.TaskclusterRestFailure( message, status_code=status, body=data, superExc=None ) # Try to load JSON try: return response.json() except ValueError: return {"response": response} # This code-path should be unreachable assert False, "Error from last retry should have been raised!"
[ "def", "_makeHttpRequest", "(", "self", ",", "method", ",", "route", ",", "payload", ")", ":", "url", "=", "self", ".", "_constructUrl", "(", "route", ")", "log", ".", "debug", "(", "'Full URL used is: %s'", ",", "url", ")", "hawkExt", "=", "self", ".", "makeHawkExt", "(", ")", "# Serialize payload if given", "if", "payload", "is", "not", "None", ":", "payload", "=", "utils", ".", "dumpJson", "(", "payload", ")", "# Do a loop of retries", "retry", "=", "-", "1", "# we plus first in the loop, and attempt 1 is retry 0", "retries", "=", "self", ".", "options", "[", "'maxRetries'", "]", "while", "retry", "<", "retries", ":", "retry", "+=", "1", "# if this isn't the first retry then we sleep", "if", "retry", ">", "0", ":", "time", ".", "sleep", "(", "utils", ".", "calculateSleepTime", "(", "retry", ")", ")", "# Construct header", "if", "self", ".", "_hasCredentials", "(", ")", ":", "sender", "=", "mohawk", ".", "Sender", "(", "credentials", "=", "{", "'id'", ":", "self", ".", "options", "[", "'credentials'", "]", "[", "'clientId'", "]", ",", "'key'", ":", "self", ".", "options", "[", "'credentials'", "]", "[", "'accessToken'", "]", ",", "'algorithm'", ":", "'sha256'", ",", "}", ",", "ext", "=", "hawkExt", "if", "hawkExt", "else", "{", "}", ",", "url", "=", "url", ",", "content", "=", "payload", "if", "payload", "else", "''", ",", "content_type", "=", "'application/json'", "if", "payload", "else", "''", ",", "method", "=", "method", ",", ")", "headers", "=", "{", "'Authorization'", ":", "sender", ".", "request_header", "}", "else", ":", "log", ".", "debug", "(", "'Not using hawk!'", ")", "headers", "=", "{", "}", "if", "payload", ":", "# Set header for JSON if payload is given, note that we serialize", "# outside this loop.", "headers", "[", "'Content-Type'", "]", "=", "'application/json'", "log", ".", "debug", "(", "'Making attempt %d'", ",", "retry", ")", "try", ":", "response", "=", "utils", ".", "makeSingleHttpRequest", "(", "method", ",", "url", ",", "payload", ",", "headers", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "rerr", ":", "if", "retry", "<", "retries", ":", "log", ".", "warn", "(", "'Retrying because of: %s'", "%", "rerr", ")", "continue", "# raise a connection exception", "raise", "exceptions", ".", "TaskclusterConnectionError", "(", "\"Failed to establish connection\"", ",", "superExc", "=", "rerr", ")", "# Handle non 2xx status code and retry if possible", "status", "=", "response", ".", "status_code", "if", "status", "==", "204", ":", "return", "None", "# Catch retryable errors and go to the beginning of the loop", "# to do the retry", "if", "500", "<=", "status", "and", "status", "<", "600", "and", "retry", "<", "retries", ":", "log", ".", "warn", "(", "'Retrying because of a %s status code'", "%", "status", ")", "continue", "# Throw errors for non-retryable errors", "if", "status", "<", "200", "or", "status", ">=", "300", ":", "data", "=", "{", "}", "try", ":", "data", "=", "response", ".", "json", "(", ")", "except", "Exception", ":", "pass", "# Ignore JSON errors in error messages", "# Find error message", "message", "=", "\"Unknown Server Error\"", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "message", "=", "data", ".", "get", "(", "'message'", ")", "else", ":", "if", "status", "==", "401", ":", "message", "=", "\"Authentication Error\"", "elif", "status", "==", "500", ":", "message", "=", "\"Internal Server Error\"", "# Raise TaskclusterAuthFailure if this is an auth issue", "if", "status", "==", "401", ":", "raise", 
"exceptions", ".", "TaskclusterAuthFailure", "(", "message", ",", "status_code", "=", "status", ",", "body", "=", "data", ",", "superExc", "=", "None", ")", "# Raise TaskclusterRestFailure for all other issues", "raise", "exceptions", ".", "TaskclusterRestFailure", "(", "message", ",", "status_code", "=", "status", ",", "body", "=", "data", ",", "superExc", "=", "None", ")", "# Try to load JSON", "try", ":", "return", "response", ".", "json", "(", ")", "except", "ValueError", ":", "return", "{", "\"response\"", ":", "response", "}", "# This code-path should be unreachable", "assert", "False", ",", "\"Error from last retry should have been raised!\"" ]
39.090909
0.000907
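The retry loop above sleeps according to utils.calculateSleepTime, whose exact shape is not shown here. A plausible jittered exponential backoff in the same spirit; delay_factor and max_delay are assumptions, not taskcluster's actual constants:

import random

def calculate_sleep_time(attempt, delay_factor=0.1, max_delay=30):
    # ~delay_factor * 2**attempt, capped and jittered so that many clients
    # retrying the same outage do not wake up in lockstep
    if attempt <= 0:
        return 0
    delay = min(delay_factor * (2 ** attempt), max_delay)
    return delay * random.uniform(0.5, 1.0)

print([round(calculate_sleep_time(a), 3) for a in range(1, 6)])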
def _init_stub(self, stub_init, **stub_kwargs): """Initializes all other stubs for consistency's sake""" getattr(self.testbed, stub_init, lambda **kwargs: None)(**stub_kwargs)
[ "def", "_init_stub", "(", "self", ",", "stub_init", ",", "*", "*", "stub_kwargs", ")", ":", "getattr", "(", "self", ".", "testbed", ",", "stub_init", ",", "lambda", "*", "*", "kwargs", ":", "None", ")", "(", "*", "*", "stub_kwargs", ")" ]
63
0.010471
def stmts_from_path(path, model, stmts): """Return source Statements corresponding to a path in a model. Parameters ---------- path : list[tuple[str, int]] A list of tuples where the first element of the tuple is the name of a rule, and the second is the associated polarity along a path. model : pysb.core.Model A PySB model which contains the rules along the path. stmts : list[indra.statements.Statement] A list of INDRA Statements from which the model was assembled. Returns ------- path_stmts : list[indra.statements.Statement] The Statements from which the rules along the path were obtained. """ path_stmts = [] for path_rule, sign in path: for rule in model.rules: if rule.name == path_rule: stmt = stmt_from_rule(path_rule, model, stmts) assert stmt is not None path_stmts.append(stmt) return path_stmts
[ "def", "stmts_from_path", "(", "path", ",", "model", ",", "stmts", ")", ":", "path_stmts", "=", "[", "]", "for", "path_rule", ",", "sign", "in", "path", ":", "for", "rule", "in", "model", ".", "rules", ":", "if", "rule", ".", "name", "==", "path_rule", ":", "stmt", "=", "stmt_from_rule", "(", "path_rule", ",", "model", ",", "stmts", ")", "assert", "stmt", "is", "not", "None", "path_stmts", ".", "append", "(", "stmt", ")", "return", "path_stmts" ]
35.407407
0.001018
def initialise(self): """ Initialise this data repository, creating any necessary directories and file paths. """ self._checkWriteMode() self._createSystemTable() self._createNetworkTables() self._createOntologyTable() self._createReferenceSetTable() self._createReferenceTable() self._createDatasetTable() self._createReadGroupSetTable() self._createReadGroupTable() self._createCallSetTable() self._createVariantSetTable() self._createVariantAnnotationSetTable() self._createFeatureSetTable() self._createContinuousSetTable() self._createBiosampleTable() self._createIndividualTable() self._createPhenotypeAssociationSetTable() self._createRnaQuantificationSetTable()
[ "def", "initialise", "(", "self", ")", ":", "self", ".", "_checkWriteMode", "(", ")", "self", ".", "_createSystemTable", "(", ")", "self", ".", "_createNetworkTables", "(", ")", "self", ".", "_createOntologyTable", "(", ")", "self", ".", "_createReferenceSetTable", "(", ")", "self", ".", "_createReferenceTable", "(", ")", "self", ".", "_createDatasetTable", "(", ")", "self", ".", "_createReadGroupSetTable", "(", ")", "self", ".", "_createReadGroupTable", "(", ")", "self", ".", "_createCallSetTable", "(", ")", "self", ".", "_createVariantSetTable", "(", ")", "self", ".", "_createVariantAnnotationSetTable", "(", ")", "self", ".", "_createFeatureSetTable", "(", ")", "self", ".", "_createContinuousSetTable", "(", ")", "self", ".", "_createBiosampleTable", "(", ")", "self", ".", "_createIndividualTable", "(", ")", "self", ".", "_createPhenotypeAssociationSetTable", "(", ")", "self", ".", "_createRnaQuantificationSetTable", "(", ")" ]
35.782609
0.002367
def transform(self, sequences, mode='clip'):
        """Transform a list of sequences to internal indexing

        Recall that `sequences` can be arbitrary labels, whereas ``transmat_``
        and ``countsmat_`` are indexed with integers between 0 and
        ``n_states - 1``. This method maps a set of sequences from the labels
        onto this internal indexing.

        Parameters
        ----------
        sequences : list of array-like
            List of sequences, or a single sequence. Each sequence should be a
            1D iterable of state labels. Labels can be integers, strings, or
            other orderable objects.
        mode : {'clip', 'fill'}
            Method by which to treat labels in `sequences` which do not have
            a corresponding index. This can be due, for example, to the ergodic
            trimming step.

           ``clip``
               Unmapped labels are removed during transform. If they occur
               at the beginning or end of a sequence, the resulting
               transformed sequence will be shortened. If they occur in the
               middle of a sequence, that sequence will be broken into two
               (or more) sequences. (Default)
           ``fill``
               Unmapped labels will be replaced with NaN, to signal missing
               data. [The use of NaN to signal missing data is not fantastic,
               but it's consistent with current behavior of the ``pandas``
               library.]

        Returns
        -------
        mapped_sequences : list
            List of sequences in internal indexing
        """
        if mode not in ['clip', 'fill']:
            raise ValueError('mode must be one of ["clip", "fill"]: %s' % mode)

        sequences = list_of_1d(sequences)

        result = []
        for y in sequences:
            if mode == 'fill':
                result.append(self.partial_transform(y, mode))
            elif mode == 'clip':
                result.extend(self.partial_transform(y, mode))
            else:
                raise RuntimeError()

        return result
[ "def", "transform", "(", "self", ",", "sequences", ",", "mode", "=", "'clip'", ")", ":", "if", "mode", "not", "in", "[", "'clip'", ",", "'fill'", "]", ":", "raise", "ValueError", "(", "'mode must be one of [\"clip\", \"fill\"]: %s'", "%", "mode", ")", "sequences", "=", "list_of_1d", "(", "sequences", ")", "result", "=", "[", "]", "for", "y", "in", "sequences", ":", "if", "mode", "==", "'fill'", ":", "result", ".", "append", "(", "self", ".", "partial_transform", "(", "y", ",", "mode", ")", ")", "elif", "mode", "==", "'clip'", ":", "result", ".", "extend", "(", "self", ".", "partial_transform", "(", "y", ",", "mode", ")", ")", "else", ":", "raise", "RuntimeError", "(", ")", "return", "result" ]
40.96
0.000954
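The clip/fill semantics can be illustrated without the estimator itself; `mapping` below is a hypothetical label-to-index table from which the label 'C' was trimmed:

import numpy as np

mapping = {'A': 0, 'B': 1}

def partial_transform(seq, mapping, mode='clip'):
    if mode == 'fill':
        # unmapped labels become NaN; sequence length is preserved
        return np.array([mapping.get(s, np.nan) for s in seq], dtype=float)
    # mode == 'clip': break the sequence at unmapped labels
    out, current = [], []
    for s in seq:
        if s in mapping:
            current.append(mapping[s])
        elif current:
            out.append(np.array(current))
            current = []
    if current:
        out.append(np.array(current))
    return out

print(partial_transform(['A', 'C', 'B', 'B'], mapping, mode='fill'))
# [ 0. nan  1.  1.]
print(partial_transform(['A', 'C', 'B', 'B'], mapping, mode='clip'))
# [array([0]), array([1, 1])]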
def export_icon(self, icon, size, color='black', scale='auto', filename=None, export_dir='exported'): """ Exports given icon with provided parameters. If the desired icon size is less than 150x150 pixels, we will first create a 150x150 pixels image and then scale it down, so that it's much less likely that the edges of the icon end up cropped. :param icon: valid icon name :param filename: name of the output file :param size: icon size in pixels :param color: color name or hex value :param scale: scaling factor between 0 and 1, or 'auto' for automatic scaling :param export_dir: path to export directory """ org_size = size size = max(150, size) image = Image.new("RGBA", (size, size), color=(0, 0, 0, 0)) draw = ImageDraw.Draw(image) if scale == 'auto': scale_factor = 1 else: scale_factor = float(scale) font = ImageFont.truetype(self.ttf_file, int(size * scale_factor)) width, height = draw.textsize(self.css_icons[icon], font=font) # If auto-scaling is enabled, we need to make sure the resulting # graphic fits inside the boundary. The values are rounded and may be # off by a pixel or two, so we may need to do a few iterations. # The use of a decrementing multiplication factor protects us from # getting into an infinite loop. if scale == 'auto': iteration = 0 factor = 1 while True: width, height = draw.textsize(self.css_icons[icon], font=font) # Check if the image fits dim = max(width, height) if dim > size: font = ImageFont.truetype(self.ttf_file, int(size * size/dim * factor)) else: break # Adjust the factor every two iterations iteration += 1 if iteration % 2 == 0: factor *= 0.99 draw.text((float(size - width) / 2, float(size - height) / 2), self.css_icons[icon], font=font, fill=color) # Get bounding box bbox = image.getbbox() # Create an alpha mask image_mask = Image.new("L", (size, size), 0) draw_mask = ImageDraw.Draw(image_mask) # Draw the icon on the mask draw_mask.text((float(size - width) / 2, float(size - height) / 2), self.css_icons[icon], font=font, fill=255) # Create a solid color image and apply the mask icon_image = Image.new("RGBA", (size, size), color) icon_image.putalpha(image_mask) if bbox: icon_image = icon_image.crop(bbox) border_w = int((size - (bbox[2] - bbox[0])) / 2) border_h = int((size - (bbox[3] - bbox[1])) / 2) # Create output image out_image = Image.new("RGBA", (size, size), (0, 0, 0, 0)) out_image.paste(icon_image, (border_w, border_h)) # If necessary, scale the image to the target size if org_size != size: out_image = out_image.resize((org_size, org_size), Image.ANTIALIAS) # Make sure export directory exists if not os.path.exists(export_dir): os.makedirs(export_dir) # Default filename if not filename: filename = icon + '.png' # Save file out_image.save(os.path.join(export_dir, filename))
[ "def", "export_icon", "(", "self", ",", "icon", ",", "size", ",", "color", "=", "'black'", ",", "scale", "=", "'auto'", ",", "filename", "=", "None", ",", "export_dir", "=", "'exported'", ")", ":", "org_size", "=", "size", "size", "=", "max", "(", "150", ",", "size", ")", "image", "=", "Image", ".", "new", "(", "\"RGBA\"", ",", "(", "size", ",", "size", ")", ",", "color", "=", "(", "0", ",", "0", ",", "0", ",", "0", ")", ")", "draw", "=", "ImageDraw", ".", "Draw", "(", "image", ")", "if", "scale", "==", "'auto'", ":", "scale_factor", "=", "1", "else", ":", "scale_factor", "=", "float", "(", "scale", ")", "font", "=", "ImageFont", ".", "truetype", "(", "self", ".", "ttf_file", ",", "int", "(", "size", "*", "scale_factor", ")", ")", "width", ",", "height", "=", "draw", ".", "textsize", "(", "self", ".", "css_icons", "[", "icon", "]", ",", "font", "=", "font", ")", "# If auto-scaling is enabled, we need to make sure the resulting", "# graphic fits inside the boundary. The values are rounded and may be", "# off by a pixel or two, so we may need to do a few iterations.", "# The use of a decrementing multiplication factor protects us from", "# getting into an infinite loop.", "if", "scale", "==", "'auto'", ":", "iteration", "=", "0", "factor", "=", "1", "while", "True", ":", "width", ",", "height", "=", "draw", ".", "textsize", "(", "self", ".", "css_icons", "[", "icon", "]", ",", "font", "=", "font", ")", "# Check if the image fits", "dim", "=", "max", "(", "width", ",", "height", ")", "if", "dim", ">", "size", ":", "font", "=", "ImageFont", ".", "truetype", "(", "self", ".", "ttf_file", ",", "int", "(", "size", "*", "size", "/", "dim", "*", "factor", ")", ")", "else", ":", "break", "# Adjust the factor every two iterations", "iteration", "+=", "1", "if", "iteration", "%", "2", "==", "0", ":", "factor", "*=", "0.99", "draw", ".", "text", "(", "(", "float", "(", "size", "-", "width", ")", "/", "2", ",", "float", "(", "size", "-", "height", ")", "/", "2", ")", ",", "self", ".", "css_icons", "[", "icon", "]", ",", "font", "=", "font", ",", "fill", "=", "color", ")", "# Get bounding box", "bbox", "=", "image", ".", "getbbox", "(", ")", "# Create an alpha mask", "image_mask", "=", "Image", ".", "new", "(", "\"L\"", ",", "(", "size", ",", "size", ")", ",", "0", ")", "draw_mask", "=", "ImageDraw", ".", "Draw", "(", "image_mask", ")", "# Draw the icon on the mask", "draw_mask", ".", "text", "(", "(", "float", "(", "size", "-", "width", ")", "/", "2", ",", "float", "(", "size", "-", "height", ")", "/", "2", ")", ",", "self", ".", "css_icons", "[", "icon", "]", ",", "font", "=", "font", ",", "fill", "=", "255", ")", "# Create a solid color image and apply the mask", "icon_image", "=", "Image", ".", "new", "(", "\"RGBA\"", ",", "(", "size", ",", "size", ")", ",", "color", ")", "icon_image", ".", "putalpha", "(", "image_mask", ")", "if", "bbox", ":", "icon_image", "=", "icon_image", ".", "crop", "(", "bbox", ")", "border_w", "=", "int", "(", "(", "size", "-", "(", "bbox", "[", "2", "]", "-", "bbox", "[", "0", "]", ")", ")", "/", "2", ")", "border_h", "=", "int", "(", "(", "size", "-", "(", "bbox", "[", "3", "]", "-", "bbox", "[", "1", "]", ")", ")", "/", "2", ")", "# Create output image", "out_image", "=", "Image", ".", "new", "(", "\"RGBA\"", ",", "(", "size", ",", "size", ")", ",", "(", "0", ",", "0", ",", "0", ",", "0", ")", ")", "out_image", ".", "paste", "(", "icon_image", ",", "(", "border_w", ",", "border_h", ")", ")", "# If necessary, scale the image to the target size", "if", "org_size", "!=", 
"size", ":", "out_image", "=", "out_image", ".", "resize", "(", "(", "org_size", ",", "org_size", ")", ",", "Image", ".", "ANTIALIAS", ")", "# Make sure export directory exists", "if", "not", "os", ".", "path", ".", "exists", "(", "export_dir", ")", ":", "os", ".", "makedirs", "(", "export_dir", ")", "# Default filename", "if", "not", "filename", ":", "filename", "=", "icon", "+", "'.png'", "# Save file", "out_image", ".", "save", "(", "os", ".", "path", ".", "join", "(", "export_dir", ",", "filename", ")", ")" ]
35.908163
0.00083
def terminate(self): """Override of PantsService.terminate() that cleans up when the Pailgun server is terminated.""" # Tear down the Pailgun TCPServer. if self.pailgun: self.pailgun.server_close() super(PailgunService, self).terminate()
[ "def", "terminate", "(", "self", ")", ":", "# Tear down the Pailgun TCPServer.", "if", "self", ".", "pailgun", ":", "self", ".", "pailgun", ".", "server_close", "(", ")", "super", "(", "PailgunService", ",", "self", ")", ".", "terminate", "(", ")" ]
36.285714
0.011538
def get(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> T:
        """Gets an object matching a query from the data source.

        Args:
            type: The type of the object being requested.
            query: The query being requested.
            context: The context for the extraction (mutable).

        Returns:
            The requested object.
        """
        pass
[ "def", "get", "(", "self", ",", "type", ":", "Type", "[", "T", "]", ",", "query", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "context", ":", "PipelineContext", "=", "None", ")", "->", "T", ":", "pass" ]
30
0.008824
def _pull_out_unaffected_blocks_lhs(lhs, rest, out_port, in_port): """In a self-Feedback of a series product, where the left-most operand is reducible, pull all non-trivial blocks outside of the feedback. Args: lhs (Circuit): The reducible circuit rest (tuple): The other SeriesProduct operands out_port (int): The feedback output port index in_port (int): The feedback input port index Returns: Circuit: The simplified circuit """ _, block_index = lhs.index_in_block(out_port) bs = lhs.block_structure nbefore, nblock, nafter = (sum(bs[:block_index]), bs[block_index], sum(bs[block_index + 1:])) before, block, after = lhs.get_blocks((nbefore, nblock, nafter)) if before != cid(nbefore) or after != cid(nafter): outer_lhs = before + cid(nblock - 1) + after inner_lhs = cid(nbefore) + block + cid(nafter) return outer_lhs << Feedback.create( SeriesProduct.create(inner_lhs, *rest), out_port=out_port, in_port=in_port) elif block == cid(nblock): outer_lhs = before + cid(nblock - 1) + after return outer_lhs << Feedback.create( SeriesProduct.create(*rest), out_port=out_port, in_port=in_port) raise CannotSimplify()
[ "def", "_pull_out_unaffected_blocks_lhs", "(", "lhs", ",", "rest", ",", "out_port", ",", "in_port", ")", ":", "_", ",", "block_index", "=", "lhs", ".", "index_in_block", "(", "out_port", ")", "bs", "=", "lhs", ".", "block_structure", "nbefore", ",", "nblock", ",", "nafter", "=", "(", "sum", "(", "bs", "[", ":", "block_index", "]", ")", ",", "bs", "[", "block_index", "]", ",", "sum", "(", "bs", "[", "block_index", "+", "1", ":", "]", ")", ")", "before", ",", "block", ",", "after", "=", "lhs", ".", "get_blocks", "(", "(", "nbefore", ",", "nblock", ",", "nafter", ")", ")", "if", "before", "!=", "cid", "(", "nbefore", ")", "or", "after", "!=", "cid", "(", "nafter", ")", ":", "outer_lhs", "=", "before", "+", "cid", "(", "nblock", "-", "1", ")", "+", "after", "inner_lhs", "=", "cid", "(", "nbefore", ")", "+", "block", "+", "cid", "(", "nafter", ")", "return", "outer_lhs", "<<", "Feedback", ".", "create", "(", "SeriesProduct", ".", "create", "(", "inner_lhs", ",", "*", "rest", ")", ",", "out_port", "=", "out_port", ",", "in_port", "=", "in_port", ")", "elif", "block", "==", "cid", "(", "nblock", ")", ":", "outer_lhs", "=", "before", "+", "cid", "(", "nblock", "-", "1", ")", "+", "after", "return", "outer_lhs", "<<", "Feedback", ".", "create", "(", "SeriesProduct", ".", "create", "(", "*", "rest", ")", ",", "out_port", "=", "out_port", ",", "in_port", "=", "in_port", ")", "raise", "CannotSimplify", "(", ")" ]
38.057143
0.000732
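The channel bookkeeping above reduces to simple arithmetic over the block structure; with a hypothetical structure bs:

bs = (2, 3, 1)        # hypothetical block structure (channels per block)
block_index = 1       # block containing the feedback port
nbefore = sum(bs[:block_index])       # 2 channels before the block
nblock = bs[block_index]              # 3 channels in the block
nafter = sum(bs[block_index + 1:])    # 1 channel after it
assert nbefore + nblock + nafter == sum(bs)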
def assign_from_ast(node, expr): """ Creates code to assign name (or tuple of names) node from expr This is useful for recreating destructuring assignment behavior, like a, *b = [1,2,3]. """ if isinstance(expr, str): expr = ast.Name(id=expr, ctx=ast.Load()) mod = ast.Module([ast.Assign(targets=[node], value=expr)]) ast.fix_missing_locations(mod) return compile(mod, "<assignment_script>", "exec")
[ "def", "assign_from_ast", "(", "node", ",", "expr", ")", ":", "if", "isinstance", "(", "expr", ",", "str", ")", ":", "expr", "=", "ast", ".", "Name", "(", "id", "=", "expr", ",", "ctx", "=", "ast", ".", "Load", "(", ")", ")", "mod", "=", "ast", ".", "Module", "(", "[", "ast", ".", "Assign", "(", "targets", "=", "[", "node", "]", ",", "value", "=", "expr", ")", "]", ")", "ast", ".", "fix_missing_locations", "(", "mod", ")", "return", "compile", "(", "mod", ",", "\"<assignment_script>\"", ",", "\"exec\"", ")" ]
36
0.002257
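Spelled out end to end, building the target node by hand rather than through the helper (note that on Python 3.8+ ast.Module also requires a type_ignores field, which the function above predates):

import ast

# Recreate  a, *b = values  from AST pieces.
target = ast.Tuple(
    elts=[ast.Name(id='a', ctx=ast.Store()),
          ast.Starred(value=ast.Name(id='b', ctx=ast.Store()),
                      ctx=ast.Store())],
    ctx=ast.Store())
mod = ast.Module(body=[ast.Assign(targets=[target],
                                  value=ast.Name(id='values', ctx=ast.Load()))],
                 type_ignores=[])
ast.fix_missing_locations(mod)
code = compile(mod, '<assignment_script>', 'exec')
ns = {'values': [1, 2, 3]}
exec(code, ns)
print(ns['a'], ns['b'])   # -> 1 [2, 3]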
def createFinalTPEDandTFAM(tped, toReadPrefix, prefix, snpToRemove):
    """Creates the final TPED and TFAM.

    :param tped: a representation of the ``tped`` of duplicated markers.
    :param toReadPrefix: the prefix of the unique files.
    :param prefix: the prefix of the output files.
    :param snpToRemove: the markers to remove.

    :type tped: numpy.array
    :type toReadPrefix: str
    :type prefix: str
    :type snpToRemove: set

    Starts by copying the unique markers' ``tfam`` file to
    ``prefix.final.tfam``. Then, it copies the unique markers' ``tped`` file,
    in which the chosen markers will be appended.

    The final data set will include the unique markers, the chosen markers
    which were completed, and the problematic duplicated markers (for further
    analysis). The markers that were used to complete the chosen ones are not
    present in the final data set.

    """
    # First, copying the tfam
    try:
        shutil.copy(toReadPrefix + ".tfam", prefix + ".final.tfam")
    except IOError:
        msg = "%(toReadPrefix)s.tfam: can't copy file to " \
              "%(prefix)s.final.tfam" % locals()
        raise ProgramError(msg)

    # Next, copy the tped, and append at the end
    try:
        shutil.copy(toReadPrefix + ".tped", prefix + ".final.tped")
    except IOError:
        msg = "%(toReadPrefix)s.tped: can't copy file to " \
              "%(prefix)s.final.tped" % locals()
        raise ProgramError(msg)
    tpedFile = None
    try:
        tpedFile = open(prefix + ".final.tped", "a")
    except IOError:
        msg = "%(prefix)s.final.tped: can't append to file" % locals()
        raise ProgramError(msg)
    for i, row in enumerate(tped):
        if i not in snpToRemove:
            print >>tpedFile, "\t".join(row)
    tpedFile.close()
[ "def", "createFinalTPEDandTFAM", "(", "tped", ",", "toReadPrefix", ",", "prefix", ",", "snpToRemove", ")", ":", "# First, copying the tfam", "try", ":", "shutil", ".", "copy", "(", "toReadPrefix", "+", "\".tfam\"", ",", "prefix", "+", "\".final.tfam\"", ")", "except", "IOError", ":", "msg", "=", "\"%(toReadPrefix)s.tfam: can't copy file to \"", "\"%(prefix)s.final.tfam\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "# Next, copy the tped, and append at the end", "try", ":", "shutil", ".", "copy", "(", "toReadPrefix", "+", "\".tped\"", ",", "prefix", "+", "\".final.tped\"", ")", "except", "IOError", ":", "msg", "=", "\"%(toReadPrefix)s.tped: can't copy fil to \"", "\"%(prefix)s.final.tped\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "tpedFile", "=", "None", "try", ":", "tpedFile", "=", "open", "(", "prefix", "+", "\".final.tped\"", ",", "\"a\"", ")", "except", "IOError", ":", "msg", "=", "\"%(prefix)s.final.tped: can't append to file\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "for", "i", ",", "row", "in", "enumerate", "(", "tped", ")", ":", "if", "i", "not", "in", "snpToRemove", ":", "print", ">>", "tpedFile", ",", "\"\\t\"", ".", "join", "(", "row", ")", "tpedFile", ".", "close", "(", ")" ]
36.583333
0.000555
def mend(self, length): """Cut all branches from this node to its children and adopt all nodes at certain level.""" if length == 0: raise Exception("Can't mend the root !") if length == 1: return self.children = OrderedDict((node.name, node) for node in self.get_level(length)) for child in self.children.values(): child.parent = self
[ "def", "mend", "(", "self", ",", "length", ")", ":", "if", "length", "==", "0", ":", "raise", "Exception", "(", "\"Can't mend the root !\"", ")", "if", "length", "==", "1", ":", "return", "self", ".", "children", "=", "OrderedDict", "(", "(", "node", ".", "name", ",", "node", ")", "for", "node", "in", "self", ".", "get_level", "(", "length", ")", ")", "for", "child", "in", "self", ".", "children", ".", "values", "(", ")", ":", "child", ".", "parent", "=", "self" ]
53.714286
0.015707
def create_ui(self): ''' .. versionchanged:: 0.21.2 Load the builder configuration file using :func:`pkgutil.getdata`, which supports loading from `.zip` archives (e.g., in an app packaged with Py2Exe). ''' builder = gtk.Builder() # Read glade file using `pkgutil` to also support loading from `.zip` # files (e.g., in app packaged with Py2Exe). glade_str = pkgutil.get_data(__name__, 'glade/form_view_dialog.glade') builder.add_from_string(glade_str) self.window = builder.get_object('form_view_dialog') self.vbox_form = builder.get_object('vbox_form') if self.title: self.window.set_title(self.title) if self.short_desc: self.short_label = gtk.Label() self.short_label.set_text(self.short_desc) self.short_label.set_alignment(0, .5) self.vbox_form.pack_start(self.short_label, expand=True, fill=True) if self.long_desc: self.long_label = gtk.Label() self.long_label.set_text(self.long_desc) self.long_label.set_alignment(.1, .5) self.long_expander = gtk.Expander(label='Details') self.long_expander.set_spacing(5) self.long_expander.add(self.long_label) self.vbox_form.pack_start(self.long_expander, expand=True, fill=True) if self.parent is None: self.parent = self.default_parent self.window.set_default_response(gtk.RESPONSE_OK) self.window.set_position(gtk.WIN_POS_CENTER_ON_PARENT) if self.parent: self.window.set_transient_for(self.parent) self.window.show_all()
[ "def", "create_ui", "(", "self", ")", ":", "builder", "=", "gtk", ".", "Builder", "(", ")", "# Read glade file using `pkgutil` to also support loading from `.zip`", "# files (e.g., in app packaged with Py2Exe).", "glade_str", "=", "pkgutil", ".", "get_data", "(", "__name__", ",", "'glade/form_view_dialog.glade'", ")", "builder", ".", "add_from_string", "(", "glade_str", ")", "self", ".", "window", "=", "builder", ".", "get_object", "(", "'form_view_dialog'", ")", "self", ".", "vbox_form", "=", "builder", ".", "get_object", "(", "'vbox_form'", ")", "if", "self", ".", "title", ":", "self", ".", "window", ".", "set_title", "(", "self", ".", "title", ")", "if", "self", ".", "short_desc", ":", "self", ".", "short_label", "=", "gtk", ".", "Label", "(", ")", "self", ".", "short_label", ".", "set_text", "(", "self", ".", "short_desc", ")", "self", ".", "short_label", ".", "set_alignment", "(", "0", ",", ".5", ")", "self", ".", "vbox_form", ".", "pack_start", "(", "self", ".", "short_label", ",", "expand", "=", "True", ",", "fill", "=", "True", ")", "if", "self", ".", "long_desc", ":", "self", ".", "long_label", "=", "gtk", ".", "Label", "(", ")", "self", ".", "long_label", ".", "set_text", "(", "self", ".", "long_desc", ")", "self", ".", "long_label", ".", "set_alignment", "(", ".1", ",", ".5", ")", "self", ".", "long_expander", "=", "gtk", ".", "Expander", "(", "label", "=", "'Details'", ")", "self", ".", "long_expander", ".", "set_spacing", "(", "5", ")", "self", ".", "long_expander", ".", "add", "(", "self", ".", "long_label", ")", "self", ".", "vbox_form", ".", "pack_start", "(", "self", ".", "long_expander", ",", "expand", "=", "True", ",", "fill", "=", "True", ")", "if", "self", ".", "parent", "is", "None", ":", "self", ".", "parent", "=", "self", ".", "default_parent", "self", ".", "window", ".", "set_default_response", "(", "gtk", ".", "RESPONSE_OK", ")", "self", ".", "window", ".", "set_position", "(", "gtk", ".", "WIN_POS_CENTER_ON_PARENT", ")", "if", "self", ".", "parent", ":", "self", ".", "window", ".", "set_transient_for", "(", "self", ".", "parent", ")", "self", ".", "window", ".", "show_all", "(", ")" ]
45.025641
0.001115
def in_string(objet, pattern):
    """ abstractSearch within a string, ignoring case. """
    return bool(re.search(pattern, str(objet), flags=re.I)) if objet else False
[ "def", "in_string", "(", "objet", ",", "pattern", ")", ":", "return", "bool", "(", "re", ".", "search", "(", "pattern", ",", "str", "(", "objet", ")", ",", "flags", "=", "re", ".", "I", ")", ")", "if", "objet", "else", "False" ]
63.666667
0.015544
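Usage is straightforward; note the falsy short-circuit before the regex is even tried:

assert in_string('Hello World', 'world') is True    # case-insensitive match
assert in_string('', 'world') is False              # falsy objects skip the search
assert in_string(None, 'anything') is False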
def conditional(self, condition, name):
        """Defines a conditional element `name` whose contents exist only if `condition` is true.

        `condition` can contain multiple conditions combined using logical
        expressions (&&, ||).

        Example:
        | Conditional | mycondition == 1 | foo |
        | u8 | myelement | 42 |
        | End conditional |
        | Conditional | condition1 == 1 && condition2 != 2 | bar |
        | u8 | myelement | 8 |
        | End conditional |
        """
        self._message_stack.append(ConditionalTemplate(condition, name, self._current_container))
[ "def", "conditional", "(", "self", ",", "condition", ",", "name", ")", ":", "self", ".", "_message_stack", ".", "append", "(", "ConditionalTemplate", "(", "condition", ",", "name", ",", "self", ".", "_current_container", ")", ")" ]
39.733333
0.008197
def _check_next(self): """Checks if a next message is possible. :returns: True if a next message is possible, otherwise False :rtype: bool """ if self.is_initial: return True if self.before: if self.before_cursor: return True else: return False else: if self.after_cursor: return True else: return False
[ "def", "_check_next", "(", "self", ")", ":", "if", "self", ".", "is_initial", ":", "return", "True", "if", "self", ".", "before", ":", "if", "self", ".", "before_cursor", ":", "return", "True", "else", ":", "return", "False", "else", ":", "if", "self", ".", "after_cursor", ":", "return", "True", "else", ":", "return", "False" ]
19.842105
0.01519
def _find_secondary_anchors(self, residue, heavy_atom, anchor):
        """ Searches through the bond network for atoms bound to the anchor.
        Returns the secondary and tertiary anchors. For example, for CA,
        returns C and O.
        """
        for secondary in self.bonds[residue][anchor.name][1]:
            for tertiary in self.bonds[residue][secondary.name][1]:
                if (tertiary.name != heavy_atom.name and
                        tertiary.name != anchor.name):
                    return (secondary, tertiary)
        return None
[ "def", "_find_secondary_anchors", "(", "self", ",", "residue", ",", "heavy_atom", ",", "anchor", ")", ":", "for", "secondary", "in", "self", ".", "bonds", "[", "residue", "]", "[", "anchor", ".", "name", "]", "[", "1", "]", ":", "for", "tertiary", "in", "self", ".", "bonds", "[", "residue", "]", "[", "secondary", ".", "name", "]", "[", "1", "]", ":", "if", "(", "tertiary", ".", "name", "!=", "heavy_atom", ".", "name", "and", "tertiary", ".", "name", "!=", "anchor", ".", "name", ")", ":", "return", "(", "secondary", ",", "tertiary", ")", "return", "None" ]
39.4
0.01157
def fetch_all_objects_from_db(self, cls: Type[T], table: str, fieldlist: Sequence[str], construct_with_pk: bool, *args) -> List[T]: """Fetches all objects from a table, returning an array of objects of class cls.""" return self.fetch_all_objects_from_db_where( cls, table, fieldlist, construct_with_pk, None, *args)
[ "def", "fetch_all_objects_from_db", "(", "self", ",", "cls", ":", "Type", "[", "T", "]", ",", "table", ":", "str", ",", "fieldlist", ":", "Sequence", "[", "str", "]", ",", "construct_with_pk", ":", "bool", ",", "*", "args", ")", "->", "List", "[", "T", "]", ":", "return", "self", ".", "fetch_all_objects_from_db_where", "(", "cls", ",", "table", ",", "fieldlist", ",", "construct_with_pk", ",", "None", ",", "*", "args", ")" ]
51.2
0.013436
def add_idle(self, callback, *args, **kwds): """Add an idle callback. An idle callback can return True, False or None. These mean: - None: remove the callback (don't reschedule) - False: the callback did no work; reschedule later - True: the callback did some work; reschedule soon If the callback raises an exception, the traceback is logged and the callback is removed. """ self.idlers.append((callback, args, kwds))
[ "def", "add_idle", "(", "self", ",", "callback", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "self", ".", "idlers", ".", "append", "(", "(", "callback", ",", "args", ",", "kwds", ")", ")" ]
34.307692
0.002183
def query(database, query, time_precision='s', chunked=False, user=None, password=None, host=None, port=None): ''' Querying data database The database to query query Query to be executed time_precision Time precision to use ('s', 'm', or 'u') chunked Whether is chunked or not user The user to connect as password The password of the user host The host to connect to port The port to connect to CLI Example: .. code-block:: bash salt '*' influxdb08.query <database> <query> salt '*' influxdb08.query <database> <query> <time_precision> <chunked> <user> <password> <host> <port> ''' client = _client(user=user, password=password, host=host, port=port) client.switch_database(database) return client.query(query, time_precision=time_precision, chunked=chunked)
[ "def", "query", "(", "database", ",", "query", ",", "time_precision", "=", "'s'", ",", "chunked", "=", "False", ",", "user", "=", "None", ",", "password", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "client", "=", "_client", "(", "user", "=", "user", ",", "password", "=", "password", ",", "host", "=", "host", ",", "port", "=", "port", ")", "client", ".", "switch_database", "(", "database", ")", "return", "client", ".", "query", "(", "query", ",", "time_precision", "=", "time_precision", ",", "chunked", "=", "chunked", ")" ]
20.866667
0.002035
def assign(self, experiment): """Assign an experiment.""" self.experiments.append(experiment) self.farms.append(empty_farm)
[ "def", "assign", "(", "self", ",", "experiment", ")", ":", "self", ".", "experiments", ".", "append", "(", "experiment", ")", "self", ".", "farms", ".", "append", "(", "empty_farm", ")" ]
28.8
0.013514
def date_time_between(self, start_date='-30y', end_date='now', tzinfo=None): """ Get a DateTime object based on a random date between two given dates. Accepts date strings that can be recognized by strtotime(). :param start_date Defaults to 30 years ago :param end_date Defaults to "now" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime """ start_date = self._parse_date_time(start_date, tzinfo=tzinfo) end_date = self._parse_date_time(end_date, tzinfo=tzinfo) if end_date - start_date <= 1: ts = start_date + self.generator.random.random() else: ts = self.generator.random.randint(start_date, end_date) if tzinfo is None: return datetime(1970, 1, 1, tzinfo=tzinfo) + timedelta(seconds=ts) else: return ( datetime(1970, 1, 1, tzinfo=tzutc()) + timedelta(seconds=ts) ).astimezone(tzinfo)
[ "def", "date_time_between", "(", "self", ",", "start_date", "=", "'-30y'", ",", "end_date", "=", "'now'", ",", "tzinfo", "=", "None", ")", ":", "start_date", "=", "self", ".", "_parse_date_time", "(", "start_date", ",", "tzinfo", "=", "tzinfo", ")", "end_date", "=", "self", ".", "_parse_date_time", "(", "end_date", ",", "tzinfo", "=", "tzinfo", ")", "if", "end_date", "-", "start_date", "<=", "1", ":", "ts", "=", "start_date", "+", "self", ".", "generator", ".", "random", ".", "random", "(", ")", "else", ":", "ts", "=", "self", ".", "generator", ".", "random", ".", "randint", "(", "start_date", ",", "end_date", ")", "if", "tzinfo", "is", "None", ":", "return", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "tzinfo", ")", "+", "timedelta", "(", "seconds", "=", "ts", ")", "else", ":", "return", "(", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "tzutc", "(", ")", ")", "+", "timedelta", "(", "seconds", "=", "ts", ")", ")", ".", "astimezone", "(", "tzinfo", ")" ]
44.869565
0.001898
def has_option(self, option_name=None):
		""" Check whether the configuration selection has the specified option.

		:param option_name: option name to check. If omitted, the bare option prefix itself is checked
		:return: bool
		"""
		if option_name is None:
			option_name = ''
		return self.config().has_option(self.section(), self.option_prefix() + option_name)
[ "def", "has_option", "(", "self", ",", "option_name", "=", "None", ")", ":", "if", "option_name", "is", "None", ":", "option_name", "=", "''", "return", "self", ".", "config", "(", ")", ".", "has_option", "(", "self", ".", "section", "(", ")", ",", "self", ".", "option_prefix", "(", ")", "+", "option_name", ")" ]
36.2
0.02965
def render_file(context, path, absolute=False): """ Like :py:func:`read_file`, except that the file is rendered as a Jinja template using the current context. If `absolute` is True, use absolute path, otherwise path is assumed to be relative to Tarbell template root dir. For example: .. code-block:: html+jinja <div class="chapter"> {{ render_file('_chapters/one.txt') }} </div> """ site = g.current_site if not absolute: path = os.path.join(site.path, path) return render_template(path, **context)
[ "def", "render_file", "(", "context", ",", "path", ",", "absolute", "=", "False", ")", ":", "site", "=", "g", ".", "current_site", "if", "not", "absolute", ":", "path", "=", "os", ".", "path", ".", "join", "(", "site", ".", "path", ",", "path", ")", "return", "render_template", "(", "path", ",", "*", "*", "context", ")" ]
30.263158
0.011804
def strahler_order(section):
    '''Branching order of a tree section

    The strahler order is the inverse of the branch order,
    since this is computed from the tips of the tree
    towards the root.

    This implementation is a translation of the three steps described in
    Wikipedia (https://en.wikipedia.org/wiki/Strahler_number):

       - If the node is a leaf (has no children), its Strahler number is one.
       - If the node has one child with Strahler number i, and all other children
         have Strahler numbers less than i, then the Strahler number of the node
         is i again.
       - If the node has two or more children with Strahler number i, and no
         children with greater number, then the Strahler number of the node is
         i + 1.

    No efforts have been invested in making it computationally efficient,
    but it computes acceptably fast on tested morphologies (i.e., no waiting
    time).
    '''
    if section.children:
        child_orders = [strahler_order(child) for child in section.children]
        max_so_children = max(child_orders)
        it = iter(co == max_so_children for co in child_orders)
        # check if there are *two* or more children w/ the max_so_children
        any(it)
        if any(it):
            return max_so_children + 1
        return max_so_children
    return 1
[ "def", "strahler_order", "(", "section", ")", ":", "if", "section", ".", "children", ":", "child_orders", "=", "[", "strahler_order", "(", "child", ")", "for", "child", "in", "section", ".", "children", "]", "max_so_children", "=", "max", "(", "child_orders", ")", "it", "=", "iter", "(", "co", "==", "max_so_children", "for", "co", "in", "child_orders", ")", "# check if there are *two* or more children w/ the max_so_children", "any", "(", "it", ")", "if", "any", "(", "it", ")", ":", "return", "max_so_children", "+", "1", "return", "max_so_children", "return", "1" ]
42.516129
0.002226
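A worked example with a minimal stand-in for the section type; only a children attribute is needed by the function above:

from collections import namedtuple

Section = namedtuple('Section', 'children')

leaf_a, leaf_b = Section(children=[]), Section(children=[])
fork = Section(children=[leaf_a, leaf_b])   # two order-1 children -> order 2
stem = Section(children=[fork])             # single max child keeps order 2

assert strahler_order(leaf_a) == 1
assert strahler_order(fork) == 2
assert strahler_order(stem) == 2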
def execute(self, query, args=None):
        """Execute a query.

        query -- string, query to execute on server
        args -- optional sequence or mapping, parameters to use with query.

        Note: If args is a sequence, then %s must be used as the
        parameter placeholder in the query. If a mapping is used,
        %(key)s must be used as the placeholder.

        Returns an integer representing the number of rows affected, if any
        """
        while self.nextset():
            pass
        db = self._get_db()

        if isinstance(query, unicode):
            query = query.encode(db.encoding)

        if args is not None:
            if isinstance(args, dict):
                nargs = {}
                for key, item in args.items():
                    if isinstance(key, unicode):
                        key = key.encode(db.encoding)
                    nargs[key] = db.literal(item)
                args = nargs
            else:
                args = tuple(map(db.literal, args))
            try:
                query = query % args
            except TypeError as m:
                raise ProgrammingError(str(m))

        assert isinstance(query, (bytes, bytearray))
        res = self._query(query)
        return res
[ "def", "execute", "(", "self", ",", "query", ",", "args", "=", "None", ")", ":", "while", "self", ".", "nextset", "(", ")", ":", "pass", "db", "=", "self", ".", "_get_db", "(", ")", "if", "isinstance", "(", "query", ",", "unicode", ")", ":", "query", "=", "query", ".", "encode", "(", "db", ".", "encoding", ")", "if", "args", "is", "not", "None", ":", "if", "isinstance", "(", "args", ",", "dict", ")", ":", "nargs", "=", "{", "}", "for", "key", ",", "item", "in", "args", ".", "items", "(", ")", ":", "if", "isinstance", "(", "key", ",", "unicode", ")", ":", "key", "=", "key", ".", "encode", "(", "db", ".", "encoding", ")", "nargs", "[", "key", "]", "=", "db", ".", "literal", "(", "item", ")", "args", "=", "nargs", "else", ":", "args", "=", "tuple", "(", "map", "(", "db", ".", "literal", ",", "args", ")", ")", "try", ":", "query", "=", "query", "%", "args", "except", "TypeError", "as", "m", ":", "raise", "ProgrammingError", "(", "str", "(", "m", ")", ")", "assert", "isinstance", "(", "query", ",", "(", "bytes", ",", "bytearray", ")", ")", "res", "=", "self", ".", "_query", "(", "query", ")", "return", "res" ]
32.594595
0.00161
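Typical cursor usage for the two placeholder styles described above (an open MySQLdb connection with `cur = conn.cursor()` is assumed):

# Sequence args -> %s placeholders:
cur.execute("SELECT * FROM users WHERE id = %s AND status = %s", (42, "active"))
# Mapping args -> %(key)s placeholders:
cur.execute("SELECT * FROM users WHERE name = %(name)s", {"name": "alice"})
# Values are escaped via db.literal(), so callers must not pre-quote them.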
def get_parts(self): """ Searches for all DictCells (with nested structure) """ return self.find_path(lambda x: isinstance(x[1], DictCell), on_targets=True)
[ "def", "get_parts", "(", "self", ")", ":", "return", "self", ".", "find_path", "(", "lambda", "x", ":", "isinstance", "(", "x", "[", "1", "]", ",", "DictCell", ")", ",", "on_targets", "=", "True", ")" ]
36.8
0.015957
def _default_auth_location(filename):
    """
    Determine the auth file location for filename, like 'bugzillacookies'.
    An existing ~/.cache/python-bugzilla/<filename> is preferred; failing
    that, an old-style ~/.<filename> is used; otherwise the XDG cache
    directory is created and its path returned. The same logic applies
    to bugzillatoken.
    """
    homepath = os.path.expanduser("~/.%s" % filename)
    xdgpath = os.path.expanduser("~/.cache/python-bugzilla/%s" % filename)
    if os.path.exists(xdgpath):
        return xdgpath
    if os.path.exists(homepath):
        return homepath

    if not os.path.exists(os.path.dirname(xdgpath)):
        os.makedirs(os.path.dirname(xdgpath), 0o700)
    return xdgpath
[ "def", "_default_auth_location", "(", "filename", ")", ":", "homepath", "=", "os", ".", "path", ".", "expanduser", "(", "\"~/.%s\"", "%", "filename", ")", "xdgpath", "=", "os", ".", "path", ".", "expanduser", "(", "\"~/.cache/python-bugzilla/%s\"", "%", "filename", ")", "if", "os", ".", "path", ".", "exists", "(", "xdgpath", ")", ":", "return", "xdgpath", "if", "os", ".", "path", ".", "exists", "(", "homepath", ")", ":", "return", "homepath", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "xdgpath", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "xdgpath", ")", ",", "0o700", ")", "return", "xdgpath" ]
38.375
0.00159
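The fallback chain is easy to exercise directly; a small check of the behavior when neither file exists yet (paths are illustrative):

import os

# With neither file present, the XDG directory is created (mode 0o700)
# and the XDG path is returned.
path = _default_auth_location("bugzillacookies")
assert path.endswith(".cache/python-bugzilla/bugzillacookies")
assert os.path.isdir(os.path.dirname(path))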
def close(self, wait=False): """Close session, shutdown pool.""" self.session.close() self.pool.shutdown(wait=wait)
[ "def", "close", "(", "self", ",", "wait", "=", "False", ")", ":", "self", ".", "session", ".", "close", "(", ")", "self", ".", "pool", ".", "shutdown", "(", "wait", "=", "wait", ")" ]
34
0.014388
def variable_name(value, allow_empty = False, **kwargs): """Validate that the value is a valid Python variable name. .. caution:: This function does **NOT** check whether the variable exists. It only checks that the ``value`` would work as a Python variable (or class, or function, etc.) name. :param value: The value to validate. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`str <python:str>` or :obj:`None <python:None>` :raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value`` is empty """ if not value and not allow_empty: raise errors.EmptyValueError('value (%s) was empty' % value) elif not value: return None try: parse('%s = None' % value) except (SyntaxError, ValueError, TypeError): raise errors.InvalidVariableNameError( 'value (%s) is not a valid variable name' % value ) return value
[ "def", "variable_name", "(", "value", ",", "allow_empty", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "value", "and", "not", "allow_empty", ":", "raise", "errors", ".", "EmptyValueError", "(", "'value (%s) was empty'", "%", "value", ")", "elif", "not", "value", ":", "return", "None", "try", ":", "parse", "(", "'%s = None'", "%", "value", ")", "except", "(", "SyntaxError", ",", "ValueError", ",", "TypeError", ")", ":", "raise", "errors", ".", "InvalidVariableNameError", "(", "'value (%s) is not a valid variable name'", "%", "value", ")", "return", "value" ]
33.236842
0.002308
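For comparison, a lighter check is possible with only the standard library; this is a sketch of roughly equivalent behavior, not part of the validator-collection API:

import keyword

def is_valid_variable_name(value):
    """True if `value` can serve as a plain Python identifier."""
    return (isinstance(value, str)
            and value.isidentifier()
            and not keyword.iskeyword(value))

assert is_valid_variable_name("my_var")
assert not is_valid_variable_name("2fast")  # cannot start with a digit
assert not is_valid_variable_name("class")  # reserved keyword
# Note: the parse-based check above also admits dotted targets like 'a.b',
# since attribute assignment is valid Python; this sketch rejects them.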
def fromdicts(dicts, header=None, sample=1000, missing=None):
    """
    View a sequence of Python :class:`dict` as a table. E.g.::

        >>> import petl as etl
        >>> dicts = [{"foo": "a", "bar": 1},
        ...          {"foo": "b", "bar": 2},
        ...          {"foo": "c", "bar": 2}]
        >>> table1 = etl.fromdicts(dicts, header=['foo', 'bar'])
        >>> table1
        +-----+-----+
        | foo | bar |
        +=====+=====+
        | 'a' | 1   |
        +-----+-----+
        | 'b' | 2   |
        +-----+-----+
        | 'c' | 2   |
        +-----+-----+

    If `header` is not specified, `sample` items from `dicts` will be
    inspected to discover dictionary keys. Note that the order in which
    dictionary keys are discovered may not be stable.

    See also :func:`petl.io.json.fromjson`.

    .. versionchanged:: 1.1.0

    If no `header` is specified, fields will be discovered by sampling keys
    from the first `sample` dictionaries in `dicts`. The header will be
    constructed from keys in the order discovered. Note that this ordering
    may not be stable, and therefore it may be advisable to specify an
    explicit `header` or to use another function like
    :func:`petl.transform.headers.sortheader` on the resulting table to
    guarantee stability.

    """
    return DictsView(dicts, header=header, sample=sample, missing=missing)
[ "def", "fromdicts", "(", "dicts", ",", "header", "=", "None", ",", "sample", "=", "1000", ",", "missing", "=", "None", ")", ":", "return", "DictsView", "(", "dicts", ",", "header", "=", "header", ",", "sample", "=", "sample", ",", "missing", "=", "missing", ")" ]
34.564103
0.000722
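Given the ordering caveat, an explicit header is the simplest way to pin column order; `missing` then fills keys absent from individual dicts:

import petl as etl

dicts = [{"foo": "a", "bar": 1}, {"foo": "b"}]  # second dict lacks "bar"
table = etl.fromdicts(dicts, header=["foo", "bar"], missing="NA")
# -> rows ('a', 1) and ('b', 'NA'), columns always in foo, bar order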
def source_call(method_name, *args, **kwargs):
    """
    Creates an effect that drops the current effect value and calls the
    source's method of the given name with the specified arguments and
    keywords.

    @param method_name: the name of a method belonging to the source
        reference.
    @type method_name: str
    """
    def source_call(_value, context, **_params):
        method = getattr(context["model"].source, method_name)
        return _call(method, args, kwargs)
    return source_call
[ "def", "source_call", "(", "method_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "source_call", "(", "_value", ",", "context", ",", "*", "*", "_params", ")", ":", "method", "=", "getattr", "(", "context", "[", "\"model\"", "]", ".", "source", ",", "method_name", ")", "return", "_call", "(", "method", ",", "args", ",", "kwargs", ")", "return", "source_call" ]
35.214286
0.001976
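A sketch of how such an effect might be invoked; the `context` shape is inferred from the code, and `model` is a hypothetical object whose `source` has a `reset` method:

# Build an effect bound to source.reset(hard=True) ...
effect = source_call("reset", hard=True)
# ... then invoke it: the incoming value is discarded, and the method is
# looked up on context["model"].source at call time.
result = effect("ignored value", {"model": model})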
def as_chord(chord):
    """ Convert a str to a Chord instance if the input is a str;
        pass a Chord instance through unchanged.

    :type chord: str|pychord.Chord
    :param chord: Chord name or Chord instance
    :rtype: pychord.Chord
    :return: Chord instance
    """
    if isinstance(chord, Chord):
        return chord
    elif isinstance(chord, str):
        return Chord(chord)
    else:
        raise TypeError("input type should be str or Chord instance.")
[ "def", "as_chord", "(", "chord", ")", ":", "if", "isinstance", "(", "chord", ",", "Chord", ")", ":", "return", "chord", "elif", "isinstance", "(", "chord", ",", "str", ")", ":", "return", "Chord", "(", "chord", ")", "else", ":", "raise", "TypeError", "(", "\"input type should be str or Chord instance.\"", ")" ]
29.071429
0.002381
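Typical use of the coercion helper (chord names follow pychord's notation):

from pychord import Chord

c1 = as_chord("Am7")       # str -> parsed into a Chord
c2 = as_chord(Chord("C"))  # already a Chord -> returned as-is
# as_chord(123) would raise TypeError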
def which(cmd, mode=os.F_OK | os.X_OK, path=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. Note: This function was backported from the Python 3 source code. """ # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly # rather than referring to PATH directories. This includes checking # relative to the current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if normdir not in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None
[ "def", "which", "(", "cmd", ",", "mode", "=", "os", ".", "F_OK", "|", "os", ".", "X_OK", ",", "path", "=", "None", ")", ":", "# Check that a given file can be accessed with the correct mode.", "# Additionally check that `file` is not a directory, as on Windows", "# directories pass the os.access check.", "def", "_access_check", "(", "fn", ",", "mode", ")", ":", "return", "(", "os", ".", "path", ".", "exists", "(", "fn", ")", "and", "os", ".", "access", "(", "fn", ",", "mode", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "fn", ")", ")", "# If we're given a path with a directory part, look it up directly", "# rather than referring to PATH directories. This includes checking", "# relative to the current directory, e.g. ./script", "if", "os", ".", "path", ".", "dirname", "(", "cmd", ")", ":", "if", "_access_check", "(", "cmd", ",", "mode", ")", ":", "return", "cmd", "return", "None", "if", "path", "is", "None", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "\"PATH\"", ",", "os", ".", "defpath", ")", "if", "not", "path", ":", "return", "None", "path", "=", "path", ".", "split", "(", "os", ".", "pathsep", ")", "files", "=", "[", "cmd", "]", "seen", "=", "set", "(", ")", "for", "dir", "in", "path", ":", "normdir", "=", "os", ".", "path", ".", "normcase", "(", "dir", ")", "if", "normdir", "not", "in", "seen", ":", "seen", ".", "add", "(", "normdir", ")", "for", "thefile", "in", "files", ":", "name", "=", "os", ".", "path", ".", "join", "(", "dir", ",", "thefile", ")", "if", "_access_check", "(", "name", ",", "mode", ")", ":", "return", "name", "return", "None" ]
35.785714
0.000648
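On Python 3.3+ the standard library provides the same functionality as shutil.which; the backport and the stdlib version should agree for the common case:

import shutil

assert which("python") == shutil.which("python")  # assuming python is on PATH
assert which("definitely-not-a-command") is None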
def layout_json_outputs(self): """Return layout.json outputs in a flattened dict with name param as key.""" if self._layout_json_outputs is None: self._layout_json_outputs = {} for o in self.layout_json.get('outputs', []): self._layout_json_outputs.setdefault(o.get('name'), o) return self._layout_json_outputs
[ "def", "layout_json_outputs", "(", "self", ")", ":", "if", "self", ".", "_layout_json_outputs", "is", "None", ":", "self", ".", "_layout_json_outputs", "=", "{", "}", "for", "o", "in", "self", ".", "layout_json", ".", "get", "(", "'outputs'", ",", "[", "]", ")", ":", "self", ".", "_layout_json_outputs", ".", "setdefault", "(", "o", ".", "get", "(", "'name'", ")", ",", "o", ")", "return", "self", ".", "_layout_json_outputs" ]
52.571429
0.008021
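The property builds a name-keyed index once and caches it; note that setdefault keeps the first entry per name, where a dict comprehension would keep the last. A standalone sketch on sample data:

outputs = [{"name": "ip", "type": "String"},
           {"name": "ip", "type": "Duplicate"},  # hypothetical repeated name
           {"name": "score", "type": "Number"}]
index = {}
for o in outputs:
    index.setdefault(o.get("name"), o)
assert index["ip"]["type"] == "String"  # first occurrence wins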
def read_namespaced_replication_controller_scale(self, name, namespace, **kwargs): # noqa: E501 """read_namespaced_replication_controller_scale # noqa: E501 read scale of the specified ReplicationController # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_replication_controller_scale(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Scale If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_replication_controller_scale_with_http_info(name, namespace, **kwargs) # noqa: E501 else: (data) = self.read_namespaced_replication_controller_scale_with_http_info(name, namespace, **kwargs) # noqa: E501 return data
[ "def", "read_namespaced_replication_controller_scale", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "read_namespaced_replication_controller_scale_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "read_namespaced_replication_controller_scale_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
54.913043
0.001556
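Typical synchronous and asynchronous calls through the generated client; cluster access, the ReplicationController name, and the namespace are assumptions:

from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config()
v1 = client.CoreV1Api()

# Synchronous: returns a V1Scale directly.
scale = v1.read_namespaced_replication_controller_scale("my-rc", "default")
print(scale.spec.replicas)

# Asynchronous: returns a thread whose .get() yields the V1Scale.
thread = v1.read_namespaced_replication_controller_scale(
    "my-rc", "default", async_req=True)
scale = thread.get()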
def payzen_form(payment_request, auto_submit=False):
    """Render the PayZen payment form for ``payment_request``.

    If ``auto_submit`` is True, the auto-submit template is used so the
    form posts itself; otherwise the standard form template is rendered.
    """
    if auto_submit:
        template_used = "django_payzen/auto_submit_form.html"
    else:
        template_used = "django_payzen/form.html"
    payment_request.update()
    t = template.loader.get_template(template_used)
    return t.render({
        "form": forms.PaymentRequestForm(instance=payment_request),
        "payzen_submit_url": app_settings.PAYZEN_REQUEST_URL
    })
[ "def", "payzen_form", "(", "payment_request", ",", "auto_submit", "=", "False", ")", ":", "if", "auto_submit", ":", "template_used", "=", "\"django_payzen/auto_submit_form.html\"", "else", ":", "template_used", "=", "\"django_payzen/form.html\"", "payment_request", ".", "update", "(", ")", "t", "=", "template", ".", "loader", ".", "get_template", "(", "template_used", ")", "return", "t", ".", "render", "(", "{", "\"form\"", ":", "forms", ".", "PaymentRequestForm", "(", "instance", "=", "payment_request", ")", ",", "\"payzen_submit_url\"", ":", "app_settings", ".", "PAYZEN_REQUEST_URL", "}", ")" ]
37.333333
0.002179
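Hypothetical view-side usage; note the function refreshes the payment request before rendering:

# payment_request.update() is called internally before rendering.
html = payzen_form(payment_request)                    # user clicks submit
auto = payzen_form(payment_request, auto_submit=True)  # form posts itself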
def adjust_text(texts, x=None, y=None, add_objects=None, ax=None,
                expand_text=(1.05, 1.2), expand_points=(1.05, 1.2),
                expand_objects=(1.05, 1.2), expand_align=(1.05, 1.2),
                autoalign='xy', va='center', ha='center',
                force_text=(0.1, 0.25), force_points=(0.2, 0.5),
                force_objects=(0.1, 0.25),
                lim=500, precision=0.01,
                only_move={'points':'xy', 'text':'xy', 'objects':'xy'},
                avoid_text=True, avoid_points=True, avoid_self=True,
                save_steps=False, save_prefix='', save_format='png',
                add_step_numbers=True, on_basemap=False,
                *args, **kwargs):
    """Iteratively adjusts the locations of texts.

    Call adjust_text the very last, after all plotting (especially anything
    that can change the axes limits) has been done. This is because to move
    texts the function needs to use the dimensions of the axes, and without
    knowing the final size of the plots the results will be completely
    nonsensical or suboptimal.

    First moves all texts that are outside the axes limits inside. Then in
    each iteration moves all texts away from each other and from points. In
    the end hides texts and substitutes them with annotations to link them to
    the respective points.

    Parameters
    ----------
    texts : list
        A list of :obj:`matplotlib.text.Text` objects to adjust.

    Other Parameters
    ----------------
    x : array_like
        x-coordinates of points to repel from; if not provided only uses text
        coordinates.
    y : array_like
        y-coordinates of points to repel from; if not provided only uses text
        coordinates.
    add_objects : list or PathCollection
        a list of additional matplotlib objects to avoid; they must have a
        `.get_window_extent()` method; alternatively, a PathCollection or a
        list of Bbox objects.
    ax : matplotlib axes, default is the current axes (plt.gca())
        axes object with the plot
    expand_text : array_like, default (1.05, 1.2)
        a tuple/list/... with 2 multipliers (x, y) by which to expand the
        bounding box of texts when repelling them from each other.
    expand_points : array_like, default (1.05, 1.2)
        a tuple/list/... with 2 multipliers (x, y) by which to expand the
        bounding box of texts when repelling them from points.
    expand_objects : array_like, default (1.05, 1.2)
        a tuple/list/... with 2 multipliers (x, y) by which to expand the
        bounding box of texts when repelling them from other objects.
    expand_align : array_like, default (1.05, 1.2)
        a tuple/list/... with 2 multipliers (x, y) by which to expand the
        bounding box of texts when autoaligning texts.
    autoalign: str or boolean {'xy', 'x', 'y', True, False}, default 'xy'
        Direction in which the best alignment will be determined

        - 'xy' or True, best alignment of all texts determined in all
          directions automatically before running the iterative adjustment
          (overriding va and ha),
        - 'x', will only align horizontally,
        - 'y', will only align vertically,
        - False, do nothing (i.e. preserve va and ha)

    va : str, default 'center'
        vertical alignment of texts
    ha : str, default 'center'
        horizontal alignment of texts,
    force_text : tuple, default (0.1, 0.25)
        the repel force from texts is multiplied by this value
    force_points : tuple, default (0.2, 0.5)
        the repel force from points is multiplied by this value
    force_objects : tuple, default (0.1, 0.25)
        same as other forces, but for repelling additional objects
    lim : int, default 500
        limit of number of iterations
    precision : float, default 0.01
        iterate until the sum of all overlaps along both x and y are less
        than this amount, as a fraction of the total widths and heights,
        respectively. May need to increase for complicated situations.
    only_move : dict, default {'points':'xy', 'text':'xy', 'objects':'xy'}
        a dict to restrict movement of texts to only certain axes for certain
        types of overlaps. Valid keys are 'points', 'text', and 'objects'.
        Valid values are '', 'x', 'y', and 'xy'. For example,
        only_move={'points':'y', 'text':'xy', 'objects':'xy'} forbids moving
        texts along the x axis due to overlaps with points.
    avoid_text : bool, default True
        whether to repel texts from each other.
    avoid_points : bool, default True
        whether to repel texts from points. Can be helpful to switch off in
        extremely crowded plots.
    avoid_self : bool, default True
        whether to repel texts from their original positions.
    save_steps : bool, default False
        whether to save intermediate steps as images.
    save_prefix : str, default ''
        if `save_steps` is True, a path and/or prefix to the saved steps.
    save_format : str, default 'png'
        if `save_steps` is True, a format to save the steps into.
    add_step_numbers : bool, default True
        if `save_steps` is True, whether to add step numbers as titles to the
        images of saving steps.
    on_basemap : bool, default False
        whether your plot uses the basemap library; this stops labels going
        over the edge of the map.
    args and kwargs :
        any arguments will be fed into :obj:`ax.annotate` after all the
        optimization is done just for plotting the connecting arrows if
        required.

    Returns
    -------
    int
        Number of iterations
    """
    plt.draw()
    if ax is None:
        ax = plt.gca()
    r = get_renderer(ax.get_figure())
    orig_xy = [get_text_position(text, ax=ax) for text in texts]
    orig_x = [xy[0] for xy in orig_xy]
    orig_y = [xy[1] for xy in orig_xy]
    force_objects = float_to_tuple(force_objects)
    force_text = float_to_tuple(force_text)
    force_points = float_to_tuple(force_points)

    # xdiff = np.diff(ax.get_xlim())[0]
    # ydiff = np.diff(ax.get_ylim())[0]
    bboxes = get_bboxes(texts, r, (1.0, 1.0), ax)
    sum_width = np.sum(list(map(lambda bbox: bbox.width, bboxes)))
    sum_height = np.sum(list(map(lambda bbox: bbox.height, bboxes)))
    if not any(list(map(lambda val: 'x' in val, only_move.values()))):
        precision_x = np.inf
    else:
        precision_x = precision*sum_width
    #
    if not any(list(map(lambda val: 'y' in val, only_move.values()))):
        precision_y = np.inf
    else:
        precision_y = precision*sum_height

    if x is None:
        if y is None:
            if avoid_self:
                x, y = orig_x, orig_y
            else:
                x, y = [], []
        else:
            raise ValueError('Please specify both x and y, or neither')
    if y is None:
        raise ValueError('Please specify both x and y, or neither')
    if add_objects is None:
        text_from_objects = False
        add_bboxes = []
    else:
        try:
            add_bboxes = get_bboxes(add_objects, r, (1, 1), ax)
        except:
            raise ValueError("Can't get bounding boxes from add_objects - is'\
 it a flat list of matplotlib objects?")
            return
        text_from_objects = True
    for text in texts:
        text.set_va(va)
        text.set_ha(ha)
    if save_steps:
        if add_step_numbers:
            plt.title('Before')
        plt.savefig('%s%s.%s' % (save_prefix, '000a', save_format),
                    format=save_format, dpi=150)
    elif on_basemap:
        ax.draw(r)

    if autoalign:
        if autoalign is True:
            autoalign='xy'
        for i in range(2):
            texts = optimally_align_text(x, y, texts,
                                         expand=expand_align,
                                         add_bboxes=add_bboxes,
                                         direction=autoalign,
                                         renderer=r, ax=ax)

    if save_steps:
        if add_step_numbers:
            plt.title('Autoaligned')
        plt.savefig('%s%s.%s' % (save_prefix, '000b', save_format),
                    format=save_format, dpi=150)
    elif on_basemap:
        ax.draw(r)

    texts = repel_text_from_axes(texts, ax, renderer=r, expand=expand_points)
    history = [(np.inf, np.inf)]*10
    for i in xrange(lim):
        # q1, q2 = [np.inf, np.inf], [np.inf, np.inf]
        if avoid_text:
            d_x_text, d_y_text, q1 = repel_text(texts,
renderer=r, ax=ax, expand=expand_text) else: d_x_text, d_y_text, q1 = [0]*len(texts), [0]*len(texts), (0, 0) if avoid_points: d_x_points, d_y_points, q2 = repel_text_from_points(x, y, texts, ax=ax, renderer=r, expand=expand_points) else: d_x_points, d_y_points, q2 = [0]*len(texts), [0]*len(texts), (0, 0) if text_from_objects: d_x_objects, d_y_objects, q3 = repel_text_from_bboxes(add_bboxes, texts, ax=ax, renderer=r, expand=expand_objects) else: d_x_objects, d_y_objects, q3 = [0]*len(texts), [0]*len(texts), (0, 0) if only_move: if 'text' in only_move: if 'x' not in only_move['text']: d_x_text = np.zeros_like(d_x_text) if 'y' not in only_move['text']: d_y_text = np.zeros_like(d_y_text) if 'points' in only_move: if 'x' not in only_move['points']: d_x_points = np.zeros_like(d_x_points) if 'y' not in only_move['points']: d_y_points = np.zeros_like(d_y_points) if 'objects' in only_move: if 'x' not in only_move['objects']: d_x_objects = np.zeros_like(d_x_objects) if 'y' not in only_move['objects']: d_y_objects = np.zeros_like(d_y_objects) dx = (np.array(d_x_text) * force_text[0] + np.array(d_x_points) * force_points[0] + np.array(d_x_objects) * force_objects[0]) dy = (np.array(d_y_text) * force_text[1] + np.array(d_y_points) * force_points[1] + np.array(d_y_objects) * force_objects[1]) qx = np.sum([q[0] for q in [q1, q2, q3]]) qy = np.sum([q[1] for q in [q1, q2, q3]]) histm = np.max(np.array(history), axis=0) history.pop(0) history.append((qx, qy)) move_texts(texts, dx, dy, bboxes = get_bboxes(texts, r, (1, 1), ax), ax=ax) if save_steps: if add_step_numbers: plt.title(i+1) plt.savefig('%s%s.%s' % (save_prefix, '{0:03}'.format(i+1), save_format), format=save_format, dpi=150) elif on_basemap: ax.draw(r) # Stop if we've reached the precision threshold, or if the x and y displacement # are both greater than the max over the last 10 iterations (suggesting a # failure to converge) if (qx < precision_x and qy < precision_y) or np.all([qx, qy] >= histm): break # Now adding arrows from texts to their original locations if required if 'arrowprops' in kwargs: bboxes = get_bboxes(texts, r, (1, 1), ax) kwap = kwargs.pop('arrowprops') for j, (bbox, text) in enumerate(zip(bboxes, texts)): ap = {'patchA':text} # Ensure arrow is clipped by the text ap.update(kwap) # Add arrowprops from kwargs ax.annotate("", # Add an arrow from the text to the point xy = (orig_xy[j]), xytext=get_midpoint(bbox), arrowprops=ap, *args, **kwargs) if save_steps: if add_step_numbers: plt.title(i+1) plt.savefig('%s%s.%s' % (save_prefix, '{0:03}'.format(i+1), save_format), format=save_format, dpi=150) elif on_basemap: ax.draw(r) return i+1
[ "def", "adjust_text", "(", "texts", ",", "x", "=", "None", ",", "y", "=", "None", ",", "add_objects", "=", "None", ",", "ax", "=", "None", ",", "expand_text", "=", "(", "1.05", ",", "1.2", ")", ",", "expand_points", "=", "(", "1.05", ",", "1.2", ")", ",", "expand_objects", "=", "(", "1.05", ",", "1.2", ")", ",", "expand_align", "=", "(", "1.05", ",", "1.2", ")", ",", "autoalign", "=", "'xy'", ",", "va", "=", "'center'", ",", "ha", "=", "'center'", ",", "force_text", "=", "(", "0.1", ",", "0.25", ")", ",", "force_points", "=", "(", "0.2", ",", "0.5", ")", ",", "force_objects", "=", "(", "0.1", ",", "0.25", ")", ",", "lim", "=", "500", ",", "precision", "=", "0.01", ",", "only_move", "=", "{", "'points'", ":", "'xy'", ",", "'text'", ":", "'xy'", ",", "'objects'", ":", "'xy'", "}", ",", "avoid_text", "=", "True", ",", "avoid_points", "=", "True", ",", "avoid_self", "=", "True", ",", "save_steps", "=", "False", ",", "save_prefix", "=", "''", ",", "save_format", "=", "'png'", ",", "add_step_numbers", "=", "True", ",", "on_basemap", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "plt", ".", "draw", "(", ")", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "r", "=", "get_renderer", "(", "ax", ".", "get_figure", "(", ")", ")", "orig_xy", "=", "[", "get_text_position", "(", "text", ",", "ax", "=", "ax", ")", "for", "text", "in", "texts", "]", "orig_x", "=", "[", "xy", "[", "0", "]", "for", "xy", "in", "orig_xy", "]", "orig_y", "=", "[", "xy", "[", "1", "]", "for", "xy", "in", "orig_xy", "]", "force_objects", "=", "float_to_tuple", "(", "force_objects", ")", "force_text", "=", "float_to_tuple", "(", "force_text", ")", "force_points", "=", "float_to_tuple", "(", "force_points", ")", "# xdiff = np.diff(ax.get_xlim())[0]", "# ydiff = np.diff(ax.get_ylim())[0]", "bboxes", "=", "get_bboxes", "(", "texts", ",", "r", ",", "(", "1.0", ",", "1.0", ")", ",", "ax", ")", "sum_width", "=", "np", ".", "sum", "(", "list", "(", "map", "(", "lambda", "bbox", ":", "bbox", ".", "width", ",", "bboxes", ")", ")", ")", "sum_height", "=", "np", ".", "sum", "(", "list", "(", "map", "(", "lambda", "bbox", ":", "bbox", ".", "height", ",", "bboxes", ")", ")", ")", "if", "not", "any", "(", "list", "(", "map", "(", "lambda", "val", ":", "'x'", "in", "val", ",", "only_move", ".", "values", "(", ")", ")", ")", ")", ":", "precision_x", "=", "np", ".", "inf", "else", ":", "precision_x", "=", "precision", "*", "sum_width", "#", "if", "not", "any", "(", "list", "(", "map", "(", "lambda", "val", ":", "'y'", "in", "val", ",", "only_move", ".", "values", "(", ")", ")", ")", ")", ":", "precision_y", "=", "np", ".", "inf", "else", ":", "precision_y", "=", "precision", "*", "sum_height", "if", "x", "is", "None", ":", "if", "y", "is", "None", ":", "if", "avoid_self", ":", "x", ",", "y", "=", "orig_x", ",", "orig_y", "else", ":", "x", ",", "y", "=", "[", "]", ",", "[", "]", "else", ":", "raise", "ValueError", "(", "'Please specify both x and y, or neither'", ")", "if", "y", "is", "None", ":", "raise", "ValueError", "(", "'Please specify both x and y, or neither'", ")", "if", "add_objects", "is", "None", ":", "text_from_objects", "=", "False", "add_bboxes", "=", "[", "]", "else", ":", "try", ":", "add_bboxes", "=", "get_bboxes", "(", "add_objects", ",", "r", ",", "(", "1", ",", "1", ")", ",", "ax", ")", "except", ":", "raise", "ValueError", "(", "\"Can't get bounding boxes from add_objects - is'\\\n it a flat list of matplotlib objects?\"", ")", "return", "text_from_objects", "=", "True", "for", 
"text", "in", "texts", ":", "text", ".", "set_va", "(", "va", ")", "text", ".", "set_ha", "(", "ha", ")", "if", "save_steps", ":", "if", "add_step_numbers", ":", "plt", ".", "title", "(", "'Before'", ")", "plt", ".", "savefig", "(", "'%s%s.%s'", "%", "(", "save_prefix", ",", "'000a'", ",", "save_format", ")", ",", "format", "=", "save_format", ",", "dpi", "=", "150", ")", "elif", "on_basemap", ":", "ax", ".", "draw", "(", "r", ")", "if", "autoalign", ":", "if", "autoalign", "is", "True", ":", "autoalign", "=", "'xy'", "for", "i", "in", "range", "(", "2", ")", ":", "texts", "=", "optimally_align_text", "(", "x", ",", "y", ",", "texts", ",", "expand", "=", "expand_align", ",", "add_bboxes", "=", "add_bboxes", ",", "direction", "=", "autoalign", ",", "renderer", "=", "r", ",", "ax", "=", "ax", ")", "if", "save_steps", ":", "if", "add_step_numbers", ":", "plt", ".", "title", "(", "'Autoaligned'", ")", "plt", ".", "savefig", "(", "'%s%s.%s'", "%", "(", "save_prefix", ",", "'000b'", ",", "save_format", ")", ",", "format", "=", "save_format", ",", "dpi", "=", "150", ")", "elif", "on_basemap", ":", "ax", ".", "draw", "(", "r", ")", "texts", "=", "repel_text_from_axes", "(", "texts", ",", "ax", ",", "renderer", "=", "r", ",", "expand", "=", "expand_points", ")", "history", "=", "[", "(", "np", ".", "inf", ",", "np", ".", "inf", ")", "]", "*", "10", "for", "i", "in", "xrange", "(", "lim", ")", ":", "# q1, q2 = [np.inf, np.inf], [np.inf, np.inf]", "if", "avoid_text", ":", "d_x_text", ",", "d_y_text", ",", "q1", "=", "repel_text", "(", "texts", ",", "renderer", "=", "r", ",", "ax", "=", "ax", ",", "expand", "=", "expand_text", ")", "else", ":", "d_x_text", ",", "d_y_text", ",", "q1", "=", "[", "0", "]", "*", "len", "(", "texts", ")", ",", "[", "0", "]", "*", "len", "(", "texts", ")", ",", "(", "0", ",", "0", ")", "if", "avoid_points", ":", "d_x_points", ",", "d_y_points", ",", "q2", "=", "repel_text_from_points", "(", "x", ",", "y", ",", "texts", ",", "ax", "=", "ax", ",", "renderer", "=", "r", ",", "expand", "=", "expand_points", ")", "else", ":", "d_x_points", ",", "d_y_points", ",", "q2", "=", "[", "0", "]", "*", "len", "(", "texts", ")", ",", "[", "0", "]", "*", "len", "(", "texts", ")", ",", "(", "0", ",", "0", ")", "if", "text_from_objects", ":", "d_x_objects", ",", "d_y_objects", ",", "q3", "=", "repel_text_from_bboxes", "(", "add_bboxes", ",", "texts", ",", "ax", "=", "ax", ",", "renderer", "=", "r", ",", "expand", "=", "expand_objects", ")", "else", ":", "d_x_objects", ",", "d_y_objects", ",", "q3", "=", "[", "0", "]", "*", "len", "(", "texts", ")", ",", "[", "0", "]", "*", "len", "(", "texts", ")", ",", "(", "0", ",", "0", ")", "if", "only_move", ":", "if", "'text'", "in", "only_move", ":", "if", "'x'", "not", "in", "only_move", "[", "'text'", "]", ":", "d_x_text", "=", "np", ".", "zeros_like", "(", "d_x_text", ")", "if", "'y'", "not", "in", "only_move", "[", "'text'", "]", ":", "d_y_text", "=", "np", ".", "zeros_like", "(", "d_y_text", ")", "if", "'points'", "in", "only_move", ":", "if", "'x'", "not", "in", "only_move", "[", "'points'", "]", ":", "d_x_points", "=", "np", ".", "zeros_like", "(", "d_x_points", ")", "if", "'y'", "not", "in", "only_move", "[", "'points'", "]", ":", "d_y_points", "=", "np", ".", "zeros_like", "(", "d_y_points", ")", "if", "'objects'", "in", "only_move", ":", "if", "'x'", "not", "in", "only_move", "[", "'objects'", "]", ":", "d_x_objects", "=", "np", ".", "zeros_like", "(", "d_x_objects", ")", "if", "'y'", "not", "in", "only_move", "[", "'objects'", "]", ":", 
"d_y_objects", "=", "np", ".", "zeros_like", "(", "d_y_objects", ")", "dx", "=", "(", "np", ".", "array", "(", "d_x_text", ")", "*", "force_text", "[", "0", "]", "+", "np", ".", "array", "(", "d_x_points", ")", "*", "force_points", "[", "0", "]", "+", "np", ".", "array", "(", "d_x_objects", ")", "*", "force_objects", "[", "0", "]", ")", "dy", "=", "(", "np", ".", "array", "(", "d_y_text", ")", "*", "force_text", "[", "1", "]", "+", "np", ".", "array", "(", "d_y_points", ")", "*", "force_points", "[", "1", "]", "+", "np", ".", "array", "(", "d_y_objects", ")", "*", "force_objects", "[", "1", "]", ")", "qx", "=", "np", ".", "sum", "(", "[", "q", "[", "0", "]", "for", "q", "in", "[", "q1", ",", "q2", ",", "q3", "]", "]", ")", "qy", "=", "np", ".", "sum", "(", "[", "q", "[", "1", "]", "for", "q", "in", "[", "q1", ",", "q2", ",", "q3", "]", "]", ")", "histm", "=", "np", ".", "max", "(", "np", ".", "array", "(", "history", ")", ",", "axis", "=", "0", ")", "history", ".", "pop", "(", "0", ")", "history", ".", "append", "(", "(", "qx", ",", "qy", ")", ")", "move_texts", "(", "texts", ",", "dx", ",", "dy", ",", "bboxes", "=", "get_bboxes", "(", "texts", ",", "r", ",", "(", "1", ",", "1", ")", ",", "ax", ")", ",", "ax", "=", "ax", ")", "if", "save_steps", ":", "if", "add_step_numbers", ":", "plt", ".", "title", "(", "i", "+", "1", ")", "plt", ".", "savefig", "(", "'%s%s.%s'", "%", "(", "save_prefix", ",", "'{0:03}'", ".", "format", "(", "i", "+", "1", ")", ",", "save_format", ")", ",", "format", "=", "save_format", ",", "dpi", "=", "150", ")", "elif", "on_basemap", ":", "ax", ".", "draw", "(", "r", ")", "# Stop if we've reached the precision threshold, or if the x and y displacement", "# are both greater than the max over the last 10 iterations (suggesting a", "# failure to converge)", "if", "(", "qx", "<", "precision_x", "and", "qy", "<", "precision_y", ")", "or", "np", ".", "all", "(", "[", "qx", ",", "qy", "]", ">=", "histm", ")", ":", "break", "# Now adding arrows from texts to their original locations if required", "if", "'arrowprops'", "in", "kwargs", ":", "bboxes", "=", "get_bboxes", "(", "texts", ",", "r", ",", "(", "1", ",", "1", ")", ",", "ax", ")", "kwap", "=", "kwargs", ".", "pop", "(", "'arrowprops'", ")", "for", "j", ",", "(", "bbox", ",", "text", ")", "in", "enumerate", "(", "zip", "(", "bboxes", ",", "texts", ")", ")", ":", "ap", "=", "{", "'patchA'", ":", "text", "}", "# Ensure arrow is clipped by the text", "ap", ".", "update", "(", "kwap", ")", "# Add arrowprops from kwargs", "ax", ".", "annotate", "(", "\"\"", ",", "# Add an arrow from the text to the point", "xy", "=", "(", "orig_xy", "[", "j", "]", ")", ",", "xytext", "=", "get_midpoint", "(", "bbox", ")", ",", "arrowprops", "=", "ap", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "save_steps", ":", "if", "add_step_numbers", ":", "plt", ".", "title", "(", "i", "+", "1", ")", "plt", ".", "savefig", "(", "'%s%s.%s'", "%", "(", "save_prefix", ",", "'{0:03}'", ".", "format", "(", "i", "+", "1", ")", ",", "save_format", ")", ",", "format", "=", "save_format", ",", "dpi", "=", "150", ")", "elif", "on_basemap", ":", "ax", ".", "draw", "(", "r", ")", "return", "i", "+", "1" ]
42.531469
0.002008
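End-to-end usage with matplotlib, following the docstring's advice to call adjust_text last; the import path assumes the standard adjustText package layout:

import numpy as np
import matplotlib.pyplot as plt
from adjustText import adjust_text  # assumed import path for this function

np.random.seed(0)
x, y = np.random.rand(2, 20)
fig, ax = plt.subplots()
ax.scatter(x, y)
texts = [ax.text(xi, yi, 'p%d' % i) for i, (xi, yi) in enumerate(zip(x, y))]
# Called last, after all plotting, so the axes limits are final.
n_iterations = adjust_text(texts, x=x, y=y, ax=ax,
                           arrowprops=dict(arrowstyle='->', color='gray'))
plt.show()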