Dataset preview. Columns (each record below lists them in this order):

  text           stringlengths   75 .. 104k
  code_tokens    sequence
  avg_line_len   float64         7.91 .. 980
  score          float64         0 .. 0.18
def visit_Capture(self, node: parsing.Capture) -> [ast.stmt] or ast.expr:
    """Generates python code to capture text consumed by a clause.

    #If all clauses can be inlined
    self.beginTag('tagname') and clause and self.endTag('tagname')

    if not self.beginTag('tagname'):
        return False
    <code for the clause>
    if not self.endTag('tagname'):
        return False
    """
    begintag = ast.Attribute(
        ast.Name('self', ast.Load()), 'beginTag', ast.Load())
    endtag = ast.Attribute(
        ast.Name('self', ast.Load()), 'endTag', ast.Load())
    begin = ast.Call(begintag, [ast.Str(node.tagname)], [], None, None)
    end = ast.Call(endtag, [ast.Str(node.tagname)], [], None, None)
    result = [begin, self.visit(node.pt), end]
    for clause in result:
        if not isinstance(clause, ast.expr):
            break
    else:
        return ast.BoolOp(ast.And(), result)
    res = []
    for stmt in map(self._clause, result):
        res.extend(stmt)
    return res
[ "def", "visit_Capture", "(", "self", ",", "node", ":", "parsing", ".", "Capture", ")", "->", "[", "ast", ".", "stmt", "]", "or", "ast", ".", "expr", ":", "begintag", "=", "ast", ".", "Attribute", "(", "ast", ".", "Name", "(", "'self'", ",", "ast", ".", "Load", "(", ")", ")", ",", "'beginTag'", ",", "ast", ".", "Load", "(", ")", ")", "endtag", "=", "ast", ".", "Attribute", "(", "ast", ".", "Name", "(", "'self'", ",", "ast", ".", "Load", "(", ")", ")", ",", "'endTag'", ",", "ast", ".", "Load", "(", ")", ")", "begin", "=", "ast", ".", "Call", "(", "begintag", ",", "[", "ast", ".", "Str", "(", "node", ".", "tagname", ")", "]", ",", "[", "]", ",", "None", ",", "None", ")", "end", "=", "ast", ".", "Call", "(", "endtag", ",", "[", "ast", ".", "Str", "(", "node", ".", "tagname", ")", "]", ",", "[", "]", ",", "None", ",", "None", ")", "result", "=", "[", "begin", ",", "self", ".", "visit", "(", "node", ".", "pt", ")", ",", "end", "]", "for", "clause", "in", "result", ":", "if", "not", "isinstance", "(", "clause", ",", "ast", ".", "expr", ")", ":", "break", "else", ":", "return", "ast", ".", "BoolOp", "(", "ast", ".", "And", "(", ")", ",", "result", ")", "res", "=", "[", "]", "for", "stmt", "in", "map", "(", "self", ".", "_clause", ",", "result", ")", ":", "res", ".", "extend", "(", "stmt", ")", "return", "res" ]
38.285714
0.00182
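When every sub-clause compiles to an expression, visit_Capture above inlines the capture as a single and-chain. A minimal standalone sketch of building and evaluating such an ast.BoolOp (using the modern ast.Call signature, not the record's older five-argument form):

import ast

# Build `f('a') and g() and h('a')` programmatically
expr = ast.Expression(
    body=ast.BoolOp(
        op=ast.And(),
        values=[
            ast.Call(func=ast.Name(id='f', ctx=ast.Load()),
                     args=[ast.Constant('a')], keywords=[]),
            ast.Call(func=ast.Name(id='g', ctx=ast.Load()),
                     args=[], keywords=[]),
            ast.Call(func=ast.Name(id='h', ctx=ast.Load()),
                     args=[ast.Constant('a')], keywords=[]),
        ],
    )
)
ast.fix_missing_locations(expr)
code = compile(expr, '<generated>', 'eval')
print(eval(code, {'f': lambda t: True, 'g': lambda: True, 'h': lambda t: True}))
# True, with the same short-circuiting as a hand-written and-chain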
def get_pstats_print2list(fnames, filter_fnames=None, exclude_fnames=None,
                          sort=None, sort_reverse=None, limit=None):
    """Print stats with a filter or exclude filenames, sort index and limit.
    :param list fnames: cProfile standard files to process.
    :param list filter_fnames: Relative paths to filter and show them.
    :param list exclude_fnames: Relative paths to avoid showing them.
    :param str sort: Standard `pstats` key of value to sort the result.
        \n\t\t\t'calls' (call count)
        \n\t\t\t'cumulative' (cumulative time)
        \n\t\t\t'cumtime' (cumulative time)
        \n\t\t\t'file' (file name)
        \n\t\t\t'filename' (file name)
        \n\t\t\t'module' (file name)
        \n\t\t\t'ncalls' (call count)
        \n\t\t\t'pcalls' (primitive call count)
        \n\t\t\t'line' (line number)
        \n\t\t\t'name' (function name)
        \n\t\t\t'nfl' (name/file/line)
        \n\t\t\t'stdname' (standard name)
        \n\t\t\t'time' (internal time)
        \n\t\t\t'tottime' (internal time)
    :param bool sort_reverse: Reverse sort order.
    :param int limit: Limit max result.
    :returns: List of dicts with `pstats` print result after filters, sorted
        and limited.
    """
    if isinstance(fnames, basestring):
        fnames = [fnames]
    fnames_expanded = [
        os.path.expandvars(os.path.expanduser(fname)) for fname in fnames]
    stream = StringIO()
    try:
        stats = pstats.Stats(fnames[0], stream=stream)
        for fname in fnames_expanded[1:]:
            stats.add(fname)
    except TypeError:
        print("No cProfile stats valid.")
        return False
    except EOFError:
        print("Empty file cProfile stats valid.")
        return False
    except IOError:
        print("Error to open file.")
        return False
    stats.print_stats()
    stream.seek(0)
    field_list = get_field_list()
    line_stats_re = re.compile(
        r'(?P<%s>\d+/?\d+|\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+'
        r'(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>.*):(?P<%s>\d+)'
        r'\((?P<%s>.*)\)' % tuple(field_list))
    stats_list = []
    count = 0
    for line in stream:
        line = line.strip('\r\n ')
        line_stats_match = line_stats_re.match(line) if line else None
        fname = line_stats_match.group('file') if line_stats_match else None
        if fname and is_fname_match(fname, filter_fnames) and \
                not is_exclude(fname, exclude_fnames):
            data = dict([(field, line_stats_match.group(field))
                         for field in field_list])
            data['rcalls'], data['calls'] = (
                data.get('ncalls', '') + '/' + data.get('ncalls', '')
            ).split('/')[:2]
            data['factor'] = "%.2f" % (
                (float(data['rcalls']) - float(data['calls']) + 1) *
                float(data['cumtime']))
            data['cumulative'] = data['cumtime']
            stats_list.append(data)
            count += 1
    return sorted(stats_list, key=lambda key: float(key[sort or 'factor']),
                  reverse=not sort_reverse)[:limit]
[ "def", "get_pstats_print2list", "(", "fnames", ",", "filter_fnames", "=", "None", ",", "exclude_fnames", "=", "None", ",", "sort", "=", "None", ",", "sort_reverse", "=", "None", ",", "limit", "=", "None", ")", ":", "if", "isinstance", "(", "fnames", ",", "basestring", ")", ":", "fnames", "=", "[", "fnames", "]", "fnames_expanded", "=", "[", "os", ".", "path", ".", "expandvars", "(", "os", ".", "path", ".", "expanduser", "(", "fname", ")", ")", "for", "fname", "in", "fnames", "]", "stream", "=", "StringIO", "(", ")", "try", ":", "stats", "=", "pstats", ".", "Stats", "(", "fnames", "[", "0", "]", ",", "stream", "=", "stream", ")", "for", "fname", "in", "fnames_expanded", "[", "1", ":", "]", ":", "stats", ".", "add", "(", "fname", ")", "except", "TypeError", ":", "print", "(", "\"No cProfile stats valid.\"", ")", "return", "False", "except", "EOFError", ":", "print", "(", "\"Empty file cProfile stats valid.\"", ")", "return", "False", "except", "IOError", ":", "print", "(", "\"Error to open file.\"", ")", "return", "False", "stats", ".", "print_stats", "(", ")", "stream", ".", "seek", "(", "0", ")", "field_list", "=", "get_field_list", "(", ")", "line_stats_re", "=", "re", ".", "compile", "(", "r'(?P<%s>\\d+/?\\d+|\\d+)\\s+(?P<%s>\\d+\\.?\\d+)\\s+(?P<%s>\\d+\\.?\\d+)\\s+'", "r'(?P<%s>\\d+\\.?\\d+)\\s+(?P<%s>\\d+\\.?\\d+)\\s+(?P<%s>.*):(?P<%s>\\d+)'", "r'\\((?P<%s>.*)\\)'", "%", "tuple", "(", "field_list", ")", ")", "stats_list", "=", "[", "]", "count", "=", "0", "for", "line", "in", "stream", ":", "line", "=", "line", ".", "strip", "(", "'\\r\\n '", ")", "line_stats_match", "=", "line_stats_re", ".", "match", "(", "line", ")", "if", "line", "else", "None", "fname", "=", "line_stats_match", ".", "group", "(", "'file'", ")", "if", "line_stats_match", "else", "None", "if", "fname", "and", "is_fname_match", "(", "fname", ",", "filter_fnames", ")", "and", "not", "is_exclude", "(", "fname", ",", "exclude_fnames", ")", ":", "data", "=", "dict", "(", "[", "(", "field", ",", "line_stats_match", ".", "group", "(", "field", ")", ")", "for", "field", "in", "field_list", "]", ")", "data", "[", "'rcalls'", "]", ",", "data", "[", "'calls'", "]", "=", "(", "data", ".", "get", "(", "'ncalls'", ",", "''", ")", "+", "'/'", "+", "data", ".", "get", "(", "'ncalls'", ",", "''", ")", ")", ".", "split", "(", "'/'", ")", "[", ":", "2", "]", "data", "[", "'factor'", "]", "=", "\"%.2f\"", "%", "(", "(", "float", "(", "data", "[", "'rcalls'", "]", ")", "-", "float", "(", "data", "[", "'calls'", "]", ")", "+", "1", ")", "*", "float", "(", "data", "[", "'cumtime'", "]", ")", ")", "data", "[", "'cumulative'", "]", "=", "data", "[", "'cumtime'", "]", "stats_list", ".", "append", "(", "data", ")", "count", "+=", "1", "return", "sorted", "(", "stats_list", ",", "key", "=", "lambda", "key", ":", "float", "(", "key", "[", "sort", "or", "'factor'", "]", ")", ",", "reverse", "=", "not", "sort_reverse", ")", "[", ":", "limit", "]" ]
41.432432
0.000319
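For context, a usage sketch of the record above, assuming it is get_pstats_print2list from the pstats-print2list package (the import path and the exact result keys beyond 'file', 'ncalls', 'cumtime', and 'factor' are assumptions):

import cProfile

from pstats_print2list import get_pstats_print2list  # assumed import path

# Dump standard cProfile stats for some workload
cProfile.run("sum(i * i for i in range(100000))", "/tmp/demo.stats")

rows = get_pstats_print2list(
    "/tmp/demo.stats",
    sort="cumulative",  # any float-valued key; defaults to the derived 'factor'
    limit=5,
)
for row in rows or []:  # the function returns False on unreadable input
    print(row["ncalls"], row["cumtime"], row["file"])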
def mtf_transformer_lm_baseline():
  """Small language model to run on 1 TPU.

  Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs)
  Results:
    params/10^9  log-ppl(per-token)
    0.14         3.202

  Returns:
    a hparams
  """
  hparams = mtf_transformer_paper_lm(-1)
  hparams.batch_size = 128
  hparams.learning_rate_decay_steps = 27200  # one epoch on lm1b
  hparams.mesh_shape = "batch:8"
  return hparams
[ "def", "mtf_transformer_lm_baseline", "(", ")", ":", "hparams", "=", "mtf_transformer_paper_lm", "(", "-", "1", ")", "hparams", ".", "batch_size", "=", "128", "hparams", ".", "learning_rate_decay_steps", "=", "27200", "# one epoch on lm1b", "hparams", ".", "mesh_shape", "=", "\"batch:8\"", "return", "hparams" ]
27.3125
0.015487
def whois_list(request, format=None):
    """
    Retrieve basic whois information related to a layer2 or layer3
    network address.
    """
    results = []
    # layer3 results
    for ip in Ip.objects.select_related().all():
        interface = ip.interface
        user = interface.device.node.user
        device = interface.device
        results.append({
            'address': str(ip.address),
            'user': user.username,
            'name': user.get_full_name(),
            'device': device.name,
            'node': device.node.name
        })
    # layer2 results
    for interface in Interface.objects.select_related().all():
        if interface.mac is None:
            continue
        user = interface.device.node.user
        device = interface.device
        results.append({
            'address': str(interface.mac).replace('-', ':'),
            'user': user.username,
            'name': user.get_full_name(),
            'device': device.name,
            'node': device.node.name
        })
    return Response(results)
[ "def", "whois_list", "(", "request", ",", "format", "=", "None", ")", ":", "results", "=", "[", "]", "# layer3 results", "for", "ip", "in", "Ip", ".", "objects", ".", "select_related", "(", ")", ".", "all", "(", ")", ":", "interface", "=", "ip", ".", "interface", "user", "=", "interface", ".", "device", ".", "node", ".", "user", "device", "=", "interface", ".", "device", "results", ".", "append", "(", "{", "'address'", ":", "str", "(", "ip", ".", "address", ")", ",", "'user'", ":", "user", ".", "username", ",", "'name'", ":", "user", ".", "get_full_name", "(", ")", ",", "'device'", ":", "device", ".", "name", ",", "'node'", ":", "device", ".", "node", ".", "name", "}", ")", "# layer2 results", "for", "interface", "in", "Interface", ".", "objects", ".", "select_related", "(", ")", ".", "all", "(", ")", ":", "if", "interface", ".", "mac", "is", "None", ":", "continue", "user", "=", "interface", ".", "device", ".", "node", ".", "user", "device", "=", "interface", ".", "device", "results", ".", "append", "(", "{", "'address'", ":", "str", "(", "interface", ".", "mac", ")", ".", "replace", "(", "'-'", ",", "':'", ")", ",", "'user'", ":", "user", ".", "username", ",", "'name'", ":", "user", ".", "get_full_name", "(", ")", ",", "'device'", ":", "device", ".", "name", ",", "'node'", ":", "device", ".", "node", ".", "name", "}", ")", "return", "Response", "(", "results", ")" ]
32.83871
0.001908
def load_modules(self, data=None, proxy=None):
    '''
    Load the modules into the state
    '''
    log.info('Loading fresh modules for state activity')
    self.utils = salt.loader.utils(self.opts)
    self.functions = salt.loader.minion_mods(self.opts, self.state_con,
                                             utils=self.utils,
                                             proxy=self.proxy)
    if isinstance(data, dict):
        if data.get('provider', False):
            if isinstance(data['provider'], six.string_types):
                providers = [{data['state']: data['provider']}]
            elif isinstance(data['provider'], list):
                providers = data['provider']
            else:
                providers = {}
            for provider in providers:
                for mod in provider:
                    funcs = salt.loader.raw_mod(self.opts,
                                                provider[mod],
                                                self.functions)
                    if funcs:
                        for func in funcs:
                            f_key = '{0}{1}'.format(
                                mod,
                                func[func.rindex('.'):]
                            )
                            self.functions[f_key] = funcs[func]
    self.serializers = salt.loader.serializers(self.opts)
    self._load_states()
    self.rend = salt.loader.render(self.opts, self.functions,
                                   states=self.states, proxy=self.proxy,
                                   context=self.state_con)
[ "def", "load_modules", "(", "self", ",", "data", "=", "None", ",", "proxy", "=", "None", ")", ":", "log", ".", "info", "(", "'Loading fresh modules for state activity'", ")", "self", ".", "utils", "=", "salt", ".", "loader", ".", "utils", "(", "self", ".", "opts", ")", "self", ".", "functions", "=", "salt", ".", "loader", ".", "minion_mods", "(", "self", ".", "opts", ",", "self", ".", "state_con", ",", "utils", "=", "self", ".", "utils", ",", "proxy", "=", "self", ".", "proxy", ")", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "if", "data", ".", "get", "(", "'provider'", ",", "False", ")", ":", "if", "isinstance", "(", "data", "[", "'provider'", "]", ",", "six", ".", "string_types", ")", ":", "providers", "=", "[", "{", "data", "[", "'state'", "]", ":", "data", "[", "'provider'", "]", "}", "]", "elif", "isinstance", "(", "data", "[", "'provider'", "]", ",", "list", ")", ":", "providers", "=", "data", "[", "'provider'", "]", "else", ":", "providers", "=", "{", "}", "for", "provider", "in", "providers", ":", "for", "mod", "in", "provider", ":", "funcs", "=", "salt", ".", "loader", ".", "raw_mod", "(", "self", ".", "opts", ",", "provider", "[", "mod", "]", ",", "self", ".", "functions", ")", "if", "funcs", ":", "for", "func", "in", "funcs", ":", "f_key", "=", "'{0}{1}'", ".", "format", "(", "mod", ",", "func", "[", "func", ".", "rindex", "(", "'.'", ")", ":", "]", ")", "self", ".", "functions", "[", "f_key", "]", "=", "funcs", "[", "func", "]", "self", ".", "serializers", "=", "salt", ".", "loader", ".", "serializers", "(", "self", ".", "opts", ")", "self", ".", "_load_states", "(", ")", "self", ".", "rend", "=", "salt", ".", "loader", ".", "render", "(", "self", ".", "opts", ",", "self", ".", "functions", ",", "states", "=", "self", ".", "states", ",", "proxy", "=", "self", ".", "proxy", ",", "context", "=", "self", ".", "state_con", ")" ]
49.454545
0.002404
def handle_processing_packets():
    """handle_processing_packets

    Replacement packet processing engine.
    This is not done.
    """
    host = os.getenv(
        "LISTEN_ON_HOST",
        "127.0.0.1").strip().lstrip()
    port = int(os.getenv(
        "LISTEN_ON_PORT",
        "80").strip().lstrip())
    backlog = int(os.getenv(
        "LISTEN_BACKLOG",
        "5").strip().lstrip())
    size = int(os.getenv(
        "LISTEN_SIZE",
        "102400").strip().lstrip())
    sleep_in_seconds = float(os.getenv(
        "LISTEN_SLEEP",
        "0.5").strip().lstrip())
    needs_response = bool(os.getenv(
        "LISTEN_SEND_RESPONSE",
        "0").strip().lstrip() == "1")
    shutdown_hook = os.getenv(
        "LISTEN_SHUTDOWN_HOOK",
        "/tmp/shutdown-listen-server-{}-{}".format(
            host,
            port)).strip().lstrip()
    filter_key = os.getenv(
        "IGNORE_KEY",
        INCLUDED_IGNORE_KEY).strip().lstrip()

    if os.path.exists(shutdown_hook):
        log.info(("Please remove the shutdown hook file: "
                  "\nrm -f {}")
                 .format(
                     shutdown_hook))
        sys.exit(1)

    default_filter_key = filter_key
    bytes_for_filter_key = len(default_filter_key)
    offset_to_filter_key = (-1 * bytes_for_filter_key)
    offset_to_msg = offset_to_filter_key - 1

    now = datetime.datetime.now().isoformat()
    log.info(("{} - Starting Server address={}:{} "
              "backlog={} size={} sleep={} shutdown={} "
              "filter_key={}")
             .format(
                 now,
                 host,
                 port,
                 backlog,
                 size,
                 sleep_in_seconds,
                 shutdown_hook,
                 default_filter_key))

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((host, port))
    s.listen(backlog)

    client, address = s.accept()

    midx = 0
    while 1:
        data = None
        address = None
        ignore_key = None
        try:
            if not client:
                client, address = s.accept()
        except Exception as e:
            log.error(("socket accept with ex={}")
                      .format(
                          e))
        try:
            if client:
                data = client.recv(size)
        except Exception as e:
            log.error(("recv - disconnected with ex={}")
                      .format(
                          e))

        if data:
            now = datetime.datetime.now().isoformat()
            packet_to_process = data[0:offset_to_msg]
            ignore_key = data[offset_to_filter_key:]
            log.info(("decoding data={} key={}")
                     .format(
                         packet_to_process,
                         ignore_key))
            msg = None
            try:
                msg = json.loads(
                    packet_to_process.decode("utf-8"))
            except Exception as e:
                msg = None
                log.error(("Invalid data={} with ex={}")
                          .format(
                              packet_to_process,
                              e))
            if msg:
                log.info(("received msg={} "
                          "data={} replying - ignore='{}'")
                         .format(
                             ppj(msg),
                             packet_to_process,
                             ignore_key))
                if msg["status"] == VALID:
                    if msg["data_type"] == TCP:
                        log.info("TCP")
                    elif msg["data_type"] == UDP:
                        log.info("TCP")
                    elif msg["data_type"] == ARP:
                        log.info("TCP")
                    elif msg["data_type"] == ICMP:
                        log.info("TCP")
                    else:
                        log.error(("unsuppported type={}")
                                  .format(
                                      msg["data_type"]))
                    # end of supported eth protocol message types
                else:
                    log.error(("unsuppported msg status={}")
                              .format(
                                  msg["status"]))
                # end if msg was VALID
            # end of if found msg
            midx += 1
            if midx > 1000000:
                midx = 0
        else:
            log.debug("ignoring invalid data")
        # end of if valid msg or not

        if needs_response:
            client.send(ignore_key)
        else:
            log.info("no response")

        time.sleep(sleep_in_seconds)

        if os.path.exists(shutdown_hook):
            now = datetime.datetime.now().isoformat()
            log.info(("{} detected shutdown "
                      "file={}")
                     .format(
                         now,
                         shutdown_hook))
    # end of loop

    log.info("shutting down")
    client.close()
    log.info("done")
[ "def", "handle_processing_packets", "(", ")", ":", "host", "=", "os", ".", "getenv", "(", "\"LISTEN_ON_HOST\"", ",", "\"127.0.0.1\"", ")", ".", "strip", "(", ")", ".", "lstrip", "(", ")", "port", "=", "int", "(", "os", ".", "getenv", "(", "\"LISTEN_ON_PORT\"", ",", "\"80\"", ")", ".", "strip", "(", ")", ".", "lstrip", "(", ")", ")", "backlog", "=", "int", "(", "os", ".", "getenv", "(", "\"LISTEN_BACKLOG\"", ",", "\"5\"", ")", ".", "strip", "(", ")", ".", "lstrip", "(", ")", ")", "size", "=", "int", "(", "os", ".", "getenv", "(", "\"LISTEN_SIZE\"", ",", "\"102400\"", ")", ".", "strip", "(", ")", ".", "lstrip", "(", ")", ")", "sleep_in_seconds", "=", "float", "(", "os", ".", "getenv", "(", "\"LISTEN_SLEEP\"", ",", "\"0.5\"", ")", ".", "strip", "(", ")", ".", "lstrip", "(", ")", ")", "needs_response", "=", "bool", "(", "os", ".", "getenv", "(", "\"LISTEN_SEND_RESPONSE\"", ",", "\"0\"", ")", ".", "strip", "(", ")", ".", "lstrip", "(", ")", "==", "\"1\"", ")", "shutdown_hook", "=", "os", ".", "getenv", "(", "\"LISTEN_SHUTDOWN_HOOK\"", ",", "\"/tmp/shutdown-listen-server-{}-{}\"", ".", "format", "(", "host", ",", "port", ")", ")", ".", "strip", "(", ")", ".", "lstrip", "(", ")", "filter_key", "=", "os", ".", "getenv", "(", "\"IGNORE_KEY\"", ",", "INCLUDED_IGNORE_KEY", ")", ".", "strip", "(", ")", ".", "lstrip", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "shutdown_hook", ")", ":", "log", ".", "info", "(", "(", "\"Please remove the shutdown hook file: \"", "\"\\nrm -f {}\"", ")", ".", "format", "(", "shutdown_hook", ")", ")", "sys", ".", "exit", "(", "1", ")", "default_filter_key", "=", "filter_key", "bytes_for_filter_key", "=", "len", "(", "default_filter_key", ")", "offset_to_filter_key", "=", "(", "-", "1", "*", "bytes_for_filter_key", ")", "offset_to_msg", "=", "offset_to_filter_key", "-", "1", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "log", ".", "info", "(", "(", "\"{} - Starting Server address={}:{} \"", "\"backlog={} size={} sleep={} shutdown={} \"", "\"filter_key={}\"", ")", ".", "format", "(", "now", ",", "host", ",", "port", ",", "backlog", ",", "size", ",", "sleep_in_seconds", ",", "shutdown_hook", ",", "default_filter_key", ")", ")", "s", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "s", ".", "bind", "(", "(", "host", ",", "port", ")", ")", "s", ".", "listen", "(", "backlog", ")", "client", ",", "address", "=", "s", ".", "accept", "(", ")", "midx", "=", "0", "while", "1", ":", "data", "=", "None", "address", "=", "None", "ignore_key", "=", "None", "try", ":", "if", "not", "client", ":", "client", ",", "address", "=", "s", ".", "accept", "(", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "(", "\"socket accept with ex={}\"", ")", ".", "format", "(", "e", ")", ")", "try", ":", "if", "client", ":", "data", "=", "client", ".", "recv", "(", "size", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "(", "\"recv - disconnected with ex={}\"", ")", ".", "format", "(", "e", ")", ")", "if", "data", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "packet_to_process", "=", "data", "[", "0", ":", "offset_to_msg", "]", "ignore_key", "=", "data", "[", "offset_to_filter_key", ":", "]", "log", ".", "info", "(", "(", "\"decoding data={} key={}\"", ")", ".", "format", "(", "packet_to_process", ",", "ignore_key", ")", ")", "msg", "=", "None", "try", ":", "msg", "=", "json", ".", "loads", "(", 
"packet_to_process", ".", "decode", "(", "\"utf-8\"", ")", ")", "except", "Exception", "as", "e", ":", "msg", "=", "None", "log", ".", "error", "(", "(", "\"Invalid data={} with ex={}\"", ")", ".", "format", "(", "packet_to_process", ",", "e", ")", ")", "if", "msg", ":", "log", ".", "info", "(", "(", "\"received msg={} \"", "\"data={} replying - ignore='{}'\"", ")", ".", "format", "(", "ppj", "(", "msg", ")", ",", "packet_to_process", ",", "ignore_key", ")", ")", "if", "msg", "[", "\"status\"", "]", "==", "VALID", ":", "if", "msg", "[", "\"data_type\"", "]", "==", "TCP", ":", "log", ".", "info", "(", "\"TCP\"", ")", "elif", "msg", "[", "\"data_type\"", "]", "==", "UDP", ":", "log", ".", "info", "(", "\"TCP\"", ")", "elif", "msg", "[", "\"data_type\"", "]", "==", "ARP", ":", "log", ".", "info", "(", "\"TCP\"", ")", "elif", "msg", "[", "\"data_type\"", "]", "==", "ICMP", ":", "log", ".", "info", "(", "\"TCP\"", ")", "else", ":", "log", ".", "error", "(", "(", "\"unsuppported type={}\"", ")", ".", "format", "(", "msg", "[", "\"data_type\"", "]", ")", ")", "# end of supported eth protocol message types", "else", ":", "log", ".", "error", "(", "(", "\"unsuppported msg status={}\"", ")", ".", "format", "(", "msg", "[", "\"status\"", "]", ")", ")", "# end if msg was VALID", "# end of if found msg", "midx", "+=", "1", "if", "midx", ">", "1000000", ":", "midx", "=", "0", "else", ":", "log", ".", "debug", "(", "\"ignoring invalid data\"", ")", "# end of if valid msg or not", "if", "needs_response", ":", "client", ".", "send", "(", "ignore_key", ")", "else", ":", "log", ".", "info", "(", "\"no response\"", ")", "time", ".", "sleep", "(", "sleep_in_seconds", ")", "if", "os", ".", "path", ".", "exists", "(", "shutdown_hook", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "log", ".", "info", "(", "(", "\"{} detected shutdown \"", "\"file={}\"", ")", ".", "format", "(", "now", ",", "shutdown_hook", ")", ")", "# end of loop", "log", ".", "info", "(", "\"shutting down\"", ")", "client", ".", "close", "(", ")", "log", ".", "info", "(", "\"done\"", ")" ]
30.322981
0.000198
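The negative offsets above implement a simple framing: payload bytes, one separator byte, then a fixed-length trailing key. A standalone sketch of that slicing (made-up key and payload):

key = b"SECRET"
payload = b'{"status": 1}'
data = payload + b"|" + key          # what a client would send

offset_to_filter_key = -1 * len(key)
offset_to_msg = offset_to_filter_key - 1

print(data[0:offset_to_msg])         # b'{"status": 1}'  (the JSON to decode)
print(data[offset_to_filter_key:])   # b'SECRET'         (the trailing key)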
def price_dataframe(symbols='sp5002012',
                    start=datetime.datetime(2008, 1, 1),
                    end=datetime.datetime(2009, 12, 31),
                    price_type='actual_close',
                    cleaner=clean_dataframe,
                    ):
    """Retrieve the prices of a list of equities as a DataFrame (columns = symbols)

    Arguments:
      symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
        e.g. ["AAPL", " slv ", GLD", "GOOG", "$SPX", "XOM", "msft"]
      start (datetime): The date at the start of the period being analyzed.
      end (datetime): The date at the end of the period being analyzed.
        Yahoo data stops at 2013/1/1
    """
    if isinstance(price_type, basestring):
        price_type = [price_type]
    start = util.normalize_date(start or datetime.date(2008, 1, 1))
    end = util.normalize_date(end or datetime.date(2009, 12, 31))
    symbols = normalize_symbols(symbols)
    t = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
    df = clean_dataframes(dataobj.get_data(t, symbols, price_type))
    if not df or len(df) > 1:
        return cleaner(df)
    else:
        return cleaner(df[0])
[ "def", "price_dataframe", "(", "symbols", "=", "'sp5002012'", ",", "start", "=", "datetime", ".", "datetime", "(", "2008", ",", "1", ",", "1", ")", ",", "end", "=", "datetime", ".", "datetime", "(", "2009", ",", "12", ",", "31", ")", ",", "price_type", "=", "'actual_close'", ",", "cleaner", "=", "clean_dataframe", ",", ")", ":", "if", "isinstance", "(", "price_type", ",", "basestring", ")", ":", "price_type", "=", "[", "price_type", "]", "start", "=", "util", ".", "normalize_date", "(", "start", "or", "datetime", ".", "date", "(", "2008", ",", "1", ",", "1", ")", ")", "end", "=", "util", ".", "normalize_date", "(", "end", "or", "datetime", ".", "date", "(", "2009", ",", "12", ",", "31", ")", ")", "symbols", "=", "normalize_symbols", "(", "symbols", ")", "t", "=", "du", ".", "getNYSEdays", "(", "start", ",", "end", ",", "datetime", ".", "timedelta", "(", "hours", "=", "16", ")", ")", "df", "=", "clean_dataframes", "(", "dataobj", ".", "get_data", "(", "t", ",", "symbols", ",", "price_type", ")", ")", "if", "not", "df", "or", "len", "(", "df", ")", ">", "1", ":", "return", "cleaner", "(", "df", ")", "else", ":", "return", "cleaner", "(", "df", "[", "0", "]", ")" ]
41.461538
0.00816
def disvec(self, x, y, aq=None):
    '''Returns array of size (2, nparam, naq)'''
    if aq is None:
        aq = self.model.aq.find_aquifer_data(x, y)
    return np.sum(self.parameters * self.disvecinf(x, y, aq), 1)
[ "def", "disvec", "(", "self", ",", "x", ",", "y", ",", "aq", "=", "None", ")", ":", "if", "aq", "is", "None", ":", "aq", "=", "self", ".", "model", ".", "aq", ".", "find_aquifer_data", "(", "x", ",", "y", ")", "return", "np", ".", "sum", "(", "self", ".", "parameters", "*", "self", ".", "disvecinf", "(", "x", ",", "y", ",", "aq", ")", ",", "1", ")" ]
54.25
0.013636
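One shape note on the record above: np.sum(..., 1) collapses the nparam axis, so a (2, nparam, naq) influence array produces a (2, naq) result. A standalone numpy check, with the parameters layout (nparam, naq) assumed for illustration:

import numpy as np

nparam, naq = 3, 2
parameters = np.random.rand(nparam, naq)  # per-parameter, per-aquifer weights (assumed layout)
infmat = np.random.rand(2, nparam, naq)   # disvecinf-like influence array

qxqy = np.sum(parameters * infmat, 1)     # broadcasts to (2, nparam, naq), sums over nparam
print(qxqy.shape)                         # (2, naq): x and y discharge per aquifer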
def parse_localclasspath(self, tup_tree):
    """
    Parse a LOCALCLASSPATH element and return the class path it
    represents as a CIMClassName object.

      ::

        <!ELEMENT LOCALCLASSPATH (LOCALNAMESPACEPATH, CLASSNAME)>
    """
    self.check_node(tup_tree, 'LOCALCLASSPATH')
    k = kids(tup_tree)
    if len(k) != 2:
        raise CIMXMLParseError(
            _format("Element {0!A} has invalid number of child elements "
                    "{1!A} (expecting two child elements "
                    "(LOCALNAMESPACEPATH, CLASSNAME))",
                    name(tup_tree), k),
            conn_id=self.conn_id)
    namespace = self.parse_localnamespacepath(k[0])
    class_path = self.parse_classname(k[1])
    class_path.namespace = namespace
    return class_path
[ "def", "parse_localclasspath", "(", "self", ",", "tup_tree", ")", ":", "self", ".", "check_node", "(", "tup_tree", ",", "'LOCALCLASSPATH'", ")", "k", "=", "kids", "(", "tup_tree", ")", "if", "len", "(", "k", ")", "!=", "2", ":", "raise", "CIMXMLParseError", "(", "_format", "(", "\"Element {0!A} has invalid number of child elements \"", "\"{1!A} (expecting two child elements \"", "\"(LOCALNAMESPACEPATH, CLASSNAME))\"", ",", "name", "(", "tup_tree", ")", ",", "k", ")", ",", "conn_id", "=", "self", ".", "conn_id", ")", "namespace", "=", "self", ".", "parse_localnamespacepath", "(", "k", "[", "0", "]", ")", "class_path", "=", "self", ".", "parse_classname", "(", "k", "[", "1", "]", ")", "class_path", ".", "namespace", "=", "namespace", "return", "class_path" ]
31.230769
0.002389
def is_highlink_density(self, element):
    """
    checks the density of links within a node,
    is there not much text and most of it contains linky shit?
    if so it's no good
    """
    links = self.parser.getElementsByTag(element, tag='a')
    if not links:
        return False
    text = self.parser.getText(element)
    words = text.split(' ')
    words_number = float(len(words))
    link_text_parts = []
    for link in links:
        link_text_parts.append(self.parser.getText(link))
    link_text = ''.join(link_text_parts)
    link_words = link_text.split(' ')
    number_of_link_words = float(len(link_words))
    number_of_links = float(len(links))
    link_divisor = float(number_of_link_words / words_number)
    score = float(link_divisor * number_of_links)
    if score >= 1.0:
        return True
    return False
[ "def", "is_highlink_density", "(", "self", ",", "element", ")", ":", "links", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "element", ",", "tag", "=", "'a'", ")", "if", "not", "links", ":", "return", "False", "text", "=", "self", ".", "parser", ".", "getText", "(", "element", ")", "words", "=", "text", ".", "split", "(", "' '", ")", "words_number", "=", "float", "(", "len", "(", "words", ")", ")", "link_text_parts", "=", "[", "]", "for", "link", "in", "links", ":", "link_text_parts", ".", "append", "(", "self", ".", "parser", ".", "getText", "(", "link", ")", ")", "link_text", "=", "''", ".", "join", "(", "link_text_parts", ")", "link_words", "=", "link_text", ".", "split", "(", "' '", ")", "number_of_link_words", "=", "float", "(", "len", "(", "link_words", ")", ")", "number_of_links", "=", "float", "(", "len", "(", "links", ")", ")", "link_divisor", "=", "float", "(", "number_of_link_words", "/", "words_number", ")", "score", "=", "float", "(", "link_divisor", "*", "number_of_links", ")", "if", "score", ">=", "1.0", ":", "return", "True", "return", "False" ]
34.807692
0.002151
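The score computed above is (link words / total words) times the number of links, flagged at 1.0 or more. The same arithmetic on plain strings, as a hypothetical standalone helper (the real method reads a DOM node):

def link_density_score(text, link_texts):
    # Mirrors is_highlink_density: (link words / total words) * link count
    words_number = float(len(text.split(' ')))
    link_words = ''.join(link_texts).split(' ')  # note: joined without a separator
    return (len(link_words) / words_number) * len(link_texts)

text = ' '.join(['word'] * 20)                        # 20 words of node text
links = [' '.join(['link'] * 5), ' '.join(['link'] * 5)]
print(link_density_score(text, links))
# (9 / 20) * 2 = 0.9, just under the 1.0 cutoff
# (9, not 10: the separator-less join merges one boundary word pair)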
def random_link(self, dataset, state, backward=False):
    """Get a random link.

    Parameters
    ----------
    dataset : `object`
        Dataset from `self.get_dataset()`.
    state : `object`
        Link source.
    backward : `bool`, optional
        Link direction.

    Raises
    ------
    ValueError
        If link count is invalid.

    Returns
    -------
    (`str` or `None`, `object` or `None`)
        Link value and next state.
    """
    links = self.get_links(dataset, state, backward)
    if not links:
        return None, None
    x = randint(0, sum(link[0] for link in links) - 1)
    for link in links:
        count = link[0]
        if x < count:
            return link[1], self.follow_link(link, state, backward)
        x -= count
    raise RuntimeError('invalid link sum')
[ "def", "random_link", "(", "self", ",", "dataset", ",", "state", ",", "backward", "=", "False", ")", ":", "links", "=", "self", ".", "get_links", "(", "dataset", ",", "state", ",", "backward", ")", "if", "not", "links", ":", "return", "None", ",", "None", "x", "=", "randint", "(", "0", ",", "sum", "(", "link", "[", "0", "]", "for", "link", "in", "links", ")", "-", "1", ")", "for", "link", "in", "links", ":", "count", "=", "link", "[", "0", "]", "if", "x", "<", "count", ":", "return", "link", "[", "1", "]", ",", "self", ".", "follow_link", "(", "link", ",", "state", ",", "backward", ")", "x", "-=", "count", "raise", "RuntimeError", "(", "'invalid link sum'", ")" ]
27.78125
0.002174
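random_link draws x uniformly below the total count and walks the list subtracting counts, so each link is selected with probability proportional to its count. The bare pattern as a standalone sketch:

from random import randint

def weighted_choice(links):
    # links: list of (count, value) pairs; pick value with probability count/total
    x = randint(0, sum(count for count, _ in links) - 1)
    for count, value in links:
        if x < count:
            return value
        x -= count
    raise RuntimeError('invalid link sum')

picks = [weighted_choice([(3, 'a'), (1, 'b')]) for _ in range(10000)]
print(picks.count('a') / len(picks))  # ~0.75, i.e. 3/(3+1)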
def copystat(self, target):
    """Copies the permissions, times and flags from this to the `target`.

    The owner is not copied.
    """
    shutil.copystat(self.path, self._to_backend(target))
[ "def", "copystat", "(", "self", ",", "target", ")", ":", "shutil", ".", "copystat", "(", "self", ".", "path", ",", "self", ".", "_to_backend", "(", "target", ")", ")" ]
34.5
0.009434
def pb_for_delete(document_path, option):
    """Make a ``Write`` protobuf for ``delete()`` methods.

    Args:
        document_path (str): A fully-qualified document path.
        option (optional[~.firestore_v1beta1.client.WriteOption]): A write
            option to make assertions / preconditions on the server state of
            the document before applying changes.

    Returns:
        google.cloud.firestore_v1beta1.types.Write: A
        ``Write`` protobuf instance for the ``delete()``.
    """
    write_pb = write_pb2.Write(delete=document_path)
    if option is not None:
        option.modify_write(write_pb)
    return write_pb
[ "def", "pb_for_delete", "(", "document_path", ",", "option", ")", ":", "write_pb", "=", "write_pb2", ".", "Write", "(", "delete", "=", "document_path", ")", "if", "option", "is", "not", "None", ":", "option", ".", "modify_write", "(", "write_pb", ")", "return", "write_pb" ]
34.944444
0.001548
def _convert_filetime_to_timestamp(filetime):
    """
    Windows returns times as 64-bit unsigned longs that are the number
    of hundreds of nanoseconds since Jan 1 1601. This converts it to
    a datetime object.

    :param filetime:
        A FILETIME struct object

    :return:
        An integer unix timestamp
    """
    hundreds_nano_seconds = struct.unpack(
        b'>Q',
        struct.pack(
            b'>LL',
            filetime.dwHighDateTime,
            filetime.dwLowDateTime
        )
    )[0]
    seconds_since_1601 = hundreds_nano_seconds / 10000000
    return seconds_since_1601 - 11644473600
[ "def", "_convert_filetime_to_timestamp", "(", "filetime", ")", ":", "hundreds_nano_seconds", "=", "struct", ".", "unpack", "(", "b'>Q'", ",", "struct", ".", "pack", "(", "b'>LL'", ",", "filetime", ".", "dwHighDateTime", ",", "filetime", ".", "dwLowDateTime", ")", ")", "[", "0", "]", "seconds_since_1601", "=", "hundreds_nano_seconds", "/", "10000000", "return", "seconds_since_1601", "-", "11644473600" ]
26
0.001613
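A quick sanity check of the arithmetic above: the Unix epoch is 11644473600 seconds after the FILETIME epoch (1601-01-01), so that moment expressed in 100 ns ticks must convert to 0. A standalone sketch with a stand-in struct (the real FILETIME comes from ctypes on Windows):

import struct
from collections import namedtuple

FILETIME = namedtuple('FILETIME', 'dwHighDateTime dwLowDateTime')  # stand-in

ticks = 11644473600 * 10000000  # 100 ns ticks from 1601-01-01 to 1970-01-01
ft = FILETIME(dwHighDateTime=ticks >> 32, dwLowDateTime=ticks & 0xFFFFFFFF)

hundreds_nano_seconds = struct.unpack(
    b'>Q', struct.pack(b'>LL', ft.dwHighDateTime, ft.dwLowDateTime))[0]
print(hundreds_nano_seconds // 10000000 - 11644473600)  # 0 -> 1970-01-01T00:00:00Z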
def modulename(cls, depth=1):
    """ get caller's __name__ """
    depth += cls.extra_depth
    frame = sys._getframe(depth)
    return frame.f_globals['__name__']
[ "def", "modulename", "(", "cls", ",", "depth", "=", "1", ")", ":", "depth", "+=", "cls", ".", "extra_depth", "frame", "=", "sys", ".", "_getframe", "(", "depth", ")", "return", "frame", ".", "f_globals", "[", "'__name__'", "]" ]
27.142857
0.010204
def _synthesize_multiple_generic(self, helper_function, text_file,
                                 output_file_path, quit_after=None,
                                 backwards=False):
    """
    Synthesize multiple fragments, generic function.

    The ``helper_function`` is a function that takes parameters
    ``(text, voice_code, output_file_path)``
    and returns a tuple
    ``(result, (audio_length, audio_sample_rate, audio_format, audio_samples))``.

    :rtype: tuple (result, (anchors, current_time, num_chars))
    """
    self.log(u"Calling TTS engine using multiple generic function...")

    # get sample rate and codec
    self.log(u"Determining codec and sample rate...")
    if (self.OUTPUT_AUDIO_FORMAT is None) or (len(self.OUTPUT_AUDIO_FORMAT) != 3):
        self.log(u"Determining codec and sample rate with dummy text...")
        succeeded, data = helper_function(
            text=u"Dummy text to get sample_rate",
            voice_code=self._language_to_voice_code(self.DEFAULT_LANGUAGE),
            output_file_path=None
        )
        if not succeeded:
            self.log_crit(u"An unexpected error occurred in helper_function")
            return (False, None)
        du_nu, sample_rate, codec, da_nu = data
        self.log(u"Determining codec and sample rate with dummy text... done")
    else:
        self.log(u"Reading codec and sample rate from OUTPUT_AUDIO_FORMAT")
        codec, channels_nu, sample_rate = self.OUTPUT_AUDIO_FORMAT
    self.log(u"Determining codec and sample rate... done")
    self.log([u" codec: %s", codec])
    self.log([u" sample rate: %d", sample_rate])

    # open output file
    output_file = AudioFile(rconf=self.rconf, logger=self.logger)
    output_file.audio_format = codec
    output_file.audio_channels = 1
    output_file.audio_sample_rate = sample_rate

    # create output
    anchors = []
    current_time = TimeValue("0.000")
    num_chars = 0
    fragments = text_file.fragments
    if backwards:
        fragments = fragments[::-1]
    loop_function = self._loop_use_cache if self.use_cache else self._loop_no_cache
    for num, fragment in enumerate(fragments):
        succeeded, data = loop_function(
            helper_function=helper_function,
            num=num,
            fragment=fragment
        )
        if not succeeded:
            self.log_crit(u"An unexpected error occurred in loop_function")
            return (False, None)
        duration, sr_nu, enc_nu, samples = data
        # store for later output
        anchors.append([current_time, fragment.identifier, fragment.text])
        # increase the character counter
        num_chars += fragment.characters
        # concatenate new samples
        self.log([u"Fragment %d starts at: %.3f", num, current_time])
        if duration > 0:
            self.log([u"Fragment %d duration: %.3f", num, duration])
            current_time += duration
            output_file.add_samples(samples, reverse=backwards)
        else:
            self.log([u"Fragment %d has zero duration", num])
        # check if we must stop synthesizing because we have enough audio
        if (quit_after is not None) and (current_time > quit_after):
            self.log([u"Quitting after reached duration %.3f", current_time])
            break

    # minimize memory
    self.log(u"Minimizing memory...")
    output_file.minimize_memory()
    self.log(u"Minimizing memory... done")

    # if backwards, we need to reverse the audio samples again
    if backwards:
        self.log(u"Reversing audio samples...")
        output_file.reverse()
        self.log(u"Reversing audio samples... done")

    # write output file
    self.log([u"Writing audio file '%s'", output_file_path])
    output_file.write(file_path=output_file_path)

    # return output
    if backwards:
        self.log_warn(u"Please note that anchor time values do not make sense since backwards=True")
    self.log([u"Returning %d time anchors", len(anchors)])
    self.log([u"Current time %.3f", current_time])
    self.log([u"Synthesized %d characters", num_chars])
    self.log(u"Calling TTS engine using multiple generic function... done")
    return (True, (anchors, current_time, num_chars))
[ "def", "_synthesize_multiple_generic", "(", "self", ",", "helper_function", ",", "text_file", ",", "output_file_path", ",", "quit_after", "=", "None", ",", "backwards", "=", "False", ")", ":", "self", ".", "log", "(", "u\"Calling TTS engine using multiple generic function...\"", ")", "# get sample rate and codec", "self", ".", "log", "(", "u\"Determining codec and sample rate...\"", ")", "if", "(", "self", ".", "OUTPUT_AUDIO_FORMAT", "is", "None", ")", "or", "(", "len", "(", "self", ".", "OUTPUT_AUDIO_FORMAT", ")", "!=", "3", ")", ":", "self", ".", "log", "(", "u\"Determining codec and sample rate with dummy text...\"", ")", "succeeded", ",", "data", "=", "helper_function", "(", "text", "=", "u\"Dummy text to get sample_rate\"", ",", "voice_code", "=", "self", ".", "_language_to_voice_code", "(", "self", ".", "DEFAULT_LANGUAGE", ")", ",", "output_file_path", "=", "None", ")", "if", "not", "succeeded", ":", "self", ".", "log_crit", "(", "u\"An unexpected error occurred in helper_function\"", ")", "return", "(", "False", ",", "None", ")", "du_nu", ",", "sample_rate", ",", "codec", ",", "da_nu", "=", "data", "self", ".", "log", "(", "u\"Determining codec and sample rate with dummy text... done\"", ")", "else", ":", "self", ".", "log", "(", "u\"Reading codec and sample rate from OUTPUT_AUDIO_FORMAT\"", ")", "codec", ",", "channels_nu", ",", "sample_rate", "=", "self", ".", "OUTPUT_AUDIO_FORMAT", "self", ".", "log", "(", "u\"Determining codec and sample rate... done\"", ")", "self", ".", "log", "(", "[", "u\" codec: %s\"", ",", "codec", "]", ")", "self", ".", "log", "(", "[", "u\" sample rate: %d\"", ",", "sample_rate", "]", ")", "# open output file", "output_file", "=", "AudioFile", "(", "rconf", "=", "self", ".", "rconf", ",", "logger", "=", "self", ".", "logger", ")", "output_file", ".", "audio_format", "=", "codec", "output_file", ".", "audio_channels", "=", "1", "output_file", ".", "audio_sample_rate", "=", "sample_rate", "# create output", "anchors", "=", "[", "]", "current_time", "=", "TimeValue", "(", "\"0.000\"", ")", "num_chars", "=", "0", "fragments", "=", "text_file", ".", "fragments", "if", "backwards", ":", "fragments", "=", "fragments", "[", ":", ":", "-", "1", "]", "loop_function", "=", "self", ".", "_loop_use_cache", "if", "self", ".", "use_cache", "else", "self", ".", "_loop_no_cache", "for", "num", ",", "fragment", "in", "enumerate", "(", "fragments", ")", ":", "succeeded", ",", "data", "=", "loop_function", "(", "helper_function", "=", "helper_function", ",", "num", "=", "num", ",", "fragment", "=", "fragment", ")", "if", "not", "succeeded", ":", "self", ".", "log_crit", "(", "u\"An unexpected error occurred in loop_function\"", ")", "return", "(", "False", ",", "None", ")", "duration", ",", "sr_nu", ",", "enc_nu", ",", "samples", "=", "data", "# store for later output", "anchors", ".", "append", "(", "[", "current_time", ",", "fragment", ".", "identifier", ",", "fragment", ".", "text", "]", ")", "# increase the character counter", "num_chars", "+=", "fragment", ".", "characters", "# concatenate new samples", "self", ".", "log", "(", "[", "u\"Fragment %d starts at: %.3f\"", ",", "num", ",", "current_time", "]", ")", "if", "duration", ">", "0", ":", "self", ".", "log", "(", "[", "u\"Fragment %d duration: %.3f\"", ",", "num", ",", "duration", "]", ")", "current_time", "+=", "duration", "output_file", ".", "add_samples", "(", "samples", ",", "reverse", "=", "backwards", ")", "else", ":", "self", ".", "log", "(", "[", "u\"Fragment %d has zero duration\"", ",", "num", "]", ")", 
"# check if we must stop synthesizing because we have enough audio", "if", "(", "quit_after", "is", "not", "None", ")", "and", "(", "current_time", ">", "quit_after", ")", ":", "self", ".", "log", "(", "[", "u\"Quitting after reached duration %.3f\"", ",", "current_time", "]", ")", "break", "# minimize memory", "self", ".", "log", "(", "u\"Minimizing memory...\"", ")", "output_file", ".", "minimize_memory", "(", ")", "self", ".", "log", "(", "u\"Minimizing memory... done\"", ")", "# if backwards, we need to reverse the audio samples again", "if", "backwards", ":", "self", ".", "log", "(", "u\"Reversing audio samples...\"", ")", "output_file", ".", "reverse", "(", ")", "self", ".", "log", "(", "u\"Reversing audio samples... done\"", ")", "# write output file", "self", ".", "log", "(", "[", "u\"Writing audio file '%s'\"", ",", "output_file_path", "]", ")", "output_file", ".", "write", "(", "file_path", "=", "output_file_path", ")", "# return output", "if", "backwards", ":", "self", ".", "log_warn", "(", "u\"Please note that anchor time values do not make sense since backwards=True\"", ")", "self", ".", "log", "(", "[", "u\"Returning %d time anchors\"", ",", "len", "(", "anchors", ")", "]", ")", "self", ".", "log", "(", "[", "u\"Current time %.3f\"", ",", "current_time", "]", ")", "self", ".", "log", "(", "[", "u\"Synthesized %d characters\"", ",", "num_chars", "]", ")", "self", ".", "log", "(", "u\"Calling TTS engine using multiple generic function... done\"", ")", "return", "(", "True", ",", "(", "anchors", ",", "current_time", ",", "num_chars", ")", ")" ]
44.938776
0.002222
def convertImages(self):
    """
    run this to turn all folder1 TIFs and JPGs into folder2 data.
    TIFs will be treated as micrographs and converted to JPG with
    enhanced contrast. JPGs will simply be copied over.
    """

    # copy over JPGs (and such)
    exts=['.jpg','.png']
    for fname in [x for x in self.files1 if cm.ext(x) in exts]:
        ID="UNKNOWN"
        if len(fname)>8 and fname[:8] in self.IDs:
            ID=fname[:8]
        fname2=ID+"_jpg_"+fname
        if not fname2 in self.files2:
            self.log.info("copying over [%s]"%fname2)
            shutil.copy(os.path.join(self.folder1,fname),os.path.join(self.folder2,fname2))
        if not fname[:8]+".abf" in self.files1:
            self.log.error("orphan image: %s",fname)

    # convert TIFs (and such) to JPGs
    exts=['.tif','.tiff']
    for fname in [x for x in self.files1 if cm.ext(x) in exts]:
        ID="UNKNOWN"
        if len(fname)>8 and fname[:8] in self.IDs:
            ID=fname[:8]
        fname2=ID+"_tif_"+fname+".jpg"
        if not fname2 in self.files2:
            self.log.info("converting micrograph [%s]"%fname2)
            imaging.TIF_to_jpg(os.path.join(self.folder1,fname),saveAs=os.path.join(self.folder2,fname2))
        if not fname[:8]+".abf" in self.files1:
            self.log.error("orphan image: %s",fname)
[ "def", "convertImages", "(", "self", ")", ":", "# copy over JPGs (and such)", "exts", "=", "[", "'.jpg'", ",", "'.png'", "]", "for", "fname", "in", "[", "x", "for", "x", "in", "self", ".", "files1", "if", "cm", ".", "ext", "(", "x", ")", "in", "exts", "]", ":", "ID", "=", "\"UNKNOWN\"", "if", "len", "(", "fname", ")", ">", "8", "and", "fname", "[", ":", "8", "]", "in", "self", ".", "IDs", ":", "ID", "=", "fname", "[", ":", "8", "]", "fname2", "=", "ID", "+", "\"_jpg_\"", "+", "fname", "if", "not", "fname2", "in", "self", ".", "files2", ":", "self", ".", "log", ".", "info", "(", "\"copying over [%s]\"", "%", "fname2", ")", "shutil", ".", "copy", "(", "os", ".", "path", ".", "join", "(", "self", ".", "folder1", ",", "fname", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "folder2", ",", "fname2", ")", ")", "if", "not", "fname", "[", ":", "8", "]", "+", "\".abf\"", "in", "self", ".", "files1", ":", "self", ".", "log", ".", "error", "(", "\"orphan image: %s\"", ",", "fname", ")", "# convert TIFs (and such) to JPGs", "exts", "=", "[", "'.tif'", ",", "'.tiff'", "]", "for", "fname", "in", "[", "x", "for", "x", "in", "self", ".", "files1", "if", "cm", ".", "ext", "(", "x", ")", "in", "exts", "]", ":", "ID", "=", "\"UNKNOWN\"", "if", "len", "(", "fname", ")", ">", "8", "and", "fname", "[", ":", "8", "]", "in", "self", ".", "IDs", ":", "ID", "=", "fname", "[", ":", "8", "]", "fname2", "=", "ID", "+", "\"_tif_\"", "+", "fname", "+", "\".jpg\"", "if", "not", "fname2", "in", "self", ".", "files2", ":", "self", ".", "log", ".", "info", "(", "\"converting micrograph [%s]\"", "%", "fname2", ")", "imaging", ".", "TIF_to_jpg", "(", "os", ".", "path", ".", "join", "(", "self", ".", "folder1", ",", "fname", ")", ",", "saveAs", "=", "os", ".", "path", ".", "join", "(", "self", ".", "folder2", ",", "fname2", ")", ")", "if", "not", "fname", "[", ":", "8", "]", "+", "\".abf\"", "in", "self", ".", "files1", ":", "self", ".", "log", ".", "error", "(", "\"orphan image: %s\"", ",", "fname", ")" ]
44.4375
0.01927
def value_counts(self, dropna=True):
    """
    Returns a Series containing counts of unique values.

    Parameters
    ----------
    dropna : boolean, default True
        Don't include counts of NaN, even if NaN is in sp_values.

    Returns
    -------
    counts : Series
    """
    from pandas import Index, Series

    keys, counts = algos._value_counts_arraylike(self.sp_values,
                                                 dropna=dropna)
    fcounts = self.sp_index.ngaps
    if fcounts > 0:
        if self._null_fill_value and dropna:
            pass
        else:
            if self._null_fill_value:
                mask = isna(keys)
            else:
                mask = keys == self.fill_value

            if mask.any():
                counts[mask] += fcounts
            else:
                keys = np.insert(keys, 0, self.fill_value)
                counts = np.insert(counts, 0, fcounts)

    if not isinstance(keys, ABCIndexClass):
        keys = Index(keys)
    result = Series(counts, index=keys)
    return result
[ "def", "value_counts", "(", "self", ",", "dropna", "=", "True", ")", ":", "from", "pandas", "import", "Index", ",", "Series", "keys", ",", "counts", "=", "algos", ".", "_value_counts_arraylike", "(", "self", ".", "sp_values", ",", "dropna", "=", "dropna", ")", "fcounts", "=", "self", ".", "sp_index", ".", "ngaps", "if", "fcounts", ">", "0", ":", "if", "self", ".", "_null_fill_value", "and", "dropna", ":", "pass", "else", ":", "if", "self", ".", "_null_fill_value", ":", "mask", "=", "isna", "(", "keys", ")", "else", ":", "mask", "=", "keys", "==", "self", ".", "fill_value", "if", "mask", ".", "any", "(", ")", ":", "counts", "[", "mask", "]", "+=", "fcounts", "else", ":", "keys", "=", "np", ".", "insert", "(", "keys", ",", "0", ",", "self", ".", "fill_value", ")", "counts", "=", "np", ".", "insert", "(", "counts", ",", "0", ",", "fcounts", ")", "if", "not", "isinstance", "(", "keys", ",", "ABCIndexClass", ")", ":", "keys", "=", "Index", "(", "keys", ")", "result", "=", "Series", "(", "counts", ",", "index", "=", "keys", ")", "return", "result" ]
30.756757
0.001704
def get_last(self):
    """
    Get the last migration batch.

    :rtype: list
    """
    query = self.table().where('batch', self.get_last_batch_number())

    return query.order_by('migration', 'desc').get()
[ "def", "get_last", "(", "self", ")", ":", "query", "=", "self", ".", "table", "(", ")", ".", "where", "(", "'batch'", ",", "self", ".", "get_last_batch_number", "(", ")", ")", "return", "query", ".", "order_by", "(", "'migration'", ",", "'desc'", ")", ".", "get", "(", ")" ]
25.222222
0.008511
def refresh(self, token, timeout=None):
    """Set new timeout for lock, if existing and valid."""
    if timeout is None:
        timeout = LockManager.LOCK_TIME_OUT_DEFAULT
    return self.storage.refresh(token, timeout)
[ "def", "refresh", "(", "self", ",", "token", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "None", ":", "timeout", "=", "LockManager", ".", "LOCK_TIME_OUT_DEFAULT", "return", "self", ".", "storage", ".", "refresh", "(", "token", ",", "timeout", ")" ]
46.8
0.008403
def gradev(data, rate=1.0, data_type="phase", taus=None,
           ci=0.9, noisetype='wp'):
    """ gap resistant overlapping Allan deviation

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional). Warning : phase data works better (frequency data is
        first transformed into phase using numpy.cumsum() function, which can
        lead to poor results).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.
    ci: float
        the total confidence interval desired, i.e. if ci = 0.9,
        the bounds will be at 0.05 and 0.95.
    noisetype: string
        the type of noise desired:
        'wp' returns white phase noise.
        'wf' returns white frequency noise.
        'fp' returns flicker phase noise.
        'ff' returns flicker frequency noise.
        'rf' returns random walk frequency noise.
        If the input is not recognized, it defaults to idealized,
        uncorrelated noise with (N-1) degrees of freedom.

    Returns
    -------
    taus: np.array
        list of tau values in seconds
    adev: np.array
        deviations
    [err_l, err_h] : list of len()==2, np.array
        the upper and lower bounds of the confidence interval taken as
        distances from the estimated two sample variance.
    ns: np.array
        number of terms n in the adev estimate.
    """
    if (data_type == "freq"):
        print("Warning : phase data is preferred as input to gradev()")
    phase = input_to_phase(data, rate, data_type)
    (data, m, taus_used) = tau_generator(phase, rate, taus)

    ad = np.zeros_like(taus_used)
    ade_l = np.zeros_like(taus_used)
    ade_h = np.zeros_like(taus_used)
    adn = np.zeros_like(taus_used)

    for idx, mj in enumerate(m):
        (dev, deverr, n) = calc_gradev_phase(data,
                                             rate,
                                             mj,
                                             1,
                                             ci,
                                             noisetype)
        # stride=1 for overlapping ADEV
        ad[idx] = dev
        ade_l[idx] = deverr[0]
        ade_h[idx] = deverr[1]
        adn[idx] = n

    # Note that errors are split in 2 arrays
    return remove_small_ns(taus_used, ad, [ade_l, ade_h], adn)
[ "def", "gradev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ",", "ci", "=", "0.9", ",", "noisetype", "=", "'wp'", ")", ":", "if", "(", "data_type", "==", "\"freq\"", ")", ":", "print", "(", "\"Warning : phase data is preferred as input to gradev()\"", ")", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "data", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "ad", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade_l", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade_h", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "adn", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "(", "dev", ",", "deverr", ",", "n", ")", "=", "calc_gradev_phase", "(", "data", ",", "rate", ",", "mj", ",", "1", ",", "ci", ",", "noisetype", ")", "# stride=1 for overlapping ADEV", "ad", "[", "idx", "]", "=", "dev", "ade_l", "[", "idx", "]", "=", "deverr", "[", "0", "]", "ade_h", "[", "idx", "]", "=", "deverr", "[", "1", "]", "adn", "[", "idx", "]", "=", "n", "# Note that errors are split in 2 arrays", "return", "remove_small_ns", "(", "taus_used", ",", "ad", ",", "[", "ade_l", ",", "ade_h", "]", ",", "adn", ")" ]
36.942857
0.000377
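The record above appears to be allantools.gradev, so a usage sketch can follow its own docstring; the NaN convention for marking gaps is an assumption here:

import numpy as np
import allantools

# White phase noise with a dropout, which gradev is built to tolerate
phase = np.random.randn(10000)
phase[2000:2100] = np.nan  # the gap (NaN marking assumed)

(taus_used, ad, (err_l, err_h), ns) = allantools.gradev(
    phase, rate=1.0, data_type="phase", taus="octave", ci=0.9, noisetype='wp')
for tau, dev, n in zip(taus_used, ad, ns):
    print(tau, dev, int(n))  # ADEV falls roughly as 1/tau for white phase noise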
def U(self):
    "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
    if getattr(self.data, 'tzinfo', None):
        return int(calendar.timegm(self.data.utctimetuple()))
    else:
        return int(time.mktime(self.data.timetuple()))
[ "def", "U", "(", "self", ")", ":", "if", "getattr", "(", "self", ".", "data", ",", "'tzinfo'", ",", "None", ")", ":", "return", "int", "(", "calendar", ".", "timegm", "(", "self", ".", "data", ".", "utctimetuple", "(", ")", ")", ")", "else", ":", "return", "int", "(", "time", ".", "mktime", "(", "self", ".", "data", ".", "timetuple", "(", ")", ")", ")" ]
39.666667
0.012346
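The branch above matters because calendar.timegm reads the tuple as UTC while time.mktime reads it as local time. The same logic as a plain function (an illustration, not the original method):

import calendar
import datetime
import time

def unix_seconds(dt):
    if getattr(dt, 'tzinfo', None):
        # aware: convert to a UTC tuple, no local-timezone guessing
        return int(calendar.timegm(dt.utctimetuple()))
    # naive: assume local time and let mktime apply the local UTC offset
    return int(time.mktime(dt.timetuple()))

aware = datetime.datetime(2009, 2, 13, 23, 31, 30, tzinfo=datetime.timezone.utc)
print(unix_seconds(aware))  # 1234567890, independent of the machine's timezone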
def paga_adjacency(
        adata,
        adjacency='connectivities',
        adjacency_tree='connectivities_tree',
        as_heatmap=True,
        color_map=None,
        show=None,
        save=None):
    """Connectivity of paga groups.
    """
    connectivity = adata.uns[adjacency].toarray()
    connectivity_select = adata.uns[adjacency_tree]
    if as_heatmap:
        matrix(connectivity, color_map=color_map, show=False)
        for i in range(connectivity_select.shape[0]):
            neighbors = connectivity_select[i].nonzero()[1]
            pl.scatter([i for j in neighbors], neighbors, color='black', s=1)
    # as a stripplot
    else:
        pl.figure()
        for i, cs in enumerate(connectivity):
            x = [i for j, d in enumerate(cs) if i != j]
            y = [c for j, c in enumerate(cs) if i != j]
            pl.scatter(x, y, color='gray', s=1)
            neighbors = connectivity_select[i].nonzero()[1]
            pl.scatter([i for j in neighbors], cs[neighbors], color='black', s=1)
    utils.savefig_or_show('paga_connectivity', show=show, save=save)
[ "def", "paga_adjacency", "(", "adata", ",", "adjacency", "=", "'connectivities'", ",", "adjacency_tree", "=", "'connectivities_tree'", ",", "as_heatmap", "=", "True", ",", "color_map", "=", "None", ",", "show", "=", "None", ",", "save", "=", "None", ")", ":", "connectivity", "=", "adata", ".", "uns", "[", "adjacency", "]", ".", "toarray", "(", ")", "connectivity_select", "=", "adata", ".", "uns", "[", "adjacency_tree", "]", "if", "as_heatmap", ":", "matrix", "(", "connectivity", ",", "color_map", "=", "color_map", ",", "show", "=", "False", ")", "for", "i", "in", "range", "(", "connectivity_select", ".", "shape", "[", "0", "]", ")", ":", "neighbors", "=", "connectivity_select", "[", "i", "]", ".", "nonzero", "(", ")", "[", "1", "]", "pl", ".", "scatter", "(", "[", "i", "for", "j", "in", "neighbors", "]", ",", "neighbors", ",", "color", "=", "'black'", ",", "s", "=", "1", ")", "# as a stripplot", "else", ":", "pl", ".", "figure", "(", ")", "for", "i", ",", "cs", "in", "enumerate", "(", "connectivity", ")", ":", "x", "=", "[", "i", "for", "j", ",", "d", "in", "enumerate", "(", "cs", ")", "if", "i", "!=", "j", "]", "y", "=", "[", "c", "for", "j", ",", "c", "in", "enumerate", "(", "cs", ")", "if", "i", "!=", "j", "]", "pl", ".", "scatter", "(", "x", ",", "y", ",", "color", "=", "'gray'", ",", "s", "=", "1", ")", "neighbors", "=", "connectivity_select", "[", "i", "]", ".", "nonzero", "(", ")", "[", "1", "]", "pl", ".", "scatter", "(", "[", "i", "for", "j", "in", "neighbors", "]", ",", "cs", "[", "neighbors", "]", ",", "color", "=", "'black'", ",", "s", "=", "1", ")", "utils", ".", "savefig_or_show", "(", "'paga_connectivity'", ",", "show", "=", "show", ",", "save", "=", "save", ")" ]
38.821429
0.000898
def _get_filehandler_with_formatter(logname, formatter=None):
    """ Return a logging FileHandler for given logname using a given
    logging formatter

    :param logname: Name of the file where logs will be stored, ".log"
        extension will be added
    :param formatter: An instance of logging.Formatter or None if the
        default should be used
    :return:
    """
    handler = logging.FileHandler(logname)
    if formatter is not None:
        handler.setFormatter(formatter)
    return handler
[ "def", "_get_filehandler_with_formatter", "(", "logname", ",", "formatter", "=", "None", ")", ":", "handler", "=", "logging", ".", "FileHandler", "(", "logname", ")", "if", "formatter", "is", "not", "None", ":", "handler", ".", "setFormatter", "(", "formatter", ")", "return", "handler" ]
37.615385
0.001996
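A short usage sketch for the record above (stdlib only; note the body hands logname straight to logging.FileHandler, so pass the full file name, extension included):

import logging

formatter = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s")
handler = _get_filehandler_with_formatter("run.log", formatter)

log = logging.getLogger("demo")
log.setLevel(logging.INFO)
log.addHandler(handler)
log.info("written to run.log with timestamps")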
def _validate_frequency(cls, index, freq, **kwargs):
    """
    Validate that a frequency is compatible with the values of a given
    Datetime Array/Index or Timedelta Array/Index

    Parameters
    ----------
    index : DatetimeIndex or TimedeltaIndex
        The index on which to determine if the given frequency is valid
    freq : DateOffset
        The frequency to validate
    """
    if is_period_dtype(cls):
        # Frequency validation is not meaningful for Period Array/Index
        return None

    inferred = index.inferred_freq
    if index.size == 0 or inferred == freq.freqstr:
        return None

    try:
        on_freq = cls._generate_range(start=index[0], end=None,
                                      periods=len(index), freq=freq,
                                      **kwargs)
        if not np.array_equal(index.asi8, on_freq.asi8):
            raise ValueError
    except ValueError as e:
        if "non-fixed" in str(e):
            # non-fixed frequencies are not meaningful for timedelta64;
            # we retain that error message
            raise e
        # GH#11587 the main way this is reached is if the `np.array_equal`
        # check above is False. This can also be reached if index[0]
        # is `NaT`, in which case the call to `cls._generate_range` will
        # raise a ValueError, which we re-raise with a more targeted
        # message.
        raise ValueError('Inferred frequency {infer} from passed values '
                         'does not conform to passed frequency {passed}'
                         .format(infer=inferred, passed=freq.freqstr))
[ "def", "_validate_frequency", "(", "cls", ",", "index", ",", "freq", ",", "*", "*", "kwargs", ")", ":", "if", "is_period_dtype", "(", "cls", ")", ":", "# Frequency validation is not meaningful for Period Array/Index", "return", "None", "inferred", "=", "index", ".", "inferred_freq", "if", "index", ".", "size", "==", "0", "or", "inferred", "==", "freq", ".", "freqstr", ":", "return", "None", "try", ":", "on_freq", "=", "cls", ".", "_generate_range", "(", "start", "=", "index", "[", "0", "]", ",", "end", "=", "None", ",", "periods", "=", "len", "(", "index", ")", ",", "freq", "=", "freq", ",", "*", "*", "kwargs", ")", "if", "not", "np", ".", "array_equal", "(", "index", ".", "asi8", ",", "on_freq", ".", "asi8", ")", ":", "raise", "ValueError", "except", "ValueError", "as", "e", ":", "if", "\"non-fixed\"", "in", "str", "(", "e", ")", ":", "# non-fixed frequencies are not meaningful for timedelta64;", "# we retain that error message", "raise", "e", "# GH#11587 the main way this is reached is if the `np.array_equal`", "# check above is False. This can also be reached if index[0]", "# is `NaT`, in which case the call to `cls._generate_range` will", "# raise a ValueError, which we re-raise with a more targeted", "# message.", "raise", "ValueError", "(", "'Inferred frequency {infer} from passed values '", "'does not conform to passed frequency {passed}'", ".", "format", "(", "infer", "=", "inferred", ",", "passed", "=", "freq", ".", "freqstr", ")", ")" ]
44.25641
0.001134
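A sketch of the frequency check the validator above performs, using only
public pandas API rather than the private helper; index values are
illustrative.

import pandas as pd

# An index built with freq='D' infers 'D'; a gappy index infers nothing,
# which is the kind of mismatch the validator turns into a ValueError.
idx = pd.date_range('2020-01-01', periods=5, freq='D')
assert idx.inferred_freq == 'D'

gappy = pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-05'])
assert gappy.inferred_freq is None  # no single frequency fits these values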
def stream_bytes(data, chunk_size=default_chunk_size): """Gets a buffered generator for streaming binary data. Returns a buffered generator which encodes binary data as :mimetype:`multipart/form-data` with the corresponding headers. Parameters ---------- data : bytes The data bytes to stream chunk_size : int The maximum size of each stream chunk Returns ------- (generator, dict) """ stream = BytesStream(data, chunk_size=chunk_size) return stream.body(), stream.headers
[ "def", "stream_bytes", "(", "data", ",", "chunk_size", "=", "default_chunk_size", ")", ":", "stream", "=", "BytesStream", "(", "data", ",", "chunk_size", "=", "chunk_size", ")", "return", "stream", ".", "body", "(", ")", ",", "stream", ".", "headers" ]
26.45
0.001825
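A hypothetical consumption of the generator returned by stream_bytes; the
exact chunking and header contents depend on the library's BytesStream
class, so the printed value is only indicative.

# Reassemble the multipart body and inspect the generated headers.
body, headers = stream_bytes(b'hello world', chunk_size=4)
payload = b''.join(body)
print(headers.get('Content-Type'))  # e.g. 'multipart/form-data; boundary=...'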
def compute_screen_line_counts(
    feed: "Feed", linestring: LineString, dates: List[str], geo_shapes=None
) -> DataFrame:
    """
    Find all the Feed trips active on the given dates that intersect
    the given Shapely LineString (with WGS84 longitude-latitude
    coordinates).

    Parameters
    ----------
    feed : Feed
    linestring : Shapely LineString
    dates : list
        YYYYMMDD date strings

    Returns
    -------
    DataFrame
        The columns are

        - ``'date'``
        - ``'trip_id'``
        - ``'route_id'``
        - ``'route_short_name'``
        - ``'crossing_time'``: time that the trip's vehicle crosses
          the linestring; one trip could cross multiple times
        - ``'orientation'``: 1 or -1; 1 indicates trip travel from the
          left side to the right side of the screen line;
          -1 indicates trip travel in the opposite direction

    Notes
    -----
    - Requires GeoPandas
    - The first step is to geometrize ``feed.shapes`` via
      :func:`.shapes.geometrize_shapes`. Alternatively, use the
      ``geo_shapes`` GeoDataFrame, if given.
    - Assume ``feed.stop_times`` has an accurate
      ``shape_dist_traveled`` column.
    - Assume that trips travel in the same direction as their shapes.
      That restriction is part of GTFS, by the way.
      To calculate direction quickly and accurately, assume that
      the screen line is straight and doesn't double back on itself.
    - Probably does not give correct results for trips with
      self-intersecting shapes.
    - The algorithm works as follows

        1. Compute all the shapes that intersect the linestring
        2. For each such shape, compute the intersection points
        3. For each point p, scan through all the trips in the feed
           that have that shape
        4. For each date in ``dates``, restrict to trips active on
           the date and interpolate a stop time for p by assuming
           that the feed has the shape_dist_traveled field in stop
           times
        5. Use that interpolated time as the crossing time of the
           trip vehicle, and compute the trip orientation to the
           screen line via a cross product of a vector in the
           direction of the screen line and a tiny vector in the
           direction of trip travel

    - Assume the following feed attributes are not ``None``:

        * ``feed.shapes``, if ``geo_shapes`` is not given

    """
    dates = feed.restrict_dates(dates)
    if not dates:
        return pd.DataFrame()

    # Get all shapes that intersect the screen line
    shapes = feed.get_shapes_intersecting_geometry(
        linestring, geo_shapes, geometrized=True
    )

    # Convert shapes to UTM
    lat, lon = feed.shapes.loc[0, ["shape_pt_lat", "shape_pt_lon"]].values
    crs = hp.get_utm_crs(lat, lon)
    shapes = shapes.to_crs(crs)

    # Convert linestring to UTM
    linestring = hp.linestring_to_utm(linestring)

    # Get all intersection points of shapes and linestring
    shapes["intersection"] = shapes.intersection(linestring)

    # Make a vector in the direction of the screen line
    # to later calculate trip orientation.
    # Does not work in case of a bent screen line.
    p1 = sg.Point(linestring.coords[0])
    p2 = sg.Point(linestring.coords[-1])
    w = np.array([p2.x - p1.x, p2.y - p1.y])

    # Build a dictionary from the shapes DataFrame of the form
    # shape ID -> list of pairs (d, v), one for each intersection point,
    # where d is the distance of the intersection point along the shape,
    # and v is a tiny vector from the point in the direction of the shape.
    # Assume here that trips travel in the same direction as their shapes.
dv_by_shape = {} eps = 1 convert_dist = hp.get_convert_dist("m", feed.dist_units) for __, sid, geom, intersection in shapes.itertuples(): # Get distances along shape of intersection points (in meters) distances = [geom.project(p) for p in intersection] # Build tiny vectors vectors = [] for i, p in enumerate(intersection): q = geom.interpolate(distances[i] + eps) vector = np.array([q.x - p.x, q.y - p.y]) vectors.append(vector) # Convert distances to units used in feed distances = [convert_dist(d) for d in distances] dv_by_shape[sid] = list(zip(distances, vectors)) # Get trips with those shapes t = feed.trips t = t[t["shape_id"].isin(dv_by_shape.keys())].copy() # Merge in route short names and stop times t = t.merge(feed.routes[["route_id", "route_short_name"]]).merge( feed.stop_times ) # Drop NaN departure times and convert to seconds past midnight t = t[t["departure_time"].notnull()].copy() t["departure_time"] = t["departure_time"].map(hp.timestr_to_seconds) # Compile crossings by date a = feed.compute_trip_activity(dates) rows = [] for date in dates: # Slice to trips active on date ids = a.loc[a[date] == 1, "trip_id"] f = t[t["trip_id"].isin(ids)].copy() # For each shape find the trips that cross the screen line # and get crossing times and orientation f = f.sort_values(["trip_id", "stop_sequence"]) for tid, group in f.groupby("trip_id"): sid = group["shape_id"].iat[0] rid = group["route_id"].iat[0] rsn = group["route_short_name"].iat[0] stop_times = group["departure_time"].values stop_distances = group["shape_dist_traveled"].values for d, v in dv_by_shape[sid]: # Interpolate crossing time time = np.interp(d, stop_distances, stop_times) # Compute direction of trip travel relative to # screen line by looking at the sign of the cross # product of tiny shape vector and screen line vector det = np.linalg.det(np.array([v, w])) if det >= 0: orientation = 1 else: orientation = -1 # Update rows rows.append([date, tid, rid, rsn, time, orientation]) # Create DataFrame cols = [ "date", "trip_id", "route_id", "route_short_name", "crossing_time", "orientation", ] g = pd.DataFrame(rows, columns=cols).sort_values(["date", "crossing_time"]) # Convert departure times back to time strings g["crossing_time"] = g["crossing_time"].map( lambda x: hp.timestr_to_seconds(x, inverse=True) ) return g
[ "def", "compute_screen_line_counts", "(", "feed", ":", "\"Feed\"", ",", "linestring", ":", "LineString", ",", "dates", ":", "List", "[", "str", "]", ",", "geo_shapes", "=", "None", ")", "->", "DataFrame", ":", "dates", "=", "feed", ".", "restrict_dates", "(", "dates", ")", "if", "not", "dates", ":", "return", "pd", ".", "DataFrame", "(", ")", "# Get all shapes that intersect the screen line", "shapes", "=", "feed", ".", "get_shapes_intersecting_geometry", "(", "linestring", ",", "geo_shapes", ",", "geometrized", "=", "True", ")", "# Convert shapes to UTM", "lat", ",", "lon", "=", "feed", ".", "shapes", ".", "loc", "[", "0", ",", "[", "\"shape_pt_lat\"", ",", "\"shape_pt_lon\"", "]", "]", ".", "values", "crs", "=", "hp", ".", "get_utm_crs", "(", "lat", ",", "lon", ")", "shapes", "=", "shapes", ".", "to_crs", "(", "crs", ")", "# Convert linestring to UTM", "linestring", "=", "hp", ".", "linestring_to_utm", "(", "linestring", ")", "# Get all intersection points of shapes and linestring", "shapes", "[", "\"intersection\"", "]", "=", "shapes", ".", "intersection", "(", "linestring", ")", "# Make a vector in the direction of the screen line", "# to later calculate trip orientation.", "# Does not work in case of a bent screen line.", "p1", "=", "sg", ".", "Point", "(", "linestring", ".", "coords", "[", "0", "]", ")", "p2", "=", "sg", ".", "Point", "(", "linestring", ".", "coords", "[", "-", "1", "]", ")", "w", "=", "np", ".", "array", "(", "[", "p2", ".", "x", "-", "p1", ".", "x", ",", "p2", ".", "y", "-", "p1", ".", "y", "]", ")", "# Build a dictionary from the shapes DataFrame of the form", "# shape ID -> list of pairs (d, v), one for each intersection point,", "# where d is the distance of the intersection point along shape,", "# and v is a tiny vectors from the point in direction of shape.", "# Assume here that trips travel in the same direction as their shapes.", "dv_by_shape", "=", "{", "}", "eps", "=", "1", "convert_dist", "=", "hp", ".", "get_convert_dist", "(", "\"m\"", ",", "feed", ".", "dist_units", ")", "for", "__", ",", "sid", ",", "geom", ",", "intersection", "in", "shapes", ".", "itertuples", "(", ")", ":", "# Get distances along shape of intersection points (in meters)", "distances", "=", "[", "geom", ".", "project", "(", "p", ")", "for", "p", "in", "intersection", "]", "# Build tiny vectors", "vectors", "=", "[", "]", "for", "i", ",", "p", "in", "enumerate", "(", "intersection", ")", ":", "q", "=", "geom", ".", "interpolate", "(", "distances", "[", "i", "]", "+", "eps", ")", "vector", "=", "np", ".", "array", "(", "[", "q", ".", "x", "-", "p", ".", "x", ",", "q", ".", "y", "-", "p", ".", "y", "]", ")", "vectors", ".", "append", "(", "vector", ")", "# Convert distances to units used in feed", "distances", "=", "[", "convert_dist", "(", "d", ")", "for", "d", "in", "distances", "]", "dv_by_shape", "[", "sid", "]", "=", "list", "(", "zip", "(", "distances", ",", "vectors", ")", ")", "# Get trips with those shapes", "t", "=", "feed", ".", "trips", "t", "=", "t", "[", "t", "[", "\"shape_id\"", "]", ".", "isin", "(", "dv_by_shape", ".", "keys", "(", ")", ")", "]", ".", "copy", "(", ")", "# Merge in route short names and stop times", "t", "=", "t", ".", "merge", "(", "feed", ".", "routes", "[", "[", "\"route_id\"", ",", "\"route_short_name\"", "]", "]", ")", ".", "merge", "(", "feed", ".", "stop_times", ")", "# Drop NaN departure times and convert to seconds past midnight", "t", "=", "t", "[", "t", "[", "\"departure_time\"", "]", ".", "notnull", "(", ")", "]", ".", "copy", "(", ")", 
"t", "[", "\"departure_time\"", "]", "=", "t", "[", "\"departure_time\"", "]", ".", "map", "(", "hp", ".", "timestr_to_seconds", ")", "# Compile crossings by date", "a", "=", "feed", ".", "compute_trip_activity", "(", "dates", ")", "rows", "=", "[", "]", "for", "date", "in", "dates", ":", "# Slice to trips active on date", "ids", "=", "a", ".", "loc", "[", "a", "[", "date", "]", "==", "1", ",", "\"trip_id\"", "]", "f", "=", "t", "[", "t", "[", "\"trip_id\"", "]", ".", "isin", "(", "ids", ")", "]", ".", "copy", "(", ")", "# For each shape find the trips that cross the screen line", "# and get crossing times and orientation", "f", "=", "f", ".", "sort_values", "(", "[", "\"trip_id\"", ",", "\"stop_sequence\"", "]", ")", "for", "tid", ",", "group", "in", "f", ".", "groupby", "(", "\"trip_id\"", ")", ":", "sid", "=", "group", "[", "\"shape_id\"", "]", ".", "iat", "[", "0", "]", "rid", "=", "group", "[", "\"route_id\"", "]", ".", "iat", "[", "0", "]", "rsn", "=", "group", "[", "\"route_short_name\"", "]", ".", "iat", "[", "0", "]", "stop_times", "=", "group", "[", "\"departure_time\"", "]", ".", "values", "stop_distances", "=", "group", "[", "\"shape_dist_traveled\"", "]", ".", "values", "for", "d", ",", "v", "in", "dv_by_shape", "[", "sid", "]", ":", "# Interpolate crossing time", "time", "=", "np", ".", "interp", "(", "d", ",", "stop_distances", ",", "stop_times", ")", "# Compute direction of trip travel relative to", "# screen line by looking at the sign of the cross", "# product of tiny shape vector and screen line vector", "det", "=", "np", ".", "linalg", ".", "det", "(", "np", ".", "array", "(", "[", "v", ",", "w", "]", ")", ")", "if", "det", ">=", "0", ":", "orientation", "=", "1", "else", ":", "orientation", "=", "-", "1", "# Update rows", "rows", ".", "append", "(", "[", "date", ",", "tid", ",", "rid", ",", "rsn", ",", "time", ",", "orientation", "]", ")", "# Create DataFrame", "cols", "=", "[", "\"date\"", ",", "\"trip_id\"", ",", "\"route_id\"", ",", "\"route_short_name\"", ",", "\"crossing_time\"", ",", "\"orientation\"", ",", "]", "g", "=", "pd", ".", "DataFrame", "(", "rows", ",", "columns", "=", "cols", ")", ".", "sort_values", "(", "[", "\"date\"", ",", "\"crossing_time\"", "]", ")", "# Convert departure times back to time strings", "g", "[", "\"crossing_time\"", "]", "=", "g", "[", "\"crossing_time\"", "]", ".", "map", "(", "lambda", "x", ":", "hp", ".", "timestr_to_seconds", "(", "x", ",", "inverse", "=", "True", ")", ")", "return", "g" ]
37.584795
0.000152
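A worked example of the orientation test used above: the sign of the 2x2
determinant det([v, w]) tells which side of the screen-line vector w the
travel vector v points to. The vectors here are illustrative.

import numpy as np

w = np.array([1.0, 0.0])        # screen line pointing east
v_north = np.array([0.0, 1.0])  # travel heading north
v_south = np.array([0.0, -1.0])  # travel heading south
print(np.linalg.det(np.array([v_north, w])))  # -1.0 -> orientation -1
print(np.linalg.det(np.array([v_south, w])))  # +1.0 -> orientation +1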
def pposition(hd, details=False):
    """Parse string into angular position.

    A string containing 2 or 6 numbers is parsed, and the numbers are
    converted into decimal numbers. In the former case the numbers are
    assumed to be floats. In the latter case, the numbers are assumed
    to be sexagesimal.

    Parameters
    ----------
    hd: str
        String containing 2 or 6 numbers. The numbers can be separated
        by a character or characters other than ".", "-", "+". The
        string must contain either 2 or 6 numbers.

    details: bool
        The detailed result from parsing the string is returned. See
        "Returns" section below. Default is False.

    Returns
    -------
    x: (float, float) or dict
        A tuple containing decimal equivalents of the parsed numbers.
        If the string contains 6 numbers then they are assumed to be
        sexagesimal components.

        If ``details`` is True then a dictionary with the following
        keys is returned:

            x: float
                The first number.
            y: float
                The second number
            numvals: int
                Number of items parsed; 2 or 6.
            raw_x: dict
                The result returned by ``phmsdms`` for the first number.
            raw_y: dict
                The result returned by ``phmsdms`` for the second number.

        It is up to the user to interpret the units of the numbers
        returned.

    Raises
    ------
    ValueError:
        The exception is raised if the string cannot be interpreted as
        a sequence of 2 or 6 numbers.

    Examples
    --------
    The position of M100 reported by SIMBAD is
    "12 22 54.899 +15 49 20.57". This can be easily parsed in the
    following manner.

    >>> from angles import pposition
    >>> ra, de = pposition("12 22 54.899 +15 49 20.57")
    >>> ra
    12.38191638888889
    >>> de
    15.822380555555556
    """
    # :TODO: split two angles based on user entered separator and process each part separately.
    # Split at any run of characters other than digits, ".", "-", and "+".
    # Use "+" (one or more) rather than "*": a pattern that can match the
    # empty string splits between every character on Python 3.7+.
    p = re.split(r"[^\d\-+.]+", hd)
    if len(p) not in [2, 6]:
        raise ValueError("Input must contain either 2 or 6 numbers.")

    # Two floating point numbers if string has 2 numbers.
    if len(p) == 2:
        x, y = float(p[0]), float(p[1])
        if details:
            numvals = 2
            raw_x = p[0]
            raw_y = p[1]
    # Two sexagesimal numbers if string has 6 numbers.
    elif len(p) == 6:
        x_p = phmsdms(" ".join(p[:3]))
        x = sexa2deci(x_p['sign'], *x_p['vals'])

        y_p = phmsdms(" ".join(p[3:]))
        y = sexa2deci(y_p['sign'], *y_p['vals'])

        if details:
            raw_x = x_p
            raw_y = y_p
            numvals = 6

    if details:
        result = dict(x=x, y=y, numvals=numvals, raw_x=raw_x,
                      raw_y=raw_y)
    else:
        result = x, y

    return result
[ "def", "pposition", "(", "hd", ",", "details", "=", "False", ")", ":", "# :TODO: split two angles based on user entered separator and process each part separately.", "# Split at any character other than a digit, \".\", \"-\", and \"+\".", "p", "=", "re", ".", "split", "(", "r\"[^\\d\\-+.]*\"", ",", "hd", ")", "if", "len", "(", "p", ")", "not", "in", "[", "2", ",", "6", "]", ":", "raise", "ValueError", "(", "\"Input must contain either 2 or 6 numbers.\"", ")", "# Two floating point numbers if string has 2 numbers.", "if", "len", "(", "p", ")", "==", "2", ":", "x", ",", "y", "=", "float", "(", "p", "[", "0", "]", ")", ",", "float", "(", "p", "[", "1", "]", ")", "if", "details", ":", "numvals", "=", "2", "raw_x", "=", "p", "[", "0", "]", "raw_y", "=", "p", "[", "1", "]", "# Two sexagesimal numbers if string has 6 numbers.", "elif", "len", "(", "p", ")", "==", "6", ":", "x_p", "=", "phmsdms", "(", "\" \"", ".", "join", "(", "p", "[", ":", "3", "]", ")", ")", "x", "=", "sexa2deci", "(", "x_p", "[", "'sign'", "]", ",", "*", "x_p", "[", "'vals'", "]", ")", "y_p", "=", "phmsdms", "(", "\" \"", ".", "join", "(", "p", "[", "3", ":", "]", ")", ")", "y", "=", "sexa2deci", "(", "y_p", "[", "'sign'", "]", ",", "*", "y_p", "[", "'vals'", "]", ")", "if", "details", ":", "raw_x", "=", "x_p", "raw_y", "=", "y_p", "numvals", "=", "6", "if", "details", ":", "result", "=", "dict", "(", "x", "=", "x", ",", "y", "=", "y", ",", "numvals", "=", "numvals", ",", "raw_x", "=", "raw_x", ",", "raw_y", "=", "raw_y", ")", "else", ":", "result", "=", "x", ",", "y", "return", "result" ]
28.876289
0.00069
def get_command(self, command):
    """Helper function for OS X - return GNU utils rather than the default
    BSD versions for e.g. head and md5sum where possible and needed.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    if command in ('md5sum','sed','head'):
        if self.get_current_shutit_pexpect_session_environment().distro == 'osx':
            return 'g' + command
    return command
[ "def", "get_command", "(", "self", ",", "command", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "if", "command", "in", "(", "'md5sum'", ",", "'sed'", ",", "'head'", ")", ":", "if", "self", ".", "get_current_shutit_pexpect_session_environment", "(", ")", ".", "distro", "==", "'osx'", ":", "return", "'g'", "+", "command", "return", "command" ]
40.555556
0.034853
def _normalizeImpurityMatrix(matrix):
    """Normalize each row of the matrix so that the sum of the row equals 1.

    :param matrix: a matrix (2d nested list) containing numbers, each isobaric
        channel must be present as a row.
    :returns: a matrix containing normalized values
    """
    newMatrix = list()
    for line in matrix:
        total = sum(line)
        if total != 0:
            newMatrix.append([i / total for i in line])
        else:
            newMatrix.append(line)
    return newMatrix
[ "def", "_normalizeImpurityMatrix", "(", "matrix", ")", ":", "newMatrix", "=", "list", "(", ")", "for", "line", "in", "matrix", ":", "total", "=", "sum", "(", "line", ")", "if", "total", "!=", "0", ":", "newMatrix", ".", "append", "(", "[", "i", "/", "total", "for", "i", "in", "line", "]", ")", "else", ":", "newMatrix", ".", "append", "(", "line", ")", "return", "newMatrix" ]
33.466667
0.001938
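A quick check of the row normalization above, including the all-zero row
that is passed through unchanged; the values are illustrative and assume
Python 3 (true) division.

matrix = [[1, 1, 2], [0, 0, 0]]
print(_normalizeImpurityMatrix(matrix))  # [[0.25, 0.25, 0.5], [0, 0, 0]]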
def select_right(self): """move cursor right""" r, c = self._index self._select_index(r, c+1)
[ "def", "select_right", "(", "self", ")", ":", "r", ",", "c", "=", "self", ".", "_index", "self", ".", "_select_index", "(", "r", ",", "c", "+", "1", ")" ]
28.5
0.017094
def _get_variable_value(defined_variables, name):
    """ Call the method with the given name on the defined_variables object
    (raising an exception if it doesn't exist) and cast the result to the
    specified type.
    Returns an instance of operators.BaseType
    """
    def fallback(*args, **kwargs):
        raise AssertionError("Variable {0} is not defined in class {1}".format(
            name, defined_variables.__class__.__name__))
    method = getattr(defined_variables, name, fallback)
    val = method()
    return method.field_type(val)
[ "def", "_get_variable_value", "(", "defined_variables", ",", "name", ")", ":", "def", "fallback", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "raise", "AssertionError", "(", "\"Variable {0} is not defined in class {1}\"", ".", "format", "(", "name", ",", "defined_variables", ".", "__class__", ".", "__name__", ")", ")", "method", "=", "getattr", "(", "defined_variables", ",", "name", ",", "fallback", ")", "val", "=", "method", "(", ")", "return", "method", ".", "field_type", "(", "val", ")" ]
42.153846
0.001786
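A hypothetical stand-in for a defined-variables object, sketching the
lookup-and-cast flow above: each variable method carries a field_type
callable used to cast the returned value. The class and attribute names
here are illustrative, not from the library.

class Vars(object):
    def age(self):
        return '42'
    age.field_type = int  # cast hook, as the helper expects

print(_get_variable_value(Vars(), 'age'))  # 42
# _get_variable_value(Vars(), 'missing') raises AssertionError via fallback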
def _parse_docstring(fh):
    """Parse the docstrings of a script to find marked dependencies."""
    find_fades = re.compile(r'\b(fades)\b:').search

    for line in fh:
        if line.startswith("'"):
            quote = "'"
            break
        if line.startswith('"'):
            quote = '"'
            break
    else:
        return {}

    if line[1] == quote:
        # docstring starts with triple quotes
        endquote = quote * 3
    else:
        endquote = quote

    if endquote in line[len(endquote):]:
        docstring_lines = [line[:line.index(endquote)]]
    else:
        docstring_lines = [line]
        for line in fh:
            if endquote in line:
                docstring_lines.append(line[:line.index(endquote)])
                break
            docstring_lines.append(line)

    docstring_lines = iter(docstring_lines)
    for doc_line in docstring_lines:
        if find_fades(doc_line):
            break
    else:
        return {}

    return _parse_requirement(list(docstring_lines))
[ "def", "_parse_docstring", "(", "fh", ")", ":", "find_fades", "=", "re", ".", "compile", "(", "r'\\b(fades)\\b:'", ")", ".", "search", "for", "line", "in", "fh", ":", "if", "line", ".", "startswith", "(", "\"'\"", ")", ":", "quote", "=", "\"'\"", "break", "if", "line", ".", "startswith", "(", "'\"'", ")", ":", "quote", "=", "'\"'", "break", "else", ":", "return", "{", "}", "if", "line", "[", "1", "]", "==", "quote", ":", "# comment start with triple quotes", "endquote", "=", "quote", "*", "3", "else", ":", "endquote", "=", "quote", "if", "endquote", "in", "line", "[", "len", "(", "endquote", ")", ":", "]", ":", "docstring_lines", "=", "[", "line", "[", ":", "line", ".", "index", "(", "endquote", ")", "]", "]", "else", ":", "docstring_lines", "=", "[", "line", "]", "for", "line", "in", "fh", ":", "if", "endquote", "in", "line", ":", "docstring_lines", ".", "append", "(", "line", "[", ":", "line", ".", "index", "(", "endquote", ")", "]", ")", "break", "docstring_lines", ".", "append", "(", "line", ")", "docstring_lines", "=", "iter", "(", "docstring_lines", ")", "for", "doc_line", "in", "docstring_lines", ":", "if", "find_fades", "(", "doc_line", ")", ":", "break", "else", ":", "return", "{", "}", "return", "_parse_requirement", "(", "list", "(", "docstring_lines", ")", ")" ]
26
0.000976
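An example of a script whose docstring the parser above would pick up; the
dependency names after the 'fades:' marker are illustrative, and the final
return shape depends on the _parse_requirement helper, which is not shown.

import io
import re

script = '''"""A small tool.

fades:
    requests
    pyyaml
"""
print('hello')
'''
fh = io.StringIO(script)
# The marker regex used by _parse_docstring matches the 'fades:' line:
assert re.search(r'\b(fades)\b:', 'fades:\n')
# _parse_docstring(fh) would hand the indented dependency lines to
# _parse_requirement for parsing into requirement objects.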
def segment_curvature(self, t, use_inf=False): """returns the curvature of the segment at t. Notes ----- If you receive a RuntimeWarning, run command >>> old = np.seterr(invalid='raise') This can be undone with >>> np.seterr(**old) """ dz = self.derivative(t) ddz = self.derivative(t, n=2) dx, dy = dz.real, dz.imag ddx, ddy = ddz.real, ddz.imag old_np_seterr = np.seterr(invalid='raise') try: kappa = abs(dx*ddy - dy*ddx)/sqrt(dx*dx + dy*dy)**3 except (ZeroDivisionError, FloatingPointError): # tangent vector is zero at t, use polytools to find limit p = self.poly() dp = p.deriv() ddp = dp.deriv() dx, dy = real(dp), imag(dp) ddx, ddy = real(ddp), imag(ddp) f2 = (dx*ddy - dy*ddx)**2 g2 = (dx*dx + dy*dy)**3 lim2 = rational_limit(f2, g2, t) if lim2 < 0: # impossible, must be numerical error return 0 kappa = sqrt(lim2) finally: np.seterr(**old_np_seterr) return kappa
[ "def", "segment_curvature", "(", "self", ",", "t", ",", "use_inf", "=", "False", ")", ":", "dz", "=", "self", ".", "derivative", "(", "t", ")", "ddz", "=", "self", ".", "derivative", "(", "t", ",", "n", "=", "2", ")", "dx", ",", "dy", "=", "dz", ".", "real", ",", "dz", ".", "imag", "ddx", ",", "ddy", "=", "ddz", ".", "real", ",", "ddz", ".", "imag", "old_np_seterr", "=", "np", ".", "seterr", "(", "invalid", "=", "'raise'", ")", "try", ":", "kappa", "=", "abs", "(", "dx", "*", "ddy", "-", "dy", "*", "ddx", ")", "/", "sqrt", "(", "dx", "*", "dx", "+", "dy", "*", "dy", ")", "**", "3", "except", "(", "ZeroDivisionError", ",", "FloatingPointError", ")", ":", "# tangent vector is zero at t, use polytools to find limit", "p", "=", "self", ".", "poly", "(", ")", "dp", "=", "p", ".", "deriv", "(", ")", "ddp", "=", "dp", ".", "deriv", "(", ")", "dx", ",", "dy", "=", "real", "(", "dp", ")", ",", "imag", "(", "dp", ")", "ddx", ",", "ddy", "=", "real", "(", "ddp", ")", ",", "imag", "(", "ddp", ")", "f2", "=", "(", "dx", "*", "ddy", "-", "dy", "*", "ddx", ")", "**", "2", "g2", "=", "(", "dx", "*", "dx", "+", "dy", "*", "dy", ")", "**", "3", "lim2", "=", "rational_limit", "(", "f2", ",", "g2", ",", "t", ")", "if", "lim2", "<", "0", ":", "# impossible, must be numerical error", "return", "0", "kappa", "=", "sqrt", "(", "lim2", ")", "finally", ":", "np", ".", "seterr", "(", "*", "*", "old_np_seterr", ")", "return", "kappa" ]
30.147059
0.000945
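A numeric sanity check of the curvature formula the method above implements,
kappa = |x'y'' - y'x''| / (x'^2 + y'^2)^(3/2), evaluated on a circle of
radius 2 where the exact curvature is 1/2; pure numpy, independent of the
class.

import numpy as np

r, t = 2.0, 0.7
dx, dy = -r * np.sin(t), r * np.cos(t)     # first derivatives
ddx, ddy = -r * np.cos(t), -r * np.sin(t)  # second derivatives
kappa = abs(dx * ddy - dy * ddx) / (dx * dx + dy * dy) ** 1.5
print(kappa)  # 0.5 == 1/r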
def MAFFT(sequences, gap_open=1.53, gap_extension=0.0, retree=2):
    '''A Coral wrapper for the MAFFT command line multiple sequence aligner.

    :param sequences: A list of sequences to align.
    :type sequences: List of homogeneous sequences (all DNA, or all RNA,
                     etc.)
    :param gap_open: --op (gap open) penalty in MAFFT cli.
    :type gap_open: float
    :param gap_extension: --ep (gap extension) penalty in MAFFT cli.
    :type gap_extension: float
    :param retree: Number of times to build the guide tree.
    :type retree: int

    '''
    arguments = ['mafft']
    arguments += ['--op', str(gap_open)]
    arguments += ['--ep', str(gap_extension)]
    arguments += ['--retree', str(retree)]
    arguments.append('input.fasta')
    tempdir = tempfile.mkdtemp()
    try:
        with open(os.path.join(tempdir, 'input.fasta'), 'w') as f:
            for i, sequence in enumerate(sequences):
                if hasattr(sequence, 'name'):
                    name = sequence.name
                else:
                    name = 'sequence{}'.format(i)
                f.write('>{}\n'.format(name))
                f.write(str(sequence) + '\n')
        process = subprocess.Popen(arguments,
                                   stdout=subprocess.PIPE,
                                   stderr=open(os.devnull, 'w'),
                                   cwd=tempdir)
        stdout = process.communicate()[0]
    finally:
        shutil.rmtree(tempdir)
    # Process stdout into something downstream process can use
    records = stdout.split('>')
    # The first element (the text before the first '>') is blank
    records.pop(0)
    aligned_list = []
    for record in records:
        lines = record.split('\n')
        name = lines.pop(0)
        aligned_list.append(coral.DNA(''.join(lines)))

    return aligned_list
[ "def", "MAFFT", "(", "sequences", ",", "gap_open", "=", "1.53", ",", "gap_extension", "=", "0.0", ",", "retree", "=", "2", ")", ":", "arguments", "=", "[", "'mafft'", "]", "arguments", "+=", "[", "'--op'", ",", "str", "(", "gap_open", ")", "]", "arguments", "+=", "[", "'--ep'", ",", "str", "(", "gap_extension", ")", "]", "arguments", "+=", "[", "'--retree'", ",", "str", "(", "retree", ")", "]", "arguments", ".", "append", "(", "'input.fasta'", ")", "tempdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "tempdir", ",", "'input.fasta'", ")", ",", "'w'", ")", "as", "f", ":", "for", "i", ",", "sequence", "in", "enumerate", "(", "sequences", ")", ":", "if", "hasattr", "(", "sequence", ",", "'name'", ")", ":", "name", "=", "sequence", ".", "name", "else", ":", "name", "=", "'sequence{}'", ".", "format", "(", "i", ")", "f", ".", "write", "(", "'>{}\\n'", ".", "format", "(", "name", ")", ")", "f", ".", "write", "(", "str", "(", "sequence", ")", "+", "'\\n'", ")", "process", "=", "subprocess", ".", "Popen", "(", "arguments", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", ",", "cwd", "=", "tempdir", ")", "stdout", "=", "process", ".", "communicate", "(", ")", "[", "0", "]", "finally", ":", "shutil", ".", "rmtree", "(", "tempdir", ")", "# Process stdout into something downstream process can use", "records", "=", "stdout", ".", "split", "(", "'>'", ")", "# First line is now blank", "records", ".", "pop", "(", "0", ")", "aligned_list", "=", "[", "]", "for", "record", "in", "records", ":", "lines", "=", "record", ".", "split", "(", "'\\n'", ")", "name", "=", "lines", ".", "pop", "(", "0", ")", "aligned_list", ".", "append", "(", "coral", ".", "DNA", "(", "''", ".", "join", "(", "lines", ")", ")", ")", "return", "aligned_list" ]
36.212766
0.000572
def cmpToDataStore_uri(base, ds1, ds2): '''Bases the comparison of the datastores on URI alone.''' ret = difflib.get_close_matches(base.uri, [ds1.uri, ds2.uri], 1, cutoff=0.5) if len(ret) <= 0: return 0 if ret[0] == ds1.uri: return -1 return 1
[ "def", "cmpToDataStore_uri", "(", "base", ",", "ds1", ",", "ds2", ")", ":", "ret", "=", "difflib", ".", "get_close_matches", "(", "base", ".", "uri", ",", "[", "ds1", ".", "uri", ",", "ds2", ".", "uri", "]", ",", "1", ",", "cutoff", "=", "0.5", ")", "if", "len", "(", "ret", ")", "<=", "0", ":", "return", "0", "if", "ret", "[", "0", "]", "==", "ds1", ".", "uri", ":", "return", "-", "1", "return", "1" ]
31.75
0.022989
def get_pydoc_completions(modulename): """Get possible completions for modulename for pydoc. Returns a list of possible values to be passed to pydoc. """ modulename = compat.ensure_not_unicode(modulename) modulename = modulename.rstrip(".") if modulename == "": return sorted(get_modules()) candidates = get_completions(modulename) if candidates: return sorted(candidates) needle = modulename if "." in needle: modulename, part = needle.rsplit(".", 1) candidates = get_completions(modulename) else: candidates = get_modules() return sorted(candidate for candidate in candidates if candidate.startswith(needle))
[ "def", "get_pydoc_completions", "(", "modulename", ")", ":", "modulename", "=", "compat", ".", "ensure_not_unicode", "(", "modulename", ")", "modulename", "=", "modulename", ".", "rstrip", "(", "\".\"", ")", "if", "modulename", "==", "\"\"", ":", "return", "sorted", "(", "get_modules", "(", ")", ")", "candidates", "=", "get_completions", "(", "modulename", ")", "if", "candidates", ":", "return", "sorted", "(", "candidates", ")", "needle", "=", "modulename", "if", "\".\"", "in", "needle", ":", "modulename", ",", "part", "=", "needle", ".", "rsplit", "(", "\".\"", ",", "1", ")", "candidates", "=", "get_completions", "(", "modulename", ")", "else", ":", "candidates", "=", "get_modules", "(", ")", "return", "sorted", "(", "candidate", "for", "candidate", "in", "candidates", "if", "candidate", ".", "startswith", "(", "needle", ")", ")" ]
33.238095
0.001393
def get_condition(self, service_id, version_number, name): """Gets a specified condition.""" content = self._fetch("/service/%s/version/%d/condition/%s" % (service_id, version_number, name)) return FastlyCondition(self, content)
[ "def", "get_condition", "(", "self", ",", "service_id", ",", "version_number", ",", "name", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/condition/%s\"", "%", "(", "service_id", ",", "version_number", ",", "name", ")", ")", "return", "FastlyCondition", "(", "self", ",", "content", ")" ]
57.75
0.025641
def Rank(a): """ Rank op. """ return np.array([len(a.shape)], dtype=np.int32),
[ "def", "Rank", "(", "a", ")", ":", "return", "np", ".", "array", "(", "[", "len", "(", "a", ".", "shape", ")", "]", ",", "dtype", "=", "np", ".", "int32", ")", "," ]
18
0.010638
def process_connection(connection, my_socket):
    """Handle I/O and Timers on a single Connection."""
    if connection.closed:
        return False

    work = False
    readfd = []
    writefd = []
    if connection.needs_input > 0:
        readfd = [my_socket]
        work = True
    if connection.has_output > 0:
        writefd = [my_socket]
        work = True

    timeout = None
    deadline = connection.next_tick
    if deadline:
        work = True
        now = time.time()
        timeout = 0 if deadline <= now else deadline - now

    if not work:
        return False

    readable, writable, ignore = select.select(readfd,
                                               writefd,
                                               [],
                                               timeout)
    if readable:
        try:
            pyngus.read_socket_input(connection, my_socket)
        except Exception as e:
            # treat any socket error as fatal to the connection
            LOG.error("Socket error on read: %s", str(e))
            connection.close_input()
            # make an attempt to cleanly close
            connection.close()

    connection.process(time.time())

    if writable:
        try:
            pyngus.write_socket_output(connection, my_socket)
        except Exception as e:
            LOG.error("Socket error on write %s", str(e))
            connection.close_output()
            # this may not help, but it won't hurt:
            connection.close()
    return True
[ "def", "process_connection", "(", "connection", ",", "my_socket", ")", ":", "if", "connection", ".", "closed", ":", "return", "False", "work", "=", "False", "readfd", "=", "[", "]", "writefd", "=", "[", "]", "if", "connection", ".", "needs_input", ">", "0", ":", "readfd", "=", "[", "my_socket", "]", "work", "=", "True", "if", "connection", ".", "has_output", ">", "0", ":", "writefd", "=", "[", "my_socket", "]", "work", "=", "True", "timeout", "=", "None", "deadline", "=", "connection", ".", "next_tick", "if", "deadline", ":", "work", "=", "True", "now", "=", "time", ".", "time", "(", ")", "timeout", "=", "0", "if", "deadline", "<=", "now", "else", "deadline", "-", "now", "if", "not", "work", ":", "return", "False", "readable", ",", "writable", ",", "ignore", "=", "select", ".", "select", "(", "readfd", ",", "writefd", ",", "[", "]", ",", "timeout", ")", "if", "readable", ":", "try", ":", "pyngus", ".", "read_socket_input", "(", "connection", ",", "my_socket", ")", "except", "Exception", "as", "e", ":", "# treat any socket error as", "LOG", ".", "error", "(", "\"Socket error on read: %s\"", ",", "str", "(", "e", ")", ")", "connection", ".", "close_input", "(", ")", "# make an attempt to cleanly close", "connection", ".", "close", "(", ")", "connection", ".", "process", "(", "time", ".", "time", "(", ")", ")", "if", "writable", ":", "try", ":", "pyngus", ".", "write_socket_output", "(", "connection", ",", "my_socket", ")", "except", "Exception", "as", "e", ":", "LOG", ".", "error", "(", "\"Socket error on write %s\"", ",", "str", "(", "e", ")", ")", "connection", ".", "close_output", "(", ")", "# this may not help, but it won't hurt:", "connection", ".", "close", "(", ")", "return", "True" ]
29.489796
0.00067
def take(list_, index_list): """ Selects a subset of a list based on a list of indices. This is similar to np.take, but pure python. Args: list_ (list): some indexable object index_list (list, slice, int): some indexing object Returns: list or scalar: subset of the list CommandLine: python -m utool.util_list --test-take SeeAlso: ut.dict_take ut.dict_subset ut.none_take ut.compress Example: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> list_ = [0, 1, 2, 3] >>> index_list = [2, 0] >>> result = take(list_, index_list) >>> print(result) [2, 0] Example: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> list_ = [0, 1, 2, 3] >>> index = 2 >>> result = take(list_, index) >>> print(result) 2 Example: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> list_ = [0, 1, 2, 3] >>> index = slice(1, None, 2) >>> result = take(list_, index) >>> print(result) [1, 3] """ try: return [list_[index] for index in index_list] except TypeError: return list_[index_list]
[ "def", "take", "(", "list_", ",", "index_list", ")", ":", "try", ":", "return", "[", "list_", "[", "index", "]", "for", "index", "in", "index_list", "]", "except", "TypeError", ":", "return", "list_", "[", "index_list", "]" ]
24.365385
0.000759
def download_as_file(fn, data=None):
    """
    Download given `data` as file `fn`.

    This service exists to allow the frontend to present the user with
    downloadable files.
    """
    if data is None:
        raise HTTPError(500, "This service requires a POST `data` parameter.")

    response.set_header("Content-Type", "application/octet-stream")
    response.set_header(
        "Content-Disposition",
        'attachment; filename="%s"' % fn
    )

    return StringIO(data)
[ "def", "download_as_file", "(", "fn", ",", "data", "=", "None", ")", ":", "if", "data", "is", "None", ":", "raise", "HTTPError", "(", "500", ",", "\"This service require POST `data` parameter.\"", ")", "response", ".", "set_header", "(", "\"Content-Type\"", ",", "\"application/octet-stream\"", ")", "response", ".", "set_header", "(", "\"Content-Disposition\"", ",", "'attachment; filename=\"%s\"'", "%", "fn", ")", "return", "StringIO", "(", "data", ")" ]
30.266667
0.002137
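A hypothetical Bottle route wiring for the service above; the URL pattern
and form field name are illustrative, not taken from the source
application.

from bottle import post, request, run

@post('/download/<fn>')
def download(fn):
    # Forward the posted payload to the service; the response headers set
    # inside download_as_file make the browser treat it as a file download.
    return download_as_file(fn, data=request.forms.get('data'))

# run(host='localhost', port=8080)  # start the dev server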
def _EncodeUnknownFields(message): """Remap unknown fields in message out of message.source.""" source = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message)) if source is None: return message # CopyProtoMessage uses _ProtoJsonApiTools, which uses this message. Use # the vanilla protojson-based copy function to avoid infinite recursion. result = _CopyProtoMessageVanillaProtoJson(message) pairs_field = message.field_by_name(source) if not isinstance(pairs_field, messages.MessageField): raise exceptions.InvalidUserInputError( 'Invalid pairs field %s' % pairs_field) pairs_type = pairs_field.message_type value_field = pairs_type.field_by_name('value') value_variant = value_field.variant pairs = getattr(message, source) codec = _ProtoJsonApiTools.Get() for pair in pairs: encoded_value = codec.encode_field(value_field, pair.value) result.set_unrecognized_field(pair.key, encoded_value, value_variant) setattr(result, source, []) return result
[ "def", "_EncodeUnknownFields", "(", "message", ")", ":", "source", "=", "_UNRECOGNIZED_FIELD_MAPPINGS", ".", "get", "(", "type", "(", "message", ")", ")", "if", "source", "is", "None", ":", "return", "message", "# CopyProtoMessage uses _ProtoJsonApiTools, which uses this message. Use", "# the vanilla protojson-based copy function to avoid infinite recursion.", "result", "=", "_CopyProtoMessageVanillaProtoJson", "(", "message", ")", "pairs_field", "=", "message", ".", "field_by_name", "(", "source", ")", "if", "not", "isinstance", "(", "pairs_field", ",", "messages", ".", "MessageField", ")", ":", "raise", "exceptions", ".", "InvalidUserInputError", "(", "'Invalid pairs field %s'", "%", "pairs_field", ")", "pairs_type", "=", "pairs_field", ".", "message_type", "value_field", "=", "pairs_type", ".", "field_by_name", "(", "'value'", ")", "value_variant", "=", "value_field", ".", "variant", "pairs", "=", "getattr", "(", "message", ",", "source", ")", "codec", "=", "_ProtoJsonApiTools", ".", "Get", "(", ")", "for", "pair", "in", "pairs", ":", "encoded_value", "=", "codec", ".", "encode_field", "(", "value_field", ",", "pair", ".", "value", ")", "result", ".", "set_unrecognized_field", "(", "pair", ".", "key", ",", "encoded_value", ",", "value_variant", ")", "setattr", "(", "result", ",", "source", ",", "[", "]", ")", "return", "result" ]
46.772727
0.000952
def process_from_file(signor_data_file, signor_complexes_file=None):
    """Process Signor interaction data from CSV files.

    Parameters
    ----------
    signor_data_file : str
        Path to the Signor interaction data file in CSV format.
    signor_complexes_file : str
        Path to the Signor complexes data in CSV format. If unspecified,
        Signor complexes will not be expanded to their constituents.

    Returns
    -------
    indra.sources.signor.SignorProcessor
        SignorProcessor containing Statements extracted from the Signor data.
    """
    # Get generator over the CSV file
    data_iter = read_unicode_csv(signor_data_file, delimiter=';', skiprows=1)
    complexes_iter = None
    if signor_complexes_file:
        complexes_iter = read_unicode_csv(signor_complexes_file, delimiter=';',
                                          skiprows=1)
    else:
        logger.warning('Signor complex mapping file not provided, Statements '
                       'involving complexes will not be expanded to members.')
    return _processor_from_data(data_iter, complexes_iter)
[ "def", "process_from_file", "(", "signor_data_file", ",", "signor_complexes_file", "=", "None", ")", ":", "# Get generator over the CSV file", "data_iter", "=", "read_unicode_csv", "(", "signor_data_file", ",", "delimiter", "=", "';'", ",", "skiprows", "=", "1", ")", "complexes_iter", "=", "None", "if", "signor_complexes_file", ":", "complexes_iter", "=", "read_unicode_csv", "(", "signor_complexes_file", ",", "delimiter", "=", "';'", ",", "skiprows", "=", "1", ")", "else", ":", "logger", ".", "warning", "(", "'Signor complex mapping file not provided, Statements '", "'involving complexes will not be expanded to members.'", ")", "return", "_processor_from_data", "(", "data_iter", ",", "complexes_iter", ")" ]
41.538462
0.000905
def lowstate_file_refs(chunks, extras=''): ''' Create a list of file ref objects to reconcile ''' refs = {} for chunk in chunks: if not isinstance(chunk, dict): continue saltenv = 'base' crefs = [] for state in chunk: if state == '__env__': saltenv = chunk[state] elif state.startswith('__'): continue crefs.extend(salt_refs(chunk[state])) if saltenv not in refs: refs[saltenv] = [] if crefs: refs[saltenv].append(crefs) if extras: extra_refs = extras.split(',') if extra_refs: for env in refs: for x in extra_refs: refs[env].append([x]) return refs
[ "def", "lowstate_file_refs", "(", "chunks", ",", "extras", "=", "''", ")", ":", "refs", "=", "{", "}", "for", "chunk", "in", "chunks", ":", "if", "not", "isinstance", "(", "chunk", ",", "dict", ")", ":", "continue", "saltenv", "=", "'base'", "crefs", "=", "[", "]", "for", "state", "in", "chunk", ":", "if", "state", "==", "'__env__'", ":", "saltenv", "=", "chunk", "[", "state", "]", "elif", "state", ".", "startswith", "(", "'__'", ")", ":", "continue", "crefs", ".", "extend", "(", "salt_refs", "(", "chunk", "[", "state", "]", ")", ")", "if", "saltenv", "not", "in", "refs", ":", "refs", "[", "saltenv", "]", "=", "[", "]", "if", "crefs", ":", "refs", "[", "saltenv", "]", ".", "append", "(", "crefs", ")", "if", "extras", ":", "extra_refs", "=", "extras", ".", "split", "(", "','", ")", "if", "extra_refs", ":", "for", "env", "in", "refs", ":", "for", "x", "in", "extra_refs", ":", "refs", "[", "env", "]", ".", "append", "(", "[", "x", "]", ")", "return", "refs" ]
28.407407
0.001261
def ADDMOD(self, a, b, c): """Modulo addition operation""" try: result = Operators.ITEBV(256, c == 0, 0, (a + b) % c) except ZeroDivisionError: result = 0 return result
[ "def", "ADDMOD", "(", "self", ",", "a", ",", "b", ",", "c", ")", ":", "try", ":", "result", "=", "Operators", ".", "ITEBV", "(", "256", ",", "c", "==", "0", ",", "0", ",", "(", "a", "+", "b", ")", "%", "c", ")", "except", "ZeroDivisionError", ":", "result", "=", "0", "return", "result" ]
31.142857
0.008929
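A concrete-value sketch of the EVM ADDMOD semantics implemented above:
(a + b) % c computed without 256-bit overflow, and 0 when the modulus is 0.
This pure-Python stand-in replaces the symbolic Operators.ITEBV used by the
emulator.

def addmod(a, b, c):
    # EVM convention: ADDMOD with a zero modulus yields 0 instead of raising.
    return (a + b) % c if c != 0 else 0

print(addmod(10, 10, 8))         # 4
print(addmod(2**256 - 1, 2, 3))  # (2**256 + 1) % 3 == 2, no wrap at 2**256
print(addmod(1, 2, 0))           # 0, by EVM convention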
def _process_dataset(name, directory, num_shards, labels_file): """Process a complete data set and save it as a TFRecord. Args: name: string, unique identifier specifying the data set. directory: string, root path to the data set. num_shards: integer number of shards for this data set. labels_file: string, path to the labels file. """ filenames, texts, labels = _find_image_files(directory, labels_file) _process_image_files(name, filenames, texts, labels, num_shards)
[ "def", "_process_dataset", "(", "name", ",", "directory", ",", "num_shards", ",", "labels_file", ")", ":", "filenames", ",", "texts", ",", "labels", "=", "_find_image_files", "(", "directory", ",", "labels_file", ")", "_process_image_files", "(", "name", ",", "filenames", ",", "texts", ",", "labels", ",", "num_shards", ")" ]
44.272727
0.008048
def add_provide(self, provide): """ Add a provide object if it does not already exist """ for p in self.provides: if p.value == provide.value: return self.provides.append(provide)
[ "def", "add_provide", "(", "self", ",", "provide", ")", ":", "for", "p", "in", "self", ".", "provides", ":", "if", "p", ".", "value", "==", "provide", ".", "value", ":", "return", "self", ".", "provides", ".", "append", "(", "provide", ")" ]
37.666667
0.008658
def _parseHeader (self, line, lineno, log):
    """Parses a sequence header line containing 'name: value' pairs."""
    if line.startswith('#') and line.find(':') > 0:
        tokens = [ t.strip().lower() for t in line[1:].split(":", 1) ]
        name   = tokens[0]
        pos    = SeqPos(line, lineno)

        if name in self.header:
            msg = 'Ignoring duplicate header parameter: %s'
            log.warning(msg % name, pos)
        else:
            for expected in ['seqid', 'version']:
                if name == expected:
                    value = util.toNumber(tokens[1], None)

                    if value is None:
                        msg = 'Parameter "%s" value "%s" is not a number.'
                        log.error(msg % (name, tokens[1]), pos)
                    else:
                        self.header[name] = value
[ "def", "_parseHeader", "(", "self", ",", "line", ",", "lineno", ",", "log", ")", ":", "if", "line", ".", "startswith", "(", "'#'", ")", "and", "line", ".", "find", "(", "':'", ")", ">", "0", ":", "tokens", "=", "[", "t", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "t", "in", "line", "[", "1", ":", "]", ".", "split", "(", "\":\"", ",", "1", ")", "]", "name", "=", "tokens", "[", "0", "]", "pos", "=", "SeqPos", "(", "line", ",", "lineno", ")", "if", "name", "in", "self", ".", "header", ":", "msg", "=", "'Ignoring duplicate header parameter: %s'", "log", ".", "warning", "(", "msg", "%", "name", ",", "pos", ")", "else", ":", "for", "expected", "in", "[", "'seqid'", ",", "'version'", "]", ":", "if", "name", "==", "expected", ":", "value", "=", "util", ".", "toNumber", "(", "tokens", "[", "1", "]", ",", "None", ")", "if", "value", "is", "None", ":", "msg", "=", "'Parameter \"%s\" value \"%s\" is not a number.'", "log", ".", "error", "(", "msg", "%", "(", "name", ",", "tokens", "[", "1", "]", ")", ",", "poss", ")", "else", ":", "self", ".", "header", "[", "name", "]", "=", "value" ]
39.526316
0.019506
def get_grp2codes(self): """Get dict of group name to namedtuples.""" grp2codes = cx.defaultdict(set) for code, ntd in self.code2nt.items(): grp2codes[ntd.group].add(code) return dict(grp2codes)
[ "def", "get_grp2codes", "(", "self", ")", ":", "grp2codes", "=", "cx", ".", "defaultdict", "(", "set", ")", "for", "code", ",", "ntd", "in", "self", ".", "code2nt", ".", "items", "(", ")", ":", "grp2codes", "[", "ntd", ".", "group", "]", ".", "add", "(", "code", ")", "return", "dict", "(", "grp2codes", ")" ]
38.833333
0.008403
def add_bollinger_bands(self,periods=20,boll_std=2,fill=True,column=None,name='',
                        str='{name}({column},{period})',**kwargs):
    """
    Add Bollinger Bands (BOLL) study to QuantFigure.studies

    Parameters:
        periods : int or list(int)
            Number of periods
        boll_std : int
            Number of standard deviations for the bollinger upper and
            lower bands
        fill : boolean
            If True, then the inner area of the bands will be filled
        column : string
            Defines the data column name that contains the
            data over which the study will be applied.
            Default: 'close'
        name : string
            Name given to the study
        str : string
            Label factory for studies
            The following wildcards can be used:
                {name} : Name of the column
                {study} : Name of the study
                {period} : Period used
            Examples:
                'study: {study} - period: {period}'

    kwargs:
        legendgroup : bool
            If true, all legend items are grouped into a
            single one
        fillcolor : string
            Color to be used for the fill color.
            Example:
                'rgba(62, 111, 176, .4)'
        All formatting values available on iplot()
    """
    if not column:
        column=self._d['close']
    study={'kind':'boll',
           'name':name,
           'params':{'periods':periods,'boll_std':boll_std,'column':column,
                     'str':str},
           'display':utils.merge_dict({'legendgroup':True,'fill':fill},kwargs)}
    self._add_study(study)
[ "def", "add_bollinger_bands", "(", "self", ",", "periods", "=", "20", ",", "boll_std", "=", "2", ",", "fill", "=", "True", ",", "column", "=", "None", ",", "name", "=", "''", ",", "str", "=", "'{name}({column},{period})'", ",", "*", "*", "kwargs", ")", ":", "if", "not", "column", ":", "column", "=", "self", ".", "_d", "[", "'close'", "]", "study", "=", "{", "'kind'", ":", "'boll'", ",", "'name'", ":", "name", ",", "'params'", ":", "{", "'periods'", ":", "periods", ",", "'boll_std'", ":", "boll_std", ",", "'column'", ":", "column", ",", "'str'", ":", "str", "}", ",", "'display'", ":", "utils", ".", "merge_dict", "(", "{", "'legendgroup'", ":", "True", ",", "'fill'", ":", "fill", "}", ",", "kwargs", ")", "}", "self", ".", "_add_study", "(", "study", ")" ]
29.304348
0.059584
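The computation behind the 'boll' study, sketched with pandas: a rolling
mean plus/minus boll_std rolling standard deviations. The series values and
parameters are illustrative, and the library's own implementation may
differ in detail.

import pandas as pd

close = pd.Series([10, 11, 12, 11, 13, 14, 13, 15, 16, 15], dtype=float)
periods, boll_std = 5, 2
sma = close.rolling(periods).mean()   # middle band
std = close.rolling(periods).std()    # rolling dispersion
bands = pd.DataFrame({'upper': sma + boll_std * std,
                      'mid': sma,
                      'lower': sma - boll_std * std})
print(bands.dropna())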
def from_analysis_period(cls, analysis_period, tau_b, tau_d,
                         daylight_savings_indicator='No'):
    """Initialize a RevisedClearSkyCondition from an analysis_period."""
    _check_analysis_period(analysis_period)
    return cls(analysis_period.st_month, analysis_period.st_day, tau_b, tau_d,
               daylight_savings_indicator)
[ "def", "from_analysis_period", "(", "cls", ",", "analysis_period", ",", "tau_b", ",", "tau_d", ",", "daylight_savings_indicator", "=", "'No'", ")", ":", "_check_analysis_period", "(", "analysis_period", ")", "return", "cls", "(", "analysis_period", ".", "st_month", ",", "analysis_period", ".", "st_day", ",", "tau_b", ",", "tau_d", ",", "daylight_savings_indicator", ")" ]
62.166667
0.010582
def init_defaults(self): """Initializes important headers to default values, if not already specified. The WARC-Record-ID header is set to a newly generated UUID. The WARC-Date header is set to the current datetime. The Content-Type is set based on the WARC-Type header. The Content-Length is initialized to 0. """ if "WARC-Record-ID" not in self: self['WARC-Record-ID'] = "<urn:uuid:%s>" % uuid.uuid1() if "WARC-Date" not in self: self['WARC-Date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ') if "Content-Type" not in self: self['Content-Type'] = WARCHeader.CONTENT_TYPES.get(self.type, "application/octet-stream")
[ "def", "init_defaults", "(", "self", ")", ":", "if", "\"WARC-Record-ID\"", "not", "in", "self", ":", "self", "[", "'WARC-Record-ID'", "]", "=", "\"<urn:uuid:%s>\"", "%", "uuid", ".", "uuid1", "(", ")", "if", "\"WARC-Date\"", "not", "in", "self", ":", "self", "[", "'WARC-Date'", "]", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%SZ'", ")", "if", "\"Content-Type\"", "not", "in", "self", ":", "self", "[", "'Content-Type'", "]", "=", "WARCHeader", ".", "CONTENT_TYPES", ".", "get", "(", "self", ".", "type", ",", "\"application/octet-stream\"", ")" ]
52.5
0.008021
def branches(self):
    """ Yield all branches of the grid that belong to this ring. """
    for branch in self._grid.graph_edges():
        if branch['branch'].ring == self:
            yield branch
[ "def", "branches", "(", "self", ")", ":", "for", "branch", "in", "self", ".", "_grid", ".", "graph_edges", "(", ")", ":", "if", "branch", "[", "'branch'", "]", ".", "ring", "==", "self", ":", "yield", "branch" ]
30
0.010811
def build_job(name=None, parameters=None):
    '''
    Initiate a build for the provided job.

    :param name: The name of the job to build; it is checked for existence.
    :param parameters: Parameters to send to the job.
    :return: True if successful, otherwise raise an exception.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.build_job jobname
    '''
    if not name:
        raise SaltInvocationError('Required parameter \'name\' is missing')

    server = _connect()

    if not job_exists(name):
        raise CommandExecutionError('Job \'{0}\' does not exist.'.format(name))

    try:
        server.build_job(name, parameters)
    except jenkins.JenkinsException as err:
        raise CommandExecutionError(
            'Encountered error building job \'{0}\': {1}'.format(name, err)
        )
    return True
[ "def", "build_job", "(", "name", "=", "None", ",", "parameters", "=", "None", ")", ":", "if", "not", "name", ":", "raise", "SaltInvocationError", "(", "'Required parameter \\'name\\' is missing'", ")", "server", "=", "_connect", "(", ")", "if", "not", "job_exists", "(", "name", ")", ":", "raise", "CommandExecutionError", "(", "'Job \\'{0}\\' does not exist.'", ".", "format", "(", "name", ")", ")", "try", ":", "server", ".", "build_job", "(", "name", ",", "parameters", ")", "except", "jenkins", ".", "JenkinsException", "as", "err", ":", "raise", "CommandExecutionError", "(", "'Encountered error building job \\'{0}\\': {1}'", ".", "format", "(", "name", ",", "err", ")", ")", "return", "True" ]
26.766667
0.001202
def sca_intensity(scatterer, h_pol=True): """Scattering intensity (phase function) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The differential scattering cross section. """ Z = scatterer.get_Z() return (Z[0,0] - Z[0,1]) if h_pol else (Z[0,0] + Z[0,1])
[ "def", "sca_intensity", "(", "scatterer", ",", "h_pol", "=", "True", ")", ":", "Z", "=", "scatterer", ".", "get_Z", "(", ")", "return", "(", "Z", "[", "0", ",", "0", "]", "-", "Z", "[", "0", ",", "1", "]", ")", "if", "h_pol", "else", "(", "Z", "[", "0", ",", "0", "]", "+", "Z", "[", "0", ",", "1", "]", ")" ]
32.384615
0.013857
def _instantiate_target(self, target_adaptor): """Given a TargetAdaptor struct previously parsed from a BUILD file, instantiate a Target.""" target_cls = self._target_types[target_adaptor.type_alias] try: # Pop dependencies, which were already consumed during construction. kwargs = target_adaptor.kwargs() kwargs.pop('dependencies') # Instantiate. if issubclass(target_cls, AppBase): return self._instantiate_app(target_cls, kwargs) elif target_cls is RemoteSources: return self._instantiate_remote_sources(kwargs) return target_cls(build_graph=self, **kwargs) except TargetDefinitionException: raise except Exception as e: raise TargetDefinitionException( target_adaptor.address, 'Failed to instantiate Target with type {}: {}'.format(target_cls, e))
[ "def", "_instantiate_target", "(", "self", ",", "target_adaptor", ")", ":", "target_cls", "=", "self", ".", "_target_types", "[", "target_adaptor", ".", "type_alias", "]", "try", ":", "# Pop dependencies, which were already consumed during construction.", "kwargs", "=", "target_adaptor", ".", "kwargs", "(", ")", "kwargs", ".", "pop", "(", "'dependencies'", ")", "# Instantiate.", "if", "issubclass", "(", "target_cls", ",", "AppBase", ")", ":", "return", "self", ".", "_instantiate_app", "(", "target_cls", ",", "kwargs", ")", "elif", "target_cls", "is", "RemoteSources", ":", "return", "self", ".", "_instantiate_remote_sources", "(", "kwargs", ")", "return", "target_cls", "(", "build_graph", "=", "self", ",", "*", "*", "kwargs", ")", "except", "TargetDefinitionException", ":", "raise", "except", "Exception", "as", "e", ":", "raise", "TargetDefinitionException", "(", "target_adaptor", ".", "address", ",", "'Failed to instantiate Target with type {}: {}'", ".", "format", "(", "target_cls", ",", "e", ")", ")" ]
42.2
0.013905
def nnz_obs_names(self): """ wrapper around pyemu.Pst.nnz_obs_names for listing non-zero observation names Returns ------- nnz_obs_names : list pyemu.Pst.nnz_obs_names """ if self.__pst is not None: return self.pst.nnz_obs_names else: return self.jco.obs_names
[ "def", "nnz_obs_names", "(", "self", ")", ":", "if", "self", ".", "__pst", "is", "not", "None", ":", "return", "self", ".", "pst", ".", "nnz_obs_names", "else", ":", "return", "self", ".", "jco", ".", "obs_names" ]
25.5
0.008108
def _pre_request(self, url, method = u"get", data = None, headers=None, **kwargs):
    """ Hook for manipulating the request data before the request is made. """
    return (url, method, data, headers, kwargs)
[ "def", "_pre_request", "(", "self", ",", "url", ",", "method", "=", "u\"get\"", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "(", "url", ",", "method", ",", "data", ",", "headers", ",", "kwargs", ")" ]
41.2
0.033333
def _gpdfit(x): """Estimate the parameters for the Generalized Pareto Distribution (GPD). Empirical Bayes estimate for the parameters of the generalized Pareto distribution given the data. Parameters ---------- x : array sorted 1D data array Returns ------- k : float estimated shape parameter sigma : float estimated scale parameter """ prior_bs = 3 prior_k = 10 len_x = len(x) m_est = 30 + int(len_x ** 0.5) b_ary = 1 - np.sqrt(m_est / (np.arange(1, m_est + 1, dtype=float) - 0.5)) b_ary /= prior_bs * x[int(len_x / 4 + 0.5) - 1] b_ary += 1 / x[-1] k_ary = np.log1p(-b_ary[:, None] * x).mean(axis=1) # pylint: disable=no-member len_scale = len_x * (np.log(-(b_ary / k_ary)) - k_ary - 1) weights = 1 / np.exp(len_scale - len_scale[:, None]).sum(axis=1) # remove negligible weights real_idxs = weights >= 10 * np.finfo(float).eps if not np.all(real_idxs): weights = weights[real_idxs] b_ary = b_ary[real_idxs] # normalise weights weights /= weights.sum() # posterior mean for b b_post = np.sum(b_ary * weights) # estimate for k k_post = np.log1p(-b_post * x).mean() # pylint: disable=invalid-unary-operand-type,no-member # add prior for k_post k_post = (len_x * k_post + prior_k * 0.5) / (len_x + prior_k) sigma = -k_post / b_post return k_post, sigma
[ "def", "_gpdfit", "(", "x", ")", ":", "prior_bs", "=", "3", "prior_k", "=", "10", "len_x", "=", "len", "(", "x", ")", "m_est", "=", "30", "+", "int", "(", "len_x", "**", "0.5", ")", "b_ary", "=", "1", "-", "np", ".", "sqrt", "(", "m_est", "/", "(", "np", ".", "arange", "(", "1", ",", "m_est", "+", "1", ",", "dtype", "=", "float", ")", "-", "0.5", ")", ")", "b_ary", "/=", "prior_bs", "*", "x", "[", "int", "(", "len_x", "/", "4", "+", "0.5", ")", "-", "1", "]", "b_ary", "+=", "1", "/", "x", "[", "-", "1", "]", "k_ary", "=", "np", ".", "log1p", "(", "-", "b_ary", "[", ":", ",", "None", "]", "*", "x", ")", ".", "mean", "(", "axis", "=", "1", ")", "# pylint: disable=no-member", "len_scale", "=", "len_x", "*", "(", "np", ".", "log", "(", "-", "(", "b_ary", "/", "k_ary", ")", ")", "-", "k_ary", "-", "1", ")", "weights", "=", "1", "/", "np", ".", "exp", "(", "len_scale", "-", "len_scale", "[", ":", ",", "None", "]", ")", ".", "sum", "(", "axis", "=", "1", ")", "# remove negligible weights", "real_idxs", "=", "weights", ">=", "10", "*", "np", ".", "finfo", "(", "float", ")", ".", "eps", "if", "not", "np", ".", "all", "(", "real_idxs", ")", ":", "weights", "=", "weights", "[", "real_idxs", "]", "b_ary", "=", "b_ary", "[", "real_idxs", "]", "# normalise weights", "weights", "/=", "weights", ".", "sum", "(", ")", "# posterior mean for b", "b_post", "=", "np", ".", "sum", "(", "b_ary", "*", "weights", ")", "# estimate for k", "k_post", "=", "np", ".", "log1p", "(", "-", "b_post", "*", "x", ")", ".", "mean", "(", ")", "# pylint: disable=invalid-unary-operand-type,no-member", "# add prior for k_post", "k_post", "=", "(", "len_x", "*", "k_post", "+", "prior_k", "*", "0.5", ")", "/", "(", "len_x", "+", "prior_k", ")", "sigma", "=", "-", "k_post", "/", "b_post", "return", "k_post", ",", "sigma" ]
28.958333
0.002088
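A rough sanity check of the empirical-Bayes GPD fit above: draw samples
from a known generalized Pareto via scipy and confirm that k and sigma are
recovered approximately. The parameter values are illustrative, and the
estimate is regularized by the prior, so expect a small bias.

import numpy as np
from scipy import stats

# _gpdfit expects a sorted 1D array, so sort the draws first.
x = np.sort(stats.genpareto.rvs(c=0.3, scale=1.5, size=2000, random_state=0))
k_hat, sigma_hat = _gpdfit(x)
print(k_hat, sigma_hat)  # roughly 0.3 and 1.5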
def get_conn(self): """Returns a Google Cloud Dataproc service object.""" http_authorized = self._authorize() return build( 'dataproc', self.api_version, http=http_authorized, cache_discovery=False)
[ "def", "get_conn", "(", "self", ")", ":", "http_authorized", "=", "self", ".", "_authorize", "(", ")", "return", "build", "(", "'dataproc'", ",", "self", ".", "api_version", ",", "http", "=", "http_authorized", ",", "cache_discovery", "=", "False", ")" ]
40.166667
0.00813
def create_logger(name, formatter=None, handler=None, level=None):
    """ Returns a new logger for the specified name. """
    logger = logging.getLogger(name)

    #: remove existing handlers
    logger.handlers = []

    #: use a standard out handler
    if handler is None:
        handler = logging.StreamHandler(sys.stdout)

    #: set the formatter when a formatter is given
    if formatter is not None:
        handler.setFormatter(formatter)

    #: set DEBUG level if no level is specified
    if level is None:
        level = logging.DEBUG

    handler.setLevel(level)
    logger.setLevel(level)
    logger.addHandler(handler)
    return logger
[ "def", "create_logger", "(", "name", ",", "formatter", "=", "None", ",", "handler", "=", "None", ",", "level", "=", "None", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "#: remove existing handlers", "logger", ".", "handlers", "=", "[", "]", "#: use a standard out handler", "if", "handler", "is", "None", ":", "handler", "=", "logging", ".", "StreamHandler", "(", "sys", ".", "stdout", ")", "#: set the formatter when a formatter is given", "if", "formatter", "is", "not", "None", ":", "handler", ".", "setFormatter", "(", "formatter", ")", "#: set DEBUG level if no level is specified", "if", "level", "is", "None", ":", "level", "=", "logging", ".", "DEBUG", "handler", ".", "setLevel", "(", "level", ")", "logger", ".", "setLevel", "(", "level", ")", "logger", ".", "addHandler", "(", "handler", ")", "return", "logger" ]
25.64
0.001504
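A minimal usage sketch for create_logger above (the format string is illustrative):

import logging

fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
log = create_logger('demo', formatter=fmt, level=logging.INFO)
log.info('logger configured')  # emitted to stdout via the StreamHandler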
def get_row_by_fsid(self, fs_id):
    '''Check whether this task exists in the Liststore.

    Returns the TreeModelRow if it exists, otherwise returns None.'''
    for row in self.liststore:
        if row[FSID_COL] == fs_id:
            return row
    return None
[ "def", "get_row_by_fsid", "(", "self", ",", "fs_id", ")", ":", "for", "row", "in", "self", ".", "liststore", ":", "if", "row", "[", "FSID_COL", "]", "==", "fs_id", ":", "return", "row", "return", "None" ]
32.142857
0.008658
def register_preprocessed_file(self, infile, pmid, extra_annotations):
    """Set up already preprocessed text file for reading with ISI reader.

    This is essentially a mock function to "register" already preprocessed
    files and get an IsiPreprocessor object that can be passed to
    the IsiProcessor.

    Parameters
    ----------
    infile : str
        Path to an already preprocessed text file (i.e. one ready to
        be sent for reading to ISI reader).
    pmid : str
        The PMID corresponding to the file
    extra_annotations : dict
        Extra annotations to be added to each statement, possibly including
        metadata about the source (annotations with the key "interaction"
        will be overridden)
    """
    infile_base = os.path.basename(infile)
    outfile = os.path.join(self.preprocessed_dir, infile_base)
    shutil.copyfile(infile, outfile)

    infile_key = os.path.splitext(infile_base)[0]

    self.pmids[infile_key] = pmid
    self.extra_annotations[infile_key] = extra_annotations
[ "def", "register_preprocessed_file", "(", "self", ",", "infile", ",", "pmid", ",", "extra_annotations", ")", ":", "infile_base", "=", "os", ".", "path", ".", "basename", "(", "infile", ")", "outfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "preprocessed_dir", ",", "infile_base", ")", "shutil", ".", "copyfile", "(", "infile", ",", "outfile", ")", "infile_key", "=", "os", ".", "path", ".", "splitext", "(", "infile_base", ")", "[", "0", "]", "self", ".", "pmids", "[", "infile_key", "]", "=", "pmid", "self", ".", "extra_annotations", "[", "infile_key", "]", "=", "extra_annotations" ]
40.444444
0.001789
def serialize_to_xml(root, block):
    """
    Serialize the Peer Instruction XBlock's content to XML.

    Args:
        block (PeerInstructionXBlock): The peer instruction block to serialize.
        root (etree.Element): The XML root node to update.

    Returns:
        etree.Element
    """
    root.tag = 'ubcpi'

    if block.rationale_size is not None:
        if block.rationale_size.get('min'):
            root.set('rationale_size_min', unicode(block.rationale_size.get('min')))
        if block.rationale_size.get('max'):
            root.set('rationale_size_max', unicode(block.rationale_size['max']))

    if block.algo:
        if block.algo.get('name'):
            root.set('algorithm', block.algo.get('name'))
        if block.algo.get('num_responses'):
            root.set('num_responses', unicode(block.algo.get('num_responses')))

    display_name = etree.SubElement(root, 'display_name')
    display_name.text = block.display_name

    question = etree.SubElement(root, 'question')
    question_text = etree.SubElement(question, 'text')
    question_text.text = block.question_text['text']
    serialize_image(block.question_text, question)

    options = etree.SubElement(root, 'options')
    serialize_options(options, block)

    seeds = etree.SubElement(root, 'seeds')
    serialize_seeds(seeds, block)
[ "def", "serialize_to_xml", "(", "root", ",", "block", ")", ":", "root", ".", "tag", "=", "'ubcpi'", "if", "block", ".", "rationale_size", "is", "not", "None", ":", "if", "block", ".", "rationale_size", ".", "get", "(", "'min'", ")", ":", "root", ".", "set", "(", "'rationale_size_min'", ",", "unicode", "(", "block", ".", "rationale_size", ".", "get", "(", "'min'", ")", ")", ")", "if", "block", ".", "rationale_size", ".", "get", "(", "'max'", ")", ":", "root", ".", "set", "(", "'rationale_size_max'", ",", "unicode", "(", "block", ".", "rationale_size", "[", "'max'", "]", ")", ")", "if", "block", ".", "algo", ":", "if", "block", ".", "algo", ".", "get", "(", "'name'", ")", ":", "root", ".", "set", "(", "'algorithm'", ",", "block", ".", "algo", ".", "get", "(", "'name'", ")", ")", "if", "block", ".", "algo", ".", "get", "(", "'num_responses'", ")", ":", "root", ".", "set", "(", "'num_responses'", ",", "unicode", "(", "block", ".", "algo", ".", "get", "(", "'num_responses'", ")", ")", ")", "display_name", "=", "etree", ".", "SubElement", "(", "root", ",", "'display_name'", ")", "display_name", ".", "text", "=", "block", ".", "display_name", "question", "=", "etree", ".", "SubElement", "(", "root", ",", "'question'", ")", "question_text", "=", "etree", ".", "SubElement", "(", "question", ",", "'text'", ")", "question_text", ".", "text", "=", "block", ".", "question_text", "[", "'text'", "]", "serialize_image", "(", "block", ".", "question_text", ",", "question", ")", "options", "=", "etree", ".", "SubElement", "(", "root", ",", "'options'", ")", "serialize_options", "(", "options", ",", "block", ")", "seeds", "=", "etree", ".", "SubElement", "(", "root", ",", "'seeds'", ")", "serialize_seeds", "(", "seeds", ",", "block", ")" ]
33.153846
0.002254
def make_name(super_name, default_super_name, sub_name):
    """Helper which makes a `str` name; useful for tf.compat.v1.name_scope."""
    name = super_name if super_name is not None else default_super_name
    if sub_name is not None:
        name += '_' + sub_name
    return name
[ "def", "make_name", "(", "super_name", ",", "default_super_name", ",", "sub_name", ")", ":", "name", "=", "super_name", "if", "super_name", "is", "not", "None", "else", "default_super_name", "if", "sub_name", "is", "not", "None", ":", "name", "+=", "'_'", "+", "sub_name", "return", "name" ]
44.333333
0.01845
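A quick sketch of how make_name composes names (values are illustrative):

print(make_name(None, 'hmc', 'step_size'))  # -> 'hmc_step_size' (falls back to the default)
print(make_name('my_kernel', 'hmc', None))  # -> 'my_kernel' (no sub-name appended)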
def getCenter(self):
    """ Return the ``Location`` of the center of this region """
    return Location(self.x+(self.w/2), self.y+(self.h/2))
[ "def", "getCenter", "(", "self", ")", ":", "return", "Location", "(", "self", ".", "x", "+", "(", "self", ".", "w", "/", "2", ")", ",", "self", ".", "y", "+", "(", "self", ".", "h", "/", "2", ")", ")" ]
49.666667
0.013245
def decode(token, certs=None, verify=True, audience=None):
    """Decode and verify a JWT.

    Args:
        token (str): The encoded JWT.
        certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
            certificate used to validate the JWT signature. If bytes or
            string, it must be the public key certificate in PEM format.
            If a mapping, it must be a mapping of key IDs to public key
            certificates in PEM format. The mapping must contain the same
            key ID that's specified in the token's header.
        verify (bool): Whether to perform signature and claim validation.
            Verification is done by default.
        audience (str): The audience claim, 'aud', that this JWT should
            contain. If None then the JWT's 'aud' parameter is not verified.

    Returns:
        Mapping[str, str]: The deserialized JSON payload in the JWT.

    Raises:
        ValueError: if any verification checks failed.
    """
    header, payload, signed_section, signature = _unverified_decode(token)

    if not verify:
        return payload

    # If certs is specified as a dictionary of key IDs to certificates, then
    # use the certificate identified by the key ID in the token header.
    if isinstance(certs, collections.Mapping):
        key_id = header.get('kid')
        if key_id:
            if key_id not in certs:
                raise ValueError(
                    'Certificate for key id {} not found.'.format(key_id))
            certs_to_check = [certs[key_id]]
        # If there's no key id in the header, check against all of the certs.
        else:
            certs_to_check = certs.values()
    else:
        certs_to_check = certs

    # Verify that the signature matches the message.
    if not crypt.verify_signature(signed_section, signature, certs_to_check):
        raise ValueError('Could not verify token signature.')

    # Verify the issued at and created times in the payload.
    _verify_iat_and_exp(payload)

    # Check audience.
    if audience is not None:
        claim_audience = payload.get('aud')
        if audience != claim_audience:
            raise ValueError(
                'Token has wrong audience {}, expected {}'.format(
                    claim_audience, audience))

    return payload
[ "def", "decode", "(", "token", ",", "certs", "=", "None", ",", "verify", "=", "True", ",", "audience", "=", "None", ")", ":", "header", ",", "payload", ",", "signed_section", ",", "signature", "=", "_unverified_decode", "(", "token", ")", "if", "not", "verify", ":", "return", "payload", "# If certs is specified as a dictionary of key IDs to certificates, then", "# use the certificate identified by the key ID in the token header.", "if", "isinstance", "(", "certs", ",", "collections", ".", "Mapping", ")", ":", "key_id", "=", "header", ".", "get", "(", "'kid'", ")", "if", "key_id", ":", "if", "key_id", "not", "in", "certs", ":", "raise", "ValueError", "(", "'Certificate for key id {} not found.'", ".", "format", "(", "key_id", ")", ")", "certs_to_check", "=", "[", "certs", "[", "key_id", "]", "]", "# If there's no key id in the header, check against all of the certs.", "else", ":", "certs_to_check", "=", "certs", ".", "values", "(", ")", "else", ":", "certs_to_check", "=", "certs", "# Verify that the signature matches the message.", "if", "not", "crypt", ".", "verify_signature", "(", "signed_section", ",", "signature", ",", "certs_to_check", ")", ":", "raise", "ValueError", "(", "'Could not verify token signature.'", ")", "# Verify the issued at and created times in the payload.", "_verify_iat_and_exp", "(", "payload", ")", "# Check audience.", "if", "audience", "is", "not", "None", ":", "claim_audience", "=", "payload", ".", "get", "(", "'aud'", ")", "if", "audience", "!=", "claim_audience", ":", "raise", "ValueError", "(", "'Token has wrong audience {}, expected {}'", ".", "format", "(", "claim_audience", ",", "audience", ")", ")", "return", "payload" ]
38.896552
0.000432
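A hedged sketch of calling decode above with verification disabled; it hand-builds an unsigned JWT purely for illustration and assumes the module's _unverified_decode tolerates an empty signature segment when verify=False:

import base64
import json

def _b64(obj):
    raw = json.dumps(obj).encode('utf-8')
    return base64.urlsafe_b64encode(raw).rstrip(b'=')

header = _b64({'alg': 'none', 'typ': 'JWT'})
payload = _b64({'sub': 'user-123', 'aud': 'my-audience'})
token = b'.'.join([header, payload, b''])  # empty signature; acceptable only with verify=False

claims = decode(token, verify=False)
print(claims['sub'])  # -> 'user-123'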
def logscale(x_min, x_max, n):
    """
    :param x_min: minimum value
    :param x_max: maximum value
    :param n: number of steps
    :returns: an array of n values from x_min to x_max
    """
    if not (isinstance(n, int) and n > 0):
        raise ValueError('n must be a positive integer, got %s' % n)
    if x_min <= 0:
        raise ValueError('x_min must be positive, got %s' % x_min)
    if x_max <= x_min:
        raise ValueError('x_max (%s) must be bigger than x_min (%s)' %
                         (x_max, x_min))
    delta = numpy.log(x_max / x_min)
    return numpy.exp(delta * numpy.arange(n) / (n - 1)) * x_min
[ "def", "logscale", "(", "x_min", ",", "x_max", ",", "n", ")", ":", "if", "not", "(", "isinstance", "(", "n", ",", "int", ")", "and", "n", ">", "0", ")", ":", "raise", "ValueError", "(", "'n must be a positive integer, got %s'", "%", "n", ")", "if", "x_min", "<=", "0", ":", "raise", "ValueError", "(", "'x_min must be positive, got %s'", "%", "x_min", ")", "if", "x_max", "<=", "x_min", ":", "raise", "ValueError", "(", "'x_max (%s) must be bigger than x_min (%s)'", "%", "(", "x_max", ",", "x_min", ")", ")", "delta", "=", "numpy", ".", "log", "(", "x_max", "/", "x_min", ")", "return", "numpy", ".", "exp", "(", "delta", "*", "numpy", ".", "arange", "(", "n", ")", "/", "(", "n", "-", "1", ")", ")", "*", "x_min" ]
38.375
0.00159
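A minimal usage sketch for logscale (assumes numpy is importable under the name the function uses):

values = logscale(0.1, 10.0, 5)
print(values)  # -> [0.1, 0.31622..., 1.0, 3.1622..., 10.0]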
def _outer_error_is_decreasing(self):
    """True if outer iteration error is decreasing."""
    is_decreasing, self._last_outer_error = self._error_is_decreasing(
        self._last_outer_error)
    return is_decreasing
[ "def", "_outer_error_is_decreasing", "(", "self", ")", ":", "is_decreasing", ",", "self", ".", "_last_outer_error", "=", "self", ".", "_error_is_decreasing", "(", "self", ".", "_last_outer_error", ")", "return", "is_decreasing" ]
55
0.013453
def _getshapes_2d(center, max_radius, shape):
    """Calculate indices and slices for the bounding box of a disk."""
    index_mean = shape * center
    index_radius = max_radius / 2.0 * np.array(shape)

    # Avoid negative indices
    min_idx = np.maximum(np.floor(index_mean - index_radius), 0).astype(int)
    max_idx = np.ceil(index_mean + index_radius).astype(int)
    idx = [slice(minx, maxx) for minx, maxx in zip(min_idx, max_idx)]
    shapes = [(idx[0], slice(None)),
              (slice(None), idx[1])]
    return tuple(idx), tuple(shapes)
[ "def", "_getshapes_2d", "(", "center", ",", "max_radius", ",", "shape", ")", ":", "index_mean", "=", "shape", "*", "center", "index_radius", "=", "max_radius", "/", "2.0", "*", "np", ".", "array", "(", "shape", ")", "# Avoid negative indices", "min_idx", "=", "np", ".", "maximum", "(", "np", ".", "floor", "(", "index_mean", "-", "index_radius", ")", ",", "0", ")", ".", "astype", "(", "int", ")", "max_idx", "=", "np", ".", "ceil", "(", "index_mean", "+", "index_radius", ")", ".", "astype", "(", "int", ")", "idx", "=", "[", "slice", "(", "minx", ",", "maxx", ")", "for", "minx", ",", "maxx", "in", "zip", "(", "min_idx", ",", "max_idx", ")", "]", "shapes", "=", "[", "(", "idx", "[", "0", "]", ",", "slice", "(", "None", ")", ")", ",", "(", "slice", "(", "None", ")", ",", "idx", "[", "1", "]", ")", "]", "return", "tuple", "(", "idx", ")", ",", "tuple", "(", "shapes", ")" ]
45
0.001815
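A minimal sketch of _getshapes_2d on a 100x100 grid (values are illustrative; assumes numpy as np):

import numpy as np

idx, shapes = _getshapes_2d(np.array([0.6, 0.6]), 0.5, np.array([100, 100]))
print(idx)     # (slice(35, 85, None), slice(35, 85, None)) -- the disk's bounding box
print(shapes)  # per-axis (row, column) slice pairs for masking each dimension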
def _get_queue_for_the_action(self, action):
    """Find action queue for the action depending on the module.
    The id is found with action modulo on action id

    :param action: the action that needs an action queue to be assigned
    :type action: object
    :return: worker id and queue. (0, None) if no queue for the module_type
    :rtype: tuple
    """
    # get the module name, if not, take fork
    mod = getattr(action, 'module_type', 'fork')

    queues = list(self.q_by_mod[mod].items())

    # Maybe there is no more queue, it's very bad!
    if not queues:
        return (0, None)

    # if not get action round robin index to get action queue based
    # on the action id
    self.rr_qid = (self.rr_qid + 1) % len(queues)
    (worker_id, queue) = queues[self.rr_qid]

    # return the id of the worker (i), and its queue
    return (worker_id, queue)
[ "def", "_get_queue_for_the_action", "(", "self", ",", "action", ")", ":", "# get the module name, if not, take fork", "mod", "=", "getattr", "(", "action", ",", "'module_type'", ",", "'fork'", ")", "queues", "=", "list", "(", "self", ".", "q_by_mod", "[", "mod", "]", ".", "items", "(", ")", ")", "# Maybe there is no more queue, it's very bad!", "if", "not", "queues", ":", "return", "(", "0", ",", "None", ")", "# if not get action round robin index to get action queue based", "# on the action id", "self", ".", "rr_qid", "=", "(", "self", ".", "rr_qid", "+", "1", ")", "%", "len", "(", "queues", ")", "(", "worker_id", ",", "queue", ")", "=", "queues", "[", "self", ".", "rr_qid", "]", "# return the id of the worker (i), and its queue", "return", "(", "worker_id", ",", "queue", ")" ]
38
0.002139
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    """

    cookie_dict = {}

    for _, cookies in cj._cookies.items():
        for _, cookies in cookies.items():
            for cookie in cookies.values():
                # print cookie
                cookie_dict[cookie.name] = cookie.value

    return cookie_dict
[ "def", "dict_from_cookiejar", "(", "cj", ")", ":", "cookie_dict", "=", "{", "}", "for", "_", ",", "cookies", "in", "cj", ".", "_cookies", ".", "items", "(", ")", ":", "for", "_", ",", "cookies", "in", "cookies", ".", "items", "(", ")", ":", "for", "cookie", "in", "cookies", ".", "values", "(", ")", ":", "# print cookie", "cookie_dict", "[", "cookie", ".", "name", "]", "=", "cookie", ".", "value", "return", "cookie_dict" ]
26.666667
0.002415
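A minimal sketch using a jar built by hand (assumes the requests package, whose RequestsCookieJar is a CookieJar subclass carrying the _cookies attribute the function reads):

import requests

jar = requests.cookies.RequestsCookieJar()
jar.set('theme', 'dark', domain='example.com', path='/')
print(dict_from_cookiejar(jar))  # -> {'theme': 'dark'}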
def rgb(red, green, blue, content):
    """
    Colors a content using rgb for h

    :param red: [0-5]
    :type red: int
    :param green: [0-5]
    :type green: int
    :param blue: [0-5]
    :type blue: int
    :param content: Whatever you want to say...
    :type content: unicode
    :return: ansi string
    :rtype: unicode
    """
    color = 16 + 36 * red + 6 * green + blue
    return encode('38;5;' + str(color)) + content + encode(DEFAULT)
[ "def", "rgb", "(", "red", ",", "green", ",", "blue", ",", "content", ")", ":", "color", "=", "16", "+", "36", "*", "red", "+", "6", "*", "green", "+", "blue", "return", "encode", "(", "'38;5;'", "+", "str", "(", "color", ")", ")", "+", "content", "+", "encode", "(", "DEFAULT", ")" ]
28.8
0.002242
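A minimal sketch for rgb above (assumes encode and DEFAULT from the same module are in scope; the 16 + 36r + 6g + b formula maps the triple onto the 256-colour ANSI cube):

print(rgb(1, 5, 1, 'hello'))  # 'hello' in a greenish shade of the 256-colour cube
print(rgb(5, 0, 0, 'error'))  # 'error' in a strong red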
def get_polygons(self, by_spec=False, depth=None):
    """
    Returns a list of polygons in this cell.

    Parameters
    ----------
    by_spec : bool
        If ``True``, the return value is a dictionary with the
        polygons of each individual pair (layer, datatype).
    depth : integer or ``None``
        If not ``None``, defines from how many reference levels to
        retrieve polygons.  References below this level will result
        in a bounding box.  If ``by_spec`` is ``True`` the key will
        be the name of this cell.

    Returns
    -------
    out : list of array-like[N][2] or dictionary
        List containing the coordinates of the vertices of each
        polygon, or dictionary with the list of polygons (if
        ``by_spec`` is ``True``).
    """
    if depth is not None and depth < 0:
        bb = self.get_bounding_box()
        if bb is None:
            return {} if by_spec else []
        pts = [
            numpy.array([(bb[0, 0], bb[0, 1]), (bb[0, 0], bb[1, 1]),
                         (bb[1, 0], bb[1, 1]), (bb[1, 0], bb[0, 1])])
        ]
        polygons = {self.name: pts} if by_spec else pts
    else:
        if by_spec:
            polygons = {}
            for element in self.elements:
                if isinstance(element, PolygonSet):
                    for ii in range(len(element.polygons)):
                        key = (element.layers[ii], element.datatypes[ii])
                        if key in polygons:
                            polygons[key].append(
                                numpy.array(element.polygons[ii]))
                        else:
                            polygons[key] = [
                                numpy.array(element.polygons[ii])
                            ]
                else:
                    cell_polygons = element.get_polygons(
                        True, None if depth is None else depth - 1)
                    for kk in cell_polygons.keys():
                        if kk in polygons:
                            polygons[kk].extend(cell_polygons[kk])
                        else:
                            polygons[kk] = cell_polygons[kk]
        else:
            polygons = []
            for element in self.elements:
                if isinstance(element, PolygonSet):
                    for points in element.polygons:
                        polygons.append(numpy.array(points))
                else:
                    polygons.extend(
                        element.get_polygons(
                            depth=None if depth is None else depth - 1))
    return polygons
[ "def", "get_polygons", "(", "self", ",", "by_spec", "=", "False", ",", "depth", "=", "None", ")", ":", "if", "depth", "is", "not", "None", "and", "depth", "<", "0", ":", "bb", "=", "self", ".", "get_bounding_box", "(", ")", "if", "bb", "is", "None", ":", "return", "{", "}", "if", "by_spec", "else", "[", "]", "pts", "=", "[", "numpy", ".", "array", "(", "[", "(", "bb", "[", "0", ",", "0", "]", ",", "bb", "[", "0", ",", "1", "]", ")", ",", "(", "bb", "[", "0", ",", "0", "]", ",", "bb", "[", "1", ",", "1", "]", ")", ",", "(", "bb", "[", "1", ",", "0", "]", ",", "bb", "[", "1", ",", "1", "]", ")", ",", "(", "bb", "[", "1", ",", "0", "]", ",", "bb", "[", "0", ",", "1", "]", ")", "]", ")", "]", "polygons", "=", "{", "self", ".", "name", ":", "pts", "}", "if", "by_spec", "else", "pts", "else", ":", "if", "by_spec", ":", "polygons", "=", "{", "}", "for", "element", "in", "self", ".", "elements", ":", "if", "isinstance", "(", "element", ",", "PolygonSet", ")", ":", "for", "ii", "in", "range", "(", "len", "(", "element", ".", "polygons", ")", ")", ":", "key", "=", "(", "element", ".", "layers", "[", "ii", "]", ",", "element", ".", "datatypes", "[", "ii", "]", ")", "if", "key", "in", "polygons", ":", "polygons", "[", "key", "]", ".", "append", "(", "numpy", ".", "array", "(", "element", ".", "polygons", "[", "ii", "]", ")", ")", "else", ":", "polygons", "[", "key", "]", "=", "[", "numpy", ".", "array", "(", "element", ".", "polygons", "[", "ii", "]", ")", "]", "else", ":", "cell_polygons", "=", "element", ".", "get_polygons", "(", "True", ",", "None", "if", "depth", "is", "None", "else", "depth", "-", "1", ")", "for", "kk", "in", "cell_polygons", ".", "keys", "(", ")", ":", "if", "kk", "in", "polygons", ":", "polygons", "[", "kk", "]", ".", "extend", "(", "cell_polygons", "[", "kk", "]", ")", "else", ":", "polygons", "[", "kk", "]", "=", "cell_polygons", "[", "kk", "]", "else", ":", "polygons", "=", "[", "]", "for", "element", "in", "self", ".", "elements", ":", "if", "isinstance", "(", "element", ",", "PolygonSet", ")", ":", "for", "points", "in", "element", ".", "polygons", ":", "polygons", ".", "append", "(", "numpy", ".", "array", "(", "points", ")", ")", "else", ":", "polygons", ".", "extend", "(", "element", ".", "get_polygons", "(", "depth", "=", "None", "if", "depth", "is", "None", "else", "depth", "-", "1", ")", ")", "return", "polygons" ]
43.625
0.000701
def assemble(self):
    """Assemble a QasmQobjInstruction"""
    instruction = super().assemble()
    if self.label:
        instruction.label = self.label
    return instruction
[ "def", "assemble", "(", "self", ")", ":", "instruction", "=", "super", "(", ")", ".", "assemble", "(", ")", "if", "self", ".", "label", ":", "instruction", ".", "label", "=", "self", ".", "label", "return", "instruction" ]
32.166667
0.010101
def longest_monotonic_subseq_length(xs):
    '''Return the length of the longest monotonic subsequence of xs, second
    return value is the difference between increasing and decreasing lengths.

    >>> longest_monotonic_subseq_length((4, 5, 1, 2, 3))
    (3, 1)
    >>> longest_monotonic_subseq_length((1, 2, 3, 5, 4))
    (4, 2)
    >>> longest_monotonic_subseq_length((1, 2, 1))
    (2, 0)
    '''
    li = longest_increasing_subseq_length(xs)
    ld = longest_decreasing_subseq_length(xs)
    return max(li, ld), li - ld
[ "def", "longest_monotonic_subseq_length", "(", "xs", ")", ":", "li", "=", "longest_increasing_subseq_length", "(", "xs", ")", "ld", "=", "longest_decreasing_subseq_length", "(", "xs", ")", "return", "max", "(", "li", ",", "ld", ")", ",", "li", "-", "ld" ]
36.571429
0.001905
def docker_to_uuid(uuid):
    '''
    Get the image uuid from an imported docker image

    .. versionadded:: 2019.2.0
    '''
    if _is_uuid(uuid):
        return uuid
    if _is_docker_uuid(uuid):
        images = list_installed(verbose=True)
        for image_uuid in images:
            if 'name' not in images[image_uuid]:
                continue
            if images[image_uuid]['name'] == uuid:
                return image_uuid
    return None
[ "def", "docker_to_uuid", "(", "uuid", ")", ":", "if", "_is_uuid", "(", "uuid", ")", ":", "return", "uuid", "if", "_is_docker_uuid", "(", "uuid", ")", ":", "images", "=", "list_installed", "(", "verbose", "=", "True", ")", "for", "image_uuid", "in", "images", ":", "if", "'name'", "not", "in", "images", "[", "image_uuid", "]", ":", "continue", "if", "images", "[", "image_uuid", "]", "[", "'name'", "]", "==", "uuid", ":", "return", "image_uuid", "return", "None" ]
27.4375
0.002203
def tplot_options(option, value):
    """
    This function allows the user to set several global options for the
    generated plots.

    Parameters:
        option : str
            The name of the option.  See section below
        value : str/int/float/list
            The value of the option.  See section below.

    Options:
        ============  ==========  =====
        Options       Value type  Notes
        ============  ==========  =====
        title         str         Title of the entire output
        title_size    int         Font size of the output
        wsize         [int, int]  [height, width], pixel size of the plot window
        title_align   int         Offset position in pixels of the title
        var_label     str         Name of the tplot variable to be used as another x axis
        alt_range     [flt, flt]  The min and max altitude to be plotted on all alt plots
        map_x_range   [int, int]  The min and max longitude to be plotted on all map plots
        map_y_range   [int, int]  The min and max latitude to be plotted on all map plots
        x_range       [flt, flt]  The min and max x_range (usually time) to be plotted on all Spec/1D plots
        data_gap      int         Number of seconds with consecutive nan values allowed before no interp should occur
        crosshair     bool        Option allowing crosshairs and crosshair legend
        roi           [str, str]  Times between which there's a region of interest for a user
        ============  ==========  =====

    Returns:
        None

    Examples:
        >>> # Set the plot title
        >>> import pytplot
        >>> pytplot.tplot_options('title', 'SWEA Data for Orbit 1563')

        >>> # Set the window size
        >>> pytplot.tplot_options('wsize', [1000,500])

    """

    option = option.lower()

    temp = tplot_utilities.set_tplot_options(option, value, pytplot.tplot_opt_glob)
    pytplot.tplot_opt_glob = temp

    return
[ "def", "tplot_options", "(", "option", ",", "value", ")", ":", "option", "=", "option", ".", "lower", "(", ")", "temp", "=", "tplot_utilities", ".", "set_tplot_options", "(", "option", ",", "value", ",", "pytplot", ".", "tplot_opt_glob", ")", "pytplot", ".", "tplot_opt_glob", "=", "temp", "return" ]
40.12
0.015085
def stripe_to_db(self, data):
    """Convert the raw timestamp value to a DateTime representation."""
    val = data.get(self.name)

    # Note: 0 is a possible return value, which is 'falseish'
    if val is not None:
        return convert_tstamp(val)
[ "def", "stripe_to_db", "(", "self", ",", "data", ")", ":", "val", "=", "data", ".", "get", "(", "self", ".", "name", ")", "# Note: 0 is a possible return value, which is 'falseish'", "if", "val", "is", "not", "None", ":", "return", "convert_tstamp", "(", "val", ")" ]
33.428571
0.029167
def update_limits(self, change_source_lower_limit=None, change_source_upper_limit=None):
    """
    updates the limits (lower and upper) of the update manager instance

    :param change_source_lower_limit: [[i_model, ['param_name', ...], [value1, value2, ...]]]
    :param change_source_upper_limit: [[i_model, ['param_name', ...], [value1, value2, ...]]]
    :return: updates internal state of lower and upper limits accessible from outside
    """
    if change_source_lower_limit is not None:
        self._source_lower = self._update_limit(change_source_lower_limit, self._source_lower)
    if change_source_upper_limit is not None:
        self._source_upper = self._update_limit(change_source_upper_limit, self._source_upper)
[ "def", "update_limits", "(", "self", ",", "change_source_lower_limit", "=", "None", ",", "change_source_upper_limit", "=", "None", ")", ":", "if", "not", "change_source_lower_limit", "is", "None", ":", "self", ".", "_source_lower", "=", "self", ".", "_update_limit", "(", "change_source_lower_limit", ",", "self", ".", "_source_lower", ")", "if", "not", "change_source_upper_limit", "is", "None", ":", "self", ".", "_source_upper", "=", "self", ".", "_update_limit", "(", "change_source_upper_limit", ",", "self", ".", "_source_upper", ")" ]
60.454545
0.013333
def validate_capacity(capacity):
    """Validate ScalingConfiguration capacity for serverless DBCluster"""
    if capacity not in VALID_SCALING_CONFIGURATION_CAPACITIES:
        raise ValueError(
            "ScalingConfiguration capacity must be one of: {}".format(
                ", ".join(map(
                    str,
                    VALID_SCALING_CONFIGURATION_CAPACITIES
                ))
            )
        )
    return capacity
[ "def", "validate_capacity", "(", "capacity", ")", ":", "if", "capacity", "not", "in", "VALID_SCALING_CONFIGURATION_CAPACITIES", ":", "raise", "ValueError", "(", "\"ScalingConfiguration capacity must be one of: {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "map", "(", "str", ",", "VALID_SCALING_CONFIGURATION_CAPACITIES", ")", ")", ")", ")", "return", "capacity" ]
33.307692
0.002247
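A minimal sketch for validate_capacity (assumes the module's VALID_SCALING_CONFIGURATION_CAPACITIES allows 2 but not 3, as in the Aurora Serverless capacity ladder of powers of two):

print(validate_capacity(2))  # -> 2, passed through unchanged
validate_capacity(3)         # raises ValueError listing the allowed capacities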
def project_status():
    """..."""

    r = Response()
    try:
        project = cauldron.project.get_internal_project()
        if project:
            r.update(project=project.status())
        else:
            r.update(project=None)
    except Exception as err:
        r.fail(
            code='PROJECT_STATUS_ERROR',
            message='Unable to check status of currently opened project',
            error=err
        )

    r.update(server=server_runner.get_server_data())

    return flask.jsonify(r.serialize())
[ "def", "project_status", "(", ")", ":", "r", "=", "Response", "(", ")", "try", ":", "project", "=", "cauldron", ".", "project", ".", "get_internal_project", "(", ")", "if", "project", ":", "r", ".", "update", "(", "project", "=", "project", ".", "status", "(", ")", ")", "else", ":", "r", ".", "update", "(", "project", "=", "None", ")", "except", "Exception", "as", "err", ":", "r", ".", "fail", "(", "code", "=", "'PROJECT_STATUS_ERROR'", ",", "message", "=", "'Unable to check status of currently opened project'", ",", "error", "=", "err", ")", "r", ".", "update", "(", "server", "=", "server_runner", ".", "get_server_data", "(", ")", ")", "return", "flask", ".", "jsonify", "(", "r", ".", "serialize", "(", ")", ")" ]
26.631579
0.001908
def addai(argname, condition=None):
    r"""
    Add an "AI" exception in the global exception handler.

    An "AI" exception is of the type
    :code:`RuntimeError('Argument \`*[argname]*\` is not valid')` where
    :code:`*[argname]*` is the value of the **argname** argument

    :param argname: Argument name
    :type  argname: string

    :param condition: Flag that indicates whether the exception is raised
                      *(True)* or not *(False)*. If None the flag is not
                      used and no exception is raised
    :type  condition: boolean or None

    :rtype: (if condition is not given or None) function

    :raises:
     * RuntimeError (Argument \`argname\` is not valid)

     * RuntimeError (Argument \`condition\` is not valid)
    """
    # pylint: disable=C0123
    if not isinstance(argname, str):
        raise RuntimeError("Argument `argname` is not valid")
    if (condition is not None) and (type(condition) != bool):
        raise RuntimeError("Argument `condition` is not valid")
    obj = _ExObj(RuntimeError, "Argument `{0}` is not valid".format(argname), condition)
    return obj.craise
[ "def", "addai", "(", "argname", ",", "condition", "=", "None", ")", ":", "# pylint: disable=C0123", "if", "not", "isinstance", "(", "argname", ",", "str", ")", ":", "raise", "RuntimeError", "(", "\"Argument `argname` is not valid\"", ")", "if", "(", "condition", "is", "not", "None", ")", "and", "(", "type", "(", "condition", ")", "!=", "bool", ")", ":", "raise", "RuntimeError", "(", "\"Argument `condition` is not valid\"", ")", "obj", "=", "_ExObj", "(", "RuntimeError", ",", "\"Argument `{0}` is not valid\"", ".", "format", "(", "argname", ")", ",", "condition", ")", "return", "obj", ".", "craise" ]
37
0.001756
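A minimal sketch of addai guarding an argument (the function name and validity check are illustrative); per the docstring, passing condition=True raises the canned exception:

def area(radius):
    # raises RuntimeError('Argument `radius` is not valid') when the condition is True
    addai('radius', not isinstance(radius, (int, float)) or radius <= 0)
    return 3.14159 * radius ** 2

area(-1)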
def spawn(func, *args, **kwargs):
    """Create a task to run ``func(*args, **kwargs)``.

    Returns a :class:`Task` object.

    Execution control returns immediately to the caller; the created task
    is merely scheduled to be run at the next available opportunity.
    Use :func:`spawn_later` to arrange for tasks to be spawned after a
    finite delay.
    """
    t = Task(target=func, args=args, kwargs=kwargs)
    t.start()
    return t
[ "def", "spawn", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "t", "=", "Task", "(", "target", "=", "func", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "t", ".", "start", "(", ")", "return", "t" ]
36.166667
0.002247
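A minimal sketch of spawn above (the greeting function is illustrative):

def greet(name, punctuation='!'):
    print('hello, ' + name + punctuation)

task = spawn(greet, 'world', punctuation='?')  # scheduled; returns the Task immediately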
def get_overlapping_ranges(self, collection_link, partition_key_ranges):
    '''
    Given a partition key range and a collection,
    returns the list of overlapping partition key ranges

    :param str collection_link:
        The name of the collection.
    :param list partition_key_ranges:
        List of partition key ranges.

    :return:
        List of overlapping partition key ranges.
    :rtype: list
    '''
    cl = self._documentClient

    collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)

    collection_routing_map = self._collection_routing_map_by_item.get(collection_id)
    if collection_routing_map is None:
        collection_pk_ranges = list(cl._ReadPartitionKeyRanges(collection_link))
        # for large collections, a split may complete between the read partition key
        # ranges query page responses, causing the partitionKeyRanges to have both
        # the children ranges and their parents. Therefore, we need to discard the
        # parent ranges to have a valid routing map.
        collection_pk_ranges = _PartitionKeyRangeCache._discard_parent_ranges(collection_pk_ranges)
        collection_routing_map = _CollectionRoutingMap.CompleteRoutingMap(
            [(r, True) for r in collection_pk_ranges], collection_id)
        self._collection_routing_map_by_item[collection_id] = collection_routing_map
    return collection_routing_map.get_overlapping_ranges(partition_key_ranges)
[ "def", "get_overlapping_ranges", "(", "self", ",", "collection_link", ",", "partition_key_ranges", ")", ":", "cl", "=", "self", ".", "_documentClient", "collection_id", "=", "base", ".", "GetResourceIdOrFullNameFromLink", "(", "collection_link", ")", "collection_routing_map", "=", "self", ".", "_collection_routing_map_by_item", ".", "get", "(", "collection_id", ")", "if", "collection_routing_map", "is", "None", ":", "collection_pk_ranges", "=", "list", "(", "cl", ".", "_ReadPartitionKeyRanges", "(", "collection_link", ")", ")", "# for large collections, a split may complete between the read partition key ranges query page responses, ", "# causing the partitionKeyRanges to have both the children ranges and their parents. Therefore, we need ", "# to discard the parent ranges to have a valid routing map.", "collection_pk_ranges", "=", "_PartitionKeyRangeCache", ".", "_discard_parent_ranges", "(", "collection_pk_ranges", ")", "collection_routing_map", "=", "_CollectionRoutingMap", ".", "CompleteRoutingMap", "(", "[", "(", "r", ",", "True", ")", "for", "r", "in", "collection_pk_ranges", "]", ",", "collection_id", ")", "self", ".", "_collection_routing_map_by_item", "[", "collection_id", "]", "=", "collection_routing_map", "return", "collection_routing_map", ".", "get_overlapping_ranges", "(", "partition_key_ranges", ")" ]
54.321429
0.011628
def list_config_map_for_all_namespaces(self, **kwargs):
    """
    list or watch objects of kind ConfigMap
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_config_map_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str _continue: The continue option should be set when retrieving more results from the
        server. Since this value is server defined, clients may only use the continue value from a
        previous query result with identical query parameters (except for the value of continue)
        and the server may reject a continue value it does not recognize. If the specified continue
        value is no longer valid whether due to expiration (generally five to fifteen minutes) or a
        configuration change on the server, the server will respond with a 410 ResourceExpired
        error together with a continue token. If the client needs a consistent list, it must
        restart their list without the continue field. Otherwise, the client may send another list
        request with the token received with the 410 error, the server will respond with a list
        starting from the next key, but from the latest snapshot, which is inconsistent from the
        previous list results - objects that are created, modified, or deleted after the first
        list request will be included in the response, as long as their keys are after the
        \"next key\". This field is not supported when watch is true. Clients may start a watch
        from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their
        fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their
        labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more
        items exist, the server will set the `continue` field on the list metadata to a value that
        can be used with the same initial query to retrieve the next set of results. Setting a
        limit may return fewer than the requested amount of items (up to zero items) in the event
        all requested objects are filtered out and clients should only use the presence of the
        continue field to determine whether more results are available. Servers may choose not to
        support the limit argument and will return all of the available results. If limit is
        specified and the continue field is empty, clients may assume that no more results are
        available. This field is not supported if watch is true. The server guarantees that the
        objects returned when using continue will be identical to issuing a single list call
        without a limit - that is, no objects created, modified, or deleted after the first
        request is issued will be included in any subsequent continued requests. This is sometimes
        referred to as a consistent snapshot, and ensures that a client that is using limit to
        receive smaller chunks of a very large result can ensure they see all possible objects. If
        objects are updated during a chunked list the version of the object that was present at
        the time the first list result was calculated is returned.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after
        that particular version of a resource. Defaults to changes from the beginning of history.
        When specified for list: - if unset, then the result is returned from remote storage based
        on quorum-read flag; - if it's 0, then we simply return what we currently have in cache,
        no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the
        call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream
        of add, update, and remove notifications. Specify resourceVersion.
    :return: V1ConfigMapList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_config_map_for_all_namespaces_with_http_info(**kwargs)
    else:
        (data) = self.list_config_map_for_all_namespaces_with_http_info(**kwargs)
        return data
[ "def", "list_config_map_for_all_namespaces", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "list_config_map_for_all_namespaces_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "list_config_map_for_all_namespaces_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
168.777778
0.0024
def protocols(self):
    """
    :rtype: dict[int, list of ProtocolAnalyzer]
    """
    if self.__protocols is None:
        self.__protocols = self.proto_tree_model.protocols
    return self.__protocols
[ "def", "protocols", "(", "self", ")", ":", "if", "self", ".", "__protocols", "is", "None", ":", "self", ".", "__protocols", "=", "self", ".", "proto_tree_model", ".", "protocols", "return", "self", ".", "__protocols" ]
31.714286
0.008772
def local_manager_gid(self):
    """Group id of local manager group of current authenticated member.

    Currently a user can be assigned only to one local manager group. If
    more than one local manager group is configured, an error is raised.
    """
    config = self.root['settings']['ugm_localmanager'].attrs
    user = security.authenticated_user(get_current_request())
    if not user:
        return None
    gids = user.group_ids
    adm_gids = list()
    for gid in gids:
        rule = config.get(gid)
        if rule:
            adm_gids.append(gid)
    if len(adm_gids) == 0:
        return None
    if len(adm_gids) > 1:
        msg = (u"Authenticated member defined in local manager "
               u"groups %s but only one management group allowed for "
               u"each user. Please contact System Administrator in "
               u"order to fix this problem.")
        exc = msg % ', '.join(["'%s'" % gid for gid in adm_gids])
        raise Exception(exc)
    return adm_gids[0]
[ "def", "local_manager_gid", "(", "self", ")", ":", "config", "=", "self", ".", "root", "[", "'settings'", "]", "[", "'ugm_localmanager'", "]", ".", "attrs", "user", "=", "security", ".", "authenticated_user", "(", "get_current_request", "(", ")", ")", "if", "not", "user", ":", "return", "None", "gids", "=", "user", ".", "group_ids", "adm_gids", "=", "list", "(", ")", "for", "gid", "in", "gids", ":", "rule", "=", "config", ".", "get", "(", "gid", ")", "if", "rule", ":", "adm_gids", ".", "append", "(", "gid", ")", "if", "len", "(", "adm_gids", ")", "==", "0", ":", "return", "None", "if", "len", "(", "adm_gids", ")", ">", "1", ":", "msg", "=", "(", "u\"Authenticated member defined in local manager \"", "u\"groups %s but only one management group allowed for \"", "u\"each user. Please contact System Administrator in \"", "u\"order to fix this problem.\"", ")", "exc", "=", "msg", "%", "', '", ".", "join", "(", "[", "\"'%s'\"", "%", "gid", "for", "gid", "in", "adm_gids", "]", ")", "raise", "Exception", "(", "exc", ")", "return", "adm_gids", "[", "0", "]" ]
41.461538
0.001813
def layout(self):
    """Call to have the view layout itself.

    Subclasses should invoke this after laying out child
    views and/or updating its own frame.
    """
    if self.shadowed:
        shadow_size = theme.current.shadow_size
        shadowed_frame_size = (self.frame.w + shadow_size,
                               self.frame.h + shadow_size)
        self.surface = pygame.Surface(
            shadowed_frame_size, pygame.SRCALPHA, 32)
        shadow_image = resource.get_image('shadow')
        self.shadow_image = resource.scale_image(shadow_image,
                                                 shadowed_frame_size)
    else:
        self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32)
        self.shadow_image = None
[ "def", "layout", "(", "self", ")", ":", "if", "self", ".", "shadowed", ":", "shadow_size", "=", "theme", ".", "current", ".", "shadow_size", "shadowed_frame_size", "=", "(", "self", ".", "frame", ".", "w", "+", "shadow_size", ",", "self", ".", "frame", ".", "h", "+", "shadow_size", ")", "self", ".", "surface", "=", "pygame", ".", "Surface", "(", "shadowed_frame_size", ",", "pygame", ".", "SRCALPHA", ",", "32", ")", "shadow_image", "=", "resource", ".", "get_image", "(", "'shadow'", ")", "self", ".", "shadow_image", "=", "resource", ".", "scale_image", "(", "shadow_image", ",", "shadowed_frame_size", ")", "else", ":", "self", ".", "surface", "=", "pygame", ".", "Surface", "(", "self", ".", "frame", ".", "size", ",", "pygame", ".", "SRCALPHA", ",", "32", ")", "self", ".", "shadow_image", "=", "None" ]
44.444444
0.002448
def ui_label(self):
    """UI string identifying the partition if possible."""
    return ': '.join(filter(None, [
        self.ui_device_presentation,
        self.ui_id_label or self.ui_id_uuid or self.drive_label
    ]))
[ "def", "ui_label", "(", "self", ")", ":", "return", "': '", ".", "join", "(", "filter", "(", "None", ",", "[", "self", ".", "ui_device_presentation", ",", "self", ".", "ui_id_label", "or", "self", ".", "ui_id_uuid", "or", "self", ".", "drive_label", "]", ")", ")" ]
39.666667
0.00823
def get_value(self, dictionary):
    """
    Given the *incoming* primitive data, return the value for this field
    that should be validated and transformed to a native value.
    """
    if html.is_html_input(dictionary):
        # HTML forms will represent empty fields as '', and cannot
        # represent None or False values directly.
        if self.field_name not in dictionary:
            if getattr(self.root, 'partial', False):
                return empty
            return self.default_empty_html
        ret = dictionary[self.field_name]
        if ret == '' and self.allow_null:
            # If the field is blank, and null is a valid value then
            # determine if we should use null instead.
            return '' if getattr(self, 'allow_blank', False) else None
        elif ret == '' and not self.required:
            # If the field is blank, and emptiness is valid then
            # determine if we should use emptiness instead.
            return '' if getattr(self, 'allow_blank', False) else empty
        return ret
    return dictionary.get(self.field_name, empty)
[ "def", "get_value", "(", "self", ",", "dictionary", ")", ":", "if", "html", ".", "is_html_input", "(", "dictionary", ")", ":", "# HTML forms will represent empty fields as '', and cannot", "# represent None or False values directly.", "if", "self", ".", "field_name", "not", "in", "dictionary", ":", "if", "getattr", "(", "self", ".", "root", ",", "'partial'", ",", "False", ")", ":", "return", "empty", "return", "self", ".", "default_empty_html", "ret", "=", "dictionary", "[", "self", ".", "field_name", "]", "if", "ret", "==", "''", "and", "self", ".", "allow_null", ":", "# If the field is blank, and null is a valid value then", "# determine if we should use null instead.", "return", "''", "if", "getattr", "(", "self", ",", "'allow_blank'", ",", "False", ")", "else", "None", "elif", "ret", "==", "''", "and", "not", "self", ".", "required", ":", "# If the field is blank, and emptyness is valid then", "# determine if we should use emptyness instead.", "return", "''", "if", "getattr", "(", "self", ",", "'allow_blank'", ",", "False", ")", "else", "empty", "return", "ret", "return", "dictionary", ".", "get", "(", "self", ".", "field_name", ",", "empty", ")" ]
50.826087
0.001679
def user(name, id='', user='', priv='', password='', status='active'):
    '''
    Ensures that a user is configured on the device. Due to being unable to
    verify the user password, this is a forced operation.

    .. versionadded:: 2019.2.0

    name: The name of the module function to execute.

    id(int): The user ID slot on the device.

    user(str): The username of the user.

    priv(str): The privilege level of the user.

    password(str): The password of the user.

    status(str): The status of the user. Can be either active or inactive.

    SLS Example:

    .. code-block:: yaml

        user_configuration:
          cimc.user:
            - id: 11
            - user: foo
            - priv: admin
            - password: mypassword
            - status: active

    '''
    ret = _default_ret(name)

    user_conf = __salt__['cimc.get_users']()
    conf = None  # guard: without this, a missing id would raise NameError below

    try:
        for entry in user_conf['outConfigs']['aaaUser']:
            if entry['id'] == str(id):
                conf = entry

        if not conf:
            ret['result'] = False
            ret['comment'] = "Unable to find requested user id on device. Please verify id is valid."
            return ret

        updates = __salt__['cimc.set_user'](str(id), user, password, priv, status)

        if 'outConfig' in updates:
            ret['changes']['before'] = conf
            ret['changes']['after'] = updates['outConfig']['aaaUser']
            ret['comment'] = "User settings modified."
        else:
            ret['result'] = False
            ret['comment'] = "Error setting user configuration."
            return ret

    except Exception as err:
        ret['result'] = False
        ret['comment'] = "Error setting user configuration."
        log.error(err)
        return ret

    ret['result'] = True
    return ret
[ "def", "user", "(", "name", ",", "id", "=", "''", ",", "user", "=", "''", ",", "priv", "=", "''", ",", "password", "=", "''", ",", "status", "=", "'active'", ")", ":", "ret", "=", "_default_ret", "(", "name", ")", "user_conf", "=", "__salt__", "[", "'cimc.get_users'", "]", "(", ")", "try", ":", "for", "entry", "in", "user_conf", "[", "'outConfigs'", "]", "[", "'aaaUser'", "]", ":", "if", "entry", "[", "'id'", "]", "==", "str", "(", "id", ")", ":", "conf", "=", "entry", "if", "not", "conf", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"Unable to find requested user id on device. Please verify id is valid.\"", "return", "ret", "updates", "=", "__salt__", "[", "'cimc.set_user'", "]", "(", "str", "(", "id", ")", ",", "user", ",", "password", ",", "priv", ",", "status", ")", "if", "'outConfig'", "in", "updates", ":", "ret", "[", "'changes'", "]", "[", "'before'", "]", "=", "conf", "ret", "[", "'changes'", "]", "[", "'after'", "]", "=", "updates", "[", "'outConfig'", "]", "[", "'aaaUser'", "]", "ret", "[", "'comment'", "]", "=", "\"User settings modified.\"", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"Error setting user configuration.\"", "return", "ret", "except", "Exception", "as", "err", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"Error setting user configuration.\"", "log", ".", "error", "(", "err", ")", "return", "ret", "ret", "[", "'result'", "]", "=", "True", "return", "ret" ]
26.149254
0.00165
def get_account_tokens(self, address):
    """
    Get the list of tokens that this address owns
    """
    cur = self.db.cursor()
    return namedb_get_account_tokens(cur, address)
[ "def", "get_account_tokens", "(", "self", ",", "address", ")", ":", "cur", "=", "self", ".", "db", ".", "cursor", "(", ")", "return", "namedb_get_account_tokens", "(", "cur", ",", "address", ")" ]
32.833333
0.009901
def toJSONFilters(actions):
    """Generate a JSON-to-JSON filter from stdin to stdout

    The filter:

    * reads a JSON-formatted pandoc document from stdin
    * transforms it by walking the tree and performing the actions
    * returns a new JSON-formatted pandoc document to stdout

    The argument `actions` is a list of functions of the form
    `action(key, value, format, meta)`, as described in more
    detail under `walk`.

    This function calls `applyJSONFilters`, with the `format`
    argument provided by the first command-line argument,
    if present.  (Pandoc sets this by default when calling
    filters.)
    """
    try:
        input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
    except AttributeError:
        # Python 2 does not have sys.stdin.buffer.
        # REF: https://stackoverflow.com/questions/2467928/python-unicodeencode
        input_stream = codecs.getreader("utf-8")(sys.stdin)

    source = input_stream.read()
    if len(sys.argv) > 1:
        format = sys.argv[1]
    else:
        format = ""

    sys.stdout.write(applyJSONFilters(actions, source, format))
[ "def", "toJSONFilters", "(", "actions", ")", ":", "try", ":", "input_stream", "=", "io", ".", "TextIOWrapper", "(", "sys", ".", "stdin", ".", "buffer", ",", "encoding", "=", "'utf-8'", ")", "except", "AttributeError", ":", "# Python 2 does not have sys.stdin.buffer.", "# REF: https://stackoverflow.com/questions/2467928/python-unicodeencode", "input_stream", "=", "codecs", ".", "getreader", "(", "\"utf-8\"", ")", "(", "sys", ".", "stdin", ")", "source", "=", "input_stream", ".", "read", "(", ")", "if", "len", "(", "sys", ".", "argv", ")", ">", "1", ":", "format", "=", "sys", ".", "argv", "[", "1", "]", "else", ":", "format", "=", "\"\"", "sys", ".", "stdout", ".", "write", "(", "applyJSONFilters", "(", "actions", ",", "source", ",", "format", ")", ")" ]
34.21875
0.000888
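A minimal filter script built on toJSONFilters, following the pandocfilters convention (run it as `pandoc -t json input.md | python caps.py html`); Str is the element constructor from pandocfilters:

from pandocfilters import Str

def caps(key, value, format, meta):
    # upper-case every string node; returning None leaves a node unchanged
    if key == 'Str':
        return Str(value.upper())

if __name__ == '__main__':
    toJSONFilters([caps])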
def getCalculationDependencies(self, flat=False, deps=None):
    """ Recursively calculates all dependencies of this calculation.
        The return value is dictionary of dictionaries (of dictionaries...)

        {service_UID1:
            {service_UID2:
                {service_UID3: {},
                 service_UID4: {},
                },
            },
        }

        set flat=True to get a simple list of AnalysisService objects
    """
    if deps is None:
        deps = [] if flat is True else {}

    for service in self.getDependentServices():
        calc = service.getCalculation()
        if calc:
            calc.getCalculationDependencies(flat, deps)
        if flat:
            deps.append(service)
        else:
            deps[service.UID()] = {}
    return deps
[ "def", "getCalculationDependencies", "(", "self", ",", "flat", "=", "False", ",", "deps", "=", "None", ")", ":", "if", "deps", "is", "None", ":", "deps", "=", "[", "]", "if", "flat", "is", "True", "else", "{", "}", "for", "service", "in", "self", ".", "getDependentServices", "(", ")", ":", "calc", "=", "service", ".", "getCalculation", "(", ")", "if", "calc", ":", "calc", ".", "getCalculationDependencies", "(", "flat", ",", "deps", ")", "if", "flat", ":", "deps", ".", "append", "(", "service", ")", "else", ":", "deps", "[", "service", ".", "UID", "(", ")", "]", "=", "{", "}", "return", "deps" ]
32.846154
0.002275
def _sub16(ins):
    ''' Pops last 2 words from the stack and subtracts them.
    Then pushes the result onto the stack. The top of the stack is
    subtracted from the value beneath it (Top - 1).

    Optimizations:
      * If 2nd op is ZERO, then do NOTHING: A - 0 = A
      * If any of the operands is < 4, then DEC is used
      * If any of the operands is > 65531 (-4..-1), then INC is used
    '''
    op1, op2 = tuple(ins.quad[2:4])

    if is_int(op2):
        op = int16(op2)
        output = _16bit_oper(op1)

        if op == 0:
            output.append('push hl')
            return output

        if op < 4:
            output.extend(['dec hl'] * op)
            output.append('push hl')
            return output

        if op > 65531:
            output.extend(['inc hl'] * (0x10000 - op))
            output.append('push hl')
            return output

        output.append('ld de, -%i' % op)
        output.append('add hl, de')
        output.append('push hl')
        return output

    if op2[0] == '_':  # Optimization when 2nd operand is an id
        rev = True
        op1, op2 = op2, op1
    else:
        rev = False

    output = _16bit_oper(op1, op2, rev)
    output.append('or a')
    output.append('sbc hl, de')
    output.append('push hl')
    return output
[ "def", "_sub16", "(", "ins", ")", ":", "op1", ",", "op2", "=", "tuple", "(", "ins", ".", "quad", "[", "2", ":", "4", "]", ")", "if", "is_int", "(", "op2", ")", ":", "op", "=", "int16", "(", "op2", ")", "output", "=", "_16bit_oper", "(", "op1", ")", "if", "op", "==", "0", ":", "output", ".", "append", "(", "'push hl'", ")", "return", "output", "if", "op", "<", "4", ":", "output", ".", "extend", "(", "[", "'dec hl'", "]", "*", "op", ")", "output", ".", "append", "(", "'push hl'", ")", "return", "output", "if", "op", ">", "65531", ":", "output", ".", "extend", "(", "[", "'inc hl'", "]", "*", "(", "0x10000", "-", "op", ")", ")", "output", ".", "append", "(", "'push hl'", ")", "return", "output", "output", ".", "append", "(", "'ld de, -%i'", "%", "op", ")", "output", ".", "append", "(", "'add hl, de'", ")", "output", ".", "append", "(", "'push hl'", ")", "return", "output", "if", "op2", "[", "0", "]", "==", "'_'", ":", "# Optimization when 2nd operand is an id", "rev", "=", "True", "op1", ",", "op2", "=", "op2", ",", "op1", "else", ":", "rev", "=", "False", "output", "=", "_16bit_oper", "(", "op1", ",", "op2", ",", "rev", ")", "output", ".", "append", "(", "'or a'", ")", "output", ".", "append", "(", "'sbc hl, de'", ")", "output", ".", "append", "(", "'push hl'", ")", "return", "output" ]
24.117647
0.000781
def connectivity_map(dset, prefix, x, y, z, radius=2):
    '''Will perform connectivity analysis on ``dset`` using seed point
    ``(x,y,z)`` (in RAI order) with a sphere of radius ``radius``. Does not
    perform any preprocessing of ``dset``. This should be already motion
    corrected, noise-regressed, residualized, etc.'''
    seed_series = nl.sphere_average(dset, x, y, z, radius)
    with tempfile.NamedTemporaryFile(delete=False) as temp:
        temp.write('\n'.join([str(x) for x in seed_series]))
    decon = nl.Decon()
    decon.input_dsets = dset
    decon.stim_files = {'seed': temp.name}
    decon.prefix = prefix
    decon.run()
    try:
        os.remove(temp.name)
    except:
        pass
[ "def", "connectivity_map", "(", "dset", ",", "prefix", ",", "x", ",", "y", ",", "z", ",", "radius", "=", "2", ")", ":", "seed_series", "=", "nl", ".", "sphere_average", "(", "dset", ",", "x", ",", "y", ",", "z", ",", "radius", ")", "with", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "as", "temp", ":", "temp", ".", "write", "(", "'\\n'", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "seed_series", "]", ")", ")", "decon", "=", "nl", ".", "Decon", "(", ")", "decon", ".", "input_dsets", "=", "dset", "decon", ".", "stim_files", "=", "{", "'seed'", ":", "temp", ".", "name", "}", "decon", ".", "prefix", "=", "prefix", "decon", ".", "run", "(", ")", "try", ":", "os", ".", "remove", "(", "temp", ".", "name", ")", "except", ":", "pass" ]
45.066667
0.02029
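A hedged call sketch for connectivity_map (the dataset name and PCC seed coordinates are illustrative; assumes the nl helper library and an AFNI-readable dataset on disk):

connectivity_map('rest_clean+tlrc', 'seed_pcc_conn', 0, -53, 26, radius=4)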
def reset(self):
    """Resets the iterator to the beginning of the data."""
    self.curr_idx = 0
    random.shuffle(self.idx)
    for buck in self.data:
        np.random.shuffle(buck)
[ "def", "reset", "(", "self", ")", ":", "self", ".", "curr_idx", "=", "0", "random", ".", "shuffle", "(", "self", ".", "idx", ")", "for", "buck", "in", "self", ".", "data", ":", "np", ".", "random", ".", "shuffle", "(", "buck", ")" ]
33.5
0.009709
def set_position(self, x, y, speed=None):
    ''' Move chuck to absolute position in um'''
    if speed:
        self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y %d' % (x, y, speed))
    else:
        self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y' % (x, y))
[ "def", "set_position", "(", "self", ",", "x", ",", "y", ",", "speed", "=", "None", ")", ":", "if", "speed", ":", "self", ".", "_intf", ".", "write", "(", "'MoveChuckSubsite %1.1f %1.1f R Y %d'", "%", "(", "x", ",", "y", ",", "speed", ")", ")", "else", ":", "self", ".", "_intf", ".", "write", "(", "'MoveChuckSubsite %1.1f %1.1f R Y'", "%", "(", "x", ",", "y", ")", ")" ]
46.5
0.010563