Columns:
text          string    lengths 75 to 104k
code_tokens   sequence
avg_line_len  float64   7.91 to 980
score         float64   0 to 0.18
def accel_move_tab_left(self, *args): # TODO KEYBINDINGS ONLY """ Callback to move a tab to the left """ pos = self.get_notebook().get_current_page() if pos != 0: self.move_tab(pos, pos - 1) return True
[ "def", "accel_move_tab_left", "(", "self", ",", "*", "args", ")", ":", "# TODO KEYBINDINGS ONLY", "pos", "=", "self", ".", "get_notebook", "(", ")", ".", "get_current_page", "(", ")", "if", "pos", "!=", "0", ":", "self", ".", "move_tab", "(", "pos", ",", "pos", "-", "1", ")", "return", "True" ]
35.428571
0.011811
def get_account_certificate(self, account_id, cert_id, **kwargs): # noqa: E501 """Get trusted certificate by ID. # noqa: E501 An endpoint for retrieving a trusted certificate by ID. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_account_certificate(account_id, cert_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str cert_id: The ID of the trusted certificate to be retrieved. (required) :return: TrustedCertificateInternalResp If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_account_certificate_with_http_info(account_id, cert_id, **kwargs) # noqa: E501 else: (data) = self.get_account_certificate_with_http_info(account_id, cert_id, **kwargs) # noqa: E501 return data
[ "def", "get_account_certificate", "(", "self", ",", "account_id", ",", "cert_id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "get_account_certificate_with_http_info", "(", "account_id", ",", "cert_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "get_account_certificate_with_http_info", "(", "account_id", ",", "cert_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
58.818182
0.001521
def run_script(self, description): """ Run an existing script using its description to look it up Usage: C{engine.run_script(description)} @param description: description of the script to run @raise Exception: if the specified script does not exist """ targetScript = None for item in self.configManager.allItems: if item.description == description and isinstance(item, model.Script): targetScript = item if targetScript is not None: self.runner.run_subscript(targetScript) else: raise Exception("No script with description '%s' found" % description)
[ "def", "run_script", "(", "self", ",", "description", ")", ":", "targetScript", "=", "None", "for", "item", "in", "self", ".", "configManager", ".", "allItems", ":", "if", "item", ".", "description", "==", "description", "and", "isinstance", "(", "item", ",", "model", ".", "Script", ")", ":", "targetScript", "=", "item", "if", "targetScript", "is", "not", "None", ":", "self", ".", "runner", ".", "run_subscript", "(", "targetScript", ")", "else", ":", "raise", "Exception", "(", "\"No script with description '%s' found\"", "%", "description", ")" ]
38
0.008559
def get_inline_expression(self, text): """Extract an inline expression from the given text.""" text = text.strip() if not text.startswith(self.inline_tags[0]) or not text.endswith(self.inline_tags[1]): return return text[2:-2]
[ "def", "get_inline_expression", "(", "self", ",", "text", ")", ":", "text", "=", "text", ".", "strip", "(", ")", "if", "not", "text", ".", "startswith", "(", "self", ".", "inline_tags", "[", "0", "]", ")", "or", "not", "text", ".", "endswith", "(", "self", ".", "inline_tags", "[", "1", "]", ")", ":", "return", "return", "text", "[", "2", ":", "-", "2", "]" ]
37.857143
0.01107
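A runnable sketch of the extractor above; the inline_tags pair is an assumed placeholder value ('{{', '}}'), since the class that owns it is not part of the row:

class InlineExtractor:
    inline_tags = ('{{', '}}')  # assumed delimiters; the real values come from the host class

    def get_inline_expression(self, text):
        text = text.strip()
        if not text.startswith(self.inline_tags[0]) or not text.endswith(self.inline_tags[1]):
            return None
        return text[2:-2]

print(InlineExtractor().get_inline_expression('  {{ user.name }} '))  # ' user.name '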
def createWindow(self,cls=None,caption_t=None,*args,**kwargs): """ createWindow(cls=window.PengWindow, *args, **kwargs) Creates a new window using the supplied ``cls``\ . If ``cls`` is not given, :py:class:`peng3d.window.PengWindow()` will be used. Any other positional or keyword arguments are passed to the class constructor. Note that this method currently does not support using multiple windows. .. todo:: Implement having multiple windows. """ if cls is None: from . import window cls = window.PengWindow if self.window is not None: raise RuntimeError("Window already created!") self.sendEvent("peng3d:window.create.pre",{"peng":self,"cls":cls}) if caption_t is not None: kwargs["caption"] = "Peng3d Application" self.window = cls(self,*args,**kwargs) self.sendEvent("peng3d:window.create.post",{"peng":self,"window":self.window}) if self.cfg["rsrc.enable"] and self.resourceMgr is None: self.sendEvent("peng3d:rsrc.init.pre",{"peng":self,"basepath":self.cfg["rsrc.basepath"]}) self.resourceMgr = resource.ResourceManager(self,self.cfg["rsrc.basepath"]) self.rsrcMgr = self.resourceMgr self.sendEvent("peng3d:rsrc.init.post",{"peng":self,"rsrcMgr":self.resourceMgr}) if self.cfg["i18n.enable"] and self.i18n is None: self.sendEvent("peng3d:i18n.init.pre",{"peng":self}) self.i18n = i18n.TranslationManager(self) self._t = self.i18n.t self._tl = self.i18n.tl self.sendEvent("peng3d:i18n.init.post",{"peng":self,"i18n":self.i18n}) if caption_t is not None: self.window.set_caption(self.t(caption_t)) def f(): self.window.set_caption(self.t(caption_t)) self.i18n.addAction("setlang",f) return self.window
[ "def", "createWindow", "(", "self", ",", "cls", "=", "None", ",", "caption_t", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "cls", "is", "None", ":", "from", ".", "import", "window", "cls", "=", "window", ".", "PengWindow", "if", "self", ".", "window", "is", "not", "None", ":", "raise", "RuntimeError", "(", "\"Window already created!\"", ")", "self", ".", "sendEvent", "(", "\"peng3d:window.create.pre\"", ",", "{", "\"peng\"", ":", "self", ",", "\"cls\"", ":", "cls", "}", ")", "if", "caption_t", "is", "not", "None", ":", "kwargs", "[", "\"caption\"", "]", "=", "\"Peng3d Application\"", "self", ".", "window", "=", "cls", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "sendEvent", "(", "\"peng3d:window.create.post\"", ",", "{", "\"peng\"", ":", "self", ",", "\"window\"", ":", "self", ".", "window", "}", ")", "if", "self", ".", "cfg", "[", "\"rsrc.enable\"", "]", "and", "self", ".", "resourceMgr", "is", "None", ":", "self", ".", "sendEvent", "(", "\"peng3d:rsrc.init.pre\"", ",", "{", "\"peng\"", ":", "self", ",", "\"basepath\"", ":", "self", ".", "cfg", "[", "\"rsrc.basepath\"", "]", "}", ")", "self", ".", "resourceMgr", "=", "resource", ".", "ResourceManager", "(", "self", ",", "self", ".", "cfg", "[", "\"rsrc.basepath\"", "]", ")", "self", ".", "rsrcMgr", "=", "self", ".", "resourceMgr", "self", ".", "sendEvent", "(", "\"peng3d:rsrc.init.post\"", ",", "{", "\"peng\"", ":", "self", ",", "\"rsrcMgr\"", ":", "self", ".", "resourceMgr", "}", ")", "if", "self", ".", "cfg", "[", "\"i18n.enable\"", "]", "and", "self", ".", "i18n", "is", "None", ":", "self", ".", "sendEvent", "(", "\"peng3d:i18n.init.pre\"", ",", "{", "\"peng\"", ":", "self", "}", ")", "self", ".", "i18n", "=", "i18n", ".", "TranslationManager", "(", "self", ")", "self", ".", "_t", "=", "self", ".", "i18n", ".", "t", "self", ".", "_tl", "=", "self", ".", "i18n", ".", "tl", "self", ".", "sendEvent", "(", "\"peng3d:i18n.init.post\"", ",", "{", "\"peng\"", ":", "self", ",", "\"i18n\"", ":", "self", ".", "i18n", "}", ")", "if", "caption_t", "is", "not", "None", ":", "self", ".", "window", ".", "set_caption", "(", "self", ".", "t", "(", "caption_t", ")", ")", "def", "f", "(", ")", ":", "self", ".", "window", ".", "set_caption", "(", "self", ".", "t", "(", "caption_t", ")", ")", "self", ".", "i18n", ".", "addAction", "(", "\"setlang\"", ",", "f", ")", "return", "self", ".", "window" ]
46.72093
0.023403
def batch_iterable(iterable, count): """ Yield batches of `count` items from the given iterable. >>> for x in batch([1, 2, 3, 4, 5, 6, 7], 3): >>> print(x) [1, 2, 3] [4, 5, 6] [7] :param iterable: An iterable :type iterable: Iterable :param count: Number of items per batch. If <= 0, nothing is yielded. :type count: int :return: Iterable of lists of items :rtype: Iterable[list[object]] """ if count <= 0: return current_batch = [] for item in iterable: if len(current_batch) == count: yield current_batch current_batch = [] current_batch.append(item) if current_batch: yield current_batch
[ "def", "batch_iterable", "(", "iterable", ",", "count", ")", ":", "if", "count", "<=", "0", ":", "return", "current_batch", "=", "[", "]", "for", "item", "in", "iterable", ":", "if", "len", "(", "current_batch", ")", "==", "count", ":", "yield", "current_batch", "current_batch", "=", "[", "]", "current_batch", ".", "append", "(", "item", ")", "if", "current_batch", ":", "yield", "current_batch" ]
25.777778
0.001385
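A quick usage check for the generator above (a sketch; assumes batch_iterable is importable from the module shown):

chunks = list(batch_iterable([1, 2, 3, 4, 5, 6, 7], 3))
assert chunks == [[1, 2, 3], [4, 5, 6], [7]]  # the trailing partial batch is kept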
def split_by_rand_pct(self, valid_pct:float=0.2, seed:int=None)->'ItemLists': "Split the items randomly by putting `valid_pct` in the validation set, optional `seed` can be passed." if valid_pct==0.: return self.split_none() if seed is not None: np.random.seed(seed) rand_idx = np.random.permutation(range_of(self)) cut = int(valid_pct * len(self)) return self.split_by_idx(rand_idx[:cut])
[ "def", "split_by_rand_pct", "(", "self", ",", "valid_pct", ":", "float", "=", "0.2", ",", "seed", ":", "int", "=", "None", ")", "->", "'ItemLists'", ":", "if", "valid_pct", "==", "0.", ":", "return", "self", ".", "split_none", "(", ")", "if", "seed", "is", "not", "None", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "rand_idx", "=", "np", ".", "random", ".", "permutation", "(", "range_of", "(", "self", ")", ")", "cut", "=", "int", "(", "valid_pct", "*", "len", "(", "self", ")", ")", "return", "self", ".", "split_by_idx", "(", "rand_idx", "[", ":", "cut", "]", ")" ]
61.571429
0.029748
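The split above leans on fastai helpers (range_of, split_by_idx) that are not part of the row; a self-contained sketch of the same idea with plain NumPy, assuming the items are simply indexed 0..n-1:

import numpy as np

def random_split_indices(n_items, valid_pct=0.2, seed=None):
    # Shuffle all indices, then take the first valid_pct share as the validation set.
    if seed is not None:
        np.random.seed(seed)
    rand_idx = np.random.permutation(n_items)
    cut = int(valid_pct * n_items)
    return rand_idx[:cut], rand_idx[cut:]  # (validation indices, training indices)

valid_idx, train_idx = random_split_indices(10, valid_pct=0.2, seed=42)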
def get_fileset(self, fileset): """ Caches a single fileset (if the 'path' attribute is accessed and it has not been previously cached for example Parameters ---------- fileset : Fileset The fileset to cache prev_login : xnat.XNATSession An XNATSession object to use for the connection. A new one is created if one isn't provided Returns ------- primary_path : str The path of the primary file once it has been cached aux_paths : dict[str, str] A dictionary containing a mapping of auxiliary file names to paths """ if fileset.format is None: raise ArcanaUsageError( "Attempting to download {}, which has not been assigned a " "file format (see Fileset.formatted)".format(fileset)) self._check_repository(fileset) with self: # Connect to the XNAT repository if haven't already xsession = self.get_xsession(fileset) xscan = xsession.scans[fileset.name] # Set URI so we can retrieve checksums if required fileset.uri = xscan.uri fileset.id = xscan.id cache_path = self._cache_path(fileset) need_to_download = True if op.exists(cache_path): if self._check_md5: md5_path = cache_path + XnatRepo.MD5_SUFFIX try: with open(md5_path, 'r') as f: cached_checksums = json.load(f) if cached_checksums == fileset.checksums: need_to_download = False except IOError: pass else: need_to_download = False if need_to_download: # if fileset._resource_name is not None: xresource = xscan.resources[fileset._resource_name] # else: # xresources = [] # for resource_name in fileset.format.xnat_resource_names: # try: # xresources.append(xscan.resources[resource_name]) # except KeyError: # pass # if not xresources: # raise ArcanaError( # "Could not find matching resource for {} ('{}') " # "in {}, available resources are '{}'" # .format( # self.format, # "', '".join( # fileset.format.xnat_resource_names), # xscan.uri, # "', '".join( # r.label # for r in list(xscan.resources.values())))) # elif len(xresources) > 1: # logger.warning( # "Found multiple acceptable resources for {}: {}" # .format(fileset, # ', '.join(str(r) for r in xresources))) # xresource = xresources[0] # The path to the directory which the files will be # downloaded to. tmp_dir = cache_path + '.download' try: # Attempt to make tmp download directory. This will # fail if another process (or previous attempt) has # already created it. In that case this process will # wait to see if that download finishes successfully, # and if so use the cached version. os.mkdir(tmp_dir) except OSError as e: if e.errno == errno.EEXIST: # Another process may be concurrently downloading # the same file to the cache. Wait for # 'race_cond_delay' seconds and then check that it # has been completed or assume interrupted and # redownload. self._delayed_download( tmp_dir, xresource, xscan, fileset, xsession.label, cache_path, delay=self._race_cond_delay) else: raise else: self.download_fileset( tmp_dir, xresource, xscan, fileset, xsession.label, cache_path) shutil.rmtree(tmp_dir) if not fileset.format.directory: (primary_path, aux_paths) = fileset.format.assort_files( op.join(cache_path, f) for f in os.listdir(cache_path)) else: primary_path = cache_path aux_paths = None return primary_path, aux_paths
[ "def", "get_fileset", "(", "self", ",", "fileset", ")", ":", "if", "fileset", ".", "format", "is", "None", ":", "raise", "ArcanaUsageError", "(", "\"Attempting to download {}, which has not been assigned a \"", "\"file format (see Fileset.formatted)\"", ".", "format", "(", "fileset", ")", ")", "self", ".", "_check_repository", "(", "fileset", ")", "with", "self", ":", "# Connect to the XNAT repository if haven't already", "xsession", "=", "self", ".", "get_xsession", "(", "fileset", ")", "xscan", "=", "xsession", ".", "scans", "[", "fileset", ".", "name", "]", "# Set URI so we can retrieve checksums if required", "fileset", ".", "uri", "=", "xscan", ".", "uri", "fileset", ".", "id", "=", "xscan", ".", "id", "cache_path", "=", "self", ".", "_cache_path", "(", "fileset", ")", "need_to_download", "=", "True", "if", "op", ".", "exists", "(", "cache_path", ")", ":", "if", "self", ".", "_check_md5", ":", "md5_path", "=", "cache_path", "+", "XnatRepo", ".", "MD5_SUFFIX", "try", ":", "with", "open", "(", "md5_path", ",", "'r'", ")", "as", "f", ":", "cached_checksums", "=", "json", ".", "load", "(", "f", ")", "if", "cached_checksums", "==", "fileset", ".", "checksums", ":", "need_to_download", "=", "False", "except", "IOError", ":", "pass", "else", ":", "need_to_download", "=", "False", "if", "need_to_download", ":", "# if fileset._resource_name is not None:", "xresource", "=", "xscan", ".", "resources", "[", "fileset", ".", "_resource_name", "]", "# else:", "# xresources = []", "# for resource_name in fileset.format.xnat_resource_names:", "# try:", "# xresources.append(xscan.resources[resource_name])", "# except KeyError:", "# pass", "# if not xresources:", "# raise ArcanaError(", "# \"Could not find matching resource for {} ('{}') \"", "# \"in {}, available resources are '{}'\"", "# .format(", "# self.format,", "# \"', '\".join(", "# fileset.format.xnat_resource_names),", "# xscan.uri,", "# \"', '\".join(", "# r.label", "# for r in list(xscan.resources.values()))))", "# elif len(xresources) > 1:", "# logger.warning(", "# \"Found multiple acceptable resources for {}: {}\"", "# .format(fileset,", "# ', '.join(str(r) for r in xresources)))", "# xresource = xresources[0]", "# The path to the directory which the files will be", "# downloaded to.", "tmp_dir", "=", "cache_path", "+", "'.download'", "try", ":", "# Attempt to make tmp download directory. This will", "# fail if another process (or previous attempt) has", "# already created it. In that case this process will", "# wait to see if that download finishes successfully,", "# and if so use the cached version.", "os", ".", "mkdir", "(", "tmp_dir", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EEXIST", ":", "# Another process may be concurrently downloading", "# the same file to the cache. 
Wait for", "# 'race_cond_delay' seconds and then check that it", "# has been completed or assume interrupted and", "# redownload.", "self", ".", "_delayed_download", "(", "tmp_dir", ",", "xresource", ",", "xscan", ",", "fileset", ",", "xsession", ".", "label", ",", "cache_path", ",", "delay", "=", "self", ".", "_race_cond_delay", ")", "else", ":", "raise", "else", ":", "self", ".", "download_fileset", "(", "tmp_dir", ",", "xresource", ",", "xscan", ",", "fileset", ",", "xsession", ".", "label", ",", "cache_path", ")", "shutil", ".", "rmtree", "(", "tmp_dir", ")", "if", "not", "fileset", ".", "format", ".", "directory", ":", "(", "primary_path", ",", "aux_paths", ")", "=", "fileset", ".", "format", ".", "assort_files", "(", "op", ".", "join", "(", "cache_path", ",", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "cache_path", ")", ")", "else", ":", "primary_path", "=", "cache_path", "aux_paths", "=", "None", "return", "primary_path", ",", "aux_paths" ]
45.990826
0.000781
def replace_pyof_version(module_fullname, version): """Replace the OF Version of a module fullname. Gets a module name (eg. 'pyof.v0x01.common.header') and returns it with a new 'version' (eg. 'pyof.v0x02.common.header'). Args: module_fullname (str): The fullname of the module (e.g.: pyof.v0x01.common.header) version (str): The version to be 'inserted' on the module fullname. Returns: str: module fullname The new module fullname, with the replaced version, on the format "pyof.v0x01.common.header". If the requested version is the same as the one of the module_fullname or if the module_fullname is not an 'OF version'-specific module, returns None. """ module_version = MetaStruct.get_pyof_version(module_fullname) if not module_version or module_version == version: return None return module_fullname.replace(module_version, version)
[ "def", "replace_pyof_version", "(", "module_fullname", ",", "version", ")", ":", "module_version", "=", "MetaStruct", ".", "get_pyof_version", "(", "module_fullname", ")", "if", "not", "module_version", "or", "module_version", "==", "version", ":", "return", "None", "return", "module_fullname", ".", "replace", "(", "module_version", ",", "version", ")" ]
42.32
0.001848
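A standalone sketch of the same renaming, with MetaStruct.get_pyof_version approximated by a regular expression (an assumption; the real helper is not shown above):

import re

def replace_pyof_version(module_fullname, version):
    # Look for a version segment such as 'v0x01' and swap it for the requested one.
    match = re.search(r'v0x\d+', module_fullname)
    if not match or match.group(0) == version:
        return None
    return module_fullname.replace(match.group(0), version)

print(replace_pyof_version('pyof.v0x01.common.header', 'v0x04'))  # pyof.v0x04.common.header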
def command_line(): """Command-line tool for Midas gas detector communication.""" import argparse import asyncio import json parser = argparse.ArgumentParser(description="Read a Honeywell Midas gas " "detector state from the command line.") parser.add_argument('address', help="The IP address of the gas detector.") args = parser.parse_args() async def get(): async with GasDetector(args.address) as detector: print(json.dumps(await detector.get(), indent=4, sort_keys=True)) loop = asyncio.get_event_loop() loop.run_until_complete(get()) loop.close()
[ "def", "command_line", "(", ")", ":", "import", "argparse", "import", "asyncio", "import", "json", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Read a Honeywell Midas gas \"", "\"detector state from the command line.\"", ")", "parser", ".", "add_argument", "(", "'address'", ",", "help", "=", "\"The IP address of the gas detector.\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "async", "def", "get", "(", ")", ":", "async", "with", "GasDetector", "(", "args", ".", "address", ")", "as", "detector", ":", "print", "(", "json", ".", "dumps", "(", "await", "detector", ".", "get", "(", ")", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", ")", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "loop", ".", "run_until_complete", "(", "get", "(", ")", ")", "loop", ".", "close", "(", ")" ]
35.444444
0.001527
def binary_stdout(): """ A sys.stdout that accepts bytes. """ # First there is a Python3 issue. try: stdout = sys.stdout.buffer except AttributeError: # Probably Python 2, where bytes are strings. stdout = sys.stdout # On Windows the C runtime file orientation needs changing. if sys.platform == "win32": import msvcrt import os msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return stdout
[ "def", "binary_stdout", "(", ")", ":", "# First there is a Python3 issue.", "try", ":", "stdout", "=", "sys", ".", "stdout", ".", "buffer", "except", "AttributeError", ":", "# Probably Python 2, where bytes are strings.", "stdout", "=", "sys", ".", "stdout", "# On Windows the C runtime file orientation needs changing.", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "import", "msvcrt", "import", "os", "msvcrt", ".", "setmode", "(", "sys", ".", "stdout", ".", "fileno", "(", ")", ",", "os", ".", "O_BINARY", ")", "return", "stdout" ]
24.210526
0.002092
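A usage sketch for the helper above (assumes it is importable); it writes raw bytes rather than text:

out = binary_stdout()
out.write(b'\x89PNG\r\n')  # bytes, not str
out.flush()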
def qtaax(mt, x, t, q, m=1): """ geometric """ q = float(q) j = (mt.i - q) / (1 + q) mtj = Actuarial(nt=mt.nt, i=j) return taax(mtj, x, t) - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t)))
[ "def", "qtaax", "(", "mt", ",", "x", ",", "t", ",", "q", ",", "m", "=", "1", ")", ":", "q", "=", "float", "(", "q", ")", "j", "=", "(", "mt", ".", "i", "-", "q", ")", "/", "(", "1", "+", "q", ")", "mtj", "=", "Actuarial", "(", "nt", "=", "mt", ".", "nt", ",", "i", "=", "j", ")", "return", "taax", "(", "mtj", ",", "x", ",", "t", ")", "-", "(", "(", "float", "(", "m", "-", "1", ")", "/", "float", "(", "m", "*", "2", ")", ")", "*", "(", "1", "-", "nEx", "(", "mt", ",", "x", ",", "t", ")", ")", ")" ]
35
0.009302
def reserve(self, amount, account=None, **kwargs): """ Reserve/Burn an amount of these shares This removes the shares from the supply :param bitshares.amount.Amount amount: The amount to be burned. :param str account: (optional) the account that reserves the shares (defaults to ``default_account``) """ assert isinstance(amount, Amount) if not account: if "default_account" in self.config: account = self.config["default_account"] if not account: raise ValueError("You need to provide an account") account = Account(account, blockchain_instance=self) op = operations.Asset_reserve( **{ "fee": {"amount": 0, "asset_id": "1.3.0"}, "payer": account["id"], "amount_to_reserve": { "amount": int(amount), "asset_id": amount["asset"]["id"], }, "extensions": [], } ) return self.finalizeOp(op, account, "active", **kwargs)
[ "def", "reserve", "(", "self", ",", "amount", ",", "account", "=", "None", ",", "*", "*", "kwargs", ")", ":", "assert", "isinstance", "(", "amount", ",", "Amount", ")", "if", "not", "account", ":", "if", "\"default_account\"", "in", "self", ".", "config", ":", "account", "=", "self", ".", "config", "[", "\"default_account\"", "]", "if", "not", "account", ":", "raise", "ValueError", "(", "\"You need to provide an account\"", ")", "account", "=", "Account", "(", "account", ",", "blockchain_instance", "=", "self", ")", "op", "=", "operations", ".", "Asset_reserve", "(", "*", "*", "{", "\"fee\"", ":", "{", "\"amount\"", ":", "0", ",", "\"asset_id\"", ":", "\"1.3.0\"", "}", ",", "\"payer\"", ":", "account", "[", "\"id\"", "]", ",", "\"amount_to_reserve\"", ":", "{", "\"amount\"", ":", "int", "(", "amount", ")", ",", "\"asset_id\"", ":", "amount", "[", "\"asset\"", "]", "[", "\"id\"", "]", ",", "}", ",", "\"extensions\"", ":", "[", "]", ",", "}", ")", "return", "self", ".", "finalizeOp", "(", "op", ",", "account", ",", "\"active\"", ",", "*", "*", "kwargs", ")" ]
38.964286
0.001789
def build_alexnet(self, weights, output_layer=None): """ Connects graph of alexnet from weights """ if output_layer is None: output_layer = self._output_layer #conv1 #conv(11, 11, 96, 4, 4, padding='VALID', name='conv1') k_h = 11; k_w = 11; c_o = 96; s_h = 4; s_w = 4 conv1_in = conv(self._input_node, weights.conv1W, weights.conv1b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=1) conv1 = tf.nn.relu(conv1_in) if output_layer == 'conv1': return tf.contrib.layers.flatten(conv1) #lrn1 #lrn(2, 2e-05, 0.75, name='norm1') radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0 lrn1 = tf.nn.local_response_normalization(conv1, depth_radius=radius, alpha=alpha, beta=beta, bias=bias) #maxpool1 #max_pool(3, 3, 2, 2, padding='VALID', name='pool1') k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID' maxpool1 = tf.nn.max_pool(lrn1, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding) #conv2 #conv(5, 5, 256, 1, 1, group=2, name='conv2') k_h = 5; k_w = 5; c_o = 256; s_h = 1; s_w = 1; group = 2 conv2_in = conv(maxpool1, weights.conv2W, weights.conv2b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group) conv2 = tf.nn.relu(conv2_in) if output_layer == 'conv2': return tf.contrib.layers.flatten(conv2) #lrn2 #lrn(2, 2e-05, 0.75, name='norm2') radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0 lrn2 = tf.nn.local_response_normalization(conv2, depth_radius=radius, alpha=alpha, beta=beta, bias=bias) #maxpool2 #max_pool(3, 3, 2, 2, padding='VALID', name='pool2') k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID' maxpool2 = tf.nn.max_pool(lrn2, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding) #conv3 #conv(3, 3, 384, 1, 1, name='conv3') k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 1 conv3_in = conv(maxpool2, weights.conv3W, weights.conv3b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group) conv3 = tf.nn.relu(conv3_in) if output_layer == 'conv3': return tf.contrib.layers.flatten(conv3) #conv4 #conv(3, 3, 384, 1, 1, group=2, name='conv4') k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 2 conv4_in = conv(conv3, weights.conv4W, weights.conv4b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group) conv4 = tf.nn.relu(conv4_in) if output_layer == 'conv4': return tf.contrib.layers.flatten(conv4) #conv5 #conv(3, 3, 256, 1, 1, group=2, name='conv5') k_h = 3; k_w = 3; c_o = 256; s_h = 1; s_w = 1; group = 2 conv5_in = conv(conv4, weights.conv5W, weights.conv5b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group) conv5 = tf.nn.relu(conv5_in) if output_layer == 'conv5': return tf.contrib.layers.flatten(conv5) #maxpool5 #max_pool(3, 3, 2, 2, padding='VALID', name='pool5') k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID' maxpool5 = tf.nn.max_pool(conv5, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding) #fc6 #fc(4096, name='fc6') fc6 = tf.nn.relu_layer(tf.reshape(maxpool5, [-1, int(np.prod(maxpool5.get_shape()[1:]))]), weights.fc6W, weights.fc6b) if output_layer == 'fc6': return fc6 #fc7 #fc(4096, name='fc7') fc7 = tf.nn.relu_layer(fc6, weights.fc7W, weights.fc7b) if output_layer == 'fc7': return fc7 #fc8 #fc(num_cats, relu=False, name='fc8') fc8 = tf.nn.xw_plus_b(fc7, weights.fc8W, weights.fc8b) if output_layer == 'fc8': return fc8 #softmax sm = tf.nn.softmax(fc8) return sm
[ "def", "build_alexnet", "(", "self", ",", "weights", ",", "output_layer", "=", "None", ")", ":", "if", "output_layer", "is", "None", ":", "output_layer", "=", "self", ".", "_output_layer", "#conv1", "#conv(11, 11, 96, 4, 4, padding='VALID', name='conv1')", "k_h", "=", "11", "k_w", "=", "11", "c_o", "=", "96", "s_h", "=", "4", "s_w", "=", "4", "conv1_in", "=", "conv", "(", "self", ".", "_input_node", ",", "weights", ".", "conv1W", ",", "weights", ".", "conv1b", ",", "k_h", ",", "k_w", ",", "c_o", ",", "s_h", ",", "s_w", ",", "padding", "=", "\"SAME\"", ",", "group", "=", "1", ")", "conv1", "=", "tf", ".", "nn", ".", "relu", "(", "conv1_in", ")", "if", "output_layer", "==", "'conv1'", ":", "return", "tf", ".", "contrib", ".", "layers", ".", "flatten", "(", "conv1", ")", "#lrn1", "#lrn(2, 2e-05, 0.75, name='norm1')", "radius", "=", "2", "alpha", "=", "2e-05", "beta", "=", "0.75", "bias", "=", "1.0", "lrn1", "=", "tf", ".", "nn", ".", "local_response_normalization", "(", "conv1", ",", "depth_radius", "=", "radius", ",", "alpha", "=", "alpha", ",", "beta", "=", "beta", ",", "bias", "=", "bias", ")", "#maxpool1", "#max_pool(3, 3, 2, 2, padding='VALID', name='pool1')", "k_h", "=", "3", "k_w", "=", "3", "s_h", "=", "2", "s_w", "=", "2", "padding", "=", "'VALID'", "maxpool1", "=", "tf", ".", "nn", ".", "max_pool", "(", "lrn1", ",", "ksize", "=", "[", "1", ",", "k_h", ",", "k_w", ",", "1", "]", ",", "strides", "=", "[", "1", ",", "s_h", ",", "s_w", ",", "1", "]", ",", "padding", "=", "padding", ")", "#conv2", "#conv(5, 5, 256, 1, 1, group=2, name='conv2')", "k_h", "=", "5", "k_w", "=", "5", "c_o", "=", "256", "s_h", "=", "1", "s_w", "=", "1", "group", "=", "2", "conv2_in", "=", "conv", "(", "maxpool1", ",", "weights", ".", "conv2W", ",", "weights", ".", "conv2b", ",", "k_h", ",", "k_w", ",", "c_o", ",", "s_h", ",", "s_w", ",", "padding", "=", "\"SAME\"", ",", "group", "=", "group", ")", "conv2", "=", "tf", ".", "nn", ".", "relu", "(", "conv2_in", ")", "if", "output_layer", "==", "'conv2'", ":", "return", "tf", ".", "contrib", ".", "layers", ".", "flatten", "(", "conv2", ")", "#lrn2", "#lrn(2, 2e-05, 0.75, name='norm2')", "radius", "=", "2", "alpha", "=", "2e-05", "beta", "=", "0.75", "bias", "=", "1.0", "lrn2", "=", "tf", ".", "nn", ".", "local_response_normalization", "(", "conv2", ",", "depth_radius", "=", "radius", ",", "alpha", "=", "alpha", ",", "beta", "=", "beta", ",", "bias", "=", "bias", ")", "#maxpool2", "#max_pool(3, 3, 2, 2, padding='VALID', name='pool2') ", "k_h", "=", "3", "k_w", "=", "3", "s_h", "=", "2", "s_w", "=", "2", "padding", "=", "'VALID'", "maxpool2", "=", "tf", ".", "nn", ".", "max_pool", "(", "lrn2", ",", "ksize", "=", "[", "1", ",", "k_h", ",", "k_w", ",", "1", "]", ",", "strides", "=", "[", "1", ",", "s_h", ",", "s_w", ",", "1", "]", ",", "padding", "=", "padding", ")", "#conv3", "#conv(3, 3, 384, 1, 1, name='conv3')", "k_h", "=", "3", "k_w", "=", "3", "c_o", "=", "384", "s_h", "=", "1", "s_w", "=", "1", "group", "=", "1", "conv3_in", "=", "conv", "(", "maxpool2", ",", "weights", ".", "conv3W", ",", "weights", ".", "conv3b", ",", "k_h", ",", "k_w", ",", "c_o", ",", "s_h", ",", "s_w", ",", "padding", "=", "\"SAME\"", ",", "group", "=", "group", ")", "conv3", "=", "tf", ".", "nn", ".", "relu", "(", "conv3_in", ")", "if", "output_layer", "==", "'conv3'", ":", "return", "tf", ".", "contrib", ".", "layers", ".", "flatten", "(", "conv3", ")", "#conv4", "#conv(3, 3, 384, 1, 1, group=2, name='conv4')", "k_h", "=", "3", "k_w", "=", "3", "c_o", "=", "384", "s_h", "=", "1", "s_w", 
"=", "1", "group", "=", "2", "conv4_in", "=", "conv", "(", "conv3", ",", "weights", ".", "conv4W", ",", "weights", ".", "conv4b", ",", "k_h", ",", "k_w", ",", "c_o", ",", "s_h", ",", "s_w", ",", "padding", "=", "\"SAME\"", ",", "group", "=", "group", ")", "conv4", "=", "tf", ".", "nn", ".", "relu", "(", "conv4_in", ")", "if", "output_layer", "==", "'conv4'", ":", "return", "tf", ".", "contrib", ".", "layers", ".", "flatten", "(", "conv4", ")", "#conv5", "#conv(3, 3, 256, 1, 1, group=2, name='conv5')", "k_h", "=", "3", "k_w", "=", "3", "c_o", "=", "256", "s_h", "=", "1", "s_w", "=", "1", "group", "=", "2", "conv5_in", "=", "conv", "(", "conv4", ",", "weights", ".", "conv5W", ",", "weights", ".", "conv5b", ",", "k_h", ",", "k_w", ",", "c_o", ",", "s_h", ",", "s_w", ",", "padding", "=", "\"SAME\"", ",", "group", "=", "group", ")", "conv5", "=", "tf", ".", "nn", ".", "relu", "(", "conv5_in", ")", "if", "output_layer", "==", "'conv5'", ":", "return", "tf", ".", "contrib", ".", "layers", ".", "flatten", "(", "conv5", ")", "#maxpool5", "#max_pool(3, 3, 2, 2, padding='VALID', name='pool5')", "k_h", "=", "3", "k_w", "=", "3", "s_h", "=", "2", "s_w", "=", "2", "padding", "=", "'VALID'", "maxpool5", "=", "tf", ".", "nn", ".", "max_pool", "(", "conv5", ",", "ksize", "=", "[", "1", ",", "k_h", ",", "k_w", ",", "1", "]", ",", "strides", "=", "[", "1", ",", "s_h", ",", "s_w", ",", "1", "]", ",", "padding", "=", "padding", ")", "#fc6", "#fc(4096, name='fc6')", "fc6", "=", "tf", ".", "nn", ".", "relu_layer", "(", "tf", ".", "reshape", "(", "maxpool5", ",", "[", "-", "1", ",", "int", "(", "np", ".", "prod", "(", "maxpool5", ".", "get_shape", "(", ")", "[", "1", ":", "]", ")", ")", "]", ")", ",", "weights", ".", "fc6W", ",", "weights", ".", "fc6b", ")", "if", "output_layer", "==", "'fc6'", ":", "return", "fc6", "#fc7", "#fc(4096, name='fc7') ", "fc7", "=", "tf", ".", "nn", ".", "relu_layer", "(", "fc6", ",", "weights", ".", "fc7W", ",", "weights", ".", "fc7b", ")", "if", "output_layer", "==", "'fc7'", ":", "return", "fc7", "#fc8", "#fc(num_cats, relu=False, name='fc8')", "fc8", "=", "tf", ".", "nn", ".", "xw_plus_b", "(", "fc7", ",", "weights", ".", "fc8W", ",", "weights", ".", "fc8b", ")", "if", "output_layer", "==", "'fc8'", ":", "return", "fc8", "#softmax", "sm", "=", "tf", ".", "nn", ".", "softmax", "(", "fc8", ")", "return", "sm" ]
43.88
0.021395
def install_bash_completion(self, script_name=None, dest="~/.bashrc"): '''Add a line to dest that activates bash_completion for the given script_name. You can use this to let the user install bash_completion:: from argdeco import command, main @command("install-bash-completion", arg('--dest', help="destination", default="~/.bashrc") ) def install_bash_completion(dest): main.install_bash_completion(dest=dest) ''' if 'USERPROFILE' in os.environ and 'HOME' not in os.environ: os.environ['HOME'] = os.environ['USERPROFILE'] dest = expanduser(dest) if script_name is None: script_name = sys.argv[0] self.uninstall_bash_completion(script_name=script_name, dest=dest) with open(dest, 'a') as f: f.write('eval "$(register-python-argcomplete %s)"\n' % script_name)
[ "def", "install_bash_completion", "(", "self", ",", "script_name", "=", "None", ",", "dest", "=", "\"~/.bashrc\"", ")", ":", "if", "'USERPROFILE'", "in", "os", ".", "environ", "and", "'HOME'", "not", "in", "os", ".", "environ", ":", "os", ".", "environ", "[", "'HOME'", "]", "=", "os", ".", "environ", "[", "'USERPROFILE'", "]", "dest", "=", "expanduser", "(", "dest", ")", "if", "script_name", "is", "None", ":", "script_name", "=", "sys", ".", "argv", "[", "0", "]", "self", ".", "uninstall_bash_completion", "(", "script_name", "=", "script_name", ",", "dest", "=", "dest", ")", "with", "open", "(", "dest", ",", "'a'", ")", "as", "f", ":", "f", ".", "write", "(", "'eval \"$(register-python-argcomplete %s)\"\\n'", "%", "script_name", ")" ]
39.956522
0.002125
def is_indexed(self, partition): """ Returns True if partition is already indexed. Otherwise returns False. """ query = text(""" SELECT vid FROM partition_index WHERE vid = :vid; """) result = self.execute(query, vid=partition.vid) return bool(result.fetchall())
[ "def", "is_indexed", "(", "self", ",", "partition", ")", ":", "query", "=", "text", "(", "\"\"\"\n SELECT vid\n FROM partition_index\n WHERE vid = :vid;\n \"\"\"", ")", "result", "=", "self", ".", "execute", "(", "query", ",", "vid", "=", "partition", ".", "vid", ")", "return", "bool", "(", "result", ".", "fetchall", "(", ")", ")" ]
36.666667
0.008876
def upload_delete(self, token, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/attachments#delete-upload" api_path = "/api/v2/uploads/{token}.json" api_path = api_path.format(token=token) return self.call(api_path, method="DELETE", **kwargs)
[ "def", "upload_delete", "(", "self", ",", "token", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/uploads/{token}.json\"", "api_path", "=", "api_path", ".", "format", "(", "token", "=", "token", ")", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"DELETE\"", ",", "*", "*", "kwargs", ")" ]
56.4
0.01049
def remove_graphic(self, graphic: Graphics.Graphic, *, safe: bool=False) -> typing.Optional[typing.Sequence]: """Remove a graphic, but do it through the container, so dependencies can be tracked.""" return self.remove_model_item(self, "graphics", graphic, safe=safe)
[ "def", "remove_graphic", "(", "self", ",", "graphic", ":", "Graphics", ".", "Graphic", ",", "*", ",", "safe", ":", "bool", "=", "False", ")", "->", "typing", ".", "Optional", "[", "typing", ".", "Sequence", "]", ":", "return", "self", ".", "remove_model_item", "(", "self", ",", "\"graphics\"", ",", "graphic", ",", "safe", "=", "safe", ")" ]
93.333333
0.021277
def levelorder(self): """Return the nodes in the binary tree using level-order_ traversal. A level-order_ traversal visits nodes left to right, level by level. .. _level-order: https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search :return: List of nodes. :rtype: [binarytree.Node] **Example**: .. doctest:: >>> from binarytree import Node >>> >>> root = Node(1) >>> root.left = Node(2) >>> root.right = Node(3) >>> root.left.left = Node(4) >>> root.left.right = Node(5) >>> >>> print(root) <BLANKLINE> __1 / \\ 2 3 / \\ 4 5 <BLANKLINE> >>> root.levelorder [Node(1), Node(2), Node(3), Node(4), Node(5)] """ current_nodes = [self] result = [] while len(current_nodes) > 0: next_nodes = [] for node in current_nodes: result.append(node) if node.left is not None: next_nodes.append(node.left) if node.right is not None: next_nodes.append(node.right) current_nodes = next_nodes return result
[ "def", "levelorder", "(", "self", ")", ":", "current_nodes", "=", "[", "self", "]", "result", "=", "[", "]", "while", "len", "(", "current_nodes", ")", ">", "0", ":", "next_nodes", "=", "[", "]", "for", "node", "in", "current_nodes", ":", "result", ".", "append", "(", "node", ")", "if", "node", ".", "left", "is", "not", "None", ":", "next_nodes", ".", "append", "(", "node", ".", "left", ")", "if", "node", ".", "right", "is", "not", "None", ":", "next_nodes", ".", "append", "(", "node", ".", "right", ")", "current_nodes", "=", "next_nodes", "return", "result" ]
27.604167
0.001458
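The traversal above is a plain breadth-first walk; a minimal standalone version using a throwaway Node class (an illustration, not the binarytree package itself):

from collections import deque

class Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

def levelorder(root):
    # Visit nodes left to right, one level at a time.
    result, queue = [], deque([root])
    while queue:
        node = queue.popleft()
        result.append(node.value)
        if node.left is not None:
            queue.append(node.left)
        if node.right is not None:
            queue.append(node.right)
    return result

root = Node(1, Node(2, Node(4), Node(5)), Node(3))
print(levelorder(root))  # [1, 2, 3, 4, 5]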
def add_vlan_int(self, vlan_id): """ Add VLAN Interface. VLAN interfaces are required for VLANs even when not wanting to use the interface for any L3 features. Args: vlan_id: ID for the VLAN interface being created. Value of 2-4096. Returns: True if command completes successfully or False if not. Raises: None """ config = ET.Element('config') vlinterface = ET.SubElement(config, 'interface-vlan', xmlns=("urn:brocade.com:mgmt:" "brocade-interface")) interface = ET.SubElement(vlinterface, 'interface') vlan = ET.SubElement(interface, 'vlan') name = ET.SubElement(vlan, 'name') name.text = vlan_id try: self._callback(config) return True # TODO add logging and narrow exception window. except Exception as error: logging.error(error) return False
[ "def", "add_vlan_int", "(", "self", ",", "vlan_id", ")", ":", "config", "=", "ET", ".", "Element", "(", "'config'", ")", "vlinterface", "=", "ET", ".", "SubElement", "(", "config", ",", "'interface-vlan'", ",", "xmlns", "=", "(", "\"urn:brocade.com:mgmt:\"", "\"brocade-interface\"", ")", ")", "interface", "=", "ET", ".", "SubElement", "(", "vlinterface", ",", "'interface'", ")", "vlan", "=", "ET", ".", "SubElement", "(", "interface", ",", "'vlan'", ")", "name", "=", "ET", ".", "SubElement", "(", "vlan", ",", "'name'", ")", "name", ".", "text", "=", "vlan_id", "try", ":", "self", ".", "_callback", "(", "config", ")", "return", "True", "# TODO add logging and narrow exception window.", "except", "Exception", "as", "error", ":", "logging", ".", "error", "(", "error", ")", "return", "False" ]
34.931034
0.001921
def hasLogger(self, logger): """ Returns whether or not the given logger is tracked by this widget. :param logger | <str> || <logging.Logger> """ if isinstance(logger, logging.Logger): logger = logger.name return logger in self._loggers
[ "def", "hasLogger", "(", "self", ",", "logger", ")", ":", "if", "isinstance", "(", "logger", ",", "logging", ".", "Logger", ")", ":", "logger", "=", "logger", ".", "name", "return", "logger", "in", "self", ".", "_loggers" ]
32.2
0.012085
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error): """Checks rules from the 'C++ style rules' section of cppguide.html. Most of these rules are hard to test (naming, comment style), but we do what we can. In particular we check for 2-space indents, line lengths, tab usage, spaces inside code, etc. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw_lines = clean_lines.lines_without_raw_strings line = raw_lines[linenum] if line.find('\t') != -1: error(filename, linenum, 'whitespace/tab', 1, 'Tab found; better to use spaces') # One or three blank spaces at the beginning of the line is weird; it's # hard to reconcile that with 2-space indents. # NOTE: here are the conditions rob pike used for his tests. Mine aren't # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces # if(RLENGTH > 20) complain = 0; # if(match($0, " +(error|private|public|protected):")) complain = 0; # if(match(prev, "&& *$")) complain = 0; # if(match(prev, "\\|\\| *$")) complain = 0; # if(match(prev, "[\",=><] *$")) complain = 0; # if(match($0, " <<")) complain = 0; # if(match(prev, " +for \\(")) complain = 0; # if(prevodd && match(prevprev, " +for \\(")) complain = 0; scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$' classinfo = nesting_state.InnermostClass() initial_spaces = 0 cleansed_line = clean_lines.elided[linenum] while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 if line and line[-1].isspace(): error(filename, linenum, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.') # There are certain situations we allow one space, notably for # section labels, and also lines containing multi-line raw strings. elif ((initial_spaces == 1 or initial_spaces == 3) and not Match(scope_or_label_pattern, cleansed_line) and not (clean_lines.raw_lines[linenum] != line and Match(r'^\s*""', line))): error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. ' 'Are you using a 2-space indent?') # Check if the line is a header guard. is_header_guard = False if file_extension == 'h': cppvar = GetHeaderGuardCPPVariable(filename) if (line.startswith('#ifndef %s' % cppvar) or line.startswith('#define %s' % cppvar) or line.startswith('#endif // %s' % cppvar)): is_header_guard = True # #include lines and header guards can be long, since there's no clean way to # split them. # # URLs can be long too. It's possible to split these, but it makes them # harder to cut&paste. # # The "$Id:...$" comment may also get very long without it being the # developers fault. 
if (not line.startswith('#include') and not is_header_guard and not Match(r'^\s*//.*http(s?)://\S*$', line) and not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): line_width = GetLineWidth(line) extended_length = int((_line_length * 1.25)) if line_width > extended_length: error(filename, linenum, 'whitespace/line_length', 4, 'Lines should very rarely be longer than %i characters' % extended_length) elif line_width > _line_length: error(filename, linenum, 'whitespace/line_length', 2, 'Lines should be <= %i characters long' % _line_length) if (cleansed_line.count(';') > 1 and # for loops are allowed two ;'s (and may run over two lines). cleansed_line.find('for') == -1 and (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and # It's ok to have many commands in a switch case that fits in 1 line not ((cleansed_line.find('case ') != -1 or cleansed_line.find('default:') != -1) and cleansed_line.find('break;') != -1)): error(filename, linenum, 'whitespace/newline', 0, 'More than one command on the same line') # Some more style checks CheckBraces(filename, clean_lines, linenum, error) CheckTrailingSemicolon(filename, clean_lines, linenum, error) CheckEmptyBlockBody(filename, clean_lines, linenum, error) CheckAccess(filename, clean_lines, linenum, nesting_state, error) CheckSpacing(filename, clean_lines, linenum, nesting_state, error) CheckOperatorSpacing(filename, clean_lines, linenum, error) CheckParenthesisSpacing(filename, clean_lines, linenum, error) CheckCommaSpacing(filename, clean_lines, linenum, error) CheckBracesSpacing(filename, clean_lines, linenum, error) CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) CheckRValueReference(filename, clean_lines, linenum, nesting_state, error) CheckCheck(filename, clean_lines, linenum, error) CheckAltTokens(filename, clean_lines, linenum, error) classinfo = nesting_state.InnermostClass() if classinfo: CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
[ "def", "CheckStyle", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "file_extension", ",", "nesting_state", ",", "error", ")", ":", "# Don't use \"elided\" lines here, otherwise we can't check commented lines.", "# Don't want to use \"raw\" either, because we don't want to check inside C++11", "# raw strings,", "raw_lines", "=", "clean_lines", ".", "lines_without_raw_strings", "line", "=", "raw_lines", "[", "linenum", "]", "if", "line", ".", "find", "(", "'\\t'", ")", "!=", "-", "1", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/tab'", ",", "1", ",", "'Tab found; better to use spaces'", ")", "# One or three blank spaces at the beginning of the line is weird; it's", "# hard to reconcile that with 2-space indents.", "# NOTE: here are the conditions rob pike used for his tests. Mine aren't", "# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces", "# if(RLENGTH > 20) complain = 0;", "# if(match($0, \" +(error|private|public|protected):\")) complain = 0;", "# if(match(prev, \"&& *$\")) complain = 0;", "# if(match(prev, \"\\\\|\\\\| *$\")) complain = 0;", "# if(match(prev, \"[\\\",=><] *$\")) complain = 0;", "# if(match($0, \" <<\")) complain = 0;", "# if(match(prev, \" +for \\\\(\")) complain = 0;", "# if(prevodd && match(prevprev, \" +for \\\\(\")) complain = 0;", "scope_or_label_pattern", "=", "r'\\s*\\w+\\s*:\\s*\\\\?$'", "classinfo", "=", "nesting_state", ".", "InnermostClass", "(", ")", "initial_spaces", "=", "0", "cleansed_line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "while", "initial_spaces", "<", "len", "(", "line", ")", "and", "line", "[", "initial_spaces", "]", "==", "' '", ":", "initial_spaces", "+=", "1", "if", "line", "and", "line", "[", "-", "1", "]", ".", "isspace", "(", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/end_of_line'", ",", "4", ",", "'Line ends in whitespace. Consider deleting these extra spaces.'", ")", "# There are certain situations we allow one space, notably for", "# section labels, and also lines containing multi-line raw strings.", "elif", "(", "(", "initial_spaces", "==", "1", "or", "initial_spaces", "==", "3", ")", "and", "not", "Match", "(", "scope_or_label_pattern", ",", "cleansed_line", ")", "and", "not", "(", "clean_lines", ".", "raw_lines", "[", "linenum", "]", "!=", "line", "and", "Match", "(", "r'^\\s*\"\"'", ",", "line", ")", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/indent'", ",", "3", ",", "'Weird number of spaces at line-start. '", "'Are you using a 2-space indent?'", ")", "# Check if the line is a header guard.", "is_header_guard", "=", "False", "if", "file_extension", "==", "'h'", ":", "cppvar", "=", "GetHeaderGuardCPPVariable", "(", "filename", ")", "if", "(", "line", ".", "startswith", "(", "'#ifndef %s'", "%", "cppvar", ")", "or", "line", ".", "startswith", "(", "'#define %s'", "%", "cppvar", ")", "or", "line", ".", "startswith", "(", "'#endif // %s'", "%", "cppvar", ")", ")", ":", "is_header_guard", "=", "True", "# #include lines and header guards can be long, since there's no clean way to", "# split them.", "#", "# URLs can be long too. 
It's possible to split these, but it makes them", "# harder to cut&paste.", "#", "# The \"$Id:...$\" comment may also get very long without it being the", "# developers fault.", "if", "(", "not", "line", ".", "startswith", "(", "'#include'", ")", "and", "not", "is_header_guard", "and", "not", "Match", "(", "r'^\\s*//.*http(s?)://\\S*$'", ",", "line", ")", "and", "not", "Match", "(", "r'^// \\$Id:.*#[0-9]+ \\$$'", ",", "line", ")", ")", ":", "line_width", "=", "GetLineWidth", "(", "line", ")", "extended_length", "=", "int", "(", "(", "_line_length", "*", "1.25", ")", ")", "if", "line_width", ">", "extended_length", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/line_length'", ",", "4", ",", "'Lines should very rarely be longer than %i characters'", "%", "extended_length", ")", "elif", "line_width", ">", "_line_length", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/line_length'", ",", "2", ",", "'Lines should be <= %i characters long'", "%", "_line_length", ")", "if", "(", "cleansed_line", ".", "count", "(", "';'", ")", ">", "1", "and", "# for loops are allowed two ;'s (and may run over two lines).", "cleansed_line", ".", "find", "(", "'for'", ")", "==", "-", "1", "and", "(", "GetPreviousNonBlankLine", "(", "clean_lines", ",", "linenum", ")", "[", "0", "]", ".", "find", "(", "'for'", ")", "==", "-", "1", "or", "GetPreviousNonBlankLine", "(", "clean_lines", ",", "linenum", ")", "[", "0", "]", ".", "find", "(", "';'", ")", "!=", "-", "1", ")", "and", "# It's ok to have many commands in a switch case that fits in 1 line", "not", "(", "(", "cleansed_line", ".", "find", "(", "'case '", ")", "!=", "-", "1", "or", "cleansed_line", ".", "find", "(", "'default:'", ")", "!=", "-", "1", ")", "and", "cleansed_line", ".", "find", "(", "'break;'", ")", "!=", "-", "1", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/newline'", ",", "0", ",", "'More than one command on the same line'", ")", "# Some more style checks", "CheckBraces", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckTrailingSemicolon", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckEmptyBlockBody", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckAccess", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "nesting_state", ",", "error", ")", "CheckSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "nesting_state", ",", "error", ")", "CheckOperatorSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckParenthesisSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckCommaSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckBracesSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckSpacingForFunctionCall", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckRValueReference", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "nesting_state", ",", "error", ")", "CheckCheck", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckAltTokens", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "classinfo", "=", "nesting_state", ".", "InnermostClass", "(", ")", "if", "classinfo", ":", "CheckSectionSpacing", "(", "filename", ",", "clean_lines", ",", "classinfo", ",", "linenum", ",", "error", ")" ]
47.042735
0.01121
def _track_change(self, name, value, formatter=None): """Track that a change happened. This function is only needed for manually recording changes that are not captured by changes to properties of this object that are tracked automatically. Classes that inherit from `emulation_mixin` should use this function to record interesting changes in their internal state or events that happen. The `value` parameter that you pass here should be a native python object best representing what the value of the property that changed is. When saved to disk, it will be converted to a string using: `str(value)`. If you do not like the string that would result from such a call, you can pass a custom formatter that will be called as `formatter(value)` and must return a string. Args: name (str): The name of the property that changed. value (object): The new value of the property. formatter (callable): Optional function to convert value to a string. This function will only be called if track_changes() is enabled and `name` is on the whitelist for properties that should be tracked. If `formatter` is not passed or is None, it will default to `str` """ self._emulation_log.track_change(self._emulation_address, name, value, formatter)
[ "def", "_track_change", "(", "self", ",", "name", ",", "value", ",", "formatter", "=", "None", ")", ":", "self", ".", "_emulation_log", ".", "track_change", "(", "self", ".", "_emulation_address", ",", "name", ",", "value", ",", "formatter", ")" ]
52.925926
0.002062
def eli_hanley_dense(T, MW, Tc, Vc, Zc, omega, Cvm, Vm): r'''Estimates the thermal conductivity of a gas at high pressure as a function of temperature using the reference fluid method of Eli and Hanley [1]_ as shown in [2]_. .. math:: Tr = min(Tr, 2) Vr = min(Vr, 2) f = \frac{T_c}{190.4}\theta h = \frac{V_c}{9.92E-5}\psi T_0 = T/f \rho_0 = \frac{16.04}{V}h \theta = 1 + (\omega-0.011)\left(0.09057 - 0.86276\ln Tr + \left( 0.31664 - \frac{0.46568}{Tr}\right) (V_r - 0.5)\right) \psi = [1 + (\omega - 0.011)(0.39490(V_r - 1.02355) - 0.93281(V_r - 0.75464)\ln T_r]\frac{0.288}{Z_c} \lambda_1 = 1944 \eta_0 \lambda_2 = \left\{b_1 + b_2\left[b_3 - \ln \left(\frac{T_0}{b_4} \right)\right]^2\right\}\rho_0 \lambda_3 = \exp\left(a_1 + \frac{a_2}{T_0}\right)\left\{\exp[(a_3 + \frac{a_4}{T_0^{1.5}})\rho_0^{0.1} + (\frac{\rho_0}{0.1617} - 1) \rho_0^{0.5}(a_5 + \frac{a_6}{T_0} + \frac{a_7}{T_0^2})] - 1\right\} \lambda^{**} = [\lambda_1 + \lambda_2 + \lambda_3]H H = \left(\frac{16.04}{MW}\right)^{0.5}f^{0.5}/h^{2/3} X = \left\{\left[1 - \frac{T}{f}\left(\frac{df}{dT}\right)_v \right] \frac{0.288}{Z_c}\right\}^{1.5} \left(\frac{df}{dT}\right)_v = \frac{T_c}{190.4}\left(\frac{d\theta} {d T}\right)_v \left(\frac{d\theta}{d T}\right)_v = (\omega-0.011)\left[ \frac{-0.86276}{T} + (V_r-0.5)\frac{0.46568T_c}{T^2}\right] Parameters ---------- T : float Temperature of the gas [K] MW : float Molecular weight of the gas [g/mol] Tc : float Critical temperature of the gas [K] Vc : float Critical volume of the gas [m^3/mol] Zc : float Critical compressibility of the gas [] omega : float Acentric factor of the gas [-] Cvm : float Molar contant volume heat capacity of the gas [J/mol/K] Vm : float Volume of the gas at T and P [m^3/mol] Returns ------- kg : float Estimated dense gas thermal conductivity [W/m/k] Notes ----- Reference fluid is Methane. MW internally converted to kg/g-mol. Examples -------- >>> eli_hanley_dense(T=473., MW=42.081, Tc=364.9, Vc=1.81E-4, Zc=0.274, ... omega=0.144, Cvm=82.70, Vm=1.721E-4) 0.06038475936515042 References ---------- .. [1] Ely, James F., and H. J. M. Hanley. "Prediction of Transport Properties. 2. Thermal Conductivity of Pure Fluids and Mixtures." Industrial & Engineering Chemistry Fundamentals 22, no. 1 (February 1, 1983): 90-97. doi:10.1021/i100009a016. .. [2] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E. Properties of Gases and Liquids. McGraw-Hill Companies, 1987. ''' Cs = [2.907741307E6, -3.312874033E6, 1.608101838E6, -4.331904871E5, 7.062481330E4, -7.116620750E3, 4.325174400E2, -1.445911210E1, 2.037119479E-1] Tr = T/Tc if Tr > 2: Tr = 2 Vr = Vm/Vc if Vr > 2: Vr = 2 theta = 1 + (omega - 0.011)*(0.09057 - 0.86276*log(Tr) + (0.31664 - 0.46568/Tr)*(Vr-0.5)) psi = (1 + (omega-0.011)*(0.39490*(Vr-1.02355) - 0.93281*(Vr-0.75464)*log(Tr)))*0.288/Zc f = Tc/190.4*theta h = Vc/9.92E-5*psi T0 = T/f rho0 = 16.04/(Vm*1E6)*h # Vm must be in cm^3/mol here. eta0 = 1E-7*sum([Cs[i]*T0**((i+1-4)/3.) for i in range(len(Cs))]) k1 = 1944*eta0 b1 = -0.25276920E0 b2 = 0.334328590E0 b3 = 1.12 b4 = 0.1680E3 k2 = (b1 + b2*(b3 - log(T0/b4))**2)/1000.*rho0 a1 = -7.19771 a2 = 85.67822 a3 = 12.47183 a4 = -984.6252 a5 = 0.3594685 a6 = 69.79841 a7 = -872.8833 k3 = exp(a1 + a2/T0)*(exp((a3 + a4/T0**1.5)*rho0**0.1 + (rho0/0.1617 - 1)*rho0**0.5*(a5 + a6/T0 + a7/T0**2)) - 1)/1000. if T/Tc > 2: dtheta = 0 else: dtheta = (omega - 0.011)*(-0.86276/T + (Vr-0.5)*0.46568*Tc/T**2) dfdT = Tc/190.4*dtheta X = ((1 - T/f*dfdT)*0.288/Zc)**1.5 H = (16.04/MW)**0.5*f**0.5/h**(2/3.) 
ks = (k1*X + k2 + k3)*H ### Uses calculations similar to those for pure species here theta = 1 + (omega - 0.011)*(0.56553 - 0.86276*log(Tr) - 0.69852/Tr) psi = (1 + (omega-0.011)*(0.38560 - 1.1617*log(Tr)))*0.288/Zc f = Tc/190.4*theta h = Vc/9.92E-5*psi T0 = T/f eta0 = 1E-7*sum([Cs[i]*T0**((i+1-4)/3.) for i in range(len(Cs))]) H = (16.04/MW)**0.5*f**0.5/h**(2/3.) etas = eta0*H*MW/16.04 k = ks + etas/(MW/1000.)*1.32*(Cvm-3*R/2.) return k
[ "def", "eli_hanley_dense", "(", "T", ",", "MW", ",", "Tc", ",", "Vc", ",", "Zc", ",", "omega", ",", "Cvm", ",", "Vm", ")", ":", "Cs", "=", "[", "2.907741307E6", ",", "-", "3.312874033E6", ",", "1.608101838E6", ",", "-", "4.331904871E5", ",", "7.062481330E4", ",", "-", "7.116620750E3", ",", "4.325174400E2", ",", "-", "1.445911210E1", ",", "2.037119479E-1", "]", "Tr", "=", "T", "/", "Tc", "if", "Tr", ">", "2", ":", "Tr", "=", "2", "Vr", "=", "Vm", "/", "Vc", "if", "Vr", ">", "2", ":", "Vr", "=", "2", "theta", "=", "1", "+", "(", "omega", "-", "0.011", ")", "*", "(", "0.09057", "-", "0.86276", "*", "log", "(", "Tr", ")", "+", "(", "0.31664", "-", "0.46568", "/", "Tr", ")", "*", "(", "Vr", "-", "0.5", ")", ")", "psi", "=", "(", "1", "+", "(", "omega", "-", "0.011", ")", "*", "(", "0.39490", "*", "(", "Vr", "-", "1.02355", ")", "-", "0.93281", "*", "(", "Vr", "-", "0.75464", ")", "*", "log", "(", "Tr", ")", ")", ")", "*", "0.288", "/", "Zc", "f", "=", "Tc", "/", "190.4", "*", "theta", "h", "=", "Vc", "/", "9.92E-5", "*", "psi", "T0", "=", "T", "/", "f", "rho0", "=", "16.04", "/", "(", "Vm", "*", "1E6", ")", "*", "h", "# Vm must be in cm^3/mol here.", "eta0", "=", "1E-7", "*", "sum", "(", "[", "Cs", "[", "i", "]", "*", "T0", "**", "(", "(", "i", "+", "1", "-", "4", ")", "/", "3.", ")", "for", "i", "in", "range", "(", "len", "(", "Cs", ")", ")", "]", ")", "k1", "=", "1944", "*", "eta0", "b1", "=", "-", "0.25276920E0", "b2", "=", "0.334328590E0", "b3", "=", "1.12", "b4", "=", "0.1680E3", "k2", "=", "(", "b1", "+", "b2", "*", "(", "b3", "-", "log", "(", "T0", "/", "b4", ")", ")", "**", "2", ")", "/", "1000.", "*", "rho0", "a1", "=", "-", "7.19771", "a2", "=", "85.67822", "a3", "=", "12.47183", "a4", "=", "-", "984.6252", "a5", "=", "0.3594685", "a6", "=", "69.79841", "a7", "=", "-", "872.8833", "k3", "=", "exp", "(", "a1", "+", "a2", "/", "T0", ")", "*", "(", "exp", "(", "(", "a3", "+", "a4", "/", "T0", "**", "1.5", ")", "*", "rho0", "**", "0.1", "+", "(", "rho0", "/", "0.1617", "-", "1", ")", "*", "rho0", "**", "0.5", "*", "(", "a5", "+", "a6", "/", "T0", "+", "a7", "/", "T0", "**", "2", ")", ")", "-", "1", ")", "/", "1000.", "if", "T", "/", "Tc", ">", "2", ":", "dtheta", "=", "0", "else", ":", "dtheta", "=", "(", "omega", "-", "0.011", ")", "*", "(", "-", "0.86276", "/", "T", "+", "(", "Vr", "-", "0.5", ")", "*", "0.46568", "*", "Tc", "/", "T", "**", "2", ")", "dfdT", "=", "Tc", "/", "190.4", "*", "dtheta", "X", "=", "(", "(", "1", "-", "T", "/", "f", "*", "dfdT", ")", "*", "0.288", "/", "Zc", ")", "**", "1.5", "H", "=", "(", "16.04", "/", "MW", ")", "**", "0.5", "*", "f", "**", "0.5", "/", "h", "**", "(", "2", "/", "3.", ")", "ks", "=", "(", "k1", "*", "X", "+", "k2", "+", "k3", ")", "*", "H", "### Uses calculations similar to those for pure species here", "theta", "=", "1", "+", "(", "omega", "-", "0.011", ")", "*", "(", "0.56553", "-", "0.86276", "*", "log", "(", "Tr", ")", "-", "0.69852", "/", "Tr", ")", "psi", "=", "(", "1", "+", "(", "omega", "-", "0.011", ")", "*", "(", "0.38560", "-", "1.1617", "*", "log", "(", "Tr", ")", ")", ")", "*", "0.288", "/", "Zc", "f", "=", "Tc", "/", "190.4", "*", "theta", "h", "=", "Vc", "/", "9.92E-5", "*", "psi", "T0", "=", "T", "/", "f", "eta0", "=", "1E-7", "*", "sum", "(", "[", "Cs", "[", "i", "]", "*", "T0", "**", "(", "(", "i", "+", "1", "-", "4", ")", "/", "3.", ")", "for", "i", "in", "range", "(", "len", "(", "Cs", ")", ")", "]", ")", "H", "=", "(", "16.04", "/", "MW", ")", "**", "0.5", "*", "f", "**", "0.5", "/", "h", "**", "(", "2", "/", "3.", ")", 
"etas", "=", "eta0", "*", "H", "*", "MW", "/", "16.04", "k", "=", "ks", "+", "etas", "/", "(", "MW", "/", "1000.", ")", "*", "1.32", "*", "(", "Cvm", "-", "3", "*", "R", "/", "2.", ")", "return", "k" ]
30.868966
0.001082
def _proxy(self):
        """
        Generate an instance context for the instance; the context is capable of
        performing various actions. All instance actions are proxied to the context.

        :returns: WorkersStatisticsContext for this WorkersStatisticsInstance
        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_statistics.WorkersStatisticsContext
        """
        if self._context is None:
            self._context = WorkersStatisticsContext(
                self._version,
                workspace_sid=self._solution['workspace_sid'],
            )
        return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "WorkersStatisticsContext", "(", "self", ".", "_version", ",", "workspace_sid", "=", "self", ".", "_solution", "[", "'workspace_sid'", "]", ",", ")", "return", "self", ".", "_context" ]
42.928571
0.008143
def find_closest_match(target_track, tracks): """ Return closest match to target track """ track = None # Get a list of (track, artist match ratio, name match ratio) tracks_with_match_ratio = [( track, get_similarity(target_track.artist, track.artist), get_similarity(target_track.name, track.name), ) for track in tracks] # Sort by artist then by title sorted_tracks = sorted( tracks_with_match_ratio, key=lambda t: (t[1], t[2]), reverse=True # Descending, highest match ratio first ) if sorted_tracks: track = sorted_tracks[0][0] # Closest match to query return track
[ "def", "find_closest_match", "(", "target_track", ",", "tracks", ")", ":", "track", "=", "None", "# Get a list of (track, artist match ratio, name match ratio)", "tracks_with_match_ratio", "=", "[", "(", "track", ",", "get_similarity", "(", "target_track", ".", "artist", ",", "track", ".", "artist", ")", ",", "get_similarity", "(", "target_track", ".", "name", ",", "track", ".", "name", ")", ",", ")", "for", "track", "in", "tracks", "]", "# Sort by artist then by title", "sorted_tracks", "=", "sorted", "(", "tracks_with_match_ratio", ",", "key", "=", "lambda", "t", ":", "(", "t", "[", "1", "]", ",", "t", "[", "2", "]", ")", ",", "reverse", "=", "True", "# Descending, highest match ratio first", ")", "if", "sorted_tracks", ":", "track", "=", "sorted_tracks", "[", "0", "]", "[", "0", "]", "# Closest match to query", "return", "track" ]
32.8
0.001481
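A brief, self-contained sketch of the ranking idea in the find_closest_match record above. The Track namedtuple, the difflib-based similarity helper, and the sample tracks are assumptions standing in for the module's own get_similarity and track objects; it only illustrates the artist-then-title ordering.

from collections import namedtuple
from difflib import SequenceMatcher

Track = namedtuple("Track", ["artist", "name"])

def similarity(a, b):
    # Stand-in for the module's get_similarity(); returns a 0.0-1.0 ratio.
    return SequenceMatcher(None, a.lower(), b.lower()).ratio()

target = Track("Daft Punk", "Harder Better Faster Stronger")
candidates = [
    Track("Daft Punk", "One More Time"),
    Track("Daft Punk", "Harder, Better, Faster, Stronger"),
    Track("Dft Pnk", "Harder Better Faster"),
]

# Rank by artist similarity first, then by title similarity (highest first),
# which is the same ordering criterion used in the record above.
ranked = sorted(
    candidates,
    key=lambda t: (similarity(target.artist, t.artist),
                   similarity(target.name, t.name)),
    reverse=True,
)
print(ranked[0])  # the punctuation-only variant of the target title wins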
def process(self, metrics, config):
        """Processes metrics.

        This method is called by the Snap daemon during the process phase
        of the execution of a Snap workflow. Examples of processing metrics
        include applying filtering, max, min, average functions as well as
        adding additional context to the metrics, to name just a few.

        In this example we are adding a tag called 'instance-id' to every
        metric.

        Args:
            metrics (obj:`list` of `snap_plugin.v1.Metric`):
                List of metrics to be processed.

        Returns:
            :obj:`list` of `snap_plugin.v1.Metric`:
                List of processed metrics.
        """
        LOG.debug("Process called")
        for metric in metrics:
            metric.tags["instance-id"] = config["instance-id"]
        return metrics
[ "def", "process", "(", "self", ",", "metrics", ",", "config", ")", ":", "LOG", ".", "debug", "(", "\"Process called\"", ")", "for", "metric", "in", "metrics", ":", "metric", ".", "tags", "[", "\"instance-id\"", "]", "=", "config", "[", "\"instance-id\"", "]", "return", "metrics" ]
37.363636
0.002372
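A minimal sketch of the tagging behaviour in the process() record above; the FakeMetric stand-in (any object exposing a tags dict) and the config value are assumptions, not the snap_plugin API.

class FakeMetric:
    # Stand-in for snap_plugin.v1.Metric: only the .tags dict matters here.
    def __init__(self):
        self.tags = {}

metrics = [FakeMetric(), FakeMetric()]
config = {"instance-id": "i-0abc123"}

# Same loop as the record: every metric gets the configured tag attached.
for metric in metrics:
    metric.tags["instance-id"] = config["instance-id"]

print([m.tags for m in metrics])  # [{'instance-id': 'i-0abc123'}, {...}]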
def sentences(self): """Iterate over all sentences (sentence_id, sentence) in the document, sentence is a list of 4-tuples (word,id,pos,lemma)""" prevp = 0 prevs = 0 sentence = []; sentence_id = "" for word, id, pos, lemma in iter(self): try: doc_id, ptype, p, s, w = re.findall('([\w\d-]+)\.(p|head)\.(\d+)\.s\.(\d+)\.w\.(\d+)',id)[0] if ((p != prevp) or (s != prevs)) and sentence: yield sentence_id, sentence sentence = [] sentence_id = doc_id + '.' + ptype + '.' + str(p) + '.s.' + str(s) prevp = p except IndexError: doc_id, s, w = re.findall('([\w\d-]+)\.s\.(\d+)\.w\.(\d+)',id)[0] if s != prevs and sentence: yield sentence_id, sentence sentence = [] sentence_id = doc_id + '.s.' + str(s) sentence.append( (word,id,pos,lemma) ) prevs = s if sentence: yield sentence_id, sentence
[ "def", "sentences", "(", "self", ")", ":", "prevp", "=", "0", "prevs", "=", "0", "sentence", "=", "[", "]", "sentence_id", "=", "\"\"", "for", "word", ",", "id", ",", "pos", ",", "lemma", "in", "iter", "(", "self", ")", ":", "try", ":", "doc_id", ",", "ptype", ",", "p", ",", "s", ",", "w", "=", "re", ".", "findall", "(", "'([\\w\\d-]+)\\.(p|head)\\.(\\d+)\\.s\\.(\\d+)\\.w\\.(\\d+)'", ",", "id", ")", "[", "0", "]", "if", "(", "(", "p", "!=", "prevp", ")", "or", "(", "s", "!=", "prevs", ")", ")", "and", "sentence", ":", "yield", "sentence_id", ",", "sentence", "sentence", "=", "[", "]", "sentence_id", "=", "doc_id", "+", "'.'", "+", "ptype", "+", "'.'", "+", "str", "(", "p", ")", "+", "'.s.'", "+", "str", "(", "s", ")", "prevp", "=", "p", "except", "IndexError", ":", "doc_id", ",", "s", ",", "w", "=", "re", ".", "findall", "(", "'([\\w\\d-]+)\\.s\\.(\\d+)\\.w\\.(\\d+)'", ",", "id", ")", "[", "0", "]", "if", "s", "!=", "prevs", "and", "sentence", ":", "yield", "sentence_id", ",", "sentence", "sentence", "=", "[", "]", "sentence_id", "=", "doc_id", "+", "'.s.'", "+", "str", "(", "s", ")", "sentence", ".", "append", "(", "(", "word", ",", "id", ",", "pos", ",", "lemma", ")", ")", "prevs", "=", "s", "if", "sentence", ":", "yield", "sentence_id", ",", "sentence" ]
44.916667
0.029973
def _set_text(self, value): """ set text at current working_index. Return whether it changed. """ working_index = self.working_index working_lines = self._working_lines original_value = working_lines[working_index] working_lines[working_index] = value # Return True when this text has been changed. if len(value) != len(original_value): # For Python 2, it seems that when two strings have a different # length and one is a prefix of the other, Python still scans # character by character to see whether the strings are different. # (Some benchmarking showed significant differences for big # documents. >100,000 of lines.) return True elif value != original_value: return True return False
[ "def", "_set_text", "(", "self", ",", "value", ")", ":", "working_index", "=", "self", ".", "working_index", "working_lines", "=", "self", ".", "_working_lines", "original_value", "=", "working_lines", "[", "working_index", "]", "working_lines", "[", "working_index", "]", "=", "value", "# Return True when this text has been changed.", "if", "len", "(", "value", ")", "!=", "len", "(", "original_value", ")", ":", "# For Python 2, it seems that when two strings have a different", "# length and one is a prefix of the other, Python still scans", "# character by character to see whether the strings are different.", "# (Some benchmarking showed significant differences for big", "# documents. >100,000 of lines.)", "return", "True", "elif", "value", "!=", "original_value", ":", "return", "True", "return", "False" ]
43.631579
0.002361
def com_daltonmaag_check_ufolint(font): """Run ufolint on UFO source directory.""" import subprocess ufolint_cmd = ["ufolint", font] try: subprocess.check_output(ufolint_cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: yield FAIL, ("ufolint failed the UFO source. Output follows :" "\n\n{}\n").format(e.output.decode()) except OSError: yield ERROR, "ufolint is not available!" else: yield PASS, "ufolint passed the UFO source."
[ "def", "com_daltonmaag_check_ufolint", "(", "font", ")", ":", "import", "subprocess", "ufolint_cmd", "=", "[", "\"ufolint\"", ",", "font", "]", "try", ":", "subprocess", ".", "check_output", "(", "ufolint_cmd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "yield", "FAIL", ",", "(", "\"ufolint failed the UFO source. Output follows :\"", "\"\\n\\n{}\\n\"", ")", ".", "format", "(", "e", ".", "output", ".", "decode", "(", ")", ")", "except", "OSError", ":", "yield", "ERROR", ",", "\"ufolint is not available!\"", "else", ":", "yield", "PASS", ",", "\"ufolint passed the UFO source.\"" ]
34.785714
0.016
def fbeta_score(
    gold, pred, pos_label=1, beta=1.0, ignore_in_gold=[], ignore_in_pred=[]
):
    """
    Calculate the F-beta score for a single class.

    Args:
        gold: A 1d array-like of gold labels
        pred: A 1d array-like of predicted labels (assuming abstain = 0)
        ignore_in_gold: A list of labels for which elements having that gold
            label will be ignored.
        ignore_in_pred: A list of labels for which elements having that pred
            label will be ignored.
        pos_label: The class label to treat as positive for f-beta
        beta: The beta to use in the f-beta score calculation

    Returns:
        fbeta: The (float) f-beta score
    """
    gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)

    pre = precision_score(gold, pred, pos_label=pos_label)
    rec = recall_score(gold, pred, pos_label=pos_label)

    if pre or rec:
        fbeta = (1 + beta ** 2) * (pre * rec) / ((beta ** 2 * pre) + rec)
    else:
        fbeta = 0

    return fbeta
[ "def", "fbeta_score", "(", "gold", ",", "pred", ",", "pos_label", "=", "1", ",", "beta", "=", "1.0", ",", "ignore_in_gold", "=", "[", "]", ",", "ignore_in_pred", "=", "[", "]", ")", ":", "gold", ",", "pred", "=", "_preprocess", "(", "gold", ",", "pred", ",", "ignore_in_gold", ",", "ignore_in_pred", ")", "pre", "=", "precision_score", "(", "gold", ",", "pred", ",", "pos_label", "=", "pos_label", ")", "rec", "=", "recall_score", "(", "gold", ",", "pred", ",", "pos_label", "=", "pos_label", ")", "if", "pre", "or", "rec", ":", "fbeta", "=", "(", "1", "+", "beta", "**", "2", ")", "*", "(", "pre", "*", "rec", ")", "/", "(", "(", "beta", "**", "2", "*", "pre", ")", "+", "rec", ")", "else", ":", "fbeta", "=", "0", "return", "fbeta" ]
35.285714
0.000985
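To make the F-beta formula in the record above concrete, here is a hand-worked sketch on made-up labels; abstains and the ignore lists are left out, and the counts are computed directly rather than through the library's precision_score/recall_score helpers.

gold = [1, 1, 1, 2, 2, 1]
pred = [1, 2, 1, 2, 1, 1]
pos_label, beta = 1, 1.0

tp = sum(1 for g, p in zip(gold, pred) if g == pos_label and p == pos_label)
fp = sum(1 for g, p in zip(gold, pred) if g != pos_label and p == pos_label)
fn = sum(1 for g, p in zip(gold, pred) if g == pos_label and p != pos_label)

pre = tp / (tp + fp) if (tp + fp) else 0.0        # 3 / 4 = 0.75
rec = tp / (tp + fn) if (tp + fn) else 0.0        # 3 / 4 = 0.75
fbeta = ((1 + beta ** 2) * (pre * rec) / ((beta ** 2 * pre) + rec)
         if (pre or rec) else 0.0)
print(pre, rec, fbeta)                            # 0.75 0.75 0.75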
def visitLexerBlock(self, ctx: jsgParser.LexerBlockContext): """ lexerBlock: OPREN lexeraltList CPREN """ self._rulePattern += '(' self.visitChildren(ctx) self._rulePattern += ')'
[ "def", "visitLexerBlock", "(", "self", ",", "ctx", ":", "jsgParser", ".", "LexerBlockContext", ")", ":", "self", ".", "_rulePattern", "+=", "'('", "self", ".", "visitChildren", "(", "ctx", ")", "self", ".", "_rulePattern", "+=", "')'" ]
41.4
0.009479
def execute(self, style, xpoints, ypoints, zpoints, mask=None, backend='vectorized', specified_drift_arrays=None): """Calculates a kriged grid and the associated variance. This is now the method that performs the main kriging calculation. Note that currently measurements (i.e., z values) are considered 'exact'. This means that, when a specified coordinate for interpolation is exactly the same as one of the data points, the variogram evaluated at the point is forced to be zero. Also, the diagonal of the kriging matrix is also always forced to be zero. In forcing the variogram evaluated at data points to be zero, we are effectively saying that there is no variance at that point (no uncertainty, so the value is 'exact'). In the future, the code may include an extra 'exact_values' boolean flag that can be adjusted to specify whether to treat the measurements as 'exact'. Setting the flag to false would indicate that the variogram should not be forced to be zero at zero distance (i.e., when evaluated at data points). Instead, the uncertainty in the point will be equal to the nugget. This would mean that the diagonal of the kriging matrix would be set to the nugget instead of to zero. Parameters ---------- style : str Specifies how to treat input kriging points. Specifying 'grid' treats xpoints, ypoints, and zpoints as arrays of x, y, and z coordinates that define a rectangular grid. Specifying 'points' treats xpoints, ypoints, and zpoints as arrays that provide coordinates at which to solve the kriging system. Specifying 'masked' treats xpoints, ypoints, and zpoints as arrays of x, y, and z coordinates that define a rectangular grid and uses mask to only evaluate specific points in the grid. xpoints : array_like, shape (N,) or (N, 1) If style is specific as 'grid' or 'masked', x-coordinates of LxMxN grid. If style is specified as 'points', x-coordinates of specific points at which to solve kriging system. ypoints : array_like, shape (M,) or (M, 1) If style is specified as 'grid' or 'masked', y-coordinates of LxMxN grid. If style is specified as 'points', y-coordinates of specific points at which to solve kriging system. Note that in this case, xpoints, ypoints, and zpoints must have the same dimensions (i.e., L = M = N). zpoints : array_like, shape (L,) or (L, 1) If style is specified as 'grid' or 'masked', z-coordinates of LxMxN grid. If style is specified as 'points', z-coordinates of specific points at which to solve kriging system. Note that in this case, xpoints, ypoints, and zpoints must have the same dimensions (i.e., L = M = N). mask : boolean array, shape (L, M, N), optional Specifies the points in the rectangular grid defined by xpoints, ypoints, zpoints that are to be excluded in the kriging calculations. Must be provided if style is specified as 'masked'. False indicates that the point should not be masked, so the kriging system will be solved at the point. True indicates that the point should be masked, so the kriging system will not be solved at the point. backend : string, optional Specifies which approach to use in kriging. Specifying 'vectorized' will solve the entire kriging problem at once in a vectorized operation. This approach is faster but also can consume a significant amount of memory for large grids and/or large datasets. Specifying 'loop' will loop through each point at which the kriging system is to be solved. This approach is slower but also less memory-intensive. Default is 'vectorized'. 
specified_drift_arrays : list of array-like objects, optional Specifies the drift values at the points at which the kriging system is to be evaluated. Required if 'specified' drift provided in the list of drift terms when instantiating the UniversalKriging3D class. Must be a list of arrays in the same order as the list provided when instantiating the kriging object. Array(s) must be the same dimension as the specified grid or have the same number of points as the specified points; i.e., the arrays either must be shape (L, M, N), where L is the number of z grid-points, M is the number of y grid-points, and N is the number of x grid-points, or shape (N,) or (N, 1), where N is the number of points at which to evaluate the kriging system. Returns ------- kvalues : ndarray, shape (L, M, N) or (N,) or (N, 1) Interpolated values of specified grid or at the specified set of points. If style was specified as 'masked', kvalues will be a numpy masked array. sigmasq : ndarray, shape (L, M, N) or (N,) or (N, 1) Variance at specified grid points or at the specified set of points. If style was specified as 'masked', sigmasq will be a numpy masked array. """ if self.verbose: print("Executing Ordinary Kriging...\n") if style != 'grid' and style != 'masked' and style != 'points': raise ValueError("style argument must be 'grid', 'points', " "or 'masked'") xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True))) ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True))) zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True))) n = self.X_ADJUSTED.shape[0] n_withdrifts = n if self.regional_linear_drift: n_withdrifts += 3 if self.specified_drift: n_withdrifts += len(self.specified_drift_data_arrays) if self.functional_drift: n_withdrifts += len(self.functional_drift_terms) nx = xpts.size ny = ypts.size nz = zpts.size a = self._get_kriging_matrix(n, n_withdrifts) if style in ['grid', 'masked']: if style == 'masked': if mask is None: raise IOError("Must specify boolean masking array " "when style is 'masked'.") if mask.ndim != 3: raise ValueError("Mask is not three-dimensional.") if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx: if mask.shape[0] == nx and mask.shape[2] == nz and mask.shape[1] == ny: mask = mask.swapaxes(0, 2) else: raise ValueError("Mask dimensions do not match " "specified grid dimensions.") mask = mask.flatten() npt = nz * ny * nx grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing='ij') xpts = grid_x.flatten() ypts = grid_y.flatten() zpts = grid_z.flatten() elif style == 'points': if xpts.size != ypts.size and ypts.size != zpts.size: raise ValueError("xpoints and ypoints must have same " "dimensions when treated as listing " "discrete points.") npt = nx else: raise ValueError("style argument must be 'grid', 'points', " "or 'masked'") if specified_drift_arrays is None: specified_drift_arrays = [] spec_drift_grids = [] if self.specified_drift: if len(specified_drift_arrays) == 0: raise ValueError("Must provide drift values for kriging " "points when using 'specified' drift " "capability.") if type(specified_drift_arrays) is not list: raise TypeError("Arrays for specified drift terms must " "be encapsulated in a list.") for spec in specified_drift_arrays: if style in ['grid', 'masked']: if spec.ndim < 3: raise ValueError("Dimensions of drift values array do " "not match specified grid dimensions.") elif spec.shape[0] != nz or spec.shape[1] != ny or spec.shape[2] != nx: if spec.shape[0] == nx and spec.shape[2] == nz and spec.shape[1] == ny: 
spec_drift_grids.append(np.squeeze(spec.swapaxes(0, 2))) else: raise ValueError("Dimensions of drift values array " "do not match specified grid " "dimensions.") else: spec_drift_grids.append(np.squeeze(spec)) elif style == 'points': if spec.ndim != 1: raise ValueError("Dimensions of drift values array do " "not match specified grid dimensions.") elif spec.shape[0] != xpts.size: raise ValueError("Number of supplied drift values in " "array do not match specified number " "of kriging points.") else: spec_drift_grids.append(np.squeeze(spec)) if len(spec_drift_grids) != len(self.specified_drift_data_arrays): raise ValueError("Inconsistent number of specified " "drift terms supplied.") else: if len(specified_drift_arrays) != 0: warnings.warn("Provided specified drift values, but " "'specified' drift was not initialized during " "instantiation of UniversalKriging3D class.", RuntimeWarning) xpts, ypts, zpts = _adjust_for_anisotropy(np.vstack((xpts, ypts, zpts)).T, [self.XCENTER, self.YCENTER, self.ZCENTER], [self.anisotropy_scaling_y, self.anisotropy_scaling_z], [self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z]).T if style != 'masked': mask = np.zeros(npt, dtype='bool') xyz_points = np.concatenate((zpts[:, np.newaxis], ypts[:, np.newaxis], xpts[:, np.newaxis]), axis=1) xyz_data = np.concatenate((self.Z_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis], self.X_ADJUSTED[:, np.newaxis]), axis=1) bd = cdist(xyz_points, xyz_data, 'euclidean') if backend == 'vectorized': kvalues, sigmasq = self._exec_vector(a, bd, xyz_points, mask, n_withdrifts, spec_drift_grids) elif backend == 'loop': kvalues, sigmasq = self._exec_loop(a, bd, xyz_points, mask, n_withdrifts, spec_drift_grids) else: raise ValueError('Specified backend {} is not supported for ' '3D ordinary kriging.'.format(backend)) if style == 'masked': kvalues = np.ma.array(kvalues, mask=mask) sigmasq = np.ma.array(sigmasq, mask=mask) if style in ['masked', 'grid']: kvalues = kvalues.reshape((nz, ny, nx)) sigmasq = sigmasq.reshape((nz, ny, nx)) return kvalues, sigmasq
[ "def", "execute", "(", "self", ",", "style", ",", "xpoints", ",", "ypoints", ",", "zpoints", ",", "mask", "=", "None", ",", "backend", "=", "'vectorized'", ",", "specified_drift_arrays", "=", "None", ")", ":", "if", "self", ".", "verbose", ":", "print", "(", "\"Executing Ordinary Kriging...\\n\"", ")", "if", "style", "!=", "'grid'", "and", "style", "!=", "'masked'", "and", "style", "!=", "'points'", ":", "raise", "ValueError", "(", "\"style argument must be 'grid', 'points', \"", "\"or 'masked'\"", ")", "xpts", "=", "np", ".", "atleast_1d", "(", "np", ".", "squeeze", "(", "np", ".", "array", "(", "xpoints", ",", "copy", "=", "True", ")", ")", ")", "ypts", "=", "np", ".", "atleast_1d", "(", "np", ".", "squeeze", "(", "np", ".", "array", "(", "ypoints", ",", "copy", "=", "True", ")", ")", ")", "zpts", "=", "np", ".", "atleast_1d", "(", "np", ".", "squeeze", "(", "np", ".", "array", "(", "zpoints", ",", "copy", "=", "True", ")", ")", ")", "n", "=", "self", ".", "X_ADJUSTED", ".", "shape", "[", "0", "]", "n_withdrifts", "=", "n", "if", "self", ".", "regional_linear_drift", ":", "n_withdrifts", "+=", "3", "if", "self", ".", "specified_drift", ":", "n_withdrifts", "+=", "len", "(", "self", ".", "specified_drift_data_arrays", ")", "if", "self", ".", "functional_drift", ":", "n_withdrifts", "+=", "len", "(", "self", ".", "functional_drift_terms", ")", "nx", "=", "xpts", ".", "size", "ny", "=", "ypts", ".", "size", "nz", "=", "zpts", ".", "size", "a", "=", "self", ".", "_get_kriging_matrix", "(", "n", ",", "n_withdrifts", ")", "if", "style", "in", "[", "'grid'", ",", "'masked'", "]", ":", "if", "style", "==", "'masked'", ":", "if", "mask", "is", "None", ":", "raise", "IOError", "(", "\"Must specify boolean masking array \"", "\"when style is 'masked'.\"", ")", "if", "mask", ".", "ndim", "!=", "3", ":", "raise", "ValueError", "(", "\"Mask is not three-dimensional.\"", ")", "if", "mask", ".", "shape", "[", "0", "]", "!=", "nz", "or", "mask", ".", "shape", "[", "1", "]", "!=", "ny", "or", "mask", ".", "shape", "[", "2", "]", "!=", "nx", ":", "if", "mask", ".", "shape", "[", "0", "]", "==", "nx", "and", "mask", ".", "shape", "[", "2", "]", "==", "nz", "and", "mask", ".", "shape", "[", "1", "]", "==", "ny", ":", "mask", "=", "mask", ".", "swapaxes", "(", "0", ",", "2", ")", "else", ":", "raise", "ValueError", "(", "\"Mask dimensions do not match \"", "\"specified grid dimensions.\"", ")", "mask", "=", "mask", ".", "flatten", "(", ")", "npt", "=", "nz", "*", "ny", "*", "nx", "grid_z", ",", "grid_y", ",", "grid_x", "=", "np", ".", "meshgrid", "(", "zpts", ",", "ypts", ",", "xpts", ",", "indexing", "=", "'ij'", ")", "xpts", "=", "grid_x", ".", "flatten", "(", ")", "ypts", "=", "grid_y", ".", "flatten", "(", ")", "zpts", "=", "grid_z", ".", "flatten", "(", ")", "elif", "style", "==", "'points'", ":", "if", "xpts", ".", "size", "!=", "ypts", ".", "size", "and", "ypts", ".", "size", "!=", "zpts", ".", "size", ":", "raise", "ValueError", "(", "\"xpoints and ypoints must have same \"", "\"dimensions when treated as listing \"", "\"discrete points.\"", ")", "npt", "=", "nx", "else", ":", "raise", "ValueError", "(", "\"style argument must be 'grid', 'points', \"", "\"or 'masked'\"", ")", "if", "specified_drift_arrays", "is", "None", ":", "specified_drift_arrays", "=", "[", "]", "spec_drift_grids", "=", "[", "]", "if", "self", ".", "specified_drift", ":", "if", "len", "(", "specified_drift_arrays", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Must provide drift values for kriging \"", "\"points when using 
'specified' drift \"", "\"capability.\"", ")", "if", "type", "(", "specified_drift_arrays", ")", "is", "not", "list", ":", "raise", "TypeError", "(", "\"Arrays for specified drift terms must \"", "\"be encapsulated in a list.\"", ")", "for", "spec", "in", "specified_drift_arrays", ":", "if", "style", "in", "[", "'grid'", ",", "'masked'", "]", ":", "if", "spec", ".", "ndim", "<", "3", ":", "raise", "ValueError", "(", "\"Dimensions of drift values array do \"", "\"not match specified grid dimensions.\"", ")", "elif", "spec", ".", "shape", "[", "0", "]", "!=", "nz", "or", "spec", ".", "shape", "[", "1", "]", "!=", "ny", "or", "spec", ".", "shape", "[", "2", "]", "!=", "nx", ":", "if", "spec", ".", "shape", "[", "0", "]", "==", "nx", "and", "spec", ".", "shape", "[", "2", "]", "==", "nz", "and", "spec", ".", "shape", "[", "1", "]", "==", "ny", ":", "spec_drift_grids", ".", "append", "(", "np", ".", "squeeze", "(", "spec", ".", "swapaxes", "(", "0", ",", "2", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Dimensions of drift values array \"", "\"do not match specified grid \"", "\"dimensions.\"", ")", "else", ":", "spec_drift_grids", ".", "append", "(", "np", ".", "squeeze", "(", "spec", ")", ")", "elif", "style", "==", "'points'", ":", "if", "spec", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"Dimensions of drift values array do \"", "\"not match specified grid dimensions.\"", ")", "elif", "spec", ".", "shape", "[", "0", "]", "!=", "xpts", ".", "size", ":", "raise", "ValueError", "(", "\"Number of supplied drift values in \"", "\"array do not match specified number \"", "\"of kriging points.\"", ")", "else", ":", "spec_drift_grids", ".", "append", "(", "np", ".", "squeeze", "(", "spec", ")", ")", "if", "len", "(", "spec_drift_grids", ")", "!=", "len", "(", "self", ".", "specified_drift_data_arrays", ")", ":", "raise", "ValueError", "(", "\"Inconsistent number of specified \"", "\"drift terms supplied.\"", ")", "else", ":", "if", "len", "(", "specified_drift_arrays", ")", "!=", "0", ":", "warnings", ".", "warn", "(", "\"Provided specified drift values, but \"", "\"'specified' drift was not initialized during \"", "\"instantiation of UniversalKriging3D class.\"", ",", "RuntimeWarning", ")", "xpts", ",", "ypts", ",", "zpts", "=", "_adjust_for_anisotropy", "(", "np", ".", "vstack", "(", "(", "xpts", ",", "ypts", ",", "zpts", ")", ")", ".", "T", ",", "[", "self", ".", "XCENTER", ",", "self", ".", "YCENTER", ",", "self", ".", "ZCENTER", "]", ",", "[", "self", ".", "anisotropy_scaling_y", ",", "self", ".", "anisotropy_scaling_z", "]", ",", "[", "self", ".", "anisotropy_angle_x", ",", "self", ".", "anisotropy_angle_y", ",", "self", ".", "anisotropy_angle_z", "]", ")", ".", "T", "if", "style", "!=", "'masked'", ":", "mask", "=", "np", ".", "zeros", "(", "npt", ",", "dtype", "=", "'bool'", ")", "xyz_points", "=", "np", ".", "concatenate", "(", "(", "zpts", "[", ":", ",", "np", ".", "newaxis", "]", ",", "ypts", "[", ":", ",", "np", ".", "newaxis", "]", ",", "xpts", "[", ":", ",", "np", ".", "newaxis", "]", ")", ",", "axis", "=", "1", ")", "xyz_data", "=", "np", ".", "concatenate", "(", "(", "self", ".", "Z_ADJUSTED", "[", ":", ",", "np", ".", "newaxis", "]", ",", "self", ".", "Y_ADJUSTED", "[", ":", ",", "np", ".", "newaxis", "]", ",", "self", ".", "X_ADJUSTED", "[", ":", ",", "np", ".", "newaxis", "]", ")", ",", "axis", "=", "1", ")", "bd", "=", "cdist", "(", "xyz_points", ",", "xyz_data", ",", "'euclidean'", ")", "if", "backend", "==", "'vectorized'", ":", "kvalues", ",", 
"sigmasq", "=", "self", ".", "_exec_vector", "(", "a", ",", "bd", ",", "xyz_points", ",", "mask", ",", "n_withdrifts", ",", "spec_drift_grids", ")", "elif", "backend", "==", "'loop'", ":", "kvalues", ",", "sigmasq", "=", "self", ".", "_exec_loop", "(", "a", ",", "bd", ",", "xyz_points", ",", "mask", ",", "n_withdrifts", ",", "spec_drift_grids", ")", "else", ":", "raise", "ValueError", "(", "'Specified backend {} is not supported for '", "'3D ordinary kriging.'", ".", "format", "(", "backend", ")", ")", "if", "style", "==", "'masked'", ":", "kvalues", "=", "np", ".", "ma", ".", "array", "(", "kvalues", ",", "mask", "=", "mask", ")", "sigmasq", "=", "np", ".", "ma", ".", "array", "(", "sigmasq", ",", "mask", "=", "mask", ")", "if", "style", "in", "[", "'masked'", ",", "'grid'", "]", ":", "kvalues", "=", "kvalues", ".", "reshape", "(", "(", "nz", ",", "ny", ",", "nx", ")", ")", "sigmasq", "=", "sigmasq", ".", "reshape", "(", "(", "nz", ",", "ny", ",", "nx", ")", ")", "return", "kvalues", ",", "sigmasq" ]
54.608108
0.001539
def find_macro_by_name(self, name, package): """Find a macro in the graph by its name and package name, or None for any package. """ return self._find_by_name(name, package, 'macros', [NodeType.Macro])
[ "def", "find_macro_by_name", "(", "self", ",", "name", ",", "package", ")", ":", "return", "self", ".", "_find_by_name", "(", "name", ",", "package", ",", "'macros'", ",", "[", "NodeType", ".", "Macro", "]", ")" ]
45.8
0.008584
def add_worksheet(self, name=None): """ Adds a new worksheet """ url = self.build_url(self._endpoints.get('get_worksheets')) response = self.session.post(url, data={'name': name} if name else None) if not response: return None data = response.json() return self.worksheet_constructor(parent=self, **{self._cloud_data_key: data})
[ "def", "add_worksheet", "(", "self", ",", "name", "=", "None", ")", ":", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'get_worksheets'", ")", ")", "response", "=", "self", ".", "session", ".", "post", "(", "url", ",", "data", "=", "{", "'name'", ":", "name", "}", "if", "name", "else", "None", ")", "if", "not", "response", ":", "return", "None", "data", "=", "response", ".", "json", "(", ")", "return", "self", ".", "worksheet_constructor", "(", "parent", "=", "self", ",", "*", "*", "{", "self", ".", "_cloud_data_key", ":", "data", "}", ")" ]
47.625
0.010309
def uniform_partition_fromgrid(grid, min_pt=None, max_pt=None): """Return a partition of an interval product based on a given grid. This method is complementary to `uniform_partition_fromintv` in that it infers the set to be partitioned from a given grid and optional parameters for ``min_pt`` and ``max_pt`` of the set. Parameters ---------- grid : `RectGrid` Grid on which the partition is based min_pt, max_pt : float, sequence of floats, or dict, optional Spatial points defining the lower/upper limits of the intervals to be partitioned. The points can be specified in two ways: float or sequence: The values are used directly as ``min_pt`` and/or ``max_pt``. dict: Index-value pairs specifying an axis and a spatial coordinate to be used in that axis. In axes which are not a key in the dictionary, the coordinate for the vector is calculated as:: min_pt = x[0] - (x[1] - x[0]) / 2 max_pt = x[-1] + (x[-1] - x[-2]) / 2 See ``Examples`` below. In general, ``min_pt`` may not be larger than ``grid.min_pt``, and ``max_pt`` not smaller than ``grid.max_pt`` in any component. ``None`` is equivalent to an empty dictionary, i.e. the values are calculated in each dimension. See Also -------- uniform_partition_fromintv Examples -------- Have ``min_pt`` and ``max_pt`` of the bounding box automatically calculated: >>> grid = odl.uniform_grid(0, 1, 3) >>> grid.coord_vectors (array([ 0. , 0.5, 1. ]),) >>> part = odl.uniform_partition_fromgrid(grid) >>> part.cell_boundary_vecs (array([-0.25, 0.25, 0.75, 1.25]),) ``min_pt`` and ``max_pt`` can be given explicitly: >>> part = odl.uniform_partition_fromgrid(grid, min_pt=0, max_pt=1) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.75, 1. ]),) Using dictionaries, selective axes can be explicitly set. The keys refer to axes, the values to the coordinates to use: >>> grid = odl.uniform_grid([0, 0], [1, 1], (3, 3)) >>> part = odl.uniform_partition_fromgrid(grid, ... min_pt={0: -1}, max_pt={-1: 3}) >>> part.cell_boundary_vecs[0] array([-1. , 0.25, 0.75, 1.25]) >>> part.cell_boundary_vecs[1] array([-0.25, 0.25, 0.75, 3. ]) """ # Make dictionaries from `min_pt` and `max_pt` and fill with `None` where # no value is given (taking negative indices into account) if min_pt is None: min_pt = {i: None for i in range(grid.ndim)} elif not hasattr(min_pt, 'items'): # array-like min_pt = np.atleast_1d(min_pt) min_pt = {i: float(v) for i, v in enumerate(min_pt)} else: min_pt.update({i: None for i in range(grid.ndim) if i not in min_pt and i - grid.ndim not in min_pt}) if max_pt is None: max_pt = {i: None for i in range(grid.ndim)} elif not hasattr(max_pt, 'items'): max_pt = np.atleast_1d(max_pt) max_pt = {i: float(v) for i, v in enumerate(max_pt)} else: max_pt.update({i: None for i in range(grid.ndim) if i not in max_pt and i - grid.ndim not in max_pt}) # Set the values in the vectors by computing (None) or directly from the # given vectors (otherwise). 
min_pt_vec = np.empty(grid.ndim) for ax, xmin in min_pt.items(): if xmin is None: cvec = grid.coord_vectors[ax] if len(cvec) == 1: raise ValueError('in axis {}: cannot calculate `min_pt` with ' 'only 1 grid point'.format(ax)) min_pt_vec[ax] = cvec[0] - (cvec[1] - cvec[0]) / 2 else: min_pt_vec[ax] = xmin max_pt_vec = np.empty(grid.ndim) for ax, xmax in max_pt.items(): if xmax is None: cvec = grid.coord_vectors[ax] if len(cvec) == 1: raise ValueError('in axis {}: cannot calculate `max_pt` with ' 'only 1 grid point'.format(ax)) max_pt_vec[ax] = cvec[-1] + (cvec[-1] - cvec[-2]) / 2 else: max_pt_vec[ax] = xmax return RectPartition(IntervalProd(min_pt_vec, max_pt_vec), grid)
[ "def", "uniform_partition_fromgrid", "(", "grid", ",", "min_pt", "=", "None", ",", "max_pt", "=", "None", ")", ":", "# Make dictionaries from `min_pt` and `max_pt` and fill with `None` where", "# no value is given (taking negative indices into account)", "if", "min_pt", "is", "None", ":", "min_pt", "=", "{", "i", ":", "None", "for", "i", "in", "range", "(", "grid", ".", "ndim", ")", "}", "elif", "not", "hasattr", "(", "min_pt", ",", "'items'", ")", ":", "# array-like", "min_pt", "=", "np", ".", "atleast_1d", "(", "min_pt", ")", "min_pt", "=", "{", "i", ":", "float", "(", "v", ")", "for", "i", ",", "v", "in", "enumerate", "(", "min_pt", ")", "}", "else", ":", "min_pt", ".", "update", "(", "{", "i", ":", "None", "for", "i", "in", "range", "(", "grid", ".", "ndim", ")", "if", "i", "not", "in", "min_pt", "and", "i", "-", "grid", ".", "ndim", "not", "in", "min_pt", "}", ")", "if", "max_pt", "is", "None", ":", "max_pt", "=", "{", "i", ":", "None", "for", "i", "in", "range", "(", "grid", ".", "ndim", ")", "}", "elif", "not", "hasattr", "(", "max_pt", ",", "'items'", ")", ":", "max_pt", "=", "np", ".", "atleast_1d", "(", "max_pt", ")", "max_pt", "=", "{", "i", ":", "float", "(", "v", ")", "for", "i", ",", "v", "in", "enumerate", "(", "max_pt", ")", "}", "else", ":", "max_pt", ".", "update", "(", "{", "i", ":", "None", "for", "i", "in", "range", "(", "grid", ".", "ndim", ")", "if", "i", "not", "in", "max_pt", "and", "i", "-", "grid", ".", "ndim", "not", "in", "max_pt", "}", ")", "# Set the values in the vectors by computing (None) or directly from the", "# given vectors (otherwise).", "min_pt_vec", "=", "np", ".", "empty", "(", "grid", ".", "ndim", ")", "for", "ax", ",", "xmin", "in", "min_pt", ".", "items", "(", ")", ":", "if", "xmin", "is", "None", ":", "cvec", "=", "grid", ".", "coord_vectors", "[", "ax", "]", "if", "len", "(", "cvec", ")", "==", "1", ":", "raise", "ValueError", "(", "'in axis {}: cannot calculate `min_pt` with '", "'only 1 grid point'", ".", "format", "(", "ax", ")", ")", "min_pt_vec", "[", "ax", "]", "=", "cvec", "[", "0", "]", "-", "(", "cvec", "[", "1", "]", "-", "cvec", "[", "0", "]", ")", "/", "2", "else", ":", "min_pt_vec", "[", "ax", "]", "=", "xmin", "max_pt_vec", "=", "np", ".", "empty", "(", "grid", ".", "ndim", ")", "for", "ax", ",", "xmax", "in", "max_pt", ".", "items", "(", ")", ":", "if", "xmax", "is", "None", ":", "cvec", "=", "grid", ".", "coord_vectors", "[", "ax", "]", "if", "len", "(", "cvec", ")", "==", "1", ":", "raise", "ValueError", "(", "'in axis {}: cannot calculate `max_pt` with '", "'only 1 grid point'", ".", "format", "(", "ax", ")", ")", "max_pt_vec", "[", "ax", "]", "=", "cvec", "[", "-", "1", "]", "+", "(", "cvec", "[", "-", "1", "]", "-", "cvec", "[", "-", "2", "]", ")", "/", "2", "else", ":", "max_pt_vec", "[", "ax", "]", "=", "xmax", "return", "RectPartition", "(", "IntervalProd", "(", "min_pt_vec", ",", "max_pt_vec", ")", ",", "grid", ")" ]
38.054054
0.000231
def get_object(self, *args, **kwargs):
        """
        Should memoize the object to avoid multiple queries if get_object is
        used many times in the view
        """
        self.category_instance = get_object_or_404(Category, slug=self.kwargs['category_slug'])
        return get_object_or_404(Post,
                                 thread__id=self.kwargs['thread_id'],
                                 thread__category=self.category_instance,
                                 pk=self.kwargs['post_id'])
[ "def", "get_object", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "category_instance", "=", "get_object_or_404", "(", "Category", ",", "slug", "=", "self", ".", "kwargs", "[", "'category_slug'", "]", ")", "return", "get_object_or_404", "(", "Post", ",", "thread__id", "=", "self", ".", "kwargs", "[", "'thread_id'", "]", ",", "thread__category", "=", "self", ".", "category_instance", ",", "pk", "=", "self", ".", "kwargs", "[", "'post_id'", "]", ")" ]
66.666667
0.012346
def pipe_fetch(context=None, _INPUT=None, conf=None, **kwargs): """A source that fetches and parses one or more feeds to return the entries. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipeforever pipe or an iterable of items or fields conf : { 'URL': [ {'type': 'url', 'value': <url1>}, {'type': 'url', 'value': <url2>}, {'type': 'url', 'value': <url3>}, ] } Returns ------- _OUTPUT : generator of items """ splits = get_splits(_INPUT, conf['URL'], **cdicts(opts, kwargs)) items = starmap(parse_result, splits) _OUTPUT = utils.multiplex(items) return _OUTPUT
[ "def", "pipe_fetch", "(", "context", "=", "None", ",", "_INPUT", "=", "None", ",", "conf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "splits", "=", "get_splits", "(", "_INPUT", ",", "conf", "[", "'URL'", "]", ",", "*", "*", "cdicts", "(", "opts", ",", "kwargs", ")", ")", "items", "=", "starmap", "(", "parse_result", ",", "splits", ")", "_OUTPUT", "=", "utils", ".", "multiplex", "(", "items", ")", "return", "_OUTPUT" ]
28.5
0.001414
def cons(collection, value): """Extends a collection with a value.""" if isinstance(value, collections.Mapping): if collection is None: collection = {} collection.update(**value) elif isinstance(value, six.string_types): if collection is None: collection = [] collection.append(value) elif isinstance(value, collections.Iterable): if collection is None: collection = [] collection.extend(value) else: if collection is None: collection = [] collection.append(value) return collection
[ "def", "cons", "(", "collection", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "collections", ".", "Mapping", ")", ":", "if", "collection", "is", "None", ":", "collection", "=", "{", "}", "collection", ".", "update", "(", "*", "*", "value", ")", "elif", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "if", "collection", "is", "None", ":", "collection", "=", "[", "]", "collection", ".", "append", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "collections", ".", "Iterable", ")", ":", "if", "collection", "is", "None", ":", "collection", "=", "[", "]", "collection", ".", "extend", "(", "value", ")", "else", ":", "if", "collection", "is", "None", ":", "collection", "=", "[", "]", "collection", ".", "append", "(", "value", ")", "return", "collection" ]
26.086957
0.001608
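A self-contained stand-in for the cons() dispatch above, using collections.abc and str in place of collections.Mapping and six.string_types; the behaviour shown is inferred from the record, not taken from the library's own tests.

from collections.abc import Mapping, Iterable

def cons_sketch(collection, value):
    # Mirrors the dispatch order of the record: mappings merge, strings append
    # as a single item, other iterables extend, everything else appends.
    if isinstance(value, Mapping):
        collection = {} if collection is None else collection
        collection.update(**value)
    elif isinstance(value, str):
        collection = [] if collection is None else collection
        collection.append(value)
    elif isinstance(value, Iterable):
        collection = [] if collection is None else collection
        collection.extend(value)
    else:
        collection = [] if collection is None else collection
        collection.append(value)
    return collection

print(cons_sketch(None, "a"))          # ['a']            strings stay whole
print(cons_sketch(["a"], ["b", "c"]))  # ['a', 'b', 'c']  iterables extend
print(cons_sketch(None, {"k": 1}))     # {'k': 1}         mappings merge
print(cons_sketch(None, 7))            # [7]              scalars append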
def get_next_occurrence(self) -> date:
        """ Returns the next occurrence date for the transaction """
        result = get_next_occurrence(self.transaction)
        assert isinstance(result, date)
        return result
[ "def", "get_next_occurrence", "(", "self", ")", "->", "date", ":", "result", "=", "get_next_occurrence", "(", "self", ".", "transaction", ")", "assert", "isinstance", "(", "result", ",", "date", ")", "return", "result" ]
43.2
0.009091
def error(self, *args): """Log an error. By default this will also raise an exception.""" if _canShortcutLogging(self.logCategory, ERROR): return errorObject(self.logObjectName(), self.logCategory, *self.logFunction(*args))
[ "def", "error", "(", "self", ",", "*", "args", ")", ":", "if", "_canShortcutLogging", "(", "self", ".", "logCategory", ",", "ERROR", ")", ":", "return", "errorObject", "(", "self", ".", "logObjectName", "(", ")", ",", "self", ".", "logCategory", ",", "*", "self", ".", "logFunction", "(", "*", "args", ")", ")" ]
44.5
0.011029
def get_active_lineage(): """ Query the lineage of the current timer level. Provides only timer names, not stamp names (as these have not been decided yet!). Returns: str: Formatted sequence of timer names in one string. """ lin_str = '' for active_timer in f.timer_stack: lin_str += "{}-->".format(active_timer.name) try: return lin_str[:-3] except IndexError: pass
[ "def", "get_active_lineage", "(", ")", ":", "lin_str", "=", "''", "for", "active_timer", "in", "f", ".", "timer_stack", ":", "lin_str", "+=", "\"{}-->\"", ".", "format", "(", "active_timer", ".", "name", ")", "try", ":", "return", "lin_str", "[", ":", "-", "3", "]", "except", "IndexError", ":", "pass" ]
28.133333
0.002294
def check_integrity(sakefile, settings): """ Checks the format of the sakefile dictionary to ensure it conforms to specification Args: A dictionary that is the parsed Sakefile (from sake.py) The setting dictionary (for print functions) Returns: True if the Sakefile is conformant False if not """ sprint = settings["sprint"] error = settings["error"] sprint("Call to check_integrity issued", level="verbose") if not sakefile: error("Sakefile is empty") return False # checking for duplicate targets if len(sakefile.keys()) != len(set(sakefile.keys())): error("Sakefile contains duplicate targets") return False for target in sakefile: if target == "all": if not check_target_integrity(target, sakefile["all"], all=True): error("Failed to accept target 'all'") return False continue if "formula" not in sakefile[target]: if not check_target_integrity(target, sakefile[target], meta=True): errmes = "Failed to accept meta-target '{}'".format(target) error(errmes) return False for atom_target in sakefile[target]: if atom_target == "help": continue if not check_target_integrity(atom_target, sakefile[target][atom_target], parent=target): errmes = "Failed to accept target '{}'\n".format( atom_target) error(errmes) return False continue if not check_target_integrity(target, sakefile[target]): errmes = "Failed to accept target '{}'\n".format(target) error(errmes) return False return True
[ "def", "check_integrity", "(", "sakefile", ",", "settings", ")", ":", "sprint", "=", "settings", "[", "\"sprint\"", "]", "error", "=", "settings", "[", "\"error\"", "]", "sprint", "(", "\"Call to check_integrity issued\"", ",", "level", "=", "\"verbose\"", ")", "if", "not", "sakefile", ":", "error", "(", "\"Sakefile is empty\"", ")", "return", "False", "# checking for duplicate targets", "if", "len", "(", "sakefile", ".", "keys", "(", ")", ")", "!=", "len", "(", "set", "(", "sakefile", ".", "keys", "(", ")", ")", ")", ":", "error", "(", "\"Sakefile contains duplicate targets\"", ")", "return", "False", "for", "target", "in", "sakefile", ":", "if", "target", "==", "\"all\"", ":", "if", "not", "check_target_integrity", "(", "target", ",", "sakefile", "[", "\"all\"", "]", ",", "all", "=", "True", ")", ":", "error", "(", "\"Failed to accept target 'all'\"", ")", "return", "False", "continue", "if", "\"formula\"", "not", "in", "sakefile", "[", "target", "]", ":", "if", "not", "check_target_integrity", "(", "target", ",", "sakefile", "[", "target", "]", ",", "meta", "=", "True", ")", ":", "errmes", "=", "\"Failed to accept meta-target '{}'\"", ".", "format", "(", "target", ")", "error", "(", "errmes", ")", "return", "False", "for", "atom_target", "in", "sakefile", "[", "target", "]", ":", "if", "atom_target", "==", "\"help\"", ":", "continue", "if", "not", "check_target_integrity", "(", "atom_target", ",", "sakefile", "[", "target", "]", "[", "atom_target", "]", ",", "parent", "=", "target", ")", ":", "errmes", "=", "\"Failed to accept target '{}'\\n\"", ".", "format", "(", "atom_target", ")", "error", "(", "errmes", ")", "return", "False", "continue", "if", "not", "check_target_integrity", "(", "target", ",", "sakefile", "[", "target", "]", ")", ":", "errmes", "=", "\"Failed to accept target '{}'\\n\"", ".", "format", "(", "target", ")", "error", "(", "errmes", ")", "return", "False", "return", "True" ]
39.4
0.000495
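A hypothetical sketch of the inputs check_integrity() expects; the sprint/error callables are minimal stand-ins, and the target keys shown ('help', 'dependencies', 'formula', 'output') are an assumption about what a conformant Sakefile target looks like, so the actual call is left commented out.

settings = {
    "sprint": lambda msg, level=None: None,      # swallow verbose output
    "error": lambda msg: print("ERROR:", msg),   # surface failures
}

sakefile = {
    "build docs": {
        "help": "render the documentation",
        "dependencies": ["docs/*.md"],
        "formula": "mkdocs build",
        "output": ["site/"],
    },
}

# ok = check_integrity(sakefile, settings)   # expected to return True here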
def load_topology(self,topology): """ Loads the topology file (e.g. GRO,PDB,INPCRD) as a MDAnalysis Universe, checks if it can be loaded. Needs to be run before the equivalent function topol.load_trajectory() and provides the snapshot that is going to be used for final residue placement - i.e. the residue coordinates for placement are taken from this file. Takes: * topology * - a topology file e.g. GRO, PDB, INPCRD, CARD, DMS Output: * self.universe * - MDAnalysis Universe """ try: self.universe = MDAnalysis.Universe(topology) except ValueError: print "Check your topology file - it is either missing or misspelled." sys.exit()
[ "def", "load_topology", "(", "self", ",", "topology", ")", ":", "try", ":", "self", ".", "universe", "=", "MDAnalysis", ".", "Universe", "(", "topology", ")", "except", "ValueError", ":", "print", "\"Check your topology file - it is either missing or misspelled.\"", "sys", ".", "exit", "(", ")" ]
43.166667
0.008816
def cdx_clamp(cdx_iter, from_ts, to_ts): """ Clamp by start and end ts """ if from_ts and len(from_ts) < 14: from_ts = pad_timestamp(from_ts, PAD_14_DOWN) if to_ts and len(to_ts) < 14: to_ts = pad_timestamp(to_ts, PAD_14_UP) for cdx in cdx_iter: if from_ts and cdx[TIMESTAMP] < from_ts: continue if to_ts and cdx[TIMESTAMP] > to_ts: continue yield cdx
[ "def", "cdx_clamp", "(", "cdx_iter", ",", "from_ts", ",", "to_ts", ")", ":", "if", "from_ts", "and", "len", "(", "from_ts", ")", "<", "14", ":", "from_ts", "=", "pad_timestamp", "(", "from_ts", ",", "PAD_14_DOWN", ")", "if", "to_ts", "and", "len", "(", "to_ts", ")", "<", "14", ":", "to_ts", "=", "pad_timestamp", "(", "to_ts", ",", "PAD_14_UP", ")", "for", "cdx", "in", "cdx_iter", ":", "if", "from_ts", "and", "cdx", "[", "TIMESTAMP", "]", "<", "from_ts", ":", "continue", "if", "to_ts", "and", "cdx", "[", "TIMESTAMP", "]", ">", "to_ts", ":", "continue", "yield", "cdx" ]
23.666667
0.002257
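A stand-alone illustration of the clamping idea in cdx_clamp above; the plain dicts, the 'timestamp' key, and the already-padded 14-digit timestamps are assumptions (the module itself indexes CDX objects with a TIMESTAMP constant and pads short timestamps first).

records = [
    {"timestamp": "20120301000000", "url": "http://example.com/a"},
    {"timestamp": "20120315120000", "url": "http://example.com/b"},
    {"timestamp": "20120401000000", "url": "http://example.com/c"},
]
from_ts, to_ts = "20120310000000", "20120331235959"

# Equal-length digit strings compare chronologically, which is why the record
# above can clamp with plain < / > comparisons once timestamps are padded.
clamped = [r for r in records if from_ts <= r["timestamp"] <= to_ts]
print([r["url"] for r in clamped])   # only the mid-March capture survives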
def get(self, sys_id=None, limit=100): """Returns a list of attachments :param sys_id: record sys_id to list attachments for :param limit: override the default limit of 100 :return: list of attachments """ if sys_id: return self.resource.get(query={'table_sys_id': sys_id, 'table_name': self.table_name}).all() return self.resource.get(query={'table_name': self.table_name}, limit=limit).all()
[ "def", "get", "(", "self", ",", "sys_id", "=", "None", ",", "limit", "=", "100", ")", ":", "if", "sys_id", ":", "return", "self", ".", "resource", ".", "get", "(", "query", "=", "{", "'table_sys_id'", ":", "sys_id", ",", "'table_name'", ":", "self", ".", "table_name", "}", ")", ".", "all", "(", ")", "return", "self", ".", "resource", ".", "get", "(", "query", "=", "{", "'table_name'", ":", "self", ".", "table_name", "}", ",", "limit", "=", "limit", ")", ".", "all", "(", ")" ]
37.75
0.008621
def tplot_save(names, filename=None): """ This function will save tplot variables into a single file by using the python "pickle" function. This file can then be "restored" using tplot_restore. This is useful if you want to end the pytplot session, but save all of your data/options. All variables and plot options can be read back into tplot with the "tplot_restore" command. Parameters: names : str/list A string or a list of strings of the tplot variables you would like saved. filename : str, optional The filename where you want to save the file. Returns: None Examples: >>> # Save a single tplot variable >>> import pytplot >>> x_data = [1,2,3,4,5] >>> y_data = [1,2,3,4,5] >>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data}) >>> pytplot.ylim('Variable1', 2, 4) >>> pytplot.save('Variable1', filename='C:/temp/variable1.pytplot') """ if isinstance(names,int): names = list(data_quants.keys())[names-1] if not isinstance(names, list): names = [names] #Check that we have all available data for name in names: if isinstance(data_quants[name].data, list): for data_name in data_quants[name].data: if data_name not in names: names.append(data_name) #Pickle it up to_pickle =[] for name in names: if name not in data_quants.keys(): print("That name is currently not in pytplot") return to_pickle.append(data_quants[name]) num_quants = len(to_pickle) to_pickle = [num_quants] + to_pickle temp_tplot_opt_glob = tplot_opt_glob to_pickle.append(temp_tplot_opt_glob) if filename==None: filename='var_'+'-'.join(names)+'.pytplot' pickle.dump(to_pickle, open(filename, "wb")) return
[ "def", "tplot_save", "(", "names", ",", "filename", "=", "None", ")", ":", "if", "isinstance", "(", "names", ",", "int", ")", ":", "names", "=", "list", "(", "data_quants", ".", "keys", "(", ")", ")", "[", "names", "-", "1", "]", "if", "not", "isinstance", "(", "names", ",", "list", ")", ":", "names", "=", "[", "names", "]", "#Check that we have all available data", "for", "name", "in", "names", ":", "if", "isinstance", "(", "data_quants", "[", "name", "]", ".", "data", ",", "list", ")", ":", "for", "data_name", "in", "data_quants", "[", "name", "]", ".", "data", ":", "if", "data_name", "not", "in", "names", ":", "names", ".", "append", "(", "data_name", ")", "#Pickle it up", "to_pickle", "=", "[", "]", "for", "name", "in", "names", ":", "if", "name", "not", "in", "data_quants", ".", "keys", "(", ")", ":", "print", "(", "\"That name is currently not in pytplot\"", ")", "return", "to_pickle", ".", "append", "(", "data_quants", "[", "name", "]", ")", "num_quants", "=", "len", "(", "to_pickle", ")", "to_pickle", "=", "[", "num_quants", "]", "+", "to_pickle", "temp_tplot_opt_glob", "=", "tplot_opt_glob", "to_pickle", ".", "append", "(", "temp_tplot_opt_glob", ")", "if", "filename", "==", "None", ":", "filename", "=", "'var_'", "+", "'-'", ".", "join", "(", "names", ")", "+", "'.pytplot'", "pickle", ".", "dump", "(", "to_pickle", ",", "open", "(", "filename", ",", "\"wb\"", ")", ")", "return" ]
33.684211
0.01417
def merge(self, other): """Merges a set of build file aliases and returns a new set of aliases containing both. Any duplicate aliases from `other` will trump. :API: public :param other: The BuildFileAliases to merge in. :type other: :class:`BuildFileAliases` :returns: A new BuildFileAliases containing `other`'s aliases merged into ours. :rtype: :class:`BuildFileAliases` """ if not isinstance(other, BuildFileAliases): raise TypeError('Can only merge other BuildFileAliases, given {0}'.format(other)) def merge(*items): merged = {} for item in items: merged.update(item) return merged targets = merge(self.target_types, self.target_macro_factories, other.target_types, other.target_macro_factories) objects = merge(self.objects, other.objects) context_aware_object_factories=merge(self.context_aware_object_factories, other.context_aware_object_factories) return BuildFileAliases(targets=targets, objects=objects, context_aware_object_factories=context_aware_object_factories)
[ "def", "merge", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "BuildFileAliases", ")", ":", "raise", "TypeError", "(", "'Can only merge other BuildFileAliases, given {0}'", ".", "format", "(", "other", ")", ")", "def", "merge", "(", "*", "items", ")", ":", "merged", "=", "{", "}", "for", "item", "in", "items", ":", "merged", ".", "update", "(", "item", ")", "return", "merged", "targets", "=", "merge", "(", "self", ".", "target_types", ",", "self", ".", "target_macro_factories", ",", "other", ".", "target_types", ",", "other", ".", "target_macro_factories", ")", "objects", "=", "merge", "(", "self", ".", "objects", ",", "other", ".", "objects", ")", "context_aware_object_factories", "=", "merge", "(", "self", ".", "context_aware_object_factories", ",", "other", ".", "context_aware_object_factories", ")", "return", "BuildFileAliases", "(", "targets", "=", "targets", ",", "objects", "=", "objects", ",", "context_aware_object_factories", "=", "context_aware_object_factories", ")" ]
40
0.008418
def Reynolds_valve(nu, Q, D1, FL, Fd, C): r'''Calculates Reynolds number of a control valve for a liquid or gas flowing through it at a specified Q, for a specified D1, FL, Fd, C, and with kinematic viscosity `nu` according to IEC 60534 calculations. .. math:: Re_v = \frac{N_4 F_d Q}{\nu \sqrt{C F_L}}\left(\frac{F_L^2 C^2} {N_2D^4} +1\right)^{1/4} Parameters ---------- nu : float Kinematic viscosity, [m^2/s] Q : float Volumetric flow rate of the fluid [m^3/s] D1 : float Diameter of the pipe before the valve [m] FL : float, optional Liquid pressure recovery factor of a control valve without attached fittings [] Fd : float Valve style modifier [-] C : float Metric Kv valve flow coefficient (flow rate of water at a pressure drop of 1 bar) [m^3/hr] Returns ------- Rev : float Valve reynolds number [-] Examples -------- >>> Reynolds_valve(3.26e-07, 360, 150.0, 0.9, 0.46, 165) 2966984.7525455453 References ---------- .. [1] IEC 60534-2-1 / ISA-75.01.01-2007 ''' return N4*Fd*Q/nu/(C*FL)**0.5*(FL**2*C**2/(N2*D1**4) + 1)**0.25
[ "def", "Reynolds_valve", "(", "nu", ",", "Q", ",", "D1", ",", "FL", ",", "Fd", ",", "C", ")", ":", "return", "N4", "*", "Fd", "*", "Q", "/", "nu", "/", "(", "C", "*", "FL", ")", "**", "0.5", "*", "(", "FL", "**", "2", "*", "C", "**", "2", "/", "(", "N2", "*", "D1", "**", "4", ")", "+", "1", ")", "**", "0.25" ]
28.902439
0.002449
def ConnectionUpdate(self, settings): '''Update settings on a connection. settings is a String String Variant Map Map. See https://developer.gnome.org/NetworkManager/0.9/spec.html #type-String_String_Variant_Map_Map ''' connection_path = self.connection_path NM = dbusmock.get_object(MANAGER_OBJ) settings_obj = dbusmock.get_object(SETTINGS_OBJ) main_connections = settings_obj.ListConnections() if connection_path not in main_connections: raise dbus.exceptions.DBusException( 'Connection %s does not exist' % connection_path, name=MANAGER_IFACE + '.DoesNotExist',) # Take care not to overwrite the secrets for setting_name in settings: setting = settings[setting_name] for k in setting: if setting_name not in self.settings: self.settings[setting_name] = {} self.settings[setting_name][k] = setting[k] self.EmitSignal(CSETTINGS_IFACE, 'Updated', '', []) auto_connect = False if 'autoconnect' in settings['connection']: auto_connect = settings['connection']['autoconnect'] if auto_connect: dev = None devices = NM.GetDevices() # Grab the first device. if len(devices) > 0: dev = devices[0] if dev: activate_connection(NM, connection_path, dev, connection_path) return connection_path
[ "def", "ConnectionUpdate", "(", "self", ",", "settings", ")", ":", "connection_path", "=", "self", ".", "connection_path", "NM", "=", "dbusmock", ".", "get_object", "(", "MANAGER_OBJ", ")", "settings_obj", "=", "dbusmock", ".", "get_object", "(", "SETTINGS_OBJ", ")", "main_connections", "=", "settings_obj", ".", "ListConnections", "(", ")", "if", "connection_path", "not", "in", "main_connections", ":", "raise", "dbus", ".", "exceptions", ".", "DBusException", "(", "'Connection %s does not exist'", "%", "connection_path", ",", "name", "=", "MANAGER_IFACE", "+", "'.DoesNotExist'", ",", ")", "# Take care not to overwrite the secrets", "for", "setting_name", "in", "settings", ":", "setting", "=", "settings", "[", "setting_name", "]", "for", "k", "in", "setting", ":", "if", "setting_name", "not", "in", "self", ".", "settings", ":", "self", ".", "settings", "[", "setting_name", "]", "=", "{", "}", "self", ".", "settings", "[", "setting_name", "]", "[", "k", "]", "=", "setting", "[", "k", "]", "self", ".", "EmitSignal", "(", "CSETTINGS_IFACE", ",", "'Updated'", ",", "''", ",", "[", "]", ")", "auto_connect", "=", "False", "if", "'autoconnect'", "in", "settings", "[", "'connection'", "]", ":", "auto_connect", "=", "settings", "[", "'connection'", "]", "[", "'autoconnect'", "]", "if", "auto_connect", ":", "dev", "=", "None", "devices", "=", "NM", ".", "GetDevices", "(", ")", "# Grab the first device.", "if", "len", "(", "devices", ")", ">", "0", ":", "dev", "=", "devices", "[", "0", "]", "if", "dev", ":", "activate_connection", "(", "NM", ",", "connection_path", ",", "dev", ",", "connection_path", ")", "return", "connection_path" ]
30.711111
0.000701
def _find_players(self, boxscore): """ Find all players for each team. Iterate through every player for both teams as found in the boxscore tables and create a list of instances of the BoxscorePlayer class for each player. Return lists of player instances comprising the away and home team players, respectively. Parameters ---------- boxscore : PyQuery object A PyQuery object containing all of the HTML data from the boxscore. Returns ------- tuple Returns a ``tuple`` in the format (away_players, home_players) where each element is a list of player instances for the away and home teams, respectively. """ player_dict = {} table_count = 0 tables = self._find_boxscore_tables(boxscore) for table in tables: home_or_away = HOME # There are two tables per team with the odd tables belonging to # the away team. if table_count % 2 == 1: home_or_away = AWAY player_dict = self._extract_player_stats(table, player_dict, home_or_away) table_count += 1 away_players, home_players = self._instantiate_players(player_dict) return away_players, home_players
[ "def", "_find_players", "(", "self", ",", "boxscore", ")", ":", "player_dict", "=", "{", "}", "table_count", "=", "0", "tables", "=", "self", ".", "_find_boxscore_tables", "(", "boxscore", ")", "for", "table", "in", "tables", ":", "home_or_away", "=", "HOME", "# There are two tables per team with the odd tables belonging to", "# the away team.", "if", "table_count", "%", "2", "==", "1", ":", "home_or_away", "=", "AWAY", "player_dict", "=", "self", ".", "_extract_player_stats", "(", "table", ",", "player_dict", ",", "home_or_away", ")", "table_count", "+=", "1", "away_players", ",", "home_players", "=", "self", ".", "_instantiate_players", "(", "player_dict", ")", "return", "away_players", ",", "home_players" ]
38.162162
0.001381
def read_data(archive, arc_type, day, stachans, length=86400): """ Function to read the appropriate data from an archive for a day. :type archive: str :param archive: The archive source - if arc_type is seishub, this should be a url, if the arc_type is FDSN then this can be either a url or a known obspy client. If arc_type is day_vols, then this is the path to the top directory. :type arc_type: str :param arc_type: The type of archive, can be: seishub, FDSN, day_volumes :type day: datetime.date :param day: Date to retrieve data for :type stachans: list :param stachans: List of tuples of Stations and channels to try and get, will not fail if stations are not available, but will warn. :type length: float :param length: Data length to extract in seconds, defaults to 1 day. :returns: Stream of data :rtype: obspy.core.stream.Stream .. note:: A note on arc_types, if arc_type is day_vols, then this will \ look for directories labelled in the IRIS DMC conventions of \ Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \ Data within these files directories should be stored as day-long, \ single-channel files. This is not implemented in the fasted way \ possible to allow for a more general situation. If you require more \ speed you will need to re-write this. .. rubric:: Example >>> from obspy import UTCDateTime >>> t1 = UTCDateTime(2012, 3, 26) >>> stachans = [('JCNB', 'SP1')] >>> st = read_data('NCEDC', 'FDSN', t1, stachans) >>> print(st) 1 Trace(s) in Stream: BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\ 950000Z | 20.0 Hz, 1728000 samples .. rubric:: Example, missing data >>> t1 = UTCDateTime(2012, 3, 26) >>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')] >>> st = read_data('NCEDC', 'FDSN', t1, stachans) >>> print(st) 1 Trace(s) in Stream: BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\ 950000Z | 20.0 Hz, 1728000 samples .. rubric:: Example, local day-volumes >>> # Get the path to the test data >>> import eqcorrscan >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> t1 = UTCDateTime(2012, 3, 26) >>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')] >>> st = read_data(TEST_PATH + '/day_vols', 'day_vols', ... 
t1, stachans) >>> print(st) 2 Trace(s) in Stream: AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \ | 1.0 Hz, 86400 samples AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \ | 1.0 Hz, 86400 samples """ st = [] available_stations = _check_available_data(archive, arc_type, day) for station in stachans: if len(station[1]) == 2: # Cope with two char channel naming in seisan station_map = (station[0], station[1][0] + '*' + station[1][1]) available_stations_map = [(sta[0], sta[1][0] + '*' + sta[1][-1]) for sta in available_stations] else: station_map = station available_stations_map = available_stations if station_map not in available_stations_map: msg = ' '.join([station[0], station_map[1], 'is not available for', day.strftime('%Y/%m/%d')]) warnings.warn(msg) continue if arc_type.lower() == 'seishub': client = SeishubClient(archive) st += client.get_waveforms( network='*', station=station_map[0], location='*', channel=station_map[1], starttime=UTCDateTime(day), endtime=UTCDateTime(day) + length) elif arc_type.upper() == "FDSN": client = FDSNClient(archive) try: st += client.get_waveforms( network='*', station=station_map[0], location='*', channel=station_map[1], starttime=UTCDateTime(day), endtime=UTCDateTime(day) + length) except FDSNException: warnings.warn('No data on server despite station being ' + 'available...') continue elif arc_type.lower() == 'day_vols': wavfiles = _get_station_file(os.path.join( archive, day.strftime('Y%Y' + os.sep + 'R%j.01')), station_map[0], station_map[1]) for wavfile in wavfiles: st += read(wavfile, starttime=day, endtime=day + length) st = Stream(st) return st
[ "def", "read_data", "(", "archive", ",", "arc_type", ",", "day", ",", "stachans", ",", "length", "=", "86400", ")", ":", "st", "=", "[", "]", "available_stations", "=", "_check_available_data", "(", "archive", ",", "arc_type", ",", "day", ")", "for", "station", "in", "stachans", ":", "if", "len", "(", "station", "[", "1", "]", ")", "==", "2", ":", "# Cope with two char channel naming in seisan", "station_map", "=", "(", "station", "[", "0", "]", ",", "station", "[", "1", "]", "[", "0", "]", "+", "'*'", "+", "station", "[", "1", "]", "[", "1", "]", ")", "available_stations_map", "=", "[", "(", "sta", "[", "0", "]", ",", "sta", "[", "1", "]", "[", "0", "]", "+", "'*'", "+", "sta", "[", "1", "]", "[", "-", "1", "]", ")", "for", "sta", "in", "available_stations", "]", "else", ":", "station_map", "=", "station", "available_stations_map", "=", "available_stations", "if", "station_map", "not", "in", "available_stations_map", ":", "msg", "=", "' '", ".", "join", "(", "[", "station", "[", "0", "]", ",", "station_map", "[", "1", "]", ",", "'is not available for'", ",", "day", ".", "strftime", "(", "'%Y/%m/%d'", ")", "]", ")", "warnings", ".", "warn", "(", "msg", ")", "continue", "if", "arc_type", ".", "lower", "(", ")", "==", "'seishub'", ":", "client", "=", "SeishubClient", "(", "archive", ")", "st", "+=", "client", ".", "get_waveforms", "(", "network", "=", "'*'", ",", "station", "=", "station_map", "[", "0", "]", ",", "location", "=", "'*'", ",", "channel", "=", "station_map", "[", "1", "]", ",", "starttime", "=", "UTCDateTime", "(", "day", ")", ",", "endtime", "=", "UTCDateTime", "(", "day", ")", "+", "length", ")", "elif", "arc_type", ".", "upper", "(", ")", "==", "\"FDSN\"", ":", "client", "=", "FDSNClient", "(", "archive", ")", "try", ":", "st", "+=", "client", ".", "get_waveforms", "(", "network", "=", "'*'", ",", "station", "=", "station_map", "[", "0", "]", ",", "location", "=", "'*'", ",", "channel", "=", "station_map", "[", "1", "]", ",", "starttime", "=", "UTCDateTime", "(", "day", ")", ",", "endtime", "=", "UTCDateTime", "(", "day", ")", "+", "length", ")", "except", "FDSNException", ":", "warnings", ".", "warn", "(", "'No data on server despite station being '", "+", "'available...'", ")", "continue", "elif", "arc_type", ".", "lower", "(", ")", "==", "'day_vols'", ":", "wavfiles", "=", "_get_station_file", "(", "os", ".", "path", ".", "join", "(", "archive", ",", "day", ".", "strftime", "(", "'Y%Y'", "+", "os", ".", "sep", "+", "'R%j.01'", ")", ")", ",", "station_map", "[", "0", "]", ",", "station_map", "[", "1", "]", ")", "for", "wavfile", "in", "wavfiles", ":", "st", "+=", "read", "(", "wavfile", ",", "starttime", "=", "day", ",", "endtime", "=", "day", "+", "length", ")", "st", "=", "Stream", "(", "st", ")", "return", "st" ]
42.045455
0.000211
def chain(cmd_list): """ Feed the output of one command to the next and return the final output. Returns a dict containing the assembled command string and its stdout. """ command = ' | '.join(map(lambda x: ' '.join(x), cmd_list)) chained_proc = functools.reduce(pipe, [None] + cmd_list) stdout_builder = proc.async_stdout_builder(chained_proc) chained_proc.wait() return { 'command': command, 'stdout': stdout_builder.result() }
[ "def", "chain", "(", "cmd_list", ")", ":", "command", "=", "' | '", ".", "join", "(", "map", "(", "lambda", "x", ":", "' '", ".", "join", "(", "x", ")", ",", "cmd_list", ")", ")", "chained_proc", "=", "functools", ".", "reduce", "(", "pipe", ",", "[", "None", "]", "+", "cmd_list", ")", "stdout_builder", "=", "proc", ".", "async_stdout_builder", "(", "chained_proc", ")", "chained_proc", ".", "wait", "(", ")", "return", "{", "'command'", ":", "command", ",", "'stdout'", ":", "stdout_builder", ".", "result", "(", ")", "}" ]
32.846154
0.015945
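The chain helper above leans on project-local pipe/proc utilities that are not part of this record. As a rough, standalone illustration of the same idea (pipe each argv list into the next and collect the final stdout), a stdlib-only sketch could look like this; the sample commands are arbitrary and Unix tools are assumed:

import subprocess

def chain_stdlib(cmd_list):
    # Build the display string, e.g. "echo hello | tr a-z A-Z"
    command = ' | '.join(' '.join(cmd) for cmd in cmd_list)
    prev = None
    for cmd in cmd_list:
        # Feed the previous process's stdout into the next process's stdin
        prev = subprocess.Popen(
            cmd,
            stdin=prev.stdout if prev else None,
            stdout=subprocess.PIPE)
    stdout, _ = prev.communicate()
    return {'command': command, 'stdout': stdout.decode()}

print(chain_stdlib([['echo', 'hello'], ['tr', 'a-z', 'A-Z']]))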
def prepare_payload(op, method, uri, data): """Return the URI (modified perhaps) and body and headers. - For GET requests, encode parameters in the query string. - Otherwise always encode parameters in the request body. - Except op; this can always go in the query string. :param method: The HTTP method. :param uri: The URI of the action. :param data: An iterable of ``name, value`` or ``name, opener`` tuples (see `name_value_pair`) to pack into the body or query, depending on the type of request. """ query = [] if op is None else [("op", op)] def slurp(opener): with opener() as fd: return fd.read() if method == "GET": headers, body = [], None query.extend( (name, slurp(value) if callable(value) else value) for name, value in data) else: # Even if data is empty, construct a multipart request body. Piston # (server-side) sets `request.data` to `None` if there's no payload. message = build_multipart_message(data) headers, body = encode_multipart_message(message) uri = urlparse(uri)._replace(query=urlencode(query)).geturl() return uri, body, headers
[ "def", "prepare_payload", "(", "op", ",", "method", ",", "uri", ",", "data", ")", ":", "query", "=", "[", "]", "if", "op", "is", "None", "else", "[", "(", "\"op\"", ",", "op", ")", "]", "def", "slurp", "(", "opener", ")", ":", "with", "opener", "(", ")", "as", "fd", ":", "return", "fd", ".", "read", "(", ")", "if", "method", "==", "\"GET\"", ":", "headers", ",", "body", "=", "[", "]", ",", "None", "query", ".", "extend", "(", "(", "name", ",", "slurp", "(", "value", ")", "if", "callable", "(", "value", ")", "else", "value", ")", "for", "name", ",", "value", "in", "data", ")", "else", ":", "# Even if data is empty, construct a multipart request body. Piston", "# (server-side) sets `request.data` to `None` if there's no payload.", "message", "=", "build_multipart_message", "(", "data", ")", "headers", ",", "body", "=", "encode_multipart_message", "(", "message", ")", "uri", "=", "urlparse", "(", "uri", ")", ".", "_replace", "(", "query", "=", "urlencode", "(", "query", ")", ")", ".", "geturl", "(", ")", "return", "uri", ",", "body", ",", "headers" ]
35.235294
0.000812
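For the GET branch above, the op and the name/value pairs end up in the query string. A minimal, self-contained sketch of that path using only the standard library (the URI and parameter names below are made up for illustration):

from urllib.parse import urlparse, urlencode

def build_get_uri(op, uri, data):
    # op, if present, always travels in the query string
    query = [] if op is None else [("op", op)]
    query.extend(data)
    return urlparse(uri)._replace(query=urlencode(query)).geturl()

print(build_get_uri("list", "http://example.com/api/2.0/machines/",
                    [("hostname", "node-1")]))
# -> http://example.com/api/2.0/machines/?op=list&hostname=node-1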
def delete(self, id): """Delete a file. Parameters: * id: The Puush ID of the file to delete. """ res = self._api_request('del', data={'i': id})[0] if res[0] == '-1': raise PuushError("File deletion failed.")
[ "def", "delete", "(", "self", ",", "id", ")", ":", "res", "=", "self", ".", "_api_request", "(", "'del'", ",", "data", "=", "{", "'i'", ":", "id", "}", ")", "[", "0", "]", "if", "res", "[", "0", "]", "==", "'-1'", ":", "raise", "PuushError", "(", "\"File deletion failed.\"", ")" ]
30.333333
0.010676
def write(self,output): """Writes the data to be output to the device buffer :param output: data to output :type output: numpy.ndarray """ w = c_int32() self.WriteAnalogF64(self.npoints, 0, 10.0, DAQmx_Val_GroupByChannel, output, w, None);
[ "def", "write", "(", "self", ",", "output", ")", ":", "w", "=", "c_int32", "(", ")", "self", ".", "WriteAnalogF64", "(", "self", ".", "npoints", ",", "0", ",", "10.0", ",", "DAQmx_Val_GroupByChannel", ",", "output", ",", "w", ",", "None", ")" ]
35.111111
0.015432
def assert_almost_eq(arr_test, arr_target, thresh=1E-11): r""" Args: arr_test (ndarray or list): arr_target (ndarray or list): thresh (scalar or ndarray or list): """ if util_arg.NO_ASSERTS: return import utool as ut arr1 = np.array(arr_test) arr2 = np.array(arr_target) passed, error = ut.almost_eq(arr1, arr2, thresh, ret_error=True) if not np.all(passed): failed_xs = np.where(np.logical_not(passed)) failed_error = error.take(failed_xs) failed_arr_test = arr1.take(failed_xs) failed_arr_target = arr2.take(failed_xs) msg_list = [ 'FAILED ASSERT ALMOST EQUAL', ' * failed_xs = %r' % (failed_xs,), ' * failed_error = %r' % (failed_error,), ' * failed_arr_test = %r' % (failed_arr_test,), ' * failed_arr_target = %r' % (failed_arr_target,), ] msg = '\n'.join(msg_list) raise AssertionError(msg) return error
[ "def", "assert_almost_eq", "(", "arr_test", ",", "arr_target", ",", "thresh", "=", "1E-11", ")", ":", "if", "util_arg", ".", "NO_ASSERTS", ":", "return", "import", "utool", "as", "ut", "arr1", "=", "np", ".", "array", "(", "arr_test", ")", "arr2", "=", "np", ".", "array", "(", "arr_target", ")", "passed", ",", "error", "=", "ut", ".", "almost_eq", "(", "arr1", ",", "arr2", ",", "thresh", ",", "ret_error", "=", "True", ")", "if", "not", "np", ".", "all", "(", "passed", ")", ":", "failed_xs", "=", "np", ".", "where", "(", "np", ".", "logical_not", "(", "passed", ")", ")", "failed_error", "=", "error", ".", "take", "(", "failed_xs", ")", "failed_arr_test", "=", "arr1", ".", "take", "(", "failed_xs", ")", "failed_arr_target", "=", "arr2", ".", "take", "(", "failed_xs", ")", "msg_list", "=", "[", "'FAILED ASSERT ALMOST EQUAL'", ",", "' * failed_xs = %r'", "%", "(", "failed_xs", ",", ")", ",", "' * failed_error = %r'", "%", "(", "failed_error", ",", ")", ",", "' * failed_arr_test = %r'", "%", "(", "failed_arr_test", ",", ")", ",", "' * failed_arr_target = %r'", "%", "(", "failed_arr_target", ",", ")", ",", "]", "msg", "=", "'\\n'", ".", "join", "(", "msg_list", ")", "raise", "AssertionError", "(", "msg", ")", "return", "error" ]
33.896552
0.000989
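A stripped-down, NumPy-only version of the same check (no utool dependency) that reports the failing positions; this is a sketch, not the original API:

import numpy as np

def assert_almost_eq_simple(arr_test, arr_target, thresh=1e-11):
    arr_test = np.asarray(arr_test, dtype=float)
    arr_target = np.asarray(arr_target, dtype=float)
    error = np.abs(arr_test - arr_target)
    failed = np.where(error > thresh)[0]
    if failed.size:
        raise AssertionError(
            'FAILED ASSERT ALMOST EQUAL at indices %r (error=%r)'
            % (failed.tolist(), error[failed].tolist()))
    return error

assert_almost_eq_simple([1.0, 2.0], [1.0, 2.0 + 1e-13])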
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs): """Display a gene's expressions. Displays a scatter plot using the SAM projection or another input projection with a particular gene's expressions overlaid. Parameters ---------- gene - string a case-sensitive string indicating the gene expression pattern to display. avg - bool, optional, default True If True, the plots use the k-nearest-neighbor-averaged expression values to smooth out noisy expression patterns and improves visualization. axes - matplotlib axis, optional, default None Plot output to the specified, existing axes. If None, create new figure window. **kwargs - all keyword arguments in 'SAM.scatter' are eligible. """ all_gene_names = np.array(list(self.adata.var_names)) cell_names = np.array(list(self.adata.obs_names)) all_cell_names = np.array(list(self.adata_raw.obs_names)) idx = np.where(all_gene_names == gene)[0] name = gene if(idx.size == 0): print( "Gene note found in the filtered dataset. Note that genes " "are case sensitive.") return if(avg): a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten() if a.sum() == 0: a = np.log2(self.adata_raw.X[np.in1d( all_cell_names, cell_names), :][:, idx].toarray().flatten() + 1) else: a = np.log2(self.adata_raw.X[np.in1d( all_cell_names, cell_names), :][:, idx].toarray().flatten() + 1) if axes is None: plt.figure() axes = plt.gca() self.scatter(c=a, axes=axes, **kwargs) axes.set_title(name)
[ "def", "show_gene_expression", "(", "self", ",", "gene", ",", "avg", "=", "True", ",", "axes", "=", "None", ",", "*", "*", "kwargs", ")", ":", "all_gene_names", "=", "np", ".", "array", "(", "list", "(", "self", ".", "adata", ".", "var_names", ")", ")", "cell_names", "=", "np", ".", "array", "(", "list", "(", "self", ".", "adata", ".", "obs_names", ")", ")", "all_cell_names", "=", "np", ".", "array", "(", "list", "(", "self", ".", "adata_raw", ".", "obs_names", ")", ")", "idx", "=", "np", ".", "where", "(", "all_gene_names", "==", "gene", ")", "[", "0", "]", "name", "=", "gene", "if", "(", "idx", ".", "size", "==", "0", ")", ":", "print", "(", "\"Gene note found in the filtered dataset. Note that genes \"", "\"are case sensitive.\"", ")", "return", "if", "(", "avg", ")", ":", "a", "=", "self", ".", "adata", ".", "layers", "[", "'X_knn_avg'", "]", "[", ":", ",", "idx", "]", ".", "toarray", "(", ")", ".", "flatten", "(", ")", "if", "a", ".", "sum", "(", ")", "==", "0", ":", "a", "=", "np", ".", "log2", "(", "self", ".", "adata_raw", ".", "X", "[", "np", ".", "in1d", "(", "all_cell_names", ",", "cell_names", ")", ",", ":", "]", "[", ":", ",", "idx", "]", ".", "toarray", "(", ")", ".", "flatten", "(", ")", "+", "1", ")", "else", ":", "a", "=", "np", ".", "log2", "(", "self", ".", "adata_raw", ".", "X", "[", "np", ".", "in1d", "(", "all_cell_names", ",", "cell_names", ")", ",", ":", "]", "[", ":", ",", "idx", "]", ".", "toarray", "(", ")", ".", "flatten", "(", ")", "+", "1", ")", "if", "axes", "is", "None", ":", "plt", ".", "figure", "(", ")", "axes", "=", "plt", ".", "gca", "(", ")", "self", ".", "scatter", "(", "c", "=", "a", ",", "axes", "=", "axes", ",", "*", "*", "kwargs", ")", "axes", ".", "set_title", "(", "name", ")" ]
34.927273
0.001519
def _check_user(self, user): """ Checks to make sure that a user is valid. First, checks that the user is not None. If this check fails, a MissingUserError is raised. Next, checks if the user has a validation method. If the method does not exist, the check passes. If the method exists, it is called. If the result of the call is not truthy, an InvalidUserError is raised """ MissingUserError.require_condition( user is not None, 'Could not find the requested user', ) user_validate_method = getattr( user, self.user_class_validation_method, None ) if user_validate_method is None: return InvalidUserError.require_condition( user_validate_method(), "The user is not valid or has had access revoked", )
[ "def", "_check_user", "(", "self", ",", "user", ")", ":", "MissingUserError", ".", "require_condition", "(", "user", "is", "not", "None", ",", "'Could not find the requested user'", ",", ")", "user_validate_method", "=", "getattr", "(", "user", ",", "self", ".", "user_class_validation_method", ",", "None", ")", "if", "user_validate_method", "is", "None", ":", "return", "InvalidUserError", ".", "require_condition", "(", "user_validate_method", "(", ")", ",", "\"The user is not valid or has had access revoked\"", ",", ")" ]
41.190476
0.00226
def ancestors(self): """Returns a list consisting of this workunit and those enclosing it, up to the root. :API: public """ ret = [] workunit = self while workunit is not None: ret.append(workunit) workunit = workunit.parent return ret
[ "def", "ancestors", "(", "self", ")", ":", "ret", "=", "[", "]", "workunit", "=", "self", "while", "workunit", "is", "not", "None", ":", "ret", ".", "append", "(", "workunit", ")", "workunit", "=", "workunit", ".", "parent", "return", "ret" ]
24.181818
0.014493
def sendline(self, data, linesep=os.linesep): ''' Send the provided data to the terminal, appending a line feed. ''' return self.send('{0}{1}'.format(data, linesep))
[ "def", "sendline", "(", "self", ",", "data", ",", "linesep", "=", "os", ".", "linesep", ")", ":", "return", "self", ".", "send", "(", "'{0}{1}'", ".", "format", "(", "data", ",", "linesep", ")", ")" ]
38.4
0.010204
def update(self, other=None, **kwargs): """D.update([other, ]**kwargs) -> None. Update D From dict/iterable ``other`` and ``kwargs``. If ``other`` present and has a .keys() method, does: for k in other: D[k] = other[k] If ``other`` present and lacks .keys() method, does: for (k, v) in other: D[k] = v In either case, this is followed by: for k in kwargs: D[k] = kwargs[k] >>> dc = Dictator() >>> dc['1'] = 'abc' >>> dc['2'] = 'def' >>> dc.values() ['def', 'abc'] >>> dc.update({'3': 'ghi'}, name='Keys') >>> dc.values() ['Keys', 'ghi', 'def', 'abc'] >>> dc.clear() :param other: dict/iterable with .keys() function. :param kwargs: key/value pairs """ logger.debug('call update %s', other) if other: if hasattr(other, 'keys'): for key in other.keys(): self.set(key, other[key]) else: for (key, value) in other: self.set(key, value) if kwargs: for key, value in six.iteritems(kwargs): self.set(key, value)
[ "def", "update", "(", "self", ",", "other", "=", "None", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "'call update %s'", ",", "other", ")", "if", "other", ":", "if", "hasattr", "(", "other", ",", "'keys'", ")", ":", "for", "key", "in", "other", ".", "keys", "(", ")", ":", "self", ".", "set", "(", "key", ",", "other", "[", "key", "]", ")", "else", ":", "for", "(", "key", ",", "value", ")", "in", "other", ":", "self", ".", "set", "(", "key", ",", "value", ")", "if", "kwargs", ":", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "kwargs", ")", ":", "self", ".", "set", "(", "key", ",", "value", ")" ]
34.764706
0.001646
def exec_start(self, exec_id, detach=False, tty=False, stream=False, socket=False, demux=False): """ Start a previously set up exec instance. Args: exec_id (str): ID of the exec instance detach (bool): If true, detach from the exec command. Default: False tty (bool): Allocate a pseudo-TTY. Default: False stream (bool): Stream response data. Default: False socket (bool): Return the connection socket to allow custom read/write operations. demux (bool): Return stdout and stderr separately Returns: (generator or str or tuple): If ``stream=True``, a generator yielding response chunks. If ``socket=True``, a socket object for the connection. A string containing response data otherwise. If ``demux=True``, a tuple with two elements of type byte: stdout and stderr. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ # we want opened socket if socket == True data = { 'Tty': tty, 'Detach': detach } headers = {} if detach else { 'Connection': 'Upgrade', 'Upgrade': 'tcp' } res = self._post_json( self._url('/exec/{0}/start', exec_id), headers=headers, data=data, stream=True ) if detach: return self._result(res) if socket: return self._get_raw_response_socket(res) return self._read_from_socket(res, stream, tty=tty, demux=demux)
[ "def", "exec_start", "(", "self", ",", "exec_id", ",", "detach", "=", "False", ",", "tty", "=", "False", ",", "stream", "=", "False", ",", "socket", "=", "False", ",", "demux", "=", "False", ")", ":", "# we want opened socket if socket == True", "data", "=", "{", "'Tty'", ":", "tty", ",", "'Detach'", ":", "detach", "}", "headers", "=", "{", "}", "if", "detach", "else", "{", "'Connection'", ":", "'Upgrade'", ",", "'Upgrade'", ":", "'tcp'", "}", "res", "=", "self", ".", "_post_json", "(", "self", ".", "_url", "(", "'/exec/{0}/start'", ",", "exec_id", ")", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "stream", "=", "True", ")", "if", "detach", ":", "return", "self", ".", "_result", "(", "res", ")", "if", "socket", ":", "return", "self", ".", "_get_raw_response_socket", "(", "res", ")", "return", "self", ".", "_read_from_socket", "(", "res", ",", "stream", ",", "tty", "=", "tty", ",", "demux", "=", "demux", ")" ]
33.5
0.00174
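A hedged usage sketch for the low-level exec API above, as exposed by docker-py's APIClient; the container name is hypothetical and a running Docker daemon is assumed:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
# Create the exec instance first, then start it with the method above
exec_id = client.exec_create(container='my-container', cmd=['echo', 'hello'])['Id']
output = client.exec_start(exec_id, detach=False, tty=False, stream=False)
print(output)  # b'hello\n'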
def attach_video(self, video: String, caption: String = None, width: Integer = None, height: Integer = None, duration: Integer = None): """ Attach video :param video: :param caption: :param width: :param height: :param duration: :return: self """ self.media.attach_video(video, caption, width=width, height=height, duration=duration) return self
[ "def", "attach_video", "(", "self", ",", "video", ":", "String", ",", "caption", ":", "String", "=", "None", ",", "width", ":", "Integer", "=", "None", ",", "height", ":", "Integer", "=", "None", ",", "duration", ":", "Integer", "=", "None", ")", ":", "self", ".", "media", ".", "attach_video", "(", "video", ",", "caption", ",", "width", "=", "width", ",", "height", "=", "height", ",", "duration", "=", "duration", ")", "return", "self" ]
31.571429
0.010989
def batch_map_mean(func, batch_iter, progress_iter_func=None, sum_axis=None, n_batches=None, prepend_args=None): """ Apply a function to all the samples that are accessed as mini-batches obtained from an iterator. Returns the across-samples mean of the results returned by `func` The `sum_axis` arguments tells `mean_batch_map` how to process the results of `func` before accumulating them: - If `sum_axis` is `None`, `func` should return the across-samples SUM of the results of operating on the mini-batch the sum of the values for the samples, e.g. for loss and error it should return `(sum([loss0, loss1, ... lossN]), sum([err0, err1, ... errN]))` - Otherwise, `sum_axis` should specify the axis or axes over which the the batch results should be summed, e.g. if `func` returns a per-sample loss and error in two arrays `[[loss0, loss1, ... lossN], [err0, err1, ... errN]`, give `sum_axis` a value of `0` to sum over axis 0 to get the per-batch loss and error. These results will be accumulated and divided by the number of samples at the end to get the mean. Parameters ---------- func: callable `func(*batch) -> results` The function to call on each mini-batch. Note that the results must be `None`, a tuple or a NumPy array batch_iter: data set iterator Iterator that generates mini-batches of data progress_iter_func: [optional] callable `progress_iter_func(iterator, total=total, leave=leave)` A `tqdm` style function that will be passed the iterator that generates training batches along with the total number of batches and `False` for the `leave` parameter. By passing either `tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have the training loop display a progress bar. sum_axis: (default=`None`) int, tuple of ints or None If an integer or a tuple of integers, the results returned by `func` will be summed across this axis / these axes before being accumulated; e.g. if `func` returns an array of per-sample losses, with axis 0 being the sample dimension, passing a value of `0` as `sum_axis` will cause these results to be summed along axis 0 to get the per-batch sum before accumulating the losses. The total summed loss will be divided by the number of samples at the end in order to compute the mean loss. n_batches: [optional] integer that specifies the number of mini-batches to process before returning prepend_args: [optional] tuple Arguments to prepend to the arguments passed to `func` Returns ------- tuple The sum of the results of the function `fn` divided by the number of samples processed, e.g. `(sum(outA_per_batch) / n_samples, sum(outB_per_batch) / n_samples, ...)` Examples -------- The following examples will demonstrate the use of `mean_batch_map` to compute binary cross entropy loss over a data set. A few variants will be demonstrated: - the default behaviour in which the function being applied should return the sum over the batch sample axis - having the function return per sample results and maving `mean_batch_map` perform the sum operation. This is easier to understand but less efficient as a Theano function would have to move more data back from the GPU. - limiting the number of batches that will be processed in order to get partial results when dealing with a large data set Define a function to compute the per-sample binary cross entropy loss: >>> def binary_crossentropy_loss(pred, target): ... e = -target * np.log(pred) - (1 - target) * np.log(1 - pred) ... 
return e.mean(axis=1) Now define a function that computes the *SUM* of the binary cross entropy losses over the sample axis (axis 0), as the default behaviour of `mean_batch_map` will sum them up and divide by the number of samples at the end: >>> def binary_crossentropy_loss_sum(pred, target): ... return binary_crossentropy_loss(pred, target).sum() Construct prediction and target data >>> pred = np.random.uniform(0.1, 0.9, size=(7, 10)) >>> tgt = np.random.uniform(0.1, 0.9, size=(7, 10)) >>> ds = ArrayDataSource([pred, tgt]) Apply the loss sum function defined above: >>> batch_iter = ds.batch_iterator(batch_size=5) >>> loss = batch_map_mean(binary_crossentropy_loss_sum, batch_iter) >>> assert np.allclose( ... loss, binary_crossentropy_loss(pred, tgt).mean()) Have `mean_batch_map` sum over axis 0: >>> batch_iter = ds.batch_iterator(batch_size=5) >>> loss = batch_map_mean(binary_crossentropy_loss, batch_iter, ... sum_axis=0) >>> assert np.allclose( ... loss, binary_crossentropy_loss(pred, tgt).mean()) Construct a large data set and use `batch >>> pred_large = np.random.uniform(0.1, 0.9, size=(100, 10)) >>> tgt_large = np.random.uniform(0.1, 0.9, size=(100, 10)) >>> ds_large = ArrayDataSource([pred_large, tgt_large]) >>> iter_large = ds_large.batch_iterator(batch_size=5) >>> for i in range(10): ... partial_loss = batch_map_mean(binary_crossentropy_loss_sum, ... iter_large, n_batches=2) ... j = i * 10 ... assert np.allclose( ... partial_loss, binary_crossentropy_loss( ... pred_large[j:j + 10], tgt_large[j:j + 10]).mean()) """ # Accumulator for results and number of samples results_accum = None n_samples_accum = 0 # If `progress_iter_func` is not `None`, apply it if progress_iter_func is not None: batch_iter = progress_iter_func(batch_iter, total=n_batches, leave=False) # Train on each batch n_processed = 0 for batch in batch_iter: # Get number of samples in batch; can vary batch_n = _length_of_batch(batch) # Apply on batch and check the type of the results if prepend_args is not None: batch_results = func(*(prepend_args + tuple(batch))) else: batch_results = func(*batch) if batch_results is None: pass elif isinstance(batch_results, (np.ndarray, float)): batch_results = (batch_results,) elif isinstance(batch_results, tuple): pass else: raise TypeError( 'Batch function should return a tuple of results, a ' 'single result as a NumPy array or float, or None, ' 'not {}'.format(type(batch_results))) # Accumulate results and number of samples if results_accum is None: # Initialise the accumulator to the batch results if `func` # returns summed results or if it returned None; # don't attempt to iterate over None and sum each item if batch_results is None: pass elif sum_axis is None: results_accum = list(batch_results) else: results_accum = [br.sum(axis=sum_axis) for br in batch_results] else: if batch_results is not None: for i in range(len(results_accum)): br = batch_results[i] if sum_axis is not None: br = br.sum(axis=sum_axis) results_accum[i] += br n_samples_accum += batch_n n_processed += 1 if n_batches is not None and n_processed >= n_batches: break # Divide by the number of training examples used to compute mean if results_accum is not None: results_accum = tuple([np.array(r).astype(float) / n_samples_accum for r in results_accum]) return results_accum
[ "def", "batch_map_mean", "(", "func", ",", "batch_iter", ",", "progress_iter_func", "=", "None", ",", "sum_axis", "=", "None", ",", "n_batches", "=", "None", ",", "prepend_args", "=", "None", ")", ":", "# Accumulator for results and number of samples", "results_accum", "=", "None", "n_samples_accum", "=", "0", "# If `progress_iter_func` is not `None`, apply it", "if", "progress_iter_func", "is", "not", "None", ":", "batch_iter", "=", "progress_iter_func", "(", "batch_iter", ",", "total", "=", "n_batches", ",", "leave", "=", "False", ")", "# Train on each batch", "n_processed", "=", "0", "for", "batch", "in", "batch_iter", ":", "# Get number of samples in batch; can vary", "batch_n", "=", "_length_of_batch", "(", "batch", ")", "# Apply on batch and check the type of the results", "if", "prepend_args", "is", "not", "None", ":", "batch_results", "=", "func", "(", "*", "(", "prepend_args", "+", "tuple", "(", "batch", ")", ")", ")", "else", ":", "batch_results", "=", "func", "(", "*", "batch", ")", "if", "batch_results", "is", "None", ":", "pass", "elif", "isinstance", "(", "batch_results", ",", "(", "np", ".", "ndarray", ",", "float", ")", ")", ":", "batch_results", "=", "(", "batch_results", ",", ")", "elif", "isinstance", "(", "batch_results", ",", "tuple", ")", ":", "pass", "else", ":", "raise", "TypeError", "(", "'Batch function should return a tuple of results, a '", "'single result as a NumPy array or float, or None, '", "'not {}'", ".", "format", "(", "type", "(", "batch_results", ")", ")", ")", "# Accumulate results and number of samples", "if", "results_accum", "is", "None", ":", "# Initialise the accumulator to the batch results if `func`", "# returns summed results or if it returned None;", "# don't attempt to iterate over None and sum each item", "if", "batch_results", "is", "None", ":", "pass", "elif", "sum_axis", "is", "None", ":", "results_accum", "=", "list", "(", "batch_results", ")", "else", ":", "results_accum", "=", "[", "br", ".", "sum", "(", "axis", "=", "sum_axis", ")", "for", "br", "in", "batch_results", "]", "else", ":", "if", "batch_results", "is", "not", "None", ":", "for", "i", "in", "range", "(", "len", "(", "results_accum", ")", ")", ":", "br", "=", "batch_results", "[", "i", "]", "if", "sum_axis", "is", "not", "None", ":", "br", "=", "br", ".", "sum", "(", "axis", "=", "sum_axis", ")", "results_accum", "[", "i", "]", "+=", "br", "n_samples_accum", "+=", "batch_n", "n_processed", "+=", "1", "if", "n_batches", "is", "not", "None", "and", "n_processed", ">=", "n_batches", ":", "break", "# Divide by the number of training examples used to compute mean", "if", "results_accum", "is", "not", "None", ":", "results_accum", "=", "tuple", "(", "[", "np", ".", "array", "(", "r", ")", ".", "astype", "(", "float", ")", "/", "n_samples_accum", "for", "r", "in", "results_accum", "]", ")", "return", "results_accum" ]
44.151685
0.000124
def _point_scalar(self, name=None): """ Returns point scalars of a vtk object Parameters ---------- name : str Name of point scalars to retrieve. Returns ------- scalars : np.ndarray Numpy array of scalars """ if name is None: # use active scalar array field, name = self.active_scalar_info if field != POINT_DATA_FIELD: raise RuntimeError('Must specify an array to fetch.') vtkarr = self.GetPointData().GetArray(name) if vtkarr is None: raise AssertionError('({}) is not a point scalar'.format(name)) # numpy does not support bit array data types if isinstance(vtkarr, vtk.vtkBitArray): vtkarr = vtk_bit_array_to_char(vtkarr) if name not in self._point_bool_array_names: self._point_bool_array_names.append(name) array = vtk_to_numpy(vtkarr) if array.dtype == np.uint8 and name in self._point_bool_array_names: array = array.view(np.bool) return array
[ "def", "_point_scalar", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "# use active scalar array", "field", ",", "name", "=", "self", ".", "active_scalar_info", "if", "field", "!=", "POINT_DATA_FIELD", ":", "raise", "RuntimeError", "(", "'Must specify an array to fetch.'", ")", "vtkarr", "=", "self", ".", "GetPointData", "(", ")", ".", "GetArray", "(", "name", ")", "if", "vtkarr", "is", "None", ":", "raise", "AssertionError", "(", "'({}) is not a point scalar'", ".", "format", "(", "name", ")", ")", "# numpy does not support bit array data types", "if", "isinstance", "(", "vtkarr", ",", "vtk", ".", "vtkBitArray", ")", ":", "vtkarr", "=", "vtk_bit_array_to_char", "(", "vtkarr", ")", "if", "name", "not", "in", "self", ".", "_point_bool_array_names", ":", "self", ".", "_point_bool_array_names", ".", "append", "(", "name", ")", "array", "=", "vtk_to_numpy", "(", "vtkarr", ")", "if", "array", ".", "dtype", "==", "np", ".", "uint8", "and", "name", "in", "self", ".", "_point_bool_array_names", ":", "array", "=", "array", ".", "view", "(", "np", ".", "bool", ")", "return", "array" ]
32.323529
0.001767
def get_properties_of_kind(kind, start=None, end=None): """Return all properties of kind in the specified range. NOTE: This function does not return unindexed properties. Args: kind: name of kind whose properties you want. start: only return properties >= start if start is not None. end: only return properties < end if end is not None. Returns: A list of property names of kind between the (optional) start and end values. """ q = Property.query(ancestor=Property.key_for_kind(kind)) if start is not None and start != '': q = q.filter(Property.key >= Property.key_for_property(kind, start)) if end is not None: if end == '': return [] q = q.filter(Property.key < Property.key_for_property(kind, end)) return [Property.key_to_property(k) for k in q.iter(keys_only=True)]
[ "def", "get_properties_of_kind", "(", "kind", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "q", "=", "Property", ".", "query", "(", "ancestor", "=", "Property", ".", "key_for_kind", "(", "kind", ")", ")", "if", "start", "is", "not", "None", "and", "start", "!=", "''", ":", "q", "=", "q", ".", "filter", "(", "Property", ".", "key", ">=", "Property", ".", "key_for_property", "(", "kind", ",", "start", ")", ")", "if", "end", "is", "not", "None", ":", "if", "end", "==", "''", ":", "return", "[", "]", "q", "=", "q", ".", "filter", "(", "Property", ".", "key", "<", "Property", ".", "key_for_property", "(", "kind", ",", "end", ")", ")", "return", "[", "Property", ".", "key_to_property", "(", "k", ")", "for", "k", "in", "q", ".", "iter", "(", "keys_only", "=", "True", ")", "]" ]
35.173913
0.008424
def add_haproxy_checks(nrpe, unit_name): """ Add HAProxy server and queue depth checks :param NRPE nrpe: NRPE object to add check to :param str unit_name: Unit name to use in check description """ nrpe.add_check( shortname='haproxy_servers', description='Check HAProxy {%s}' % unit_name, check_cmd='check_haproxy.sh') nrpe.add_check( shortname='haproxy_queue', description='Check HAProxy queue depth {%s}' % unit_name, check_cmd='check_haproxy_queue_depth.sh')
[ "def", "add_haproxy_checks", "(", "nrpe", ",", "unit_name", ")", ":", "nrpe", ".", "add_check", "(", "shortname", "=", "'haproxy_servers'", ",", "description", "=", "'Check HAProxy {%s}'", "%", "unit_name", ",", "check_cmd", "=", "'check_haproxy.sh'", ")", "nrpe", ".", "add_check", "(", "shortname", "=", "'haproxy_queue'", ",", "description", "=", "'Check HAProxy queue depth {%s}'", "%", "unit_name", ",", "check_cmd", "=", "'check_haproxy_queue_depth.sh'", ")" ]
34.466667
0.001883
def get_transport_target(cls, instance, timeout, retries): ''' Generate a Transport target object based on the instance's configuration ''' if "ip_address" not in instance: raise Exception("An IP address needs to be specified") ip_address = instance["ip_address"] port = int(instance.get("port", 161)) # Default SNMP port return hlapi.UdpTransportTarget((ip_address, port), timeout=timeout, retries=retries)
[ "def", "get_transport_target", "(", "cls", ",", "instance", ",", "timeout", ",", "retries", ")", ":", "if", "\"ip_address\"", "not", "in", "instance", ":", "raise", "Exception", "(", "\"An IP address needs to be specified\"", ")", "ip_address", "=", "instance", "[", "\"ip_address\"", "]", "port", "=", "int", "(", "instance", ".", "get", "(", "\"port\"", ",", "161", ")", ")", "# Default SNMP port", "return", "hlapi", ".", "UdpTransportTarget", "(", "(", "ip_address", ",", "port", ")", ",", "timeout", "=", "timeout", ",", "retries", "=", "retries", ")" ]
52
0.008403
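A small usage sketch for the helper above, assuming the pysnmp package is installed; the instance dict mirrors the expected configuration and the address is a documentation IP:

from pysnmp import hlapi

instance = {"ip_address": "192.0.2.10", "port": 161}
target = hlapi.UdpTransportTarget(
    (instance["ip_address"], int(instance.get("port", 161))),
    timeout=1, retries=3)
print(target)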
def searchInAleph(base, phrase, considerSimilar, field): """ Send request to the aleph search engine. Request itself is pretty useless, but it can be later used as parameter for :func:`getDocumentIDs`, which can fetch records from Aleph. Args: base (str): which database you want to use phrase (str): what do you want to search considerSimilar (bool): fuzzy search, which is not working at all, so don't use it field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`) Returns: dictionary: consisting from following fields: | error (optional): present if there was some form of error | no_entries (int): number of entries that can be fetch from aleph | no_records (int): no idea what is this, but it is always >= than `no_entries` | set_number (int): important - something like ID of your request | session-id (str): used to count users for licensing purposes Example: Returned dict:: { 'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB', 'set_number': 36520, 'no_records': 1, 'no_entries': 1 } Raises: AlephException: if Aleph doesn't return any information InvalidAlephFieldException: if specified field is not valid """ downer = Downloader() if field.lower() not in VALID_ALEPH_FIELDS: raise InvalidAlephFieldException("Unknown field '" + field + "'!") param_url = Template(SEARCH_URL_TEMPLATE).substitute( PHRASE=quote_plus(phrase), # urlencode phrase BASE=base, FIELD=field, SIMILAR="Y" if considerSimilar else "N" ) result = downer.download(ALEPH_URL + param_url) dom = dhtmlparser.parseString(result) find = dom.find("find") # find <find> element :) if len(find) <= 0: raise AlephException("Aleph didn't returned any information.") find = find[0] # convert aleph result into dictionary result = _alephResultToDict(find) # add informations about base into result result["base"] = base if "error" not in result: return result # handle errors if result["error"] == "empty set": result["no_entries"] = 0 # empty set have 0 entries return result else: raise AlephException(result["error"])
[ "def", "searchInAleph", "(", "base", ",", "phrase", ",", "considerSimilar", ",", "field", ")", ":", "downer", "=", "Downloader", "(", ")", "if", "field", ".", "lower", "(", ")", "not", "in", "VALID_ALEPH_FIELDS", ":", "raise", "InvalidAlephFieldException", "(", "\"Unknown field '\"", "+", "field", "+", "\"'!\"", ")", "param_url", "=", "Template", "(", "SEARCH_URL_TEMPLATE", ")", ".", "substitute", "(", "PHRASE", "=", "quote_plus", "(", "phrase", ")", ",", "# urlencode phrase", "BASE", "=", "base", ",", "FIELD", "=", "field", ",", "SIMILAR", "=", "\"Y\"", "if", "considerSimilar", "else", "\"N\"", ")", "result", "=", "downer", ".", "download", "(", "ALEPH_URL", "+", "param_url", ")", "dom", "=", "dhtmlparser", ".", "parseString", "(", "result", ")", "find", "=", "dom", ".", "find", "(", "\"find\"", ")", "# find <find> element :)", "if", "len", "(", "find", ")", "<=", "0", ":", "raise", "AlephException", "(", "\"Aleph didn't returned any information.\"", ")", "find", "=", "find", "[", "0", "]", "# convert aleph result into dictionary", "result", "=", "_alephResultToDict", "(", "find", ")", "# add informations about base into result", "result", "[", "\"base\"", "]", "=", "base", "if", "\"error\"", "not", "in", "result", ":", "return", "result", "# handle errors", "if", "result", "[", "\"error\"", "]", "==", "\"empty set\"", ":", "result", "[", "\"no_entries\"", "]", "=", "0", "# empty set have 0 entries", "return", "result", "else", ":", "raise", "AlephException", "(", "result", "[", "\"error\"", "]", ")" ]
32.684932
0.000407
def set_deserializer_by_mime_type(self, mime_type): """ :param mime_type: :return: Used by content_type_set to get a reference to the deserializer object """ for deserializer in self._deserializers: if deserializer.content_type() == mime_type: self._selected_deserializer = deserializer return raise exception.UnsupportedContentTypeError(mime_type, self.supported_mime_types_str)
[ "def", "set_deserializer_by_mime_type", "(", "self", ",", "mime_type", ")", ":", "for", "deserializer", "in", "self", ".", "_deserializers", ":", "if", "deserializer", ".", "content_type", "(", ")", "==", "mime_type", ":", "self", ".", "_selected_deserializer", "=", "deserializer", "return", "raise", "exception", ".", "UnsupportedContentTypeError", "(", "mime_type", ",", "self", ".", "supported_mime_types_str", ")" ]
33.642857
0.008264
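The lookup above is plain content negotiation: scan the registered deserializers for a matching MIME type and remember the match. A self-contained toy version (class and exception choices here are invented):

class JsonDeserializer:
    def content_type(self):
        return 'application/json'

def find_deserializer(deserializers, mime_type):
    for deserializer in deserializers:
        if deserializer.content_type() == mime_type:
            return deserializer
    raise ValueError('unsupported content type: %s' % mime_type)

print(find_deserializer([JsonDeserializer()], 'application/json'))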
def overview(index, start, end): """Compute metrics in the overview section for enriched github issues indexes. Returns a dictionary. Each key in the dictionary is the name of a metric, the value is the value of that metric. Value can be a complex object (eg, a time series). :param index: index object :param start: start date to get the data from :param end: end date to get the data upto :return: dictionary with the value of the metrics """ results = { "activity_metrics": [OpenedIssues(index, start, end), ClosedIssues(index, start, end)], "author_metrics": [], "bmi_metrics": [BMI(index, start, end)], "time_to_close_metrics": [DaysToCloseMedian(index, start, end)], "projects_metrics": [] } return results
[ "def", "overview", "(", "index", ",", "start", ",", "end", ")", ":", "results", "=", "{", "\"activity_metrics\"", ":", "[", "OpenedIssues", "(", "index", ",", "start", ",", "end", ")", ",", "ClosedIssues", "(", "index", ",", "start", ",", "end", ")", "]", ",", "\"author_metrics\"", ":", "[", "]", ",", "\"bmi_metrics\"", ":", "[", "BMI", "(", "index", ",", "start", ",", "end", ")", "]", ",", "\"time_to_close_metrics\"", ":", "[", "DaysToCloseMedian", "(", "index", ",", "start", ",", "end", ")", "]", ",", "\"projects_metrics\"", ":", "[", "]", "}", "return", "results" ]
35.391304
0.001196
def correlation_matvec(P, obs1, obs2=None, times=[1]): r"""Time-correlation for equilibrium experiment - via matrix vector products. Parameters ---------- P : (M, M) ndarray Transition matrix obs1 : (M,) ndarray Observable, represented as vector on state space obs2 : (M,) ndarray (optional) Second observable, for cross-correlations times : list of int (optional) List of times (in tau) at which to compute correlation Returns ------- correlations : ndarray Correlation values at given times """ if obs2 is None: obs2 = obs1 """Compute stationary vector""" mu = statdist(P) obs1mu = mu * obs1 times = np.asarray(times) """Sort in increasing order""" ind = np.argsort(times) times = times[ind] if times[0] < 0: raise ValueError("Times can not be negative") dt = times[1:] - times[0:-1] nt = len(times) correlations = np.zeros(nt) """Propagate obs2 to initial time""" obs2_t = 1.0 * obs2 obs2_t = propagate(P, obs2_t, times[0]) correlations[0] = np.dot(obs1mu, obs2_t) for i in range(nt - 1): obs2_t = propagate(P, obs2_t, dt[i]) correlations[i + 1] = np.dot(obs1mu, obs2_t) """Cast back to original order of time points""" correlations = correlations[ind] return correlations
[ "def", "correlation_matvec", "(", "P", ",", "obs1", ",", "obs2", "=", "None", ",", "times", "=", "[", "1", "]", ")", ":", "if", "obs2", "is", "None", ":", "obs2", "=", "obs1", "\"\"\"Compute stationary vector\"\"\"", "mu", "=", "statdist", "(", "P", ")", "obs1mu", "=", "mu", "*", "obs1", "times", "=", "np", ".", "asarray", "(", "times", ")", "\"\"\"Sort in increasing order\"\"\"", "ind", "=", "np", ".", "argsort", "(", "times", ")", "times", "=", "times", "[", "ind", "]", "if", "times", "[", "0", "]", "<", "0", ":", "raise", "ValueError", "(", "\"Times can not be negative\"", ")", "dt", "=", "times", "[", "1", ":", "]", "-", "times", "[", "0", ":", "-", "1", "]", "nt", "=", "len", "(", "times", ")", "correlations", "=", "np", ".", "zeros", "(", "nt", ")", "\"\"\"Propagate obs2 to initial time\"\"\"", "obs2_t", "=", "1.0", "*", "obs2", "obs2_t", "=", "propagate", "(", "P", ",", "obs2_t", ",", "times", "[", "0", "]", ")", "correlations", "[", "0", "]", "=", "np", ".", "dot", "(", "obs1mu", ",", "obs2_t", ")", "for", "i", "in", "range", "(", "nt", "-", "1", ")", ":", "obs2_t", "=", "propagate", "(", "P", ",", "obs2_t", ",", "dt", "[", "i", "]", ")", "correlations", "[", "i", "+", "1", "]", "=", "np", ".", "dot", "(", "obs1mu", ",", "obs2_t", ")", "\"\"\"Cast back to original order of time points\"\"\"", "correlations", "=", "correlations", "[", "ind", "]", "return", "correlations" ]
25.596154
0.001447
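The routine above is mu-weighted matrix-vector propagation: corr(t) = sum_i mu_i * obs1_i * (P^t obs2)_i. A self-contained NumPy illustration for a two-state chain, with the stationary vector taken from the leading left eigenvector:

import numpy as np

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
obs = np.array([0.0, 1.0])

# Stationary distribution: left eigenvector of P for eigenvalue 1
evals, evecs = np.linalg.eig(P.T)
mu = np.real(evecs[:, np.argmax(np.real(evals))])
mu /= mu.sum()

times = [1, 2, 5]
obs_t = obs.copy()
correlations = []
last = 0
for t in times:
    for _ in range(t - last):
        obs_t = P.dot(obs_t)  # propagate the observable forward in time
    last = t
    correlations.append(np.dot(mu * obs, obs_t))
print(correlations)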
def t_CHAR(self, t): r"'.'" # A single char t.value = ord(t.value[1]) t.type = 'INTEGER' return t
[ "def", "t_CHAR", "(", "self", ",", "t", ")", ":", "# A single char", "t", ".", "value", "=", "ord", "(", "t", ".", "value", "[", "1", "]", ")", "t", ".", "type", "=", "'INTEGER'", "return", "t" ]
21
0.015267
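The rule above follows PLY's convention of using the function docstring as the token regex and then re-typing the match. A minimal module-level ply.lex sketch of the same rule (requires the ply package; 'CHAR' still has to be declared in tokens even though matches are emitted as INTEGER):

import ply.lex as lex

tokens = ('INTEGER', 'CHAR')
t_ignore = ' \t'

def t_CHAR(t):
    r"'.'"
    # A single quoted char becomes an INTEGER token holding its code point
    t.value = ord(t.value[1])
    t.type = 'INTEGER'
    return t

def t_INTEGER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input("'A' 65")
for tok in lexer:
    print(tok.type, tok.value)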
def check_entry(self, entries, *args, **kwargs): """ With a list of entries, check each entry against every other """ verbosity = kwargs.get('verbosity', 1) user_total_overlaps = 0 user = '' for index_a, entry_a in enumerate(entries): # Show the name the first time through if index_a == 0: if args and verbosity >= 1 or verbosity >= 2: self.show_name(entry_a.user) user = entry_a.user for index_b in range(index_a, len(entries)): entry_b = entries[index_b] if entry_a.check_overlap(entry_b): user_total_overlaps += 1 self.show_overlap(entry_a, entry_b, verbosity=verbosity) if user_total_overlaps and user and verbosity >= 1: overlap_data = { 'first': user.first_name, 'last': user.last_name, 'total': user_total_overlaps, } self.stdout.write('Total overlapping entries for user ' + '%(first)s %(last)s: %(total)d' % overlap_data) return user_total_overlaps
[ "def", "check_entry", "(", "self", ",", "entries", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "verbosity", "=", "kwargs", ".", "get", "(", "'verbosity'", ",", "1", ")", "user_total_overlaps", "=", "0", "user", "=", "''", "for", "index_a", ",", "entry_a", "in", "enumerate", "(", "entries", ")", ":", "# Show the name the first time through", "if", "index_a", "==", "0", ":", "if", "args", "and", "verbosity", ">=", "1", "or", "verbosity", ">=", "2", ":", "self", ".", "show_name", "(", "entry_a", ".", "user", ")", "user", "=", "entry_a", ".", "user", "for", "index_b", "in", "range", "(", "index_a", ",", "len", "(", "entries", ")", ")", ":", "entry_b", "=", "entries", "[", "index_b", "]", "if", "entry_a", ".", "check_overlap", "(", "entry_b", ")", ":", "user_total_overlaps", "+=", "1", "self", ".", "show_overlap", "(", "entry_a", ",", "entry_b", ",", "verbosity", "=", "verbosity", ")", "if", "user_total_overlaps", "and", "user", "and", "verbosity", ">=", "1", ":", "overlap_data", "=", "{", "'first'", ":", "user", ".", "first_name", ",", "'last'", ":", "user", ".", "last_name", ",", "'total'", ":", "user_total_overlaps", ",", "}", "self", ".", "stdout", ".", "write", "(", "'Total overlapping entries for user '", "+", "'%(first)s %(last)s: %(total)d'", "%", "overlap_data", ")", "return", "user_total_overlaps" ]
43.777778
0.001656
def clear( self ): """ Clears the current settings for the current action. """ item = self.uiActionTREE.currentItem() if ( not item ): return self.uiShortcutTXT.setText('') item.setText(1, '')
[ "def", "clear", "(", "self", ")", ":", "item", "=", "self", ".", "uiActionTREE", ".", "currentItem", "(", ")", "if", "(", "not", "item", ")", ":", "return", "self", ".", "uiShortcutTXT", ".", "setText", "(", "''", ")", "item", ".", "setText", "(", "1", ",", "''", ")" ]
26
0.026022
def _execCommand(Argv, collect_missing): r"""Worker of execCommand. """ if not Argv: raise HandledException('Please specify a command!') RouteParts = Argv[0].split('/') Args, KwArgs = getDigestableArgs(Argv[1:]) ResolvedMember = getDescendant(BaseGroup, RouteParts[:]) if isinstance(ResolvedMember, Group): raise HandledException('Please specify a task.', Member=ResolvedMember) if not isinstance(ResolvedMember, Task): raise HandledException('No such task.', Member=BaseGroup) return ResolvedMember.__collect_n_call__(*Args, **KwArgs) if collect_missing else ResolvedMember(*Args, **KwArgs)
[ "def", "_execCommand", "(", "Argv", ",", "collect_missing", ")", ":", "if", "not", "Argv", ":", "raise", "HandledException", "(", "'Please specify a command!'", ")", "RouteParts", "=", "Argv", "[", "0", "]", ".", "split", "(", "'/'", ")", "Args", ",", "KwArgs", "=", "getDigestableArgs", "(", "Argv", "[", "1", ":", "]", ")", "ResolvedMember", "=", "getDescendant", "(", "BaseGroup", ",", "RouteParts", "[", ":", "]", ")", "if", "isinstance", "(", "ResolvedMember", ",", "Group", ")", ":", "raise", "HandledException", "(", "'Please specify a task.'", ",", "Member", "=", "ResolvedMember", ")", "if", "not", "isinstance", "(", "ResolvedMember", ",", "Task", ")", ":", "raise", "HandledException", "(", "'No such task.'", ",", "Member", "=", "BaseGroup", ")", "return", "ResolvedMember", ".", "__collect_n_call__", "(", "*", "Args", ",", "*", "*", "KwArgs", ")", "if", "collect_missing", "else", "ResolvedMember", "(", "*", "Args", ",", "*", "*", "KwArgs", ")" ]
34.777778
0.015552
def draw_math(str, x, y, alpha=1.0): """ Uses mimetex to generate a GIF-image from the LaTeX equation. """ try: from web import _ctx except: pass str = re.sub("</{0,1}math>", "", str.strip()) img = mimetex.gif(str) w, h = _ctx.imagesize(img) _ctx.image(img, x, y, alpha=alpha) return w, h
[ "def", "draw_math", "(", "str", ",", "x", ",", "y", ",", "alpha", "=", "1.0", ")", ":", "try", ":", "from", "web", "import", "_ctx", "except", ":", "pass", "str", "=", "re", ".", "sub", "(", "\"</{0,1}math>\"", ",", "\"\"", ",", "str", ".", "strip", "(", ")", ")", "img", "=", "mimetex", ".", "gif", "(", "str", ")", "w", ",", "h", "=", "_ctx", ".", "imagesize", "(", "img", ")", "_ctx", ".", "image", "(", "img", ",", "x", ",", "y", ",", "alpha", "=", "alpha", ")", "return", "w", ",", "h" ]
25.153846
0.020649
def parse(self): """ Parse the file specified by the constructor. """ f = open(self.parse_log_path, "r") self.parse2(f) f.close()
[ "def", "parse", "(", "self", ")", ":", "f", "=", "open", "(", "self", ".", "parse_log_path", ",", "\"r\"", ")", "self", ".", "parse2", "(", "f", ")", "f", ".", "close", "(", ")" ]
23.285714
0.011834
def handle_get_reseller(self, req): """Handles the GET v2 call for getting general reseller information (currently just a list of accounts). Can only be called by a .reseller_admin. On success, a JSON dictionary will be returned with a single `accounts` key whose value is list of dicts. Each dict represents an account and currently only contains the single key `name`. For example:: {"accounts": [{"name": "reseller"}, {"name": "test"}, {"name": "test2"}]} :param req: The swob.Request to process. :returns: swob.Response, 2xx on success with a JSON dictionary as explained above. """ if not self.is_reseller_admin(req): return self.denied_response(req) listing = [] marker = '' while True: path = '/v1/%s?format=json&marker=%s' % (quote(self.auth_account), quote(marker)) resp = self.make_pre_authed_request( req.environ, 'GET', path).get_response(self.app) if resp.status_int // 100 != 2: raise Exception('Could not list main auth account: %s %s' % (path, resp.status)) sublisting = json.loads(resp.body) if not sublisting: break for container in sublisting: if container['name'][0] != '.': listing.append({'name': container['name']}) marker = sublisting[-1]['name'].encode('utf-8') return Response(body=json.dumps({'accounts': listing}), content_type=CONTENT_TYPE_JSON)
[ "def", "handle_get_reseller", "(", "self", ",", "req", ")", ":", "if", "not", "self", ".", "is_reseller_admin", "(", "req", ")", ":", "return", "self", ".", "denied_response", "(", "req", ")", "listing", "=", "[", "]", "marker", "=", "''", "while", "True", ":", "path", "=", "'/v1/%s?format=json&marker=%s'", "%", "(", "quote", "(", "self", ".", "auth_account", ")", ",", "quote", "(", "marker", ")", ")", "resp", "=", "self", ".", "make_pre_authed_request", "(", "req", ".", "environ", ",", "'GET'", ",", "path", ")", ".", "get_response", "(", "self", ".", "app", ")", "if", "resp", ".", "status_int", "//", "100", "!=", "2", ":", "raise", "Exception", "(", "'Could not list main auth account: %s %s'", "%", "(", "path", ",", "resp", ".", "status", ")", ")", "sublisting", "=", "json", ".", "loads", "(", "resp", ".", "body", ")", "if", "not", "sublisting", ":", "break", "for", "container", "in", "sublisting", ":", "if", "container", "[", "'name'", "]", "[", "0", "]", "!=", "'.'", ":", "listing", ".", "append", "(", "{", "'name'", ":", "container", "[", "'name'", "]", "}", ")", "marker", "=", "sublisting", "[", "-", "1", "]", "[", "'name'", "]", ".", "encode", "(", "'utf-8'", ")", "return", "Response", "(", "body", "=", "json", ".", "dumps", "(", "{", "'accounts'", ":", "listing", "}", ")", ",", "content_type", "=", "CONTENT_TYPE_JSON", ")" ]
45.891892
0.001153
def load_ext(self): """Read time series data like method |IOSequence.load_ext| of class |IOSequence|, but with special handling of missing data. When reading incomplete time series data, *HydPy* usually raises a |RuntimeError| to prevent from performing erroneous calculations. For instance, this makes sense for meteorological input data, being a definite requirement for hydrological simulations. However, the same often does not hold for the time series of |Obs| sequences, e.g. representing measured discharge. Measured discharge is often handled as an optional input value, or even used for comparison purposes only. According to this reasoning, *HydPy* raises (at most) a |UserWarning| in case of missing or incomplete external time series data of |Obs| sequences. The following examples show this based on the `LahnH` project, mainly focussing on the |Obs| sequence of node `dill`, which is ready for handling time series data at the end of the following steps: >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, pub, TestIO >>> hp = HydPy('LahnH') >>> pub.timegrids = '1996-01-01', '1996-01-06', '1d' >>> with TestIO(): ... hp.prepare_network() ... hp.init_models() ... hp.prepare_obsseries() >>> obs = hp.nodes.dill.sequences.obs >>> obs.ramflag True Trying to read non-existing data raises the following warning and disables the sequence's ability to handle time series data: >>> with TestIO(): ... hp.load_obsseries() # doctest: +ELLIPSIS Traceback (most recent call last): ... UserWarning: The `memory flag` of sequence `obs` of node `dill` had \ to be set to `False` due to the following problem: While trying to load the \ external data of sequence `obs` of node `dill`, the following error occurred: \ [Errno 2] No such file or directory: '...dill_obs_q.asc' >>> obs.ramflag False After writing a complete external data fine, everything works fine: >>> obs.activate_ram() >>> obs.series = 1.0 >>> with TestIO(): ... obs.save_ext() >>> obs.series = 0.0 >>> with TestIO(): ... obs.load_ext() >>> obs.series InfoArray([ 1., 1., 1., 1., 1.]) Reading incomplete data also results in a warning message, but does not disable the |IOSequence.memoryflag|: >>> import numpy >>> obs.series[2] = numpy.nan >>> with TestIO(): ... pub.sequencemanager.nodeoverwrite = True ... obs.save_ext() >>> with TestIO(): ... obs.load_ext() Traceback (most recent call last): ... UserWarning: While trying to load the external data of sequence `obs` \ of node `dill`, the following error occurred: The series array of sequence \ `obs` of node `dill` contains 1 nan value. >>> obs.memoryflag True Option |Options.warnmissingobsfile| allows disabling the warning messages without altering the functionalities described above: >>> hp.prepare_obsseries() >>> with TestIO(): ... with pub.options.warnmissingobsfile(False): ... hp.load_obsseries() >>> obs.series InfoArray([ 1., 1., nan, 1., 1.]) >>> hp.nodes.lahn_1.sequences.obs.memoryflag False """ try: super().load_ext() except OSError: del self.memoryflag if hydpy.pub.options.warnmissingobsfile: warnings.warn( f'The `memory flag` of sequence ' f'{objecttools.nodephrase(self)} had to be set to `False` ' f'due to the following problem: {sys.exc_info()[1]}') except BaseException: if hydpy.pub.options.warnmissingobsfile: warnings.warn(str(sys.exc_info()[1]))
[ "def", "load_ext", "(", "self", ")", ":", "try", ":", "super", "(", ")", ".", "load_ext", "(", ")", "except", "OSError", ":", "del", "self", ".", "memoryflag", "if", "hydpy", ".", "pub", ".", "options", ".", "warnmissingobsfile", ":", "warnings", ".", "warn", "(", "f'The `memory flag` of sequence '", "f'{objecttools.nodephrase(self)} had to be set to `False` '", "f'due to the following problem: {sys.exc_info()[1]}'", ")", "except", "BaseException", ":", "if", "hydpy", ".", "pub", ".", "options", ".", "warnmissingobsfile", ":", "warnings", ".", "warn", "(", "str", "(", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", ")" ]
40.405941
0.000478
def drawBackground( self, painter, opt, rect, brush ): """ Make sure the background extends to 0 for the first item. :param painter | <QtGui.QPainter> rect | <QtCore.QRect> brush | <QtGui.QBrush> """ if not brush: return painter.setPen(QtCore.Qt.NoPen) painter.setBrush(brush) painter.drawRect(rect)
[ "def", "drawBackground", "(", "self", ",", "painter", ",", "opt", ",", "rect", ",", "brush", ")", ":", "if", "not", "brush", ":", "return", "painter", ".", "setPen", "(", "QtCore", ".", "Qt", ".", "NoPen", ")", "painter", ".", "setBrush", "(", "brush", ")", "painter", ".", "drawRect", "(", "rect", ")" ]
31.642857
0.013158
def unroll_option_legs(cls, client, option_orders):
        ''' unroll option orders like this,
            https://github.com/joshfraser/robinhood-to-csv/blob/master/csv-options-export.py
        '''
        #
        # @TODO write this with python threads to make concurrent HTTP requests
        #
        results = []
        for oo in option_orders:
            for index, leg in enumerate(oo['legs']):
                for execution in leg['executions']:
                    order = dict()
                    keys_in_question = ['legs', 'price', 'type', 'premium',
                                        'processed_premium', 'response_category',
                                        'cancel_url']
                    for k, v in oo.items():
                        if k not in keys_in_question:
                            order[k] = oo[k]
                    order['order_type'] = oo['type']
                    contract = client.get(leg['option'])
                    order['leg'] = index+1
                    order['symbol'] = contract['chain_symbol']
                    order['strike_price'] = contract['strike_price']
                    order['expiration_date'] = contract['expiration_date']
                    order['contract_type'] = contract['type']
                    for k, v in leg.items():
                        if k not in ['id', 'executions']:
                            order[k] = leg[k]
                    coef = (-1.0 if leg['side'] == 'buy' else 1.0)
                    order['price'] = float(execution['price']) * 100.0 * coef
                    order['execution_id'] = execution['id']
                    results.append(order)
        return results
[ "def", "unroll_option_legs", "(", "cls", ",", "client", ",", "option_orders", ")", ":", "#", "# @TODO write this with python threats to make concurrent HTTP requests", "#", "results", "=", "[", "]", "for", "oo", "in", "option_orders", ":", "for", "index", ",", "leg", "in", "enumerate", "(", "oo", "[", "'legs'", "]", ")", ":", "for", "execution", "in", "leg", "[", "'executions'", "]", ":", "order", "=", "dict", "(", ")", "keys_in_question", "=", "[", "'legs'", ",", "'price'", ",", "'type'", ",", "'premium'", ",", "'processed_premium'", ",", "'response_category'", ",", "'cancel_url'", "]", "for", "k", ",", "v", "in", "oo", ".", "items", "(", ")", ":", "if", "k", "not", "in", "keys_in_question", ":", "order", "[", "k", "]", "=", "oo", "[", "k", "]", "order", "[", "'order_type'", "]", "=", "oo", "[", "'type'", "]", "contract", "=", "client", ".", "get", "(", "leg", "[", "'option'", "]", ")", "order", "[", "'leg'", "]", "=", "index", "+", "1", "order", "[", "'symbol'", "]", "=", "contract", "[", "'chain_symbol'", "]", "order", "[", "'strike_price'", "]", "=", "contract", "[", "'strike_price'", "]", "order", "[", "'expiration_date'", "]", "=", "contract", "[", "'expiration_date'", "]", "order", "[", "'contract_type'", "]", "=", "contract", "[", "'type'", "]", "for", "k", ",", "v", "in", "leg", ".", "items", "(", ")", ":", "if", "k", "not", "in", "[", "'id'", ",", "'executions'", "]", ":", "order", "[", "k", "]", "=", "leg", "[", "k", "]", "coef", "=", "(", "-", "1.0", "if", "leg", "[", "'side'", "]", "==", "'buy'", "else", "1.0", ")", "order", "[", "'price'", "]", "=", "float", "(", "execution", "[", "'price'", "]", ")", "*", "100.0", "*", "coef", "order", "[", "'execution_id'", "]", "=", "execution", "[", "'id'", "]", "results", ".", "append", "(", "order", ")", "return", "results" ]
38.627907
0.001174
def __diff(self, level, parents_ids=frozenset({})): """The main diff method""" if level.t1 is level.t2: return if self.__skip_this(level): return if get_type(level.t1) != get_type(level.t2): report_type_change = True for type_group in self.ignore_type_in_groups: if self.type_check_func(level.t1, type_group) and self.type_check_func(level.t2, type_group): report_type_change = False break if report_type_change: self.__diff_types(level) return if isinstance(level.t1, strings): self.__diff_str(level) elif isinstance(level.t1, numbers): self.__diff_numbers(level) elif isinstance(level.t1, Mapping): self.__diff_dict(level, parents_ids) elif isinstance(level.t1, tuple): self.__diff_tuple(level, parents_ids) elif isinstance(level.t1, (set, frozenset, OrderedSet)): self.__diff_set(level) elif isinstance(level.t1, Iterable): if self.ignore_order: self.__diff_iterable_with_deephash(level) else: self.__diff_iterable(level, parents_ids) else: self.__diff_obj(level, parents_ids)
[ "def", "__diff", "(", "self", ",", "level", ",", "parents_ids", "=", "frozenset", "(", "{", "}", ")", ")", ":", "if", "level", ".", "t1", "is", "level", ".", "t2", ":", "return", "if", "self", ".", "__skip_this", "(", "level", ")", ":", "return", "if", "get_type", "(", "level", ".", "t1", ")", "!=", "get_type", "(", "level", ".", "t2", ")", ":", "report_type_change", "=", "True", "for", "type_group", "in", "self", ".", "ignore_type_in_groups", ":", "if", "self", ".", "type_check_func", "(", "level", ".", "t1", ",", "type_group", ")", "and", "self", ".", "type_check_func", "(", "level", ".", "t2", ",", "type_group", ")", ":", "report_type_change", "=", "False", "break", "if", "report_type_change", ":", "self", ".", "__diff_types", "(", "level", ")", "return", "if", "isinstance", "(", "level", ".", "t1", ",", "strings", ")", ":", "self", ".", "__diff_str", "(", "level", ")", "elif", "isinstance", "(", "level", ".", "t1", ",", "numbers", ")", ":", "self", ".", "__diff_numbers", "(", "level", ")", "elif", "isinstance", "(", "level", ".", "t1", ",", "Mapping", ")", ":", "self", ".", "__diff_dict", "(", "level", ",", "parents_ids", ")", "elif", "isinstance", "(", "level", ".", "t1", ",", "tuple", ")", ":", "self", ".", "__diff_tuple", "(", "level", ",", "parents_ids", ")", "elif", "isinstance", "(", "level", ".", "t1", ",", "(", "set", ",", "frozenset", ",", "OrderedSet", ")", ")", ":", "self", ".", "__diff_set", "(", "level", ")", "elif", "isinstance", "(", "level", ".", "t1", ",", "Iterable", ")", ":", "if", "self", ".", "ignore_order", ":", "self", ".", "__diff_iterable_with_deephash", "(", "level", ")", "else", ":", "self", ".", "__diff_iterable", "(", "level", ",", "parents_ids", ")", "else", ":", "self", ".", "__diff_obj", "(", "level", ",", "parents_ids", ")" ]
32
0.002219
def ksum(p, K=2):
    """From T. Ogita, S.M. Rump, and S. Oishi. Accurate Sum and Dot Product,
    SIAM J. Sci. Comput., 26(6), 1955–1988 (34 pages).
    <https://doi.org/10.1137/030601818>.

    Algorithm 4.8. Summation as in K-fold precision by (K−1)-fold error-free
    vector transformation.
    """
    # Don't overwrite the input data.
    q = p.copy()
    distill(q, K - 1)
    return numpy.sum(q[:-1], axis=0) + q[-1]
[ "def", "ksum", "(", "p", ",", "K", "=", "2", ")", ":", "# Don't override the input data.", "q", "=", "p", ".", "copy", "(", ")", "distill", "(", "q", ",", "K", "-", "1", ")", "return", "numpy", ".", "sum", "(", "q", "[", ":", "-", "1", "]", ",", "axis", "=", "0", ")", "+", "q", "[", "-", "1", "]" ]
27.933333
0.002309
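The `ksum` routine above pays off exactly when naive floating-point summation cancels digits away. Below is a minimal usage sketch; it assumes `ksum` (together with its helper `distill`) can be imported from the module the record is taken from, and the commented results are expectations rather than verified output.

# Usage sketch for ksum; assumes ksum/distill are importable from the module above.
import numpy

# Summands chosen so plain float64 summation loses the small term entirely.
p = numpy.array([1.0e16, 1.0, -1.0e16])

print(numpy.sum(p))   # naive sum: the 1.0 is swallowed and 0.0 comes back
print(ksum(p, K=2))   # compensated sum: should recover 1.0 (or very nearly so)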
def _check_deprecated(self, dest, kwargs): """Checks option for deprecation and issues a warning/error if necessary.""" removal_version = kwargs.get('removal_version', None) if removal_version is not None: warn_or_error( removal_version=removal_version, deprecated_entity_description="option '{}' in {}".format(dest, self._scope_str()), deprecation_start_version=kwargs.get('deprecation_start_version', None), hint=kwargs.get('removal_hint', None), stacklevel=9999)
[ "def", "_check_deprecated", "(", "self", ",", "dest", ",", "kwargs", ")", ":", "removal_version", "=", "kwargs", ".", "get", "(", "'removal_version'", ",", "None", ")", "if", "removal_version", "is", "not", "None", ":", "warn_or_error", "(", "removal_version", "=", "removal_version", ",", "deprecated_entity_description", "=", "\"option '{}' in {}\"", ".", "format", "(", "dest", ",", "self", ".", "_scope_str", "(", ")", ")", ",", "deprecation_start_version", "=", "kwargs", ".", "get", "(", "'deprecation_start_version'", ",", "None", ")", ",", "hint", "=", "kwargs", ".", "get", "(", "'removal_hint'", ",", "None", ")", ",", "stacklevel", "=", "9999", ")" ]
51.4
0.00956
def indentation_pre_event_input_accelerators(editor, event): """ Implements indentation pre event input accelerators. :param editor: Document editor. :type editor: QWidget :param event: Event being handled. :type event: QEvent :return: Process event. :rtype: bool """ process_event = True if not hasattr(editor, "indent"): return process_event if event.key() == Qt.Key_Tab: process_event = editor.indent() and False elif event.key() == Qt.Key_Backtab: process_event = editor.unindent() and False return process_event
[ "def", "indentation_pre_event_input_accelerators", "(", "editor", ",", "event", ")", ":", "process_event", "=", "True", "if", "not", "hasattr", "(", "editor", ",", "\"indent\"", ")", ":", "return", "process_event", "if", "event", ".", "key", "(", ")", "==", "Qt", ".", "Key_Tab", ":", "process_event", "=", "editor", ".", "indent", "(", ")", "and", "False", "elif", "event", ".", "key", "(", ")", "==", "Qt", ".", "Key_Backtab", ":", "process_event", "=", "editor", ".", "unindent", "(", ")", "and", "False", "return", "process_event" ]
27.571429
0.001669
def _print_SCALAR_TYPES(self, expr, *args, **kwargs): """Render scalars""" adjoint = kwargs.get('adjoint', False) if adjoint: expr = expr.conjugate() if isinstance(expr, SympyBasic): self._sympy_printer._print_level = self._print_level + 1 res = self._sympy_printer.doprint(expr) else: # numeric type try: if int(expr) == expr: # In Python, objects that evaluate equal (e.g. 2.0 == 2) # have the same hash. We want to normalize this, so that we # get consistent results when printing with a cache expr = int(expr) except TypeError: pass if adjoint: kwargs = { key: val for (key, val) in kwargs.items() if key != 'adjoint'} res = self._print(expr, *args, **kwargs) return res
[ "def", "_print_SCALAR_TYPES", "(", "self", ",", "expr", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "adjoint", "=", "kwargs", ".", "get", "(", "'adjoint'", ",", "False", ")", "if", "adjoint", ":", "expr", "=", "expr", ".", "conjugate", "(", ")", "if", "isinstance", "(", "expr", ",", "SympyBasic", ")", ":", "self", ".", "_sympy_printer", ".", "_print_level", "=", "self", ".", "_print_level", "+", "1", "res", "=", "self", ".", "_sympy_printer", ".", "doprint", "(", "expr", ")", "else", ":", "# numeric type", "try", ":", "if", "int", "(", "expr", ")", "==", "expr", ":", "# In Python, objects that evaluate equal (e.g. 2.0 == 2)", "# have the same hash. We want to normalize this, so that we", "# get consistent results when printing with a cache", "expr", "=", "int", "(", "expr", ")", "except", "TypeError", ":", "pass", "if", "adjoint", ":", "kwargs", "=", "{", "key", ":", "val", "for", "(", "key", ",", "val", ")", "in", "kwargs", ".", "items", "(", ")", "if", "key", "!=", "'adjoint'", "}", "res", "=", "self", ".", "_print", "(", "expr", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "res" ]
41.434783
0.002051
def _do_validate_resourcescenario(resourcescenario, template_id=None):
    """
        Perform a check to ensure a resource scenario's datasets are correct given
        what the definition of that resource (its type) specifies.
    """
    res = resourcescenario.resourceattr.get_resource()
    types = res.types
    dataset = resourcescenario.dataset
    if len(types) == 0:
        return
    if template_id is not None:
        if template_id not in [r.templatetype.template_id for r in res.types]:
            raise HydraError("Template %s is not used for resource attribute %s in scenario %s"%\
                (template_id, resourcescenario.resourceattr.attr.name, resourcescenario.scenario.name))
    #Validate against all the types for the resource
    for resourcetype in types:
        #If a specific type has been specified, then only validate
        #against that type and ignore all the others
        if template_id is not None:
            if resourcetype.templatetype.template_id != template_id:
                continue
        #Identify the template types for the template
        tmpltype = resourcetype.templatetype
        for ta in tmpltype.typeattrs:
            #If we find a template type which matches the current attribute,
            #we can do some validation.
            if ta.attr_id == resourcescenario.resourceattr.attr_id:
                if ta.data_restriction:
                    log.debug("Validating against %s", ta.data_restriction)
                    validation_dict = eval(ta.data_restriction)
                    dataset_util.validate_value(validation_dict, dataset.get_val())
[ "def", "_do_validate_resourcescenario", "(", "resourcescenario", ",", "template_id", "=", "None", ")", ":", "res", "=", "resourcescenario", ".", "resourceattr", ".", "get_resource", "(", ")", "types", "=", "res", ".", "types", "dataset", "=", "resourcescenario", ".", "dataset", "if", "len", "(", "types", ")", "==", "0", ":", "return", "if", "template_id", "is", "not", "None", ":", "if", "template_id", "not", "in", "[", "r", ".", "templatetype", ".", "template_id", "for", "r", "in", "res", ".", "types", "]", ":", "raise", "HydraError", "(", "\"Template %s is not used for resource attribute %s in scenario %s\"", "%", "(", "template_id", ",", "resourcescenario", ".", "resourceattr", ".", "attr", ".", "name", ",", "resourcescenario", ".", "scenario", ".", "name", ")", ")", "#Validate against all the types for the resource", "for", "resourcetype", "in", "types", ":", "#If a specific type has been specified, then only validate", "#against that type and ignore all the others", "if", "template_id", "is", "not", "None", ":", "if", "resourcetype", ".", "templatetype", ".", "template_id", "!=", "template_id", ":", "continue", "#Identify the template types for the template", "tmpltype", "=", "resourcetype", ".", "templatetype", "for", "ta", "in", "tmpltype", ".", "typeattrs", ":", "#If we find a template type which mactches the current attribute.", "#we can do some validation.", "if", "ta", ".", "attr_id", "==", "resourcescenario", ".", "resourceattr", ".", "attr_id", ":", "if", "ta", ".", "data_restriction", ":", "log", ".", "debug", "(", "\"Validating against %s\"", ",", "ta", ".", "data_restriction", ")", "validation_dict", "=", "eval", "(", "ta", ".", "data_restriction", ")", "dataset_util", ".", "validate_value", "(", "validation_dict", ",", "dataset", ".", "get_val", "(", ")", ")" ]
44.216216
0.008373
def grid_to_eccentric_radii(self, grid): """Convert a grid of (y,x) coordinates to an eccentric radius, which is (1.0/axis_ratio) * elliptical radius \ and used to define light profile half-light radii using circular radii. If the coordinates have not been transformed to the profile's geometry, this is performed automatically. Parameters ---------- grid : TransformedGrid(ndarray) The (y, x) coordinates in the reference frame of the elliptical profile. """ return np.multiply(np.sqrt(self.axis_ratio), self.grid_to_elliptical_radii(grid)).view(np.ndarray)
[ "def", "grid_to_eccentric_radii", "(", "self", ",", "grid", ")", ":", "return", "np", ".", "multiply", "(", "np", ".", "sqrt", "(", "self", ".", "axis_ratio", ")", ",", "self", ".", "grid_to_elliptical_radii", "(", "grid", ")", ")", ".", "view", "(", "np", ".", "ndarray", ")" ]
52.083333
0.009434
def construct_exc_class(cls): """Constructs proxy class for the exception.""" class ProxyException(cls, BaseException): __pep3134__ = True @property def __traceback__(self): if self.__fixed_traceback__: return self.__fixed_traceback__ current_exc, current_tb = sys.exc_info()[1:] if current_exc is self: return current_tb def __init__(self, instance=None): # pylint: disable=W0231 self.__original_exception__ = instance self.__fixed_traceback__ = None def __getattr__(self, item): return getattr(self.__original_exception__, item) def __repr__(self): return repr(self.__original_exception__) def __str__(self): return str(self.__original_exception__) def with_traceback(self, traceback): instance = copy.copy(self) instance.__fixed_traceback__ = traceback return instance ProxyException.__name__ = cls.__name__ return ProxyException
[ "def", "construct_exc_class", "(", "cls", ")", ":", "class", "ProxyException", "(", "cls", ",", "BaseException", ")", ":", "__pep3134__", "=", "True", "@", "property", "def", "__traceback__", "(", "self", ")", ":", "if", "self", ".", "__fixed_traceback__", ":", "return", "self", ".", "__fixed_traceback__", "current_exc", ",", "current_tb", "=", "sys", ".", "exc_info", "(", ")", "[", "1", ":", "]", "if", "current_exc", "is", "self", ":", "return", "current_tb", "def", "__init__", "(", "self", ",", "instance", "=", "None", ")", ":", "# pylint: disable=W0231", "self", ".", "__original_exception__", "=", "instance", "self", ".", "__fixed_traceback__", "=", "None", "def", "__getattr__", "(", "self", ",", "item", ")", ":", "return", "getattr", "(", "self", ".", "__original_exception__", ",", "item", ")", "def", "__repr__", "(", "self", ")", ":", "return", "repr", "(", "self", ".", "__original_exception__", ")", "def", "__str__", "(", "self", ")", ":", "return", "str", "(", "self", ".", "__original_exception__", ")", "def", "with_traceback", "(", "self", ",", "traceback", ")", ":", "instance", "=", "copy", ".", "copy", "(", "self", ")", "instance", ".", "__fixed_traceback__", "=", "traceback", "return", "instance", "ProxyException", ".", "__name__", "=", "cls", ".", "__name__", "return", "ProxyException" ]
29.222222
0.00092
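To see how the proxy class produced by `construct_exc_class` behaves, here is a hedged sketch; it assumes the function is importable from the module above, and the commented results describe expected behaviour rather than verified output.

# Usage sketch for construct_exc_class.
ProxyKeyError = construct_exc_class(KeyError)

original = KeyError('missing key')
proxy = ProxyKeyError(original)

print(isinstance(proxy, KeyError))    # True - the proxy subclasses the wrapped class
print(repr(proxy))                    # delegates to the wrapped instance's repr
pinned = proxy.with_traceback(None)   # returns a copy with a fixed traceback attached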
def is_subdir(base_path, test_path, trailing_slash=False, wildcards=False):
    '''Return whether a path is a subpath of another.

    Args:
        base_path: The base path
        test_path: The path which we are testing
        trailing_slash: If True, the trailing slash is treated with importance.
            For example, ``/images/`` is a directory while ``/images`` is a
            file.
        wildcards: If True, globbing wildcards are matched against paths
    '''
    if trailing_slash:
        base_path = base_path.rsplit('/', 1)[0] + '/'
        test_path = test_path.rsplit('/', 1)[0] + '/'
    else:
        if not base_path.endswith('/'):
            base_path += '/'

        if not test_path.endswith('/'):
            test_path += '/'

    if wildcards:
        return fnmatch.fnmatchcase(test_path, base_path)
    else:
        return test_path.startswith(base_path)
[ "def", "is_subdir", "(", "base_path", ",", "test_path", ",", "trailing_slash", "=", "False", ",", "wildcards", "=", "False", ")", ":", "if", "trailing_slash", ":", "base_path", "=", "base_path", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "0", "]", "+", "'/'", "test_path", "=", "test_path", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "0", "]", "+", "'/'", "else", ":", "if", "not", "base_path", ".", "endswith", "(", "'/'", ")", ":", "base_path", "+=", "'/'", "if", "not", "test_path", ".", "endswith", "(", "'/'", ")", ":", "test_path", "+=", "'/'", "if", "wildcards", ":", "return", "fnmatch", ".", "fnmatchcase", "(", "test_path", ",", "base_path", ")", "else", ":", "return", "test_path", ".", "startswith", "(", "base_path", ")" ]
34.8
0.001119
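A few illustrative calls to `is_subdir`, assuming the function (and the `fnmatch` import it relies on) is available as defined above; the expected results in the comments follow directly from the prefix and glob logic.

# Usage sketch for is_subdir.
print(is_subdir('/images/', '/images/cats/1.jpg'))   # True  - plain prefix match
print(is_subdir('/images/', '/videos/1.mp4'))        # False
print(is_subdir('/static/*/css', '/static/site/css',
                wildcards=True))                      # True  - glob pattern match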
def _get_dopants(substitutions, num_dopants, match_oxi_sign): """ Utility method to get n- and p-type dopants from a list of substitutions. """ n_type = [pred for pred in substitutions if pred['dopant_species'].oxi_state > pred['original_species'].oxi_state and (not match_oxi_sign or np.sign(pred['dopant_species'].oxi_state) == np.sign(pred['original_species'].oxi_state))] p_type = [pred for pred in substitutions if pred['dopant_species'].oxi_state < pred['original_species'].oxi_state and (not match_oxi_sign or np.sign(pred['dopant_species'].oxi_state) == np.sign(pred['original_species'].oxi_state))] return {'n_type': n_type[:num_dopants], 'p_type': p_type[:num_dopants]}
[ "def", "_get_dopants", "(", "substitutions", ",", "num_dopants", ",", "match_oxi_sign", ")", ":", "n_type", "=", "[", "pred", "for", "pred", "in", "substitutions", "if", "pred", "[", "'dopant_species'", "]", ".", "oxi_state", ">", "pred", "[", "'original_species'", "]", ".", "oxi_state", "and", "(", "not", "match_oxi_sign", "or", "np", ".", "sign", "(", "pred", "[", "'dopant_species'", "]", ".", "oxi_state", ")", "==", "np", ".", "sign", "(", "pred", "[", "'original_species'", "]", ".", "oxi_state", ")", ")", "]", "p_type", "=", "[", "pred", "for", "pred", "in", "substitutions", "if", "pred", "[", "'dopant_species'", "]", ".", "oxi_state", "<", "pred", "[", "'original_species'", "]", ".", "oxi_state", "and", "(", "not", "match_oxi_sign", "or", "np", ".", "sign", "(", "pred", "[", "'dopant_species'", "]", ".", "oxi_state", ")", "==", "np", ".", "sign", "(", "pred", "[", "'original_species'", "]", ".", "oxi_state", ")", ")", "]", "return", "{", "'n_type'", ":", "n_type", "[", ":", "num_dopants", "]", ",", "'p_type'", ":", "p_type", "[", ":", "num_dopants", "]", "}" ]
47.055556
0.001157
def scrypt_mcf_check(mcf, password): """Returns True if the password matches the given MCF hash""" if isinstance(password, unicode): password = password.encode('utf8') elif not isinstance(password, bytes): raise TypeError('password must be a unicode or byte string') if not isinstance(mcf, bytes): raise TypeError('MCF must be a byte string') if mcf_mod._scrypt_mcf_7_is_standard(mcf) and not _scrypt_ll: return _scrypt_str_chk(mcf, password, len(password)) == 0 return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
[ "def", "scrypt_mcf_check", "(", "mcf", ",", "password", ")", ":", "if", "isinstance", "(", "password", ",", "unicode", ")", ":", "password", "=", "password", ".", "encode", "(", "'utf8'", ")", "elif", "not", "isinstance", "(", "password", ",", "bytes", ")", ":", "raise", "TypeError", "(", "'password must be a unicode or byte string'", ")", "if", "not", "isinstance", "(", "mcf", ",", "bytes", ")", ":", "raise", "TypeError", "(", "'MCF must be a byte string'", ")", "if", "mcf_mod", ".", "_scrypt_mcf_7_is_standard", "(", "mcf", ")", "and", "not", "_scrypt_ll", ":", "return", "_scrypt_str_chk", "(", "mcf", ",", "password", ",", "len", "(", "password", ")", ")", "==", "0", "return", "mcf_mod", ".", "scrypt_mcf_check", "(", "scrypt", ",", "mcf", ",", "password", ")" ]
51.181818
0.001745
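A short usage sketch for `scrypt_mcf_check`; it assumes a companion `scrypt_mcf` hashing function exists in the same module (not shown in this record) to produce the MCF strings being verified.

# Usage sketch for scrypt_mcf_check (scrypt_mcf is assumed to be the matching hash generator).
mcf = scrypt_mcf(b'correct horse battery staple')

print(scrypt_mcf_check(mcf, b'correct horse battery staple'))   # True
print(scrypt_mcf_check(mcf, b'hunter2'))                        # False
print(scrypt_mcf_check(mcf, u'correct horse battery staple'))   # unicode passwords are UTF-8 encoded first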
def get_messages(self, count=1, block=True, timeout=10): """ Fetch the specified number of messages Keyword Arguments: count: Indicates the maximum number of messages to be fetched block: If True, the API will block till all messages are fetched. If block is a positive integer the API will block until that many messages are fetched. timeout: When blocking is requested the function will block for the specified time (in seconds) until count messages is fetched. If None, it will block forever. """ messages = [] # Give a size hint to the consumers. Each consumer process will fetch # a maximum of "count" messages. This will fetch more messages than # necessary, but these will not be committed to kafka. Also, the extra # messages can be provided in subsequent runs self.size.value = count self.events.pause.clear() if timeout is not None: max_time = time.time() + timeout new_offsets = {} while count > 0 and (timeout is None or timeout > 0): # Trigger consumption only if the queue is empty # By doing this, we will ensure that consumers do not # go into overdrive and keep consuming thousands of # messages when the user might need only a few if self.queue.empty(): self.events.start.set() block_next_call = block is True or block > len(messages) try: partition, message = self.queue.get(block_next_call, timeout) except queue.Empty: break _msg = (partition, message) if self.partition_info else message messages.append(_msg) new_offsets[partition] = message.offset + 1 count -= 1 if timeout is not None: timeout = max_time - time.time() self.size.value = 0 self.events.start.clear() self.events.pause.set() # Update and commit offsets if necessary self.offsets.update(new_offsets) self.count_since_commit += len(messages) self._auto_commit() return messages
[ "def", "get_messages", "(", "self", ",", "count", "=", "1", ",", "block", "=", "True", ",", "timeout", "=", "10", ")", ":", "messages", "=", "[", "]", "# Give a size hint to the consumers. Each consumer process will fetch", "# a maximum of \"count\" messages. This will fetch more messages than", "# necessary, but these will not be committed to kafka. Also, the extra", "# messages can be provided in subsequent runs", "self", ".", "size", ".", "value", "=", "count", "self", ".", "events", ".", "pause", ".", "clear", "(", ")", "if", "timeout", "is", "not", "None", ":", "max_time", "=", "time", ".", "time", "(", ")", "+", "timeout", "new_offsets", "=", "{", "}", "while", "count", ">", "0", "and", "(", "timeout", "is", "None", "or", "timeout", ">", "0", ")", ":", "# Trigger consumption only if the queue is empty", "# By doing this, we will ensure that consumers do not", "# go into overdrive and keep consuming thousands of", "# messages when the user might need only a few", "if", "self", ".", "queue", ".", "empty", "(", ")", ":", "self", ".", "events", ".", "start", ".", "set", "(", ")", "block_next_call", "=", "block", "is", "True", "or", "block", ">", "len", "(", "messages", ")", "try", ":", "partition", ",", "message", "=", "self", ".", "queue", ".", "get", "(", "block_next_call", ",", "timeout", ")", "except", "queue", ".", "Empty", ":", "break", "_msg", "=", "(", "partition", ",", "message", ")", "if", "self", ".", "partition_info", "else", "message", "messages", ".", "append", "(", "_msg", ")", "new_offsets", "[", "partition", "]", "=", "message", ".", "offset", "+", "1", "count", "-=", "1", "if", "timeout", "is", "not", "None", ":", "timeout", "=", "max_time", "-", "time", ".", "time", "(", ")", "self", ".", "size", ".", "value", "=", "0", "self", ".", "events", ".", "start", ".", "clear", "(", ")", "self", ".", "events", ".", "pause", ".", "set", "(", ")", "# Update and commit offsets if necessary", "self", ".", "offsets", ".", "update", "(", "new_offsets", ")", "self", ".", "count_since_commit", "+=", "len", "(", "messages", ")", "self", ".", "_auto_commit", "(", ")", "return", "messages" ]
39.241379
0.000857
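A hedged consumption loop for `get_messages`; `consumer` stands for an already-constructed instance of the multiprocess consumer this method belongs to (its setup is not shown in this record), and the attribute access assumes kafka-python style `OffsetAndMessage` objects.

# Usage sketch: drain up to 100 messages, waiting at most 5 seconds.
msgs = consumer.get_messages(count=100, block=True, timeout=5)
for m in msgs:
    # Without partition_info each item is an OffsetAndMessage-like object.
    print(m.offset, m.message.value)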
def accept_all(self): ''' Accept all keys in pre ''' keys = self.list_keys() for key in keys[self.PEND]: try: shutil.move( os.path.join( self.opts['pki_dir'], self.PEND, key), os.path.join( self.opts['pki_dir'], self.ACC, key) ) eload = {'result': True, 'act': 'accept', 'id': key} self.event.fire_event(eload, salt.utils.event.tagify(prefix='key')) except (IOError, OSError): pass return self.list_keys()
[ "def", "accept_all", "(", "self", ")", ":", "keys", "=", "self", ".", "list_keys", "(", ")", "for", "key", "in", "keys", "[", "self", ".", "PEND", "]", ":", "try", ":", "shutil", ".", "move", "(", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'pki_dir'", "]", ",", "self", ".", "PEND", ",", "key", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'pki_dir'", "]", ",", "self", ".", "ACC", ",", "key", ")", ")", "eload", "=", "{", "'result'", ":", "True", ",", "'act'", ":", "'accept'", ",", "'id'", ":", "key", "}", "self", ".", "event", ".", "fire_event", "(", "eload", ",", "salt", ".", "utils", ".", "event", ".", "tagify", "(", "prefix", "=", "'key'", ")", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "pass", "return", "self", ".", "list_keys", "(", ")" ]
33.8
0.002301
def merge(x, y): """ Merge two dictionaries and raise an error for inconsistencies. Parameters ---------- x : dict dictionary x y : dict dictionary y Returns ------- x : dict merged dictionary Raises ------ ValueError if `x` and `y` are inconsistent """ keys_x = set(x) keys_y = set(y) for key in keys_y - keys_x: x[key] = y[key] for key in keys_x & keys_y: value_x = x[key] value_y = y[key] if isinstance(value_x, dict) and isinstance(value_y, dict): x[key] = merge(value_x, value_y) else: if value_x != value_y: raise ValueError return x
[ "def", "merge", "(", "x", ",", "y", ")", ":", "keys_x", "=", "set", "(", "x", ")", "keys_y", "=", "set", "(", "y", ")", "for", "key", "in", "keys_y", "-", "keys_x", ":", "x", "[", "key", "]", "=", "y", "[", "key", "]", "for", "key", "in", "keys_x", "&", "keys_y", ":", "value_x", "=", "x", "[", "key", "]", "value_y", "=", "y", "[", "key", "]", "if", "isinstance", "(", "value_x", ",", "dict", ")", "and", "isinstance", "(", "value_y", ",", "dict", ")", ":", "x", "[", "key", "]", "=", "merge", "(", "value_x", ",", "value_y", ")", "else", ":", "if", "value_x", "!=", "value_y", ":", "raise", "ValueError", "return", "x" ]
18.236842
0.00137
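The recursive behaviour of `merge` is easiest to see with a small example; this assumes the function is importable as defined above. Note that the first argument is updated in place and then returned.

# Usage sketch for merge.
a = {'db': {'host': 'localhost'}, 'debug': True}
b = {'db': {'port': 5432}, 'debug': True}

print(merge(a, b))
# -> {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}

merge({'level': 1}, {'level': 2})   # raises ValueError - same key, conflicting values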
def find_state(self, state, best_match=True, min_similarity=70):
        """
        Fuzzy search for the correct state.

        :param best_match: bool, when True, only the best matched state
            will be returned; otherwise, all matching states will be returned.
        """
        result_state_short_list = list()

        # check if it is an abbreviated name
        if state.upper() in STATE_ABBR_SHORT_TO_LONG:
            result_state_short_list.append(state.upper())

        # if not, find out which state the user is looking for
        else:
            if best_match:
                state_long, confidence = extractOne(state, self.state_list)
                if confidence >= min_similarity:
                    result_state_short_list.append(
                        STATE_ABBR_LONG_TO_SHORT[state_long])
            else:
                for state_long, confidence in extract(state, self.state_list):
                    if confidence >= min_similarity:
                        result_state_short_list.append(
                            STATE_ABBR_LONG_TO_SHORT[state_long])

        if len(result_state_short_list) == 0:
            message = ("'%s' is not a valid state name, use 2 letter "
                       "short name or correct full name please.")
            raise ValueError(message % state)

        return result_state_short_list
[ "def", "find_state", "(", "self", ",", "state", ",", "best_match", "=", "True", ",", "min_similarity", "=", "70", ")", ":", "result_state_short_list", "=", "list", "(", ")", "# check if it is a abbreviate name", "if", "state", ".", "upper", "(", ")", "in", "STATE_ABBR_SHORT_TO_LONG", ":", "result_state_short_list", ".", "append", "(", "state", ".", "upper", "(", ")", ")", "# if not, find out what is the state that user looking for", "else", ":", "if", "best_match", ":", "state_long", ",", "confidence", "=", "extractOne", "(", "state", ",", "self", ".", "state_list", ")", "if", "confidence", ">=", "min_similarity", ":", "result_state_short_list", ".", "append", "(", "STATE_ABBR_LONG_TO_SHORT", "[", "state_long", "]", ")", "else", ":", "for", "state_long", ",", "confidence", "in", "extract", "(", "state", ",", "self", ".", "state_list", ")", ":", "if", "confidence", ">=", "min_similarity", ":", "result_state_short_list", ".", "append", "(", "STATE_ABBR_LONG_TO_SHORT", "[", "state_long", "]", ")", "if", "len", "(", "result_state_short_list", ")", "==", "0", ":", "message", "=", "(", "\"'%s' is not a valid state name, use 2 letter \"", "\"short name or correct full name please.\"", ")", "raise", "ValueError", "(", "message", "%", "state", ")", "return", "result_state_short_list" ]
41.40625
0.001475
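A hedged usage sketch for `find_state`; `searcher` stands for an instance of the (unnamed here) class that defines the method and holds `state_list`, and the commented results describe expected behaviour rather than verified output.

# Usage sketch for find_state.
print(searcher.find_state('TX'))                        # ['TX'] - valid abbreviations pass straight through
print(searcher.find_state('Texas'))                     # ['TX'] - best fuzzy match on the full name
print(searcher.find_state('Dakota', best_match=False))  # every state scoring >= 70, e.g. both Dakotas
searcher.find_state('zzz')                              # raises ValueError - nothing clears the cutoff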
def GetDirections(self, origin, destination, sensor = False, mode = None, waypoints = None,
                      alternatives = None, avoid = None, language = None, units = None,
                      region = None, departure_time = None, arrival_time = None):
        '''Get Directions Service
        Please refer to the Google Maps Web API for the details of the remaining parameters
        '''
        params = {
            'origin': origin,
            'destination': destination,
            'sensor': str(sensor).lower()
        }
        if mode:
            params['mode'] = mode
        if waypoints:
            params['waypoints'] = waypoints
        if alternatives:
            params['alternatives'] = alternatives
        if avoid:
            params['avoid'] = avoid
        if language:
            params['language'] = language
        if units:
            params['units'] = units
        if region:
            params['region'] = region
        if departure_time:
            params['departure_time'] = departure_time
        if arrival_time:
            params['arrival_time'] = arrival_time
        if not self.premier:
            url = self.get_url(params)
        else:
            url = self.get_signed_url(params)
        return self.GetService_url(url)
[ "def", "GetDirections", "(", "self", ",", "origin", ",", "destination", ",", "sensor", "=", "False", ",", "mode", "=", "None", ",", "waypoints", "=", "None", ",", "alternatives", "=", "None", ",", "avoid", "=", "None", ",", "language", "=", "None", ",", "units", "=", "None", ",", "region", "=", "None", ",", "departure_time", "=", "None", ",", "arrival_time", "=", "None", ")", ":", "params", "=", "{", "'origin'", ":", "origin", ",", "'destination'", ":", "destination", ",", "'sensor'", ":", "str", "(", "sensor", ")", ".", "lower", "(", ")", "}", "if", "mode", ":", "params", "[", "'mode'", "]", "=", "mode", "if", "waypoints", ":", "params", "[", "'waypoints'", "]", "=", "waypoints", "if", "alternatives", ":", "params", "[", "'alternatives'", "]", "=", "alternatives", "if", "avoid", ":", "params", "[", "'avoid'", "]", "=", "avoid", "if", "language", ":", "params", "[", "'language'", "]", "=", "language", "if", "units", ":", "params", "[", "'units'", "]", "=", "units", "if", "region", ":", "params", "[", "'region'", "]", "=", "region", "if", "departure_time", ":", "params", "[", "'departure_time'", "]", "=", "departure_time", "if", "arrival_time", ":", "params", "[", "'arrival_time'", "]", "=", "arrival_time", "if", "not", "self", ".", "premier", ":", "url", "=", "self", ".", "get_url", "(", "params", ")", "else", ":", "url", "=", "self", ".", "get_signed_url", "(", "params", ")", "return", "self", ".", "GetService_url", "(", "url", ")" ]
28.914894
0.030605
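A hedged call sketch for `GetDirections`; `gmaps` stands for an instance of the wrapper class that defines the method (its construction and API key handling are not shown in this record), and reading `routes`/`summary` assumes the response is the parsed JSON body of the public Google Directions API.

# Usage sketch for GetDirections.
result = gmaps.GetDirections('Toledo, Spain', 'Madrid, Spain',
                             mode='driving', alternatives='true', language='en')
for route in result.get('routes', []):
    print(route['summary'])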
def from_structure(cls, structure, ff_elements=None, atom_style="charge"): """ Simple constructor building LammpsData from a structure without force field parameters and topologies. Args: structure (Structure): Input structure. ff_elements ([str]): List of strings of elements that must be present due to force field settings but not necessarily in the structure. Default to None. atom_style (str): Choose between "atomic" (neutral) and "charge" (charged). Default to "charge". """ s = structure.get_sorted_structure() box, symmop = lattice_2_lmpbox(s.lattice) coords = symmop.operate_multi(s.cart_coords) site_properties = s.site_properties if "velocities" in site_properties: velos = np.array(s.site_properties["velocities"]) rot = SymmOp.from_rotation_and_translation(symmop.rotation_matrix) rot_velos = rot.operate_multi(velos) site_properties.update({"velocities": rot_velos}) boxed_s = Structure(box.to_lattice(), s.species, coords, site_properties=site_properties, coords_are_cartesian=True) symbols = list(s.symbol_set) if ff_elements: symbols.extend(ff_elements) elements = sorted(Element(el) for el in set(symbols)) mass_info = [tuple([i.symbol] * 2) for i in elements] ff = ForceField(mass_info) topo = Topology(boxed_s) return cls.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style)
[ "def", "from_structure", "(", "cls", ",", "structure", ",", "ff_elements", "=", "None", ",", "atom_style", "=", "\"charge\"", ")", ":", "s", "=", "structure", ".", "get_sorted_structure", "(", ")", "box", ",", "symmop", "=", "lattice_2_lmpbox", "(", "s", ".", "lattice", ")", "coords", "=", "symmop", ".", "operate_multi", "(", "s", ".", "cart_coords", ")", "site_properties", "=", "s", ".", "site_properties", "if", "\"velocities\"", "in", "site_properties", ":", "velos", "=", "np", ".", "array", "(", "s", ".", "site_properties", "[", "\"velocities\"", "]", ")", "rot", "=", "SymmOp", ".", "from_rotation_and_translation", "(", "symmop", ".", "rotation_matrix", ")", "rot_velos", "=", "rot", ".", "operate_multi", "(", "velos", ")", "site_properties", ".", "update", "(", "{", "\"velocities\"", ":", "rot_velos", "}", ")", "boxed_s", "=", "Structure", "(", "box", ".", "to_lattice", "(", ")", ",", "s", ".", "species", ",", "coords", ",", "site_properties", "=", "site_properties", ",", "coords_are_cartesian", "=", "True", ")", "symbols", "=", "list", "(", "s", ".", "symbol_set", ")", "if", "ff_elements", ":", "symbols", ".", "extend", "(", "ff_elements", ")", "elements", "=", "sorted", "(", "Element", "(", "el", ")", "for", "el", "in", "set", "(", "symbols", ")", ")", "mass_info", "=", "[", "tuple", "(", "[", "i", ".", "symbol", "]", "*", "2", ")", "for", "i", "in", "elements", "]", "ff", "=", "ForceField", "(", "mass_info", ")", "topo", "=", "Topology", "(", "boxed_s", ")", "return", "cls", ".", "from_ff_and_topologies", "(", "box", "=", "box", ",", "ff", "=", "ff", ",", "topologies", "=", "[", "topo", "]", ",", "atom_style", "=", "atom_style", ")" ]
46.472222
0.001171
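To show how `from_structure` is typically driven, here is a hedged sketch; it assumes the classmethod lives on a pymatgen-style `LammpsData` class and that `Lattice` and `Structure` can be imported from pymatgen, which is an assumption about the surrounding package rather than something stated in this record.

# Usage sketch for from_structure.
from pymatgen.core import Lattice, Structure

# Rock-salt-like NaCl cell, purely illustrative.
s = Structure(Lattice.cubic(4.2), ['Na', 'Cl'], [[0, 0, 0], [0.5, 0.5, 0.5]])
lammps_data = LammpsData.from_structure(s, ff_elements=['Na', 'Cl'],
                                        atom_style='charge')
# lammps_data can then be written out with the class's own LAMMPS data-file I/O helpers.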