Columns: code (string, 64 to 7.01k chars), docstring (string, 2 to 15.8k chars), text (string, 144 to 19.2k chars)
#vtb
def has_property(elem_to_parse, xpath):
    xroot, attr = get_xpath_tuple(xpath)
    if not xroot and not attr:
        return False
    elif not attr:
        return bool(get_elements_text(elem_to_parse, xroot))
    else:
        return bool(get_elements_attributes(elem_to_parse, xroot, attr))
Parse xpath for any attribute reference "path/@attr" and check for root and presence of attribute. :return: True if xpath is present in the element along with any attribute referenced, otherwise False
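For illustration, a hedged usage sketch (the element and xpaths are made up; get_xpath_tuple is assumed to split "path/@attr" into its path and attribute parts):
# hypothetical element: <idinfo><citation title="Example"/></idinfo>
has_property(idinfo_elem, 'citation')          # True if the element exists
has_property(idinfo_elem, 'citation/@title')   # True only if the title attribute is also present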
#vtb
def sci(x, digs):
    # String literals below were lost in extraction and are restored to match
    # the standard-library fpformat.sci implementation this code follows.
    if type(x) != type(''):
        x = repr(x)
    sign, intpart, fraction, expo = extract(x)
    if not intpart:
        while fraction and fraction[0] == '0':
            fraction = fraction[1:]
            expo = expo - 1
        if fraction:
            intpart, fraction = fraction[0], fraction[1:]
            expo = expo - 1
        else:
            intpart = '0'
    else:
        expo = expo + len(intpart) - 1
        intpart, fraction = intpart[0], intpart[1:] + fraction
    digs = max(0, digs)
    intpart, fraction = roundfrac(intpart, fraction, digs)
    if len(intpart) > 1:
        intpart, fraction, expo = \
            intpart[0], intpart[1:] + fraction[:-1], \
            expo + len(intpart) - 1
    s = sign + intpart
    if digs > 0:
        s = s + '.' + fraction
    e = repr(abs(expo))
    e = '0'*(3-len(e)) + e
    if expo < 0:
        e = '-' + e
    else:
        e = '+' + e
    return s + 'e' + e
Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point and exactly one digit before. If digs is <= 0, one digit is kept and the point is suppressed.
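A hedged worked example, assuming extract and roundfrac behave as in the standard-library fpformat module this code mirrors:
sci('12345.678', 3)   # -> '1.235e+004'  (one digit before the point, three after, zero-padded exponent)
sci('12345.678', 0)   # -> '1e+004'      (digs <= 0: the point is suppressed)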
#vtb
def set_iter_mesh(self, mesh, shift=None, is_time_reversal=True,
                  is_mesh_symmetry=True, is_eigenvectors=False,
                  is_gamma_center=False):
    warnings.warn("Phonopy.set_iter_mesh is deprecated. "
                  "Use Phonopy.run_mesh with use_iter_mesh=True.",
                  DeprecationWarning)
    self.run_mesh(mesh=mesh,
                  shift=shift,
                  is_time_reversal=is_time_reversal,
                  is_mesh_symmetry=is_mesh_symmetry,
                  with_eigenvectors=is_eigenvectors,
                  is_gamma_center=is_gamma_center,
                  use_iter_mesh=True)
Create an IterMesh instance. Attributes ---------- See set_mesh method.
#vtb
def find_rt_jar(javahome=None):
    if not javahome:
        if 'JAVA_HOME' in os.environ:
            javahome = os.environ['JAVA_HOME']
        elif sys.platform == 'darwin':
            javahome = _find_osx_javahome()
        else:
            javahome = _get_javahome_from_java(_find_java_binary())
    rtpath = os.path.join(javahome, 'jre', 'lib', 'rt.jar')
    if not os.path.isfile(rtpath):
        # The original message text was lost in extraction; this is a stand-in.
        msg = 'rt.jar not found at {}'.format(rtpath)
        raise ExtensionError(msg)
    return rtpath
Find the path to the Java standard library jar. The jar is expected to exist at the path 'jre/lib/rt.jar' inside a standard Java installation directory. The directory is found using the following procedure: 1. If the javahome argument is provided, use the value as the directory. 2. If the JAVA_HOME environment variable is set, use the value as the directory. 3. Find the location of the ``java`` binary in the current PATH and compute the installation directory from this location. Args: javahome: A path to a Java installation directory (optional).
#vtb
def show_raslog_output_show_all_raslog_raslog_entries_log_type(self, **kwargs):
    config = ET.Element("config")
    show_raslog = ET.Element("show_raslog")
    config = show_raslog
    output = ET.SubElement(show_raslog, "output")
    show_all_raslog = ET.SubElement(output, "show-all-raslog")
    raslog_entries = ET.SubElement(show_all_raslog, "raslog-entries")
    log_type = ET.SubElement(raslog_entries, "log-type")
    # The popped keys were lost in extraction; 'log_type' and 'callback' follow
    # the pattern of this auto-generated code.
    log_type.text = kwargs.pop('log_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
#vtb def deep_align(objects, join=, copy=True, indexes=None, exclude=frozenset(), raise_on_invalid=True): from .dataarray import DataArray from .dataset import Dataset if indexes is None: indexes = {} def is_alignable(obj): return isinstance(obj, (DataArray, Dataset)) positions = [] keys = [] out = [] targets = [] no_key = object() not_replaced = object() for n, variables in enumerate(objects): if is_alignable(variables): positions.append(n) keys.append(no_key) targets.append(variables) out.append(not_replaced) elif is_dict_like(variables): for k, v in variables.items(): if is_alignable(v) and k not in indexes: positions.append(n) keys.append(k) targets.append(v) out.append(OrderedDict(variables)) elif raise_on_invalid: raise ValueError( % variables) else: out.append(variables) aligned = align(*targets, join=join, copy=copy, indexes=indexes, exclude=exclude) for position, key, aligned_obj in zip(positions, keys, aligned): if key is no_key: out[position] = aligned_obj else: out[position][key] = aligned_obj assert all(arg is not not_replaced for arg in out) return out
Align objects for merging, recursing into dictionary values. This function is not public API.
#vtb def _apply_user_port_channel_config(self, nexus_host, vpc_nbr): cli_cmds = self._get_user_port_channel_config(nexus_host, vpc_nbr) if cli_cmds: self._send_cli_conf_string(nexus_host, cli_cmds) else: vpc_str = str(vpc_nbr) path_snip = snipp.PATH_ALL body_snip = snipp.BODY_ADD_PORT_CH_P2 % (vpc_str, vpc_str) self.send_edit_string(nexus_host, path_snip, body_snip)
Adds STP and no lacp suspend config to port channel.
#vtb def ajModeles(self): sl = [] lines = [line for line in lignesFichier(self.path("modeles.la"))] max = len(lines) - 1 for i, l in enumerate(lines): if l.startswith(): varname, value = tuple(l.split("=")) self.lemmatiseur._variables[varname] = value continue eclats = l.split(":") if (eclats[0] == "modele" or i == max) and len(sl) > 0: m = self.parse_modele(sl) self.register_modele(m) sl = [] sl.append(l)
Read the models and register their desinences (endings).
#vtb def from_header(self, binary): if binary is None: return span_context_module.SpanContext(from_header=False) try: data = Header._make(struct.unpack(BINARY_FORMAT, binary)) except struct.error: logging.warning( .format( binary, FORMAT_LENGTH ) ) return span_context_module.SpanContext(from_header=False) trace_id = str(binascii.hexlify(data.trace_id).decode(UTF8)) span_id = str(binascii.hexlify(data.span_id).decode(UTF8)) trace_options = TraceOptions(data.trace_option) span_context = span_context_module.SpanContext( trace_id=trace_id, span_id=span_id, trace_options=trace_options, from_header=True) return span_context
Generate a SpanContext object using the trace context header. The value of enabled parsed from header is int. Need to convert to bool. :type binary: bytes :param binary: Trace context header which was extracted from the request headers. :rtype: :class:`~opencensus.trace.span_context.SpanContext` :returns: SpanContext generated from the trace context header.
#vtb
def mk_class_name(*parts):
    cap = lambda s: s and (s[0].capitalize() + s[1:])
    return "".join(["".join([cap(i) for i in re.split("[\ \-\_\.]", str(p))]) for p in parts])
Create a valid class name from a list of strings.
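A short usage sketch (inputs are made up):
mk_class_name("my service", "foo_bar")   # -> 'MyServiceFooBar'
mk_class_name("config.loader", "v2")     # -> 'ConfigLoaderV2'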
#vtb def send_command_return(self, obj, command, *arguments): return self._perform_command(.format(self.session_url, obj.ref), command, OperReturnType.line_output, *arguments).json()
Send command with single line output. :param obj: requested object. :param command: command to send. :param arguments: list of command arguments. :return: command output.
#vtb def _get_clumpp_table(self, kpop, max_var_multiple, quiet): reps, excluded = _concat_reps(self, kpop, max_var_multiple, quiet) if reps: ninds = reps[0].inds nreps = len(reps) else: ninds = nreps = 0 if not reps: return "no result files found" clumphandle = os.path.join(self.workdir, "tmp.clumppparams.txt") self.clumppparams.kpop = kpop self.clumppparams.c = ninds self.clumppparams.r = nreps with open(clumphandle, ) as tmp_c: tmp_c.write(self.clumppparams._asfile()) outfile = os.path.join(self.workdir, "{}-K-{}.outfile".format(self.name, kpop)) indfile = os.path.join(self.workdir, "{}-K-{}.indfile".format(self.name, kpop)) miscfile = os.path.join(self.workdir, "{}-K-{}.miscfile".format(self.name, kpop)) cmd = ["CLUMPP", clumphandle, "-i", indfile, "-o", outfile, "-j", miscfile, "-r", str(nreps), "-c", str(ninds), "-k", str(kpop)] proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) _ = proc.communicate() for rfile in [indfile, miscfile]: if os.path.exists(rfile): os.remove(rfile) ofile = os.path.join(self.workdir, "{}-K-{}.outfile".format(self.name, kpop)) if os.path.exists(ofile): csvtable = pd.read_csv(ofile, delim_whitespace=True, header=None) table = csvtable.loc[:, 5:] table.columns = range(table.shape[1]) table.index = self.labels if not quiet: sys.stderr.write( "[K{}] {}/{} results permuted across replicates (max_var={}).\n"\ .format(kpop, nreps, nreps+excluded, max_var_multiple)) return table else: sys.stderr.write("No files ready for {}-K-{} in {}\n"\ .format(self.name, kpop, self.workdir)) return
Private function that runs CLUMPP to permute results across replicates.
#vtb def to_element(self, include_namespaces=False): elt_attrib = {} if include_namespaces: elt_attrib.update({ : "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/", : "http://purl.org/dc/elements/1.1/", : "urn:schemas-upnp-org:metadata-1-0/upnp/", }) elt_attrib.update({ : self.parent_id, : if self.restricted else , : self.item_id }) elt = XML.Element(self.tag, elt_attrib) XML.SubElement(elt, ).text = self.title for resource in self.resources: elt.append(resource.to_element()) for key, value in self._translation.items(): if hasattr(self, key): tag = "%s:%s" % value if value[0] else "%s" % value[1] XML.SubElement(elt, tag).text = ("%s" % getattr(self, key)) XML.SubElement(elt, ).text = self.item_class desc_attrib = {: , : } desc_elt = XML.SubElement(elt, , desc_attrib) desc_elt.text = self.desc return elt
Return an ElementTree Element representing this instance. Args: include_namespaces (bool, optional): If True, include xml namespace attributes on the root element Return: ~xml.etree.ElementTree.Element: an Element.
#vtb
def log_variable_sizes(var_list=None, tag=None, verbose=False):
    if var_list is None:
        var_list = tf.trainable_variables()
    if tag is None:
        tag = "Trainable Variables"
    if not var_list:
        return
    name_to_var = {v.name: v for v in var_list}
    total_size = 0
    for v_name in sorted(list(name_to_var)):
        v = name_to_var[v_name]
        v_size = int(np.prod(np.array(v.shape.as_list())))
        if verbose:
            tf.logging.info("Weight %s\tshape %s\tsize %d",
                            v.name[:-2].ljust(80),
                            str(v.shape).ljust(20), v_size)
        total_size += v_size
    tf.logging.info("%s Total size: %d", tag, total_size)
Log the sizes and shapes of variables, and the total size. Args: var_list: a list of variables; defaults to trainable_variables tag: a string; defaults to "Trainable Variables" verbose: bool, if True, log every weight; otherwise, log total size only.
#vtb
def logpdf_link(self, inv_link_f, y, Y_metadata=None):
    e = y - inv_link_f
    objective = (+ gammaln((self.v + 1) * 0.5)
                 - gammaln(self.v * 0.5)
                 - 0.5*np.log(self.sigma2 * self.v * np.pi)
                 - 0.5*(self.v + 1)*np.log(1 + (1/np.float(self.v))*((e**2)/self.sigma2)))
    return objective
Log Likelihood Function given link(f) .. math:: \\ln p(y_{i}|\lambda(f_{i})) = \\ln \\Gamma\\left(\\frac{v+1}{2}\\right) - \\ln \\Gamma\\left(\\frac{v}{2}\\right) - \\ln \\sqrt{v \\pi\\sigma^{2}} - \\frac{v+1}{2}\\ln \\left(1 + \\frac{1}{v}\\left(\\frac{(y_{i} - \lambda(f_{i}))^{2}}{\\sigma^{2}}\\right)\\right) :param inv_link_f: latent variables (link(f)) :type inv_link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in student t distribution :returns: likelihood evaluated for this point :rtype: float
#vtb def template_delete(call=None, kwargs=None): if call != : raise SaltCloudSystemExit( ) if kwargs is None: kwargs = {} name = kwargs.get(, None) template_id = kwargs.get(, None) if template_id: if name: log.warning( template_id\name\ template_id\ ) elif name: template_id = get_template_id(kwargs={: name}) else: raise SaltCloudSystemExit( name\template_id\ ) server, user, password = _get_xml_rpc() auth = .join([user, password]) response = server.one.template.delete(auth, int(template_id)) data = { : , : response[0], : response[1], : response[2], } return data
Deletes the given template from OpenNebula. Either a name or a template_id must be supplied. .. versionadded:: 2016.3.0 name The name of the template to delete. Can be used instead of ``template_id``. template_id The ID of the template to delete. Can be used instead of ``name``. CLI Example: .. code-block:: bash salt-cloud -f template_delete opennebula name=my-template salt-cloud --function template_delete opennebula template_id=5
#vtb
def instance_attr_ancestors(self, name, context=None):
    for astroid in self.ancestors(context=context):
        if name in astroid.instance_attrs:
            yield astroid
Iterate over the parents that define the given name as an attribute. :param name: The name to find definitions for. :type name: str :returns: The parents that define the given name as an instance attribute. :rtype: iterable(NodeNG)
#vtb def OnUpdate(self, event): undo_toolid = self.label2id["Undo"] redo_toolid = self.label2id["Redo"] self.EnableTool(undo_toolid, undo.stack().canundo()) self.EnableTool(redo_toolid, undo.stack().canredo()) undotext = undo.stack().undotext() undo_tool = self.FindTool(undo_toolid) if undotext is None: undo_tool.SetShortHelp(_("No undo actions available")) else: undo_tool.SetShortHelp(undotext) redotext = undo.stack().redotext() redo_tool = self.FindTool(redo_toolid) if redotext is None: redo_tool.SetShortHelp(_("No redo actions available")) else: redo_tool.SetShortHelp(redotext) self.Refresh() event.Skip()
Updates the toolbar states
#vtb
def _item_to_document_ref(iterator, item):
    document_id = item.name.split(_helpers.DOCUMENT_PATH_DELIMITER)[-1]
    return iterator.collection.document(document_id)
Convert Document resource to document ref. Args: iterator (google.api_core.page_iterator.GRPCIterator): iterator response item (dict): document resource
#vtb
def deserialize_duration(attr):
    if isinstance(attr, ET.Element):
        attr = attr.text
    try:
        duration = isodate.parse_duration(attr)
    except (ValueError, OverflowError, AttributeError) as err:
        msg = "Cannot deserialize duration object."
        raise_with_traceback(DeserializationError, msg, err)
    else:
        return duration
Deserialize ISO-8601 formatted string into TimeDelta object. :param str attr: response string to be deserialized. :rtype: TimeDelta :raises: DeserializationError if string format invalid.
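A hedged usage sketch (assumes the isodate package; the return value is whatever isodate.parse_duration produces):
deserialize_duration('PT1H30M')         # -> datetime.timedelta(hours=1, minutes=30)
deserialize_duration('not-a-duration')  # raises DeserializationError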
#vtb def create_model_schema(target_model): from nautilus.database import db schema = graphene.Schema(auto_camelcase=False) primary_key = target_model.primary_key() primary_key_type = convert_peewee_field(primary_key) class ModelObjectType(PeeweeObjectType): class Meta: model = target_model pk = Field(primary_key_type, description="The primary key for this object.") @graphene.resolve_only_args def resolve_pk(self): return getattr(self, self.primary_key().name) class Query(graphene.ObjectType): all_models = List(ModelObjectType, args=args_for_model(target_model)) @graphene.resolve_only_args def resolve_all_models(self, **args): return filter_model(target_model, args) schema.query = Query return schema
This function creates a graphql schema that provides a single model
#vtb
def is_valid_cidr(string_network):
    # The stripped literal is the '/' separator of CIDR notation.
    if string_network.count('/') == 1:
        try:
            mask = int(string_network.split('/')[1])
        except ValueError:
            return False
        if mask < 1 or mask > 32:
            return False
        try:
            socket.inet_aton(string_network.split('/')[0])
        except socket.error:
            return False
    else:
        return False
    return True
Very simple check of the cidr format in no_proxy variable. :rtype: bool
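For illustration:
is_valid_cidr('192.168.0.0/24')   # True
is_valid_cidr('192.168.0.0')      # False: no mask
is_valid_cidr('192.168.0.0/40')   # False: mask outside 1-32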
#vtb
def vlan_dot1q_tag_native(self, **kwargs):
    config = ET.Element("config")
    vlan = ET.SubElement(config, "vlan", xmlns="urn:brocade.com:mgmt:brocade-vlan")
    dot1q = ET.SubElement(vlan, "dot1q")
    tag = ET.SubElement(dot1q, "tag")
    native = ET.SubElement(tag, "native")
    # The popped key was lost in extraction; 'callback' follows the pattern of
    # this auto-generated code.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
#vtb
def append(self, data_frame):
    if len(data_frame) == 0:
        return
    data_frame_index = data_frame.index
    combined_index = self._index + data_frame_index
    if len(set(combined_index)) != len(combined_index):
        # The original message text was lost in extraction; this is a stand-in.
        raise ValueError('duplicate indexes: all appended indexes must differ from existing ones')
    for c, column in enumerate(data_frame.columns):
        if PYTHON3:
            self.set(indexes=data_frame_index, columns=column, values=data_frame.data[c].copy())
        else:
            self.set(indexes=data_frame_index, columns=column, values=data_frame.data[c][:])
Append another DataFrame to this DataFrame. If the new data_frame has columns that are not in the current DataFrame then new columns will be created. All of the indexes in the data_frame must be different from the current indexes or will raise an error. :param data_frame: DataFrame to append :return: nothing
#vtb def geoms(self, scale=None, bounds=None, as_element=True): feature = self.data if scale is not None: feature = feature.with_scale(scale) if bounds: extent = (bounds[0], bounds[2], bounds[1], bounds[3]) else: extent = None geoms = [g for g in feature.intersecting_geometries(extent) if g is not None] if not as_element: return geoms elif not geoms or in geoms[0].geom_type: return Polygons(geoms, crs=feature.crs) elif in geoms[0].geom_type: return Points(geoms, crs=feature.crs) else: return Path(geoms, crs=feature.crs)
Returns the geometries held by the Feature. Parameters ---------- scale: str Scale of the geometry to return expressed as string. Available scales depends on the Feature type. NaturalEarthFeature: '10m', '50m', '110m' GSHHSFeature: 'auto', 'coarse', 'low', 'intermediate', 'high', 'full' bounds: tuple Tuple of a bounding region to query for geometries in as_element: boolean Whether to wrap the geometries in an element Returns ------- geometries: Polygons/Path Polygons or Path object wrapping around returned geometries
#vtb def setter(self, func): if not callable(func): raise TypeError() if hasattr(func, ) and func.__code__.co_argcount != 2: raise TypeError() if func.__name__ != self.name: raise TypeError() self._set_func = func return self
Register a set function for the DynamicProperty This function must take two arguments, self and the new value. Input value to the function is validated with prop validation prior to execution.
#vtb
def send_cmd(cmd, args, ret):
    from dvc.daemon import daemon
    if not Analytics._is_enabled(cmd):
        return
    analytics = Analytics()
    analytics.collect_cmd(args, ret)
    daemon(["analytics", analytics.dump()])
Collect and send analytics for CLI command. Args: args (list): parsed args for the CLI command. ret (int): return value of the CLI command.
#vtb
async def stop(self):
    await self.node.stop(self.channel.guild.id)
    self.queue = []
    self.current = None
    self.position = 0
    self._paused = False
Stops playback from lavalink. .. important:: This method will clear the queue.
#vtb
def zipWithUniqueId(self):
    n = self.getNumPartitions()
    def func(k, it):
        for i, v in enumerate(it):
            yield v, i * n + k
    return self.mapPartitionsWithIndex(func)
Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k, 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method won't trigger a spark job, which is different from L{zipWithIndex} >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect() [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
#vtb
def name_targets(func):
    def wrap(*a, **kw):
        ret = func(*a, **kw)
        return dict(zip(ret[:-1], ret[-1]))
    return wrap
Wrap a function such that returning ``'a', 'b', 'c', [1, 2, 3]`` transforms the value into ``dict(a=1, b=2, c=3)``. This is useful in the case where the last parameter is an SCons command.
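A short usage sketch mirroring the docstring (the names are made up):
@name_targets
def build():
    return 'a', 'b', 'c', [1, 2, 3]

build()   # -> {'a': 1, 'b': 2, 'c': 3}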
#vtb
def bind(self, ticket, device_id, user_id):
    # The endpoint name and payload keys were lost in extraction; restored as an
    # assumption based on the WeChat device-bind API (path 'bind', user sent as 'openid').
    return self._post(
        'bind',
        data={
            'ticket': ticket,
            'device_id': device_id,
            'openid': user_id
        }
    )
Bind a device. For details see https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-7 :param ticket: credential proving the legitimacy of the bind operation (generated by the WeChat backend, obtained by the third-party H5 page via the client jsapi) :param device_id: device id :param user_id: the user's openid :return: the returned JSON data packet
#vtb
def _serialize_parameters(parameters):
    for key, value in parameters.items():
        if isinstance(value, bool):
            parameters[key] = "true" if value else "false"
        elif isinstance(value, dict):
            parameters[key] = "|".join(
                ("%s:%s" % (k, v) for k, v in value.items()))
        elif isinstance(value, (list, tuple)):
            parameters[key] = "|".join(value)
    return parameters
Serialize some parameters to match python native types with formats specified in google api docs like: * True/False -> "true"/"false", * {"a": 1, "b":2} -> "a:1|b:2" :type parameters: dict oif query parameters
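For illustration (the keys are made up):
_serialize_parameters({'sensor': True, 'location': {'a': 1, 'b': 2}, 'fields': ['name', 'geometry']})
# -> {'sensor': 'true', 'location': 'a:1|b:2', 'fields': 'name|geometry'}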
#vtb def parse_play(boxscore_id, details, is_hm): return p
Parse play details from a play-by-play string describing a play. Assuming valid input, this function returns structured data in a dictionary describing the play. If the play detail string was invalid, this function returns None. :param boxscore_id: the boxscore ID of the play :param details: detail string for the play :param is_hm: bool indicating whether the offense is at home :param returns: dictionary of play attributes or None if invalid :rtype: dictionary or None
#vtb
def hiddenColumns(self):
    output = []
    columns = self.columns()
    for c, column in enumerate(columns):
        if not self.isColumnHidden(c):
            continue
        output.append(column)
    return output
Returns a list of the hidden columns for this tree. :return [<str>, ..]
#vtb def hashed(field_name, percent, fields=None, count=0): if field_name is None: raise Exception() def _hashed_sampling(sql): projection = Sampling._create_projection(fields) sql = % \ (projection, sql, field_name, percent) if count != 0: sql = % (sql, count) return sql return _hashed_sampling
Provides a sampling strategy based on hashing and selecting a percentage of data. Args: field_name: the name of the field to hash. percent: the percentage of the resulting hashes to select. fields: an optional list of field names to retrieve. count: optional maximum count of rows to pick. Returns: A sampling function that can be applied to get a hash-based sampling.
#vtb def is_fw_complete(self): LOG.info("In fw_complete needed %(fw_created)s " "%(active_policy_id)s %(is_fw_drvr_created)s " "%(pol_present)s %(fw_type)s", {: self.fw_created, : self.active_pol_id, : self.is_fw_drvr_created(), : self.active_pol_id in self.policies, : self.fw_type}) if self.active_pol_id is not None: LOG.info("In Drvr create needed %(len_policy)s %(one_rule)s", {: len(self.policies[self.active_pol_id][]), : self.one_rule_present(self.active_pol_id)}) return self.fw_created and self.active_pol_id and ( self.is_fw_drvr_created()) and self.fw_type and ( self.active_pol_id in self.policies) and ( len(self.policies[self.active_pol_id][])) > 0 and ( self.one_rule_present(self.active_pol_id))
This API returns the completion status of FW. This returns True if a FW is created with a active policy that has more than one rule associated with it and if a driver init is done successfully.
#vtb
def fetchmany(self, size=None):
    if self._state == self._STATE_NONE:
        raise Exception("No query yet")
    if size is None:
        size = 1
    if not self._data:
        return []
    else:
        if len(self._data) > size:
            result, self._data = self._data[:size], self._data[size:]
        else:
            result, self._data = self._data, []
        self._rownumber += len(result)
        return result
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). An empty sequence is returned when no more rows are available. The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's arraysize determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned.
#vtb
def get_pixel_distance(self, x1, y1, x2, y2):
    dx = abs(x2 - x1)
    dy = abs(y2 - y1)
    dist = np.sqrt(dx * dx + dy * dy)
    dist = np.round(dist)
    return dist
Calculate distance between the given pixel positions. Parameters ---------- x1, y1, x2, y2 : number Pixel coordinates. Returns ------- dist : float Rounded distance.
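For illustration (viewer stands in for an instance of the owning class):
viewer.get_pixel_distance(0, 0, 3, 4)       # -> 5.0
viewer.get_pixel_distance(10, 10, 11, 12)   # -> 2.0  (sqrt(5) rounded)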
#vtb def to_task(self): from google.appengine.api.taskqueue import Task from google.appengine.api.taskqueue import TaskRetryOptions self._increment_recursion_level() self.check_recursion_depth() url = "%s/%s" % (ASYNC_ENDPOINT, self.function_path) kwargs = { : url, : self.get_headers().copy(), : json.dumps(self.to_dict()) } kwargs.update(copy.deepcopy(self.get_task_args())) retry_options = copy.deepcopy(DEFAULT_RETRY_OPTIONS) retry_options.update(kwargs.pop(, {})) kwargs[] = TaskRetryOptions(**retry_options) return Task(**kwargs)
Return a task object representing this async job.
#vtb
def inflate_plugins(self, plugins_definition, inflate_method):
    if isinstance(plugins_definition, list):
        return self.inflate_plugin_list(plugins_definition, inflate_method)
    elif isinstance(plugins_definition, dict):
        return self.inflate_plugin_dict(plugins_definition, inflate_method)
    else:
        # The original message text was lost in extraction; this is a stand-in.
        raise ValueError('plugins definition must be a list or dict, got %s' % type(plugins_definition))
Inflate multiple plugins based on a list/dict definition. Args: plugins_definition (list/dict): the plugins definitions. inflate_method (method): the method to indlate each plugin. Returns: list: a list of plugin instances. Raises: ValueError: when the definition type is not list or dict.
#vtb def mmi_ramp_roman(raster_layer): items = [] sorted_mmi_scale = sorted( earthquake_mmi_scale[], key=itemgetter()) for class_max in sorted_mmi_scale: colour = class_max[] label = % class_max[] ramp_item = QgsColorRampShader.ColorRampItem( class_max[], colour, label) items.append(ramp_item) raster_shader = QgsRasterShader() ramp_shader = QgsColorRampShader() ramp_shader.setColorRampType(QgsColorRampShader.Interpolated) ramp_shader.setColorRampItemList(items) raster_shader.setRasterShaderFunction(ramp_shader) band = 1 renderer = QgsSingleBandPseudoColorRenderer( raster_layer.dataProvider(), band, raster_shader) raster_layer.setRenderer(renderer)
Generate an mmi ramp using range of 1-10 on roman. A standarised range is used so that two shakemaps of different intensities can be properly compared visually with colours stretched accross the same range. The colours used are the 'standard' colours commonly shown for the mercalli scale e.g. on wikipedia and other sources. :param raster_layer: A raster layer that will have an mmi style applied. :type raster_layer: QgsRasterLayer .. versionadded:: 4.0
#vtb def chown(self, tarinfo, targetpath): if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: try: g = grp.getgrnam(tarinfo.gname)[2] except KeyError: g = tarinfo.gid try: u = pwd.getpwnam(tarinfo.uname)[2] except KeyError: u = tarinfo.uid try: if tarinfo.issym() and hasattr(os, "lchown"): os.lchown(targetpath, u, g) else: if sys.platform != "os2emx": os.chown(targetpath, u, g) except EnvironmentError as e: raise ExtractError("could not change owner")
Set owner of targetpath according to tarinfo.
#vtb def run_gradle(path=kernel_path, cmd=, skip_tests=False): class Gradle(BaseCommand): description = def skip_test_option(self, skip): if skip: return else: return def run(self): run([( if sys.platform == else ) + , , cmd, self.skip_test_option(skip_tests)], cwd=path) return Gradle
Return a Command for running gradle scripts. Parameters ---------- path: str, optional The base path of the node package. Defaults to the repo root. cmd: str, optional The command to run with gradlew.
#vtb def email_url_config(cls, url, backend=None): config = {} url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url path = url.path[1:] path = unquote_plus(path.split(, 2)[0]) config.update({ : path, : _cast_urlstr(url.username), : _cast_urlstr(url.password), : url.hostname, : _cast_int(url.port), }) if backend: config[] = backend elif url.scheme not in cls.EMAIL_SCHEMES: raise ImproperlyConfigured( % url.scheme) elif url.scheme in cls.EMAIL_SCHEMES: config[] = cls.EMAIL_SCHEMES[url.scheme] if url.scheme in (, ): config[] = True elif url.scheme == : config[] = True if url.query: config_options = {} for k, v in parse_qs(url.query).items(): opt = {k.upper(): _cast_int(v[0])} if k.upper() in cls._EMAIL_BASE_OPTIONS: config.update(opt) else: config_options.update(opt) config[] = config_options return config
Parses an email URL.
#vtb
def get_for_nearest_ancestor(self, cls, attribute_name):
    for family_cls in family(cls):
        if self.has(family_cls.__module__, family_cls.__name__, attribute_name):
            return self.get(family_cls.__module__, family_cls.__name__, attribute_name)
    ini_filename = cls.__module__.split(".")[-1]
    raise exc.PriorException(
        "The prior config at {}/{} does not contain {} in {} or any of its parents".format(
            self.path, ini_filename, attribute_name, cls.__name__))
Find a prior with the attribute analysis_path from the config for this class or one of its ancestors Parameters ---------- cls: class The class of interest attribute_name: String The analysis_path of the attribute Returns ------- prior_array: [] An array describing this prior
### Input: Find a prior with the attribute analysis_path from the config for this class or one of its ancestors Parameters ---------- cls: class The class of interest attribute_name: String The analysis_path of the attribute Returns ------- prior_array: [] An array describing this prior ### Response: #vtb def get_for_nearest_ancestor(self, cls, attribute_name): for family_cls in family(cls): if self.has(family_cls.__module__, family_cls.__name__, attribute_name): return self.get(family_cls.__module__, family_cls.__name__, attribute_name) ini_filename = cls.__module__.split(".")[-1] raise exc.PriorException( "The prior config at {}/{} does not contain {} in {} or any of its parents".format(self.path, ini_filename, attribute_name, cls.__name__ ))
#vtb def __regkey_value(self, path, name=, start_key=None): r if sys.version < : import _winreg as reg else: import winreg as reg def _fn(path, name=, start_key=None): if isinstance(path, str): path = path.split() if start_key is None: start_key = getattr(reg, path[0]) return _fn(path[1:], name, start_key) else: subkey = path.pop(0) with reg.OpenKey(start_key, subkey) as handle: if path: return _fn(path, name, handle) else: desc, i = None, 0 while not desc or desc[0] != name: desc = reg.EnumValue(handle, i) i += 1 return desc[1] return _fn(path, name, start_key)
r'''Return the data of value mecabrc at MeCab HKEY node. On Windows, the path to the mecabrc as set in the Windows Registry is used to deduce the path to libmecab.dll. Returns: The full path to the mecabrc on Windows. Raises: WindowsError: A problem was encountered in trying to locate the value mecabrc at HKEY_CURRENT_USER\Software\MeCab.
### Input: r'''Return the data of value mecabrc at MeCab HKEY node. On Windows, the path to the mecabrc as set in the Windows Registry is used to deduce the path to libmecab.dll. Returns: The full path to the mecabrc on Windows. Raises: WindowsError: A problem was encountered in trying to locate the value mecabrc at HKEY_CURRENT_USER\Software\MeCab. ### Response: #vtb def __regkey_value(self, path, name=, start_key=None): r if sys.version < : import _winreg as reg else: import winreg as reg def _fn(path, name=, start_key=None): if isinstance(path, str): path = path.split() if start_key is None: start_key = getattr(reg, path[0]) return _fn(path[1:], name, start_key) else: subkey = path.pop(0) with reg.OpenKey(start_key, subkey) as handle: if path: return _fn(path, name, handle) else: desc, i = None, 0 while not desc or desc[0] != name: desc = reg.EnumValue(handle, i) i += 1 return desc[1] return _fn(path, name, start_key)
#vtb def get_questions(self, answered=None, honor_sequential=True, update=True): def update_question_list(): latest_question_response = question_map[][0] question_answered = False if not in latest_question_response: question_answered = True if answered is None or answered == question_answered: question_list.append(self.get_question(question_map=question_map)) return question_answered prev_question_answered = True question_list = [] if update: self._update_questions() for question_map in self._my_map[]: if self._is_question_sequential(question_map) and honor_sequential: if prev_question_answered: prev_question_answered = update_question_list() else: update_question_list() if self._my_map[] is None: self._my_map[] = DateTime.utcnow() return QuestionList(question_list, runtime=self._runtime, proxy=self._proxy)
gets all available questions for this section if answered == False: only return next unanswered question if answered == True: only return next answered question if answered is None: return next question whether answered or not if honor_sequential == True: only return questions if section or part is set to sequential items
### Input: gets all available questions for this section if answered == False: only return next unanswered question if answered == True: only return next answered question if answered is None: return next question whether answered or not if honor_sequential == True: only return questions if section or part is set to sequential items ### Response: #vtb def get_questions(self, answered=None, honor_sequential=True, update=True): def update_question_list(): latest_question_response = question_map[][0] question_answered = False if not in latest_question_response: question_answered = True if answered is None or answered == question_answered: question_list.append(self.get_question(question_map=question_map)) return question_answered prev_question_answered = True question_list = [] if update: self._update_questions() for question_map in self._my_map[]: if self._is_question_sequential(question_map) and honor_sequential: if prev_question_answered: prev_question_answered = update_question_list() else: update_question_list() if self._my_map[] is None: self._my_map[] = DateTime.utcnow() return QuestionList(question_list, runtime=self._runtime, proxy=self._proxy)
#vtb def add_local(self, field_name, field): self._dlog("adding local ".format(field_name)) field._pfp__name = field_name self._curr_scope["vars"][field_name] = field
Add a local variable in the current scope :field_name: The field's name :field: The field :returns: None
### Input: Add a local variable in the current scope :field_name: The field's name :field: The field :returns: None ### Response: #vtb def add_local(self, field_name, field): self._dlog("adding local ".format(field_name)) field._pfp__name = field_name self._curr_scope["vars"][field_name] = field
#vtb def apply(self, df): if hasattr(self.definition, ): r = self.definition(df) elif self.definition in df.columns: r = df[self.definition] elif not isinstance(self.definition, string_types): r = pd.Series(self.definition, index=df.index) else: raise ValueError("Invalid column definition: %s" % str(self.definition)) return r.astype(self.astype) if self.astype else r
Takes a pd.DataFrame and returns the newly defined column, i.e. a pd.Series that has the same index as `df`.
### Input: Takes a pd.DataFrame and returns the newly defined column, i.e. a pd.Series that has the same index as `df`. ### Response: #vtb def apply(self, df): if hasattr(self.definition, ): r = self.definition(df) elif self.definition in df.columns: r = df[self.definition] elif not isinstance(self.definition, string_types): r = pd.Series(self.definition, index=df.index) else: raise ValueError("Invalid column definition: %s" % str(self.definition)) return r.astype(self.astype) if self.astype else r
#vtb def check_config_xml(self, contents): self.log(u"Checking contents XML config file") self.result = ValidatorResult() if self._are_safety_checks_disabled(u"check_config_xml"): return self.result contents = gf.safe_bytes(contents) self.log(u"Checking that contents is well formed") self.check_raw_string(contents, is_bstring=True) if not self.result.passed: return self.result self.log(u"Checking required parameters for job") job_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=True) self._check_required_parameters(self.XML_JOB_REQUIRED_PARAMETERS, job_parameters) if not self.result.passed: return self.result self.log(u"Checking required parameters for task") tasks_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=False) for parameters in tasks_parameters: self.log([u"Checking required parameters for task: ", parameters]) self._check_required_parameters(self.XML_TASK_REQUIRED_PARAMETERS, parameters) if not self.result.passed: return self.result return self.result
Check whether the given XML config file contents is well-formed and it has all the required parameters. :param string contents: the XML config file contents or XML config string :param bool is_config_string: if ``True``, contents is a config string :rtype: :class:`~aeneas.validator.ValidatorResult`
### Input: Check whether the given XML config file contents is well-formed and it has all the required parameters. :param string contents: the XML config file contents or XML config string :param bool is_config_string: if ``True``, contents is a config string :rtype: :class:`~aeneas.validator.ValidatorResult` ### Response: #vtb def check_config_xml(self, contents): self.log(u"Checking contents XML config file") self.result = ValidatorResult() if self._are_safety_checks_disabled(u"check_config_xml"): return self.result contents = gf.safe_bytes(contents) self.log(u"Checking that contents is well formed") self.check_raw_string(contents, is_bstring=True) if not self.result.passed: return self.result self.log(u"Checking required parameters for job") job_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=True) self._check_required_parameters(self.XML_JOB_REQUIRED_PARAMETERS, job_parameters) if not self.result.passed: return self.result self.log(u"Checking required parameters for task") tasks_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=False) for parameters in tasks_parameters: self.log([u"Checking required parameters for task: ", parameters]) self._check_required_parameters(self.XML_TASK_REQUIRED_PARAMETERS, parameters) if not self.result.passed: return self.result return self.result
#vtb def create_project(self, key, name=None, assignee=None, type="Software", template_name=None): if assignee is None: assignee = self.current_user() if name is None: name = key possible_templates = [, , , ] if template_name is not None: possible_templates = [template_name] templates = self.templates() template_key = list(templates.values())[0][] for template_name, template_dic in templates.items(): if template_name in possible_templates: template_key = template_dic[] break payload = {: name, : key, : , : template_key, : template_key, : assignee, } if self._version[0] > 6: payload[] = type headers = CaseInsensitiveDict( {: }) url = self._options[] + \ r = self._session.post(url, data=payload, headers=headers) if r.status_code == 200: r_json = json_loads(r) return r_json f = tempfile.NamedTemporaryFile( suffix=, prefix=, delete=False) f.write(r.text) if self.logging: logging.error( "Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s]." % ( f.name, r.status_code)) return False
Create a project with the specified parameters. :param key: Mandatory. Must match JIRA project key requirements, usually only 2-10 uppercase characters. :type: str :param name: If not specified it will use the key value. :type name: Optional[str] :param assignee: If not specified it will use current user. :type assignee: Optional[str] :param type: Determines the type of project should be created. :type type: Optional[str] :param template_name: is used to create a project based on one of the existing project templates. If `template_name` is not specified, then it should use one of the default values. :type template_name: Optional[str] :return: Should evaluate to False if it fails otherwise it will be the new project id. :rtype: Union[bool,int]
### Input: Create a project with the specified parameters. :param key: Mandatory. Must match JIRA project key requirements, usually only 2-10 uppercase characters. :type: str :param name: If not specified it will use the key value. :type name: Optional[str] :param assignee: If not specified it will use current user. :type assignee: Optional[str] :param type: Determines the type of project should be created. :type type: Optional[str] :param template_name: is used to create a project based on one of the existing project templates. If `template_name` is not specified, then it should use one of the default values. :type template_name: Optional[str] :return: Should evaluate to False if it fails otherwise it will be the new project id. :rtype: Union[bool,int] ### Response: #vtb def create_project(self, key, name=None, assignee=None, type="Software", template_name=None): if assignee is None: assignee = self.current_user() if name is None: name = key possible_templates = [, , , ] if template_name is not None: possible_templates = [template_name] templates = self.templates() template_key = list(templates.values())[0][] for template_name, template_dic in templates.items(): if template_name in possible_templates: template_key = template_dic[] break payload = {: name, : key, : , : template_key, : template_key, : assignee, } if self._version[0] > 6: payload[] = type headers = CaseInsensitiveDict( {: }) url = self._options[] + \ r = self._session.post(url, data=payload, headers=headers) if r.status_code == 200: r_json = json_loads(r) return r_json f = tempfile.NamedTemporaryFile( suffix=, prefix=, delete=False) f.write(r.text) if self.logging: logging.error( "Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s]." % ( f.name, r.status_code)) return False
#vtb def get_default_config(self): config = super(OneWireCollector, self).get_default_config() config.update({ : , : , }) return config
Returns the default collector settings
### Input: Returns the default collector settings ### Response: #vtb def get_default_config(self): config = super(OneWireCollector, self).get_default_config() config.update({ : , : , }) return config
#vtb def fantope(x, rho, dim, tol=1e-4): U, V = np.linalg.eigh(x) minval, maxval = np.maximum(U.min(), 0), np.maximum(U.max(), 20 * dim) while True: theta = 0.5 * (maxval + minval) thr_eigvals = np.minimum(np.maximum((U - theta), 0), 1) constraint = np.sum(thr_eigvals) if np.abs(constraint - dim) <= tol: break elif constraint < dim: maxval = theta elif constraint > dim: minval = theta else: break return np.linalg.multi_dot((V, np.diag(thr_eigvals), V.T))
Projection onto the fantope [1]_ .. [1] Vu, Vincent Q., et al. "Fantope projection and selection: A near-optimal convex relaxation of sparse PCA." Advances in neural information processing systems. 2013.
### Input: Projection onto the fantope [1]_ .. [1] Vu, Vincent Q., et al. "Fantope projection and selection: A near-optimal convex relaxation of sparse PCA." Advances in neural information processing systems. 2013. ### Response: #vtb def fantope(x, rho, dim, tol=1e-4): U, V = np.linalg.eigh(x) minval, maxval = np.maximum(U.min(), 0), np.maximum(U.max(), 20 * dim) while True: theta = 0.5 * (maxval + minval) thr_eigvals = np.minimum(np.maximum((U - theta), 0), 1) constraint = np.sum(thr_eigvals) if np.abs(constraint - dim) <= tol: break elif constraint < dim: maxval = theta elif constraint > dim: minval = theta else: break return np.linalg.multi_dot((V, np.diag(thr_eigvals), V.T))
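In symbols, the snippet above computes the Euclidean projection onto the fantope F^d = {X : 0 <= X <= I in the semidefinite order, trace(X) = d}, with d = dim: given the eigendecomposition x = V diag(u) V^T, it returns V diag(min(max(u - theta, 0), 1)) V^T, where the shift theta is found by bisection so that the clipped eigenvalues sum to d (within tol). Note that the rho argument is accepted but unused in the body shown.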
#vtb def convertDirMP3ToWav(dirName, Fs, nC, useMp3TagsAsName = False): types = (dirName+os.sep+,) filesToProcess = [] for files in types: filesToProcess.extend(glob.glob(files)) for f in filesToProcess: audioFile = eyed3.load(f) if useMp3TagsAsName and audioFile.tag != None: artist = audioFile.tag.artist title = audioFile.tag.title if artist!=None and title!=None: if len(title)>0 and len(artist)>0: wavFileName = ntpath.split(f)[0] + os.sep + artist.replace(","," ") + " --- " + title.replace(","," ") + ".wav" else: wavFileName = f.replace(".mp3",".wav") else: wavFileName = f.replace(".mp3",".wav") else: wavFileName = f.replace(".mp3",".wav") command = "avconv -i \"" + f + "\" -ar " +str(Fs) + " -ac " + str(nC) + " \"" + wavFileName + "\""; print(command) os.system(command.decode().encode(,).replace("\0",""))
This function converts the MP3 files stored in a folder to WAV. If required, the output names of the WAV files are based on MP3 tags, otherwise the same names are used. ARGUMENTS: - dirName: the path of the folder where the MP3s are stored - Fs: the sampling rate of the generated WAV files - nC: the number of channels of the generated WAV files - useMp3TagsAsName: True if the WAV filename is generated on MP3 tags
### Input: This function converts the MP3 files stored in a folder to WAV. If required, the output names of the WAV files are based on MP3 tags, otherwise the same names are used. ARGUMENTS: - dirName: the path of the folder where the MP3s are stored - Fs: the sampling rate of the generated WAV files - nC: the number of channels of the generated WAV files - useMp3TagsAsName: True if the WAV filename is generated on MP3 tags ### Response: #vtb def convertDirMP3ToWav(dirName, Fs, nC, useMp3TagsAsName = False): types = (dirName+os.sep+,) filesToProcess = [] for files in types: filesToProcess.extend(glob.glob(files)) for f in filesToProcess: audioFile = eyed3.load(f) if useMp3TagsAsName and audioFile.tag != None: artist = audioFile.tag.artist title = audioFile.tag.title if artist!=None and title!=None: if len(title)>0 and len(artist)>0: wavFileName = ntpath.split(f)[0] + os.sep + artist.replace(","," ") + " --- " + title.replace(","," ") + ".wav" else: wavFileName = f.replace(".mp3",".wav") else: wavFileName = f.replace(".mp3",".wav") else: wavFileName = f.replace(".mp3",".wav") command = "avconv -i \"" + f + "\" -ar " +str(Fs) + " -ac " + str(nC) + " \"" + wavFileName + "\""; print(command) os.system(command.decode().encode(,).replace("\0",""))
#vtb def DeleteGroup(r, group, dry_run=False): query = { "dry-run": dry_run, } return r.request("delete", "/2/groups/%s" % group, query=query)
Deletes a node group. @type group: str @param group: the node group to delete @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id
### Input: Deletes a node group. @type group: str @param group: the node group to delete @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id ### Response: #vtb def DeleteGroup(r, group, dry_run=False): query = { "dry-run": dry_run, } return r.request("delete", "/2/groups/%s" % group, query=query)
#vtb def prepare_renderable(request, test_case_result, is_admin): test_case = test_case_result.test_case file_directory = request.registry.settings[] sha1 = test_case_result.diff.sha1 if test_case_result.diff else None kwargs = {: test_case.id, : test_case.testable.name, : test_case.name, : test_case.points, : test_case_result.status, : test_case_result.extra} if test_case.output_type == : url = request.route_path(, filename=, _query={: 1}, sha1sum=sha1) if sha1 else None return ImageOutput(url=url, **kwargs) elif test_case.output_type == : content = None if sha1: with open(File.file_path(file_directory, sha1)) as fp: content = fp.read() return TextOutput(content=content, **kwargs) elif not test_case_result.diff: return DiffWithMetadata(diff=None, **kwargs) try: with open(File.file_path(file_directory, sha1)) as fp: diff = pickle.load(fp) except (AttributeError, EOFError): content = content += traceback.format_exc(1) return TextOutput(content=content, **kwargs) except Exception: content = content += traceback.format_exc(1) return TextOutput(content=content, **kwargs) diff.hide_expected = not is_admin and test_case.hide_expected return DiffWithMetadata(diff=diff, **kwargs)
Return a completed Renderable.
### Input: Return a completed Renderable. ### Response: #vtb def prepare_renderable(request, test_case_result, is_admin): test_case = test_case_result.test_case file_directory = request.registry.settings[] sha1 = test_case_result.diff.sha1 if test_case_result.diff else None kwargs = {: test_case.id, : test_case.testable.name, : test_case.name, : test_case.points, : test_case_result.status, : test_case_result.extra} if test_case.output_type == : url = request.route_path(, filename=, _query={: 1}, sha1sum=sha1) if sha1 else None return ImageOutput(url=url, **kwargs) elif test_case.output_type == : content = None if sha1: with open(File.file_path(file_directory, sha1)) as fp: content = fp.read() return TextOutput(content=content, **kwargs) elif not test_case_result.diff: return DiffWithMetadata(diff=None, **kwargs) try: with open(File.file_path(file_directory, sha1)) as fp: diff = pickle.load(fp) except (AttributeError, EOFError): content = content += traceback.format_exc(1) return TextOutput(content=content, **kwargs) except Exception: content = content += traceback.format_exc(1) return TextOutput(content=content, **kwargs) diff.hide_expected = not is_admin and test_case.hide_expected return DiffWithMetadata(diff=diff, **kwargs)
#vtb def git_url_ssh_to_https(url): path = url.split(, 1)[1][1:].strip() new = % path print( % new) return new.format(GITHUB_TOKEN=os.getenv())
Convert a git url url will look like https://github.com/ARMmbed/mbed-cloud-sdk-python.git or [email protected]:ARMmbed/mbed-cloud-sdk-python.git we want: https://${GITHUB_TOKEN}@github.com/ARMmbed/mbed-cloud-sdk-python-private.git
### Input: Convert a git url url will look like https://github.com/ARMmbed/mbed-cloud-sdk-python.git or [email protected]:ARMmbed/mbed-cloud-sdk-python.git we want: https://${GITHUB_TOKEN}@github.com/ARMmbed/mbed-cloud-sdk-python-private.git ### Response: #vtb def git_url_ssh_to_https(url): path = url.split(, 1)[1][1:].strip() new = % path print( % new) return new.format(GITHUB_TOKEN=os.getenv())
#vtb def get_composite_reflectivity(self, tower_id, background=, include_legend=True, include_counties=True, include_warnings=True, include_highways=True, include_cities=True, include_rivers=True, include_topography=True): return self._build_radar_image(tower_id, "NCR", background=background, include_legend=include_legend, include_counties=include_counties, include_warnings=include_warnings, include_highways=include_highways, include_cities=include_cities, include_rivers=include_rivers, include_topography=include_topography)
Get the composite reflectivity for a noaa radar site. :param tower_id: The noaa tower id. Ex Huntsville, Al -> 'HTX'. :type tower_id: str :param background: The hex background color. :type background: str :param include_legend: True - include legend. :type include_legend: bool :param include_counties: True - include county lines. :type include_counties: bool :param include_warnings: True - include warning lines. :type include_warnings: bool :param include_highways: True - include highways. :type include_highways: bool :param include_cities: True - include city labels. :type include_cities: bool :param include_rivers: True - include rivers :type include_rivers: bool :param include_topography: True - include topography :type include_topography: bool :rtype: PIL.Image :return: A PIL.Image instance with the Radar composite reflectivity.
### Input: Get the composite reflectivity for a noaa radar site. :param tower_id: The noaa tower id. Ex Huntsville, Al -> 'HTX'. :type tower_id: str :param background: The hex background color. :type background: str :param include_legend: True - include legend. :type include_legend: bool :param include_counties: True - include county lines. :type include_counties: bool :param include_warnings: True - include warning lines. :type include_warnings: bool :param include_highways: True - include highways. :type include_highways: bool :param include_cities: True - include city labels. :type include_cities: bool :param include_rivers: True - include rivers :type include_rivers: bool :param include_topography: True - include topography :type include_topography: bool :rtype: PIL.Image :return: A PIL.Image instance with the Radar composite reflectivity. ### Response: #vtb def get_composite_reflectivity(self, tower_id, background=, include_legend=True, include_counties=True, include_warnings=True, include_highways=True, include_cities=True, include_rivers=True, include_topography=True): return self._build_radar_image(tower_id, "NCR", background=background, include_legend=include_legend, include_counties=include_counties, include_warnings=include_warnings, include_highways=include_highways, include_cities=include_cities, include_rivers=include_rivers, include_topography=include_topography)
#vtb def get_key_from_envs(envs, key): if hasattr(envs, ): envs = [envs] for env in envs: if key in env: return env[key] return NO_VALUE
Return the value of a key from the given dict respecting namespaces. Data can also be a list of data dicts.
### Input: Return the value of a key from the given dict respecting namespaces. Data can also be a list of data dicts. ### Response: #vtb def get_key_from_envs(envs, key): if hasattr(envs, ): envs = [envs] for env in envs: if key in env: return env[key] return NO_VALUE
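A small usage sketch for the lookup above (the data is illustrative; NO_VALUE is the sentinel assumed to be defined elsewhere in the same module):

    envs = [{"DB_HOST": "localhost"}, {"DB_HOST": "db.internal", "DB_PORT": "5432"}]
    get_key_from_envs(envs, "DB_HOST")   # -> "localhost": the first dict that has the key wins
    get_key_from_envs(envs, "DB_PORT")   # -> "5432"
    get_key_from_envs(envs, "MISSING")   # -> NO_VALUE (sentinel for "not found")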
#vtb def any2utf8(text, errors=, encoding=): if isinstance(text, unicode): return text.encode() return unicode(text, encoding, errors=errors).encode()
Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8.
### Input: Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8. ### Response: #vtb def any2utf8(text, errors=, encoding=): if isinstance(text, unicode): return text.encode() return unicode(text, encoding, errors=errors).encode()
#vtb def list_stateful_set_for_all_namespaces(self, **kwargs): kwargs[] = True if kwargs.get(): return self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs) return data
list or watch objects of kind StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_stateful_set_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1StatefulSetList If the method is called asynchronously, returns the request thread.
### Input: list or watch objects of kind StatefulSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_stateful_set_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1StatefulSetList If the method is called asynchronously, returns the request thread. ### Response: #vtb def list_stateful_set_for_all_namespaces(self, **kwargs): kwargs[] = True if kwargs.get(): return self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs) return data
#vtb async def evaluate_trained_model(state): return await evaluate_model( state.train_model_path, state.best_model_path, os.path.join(fsdb.eval_dir(), state.train_model_name), state.seed)
Evaluate the most recently trained model against the current best model. Args: state: the RL loop State instance.
### Input: Evaluate the most recently trained model against the current best model. Args: state: the RL loop State instance. ### Response: #vtb async def evaluate_trained_model(state): return await evaluate_model( state.train_model_path, state.best_model_path, os.path.join(fsdb.eval_dir(), state.train_model_name), state.seed)
#vtb def excerpts(n_samples, n_excerpts=None, excerpt_size=None): assert n_excerpts >= 2 step = _excerpt_step(n_samples, n_excerpts=n_excerpts, excerpt_size=excerpt_size) for i in range(n_excerpts): start = i * step if start >= n_samples: break end = min(start + excerpt_size, n_samples) yield start, end
Yield (start, end) where start is included and end is excluded.
### Input: Yield (start, end) where start is included and end is excluded. ### Response: #vtb def excerpts(n_samples, n_excerpts=None, excerpt_size=None): assert n_excerpts >= 2 step = _excerpt_step(n_samples, n_excerpts=n_excerpts, excerpt_size=excerpt_size) for i in range(n_excerpts): start = i * step if start >= n_samples: break end = min(start + excerpt_size, n_samples) yield start, end
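For orientation, a hedged usage sketch (load_samples is hypothetical, and the concrete step comes from _excerpt_step, which is defined elsewhere, so exact start values are not shown):

    data = load_samples()   # hypothetical 1-D array of n_samples items
    for start, end in excerpts(len(data), n_excerpts=10, excerpt_size=100):
        chunk = data[start:end]   # start inclusive, end exclusive, len(chunk) <= 100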
#vtb def get_fields(model_class): return [ attr for attr, value in model_class.__dict__.items() if issubclass(type(value), (mongo.base.BaseField, mongo.EmbeddedDocumentField)) ]
Pass in a mongo model class and extract all the attributes which are mongoengine fields Returns: list of strings of field attributes
### Input: Pass in a mongo model class and extract all the attributes which are mongoengine fields Returns: list of strings of field attributes ### Response: #vtb def get_fields(model_class): return [ attr for attr, value in model_class.__dict__.items() if issubclass(type(value), (mongo.base.BaseField, mongo.EmbeddedDocumentField)) ]
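A usage sketch, assuming `mongo` here is the `mongoengine` package (as the `mongo.base.BaseField` path suggests); field order follows class-dict iteration and is not guaranteed:

    import mongoengine as mongo

    class User(mongo.Document):
        name = mongo.StringField()
        email = mongo.StringField()

    get_fields(User)   # expected to contain 'name' and 'email'; non-field attributes fail the issubclass test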
#vtb def serve(request, path, document_root=None, show_indexes=False, default=): path = posixpath.normpath(unquote(path)) path = path.lstrip() newpath = for part in path.split(): if not part: continue drive, part = os.path.splitdrive(part) head, part = os.path.split(part) if part in (os.curdir, os.pardir): continue newpath = os.path.join(newpath, part).replace(, ) if newpath and path != newpath: return HttpResponseRedirect(newpath) fullpath = os.path.join(document_root, newpath) if os.path.isdir(fullpath) and default: defaultpath = os.path.join(fullpath, default) if os.path.exists(defaultpath): fullpath = defaultpath if os.path.isdir(fullpath): if show_indexes: return directory_index(newpath, fullpath) raise Http404("Directory indexes are not allowed here.") if not os.path.exists(fullpath): raise Http404( % fullpath) statobj = os.stat(fullpath) mimetype = mimetypes.guess_type(fullpath)[0] or if not was_modified_since(request.META.get(), statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]): if django.VERSION > (1, 6): return HttpResponseNotModified(content_type=mimetype) else: return HttpResponseNotModified(mimetype=mimetype) contents = open(fullpath, ).read() if django.VERSION > (1, 6): response = HttpResponse(contents, content_type=mimetype) else: response = HttpResponse(contents, mimetype=mimetype) response["Last-Modified"] = http_date(statobj[stat.ST_MTIME]) response["Content-Length"] = len(contents) return response
Serve static files below a given point in the directory structure. To use, put a URL pattern such as:: (r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root' : '/path/to/my/files/'}) in your URLconf. You must provide the ``document_root`` param. You may also set ``show_indexes`` to ``True`` if you'd like to serve a basic index of the directory. This index view will use the template hardcoded below, but if you'd like to override it, you can create a template called ``static/directory_index.html``. Modified by ticket #1013 to serve index.html files in the same manner as Apache and other web servers. https://code.djangoproject.com/ticket/1013
### Input: Serve static files below a given point in the directory structure. To use, put a URL pattern such as:: (r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root' : '/path/to/my/files/'}) in your URLconf. You must provide the ``document_root`` param. You may also set ``show_indexes`` to ``True`` if you'd like to serve a basic index of the directory. This index view will use the template hardcoded below, but if you'd like to override it, you can create a template called ``static/directory_index.html``. Modified by ticket #1013 to serve index.html files in the same manner as Apache and other web servers. https://code.djangoproject.com/ticket/1013 ### Response: #vtb def serve(request, path, document_root=None, show_indexes=False, default=): path = posixpath.normpath(unquote(path)) path = path.lstrip() newpath = for part in path.split(): if not part: continue drive, part = os.path.splitdrive(part) head, part = os.path.split(part) if part in (os.curdir, os.pardir): continue newpath = os.path.join(newpath, part).replace(, ) if newpath and path != newpath: return HttpResponseRedirect(newpath) fullpath = os.path.join(document_root, newpath) if os.path.isdir(fullpath) and default: defaultpath = os.path.join(fullpath, default) if os.path.exists(defaultpath): fullpath = defaultpath if os.path.isdir(fullpath): if show_indexes: return directory_index(newpath, fullpath) raise Http404("Directory indexes are not allowed here.") if not os.path.exists(fullpath): raise Http404( % fullpath) statobj = os.stat(fullpath) mimetype = mimetypes.guess_type(fullpath)[0] or if not was_modified_since(request.META.get(), statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]): if django.VERSION > (1, 6): return HttpResponseNotModified(content_type=mimetype) else: return HttpResponseNotModified(mimetype=mimetype) contents = open(fullpath, ).read() if django.VERSION > (1, 6): response = HttpResponse(contents, content_type=mimetype) else: response = HttpResponse(contents, mimetype=mimetype) response["Last-Modified"] = http_date(statobj[stat.ST_MTIME]) response["Content-Length"] = len(contents) return response
#vtb def isValidFeatureWriter(klass): if not isclass(klass): logger.error("%r is not a class", klass) return False if not hasattr(klass, "tableTag"): logger.error("%r does not have required attribute", klass) return False if not hasattr(klass, "write"): logger.error("%r does not have a required method", klass) return False if ( getargspec(klass.write).args != getargspec(BaseFeatureWriter.write).args ): logger.error("%r method has incorrect signature", klass) return False return True
Return True if 'klass' is a valid feature writer class. A valid feature writer class is a class (of type 'type'), that has two required attributes: 1) 'tableTag' (str), which can be "GSUB", "GPOS", or other similar tags. 2) 'write' (bound method), with the signature matching the same method from the BaseFeatureWriter class: def write(self, font, feaFile, compiler=None)
### Input: Return True if 'klass' is a valid feature writer class. A valid feature writer class is a class (of type 'type'), that has two required attributes: 1) 'tableTag' (str), which can be "GSUB", "GPOS", or other similar tags. 2) 'write' (bound method), with the signature matching the same method from the BaseFeatureWriter class: def write(self, font, feaFile, compiler=None) ### Response: #vtb def isValidFeatureWriter(klass): if not isclass(klass): logger.error("%r is not a class", klass) return False if not hasattr(klass, "tableTag"): logger.error("%r does not have required attribute", klass) return False if not hasattr(klass, "write"): logger.error("%r does not have a required method", klass) return False if ( getargspec(klass.write).args != getargspec(BaseFeatureWriter.write).args ): logger.error("%r method has incorrect signature", klass) return False return True
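Sketch of a minimal class that should satisfy the three checks above (the class name is made up; the write signature mirrors BaseFeatureWriter.write as stated in the docstring):

    class MyKernWriter:
        tableTag = "GPOS"

        def write(self, font, feaFile, compiler=None):
            # append generated rules to feaFile here
            return True

    isValidFeatureWriter(MyKernWriter)   # -> True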
#vtb def disp(self, idx=100): filenameprefix = self.name_prefix def printdatarow(dat, iteration): i = np.where(dat.f[:, 0] == iteration)[0][0] j = np.where(dat.std[:, 0] == iteration)[0][0] print( % (int(dat.f[i, 0])) + % (int(dat.f[i, 1])) + % (dat.f[i, 5]) + % (dat.f[i, 3]) + % (max(dat.std[j, 5:])) + % min(dat.std[j, 5:])) dat = CMADataLogger(filenameprefix).load() ndata = dat.f.shape[0] if idx is None: idx = 100 if isscalar(idx): if idx: idx = np.r_[0, 1, idx:ndata - 3:idx, -3:0] else: idx = np.r_[0, 1, -3:0] idx = array(idx) idx = idx[idx < ndata] idx = idx[-idx <= ndata] iters = dat.f[idx, 0] idxbest = np.argmin(dat.f[:, 5]) iterbest = dat.f[idxbest, 0] if len(iters) == 1: printdatarow(dat, iters[0]) else: self.disp_header() for i in iters: printdatarow(dat, i) self.disp_header() printdatarow(dat, iterbest) sys.stdout.flush()
displays selected data from (files written by) the class `CMADataLogger`. Arguments --------- `idx` indices corresponding to rows in the data file; if idx is a scalar (int), the first two, then every idx-th, and the last three rows are displayed. Too large index values are removed. Example ------- >>> import cma, numpy as np >>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, {'verb_disp':1e9}) # generate data >>> assert res[1] < 1e-9 >>> assert res[2] < 4400 >>> l = cma.CMADataLogger() # == res[-1], logger with default name, "points to" above data >>> l.disp([0,-1]) # first and last >>> l.disp(20) # some first/last and every 20-th line >>> l.disp(np.r_[0:999999:100, -1]) # every 100-th and last >>> l.disp(np.r_[0, -10:0]) # first and ten last >>> cma.disp(l.name_prefix, np.r_[0::100, -10:]) # the same as l.disp(...) Details ------- The data line with the best f-value is displayed as last line. :See: `disp()`
### Input: displays selected data from (files written by) the class `CMADataLogger`. Arguments --------- `idx` indices corresponding to rows in the data file; if idx is a scalar (int), the first two, then every idx-th, and the last three rows are displayed. Too large index values are removed. Example ------- >>> import cma, numpy as np >>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, {'verb_disp':1e9}) # generate data >>> assert res[1] < 1e-9 >>> assert res[2] < 4400 >>> l = cma.CMADataLogger() # == res[-1], logger with default name, "points to" above data >>> l.disp([0,-1]) # first and last >>> l.disp(20) # some first/last and every 20-th line >>> l.disp(np.r_[0:999999:100, -1]) # every 100-th and last >>> l.disp(np.r_[0, -10:0]) # first and ten last >>> cma.disp(l.name_prefix, np.r_[0::100, -10:]) # the same as l.disp(...) Details ------- The data line with the best f-value is displayed as last line. :See: `disp()` ### Response: #vtb def disp(self, idx=100): filenameprefix = self.name_prefix def printdatarow(dat, iteration): i = np.where(dat.f[:, 0] == iteration)[0][0] j = np.where(dat.std[:, 0] == iteration)[0][0] print( % (int(dat.f[i, 0])) + % (int(dat.f[i, 1])) + % (dat.f[i, 5]) + % (dat.f[i, 3]) + % (max(dat.std[j, 5:])) + % min(dat.std[j, 5:])) dat = CMADataLogger(filenameprefix).load() ndata = dat.f.shape[0] if idx is None: idx = 100 if isscalar(idx): if idx: idx = np.r_[0, 1, idx:ndata - 3:idx, -3:0] else: idx = np.r_[0, 1, -3:0] idx = array(idx) idx = idx[idx < ndata] idx = idx[-idx <= ndata] iters = dat.f[idx, 0] idxbest = np.argmin(dat.f[:, 5]) iterbest = dat.f[idxbest, 0] if len(iters) == 1: printdatarow(dat, iters[0]) else: self.disp_header() for i in iters: printdatarow(dat, i) self.disp_header() printdatarow(dat, iterbest) sys.stdout.flush()
#vtb def get_dates_range(self, scale=, start=None, end=None, date_max=): automaximumdailyweeklymonthlyquarterlyyearlyauto if scale not in [, , , , , , ]: raise ValueError( % scale) start = Timestamp(start or self._start.min() or date_max) start = Timestamp(date_max) if repr(start) == else start end = Timestamp(end or max(Timestamp(self._end.max()), self._start.max())) end = datetime.utcnow() if repr(end) == else end start = start if self.check_in_bounds(start) else self._lbound end = end if self.check_in_bounds(end) else self._rbound if scale == : scale = self._auto_select_scale(start, end) if scale == : start_dts = list(self._start.dropna().values) end_dts = list(self._end.dropna().values) dts = map(Timestamp, set(start_dts + end_dts)) dts = filter(lambda ts: self.check_in_bounds(ts) and ts >= start and ts <= end, dts) return dts freq = dict(daily=, weekly=, monthly=, quarterly=, yearly=) offset = dict(daily=off.Day(n=0), weekly=off.Week(), monthly=off.MonthEnd(), quarterly=off.QuarterEnd(), yearly=off.YearEnd()) end_ = end + off.Week() if scale == else end ret = list(pd.date_range(start + offset[scale], end_, freq=freq[scale])) ret = [dt for dt in ret if dt <= end] ret = [start] + ret if ret and start < ret[0] else ret ret = ret + [end] if ret and end > ret[-1] else ret ret = filter(lambda ts: self.check_in_bounds(ts), ret) return ret
Returns a list of dates sampled according to the specified parameters. :param scale: {'auto', 'maximum', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly'} Scale specifies the sampling intervals. 'auto' will heuristically choose a scale for quick processing :param start: First date that will be included. :param end: Last date that will be included
### Input: Returns a list of dates sampled according to the specified parameters. :param scale: {'auto', 'maximum', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly'} Scale specifies the sampling intervals. 'auto' will heuristically choose a scale for quick processing :param start: First date that will be included. :param end: Last date that will be included ### Response: #vtb def get_dates_range(self, scale=, start=None, end=None, date_max=): automaximumdailyweeklymonthlyquarterlyyearlyauto if scale not in [, , , , , , ]: raise ValueError( % scale) start = Timestamp(start or self._start.min() or date_max) start = Timestamp(date_max) if repr(start) == else start end = Timestamp(end or max(Timestamp(self._end.max()), self._start.max())) end = datetime.utcnow() if repr(end) == else end start = start if self.check_in_bounds(start) else self._lbound end = end if self.check_in_bounds(end) else self._rbound if scale == : scale = self._auto_select_scale(start, end) if scale == : start_dts = list(self._start.dropna().values) end_dts = list(self._end.dropna().values) dts = map(Timestamp, set(start_dts + end_dts)) dts = filter(lambda ts: self.check_in_bounds(ts) and ts >= start and ts <= end, dts) return dts freq = dict(daily=, weekly=, monthly=, quarterly=, yearly=) offset = dict(daily=off.Day(n=0), weekly=off.Week(), monthly=off.MonthEnd(), quarterly=off.QuarterEnd(), yearly=off.YearEnd()) end_ = end + off.Week() if scale == else end ret = list(pd.date_range(start + offset[scale], end_, freq=freq[scale])) ret = [dt for dt in ret if dt <= end] ret = [start] + ret if ret and start < ret[0] else ret ret = ret + [end] if ret and end > ret[-1] else ret ret = filter(lambda ts: self.check_in_bounds(ts), ret) return ret
#vtb def acts_as_state_machine(cls): assert not hasattr(cls, ), .format(cls) assert not hasattr(cls, ), .format(cls) def get_states(obj): return StateInfo.get_states(obj.__class__) def is_transition_failure_handler(obj): return all([ any([ inspect.ismethod(obj), inspect.isfunction(obj), ]), getattr(obj, , False), ]) transition_failure_handlers = sorted( [value for name, value in inspect.getmembers(cls, is_transition_failure_handler)], key=lambda m: getattr(m, , 0), ) setattr(cls, , transition_failure_handlers) cls.current_state = property(fget=StateInfo.get_current_state) cls.states = property(fget=get_states) return cls
a decorator which sets two properties on a class: * the 'current_state' property: a read-only property, returning the state machine's current state, as 'State' object * the 'states' property: a tuple of all valid state machine states, as 'State' objects class objects may use current_state and states freely :param cls: :return:
### Input: a decorator which sets two properties on a class: * the 'current_state' property: a read-only property, returning the state machine's current state, as 'State' object * the 'states' property: a tuple of all valid state machine states, as 'State' objects class objects may use current_state and states freely :param cls: :return: ### Response: #vtb def acts_as_state_machine(cls): assert not hasattr(cls, ), .format(cls) assert not hasattr(cls, ), .format(cls) def get_states(obj): return StateInfo.get_states(obj.__class__) def is_transition_failure_handler(obj): return all([ any([ inspect.ismethod(obj), inspect.isfunction(obj), ]), getattr(obj, , False), ]) transition_failure_handlers = sorted( [value for name, value in inspect.getmembers(cls, is_transition_failure_handler)], key=lambda m: getattr(m, , 0), ) setattr(cls, , transition_failure_handlers) cls.current_state = property(fget=StateInfo.get_current_state) cls.states = property(fget=get_states) return cls
#vtb def power_cycle_vm(virtual_machine, action=): s name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine onpower onNot enough permissions. Required privilege: {}offpower offNot enough permissions. Required privilege: {}The given action is not supported An error occurred during poweroperation, a file was not found: {0}'.format(exc)])) return virtual_machine
Powers on/off a virtual machine specified by its name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine
### Input: Powers on/off a virtual machine specified by its name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine ### Response: #vtb def power_cycle_vm(virtual_machine, action=): s name. virtual_machine vim.VirtualMachine object to power on/off virtual machine action Operation option to power on/off the machine onpower onNot enough permissions. Required privilege: {}offpower offNot enough permissions. Required privilege: {}The given action is not supported An error occurred during poweroperation, a file was not found: {0}'.format(exc)])) return virtual_machine
#vtb def __fix_field_date(self, item, attribute): field_date = str_to_datetime(item[attribute]) try: _ = int(field_date.strftime("%z")[0:3]) except ValueError: logger.warning("%s in commit %s has a wrong format", attribute, item[]) item[attribute] = field_date.replace(tzinfo=None).isoformat()
Fix possible errors in the field date
### Input: Fix possible errors in the field date ### Response: #vtb def __fix_field_date(self, item, attribute): field_date = str_to_datetime(item[attribute]) try: _ = int(field_date.strftime("%z")[0:3]) except ValueError: logger.warning("%s in commit %s has a wrong format", attribute, item[]) item[attribute] = field_date.replace(tzinfo=None).isoformat()
#vtb def date_from_number(self, value): if not isinstance(value, numbers.Real): return None delta = datetime.timedelta(days=value) return self._null_date + delta
Converts a float value to corresponding datetime instance.
### Input: Converts a float value to corresponding datetime instance. ### Response: #vtb def date_from_number(self, value): if not isinstance(value, numbers.Real): return None delta = datetime.timedelta(days=value) return self._null_date + delta
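Illustrative only, since the epoch lives in self._null_date (set elsewhere); with the common spreadsheet epoch of 1899-12-30 the conversion would look like:

    # assuming obj._null_date == datetime.datetime(1899, 12, 30)
    obj.date_from_number(1.5)     # -> datetime.datetime(1899, 12, 31, 12, 0)
    obj.date_from_number("1.5")   # -> None, strings are not numbers.Real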
#vtb def computePhase2(self, doLearn=False): self.confidence[][c,i] = maxConfidence
This is the phase 2 of learning, inference and multistep prediction. During this phase, all the cells with lateral support have their predictedState turned on and the firing segments are queued up for updates. Parameters: -------------------------------------------- doLearn: Boolean flag to queue segment updates during learning retval: ?
### Input: This is the phase 2 of learning, inference and multistep prediction. During this phase, all the cells with lateral support have their predictedState turned on and the firing segments are queued up for updates. Parameters: -------------------------------------------- doLearn: Boolean flag to queue segment updates during learning retval: ? ### Response: #vtb def computePhase2(self, doLearn=False): self.confidence[][c,i] = maxConfidence
#vtb def row_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d
Returns a sqlite row factory that returns a dictionary
### Input: Returns a sqlite row factory that returns a dictionary ### Response: #vtb def row_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d
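This plugs straight into the standard-library sqlite3 module; a small usage sketch (in-memory database and query are illustrative):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.row_factory = row_factory   # fetched rows now come back as dicts
    row = conn.execute("SELECT 1 AS id, 'alice' AS name").fetchone()
    # row == {'id': 1, 'name': 'alice'}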
#vtb def _interpolate_missing_data(data, mask, method=): from scipy import interpolate data_interp = np.array(data, copy=True) if len(data_interp.shape) != 2: raise ValueError() if mask.shape != data.shape: raise ValueError() y, x = np.indices(data_interp.shape) xy = np.dstack((x[~mask].ravel(), y[~mask].ravel()))[0] z = data_interp[~mask].ravel() if method == : interpol = interpolate.NearestNDInterpolator(xy, z) elif method == : interpol = interpolate.CloughTocher2DInterpolator(xy, z) else: raise ValueError() xy_missing = np.dstack((x[mask].ravel(), y[mask].ravel()))[0] data_interp[mask] = interpol(xy_missing) return data_interp
Interpolate missing data as identified by the ``mask`` keyword. Parameters ---------- data : 2D `~numpy.ndarray` An array containing the 2D image. mask : 2D bool `~numpy.ndarray` A 2D boolean mask array with the same shape as the input ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. The masked data points are those that will be interpolated. method : {'cubic', 'nearest'}, optional The method used to interpolate the missing data: * ``'cubic'``: Masked data are interpolated using 2D cubic splines. This is the default. * ``'nearest'``: Masked data are interpolated using nearest-neighbor interpolation. Returns ------- data_interp : 2D `~numpy.ndarray` The interpolated 2D image.
### Input: Interpolate missing data as identified by the ``mask`` keyword. Parameters ---------- data : 2D `~numpy.ndarray` An array containing the 2D image. mask : 2D bool `~numpy.ndarray` A 2D boolean mask array with the same shape as the input ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. The masked data points are those that will be interpolated. method : {'cubic', 'nearest'}, optional The method used to interpolate the missing data: * ``'cubic'``: Masked data are interpolated using 2D cubic splines. This is the default. * ``'nearest'``: Masked data are interpolated using nearest-neighbor interpolation. Returns ------- data_interp : 2D `~numpy.ndarray` The interpolated 2D image. ### Response: #vtb def _interpolate_missing_data(data, mask, method=): from scipy import interpolate data_interp = np.array(data, copy=True) if len(data_interp.shape) != 2: raise ValueError() if mask.shape != data.shape: raise ValueError() y, x = np.indices(data_interp.shape) xy = np.dstack((x[~mask].ravel(), y[~mask].ravel()))[0] z = data_interp[~mask].ravel() if method == : interpol = interpolate.NearestNDInterpolator(xy, z) elif method == : interpol = interpolate.CloughTocher2DInterpolator(xy, z) else: raise ValueError() xy_missing = np.dstack((x[mask].ravel(), y[mask].ravel()))[0] data_interp[mask] = interpol(xy_missing) return data_interp
#vtb def copy_framebuffer(self, dst, src) -> None: self.mglo.copy_framebuffer(dst.mglo, src.mglo)
Copy framebuffer content. Use this method to: - blit framebuffers. - copy framebuffer content into a texture. - downsample framebuffers. (this allows reading the framebuffer's content) - downsample a framebuffer directly to a texture. Args: dst (Framebuffer or Texture): Destination framebuffer or texture. src (Framebuffer): Source framebuffer.
### Input: Copy framebuffer content. Use this method to: - blit framebuffers. - copy framebuffer content into a texture. - downsample framebuffers. (this allows reading the framebuffer's content) - downsample a framebuffer directly to a texture. Args: dst (Framebuffer or Texture): Destination framebuffer or texture. src (Framebuffer): Source framebuffer. ### Response: #vtb def copy_framebuffer(self, dst, src) -> None: self.mglo.copy_framebuffer(dst.mglo, src.mglo)
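The method above mirrors ModernGL's Context.copy_framebuffer; a hedged usage sketch, assuming a standalone ModernGL context is available:

```python
import moderngl

# Blit one framebuffer into another, then downsample a framebuffer into a texture.
ctx = moderngl.create_standalone_context()
src = ctx.simple_framebuffer((256, 256))
dst = ctx.simple_framebuffer((256, 256))
tex = ctx.texture((256, 256), components=4)
ctx.copy_framebuffer(dst, src)  # framebuffer -> framebuffer
ctx.copy_framebuffer(tex, src)  # framebuffer -> texture
```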
#vtb def swipe_bottom(self, steps=10, *args, **selectors): self.device(**selectors).swipe.down(steps=steps)
Swipe the UI object with *selectors* from center to bottom See `Swipe Left` for more details.
### Input: Swipe the UI object with *selectors* from center to bottom See `Swipe Left` for more details. ### Response: #vtb def swipe_bottom(self, steps=10, *args, **selectors): self.device(**selectors).swipe.down(steps=steps)
#vtb def normalize(arg=None): res = t_arg = type(arg) if t_arg in (list, tuple): for i in arg: res += normalize(i) elif t_arg is dict: keys = arg.keys() keys.sort() for key in keys: res += % (normalize(key), normalize(arg[key])) elif t_arg is unicode: res = arg.encode() elif t_arg is bool: res = if arg else elif arg != None: res = str(arg) return res
Normalizes an argument for signing purpose. This is used for normalizing the arguments of RPC method calls. :param arg: The argument to normalize :return: A string representating the normalized argument. .. doctest:: >>> from cloud.rpc import normalize >>> normalize(['foo', 42, 'bar']) 'foo42bar' >>> normalize({'yellow': 1, 'red': 2, 'pink' : 3}) 'pink3red2yellow1' >>> normalize(['foo', 42, {'yellow': 1, 'red': 2, 'pink' : 3}, 'bar']) 'foo42pink3red2yellow1bar' >>> normalize(None) '' >>> normalize([None, 1,2]) '12' >>> normalize({2: [None, 1,2], 3: None, 4:5}) '212345'
### Input: Normalizes an argument for signing purpose. This is used for normalizing the arguments of RPC method calls. :param arg: The argument to normalize :return: A string representating the normalized argument. .. doctest:: >>> from cloud.rpc import normalize >>> normalize(['foo', 42, 'bar']) 'foo42bar' >>> normalize({'yellow': 1, 'red': 2, 'pink' : 3}) 'pink3red2yellow1' >>> normalize(['foo', 42, {'yellow': 1, 'red': 2, 'pink' : 3}, 'bar']) 'foo42pink3red2yellow1bar' >>> normalize(None) '' >>> normalize([None, 1,2]) '12' >>> normalize({2: [None, 1,2], 3: None, 4:5}) '212345' ### Response: #vtb def normalize(arg=None): res = t_arg = type(arg) if t_arg in (list, tuple): for i in arg: res += normalize(i) elif t_arg is dict: keys = arg.keys() keys.sort() for key in keys: res += % (normalize(key), normalize(arg[key])) elif t_arg is unicode: res = arg.encode() elif t_arg is bool: res = if arg else elif arg != None: res = str(arg) return res
#vtb def set_mtime(self, name, mtime, size): self.check_write(name) os.utime(os.path.join(self.cur_dir, name), (-1, mtime))
Set modification time on file.
### Input: Set modification time on file. ### Response: #vtb def set_mtime(self, name, mtime, size): self.check_write(name) os.utime(os.path.join(self.cur_dir, name), (-1, mtime))
#vtb def to_json(self): json_dict = self.to_json_basic() json_dict['closed'] = self.closed json_dict['opened'] = self.opened json_dict['closed_long'] = self.closed_long return json.dumps(json_dict)
:return: str
### Input: :return: str ### Response: #vtb def to_json(self): json_dict = self.to_json_basic() json_dict['closed'] = self.closed json_dict['opened'] = self.opened json_dict['closed_long'] = self.closed_long return json.dumps(json_dict)
#vtb def _reload(self, module=None): if self.module is None: raise RuntimeError elif module is None: import importlib module = ModuleSource(importlib.reload(module)) elif module.name != self.module: raise RuntimeError if self.name in module.funcs: func = module.funcs[self.name] self.__init__(func=func) else: self.__init__(func=NULL_FORMULA) return self
Reload the source function from the source module. **Internal use only** Update the source function of the formula. This method is used to update the underlying formula when the source code of the module from which the source function is read is modified. If the formula was not created from a module, an error is raised. If ``module_`` is not given, the source module of the formula is reloaded. If ``module_`` is given and matches the source module, then the module_ is used without being reloaded. If ``module_`` is given and does not match the source module of the formula, an error is raised. Args: module_: A ``ModuleSource`` object Returns: self
### Input: Reload the source function from the source module. **Internal use only** Update the source function of the formula. This method is used to updated the underlying formula when the source code of the module in which the source function is read from is modified. If the formula was not created from a module, an error is raised. If ``module_`` is not given, the source module of the formula is reloaded. If ``module_`` is given and matches the source module, then the module_ is used without being reloaded. If ``module_`` is given and does not match the source module of the formula, an error is raised. Args: module_: A ``ModuleSource`` object Returns: self ### Response: #vtb def _reload(self, module=None): if self.module is None: raise RuntimeError elif module is None: import importlib module = ModuleSource(importlib.reload(module)) elif module.name != self.module: raise RuntimeError if self.name in module.funcs: func = module.funcs[self.name] self.__init__(func=func) else: self.__init__(func=NULL_FORMULA) return self
#vtb def lastOfferedMonth(self): lastOfferedSeries = self.event_set.order_by().first() return (lastOfferedSeries.year,lastOfferedSeries.month)
Sometimes a Series is associated with a month other than the one in which the first class begins, so this returns a (year,month) tuple that can be used in admin instead.
### Input: Sometimes a Series is associated with a month other than the one in which the first class begins, so this returns a (year,month) tuple that can be used in admin instead. ### Response: #vtb def lastOfferedMonth(self): lastOfferedSeries = self.event_set.order_by().first() return (lastOfferedSeries.year,lastOfferedSeries.month)
#vtb def clean_username(self, username): username_case = settings.CAS_FORCE_CHANGE_USERNAME_CASE if username_case == 'lower': username = username.lower() elif username_case == 'upper': username = username.upper() elif username_case is not None: raise ImproperlyConfigured( "Invalid value for the CAS_FORCE_CHANGE_USERNAME_CASE setting. " "Valid values are `'lower'`, `'upper'`, and `None`.") return username
Performs any cleaning on the "username" prior to using it to get or create the user object. Returns the cleaned username. By default, changes the username case according to `settings.CAS_FORCE_CHANGE_USERNAME_CASE`.
### Input: Performs any cleaning on the "username" prior to using it to get or create the user object. Returns the cleaned username. By default, changes the username case according to `settings.CAS_FORCE_CHANGE_USERNAME_CASE`. ### Response: #vtb def clean_username(self, username): username_case = settings.CAS_FORCE_CHANGE_USERNAME_CASE if username_case == 'lower': username = username.lower() elif username_case == 'upper': username = username.upper() elif username_case is not None: raise ImproperlyConfigured( "Invalid value for the CAS_FORCE_CHANGE_USERNAME_CASE setting. " "Valid values are `'lower'`, `'upper'`, and `None`.") return username
#vtb def _retrieve(self, namespace, stream, start_id, end_time, order, limit, configuration): stream = self.get_stream(namespace, stream, configuration) events = stream.iterator(start_id, uuid_from_kronos_time(end_time, _type=UUIDType.HIGHEST), order == ResultOrder.DESCENDING, limit) events = events.__iter__() event = events.next() if event.id != start_id: yield event.json while True: yield events.next().json
Retrieve events for `stream` between `start_id` and `end_time`. `stream` : The stream to return events for. `start_id` : Return events with id > `start_id`. `end_time` : Return events ending <= `end_time`. `order` : Whether to return the results in ResultOrder.ASCENDING or ResultOrder.DESCENDING time-order. `configuration` : A dictionary of settings to override any default settings, such as number of shards or width of a time interval.
### Input: Retrieve events for `stream` between `start_id` and `end_time`. `stream` : The stream to return events for. `start_id` : Return events with id > `start_id`. `end_time` : Return events ending <= `end_time`. `order` : Whether to return the results in ResultOrder.ASCENDING or ResultOrder.DESCENDING time-order. `configuration` : A dictionary of settings to override any default settings, such as number of shards or width of a time interval. ### Response: #vtb def _retrieve(self, namespace, stream, start_id, end_time, order, limit, configuration): stream = self.get_stream(namespace, stream, configuration) events = stream.iterator(start_id, uuid_from_kronos_time(end_time, _type=UUIDType.HIGHEST), order == ResultOrder.DESCENDING, limit) events = events.__iter__() event = events.next() if event.id != start_id: yield event.json while True: yield events.next().json
#vtb def build_struct_type(s_sdt): s_dt = nav_one(s_sdt).S_DT[17]() struct = ET.Element(, name=s_dt.name) first_filter = lambda selected: not nav_one(selected).S_MBR[46, ]() s_mbr = nav_any(s_sdt).S_MBR[44](first_filter) while s_mbr: s_dt = nav_one(s_mbr).S_DT[45]() type_name = get_type_name(s_dt) ET.SubElement(struct, , name=s_mbr.name, type=type_name) s_mbr = nav_one(s_mbr).S_MBR[46, ]() return struct
Build an xsd complexType out of a S_SDT.
### Input: Build an xsd complexType out of a S_SDT. ### Response: #vtb def build_struct_type(s_sdt): s_dt = nav_one(s_sdt).S_DT[17]() struct = ET.Element(, name=s_dt.name) first_filter = lambda selected: not nav_one(selected).S_MBR[46, ]() s_mbr = nav_any(s_sdt).S_MBR[44](first_filter) while s_mbr: s_dt = nav_one(s_mbr).S_DT[45]() type_name = get_type_name(s_dt) ET.SubElement(struct, , name=s_mbr.name, type=type_name) s_mbr = nav_one(s_mbr).S_MBR[46, ]() return struct
#vtb def add_node(self, kind, image_id, image_user, flavor, security_group, image_userdata=, name=None, **extra): if not self._NODE_KIND_RE.match(kind): raise ValueError( "Invalid name `{kind}`. The `kind` argument may only contain" " alphanumeric characters, and must not end with a digit." .format(kind=kind)) if kind not in self.nodes: self.nodes[kind] = [] extra.update( cloud_provider=self._cloud_provider, cluster_name=self.name, flavor=flavor, image_id=image_id, image_user=image_user, image_userdata=image_userdata, kind=kind, security_group=security_group, ) for attr in ( , , , , , , , , ): if attr not in extra: extra[attr] = getattr(self, attr) if not name: name = self._naming_policy.new(**extra) else: self._naming_policy.use(kind, name) node = Node(name=name, **extra) self.nodes[kind].append(node) return node
Adds a new node to the cluster. This factory method provides an easy way to add a new node to the cluster by specifying all relevant parameters. The node does not get started nor setup automatically, this has to be done manually afterwards. :param str kind: kind of node to start. this refers to the groups defined in the ansible setup provider :py:class:`elasticluster.providers.AnsibleSetupProvider` Please note that this can only contain alphanumeric characters and hyphens (and must not end with a digit), as it is used to build a valid hostname :param str image_id: image id to use for the cloud instance (e.g. ami on amazon) :param str image_user: user to login on given image :param str flavor: machine type to use for cloud instance :param str security_group: security group that defines firewall rules to the instance :param str image_userdata: commands to execute after instance starts :param str name: name of this node, automatically generated if None :raises: ValueError: `kind` argument is an invalid string. :return: created :py:class:`Node`
### Input: Adds a new node to the cluster. This factory method provides an easy way to add a new node to the cluster by specifying all relevant parameters. The node does not get started nor setup automatically, this has to be done manually afterwards. :param str kind: kind of node to start. this refers to the groups defined in the ansible setup provider :py:class:`elasticluster.providers.AnsibleSetupProvider` Please note that this can only contain alphanumeric characters and hyphens (and must not end with a digit), as it is used to build a valid hostname :param str image_id: image id to use for the cloud instance (e.g. ami on amazon) :param str image_user: user to login on given image :param str flavor: machine type to use for cloud instance :param str security_group: security group that defines firewall rules to the instance :param str image_userdata: commands to execute after instance starts :param str name: name of this node, automatically generated if None :raises: ValueError: `kind` argument is an invalid string. :return: created :py:class:`Node` ### Response: #vtb def add_node(self, kind, image_id, image_user, flavor, security_group, image_userdata=, name=None, **extra): if not self._NODE_KIND_RE.match(kind): raise ValueError( "Invalid name `{kind}`. The `kind` argument may only contain" " alphanumeric characters, and must not end with a digit." .format(kind=kind)) if kind not in self.nodes: self.nodes[kind] = [] extra.update( cloud_provider=self._cloud_provider, cluster_name=self.name, flavor=flavor, image_id=image_id, image_user=image_user, image_userdata=image_userdata, kind=kind, security_group=security_group, ) for attr in ( , , , , , , , , ): if attr not in extra: extra[attr] = getattr(self, attr) if not name: name = self._naming_policy.new(**extra) else: self._naming_policy.use(kind, name) node = Node(name=name, **extra) self.nodes[kind].append(node) return node
#vtb def refresh(self, accept=MEDIA_TYPE_TAXII_V20): self.refresh_information(accept) self.refresh_collections(accept)
Update the API Root's information and list of Collections
### Input: Update the API Root's information and list of Collections ### Response: #vtb def refresh(self, accept=MEDIA_TYPE_TAXII_V20): self.refresh_information(accept) self.refresh_collections(accept)
#vtb def _summarize_in_roi(self, label_mask, num_clusters_per_roi=1, metric=): this_label = self.carpet[label_mask.flatten(), :] if num_clusters_per_roi == 1: out_matrix = self._summary_func(this_label, axis=0) else: out_matrix = self._make_clusters(this_label, num_clusters_per_roi, metric) return out_matrix
returns a single row summarizing (typically via mean) all rows in an ROI.
### Input: returns a single row summarizing (typically via mean) all rows in an ROI. ### Response: #vtb def _summarize_in_roi(self, label_mask, num_clusters_per_roi=1, metric=): this_label = self.carpet[label_mask.flatten(), :] if num_clusters_per_roi == 1: out_matrix = self._summary_func(this_label, axis=0) else: out_matrix = self._make_clusters(this_label, num_clusters_per_roi, metric) return out_matrix
#vtb def _mem(self): value = int(psutil.virtual_memory().percent) set_metric("memory", value, category=self.category) gauge("memory", value)
Record Memory usage.
### Input: Record Memory usage. ### Response: #vtb def _mem(self): value = int(psutil.virtual_memory().percent) set_metric("memory", value, category=self.category) gauge("memory", value)
#vtb def _parse_processor_embedded_health(self, data): processor = self.get_value_as_list((data[] []), ) if processor is None: msg = "Unable to get cpu data. Error: Data missing" raise exception.IloError(msg) cpus = 0 for proc in processor: for val in proc.values(): processor_detail = val[] proc_core_threads = processor_detail.split() for x in proc_core_threads: if "thread" in x: v = x.split() try: cpus = cpus + int(v[0]) except ValueError: msg = ("Unable to get cpu data. " "The Value %s returned couldn't be parsed" % v[0]) raise exception.IloError(msg) cpu_arch = 'x86_64' return cpus, cpu_arch
Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: processor details like cpu arch and number of cpus.
### Input: Parse the get_host_health_data() for essential properties :param data: the output returned by get_host_health_data() :returns: processor details like cpu arch and number of cpus. ### Response: #vtb def _parse_processor_embedded_health(self, data): processor = self.get_value_as_list((data[] []), ) if processor is None: msg = "Unable to get cpu data. Error: Data missing" raise exception.IloError(msg) cpus = 0 for proc in processor: for val in proc.values(): processor_detail = val[] proc_core_threads = processor_detail.split() for x in proc_core_threads: if "thread" in x: v = x.split() try: cpus = cpus + int(v[0]) except ValueError: msg = ("Unable to get cpu data. " "The Value %s returned couldnx86_64' return cpus, cpu_arch
#vtb def noise_set_type(n: tcod.noise.Noise, typ: int) -> None: n.algorithm = typ
Set a Noise object's default noise algorithm. Args: typ (int): Any NOISE_* constant.
### Input: Set a Noise object's default noise algorithm. Args: typ (int): Any NOISE_* constant. ### Response: #vtb def noise_set_type(n: tcod.noise.Noise, typ: int) -> None: n.algorithm = typ
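A hedged usage sketch for the wrapper above, assuming python-tcod's Noise class and the legacy NOISE_* constants are available (both are assumptions, since only the wrapper itself appears in the record):

```python
import tcod
import tcod.noise

# Switch a 2D noise generator to the simplex algorithm via the legacy helper.
noise = tcod.noise.Noise(2)
tcod.noise_set_type(noise, tcod.NOISE_SIMPLEX)
print(noise.algorithm)  # now holds the simplex algorithm constant
```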
#vtb def update_project(config, task_presenter, results, long_description, tutorial, watch): if watch: res = _update_project_watch(config, task_presenter, results, long_description, tutorial) else: res = _update_project(config, task_presenter, results, long_description, tutorial) click.echo(res)
Update project templates and information.
### Input: Update project templates and information. ### Response: #vtb def update_project(config, task_presenter, results, long_description, tutorial, watch): if watch: res = _update_project_watch(config, task_presenter, results, long_description, tutorial) else: res = _update_project(config, task_presenter, results, long_description, tutorial) click.echo(res)
#vtb def send_alert_to_configured_integration(integration_alert): try: alert = integration_alert.alert configured_integration = integration_alert.configured_integration integration = configured_integration.integration integration_actions_instance = configured_integration.integration.module alert_fields = dict() if integration.required_fields: if not all([hasattr(alert, _) for _ in integration.required_fields]): logger.debug("Alert does not have all required_fields (%s) for integration %s, skipping", integration.required_fields, integration.name) return exclude_fields = ["alert_type", "service_type"] alert_fields = {} for field in alert.__slots__: if hasattr(alert, field) and field not in exclude_fields: alert_fields[field] = getattr(alert, field) logger.debug("Sending alert %s to %s", alert_fields, integration.name) output_data, output_file_content = integration_actions_instance.send_event(alert_fields) if integration.polling_enabled: integration_alert.status = IntegrationAlertStatuses.POLLING.name polling_integration_alerts.append(integration_alert) else: integration_alert.status = IntegrationAlertStatuses.DONE.name integration_alert.send_time = get_current_datetime_utc() integration_alert.output_data = json.dumps(output_data) except exceptions.IntegrationMissingRequiredFieldError as exc: logger.exception("Send response formatting for integration alert %s failed. Missing required fields", integration_alert, exc.message) integration_alert.status = IntegrationAlertStatuses.ERROR_MISSING_SEND_FIELDS.name except exceptions.IntegrationOutputFormatError: logger.exception("Send response formatting for integration alert %s failed", integration_alert) integration_alert.status = IntegrationAlertStatuses.ERROR_SENDING_FORMATTING.name except exceptions.IntegrationSendEventError as exc: integration_send_retries = integration_alert.retries if integration_alert.retries <= MAX_SEND_RETRIES \ else MAX_SEND_RETRIES send_retries_left = integration_send_retries - 1 integration_alert.retries = send_retries_left logger.error("Sending integration alert %s failed. Message: %s. Retries left: %s", integration_alert, exc.message, send_retries_left) if send_retries_left == 0: integration_alert.status = IntegrationAlertStatuses.ERROR_SENDING.name if send_retries_left > 0: sleep(SEND_ALERT_DATA_INTERVAL) send_alert_to_configured_integration(integration_alert)
Send IntegrationAlert to configured integration.
### Input: Send IntegrationAlert to configured integration. ### Response: #vtb def send_alert_to_configured_integration(integration_alert): try: alert = integration_alert.alert configured_integration = integration_alert.configured_integration integration = configured_integration.integration integration_actions_instance = configured_integration.integration.module alert_fields = dict() if integration.required_fields: if not all([hasattr(alert, _) for _ in integration.required_fields]): logger.debug("Alert does not have all required_fields (%s) for integration %s, skipping", integration.required_fields, integration.name) return exclude_fields = ["alert_type", "service_type"] alert_fields = {} for field in alert.__slots__: if hasattr(alert, field) and field not in exclude_fields: alert_fields[field] = getattr(alert, field) logger.debug("Sending alert %s to %s", alert_fields, integration.name) output_data, output_file_content = integration_actions_instance.send_event(alert_fields) if integration.polling_enabled: integration_alert.status = IntegrationAlertStatuses.POLLING.name polling_integration_alerts.append(integration_alert) else: integration_alert.status = IntegrationAlertStatuses.DONE.name integration_alert.send_time = get_current_datetime_utc() integration_alert.output_data = json.dumps(output_data) except exceptions.IntegrationMissingRequiredFieldError as exc: logger.exception("Send response formatting for integration alert %s failed. Missing required fields", integration_alert, exc.message) integration_alert.status = IntegrationAlertStatuses.ERROR_MISSING_SEND_FIELDS.name except exceptions.IntegrationOutputFormatError: logger.exception("Send response formatting for integration alert %s failed", integration_alert) integration_alert.status = IntegrationAlertStatuses.ERROR_SENDING_FORMATTING.name except exceptions.IntegrationSendEventError as exc: integration_send_retries = integration_alert.retries if integration_alert.retries <= MAX_SEND_RETRIES \ else MAX_SEND_RETRIES send_retries_left = integration_send_retries - 1 integration_alert.retries = send_retries_left logger.error("Sending integration alert %s failed. Message: %s. Retries left: %s", integration_alert, exc.message, send_retries_left) if send_retries_left == 0: integration_alert.status = IntegrationAlertStatuses.ERROR_SENDING.name if send_retries_left > 0: sleep(SEND_ALERT_DATA_INTERVAL) send_alert_to_configured_integration(integration_alert)
#vtb def _fix_key(key): if isinstance(key, unicode): return key if isinstance(key, str): raise TypeError(key)
Normalize keys to Unicode strings.
### Input: Normalize keys to Unicode strings. ### Response: #vtb def _fix_key(key): if isinstance(key, unicode): return key if isinstance(key, str): raise TypeError(key)
#vtb def select_with_correspondence( self, selector, result_selector=KeyedElement): if self.closed(): raise ValueError("Attempt to call select_with_correspondence() on a " "closed Queryable.") if not is_callable(selector): raise TypeError("select_with_correspondence() parameter selector={0} is " "not callable".format(repr(selector))) if not is_callable(result_selector): raise TypeError("select_with_correspondence() parameter result_selector={0} is " "not callable".format(repr(result_selector))) return self._create(result_selector(elem, selector(elem)) for elem in iter(self))
Apply a callable to each element in an input sequence, generating a new sequence of 2-tuples where the first element is the input value and the second is the transformed input value. The generated sequence is lazily evaluated. Note: This method uses deferred execution. Args: selector: A unary function mapping a value in the source sequence to the second argument of the result selector. result_selector: A binary callable mapping the of a value in the source sequence and the transformed value to the corresponding value in the generated sequence. The two positional arguments of the selector function are the original source element and the transformed value. The return value should be the corresponding value in the result sequence. The default selector produces a KeyedElement containing the index and the element giving this function similar behaviour to the built-in enumerate(). Returns: When using the default selector, a Queryable whose elements are KeyedElements where the first element is from the input sequence and the second is the result of invoking the transform function on the first value. Raises: ValueError: If this Queryable has been closed. TypeError: If transform is not callable.
### Input: Apply a callable to each element in an input sequence, generating a new sequence of 2-tuples where the first element is the input value and the second is the transformed input value. The generated sequence is lazily evaluated. Note: This method uses deferred execution. Args: selector: A unary function mapping a value in the source sequence to the second argument of the result selector. result_selector: A binary callable mapping the of a value in the source sequence and the transformed value to the corresponding value in the generated sequence. The two positional arguments of the selector function are the original source element and the transformed value. The return value should be the corresponding value in the result sequence. The default selector produces a KeyedElement containing the index and the element giving this function similar behaviour to the built-in enumerate(). Returns: When using the default selector, a Queryable whose elements are KeyedElements where the first element is from the input sequence and the second is the result of invoking the transform function on the first value. Raises: ValueError: If this Queryable has been closed. TypeError: If transform is not callable. ### Response: #vtb def select_with_correspondence( self, selector, result_selector=KeyedElement): if self.closed(): raise ValueError("Attempt to call select_with_correspondence() on a " "closed Queryable.") if not is_callable(selector): raise TypeError("select_with_correspondence() parameter selector={0} is " "not callable".format(repr(selector))) if not is_callable(result_selector): raise TypeError("select_with_correspondence() parameter result_selector={0} is " "not callable".format(repr(result_selector))) return self._create(result_selector(elem, selector(elem)) for elem in iter(self))
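The Queryable method above matches the asq library's API; a hedged usage sketch, assuming asq's query() initiator is available (the exact package layout is an assumption):

```python
from asq.initiators import query

# Pair each source element with its square; the default result selector
# yields KeyedElement pairs of (original element, transformed value).
pairs = query([1, 2, 3]).select_with_correspondence(lambda x: x * x).to_list()
print(pairs)
```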
#vtb def delete(self, run_id): self.generic_dao.delete_record( self.metrics_collection_name, {"run_id": self._parse_run_id(run_id)})
Delete all metrics belonging to the given run. :param run_id: ID of the Run that the metric belongs to.
### Input: Delete all metrics belonging to the given run. :param run_id: ID of the Run that the metric belongs to. ### Response: #vtb def delete(self, run_id): self.generic_dao.delete_record( self.metrics_collection_name, {"run_id": self._parse_run_id(run_id)})
#vtb def acquire(self, signal=True): if not self.needs_lock: return with self.synclock: while not self.lock.acquire(False): self.synclock.wait() if signal: self.acquired_event(self) self.synclock.notify_all()
Locks the account. Method has no effect if the constructor argument `needs_lock` was set to False. :type signal: bool :param signal: Whether to emit the acquired_event signal.
### Input: Locks the account. Method has no effect if the constructor argument `needs_lock` was set to False. :type signal: bool :param signal: Whether to emit the acquired_event signal. ### Response: #vtb def acquire(self, signal=True): if not self.needs_lock: return with self.synclock: while not self.lock.acquire(False): self.synclock.wait() if signal: self.acquired_event(self) self.synclock.notify_all()
#vtb def _combine_indexers(old_key, shape, new_key): if not isinstance(old_key, VectorizedIndexer): old_key = _outer_to_vectorized_indexer(old_key, shape) if len(old_key.tuple) == 0: return new_key new_shape = np.broadcast(*old_key.tuple).shape if isinstance(new_key, VectorizedIndexer): new_key = _arrayize_vectorized_indexer(new_key, new_shape) else: new_key = _outer_to_vectorized_indexer(new_key, new_shape) return VectorizedIndexer(tuple(o[new_key.tuple] for o in np.broadcast_arrays(*old_key.tuple)))
Combine two indexers. Parameters ---------- old_key: ExplicitIndexer The first indexer for the original array shape: tuple of ints Shape of the original array to be indexed by old_key new_key: The second indexer for indexing original[old_key]
### Input: Combine two indexers. Parameters ---------- old_key: ExplicitIndexer The first indexer for the original array shape: tuple of ints Shape of the original array to be indexed by old_key new_key: The second indexer for indexing original[old_key] ### Response: #vtb def _combine_indexers(old_key, shape, new_key): if not isinstance(old_key, VectorizedIndexer): old_key = _outer_to_vectorized_indexer(old_key, shape) if len(old_key.tuple) == 0: return new_key new_shape = np.broadcast(*old_key.tuple).shape if isinstance(new_key, VectorizedIndexer): new_key = _arrayize_vectorized_indexer(new_key, new_shape) else: new_key = _outer_to_vectorized_indexer(new_key, new_shape) return VectorizedIndexer(tuple(o[new_key.tuple] for o in np.broadcast_arrays(*old_key.tuple)))
#vtb def stop(self, timeout=5): self.inner().stop(timeout=timeout) self.inner().reload()
Stop the container. The container must have been created. :param timeout: Timeout in seconds to wait for the container to stop before sending a ``SIGKILL``. Default: 5 (half the Docker default)
### Input: Stop the container. The container must have been created. :param timeout: Timeout in seconds to wait for the container to stop before sending a ``SIGKILL``. Default: 5 (half the Docker default) ### Response: #vtb def stop(self, timeout=5): self.inner().stop(timeout=timeout) self.inner().reload()
#vtb def doigrf(lon, lat, alt, date, **kwargs): from . import coefficients as cf gh, sv = [], [] colat = 90. - lat if lon < 0: lon = lon + 360. itype = 1 models, igrf12coeffs = cf.get_igrf12() if in list(kwargs.keys()): if kwargs[] == : psvmodels, psvcoeffs = cf.get_arch3k() elif kwargs[] == : psvmodels, psvcoeffs = cf.get_cals3k() elif kwargs[] == : psvmodels, psvcoeffs = cf.get_pfm9k() elif kwargs[] == : psvmodels, psvcoeffs = cf.get_hfm10k() elif kwargs[] == : psvmodels, psvcoeffs = cf.get_cals10k_2() elif kwargs[] == : psvmodels, psvcoeffs = cf.get_shadif14k() else: psvmodels, psvcoeffs = cf.get_cals10k() if in kwargs: if in list(kwargs.keys()): return psvmodels, psvcoeffs else: return models, igrf12coeffs if date < -12000: print() return if in list(kwargs.keys()) and kwargs[] == : if date < -10000: incr = 100 else: incr = 50 model = date - date % incr gh = psvcoeffs[psvmodels.index(int(model))] sv = old_div( (psvcoeffs[psvmodels.index(int(model + incr))] - gh), float(incr)) x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon) elif date < -1000: incr = 10 model = date - date % incr gh = psvcoeffs[psvmodels.index(int(model))] sv = old_div( (psvcoeffs[psvmodels.index(int(model + incr))] - gh), float(incr)) x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon) elif date < 1900: if kwargs[] == : incr = 50 else: incr = 10 model = date - date % incr gh = psvcoeffs[psvmodels.index(model)] if model + incr < 1900: sv = old_div( (psvcoeffs[psvmodels.index(model + incr)] - gh), float(incr)) else: field2 = igrf12coeffs[models.index(1940)][0:120] sv = old_div((field2 - gh), float(1940 - model)) x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon) else: model = date - date % 5 if date < 2015: gh = igrf12coeffs[models.index(model)] sv = old_div((igrf12coeffs[models.index(model + 5)] - gh), 5.) x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon) else: gh = igrf12coeffs[models.index(2015)] sv = igrf12coeffs[models.index(2015.20)] x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon) if in list(kwargs.keys()): return gh else: return x, y, z, f
Calculates the interpolated (<2015) or extrapolated (>2015) main field and secular variation coefficients and passes them to the Malin and Barraclough routine (function pmag.magsyn) to calculate the field from the coefficients. Parameters: ----------- lon : east longitude in degrees (0 to 360 or -180 to 180) lat : latitude in degrees (-90 to 90) alt : height above mean sea level in km (itype = 1 assumed) date : Required date in years and decimals of a year (A.D.) Optional Parameters: ----------- coeffs : if True, then return the gh coefficients mod : model to use ('arch3k','cals3k','pfm9k','hfm10k','cals10k.2','cals10k.1b','shadif14k') arch3k (Korte et al., 2009) cals3k (Korte and Constable, 2011) cals10k.1b (Korte et al., 2011) pfm9k (Nilsson et al., 2014) hfm.OL1.A1 (Constable et al., 2016) cals10k.2 (Constable et al., 2016) shadif14k (Pavon-Carrasco et al. (2014) NB : the first four of these models, are constrained to agree with gufm1 (Jackson et al., 2000) for the past four centuries Return ----------- x : north component of the magnetic field in nT y : east component of the magnetic field in nT z : downward component of the magnetic field in nT f : total magnetic field in nT By default, igrf12 coefficients are used between 1900 and 2020 from http://www.ngdc.noaa.gov/IAGA/vmod/igrf.html. To check the results you can run the interactive program at the NGDC www.ngdc.noaa.gov/geomag-web
### Input: Calculates the interpolated (<2015) or extrapolated (>2015) main field and secular variation coefficients and passes them to the Malin and Barraclough routine (function pmag.magsyn) to calculate the field from the coefficients. Parameters: ----------- lon : east longitude in degrees (0 to 360 or -180 to 180) lat : latitude in degrees (-90 to 90) alt : height above mean sea level in km (itype = 1 assumed) date : Required date in years and decimals of a year (A.D.) Optional Parameters: ----------- coeffs : if True, then return the gh coefficients mod : model to use ('arch3k','cals3k','pfm9k','hfm10k','cals10k.2','cals10k.1b','shadif14k') arch3k (Korte et al., 2009) cals3k (Korte and Constable, 2011) cals10k.1b (Korte et al., 2011) pfm9k (Nilsson et al., 2014) hfm.OL1.A1 (Constable et al., 2016) cals10k.2 (Constable et al., 2016) shadif14k (Pavon-Carrasco et al. (2014) NB : the first four of these models, are constrained to agree with gufm1 (Jackson et al., 2000) for the past four centuries Return ----------- x : north component of the magnetic field in nT y : east component of the magnetic field in nT z : downward component of the magnetic field in nT f : total magnetic field in nT By default, igrf12 coefficients are used between 1900 and 2020 from http://www.ngdc.noaa.gov/IAGA/vmod/igrf.html. To check the results you can run the interactive program at the NGDC www.ngdc.noaa.gov/geomag-web ### Response: #vtb def doigrf(lon, lat, alt, date, **kwargs): from . import coefficients as cf gh, sv = [], [] colat = 90. - lat if lon < 0: lon = lon + 360. itype = 1 models, igrf12coeffs = cf.get_igrf12() if in list(kwargs.keys()): if kwargs[] == : psvmodels, psvcoeffs = cf.get_arch3k() elif kwargs[] == : psvmodels, psvcoeffs = cf.get_cals3k() elif kwargs[] == : psvmodels, psvcoeffs = cf.get_pfm9k() elif kwargs[] == : psvmodels, psvcoeffs = cf.get_hfm10k() elif kwargs[] == : psvmodels, psvcoeffs = cf.get_cals10k_2() elif kwargs[] == : psvmodels, psvcoeffs = cf.get_shadif14k() else: psvmodels, psvcoeffs = cf.get_cals10k() if in kwargs: if in list(kwargs.keys()): return psvmodels, psvcoeffs else: return models, igrf12coeffs if date < -12000: print() return if in list(kwargs.keys()) and kwargs[] == : if date < -10000: incr = 100 else: incr = 50 model = date - date % incr gh = psvcoeffs[psvmodels.index(int(model))] sv = old_div( (psvcoeffs[psvmodels.index(int(model + incr))] - gh), float(incr)) x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon) elif date < -1000: incr = 10 model = date - date % incr gh = psvcoeffs[psvmodels.index(int(model))] sv = old_div( (psvcoeffs[psvmodels.index(int(model + incr))] - gh), float(incr)) x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon) elif date < 1900: if kwargs[] == : incr = 50 else: incr = 10 model = date - date % incr gh = psvcoeffs[psvmodels.index(model)] if model + incr < 1900: sv = old_div( (psvcoeffs[psvmodels.index(model + incr)] - gh), float(incr)) else: field2 = igrf12coeffs[models.index(1940)][0:120] sv = old_div((field2 - gh), float(1940 - model)) x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon) else: model = date - date % 5 if date < 2015: gh = igrf12coeffs[models.index(model)] sv = old_div((igrf12coeffs[models.index(model + 5)] - gh), 5.) 
x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon) else: gh = igrf12coeffs[models.index(2015)] sv = igrf12coeffs[models.index(2015.20)] x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon) if in list(kwargs.keys()): return gh else: return x, y, z, f
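A usage sketch for the field-model routine above, based purely on its docstring; the pmagpy.pmag import path is an assumption about where the function lives:

```python
import pmagpy.pmag as pmag

# Geomagnetic field at 30 E, 45 N, sea level, epoch 2005.0 (default IGRF-12 coefficients).
x, y, z, f = pmag.doigrf(30.0, 45.0, 0.0, 2005.0)
print(x, y, z, f)  # north, east, down components and total field, in nT

# Older epochs can select a paleosecular-variation model with the mod keyword.
x2, y2, z2, f2 = pmag.doigrf(30.0, 45.0, 0.0, -500.0, mod='cals10k.2')
```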