text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def stage_name(self):
"""
Get the stage name of the current job instance.
Because a job instance can be instantiated in different ways that yield different data,
we have to check where to read the stage name from.
:return: stage name.
"""
if 'stage_name' in self.data and self.data.stage_name:
return self.data.get('stage_name')
else:
return self.stage.data.name | 0.006438 |
def get_dir(obj, **kwargs):
"""Return a string listing an object's public attributes and their types, with the "<class ...>" wrapper stripped from each type name."""
attrs = list(filter(filter_dunder, dir(obj)))
if not attrs:
return "No public attributes."
s = "Attributes:"
for attr in attrs:
s += '\n - {} ({})'.format(attr, extract_type(type(getattr(obj, attr))))
return s | 0.00831 |
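Neither filter_dunder nor extract_type is defined in this snippet, so the following is a minimal sketch of what they plausibly do, inferred only from how get_dir calls them; treat both implementations as assumptions.
import re

def filter_dunder(name):
    # assumption: keep only public attribute names (no leading underscore)
    return not name.startswith('_')

def extract_type(tp):
    # assumption: reduce "<class 'int'>" to just "int", matching the docstring's
    # note about stripping the "<class..." bit
    match = re.match(r"<class '([^']+)'>", repr(tp))
    return match.group(1) if match else repr(tp)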
def delete_event(self, uid):
"""Delete event and sync calendar
Parameters
----------
uid : uid of event to be deleted
"""
ev_for_deletion = self.calendar.get(uid)
ev_for_deletion.delete() | 0.008197 |
def cli_plugin_add_help(help):
"""
Decorator generator that adds the cli help to the cli plugin based on the
decorated function
Args:
help (str): help string for the cli plugin
Returns:
function: Decorator that builds or extends the cliplugin for the
decorated function, setting the given help
Examples:
>>> @cli_plugin_add_help('my help string')
... def test(**kwargs):
... print 'test'
...
>>> print test.__class__
<class 'cli.CLIPluginFuncWrapper'>
>>> print test.help
my help string
>>> @cli_plugin_add_help('my help string')
... @cli_plugin()
... def test(**kwargs):
... print 'test'
>>> print test.__class__
<class 'cli.CLIPluginFuncWrapper'>
>>> print test.help
my help string
"""
def decorator(func):
if not isinstance(func, CLIPluginFuncWrapper):
func = CLIPluginFuncWrapper(do_run=func)
func.set_help(help)
return func
return decorator | 0.000919 |
def get_exported_interfaces(svc_ref, overriding_props=None):
# type: (ServiceReference, Optional[Dict[str, Any]]) -> Optional[List[str]]
"""
Looks for the interfaces exported by a service
:param svc_ref: Service reference
:param overriding_props: Properties overriding service ones
:return: The list of exported interfaces
"""
# first check overriding_props for service.exported.interfaces
exported_intfs = get_prop_value(
SERVICE_EXPORTED_INTERFACES, overriding_props
)
# then check svc_ref property
if not exported_intfs:
exported_intfs = svc_ref.get_property(SERVICE_EXPORTED_INTERFACES)
if not exported_intfs:
return None
return get_matching_interfaces(
svc_ref.get_property(constants.OBJECTCLASS), exported_intfs
) | 0.001229 |
def to_single_symbol_list(self):
"""
Convert this HandwrittenData object into a list of HandwrittenData
objects. Each element of the list is a single symbol.
Returns
-------
list of HandwrittenData objects
"""
symbol_stream = getattr(self,
'symbol_stream',
[None for symbol in self.segmentation])
single_symbols = []
pointlist = self.get_sorted_pointlist()
for stroke_indices, label in zip(self.segmentation, symbol_stream):
strokes = []
for stroke_index in stroke_indices:
strokes.append(pointlist[stroke_index])
single_symbols.append(HandwrittenData(json.dumps(strokes),
formula_id=label))
return single_symbols | 0.002283 |
def min_branch_length(go_id1, go_id2, godag, branch_dist):
'''
Finds the minimum branch length between two terms in the GO DAG.
'''
# First get the deepest common ancestor
goterm1 = godag[go_id1]
goterm2 = godag[go_id2]
if goterm1.namespace == goterm2.namespace:
dca = deepest_common_ancestor([go_id1, go_id2], godag)
# Then get the distance from the DCA to each term
dca_depth = godag[dca].depth
depth1 = goterm1.depth - dca_depth
depth2 = goterm2.depth - dca_depth
# Return the total distance - i.e., to the deepest common ancestor and back.
return depth1 + depth2
elif branch_dist is not None:
return goterm1.depth + goterm2.depth + branch_dist | 0.002667 |
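A tiny worked sketch of the depth arithmetic with hypothetical depths (no godag object is needed for the calculation itself):
# Suppose term1 has depth 5, term2 has depth 4, and their deepest common
# ancestor (DCA) sits at depth 3: the branch length is the walk up from
# term1 to the DCA plus the walk down to term2.
depth1, depth2, dca_depth = 5, 4, 3
branch_length = (depth1 - dca_depth) + (depth2 - dca_depth)
assert branch_length == 3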
def get_by_name(self, name):
"""
Gets a SAN Manager by name.
Args:
name: Name of the SAN Manager
Returns:
dict: SAN Manager.
"""
san_managers = self._client.get_all()
result = [x for x in san_managers if x['name'] == name]
return result[0] if result else None | 0.005731 |
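A hedged usage sketch; the client object, its san_managers attribute, and the name below are illustrative assumptions, not part of the snippet above.
manager = oneview_client.san_managers.get_by_name('SAN Manager 172.18.15.1')  # illustrative
if manager is not None:
    print(manager['name'])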
def handle(self, *args, **options):
"""
Iterates over all the CRON_CLASSES (or if passed in as a commandline argument)
and runs them.
"""
cron_classes = options['cron_classes']
if cron_classes:
cron_class_names = cron_classes
else:
cron_class_names = getattr(settings, 'CRON_CLASSES', [])
try:
crons_to_run = [get_class(x) for x in cron_class_names]
except Exception:
error = traceback.format_exc()
self.stdout.write('Make sure these are valid cron class names: %s\n%s' % (cron_class_names, error))
return
for cron_class in crons_to_run:
run_cron_with_cache_check(
cron_class,
force=options['force'],
silent=options['silent']
)
clear_old_log_entries()
close_old_connections() | 0.004357 |
def centre_of_atoms(atoms, mass_weighted=True):
""" Returns centre point of any list of atoms.
Parameters
----------
atoms : list
List of AMPAL atom objects.
mass_weighted : bool, optional
If True returns centre of mass, otherwise just geometric centre of points.
Returns
-------
centre_of_mass : numpy.array
3D coordinate for the centre of mass.
"""
points = [x._vector for x in atoms]
if mass_weighted:
masses = [x.mass for x in atoms]
else:
masses = []
return centre_of_mass(points=points, masses=masses) | 0.003328 |
def _max(self):
"""Getter for the maximum series value"""
return (
self.range[1] if (self.range and self.range[1] is not None) else
(max(map(abs, self._values)) if self._values else None)
) | 0.008439 |
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = _dict_getitem(self, key.lower())
except KeyError:
return []
else:
if isinstance(vals, tuple):
return [vals[1]]
else:
return vals[1:] | 0.005051 |
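The tuple/list branch hints at the underlying storage convention. The sketch below spells out that assumption (a single header stored as a (key, value) tuple, repeated headers as a [key, value1, value2, ...] list); both shapes are inferred from this method alone.
single = ('content-type', 'text/plain')   # assumed single-value shape
multi = ['set-cookie', 'a=1', 'b=2']      # assumed multi-value shape
print([single[1]])   # what getlist would return: ['text/plain']
print(multi[1:])     # what getlist would return: ['a=1', 'b=2']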
def get_java_remote_console_url(self, ip=None):
"""
Generates a Single Sign-On (SSO) session for the iLO Java Applet console and returns the URL to launch it.
If the server hardware is unmanaged or unsupported, the resulting URL will not use SSO and the iLO Java Applet
will prompt for credentials. This is not supported on G7/iLO3 or earlier servers.
Args:
ip: IP address or host name of the server's iLO management processor
Returns:
URL
"""
uri = "{}/javaRemoteConsoleUrl".format(self.data["uri"])
if ip:
uri = "{}?ip={}".format(uri, ip)
return self._helper.do_get(uri) | 0.008671 |
def finish(self, data):
"""Process success indicator from the server.
Process any addiitional data passed with the success.
Fail if the server was not authenticated.
:Parameters:
- `data`: an optional additional data with success.
:Types:
- `data`: `bytes`
:return: success or failure indicator.
:returntype: `sasl.Success` or `sasl.Failure`"""
if not self._server_first_message:
logger.debug("Got success too early")
return Failure("bad-success")
if self._finished:
return Success({"username": self.username, "authzid": self.authzid})
else:
ret = self._final_challenge(data)
if isinstance(ret, Failure):
return ret
if self._finished:
return Success({"username": self.username,
"authzid": self.authzid})
else:
logger.debug("Something went wrong when processing additional"
" data with success?")
return Failure("bad-success") | 0.004209 |
def cat(prefix="md", dirname=os.path.curdir, partsdir="parts", fulldir="full",
resolve_multi="pass"):
"""Concatenate all parts of a simulation.
The xtc, trr, and edr files in *dirname* such as prefix.xtc,
prefix.part0002.xtc, prefix.part0003.xtc, ... are
1) moved to the *partsdir* (under *dirname*)
2) concatenated with the Gromacs tools to yield prefix.xtc, prefix.trr,
prefix.edr, prefix.gro (or prefix.md) in *dirname*
3) Store these trajectories in *fulldir*
.. Note:: Trajectory files are *never* deleted by this function to avoid
data loss in case of bugs. You will have to clean up yourself
by deleting *dirname*/*partsdir*.
Symlinks for the trajectories are *not* handled well and
break the function. Use hard links instead.
.. Warning:: If an exception occurs when running this function then make
doubly and triply sure where your files are before running
this function again; otherwise you might **overwrite data**.
Possibly you will need to manually move the files from *partsdir*
back into the working directory *dirname*; this should only overwrite
files generated so far, but *check carefully*!
:Keywords:
*prefix*
deffnm of the trajectories [md]
*resolve_multi*
how to deal with multiple "final" gro or pdb files: normally there should
only be one but in case of restarting from the checkpoint of a finished
simulation one can end up with multiple identical ones.
- "pass" : do nothing and log a warning
- "guess" : take prefix.pdb or prefix.gro if it exists, otherwise the one of
prefix.partNNNN.gro|pdb with the highest NNNN
*dirname*
change to *dirname* and assume all trajectories are located there [.]
*partsdir*
directory where to store the input files (they are moved out of the way);
*partsdir* must be manually deleted [parts]
*fulldir*
directory where to store the final results [full]
"""
gmxcat = {'xtc': gromacs.trjcat,
'trr': gromacs.trjcat,
'edr': gromacs.eneconv,
'log': utilities.cat,
}
def _cat(prefix, ext, partsdir=partsdir, fulldir=fulldir):
filenames = glob_parts(prefix, ext)
if ext.startswith('.'):
ext = ext[1:]
outfile = os.path.join(fulldir, prefix + '.' + ext)
if not filenames:
return None
nonempty_files = []
for f in filenames:
if os.stat(f).st_size == 0:
logger.warn("File {f!r} is empty, skipping".format(**vars()))
continue
if os.path.islink(f):
# TODO: re-write the symlink to point to the original file
errmsg = "Symbolic links do not work (file %(f)r), sorry. " \
"CHECK LOCATION OF FILES MANUALLY BEFORE RUNNING gromacs.cbook.cat() AGAIN!" % vars()
logger.exception(errmsg)
raise NotImplementedError(errmsg)
shutil.move(f, partsdir)
nonempty_files.append(f)
filepaths = [os.path.join(partsdir, f) for f in nonempty_files]
gmxcat[ext](f=filepaths, o=outfile)
return outfile
_resolve_options = ("pass", "guess")
if not resolve_multi in _resolve_options:
raise ValueError("resolve_multi must be one of %(_resolve_options)r, "
"not %(resolve_multi)r" % vars())
if fulldir == os.path.curdir:
wmsg = "Using the current directory as fulldir can potentially lead to data loss if you run this function multiple times."
logger.warning(wmsg)
warnings.warn(wmsg, category=BadParameterWarning)
with utilities.in_dir(dirname, create=False):
utilities.mkdir_p(partsdir)
utilities.mkdir_p(fulldir)
for ext in ('log', 'edr', 'trr', 'xtc'):
logger.info("[%(dirname)s] concatenating %(ext)s files...", vars())
outfile = _cat(prefix, ext, partsdir)
logger.info("[%(dirname)s] created %(outfile)r", vars())
for ext in ('gro', 'pdb'): # XXX: ugly, make method out of parts?
filenames = glob_parts(prefix, ext)
if len(filenames) == 0:
continue # goto next ext
elif len(filenames) == 1:
pick = filenames[0]
else:
if resolve_multi == "pass":
logger.warning("[%(dirname)s] too many output structures %(filenames)r, "
"cannot decide which one --- resolve manually!", vars())
for f in filenames:
shutil.move(f, partsdir)
continue # goto next ext
elif resolve_multi == "guess":
pick = prefix + '.' + ext
if not pick in filenames:
pick = filenames[-1] # filenames are ordered with highest parts at end
final = os.path.join(fulldir, prefix + '.' + ext)
shutil.copy(pick, final) # copy2 fails on nfs with Darwin at least
for f in filenames:
shutil.move(f, partsdir)
logger.info("[%(dirname)s] collected final structure %(final)r "
"(from %(pick)r)", vars())
partsdirpath = utilities.realpath(dirname, partsdir)
logger.warn("[%(dirname)s] cat() complete in %(fulldir)r but original files "
"in %(partsdirpath)r must be manually removed", vars()) | 0.0031 |
def _remove_vm(name, datacenter, service_instance, placement=None,
power_off=None):
'''
Helper function to remove a virtual machine
name
Name of the virtual machine
service_instance
vCenter service instance for connection and configuration
datacenter
Datacenter of the virtual machine
placement
Placement information of the virtual machine
power_off
Power off the virtual machine before removal, if set to True
'''
results = {}
if placement:
(resourcepool_object, placement_object) = \
salt.utils.vmware.get_placement(service_instance,
datacenter,
placement)
else:
placement_object = salt.utils.vmware.get_datacenter(service_instance,
datacenter)
if power_off:
power_off_vm(name, datacenter, service_instance)
results['powered_off'] = True
vm_ref = salt.utils.vmware.get_mor_by_property(
service_instance,
vim.VirtualMachine,
name,
property_name='name',
container_ref=placement_object)
if not vm_ref:
raise salt.exceptions.VMwareObjectRetrievalError(
'The virtual machine object {0} in datacenter '
'{1} was not found'.format(name, datacenter))
return results, vm_ref | 0.000732 |
def get_records_for_package(self, package_name):
"""
Get all records identified by package.
"""
result = []
result.extend(self.package_module_map.get(package_name, []))
return result | 0.008811 |
def populate(self):
""" Populate this list with all views that take no arguments.
"""
from django.conf import settings
from django.core import urlresolvers
self.append(("", ""))
urlconf = settings.ROOT_URLCONF
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
# Collect base level views
for key, value in resolver.reverse_dict.items():
if isinstance(key, basestring):
args = value[0][0][1]
url = "/" + value[0][0][0]
self.append((key, " ".join(key.split("_"))))
# Collect namespaces (TODO: merge these two sections into one)
for namespace, url in resolver.namespace_dict.items():
for key, value in url[1].reverse_dict.items():
if isinstance(key, basestring):
args = value[0][0][1]
full_key = '%s:%s' % (namespace, key)
self.append((full_key, "%s: %s" % (namespace, " ".join(key.split("_")))))
self.sort() | 0.002841 |
def extract_common(self, keys):
"""
Return a new segmentlistdict containing only those
segmentlists associated with the keys in keys, with each
set to their mutual intersection. The offsets are
preserved.
"""
keys = set(keys)
new = self.__class__()
intersection = self.intersection(keys)
for key in keys:
dict.__setitem__(new, key, _shallowcopy(intersection))
dict.__setitem__(new.offsets, key, self.offsets[key])
return new | 0.033113 |
def get_channel_embeddings(io_depth, targets, hidden_size, name="channel"):
"""Get separate embedding for each of the channels."""
targets_split = tf.split(targets, io_depth, axis=3)
rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name,
[256 * io_depth, hidden_size])
rgb_embedding_var = tf.identity(rgb_embedding_var)
rgb_embedding_var *= float(hidden_size)**0.5
channel_target_embs = []
for i in range(io_depth):
# Adding the channel offsets to get the right embedding since the
# embedding tensor has shape 256 * io_depth, hidden_size
target_ids = tf.squeeze(targets_split[i], axis=3) + i * 256
target_embs = common_layers.gather(rgb_embedding_var, target_ids)
channel_target_embs.append(target_embs)
return tf.concat(channel_target_embs, axis=-1) | 0.010778 |
def is_subpath(path, parent):
'''
Returns True if *path* points to the same or a subpath of *parent*.
'''
try:
relpath = os.path.relpath(path, parent)
except ValueError:
return False # happens on Windows if drive letters don't match
return relpath == os.curdir or not relpath.startswith(os.pardir) | 0.015674 |
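A quick usage sketch with illustrative POSIX paths:
assert is_subpath('/srv/app/static/css', '/srv/app') is True
assert is_subpath('/srv/app', '/srv/app') is True     # the same path counts
assert is_subpath('/srv/other', '/srv/app') is False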
def write(self, filename):
"""
Write a copy of this config object.
Parameters:
-----------
filename : output filename
Returns:
--------
None
"""
ext = os.path.splitext(filename)[1]
writer = open(filename, 'w')
if ext == '.py':
writer.write(pprint.pformat(self))
elif ext == '.yaml':
writer.write(yaml.dump(self))
else:
writer.close()
raise Exception('Unrecognized config format: %s'%ext)
writer.close() | 0.006908 |
def _termination_callback(self, returncode):
"""
Called when the process has stopped.
:param returncode: Process returncode
"""
log.info("uBridge process has stopped, return code: %d", returncode)
if returncode != 0:
self._project.emit("log.error", {"message": "uBridge process has stopped, return code: {}\n{}".format(returncode, self.read_stdout())}) | 0.007246 |
def register_press_watcher(self, watcher_name, press_keys, *condition_list):
"""
The watcher perform *press_keys* action sequentially when conditions match.
"""
def unicode_to_list(a_unicode):
a_list = list()
comma_count = a_unicode.count(',')
for count in range(comma_count + 1):
comma_position = a_unicode.find(',')
if comma_position == -1:
a_list.append(str(a_unicode))
else:
a_list.append(a_unicode[0:comma_position])
a_unicode = a_unicode[comma_position + 1:]
return a_list
watcher = self.device.watcher(watcher_name)
for condition in condition_list:
watcher.when(**self.__unicode_to_dict(condition))
watcher.press(*unicode_to_list(press_keys))
self.device.watchers.run() | 0.0033 |
def last_visible_line(self, before_scroll_offset=False):
"""
Like `first_visible_line`, but for the last visible line.
"""
if before_scroll_offset:
return self.displayed_lines[-1 - self.applied_scroll_offsets.bottom]
else:
return self.displayed_lines[-1] | 0.009434 |
def problem_serializing(value, e=None):
"""
THROW ERROR ABOUT SERIALIZING
"""
from mo_logs import Log
try:
typename = type(value).__name__
except Exception:
typename = "<error getting name>"
try:
rep = text_type(repr(value))
except Exception as _:
rep = None
if rep == None:
Log.error(
"Problem turning value of type {{type}} to json",
type=typename,
cause=e
)
else:
Log.error(
"Problem turning value ({{value}}) of type {{type}} to json",
value=rep,
type=typename,
cause=e
) | 0.003003 |
def thread_exists(self, thread_id):
"""Check if a thread exists or has 404'd.
Args:
thread_id (int): Thread ID
Returns:
bool: Whether the given thread exists on this board.
"""
return self._requests_session.head(
self._url.thread_api_url(
thread_id=thread_id
)
).ok | 0.005222 |
def _update_settings(self, X):
"""
Update the model argument.
:param X: The input data of the model.
:type X: list of (candidate, features) pair
"""
self.logger.info("Loading default parameters for Sparse Logistic Regression")
config = get_config()["learning"]["SparseLogisticRegression"]
for key in config.keys():
if key not in self.settings:
self.settings[key] = config[key]
# Add one feature for padding vector (all 0s)
self.settings["input_dim"] = X[1].shape[1] + 1 | 0.005164 |
def ne(self, key, value, includeMissing=False):
'''Return entries where the key's value is NOT equal (!=) to the given value.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).ne("name", "Larry").returnString()
[
{age: 18, income: 93000, name: 'Jim' , wigs: 68},
{age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]},
{age: 19, income: 29000, name: 'Bill', wigs: None }
]
>>> print PLOD(test).ne("income", 15000, includeMissing=True).returnString()
[
{age: 18, income: 93000, name: 'Jim' , wigs: 68},
{age: 18, income: None , name: 'Larry', wigs: [3, 2, 9]},
{age: 19, income: 29000, name: 'Bill' , wigs: None }
]
.. versionadded:: 0.1.1
:param key:
The dictionary key (or cascading list of keys) that should be the
basis of comparison.
:param value:
The value to compare with.
:param includeMissing:
Defaults to False. If True, then entries missing the key are also
included.
:returns: self
'''
(self.table, self.index_track) = internal.select(self.table, self.index_track, key, self.NOT_EQUAL, value, includeMissing)
return self | 0.006002 |
def update_client_secrets(backend, updates, secrets=None, save=True):
'''update client secrets will update the data structure for a particular
authentication. This should only be used for a (quasi permanent) token
or similar. The secrets file, if found, is updated and saved by default.
'''
if secrets is None:
secrets = read_client_secrets()
if backend not in secrets:
secrets[backend] = {}
secrets[backend].update(updates)
# The update typically includes a save
if save is True:
secrets_file = get_secrets_file()
if secrets_file is not None:
write_json(secrets,secrets_file)
return secrets | 0.002928 |
def split_netloc(netloc):
"""Split netloc into username, password, host and port.
>>> split_netloc('foo:[email protected]:8080')
('foo', 'bar', 'www.example.com', '8080')
"""
username = password = host = port = ''
if '@' in netloc:
user_pw, netloc = netloc.split('@', 1)
if ':' in user_pw:
username, password = user_pw.split(':', 1)
else:
username = user_pw
netloc = _clean_netloc(netloc)
if ':' in netloc and netloc[-1] != ']':
host, port = netloc.rsplit(':', 1)
else:
host = netloc
return username, password, host, port | 0.001592 |
def load(self, code, setup='', teardown=''):
"""Prepares a set of setup, test, and teardown code to be
run in the console.
PARAMETERS:
code -- list; processed lines of code. Elements in the list are
either strings (input) or CodeAnswer objects (output)
setup -- str; raw setup code
teardown -- str; raw teardown code
"""
super().load(code, setup, teardown)
self._frame = self._original_frame.copy() | 0.004016 |
def mean(self, axis=None, keepdims=False):
"""
Return the mean of the array over the given axis.
Parameters
----------
axis : tuple or int, optional, default=None
Axis to compute statistic over, if None
will compute over all axes
keepdims : boolean, optional, default=False
Keep axis remaining after operation with size 1.
"""
return self._stat(axis, name='mean', keepdims=keepdims) | 0.004132 |
def has_default_privileges(name,
object_name,
object_type,
defprivileges=None,
grant_option=None,
prepend='public',
maintenance_db=None,
user=None,
host=None,
port=None,
password=None,
runas=None):
'''
.. versionadded:: 2019.0.0
Check if a role has the specified privileges on an object
CLI Example:
.. code-block:: bash
salt '*' postgres.has_default_privileges user_name table_name table \\
SELECT,INSERT maintenance_db=db_name
name
Name of the role whose privileges should be checked on object_type
object_name
Name of the object on which the check is to be performed
object_type
The object type, which can be one of the following:
- table
- sequence
- schema
- group
- function
defprivileges
Comma separated list of privileges to check, from the list below:
- INSERT
- CREATE
- TRUNCATE
- TRIGGER
- SELECT
- USAGE
- UPDATE
- EXECUTE
- REFERENCES
- DELETE
- ALL
grant_option
If grant_option is set to True, the grant option check is performed
prepend
Table and Sequence object types live under a schema so this should be
provided if the object is not under the default `public` schema
maintenance_db
The database to connect to
user
database username if different from config or default
password
user password if any password for a specified user
host
Database host if different from config or default
port
Database port if different from config or default
runas
System user all operations should be performed on behalf of
'''
object_type, defprivileges, _defprivs = _mod_defpriv_opts(object_type, defprivileges)
_validate_default_privileges(object_type, _defprivs, defprivileges)
if object_type != 'group':
owner = _get_object_owner(object_name, object_type, prepend=prepend,
maintenance_db=maintenance_db, user=user, host=host, port=port,
password=password, runas=runas)
if owner is not None and name == owner:
return True
_defprivileges = default_privileges_list(object_name, object_type, prepend=prepend,
maintenance_db=maintenance_db, user=user, host=host, port=port,
password=password, runas=runas)
if name in _defprivileges:
if object_type == 'group':
if grant_option:
retval = _defprivileges[name]
else:
retval = True
return retval
else:
_defperms = _DEFAULT_PRIVILEGE_TYPE_MAP[object_type]
if grant_option:
defperms = dict((_DEFAULT_PRIVILEGES_MAP[defperm], True) for defperm in _defperms)
retval = defperms == _defprivileges[name]
else:
defperms = [_DEFAULT_PRIVILEGES_MAP[defperm] for defperm in _defperms]
if 'ALL' in _defprivs:
retval = sorted(defperms) == sorted(_defprivileges[name].keys())
else:
retval = set(_defprivs).issubset(
set(_defprivileges[name].keys()))
return retval
return False | 0.006208 |
def decorate_class_method(func, classkey=None, skipmain=False):
"""
Will inject all decorated functions as methods of classkey
classkey is some identifying string, tuple, or object
func can also be a tuple
"""
#import utool as ut
global __CLASSTYPE_ATTRIBUTES__
assert classkey is not None, 'must specify classkey'
#if not (skipmain and ut.get_caller_modname() == '__main__'):
__CLASSTYPE_ATTRIBUTES__[classkey].append(func)
return func | 0.006237 |
def WriteArtifact(self, artifact):
"""Writes new artifact to the database."""
name = str(artifact.name)
if name in self.artifacts:
raise db.DuplicatedArtifactError(name)
self.artifacts[name] = artifact.Copy() | 0.008621 |
def convert_tree(self, element1, element2=None):
'''convert_tree
High-level api: Convert cxml tree to an internal schema tree. This
method is recursive.
Parameters
----------
element1 : `Element`
The node to be converted.
element2 : `Element`
A new node being constructed.
Returns
-------
Element
This is element2 after conversion.
'''
if element2 is None:
attributes = deepcopy(element1.attrib)
tag = attributes['name']
del attributes['name']
element2 = etree.Element(tag, attributes)
for e1 in element1.findall('node'):
attributes = deepcopy(e1.attrib)
tag = self.prefix_to_url(attributes['name'])
del attributes['name']
e2 = etree.SubElement(element2, tag, attributes)
self.convert_tree(e1, e2)
return element2 | 0.002049 |
def validate(self, formats_dir="../formats/"):
"""checks if the document is valid"""
#TODO: download XSD from web
if self.inline:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dcoi-dsc.xsd").readlines()))))
xmlschema.assertValid(self.tree)
#return xmlschema.validate(self)
else:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dutchsemcor-standalone.xsd").readlines()))))
xmlschema.assertValid(self.tree) | 0.010345 |
def midpoint(self):
""" Return a point guaranteed to fall within this range, hopefully near the middle.
"""
minpoint = self.leftedge
if self.leftop is gt:
minpoint += 1
maxpoint = self.rightedge
if self.rightop is lt:
maxpoint -= 1
if minpoint is None:
return maxpoint
elif maxpoint is None:
return minpoint
else:
return (minpoint + maxpoint) / 2 | 0.011742 |
def get_receive(self, script_list):
"""Return a list of received events contained in script_list."""
events = defaultdict(set)
for script in script_list:
if self.script_start_type(script) == self.HAT_WHEN_I_RECEIVE:
event = script.blocks[0].args[0].lower()
events[event].add(script)
return events | 0.005376 |
def copyh5(inh5, outh5):
"""Recursively copy all hdf5 data from one group to another
Data from links is copied.
Parameters
----------
inh5: str, h5py.File, or h5py.Group
The input hdf5 data. This can be either a file name or
an hdf5 object.
outh5: str, h5py.File, h5py.Group, or None
The output hdf5 data. This can be either a file name or
an hdf5 object. If set to `None`, a new hdf5 object is
created in memory.
Notes
-----
All data in outh5 are overridden by the inh5 data.
"""
if not isinstance(inh5, h5py.Group):
inh5 = h5py.File(inh5, mode="r")
if outh5 is None:
# create file in memory
h5kwargs = {"name": "qpimage{}.h5".format(QPImage._instances),
"driver": "core",
"backing_store": False,
"mode": "a"}
outh5 = h5py.File(**h5kwargs)
return_h5obj = True
QPImage._instances += 1
elif not isinstance(outh5, h5py.Group):
# create new file
outh5 = h5py.File(outh5, mode="w")
return_h5obj = False
else:
return_h5obj = True
# begin iteration
for key in inh5:
if key in outh5:
del outh5[key]
if isinstance(inh5[key], h5py.Group):
outh5.create_group(key)
copyh5(inh5[key], outh5[key])
else:
dset = write_image_dataset(group=outh5,
key=key,
data=inh5[key][:],
h5dtype=inh5[key].dtype)
dset.attrs.update(inh5[key].attrs)
outh5.attrs.update(inh5.attrs)
if return_h5obj:
# in-memory or previously created instance of h5py.File
return outh5
else:
# properly close the file and return its name
fn = outh5.filename
outh5.flush()
outh5.close()
return fn | 0.00051 |
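A hedged usage sketch; the file name is illustrative. Passing None as the second argument returns an in-memory h5py object, as described in the docstring.
mem = copyh5("input_qpimage.h5", None)   # h5py object backed by memory
print(list(mem.keys()))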
def load_markov(argv, stdin):
"""Load and return markov algorithm."""
if len(argv) > 3:
with open(argv[3]) as input_file:
return Algorithm(input_file.readlines())
else:
return Algorithm(stdin.readlines()) | 0.004098 |
def get_data(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
sortby=None,
pagesize=10000,
max_workers=5,
):
"""Get GeoJSON featurecollection from DataBC WFS
"""
param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(make_request, param_dicts)
outjson = dict(type="FeatureCollection", features=[])
for result in results:
outjson["features"] += result
return outjson | 0.001825 |
def get_pools(time_span=None, api_code=None):
"""Get number of blocks mined by each pool.
:param str time_span: duration of the chart.
Default is 4days (optional)
:param str api_code: Blockchain.info API code (optional)
:return: an instance of dict:{str,int}
"""
resource = 'pools'
if time_span is not None:
resource += '?timespan=' + time_span
if api_code is not None:
# start the query string with '?' if no timespan was added, otherwise append with '&'
resource += ('&' if '?' in resource else '?') + 'api_code=' + api_code
response = util.call_api(resource, base_url='https://api.blockchain.info/')
json_response = json.loads(response)
return {k: v for (k, v) in json_response.items()} | 0.001577 |
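A hedged usage sketch of the helper above; '4days' mirrors the default mentioned in the docstring, everything else is illustrative.
pools = get_pools(time_span='4days')
for pool_name, blocks in sorted(pools.items(), key=lambda kv: -kv[1]):
    print(pool_name, blocks)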
def get_datastream_info(self, dsinfo):
'''Use regular expressions to pull datastream [version]
details (id, mimetype, size, and checksum) for binary content,
in order to sanity check the decoded data.
:param dsinfo: text content just before a binaryContent tag
:returns: dict with keys for id, mimetype, size, type and digest,
or None if no match is found
'''
# we only need to look at the end of this section of content
dsinfo = dsinfo[-750:]
# if not enough content is present, include the end of
# the last read chunk, if available
if len(dsinfo) < 750 and self.end_of_last_chunk is not None:
dsinfo = self.end_of_last_chunk + dsinfo
# force text needed for python 3 compatibility (in python 3
# dsinfo is bytes instead of a string)
try:
text = force_text(dsinfo)
except UnicodeDecodeError as err:
# it's possible to see a unicode character split across
# read blocks; if we get an "invalid start byte" unicode
# decode error, try converting the text without the first
# character; if that's the problem, it's not needed
# for datastream context
if 'invalid start byte' in force_text(err):
text = force_text(dsinfo[1:])
else:
raise err
# in case the text contains multiple datastream ids, find
# all matches and then use the last, since we want the last one
# in this section, just before the datastream content
matches = list(self.dsinfo_regex.finditer(text))
if matches:
infomatch = matches[-1]
return infomatch.groupdict() | 0.001132 |
def RDirs(self, pathlist):
"""Search for a list of directories in the Repository list."""
cwd = self.cwd or self.fs._cwd
return cwd.Rfindalldirs(pathlist) | 0.011236 |
def _process_json_resp_data(resp, no_custom_fields=False):
"""
process the response and return a list of BridgeUser
"""
bridge_users = []
while True:
resp_data = json.loads(resp)
link_url = None
if "meta" in resp_data and\
"next" in resp_data["meta"]:
link_url = resp_data["meta"]["next"]
bridge_users = _process_apage(resp_data, bridge_users,
no_custom_fields)
if link_url is None:
break
resp = get_resource(link_url)
return bridge_users | 0.001695 |
def fromSchemaItem(self, item):
"""set up global elements.
"""
if item.isElement() is False or item.isLocal() is True:
raise TypeError, 'expecting global element declaration: %s' %item.getItemTrace()
local = False
qName = item.getAttribute('type')
if not qName:
etp = item.content
local = True
else:
etp = item.getTypeDefinition('type')
if etp is None:
if local is True:
self.content = ElementLocalComplexTypeContainer(do_extended=self.do_extended)
else:
self.content = ElementSimpleTypeContainer()
elif etp.isLocal() is False:
self.content = ElementGlobalDefContainer()
elif etp.isSimple() is True:
self.content = ElementLocalSimpleTypeContainer()
elif etp.isComplex():
self.content = ElementLocalComplexTypeContainer(do_extended=self.do_extended)
else:
raise Wsdl2PythonError, "Unknown element declaration: %s" %item.getItemTrace()
self.logger.debug('ElementWriter setUp container "%r", Schema Item "%s"' %(
self.content, item.getItemTrace()))
self.content.setUp(item) | 0.013354 |
def transform_audio(self, y):
'''Compute the STFT with phase differentials.
Parameters
----------
y : np.ndarray
the audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
The STFT magnitude
data['dphase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
The unwrapped phase differential
'''
data = super(STFTPhaseDiff, self).transform_audio(y)
data['dphase'] = self.phase_diff(data.pop('phase'))
return data | 0.0033 |
def mag_to_fnu(self, mag):
"""Convert a magnitude in this band to a f_ν flux density.
It is assumed that the magnitude has been computed in the appropriate
photometric system. The definition of "appropriate" will vary from
case to case.
"""
if self.native_flux_kind == 'flam':
return flam_ang_to_fnu_cgs(self.mag_to_flam(mag), self.pivot_wavelength())
raise PKError('don\'t know how to get f_ν from mag for bandpass %s/%s',
self.telescope, self.band) | 0.007366 |
def escape(self, text, quote = True):
"""
Escape special characters in HTML
"""
if isinstance(text, bytes):
return escape_b(text, quote)
else:
return escape(text, quote) | 0.017167 |
def initialize(self):
'''List all datasets for a given ...'''
fmt = guess_format(self.source.url)
# if format can't be guessed from the url
# we fallback on the declared Content-Type
if not fmt:
response = requests.head(self.source.url)
mime_type = response.headers.get('Content-Type', '').split(';', 1)[0]
if not mime_type:
msg = 'Unable to detect format from extension or mime type'
raise ValueError(msg)
fmt = guess_format(mime_type)
if not fmt:
msg = 'Unsupported mime type "{0}"'.format(mime_type)
raise ValueError(msg)
graph = self.parse_graph(self.source.url, fmt)
self.job.data = {'graph': graph.serialize(format='json-ld', indent=None)} | 0.004848 |
def _revs_equal(rev1, rev2, rev_type):
'''
Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then
the comparison will be done using str.startswith() to allow short SHA1s to
compare successfully.
NOTE: This means that rev2 must be the short rev.
'''
if (rev1 is None and rev2 is not None) \
or (rev2 is None and rev1 is not None):
return False
elif rev1 is rev2 is None:
return True
elif rev_type == 'sha1':
return rev1.startswith(rev2)
else:
return rev1 == rev2 | 0.001773 |
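Illustrative calls (the SHA1 is made up) showing why the short rev must be the second argument and why the comparison only loosens when rev_type is 'sha1':
full = 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'
assert _revs_equal(full, 'a94a8fe', 'sha1') is True    # prefix match allowed
assert _revs_equal(full, 'a94a8fe', 'tag') is False    # strict equality otherwise
assert _revs_equal(None, None, 'sha1') is True         # both unset compare equal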
def value(self, value):
""" Value to be written on register.
:param value: An integer.
:raises: IllegalDataValueError when value isn't in range.
"""
try:
struct.pack('>' + conf.TYPE_CHAR, value)
except struct.error:
raise IllegalDataValueError
self._value = value | 0.005797 |
def location(self, relative_alt=False):
'''return current location'''
self.wait_gps_fix()
# wait for another VFR_HUD, to ensure we have correct altitude
self.recv_match(type='VFR_HUD', blocking=True)
self.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
if relative_alt:
alt = self.messages['GLOBAL_POSITION_INT'].relative_alt*0.001
else:
alt = self.messages['VFR_HUD'].alt
return location(self.messages['GPS_RAW_INT'].lat*1.0e-7,
self.messages['GPS_RAW_INT'].lon*1.0e-7,
alt,
self.messages['VFR_HUD'].heading) | 0.002963 |
def enable_site(site_name):
"""Enable an available Nginx site."""
site_available = u'/etc/nginx/sites-available/%s' % site_name
site_enabled = u'/etc/nginx/sites-enabled/%s' % site_name
if files.exists(site_available):
sudo(u'ln -s -f %s %s' % (site_available, site_enabled))
restart_service(u'nginx')
else:
abort(u'%s site configuration is not available' % site_name) | 0.002421 |
def guest_pause(self, userid):
"""Pause a virtual machine.
:param str userid: the id of the virtual machine to be paused
:returns: None
"""
action = "pause guest '%s'" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._vmops.guest_pause(userid) | 0.006211 |
def p_list(self, tok):
"""list : IPV4
| IPV6
| DATETIME
| TIMEDELTA
| INTEGER
| FLOAT
| VARIABLE
| CONSTANT
| IPV4 COMMA list
| IPV6 COMMA list
| DATETIME COMMA list
| TIMEDELTA COMMA list
| INTEGER COMMA list
| FLOAT COMMA list
| VARIABLE COMMA list
| CONSTANT COMMA list"""
node = self._create_factor_rule(tok[1])
if len(tok) == 2:
tok[0] = ListRule([node])
else:
tok[0] = ListRule([node], tok[3]) | 0.002878 |
def visit_excepthandler(self, node, parent):
"""visit an ExceptHandler node by returning a fresh instance of it"""
newnode = nodes.ExceptHandler(node.lineno, node.col_offset, parent)
if node.name:
name = self.visit_assignname(node, newnode, node.name)
else:
name = None
newnode.postinit(
_visit_or_none(node, "type", self, newnode),
name,
[self.visit(child, newnode) for child in node.body],
)
return newnode | 0.003817 |
def color_str(self):
"Return an escape-coded string to write to the terminal."
s = self.s
for k, v in sorted(self.atts.items()):
# (self.atts sorted for the sake of always acting the same.)
if k not in xforms:
# Unsupported SGR code
continue
elif v is False:
continue
elif v is True:
s = xforms[k](s)
else:
s = xforms[k](s, v)
return s | 0.003945 |
def coverage(args):
"""
%prog coverage *.coverage
Plot coverage along chromosome. The coverage file can be generated with:
$ samtools depth a.bam > a.coverage
The plot is a simple line plot using matplotlib.
"""
from jcvi.graphics.base import savefig
p = OptionParser(coverage.__doc__)
opts, args, iopts = p.set_image_options(args, format="png")
if len(args) != 1:
sys.exit(not p.print_help())
covfile, = args
df = pd.read_csv(covfile, sep='\t', names=["Ref", "Position", "Depth"])
xlabel, ylabel = "Position", "Depth"
df.plot(xlabel, ylabel, color='g')
image_name = covfile + "." + iopts.format
savefig(image_name) | 0.001441 |
def getFileFormat(self, name, args):
""" Retrieve the content of a file via the configuration
and substitute the variables given as arguments """
# get the template file name
template_pathname = self.get(name, "--")
if not os.path.isfile(template_pathname):
return False
# read the template and build the content
content = ""
with open(template_pathname) as fp:
# format the file content with the given arguments
content = fp.read().format(**args)
# return the formatted content
return content | 0.03178 |
def serialize(pca, **kwargs):
"""
Serialize an orientation object to a dict suitable
for JSON
"""
strike, dip, rake = pca.strike_dip_rake()
hyp_axes = sampling_axes(pca)
return dict(
**kwargs,
principal_axes = pca.axes.tolist(),
hyperbolic_axes = hyp_axes.tolist(),
n_samples = pca.n,
strike=strike, dip=dip, rake=rake,
angular_errors=[2*N.degrees(i)
for i in angular_errors(hyp_axes)]) | 0.016842 |
def externalCall(cls, method, url, codes=(200, 201, 204, 207), **kwargs):
"""
Make a public API call without a connected :class:`.Skype` instance.
The obvious implications are that no authenticated calls are possible, though this allows accessing some public
APIs such as join URL lookups.
Args:
method (str): HTTP request method
url (str): full URL to connect to
codes (int list): expected HTTP response codes for success
kwargs (dict): any extra parameters to pass to :func:`requests.request`
Returns:
requests.Response: response object provided by :mod:`requests`
Raises:
.SkypeAuthException: if an authentication rate limit is reached
.SkypeApiException: if a successful status code is not received
"""
if os.getenv("SKPY_DEBUG_HTTP"):
print("<= [{0}] {1} {2}".format(datetime.now().strftime("%d/%m %H:%M:%S"), method, url))
print(pformat(kwargs))
resp = cls.extSess.request(method, url, **kwargs)
if os.getenv("SKPY_DEBUG_HTTP"):
print("=> [{0}] {1}".format(datetime.now().strftime("%d/%m %H:%M:%S"), resp.status_code))
print(pformat(dict(resp.headers)))
try:
print(pformat(resp.json()))
except ValueError:
print(resp.text)
if resp.status_code not in codes:
raise SkypeApiException("{0} response from {1} {2}".format(resp.status_code, method, url), resp)
return resp | 0.004433 |
def beforeRender(self, ctx):
"""
Before rendering this page, identify the correct URL for the login to post
to, and the error message to display (if any), and fill the 'login
action' and 'error' slots in the template accordingly.
"""
generator = ixmantissa.ISiteURLGenerator(self.store)
url = generator.rootURL(IRequest(ctx))
url = url.child('__login__')
for seg in self.segments:
url = url.child(seg)
for queryKey, queryValues in self.arguments.iteritems():
for queryValue in queryValues:
url = url.add(queryKey, queryValue)
req = inevow.IRequest(ctx)
err = req.args.get('login-failure', ('',))[0]
if 0 < len(err):
error = inevow.IQ(
self.fragment).onePattern(
'error').fillSlots('error', err)
else:
error = ''
ctx.fillSlots("login-action", url)
ctx.fillSlots("error", error) | 0.002921 |
def process_calibration(self, save=True):
"""processes calibration control signal. Determines transfer function
of speaker to get frequency vs. attenuation curve.
:param save: Whether to save this calibration data to file
:type save: bool
:returns: numpy.ndarray, str, int, float -- frequency response (in dB), dataset name, calibration reference frequency, reference intensity
"""
if not self.save_data:
raise Exception("Cannot process an unsaved calibration")
avg_signal = np.mean(self.datafile.get_data(self.current_dataset_name + '/signal'), axis=0)
diffdB = attenuation_curve(self.stimulus.signal()[0], avg_signal,
self.stimulus.samplerate(), self.calf)
logger = logging.getLogger('main')
logger.debug('The maximum dB attenuation is {}, caldB {}'.format(max(diffdB), self.caldb))
# save a vector of only the calibration intensity results
self.datafile.init_data(self.current_dataset_name, mode='calibration',
dims=diffdB.shape,
nested_name='calibration_intensities')
self.datafile.append(self.current_dataset_name, diffdB,
nested_name='calibration_intensities')
relevant_info = {'frequencies': 'all', 'calibration_dB':self.caldb,
'calibration_voltage': self.calv, 'calibration_frequency': self.calf,
}
self.datafile.set_metadata('/'.join([self.current_dataset_name, 'calibration_intensities']),
relevant_info)
mean_reftone = np.mean(self.datafile.get_data(self.current_dataset_name + '/reference_tone'), axis=0)
tone_amp = signal_amplitude(mean_reftone, self.player.get_aifs())
db = calc_db(tone_amp, self.mphonesens, self.mphonedb)
# remove the reference tone from protocol
self.protocol_model.remove(0)
return diffdB, self.current_dataset_name, self.calf, db | 0.00572 |
def stop_all_tensorboards():
"""Terminate all TensorBoard instances."""
for process in Process.instances:
print("Process '%s', running %d" % (process.command[0],
process.is_running()))
if process.is_running() and process.command[0] == "tensorboard":
process.terminate() | 0.002865 |
def _get_subelements(self, node):
"""Gather the sub-elements attached to a node
Gather rdf:Bag and and rdf:Seq into set and list respectively. For
alternate languages values, take the first language only for
simplicity.
"""
items = node.find('rdf:Alt', self.NS)
if items is not None:
try:
return items[0].text
except IndexError:
return ''
for xmlcontainer, container, insertfn in XMP_CONTAINERS:
items = node.find('rdf:{}'.format(xmlcontainer), self.NS)
if items is None:
continue
result = container()
for item in items:
insertfn(result, item.text)
return result
return '' | 0.002519 |
def forwards(self, orm):
"Mark all existing document categories as published."
for category in orm['document_library.DocumentCategory'].objects.all():
category.is_published = True
category.save() | 0.009217 |
def _validate_address(self, address):
"""Confirm that supplied address is a valid URL and
has an `amqp` or `amqps` scheme.
:param address: The endpoint URL.
:type address: str
:rtype: ~urllib.parse.ParseResult
"""
parsed = compat.urlparse(address)
if not parsed.path:
raise ValueError("Invalid {} address: {}".format(
self.__class__.__name__, parsed))
return parsed | 0.004292 |
def convert_ipynbs(directory):
"""Recursively converts all ipynb files in a directory into rst files in
the same directory."""
# The ipython_examples dir has to be in the same dir as this script
for root, subfolders, files in os.walk(os.path.abspath(directory)):
for f in files:
if ".ipynb_checkpoints" not in root:
if f.endswith("ipynb"):
ipynb_to_rst(root, f) | 0.002309 |
def setmode(mode):
"""
You must call this method prior to using all other calls.
:param mode: the mode, one of :py:attr:`GPIO.BOARD`, :py:attr:`GPIO.BCM`,
:py:attr:`GPIO.SUNXI`, or a `dict` or `object` representing a custom
pin mapping.
"""
if hasattr(mode, '__getitem__'):
set_custom_pin_mappings(mode)
mode = CUSTOM
assert mode in [BCM, BOARD, SUNXI, CUSTOM]
global _mode
_mode = mode | 0.002212 |
def apply_weight_drop(block, local_param_regex, rate, axes=(),
weight_dropout_mode='training'):
"""Apply weight drop to the parameter of a block.
Parameters
----------
block : Block or HybridBlock
The block whose parameter is to be applied weight-drop.
local_param_regex : str
The regex for parameter names used in the self.params.get(), such as 'weight'.
rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
axes : tuple of int, default ()
The axes on which dropout mask is shared. If empty, regular dropout is applied.
weight_dropout_mode : {'training', 'always'}, default 'training'
Whether the weight dropout should be applied only at training time, or always be applied.
Examples
--------
>>> net = gluon.rnn.LSTM(10, num_layers=2, bidirectional=True)
>>> gluonnlp.model.apply_weight_drop(net, r'.*h2h_weight', 0.5)
>>> net.collect_params()
lstm0_ (
Parameter lstm0_l0_i2h_weight (shape=(40, 0), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_l0_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_l0_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_l0_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r0_i2h_weight (shape=(40, 0), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_r0_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_r0_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r0_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_l1_i2h_weight (shape=(40, 20), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_l1_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_l1_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_l1_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r1_i2h_weight (shape=(40, 20), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_r1_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_r1_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r1_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
)
>>> ones = mx.nd.ones((3, 4, 5))
>>> net.initialize()
>>> with mx.autograd.train_mode():
... net(ones).max().asscalar() != net(ones).max().asscalar()
True
"""
if not rate:
return
existing_params = _find_params(block, local_param_regex)
for (local_param_name, param), \
(ref_params_list, ref_reg_params_list) in existing_params.items():
dropped_param = WeightDropParameter(param, rate, weight_dropout_mode, axes)
for ref_params in ref_params_list:
ref_params[param.name] = dropped_param
for ref_reg_params in ref_reg_params_list:
ref_reg_params[local_param_name] = dropped_param
if hasattr(block, local_param_name):
local_attr = getattr(block, local_param_name)
if local_attr == param:
local_attr = dropped_param
elif isinstance(local_attr, (list, tuple)):
if isinstance(local_attr, tuple):
local_attr = list(local_attr)
for i, v in enumerate(local_attr):
if v == param:
local_attr[i] = dropped_param
elif isinstance(local_attr, dict):
for k, v in local_attr:
if v == param:
local_attr[k] = dropped_param
else:
continue
if local_attr:
super(Block, block).__setattr__(local_param_name, local_attr) | 0.003483 |
def annot(self, node):
"""Add `node` as an annotation of the receiver."""
self.annots.append(node)
node.parent = self | 0.014184 |
def diff_for_humans(self, other=None, absolute=False, locale=None):
"""
Get the difference in a human readable format in the current locale.
When comparing a value in the past to default now:
1 day ago
5 months ago
When comparing a value in the future to default now:
1 day from now
5 months from now
When comparing a value in the past to another value:
1 day before
5 months before
When comparing a value in the future to another value:
1 day after
5 months after
:type other: Date
:param absolute: removes time difference modifiers ago, after, etc
:type absolute: bool
:param locale: The locale to use for localization
:type locale: str
:rtype: str
"""
is_now = other is None
if is_now:
other = self.today()
diff = self.diff(other)
return pendulum.format_diff(diff, is_now, absolute, locale) | 0.001969 |
def maxcut_qaoa(n_step, edges, minimizer=None, sampler=None, verbose=True):
"""Setup QAOA.
:param n_step: The number of step of QAOA
:param n_sample: The number of sampling times for each measurement in VQE.
If None, use the calculated ideal value.
:param edges: The edges list of the graph.
:returns Vqe object
"""
sampler = sampler or vqe.non_sampling_sampler
minimizer = minimizer or vqe.get_scipy_minimizer(
method="Powell",
options={"ftol": 5.0e-2, "xtol": 5.0e-2, "maxiter": 1000, "disp": True}
)
hamiltonian = pauli.I() * 0
for i, j in edges:
hamiltonian += pauli.Z(i) * pauli.Z(j)
return vqe.Vqe(vqe.QaoaAnsatz(hamiltonian, n_step), minimizer, sampler) | 0.001333 |
def receive(self, request, wait=True, timeout=None):
"""
Polls the message buffer of the TCP connection and waits until a valid
message is received based on the message_id passed in.
:param request: The Request object to wait get the response for
:param wait: Wait for the final response in the case of a
STATUS_PENDING response, the pending response is returned in the
case of wait=False
:param timeout: Set a timeout used while waiting for a response from
the server
:return: SMB2HeaderResponse of the received message
"""
start_time = time.time()
# check if we have received a response
while True:
self._flush_message_buffer()
status = request.response['status'].get_value() if \
request.response else None
if status is not None and (wait and
status != NtStatus.STATUS_PENDING):
break
current_time = time.time() - start_time
if timeout and (current_time > timeout):
error_msg = "Connection timeout of %d seconds exceeded while" \
" waiting for a response from the server" \
% timeout
raise smbprotocol.exceptions.SMBException(error_msg)
response = request.response
status = response['status'].get_value()
if status not in [NtStatus.STATUS_SUCCESS, NtStatus.STATUS_PENDING]:
raise smbprotocol.exceptions.SMBResponseException(response, status)
# now we have a retrieval request for the response, we can delete
# the request from the outstanding requests
message_id = request.message['message_id'].get_value()
del self.outstanding_requests[message_id]
return response | 0.001058 |
def close(self):
"""
Log disconnects.
"""
time_spent = time.time() - self.time_start
logger.debug(
'<{}> Disconnect from [{}]:{}, time spent {:.3f} seconds'.format(
self.id, self.client_ip, self.client_port, time_spent))
return Milter.CONTINUE | 0.00625 |
def format_output(data, headers, format_name, **kwargs):
"""Format output using *format_name*.
This is a wrapper around the :class:`TabularOutputFormatter` class.
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param str format_name: The display format to use.
:param \*\*kwargs: Optional arguments for the formatter.
:return: The formatted data.
:rtype: str
"""
formatter = TabularOutputFormatter(format_name=format_name)
return formatter.format_output(data, headers, **kwargs) | 0.005119 |
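A hedged usage sketch; 'ascii' is assumed to be one of the format names the underlying TabularOutputFormatter accepts.
rows = [(1, 'alice'), (2, 'bob')]
print(format_output(rows, ['id', 'name'], 'ascii'))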
def sci(x, digs):
"""Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
and exactly one digit before.
If digs is <= 0, one digit is kept and the point is suppressed."""
if type(x) != type(''): x = repr(x)
sign, intpart, fraction, expo = extract(x)
if not intpart:
while fraction and fraction[0] == '0':
fraction = fraction[1:]
expo = expo - 1
if fraction:
intpart, fraction = fraction[0], fraction[1:]
expo = expo - 1
else:
intpart = '0'
else:
expo = expo + len(intpart) - 1
intpart, fraction = intpart[0], intpart[1:] + fraction
digs = max(0, digs)
intpart, fraction = roundfrac(intpart, fraction, digs)
if len(intpart) > 1:
intpart, fraction, expo = \
intpart[0], intpart[1:] + fraction[:-1], \
expo + len(intpart) - 1
s = sign + intpart
if digs > 0: s = s + '.' + fraction
e = repr(abs(expo))
e = '0'*(3-len(e)) + e
if expo < 0: e = '-' + e
else: e = '+' + e
return s + 'e' + e | 0.005489 |
def transform(self, fn, column=None, dtype=None):
"""Execute a transformation on a column or columns. Returns the modified
DictRDD.
Parameters
----------
f : function
The function to execute on the columns.
column : {str, list or None}
The column(s) to transform. If None is specified the method is
equivalent to map.
dtype : {str, list or None}
The dtype of the column(s) to transform.
Returns
-------
result : DictRDD
DictRDD with transformed column(s).
TODO: optimize
"""
dtypes = self.dtype
if column is None:
indices = list(range(len(self.columns)))
else:
if not type(column) in (list, tuple):
column = [column]
indices = [self.columns.index(c) for c in column]
if dtype is not None:
if not type(dtype) in (list, tuple):
dtype = [dtype]
dtypes = [dtype[indices.index(i)] if i in indices else t
for i, t in enumerate(self.dtype)]
def mapper(values):
result = fn(*[values[i] for i in indices])
if len(indices) == 1:
result = (result,)
elif not isinstance(result, (tuple, list)):
raise ValueError("Transformer function must return an"
" iterable!")
elif len(result) != len(indices):
raise ValueError("Transformer result's length must be"
" equal to the given columns length!")
return tuple(result[indices.index(i)] if i in indices else v
for i, v in enumerate(values))
return DictRDD(self._rdd.map(mapper),
columns=self.columns, dtype=dtypes,
bsize=self.bsize, noblock=True) | 0.00154 |
def port_knock_tcp(self, host="localhost", port=22, timeout=15):
"""Open a TCP socket to check for a listening service on a host.
:param host: host name or IP address, default to localhost
:param port: TCP port number, default to 22
:param timeout: Connect timeout, default to 15 seconds
:returns: True if successful, False if connect failed
"""
# Resolve host name if possible
try:
connect_host = socket.gethostbyname(host)
host_human = "{} ({})".format(connect_host, host)
except socket.error as e:
self.log.warn('Unable to resolve address: '
'{} ({}) Trying anyway!'.format(host, e))
connect_host = host
host_human = connect_host
# Attempt socket connection
try:
knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
knock.settimeout(timeout)
knock.connect((connect_host, port))
knock.close()
self.log.debug('Socket connect OK for host '
'{} on port {}.'.format(host_human, port))
return True
except socket.error as e:
self.log.debug('Socket connect FAIL for'
' {} port {} ({})'.format(host_human, port, e))
return False | 0.00147 |
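A hedged usage sketch; the address is illustrative, and `utils` is assumed to be an instance of the class that defines port_knock_tcp.
if utils.port_knock_tcp('10.0.3.1', 22, timeout=5):
    print('SSH is listening on 10.0.3.1')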
def csvSplit(line, delim=',', allowEol=True):
""" Take a string as input (e.g. a line in a csv text file), and break
it into tokens separated by commas while ignoring commas embedded inside
quoted sections. This is exactly what the 'csv' module is meant for, so
we *should* be using it, save that it has two bugs (described next) which
limit our use of it. When these bugs are fixed, this function should be
forsaken in favor of direct use of the csv module (or similar).
The basic use case is to split a function signature string, so for:
afunc(arg1='str1', arg2='str, with, embedded, commas', arg3=7)
we want a 3 element sequence:
["arg1='str1'", "arg2='str, with, embedded, commas'", "arg3=7"]
but:
>>> import csv
>>> y = "arg1='str1', arg2='str, with, embedded, commas', arg3=7"
>>> rdr = csv.reader( (y,), dialect='excel', quotechar="'", skipinitialspace=True)
>>> l = rdr.next(); print(len(l), str(l)) # doctest: +SKIP
6 ["arg1='str1'", "arg2='str", 'with', 'embedded', "commas'", "arg3=7"]
which we can see is not correct - we wanted 3 tokens. This occurs in
Python 2.5.2 and 2.6. It seems to be due to the text at the start of each
token ("arg1=") i.e. because the quote isn't for the whole token. If we
were to remove the names of the args and the equal signs, it works:
>>> x = "'str1', 'str, with, embedded, commas', 7"
>>> rdr = csv.reader( (x,), dialect='excel', quotechar="'", skipinitialspace=True)
>>> l = rdr.next(); print(len(l), str(l)) # doctest: +SKIP
3 ['str1', 'str, with, embedded, commas', '7']
But even this usage is delicate - when we turn off skipinitialspace, it
fails:
>>> x = "'str1', 'str, with, embedded, commas', 7"
>>> rdr = csv.reader( (x,), dialect='excel', quotechar="'")
>>> l = rdr.next(); print(len(l), str(l)) # doctest: +SKIP
6 ['str1', " 'str", ' with', ' embedded', " commas'", ' 7']
So, for now, we'll roll our own.
"""
# Algorithm: read chars left to right, go from delimiter to delimiter,
# but as soon as a single/double/triple quote is hit, scan forward
# (ignoring all else) until its matching end-quote is found.
# For now, we will not specially handle escaped quotes.
tokens = []
ldl = len(delim)
keepOnRollin = line is not None and len(line) > 0
while keepOnRollin:
tok = _getCharsUntil(line, delim, True, allowEol=allowEol)
# len of token should always be > 0 because it includes end delimiter
# except on last token
if len(tok) > 0:
# append it, but without the delimiter
if tok[-ldl:] == delim:
tokens.append(tok[:-ldl])
else:
tokens.append(tok) # tok goes to EOL - has no delimiter
keepOnRollin = False
line = line[len(tok):]
else:
# This is the case of the empty end token
tokens.append('')
keepOnRollin = False
return tokens | 0.001314 |
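A usage sketch built on the signature-string example from the docstring above; the exact whitespace kept in each token depends on the helper _getCharsUntil, which is not shown here:

line = "arg1='str1', arg2='str, with, embedded, commas', arg3=7"
tokens = csvSplit(line, delim=',')
print(len(tokens))   # 3 tokens; the commas inside the quoted section are not treated as delimiters
print(tokens)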
async def create(self, token):
"""Creates a new token with a given policy
Parameters:
token (Object): Token specification
Returns:
Object: token ID
The create endpoint is used to make a new token.
A token has a name, a type, and a set of ACL rules.
The request body may take the form::
{
"Name": "my-app-token",
"Type": "client",
"Rules": ""
}
None of the fields are mandatory. The **Name** and **Rules** fields
default to being blank, and the **Type** defaults to "client".
**Name** is opaque to Consul. To aid human operators, it should
be a meaningful indicator of the ACL's purpose.
**Type** is either **client** or **management**. A management token
is comparable to a root user and has the ability to perform any action
including creating, modifying and deleting ACLs.
**ID** field may be provided, and if omitted a random UUID will be
generated.
The format of **Rules** is
`documented here <https://www.consul.io/docs/internals/acl.html>`_.
A successful response body will return the **ID** of the newly
created ACL, like so::
{
"ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
}
"""
token = encode_token(token)
response = await self._api.put("/v1/acl/create", data=token)
return response.body | 0.00131 |
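A hedged usage sketch, assuming an asynchronous Consul client instance that exposes the coroutine above as client.acl.create (the surrounding client class is not part of this snippet):

import asyncio

async def make_token(client):
    # Returns the ID of the newly created ACL, per the response shape
    # documented above.
    result = await client.acl.create({
        "Name": "my-app-token",
        "Type": "client",
        "Rules": "",
    })
    return result["ID"]

# asyncio.get_event_loop().run_until_complete(make_token(client))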
def configure(cls, impl, **kwargs):
# type: (Any, **Any) -> None
"""Sets the class to use when the base class is instantiated.
Keyword arguments will be saved and added to the arguments passed
to the constructor. This can be used to set global defaults for
some parameters.
"""
base = cls.configurable_base()
if isinstance(impl, (str, unicode_type)):
impl = import_object(impl)
if impl is not None and not issubclass(impl, cls):
raise ValueError("Invalid subclass of %s" % cls)
base.__impl_class = impl
base.__impl_kwargs = kwargs | 0.004658 |
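A hedged sketch of the intended call pattern; the class names are illustrative and assume ConnectionBase inherits from the configurable base class that defines configure() above:

# Select the concrete implementation and default constructor kwargs once.
ConnectionBase.configure(PooledConnection, pool_size=10)

# Later instantiations of the base then build the configured subclass with
# pool_size=10 merged into their keyword arguments.
conn = ConnectionBase()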
def del_feature(self, pr_name):
""" Permanently deletes a node's feature."""
if hasattr(self, pr_name):
delattr(self, pr_name)
self.features.remove(pr_name) | 0.010204 |
def predictions_and_gradient(
self, image=None, label=None, strict=True, return_details=False):
"""Interface to model.predictions_and_gradient for attacks.
Parameters
----------
image : `numpy.ndarray`
Single input with shape as expected by the model
(without the batch dimension).
Defaults to the original image.
label : int
Label used to calculate the loss that is differentiated.
Defaults to the original label.
        strict : bool
            Controls if the bounds for the pixel values should be checked.
        return_details : bool
            If True, additionally return whether the input is adversarial,
            whether it is the best adversarial example found so far, and
            its distance to the original input.
        """
assert self.has_gradient()
if image is None:
image = self.__original_image
if label is None:
label = self.__original_class
in_bounds = self.in_bounds(image)
assert not strict or in_bounds
self._total_prediction_calls += 1
self._total_gradient_calls += 1
predictions, gradient = self.__model.predictions_and_gradient(image, label) # noqa: E501
is_adversarial, is_best, distance = self.__is_adversarial(
image, predictions, in_bounds)
assert predictions.ndim == 1
assert gradient.shape == image.shape
if return_details:
return predictions, gradient, is_adversarial, is_best, distance
else:
return predictions, gradient, is_adversarial | 0.00139 |
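A hedged sketch of how an attack loop might consume this interface; adv is assumed to be an instance of the (unnamed) class above and original_image a copy of its input, neither of which is constructed here:

import numpy as np

predictions, gradient = adv.predictions_and_gradient()
perturbed = original_image + 0.01 * np.sign(gradient)   # one FGSM-style step
# Re-query on the perturbed input; strict=False because clipping to the
# model's bounds is omitted in this sketch.
predictions, gradient, is_adversarial, is_best, distance = \
    adv.predictions_and_gradient(image=perturbed, strict=False,
                                 return_details=True)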
def check_type_of_param_list_elements(param_list):
"""
    Ensures that param_list[0] is a numpy array and that all remaining
    elements are numpy arrays or None. Raises a helpful TypeError otherwise.
"""
try:
assert isinstance(param_list[0], np.ndarray)
assert all([(x is None or isinstance(x, np.ndarray))
for x in param_list])
except AssertionError:
msg = "param_list[0] must be a numpy array."
msg_2 = "All other elements must be numpy arrays or None."
total_msg = msg + "\n" + msg_2
raise TypeError(total_msg)
return None | 0.001721 |
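A small usage sketch showing an accepted input and the TypeError raised for a rejected one:

import numpy as np

check_type_of_param_list_elements([np.zeros(3), None, np.ones(2)])  # passes
try:
    check_type_of_param_list_elements([None, np.zeros(3)])  # first element must be an array
except TypeError as error:
    print(error)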
def get_element(self, line, column):
"""Gets the instance of the element who owns the specified line
and column."""
ichar = self.charindex(line, column)
icontains = self.contains_index
result = None
if line < icontains:
#We only need to search through the types and members.
maxstart = 0
tempresult = None
for t in self.types:
if ichar >= self.types[t].absstart:
if self.types[t].absstart > maxstart:
maxstart = self.types[t].absstart
tempresult = self.types[t]
#This takes the possibility of an incomplete type into account
if (tempresult is not None and (ichar <= tempresult.end or
tempresult.incomplete)):
result = tempresult
if not result:
#Members only span a single line usually and don't get added
#without an end token.
for m in self.members:
if (ichar >= self.members[m].start and
ichar <= self.members[m].end):
result = self.members[m]
break
else:
#We only need to search through the executables
tempresult = None
maxstart = 0
for iexec in self.executables:
if (ichar >= self.executables[iexec].absstart):
if self.executables[iexec].absstart > maxstart:
maxstart = self.executables[iexec].absstart
tempresult = self.executables[iexec]
if tempresult is not None and (ichar <= tempresult.end or
tempresult.incomplete):
result = tempresult
if result is None:
#We must be in the text of the module, return the module
return self
else:
return result | 0.005392 |
def _sample_action_fluent(self,
name: str,
dtype: tf.DType,
size: Sequence[int],
constraints: Dict[str, Constraints],
default_value: tf.Tensor,
prob: float) -> tf.Tensor:
'''Samples the action fluent with given `name`, `dtype`, and `size`.
With probability `prob` it chooses the action fluent `default_value`,
with probability 1-`prob` it samples the fluent w.r.t. its `constraints`.
Args:
name (str): The name of the action fluent.
dtype (tf.DType): The data type of the action fluent.
size (Sequence[int]): The size and shape of the action fluent.
constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.
default_value (tf.Tensor): The default value for the action fluent.
prob (float): A probability measure.
Returns:
tf.Tensor: A tensor for sampling the action fluent.
'''
shape = [self.batch_size] + list(size)
if dtype == tf.float32:
bounds = constraints.get(name)
if bounds is None:
low, high = -self.MAX_REAL_VALUE, self.MAX_REAL_VALUE
dist = tf.distributions.Uniform(low=low, high=high)
sampled_fluent = dist.sample(shape)
else:
low, high = bounds
batch = (low is not None and low.batch) or (high is not None and high.batch)
low = tf.cast(low.tensor, tf.float32) if low is not None else -self.MAX_REAL_VALUE
high = tf.cast(high.tensor, tf.float32) if high is not None else self.MAX_REAL_VALUE
dist = tf.distributions.Uniform(low=low, high=high)
if batch:
sampled_fluent = dist.sample()
elif isinstance(low, tf.Tensor) or isinstance(high, tf.Tensor):
if (low+high).shape.as_list() == list(size):
sampled_fluent = dist.sample([self.batch_size])
else:
raise ValueError('bounds are not compatible with action fluent.')
else:
sampled_fluent = dist.sample(shape)
elif dtype == tf.int32:
logits = [1.0] * self.MAX_INT_VALUE
dist = tf.distributions.Categorical(logits=logits, dtype=tf.int32)
sampled_fluent = dist.sample(shape)
elif dtype == tf.bool:
probs = 0.5
dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)
sampled_fluent = dist.sample(shape)
        select_default = tf.distributions.Bernoulli(probs=prob, dtype=tf.bool).sample(self.batch_size)
action_fluent = tf.where(select_default, default_value, sampled_fluent)
return action_fluent | 0.00521 |
def gene_list(list_id=None):
"""Display or add a gene list."""
all_case_ids = [case.case_id for case in app.db.cases()]
if list_id:
        genelist_obj = app.db.gene_list(list_id)
        if genelist_obj is None:
            return abort(404, "gene list not found: {}".format(list_id))
        case_ids = [case.case_id for case in app.db.cases()
                    if case not in genelist_obj.cases]
if 'download' in request.args:
response = make_response('\n'.join(genelist_obj.gene_ids))
filename = secure_filename("{}.txt".format(genelist_obj.list_id))
header = "attachment; filename={}".format(filename)
response.headers['Content-Disposition'] = header
return response
if request.method == 'POST':
if list_id:
# link a case to the gene list
case_ids = request.form.getlist('case_id')
for case_id in case_ids:
case_obj = app.db.case(case_id)
if case_obj not in genelist_obj.cases:
genelist_obj.cases.append(case_obj)
app.db.save()
else:
# upload a new gene list
req_file = request.files['file']
new_listid = (request.form['list_id'] or
secure_filename(req_file.filename))
if app.db.gene_list(new_listid):
return abort(500, 'Please provide a unique list name')
if not req_file:
return abort(500, 'Please provide a file for upload')
gene_ids = [line for line in req_file.stream
if not line.startswith('#')]
genelist_obj = app.db.add_genelist(new_listid, gene_ids)
case_ids = all_case_ids
return render_template('gene_list.html', gene_list=genelist_obj,
case_ids=case_ids) | 0.000534 |
def prepare(self):
'''
Run the preparation sequence required to start a salt syndic minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(Syndic, self).prepare()
try:
if self.config['verify_env']:
verify_env(
[
self.config['pki_dir'],
self.config['cachedir'],
self.config['sock_dir'],
self.config['extension_modules'],
],
self.config['user'],
permissive=self.config['permissive_pki_access'],
root_dir=self.config['root_dir'],
pki_dir=self.config['pki_dir'],
)
except OSError as error:
self.environment_failure(error)
self.setup_logfile_logger()
verify_log(self.config)
self.action_log_info('Setting up "{0}"'.format(self.config['id']))
# Late import so logging works correctly
import salt.minion
self.daemonize_if_required()
self.syndic = salt.minion.SyndicManager(self.config)
self.set_pidfile() | 0.0016 |
def Decompress(self, compressed_data):
"""Decompresses the compressed data.
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
Raises:
BackEndError: if the BZIP2 compressed stream cannot be decompressed.
"""
try:
uncompressed_data = self._bz2_decompressor.decompress(compressed_data)
remaining_compressed_data = getattr(
self._bz2_decompressor, 'unused_data', b'')
except (EOFError, IOError) as exception:
raise errors.BackEndError((
'Unable to decompress BZIP2 compressed stream with error: '
'{0!s}.').format(exception))
return uncompressed_data, remaining_compressed_data | 0.005312 |
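For reference, a self-contained sketch of the incremental bz2 pattern this method wraps, using only the Python standard library:

import bz2

compressed = bz2.compress(b"example payload " * 64)
decompressor = bz2.BZ2Decompressor()

# Feed the stream in chunks, mirroring the decompress/unused_data handling above.
output = b""
for offset in range(0, len(compressed), 128):
    output += decompressor.decompress(compressed[offset:offset + 128])
remaining = decompressor.unused_data   # bytes past the end of the bz2 stream, if any
assert output == b"example payload " * 64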
def data_parallel(batch_group: List[TensorDict],
model: Model,
cuda_devices: List) -> Dict[str, torch.Tensor]:
"""
Performs a forward pass using multiple GPUs. This is a simplification
of torch.nn.parallel.data_parallel to support the allennlp model
interface.
"""
assert len(batch_group) <= len(cuda_devices)
moved = [nn_util.move_to_device(batch, device)
for batch, device in zip(batch_group, cuda_devices)]
used_device_ids = cuda_devices[:len(moved)]
# Counterintuitively, it appears replicate expects the source device id to be the first element
# in the device id list. See torch.cuda.comm.broadcast_coalesced, which is called indirectly.
replicas = replicate(model, used_device_ids)
# We pass all our arguments as kwargs. Create a list of empty tuples of the
# correct shape to serve as (non-existent) positional arguments.
inputs = [()] * len(batch_group)
outputs = parallel_apply(replicas, inputs, moved, used_device_ids)
# Only the 'loss' is needed.
# a (num_gpu, ) tensor with loss on each GPU
losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
return {'loss': losses.mean()} | 0.003185 |
def xviewSlot(*typs, **opts):
"""
Defines a method as being a slot for the XView system. This will validate
the method against the signal properties if it is triggered from the
dispatcher, taking into account currency and grouping for the widget.
You can specify the optional policy keyword to define the specific signal
policy for this slot, otherwise it will use its parent view's policy.
:param default | <variant> | default return value
policy | <XView.SignalPolicy> || None
:usage |from projexui.widgets.xviewwidget import xviewSlot
|
|class A(XView):
| @xviewSlot()
| def format( self ):
| print 'test'
"""
default = opts.get('default')
policy = opts.get('policy')
if typs:
typ_count = len(typs)
else:
typ_count = 0
def decorated(func):
@wraps(func)
def wrapped(*args, **kwds):
if ( args and isinstance(args[0], XView) ):
validated = args[0].validateSignal(policy)
else:
validated = True
if ( validated ):
new_args = args[:typ_count+1]
return func(*new_args, **kwds)
return default
return wrapped
return decorated | 0.00859 |
def _add_spanning_relation(self, source, target):
"""add a spanning relation to this docgraph"""
self.add_edge(source, target, layers={self.ns, self.ns+':unit'},
edge_type=EdgeTypes.spanning_relation) | 0.008403 |
def get_spot_price_history(self, start_time=None, end_time=None,
instance_type=None, product_description=None,
availability_zone=None):
"""
Retrieve the recent history of spot instances pricing.
:type start_time: str
:param start_time: An indication of how far back to provide price
changes for. An ISO8601 DateTime string.
:type end_time: str
:param end_time: An indication of how far forward to provide price
changes for. An ISO8601 DateTime string.
:type instance_type: str
:param instance_type: Filter responses to a particular instance type.
:type product_description: str
:param product_description: Filter responses to a particular platform.
Valid values are currently: "Linux/UNIX",
"SUSE Linux", and "Windows"
:type availability_zone: str
:param availability_zone: The availability zone for which prices
should be returned
:rtype: list
        :return: A list of SpotPriceHistory objects, each with a price and timestamp.
"""
params = {}
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if instance_type:
params['InstanceType'] = instance_type
if product_description:
params['ProductDescription'] = product_description
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_list('DescribeSpotPriceHistory', params,
[('item', SpotPriceHistory)], verb='POST') | 0.002225 |
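A hedged usage sketch with boto's EC2 connection; it assumes AWS credentials are already configured and that the region named below is available:

import boto.ec2

conn = boto.ec2.connect_to_region("us-east-1")
history = conn.get_spot_price_history(
    instance_type="m1.small",
    product_description="Linux/UNIX",
    availability_zone="us-east-1a")
for entry in history:
    print(entry.timestamp, entry.price)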
def set_head(self, node):
"""Link this node to the current leaves."""
for head in self.head:
head.next.add(node)
self.head[:] = []
self.head.append(node) | 0.011429 |
def calc_qib2_v1(self):
"""Calculate the first inflow component released from the soil.
Required control parameters:
|NHRU|
|Lnk|
|NFk|
|DMin|
|DMax|
Required derived parameter:
|WZ|
Required state sequence:
|BoWa|
Calculated flux sequence:
|QIB2|
Basic equation:
:math:`QIB2 = (DMax-DMin) \\cdot
(\\frac{BoWa-WZ}{NFk-WZ})^\\frac{3}{2}`
Examples:
For water and sealed areas, no interflow is calculated (the first
three HRUs are of type |FLUSS|, |SEE|, and |VERS|, respectively).
No principal distinction is made between the remaining land use
classes (arable land |ACKER| has been selected for the last
five HRUs arbitrarily):
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> simulationstep('12h')
>>> nhru(8)
>>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER, ACKER)
>>> dmax(10.0)
>>> dmin(4.0)
>>> nfk(100.0, 100.0, 100.0, 50.0, 100.0, 100.0, 100.0, 200.0)
>>> derived.wz(50.0)
>>> states.bowa = 100.0, 100.0, 100.0, 50.1, 50.0, 75.0, 100.0, 100.0
Note the time dependence of parameters |DMin| (see the example above)
and |DMax|:
>>> dmax
dmax(10.0)
>>> dmax.values
array([ 5., 5., 5., 5., 5., 5., 5., 5.])
        The following results show that the calculation of |QIB2|
        resembles those of both |QBB| and |QIB1| in some regards:
>>> model.calc_qib2_v1()
>>> fluxes.qib2
qib2(0.0, 0.0, 0.0, 0.0, 0.0, 1.06066, 3.0, 0.57735)
In the given example, the maximum rate of total interflow
generation is 5 mm/12h (parameter |DMax|). For the seventh zone,
which contains a saturated soil, the value calculated for the
        second interflow component (|QIB2|) is 3 mm/12h. The "missing"
        value of 2 mm/12h is calculated by method |calc_qib1_v1|.
(The fourth zone, which is slightly oversaturated, is only intended
to demonstrate that zero division due to |NFk| = |WZ| is circumvented.)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
sta = self.sequences.states.fastaccess
for k in range(con.nhru):
if ((con.lnk[k] in (VERS, WASSER, FLUSS, SEE)) or
(sta.bowa[k] <= der.wz[k]) or (con.nfk[k] <= der.wz[k])):
flu.qib2[k] = 0.
else:
flu.qib2[k] = ((con.dmax[k]-con.dmin[k]) *
((sta.bowa[k]-der.wz[k]) /
(con.nfk[k]-der.wz[k]))**1.5) | 0.000368 |
def findTypeParent(element, tag):
""" Finds fist parent of element of the given type
@param object element: etree element
@param string the tag parent to search for
@return object element: the found parent or None when not found
"""
    p = element
    while True:
        p = p.getparent()
        if p is None:
            break
        if p.tag == tag:
            return p
    # Not found
    return None | 0.002551 |
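A small usage sketch, assuming lxml (whose elements provide the getparent() call the helper relies on):

from lxml import etree

root = etree.Element("document")
body = etree.SubElement(root, "body")
paragraph = etree.SubElement(body, "p")
run = etree.SubElement(paragraph, "r")

print(findTypeParent(run, "body"))      # the <body> element
print(findTypeParent(run, "missing"))   # None, no such ancestor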