text (string, lengths 75-104k) | code_tokens (list) | avg_line_len (float64, 7.91-980) | score (float64, 0-0.18)
---|---|---|---|
def drop_index(self, raw):
""" Executes a drop index command.
{ "op" : "c",
"ns" : "testdb.$cmd",
"o" : { "dropIndexes" : "testcoll",
"index" : "nuie_1" } }
"""
dbname = raw['ns'].split('.', 1)[0]
collname = raw['o']['dropIndexes']
self.dest[dbname][collname].drop_index(raw['o']['index']) | [
"def",
"drop_index",
"(",
"self",
",",
"raw",
")",
":",
"dbname",
"=",
"raw",
"[",
"'ns'",
"]",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"collname",
"=",
"raw",
"[",
"'o'",
"]",
"[",
"'dropIndexes'",
"]",
"self",
".",
"dest",
"[",
"dbname",
"]",
"[",
"collname",
"]",
".",
"drop_index",
"(",
"raw",
"[",
"'o'",
"]",
"[",
"'index'",
"]",
")"
] | 34.181818 | 0.010363 |
def formatted_completion_sig(completion):
"""Regenerate signature for methods. Return just the name otherwise"""
f_result = completion["name"]
if is_basic_type(completion):
# It's a raw type
return f_result
elif len(completion["typeInfo"]["paramSections"]) == 0:
return f_result
# It's a function type
sections = completion["typeInfo"]["paramSections"]
f_sections = [formatted_param_section(ps) for ps in sections]
return u"{}{}".format(f_result, "".join(f_sections)) | [
"def",
"formatted_completion_sig",
"(",
"completion",
")",
":",
"f_result",
"=",
"completion",
"[",
"\"name\"",
"]",
"if",
"is_basic_type",
"(",
"completion",
")",
":",
"# It's a raw type",
"return",
"f_result",
"elif",
"len",
"(",
"completion",
"[",
"\"typeInfo\"",
"]",
"[",
"\"paramSections\"",
"]",
")",
"==",
"0",
":",
"return",
"f_result",
"# It's a function type",
"sections",
"=",
"completion",
"[",
"\"typeInfo\"",
"]",
"[",
"\"paramSections\"",
"]",
"f_sections",
"=",
"[",
"formatted_param_section",
"(",
"ps",
")",
"for",
"ps",
"in",
"sections",
"]",
"return",
"u\"{}{}\"",
".",
"format",
"(",
"f_result",
",",
"\"\"",
".",
"join",
"(",
"f_sections",
")",
")"
] | 39.384615 | 0.001908 |
def R_isrk(self, k):
"""
Return the inverse square root of the R matrix at step k.
"""
ind = int(self.index[self.R_time_var_index, k])
R = self.R[:, :, ind]
if (R.shape[0] == 1): # 1-D case handled more simply. No storage
# of the result, just compute it each time.
inv_square_root = np.sqrt(1.0/R)
else:
if self.svd_each_time:
(U, S, Vh) = sp.linalg.svd(R, full_matrices=False,
compute_uv=True, overwrite_a=False,
check_finite=True)
inv_square_root = U * 1.0/np.sqrt(S)
else:
if ind in self.R_square_root:
inv_square_root = self.R_square_root[ind]
else:
(U, S, Vh) = sp.linalg.svd(R, full_matrices=False,
compute_uv=True,
overwrite_a=False,
check_finite=True)
inv_square_root = U * 1.0/np.sqrt(S)
self.R_square_root[ind] = inv_square_root
return inv_square_root | [
"def",
"R_isrk",
"(",
"self",
",",
"k",
")",
":",
"ind",
"=",
"int",
"(",
"self",
".",
"index",
"[",
"self",
".",
"R_time_var_index",
",",
"k",
"]",
")",
"R",
"=",
"self",
".",
"R",
"[",
":",
",",
":",
",",
"ind",
"]",
"if",
"(",
"R",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
")",
":",
"# 1-D case handle simplier. No storage",
"# of the result, just compute it each time.",
"inv_square_root",
"=",
"np",
".",
"sqrt",
"(",
"1.0",
"/",
"R",
")",
"else",
":",
"if",
"self",
".",
"svd_each_time",
":",
"(",
"U",
",",
"S",
",",
"Vh",
")",
"=",
"sp",
".",
"linalg",
".",
"svd",
"(",
"R",
",",
"full_matrices",
"=",
"False",
",",
"compute_uv",
"=",
"True",
",",
"overwrite_a",
"=",
"False",
",",
"check_finite",
"=",
"True",
")",
"inv_square_root",
"=",
"U",
"*",
"1.0",
"/",
"np",
".",
"sqrt",
"(",
"S",
")",
"else",
":",
"if",
"ind",
"in",
"self",
".",
"R_square_root",
":",
"inv_square_root",
"=",
"self",
".",
"R_square_root",
"[",
"ind",
"]",
"else",
":",
"(",
"U",
",",
"S",
",",
"Vh",
")",
"=",
"sp",
".",
"linalg",
".",
"svd",
"(",
"R",
",",
"full_matrices",
"=",
"False",
",",
"compute_uv",
"=",
"True",
",",
"overwrite_a",
"=",
"False",
",",
"check_finite",
"=",
"True",
")",
"inv_square_root",
"=",
"U",
"*",
"1.0",
"/",
"np",
".",
"sqrt",
"(",
"S",
")",
"self",
".",
"R_square_root",
"[",
"ind",
"]",
"=",
"inv_square_root",
"return",
"inv_square_root"
] | 38.21875 | 0.001595 |
def last_seen_utc(self) -> Optional[datetime]:
"""Timestamp when the story has last been watched or None (UTC)."""
if self._node['seen']:
return datetime.utcfromtimestamp(self._node['seen']) | [
"def",
"last_seen_utc",
"(",
"self",
")",
"->",
"Optional",
"[",
"datetime",
"]",
":",
"if",
"self",
".",
"_node",
"[",
"'seen'",
"]",
":",
"return",
"datetime",
".",
"utcfromtimestamp",
"(",
"self",
".",
"_node",
"[",
"'seen'",
"]",
")"
] | 53.75 | 0.009174 |
def compile_format(self, log_format: str) -> Tuple[str, List[KeyMethod]]:
"""Translate log_format into form usable by modulo formatting
All known atoms will be replaced with %s
Also methods for formatting of those atoms will be added to
_methods in appropriate order
For example we have log_format = "%a %t"
This format will be translated to "%s %s"
Also contents of _methods will be
[self._format_a, self._format_t]
These method will be called and results will be passed
to translated string format.
Each _format_* method receive 'args' which is list of arguments
given to self.log
Exceptions are _format_e, _format_i and _format_o methods which
also receive key name (by functools.partial)
"""
# list of (key, method) tuples, we don't use an OrderedDict as users
# can repeat the same key more than once
methods = list()
for atom in self.FORMAT_RE.findall(log_format):
if atom[1] == '':
format_key1 = self.LOG_FORMAT_MAP[atom[0]]
m = getattr(AccessLogger, '_format_%s' % atom[0])
key_method = KeyMethod(format_key1, m)
else:
format_key2 = (self.LOG_FORMAT_MAP[atom[2]], atom[1])
m = getattr(AccessLogger, '_format_%s' % atom[2])
key_method = KeyMethod(format_key2,
functools.partial(m, atom[1]))
methods.append(key_method)
log_format = self.FORMAT_RE.sub(r'%s', log_format)
log_format = self.CLEANUP_RE.sub(r'%\1', log_format)
return log_format, methods | [
"def",
"compile_format",
"(",
"self",
",",
"log_format",
":",
"str",
")",
"->",
"Tuple",
"[",
"str",
",",
"List",
"[",
"KeyMethod",
"]",
"]",
":",
"# list of (key, method) tuples, we don't use an OrderedDict as users",
"# can repeat the same key more than once",
"methods",
"=",
"list",
"(",
")",
"for",
"atom",
"in",
"self",
".",
"FORMAT_RE",
".",
"findall",
"(",
"log_format",
")",
":",
"if",
"atom",
"[",
"1",
"]",
"==",
"''",
":",
"format_key1",
"=",
"self",
".",
"LOG_FORMAT_MAP",
"[",
"atom",
"[",
"0",
"]",
"]",
"m",
"=",
"getattr",
"(",
"AccessLogger",
",",
"'_format_%s'",
"%",
"atom",
"[",
"0",
"]",
")",
"key_method",
"=",
"KeyMethod",
"(",
"format_key1",
",",
"m",
")",
"else",
":",
"format_key2",
"=",
"(",
"self",
".",
"LOG_FORMAT_MAP",
"[",
"atom",
"[",
"2",
"]",
"]",
",",
"atom",
"[",
"1",
"]",
")",
"m",
"=",
"getattr",
"(",
"AccessLogger",
",",
"'_format_%s'",
"%",
"atom",
"[",
"2",
"]",
")",
"key_method",
"=",
"KeyMethod",
"(",
"format_key2",
",",
"functools",
".",
"partial",
"(",
"m",
",",
"atom",
"[",
"1",
"]",
")",
")",
"methods",
".",
"append",
"(",
"key_method",
")",
"log_format",
"=",
"self",
".",
"FORMAT_RE",
".",
"sub",
"(",
"r'%s'",
",",
"log_format",
")",
"log_format",
"=",
"self",
".",
"CLEANUP_RE",
".",
"sub",
"(",
"r'%\\1'",
",",
"log_format",
")",
"return",
"log_format",
",",
"methods"
] | 40.731707 | 0.00117 |
async def process_frame(self, frame):
"""Update nodes via frame, usually received by house monitor."""
if isinstance(frame, FrameNodeStatePositionChangedNotification):
if frame.node_id not in self.pyvlx.nodes:
return
node = self.pyvlx.nodes[frame.node_id]
if isinstance(node, OpeningDevice):
node.position = Position(frame.current_position)
await node.after_update()
elif isinstance(frame, FrameGetAllNodesInformationNotification):
if frame.node_id not in self.pyvlx.nodes:
return
node = self.pyvlx.nodes[frame.node_id]
if isinstance(node, OpeningDevice):
node.position = Position(frame.current_position)
await node.after_update() | [
"async",
"def",
"process_frame",
"(",
"self",
",",
"frame",
")",
":",
"if",
"isinstance",
"(",
"frame",
",",
"FrameNodeStatePositionChangedNotification",
")",
":",
"if",
"frame",
".",
"node_id",
"not",
"in",
"self",
".",
"pyvlx",
".",
"nodes",
":",
"return",
"node",
"=",
"self",
".",
"pyvlx",
".",
"nodes",
"[",
"frame",
".",
"node_id",
"]",
"if",
"isinstance",
"(",
"node",
",",
"OpeningDevice",
")",
":",
"node",
".",
"position",
"=",
"Position",
"(",
"frame",
".",
"current_position",
")",
"await",
"node",
".",
"after_update",
"(",
")",
"elif",
"isinstance",
"(",
"frame",
",",
"FrameGetAllNodesInformationNotification",
")",
":",
"if",
"frame",
".",
"node_id",
"not",
"in",
"self",
".",
"pyvlx",
".",
"nodes",
":",
"return",
"node",
"=",
"self",
".",
"pyvlx",
".",
"nodes",
"[",
"frame",
".",
"node_id",
"]",
"if",
"isinstance",
"(",
"node",
",",
"OpeningDevice",
")",
":",
"node",
".",
"position",
"=",
"Position",
"(",
"frame",
".",
"current_position",
")",
"await",
"node",
".",
"after_update",
"(",
")"
] | 50.4375 | 0.002433 |
def _create_results_summary(self):
"""
Create the dataframe that displays the estimation results, and store
it on the model instance.
Returns
-------
None.
"""
# Make sure we have all attributes needed to create the results summary
needed_attributes = ["params",
"standard_errors",
"tvalues",
"pvalues",
"robust_std_errs",
"robust_t_stats",
"robust_p_vals"]
try:
assert all([hasattr(self, attr) for attr in needed_attributes])
assert all([isinstance(getattr(self, attr), pd.Series)
for attr in needed_attributes])
except AssertionError:
msg = "Call this function only after setting/calculating all other"
msg_2 = " estimation results attributes"
raise NotImplementedError(msg + msg_2)
self.summary = pd.concat((self.params,
self.standard_errors,
self.tvalues,
self.pvalues,
self.robust_std_errs,
self.robust_t_stats,
self.robust_p_vals), axis=1)
return None | [
"def",
"_create_results_summary",
"(",
"self",
")",
":",
"# Make sure we have all attributes needed to create the results summary",
"needed_attributes",
"=",
"[",
"\"params\"",
",",
"\"standard_errors\"",
",",
"\"tvalues\"",
",",
"\"pvalues\"",
",",
"\"robust_std_errs\"",
",",
"\"robust_t_stats\"",
",",
"\"robust_p_vals\"",
"]",
"try",
":",
"assert",
"all",
"(",
"[",
"hasattr",
"(",
"self",
",",
"attr",
")",
"for",
"attr",
"in",
"needed_attributes",
"]",
")",
"assert",
"all",
"(",
"[",
"isinstance",
"(",
"getattr",
"(",
"self",
",",
"attr",
")",
",",
"pd",
".",
"Series",
")",
"for",
"attr",
"in",
"needed_attributes",
"]",
")",
"except",
"AssertionError",
":",
"msg",
"=",
"\"Call this function only after setting/calculating all other\"",
"msg_2",
"=",
"\" estimation results attributes\"",
"raise",
"NotImplementedError",
"(",
"msg",
"+",
"msg_2",
")",
"self",
".",
"summary",
"=",
"pd",
".",
"concat",
"(",
"(",
"self",
".",
"params",
",",
"self",
".",
"standard_errors",
",",
"self",
".",
"tvalues",
",",
"self",
".",
"pvalues",
",",
"self",
".",
"robust_std_errs",
",",
"self",
".",
"robust_t_stats",
",",
"self",
".",
"robust_p_vals",
")",
",",
"axis",
"=",
"1",
")",
"return",
"None"
] | 39.771429 | 0.001403 |
def add_element(self, location, element, delete_elem=False):
"""
Create an entry located at ``location``.
Args:
location: String or :class:`LocationDescriptor` to describe a "separator location" (i.e. dir1/dir2/dir3 for
instance).
element: Element to store.
delete_elem: Delete the old element or not if it exists?
Returns:
The created node with the element.
Notes:
The different sub-location entries **must** exist, and the last may or may not already exist.
Use the more strict :meth:`add_unique_element` method if needed.
You don't need to have a common root node. We internally use a dummy root node.
"""
return self._create_entry(location, element, unique=False, delete_element=delete_elem) | [
"def",
"add_element",
"(",
"self",
",",
"location",
",",
"element",
",",
"delete_elem",
"=",
"False",
")",
":",
"return",
"self",
".",
"_create_entry",
"(",
"location",
",",
"element",
",",
"unique",
"=",
"False",
",",
"delete_element",
"=",
"delete_elem",
")"
] | 43.55 | 0.011236 |
def triangle_normal(a,b,c):
'''
triangle_normal(a, b, c) yields the normal vector of the triangle whose vertices are given by
the points a, b, and c. If the points are 2D points, then 3D normal vectors are still yielded,
that are always (0,0,1) or (0,0,-1). This function auto-threads over matrices, in which case
they must be in equivalent orientations, and the result is returned in whatever orientation
they are given in. In some cases, the intended orientation of the matrices is ambiguous (e.g.,
if a, b, and c are 2 x 3 matrices), in which case the matrix is always assumed to be given in
(dims x vertices) orientation.
'''
(a,b,c) = [np.asarray(x) for x in (a,b,c)]
if len(a.shape) == 1 and len(b.shape) == 1 and len(c.shape) == 1:
return triangle_normal(*[np.transpose([x]) for x in (a,b,c)])[:,0]
(a,b,c) = [np.transpose([x]) if len(x.shape) == 1 else x for x in (a,b,c)]
# find a required number of dimensions, if possible
if a.shape[0] in (2,3):
dims = a.shape[0]
tx = True
else:
dims = a.shape[1]
(a,b,c) = [x.T for x in (a,b,c)]
tx = False
n = (a.shape[1] if a.shape[1] != 1 else b.shape[1] if b.shape[1] != 1 else
c.shape[1] if c.shape[1] != 1 else 1)
if dims == 2:
(a,b,c) = [np.vstack((x, np.zeros((1,n)))) for x in (a,b,c)]
ab = normalize(b - a)
ac = normalize(c - a)
res = np.cross(ab, ac, axisa=0, axisb=0)
return res.T if tx else res | [
"def",
"triangle_normal",
"(",
"a",
",",
"b",
",",
"c",
")",
":",
"(",
"a",
",",
"b",
",",
"c",
")",
"=",
"[",
"np",
".",
"asarray",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"a",
",",
"b",
",",
"c",
")",
"]",
"if",
"len",
"(",
"a",
".",
"shape",
")",
"==",
"1",
"and",
"len",
"(",
"b",
".",
"shape",
")",
"==",
"1",
"and",
"len",
"(",
"c",
".",
"shape",
")",
"==",
"1",
":",
"return",
"triangle_normal",
"(",
"*",
"[",
"np",
".",
"transpose",
"(",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"(",
"a",
",",
"b",
",",
"c",
")",
"]",
")",
"[",
":",
",",
"0",
"]",
"(",
"a",
",",
"b",
",",
"c",
")",
"=",
"[",
"np",
".",
"transpose",
"(",
"[",
"x",
"]",
")",
"if",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"1",
"else",
"x",
"for",
"x",
"in",
"(",
"a",
",",
"b",
",",
"c",
")",
"]",
"# find a required number of dimensions, if possible",
"if",
"a",
".",
"shape",
"[",
"0",
"]",
"in",
"(",
"2",
",",
"3",
")",
":",
"dims",
"=",
"a",
".",
"shape",
"[",
"0",
"]",
"tx",
"=",
"True",
"else",
":",
"dims",
"=",
"a",
".",
"shape",
"[",
"1",
"]",
"(",
"a",
",",
"b",
",",
"c",
")",
"=",
"[",
"x",
".",
"T",
"for",
"x",
"in",
"(",
"a",
",",
"b",
",",
"c",
")",
"]",
"tx",
"=",
"False",
"n",
"=",
"(",
"a",
".",
"shape",
"[",
"1",
"]",
"if",
"a",
".",
"shape",
"[",
"1",
"]",
"!=",
"1",
"else",
"b",
".",
"shape",
"[",
"1",
"]",
"if",
"b",
".",
"shape",
"[",
"1",
"]",
"!=",
"1",
"else",
"c",
".",
"shape",
"[",
"1",
"]",
"if",
"c",
".",
"shape",
"[",
"1",
"]",
"!=",
"1",
"else",
"1",
")",
"if",
"dims",
"==",
"2",
":",
"(",
"a",
",",
"b",
",",
"c",
")",
"=",
"[",
"np",
".",
"vstack",
"(",
"(",
"x",
",",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"n",
")",
")",
")",
")",
"for",
"x",
"in",
"(",
"a",
",",
"b",
",",
"c",
")",
"]",
"ab",
"=",
"normalize",
"(",
"b",
"-",
"a",
")",
"ac",
"=",
"normalize",
"(",
"c",
"-",
"a",
")",
"res",
"=",
"np",
".",
"cross",
"(",
"ab",
",",
"ac",
",",
"axisa",
"=",
"0",
",",
"axisb",
"=",
"0",
")",
"return",
"res",
".",
"T",
"if",
"tx",
"else",
"res"
] | 49.5 | 0.019815 |
def setOptions( self, options ):
"""
Sets the tag option list for this widget. If used, tags need to be
found within the list of options when added.
:param options | [<str>, ..]
"""
self._options = map(str, options)
if ( options ):
completer = QCompleter(options, self)
completer.setCompletionMode(QCompleter.InlineCompletion)
self.setCompleter(completer)
else:
self.setCompleter(None) | [
"def",
"setOptions",
"(",
"self",
",",
"options",
")",
":",
"self",
".",
"_options",
"=",
"map",
"(",
"str",
",",
"options",
")",
"if",
"(",
"options",
")",
":",
"completer",
"=",
"QCompleter",
"(",
"options",
",",
"self",
")",
"completer",
".",
"setCompletionMode",
"(",
"QCompleter",
".",
"InlineCompletion",
")",
"self",
".",
"setCompleter",
"(",
"completer",
")",
"else",
":",
"self",
".",
"setCompleter",
"(",
"None",
")"
] | 34.6 | 0.015009 |
def flatten(nested_iterable):
"""
Flattens arbitrarily nested lists/tuples.
Code partially taken from https://stackoverflow.com/a/10824420.
Parameters
----------
nested_iterable
A list or tuple of arbitrarily nested values.
Yields
------
any
Non-list and non-tuple values in `nested_iterable`.
"""
# don't just check if something is iterable here, because then strings
# and arrays will be split into their characters and components
if not isinstance(nested_iterable, (list, tuple)):
yield nested_iterable
else:
for i in nested_iterable:
if isinstance(i, (list, tuple)):
for j in flatten(i):
yield j
else:
yield i | [
"def",
"flatten",
"(",
"nested_iterable",
")",
":",
"# don't just check if something is iterable here, because then strings",
"# and arrays will be split into their characters and components",
"if",
"not",
"isinstance",
"(",
"nested_iterable",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"yield",
"nested_iterable",
"else",
":",
"for",
"i",
"in",
"nested_iterable",
":",
"if",
"isinstance",
"(",
"i",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"j",
"in",
"flatten",
"(",
"i",
")",
":",
"yield",
"j",
"else",
":",
"yield",
"i"
] | 26.928571 | 0.00128 |
def _section_execution_order(self, section, iterargs
, reverse=False
, custom_order=None
, explicit_checks: Iterable = None
, exclude_checks: Iterable = None):
"""
order must:
a) contain all variable args (we're appending missing ones)
b) not contain duplicates (we're removing repeated items)
order may contain *iterargs otherwise it is appended
to the end
order may contain "*check" otherwise, it is like *check is appended
to the end (Not done explicitly though).
"""
stack = list(custom_order) if custom_order is not None else list(section.order)
if '*iterargs' not in stack:
stack.append('*iterargs')
stack.reverse()
full_order = []
seen = set()
while len(stack):
item = stack.pop()
if item in seen:
continue
seen.add(item)
if item == '*iterargs':
all_iterargs = list(iterargs.keys())
# assuming there is a meaningful order
all_iterargs.reverse()
stack += all_iterargs
continue
full_order.append(item)
# Filter down checks. Checks to exclude are filtered for last as the user
# might e.g. want to include all tests with "kerning" in the ID, except for
# "kerning_something". explicit_checks could then be ["kerning"] and
# exclude_checks ["something"].
checks = section.checks
if explicit_checks:
checks = [
check for check in checks
if any(include_string in check.id for include_string in explicit_checks)
]
if exclude_checks:
checks = [
check for check in checks
if not any(exclude_string in check.id for exclude_string in exclude_checks)
]
scopes = self._analyze_checks(full_order, checks)
key = lambda item: item[1] # check, signature, scope = item
scopes.sort(key=key, reverse=reverse)
for check, args in self._execute_scopes(iterargs, scopes):
# this is the iterargs tuple that will be used as a key for caching
# and so on. we could sort it, to ensure it yields in the same
# cache locations always, but then again, it is already in a well
# defined order, by clustering.
yield check, tuple(args) | [
"def",
"_section_execution_order",
"(",
"self",
",",
"section",
",",
"iterargs",
",",
"reverse",
"=",
"False",
",",
"custom_order",
"=",
"None",
",",
"explicit_checks",
":",
"Iterable",
"=",
"None",
",",
"exclude_checks",
":",
"Iterable",
"=",
"None",
")",
":",
"stack",
"=",
"list",
"(",
"custom_order",
")",
"if",
"custom_order",
"is",
"not",
"None",
"else",
"list",
"(",
"section",
".",
"order",
")",
"if",
"'*iterargs'",
"not",
"in",
"stack",
":",
"stack",
".",
"append",
"(",
"'*iterargs'",
")",
"stack",
".",
"reverse",
"(",
")",
"full_order",
"=",
"[",
"]",
"seen",
"=",
"set",
"(",
")",
"while",
"len",
"(",
"stack",
")",
":",
"item",
"=",
"stack",
".",
"pop",
"(",
")",
"if",
"item",
"in",
"seen",
":",
"continue",
"seen",
".",
"add",
"(",
"item",
")",
"if",
"item",
"==",
"'*iterargs'",
":",
"all_iterargs",
"=",
"list",
"(",
"iterargs",
".",
"keys",
"(",
")",
")",
"# assuming there is a meaningful order",
"all_iterargs",
".",
"reverse",
"(",
")",
"stack",
"+=",
"all_iterargs",
"continue",
"full_order",
".",
"append",
"(",
"item",
")",
"# Filter down checks. Checks to exclude are filtered for last as the user",
"# might e.g. want to include all tests with \"kerning\" in the ID, except for",
"# \"kerning_something\". explicit_checks could then be [\"kerning\"] and",
"# exclude_checks [\"something\"].",
"checks",
"=",
"section",
".",
"checks",
"if",
"explicit_checks",
":",
"checks",
"=",
"[",
"check",
"for",
"check",
"in",
"checks",
"if",
"any",
"(",
"include_string",
"in",
"check",
".",
"id",
"for",
"include_string",
"in",
"explicit_checks",
")",
"]",
"if",
"exclude_checks",
":",
"checks",
"=",
"[",
"check",
"for",
"check",
"in",
"checks",
"if",
"not",
"any",
"(",
"exclude_string",
"in",
"check",
".",
"id",
"for",
"exclude_string",
"in",
"exclude_checks",
")",
"]",
"scopes",
"=",
"self",
".",
"_analyze_checks",
"(",
"full_order",
",",
"checks",
")",
"key",
"=",
"lambda",
"item",
":",
"item",
"[",
"1",
"]",
"# check, signature, scope = item",
"scopes",
".",
"sort",
"(",
"key",
"=",
"key",
",",
"reverse",
"=",
"reverse",
")",
"for",
"check",
",",
"args",
"in",
"self",
".",
"_execute_scopes",
"(",
"iterargs",
",",
"scopes",
")",
":",
"# this is the iterargs tuple that will be used as a key for caching",
"# and so on. we could sort it, to ensure it yields in the same",
"# cache locations always, but then again, it is already in a well",
"# defined order, by clustering.",
"yield",
"check",
",",
"tuple",
"(",
"args",
")"
] | 36.612903 | 0.009867 |
def map(self):
"""Perform a function on every item in an iterable."""
with Pool(self.cpu_count) as pool:
pool.map(self._func, self._iterable)
pool.close()
return True | [
"def",
"map",
"(",
"self",
")",
":",
"with",
"Pool",
"(",
"self",
".",
"cpu_count",
")",
"as",
"pool",
":",
"pool",
".",
"map",
"(",
"self",
".",
"_func",
",",
"self",
".",
"_iterable",
")",
"pool",
".",
"close",
"(",
")",
"return",
"True"
] | 34.833333 | 0.009346 |
def write_Track(file, track, bpm=120, repeat=0, verbose=False):
"""Write a mingus.Track to a MIDI file.
Write the name to the file and set the instrument if the instrument has
the attribute instrument_nr, which represents the MIDI instrument
number. The class MidiInstrument in mingus.containers.Instrument has
this attribute by default.
"""
m = MidiFile()
t = MidiTrack(bpm)
m.tracks = [t]
while repeat >= 0:
t.play_Track(track)
repeat -= 1
return m.write_file(file, verbose) | [
"def",
"write_Track",
"(",
"file",
",",
"track",
",",
"bpm",
"=",
"120",
",",
"repeat",
"=",
"0",
",",
"verbose",
"=",
"False",
")",
":",
"m",
"=",
"MidiFile",
"(",
")",
"t",
"=",
"MidiTrack",
"(",
"bpm",
")",
"m",
".",
"tracks",
"=",
"[",
"t",
"]",
"while",
"repeat",
">=",
"0",
":",
"t",
".",
"play_Track",
"(",
"track",
")",
"repeat",
"-=",
"1",
"return",
"m",
".",
"write_file",
"(",
"file",
",",
"verbose",
")"
] | 34.866667 | 0.001862 |
def recursive_model_update(model, props):
"""
Recursively updates attributes on a model including other
models. If the type of the new model matches the old model
properties are simply updated, otherwise the model is replaced.
"""
updates = {}
valid_properties = model.properties_with_values()
for k, v in props.items():
if isinstance(v, Model):
nested_model = getattr(model, k)
if type(v) is type(nested_model):
nested_props = v.properties_with_values(include_defaults=False)
recursive_model_update(nested_model, nested_props)
else:
setattr(model, k, v)
elif k in valid_properties and v != valid_properties[k]:
updates[k] = v
model.update(**updates) | [
"def",
"recursive_model_update",
"(",
"model",
",",
"props",
")",
":",
"updates",
"=",
"{",
"}",
"valid_properties",
"=",
"model",
".",
"properties_with_values",
"(",
")",
"for",
"k",
",",
"v",
"in",
"props",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"Model",
")",
":",
"nested_model",
"=",
"getattr",
"(",
"model",
",",
"k",
")",
"if",
"type",
"(",
"v",
")",
"is",
"type",
"(",
"nested_model",
")",
":",
"nested_props",
"=",
"v",
".",
"properties_with_values",
"(",
"include_defaults",
"=",
"False",
")",
"recursive_model_update",
"(",
"nested_model",
",",
"nested_props",
")",
"else",
":",
"setattr",
"(",
"model",
",",
"k",
",",
"v",
")",
"elif",
"k",
"in",
"valid_properties",
"and",
"v",
"!=",
"valid_properties",
"[",
"k",
"]",
":",
"updates",
"[",
"k",
"]",
"=",
"v",
"model",
".",
"update",
"(",
"*",
"*",
"updates",
")"
] | 41.052632 | 0.001253 |
def move_identity(db, from_id, to_uuid):
"""Move an identity to a unique identity.
This function shifts the identity identified by 'from_id' to
the unique identity 'to_uuid'.
When 'to_uuid' is the unique identity that is currently related
to 'from_id', the action does not have any effect.
In the case that 'from_id' and 'to_uuid' have equal values and the
unique identity does not exist, a new unique identity will be
created and the identity will be moved to it.
The function raises a 'NotFoundError' exception when either 'from_id'
or 'to_uuid' do not exist in the registry.
:param from_id: identifier of the identity set to be moved
:param to_uuid: identifier of the unique identity where 'from_id'
will be moved
:raises NotFoundError: raised when either 'from_id' or 'to_uuid'
do not exist in the registry
"""
with db.connect() as session:
fid = find_identity(session, from_id)
tuid = find_unique_identity(session, to_uuid)
if not fid:
raise NotFoundError(entity=from_id)
if not tuid:
# Move identity to a new one
if from_id == to_uuid:
tuid = add_unique_identity_db(session, to_uuid)
else:
raise NotFoundError(entity=to_uuid)
move_identity_db(session, fid, tuid) | [
"def",
"move_identity",
"(",
"db",
",",
"from_id",
",",
"to_uuid",
")",
":",
"with",
"db",
".",
"connect",
"(",
")",
"as",
"session",
":",
"fid",
"=",
"find_identity",
"(",
"session",
",",
"from_id",
")",
"tuid",
"=",
"find_unique_identity",
"(",
"session",
",",
"to_uuid",
")",
"if",
"not",
"fid",
":",
"raise",
"NotFoundError",
"(",
"entity",
"=",
"from_id",
")",
"if",
"not",
"tuid",
":",
"# Move identity to a new one",
"if",
"from_id",
"==",
"to_uuid",
":",
"tuid",
"=",
"add_unique_identity_db",
"(",
"session",
",",
"to_uuid",
")",
"else",
":",
"raise",
"NotFoundError",
"(",
"entity",
"=",
"to_uuid",
")",
"move_identity_db",
"(",
"session",
",",
"fid",
",",
"tuid",
")"
] | 36.081081 | 0.000729 |
def angle_subtended(ell, **kwargs):
"""
Compute the half angle subtended (or min and max angles)
for an offset elliptical conic
from the origin or an arbitrary viewpoint.
kwargs:
tangent Return tangent instead of angle (default false)
viewpoint Defaults to origin
"""
return_tangent = kwargs.pop('tangent',False)
con, transform, offset = ell.projection(**kwargs)
v = N.linalg.norm(N.array(con.major_axes()),axis=1)
A = N.sort(v)[::-1] # Sort highest values first
A = N.squeeze(A)
B = N.linalg.norm(offset)
if return_tangent: return A/B
return N.arctan2(A,B) | [
"def",
"angle_subtended",
"(",
"ell",
",",
"*",
"*",
"kwargs",
")",
":",
"return_tangent",
"=",
"kwargs",
".",
"pop",
"(",
"'tangent'",
",",
"False",
")",
"con",
",",
"transform",
",",
"offset",
"=",
"ell",
".",
"projection",
"(",
"*",
"*",
"kwargs",
")",
"v",
"=",
"N",
".",
"linalg",
".",
"norm",
"(",
"N",
".",
"array",
"(",
"con",
".",
"major_axes",
"(",
")",
")",
",",
"axis",
"=",
"1",
")",
"A",
"=",
"N",
".",
"sort",
"(",
"v",
")",
"[",
":",
":",
"-",
"1",
"]",
"# Sort highest values first",
"A",
"=",
"N",
".",
"squeeze",
"(",
"A",
")",
"B",
"=",
"N",
".",
"linalg",
".",
"norm",
"(",
"offset",
")",
"if",
"return_tangent",
":",
"return",
"A",
"/",
"B",
"return",
"N",
".",
"arctan2",
"(",
"A",
",",
"B",
")"
] | 32.421053 | 0.009464 |
def make_directory(path):
"""
Make a directory and any intermediate directories that don't already
exist. This function handles the case where two threads try to create
a directory at once.
"""
if not os.path.exists(path):
# concurrent writes that try to create the same dir can fail
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise e | [
"def",
"make_directory",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"# concurrent writes that try to create the same dir can fail",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EEXIST",
":",
"pass",
"else",
":",
"raise",
"e"
] | 29.75 | 0.002037 |
def authorize(self):
""" Use the magic of a unicorn and summon the set-top box to listen
to us.
/
,.. /
,' ';
,,.__ _,' /'; .
:',' ~~~~ '. '~
:' ( ) )::,
'. '. .=----=..-~ .;'
' ;' :: ':. '"
(: ': ;)
\\ '" ./
'" '"
Seriously, I've no idea what I'm doing here.
"""
# Read the version of the set-top box and write it back. Why? I've no
# idea.
version = self.con.makefile().readline()
self.con.send(version.encode())
# The set-top box returns with 2 bytes. I've no idea what they mean.
self.con.recv(2)
# The following reads and writes are used to authenticate. But I don't
# fully understand what is going on.
self.con.send(struct.pack('>B', 1))
msg = self.con.recv(4)
response = struct.unpack(">I", msg)
if response[0] != 0:
log.debug("Failed to authorize with set-top at %s:%s.",
self.ip, self.port)
raise AuthenticationError()
# Dunno where this is good for. But otherwise the client doesn't work.
self.con.send(b'0')
log.debug('Authorized successfully with set-top box at %s:%s.',
self.ip, self.port) | [
"def",
"authorize",
"(",
"self",
")",
":",
"# Read the version of the set-top box and write it back. Why? I've no",
"# idea.",
"version",
"=",
"self",
".",
"con",
".",
"makefile",
"(",
")",
".",
"readline",
"(",
")",
"self",
".",
"con",
".",
"send",
"(",
"version",
".",
"encode",
"(",
")",
")",
"# The set-top box returns with 2 bytes. I've no idea what they mean.",
"self",
".",
"con",
".",
"recv",
"(",
"2",
")",
"# The following reads and writes are used to authenticate. But I don't",
"# fully understand what is going on.",
"self",
".",
"con",
".",
"send",
"(",
"struct",
".",
"pack",
"(",
"'>B'",
",",
"1",
")",
")",
"msg",
"=",
"self",
".",
"con",
".",
"recv",
"(",
"4",
")",
"response",
"=",
"struct",
".",
"unpack",
"(",
"\">I\"",
",",
"msg",
")",
"if",
"response",
"[",
"0",
"]",
"!=",
"0",
":",
"log",
".",
"debug",
"(",
"\"Failed to authorize with set-top at %s:%s.\"",
",",
"self",
".",
"ip",
",",
"self",
".",
"port",
")",
"raise",
"AuthenticationError",
"(",
")",
"# Dunno where this is good for. But otherwise the client doesn't work.",
"self",
".",
"con",
".",
"send",
"(",
"b'0'",
")",
"log",
".",
"debug",
"(",
"'Authorized succesfully with set-top box at %s:%s.'",
",",
"self",
".",
"ip",
",",
"self",
".",
"port",
")"
] | 33.536585 | 0.001413 |
def get(self, key, value=None):
"x.get(k[,d]) -> x[k] if k in x, else d. d defaults to None."
_key = self._prepare_key(key)
prefix, node = self._get_node_by_key(_key)
if prefix==_key and node.value is not None:
return self._unpickle_value(node.value)
else:
return value | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"value",
"=",
"None",
")",
":",
"_key",
"=",
"self",
".",
"_prepare_key",
"(",
"key",
")",
"prefix",
",",
"node",
"=",
"self",
".",
"_get_node_by_key",
"(",
"_key",
")",
"if",
"prefix",
"==",
"_key",
"and",
"node",
".",
"value",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_unpickle_value",
"(",
"node",
".",
"value",
")",
"else",
":",
"return",
"value"
] | 40.75 | 0.009009 |
def set_bytes_at_offset(self, offset, data):
"""Overwrite the bytes at the given file offset with the given string.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
if not isinstance(data, bytes):
raise TypeError('data should be of type: bytes')
if 0 <= offset < len(self.__data__):
self.__data__ = ( self.__data__[:offset] + data + self.__data__[offset+len(data):] )
else:
return False
return True | [
"def",
"set_bytes_at_offset",
"(",
"self",
",",
"offset",
",",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"(",
"'data should be of type: bytes'",
")",
"if",
"0",
"<=",
"offset",
"<",
"len",
"(",
"self",
".",
"__data__",
")",
":",
"self",
".",
"__data__",
"=",
"(",
"self",
".",
"__data__",
"[",
":",
"offset",
"]",
"+",
"data",
"+",
"self",
".",
"__data__",
"[",
"offset",
"+",
"len",
"(",
"data",
")",
":",
"]",
")",
"else",
":",
"return",
"False",
"return",
"True"
] | 34.125 | 0.008913 |
def histogram(self, stat, value, tags=None):
"""Report a histogram."""
self._log('histogram', stat, value, tags) | [
"def",
"histogram",
"(",
"self",
",",
"stat",
",",
"value",
",",
"tags",
"=",
"None",
")",
":",
"self",
".",
"_log",
"(",
"'histogram'",
",",
"stat",
",",
"value",
",",
"tags",
")"
] | 42 | 0.015625 |
def bin2hexline(data, add_addr=True, width=16):
"""
Format binary data to a Hex-Editor like format...
>>> data = bytearray([i for i in range(256)])
>>> print('\\n'.join(bin2hexline(data, width=16)))
0000 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f ................
0016 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f ................
0032 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f !"#$%&'()*+,-./
0048 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 0123456789:;<=>?
0064 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
0080 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f PQRSTUVWXYZ[\]^_
0096 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f `abcdefghijklmno
0112 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f pqrstuvwxyz{|}~.
0128 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f ................
0144 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f ................
0160 a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af ................
0176 b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf ................
0192 c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf ................
0208 d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df ................
0224 e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef ................
0240 f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff ................
with open("C:\Python27\python.exe", "rb") as f:
data = f.read(150)
print("\n".join(bin2hexline(data, width=16)))
0000 4d 5a 90 00 03 00 00 00 04 00 00 00 ff ff 00 00 MZ..............
0016 b8 00 00 00 00 00 00 00 40 00 00 00 00 00 00 00 ........@.......
0032 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
0048 00 00 00 00 00 00 00 00 00 00 00 00 e8 00 00 00 ................
0064 0e 1f ba 0e 00 b4 09 cd 21 b8 01 4c cd 21 54 68 ........!..L.!Th
0080 69 73 20 70 72 6f 67 72 61 6d 20 63 61 6e 6e 6f is.program.canno
0096 74 20 62 65 20 72 75 6e 20 69 6e 20 44 4f 53 20 t.be.run.in.DOS.
0112 6d 6f 64 65 2e 0d 0d 0a 24 00 00 00 00 00 00 00 mode....$.......
0128 9d 68 ba 89 d9 09 d4 da d9 09 d4 da d9 09 d4 da .h..............
0144 d0 71 41 da d8 09 .qA...
"""
data = bytearray(data)
# same as string.printable but without \t\n\r\v\f ;)
printable = string.digits + string.ascii_letters + string.punctuation + " "
addr = 0
lines = []
run = True
line_width = 4 + (width * 3) + 1
while run:
if add_addr:
line = ["%04i" % addr]
else:
line = []
ascii_block = ""
for i in range(width):
b = data[addr]
if chr(b) in printable:
ascii_block += chr(b)
else:
ascii_block += "."
line.append("%02x" % b)
addr += 1
if addr >= len(data):
run = False
break
line = " ".join(line)
line = line.ljust(line_width)
line += ascii_block
lines.append(line)
return lines | [
"def",
"bin2hexline",
"(",
"data",
",",
"add_addr",
"=",
"True",
",",
"width",
"=",
"16",
")",
":",
"data",
"=",
"bytearray",
"(",
"data",
")",
"# same as string.printable but without \\t\\n\\r\\v\\f ;)",
"printable",
"=",
"string",
".",
"digits",
"+",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"punctuation",
"+",
"\" \"",
"addr",
"=",
"0",
"lines",
"=",
"[",
"]",
"run",
"=",
"True",
"line_width",
"=",
"4",
"+",
"(",
"width",
"*",
"3",
")",
"+",
"1",
"while",
"run",
":",
"if",
"add_addr",
":",
"line",
"=",
"[",
"\"%04i\"",
"%",
"addr",
"]",
"else",
":",
"line",
"=",
"[",
"]",
"ascii_block",
"=",
"\"\"",
"for",
"i",
"in",
"range",
"(",
"width",
")",
":",
"b",
"=",
"data",
"[",
"addr",
"]",
"if",
"chr",
"(",
"b",
")",
"in",
"printable",
":",
"ascii_block",
"+=",
"chr",
"(",
"b",
")",
"else",
":",
"ascii_block",
"+=",
"\".\"",
"line",
".",
"append",
"(",
"\"%02x\"",
"%",
"b",
")",
"addr",
"+=",
"1",
"if",
"addr",
">=",
"len",
"(",
"data",
")",
":",
"run",
"=",
"False",
"break",
"line",
"=",
"\" \"",
".",
"join",
"(",
"line",
")",
"line",
"=",
"line",
".",
"ljust",
"(",
"line_width",
")",
"line",
"+=",
"ascii_block",
"lines",
".",
"append",
"(",
"line",
")",
"return",
"lines"
] | 40.065789 | 0.001282 |
def diff_sizes(a, b, progressbar=None):
"""Return list of tuples where sizes differ.
Tuple structure:
(identifier, size in a, size in b)
Assumes list of identifiers in a and b are identical.
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
:returns: list of tuples for all items with different sizes
"""
difference = []
for i in a.identifiers:
a_size = a.item_properties(i)["size_in_bytes"]
b_size = b.item_properties(i)["size_in_bytes"]
if a_size != b_size:
difference.append((i, a_size, b_size))
if progressbar:
progressbar.update(1)
return difference | [
"def",
"diff_sizes",
"(",
"a",
",",
"b",
",",
"progressbar",
"=",
"None",
")",
":",
"difference",
"=",
"[",
"]",
"for",
"i",
"in",
"a",
".",
"identifiers",
":",
"a_size",
"=",
"a",
".",
"item_properties",
"(",
"i",
")",
"[",
"\"size_in_bytes\"",
"]",
"b_size",
"=",
"b",
".",
"item_properties",
"(",
"i",
")",
"[",
"\"size_in_bytes\"",
"]",
"if",
"a_size",
"!=",
"b_size",
":",
"difference",
".",
"append",
"(",
"(",
"i",
",",
"a_size",
",",
"b_size",
")",
")",
"if",
"progressbar",
":",
"progressbar",
".",
"update",
"(",
"1",
")",
"return",
"difference"
] | 29.304348 | 0.001437 |
def check_data_port_connection(self, check_data_port):
"""Checks the connection validity of a data port
The method is called by a child state to check the validity of a data port in case it is connected with data
flows. The data port does not belong to 'self', but to one of self.states.
If the data port is connected to a data flow, the method checks whether these connect consistent data types
of ports.
:param rafcon.core.data_port.DataPort check_data_port: The port to check
:return: valid, message
"""
for data_flow in self.data_flows.values():
# Check whether the data flow connects the given port
from_port = self.get_data_port(data_flow.from_state, data_flow.from_key)
to_port = self.get_data_port(data_flow.to_state, data_flow.to_key)
if check_data_port is from_port or check_data_port is to_port:
# check if one of the data_types is of type 'object'; in this case the data flow is always valid
if not (from_port.data_type is object or to_port.data_type is object):
if not type_inherits_of_type(from_port.data_type, to_port.data_type):
return False, "Connection of two non-compatible data types"
return True, "valid" | [
"def",
"check_data_port_connection",
"(",
"self",
",",
"check_data_port",
")",
":",
"for",
"data_flow",
"in",
"self",
".",
"data_flows",
".",
"values",
"(",
")",
":",
"# Check whether the data flow connects the given port",
"from_port",
"=",
"self",
".",
"get_data_port",
"(",
"data_flow",
".",
"from_state",
",",
"data_flow",
".",
"from_key",
")",
"to_port",
"=",
"self",
".",
"get_data_port",
"(",
"data_flow",
".",
"to_state",
",",
"data_flow",
".",
"to_key",
")",
"if",
"check_data_port",
"is",
"from_port",
"or",
"check_data_port",
"is",
"to_port",
":",
"# check if one of the data_types if type 'object'; in this case the data flow is always valid",
"if",
"not",
"(",
"from_port",
".",
"data_type",
"is",
"object",
"or",
"to_port",
".",
"data_type",
"is",
"object",
")",
":",
"if",
"not",
"type_inherits_of_type",
"(",
"from_port",
".",
"data_type",
",",
"to_port",
".",
"data_type",
")",
":",
"return",
"False",
",",
"\"Connection of two non-compatible data types\"",
"return",
"True",
",",
"\"valid\""
] | 62.333333 | 0.008277 |
def on_value_change(self, picker, old, new):
""" Set the checked property based on the checked state
of all the children
"""
d = self.declaration
with self.widget.setValue.suppressed():
d.value = new | [
"def",
"on_value_change",
"(",
"self",
",",
"picker",
",",
"old",
",",
"new",
")",
":",
"d",
"=",
"self",
".",
"declaration",
"with",
"self",
".",
"widget",
".",
"setValue",
".",
"suppressed",
"(",
")",
":",
"d",
".",
"value",
"=",
"new"
] | 32.125 | 0.011364 |
def expect(self, pattern, timeout=-1):
"""Waits on the given pattern to appear in std_out"""
if self.blocking:
raise RuntimeError("expect can only be used on non-blocking commands.")
try:
self.subprocess.expect(pattern=pattern, timeout=timeout)
except pexpect.EOF:
pass | [
"def",
"expect",
"(",
"self",
",",
"pattern",
",",
"timeout",
"=",
"-",
"1",
")",
":",
"if",
"self",
".",
"blocking",
":",
"raise",
"RuntimeError",
"(",
"\"expect can only be used on non-blocking commands.\"",
")",
"try",
":",
"self",
".",
"subprocess",
".",
"expect",
"(",
"pattern",
"=",
"pattern",
",",
"timeout",
"=",
"timeout",
")",
"except",
"pexpect",
".",
"EOF",
":",
"pass"
] | 33 | 0.00885 |
def _constraint_lb_and_ub_to_gurobi_sense_rhs_and_range_value(lb, ub):
"""Helper function used by Constraint and Model"""
if lb is None and ub is None:
raise Exception("Free constraint ...")
elif lb is None:
sense = '<'
rhs = float(ub)
range_value = 0.
elif ub is None:
sense = '>'
rhs = float(lb)
range_value = 0.
elif lb == ub:
sense = '='
rhs = float(lb)
range_value = 0.
elif lb > ub:
raise ValueError("Lower bound is larger than upper bound.")
else:
sense = '='
rhs = float(lb)
range_value = float(ub - lb)
return sense, rhs, range_value | [
"def",
"_constraint_lb_and_ub_to_gurobi_sense_rhs_and_range_value",
"(",
"lb",
",",
"ub",
")",
":",
"if",
"lb",
"is",
"None",
"and",
"ub",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Free constraint ...\"",
")",
"elif",
"lb",
"is",
"None",
":",
"sense",
"=",
"'<'",
"rhs",
"=",
"float",
"(",
"ub",
")",
"range_value",
"=",
"0.",
"elif",
"ub",
"is",
"None",
":",
"sense",
"=",
"'>'",
"rhs",
"=",
"float",
"(",
"lb",
")",
"range_value",
"=",
"0.",
"elif",
"lb",
"==",
"ub",
":",
"sense",
"=",
"'='",
"rhs",
"=",
"float",
"(",
"lb",
")",
"range_value",
"=",
"0.",
"elif",
"lb",
">",
"ub",
":",
"raise",
"ValueError",
"(",
"\"Lower bound is larger than upper bound.\"",
")",
"else",
":",
"sense",
"=",
"'='",
"rhs",
"=",
"float",
"(",
"lb",
")",
"range_value",
"=",
"float",
"(",
"ub",
"-",
"lb",
")",
"return",
"sense",
",",
"rhs",
",",
"range_value"
] | 28.869565 | 0.001458 |
def put(self, item):
"""Adds the passed in item object to the queue and calls :func:`flush` if the size of the queue is larger
than :func:`max_queue_length`. This method does nothing if the passed in item is None.
Args:
item (:class:`contracts.Envelope`) item the telemetry envelope object to send to the service.
"""
if not item:
return
self._queue.put(item)
if self._queue.qsize() >= self._max_queue_length:
self.flush() | [
"def",
"put",
"(",
"self",
",",
"item",
")",
":",
"if",
"not",
"item",
":",
"return",
"self",
".",
"_queue",
".",
"put",
"(",
"item",
")",
"if",
"self",
".",
"_queue",
".",
"qsize",
"(",
")",
">=",
"self",
".",
"_max_queue_length",
":",
"self",
".",
"flush",
"(",
")"
] | 42 | 0.009709 |
def merge_dict_of_lists(adict, indices, pop_later=True, copy=True):
Extend the lists within a dict of lists. The indices indicate which
list has to be extended by which other list.
Parameters
----------
adict: OrderedDict
An ordered dictionary of lists
indices: list or tuple of 2 iterables of int, both having the same length
The indices of the lists that have to be merged, both iterables items
will be read pair by pair, the first is the index to the list that
will be extended with the list of the second index.
The indices can be constructed with Numpy e.g.,
indices = np.where(square_matrix)
pop_later: bool
If True will pop out the lists that are indicated in the second
list of indices.
copy: bool
If True will perform a deep copy of the input adict before
modifying it, hence not changing the original input.
Returns
-------
Dictionary of lists
Raises
------
IndexError
If the indices are out of range
"""
def check_indices(idxs, x):
for i in chain(*idxs):
if i < 0 or i >= x:
raise IndexError("Given indices are out of dict range.")
check_indices(indices, len(adict))
rdict = adict.copy() if copy else adict
dict_keys = list(rdict.keys())
for i, j in zip(*indices):
rdict[dict_keys[i]].extend(rdict[dict_keys[j]])
if pop_later:
for i, j in zip(*indices):
rdict.pop(dict_keys[j], '')
return rdict | [
"def",
"merge_dict_of_lists",
"(",
"adict",
",",
"indices",
",",
"pop_later",
"=",
"True",
",",
"copy",
"=",
"True",
")",
":",
"def",
"check_indices",
"(",
"idxs",
",",
"x",
")",
":",
"for",
"i",
"in",
"chain",
"(",
"*",
"idxs",
")",
":",
"if",
"i",
"<",
"0",
"or",
"i",
">=",
"x",
":",
"raise",
"IndexError",
"(",
"\"Given indices are out of dict range.\"",
")",
"check_indices",
"(",
"indices",
",",
"len",
"(",
"adict",
")",
")",
"rdict",
"=",
"adict",
".",
"copy",
"(",
")",
"if",
"copy",
"else",
"adict",
"dict_keys",
"=",
"list",
"(",
"rdict",
".",
"keys",
"(",
")",
")",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"*",
"indices",
")",
":",
"rdict",
"[",
"dict_keys",
"[",
"i",
"]",
"]",
".",
"extend",
"(",
"rdict",
"[",
"dict_keys",
"[",
"j",
"]",
"]",
")",
"if",
"pop_later",
":",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"*",
"indices",
")",
":",
"rdict",
".",
"pop",
"(",
"dict_keys",
"[",
"j",
"]",
",",
"''",
")",
"return",
"rdict"
] | 29.686275 | 0.000639 |
def long_poll_notifications(self, **kwargs): # noqa: E501
"""Get notifications using Long Poll # noqa: E501
In this case, notifications are delivered through HTTP long poll requests. The HTTP request is kept open until an event notification or a batch of event notifications are delivered to the client or the request times out (response code 204). In both cases, the client should open a new polling connection after the previous one closes. Only a single long polling connection per API key can be ongoing at any given time. You must have a persistent connection (Connection keep-alive header in the request) to avoid excess TLS handshakes. The pull channel is implicitly created by the first GET call to `/v2/notification/pull`. It is refreshed on each GET call. If the channel is not polled for a long time (10 minutes) - it expires and will be deleted. This means that no notifications will stay in the queue between polls. A channel can be also deleted explicitly by a DELETE call. **Note:** If you cannot have a public facing callback URL, for example when developing on your local machine, you can use long polling to check for new messages. However, **long polling is deprecated** and will likely be replaced in future. It is meant only for experimentation and not for commercial usage. The proper method to receive notifications is a **notification callback**. There can only be one notification channel per API key at a time in Device Management Connect. If a callback notification channel already exists, you need to delete it before creating a long poll notification channel, and vice-versa. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v2/notification/pull -H 'authorization: Bearer {api-key}' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.long_poll_notifications(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:return: NotificationMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.long_poll_notifications_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.long_poll_notifications_with_http_info(**kwargs) # noqa: E501
return data | [
"def",
"long_poll_notifications",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"long_poll_notifications_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"long_poll_notifications_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | 124.05 | 0.0008 |
def print_dependencies(_run):
"""Print the detected source-files and dependencies."""
print('Dependencies:')
for dep in _run.experiment_info['dependencies']:
pack, _, version = dep.partition('==')
print(' {:<20} == {}'.format(pack, version))
print('\nSources:')
for source, digest in _run.experiment_info['sources']:
print(' {:<43} {}'.format(source, digest))
if _run.experiment_info['repositories']:
repos = _run.experiment_info['repositories']
print('\nVersion Control:')
for repo in repos:
mod = COLOR_DIRTY + 'M' if repo['dirty'] else ' '
print('{} {:<43} {}'.format(mod, repo['url'], repo['commit']) +
ENDC)
print('') | [
"def",
"print_dependencies",
"(",
"_run",
")",
":",
"print",
"(",
"'Dependencies:'",
")",
"for",
"dep",
"in",
"_run",
".",
"experiment_info",
"[",
"'dependencies'",
"]",
":",
"pack",
",",
"_",
",",
"version",
"=",
"dep",
".",
"partition",
"(",
"'=='",
")",
"print",
"(",
"' {:<20} == {}'",
".",
"format",
"(",
"pack",
",",
"version",
")",
")",
"print",
"(",
"'\\nSources:'",
")",
"for",
"source",
",",
"digest",
"in",
"_run",
".",
"experiment_info",
"[",
"'sources'",
"]",
":",
"print",
"(",
"' {:<43} {}'",
".",
"format",
"(",
"source",
",",
"digest",
")",
")",
"if",
"_run",
".",
"experiment_info",
"[",
"'repositories'",
"]",
":",
"repos",
"=",
"_run",
".",
"experiment_info",
"[",
"'repositories'",
"]",
"print",
"(",
"'\\nVersion Control:'",
")",
"for",
"repo",
"in",
"repos",
":",
"mod",
"=",
"COLOR_DIRTY",
"+",
"'M'",
"if",
"repo",
"[",
"'dirty'",
"]",
"else",
"' '",
"print",
"(",
"'{} {:<43} {}'",
".",
"format",
"(",
"mod",
",",
"repo",
"[",
"'url'",
"]",
",",
"repo",
"[",
"'commit'",
"]",
")",
"+",
"ENDC",
")",
"print",
"(",
"''",
")"
] | 38.315789 | 0.00134 |
def relax_AX(self):
"""The parent class method that this method overrides only
implements the relaxation step for the variables of the baseline
consensus algorithm. This method calls the overridden method and
then implements the relaxation step for the additional variables
required for the mask decoupling modification to the baseline
algorithm.
"""
super(ConvCnstrMODMaskDcpl_Consensus, self).relax_AX()
self.AX1nr = sl.irfftn(sl.inner(self.Zf, self.swapaxes(self.Xf),
axis=self.cri.axisM),
self.cri.Nv, self.cri.axisN)
if self.rlx == 1.0:
self.AX1 = self.AX1nr
else:
alpha = self.rlx
self.AX1 = alpha*self.AX1nr + (1-alpha)*(self.Y1 + self.S) | [
"def",
"relax_AX",
"(",
"self",
")",
":",
"super",
"(",
"ConvCnstrMODMaskDcpl_Consensus",
",",
"self",
")",
".",
"relax_AX",
"(",
")",
"self",
".",
"AX1nr",
"=",
"sl",
".",
"irfftn",
"(",
"sl",
".",
"inner",
"(",
"self",
".",
"Zf",
",",
"self",
".",
"swapaxes",
"(",
"self",
".",
"Xf",
")",
",",
"axis",
"=",
"self",
".",
"cri",
".",
"axisM",
")",
",",
"self",
".",
"cri",
".",
"Nv",
",",
"self",
".",
"cri",
".",
"axisN",
")",
"if",
"self",
".",
"rlx",
"==",
"1.0",
":",
"self",
".",
"AX1",
"=",
"self",
".",
"AX1nr",
"else",
":",
"alpha",
"=",
"self",
".",
"rlx",
"self",
".",
"AX1",
"=",
"alpha",
"*",
"self",
".",
"AX1nr",
"+",
"(",
"1",
"-",
"alpha",
")",
"*",
"(",
"self",
".",
"Y1",
"+",
"self",
".",
"S",
")"
] | 45.777778 | 0.002378 |
def csrf_token():
"""
Get csrf token or create new one
"""
from uliweb import request, settings
from uliweb.utils.common import safe_str
v = {}
token_name = settings.CSRF.cookie_token_name
if not request.session.deleted and request.session.get(token_name):
v = request.session[token_name]
if time.time() >= v['created_time'] + v['expiry_time']:
v = {}
else:
v['created_time'] = time.time()
if not v:
token = request.cookies.get(token_name)
if not token:
token = uuid.uuid4().get_hex()
v = {'token':token, 'expiry_time':settings.CSRF.timeout, 'created_time':time.time()}
if not request.session.deleted:
request.session[token_name] = v
return safe_str(v['token']) | [
"def",
"csrf_token",
"(",
")",
":",
"from",
"uliweb",
"import",
"request",
",",
"settings",
"from",
"uliweb",
".",
"utils",
".",
"common",
"import",
"safe_str",
"v",
"=",
"{",
"}",
"token_name",
"=",
"settings",
".",
"CSRF",
".",
"cookie_token_name",
"if",
"not",
"request",
".",
"session",
".",
"deleted",
"and",
"request",
".",
"session",
".",
"get",
"(",
"token_name",
")",
":",
"v",
"=",
"request",
".",
"session",
"[",
"token_name",
"]",
"if",
"time",
".",
"time",
"(",
")",
">=",
"v",
"[",
"'created_time'",
"]",
"+",
"v",
"[",
"'expiry_time'",
"]",
":",
"v",
"=",
"{",
"}",
"else",
":",
"v",
"[",
"'created_time'",
"]",
"=",
"time",
".",
"time",
"(",
")",
"if",
"not",
"v",
":",
"token",
"=",
"request",
".",
"cookies",
".",
"get",
"(",
"token_name",
")",
"if",
"not",
"token",
":",
"token",
"=",
"uuid",
".",
"uuid4",
"(",
")",
".",
"get_hex",
"(",
")",
"v",
"=",
"{",
"'token'",
":",
"token",
",",
"'expiry_time'",
":",
"settings",
".",
"CSRF",
".",
"timeout",
",",
"'created_time'",
":",
"time",
".",
"time",
"(",
")",
"}",
"if",
"not",
"request",
".",
"session",
".",
"deleted",
":",
"request",
".",
"session",
"[",
"token_name",
"]",
"=",
"v",
"return",
"safe_str",
"(",
"v",
"[",
"'token'",
"]",
")"
] | 31.52 | 0.008621 |
def _position_and_velocity_TEME_km(self, t):
"""Return the raw true equator mean equinox (TEME) vectors from SGP4.
Returns a tuple of NumPy arrays ``([x y z], [xdot ydot zdot])``
expressed in kilometers and kilometers per second. Note that we
assume the TLE epoch to be a UTC date, per AIAA 2006-6753.
"""
sat = self.model
minutes_past_epoch = (t._utc_float() - sat.jdsatepoch) * 1440.0
if getattr(minutes_past_epoch, 'shape', None):
position = []
velocity = []
error = []
for m in minutes_past_epoch:
p, v = sgp4(sat, m)
position.append(p)
velocity.append(v)
error.append(sat.error_message)
return array(position).T, array(velocity).T, error
else:
position, velocity = sgp4(sat, minutes_past_epoch)
return array(position), array(velocity), sat.error_message | [
"def",
"_position_and_velocity_TEME_km",
"(",
"self",
",",
"t",
")",
":",
"sat",
"=",
"self",
".",
"model",
"minutes_past_epoch",
"=",
"(",
"t",
".",
"_utc_float",
"(",
")",
"-",
"sat",
".",
"jdsatepoch",
")",
"*",
"1440.0",
"if",
"getattr",
"(",
"minutes_past_epoch",
",",
"'shape'",
",",
"None",
")",
":",
"position",
"=",
"[",
"]",
"velocity",
"=",
"[",
"]",
"error",
"=",
"[",
"]",
"for",
"m",
"in",
"minutes_past_epoch",
":",
"p",
",",
"v",
"=",
"sgp4",
"(",
"sat",
",",
"m",
")",
"position",
".",
"append",
"(",
"p",
")",
"velocity",
".",
"append",
"(",
"v",
")",
"error",
".",
"append",
"(",
"sat",
".",
"error_message",
")",
"return",
"array",
"(",
"position",
")",
".",
"T",
",",
"array",
"(",
"velocity",
")",
".",
"T",
",",
"error",
"else",
":",
"position",
",",
"velocity",
"=",
"sgp4",
"(",
"sat",
",",
"minutes_past_epoch",
")",
"return",
"array",
"(",
"position",
")",
",",
"array",
"(",
"velocity",
")",
",",
"sat",
".",
"error_message"
] | 41.695652 | 0.002039 |
def get_term(self,term_id):
"""
Returns the term object for the supplied identifier
@type term_id: string
@param term_id: term identifier
"""
if term_id in self.idx:
return Cterm(self.idx[term_id],self.type)
else:
return None | [
"def",
"get_term",
"(",
"self",
",",
"term_id",
")",
":",
"if",
"term_id",
"in",
"self",
".",
"idx",
":",
"return",
"Cterm",
"(",
"self",
".",
"idx",
"[",
"term_id",
"]",
",",
"self",
".",
"type",
")",
"else",
":",
"return",
"None"
] | 29.6 | 0.013115 |
def wait(self, timeout_s: float = None) -> int:
"""
Wait for up to ``timeout_s`` for the child process to finish.
Args:
timeout_s: maximum time to wait or ``None`` to wait forever
Returns:
process return code; or ``0`` if it wasn't running, or ``1`` if
it managed to exit without a return code
Raises:
subprocess.TimeoutExpired: if the process continues to run
"""
if not self.running:
return 0
retcode = self.process.wait(timeout=timeout_s)
# We won't get further unless the process has stopped.
if retcode is None:
self.error("Subprocess finished, but return code was None")
retcode = 1 # we're promising to return an int
elif retcode == 0:
self.info("Subprocess finished cleanly (return code 0).")
else:
self.error(
"Subprocess finished, but FAILED (return code {}). "
"Logs were: {} (stdout), {} (stderr)".format(
retcode,
self.details.logfile_out,
self.details.logfile_err))
self.running = False
return retcode | [
"def",
"wait",
"(",
"self",
",",
"timeout_s",
":",
"float",
"=",
"None",
")",
"->",
"int",
":",
"if",
"not",
"self",
".",
"running",
":",
"return",
"0",
"retcode",
"=",
"self",
".",
"process",
".",
"wait",
"(",
"timeout",
"=",
"timeout_s",
")",
"# We won't get further unless the process has stopped.",
"if",
"retcode",
"is",
"None",
":",
"self",
".",
"error",
"(",
"\"Subprocess finished, but return code was None\"",
")",
"retcode",
"=",
"1",
"# we're promising to return an int",
"elif",
"retcode",
"==",
"0",
":",
"self",
".",
"info",
"(",
"\"Subprocess finished cleanly (return code 0).\"",
")",
"else",
":",
"self",
".",
"error",
"(",
"\"Subprocess finished, but FAILED (return code {}). \"",
"\"Logs were: {} (stdout), {} (stderr)\"",
".",
"format",
"(",
"retcode",
",",
"self",
".",
"details",
".",
"logfile_out",
",",
"self",
".",
"details",
".",
"logfile_err",
")",
")",
"self",
".",
"running",
"=",
"False",
"return",
"retcode"
] | 36.363636 | 0.001623 |
def _decode_filename_to_unicode(f):
'''Get bytestring filename and return unicode.
First, try to decode from default file system encoding
If that fails, use ``chardet`` module to guess encoding.
As a last resort, try to decode as utf-8.
If the argument already is unicode, return as is'''
log.debug('_decode_filename_to_unicode(%s)', repr(f))
if isinstance(f, unicode):
return f
try:
return f.decode(sys.getfilesystemencoding())
except UnicodeDecodeError:
charguess = chardet.detect(f)
log.debug("chardet filename: %r -> %r", f, charguess)
if charguess['encoding'] is not None:
try:
return f.decode(charguess['encoding'])
except UnicodeDecodeError:
pass
log.warning('Cannot understand decoding of this filename: %r (guessed %r, but was wrong)',
f, charguess)
log.debug('Trying utf-8 to decode %r', f)
try:
return f.decode('utf-8')
except UnicodeDecodeError:
pass
log.debug('Trying latin1 to decode %r', f)
try:
return f.decode('latin1')
except UnicodeDecodeError:
log.warning('Exhausted all options. Decoding %r to safe ascii', f)
return f.decode('ascii', errors='ignore') | [
"def",
"_decode_filename_to_unicode",
"(",
"f",
")",
":",
"log",
".",
"debug",
"(",
"'_decode_filename_to_unicode(%s)'",
",",
"repr",
"(",
"f",
")",
")",
"if",
"isinstance",
"(",
"f",
",",
"unicode",
")",
":",
"return",
"f",
"try",
":",
"return",
"f",
".",
"decode",
"(",
"sys",
".",
"getfilesystemencoding",
"(",
")",
")",
"except",
"UnicodeDecodeError",
":",
"charguess",
"=",
"chardet",
".",
"detect",
"(",
"f",
")",
"log",
".",
"debug",
"(",
"\"chardet filename: %r -> %r\"",
",",
"f",
",",
"charguess",
")",
"if",
"charguess",
"[",
"'encoding'",
"]",
"is",
"not",
"None",
":",
"try",
":",
"return",
"f",
".",
"decode",
"(",
"charguess",
"[",
"'encoding'",
"]",
")",
"except",
"UnicodeDecodeError",
":",
"pass",
"log",
".",
"warning",
"(",
"'Cannot understand decoding of this filename: %r (guessed %r, but was wrong)'",
",",
"f",
",",
"charguess",
")",
"log",
".",
"debug",
"(",
"'Trying utf-8 to decode %r'",
",",
"f",
")",
"try",
":",
"return",
"f",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
":",
"pass",
"log",
".",
"debug",
"(",
"'Trying latin1 to decode %r'",
",",
"f",
")",
"try",
":",
"return",
"f",
".",
"decode",
"(",
"'latin1'",
")",
"except",
"UnicodeDecodeError",
":",
"log",
".",
"warning",
"(",
"'Exhausted all options. Decoding %r to safe ascii'",
",",
"f",
")",
"return",
"f",
".",
"decode",
"(",
"'ascii'",
",",
"errors",
"=",
"'ignore'",
")"
] | 38.5 | 0.00149 |
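A note on the record above: it is Python 2 code (bytes vs. unicode, chardet fallbacks). A minimal Python 3 sketch of the same idea — offered as an illustration under that assumption, not as the project's actual code — can lean on os.fsdecode:

import os

def decode_filename(raw):
    """Best-effort decode of a filesystem bytestring (illustrative Python 3 analogue)."""
    if isinstance(raw, str):      # already text, nothing to do
        return raw
    # os.fsdecode applies the filesystem encoding with the surrogateescape error
    # handler, so undecodable bytes survive round-trips instead of raising.
    return os.fsdecode(raw)

The chardet-based guessing in the record stays useful when the bytes did not come from the local filesystem, e.g. names stored inside an archive written on another machine.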
def find_by_id(self, repoid):
"""
Returns the repo with the specified <repoid>
"""
for row in self.jsondata:
if repoid == row["repoid"]:
return self._infofromdict(row) | [
"def",
"find_by_id",
"(",
"self",
",",
"repoid",
")",
":",
"for",
"row",
"in",
"self",
".",
"jsondata",
":",
"if",
"repoid",
"==",
"row",
"[",
"\"repoid\"",
"]",
":",
"return",
"self",
".",
"_infofromdict",
"(",
"row",
")"
] | 31.571429 | 0.008811 |
def collect(path, no_input):
'''Collect static files'''
if exists(path):
msg = '"%s" directory already exists and will be erased'
log.warning(msg, path)
if not no_input:
click.confirm('Are you sure?', abort=True)
log.info('Deleting static directory "%s"', path)
shutil.rmtree(path)
prefix = current_app.static_url_path or current_app.static_folder
if prefix.startswith('/'):
prefix = prefix[1:]
destination = join(path, prefix)
log.info('Copying application assets into "%s"', destination)
shutil.copytree(current_app.static_folder, destination)
for blueprint in current_app.blueprints.values():
if blueprint.has_static_folder:
prefix = current_app.static_prefixes.get(blueprint.name)
prefix = prefix or blueprint.url_prefix or ''
prefix += blueprint.static_url_path or ''
if prefix.startswith('/'):
prefix = prefix[1:]
log.info('Copying %s assets to %s', blueprint.name, prefix)
destination = join(path, prefix)
copy_recursive(blueprint.static_folder, destination)
for prefix, source in current_app.config['STATIC_DIRS']:
log.info('Copying %s to %s', source, prefix)
destination = join(path, prefix)
copy_recursive(source, destination)
log.info('Done') | [
"def",
"collect",
"(",
"path",
",",
"no_input",
")",
":",
"if",
"exists",
"(",
"path",
")",
":",
"msg",
"=",
"'\"%s\" directory already exists and will be erased'",
"log",
".",
"warning",
"(",
"msg",
",",
"path",
")",
"if",
"not",
"no_input",
":",
"click",
".",
"confirm",
"(",
"'Are you sure?'",
",",
"abort",
"=",
"True",
")",
"log",
".",
"info",
"(",
"'Deleting static directory \"%s\"'",
",",
"path",
")",
"shutil",
".",
"rmtree",
"(",
"path",
")",
"prefix",
"=",
"current_app",
".",
"static_url_path",
"or",
"current_app",
".",
"static_folder",
"if",
"prefix",
".",
"startswith",
"(",
"'/'",
")",
":",
"prefix",
"=",
"prefix",
"[",
"1",
":",
"]",
"destination",
"=",
"join",
"(",
"path",
",",
"prefix",
")",
"log",
".",
"info",
"(",
"'Copying application assets into \"%s\"'",
",",
"destination",
")",
"shutil",
".",
"copytree",
"(",
"current_app",
".",
"static_folder",
",",
"destination",
")",
"for",
"blueprint",
"in",
"current_app",
".",
"blueprints",
".",
"values",
"(",
")",
":",
"if",
"blueprint",
".",
"has_static_folder",
":",
"prefix",
"=",
"current_app",
".",
"static_prefixes",
".",
"get",
"(",
"blueprint",
".",
"name",
")",
"prefix",
"=",
"prefix",
"or",
"blueprint",
".",
"url_prefix",
"or",
"''",
"prefix",
"+=",
"blueprint",
".",
"static_url_path",
"or",
"''",
"if",
"prefix",
".",
"startswith",
"(",
"'/'",
")",
":",
"prefix",
"=",
"prefix",
"[",
"1",
":",
"]",
"log",
".",
"info",
"(",
"'Copying %s assets to %s'",
",",
"blueprint",
".",
"name",
",",
"prefix",
")",
"destination",
"=",
"join",
"(",
"path",
",",
"prefix",
")",
"copy_recursive",
"(",
"blueprint",
".",
"static_folder",
",",
"destination",
")",
"for",
"prefix",
",",
"source",
"in",
"current_app",
".",
"config",
"[",
"'STATIC_DIRS'",
"]",
":",
"log",
".",
"info",
"(",
"'Copying %s to %s'",
",",
"source",
",",
"prefix",
")",
"destination",
"=",
"join",
"(",
"path",
",",
"prefix",
")",
"copy_recursive",
"(",
"source",
",",
"destination",
")",
"log",
".",
"info",
"(",
"'Done'",
")"
] | 37.666667 | 0.000719 |
def alignment_correcter(self, alignment_file_list, output_file_name,
filter_minimum=None):
'''
Remove lower case insertions in alignment outputs from HMM align. Give
a list of alignments, and an output file name, and each alignment will
be corrected, and written to a single file, ready to be placed together
using pplacer.
Parameters
----------
alignment_file_list : array
List of strings, each the path to different alignments from the
inputs provided to GraftM
output_file_name : str
The path and filename of the output file desired.
filter_minimum : int
minimum number of positions that must be aligned for each sequence
Returns
-------
True or False, depending if reads were written to file
'''
corrected_sequences = {}
for alignment_file in alignment_file_list:
insert_list = [] # Define list containing inserted positions to be removed (lower case characters)
sequence_list = list(SeqIO.parse(open(alignment_file, 'r'), 'fasta'))
for sequence in sequence_list: # For each sequence in the alignment
for idx, nt in enumerate(list(sequence.seq)): # For each nucleotide in the sequence
if nt.islower(): # Check for lower case character
insert_list.append(idx) # Add to the insert list if it is
insert_list = list(OrderedDict.fromkeys(sorted(insert_list, reverse=True))) # Reverse the list and remove duplicate positions
for sequence in sequence_list: # For each sequence in the alignment
new_seq = list(sequence.seq) # Define a list of sequences to be iterable list for writing
for position in insert_list: # For each position in the removal list
del new_seq[position] # Delete that inserted position in every sequence
corrected_sequences['>' + sequence.id + '\n'] = (''.join(new_seq) + '\n').replace('~', '-')
pre_filter_count=len(corrected_sequences)
if filter_minimum:
# Use '>' not '>=' here because the sequence is on a single line,
# but also includes a newline character at the end of the sequence
corrected_sequences={key:item for key, item in corrected_sequences.iteritems() if len(item.replace('-', '')) > filter_minimum}
post_filter_count=len(corrected_sequences)
logging.info("Filtered %i short sequences from the alignment" % \
(pre_filter_count-post_filter_count)
)
logging.info("%i sequences remaining" % post_filter_count)
if len(corrected_sequences) >= 1:
with open(output_file_name, 'w') as output_file: # Create an open file to write the new sequences to
for fasta_id, fasta_seq in corrected_sequences.iteritems():
output_file.write(fasta_id)
output_file.write(fasta_seq)
return True
else:
return False | [
"def",
"alignment_correcter",
"(",
"self",
",",
"alignment_file_list",
",",
"output_file_name",
",",
"filter_minimum",
"=",
"None",
")",
":",
"corrected_sequences",
"=",
"{",
"}",
"for",
"alignment_file",
"in",
"alignment_file_list",
":",
"insert_list",
"=",
"[",
"]",
"# Define list containing inserted positions to be removed (lower case characters)",
"sequence_list",
"=",
"list",
"(",
"SeqIO",
".",
"parse",
"(",
"open",
"(",
"alignment_file",
",",
"'r'",
")",
",",
"'fasta'",
")",
")",
"for",
"sequence",
"in",
"sequence_list",
":",
"# For each sequence in the alignment",
"for",
"idx",
",",
"nt",
"in",
"enumerate",
"(",
"list",
"(",
"sequence",
".",
"seq",
")",
")",
":",
"# For each nucleotide in the sequence",
"if",
"nt",
".",
"islower",
"(",
")",
":",
"# Check for lower case character",
"insert_list",
".",
"append",
"(",
"idx",
")",
"# Add to the insert list if it is",
"insert_list",
"=",
"list",
"(",
"OrderedDict",
".",
"fromkeys",
"(",
"sorted",
"(",
"insert_list",
",",
"reverse",
"=",
"True",
")",
")",
")",
"# Reverse the list and remove duplicate positions",
"for",
"sequence",
"in",
"sequence_list",
":",
"# For each sequence in the alignment",
"new_seq",
"=",
"list",
"(",
"sequence",
".",
"seq",
")",
"# Define a list of sequences to be iterable list for writing",
"for",
"position",
"in",
"insert_list",
":",
"# For each position in the removal list",
"del",
"new_seq",
"[",
"position",
"]",
"# Delete that inserted position in every sequence",
"corrected_sequences",
"[",
"'>'",
"+",
"sequence",
".",
"id",
"+",
"'\\n'",
"]",
"=",
"(",
"''",
".",
"join",
"(",
"new_seq",
")",
"+",
"'\\n'",
")",
".",
"replace",
"(",
"'~'",
",",
"'-'",
")",
"pre_filter_count",
"=",
"len",
"(",
"corrected_sequences",
")",
"if",
"filter_minimum",
":",
"# Use '>' not '>=' here because the sequence is on a single line, ",
"# but also includes a newline character at the end of the sequence",
"corrected_sequences",
"=",
"{",
"key",
":",
"item",
"for",
"key",
",",
"item",
"in",
"corrected_sequences",
".",
"iteritems",
"(",
")",
"if",
"len",
"(",
"item",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
")",
">",
"filter_minimum",
"}",
"post_filter_count",
"=",
"len",
"(",
"corrected_sequences",
")",
"logging",
".",
"info",
"(",
"\"Filtered %i short sequences from the alignment\"",
"%",
"(",
"pre_filter_count",
"-",
"post_filter_count",
")",
")",
"logging",
".",
"info",
"(",
"\"%i sequences remaining\"",
"%",
"post_filter_count",
")",
"if",
"len",
"(",
"corrected_sequences",
")",
">=",
"1",
":",
"with",
"open",
"(",
"output_file_name",
",",
"'w'",
")",
"as",
"output_file",
":",
"# Create an open file to write the new sequences to",
"for",
"fasta_id",
",",
"fasta_seq",
"in",
"corrected_sequences",
".",
"iteritems",
"(",
")",
":",
"output_file",
".",
"write",
"(",
"fasta_id",
")",
"output_file",
".",
"write",
"(",
"fasta_seq",
")",
"return",
"True",
"else",
":",
"return",
"False"
] | 53.169492 | 0.008764 |
def sequence_to_dt64ns(data, dtype=None, copy=False,
tz=None,
dayfirst=False, yearfirst=False, ambiguous='raise',
int_as_wall_time=False):
"""
Parameters
----------
data : list-like
dtype : dtype, str, or None, default None
copy : bool, default False
tz : tzinfo, str, or None, default None
dayfirst : bool, default False
yearfirst : bool, default False
ambiguous : str, bool, or arraylike, default 'raise'
See pandas._libs.tslibs.conversion.tz_localize_to_utc
int_as_wall_time : bool, default False
Whether to treat ints as wall time in specified timezone, or as
nanosecond-precision UNIX epoch (wall time in UTC).
This is used in DatetimeIndex.__init__ to deprecate the wall-time
behaviour.
        .. versionadded:: 0.24.0
Returns
-------
result : numpy.ndarray
The sequence converted to a numpy array with dtype ``datetime64[ns]``.
tz : tzinfo or None
Either the user-provided tzinfo or one inferred from the data.
inferred_freq : Tick or None
The inferred frequency of the sequence.
Raises
------
    TypeError : PeriodDtype data is passed
"""
inferred_freq = None
dtype = _validate_dt64_dtype(dtype)
if not hasattr(data, "dtype"):
# e.g. list, tuple
if np.ndim(data) == 0:
# i.e. generator
data = list(data)
data = np.asarray(data)
copy = False
elif isinstance(data, ABCSeries):
data = data._values
if isinstance(data, ABCPandasArray):
data = data.to_numpy()
if hasattr(data, "freq"):
# i.e. DatetimeArray/Index
inferred_freq = data.freq
# if dtype has an embedded tz, capture it
tz = validate_tz_from_dtype(dtype, tz)
if isinstance(data, ABCIndexClass):
data = data._data
# By this point we are assured to have either a numpy array or Index
data, copy = maybe_convert_dtype(data, copy)
if is_object_dtype(data) or is_string_dtype(data):
# TODO: We do not have tests specific to string-dtypes,
# also complex or categorical or other extension
copy = False
if lib.infer_dtype(data, skipna=False) == 'integer':
data = data.astype(np.int64)
else:
# data comes back here as either i8 to denote UTC timestamps
# or M8[ns] to denote wall times
data, inferred_tz = objects_to_datetime64ns(
data, dayfirst=dayfirst, yearfirst=yearfirst)
tz = maybe_infer_tz(tz, inferred_tz)
# When a sequence of timestamp objects is passed, we always
# want to treat the (now i8-valued) data as UTC timestamps,
# not wall times.
int_as_wall_time = False
# `data` may have originally been a Categorical[datetime64[ns, tz]],
# so we need to handle these types.
if is_datetime64tz_dtype(data):
# DatetimeArray -> ndarray
tz = maybe_infer_tz(tz, data.tz)
result = data._data
elif is_datetime64_dtype(data):
# tz-naive DatetimeArray or ndarray[datetime64]
data = getattr(data, "_data", data)
if data.dtype != _NS_DTYPE:
data = conversion.ensure_datetime64ns(data)
if tz is not None:
# Convert tz-naive to UTC
tz = timezones.maybe_get_tz(tz)
data = conversion.tz_localize_to_utc(data.view('i8'), tz,
ambiguous=ambiguous)
data = data.view(_NS_DTYPE)
assert data.dtype == _NS_DTYPE, data.dtype
result = data
else:
# must be integer dtype otherwise
# assume this data are epoch timestamps
if tz:
tz = timezones.maybe_get_tz(tz)
if data.dtype != _INT64_DTYPE:
data = data.astype(np.int64, copy=False)
if int_as_wall_time and tz is not None and not timezones.is_utc(tz):
warnings.warn(_i8_message, FutureWarning, stacklevel=4)
data = conversion.tz_localize_to_utc(data.view('i8'), tz,
ambiguous=ambiguous)
data = data.view(_NS_DTYPE)
result = data.view(_NS_DTYPE)
if copy:
# TODO: should this be deepcopy?
result = result.copy()
assert isinstance(result, np.ndarray), type(result)
assert result.dtype == 'M8[ns]', result.dtype
# We have to call this again after possibly inferring a tz above
validate_tz_from_dtype(dtype, tz)
return result, tz, inferred_freq | [
"def",
"sequence_to_dt64ns",
"(",
"data",
",",
"dtype",
"=",
"None",
",",
"copy",
"=",
"False",
",",
"tz",
"=",
"None",
",",
"dayfirst",
"=",
"False",
",",
"yearfirst",
"=",
"False",
",",
"ambiguous",
"=",
"'raise'",
",",
"int_as_wall_time",
"=",
"False",
")",
":",
"inferred_freq",
"=",
"None",
"dtype",
"=",
"_validate_dt64_dtype",
"(",
"dtype",
")",
"if",
"not",
"hasattr",
"(",
"data",
",",
"\"dtype\"",
")",
":",
"# e.g. list, tuple",
"if",
"np",
".",
"ndim",
"(",
"data",
")",
"==",
"0",
":",
"# i.e. generator",
"data",
"=",
"list",
"(",
"data",
")",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"copy",
"=",
"False",
"elif",
"isinstance",
"(",
"data",
",",
"ABCSeries",
")",
":",
"data",
"=",
"data",
".",
"_values",
"if",
"isinstance",
"(",
"data",
",",
"ABCPandasArray",
")",
":",
"data",
"=",
"data",
".",
"to_numpy",
"(",
")",
"if",
"hasattr",
"(",
"data",
",",
"\"freq\"",
")",
":",
"# i.e. DatetimeArray/Index",
"inferred_freq",
"=",
"data",
".",
"freq",
"# if dtype has an embedded tz, capture it",
"tz",
"=",
"validate_tz_from_dtype",
"(",
"dtype",
",",
"tz",
")",
"if",
"isinstance",
"(",
"data",
",",
"ABCIndexClass",
")",
":",
"data",
"=",
"data",
".",
"_data",
"# By this point we are assured to have either a numpy array or Index",
"data",
",",
"copy",
"=",
"maybe_convert_dtype",
"(",
"data",
",",
"copy",
")",
"if",
"is_object_dtype",
"(",
"data",
")",
"or",
"is_string_dtype",
"(",
"data",
")",
":",
"# TODO: We do not have tests specific to string-dtypes,",
"# also complex or categorical or other extension",
"copy",
"=",
"False",
"if",
"lib",
".",
"infer_dtype",
"(",
"data",
",",
"skipna",
"=",
"False",
")",
"==",
"'integer'",
":",
"data",
"=",
"data",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"else",
":",
"# data comes back here as either i8 to denote UTC timestamps",
"# or M8[ns] to denote wall times",
"data",
",",
"inferred_tz",
"=",
"objects_to_datetime64ns",
"(",
"data",
",",
"dayfirst",
"=",
"dayfirst",
",",
"yearfirst",
"=",
"yearfirst",
")",
"tz",
"=",
"maybe_infer_tz",
"(",
"tz",
",",
"inferred_tz",
")",
"# When a sequence of timestamp objects is passed, we always",
"# want to treat the (now i8-valued) data as UTC timestamps,",
"# not wall times.",
"int_as_wall_time",
"=",
"False",
"# `data` may have originally been a Categorical[datetime64[ns, tz]],",
"# so we need to handle these types.",
"if",
"is_datetime64tz_dtype",
"(",
"data",
")",
":",
"# DatetimeArray -> ndarray",
"tz",
"=",
"maybe_infer_tz",
"(",
"tz",
",",
"data",
".",
"tz",
")",
"result",
"=",
"data",
".",
"_data",
"elif",
"is_datetime64_dtype",
"(",
"data",
")",
":",
"# tz-naive DatetimeArray or ndarray[datetime64]",
"data",
"=",
"getattr",
"(",
"data",
",",
"\"_data\"",
",",
"data",
")",
"if",
"data",
".",
"dtype",
"!=",
"_NS_DTYPE",
":",
"data",
"=",
"conversion",
".",
"ensure_datetime64ns",
"(",
"data",
")",
"if",
"tz",
"is",
"not",
"None",
":",
"# Convert tz-naive to UTC",
"tz",
"=",
"timezones",
".",
"maybe_get_tz",
"(",
"tz",
")",
"data",
"=",
"conversion",
".",
"tz_localize_to_utc",
"(",
"data",
".",
"view",
"(",
"'i8'",
")",
",",
"tz",
",",
"ambiguous",
"=",
"ambiguous",
")",
"data",
"=",
"data",
".",
"view",
"(",
"_NS_DTYPE",
")",
"assert",
"data",
".",
"dtype",
"==",
"_NS_DTYPE",
",",
"data",
".",
"dtype",
"result",
"=",
"data",
"else",
":",
"# must be integer dtype otherwise",
"# assume this data are epoch timestamps",
"if",
"tz",
":",
"tz",
"=",
"timezones",
".",
"maybe_get_tz",
"(",
"tz",
")",
"if",
"data",
".",
"dtype",
"!=",
"_INT64_DTYPE",
":",
"data",
"=",
"data",
".",
"astype",
"(",
"np",
".",
"int64",
",",
"copy",
"=",
"False",
")",
"if",
"int_as_wall_time",
"and",
"tz",
"is",
"not",
"None",
"and",
"not",
"timezones",
".",
"is_utc",
"(",
"tz",
")",
":",
"warnings",
".",
"warn",
"(",
"_i8_message",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"4",
")",
"data",
"=",
"conversion",
".",
"tz_localize_to_utc",
"(",
"data",
".",
"view",
"(",
"'i8'",
")",
",",
"tz",
",",
"ambiguous",
"=",
"ambiguous",
")",
"data",
"=",
"data",
".",
"view",
"(",
"_NS_DTYPE",
")",
"result",
"=",
"data",
".",
"view",
"(",
"_NS_DTYPE",
")",
"if",
"copy",
":",
"# TODO: should this be deepcopy?",
"result",
"=",
"result",
".",
"copy",
"(",
")",
"assert",
"isinstance",
"(",
"result",
",",
"np",
".",
"ndarray",
")",
",",
"type",
"(",
"result",
")",
"assert",
"result",
".",
"dtype",
"==",
"'M8[ns]'",
",",
"result",
".",
"dtype",
"# We have to call this again after possibly inferring a tz above",
"validate_tz_from_dtype",
"(",
"dtype",
",",
"tz",
")",
"return",
"result",
",",
"tz",
",",
"inferred_freq"
] | 34.348485 | 0.000214 |
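A minimal sketch of how this helper is exercised, assuming the sequence_to_dt64ns function above is in scope (it is a private pandas routine, so the exact import path depends on the pandas release):

import numpy as np

values = np.array(['2019-01-01 09:00', '2019-01-02 09:00'], dtype=object)
result, tz, inferred_freq = sequence_to_dt64ns(values, tz='US/Eastern')
print(result.dtype)   # datetime64[ns]; the wall times have been localized to UTC internally
print(tz)             # the resolved US/Eastern tzinfo object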
def remove_too_short(utterances: List[Utterance],
_winlen=25, winstep=10) -> List[Utterance]:
""" Removes utterances that will probably have issues with CTC because of
the number of frames being less than the number of tokens in the
transcription. Assuming char tokenization to minimize false negatives.
"""
def is_too_short(utterance: Utterance) -> bool:
charlen = len(utterance.text)
if (duration(utterance) / winstep) < charlen:
return True
else:
return False
return [utter for utter in utterances if not is_too_short(utter)] | [
"def",
"remove_too_short",
"(",
"utterances",
":",
"List",
"[",
"Utterance",
"]",
",",
"_winlen",
"=",
"25",
",",
"winstep",
"=",
"10",
")",
"->",
"List",
"[",
"Utterance",
"]",
":",
"def",
"is_too_short",
"(",
"utterance",
":",
"Utterance",
")",
"->",
"bool",
":",
"charlen",
"=",
"len",
"(",
"utterance",
".",
"text",
")",
"if",
"(",
"duration",
"(",
"utterance",
")",
"/",
"winstep",
")",
"<",
"charlen",
":",
"return",
"True",
"else",
":",
"return",
"False",
"return",
"[",
"utter",
"for",
"utter",
"in",
"utterances",
"if",
"not",
"is_too_short",
"(",
"utter",
")",
"]"
] | 43.5 | 0.001608 |
def port_pair(self):
"""The port and it's transport as a pair"""
if self.transport is NotSpecified:
return (self.port, "tcp")
else:
return (self.port, self.transport) | [
"def",
"port_pair",
"(",
"self",
")",
":",
"if",
"self",
".",
"transport",
"is",
"NotSpecified",
":",
"return",
"(",
"self",
".",
"port",
",",
"\"tcp\"",
")",
"else",
":",
"return",
"(",
"self",
".",
"port",
",",
"self",
".",
"transport",
")"
] | 34.833333 | 0.009346 |
def find_sanitizer_from_module(module_name, function_name):
"""
Attempts to find sanitizer function from given module. If the module
cannot be imported, or function with given name does not exist in it,
nothing will be returned by this method. Otherwise the found sanitizer
function will be returned.
:param module_name: Name of the module to import the function from.
:type module_name: str
:param function_name: Name of the function to look for inside the
module.
:type function_name: str
:return: Sanitizer function found from the module, if it can be
        imported and it indeed contains a function with the given name.
Otherwise None will be returned instead.
:rtype: callback|None
"""
try:
module = importlib.import_module(module_name)
except ImportError:
return None
# Look for the function inside the module. At this point it could be
# pretty much anything.
callback = getattr(module, function_name, None)
# Function does not exist in this module? Give up.
if callback is None:
return None
# It's actually callable function? Return it.
if callable(callback):
return callback
# Sanitizer seems to be something else than a function. Throw an
# exception to report such problem.
raise ConfigurationError("'%s' in '%s' is %s instead of function" % (
function_name,
module_name,
type(callback),
)) | [
"def",
"find_sanitizer_from_module",
"(",
"module_name",
",",
"function_name",
")",
":",
"try",
":",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"module_name",
")",
"except",
"ImportError",
":",
"return",
"None",
"# Look for the function inside the module. At this point it could be",
"# pretty much anything.",
"callback",
"=",
"getattr",
"(",
"module",
",",
"function_name",
",",
"None",
")",
"# Function does not exist in this module? Give up.",
"if",
"callback",
"is",
"None",
":",
"return",
"None",
"# It's actually callable function? Return it.",
"if",
"callable",
"(",
"callback",
")",
":",
"return",
"callback",
"# Sanitizer seems to be something else than a function. Throw an",
"# exception to report such problem.",
"raise",
"ConfigurationError",
"(",
"\"'%s' in '%s' is %s instead of function\"",
"%",
"(",
"function_name",
",",
"module_name",
",",
"type",
"(",
"callback",
")",
",",
")",
")"
] | 37.372093 | 0.001213 |
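A short usage sketch, assuming find_sanitizer_from_module above is in scope; the module/function pair below simply reuses the standard library so the lookup actually resolves:

sanitizer = find_sanitizer_from_module('os.path', 'basename')
print(sanitizer('/tmp/example.txt'))   # -> example.txt

missing = find_sanitizer_from_module('os.path', 'no_such_function')
print(missing)                         # -> None, the attribute does not exist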
def bar(h1: Histogram1D, **kwargs) -> dict:
"""Bar plot of 1D histogram.
Parameters
----------
lw : float
Width of the line between bars
alpha : float
Opacity of the bars
hover_alpha: float
        Opacity of the bars when hovered over
"""
# TODO: Enable collections
# TODO: Enable legend
vega = _create_figure(kwargs)
_add_title(h1, vega, kwargs)
_create_scales(h1, vega, kwargs)
_create_axes(h1, vega, kwargs)
data = get_data(h1, kwargs.pop("density", None), kwargs.pop("cumulative", None)).tolist()
lefts = h1.bin_left_edges.astype(float).tolist()
rights = h1.bin_right_edges.astype(float).tolist()
vega["data"] = [{
"name": "table",
"values": [{
"x": lefts[i],
"x2": rights[i],
"y": data[i],
}
for i in range(h1.bin_count)
]
}]
alpha = kwargs.pop("alpha", 1)
# hover_alpha = kwargs.pop("hover_alpha", alpha)
vega["marks"] = [
{
"type": "rect",
"from": {"data": "table"},
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "x"},
"x2": {"scale": "xscale", "field": "x2"},
"y": {"scale": "yscale", "value": 0},
"y2": {"scale": "yscale", "field": "y"},
# "stroke": {"scale": "color", "field": "c"},
"strokeWidth": {"value": kwargs.pop("lw", 2)}
},
"update": {
"fillOpacity": [
# {"test": "datum === tooltip", "value": hover_alpha},
{"value": alpha}
]
},
}
}
]
_create_tooltips(h1, vega, kwargs)
return vega | [
"def",
"bar",
"(",
"h1",
":",
"Histogram1D",
",",
"*",
"*",
"kwargs",
")",
"->",
"dict",
":",
"# TODO: Enable collections",
"# TODO: Enable legend",
"vega",
"=",
"_create_figure",
"(",
"kwargs",
")",
"_add_title",
"(",
"h1",
",",
"vega",
",",
"kwargs",
")",
"_create_scales",
"(",
"h1",
",",
"vega",
",",
"kwargs",
")",
"_create_axes",
"(",
"h1",
",",
"vega",
",",
"kwargs",
")",
"data",
"=",
"get_data",
"(",
"h1",
",",
"kwargs",
".",
"pop",
"(",
"\"density\"",
",",
"None",
")",
",",
"kwargs",
".",
"pop",
"(",
"\"cumulative\"",
",",
"None",
")",
")",
".",
"tolist",
"(",
")",
"lefts",
"=",
"h1",
".",
"bin_left_edges",
".",
"astype",
"(",
"float",
")",
".",
"tolist",
"(",
")",
"rights",
"=",
"h1",
".",
"bin_right_edges",
".",
"astype",
"(",
"float",
")",
".",
"tolist",
"(",
")",
"vega",
"[",
"\"data\"",
"]",
"=",
"[",
"{",
"\"name\"",
":",
"\"table\"",
",",
"\"values\"",
":",
"[",
"{",
"\"x\"",
":",
"lefts",
"[",
"i",
"]",
",",
"\"x2\"",
":",
"rights",
"[",
"i",
"]",
",",
"\"y\"",
":",
"data",
"[",
"i",
"]",
",",
"}",
"for",
"i",
"in",
"range",
"(",
"h1",
".",
"bin_count",
")",
"]",
"}",
"]",
"alpha",
"=",
"kwargs",
".",
"pop",
"(",
"\"alpha\"",
",",
"1",
")",
"# hover_alpha = kwargs.pop(\"hover_alpha\", alpha)",
"vega",
"[",
"\"marks\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"rect\"",
",",
"\"from\"",
":",
"{",
"\"data\"",
":",
"\"table\"",
"}",
",",
"\"encode\"",
":",
"{",
"\"enter\"",
":",
"{",
"\"x\"",
":",
"{",
"\"scale\"",
":",
"\"xscale\"",
",",
"\"field\"",
":",
"\"x\"",
"}",
",",
"\"x2\"",
":",
"{",
"\"scale\"",
":",
"\"xscale\"",
",",
"\"field\"",
":",
"\"x2\"",
"}",
",",
"\"y\"",
":",
"{",
"\"scale\"",
":",
"\"yscale\"",
",",
"\"value\"",
":",
"0",
"}",
",",
"\"y2\"",
":",
"{",
"\"scale\"",
":",
"\"yscale\"",
",",
"\"field\"",
":",
"\"y\"",
"}",
",",
"# \"stroke\": {\"scale\": \"color\", \"field\": \"c\"},",
"\"strokeWidth\"",
":",
"{",
"\"value\"",
":",
"kwargs",
".",
"pop",
"(",
"\"lw\"",
",",
"2",
")",
"}",
"}",
",",
"\"update\"",
":",
"{",
"\"fillOpacity\"",
":",
"[",
"# {\"test\": \"datum === tooltip\", \"value\": hover_alpha},",
"{",
"\"value\"",
":",
"alpha",
"}",
"]",
"}",
",",
"}",
"}",
"]",
"_create_tooltips",
"(",
"h1",
",",
"vega",
",",
"kwargs",
")",
"return",
"vega"
] | 28.52381 | 0.001076 |
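A hedged usage sketch, assuming this bar() function is the Vega backend of physt (the Histogram1D type hint suggests so) and that physt is installed:

import numpy as np
import physt

heights = np.random.normal(loc=170, scale=10, size=1000)
h = physt.h1(heights, 25)          # 25-bin one-dimensional histogram
spec = bar(h, alpha=0.6, lw=1)     # plain dict holding the Vega specification
print(spec["marks"][0]["type"])    # -> rect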
def print_upper_triangular_matrix_as_complete(matrix):
"""Prints a CVRP data dict upper triangular matrix as a normal matrix
Doesn't print headers.
Arguments
---------
matrix : dict
        Upper triangular matrix stored as a dict of dicts; matrix[i][j] is defined only for i <= j
"""
for i in sorted(matrix.keys()):
for j in sorted(matrix.keys()):
a, b = i, j
if a > b:
a, b = b, a
print(matrix[a][b], end=' ')
print() | [
"def",
"print_upper_triangular_matrix_as_complete",
"(",
"matrix",
")",
":",
"for",
"i",
"in",
"sorted",
"(",
"matrix",
".",
"keys",
"(",
")",
")",
":",
"for",
"j",
"in",
"sorted",
"(",
"matrix",
".",
"keys",
"(",
")",
")",
":",
"a",
",",
"b",
"=",
"i",
",",
"j",
"if",
"a",
">",
"b",
":",
"a",
",",
"b",
"=",
"b",
",",
"a",
"print",
"(",
"matrix",
"[",
"a",
"]",
"[",
"b",
"]",
",",
"end",
"=",
"' '",
")",
"print",
"(",
")"
] | 21.9 | 0.008753 |
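A self-contained example for the record above; the dict-of-dicts layout (matrix[i][j] stored only for i <= j) is inferred from the index swap in the loop:

distances = {
    0: {0: 0, 1: 7, 2: 4},
    1: {1: 0, 2: 3},
    2: {2: 0},
}
print_upper_triangular_matrix_as_complete(distances)
# 0 7 4
# 7 0 3
# 4 3 0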
def reshape_like_all_dims(a, b):
"""Reshapes a to match the shape of b."""
ret = tf.reshape(a, tf.shape(b))
if not tf.executing_eagerly():
ret.set_shape(b.get_shape())
return ret | [
"def",
"reshape_like_all_dims",
"(",
"a",
",",
"b",
")",
":",
"ret",
"=",
"tf",
".",
"reshape",
"(",
"a",
",",
"tf",
".",
"shape",
"(",
"b",
")",
")",
"if",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"ret",
".",
"set_shape",
"(",
"b",
".",
"get_shape",
"(",
")",
")",
"return",
"ret"
] | 30.833333 | 0.026316 |
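A tiny TensorFlow sketch, assuming reshape_like_all_dims above is in scope and TF 2.x eager execution:

import tensorflow as tf

a = tf.range(6)                   # shape (6,)
b = tf.zeros([2, 3])              # donor of the target shape
c = reshape_like_all_dims(a, b)
print(c.shape)                    # (2, 3)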
def write_tree_newick(self, filename, hide_rooted_prefix=False):
'''Write this ``Tree`` to a Newick file
Args:
            ``filename`` (``str``): Path to desired output file (plain-text or gzipped)

            ``hide_rooted_prefix`` (``bool``): If True, strip the leading '[&R]' rooted-tree prefix before writing
        '''
if not isinstance(filename, str):
raise TypeError("filename must be a str")
treestr = self.newick()
if hide_rooted_prefix:
if treestr.startswith('[&R]'):
treestr = treestr[4:].strip()
else:
warn("Specified hide_rooted_prefix, but tree was not rooted")
if filename.lower().endswith('.gz'): # gzipped file
f = gopen(expanduser(filename),'wb',9); f.write(treestr.encode()); f.close()
else: # plain-text file
f = open(expanduser(filename),'w'); f.write(treestr); f.close() | [
"def",
"write_tree_newick",
"(",
"self",
",",
"filename",
",",
"hide_rooted_prefix",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"filename",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"filename must be a str\"",
")",
"treestr",
"=",
"self",
".",
"newick",
"(",
")",
"if",
"hide_rooted_prefix",
":",
"if",
"treestr",
".",
"startswith",
"(",
"'[&R]'",
")",
":",
"treestr",
"=",
"treestr",
"[",
"4",
":",
"]",
".",
"strip",
"(",
")",
"else",
":",
"warn",
"(",
"\"Specified hide_rooted_prefix, but tree was not rooted\"",
")",
"if",
"filename",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"# gzipped file",
"f",
"=",
"gopen",
"(",
"expanduser",
"(",
"filename",
")",
",",
"'wb'",
",",
"9",
")",
"f",
".",
"write",
"(",
"treestr",
".",
"encode",
"(",
")",
")",
"f",
".",
"close",
"(",
")",
"else",
":",
"# plain-text file",
"f",
"=",
"open",
"(",
"expanduser",
"(",
"filename",
")",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"treestr",
")",
"f",
".",
"close",
"(",
")"
] | 45.055556 | 0.0157 |
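A hypothetical round trip, assuming this method belongs to TreeSwift's Tree class and that the package's read_tree_newick parser is available:

from treeswift import read_tree_newick

tree = read_tree_newick('((A:1,B:1):1,C:2);')
tree.write_tree_newick('example.nwk')                           # plain-text output
tree.write_tree_newick('example.nwk.gz')                        # gzipped, picked by file extension
tree.write_tree_newick('rooted.nwk', hide_rooted_prefix=True)   # drop a leading [&R]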
def values(self, key_type=None):
""" Returns a copy of the dictionary's values.
@param key_type if specified, only values pointed by keys of this type will be returned.
Otherwise list of all values contained in this dictionary will be returned."""
if(key_type is not None):
all_items = {} # in order to preserve keys() type (dict_values for python3)
keys_used = set()
direct_key = str(key_type)
if direct_key in self.__dict__:
for intermediate_key in self.__dict__[direct_key].values():
if not intermediate_key in keys_used:
all_items[intermediate_key] = self.items_dict[intermediate_key]
keys_used.add(intermediate_key)
return all_items.values()
else:
return self.items_dict.values() | [
"def",
"values",
"(",
"self",
",",
"key_type",
"=",
"None",
")",
":",
"if",
"(",
"key_type",
"is",
"not",
"None",
")",
":",
"all_items",
"=",
"{",
"}",
"# in order to preserve keys() type (dict_values for python3) \r",
"keys_used",
"=",
"set",
"(",
")",
"direct_key",
"=",
"str",
"(",
"key_type",
")",
"if",
"direct_key",
"in",
"self",
".",
"__dict__",
":",
"for",
"intermediate_key",
"in",
"self",
".",
"__dict__",
"[",
"direct_key",
"]",
".",
"values",
"(",
")",
":",
"if",
"not",
"intermediate_key",
"in",
"keys_used",
":",
"all_items",
"[",
"intermediate_key",
"]",
"=",
"self",
".",
"items_dict",
"[",
"intermediate_key",
"]",
"keys_used",
".",
"add",
"(",
"intermediate_key",
")",
"return",
"all_items",
".",
"values",
"(",
")",
"else",
":",
"return",
"self",
".",
"items_dict",
".",
"values",
"(",
")"
] | 55.875 | 0.009901 |
def relop_code(self, relop, operands_type):
"""Returns code for relational operator
relop - relational operator
operands_type - int or unsigned
"""
code = self.RELATIONAL_DICT[relop]
offset = 0 if operands_type == SharedData.TYPES.INT else len(SharedData.RELATIONAL_OPERATORS)
return code + offset | [
"def",
"relop_code",
"(",
"self",
",",
"relop",
",",
"operands_type",
")",
":",
"code",
"=",
"self",
".",
"RELATIONAL_DICT",
"[",
"relop",
"]",
"offset",
"=",
"0",
"if",
"operands_type",
"==",
"SharedData",
".",
"TYPES",
".",
"INT",
"else",
"len",
"(",
"SharedData",
".",
"RELATIONAL_OPERATORS",
")",
"return",
"code",
"+",
"offset"
] | 44.875 | 0.008197 |
def copyNodeList(self, node):
"""Do a recursive copy of the node list. """
if node is None: node__o = None
else: node__o = node._o
ret = libxml2mod.xmlDocCopyNodeList(self._o, node__o)
if ret is None:raise treeError('xmlDocCopyNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | [
"def",
"copyNodeList",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
"is",
"None",
":",
"node__o",
"=",
"None",
"else",
":",
"node__o",
"=",
"node",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlDocCopyNodeList",
"(",
"self",
".",
"_o",
",",
"node__o",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlDocCopyNodeList() failed'",
")",
"__tmp",
"=",
"xmlNode",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] | 41.75 | 0.017595 |
def make_parser(parser_creator=None, **kwargs):
"""Returns a base argument parser for the ray.tune tool.
Args:
parser_creator: A constructor for the parser class.
kwargs: Non-positional args to be passed into the
parser class constructor.
"""
if parser_creator:
parser = parser_creator(**kwargs)
else:
parser = argparse.ArgumentParser(**kwargs)
# Note: keep this in sync with rllib/train.py
parser.add_argument(
"--run",
default=None,
type=str,
help="The algorithm or model to train. This may refer to the name "
"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
"user-defined trainable function or class registered in the "
"tune registry.")
parser.add_argument(
"--stop",
default="{}",
type=json.loads,
help="The stopping criteria, specified in JSON. The keys may be any "
"field returned by 'train()' e.g. "
"'{\"time_total_s\": 600, \"training_iteration\": 100000}' to stop "
"after 600 seconds or 100k iterations, whichever is reached first.")
parser.add_argument(
"--config",
default="{}",
type=json.loads,
help="Algorithm-specific configuration (e.g. env, hyperparams), "
"specified in JSON.")
parser.add_argument(
"--resources-per-trial",
default=None,
type=json_to_resources,
help="Override the machine resources to allocate per trial, e.g. "
"'{\"cpu\": 64, \"gpu\": 8}'. Note that GPUs will not be assigned "
"unless you specify them here. For RLlib, you probably want to "
"leave this alone and use RLlib configs to control parallelism.")
parser.add_argument(
"--num-samples",
default=1,
type=int,
help="Number of times to repeat each trial.")
parser.add_argument(
"--local-dir",
default=DEFAULT_RESULTS_DIR,
type=str,
help="Local dir to save training results to. Defaults to '{}'.".format(
DEFAULT_RESULTS_DIR))
parser.add_argument(
"--upload-dir",
default="",
type=str,
help="Optional URI to sync training results to (e.g. s3://bucket).")
parser.add_argument(
"--trial-name-creator",
default=None,
help="Optional creator function for the trial string, used in "
"generating a trial directory.")
parser.add_argument(
"--sync-function",
default=None,
help="Function for syncing the local_dir to upload_dir. If string, "
"then it must be a string template for syncer to run and needs to "
"include replacement fields '{local_dir}' and '{remote_dir}'.")
parser.add_argument(
"--loggers",
default=None,
help="List of logger creators to be used with each Trial. "
"Defaults to ray.tune.logger.DEFAULT_LOGGERS.")
parser.add_argument(
"--checkpoint-freq",
default=0,
type=int,
help="How many training iterations between checkpoints. "
"A value of 0 (default) disables checkpointing.")
parser.add_argument(
"--checkpoint-at-end",
action="store_true",
help="Whether to checkpoint at the end of the experiment. "
"Default is False.")
parser.add_argument(
"--keep-checkpoints-num",
default=None,
type=int,
help="Number of last checkpoints to keep. Others get "
"deleted. Default (None) keeps all checkpoints.")
parser.add_argument(
"--checkpoint-score-attr",
default="training_iteration",
type=str,
help="Specifies by which attribute to rank the best checkpoint. "
"Default is increasing order. If attribute starts with min- it "
"will rank attribute in decreasing order. Example: "
"min-validation_loss")
parser.add_argument(
"--export-formats",
default=None,
help="List of formats that exported at the end of the experiment. "
"Default is None. For RLlib, 'checkpoint' and 'model' are "
"supported for TensorFlow policy graphs.")
parser.add_argument(
"--max-failures",
default=3,
type=int,
help="Try to recover a trial from its last checkpoint at least this "
"many times. Only applies if checkpointing is enabled.")
parser.add_argument(
"--scheduler",
default="FIFO",
type=str,
help="FIFO (default), MedianStopping, AsyncHyperBand, "
"HyperBand, or HyperOpt.")
parser.add_argument(
"--scheduler-config",
default="{}",
type=json.loads,
help="Config options to pass to the scheduler.")
# Note: this currently only makes sense when running a single trial
parser.add_argument(
"--restore",
default=None,
type=str,
help="If specified, restore from this checkpoint.")
return parser | [
"def",
"make_parser",
"(",
"parser_creator",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"parser_creator",
":",
"parser",
"=",
"parser_creator",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"*",
"*",
"kwargs",
")",
"# Note: keep this in sync with rllib/train.py",
"parser",
".",
"add_argument",
"(",
"\"--run\"",
",",
"default",
"=",
"None",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"The algorithm or model to train. This may refer to the name \"",
"\"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a \"",
"\"user-defined trainable function or class registered in the \"",
"\"tune registry.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--stop\"",
",",
"default",
"=",
"\"{}\"",
",",
"type",
"=",
"json",
".",
"loads",
",",
"help",
"=",
"\"The stopping criteria, specified in JSON. The keys may be any \"",
"\"field returned by 'train()' e.g. \"",
"\"'{\\\"time_total_s\\\": 600, \\\"training_iteration\\\": 100000}' to stop \"",
"\"after 600 seconds or 100k iterations, whichever is reached first.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--config\"",
",",
"default",
"=",
"\"{}\"",
",",
"type",
"=",
"json",
".",
"loads",
",",
"help",
"=",
"\"Algorithm-specific configuration (e.g. env, hyperparams), \"",
"\"specified in JSON.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--resources-per-trial\"",
",",
"default",
"=",
"None",
",",
"type",
"=",
"json_to_resources",
",",
"help",
"=",
"\"Override the machine resources to allocate per trial, e.g. \"",
"\"'{\\\"cpu\\\": 64, \\\"gpu\\\": 8}'. Note that GPUs will not be assigned \"",
"\"unless you specify them here. For RLlib, you probably want to \"",
"\"leave this alone and use RLlib configs to control parallelism.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--num-samples\"",
",",
"default",
"=",
"1",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Number of times to repeat each trial.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--local-dir\"",
",",
"default",
"=",
"DEFAULT_RESULTS_DIR",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Local dir to save training results to. Defaults to '{}'.\"",
".",
"format",
"(",
"DEFAULT_RESULTS_DIR",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"--upload-dir\"",
",",
"default",
"=",
"\"\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Optional URI to sync training results to (e.g. s3://bucket).\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--trial-name-creator\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"Optional creator function for the trial string, used in \"",
"\"generating a trial directory.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--sync-function\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"Function for syncing the local_dir to upload_dir. If string, \"",
"\"then it must be a string template for syncer to run and needs to \"",
"\"include replacement fields '{local_dir}' and '{remote_dir}'.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--loggers\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"List of logger creators to be used with each Trial. \"",
"\"Defaults to ray.tune.logger.DEFAULT_LOGGERS.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--checkpoint-freq\"",
",",
"default",
"=",
"0",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"How many training iterations between checkpoints. \"",
"\"A value of 0 (default) disables checkpointing.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--checkpoint-at-end\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Whether to checkpoint at the end of the experiment. \"",
"\"Default is False.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--keep-checkpoints-num\"",
",",
"default",
"=",
"None",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Number of last checkpoints to keep. Others get \"",
"\"deleted. Default (None) keeps all checkpoints.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--checkpoint-score-attr\"",
",",
"default",
"=",
"\"training_iteration\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Specifies by which attribute to rank the best checkpoint. \"",
"\"Default is increasing order. If attribute starts with min- it \"",
"\"will rank attribute in decreasing order. Example: \"",
"\"min-validation_loss\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--export-formats\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"List of formats that exported at the end of the experiment. \"",
"\"Default is None. For RLlib, 'checkpoint' and 'model' are \"",
"\"supported for TensorFlow policy graphs.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--max-failures\"",
",",
"default",
"=",
"3",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Try to recover a trial from its last checkpoint at least this \"",
"\"many times. Only applies if checkpointing is enabled.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--scheduler\"",
",",
"default",
"=",
"\"FIFO\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"FIFO (default), MedianStopping, AsyncHyperBand, \"",
"\"HyperBand, or HyperOpt.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--scheduler-config\"",
",",
"default",
"=",
"\"{}\"",
",",
"type",
"=",
"json",
".",
"loads",
",",
"help",
"=",
"\"Config options to pass to the scheduler.\"",
")",
"# Note: this currently only makes sense when running a single trial",
"parser",
".",
"add_argument",
"(",
"\"--restore\"",
",",
"default",
"=",
"None",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"If specified, restore from this checkpoint.\"",
")",
"return",
"parser"
] | 36.455224 | 0.000199 |
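A short invocation sketch, assuming make_parser above is in scope (it mirrors ray.tune's CLI helper):

parser = make_parser(description="Tune an experiment")
args = parser.parse_args([
    "--run", "PPO",
    "--stop", '{"training_iteration": 10}',
    "--config", '{"env": "CartPole-v0"}',
    "--num-samples", "2",
])
print(args.run, args.stop, args.num_samples)   # PPO {'training_iteration': 10} 2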
def export_epoch_file(stimfunction,
filename,
tr_duration,
temporal_resolution=100.0
):
""" Output an epoch file, necessary for some inputs into brainiak
This takes in the time course of stimulus events and outputs the epoch
file used in Brainiak. The epoch file is a way to structure the timing
information in fMRI that allows you to flexibly input different stimulus
sequences. This is a list with each entry a 3d matrix corresponding to a
participant. The dimensions of the 3d matrix are condition by epoch by
time. For the i-th condition, if its k-th epoch spans time points t_m to
t_n-1, then [i, k, t_m:t_n] are 1 in the epoch file.
Parameters
----------
stimfunction : list of timepoint by condition arrays
The stimulus function describing the time course of events. Each
list entry is from a different participant, each row is a different
timepoint (with the given temporal precision), each column is a
different condition. export_epoch_file is looking for differences in
the value of stimfunction to identify the start and end of an
epoch. If epochs in stimfunction are coded with the same weight and
there is no time between blocks then export_epoch_file won't be able to
label them as different epochs
filename : str
The name of the epoch file to be output
tr_duration : float
How long is each TR in seconds
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction?
"""
# Cycle through the participants, different entries in the list
epoch_file = [0] * len(stimfunction)
for ppt_counter in range(len(stimfunction)):
# What is the time course for the participant (binarized)
stimfunction_ppt = np.abs(stimfunction[ppt_counter]) > 0
# Down sample the stim function
stride = tr_duration * temporal_resolution
stimfunction_downsampled = stimfunction_ppt[::int(stride), :]
# Calculates the number of event onsets. This uses changes in value
# to reflect different epochs. This might be false in some cases (the
# weight is non-uniform over an epoch or there is no break between
# identically weighted epochs).
epochs = 0 # Preset
conditions = stimfunction_ppt.shape[1]
for condition_counter in range(conditions):
weight_change = (np.diff(stimfunction_downsampled[:,
condition_counter], 1, 0) != 0)
# If the first or last events are 'on' then make these
# represent a epoch change
if stimfunction_downsampled[0, condition_counter] == 1:
weight_change[0] = True
if stimfunction_downsampled[-1, condition_counter] == 1:
weight_change[-1] = True
epochs += int(np.max(np.sum(weight_change, 0)) / 2)
# Get other information
trs = stimfunction_downsampled.shape[0]
# Make a timing file for this participant
epoch_file[ppt_counter] = np.zeros((conditions, epochs, trs))
# Cycle through conditions
epoch_counter = 0 # Reset and count across conditions
tr_counter = 0
while tr_counter < stimfunction_downsampled.shape[0]:
for condition_counter in range(conditions):
# Is it an event?
if tr_counter < stimfunction_downsampled.shape[0] and \
stimfunction_downsampled[
tr_counter, condition_counter] == 1:
# Add a one for this TR
epoch_file[ppt_counter][condition_counter,
epoch_counter, tr_counter] = 1
# Find the next non event value
end_idx = np.where(stimfunction_downsampled[tr_counter:,
condition_counter] == 0)[
0][0]
tr_idxs = list(range(tr_counter, tr_counter + end_idx))
# Add ones to all the trs within this event time frame
epoch_file[ppt_counter][condition_counter,
epoch_counter, tr_idxs] = 1
# Start from this index
tr_counter += end_idx
# Increment
epoch_counter += 1
# Increment the counter
tr_counter += 1
# Convert to boolean
epoch_file[ppt_counter] = epoch_file[ppt_counter].astype('bool')
# Save the file
np.save(filename, epoch_file) | [
"def",
"export_epoch_file",
"(",
"stimfunction",
",",
"filename",
",",
"tr_duration",
",",
"temporal_resolution",
"=",
"100.0",
")",
":",
"# Cycle through the participants, different entries in the list",
"epoch_file",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"stimfunction",
")",
"for",
"ppt_counter",
"in",
"range",
"(",
"len",
"(",
"stimfunction",
")",
")",
":",
"# What is the time course for the participant (binarized)",
"stimfunction_ppt",
"=",
"np",
".",
"abs",
"(",
"stimfunction",
"[",
"ppt_counter",
"]",
")",
">",
"0",
"# Down sample the stim function",
"stride",
"=",
"tr_duration",
"*",
"temporal_resolution",
"stimfunction_downsampled",
"=",
"stimfunction_ppt",
"[",
":",
":",
"int",
"(",
"stride",
")",
",",
":",
"]",
"# Calculates the number of event onsets. This uses changes in value",
"# to reflect different epochs. This might be false in some cases (the",
"# weight is non-uniform over an epoch or there is no break between",
"# identically weighted epochs).",
"epochs",
"=",
"0",
"# Preset",
"conditions",
"=",
"stimfunction_ppt",
".",
"shape",
"[",
"1",
"]",
"for",
"condition_counter",
"in",
"range",
"(",
"conditions",
")",
":",
"weight_change",
"=",
"(",
"np",
".",
"diff",
"(",
"stimfunction_downsampled",
"[",
":",
",",
"condition_counter",
"]",
",",
"1",
",",
"0",
")",
"!=",
"0",
")",
"# If the first or last events are 'on' then make these",
"# represent a epoch change",
"if",
"stimfunction_downsampled",
"[",
"0",
",",
"condition_counter",
"]",
"==",
"1",
":",
"weight_change",
"[",
"0",
"]",
"=",
"True",
"if",
"stimfunction_downsampled",
"[",
"-",
"1",
",",
"condition_counter",
"]",
"==",
"1",
":",
"weight_change",
"[",
"-",
"1",
"]",
"=",
"True",
"epochs",
"+=",
"int",
"(",
"np",
".",
"max",
"(",
"np",
".",
"sum",
"(",
"weight_change",
",",
"0",
")",
")",
"/",
"2",
")",
"# Get other information",
"trs",
"=",
"stimfunction_downsampled",
".",
"shape",
"[",
"0",
"]",
"# Make a timing file for this participant",
"epoch_file",
"[",
"ppt_counter",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"conditions",
",",
"epochs",
",",
"trs",
")",
")",
"# Cycle through conditions",
"epoch_counter",
"=",
"0",
"# Reset and count across conditions",
"tr_counter",
"=",
"0",
"while",
"tr_counter",
"<",
"stimfunction_downsampled",
".",
"shape",
"[",
"0",
"]",
":",
"for",
"condition_counter",
"in",
"range",
"(",
"conditions",
")",
":",
"# Is it an event?",
"if",
"tr_counter",
"<",
"stimfunction_downsampled",
".",
"shape",
"[",
"0",
"]",
"and",
"stimfunction_downsampled",
"[",
"tr_counter",
",",
"condition_counter",
"]",
"==",
"1",
":",
"# Add a one for this TR",
"epoch_file",
"[",
"ppt_counter",
"]",
"[",
"condition_counter",
",",
"epoch_counter",
",",
"tr_counter",
"]",
"=",
"1",
"# Find the next non event value",
"end_idx",
"=",
"np",
".",
"where",
"(",
"stimfunction_downsampled",
"[",
"tr_counter",
":",
",",
"condition_counter",
"]",
"==",
"0",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"tr_idxs",
"=",
"list",
"(",
"range",
"(",
"tr_counter",
",",
"tr_counter",
"+",
"end_idx",
")",
")",
"# Add ones to all the trs within this event time frame",
"epoch_file",
"[",
"ppt_counter",
"]",
"[",
"condition_counter",
",",
"epoch_counter",
",",
"tr_idxs",
"]",
"=",
"1",
"# Start from this index",
"tr_counter",
"+=",
"end_idx",
"# Increment",
"epoch_counter",
"+=",
"1",
"# Increment the counter",
"tr_counter",
"+=",
"1",
"# Convert to boolean",
"epoch_file",
"[",
"ppt_counter",
"]",
"=",
"epoch_file",
"[",
"ppt_counter",
"]",
".",
"astype",
"(",
"'bool'",
")",
"# Save the file",
"np",
".",
"save",
"(",
"filename",
",",
"epoch_file",
")"
] | 40.482759 | 0.000208 |
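A self-contained toy input for the record above, assuming export_epoch_file is in scope; one participant, two conditions, 40 s sampled at the default 100 elements per second:

import numpy as np

n_timepoints, n_conditions = 40 * 100, 2
stim = np.zeros((n_timepoints, n_conditions))
stim[0:1000, 0] = 1      # condition 0 on for the first 10 s
stim[2000:3000, 1] = 1   # condition 1 on from 20 s to 30 s
export_epoch_file([stim], 'epochs.npy', tr_duration=2.0)

epochs = np.load('epochs.npy')
print(epochs[0].shape)   # (conditions, epochs, TRs) -> (2, 2, 20); the epoch axis pools both conditions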
def _catalog_check(self, cat_name, append=False):
"""
Check to see if the name of the ingested catalog is valid
Parameters
----------
cat_name: str
The name of the catalog in the Catalog object
append: bool
Append the catalog rather than replace
Returns
-------
bool
True if good catalog name else False
"""
good = True
# Make sure the attribute name is good
if cat_name[0].isdigit():
print("No names beginning with numbers please!")
good = False
# Make sure catalog is unique
if not append and cat_name in self.catalogs:
print("Catalog {} already ingested. Set 'append=True' to add more records.".format(cat_name))
good = False
return good | [
"def",
"_catalog_check",
"(",
"self",
",",
"cat_name",
",",
"append",
"=",
"False",
")",
":",
"good",
"=",
"True",
"# Make sure the attribute name is good",
"if",
"cat_name",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"print",
"(",
"\"No names beginning with numbers please!\"",
")",
"good",
"=",
"False",
"# Make sure catalog is unique",
"if",
"not",
"append",
"and",
"cat_name",
"in",
"self",
".",
"catalogs",
":",
"print",
"(",
"\"Catalog {} already ingested. Set 'append=True' to add more records.\"",
".",
"format",
"(",
"cat_name",
")",
")",
"good",
"=",
"False",
"return",
"good"
] | 30.103448 | 0.008879 |
def delete_tags(tags,
name=None,
group_id=None,
vpc_name=None,
vpc_id=None,
region=None,
key=None,
keyid=None,
profile=None):
'''
deletes tags from a security group
.. versionadded:: 2016.3.0
tags
a list of tags to remove
name
the name of the security group
group_id
        the group id of the security group (in lieu of a name/vpc combo)
vpc_name
the name of the vpc to search the named group for
vpc_id
the id of the vpc, in lieu of the vpc_name
region
the amazon region
key
amazon key
keyid
amazon keyid
profile
amazon profile
CLI example:
.. code-block:: bash
salt myminion boto_secgroup.delete_tags ['TAG_TO_DELETE1','TAG_TO_DELETE2'] security_group_name vpc_id=vpc-13435 profile=my_aws_profile
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
secgrp = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
group_id=group_id, region=region, key=key, keyid=keyid,
profile=profile)
if secgrp:
if isinstance(tags, list):
tags_to_remove = {}
for tag in tags:
tags_to_remove[tag] = None
secgrp.remove_tags(tags_to_remove)
else:
msg = 'Tags must be a list of tagnames to remove from the security group'
raise SaltInvocationError(msg)
else:
msg = 'The security group could not be found'
raise SaltInvocationError(msg)
return True | [
"def",
"delete_tags",
"(",
"tags",
",",
"name",
"=",
"None",
",",
"group_id",
"=",
"None",
",",
"vpc_name",
"=",
"None",
",",
"vpc_id",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"secgrp",
"=",
"_get_group",
"(",
"conn",
",",
"name",
"=",
"name",
",",
"vpc_id",
"=",
"vpc_id",
",",
"vpc_name",
"=",
"vpc_name",
",",
"group_id",
"=",
"group_id",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"secgrp",
":",
"if",
"isinstance",
"(",
"tags",
",",
"list",
")",
":",
"tags_to_remove",
"=",
"{",
"}",
"for",
"tag",
"in",
"tags",
":",
"tags_to_remove",
"[",
"tag",
"]",
"=",
"None",
"secgrp",
".",
"remove_tags",
"(",
"tags_to_remove",
")",
"else",
":",
"msg",
"=",
"'Tags must be a list of tagnames to remove from the security group'",
"raise",
"SaltInvocationError",
"(",
"msg",
")",
"else",
":",
"msg",
"=",
"'The security group could not be found'",
"raise",
"SaltInvocationError",
"(",
"msg",
")",
"return",
"True"
] | 25.734375 | 0.001754 |
def _index_sub(self, uri_list, num, batch_num):
"""
Converts a list of uris to elasticsearch json objects
args:
uri_list: list of uris to convert
num: the ending count within the batch
batch_num: the batch number
"""
bname = '%s-%s' % (batch_num, num)
log.debug("batch_num '%s' starting es_json conversion",
bname)
qry_data = get_all_item_data([item[0] for item in uri_list],
self.tstore_conn,
rdfclass=self.rdf_class)
log.debug("batch_num '%s-%s' query_complete | count: %s",
batch_num,
num,
len(qry_data))
# path = os.path.join(CFG.dirs.cache, "index_pre")
# if not os.path.exists(path):
# os.makedirs(path)
# with open(os.path.join(path, bname + ".json"), "w") as fo:
# fo.write(json.dumps(qry_data))
data = RdfDataset(qry_data)
del qry_data
log.debug("batch_num '%s-%s' RdfDataset Loaded", batch_num, num)
for value in uri_list:
try:
self.batch_data[batch_num]['main'].append(\
data[value[0]].es_json())
self.count += 1
except KeyError:
pass
for name, indexer in self.other_indexers.items():
for item in data.json_qry("$.:%s" % name.pyuri):
val = item.es_json()
if val:
self.batch_data[batch_num][name].append(val)
self.batch_uris[batch_num].append(item.subject)
del data
del uri_list
log.debug("batch_num '%s-%s' converted to es_json", batch_num, num) | [
"def",
"_index_sub",
"(",
"self",
",",
"uri_list",
",",
"num",
",",
"batch_num",
")",
":",
"bname",
"=",
"'%s-%s'",
"%",
"(",
"batch_num",
",",
"num",
")",
"log",
".",
"debug",
"(",
"\"batch_num '%s' starting es_json conversion\"",
",",
"bname",
")",
"qry_data",
"=",
"get_all_item_data",
"(",
"[",
"item",
"[",
"0",
"]",
"for",
"item",
"in",
"uri_list",
"]",
",",
"self",
".",
"tstore_conn",
",",
"rdfclass",
"=",
"self",
".",
"rdf_class",
")",
"log",
".",
"debug",
"(",
"\"batch_num '%s-%s' query_complete | count: %s\"",
",",
"batch_num",
",",
"num",
",",
"len",
"(",
"qry_data",
")",
")",
"# path = os.path.join(CFG.dirs.cache, \"index_pre\")",
"# if not os.path.exists(path):",
"# os.makedirs(path)",
"# with open(os.path.join(path, bname + \".json\"), \"w\") as fo:",
"# fo.write(json.dumps(qry_data))",
"data",
"=",
"RdfDataset",
"(",
"qry_data",
")",
"del",
"qry_data",
"log",
".",
"debug",
"(",
"\"batch_num '%s-%s' RdfDataset Loaded\"",
",",
"batch_num",
",",
"num",
")",
"for",
"value",
"in",
"uri_list",
":",
"try",
":",
"self",
".",
"batch_data",
"[",
"batch_num",
"]",
"[",
"'main'",
"]",
".",
"append",
"(",
"data",
"[",
"value",
"[",
"0",
"]",
"]",
".",
"es_json",
"(",
")",
")",
"self",
".",
"count",
"+=",
"1",
"except",
"KeyError",
":",
"pass",
"for",
"name",
",",
"indexer",
"in",
"self",
".",
"other_indexers",
".",
"items",
"(",
")",
":",
"for",
"item",
"in",
"data",
".",
"json_qry",
"(",
"\"$.:%s\"",
"%",
"name",
".",
"pyuri",
")",
":",
"val",
"=",
"item",
".",
"es_json",
"(",
")",
"if",
"val",
":",
"self",
".",
"batch_data",
"[",
"batch_num",
"]",
"[",
"name",
"]",
".",
"append",
"(",
"val",
")",
"self",
".",
"batch_uris",
"[",
"batch_num",
"]",
".",
"append",
"(",
"item",
".",
"subject",
")",
"del",
"data",
"del",
"uri_list",
"log",
".",
"debug",
"(",
"\"batch_num '%s-%s' converted to es_json\"",
",",
"batch_num",
",",
"num",
")"
] | 39.840909 | 0.00167 |
def create_namespaced_ingress(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_ingress # noqa: E501
create an Ingress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_ingress(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1Ingress body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1Ingress
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_ingress_with_http_info(namespace, body, **kwargs) # noqa: E501
else:
(data) = self.create_namespaced_ingress_with_http_info(namespace, body, **kwargs) # noqa: E501
return data | [
"def",
"create_namespaced_ingress",
"(",
"self",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"create_namespaced_ingress_with_http_info",
"(",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"create_namespaced_ingress_with_http_info",
"(",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | 61 | 0.001291 |
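The row above comes from the generated `ExtensionsV1beta1Api` of the official Kubernetes Python client. A minimal usage sketch, assuming an older client release that still ships the `extensions/v1beta1` Ingress models; the namespace and service name are illustrative:

```python
from kubernetes import client, config

config.load_kube_config()                        # uses the local kubeconfig
api = client.ExtensionsV1beta1Api()

body = client.V1beta1Ingress(
    metadata=client.V1ObjectMeta(name="demo-ingress"),
    spec=client.V1beta1IngressSpec(
        backend=client.V1beta1IngressBackend(    # default backend for all traffic
            service_name="demo-svc",
            service_port=80,
        )
    ),
)
ingress = api.create_namespaced_ingress(namespace="default", body=body)
print(ingress.metadata.name)
```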
def startDrag(self, dragData):
"""
Starts a new drag with the inputed data.
:param dragData | <dict>
"""
# create the mime data
mimeData = QMimeData()
for key, value in dragData.items():
mimeData.setData('application/x-%s' % key, wrapVariant(value))
# create the drag instance
drag = QDrag(self.scene().chart())
drag.setMimeData(mimeData)
drag.exec_() | [
"def",
"startDrag",
"(",
"self",
",",
"dragData",
")",
":",
"# create the mime data\r",
"mimeData",
"=",
"QMimeData",
"(",
")",
"for",
"key",
",",
"value",
"in",
"dragData",
".",
"items",
"(",
")",
":",
"mimeData",
".",
"setData",
"(",
"'application/x-%s'",
"%",
"key",
",",
"wrapVariant",
"(",
"value",
")",
")",
"# create the drag instance\r",
"drag",
"=",
"QDrag",
"(",
"self",
".",
"scene",
"(",
")",
".",
"chart",
"(",
")",
")",
"drag",
".",
"setMimeData",
"(",
"mimeData",
")",
"drag",
".",
"exec_",
"(",
")"
] | 31.6 | 0.008197 |
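A small stand-alone sketch of the same packing step with PyQt5 (assuming PyQt5 is available; the project-specific `wrapVariant` helper is replaced here by a plain byte-encoded string):

```python
from PyQt5.QtCore import QMimeData, QByteArray

drag_data = {"chart-point": "42", "chart-axis": "x"}

mime = QMimeData()
for key, value in drag_data.items():
    # mirror the 'application/x-<key>' convention used above
    mime.setData("application/x-%s" % key, QByteArray(str(value).encode()))

print([str(f) for f in mime.formats()])
```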
def in_attack_range_of(self, unit: Unit, bonus_distance: Union[int, float] = 0) -> "Units":
""" Filters units that are in attack range of the unit in parameter """
return self.filter(lambda x: unit.target_in_range(x, bonus_distance=bonus_distance)) | [
"def",
"in_attack_range_of",
"(",
"self",
",",
"unit",
":",
"Unit",
",",
"bonus_distance",
":",
"Union",
"[",
"int",
",",
"float",
"]",
"=",
"0",
")",
"->",
"\"Units\"",
":",
"return",
"self",
".",
"filter",
"(",
"lambda",
"x",
":",
"unit",
".",
"target_in_range",
"(",
"x",
",",
"bonus_distance",
"=",
"bonus_distance",
")",
")"
] | 87.333333 | 0.015152 |
def get_model_from_path_string(root_model, path):
""" Return a model class for a related model
root_model is the class of the initial model
path is like foo__bar where bar is related to foo
"""
for path_section in path.split('__'):
if path_section:
try:
field, model, direct, m2m = _get_field_by_name(root_model, path_section)
except FieldDoesNotExist:
return root_model
if direct:
if _get_remote_field(field):
try:
root_model = _get_remote_field(field).parent_model()
except AttributeError:
root_model = _get_remote_field(field).model
else:
if hasattr(field, 'related_model'):
root_model = field.related_model
else:
root_model = field.model
return root_model | [
"def",
"get_model_from_path_string",
"(",
"root_model",
",",
"path",
")",
":",
"for",
"path_section",
"in",
"path",
".",
"split",
"(",
"'__'",
")",
":",
"if",
"path_section",
":",
"try",
":",
"field",
",",
"model",
",",
"direct",
",",
"m2m",
"=",
"_get_field_by_name",
"(",
"root_model",
",",
"path_section",
")",
"except",
"FieldDoesNotExist",
":",
"return",
"root_model",
"if",
"direct",
":",
"if",
"_get_remote_field",
"(",
"field",
")",
":",
"try",
":",
"root_model",
"=",
"_get_remote_field",
"(",
"field",
")",
".",
"parent_model",
"(",
")",
"except",
"AttributeError",
":",
"root_model",
"=",
"_get_remote_field",
"(",
"field",
")",
".",
"model",
"else",
":",
"if",
"hasattr",
"(",
"field",
",",
"'related_model'",
")",
":",
"root_model",
"=",
"field",
".",
"related_model",
"else",
":",
"root_model",
"=",
"field",
".",
"model",
"return",
"root_model"
] | 40.217391 | 0.002112 |
def rjust_text(text, width=80, indent=0, subsequent=None):
"""Wrap text and adjust it to right border.
Same as L{wrap_text} with the difference that the text is aligned against
the right text border.
Args:
text (str): Text to wrap and align.
width (int): Maximum number of characters per line.
indent (int): Indentation of the first line.
subsequent (int or None): Indentation of all other lines, if it is
``None``, then the indentation will be same as for the first line.
"""
text = re.sub(r"\s+", " ", text).strip()
if subsequent is None:
subsequent = indent
wrapper = TextWrapper(
width=width,
break_long_words=False,
replace_whitespace=True,
initial_indent=" " * (indent + subsequent),
subsequent_indent=" " * subsequent,
)
return wrapper.fill(text)[subsequent:] | [
"def",
"rjust_text",
"(",
"text",
",",
"width",
"=",
"80",
",",
"indent",
"=",
"0",
",",
"subsequent",
"=",
"None",
")",
":",
"text",
"=",
"re",
".",
"sub",
"(",
"r\"\\s+\"",
",",
"\" \"",
",",
"text",
")",
".",
"strip",
"(",
")",
"if",
"subsequent",
"is",
"None",
":",
"subsequent",
"=",
"indent",
"wrapper",
"=",
"TextWrapper",
"(",
"width",
"=",
"width",
",",
"break_long_words",
"=",
"False",
",",
"replace_whitespace",
"=",
"True",
",",
"initial_indent",
"=",
"\" \"",
"*",
"(",
"indent",
"+",
"subsequent",
")",
",",
"subsequent_indent",
"=",
"\" \"",
"*",
"subsequent",
",",
")",
"return",
"wrapper",
".",
"fill",
"(",
"text",
")",
"[",
"subsequent",
":",
"]"
] | 37.5 | 0.001083 |
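The trick in `rjust_text` is to give the first line a combined `indent + subsequent` indent and then slice `subsequent` characters off the wrapped result, so the first line keeps only `indent` leading spaces while every line is wrapped against the same `width` limit. A self-contained illustration with the standard library:

```python
from textwrap import TextWrapper

indent, subsequent, width = 4, 8, 40
wrapper = TextWrapper(
    width=width,
    break_long_words=False,
    replace_whitespace=True,
    initial_indent=" " * (indent + subsequent),   # over-indent the first line...
    subsequent_indent=" " * subsequent,
)
wrapped = wrapper.fill("lorem ipsum dolor sit amet " * 4)
print(wrapped[subsequent:])                        # ...then trim the common prefix
```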
def get_exporter(obj, name):
"""
Get an exporter for the
:param obj: object to export
:type obj: :class:`Component <cqparts.Component>`
:param name: registered name of exporter
:type name: :class:`str`
:return: an exporter instance of the given type
:rtype: :class:`Exporter`
:raises TypeError: if exporter cannot be found
"""
if name not in exporter_index:
raise TypeError(
("exporter type '%s' is not registered: " % name) +
("registered types: %r" % sorted(exporter_index.keys()))
)
for base_class in exporter_index[name]:
if isinstance(obj, base_class):
return exporter_index[name][base_class](obj)
raise TypeError("exporter type '%s' for a %r is not registered" % (
name, type(obj)
)) | [
"def",
"get_exporter",
"(",
"obj",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"exporter_index",
":",
"raise",
"TypeError",
"(",
"(",
"\"exporter type '%s' is not registered: \"",
"%",
"name",
")",
"+",
"(",
"\"registered types: %r\"",
"%",
"sorted",
"(",
"exporter_index",
".",
"keys",
"(",
")",
")",
")",
")",
"for",
"base_class",
"in",
"exporter_index",
"[",
"name",
"]",
":",
"if",
"isinstance",
"(",
"obj",
",",
"base_class",
")",
":",
"return",
"exporter_index",
"[",
"name",
"]",
"[",
"base_class",
"]",
"(",
"obj",
")",
"raise",
"TypeError",
"(",
"\"exporter type '%s' for a %r is not registered\"",
"%",
"(",
"name",
",",
"type",
"(",
"obj",
")",
")",
")"
] | 31.68 | 0.001225 |
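The lookup above is a two-level registry: exporter name first, then the component base class the exporter can handle. A minimal generic sketch of the same pattern (stand-in classes, not the real cqparts types):

```python
exporter_index = {}          # name -> {base_class: exporter_class}

class Component: pass
class Assembly(Component): pass

class JsonExporter:
    def __init__(self, obj):
        self.obj = obj

exporter_index["json"] = {Component: JsonExporter}

def get_exporter(obj, name):
    if name not in exporter_index:
        raise TypeError("exporter type %r is not registered" % name)
    for base_class, exporter_cls in exporter_index[name].items():
        if isinstance(obj, base_class):
            return exporter_cls(obj)
    raise TypeError("no exporter %r registered for %r" % (name, type(obj)))

print(type(get_exporter(Assembly(), "json")).__name__)   # JsonExporter
```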
def validate(node, source):
"""Call this function to validate an AST."""
# TODO: leaving strict checking off to support insert_grad_of
lf = LanguageFence(source, strict=False)
lf.visit(node)
return node | [
"def",
"validate",
"(",
"node",
",",
"source",
")",
":",
"# TODO: leaving strict checking off to support insert_grad_of",
"lf",
"=",
"LanguageFence",
"(",
"source",
",",
"strict",
"=",
"False",
")",
"lf",
".",
"visit",
"(",
"node",
")",
"return",
"node"
] | 34.5 | 0.028302 |
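`LanguageFence` is project-specific, but the underlying pattern is an `ast.NodeVisitor` that walks a parsed tree and rejects constructs it does not allow. A hedged stand-alone analogue that forbids `while` loops:

```python
import ast

class Fence(ast.NodeVisitor):
    """Reject source that uses a disallowed construct."""
    def visit_While(self, node):
        raise SyntaxError("while loops are not supported (line %d)" % node.lineno)

def validate(source):
    tree = ast.parse(source)
    Fence().visit(tree)
    return tree

validate("for i in range(3): print(i)")      # passes
try:
    validate("while True: pass")
except SyntaxError as err:
    print(err)
```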
def cleanup_classes(rdf):
"""Remove unnecessary class definitions: definitions of SKOS classes or
unused classes. If a class is also a skos:Concept or skos:Collection,
remove the 'classness' of it but leave the Concept/Collection."""
for t in (OWL.Class, RDFS.Class):
for cl in rdf.subjects(RDF.type, t):
# SKOS classes may be safely removed
if cl.startswith(SKOS):
logging.debug("removing SKOS class definition: %s", cl)
replace_subject(rdf, cl, None)
continue
# if there are instances of the class, keep the class def
if rdf.value(None, RDF.type, cl, any=True) is not None:
continue
# if the class is used in a domain/range/equivalentClass
# definition, keep the class def
if rdf.value(None, RDFS.domain, cl, any=True) is not None:
continue
if rdf.value(None, RDFS.range, cl, any=True) is not None:
continue
if rdf.value(None, OWL.equivalentClass, cl, any=True) is not None:
continue
# if the class is also a skos:Concept or skos:Collection, only
# remove its rdf:type
if (cl, RDF.type, SKOS.Concept) in rdf \
or (cl, RDF.type, SKOS.Collection) in rdf:
logging.debug("removing classiness of %s", cl)
rdf.remove((cl, RDF.type, t))
else: # remove it completely
logging.debug("removing unused class definition: %s", cl)
replace_subject(rdf, cl, None) | [
"def",
"cleanup_classes",
"(",
"rdf",
")",
":",
"for",
"t",
"in",
"(",
"OWL",
".",
"Class",
",",
"RDFS",
".",
"Class",
")",
":",
"for",
"cl",
"in",
"rdf",
".",
"subjects",
"(",
"RDF",
".",
"type",
",",
"t",
")",
":",
"# SKOS classes may be safely removed",
"if",
"cl",
".",
"startswith",
"(",
"SKOS",
")",
":",
"logging",
".",
"debug",
"(",
"\"removing SKOS class definition: %s\"",
",",
"cl",
")",
"replace_subject",
"(",
"rdf",
",",
"cl",
",",
"None",
")",
"continue",
"# if there are instances of the class, keep the class def",
"if",
"rdf",
".",
"value",
"(",
"None",
",",
"RDF",
".",
"type",
",",
"cl",
",",
"any",
"=",
"True",
")",
"is",
"not",
"None",
":",
"continue",
"# if the class is used in a domain/range/equivalentClass",
"# definition, keep the class def",
"if",
"rdf",
".",
"value",
"(",
"None",
",",
"RDFS",
".",
"domain",
",",
"cl",
",",
"any",
"=",
"True",
")",
"is",
"not",
"None",
":",
"continue",
"if",
"rdf",
".",
"value",
"(",
"None",
",",
"RDFS",
".",
"range",
",",
"cl",
",",
"any",
"=",
"True",
")",
"is",
"not",
"None",
":",
"continue",
"if",
"rdf",
".",
"value",
"(",
"None",
",",
"OWL",
".",
"equivalentClass",
",",
"cl",
",",
"any",
"=",
"True",
")",
"is",
"not",
"None",
":",
"continue",
"# if the class is also a skos:Concept or skos:Collection, only",
"# remove its rdf:type",
"if",
"(",
"cl",
",",
"RDF",
".",
"type",
",",
"SKOS",
".",
"Concept",
")",
"in",
"rdf",
"or",
"(",
"cl",
",",
"RDF",
".",
"type",
",",
"SKOS",
".",
"Collection",
")",
"in",
"rdf",
":",
"logging",
".",
"debug",
"(",
"\"removing classiness of %s\"",
",",
"cl",
")",
"rdf",
".",
"remove",
"(",
"(",
"cl",
",",
"RDF",
".",
"type",
",",
"t",
")",
")",
"else",
":",
"# remove it completely",
"logging",
".",
"debug",
"(",
"\"removing unused class definition: %s\"",
",",
"cl",
")",
"replace_subject",
"(",
"rdf",
",",
"cl",
",",
"None",
")"
] | 49.90625 | 0.000614 |
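A small rdflib sketch of the central check above, removing a class definition only when nothing instantiates it (assuming rdflib is installed; the namespace is illustrative):

```python
from rdflib import Graph, Namespace
from rdflib.namespace import RDF, RDFS

EX = Namespace("http://example.org/")
g = Graph()
g.add((EX.Person, RDF.type, RDFS.Class))      # used: has an instance
g.add((EX.alice, RDF.type, EX.Person))
g.add((EX.Orphan, RDF.type, RDFS.Class))      # unused: no instances, no references

for cl in list(g.subjects(RDF.type, RDFS.Class)):
    if g.value(None, RDF.type, cl, any=True) is None:
        g.remove((cl, None, None))            # drop the whole definition

print(sorted(g.subjects(RDF.type, RDFS.Class)))   # only ex:Person remains
```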
def find_type(self, txt):
"""
top level function used to simply return the
ONE ACTUAL string used for data types
"""
searchString = txt.upper()
match = 'Unknown'
for i in self.lst_type:
if searchString in i:
match = i
return match | [
"def",
"find_type",
"(",
"self",
",",
"txt",
")",
":",
"searchString",
"=",
"txt",
".",
"upper",
"(",
")",
"match",
"=",
"'Unknown'",
"for",
"i",
"in",
"self",
".",
"lst_type",
":",
"if",
"searchString",
"in",
"i",
":",
"match",
"=",
"i",
"return",
"match"
] | 28.454545 | 0.009288 |

def all_subs(bounds):
"""given a list of tuples specifying the bounds of an array, all_subs()
returns a list of all the tuples of subscripts for that array."""
idx_list = []
for i in range(len(bounds)):
this_dim = bounds[i]
lo,hi = this_dim[0],this_dim[1] # bounds for this dimension
this_dim_idxs = range(lo,hi+1) # indexes for this dimension
idx_list.append(this_dim_idxs)
return idx2subs(idx_list) | [
"def",
"all_subs",
"(",
"bounds",
")",
":",
"idx_list",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"bounds",
")",
")",
":",
"this_dim",
"=",
"bounds",
"[",
"i",
"]",
"lo",
",",
"hi",
"=",
"this_dim",
"[",
"0",
"]",
",",
"this_dim",
"[",
"1",
"]",
"# bounds for this dimension",
"this_dim_idxs",
"=",
"range",
"(",
"lo",
",",
"hi",
"+",
"1",
")",
"# indexes for this dimension",
"idx_list",
".",
"append",
"(",
"this_dim_idxs",
")",
"return",
"idx2subs",
"(",
"idx_list",
")"
] | 41 | 0.008677 |
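Assuming `idx2subs` expands the per-dimension index lists into their Cartesian product, the same result can be sketched with `itertools.product`:

```python
from itertools import product

bounds = [(1, 3), (0, 1)]                      # inclusive (lo, hi) per dimension
ranges = [range(lo, hi + 1) for lo, hi in bounds]
subs = list(product(*ranges))
print(subs)
# [(1, 0), (1, 1), (2, 0), (2, 1), (3, 0), (3, 1)]
```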
def simEvalCond(simulator, *conds):
"""
Evaluate list of values as condition
"""
_cond = True
_vld = True
for v in conds:
val = bool(v.val)
fullVld = v.vldMask == 1
if fullVld:
if not val:
return False, True
else:
return False, False
_cond = _cond and val
_vld = _vld and fullVld
return _cond, _vld | [
"def",
"simEvalCond",
"(",
"simulator",
",",
"*",
"conds",
")",
":",
"_cond",
"=",
"True",
"_vld",
"=",
"True",
"for",
"v",
"in",
"conds",
":",
"val",
"=",
"bool",
"(",
"v",
".",
"val",
")",
"fullVld",
"=",
"v",
".",
"vldMask",
"==",
"1",
"if",
"fullVld",
":",
"if",
"not",
"val",
":",
"return",
"False",
",",
"True",
"else",
":",
"return",
"False",
",",
"False",
"_cond",
"=",
"_cond",
"and",
"val",
"_vld",
"=",
"_vld",
"and",
"fullVld",
"return",
"_cond",
",",
"_vld"
] | 20.947368 | 0.002404 |
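A self-contained sketch of the same three-valued logic with a stand-in value type (the real simulator passes HDL values carrying a `val` and a `vldMask`):

```python
from collections import namedtuple

SimValue = namedtuple("SimValue", ["val", "vldMask"])

def eval_cond(*conds):
    cond, vld = True, True
    for v in conds:
        val = bool(v.val)
        full_vld = v.vldMask == 1
        if full_vld:
            if not val:
                return False, True       # a defined False decides the AND
        else:
            return False, False          # any undefined bit poisons the result
        cond = cond and val
        vld = vld and full_vld
    return cond, vld

print(eval_cond(SimValue(1, 1), SimValue(1, 1)))   # (True, True)
print(eval_cond(SimValue(0, 1), SimValue(1, 1)))   # (False, True)
print(eval_cond(SimValue(1, 0)))                   # (False, False)
```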
def filter(self, datasource_type, datasource_id, column):
"""
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:return:
"""
# TODO: Cache endpoint by user, datasource and column
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
security_manager.assert_datasource_permission(datasource)
payload = json.dumps(
datasource.values_for_column(
column,
config.get('FILTER_SELECT_ROW_LIMIT', 10000),
),
default=utils.json_int_dttm_ser)
return json_success(payload) | [
"def",
"filter",
"(",
"self",
",",
"datasource_type",
",",
"datasource_id",
",",
"column",
")",
":",
"# TODO: Cache endpoint by user, datasource and column",
"datasource",
"=",
"ConnectorRegistry",
".",
"get_datasource",
"(",
"datasource_type",
",",
"datasource_id",
",",
"db",
".",
"session",
")",
"if",
"not",
"datasource",
":",
"return",
"json_error_response",
"(",
"DATASOURCE_MISSING_ERR",
")",
"security_manager",
".",
"assert_datasource_permission",
"(",
"datasource",
")",
"payload",
"=",
"json",
".",
"dumps",
"(",
"datasource",
".",
"values_for_column",
"(",
"column",
",",
"config",
".",
"get",
"(",
"'FILTER_SELECT_ROW_LIMIT'",
",",
"10000",
")",
",",
")",
",",
"default",
"=",
"utils",
".",
"json_int_dttm_ser",
")",
"return",
"json_success",
"(",
"payload",
")"
] | 40.181818 | 0.00221 |
def generate_and_run(simulation,
simulator,
network=None,
return_results=False,
base_dir=None,
target_dir=None,
num_processors=1):
"""
Generates the network in the specified simulator and runs, if appropriate
"""
if network == None:
network = load_network_json(simulation.network)
print_v("Generating network %s and running in simulator: %s..." % (network.id, simulator))
if simulator == 'NEURON':
_generate_neuron_files_from_neuroml(network, dir_for_mod_files=target_dir)
from neuromllite.NeuronHandler import NeuronHandler
nrn_handler = NeuronHandler()
for c in network.cells:
if c.neuroml2_source_file:
src_dir = os.path.dirname(os.path.abspath(c.neuroml2_source_file))
nrn_handler.executeHoc('load_file("%s/%s.hoc")' % (src_dir, c.id))
generate_network(network, nrn_handler, generate_network, base_dir)
if return_results:
raise NotImplementedError("Reloading results not supported in Neuron yet...")
elif simulator.lower() == 'sonata': # Will not "run" obviously...
from neuromllite.SonataHandler import SonataHandler
sonata_handler = SonataHandler()
generate_network(network, sonata_handler, always_include_props=True, base_dir=base_dir)
print_v("Done with Sonata...")
elif simulator.lower().startswith('graph'): # Will not "run" obviously...
from neuromllite.GraphVizHandler import GraphVizHandler, engines
try:
if simulator[-1].isalpha():
                print(simulator)
                print(simulator[5:])
                print(simulator[5:-1])
engine = engines[simulator[-1]]
level = int(simulator[5:-1])
else:
engine = 'dot'
level = int(simulator[5:])
except Exception as e:
            print(e)
print_v("Error parsing: %s"%simulator)
print_v("Graphs of the network structure can be generated at many levels of detail (1-6, required) and laid out using GraphViz engines (d - dot (default); c - circo; n - neato; f - fdp), so use: -graph3c, -graph2, -graph4f etc.")
return
handler = GraphVizHandler(level, engine=engine, nl_network=network)
generate_network(network, handler, always_include_props=True, base_dir=base_dir)
print_v("Done with GraphViz...")
elif simulator.lower().startswith('matrix'): # Will not "run" obviously...
from neuromllite.MatrixHandler import MatrixHandler
try:
level = int(simulator[6:])
except:
print_v("Error parsing: %s"%simulator)
print_v("Matrices of the network structure can be generated at many levels of detail (1-n, required), so use: -matrix1, -matrix2, etc.")
return
handler = MatrixHandler(level, nl_network=network)
generate_network(network, handler, always_include_props=True, base_dir=base_dir)
print_v("Done with MatrixHandler...")
elif simulator.startswith('PyNN'):
#_generate_neuron_files_from_neuroml(network)
simulator_name = simulator.split('_')[1].lower()
from neuromllite.PyNNHandler import PyNNHandler
pynn_handler = PyNNHandler(simulator_name, simulation.dt, network.id)
syn_cell_params = {}
for proj in network.projections:
synapse = network.get_child(proj.synapse, 'synapses')
post_pop = network.get_child(proj.postsynaptic, 'populations')
if not post_pop.component in syn_cell_params:
syn_cell_params[post_pop.component] = {}
for p in synapse.parameters:
post = ''
if synapse.pynn_receptor_type == "excitatory":
post = '_E'
elif synapse.pynn_receptor_type == "inhibitory":
post = '_I'
syn_cell_params[post_pop.component]['%s%s' % (p, post)] = synapse.parameters[p]
cells = {}
for c in network.cells:
if c.pynn_cell:
cell_params = {}
if c.parameters:
for p in c.parameters:
cell_params[p] = evaluate(c.parameters[p], network.parameters)
dont_set_here = ['tau_syn_E', 'e_rev_E', 'tau_syn_I', 'e_rev_I']
for d in dont_set_here:
if d in c.parameters:
                        raise Exception(('Synaptic parameters like %s should be set '
                                  'in individual synapses, not in the list of parameters associated with the cell') % d)
if c.id in syn_cell_params:
cell_params.update(syn_cell_params[c.id])
print_v("Creating cell with params: %s" % cell_params)
exec('cells["%s"] = pynn_handler.sim.%s(**cell_params)' % (c.id, c.pynn_cell))
if c.pynn_cell != 'SpikeSourcePoisson':
exec("cells['%s'].default_initial_values['v'] = cells['%s'].parameter_space['v_rest'].base_value" % (c.id, c.id))
pynn_handler.set_cells(cells)
receptor_types = {}
for s in network.synapses:
if s.pynn_receptor_type:
receptor_types[s.id] = s.pynn_receptor_type
pynn_handler.set_receptor_types(receptor_types)
for input_source in network.input_sources:
if input_source.pynn_input:
pynn_handler.add_input_source(input_source)
generate_network(network, pynn_handler, always_include_props=True, base_dir=base_dir)
for pid in pynn_handler.populations:
pop = pynn_handler.populations[pid]
if 'all' in simulation.recordTraces or pop.label in simulation.recordTraces:
if pop.can_record('v'):
pop.record('v')
pynn_handler.sim.run(simulation.duration)
pynn_handler.sim.end()
traces = {}
events = {}
if not 'NeuroML' in simulator:
from neo.io import PyNNTextIO
for pid in pynn_handler.populations:
pop = pynn_handler.populations[pid]
if 'all' in simulation.recordTraces or pop.label in simulation.recordTraces:
filename = "%s.%s.v.dat" % (simulation.id, pop.label)
all_columns = []
print_v("Writing data for %s to %s" % (pop.label, filename))
for i in range(len(pop)):
if pop.can_record('v'):
ref = '%s[%i]'%(pop.label,i)
traces[ref] = []
data = pop.get_data('v', gather=False)
for segment in data.segments:
vm = segment.analogsignals[0].transpose()[i]
if len(all_columns) == 0:
tt = np.array([t * simulation.dt / 1000. for t in range(len(vm))])
all_columns.append(tt)
vm_si = [float(v / 1000.) for v in vm]
traces[ref] = vm_si
all_columns.append(vm_si)
times_vm = np.array(all_columns).transpose()
np.savetxt(filename, times_vm, delimiter='\t', fmt='%s')
if return_results:
_print_result_info(traces, events)
return traces, events
elif simulator == 'NetPyNE':
if target_dir==None:
target_dir='./'
_generate_neuron_files_from_neuroml(network, dir_for_mod_files=target_dir)
from netpyne import specs
from netpyne import sim
# Note NetPyNE from this branch is required: https://github.com/Neurosim-lab/netpyne/tree/neuroml_updates
from netpyne.conversion.neuromlFormat import NetPyNEBuilder
import pprint; pp = pprint.PrettyPrinter(depth=6)
netParams = specs.NetParams()
simConfig = specs.SimConfig()
netpyne_handler = NetPyNEBuilder(netParams, simConfig=simConfig, verbose=True)
generate_network(network, netpyne_handler, base_dir=base_dir)
netpyne_handler.finalise()
simConfig = specs.SimConfig()
simConfig.tstop = simulation.duration
simConfig.duration = simulation.duration
simConfig.dt = simulation.dt
simConfig.seed = simulation.seed
simConfig.recordStep = simulation.dt
simConfig.recordCells = ['all']
simConfig.recordTraces = {}
for pop in netpyne_handler.popParams.values():
if 'all' in simulation.recordTraces or pop.id in simulation.recordTraces:
for i in pop['cellsList']:
id = pop['pop']
index = i['cellLabel']
simConfig.recordTraces['v_%s_%s' % (id, index)] = {'sec':'soma', 'loc':0.5, 'var':'v', 'conds':{'pop':id, 'cellLabel':index}}
simConfig.saveDat = True
print_v("NetPyNE netParams: ")
pp.pprint(netParams.todict())
#print_v("NetPyNE simConfig: ")
#pp.pprint(simConfig.todict())
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
sim.net.createPops()
cells = sim.net.createCells() # instantiate network cells based on defined populations
for proj_id in netpyne_handler.projection_infos.keys():
projName, prePop, postPop, synapse, ptype = netpyne_handler.projection_infos[proj_id]
print_v("Creating connections for %s (%s): %s->%s via %s" % (projName, ptype, prePop, postPop, synapse))
preComp = netpyne_handler.pop_ids_vs_components[prePop]
for conn in netpyne_handler.connections[projName]:
pre_id, pre_seg, pre_fract, post_id, post_seg, post_fract, delay, weight = conn
#connParam = {'delay':delay,'weight':weight,'synsPerConn':1, 'sec':post_seg, 'loc':post_fract, 'threshold':threshold}
connParam = {'delay':delay, 'weight':weight, 'synsPerConn':1, 'sec':post_seg, 'loc':post_fract}
if ptype == 'electricalProjection':
if weight != 1:
raise Exception('Cannot yet support inputs where weight !=1!')
connParam = {'synsPerConn': 1,
'sec': post_seg,
'loc': post_fract,
'gapJunction': True,
'weight': weight}
else:
connParam = {'delay': delay,
'weight': weight,
'synsPerConn': 1,
'sec': post_seg,
'loc': post_fract}
#'threshold': threshold}
connParam['synMech'] = synapse
if post_id in sim.net.gid2lid: # check if postsyn is in this node's list of gids
sim.net._addCellConn(connParam, pre_id, post_id)
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
sim.runSim() # run parallel Neuron simulation
sim.gatherData() # gather spiking data and cell info from each node
sim.saveData() # save params, cell info and sim output to file (pickle,mat,txt,etc)
if return_results:
raise NotImplementedError("Reloading results not supported in NetPyNE yet...")
elif simulator == 'jNeuroML' or simulator == 'jNeuroML_NEURON' or simulator == 'jNeuroML_NetPyNE':
from pyneuroml.lems import generate_lems_file_for_neuroml
from pyneuroml import pynml
lems_file_name = 'LEMS_%s.xml' % simulation.id
nml_file_name, nml_doc = generate_neuroml2_from_network(network, base_dir=base_dir, target_dir=target_dir)
included_files = ['PyNN.xml']
for c in network.cells:
if c.lems_source_file:
included_files.append(c.lems_source_file)
'''
if network.cells:
for c in network.cells:
included_files.append(c.neuroml2_source_file)
'''
if network.synapses:
for s in network.synapses:
if s.lems_source_file:
included_files.append(s.lems_source_file)
print_v("Generating LEMS file prior to running in %s" % simulator)
pops_plot_save = []
pops_spike_save = []
gen_plots_for_quantities = {}
gen_saves_for_quantities = {}
for p in network.populations:
if simulation.recordTraces and ('all' in simulation.recordTraces or p.id in simulation.recordTraces):
pops_plot_save.append(p.id)
if simulation.recordSpikes and ('all' in simulation.recordSpikes or p.id in simulation.recordSpikes):
pops_spike_save.append(p.id)
if simulation.recordRates and ('all' in simulation.recordRates or p.id in simulation.recordRates):
size = evaluate(p.size, network.parameters)
for i in range(size):
quantity = '%s/%i/%s/r' % (p.id, i, p.component)
gen_plots_for_quantities['%s_%i_r' % (p.id, i)] = [quantity]
gen_saves_for_quantities['%s_%i.r.dat' % (p.id, i)] = [quantity]
if simulation.recordVariables:
for var in simulation.recordVariables:
to_rec = simulation.recordVariables[var]
if ('all' in to_rec or p.id in to_rec):
size = evaluate(p.size, network.parameters)
for i in range(size):
quantity = '%s/%i/%s/%s' % (p.id, i, p.component,var)
gen_plots_for_quantities['%s_%i_%s' % (p.id, i, var)] = [quantity]
gen_saves_for_quantities['%s_%i.%s.dat' % (p.id, i, var)] = [quantity]
generate_lems_file_for_neuroml(simulation.id,
nml_file_name,
network.id,
simulation.duration,
simulation.dt,
lems_file_name,
target_dir=target_dir if target_dir else '.',
nml_doc=nml_doc, # Use this if the nml doc has already been loaded (to avoid delay in reload)
include_extra_files=included_files,
gen_plots_for_all_v=False,
plot_all_segments=False,
gen_plots_for_quantities=gen_plots_for_quantities, # Dict with displays vs lists of quantity paths
gen_plots_for_only_populations=pops_plot_save, # List of populations, all pops if = []
gen_saves_for_all_v=False,
save_all_segments=False,
gen_saves_for_only_populations=pops_plot_save, # List of populations, all pops if = []
gen_saves_for_quantities=gen_saves_for_quantities, # Dict with file names vs lists of quantity paths
gen_spike_saves_for_all_somas=False,
gen_spike_saves_for_only_populations=pops_spike_save, # List of populations, all pops if = []
gen_spike_saves_for_cells={}, # Dict with file names vs lists of quantity paths
spike_time_format='ID_TIME',
copy_neuroml=True,
lems_file_generate_seed=12345,
report_file_name='report.%s.txt' % simulation.id,
simulation_seed=simulation.seed if simulation.seed else 12345,
verbose=True)
lems_file_name = _locate_file(lems_file_name, target_dir)
if simulator == 'jNeuroML':
results = pynml.run_lems_with_jneuroml(lems_file_name,
nogui=True,
load_saved_data=return_results,
reload_events=return_results)
elif simulator == 'jNeuroML_NEURON':
results = pynml.run_lems_with_jneuroml_neuron(lems_file_name,
nogui=True,
load_saved_data=return_results,
reload_events=return_results)
elif simulator == 'jNeuroML_NetPyNE':
results = pynml.run_lems_with_jneuroml_netpyne(lems_file_name,
nogui=True,
verbose=True,
load_saved_data=return_results,
reload_events=return_results,
num_processors=num_processors)
print_v("Finished running LEMS file %s in %s (returning results: %s)" % (lems_file_name, simulator, return_results))
if return_results:
traces, events = results
_print_result_info(traces, events)
return results | [
"def",
"generate_and_run",
"(",
"simulation",
",",
"simulator",
",",
"network",
"=",
"None",
",",
"return_results",
"=",
"False",
",",
"base_dir",
"=",
"None",
",",
"target_dir",
"=",
"None",
",",
"num_processors",
"=",
"1",
")",
":",
"if",
"network",
"==",
"None",
":",
"network",
"=",
"load_network_json",
"(",
"simulation",
".",
"network",
")",
"print_v",
"(",
"\"Generating network %s and running in simulator: %s...\"",
"%",
"(",
"network",
".",
"id",
",",
"simulator",
")",
")",
"if",
"simulator",
"==",
"'NEURON'",
":",
"_generate_neuron_files_from_neuroml",
"(",
"network",
",",
"dir_for_mod_files",
"=",
"target_dir",
")",
"from",
"neuromllite",
".",
"NeuronHandler",
"import",
"NeuronHandler",
"nrn_handler",
"=",
"NeuronHandler",
"(",
")",
"for",
"c",
"in",
"network",
".",
"cells",
":",
"if",
"c",
".",
"neuroml2_source_file",
":",
"src_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"c",
".",
"neuroml2_source_file",
")",
")",
"nrn_handler",
".",
"executeHoc",
"(",
"'load_file(\"%s/%s.hoc\")'",
"%",
"(",
"src_dir",
",",
"c",
".",
"id",
")",
")",
"generate_network",
"(",
"network",
",",
"nrn_handler",
",",
"generate_network",
",",
"base_dir",
")",
"if",
"return_results",
":",
"raise",
"NotImplementedError",
"(",
"\"Reloading results not supported in Neuron yet...\"",
")",
"elif",
"simulator",
".",
"lower",
"(",
")",
"==",
"'sonata'",
":",
"# Will not \"run\" obviously...",
"from",
"neuromllite",
".",
"SonataHandler",
"import",
"SonataHandler",
"sonata_handler",
"=",
"SonataHandler",
"(",
")",
"generate_network",
"(",
"network",
",",
"sonata_handler",
",",
"always_include_props",
"=",
"True",
",",
"base_dir",
"=",
"base_dir",
")",
"print_v",
"(",
"\"Done with Sonata...\"",
")",
"elif",
"simulator",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'graph'",
")",
":",
"# Will not \"run\" obviously...",
"from",
"neuromllite",
".",
"GraphVizHandler",
"import",
"GraphVizHandler",
",",
"engines",
"try",
":",
"if",
"simulator",
"[",
"-",
"1",
"]",
".",
"isalpha",
"(",
")",
":",
"print",
"simulator",
"print",
"simulator",
"[",
"5",
":",
"]",
"print",
"simulator",
"[",
"5",
":",
"-",
"1",
"]",
"engine",
"=",
"engines",
"[",
"simulator",
"[",
"-",
"1",
"]",
"]",
"level",
"=",
"int",
"(",
"simulator",
"[",
"5",
":",
"-",
"1",
"]",
")",
"else",
":",
"engine",
"=",
"'dot'",
"level",
"=",
"int",
"(",
"simulator",
"[",
"5",
":",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"e",
"print_v",
"(",
"\"Error parsing: %s\"",
"%",
"simulator",
")",
"print_v",
"(",
"\"Graphs of the network structure can be generated at many levels of detail (1-6, required) and laid out using GraphViz engines (d - dot (default); c - circo; n - neato; f - fdp), so use: -graph3c, -graph2, -graph4f etc.\"",
")",
"return",
"handler",
"=",
"GraphVizHandler",
"(",
"level",
",",
"engine",
"=",
"engine",
",",
"nl_network",
"=",
"network",
")",
"generate_network",
"(",
"network",
",",
"handler",
",",
"always_include_props",
"=",
"True",
",",
"base_dir",
"=",
"base_dir",
")",
"print_v",
"(",
"\"Done with GraphViz...\"",
")",
"elif",
"simulator",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'matrix'",
")",
":",
"# Will not \"run\" obviously...",
"from",
"neuromllite",
".",
"MatrixHandler",
"import",
"MatrixHandler",
"try",
":",
"level",
"=",
"int",
"(",
"simulator",
"[",
"6",
":",
"]",
")",
"except",
":",
"print_v",
"(",
"\"Error parsing: %s\"",
"%",
"simulator",
")",
"print_v",
"(",
"\"Matrices of the network structure can be generated at many levels of detail (1-n, required), so use: -matrix1, -matrix2, etc.\"",
")",
"return",
"handler",
"=",
"MatrixHandler",
"(",
"level",
",",
"nl_network",
"=",
"network",
")",
"generate_network",
"(",
"network",
",",
"handler",
",",
"always_include_props",
"=",
"True",
",",
"base_dir",
"=",
"base_dir",
")",
"print_v",
"(",
"\"Done with MatrixHandler...\"",
")",
"elif",
"simulator",
".",
"startswith",
"(",
"'PyNN'",
")",
":",
"#_generate_neuron_files_from_neuroml(network)",
"simulator_name",
"=",
"simulator",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"from",
"neuromllite",
".",
"PyNNHandler",
"import",
"PyNNHandler",
"pynn_handler",
"=",
"PyNNHandler",
"(",
"simulator_name",
",",
"simulation",
".",
"dt",
",",
"network",
".",
"id",
")",
"syn_cell_params",
"=",
"{",
"}",
"for",
"proj",
"in",
"network",
".",
"projections",
":",
"synapse",
"=",
"network",
".",
"get_child",
"(",
"proj",
".",
"synapse",
",",
"'synapses'",
")",
"post_pop",
"=",
"network",
".",
"get_child",
"(",
"proj",
".",
"postsynaptic",
",",
"'populations'",
")",
"if",
"not",
"post_pop",
".",
"component",
"in",
"syn_cell_params",
":",
"syn_cell_params",
"[",
"post_pop",
".",
"component",
"]",
"=",
"{",
"}",
"for",
"p",
"in",
"synapse",
".",
"parameters",
":",
"post",
"=",
"''",
"if",
"synapse",
".",
"pynn_receptor_type",
"==",
"\"excitatory\"",
":",
"post",
"=",
"'_E'",
"elif",
"synapse",
".",
"pynn_receptor_type",
"==",
"\"inhibitory\"",
":",
"post",
"=",
"'_I'",
"syn_cell_params",
"[",
"post_pop",
".",
"component",
"]",
"[",
"'%s%s'",
"%",
"(",
"p",
",",
"post",
")",
"]",
"=",
"synapse",
".",
"parameters",
"[",
"p",
"]",
"cells",
"=",
"{",
"}",
"for",
"c",
"in",
"network",
".",
"cells",
":",
"if",
"c",
".",
"pynn_cell",
":",
"cell_params",
"=",
"{",
"}",
"if",
"c",
".",
"parameters",
":",
"for",
"p",
"in",
"c",
".",
"parameters",
":",
"cell_params",
"[",
"p",
"]",
"=",
"evaluate",
"(",
"c",
".",
"parameters",
"[",
"p",
"]",
",",
"network",
".",
"parameters",
")",
"dont_set_here",
"=",
"[",
"'tau_syn_E'",
",",
"'e_rev_E'",
",",
"'tau_syn_I'",
",",
"'e_rev_I'",
"]",
"for",
"d",
"in",
"dont_set_here",
":",
"if",
"d",
"in",
"c",
".",
"parameters",
":",
"raise",
"Exception",
"(",
"'Synaptic parameters like %s should be set '",
"+",
"'in individual synapses, not in the list of parameters associated with the cell'",
"%",
"d",
")",
"if",
"c",
".",
"id",
"in",
"syn_cell_params",
":",
"cell_params",
".",
"update",
"(",
"syn_cell_params",
"[",
"c",
".",
"id",
"]",
")",
"print_v",
"(",
"\"Creating cell with params: %s\"",
"%",
"cell_params",
")",
"exec",
"(",
"'cells[\"%s\"] = pynn_handler.sim.%s(**cell_params)'",
"%",
"(",
"c",
".",
"id",
",",
"c",
".",
"pynn_cell",
")",
")",
"if",
"c",
".",
"pynn_cell",
"!=",
"'SpikeSourcePoisson'",
":",
"exec",
"(",
"\"cells['%s'].default_initial_values['v'] = cells['%s'].parameter_space['v_rest'].base_value\"",
"%",
"(",
"c",
".",
"id",
",",
"c",
".",
"id",
")",
")",
"pynn_handler",
".",
"set_cells",
"(",
"cells",
")",
"receptor_types",
"=",
"{",
"}",
"for",
"s",
"in",
"network",
".",
"synapses",
":",
"if",
"s",
".",
"pynn_receptor_type",
":",
"receptor_types",
"[",
"s",
".",
"id",
"]",
"=",
"s",
".",
"pynn_receptor_type",
"pynn_handler",
".",
"set_receptor_types",
"(",
"receptor_types",
")",
"for",
"input_source",
"in",
"network",
".",
"input_sources",
":",
"if",
"input_source",
".",
"pynn_input",
":",
"pynn_handler",
".",
"add_input_source",
"(",
"input_source",
")",
"generate_network",
"(",
"network",
",",
"pynn_handler",
",",
"always_include_props",
"=",
"True",
",",
"base_dir",
"=",
"base_dir",
")",
"for",
"pid",
"in",
"pynn_handler",
".",
"populations",
":",
"pop",
"=",
"pynn_handler",
".",
"populations",
"[",
"pid",
"]",
"if",
"'all'",
"in",
"simulation",
".",
"recordTraces",
"or",
"pop",
".",
"label",
"in",
"simulation",
".",
"recordTraces",
":",
"if",
"pop",
".",
"can_record",
"(",
"'v'",
")",
":",
"pop",
".",
"record",
"(",
"'v'",
")",
"pynn_handler",
".",
"sim",
".",
"run",
"(",
"simulation",
".",
"duration",
")",
"pynn_handler",
".",
"sim",
".",
"end",
"(",
")",
"traces",
"=",
"{",
"}",
"events",
"=",
"{",
"}",
"if",
"not",
"'NeuroML'",
"in",
"simulator",
":",
"from",
"neo",
".",
"io",
"import",
"PyNNTextIO",
"for",
"pid",
"in",
"pynn_handler",
".",
"populations",
":",
"pop",
"=",
"pynn_handler",
".",
"populations",
"[",
"pid",
"]",
"if",
"'all'",
"in",
"simulation",
".",
"recordTraces",
"or",
"pop",
".",
"label",
"in",
"simulation",
".",
"recordTraces",
":",
"filename",
"=",
"\"%s.%s.v.dat\"",
"%",
"(",
"simulation",
".",
"id",
",",
"pop",
".",
"label",
")",
"all_columns",
"=",
"[",
"]",
"print_v",
"(",
"\"Writing data for %s to %s\"",
"%",
"(",
"pop",
".",
"label",
",",
"filename",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"pop",
")",
")",
":",
"if",
"pop",
".",
"can_record",
"(",
"'v'",
")",
":",
"ref",
"=",
"'%s[%i]'",
"%",
"(",
"pop",
".",
"label",
",",
"i",
")",
"traces",
"[",
"ref",
"]",
"=",
"[",
"]",
"data",
"=",
"pop",
".",
"get_data",
"(",
"'v'",
",",
"gather",
"=",
"False",
")",
"for",
"segment",
"in",
"data",
".",
"segments",
":",
"vm",
"=",
"segment",
".",
"analogsignals",
"[",
"0",
"]",
".",
"transpose",
"(",
")",
"[",
"i",
"]",
"if",
"len",
"(",
"all_columns",
")",
"==",
"0",
":",
"tt",
"=",
"np",
".",
"array",
"(",
"[",
"t",
"*",
"simulation",
".",
"dt",
"/",
"1000.",
"for",
"t",
"in",
"range",
"(",
"len",
"(",
"vm",
")",
")",
"]",
")",
"all_columns",
".",
"append",
"(",
"tt",
")",
"vm_si",
"=",
"[",
"float",
"(",
"v",
"/",
"1000.",
")",
"for",
"v",
"in",
"vm",
"]",
"traces",
"[",
"ref",
"]",
"=",
"vm_si",
"all_columns",
".",
"append",
"(",
"vm_si",
")",
"times_vm",
"=",
"np",
".",
"array",
"(",
"all_columns",
")",
".",
"transpose",
"(",
")",
"np",
".",
"savetxt",
"(",
"filename",
",",
"times_vm",
",",
"delimiter",
"=",
"'\\t'",
",",
"fmt",
"=",
"'%s'",
")",
"if",
"return_results",
":",
"_print_result_info",
"(",
"traces",
",",
"events",
")",
"return",
"traces",
",",
"events",
"elif",
"simulator",
"==",
"'NetPyNE'",
":",
"if",
"target_dir",
"==",
"None",
":",
"target_dir",
"=",
"'./'",
"_generate_neuron_files_from_neuroml",
"(",
"network",
",",
"dir_for_mod_files",
"=",
"target_dir",
")",
"from",
"netpyne",
"import",
"specs",
"from",
"netpyne",
"import",
"sim",
"# Note NetPyNE from this branch is required: https://github.com/Neurosim-lab/netpyne/tree/neuroml_updates",
"from",
"netpyne",
".",
"conversion",
".",
"neuromlFormat",
"import",
"NetPyNEBuilder",
"import",
"pprint",
"pp",
"=",
"pprint",
".",
"PrettyPrinter",
"(",
"depth",
"=",
"6",
")",
"netParams",
"=",
"specs",
".",
"NetParams",
"(",
")",
"simConfig",
"=",
"specs",
".",
"SimConfig",
"(",
")",
"netpyne_handler",
"=",
"NetPyNEBuilder",
"(",
"netParams",
",",
"simConfig",
"=",
"simConfig",
",",
"verbose",
"=",
"True",
")",
"generate_network",
"(",
"network",
",",
"netpyne_handler",
",",
"base_dir",
"=",
"base_dir",
")",
"netpyne_handler",
".",
"finalise",
"(",
")",
"simConfig",
"=",
"specs",
".",
"SimConfig",
"(",
")",
"simConfig",
".",
"tstop",
"=",
"simulation",
".",
"duration",
"simConfig",
".",
"duration",
"=",
"simulation",
".",
"duration",
"simConfig",
".",
"dt",
"=",
"simulation",
".",
"dt",
"simConfig",
".",
"seed",
"=",
"simulation",
".",
"seed",
"simConfig",
".",
"recordStep",
"=",
"simulation",
".",
"dt",
"simConfig",
".",
"recordCells",
"=",
"[",
"'all'",
"]",
"simConfig",
".",
"recordTraces",
"=",
"{",
"}",
"for",
"pop",
"in",
"netpyne_handler",
".",
"popParams",
".",
"values",
"(",
")",
":",
"if",
"'all'",
"in",
"simulation",
".",
"recordTraces",
"or",
"pop",
".",
"id",
"in",
"simulation",
".",
"recordTraces",
":",
"for",
"i",
"in",
"pop",
"[",
"'cellsList'",
"]",
":",
"id",
"=",
"pop",
"[",
"'pop'",
"]",
"index",
"=",
"i",
"[",
"'cellLabel'",
"]",
"simConfig",
".",
"recordTraces",
"[",
"'v_%s_%s'",
"%",
"(",
"id",
",",
"index",
")",
"]",
"=",
"{",
"'sec'",
":",
"'soma'",
",",
"'loc'",
":",
"0.5",
",",
"'var'",
":",
"'v'",
",",
"'conds'",
":",
"{",
"'pop'",
":",
"id",
",",
"'cellLabel'",
":",
"index",
"}",
"}",
"simConfig",
".",
"saveDat",
"=",
"True",
"print_v",
"(",
"\"NetPyNE netParams: \"",
")",
"pp",
".",
"pprint",
"(",
"netParams",
".",
"todict",
"(",
")",
")",
"#print_v(\"NetPyNE simConfig: \")",
"#pp.pprint(simConfig.todict())",
"sim",
".",
"initialize",
"(",
"netParams",
",",
"simConfig",
")",
"# create network object and set cfg and net params",
"sim",
".",
"net",
".",
"createPops",
"(",
")",
"cells",
"=",
"sim",
".",
"net",
".",
"createCells",
"(",
")",
"# instantiate network cells based on defined populations ",
"for",
"proj_id",
"in",
"netpyne_handler",
".",
"projection_infos",
".",
"keys",
"(",
")",
":",
"projName",
",",
"prePop",
",",
"postPop",
",",
"synapse",
",",
"ptype",
"=",
"netpyne_handler",
".",
"projection_infos",
"[",
"proj_id",
"]",
"print_v",
"(",
"\"Creating connections for %s (%s): %s->%s via %s\"",
"%",
"(",
"projName",
",",
"ptype",
",",
"prePop",
",",
"postPop",
",",
"synapse",
")",
")",
"preComp",
"=",
"netpyne_handler",
".",
"pop_ids_vs_components",
"[",
"prePop",
"]",
"for",
"conn",
"in",
"netpyne_handler",
".",
"connections",
"[",
"projName",
"]",
":",
"pre_id",
",",
"pre_seg",
",",
"pre_fract",
",",
"post_id",
",",
"post_seg",
",",
"post_fract",
",",
"delay",
",",
"weight",
"=",
"conn",
"#connParam = {'delay':delay,'weight':weight,'synsPerConn':1, 'sec':post_seg, 'loc':post_fract, 'threshold':threshold}",
"connParam",
"=",
"{",
"'delay'",
":",
"delay",
",",
"'weight'",
":",
"weight",
",",
"'synsPerConn'",
":",
"1",
",",
"'sec'",
":",
"post_seg",
",",
"'loc'",
":",
"post_fract",
"}",
"if",
"ptype",
"==",
"'electricalProjection'",
":",
"if",
"weight",
"!=",
"1",
":",
"raise",
"Exception",
"(",
"'Cannot yet support inputs where weight !=1!'",
")",
"connParam",
"=",
"{",
"'synsPerConn'",
":",
"1",
",",
"'sec'",
":",
"post_seg",
",",
"'loc'",
":",
"post_fract",
",",
"'gapJunction'",
":",
"True",
",",
"'weight'",
":",
"weight",
"}",
"else",
":",
"connParam",
"=",
"{",
"'delay'",
":",
"delay",
",",
"'weight'",
":",
"weight",
",",
"'synsPerConn'",
":",
"1",
",",
"'sec'",
":",
"post_seg",
",",
"'loc'",
":",
"post_fract",
"}",
"#'threshold': threshold}",
"connParam",
"[",
"'synMech'",
"]",
"=",
"synapse",
"if",
"post_id",
"in",
"sim",
".",
"net",
".",
"gid2lid",
":",
"# check if postsyn is in this node's list of gids",
"sim",
".",
"net",
".",
"_addCellConn",
"(",
"connParam",
",",
"pre_id",
",",
"post_id",
")",
"stims",
"=",
"sim",
".",
"net",
".",
"addStims",
"(",
")",
"# add external stimulation to cells (IClamps etc)",
"simData",
"=",
"sim",
".",
"setupRecording",
"(",
")",
"# setup variables to record for each cell (spikes, V traces, etc)",
"sim",
".",
"runSim",
"(",
")",
"# run parallel Neuron simulation ",
"sim",
".",
"gatherData",
"(",
")",
"# gather spiking data and cell info from each node",
"sim",
".",
"saveData",
"(",
")",
"# save params, cell info and sim output to file (pickle,mat,txt,etc)",
"if",
"return_results",
":",
"raise",
"NotImplementedError",
"(",
"\"Reloading results not supported in NetPyNE yet...\"",
")",
"elif",
"simulator",
"==",
"'jNeuroML'",
"or",
"simulator",
"==",
"'jNeuroML_NEURON'",
"or",
"simulator",
"==",
"'jNeuroML_NetPyNE'",
":",
"from",
"pyneuroml",
".",
"lems",
"import",
"generate_lems_file_for_neuroml",
"from",
"pyneuroml",
"import",
"pynml",
"lems_file_name",
"=",
"'LEMS_%s.xml'",
"%",
"simulation",
".",
"id",
"nml_file_name",
",",
"nml_doc",
"=",
"generate_neuroml2_from_network",
"(",
"network",
",",
"base_dir",
"=",
"base_dir",
",",
"target_dir",
"=",
"target_dir",
")",
"included_files",
"=",
"[",
"'PyNN.xml'",
"]",
"for",
"c",
"in",
"network",
".",
"cells",
":",
"if",
"c",
".",
"lems_source_file",
":",
"included_files",
".",
"append",
"(",
"c",
".",
"lems_source_file",
")",
"'''\n if network.cells:\n for c in network.cells:\n included_files.append(c.neuroml2_source_file)\n '''",
"if",
"network",
".",
"synapses",
":",
"for",
"s",
"in",
"network",
".",
"synapses",
":",
"if",
"s",
".",
"lems_source_file",
":",
"included_files",
".",
"append",
"(",
"s",
".",
"lems_source_file",
")",
"print_v",
"(",
"\"Generating LEMS file prior to running in %s\"",
"%",
"simulator",
")",
"pops_plot_save",
"=",
"[",
"]",
"pops_spike_save",
"=",
"[",
"]",
"gen_plots_for_quantities",
"=",
"{",
"}",
"gen_saves_for_quantities",
"=",
"{",
"}",
"for",
"p",
"in",
"network",
".",
"populations",
":",
"if",
"simulation",
".",
"recordTraces",
"and",
"(",
"'all'",
"in",
"simulation",
".",
"recordTraces",
"or",
"p",
".",
"id",
"in",
"simulation",
".",
"recordTraces",
")",
":",
"pops_plot_save",
".",
"append",
"(",
"p",
".",
"id",
")",
"if",
"simulation",
".",
"recordSpikes",
"and",
"(",
"'all'",
"in",
"simulation",
".",
"recordSpikes",
"or",
"p",
".",
"id",
"in",
"simulation",
".",
"recordSpikes",
")",
":",
"pops_spike_save",
".",
"append",
"(",
"p",
".",
"id",
")",
"if",
"simulation",
".",
"recordRates",
"and",
"(",
"'all'",
"in",
"simulation",
".",
"recordRates",
"or",
"p",
".",
"id",
"in",
"simulation",
".",
"recordRates",
")",
":",
"size",
"=",
"evaluate",
"(",
"p",
".",
"size",
",",
"network",
".",
"parameters",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
":",
"quantity",
"=",
"'%s/%i/%s/r'",
"%",
"(",
"p",
".",
"id",
",",
"i",
",",
"p",
".",
"component",
")",
"gen_plots_for_quantities",
"[",
"'%s_%i_r'",
"%",
"(",
"p",
".",
"id",
",",
"i",
")",
"]",
"=",
"[",
"quantity",
"]",
"gen_saves_for_quantities",
"[",
"'%s_%i.r.dat'",
"%",
"(",
"p",
".",
"id",
",",
"i",
")",
"]",
"=",
"[",
"quantity",
"]",
"if",
"simulation",
".",
"recordVariables",
":",
"for",
"var",
"in",
"simulation",
".",
"recordVariables",
":",
"to_rec",
"=",
"simulation",
".",
"recordVariables",
"[",
"var",
"]",
"if",
"(",
"'all'",
"in",
"to_rec",
"or",
"p",
".",
"id",
"in",
"to_rec",
")",
":",
"size",
"=",
"evaluate",
"(",
"p",
".",
"size",
",",
"network",
".",
"parameters",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
":",
"quantity",
"=",
"'%s/%i/%s/%s'",
"%",
"(",
"p",
".",
"id",
",",
"i",
",",
"p",
".",
"component",
",",
"var",
")",
"gen_plots_for_quantities",
"[",
"'%s_%i_%s'",
"%",
"(",
"p",
".",
"id",
",",
"i",
",",
"var",
")",
"]",
"=",
"[",
"quantity",
"]",
"gen_saves_for_quantities",
"[",
"'%s_%i.%s.dat'",
"%",
"(",
"p",
".",
"id",
",",
"i",
",",
"var",
")",
"]",
"=",
"[",
"quantity",
"]",
"generate_lems_file_for_neuroml",
"(",
"simulation",
".",
"id",
",",
"nml_file_name",
",",
"network",
".",
"id",
",",
"simulation",
".",
"duration",
",",
"simulation",
".",
"dt",
",",
"lems_file_name",
",",
"target_dir",
"=",
"target_dir",
"if",
"target_dir",
"else",
"'.'",
",",
"nml_doc",
"=",
"nml_doc",
",",
"# Use this if the nml doc has already been loaded (to avoid delay in reload)",
"include_extra_files",
"=",
"included_files",
",",
"gen_plots_for_all_v",
"=",
"False",
",",
"plot_all_segments",
"=",
"False",
",",
"gen_plots_for_quantities",
"=",
"gen_plots_for_quantities",
",",
"# Dict with displays vs lists of quantity paths",
"gen_plots_for_only_populations",
"=",
"pops_plot_save",
",",
"# List of populations, all pops if = []",
"gen_saves_for_all_v",
"=",
"False",
",",
"save_all_segments",
"=",
"False",
",",
"gen_saves_for_only_populations",
"=",
"pops_plot_save",
",",
"# List of populations, all pops if = []",
"gen_saves_for_quantities",
"=",
"gen_saves_for_quantities",
",",
"# Dict with file names vs lists of quantity paths",
"gen_spike_saves_for_all_somas",
"=",
"False",
",",
"gen_spike_saves_for_only_populations",
"=",
"pops_spike_save",
",",
"# List of populations, all pops if = []",
"gen_spike_saves_for_cells",
"=",
"{",
"}",
",",
"# Dict with file names vs lists of quantity paths",
"spike_time_format",
"=",
"'ID_TIME'",
",",
"copy_neuroml",
"=",
"True",
",",
"lems_file_generate_seed",
"=",
"12345",
",",
"report_file_name",
"=",
"'report.%s.txt'",
"%",
"simulation",
".",
"id",
",",
"simulation_seed",
"=",
"simulation",
".",
"seed",
"if",
"simulation",
".",
"seed",
"else",
"12345",
",",
"verbose",
"=",
"True",
")",
"lems_file_name",
"=",
"_locate_file",
"(",
"lems_file_name",
",",
"target_dir",
")",
"if",
"simulator",
"==",
"'jNeuroML'",
":",
"results",
"=",
"pynml",
".",
"run_lems_with_jneuroml",
"(",
"lems_file_name",
",",
"nogui",
"=",
"True",
",",
"load_saved_data",
"=",
"return_results",
",",
"reload_events",
"=",
"return_results",
")",
"elif",
"simulator",
"==",
"'jNeuroML_NEURON'",
":",
"results",
"=",
"pynml",
".",
"run_lems_with_jneuroml_neuron",
"(",
"lems_file_name",
",",
"nogui",
"=",
"True",
",",
"load_saved_data",
"=",
"return_results",
",",
"reload_events",
"=",
"return_results",
")",
"elif",
"simulator",
"==",
"'jNeuroML_NetPyNE'",
":",
"results",
"=",
"pynml",
".",
"run_lems_with_jneuroml_netpyne",
"(",
"lems_file_name",
",",
"nogui",
"=",
"True",
",",
"verbose",
"=",
"True",
",",
"load_saved_data",
"=",
"return_results",
",",
"reload_events",
"=",
"return_results",
",",
"num_processors",
"=",
"num_processors",
")",
"print_v",
"(",
"\"Finished running LEMS file %s in %s (returning results: %s)\"",
"%",
"(",
"lems_file_name",
",",
"simulator",
",",
"return_results",
")",
")",
"if",
"return_results",
":",
"traces",
",",
"events",
"=",
"results",
"_print_result_info",
"(",
"traces",
",",
"events",
")",
"return",
"results"
] | 45.574879 | 0.013329 |
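A hypothetical driver for the function above. The `Simulation` constructor arguments are assumptions about the neuromllite API rather than a verified release, and `Net0.json` stands in for a network description already on disk:

```python
# Hypothetical sketch: argument names follow the snippet above; the Simulation
# constructor call is an assumption about the neuromllite API.
from neuromllite import Simulation

sim = Simulation(id="Sim0",
                 network="Net0.json",        # JSON network description on disk
                 duration="1000",            # ms
                 dt="0.025",
                 recordTraces={"all": "*"})

traces, events = generate_and_run(sim,
                                  simulator="jNeuroML",
                                  return_results=True)
print(sorted(traces.keys()))
```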
def types_strict(instance):
"""Ensure that no custom object types are used, but only the official ones
from the specification.
"""
if instance['type'] not in enums.TYPES:
yield JSONError("Object type '%s' is not one of those defined in the"
" specification." % instance['type'], instance['id'])
if has_cyber_observable_data(instance):
for key, obj in instance['objects'].items():
if 'type' in obj and obj['type'] not in enums.OBSERVABLE_TYPES:
yield JSONError("Observable object %s is type '%s' which is "
"not one of those defined in the "
"specification."
% (key, obj['type']), instance['id']) | [
"def",
"types_strict",
"(",
"instance",
")",
":",
"if",
"instance",
"[",
"'type'",
"]",
"not",
"in",
"enums",
".",
"TYPES",
":",
"yield",
"JSONError",
"(",
"\"Object type '%s' is not one of those defined in the\"",
"\" specification.\"",
"%",
"instance",
"[",
"'type'",
"]",
",",
"instance",
"[",
"'id'",
"]",
")",
"if",
"has_cyber_observable_data",
"(",
"instance",
")",
":",
"for",
"key",
",",
"obj",
"in",
"instance",
"[",
"'objects'",
"]",
".",
"items",
"(",
")",
":",
"if",
"'type'",
"in",
"obj",
"and",
"obj",
"[",
"'type'",
"]",
"not",
"in",
"enums",
".",
"OBSERVABLE_TYPES",
":",
"yield",
"JSONError",
"(",
"\"Observable object %s is type '%s' which is \"",
"\"not one of those defined in the \"",
"\"specification.\"",
"%",
"(",
"key",
",",
"obj",
"[",
"'type'",
"]",
")",
",",
"instance",
"[",
"'id'",
"]",
")"
] | 51.066667 | 0.001282 |
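The same check pattern with stand-in vocabularies (the real `enums` tables and `JSONError` type come from the STIX validator package):

```python
TYPES = {"indicator", "malware", "observed-data"}
OBSERVABLE_TYPES = {"file", "ipv4-addr"}

def check_types(instance):
    if instance["type"] not in TYPES:
        yield "%s: unknown object type %r" % (instance["id"], instance["type"])
    for key, obj in instance.get("objects", {}).items():
        if obj.get("type") not in OBSERVABLE_TYPES:
            yield "%s: observable %s has unknown type %r" % (
                instance["id"], key, obj.get("type"))

inst = {"id": "observed-data--1", "type": "observed-data",
        "objects": {"0": {"type": "file"}, "1": {"type": "floppy-disk"}}}
print(list(check_types(inst)))
```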
def render_to_message(self, extra_context=None, **kwargs):
"""
Renders and returns an unsent message with the provided context.
Any extra keyword arguments passed will be passed through as keyword
arguments to the message constructor.
:param extra_context: Any additional context to use when rendering the
templated content.
:type extra_context: :class:`dict`
:returns: A message instance.
:rtype: :attr:`.message_class`
"""
if extra_context is None:
extra_context = {}
# Ensure our custom headers are added to the underlying message class.
kwargs.setdefault('headers', {}).update(self.headers)
context = self.get_context_data(**extra_context)
return self.message_class(
subject=self.render_subject(context),
body=self.render_body(context),
**kwargs) | [
"def",
"render_to_message",
"(",
"self",
",",
"extra_context",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"extra_context",
"is",
"None",
":",
"extra_context",
"=",
"{",
"}",
"# Ensure our custom headers are added to the underlying message class.",
"kwargs",
".",
"setdefault",
"(",
"'headers'",
",",
"{",
"}",
")",
".",
"update",
"(",
"self",
".",
"headers",
")",
"context",
"=",
"self",
".",
"get_context_data",
"(",
"*",
"*",
"extra_context",
")",
"return",
"self",
".",
"message_class",
"(",
"subject",
"=",
"self",
".",
"render_subject",
"(",
"context",
")",
",",
"body",
"=",
"self",
".",
"render_body",
"(",
"context",
")",
",",
"*",
"*",
"kwargs",
")"
] | 37.625 | 0.00216 |
def logpdf_link(self, link_f, y, Y_metadata=None):
"""
:param link_f: latent variables (link(f))
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: includes censoring information in dictionary key 'censored'
:returns: likelihood evaluated for this point
:rtype: float
"""
assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
c = np.zeros_like(y)
if Y_metadata is not None and 'censored' in Y_metadata.keys():
c = Y_metadata['censored']
uncensored = (1-c)* (-0.5*np.log(2*np.pi*self.variance) - np.log(y) - (np.log(y)-link_f)**2 /(2*self.variance) )
censored = c*np.log( 1 - stats.norm.cdf((np.log(y) - link_f)/np.sqrt(self.variance)) )
logpdf = uncensored + censored
return logpdf | [
"def",
"logpdf_link",
"(",
"self",
",",
"link_f",
",",
"y",
",",
"Y_metadata",
"=",
"None",
")",
":",
"assert",
"np",
".",
"atleast_1d",
"(",
"link_f",
")",
".",
"shape",
"==",
"np",
".",
"atleast_1d",
"(",
"y",
")",
".",
"shape",
"c",
"=",
"np",
".",
"zeros_like",
"(",
"y",
")",
"if",
"Y_metadata",
"is",
"not",
"None",
"and",
"'censored'",
"in",
"Y_metadata",
".",
"keys",
"(",
")",
":",
"c",
"=",
"Y_metadata",
"[",
"'censored'",
"]",
"uncensored",
"=",
"(",
"1",
"-",
"c",
")",
"*",
"(",
"-",
"0.5",
"*",
"np",
".",
"log",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"self",
".",
"variance",
")",
"-",
"np",
".",
"log",
"(",
"y",
")",
"-",
"(",
"np",
".",
"log",
"(",
"y",
")",
"-",
"link_f",
")",
"**",
"2",
"/",
"(",
"2",
"*",
"self",
".",
"variance",
")",
")",
"censored",
"=",
"c",
"*",
"np",
".",
"log",
"(",
"1",
"-",
"stats",
".",
"norm",
".",
"cdf",
"(",
"(",
"np",
".",
"log",
"(",
"y",
")",
"-",
"link_f",
")",
"/",
"np",
".",
"sqrt",
"(",
"self",
".",
"variance",
")",
")",
")",
"logpdf",
"=",
"uncensored",
"+",
"censored",
"return",
"logpdf"
] | 44.052632 | 0.011696 |
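The expression combines the log-normal log-density for uncensored points with the log-survival term for right-censored ones. A stand-alone numerical check with NumPy/SciPy (illustrative values):

```python
import numpy as np
from scipy import stats

variance = 0.5
y        = np.array([[0.8], [1.5], [2.3]])
link_f   = np.array([[0.0], [0.3], [0.5]])
censored = np.array([[0.0], [0.0], [1.0]])   # last observation right-censored

uncensored = (1 - censored) * (
    -0.5 * np.log(2 * np.pi * variance)
    - np.log(y)
    - (np.log(y) - link_f) ** 2 / (2 * variance)
)
cens = censored * np.log(1 - stats.norm.cdf((np.log(y) - link_f) / np.sqrt(variance)))
print(uncensored + cens)
```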
def mysql(host, user, passwd, db, charset):
"""Set MySQL/MariaDB connection"""
connection_string = database.set_mysql_connection(host=host, user=user, passwd=passwd, db=db, charset=charset)
test_connection(connection_string) | [
"def",
"mysql",
"(",
"host",
",",
"user",
",",
"passwd",
",",
"db",
",",
"charset",
")",
":",
"connection_string",
"=",
"database",
".",
"set_mysql_connection",
"(",
"host",
"=",
"host",
",",
"user",
"=",
"user",
",",
"passwd",
"=",
"passwd",
",",
"db",
"=",
"db",
",",
"charset",
"=",
"charset",
")",
"test_connection",
"(",
"connection_string",
")"
] | 58.25 | 0.008475 |
def update_id(self, sequence_id=None):
"""Alter the sequence id, and all of the names and ids derived from it. This
often needs to be done after an IntegrityError in a multiprocessing run"""
if sequence_id:
self.sequence_id = sequence_id
self._set_ids(force=True)
if self.dataset:
self._update_names() | [
"def",
"update_id",
"(",
"self",
",",
"sequence_id",
"=",
"None",
")",
":",
"if",
"sequence_id",
":",
"self",
".",
"sequence_id",
"=",
"sequence_id",
"self",
".",
"_set_ids",
"(",
"force",
"=",
"True",
")",
"if",
"self",
".",
"dataset",
":",
"self",
".",
"_update_names",
"(",
")"
] | 32.545455 | 0.01087 |
def import_class(class_path):
"""imports and returns given class string.
:param class_path: Class path as string
:type class_path: str
:returns: Class that has given path
:rtype: class
:Example:
>>> import_class('collections.OrderedDict').__name__
'OrderedDict'
"""
try:
from django.utils.importlib import import_module
module_name = '.'.join(class_path.split(".")[:-1])
mod = import_module(module_name)
return getattr(mod, class_path.split(".")[-1])
    except Exception as detail:
raise ImportError(detail) | [
"def",
"import_class",
"(",
"class_path",
")",
":",
"try",
":",
"from",
"django",
".",
"utils",
".",
"importlib",
"import",
"import_module",
"module_name",
"=",
"'.'",
".",
"join",
"(",
"class_path",
".",
"split",
"(",
"\".\"",
")",
"[",
":",
"-",
"1",
"]",
")",
"mod",
"=",
"import_module",
"(",
"module_name",
")",
"return",
"getattr",
"(",
"mod",
",",
"class_path",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
")",
"except",
"Exception",
",",
"detail",
":",
"raise",
"ImportError",
"(",
"detail",
")"
] | 27.142857 | 0.001695 |
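The snippet above targets Python 2 and the long-removed `django.utils.importlib` shim; an equivalent on current Python needs only the standard library:

```python
from importlib import import_module

def import_class(class_path):
    module_name, _, class_name = class_path.rpartition(".")
    return getattr(import_module(module_name), class_name)

print(import_class("collections.OrderedDict").__name__)   # OrderedDict
```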
def get_pipeline_names():
"""Returns the class paths of all Pipelines defined in alphabetical order."""
class_path_set = set()
for cls in _PipelineMeta._all_classes:
if cls.class_path is not None:
class_path_set.add(cls.class_path)
return sorted(class_path_set) | [
"def",
"get_pipeline_names",
"(",
")",
":",
"class_path_set",
"=",
"set",
"(",
")",
"for",
"cls",
"in",
"_PipelineMeta",
".",
"_all_classes",
":",
"if",
"cls",
".",
"class_path",
"is",
"not",
"None",
":",
"class_path_set",
".",
"add",
"(",
"cls",
".",
"class_path",
")",
"return",
"sorted",
"(",
"class_path_set",
")"
] | 39.571429 | 0.021201 |
def _send_signal(self, unique_id, signalno, configs):
""" Issues a signal for the specified process
:Parameter unique_id: the name of the process
"""
pids = self.get_pid(unique_id, configs)
if pids != constants.PROCESS_NOT_RUNNING_PID:
pid_str = ' '.join(str(pid) for pid in pids)
hostname = self.processes[unique_id].hostname
msg= Deployer._signalnames.get(signalno,"SENDING SIGNAL %s TO"%signalno)
with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
better_exec_command(ssh, "kill -{0} {1}".format(signalno, pid_str), "{0} PROCESS {1}".format(msg, unique_id)) | [
"def",
"_send_signal",
"(",
"self",
",",
"unique_id",
",",
"signalno",
",",
"configs",
")",
":",
"pids",
"=",
"self",
".",
"get_pid",
"(",
"unique_id",
",",
"configs",
")",
"if",
"pids",
"!=",
"constants",
".",
"PROCESS_NOT_RUNNING_PID",
":",
"pid_str",
"=",
"' '",
".",
"join",
"(",
"str",
"(",
"pid",
")",
"for",
"pid",
"in",
"pids",
")",
"hostname",
"=",
"self",
".",
"processes",
"[",
"unique_id",
"]",
".",
"hostname",
"msg",
"=",
"Deployer",
".",
"_signalnames",
".",
"get",
"(",
"signalno",
",",
"\"SENDING SIGNAL %s TO\"",
"%",
"signalno",
")",
"with",
"get_ssh_client",
"(",
"hostname",
",",
"username",
"=",
"runtime",
".",
"get_username",
"(",
")",
",",
"password",
"=",
"runtime",
".",
"get_password",
"(",
")",
")",
"as",
"ssh",
":",
"better_exec_command",
"(",
"ssh",
",",
"\"kill -{0} {1}\"",
".",
"format",
"(",
"signalno",
",",
"pid_str",
")",
",",
"\"{0} PROCESS {1}\"",
".",
"format",
"(",
"msg",
",",
"unique_id",
")",
")"
] | 54.666667 | 0.016492 |
def copy_db(source_env, destination_env):
"""
Copies Db betweem servers, ie develop to qa.
Should be called by function from function defined in project fabfile.
Example usage:
def copy_db_between_servers(source_server, destination_server):
source_env = {}
destination_env = {}
def populate_env_dict(server, local_env):
app_dir = 'nutrimom'
if server == 'nm-dev':
user = 'nutrimom-dev'
prefix = "dev"
environment = 'devel'
host_string = 'dev.arabel.la'
elif server == 'nm-qa':
user = 'nutrimom-qa'
prefix = "qa"
environment = 'qa'
host_string = 'qa.arabel.la'
elif server.startswith('nm-f'):
if server in ['nm-f1', 'nm-f2', 'nm-f3', 'nm-f4', 'nm-f5']:
user = 'nutrimom-' + server.split('-')[1]
prefix = user.split('-')[1]
environment = prefix
host_string = 'dev.arabel.la'
else:
print ("supported params: nm-dev, nm-qa, nm-fx")
sys.exit()
local_env['app_dir'] = app_dir
local_env['remote_user'] = user
local_env['remote_path'] = '/home/%s/www/' % (user)
local_env['dir'] = '/home/%s/Envs/%s' % (user, app_dir)
local_env['python'] = '/home/%s/Envs/%s/bin/python' % (user, app_dir)
local_env['pip'] = '/home/%s/Envs/%s/bin/pip' % (user, app_dir)
local_env['prefix'] = prefix
local_env['environment'] = environment
local_env['host_string'] = host_string
local_env['is_feature'] = False
return local_env
source_env = populate_env_dict(source_server, source_env)
destination_env = populate_env_dict(destination_server, destination_env)
copy_db(source_env, destination_env)
"""
env.update(source_env)
local_file_path = _get_db()
# put the file on external file system
# clean external db
# load database into external file system
env.update(destination_env)
with cd(env.remote_path):
sudo('mkdir -p backups', user=env.remote_user)
sudo(env.python + ' manage.py dump_database | gzip > backups/' + _sql_paths('local', datetime.now()))
put(local_file_path, 'backups', use_sudo=True)
sudo(env.python + ' manage.py clear_database', user=env.remote_user)
if local_file_path.endswith('.gz'): # the path is the same here and there
sudo('gzip -dc %s | %s manage.py dbshell' % (local_file_path, env.python), user=env.remote_user)
else:
sudo('%s manage.py dbshell < %s ' % (env.python, local_file_path), user=env.remote_user) | [
"def",
"copy_db",
"(",
"source_env",
",",
"destination_env",
")",
":",
"env",
".",
"update",
"(",
"source_env",
")",
"local_file_path",
"=",
"_get_db",
"(",
")",
"# put the file on external file system",
"# clean external db",
"# load database into external file system",
"env",
".",
"update",
"(",
"destination_env",
")",
"with",
"cd",
"(",
"env",
".",
"remote_path",
")",
":",
"sudo",
"(",
"'mkdir -p backups'",
",",
"user",
"=",
"env",
".",
"remote_user",
")",
"sudo",
"(",
"env",
".",
"python",
"+",
"' manage.py dump_database | gzip > backups/'",
"+",
"_sql_paths",
"(",
"'local'",
",",
"datetime",
".",
"now",
"(",
")",
")",
")",
"put",
"(",
"local_file_path",
",",
"'backups'",
",",
"use_sudo",
"=",
"True",
")",
"sudo",
"(",
"env",
".",
"python",
"+",
"' manage.py clear_database'",
",",
"user",
"=",
"env",
".",
"remote_user",
")",
"if",
"local_file_path",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"# the path is the same here and there",
"sudo",
"(",
"'gzip -dc %s | %s manage.py dbshell'",
"%",
"(",
"local_file_path",
",",
"env",
".",
"python",
")",
",",
"user",
"=",
"env",
".",
"remote_user",
")",
"else",
":",
"sudo",
"(",
"'%s manage.py dbshell < %s '",
"%",
"(",
"env",
".",
"python",
",",
"local_file_path",
")",
",",
"user",
"=",
"env",
".",
"remote_user",
")"
] | 43.80597 | 0.002333 |
def extract_version(exepath, version_arg, word_index=-1, version_rank=3):
"""Run an executable and get the program version.
Args:
exepath: Filepath to executable.
version_arg: Arg to pass to program, eg "-V". Can also be a list.
word_index: Expect the Nth word of output to be the version.
version_rank: Cap the version to this many tokens.
Returns:
`Version` object.
"""
if isinstance(version_arg, basestring):
version_arg = [version_arg]
args = [exepath] + version_arg
stdout, stderr, returncode = _run_command(args)
if returncode:
raise RezBindError("failed to execute %s: %s\n(error code %d)"
% (exepath, stderr, returncode))
stdout = stdout.strip().split('\n')[0].strip()
log("extracting version from output: '%s'" % stdout)
try:
strver = stdout.split()[word_index]
toks = strver.replace('.', ' ').replace('-', ' ').split()
strver = '.'.join(toks[:version_rank])
version = Version(strver)
except Exception as e:
raise RezBindError("failed to parse version from output '%s': %s"
% (stdout, str(e)))
log("extracted version: '%s'" % str(version))
return version | [
"def",
"extract_version",
"(",
"exepath",
",",
"version_arg",
",",
"word_index",
"=",
"-",
"1",
",",
"version_rank",
"=",
"3",
")",
":",
"if",
"isinstance",
"(",
"version_arg",
",",
"basestring",
")",
":",
"version_arg",
"=",
"[",
"version_arg",
"]",
"args",
"=",
"[",
"exepath",
"]",
"+",
"version_arg",
"stdout",
",",
"stderr",
",",
"returncode",
"=",
"_run_command",
"(",
"args",
")",
"if",
"returncode",
":",
"raise",
"RezBindError",
"(",
"\"failed to execute %s: %s\\n(error code %d)\"",
"%",
"(",
"exepath",
",",
"stderr",
",",
"returncode",
")",
")",
"stdout",
"=",
"stdout",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"log",
"(",
"\"extracting version from output: '%s'\"",
"%",
"stdout",
")",
"try",
":",
"strver",
"=",
"stdout",
".",
"split",
"(",
")",
"[",
"word_index",
"]",
"toks",
"=",
"strver",
".",
"replace",
"(",
"'.'",
",",
"' '",
")",
".",
"replace",
"(",
"'-'",
",",
"' '",
")",
".",
"split",
"(",
")",
"strver",
"=",
"'.'",
".",
"join",
"(",
"toks",
"[",
":",
"version_rank",
"]",
")",
"version",
"=",
"Version",
"(",
"strver",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"RezBindError",
"(",
"\"failed to parse version from output '%s': %s\"",
"%",
"(",
"stdout",
",",
"str",
"(",
"e",
")",
")",
")",
"log",
"(",
"\"extracted version: '%s'\"",
"%",
"str",
"(",
"version",
")",
")",
"return",
"version"
] | 35.457143 | 0.000784 |
def insert_route(**kw):
"""
`path` - '/', '/some/other/path/', '/test/<int:index>/'
`node_id`
`weight` - How this path is selected before other similar paths
`method` - 'GET' is default.
"""
binding = {
'path': None,
'node_id': None,
'weight': None,
'method': "GET"
}
binding.update(kw)
with current_app.app_context():
db.execute(text(fetch_query_string('insert_route.sql')), **binding) | [
"def",
"insert_route",
"(",
"*",
"*",
"kw",
")",
":",
"binding",
"=",
"{",
"'path'",
":",
"None",
",",
"'node_id'",
":",
"None",
",",
"'weight'",
":",
"None",
",",
"'method'",
":",
"\"GET\"",
"}",
"binding",
".",
"update",
"(",
"kw",
")",
"with",
"current_app",
".",
"app_context",
"(",
")",
":",
"db",
".",
"execute",
"(",
"text",
"(",
"fetch_query_string",
"(",
"'insert_route.sql'",
")",
")",
",",
"*",
"*",
"binding",
")"
] | 29.6875 | 0.002041 |
def digest(args):
"""
%prog digest fastafile NspI,BfuCI
Digest fasta sequences to map restriction site positions.
"""
p = OptionParser(digest.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, enzymes = args
enzymes = enzymes.split(",")
enzymes = [x for x in AllEnzymes if str(x) in enzymes]
f = Fasta(fastafile, lazy=True)
fw = must_open(opts.outfile, "w")
header = ["Contig", "Length"] + [str(x) for x in enzymes]
print("\t".join(header), file=fw)
for name, rec in f.iteritems_ordered():
row = [name, len(rec)]
for e in enzymes:
pos = e.search(rec.seq)
pos = "na" if not pos else "|".join(str(x) for x in pos)
row.append(pos)
print("\t".join(str(x) for x in row), file=fw) | [
"def",
"digest",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"digest",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"fastafile",
",",
"enzymes",
"=",
"args",
"enzymes",
"=",
"enzymes",
".",
"split",
"(",
"\",\"",
")",
"enzymes",
"=",
"[",
"x",
"for",
"x",
"in",
"AllEnzymes",
"if",
"str",
"(",
"x",
")",
"in",
"enzymes",
"]",
"f",
"=",
"Fasta",
"(",
"fastafile",
",",
"lazy",
"=",
"True",
")",
"fw",
"=",
"must_open",
"(",
"opts",
".",
"outfile",
",",
"\"w\"",
")",
"header",
"=",
"[",
"\"Contig\"",
",",
"\"Length\"",
"]",
"+",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"enzymes",
"]",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"header",
")",
",",
"file",
"=",
"fw",
")",
"for",
"name",
",",
"rec",
"in",
"f",
".",
"iteritems_ordered",
"(",
")",
":",
"row",
"=",
"[",
"name",
",",
"len",
"(",
"rec",
")",
"]",
"for",
"e",
"in",
"enzymes",
":",
"pos",
"=",
"e",
".",
"search",
"(",
"rec",
".",
"seq",
")",
"pos",
"=",
"\"na\"",
"if",
"not",
"pos",
"else",
"\"|\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"pos",
")",
"row",
".",
"append",
"(",
"pos",
")",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"row",
")",
",",
"file",
"=",
"fw",
")"
] | 30.285714 | 0.001143 |
def is_running(conn, args):
"""
Run a command to check the status of a mon, return a boolean.
We heavily depend on the format of the output, if that ever changes
we need to modify this.
Check daemon status for 3 times
output of the status should be similar to::
mon.mira094: running {"version":"0.61.5"}
or when it fails::
mon.mira094: dead {"version":"0.61.5"}
mon.mira094: not running {"version":"0.61.5"}
"""
stdout, stderr, _ = remoto.process.check(
conn,
args
)
result_string = b' '.join(stdout)
for run_check in [b': running', b' start/running']:
if run_check in result_string:
return True
return False | [
"def",
"is_running",
"(",
"conn",
",",
"args",
")",
":",
"stdout",
",",
"stderr",
",",
"_",
"=",
"remoto",
".",
"process",
".",
"check",
"(",
"conn",
",",
"args",
")",
"result_string",
"=",
"b' '",
".",
"join",
"(",
"stdout",
")",
"for",
"run_check",
"in",
"[",
"b': running'",
",",
"b' start/running'",
"]",
":",
"if",
"run_check",
"in",
"result_string",
":",
"return",
"True",
"return",
"False"
] | 28 | 0.001381 |
def map_add(self, key, mapkey, value, create=False, **kwargs):
"""
Set a value for a key in a map.
.. warning::
The functionality of the various `map_*`, `list_*`, `queue_*`
and `set_*` functions are considered experimental and are included
in the library to demonstrate new functionality.
They may change in the future or be removed entirely!
These functions are all wrappers around the :meth:`mutate_in` or
:meth:`lookup_in` methods.
:param key: The document ID of the map
:param mapkey: The key in the map to set
:param value: The value to use (anything serializable to JSON)
:param create: Whether the map should be created if it does not exist
:param kwargs: Additional arguments passed to :meth:`mutate_in`
:return: A :class:`~.OperationResult`
:raise: :cb_exc:`NotFoundError` if the document does not exist.
and `create` was not specified
.. Initialize a map and add a value
cb.upsert('a_map', {})
cb.map_add('a_map', 'some_key', 'some_value')
cb.map_get('a_map', 'some_key').value # => 'some_value'
cb.get('a_map').value # => {'some_key': 'some_value'}
"""
op = SD.upsert(mapkey, value)
sdres = self.mutate_in(key, op, **kwargs)
return self._wrap_dsop(sdres) | [
"def",
"map_add",
"(",
"self",
",",
"key",
",",
"mapkey",
",",
"value",
",",
"create",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"op",
"=",
"SD",
".",
"upsert",
"(",
"mapkey",
",",
"value",
")",
"sdres",
"=",
"self",
".",
"mutate_in",
"(",
"key",
",",
"op",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_wrap_dsop",
"(",
"sdres",
")"
] | 41 | 0.001402 |
def _process_filters(cls, filters):
"""Takes a list of filters and returns JSON
:Parameters:
- `filters`: List of Filters, (key, val) tuples, or dicts
Returns: List of JSON API filters
"""
data = []
# Filters should always be a list
for f in filters:
if isinstance(f, Filter):
if f.filters:
data.extend(cls._process_filters(f.filters))
elif isinstance(f, dict):
key = list(f.keys())[0]
val = f[key]
if isinstance(val, dict):
# pass val (a dict) as list
# so that it gets processed properly
filter_filters = cls._process_filters([val])
if len(filter_filters) == 1:
filter_filters = filter_filters[0]
data.append({key: filter_filters})
else:
data.append({key: cls._process_filters(val)})
else:
data.extend((f,))
return data | [
"def",
"_process_filters",
"(",
"cls",
",",
"filters",
")",
":",
"data",
"=",
"[",
"]",
"# Filters should always be a list",
"for",
"f",
"in",
"filters",
":",
"if",
"isinstance",
"(",
"f",
",",
"Filter",
")",
":",
"if",
"f",
".",
"filters",
":",
"data",
".",
"extend",
"(",
"cls",
".",
"_process_filters",
"(",
"f",
".",
"filters",
")",
")",
"elif",
"isinstance",
"(",
"f",
",",
"dict",
")",
":",
"key",
"=",
"list",
"(",
"f",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"val",
"=",
"f",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"# pass val (a dict) as list",
"# so that it gets processed properly",
"filter_filters",
"=",
"cls",
".",
"_process_filters",
"(",
"[",
"val",
"]",
")",
"if",
"len",
"(",
"filter_filters",
")",
"==",
"1",
":",
"filter_filters",
"=",
"filter_filters",
"[",
"0",
"]",
"data",
".",
"append",
"(",
"{",
"key",
":",
"filter_filters",
"}",
")",
"else",
":",
"data",
".",
"append",
"(",
"{",
"key",
":",
"cls",
".",
"_process_filters",
"(",
"val",
")",
"}",
")",
"else",
":",
"data",
".",
"extend",
"(",
"(",
"f",
",",
")",
")",
"return",
"data"
] | 33.21875 | 0.001828 |
def poll(self, timeout=0.0):
"""Modified version of poll() from asyncore module"""
if self.sock_map is None:
Log.warning("Socket map is not registered to Gateway Looper")
readable_lst = []
writable_lst = []
error_lst = []
if self.sock_map is not None:
for fd, obj in self.sock_map.items():
is_r = obj.readable()
is_w = obj.writable()
if is_r:
readable_lst.append(fd)
if is_w and not obj.accepting:
writable_lst.append(fd)
if is_r or is_w:
error_lst.append(fd)
# Add wakeup fd
readable_lst.append(self.pipe_r)
Log.debug("Will select() with timeout: " + str(timeout) + ", with map: " + str(self.sock_map))
try:
readable_lst, writable_lst, error_lst = \
select.select(readable_lst, writable_lst, error_lst, timeout)
except select.error as err:
Log.debug("Trivial error: " + str(err))
if err.args[0] != errno.EINTR:
raise
else:
return
Log.debug("Selected [r]: " + str(readable_lst) +
" [w]: " + str(writable_lst) + " [e]: " + str(error_lst))
if self.pipe_r in readable_lst:
Log.debug("Read from pipe")
os.read(self.pipe_r, 1024)
readable_lst.remove(self.pipe_r)
if self.sock_map is not None:
for fd in readable_lst:
obj = self.sock_map.get(fd)
if obj is None:
continue
asyncore.read(obj)
for fd in writable_lst:
obj = self.sock_map.get(fd)
if obj is None:
continue
asyncore.write(obj)
for fd in error_lst:
obj = self.sock_map.get(fd)
if obj is None:
continue
# pylint: disable=W0212
asyncore._exception(obj) | [
"def",
"poll",
"(",
"self",
",",
"timeout",
"=",
"0.0",
")",
":",
"if",
"self",
".",
"sock_map",
"is",
"None",
":",
"Log",
".",
"warning",
"(",
"\"Socket map is not registered to Gateway Looper\"",
")",
"readable_lst",
"=",
"[",
"]",
"writable_lst",
"=",
"[",
"]",
"error_lst",
"=",
"[",
"]",
"if",
"self",
".",
"sock_map",
"is",
"not",
"None",
":",
"for",
"fd",
",",
"obj",
"in",
"self",
".",
"sock_map",
".",
"items",
"(",
")",
":",
"is_r",
"=",
"obj",
".",
"readable",
"(",
")",
"is_w",
"=",
"obj",
".",
"writable",
"(",
")",
"if",
"is_r",
":",
"readable_lst",
".",
"append",
"(",
"fd",
")",
"if",
"is_w",
"and",
"not",
"obj",
".",
"accepting",
":",
"writable_lst",
".",
"append",
"(",
"fd",
")",
"if",
"is_r",
"or",
"is_w",
":",
"error_lst",
".",
"append",
"(",
"fd",
")",
"# Add wakeup fd",
"readable_lst",
".",
"append",
"(",
"self",
".",
"pipe_r",
")",
"Log",
".",
"debug",
"(",
"\"Will select() with timeout: \"",
"+",
"str",
"(",
"timeout",
")",
"+",
"\", with map: \"",
"+",
"str",
"(",
"self",
".",
"sock_map",
")",
")",
"try",
":",
"readable_lst",
",",
"writable_lst",
",",
"error_lst",
"=",
"select",
".",
"select",
"(",
"readable_lst",
",",
"writable_lst",
",",
"error_lst",
",",
"timeout",
")",
"except",
"select",
".",
"error",
"as",
"err",
":",
"Log",
".",
"debug",
"(",
"\"Trivial error: \"",
"+",
"str",
"(",
"err",
")",
")",
"if",
"err",
".",
"args",
"[",
"0",
"]",
"!=",
"errno",
".",
"EINTR",
":",
"raise",
"else",
":",
"return",
"Log",
".",
"debug",
"(",
"\"Selected [r]: \"",
"+",
"str",
"(",
"readable_lst",
")",
"+",
"\" [w]: \"",
"+",
"str",
"(",
"writable_lst",
")",
"+",
"\" [e]: \"",
"+",
"str",
"(",
"error_lst",
")",
")",
"if",
"self",
".",
"pipe_r",
"in",
"readable_lst",
":",
"Log",
".",
"debug",
"(",
"\"Read from pipe\"",
")",
"os",
".",
"read",
"(",
"self",
".",
"pipe_r",
",",
"1024",
")",
"readable_lst",
".",
"remove",
"(",
"self",
".",
"pipe_r",
")",
"if",
"self",
".",
"sock_map",
"is",
"not",
"None",
":",
"for",
"fd",
"in",
"readable_lst",
":",
"obj",
"=",
"self",
".",
"sock_map",
".",
"get",
"(",
"fd",
")",
"if",
"obj",
"is",
"None",
":",
"continue",
"asyncore",
".",
"read",
"(",
"obj",
")",
"for",
"fd",
"in",
"writable_lst",
":",
"obj",
"=",
"self",
".",
"sock_map",
".",
"get",
"(",
"fd",
")",
"if",
"obj",
"is",
"None",
":",
"continue",
"asyncore",
".",
"write",
"(",
"obj",
")",
"for",
"fd",
"in",
"error_lst",
":",
"obj",
"=",
"self",
".",
"sock_map",
".",
"get",
"(",
"fd",
")",
"if",
"obj",
"is",
"None",
":",
"continue",
"# pylint: disable=W0212",
"asyncore",
".",
"_exception",
"(",
"obj",
")"
] | 28.779661 | 0.01139 |
def get_detection_results(url, timeout, metadata=False, save_har=False):
""" Return results from detector.
This function prepares the environment loading the plugins,
getting the response and passing it to the detector.
In case of errors, it raises exceptions to be handled externally.
"""
plugins = load_plugins()
if not plugins:
raise NoPluginsError('No plugins found')
logger.debug('[+] Starting detection with %(n)d plugins', {'n': len(plugins)})
response = get_response(url, plugins, timeout)
# Save HAR
if save_har:
fd, path = tempfile.mkstemp(suffix='.har')
logger.info(f'Saving HAR file to {path}')
with open(fd, 'w') as f:
json.dump(response['har'], f)
det = Detector(response, plugins, url)
softwares = det.get_results(metadata=metadata)
output = {
'url': url,
'softwares': softwares,
}
return output | [
"def",
"get_detection_results",
"(",
"url",
",",
"timeout",
",",
"metadata",
"=",
"False",
",",
"save_har",
"=",
"False",
")",
":",
"plugins",
"=",
"load_plugins",
"(",
")",
"if",
"not",
"plugins",
":",
"raise",
"NoPluginsError",
"(",
"'No plugins found'",
")",
"logger",
".",
"debug",
"(",
"'[+] Starting detection with %(n)d plugins'",
",",
"{",
"'n'",
":",
"len",
"(",
"plugins",
")",
"}",
")",
"response",
"=",
"get_response",
"(",
"url",
",",
"plugins",
",",
"timeout",
")",
"# Save HAR",
"if",
"save_har",
":",
"fd",
",",
"path",
"=",
"tempfile",
".",
"mkstemp",
"(",
"suffix",
"=",
"'.har'",
")",
"logger",
".",
"info",
"(",
"f'Saving HAR file to {path}'",
")",
"with",
"open",
"(",
"fd",
",",
"'w'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"response",
"[",
"'har'",
"]",
",",
"f",
")",
"det",
"=",
"Detector",
"(",
"response",
",",
"plugins",
",",
"url",
")",
"softwares",
"=",
"det",
".",
"get_results",
"(",
"metadata",
"=",
"metadata",
")",
"output",
"=",
"{",
"'url'",
":",
"url",
",",
"'softwares'",
":",
"softwares",
",",
"}",
"return",
"output"
] | 26.794118 | 0.002119 |
def Nu_cylinder_Whitaker(Re, Pr, mu=None, muw=None):
r'''Calculates Nusselt number for crossflow across a single tube as shown
in [1]_ at a specified `Re` and `Pr`, both evaluated at the free stream
temperature. Recommends a viscosity exponent correction of 0.25, which is
applied only if provided. Also shown in [2]_.
.. math::
Nu_D = (0.4 Re_D^{0.5} + 0.06Re_D^{2/3})Pr^{0.4}
\left(\frac{\mu}{\mu_w}\right)^{0.25}
Parameters
----------
Re : float
Reynolds number with respect to cylinder diameter, [-]
Pr : float
Prandtl number at free stream temperature, [-]
mu : float, optional
Viscosity of fluid at the free stream temperature [Pa*s]
muw : float, optional
Viscosity of fluid at the wall temperature [Pa*s]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
Developed considering data from 1 to 1E5 Re, 0.67 to 300 Pr, and range of
viscosity ratios from 0.25 to 5.2. Found experimental data to generally
agree with it within 25%.
Examples
--------
>>> Nu_cylinder_Whitaker(6071, 0.7)
45.94527461589126
References
----------
.. [1] Whitaker, Stephen. "Forced Convection Heat Transfer Correlations for
Flow in Pipes, Past Flat Plates, Single Cylinders, Single Spheres, and
for Flow in Packed Beds and Tube Bundles." AIChE Journal 18, no. 2
(March 1, 1972): 361-371. doi:10.1002/aic.690180219.
.. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat Transfer
from a Circular Cylinder in Crossflow to Air and Liquids." International
Journal of Heat and Mass Transfer 47, no. 22 (October 2004): 4795-4805.
doi:10.1016/j.ijheatmasstransfer.2004.05.012.
'''
Nu = (0.4*Re**0.5 + 0.06*Re**(2/3.))*Pr**0.3
if mu and muw:
Nu *= (mu/muw)**0.25
return Nu | [
"def",
"Nu_cylinder_Whitaker",
"(",
"Re",
",",
"Pr",
",",
"mu",
"=",
"None",
",",
"muw",
"=",
"None",
")",
":",
"Nu",
"=",
"(",
"0.4",
"*",
"Re",
"**",
"0.5",
"+",
"0.06",
"*",
"Re",
"**",
"(",
"2",
"/",
"3.",
")",
")",
"*",
"Pr",
"**",
"0.3",
"if",
"mu",
"and",
"muw",
":",
"Nu",
"*=",
"(",
"mu",
"/",
"muw",
")",
"**",
"0.25",
"return",
"Nu"
] | 36.211538 | 0.000517 |
def handle(self, cycle_delay=0.1):
"""
Call this method to spend about ``cycle_delay`` seconds processing
requests in the pcaspy server. Under load, for example when running ``caget`` at a
high frequency, the actual time spent in the method may be much shorter. This effect
is not corrected for.
:param cycle_delay: Approximate time to be spent processing requests in pcaspy server.
"""
if self._server is not None:
self._server.process(cycle_delay)
self._driver.process_pv_updates() | [
"def",
"handle",
"(",
"self",
",",
"cycle_delay",
"=",
"0.1",
")",
":",
"if",
"self",
".",
"_server",
"is",
"not",
"None",
":",
"self",
".",
"_server",
".",
"process",
"(",
"cycle_delay",
")",
"self",
".",
"_driver",
".",
"process_pv_updates",
"(",
")"
] | 46.75 | 0.008741 |
def to_pypsa(network, mode, timesteps):
"""
Translate graph based grid representation to PyPSA Network
For details from a user perspective see API documentation of
:meth:`~.grid.network.EDisGo.analyze` of the API class
:class:`~.grid.network.EDisGo`.
Translating eDisGo's grid topology to PyPSA representation is structured
into translating the topology and adding time series for components of the
grid. In both cases translation of MV grid only (`mode='mv'`), LV grid only
(`mode='lv'`), MV and LV (`mode=None`) share some code. The
code is organized as follows:
* Medium-voltage only (`mode='mv'`): All medium-voltage grid components are
exported by :func:`mv_to_pypsa` including the LV station. LV grid load
and generation is considered using :func:`add_aggregated_lv_components`.
Time series are collected by `_pypsa_load_timeseries` (as example
for loads, generators and buses) specifying `mode='mv'`). Timeseries
for aggregated load/generation at substations are determined individually.
* Low-voltage only (`mode='lv'`): LV grid topology including the MV-LV
transformer is exported. The slack is defind at primary side of the MV-LV
transformer.
* Both level MV+LV (`mode=None`): The entire grid topology is translated to
PyPSA in order to perform a complete power flow analysis in both levels
together. First, both grid levels are translated seperately using
:func:`mv_to_pypsa` and :func:`lv_to_pypsa`. Those are merge by
:func:`combine_mv_and_lv`. Time series are obtained at once for both grid
levels.
This PyPSA interface is aware of translation errors and performs so checks
on integrity of data converted to PyPSA grid representation
* Sub-graphs/ Sub-networks: It is ensured the grid has no islanded parts
* Completeness of time series: It is ensured each component has a time
series
* Buses available: Each component (load, generator, line, transformer) is
connected to a bus. The PyPSA representation is check for completeness of
buses.
* Duplicate labels in components DataFrames and components' time series
DataFrames
Parameters
----------
network : :class:`~.grid.network.Network`
eDisGo grid container
mode : str
Determines grid levels that are translated to
`PyPSA grid representation
<https://www.pypsa.org/doc/components.html#network>`_. Specify
* None to export MV and LV grid levels. None is the default.
* ('mv' to export MV grid level only. This includes cumulative load and
generation from underlying LV grid aggregated at respective LV
station. This option is implemented, though the rest of edisgo does
not handle it yet.)
* ('lv' to export LV grid level only. This option is not yet
implemented)
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or \
:pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies which time steps to export to pypsa representation
and use in power flow analysis.
Returns
-------
:pypsa:`pypsa.Network<network>`
The `PyPSA network
<https://www.pypsa.org/doc/components.html#network>`_ container.
"""
# check if timesteps is array-like, otherwise convert to list (necessary
# to obtain a dataframe when using .loc in time series functions)
if not hasattr(timesteps, "__len__"):
timesteps = [timesteps]
# get topology and time series data
if mode is None:
mv_components = mv_to_pypsa(network)
lv_components = lv_to_pypsa(network)
components = combine_mv_and_lv(mv_components, lv_components)
if list(components['Load'].index.values):
timeseries_load_p, timeseries_load_q = _pypsa_load_timeseries(
network, mode=mode, timesteps=timesteps)
if len(list(components['Generator'].index.values)) > 1:
timeseries_gen_p, timeseries_gen_q = _pypsa_generator_timeseries(
network, mode=mode, timesteps=timesteps)
if list(components['Bus'].index.values):
timeseries_bus_v_set = _pypsa_bus_timeseries(
network, components['Bus'].index.tolist(), timesteps=timesteps)
if len(list(components['StorageUnit'].index.values)) > 0:
timeseries_storage_p, timeseries_storage_q = \
_pypsa_storage_timeseries(
network, mode=mode, timesteps=timesteps)
elif mode is 'mv':
# the pypsa export works but NotImplementedError is raised since the
# rest of edisgo (handling of results from pfa, grid expansion, etc.)
# does not yet work
raise NotImplementedError
mv_components = mv_to_pypsa(network)
components = add_aggregated_lv_components(network, mv_components)
if list(components['Load'].index.values):
timeseries_load_p, timeseries_load_q = _pypsa_load_timeseries(
network, mode=mode, timesteps=timesteps)
if len(list(components['Generator'].index.values)) > 1:
timeseries_gen_p, timeseries_gen_q = _pypsa_generator_timeseries(
network, mode=mode, timesteps=timesteps)
if list(components['Bus'].index.values):
timeseries_bus_v_set = _pypsa_bus_timeseries(
network, components['Bus'].index.tolist(), timesteps=timesteps)
if len(list(components['StorageUnit'].index.values)) > 0:
timeseries_storage_p, timeseries_storage_q = \
_pypsa_storage_timeseries(
network, mode=mode, timesteps=timesteps)
elif mode is 'lv':
raise NotImplementedError
#lv_to_pypsa(network)
else:
raise ValueError("Provide proper mode or leave it empty to export "
"entire grid topology.")
# check topology
_check_topology(components)
# create power flow problem
pypsa_network = PyPSANetwork()
pypsa_network.edisgo_mode = mode
pypsa_network.set_snapshots(timesteps)
# import grid topology to PyPSA network
# buses are created first to avoid warnings
pypsa_network.import_components_from_dataframe(components['Bus'], 'Bus')
for k, comps in components.items():
if k is not 'Bus' and not comps.empty:
pypsa_network.import_components_from_dataframe(comps, k)
# import time series to PyPSA network
if len(list(components['Generator'].index.values)) > 1:
import_series_from_dataframe(pypsa_network, timeseries_gen_p,
'Generator', 'p_set')
import_series_from_dataframe(pypsa_network, timeseries_gen_q,
'Generator', 'q_set')
if list(components['Load'].index.values):
import_series_from_dataframe(pypsa_network, timeseries_load_p,
'Load', 'p_set')
import_series_from_dataframe(pypsa_network, timeseries_load_q,
'Load', 'q_set')
if list(components['Bus'].index.values):
import_series_from_dataframe(pypsa_network, timeseries_bus_v_set,
'Bus', 'v_mag_pu_set')
if len(list(components['StorageUnit'].index.values)) > 0:
import_series_from_dataframe(pypsa_network, timeseries_storage_p,
'StorageUnit', 'p_set')
import_series_from_dataframe(pypsa_network, timeseries_storage_q,
'StorageUnit', 'q_set')
_check_integrity_of_pypsa(pypsa_network)
return pypsa_network | [
"def",
"to_pypsa",
"(",
"network",
",",
"mode",
",",
"timesteps",
")",
":",
"# check if timesteps is array-like, otherwise convert to list (necessary",
"# to obtain a dataframe when using .loc in time series functions)",
"if",
"not",
"hasattr",
"(",
"timesteps",
",",
"\"__len__\"",
")",
":",
"timesteps",
"=",
"[",
"timesteps",
"]",
"# get topology and time series data",
"if",
"mode",
"is",
"None",
":",
"mv_components",
"=",
"mv_to_pypsa",
"(",
"network",
")",
"lv_components",
"=",
"lv_to_pypsa",
"(",
"network",
")",
"components",
"=",
"combine_mv_and_lv",
"(",
"mv_components",
",",
"lv_components",
")",
"if",
"list",
"(",
"components",
"[",
"'Load'",
"]",
".",
"index",
".",
"values",
")",
":",
"timeseries_load_p",
",",
"timeseries_load_q",
"=",
"_pypsa_load_timeseries",
"(",
"network",
",",
"mode",
"=",
"mode",
",",
"timesteps",
"=",
"timesteps",
")",
"if",
"len",
"(",
"list",
"(",
"components",
"[",
"'Generator'",
"]",
".",
"index",
".",
"values",
")",
")",
">",
"1",
":",
"timeseries_gen_p",
",",
"timeseries_gen_q",
"=",
"_pypsa_generator_timeseries",
"(",
"network",
",",
"mode",
"=",
"mode",
",",
"timesteps",
"=",
"timesteps",
")",
"if",
"list",
"(",
"components",
"[",
"'Bus'",
"]",
".",
"index",
".",
"values",
")",
":",
"timeseries_bus_v_set",
"=",
"_pypsa_bus_timeseries",
"(",
"network",
",",
"components",
"[",
"'Bus'",
"]",
".",
"index",
".",
"tolist",
"(",
")",
",",
"timesteps",
"=",
"timesteps",
")",
"if",
"len",
"(",
"list",
"(",
"components",
"[",
"'StorageUnit'",
"]",
".",
"index",
".",
"values",
")",
")",
">",
"0",
":",
"timeseries_storage_p",
",",
"timeseries_storage_q",
"=",
"_pypsa_storage_timeseries",
"(",
"network",
",",
"mode",
"=",
"mode",
",",
"timesteps",
"=",
"timesteps",
")",
"elif",
"mode",
"is",
"'mv'",
":",
"# the pypsa export works but NotImplementedError is raised since the",
"# rest of edisgo (handling of results from pfa, grid expansion, etc.)",
"# does not yet work",
"raise",
"NotImplementedError",
"mv_components",
"=",
"mv_to_pypsa",
"(",
"network",
")",
"components",
"=",
"add_aggregated_lv_components",
"(",
"network",
",",
"mv_components",
")",
"if",
"list",
"(",
"components",
"[",
"'Load'",
"]",
".",
"index",
".",
"values",
")",
":",
"timeseries_load_p",
",",
"timeseries_load_q",
"=",
"_pypsa_load_timeseries",
"(",
"network",
",",
"mode",
"=",
"mode",
",",
"timesteps",
"=",
"timesteps",
")",
"if",
"len",
"(",
"list",
"(",
"components",
"[",
"'Generator'",
"]",
".",
"index",
".",
"values",
")",
")",
">",
"1",
":",
"timeseries_gen_p",
",",
"timeseries_gen_q",
"=",
"_pypsa_generator_timeseries",
"(",
"network",
",",
"mode",
"=",
"mode",
",",
"timesteps",
"=",
"timesteps",
")",
"if",
"list",
"(",
"components",
"[",
"'Bus'",
"]",
".",
"index",
".",
"values",
")",
":",
"timeseries_bus_v_set",
"=",
"_pypsa_bus_timeseries",
"(",
"network",
",",
"components",
"[",
"'Bus'",
"]",
".",
"index",
".",
"tolist",
"(",
")",
",",
"timesteps",
"=",
"timesteps",
")",
"if",
"len",
"(",
"list",
"(",
"components",
"[",
"'StorageUnit'",
"]",
".",
"index",
".",
"values",
")",
")",
">",
"0",
":",
"timeseries_storage_p",
",",
"timeseries_storage_q",
"=",
"_pypsa_storage_timeseries",
"(",
"network",
",",
"mode",
"=",
"mode",
",",
"timesteps",
"=",
"timesteps",
")",
"elif",
"mode",
"is",
"'lv'",
":",
"raise",
"NotImplementedError",
"#lv_to_pypsa(network)",
"else",
":",
"raise",
"ValueError",
"(",
"\"Provide proper mode or leave it empty to export \"",
"\"entire grid topology.\"",
")",
"# check topology",
"_check_topology",
"(",
"components",
")",
"# create power flow problem",
"pypsa_network",
"=",
"PyPSANetwork",
"(",
")",
"pypsa_network",
".",
"edisgo_mode",
"=",
"mode",
"pypsa_network",
".",
"set_snapshots",
"(",
"timesteps",
")",
"# import grid topology to PyPSA network",
"# buses are created first to avoid warnings",
"pypsa_network",
".",
"import_components_from_dataframe",
"(",
"components",
"[",
"'Bus'",
"]",
",",
"'Bus'",
")",
"for",
"k",
",",
"comps",
"in",
"components",
".",
"items",
"(",
")",
":",
"if",
"k",
"is",
"not",
"'Bus'",
"and",
"not",
"comps",
".",
"empty",
":",
"pypsa_network",
".",
"import_components_from_dataframe",
"(",
"comps",
",",
"k",
")",
"# import time series to PyPSA network",
"if",
"len",
"(",
"list",
"(",
"components",
"[",
"'Generator'",
"]",
".",
"index",
".",
"values",
")",
")",
">",
"1",
":",
"import_series_from_dataframe",
"(",
"pypsa_network",
",",
"timeseries_gen_p",
",",
"'Generator'",
",",
"'p_set'",
")",
"import_series_from_dataframe",
"(",
"pypsa_network",
",",
"timeseries_gen_q",
",",
"'Generator'",
",",
"'q_set'",
")",
"if",
"list",
"(",
"components",
"[",
"'Load'",
"]",
".",
"index",
".",
"values",
")",
":",
"import_series_from_dataframe",
"(",
"pypsa_network",
",",
"timeseries_load_p",
",",
"'Load'",
",",
"'p_set'",
")",
"import_series_from_dataframe",
"(",
"pypsa_network",
",",
"timeseries_load_q",
",",
"'Load'",
",",
"'q_set'",
")",
"if",
"list",
"(",
"components",
"[",
"'Bus'",
"]",
".",
"index",
".",
"values",
")",
":",
"import_series_from_dataframe",
"(",
"pypsa_network",
",",
"timeseries_bus_v_set",
",",
"'Bus'",
",",
"'v_mag_pu_set'",
")",
"if",
"len",
"(",
"list",
"(",
"components",
"[",
"'StorageUnit'",
"]",
".",
"index",
".",
"values",
")",
")",
">",
"0",
":",
"import_series_from_dataframe",
"(",
"pypsa_network",
",",
"timeseries_storage_p",
",",
"'StorageUnit'",
",",
"'p_set'",
")",
"import_series_from_dataframe",
"(",
"pypsa_network",
",",
"timeseries_storage_q",
",",
"'StorageUnit'",
",",
"'q_set'",
")",
"_check_integrity_of_pypsa",
"(",
"pypsa_network",
")",
"return",
"pypsa_network"
] | 43.710983 | 0.000388 |
def ratioTerminatorToStar(H_p, R_p, R_s): # TODO add into planet class
r"""Calculates the ratio of the terminator to the star assuming 5 scale
heights large. If you dont know all of the input try
:py:func:`calcRatioTerminatorToStar`
.. math::
\Delta F = \frac{10 H R_p + 25 H^2}{R_\star^2}
Where :math:`\Delta F` is the ration of the terminator to the star,
H scale height planet atmosphere, :math:`R_p` radius of the planet,
:math:`R_s` radius of the star
:param H_p:
:param R_p:
:param R_s:
:return: ratio of the terminator to the star
"""
deltaF = ((10 * H_p * R_p) + (25 * H_p**2)) / (R_s**2)
return deltaF.simplified | [
"def",
"ratioTerminatorToStar",
"(",
"H_p",
",",
"R_p",
",",
"R_s",
")",
":",
"# TODO add into planet class",
"deltaF",
"=",
"(",
"(",
"10",
"*",
"H_p",
"*",
"R_p",
")",
"+",
"(",
"25",
"*",
"H_p",
"**",
"2",
")",
")",
"/",
"(",
"R_s",
"**",
"2",
")",
"return",
"deltaF",
".",
"simplified"
] | 33.55 | 0.001449 |
def _analyzeDontMeasure(self, chunkSize, willMeasureLater, *sinks):
""" Figure out the best diffs to use to reach all our required volumes. """
nodes = [None]
height = 1
def sortKey(node):
if node is None:
return None
return (node.intermediate, self._totalSize(node))
while len(nodes) > 0:
logger.debug("Analyzing %d nodes for height %d...", len(nodes), height)
nodes.sort(key=sortKey)
for fromNode in nodes:
if self._height(fromNode) >= height:
continue
if fromNode is not None and fromNode.diffSize is None:
continue
fromVol = fromNode.volume if fromNode else None
logger.debug("Following edges from %s", fromVol)
for sink in sinks:
# logger.debug(
# "Listing edges in %s",
# sink
# )
for edge in sink.getEdges(fromVol):
toVol = edge.toVol
# logger.debug("Edge: %s", edge)
# Skip any edges already in the destination
if sink != self.dest and self.dest.hasEdge(edge):
continue
if toVol in self.nodes:
toNode = self.nodes[toVol]
# Don't transfer any edges we won't need in the destination
# elif sink != self.dest:
# logger.debug("Won't transfer unnecessary %s", edge)
# continue
else:
toNode = _Node(toVol, True)
self.nodes[toVol] = toNode
logger.debug("Considering %s", edge)
edgeSize = edge.size
if edge.sizeIsEstimated:
if willMeasureLater:
# Slight preference for accurate sizes
edgeSize *= 1.2
else:
# Large preference for accurate sizes
edgeSize *= 2
newCost = self._cost(sink, edgeSize, fromNode, height)
if toNode.diff is None:
oldCost = None
else:
oldCost = self._cost(
toNode.sink,
toNode.diffSize,
self._getNode(toNode.previous),
self._height(toNode)
)
# Don't use a more-expensive path
if oldCost is not None and oldCost <= newCost:
continue
# Don't create circular paths
if self._wouldLoop(fromVol, toVol):
# logger.debug("Ignoring looping edge: %s", toVol.display(sink))
continue
# if measureSize and sink != self.dest and edge.sizeIsEstimated:
# sink.measureSize(edge, chunkSize)
# newCost = self._cost(sink, edge.size, fromSize, height)
# if oldCost is not None and oldCost <= newCost:
# continue
logger.debug(
"Replacing edge (%s -> %s cost)\n%s",
humanize(oldCost),
humanize(newCost),
toNode.display(sink)
)
# logger.debug("Cost elements: %s", dict(
# sink=str(sink),
# edgeSize=humanize(edgeSize),
# fromSize=humanize(fromSize),
# height=height,
# ))
toNode.diff = edge
nodes = [node for node in self.nodes.values() if self._height(node) == height]
height += 1
self._prune()
for node in self.nodes.values():
node.height = self._height(node)
if node.diff is None:
logger.error(
"No source diffs for %s",
node.volume.display(sinks[-1], detail="line"),
) | [
"def",
"_analyzeDontMeasure",
"(",
"self",
",",
"chunkSize",
",",
"willMeasureLater",
",",
"*",
"sinks",
")",
":",
"nodes",
"=",
"[",
"None",
"]",
"height",
"=",
"1",
"def",
"sortKey",
"(",
"node",
")",
":",
"if",
"node",
"is",
"None",
":",
"return",
"None",
"return",
"(",
"node",
".",
"intermediate",
",",
"self",
".",
"_totalSize",
"(",
"node",
")",
")",
"while",
"len",
"(",
"nodes",
")",
">",
"0",
":",
"logger",
".",
"debug",
"(",
"\"Analyzing %d nodes for height %d...\"",
",",
"len",
"(",
"nodes",
")",
",",
"height",
")",
"nodes",
".",
"sort",
"(",
"key",
"=",
"sortKey",
")",
"for",
"fromNode",
"in",
"nodes",
":",
"if",
"self",
".",
"_height",
"(",
"fromNode",
")",
">=",
"height",
":",
"continue",
"if",
"fromNode",
"is",
"not",
"None",
"and",
"fromNode",
".",
"diffSize",
"is",
"None",
":",
"continue",
"fromVol",
"=",
"fromNode",
".",
"volume",
"if",
"fromNode",
"else",
"None",
"logger",
".",
"debug",
"(",
"\"Following edges from %s\"",
",",
"fromVol",
")",
"for",
"sink",
"in",
"sinks",
":",
"# logger.debug(",
"# \"Listing edges in %s\",",
"# sink",
"# )",
"for",
"edge",
"in",
"sink",
".",
"getEdges",
"(",
"fromVol",
")",
":",
"toVol",
"=",
"edge",
".",
"toVol",
"# logger.debug(\"Edge: %s\", edge)",
"# Skip any edges already in the destination",
"if",
"sink",
"!=",
"self",
".",
"dest",
"and",
"self",
".",
"dest",
".",
"hasEdge",
"(",
"edge",
")",
":",
"continue",
"if",
"toVol",
"in",
"self",
".",
"nodes",
":",
"toNode",
"=",
"self",
".",
"nodes",
"[",
"toVol",
"]",
"# Don't transfer any edges we won't need in the destination",
"# elif sink != self.dest:",
"# logger.debug(\"Won't transfer unnecessary %s\", edge)",
"# continue",
"else",
":",
"toNode",
"=",
"_Node",
"(",
"toVol",
",",
"True",
")",
"self",
".",
"nodes",
"[",
"toVol",
"]",
"=",
"toNode",
"logger",
".",
"debug",
"(",
"\"Considering %s\"",
",",
"edge",
")",
"edgeSize",
"=",
"edge",
".",
"size",
"if",
"edge",
".",
"sizeIsEstimated",
":",
"if",
"willMeasureLater",
":",
"# Slight preference for accurate sizes",
"edgeSize",
"*=",
"1.2",
"else",
":",
"# Large preference for accurate sizes",
"edgeSize",
"*=",
"2",
"newCost",
"=",
"self",
".",
"_cost",
"(",
"sink",
",",
"edgeSize",
",",
"fromNode",
",",
"height",
")",
"if",
"toNode",
".",
"diff",
"is",
"None",
":",
"oldCost",
"=",
"None",
"else",
":",
"oldCost",
"=",
"self",
".",
"_cost",
"(",
"toNode",
".",
"sink",
",",
"toNode",
".",
"diffSize",
",",
"self",
".",
"_getNode",
"(",
"toNode",
".",
"previous",
")",
",",
"self",
".",
"_height",
"(",
"toNode",
")",
")",
"# Don't use a more-expensive path",
"if",
"oldCost",
"is",
"not",
"None",
"and",
"oldCost",
"<=",
"newCost",
":",
"continue",
"# Don't create circular paths",
"if",
"self",
".",
"_wouldLoop",
"(",
"fromVol",
",",
"toVol",
")",
":",
"# logger.debug(\"Ignoring looping edge: %s\", toVol.display(sink))",
"continue",
"# if measureSize and sink != self.dest and edge.sizeIsEstimated:",
"# sink.measureSize(edge, chunkSize)",
"# newCost = self._cost(sink, edge.size, fromSize, height)",
"# if oldCost is not None and oldCost <= newCost:",
"# continue",
"logger",
".",
"debug",
"(",
"\"Replacing edge (%s -> %s cost)\\n%s\"",
",",
"humanize",
"(",
"oldCost",
")",
",",
"humanize",
"(",
"newCost",
")",
",",
"toNode",
".",
"display",
"(",
"sink",
")",
")",
"# logger.debug(\"Cost elements: %s\", dict(",
"# sink=str(sink),",
"# edgeSize=humanize(edgeSize),",
"# fromSize=humanize(fromSize),",
"# height=height,",
"# ))",
"toNode",
".",
"diff",
"=",
"edge",
"nodes",
"=",
"[",
"node",
"for",
"node",
"in",
"self",
".",
"nodes",
".",
"values",
"(",
")",
"if",
"self",
".",
"_height",
"(",
"node",
")",
"==",
"height",
"]",
"height",
"+=",
"1",
"self",
".",
"_prune",
"(",
")",
"for",
"node",
"in",
"self",
".",
"nodes",
".",
"values",
"(",
")",
":",
"node",
".",
"height",
"=",
"self",
".",
"_height",
"(",
"node",
")",
"if",
"node",
".",
"diff",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"No source diffs for %s\"",
",",
"node",
".",
"volume",
".",
"display",
"(",
"sinks",
"[",
"-",
"1",
"]",
",",
"detail",
"=",
"\"line\"",
")",
",",
")"
] | 39.008621 | 0.002155 |
def get_hash(self):
"""
Get the HMAC-SHA1 that has been calculated this far.
"""
if not self.executed:
raise pyhsm.exception.YHSM_Error("HMAC-SHA1 hash not available, before execute().")
return self.result.hash_result | [
"def",
"get_hash",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"executed",
":",
"raise",
"pyhsm",
".",
"exception",
".",
"YHSM_Error",
"(",
"\"HMAC-SHA1 hash not available, before execute().\"",
")",
"return",
"self",
".",
"result",
".",
"hash_result"
] | 37.571429 | 0.011152 |
def coerce(value):
"""
Takes a number (float, int) or a two-valued integer and returns the
[low, high] in the standard interval form
"""
is_number = lambda x: isinstance(x, (int, float, complex)) #is x an instance of those things
if isinstance(value, IntervalCell) or issubclass(value.__class__,IntervalCell):
#if intervalcell or subclass, return the subclass
return value
elif is_number(value):
return IntervalCell(value, value)
elif hasattr(value, 'low') and hasattr(value, 'high'):
# duck type
assert value.low <= value.high, "Invalid low/high in %s" % str(value)
return value
elif isinstance(value, (list, tuple)) and all(map(is_number, value)):
if len(value) == 1:
low, high = value[0], value[0]
elif len(value) == 2:
low, high = value
else:
low, high = min(value), max(value)
if high < low:
raise Contradiction("Low must be lte High")
return IntervalCell(low, high)
else:
raise Exception("Don't know how to coerce %s" % (type(value))) | [
"def",
"coerce",
"(",
"value",
")",
":",
"is_number",
"=",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"(",
"int",
",",
"float",
",",
"complex",
")",
")",
"#is x an instance of those things",
"if",
"isinstance",
"(",
"value",
",",
"IntervalCell",
")",
"or",
"issubclass",
"(",
"value",
".",
"__class__",
",",
"IntervalCell",
")",
":",
"#if intervalcell or subclass, return the subclass",
"return",
"value",
"elif",
"is_number",
"(",
"value",
")",
":",
"return",
"IntervalCell",
"(",
"value",
",",
"value",
")",
"elif",
"hasattr",
"(",
"value",
",",
"'low'",
")",
"and",
"hasattr",
"(",
"value",
",",
"'high'",
")",
":",
"# duck type",
"assert",
"value",
".",
"low",
"<=",
"value",
".",
"high",
",",
"\"Invalid low/high in %s\"",
"%",
"str",
"(",
"value",
")",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"all",
"(",
"map",
"(",
"is_number",
",",
"value",
")",
")",
":",
"if",
"len",
"(",
"value",
")",
"==",
"1",
":",
"low",
",",
"high",
"=",
"value",
"[",
"0",
"]",
",",
"value",
"[",
"0",
"]",
"elif",
"len",
"(",
"value",
")",
"==",
"2",
":",
"low",
",",
"high",
"=",
"value",
"else",
":",
"low",
",",
"high",
"=",
"min",
"(",
"value",
")",
",",
"max",
"(",
"value",
")",
"if",
"high",
"<",
"low",
":",
"raise",
"Contradiction",
"(",
"\"Low must be lte High\"",
")",
"return",
"IntervalCell",
"(",
"low",
",",
"high",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Don't know how to coerce %s\"",
"%",
"(",
"type",
"(",
"value",
")",
")",
")"
] | 44.592593 | 0.009756 |
def main():
"""
Primary Tarbell command dispatch.
"""
command = Command.lookup(args.get(0))
if len(args) == 0 or args.contains(('-h', '--help', 'help')):
display_info(args)
sys.exit(1)
elif args.contains(('-v', '--version')):
display_version()
sys.exit(1)
elif command:
arg = args.get(0)
args.remove(arg)
command.__call__(command, args)
sys.exit()
else:
show_error(colored.red('Error! Unknown command \'{0}\'.\n'
.format(args.get(0))))
display_info(args)
sys.exit(1) | [
"def",
"main",
"(",
")",
":",
"command",
"=",
"Command",
".",
"lookup",
"(",
"args",
".",
"get",
"(",
"0",
")",
")",
"if",
"len",
"(",
"args",
")",
"==",
"0",
"or",
"args",
".",
"contains",
"(",
"(",
"'-h'",
",",
"'--help'",
",",
"'help'",
")",
")",
":",
"display_info",
"(",
"args",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"elif",
"args",
".",
"contains",
"(",
"(",
"'-v'",
",",
"'--version'",
")",
")",
":",
"display_version",
"(",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"elif",
"command",
":",
"arg",
"=",
"args",
".",
"get",
"(",
"0",
")",
"args",
".",
"remove",
"(",
"arg",
")",
"command",
".",
"__call__",
"(",
"command",
",",
"args",
")",
"sys",
".",
"exit",
"(",
")",
"else",
":",
"show_error",
"(",
"colored",
".",
"red",
"(",
"'Error! Unknown command \\'{0}\\'.\\n'",
".",
"format",
"(",
"args",
".",
"get",
"(",
"0",
")",
")",
")",
")",
"display_info",
"(",
"args",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | 23.88 | 0.00161 |
def savepysyn(self,wave,flux,fname,units=None):
""" Cannot ever use the .writefits() method, because the array is
frequently just sampled at the synphot waveset; plus, writefits
is smart and does things like tapering."""
if units is None:
ytype='throughput'
units=' '
else:
ytype='flux'
col1=pyfits.Column(name='wavelength',format='D',array=wave)
col2=pyfits.Column(name=ytype,format='D',array=flux)
tbhdu=pyfits.BinTableHDU.from_columns(pyfits.ColDefs([col1,col2]))
tbhdu.header.update('tunit1','angstrom')
tbhdu.header.update('tunit2',units)
tbhdu.writeto(fname.replace('.fits','_pysyn.fits')) | [
"def",
"savepysyn",
"(",
"self",
",",
"wave",
",",
"flux",
",",
"fname",
",",
"units",
"=",
"None",
")",
":",
"if",
"units",
"is",
"None",
":",
"ytype",
"=",
"'throughput'",
"units",
"=",
"' '",
"else",
":",
"ytype",
"=",
"'flux'",
"col1",
"=",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"'wavelength'",
",",
"format",
"=",
"'D'",
",",
"array",
"=",
"wave",
")",
"col2",
"=",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"ytype",
",",
"format",
"=",
"'D'",
",",
"array",
"=",
"flux",
")",
"tbhdu",
"=",
"pyfits",
".",
"BinTableHDU",
".",
"from_columns",
"(",
"pyfits",
".",
"ColDefs",
"(",
"[",
"col1",
",",
"col2",
"]",
")",
")",
"tbhdu",
".",
"header",
".",
"update",
"(",
"'tunit1'",
",",
"'angstrom'",
")",
"tbhdu",
".",
"header",
".",
"update",
"(",
"'tunit2'",
",",
"units",
")",
"tbhdu",
".",
"writeto",
"(",
"fname",
".",
"replace",
"(",
"'.fits'",
",",
"'_pysyn.fits'",
")",
")"
] | 47 | 0.027816 |
def mkdtemp(suffix="", prefix=template, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
return file
except FileExistsError:
continue # try again
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found") | [
"def",
"mkdtemp",
"(",
"suffix",
"=",
"\"\"",
",",
"prefix",
"=",
"template",
",",
"dir",
"=",
"None",
")",
":",
"if",
"dir",
"is",
"None",
":",
"dir",
"=",
"gettempdir",
"(",
")",
"names",
"=",
"_get_candidate_names",
"(",
")",
"for",
"seq",
"in",
"range",
"(",
"TMP_MAX",
")",
":",
"name",
"=",
"next",
"(",
"names",
")",
"file",
"=",
"_os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"prefix",
"+",
"name",
"+",
"suffix",
")",
"try",
":",
"_os",
".",
"mkdir",
"(",
"file",
",",
"0o700",
")",
"return",
"file",
"except",
"FileExistsError",
":",
"continue",
"# try again",
"raise",
"FileExistsError",
"(",
"_errno",
".",
"EEXIST",
",",
"\"No usable temporary directory name found\"",
")"
] | 29.965517 | 0.001115 |
def interpolate(self, factor, minGlyph, maxGlyph,
round=True, suppressError=True):
"""
Interpolate the contents of this glyph at location ``factor``
in a linear interpolation between ``minGlyph`` and ``maxGlyph``.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2)
``factor`` may be a :ref:`type-int-float` or a tuple containing
two :ref:`type-int-float` values representing x and y factors.
>>> glyph.interpolate((0.5, 1.0), otherGlyph1, otherGlyph2)
``minGlyph`` must be a :class:`BaseGlyph` and will be located at 0.0
in the interpolation range. ``maxGlyph`` must be a :class:`BaseGlyph`
and will be located at 1.0 in the interpolation range. If ``round``
is ``True``, the contents of the glyph will be rounded to integers
after the interpolation is performed.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2, round=True)
This method assumes that ``minGlyph`` and ``maxGlyph`` are completely
compatible with each other for interpolation. If not, any errors
encountered will raise a :class:`FontPartsError`. If ``suppressError``
is ``True``, no exception will be raised and errors will be silently
ignored.
"""
factor = normalizers.normalizeInterpolationFactor(factor)
if not isinstance(minGlyph, BaseGlyph):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__,
minGlyph.__class__.__name__))
if not isinstance(maxGlyph, BaseGlyph):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__,
maxGlyph.__class__.__name__))
round = normalizers.normalizeBoolean(round)
suppressError = normalizers.normalizeBoolean(suppressError)
self._interpolate(factor, minGlyph, maxGlyph,
round=round, suppressError=suppressError) | [
"def",
"interpolate",
"(",
"self",
",",
"factor",
",",
"minGlyph",
",",
"maxGlyph",
",",
"round",
"=",
"True",
",",
"suppressError",
"=",
"True",
")",
":",
"factor",
"=",
"normalizers",
".",
"normalizeInterpolationFactor",
"(",
"factor",
")",
"if",
"not",
"isinstance",
"(",
"minGlyph",
",",
"BaseGlyph",
")",
":",
"raise",
"TypeError",
"(",
"(",
"\"Interpolation to an instance of %r can not be \"",
"\"performed from an instance of %r.\"",
")",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"minGlyph",
".",
"__class__",
".",
"__name__",
")",
")",
"if",
"not",
"isinstance",
"(",
"maxGlyph",
",",
"BaseGlyph",
")",
":",
"raise",
"TypeError",
"(",
"(",
"\"Interpolation to an instance of %r can not be \"",
"\"performed from an instance of %r.\"",
")",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"maxGlyph",
".",
"__class__",
".",
"__name__",
")",
")",
"round",
"=",
"normalizers",
".",
"normalizeBoolean",
"(",
"round",
")",
"suppressError",
"=",
"normalizers",
".",
"normalizeBoolean",
"(",
"suppressError",
")",
"self",
".",
"_interpolate",
"(",
"factor",
",",
"minGlyph",
",",
"maxGlyph",
",",
"round",
"=",
"round",
",",
"suppressError",
"=",
"suppressError",
")"
] | 52.214286 | 0.001343 |
def dfa_word_acceptance(dfa: dict, word: list) -> bool:
""" Checks if a given **word** is accepted by a DFA,
returning True/false.
The word w is accepted by a DFA if DFA has an accepting run
on w. Since A is deterministic,
:math:`w ∈ L(A)` if and only if :math:`ρ(s_0 , w) ∈ F` .
:param dict dfa: input DFA;
:param list word: list of actions ∈ dfa['alphabet'].
:return: *(bool)*, True if the word is accepted, False in the
other case.
"""
current_state = dfa['initial_state']
for action in word:
if (current_state, action) in dfa['transitions']:
current_state = dfa['transitions'][current_state, action]
else:
return False
if current_state in dfa['accepting_states']:
return True
else:
return False | [
"def",
"dfa_word_acceptance",
"(",
"dfa",
":",
"dict",
",",
"word",
":",
"list",
")",
"->",
"bool",
":",
"current_state",
"=",
"dfa",
"[",
"'initial_state'",
"]",
"for",
"action",
"in",
"word",
":",
"if",
"(",
"current_state",
",",
"action",
")",
"in",
"dfa",
"[",
"'transitions'",
"]",
":",
"current_state",
"=",
"dfa",
"[",
"'transitions'",
"]",
"[",
"current_state",
",",
"action",
"]",
"else",
":",
"return",
"False",
"if",
"current_state",
"in",
"dfa",
"[",
"'accepting_states'",
"]",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | 33.291667 | 0.001217 |
def inverse_lin_decay(max_step, min_value=0.01, step=None):
"""Inverse-decay linearly from 0.01 to 1.0 reached at max_step."""
if step is None:
step = tf.train.get_global_step()
if step is None:
return 1.0
step = to_float(step)
progress = tf.minimum(step / float(max_step), 1.0)
return progress * (1.0 - min_value) + min_value | [
"def",
"inverse_lin_decay",
"(",
"max_step",
",",
"min_value",
"=",
"0.01",
",",
"step",
"=",
"None",
")",
":",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"tf",
".",
"train",
".",
"get_global_step",
"(",
")",
"if",
"step",
"is",
"None",
":",
"return",
"1.0",
"step",
"=",
"to_float",
"(",
"step",
")",
"progress",
"=",
"tf",
".",
"minimum",
"(",
"step",
"/",
"float",
"(",
"max_step",
")",
",",
"1.0",
")",
"return",
"progress",
"*",
"(",
"1.0",
"-",
"min_value",
")",
"+",
"min_value"
] | 37.555556 | 0.020231 |