text (stringlengths 75–104k) | code_tokens (sequence) | avg_line_len (float64 7.91–980) | score (float64 0–0.18) |
---|---|---|---|
def _inverse_permutation_indices(positions):
"""Like inverse_permutation, but also handles slices.
Parameters
----------
positions : list of np.ndarray or slice objects.
If slice objects, all are assumed to be slices.
Returns
-------
np.ndarray of indices or None, if no permutation is necessary.
"""
if not positions:
return None
if isinstance(positions[0], slice):
positions = _consolidate_slices(positions)
if positions == slice(None):
return None
positions = [np.arange(sl.start, sl.stop, sl.step) for sl in positions]
indices = nputils.inverse_permutation(np.concatenate(positions))
return indices | [
"def",
"_inverse_permutation_indices",
"(",
"positions",
")",
":",
"if",
"not",
"positions",
":",
"return",
"None",
"if",
"isinstance",
"(",
"positions",
"[",
"0",
"]",
",",
"slice",
")",
":",
"positions",
"=",
"_consolidate_slices",
"(",
"positions",
")",
"if",
"positions",
"==",
"slice",
"(",
"None",
")",
":",
"return",
"None",
"positions",
"=",
"[",
"np",
".",
"arange",
"(",
"sl",
".",
"start",
",",
"sl",
".",
"stop",
",",
"sl",
".",
"step",
")",
"for",
"sl",
"in",
"positions",
"]",
"indices",
"=",
"nputils",
".",
"inverse_permutation",
"(",
"np",
".",
"concatenate",
"(",
"positions",
")",
")",
"return",
"indices"
] | 29.73913 | 0.001416 |
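The row above calls `nputils.inverse_permutation`, which is not shown. A minimal sketch of what such a helper typically does, assuming the standard definition (the inverse `q` of a permutation `p` satisfies `q[p[i]] == i`, obtainable via `np.argsort`):

```python
import numpy as np

def inverse_permutation(indices):
    # argsort of a permutation is its inverse: result[indices[i]] == i
    return np.argsort(indices)

p = np.array([2, 0, 3, 1])
q = inverse_permutation(p)
assert (q[p] == np.arange(4)).all()  # applying q undoes p
```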
def _next_partition(self, topic, key=None):
"""get the next partition to which to publish
Check with our client for the latest partitions for the topic, then
ask our partitioner for the next partition to which we should publish
    for the given key. If needed, create a new partitioner for the topic.
"""
# check if the client has metadata for the topic
while self.client.metadata_error_for_topic(topic):
# client doesn't have good metadata for topic. ask to fetch...
# check if we have request attempts left
if self._req_attempts >= self._max_attempts:
# No, no attempts left, so raise the error
_check_error(self.client.metadata_error_for_topic(topic))
yield self.client.load_metadata_for_topics(topic)
if not self.client.metadata_error_for_topic(topic):
break
self._req_attempts += 1
d = Deferred()
self.client.reactor.callLater(
self._retry_interval, d.callback, True)
self._retry_interval *= self.RETRY_INTERVAL_FACTOR
yield d
# Ok, should be safe to get the partitions now...
partitions = self.client.topic_partitions[topic]
# Do we have a partitioner for this topic already?
if topic not in self.partitioners:
        # No, create a new partitioner for topic, partitions
self.partitioners[topic] = \
self.partitioner_class(topic, partitions)
# Lookup the next partition
partition = self.partitioners[topic].partition(key, partitions)
returnValue(partition) | [
"def",
"_next_partition",
"(",
"self",
",",
"topic",
",",
"key",
"=",
"None",
")",
":",
"# check if the client has metadata for the topic",
"while",
"self",
".",
"client",
".",
"metadata_error_for_topic",
"(",
"topic",
")",
":",
"# client doesn't have good metadata for topic. ask to fetch...",
"# check if we have request attempts left",
"if",
"self",
".",
"_req_attempts",
">=",
"self",
".",
"_max_attempts",
":",
"# No, no attempts left, so raise the error",
"_check_error",
"(",
"self",
".",
"client",
".",
"metadata_error_for_topic",
"(",
"topic",
")",
")",
"yield",
"self",
".",
"client",
".",
"load_metadata_for_topics",
"(",
"topic",
")",
"if",
"not",
"self",
".",
"client",
".",
"metadata_error_for_topic",
"(",
"topic",
")",
":",
"break",
"self",
".",
"_req_attempts",
"+=",
"1",
"d",
"=",
"Deferred",
"(",
")",
"self",
".",
"client",
".",
"reactor",
".",
"callLater",
"(",
"self",
".",
"_retry_interval",
",",
"d",
".",
"callback",
",",
"True",
")",
"self",
".",
"_retry_interval",
"*=",
"self",
".",
"RETRY_INTERVAL_FACTOR",
"yield",
"d",
"# Ok, should be safe to get the partitions now...",
"partitions",
"=",
"self",
".",
"client",
".",
"topic_partitions",
"[",
"topic",
"]",
"# Do we have a partitioner for this topic already?",
"if",
"topic",
"not",
"in",
"self",
".",
"partitioners",
":",
"# No, create a new paritioner for topic, partitions",
"self",
".",
"partitioners",
"[",
"topic",
"]",
"=",
"self",
".",
"partitioner_class",
"(",
"topic",
",",
"partitions",
")",
"# Lookup the next partition",
"partition",
"=",
"self",
".",
"partitioners",
"[",
"topic",
"]",
".",
"partition",
"(",
"key",
",",
"partitions",
")",
"returnValue",
"(",
"partition",
")"
] | 48.676471 | 0.001185 |
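The retry loop above backs off between metadata fetches via `reactor.callLater`. The delay arithmetic in isolation (the initial interval and growth factor below are made-up values, not the class defaults):

```python
retry_interval = 0.1          # hypothetical initial delay, seconds
RETRY_INTERVAL_FACTOR = 1.2   # hypothetical growth factor

for attempt in range(5):
    print("attempt %d: wait %.3fs before the next fetch"
          % (attempt, retry_interval))
    retry_interval *= RETRY_INTERVAL_FACTOR
```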
def Builder(**kw):
"""A factory for builder objects."""
composite = None
if 'generator' in kw:
if 'action' in kw:
raise UserError("You must not specify both an action and a generator.")
kw['action'] = SCons.Action.CommandGeneratorAction(kw['generator'], {})
del kw['generator']
elif 'action' in kw:
source_ext_match = kw.get('source_ext_match', 1)
if 'source_ext_match' in kw:
del kw['source_ext_match']
if SCons.Util.is_Dict(kw['action']):
composite = DictCmdGenerator(kw['action'], source_ext_match)
kw['action'] = SCons.Action.CommandGeneratorAction(composite, {})
kw['src_suffix'] = composite.src_suffixes()
else:
kw['action'] = SCons.Action.Action(kw['action'])
if 'emitter' in kw:
emitter = kw['emitter']
if SCons.Util.is_String(emitter):
# This allows users to pass in an Environment
# variable reference (like "$FOO") as an emitter.
# We will look in that Environment variable for
# a callable to use as the actual emitter.
var = SCons.Util.get_environment_var(emitter)
if not var:
raise UserError("Supplied emitter '%s' does not appear to refer to an Environment variable" % emitter)
kw['emitter'] = EmitterProxy(var)
elif SCons.Util.is_Dict(emitter):
kw['emitter'] = DictEmitter(emitter)
elif SCons.Util.is_List(emitter):
kw['emitter'] = ListEmitter(emitter)
result = BuilderBase(**kw)
    if composite is not None:
result = CompositeBuilder(result, composite)
return result | [
"def",
"Builder",
"(",
"*",
"*",
"kw",
")",
":",
"composite",
"=",
"None",
"if",
"'generator'",
"in",
"kw",
":",
"if",
"'action'",
"in",
"kw",
":",
"raise",
"UserError",
"(",
"\"You must not specify both an action and a generator.\"",
")",
"kw",
"[",
"'action'",
"]",
"=",
"SCons",
".",
"Action",
".",
"CommandGeneratorAction",
"(",
"kw",
"[",
"'generator'",
"]",
",",
"{",
"}",
")",
"del",
"kw",
"[",
"'generator'",
"]",
"elif",
"'action'",
"in",
"kw",
":",
"source_ext_match",
"=",
"kw",
".",
"get",
"(",
"'source_ext_match'",
",",
"1",
")",
"if",
"'source_ext_match'",
"in",
"kw",
":",
"del",
"kw",
"[",
"'source_ext_match'",
"]",
"if",
"SCons",
".",
"Util",
".",
"is_Dict",
"(",
"kw",
"[",
"'action'",
"]",
")",
":",
"composite",
"=",
"DictCmdGenerator",
"(",
"kw",
"[",
"'action'",
"]",
",",
"source_ext_match",
")",
"kw",
"[",
"'action'",
"]",
"=",
"SCons",
".",
"Action",
".",
"CommandGeneratorAction",
"(",
"composite",
",",
"{",
"}",
")",
"kw",
"[",
"'src_suffix'",
"]",
"=",
"composite",
".",
"src_suffixes",
"(",
")",
"else",
":",
"kw",
"[",
"'action'",
"]",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"kw",
"[",
"'action'",
"]",
")",
"if",
"'emitter'",
"in",
"kw",
":",
"emitter",
"=",
"kw",
"[",
"'emitter'",
"]",
"if",
"SCons",
".",
"Util",
".",
"is_String",
"(",
"emitter",
")",
":",
"# This allows users to pass in an Environment",
"# variable reference (like \"$FOO\") as an emitter.",
"# We will look in that Environment variable for",
"# a callable to use as the actual emitter.",
"var",
"=",
"SCons",
".",
"Util",
".",
"get_environment_var",
"(",
"emitter",
")",
"if",
"not",
"var",
":",
"raise",
"UserError",
"(",
"\"Supplied emitter '%s' does not appear to refer to an Environment variable\"",
"%",
"emitter",
")",
"kw",
"[",
"'emitter'",
"]",
"=",
"EmitterProxy",
"(",
"var",
")",
"elif",
"SCons",
".",
"Util",
".",
"is_Dict",
"(",
"emitter",
")",
":",
"kw",
"[",
"'emitter'",
"]",
"=",
"DictEmitter",
"(",
"emitter",
")",
"elif",
"SCons",
".",
"Util",
".",
"is_List",
"(",
"emitter",
")",
":",
"kw",
"[",
"'emitter'",
"]",
"=",
"ListEmitter",
"(",
"emitter",
")",
"result",
"=",
"BuilderBase",
"(",
"*",
"*",
"kw",
")",
"if",
"not",
"composite",
"is",
"None",
":",
"result",
"=",
"CompositeBuilder",
"(",
"result",
",",
"composite",
")",
"return",
"result"
] | 40.682927 | 0.002342 |
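When `action` is a dict, the `DictCmdGenerator` used above dispatches on the source-file suffix. The dispatch idea in miniature (names and commands below are hypothetical, not SCons API):

```python
actions = {'.c': 'cc -c $SOURCE', '.f': 'f77 -c $SOURCE'}

def pick_action(source_name):
    # choose the command whose suffix matches the source file
    for suffix, command in actions.items():
        if source_name.endswith(suffix):
            return command
    raise ValueError('no action for %r' % source_name)

assert pick_action('main.c') == 'cc -c $SOURCE'
```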
def _remove_qualifiers(obj):
"""
    Remove all qualifiers from the input object, where the object may
    be a CIMInstance or CIMClass. Removes qualifiers from the object and
    from its properties, methods, and parameters.
This is used to process the IncludeQualifier parameter for classes
and instances
"""
assert isinstance(obj, (CIMInstance, CIMClass))
obj.qualifiers = NocaseDict()
for prop in obj.properties:
obj.properties[prop].qualifiers = NocaseDict()
if isinstance(obj, CIMClass):
for method in obj.methods:
obj.methods[method].qualifiers = NocaseDict()
for param in obj.methods[method].parameters:
obj.methods[method].parameters[param].qualifiers = \
NocaseDict() | [
"def",
"_remove_qualifiers",
"(",
"obj",
")",
":",
"assert",
"isinstance",
"(",
"obj",
",",
"(",
"CIMInstance",
",",
"CIMClass",
")",
")",
"obj",
".",
"qualifiers",
"=",
"NocaseDict",
"(",
")",
"for",
"prop",
"in",
"obj",
".",
"properties",
":",
"obj",
".",
"properties",
"[",
"prop",
"]",
".",
"qualifiers",
"=",
"NocaseDict",
"(",
")",
"if",
"isinstance",
"(",
"obj",
",",
"CIMClass",
")",
":",
"for",
"method",
"in",
"obj",
".",
"methods",
":",
"obj",
".",
"methods",
"[",
"method",
"]",
".",
"qualifiers",
"=",
"NocaseDict",
"(",
")",
"for",
"param",
"in",
"obj",
".",
"methods",
"[",
"method",
"]",
".",
"parameters",
":",
"obj",
".",
"methods",
"[",
"method",
"]",
".",
"parameters",
"[",
"param",
"]",
".",
"qualifiers",
"=",
"NocaseDict",
"(",
")"
] | 43.684211 | 0.002358 |
def taper(self, side='leftright'):
"""Taper the ends of this `TimeSeries` smoothly to zero.
Parameters
----------
side : `str`, optional
the side of the `TimeSeries` to taper, must be one of `'left'`,
`'right'`, or `'leftright'`
Returns
-------
out : `TimeSeries`
a copy of `self` tapered at one or both ends
Raises
------
ValueError
if `side` is not one of `('left', 'right', 'leftright')`
Examples
--------
To see the effect of the Planck-taper window, we can taper a
sinusoidal `TimeSeries` at both ends:
>>> import numpy
>>> from gwpy.timeseries import TimeSeries
>>> t = numpy.linspace(0, 1, 2048)
>>> series = TimeSeries(numpy.cos(10.5*numpy.pi*t), times=t)
>>> tapered = series.taper()
We can plot it to see how the ends now vary smoothly from 0 to 1:
>>> from gwpy.plot import Plot
>>> plot = Plot(series, tapered, separate=True, sharex=True)
>>> plot.show()
Notes
-----
The :meth:`TimeSeries.taper` automatically tapers from the second
stationary point (local maximum or minimum) on the specified side
of the input. However, the method will never taper more than half
the full width of the `TimeSeries`, and will fail if there are no
stationary points.
See :func:`~gwpy.signal.window.planck` for the generic Planck taper
window, and see :func:`scipy.signal.get_window` for other common
window formats.
"""
# check window properties
if side not in ('left', 'right', 'leftright'):
raise ValueError("side must be one of 'left', 'right', "
"or 'leftright'")
out = self.copy()
# identify the second stationary point away from each boundary,
# else default to half the TimeSeries width
nleft, nright = 0, 0
mini, = signal.argrelmin(out.value)
maxi, = signal.argrelmax(out.value)
if 'left' in side:
nleft = max(mini[0], maxi[0])
nleft = min(nleft, self.size/2)
if 'right' in side:
nright = out.size - min(mini[-1], maxi[-1])
nright = min(nright, self.size/2)
out *= planck(out.size, nleft=nleft, nright=nright)
return out | [
"def",
"taper",
"(",
"self",
",",
"side",
"=",
"'leftright'",
")",
":",
"# check window properties",
"if",
"side",
"not",
"in",
"(",
"'left'",
",",
"'right'",
",",
"'leftright'",
")",
":",
"raise",
"ValueError",
"(",
"\"side must be one of 'left', 'right', \"",
"\"or 'leftright'\"",
")",
"out",
"=",
"self",
".",
"copy",
"(",
")",
"# identify the second stationary point away from each boundary,",
"# else default to half the TimeSeries width",
"nleft",
",",
"nright",
"=",
"0",
",",
"0",
"mini",
",",
"=",
"signal",
".",
"argrelmin",
"(",
"out",
".",
"value",
")",
"maxi",
",",
"=",
"signal",
".",
"argrelmax",
"(",
"out",
".",
"value",
")",
"if",
"'left'",
"in",
"side",
":",
"nleft",
"=",
"max",
"(",
"mini",
"[",
"0",
"]",
",",
"maxi",
"[",
"0",
"]",
")",
"nleft",
"=",
"min",
"(",
"nleft",
",",
"self",
".",
"size",
"/",
"2",
")",
"if",
"'right'",
"in",
"side",
":",
"nright",
"=",
"out",
".",
"size",
"-",
"min",
"(",
"mini",
"[",
"-",
"1",
"]",
",",
"maxi",
"[",
"-",
"1",
"]",
")",
"nright",
"=",
"min",
"(",
"nright",
",",
"self",
".",
"size",
"/",
"2",
")",
"out",
"*=",
"planck",
"(",
"out",
".",
"size",
",",
"nleft",
"=",
"nleft",
",",
"nright",
"=",
"nright",
")",
"return",
"out"
] | 35.939394 | 0.000821 |
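The `planck` window applied on the last line is not shown. A sketch of its rising edge, assuming the standard Planck-taper definition (see `gwpy.signal.window.planck` for the real implementation):

```python
import numpy as np

def planck_ramp(nleft):
    # smooth, strictly increasing 0 -> 1 ramp; endpoints set explicitly
    n = np.arange(1, nleft)
    z = nleft / n - nleft / (nleft - n)
    ramp = 1.0 / (1.0 + np.exp(z))
    return np.concatenate(([0.0], ramp, [1.0]))

w = planck_ramp(8)
assert w[0] == 0.0 and w[-1] == 1.0 and np.all(np.diff(w) > 0)
```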
def on_data(self, ws, message, message_type, fin):
"""
Callback executed when message is received from the server.
:param ws: Websocket client
:param message: utf-8 string which we get from the server.
:param message_type: Message type which is either ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY
:param fin: continue flag. If 0, the data continues.
"""
try:
json_object = json.loads(message)
except Exception:
        self.on_error(ws, 'Unable to parse received message.')
        return
if 'error' in json_object:
# Only call on_error() if a real error occurred. The STT service sends
# {"error" : "No speech detected for 5s"} for valid timeouts, configured by
# options.inactivity_timeout
error = json_object['error']
if error.startswith(TIMEOUT_PREFIX):
self.callback.on_inactivity_timeout(error)
else:
self.on_error(ws, error)
# if uninitialized, receive the initialization response from the server
elif 'state' in json_object:
if not self.isListening:
self.isListening = True
self.callback.on_listening()
self.send_audio(ws)
else:
# close the connection
self.callback.on_close()
ws.close()
# if in streaming
elif 'results' in json_object or 'speaker_labels' in json_object:
hypothesis = ''
if 'results' in json_object:
hypothesis = json_object['results'][0]['alternatives'][0][
'transcript']
b_final = (json_object['results'][0]['final'] is True)
transcripts = self.extract_transcripts(
json_object['results'][0]['alternatives'])
if b_final:
self.callback.on_transcription(transcripts)
self.callback.on_hypothesis(hypothesis)
self.callback.on_data(json_object) | [
"def",
"on_data",
"(",
"self",
",",
"ws",
",",
"message",
",",
"message_type",
",",
"fin",
")",
":",
"try",
":",
"json_object",
"=",
"json",
".",
"loads",
"(",
"message",
")",
"except",
"Exception",
":",
"self",
".",
"on_error",
"(",
"ws",
",",
"'Unable to parse received message.'",
")",
"if",
"'error'",
"in",
"json_object",
":",
"# Only call on_error() if a real error occurred. The STT service sends",
"# {\"error\" : \"No speech detected for 5s\"} for valid timeouts, configured by",
"# options.inactivity_timeout",
"error",
"=",
"json_object",
"[",
"'error'",
"]",
"if",
"error",
".",
"startswith",
"(",
"TIMEOUT_PREFIX",
")",
":",
"self",
".",
"callback",
".",
"on_inactivity_timeout",
"(",
"error",
")",
"else",
":",
"self",
".",
"on_error",
"(",
"ws",
",",
"error",
")",
"# if uninitialized, receive the initialization response from the server",
"elif",
"'state'",
"in",
"json_object",
":",
"if",
"not",
"self",
".",
"isListening",
":",
"self",
".",
"isListening",
"=",
"True",
"self",
".",
"callback",
".",
"on_listening",
"(",
")",
"self",
".",
"send_audio",
"(",
"ws",
")",
"else",
":",
"# close the connection",
"self",
".",
"callback",
".",
"on_close",
"(",
")",
"ws",
".",
"close",
"(",
")",
"# if in streaming",
"elif",
"'results'",
"in",
"json_object",
"or",
"'speaker_labels'",
"in",
"json_object",
":",
"hypothesis",
"=",
"''",
"if",
"'results'",
"in",
"json_object",
":",
"hypothesis",
"=",
"json_object",
"[",
"'results'",
"]",
"[",
"0",
"]",
"[",
"'alternatives'",
"]",
"[",
"0",
"]",
"[",
"'transcript'",
"]",
"b_final",
"=",
"(",
"json_object",
"[",
"'results'",
"]",
"[",
"0",
"]",
"[",
"'final'",
"]",
"is",
"True",
")",
"transcripts",
"=",
"self",
".",
"extract_transcripts",
"(",
"json_object",
"[",
"'results'",
"]",
"[",
"0",
"]",
"[",
"'alternatives'",
"]",
")",
"if",
"b_final",
":",
"self",
".",
"callback",
".",
"on_transcription",
"(",
"transcripts",
")",
"self",
".",
"callback",
".",
"on_hypothesis",
"(",
"hypothesis",
")",
"self",
".",
"callback",
".",
"on_data",
"(",
"json_object",
")"
] | 39.803922 | 0.002404 |
def remove_wirevector(self, wirevector):
""" Remove a wirevector object to the block."""
self.wirevector_set.remove(wirevector)
del self.wirevector_by_name[wirevector.name] | [
"def",
"remove_wirevector",
"(",
"self",
",",
"wirevector",
")",
":",
"self",
".",
"wirevector_set",
".",
"remove",
"(",
"wirevector",
")",
"del",
"self",
".",
"wirevector_by_name",
"[",
"wirevector",
".",
"name",
"]"
] | 48.25 | 0.010204 |
def get_scrim(path=None, auto_write=None, shell=None, script=None, cache={}):
'''Get a :class:`Scrim` instance. Each instance is cached so if you call
get_scrim again with the same arguments you get the same instance.
See also:
:class:`Scrim`
'''
args = (path, auto_write, shell, script)
if args not in cache:
cache[args] = Scrim(*args)
return cache[args] | [
"def",
"get_scrim",
"(",
"path",
"=",
"None",
",",
"auto_write",
"=",
"None",
",",
"shell",
"=",
"None",
",",
"script",
"=",
"None",
",",
"cache",
"=",
"{",
"}",
")",
":",
"args",
"=",
"(",
"path",
",",
"auto_write",
",",
"shell",
",",
"script",
")",
"if",
"args",
"not",
"in",
"cache",
":",
"cache",
"[",
"args",
"]",
"=",
"Scrim",
"(",
"*",
"args",
")",
"return",
"cache",
"[",
"args",
"]"
] | 32.5 | 0.002494 |
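The `cache={}` default above is deliberate: a mutable default argument is created once, at function definition time, so every call shares the same dict. A standalone demonstration of the idiom:

```python
def cached(key, cache={}):
    # the same dict persists across calls, acting as a memo table
    if key not in cache:
        print('computing %r' % (key,))
        cache[key] = key.upper()
    return cache[key]

cached('scrim')   # prints: computing 'scrim'
cached('scrim')   # served from the shared dict, nothing printed
```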
def make_choice_validator(
choices, default_key=None, normalizer=None):
"""
Returns a callable that accepts the choices provided.
Choices should be provided as a list of 2-tuples, where the first
element is a string that should match user input (the key); the
second being the value associated with the key.
    The returned callable matches user input against the keys; on a
    complete match, the value associated with the matching key is
    returned. Partial (prefix) matches are supported.
If a default is provided, that value will be returned if the user
provided input is empty, i.e. the value that is mapped to the empty
string.
Finally, a normalizer function can be passed. This normalizes all
    keys and the value being validated.
"""
def normalize_all(_choices):
# normalize all the keys for easier comparison
if normalizer:
            _choices = [(normalizer(key), value) for key, value in _choices]
return _choices
choices = normalize_all(choices)
def choice_validator(value):
if normalizer:
value = normalizer(value)
if not value and default_key:
value = choices[default_key][0]
results = []
for choice, mapped in choices:
if value == choice:
return mapped
if choice.startswith(value):
results.append((choice, mapped))
if len(results) == 1:
return results[0][1]
elif not results:
raise ValueError('Invalid choice.')
else:
raise ValueError(
'Choice ambiguous between (%s)' % ', '.join(
k for k, v in normalize_all(results))
)
return choice_validator | [
"def",
"make_choice_validator",
"(",
"choices",
",",
"default_key",
"=",
"None",
",",
"normalizer",
"=",
"None",
")",
":",
"def",
"normalize_all",
"(",
"_choices",
")",
":",
"# normalize all the keys for easier comparison",
"if",
"normalizer",
":",
"_choices",
"=",
"[",
"(",
"normalizer",
"(",
"key",
")",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"choices",
"]",
"return",
"_choices",
"choices",
"=",
"normalize_all",
"(",
"choices",
")",
"def",
"choice_validator",
"(",
"value",
")",
":",
"if",
"normalizer",
":",
"value",
"=",
"normalizer",
"(",
"value",
")",
"if",
"not",
"value",
"and",
"default_key",
":",
"value",
"=",
"choices",
"[",
"default_key",
"]",
"[",
"0",
"]",
"results",
"=",
"[",
"]",
"for",
"choice",
",",
"mapped",
"in",
"choices",
":",
"if",
"value",
"==",
"choice",
":",
"return",
"mapped",
"if",
"choice",
".",
"startswith",
"(",
"value",
")",
":",
"results",
".",
"append",
"(",
"(",
"choice",
",",
"mapped",
")",
")",
"if",
"len",
"(",
"results",
")",
"==",
"1",
":",
"return",
"results",
"[",
"0",
"]",
"[",
"1",
"]",
"elif",
"not",
"results",
":",
"raise",
"ValueError",
"(",
"'Invalid choice.'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Choice ambiguous between (%s)'",
"%",
"', '",
".",
"join",
"(",
"k",
"for",
"k",
",",
"v",
"in",
"normalize_all",
"(",
"results",
")",
")",
")",
"return",
"choice_validator"
] | 33.254902 | 0.000573 |
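A hypothetical usage of the factory above (assuming `make_choice_validator` is in scope), showing exact and prefix matching through a normalizer:

```python
validator = make_choice_validator(
    [('yes', True), ('no', False)],
    normalizer=str.lower,
)
assert validator('YES') is True   # exact match after lower-casing
assert validator('n') is False    # unambiguous prefix match
```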
def default(self, obj):
"""If input object is an ndarray it will be converted into a dict
holding dtype, shape and the data, base64 encoded.
"""
if isinstance(obj, np.ndarray):
if obj.flags['C_CONTIGUOUS']:
obj_data = obj.data
else:
cont_obj = np.ascontiguousarray(obj)
assert(cont_obj.flags['C_CONTIGUOUS'])
obj_data = cont_obj.data
data_b64 = base64.b64encode(obj_data)
return dict(__ndarray__=data_b64,
dtype=str(obj.dtype),
shape=obj.shape)
elif isinstance(obj, np.generic):
return np.asscalar(obj)
# Let the base class default method raise the TypeError
    return json.JSONEncoder.default(self, obj)
"def",
"default",
"(",
"self",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"obj",
".",
"flags",
"[",
"'C_CONTIGUOUS'",
"]",
":",
"obj_data",
"=",
"obj",
".",
"data",
"else",
":",
"cont_obj",
"=",
"np",
".",
"ascontiguousarray",
"(",
"obj",
")",
"assert",
"(",
"cont_obj",
".",
"flags",
"[",
"'C_CONTIGUOUS'",
"]",
")",
"obj_data",
"=",
"cont_obj",
".",
"data",
"data_b64",
"=",
"base64",
".",
"b64encode",
"(",
"obj_data",
")",
"return",
"dict",
"(",
"__ndarray__",
"=",
"data_b64",
",",
"dtype",
"=",
"str",
"(",
"obj",
".",
"dtype",
")",
",",
"shape",
"=",
"obj",
".",
"shape",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"np",
".",
"generic",
")",
":",
"return",
"np",
".",
"asscalar",
"(",
"obj",
")",
"# Let the base class default method raise the TypeError",
"return",
"json",
".",
"JSONEncoder",
"(",
"self",
",",
"obj",
")"
] | 42.263158 | 0.002436 |
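A sketch of the matching decoder for the `__ndarray__` dicts produced above. The hook name is made up, and note that on Python 3 the `b64encode` payload would additionally need `.decode('ascii')` before JSON serialization:

```python
import base64

import numpy as np

def ndarray_hook(dct):
    # inverse of the encoder: rebuild the array from the base64 payload
    if isinstance(dct, dict) and '__ndarray__' in dct:
        data = base64.b64decode(dct['__ndarray__'])
        return np.frombuffer(data, dtype=dct['dtype']).reshape(dct['shape'])
    return dct
```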
def UV_H(Hg,gw):
"""
Constructs implications and intents based on H
gw = g width
Hg = H(g), g is the binary coding of the attribute set
    UV = all non-trivial (!V⊂U) implications U->V with U∪V closed; in ternary coding (1=V,2=U)
K = all closed sets
"""
lefts = set()
K = []
UV = []
p = Hwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Hg&pp:
y = istr(p,3,gw)
yy = y.replace('1','0')
if yy not in lefts:
if y.find('1') == -1:#y∈{0,2}^n
K.append(y)
else:
UV.append(y)
lefts.add(yy)
return (UV,K) | [
"def",
"UV_H",
"(",
"Hg",
",",
"gw",
")",
":",
"lefts",
"=",
"set",
"(",
")",
"K",
"=",
"[",
"]",
"UV",
"=",
"[",
"]",
"p",
"=",
"Hwidth",
"(",
"gw",
")",
"pp",
"=",
"2",
"**",
"p",
"while",
"p",
":",
"pp",
"=",
"pp",
">>",
"1",
"p",
"=",
"p",
"-",
"1",
"if",
"Hg",
"&",
"pp",
":",
"y",
"=",
"istr",
"(",
"p",
",",
"3",
",",
"gw",
")",
"yy",
"=",
"y",
".",
"replace",
"(",
"'1'",
",",
"'0'",
")",
"if",
"yy",
"not",
"in",
"lefts",
":",
"if",
"y",
".",
"find",
"(",
"'1'",
")",
"==",
"-",
"1",
":",
"#y∈{0,2}^n",
"K",
".",
"append",
"(",
"y",
")",
"else",
":",
"UV",
".",
"append",
"(",
"y",
")",
"lefts",
".",
"add",
"(",
"yy",
")",
"return",
"(",
"UV",
",",
"K",
")"
] | 26.076923 | 0.01707 |
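`istr` and `Hwidth` are not defined in this row. A plausible reading of `istr(i, b, w)` is "integer `i` rendered in base `b`, zero-padded to width `w`"; this is an assumption based on how `y` is consumed above:

```python
def istr(i, b, w):
    # hypothetical fixed-width base-b rendering
    digits = []
    for _ in range(w):
        i, r = divmod(i, b)
        digits.append(str(r))
    return ''.join(reversed(digits))

assert istr(5, 3, 4) == '0012'   # 5 == 1*3 + 2 in base 3
```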
def cli_check_normalize_args(args):
'''Check and normalize arguments
This function checks that basis set names, families, roles, etc, are
valid (and raise an exception if they aren't)
The original data passed to this function is not modified. A modified
copy is returned.
'''
args_keys = vars(args).keys() # What args we have
args_copy = copy.copy(args)
if 'data_dir' in args_keys:
args_copy.data_dir = _cli_check_data_dir(args.data_dir)
if 'basis' in args:
args_copy.basis = _cli_check_basis(args.basis, args.data_dir)
if 'basis1' in args_keys:
args_copy.basis1 = _cli_check_basis(args.basis1, args.data_dir)
if 'basis2' in args_keys:
args_copy.basis2 = _cli_check_basis(args.basis2, args.data_dir)
if 'fmt' in args_keys:
args_copy.fmt = _cli_check_format(args.fmt)
if 'reffmt' in args_keys:
args_copy.reffmt = _cli_check_ref_format(args.reffmt)
if 'role' in args_keys:
args_copy.role = _cli_check_role(args.role)
if 'family' in args_keys:
args_copy.family = _cli_check_family(args.family, args.data_dir)
if 'readfmt1' in args_keys:
args_copy.readfmt1 = _cli_check_readfmt(args.readfmt1)
if 'readfmt2' in args_keys:
args_copy.readfmt2 = _cli_check_readfmt(args.readfmt2)
return args_copy | [
"def",
"cli_check_normalize_args",
"(",
"args",
")",
":",
"args_keys",
"=",
"vars",
"(",
"args",
")",
".",
"keys",
"(",
")",
"# What args we have",
"args_copy",
"=",
"copy",
".",
"copy",
"(",
"args",
")",
"if",
"'data_dir'",
"in",
"args_keys",
":",
"args_copy",
".",
"data_dir",
"=",
"_cli_check_data_dir",
"(",
"args",
".",
"data_dir",
")",
"if",
"'basis'",
"in",
"args",
":",
"args_copy",
".",
"basis",
"=",
"_cli_check_basis",
"(",
"args",
".",
"basis",
",",
"args",
".",
"data_dir",
")",
"if",
"'basis1'",
"in",
"args_keys",
":",
"args_copy",
".",
"basis1",
"=",
"_cli_check_basis",
"(",
"args",
".",
"basis1",
",",
"args",
".",
"data_dir",
")",
"if",
"'basis2'",
"in",
"args_keys",
":",
"args_copy",
".",
"basis2",
"=",
"_cli_check_basis",
"(",
"args",
".",
"basis2",
",",
"args",
".",
"data_dir",
")",
"if",
"'fmt'",
"in",
"args_keys",
":",
"args_copy",
".",
"fmt",
"=",
"_cli_check_format",
"(",
"args",
".",
"fmt",
")",
"if",
"'reffmt'",
"in",
"args_keys",
":",
"args_copy",
".",
"reffmt",
"=",
"_cli_check_ref_format",
"(",
"args",
".",
"reffmt",
")",
"if",
"'role'",
"in",
"args_keys",
":",
"args_copy",
".",
"role",
"=",
"_cli_check_role",
"(",
"args",
".",
"role",
")",
"if",
"'family'",
"in",
"args_keys",
":",
"args_copy",
".",
"family",
"=",
"_cli_check_family",
"(",
"args",
".",
"family",
",",
"args",
".",
"data_dir",
")",
"if",
"'readfmt1'",
"in",
"args_keys",
":",
"args_copy",
".",
"readfmt1",
"=",
"_cli_check_readfmt",
"(",
"args",
".",
"readfmt1",
")",
"if",
"'readfmt2'",
"in",
"args_keys",
":",
"args_copy",
".",
"readfmt2",
"=",
"_cli_check_readfmt",
"(",
"args",
".",
"readfmt2",
")",
"return",
"args_copy"
] | 40.242424 | 0.000735 |
def to_str(obj, encoding='utf-8', **encode_args):
r"""
Returns a ``str`` of ``obj``, encoding using ``encoding`` if necessary. For
example::
>>> some_str = b"\xff"
>>> some_unicode = u"\u1234"
>>> some_exception = Exception(u'Error: ' + some_unicode)
>>> r(to_str(some_str))
b'\xff'
>>> r(to_str(some_unicode))
b'\xe1\x88\xb4'
>>> r(to_str(some_exception))
b'Error: \xe1\x88\xb4'
>>> r(to_str([42]))
b'[42]'
See source code for detailed semantics.
"""
# Note: On py3, ``b'x'.__str__()`` returns ``"b'x'"``, so we need to do the
# explicit check first.
if isinstance(obj, binary_type):
return obj
# We coerce to unicode if '__unicode__' is available because there is no
# way to specify encoding when calling ``str(obj)``, so, eg,
# ``str(Exception(u'\u1234'))`` will explode.
if isinstance(obj, text_type) or hasattr(obj, text_type_magicmethod):
# Note: unicode(u'foo') is O(1) (by experimentation)
return text_type(obj).encode(encoding, **encode_args)
return binary_type(obj) | [
"def",
"to_str",
"(",
"obj",
",",
"encoding",
"=",
"'utf-8'",
",",
"*",
"*",
"encode_args",
")",
":",
"# Note: On py3, ``b'x'.__str__()`` returns ``\"b'x'\"``, so we need to do the",
"# explicit check first.",
"if",
"isinstance",
"(",
"obj",
",",
"binary_type",
")",
":",
"return",
"obj",
"# We coerce to unicode if '__unicode__' is available because there is no",
"# way to specify encoding when calling ``str(obj)``, so, eg,",
"# ``str(Exception(u'\\u1234'))`` will explode.",
"if",
"isinstance",
"(",
"obj",
",",
"text_type",
")",
"or",
"hasattr",
"(",
"obj",
",",
"text_type_magicmethod",
")",
":",
"# Note: unicode(u'foo') is O(1) (by experimentation)",
"return",
"text_type",
"(",
"obj",
")",
".",
"encode",
"(",
"encoding",
",",
"*",
"*",
"encode_args",
")",
"return",
"binary_type",
"(",
"obj",
")"
] | 34.78125 | 0.000874 |
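On Python 3 the six-style aliases used above would plausibly be bound as follows (an assumption; the row does not show them), after which `to_str` round-trips text through UTF-8:

```python
binary_type = bytes
text_type = str
text_type_magicmethod = '__str__'   # '__unicode__' on Python 2

# assuming to_str from the row above is in scope:
assert to_str(u'\u1234') == b'\xe1\x88\xb4'
assert to_str(b'\xff') == b'\xff'
```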
def ranking_game(n, steps=10, random_state=None):
"""
Return a NormalFormGame instance of (the 2-player version of) the
"ranking game" studied by Goldberg et al. (2013), where each player
chooses an effort level associated with a score and a cost which are
both increasing functions with randomly generated step sizes. The
player with the higher score wins the first prize, whose value is 1,
and the other player obtains the "second prize" of value 0; in the
case of a tie, the first prize is split and each player receives a
value of 0.5. The payoff of a player is given by the value of the
prize minus the cost of the effort.
Parameters
----------
n : scalar(int)
Number of actions, i.e, number of possible effort levels.
steps : scalar(int), optional(default=10)
Parameter determining the upper bound for the size of the random
steps for the scores and costs for each player: The step sizes
for the scores are drawn from `1`, ..., `steps`, while those for
the costs are multiples of `1/(n*steps)`, where the cost of
effort level `0` is 0, and the maximum possible cost of effort
level `n-1` is less than or equal to 1.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = ranking_game(5, random_state=1234)
>>> g.players[0]
Player([[ 0. , 0. , 0. , 0. , 0. ],
[ 0.82, -0.18, -0.18, -0.18, -0.18],
[ 0.8 , 0.8 , -0.2 , -0.2 , -0.2 ],
[ 0.68, 0.68, 0.68, -0.32, -0.32],
[ 0.66, 0.66, 0.66, 0.66, -0.34]])
>>> g.players[1]
Player([[ 1. , 0. , 0. , 0. , 0. ],
[ 0.8 , 0.8 , -0.2 , -0.2 , -0.2 ],
[ 0.66, 0.66, 0.66, -0.34, -0.34],
[ 0.6 , 0.6 , 0.6 , 0.6 , -0.4 ],
[ 0.58, 0.58, 0.58, 0.58, 0.58]])
"""
payoff_arrays = tuple(np.empty((n, n)) for i in range(2))
random_state = check_random_state(random_state)
scores = random_state.randint(1, steps+1, size=(2, n))
scores.cumsum(axis=1, out=scores)
costs = np.empty((2, n-1))
costs[:] = random_state.randint(1, steps+1, size=(2, n-1))
costs.cumsum(axis=1, out=costs)
costs[:] /= (n * steps)
_populate_ranking_payoff_arrays(payoff_arrays, scores, costs)
g = NormalFormGame(
[Player(payoff_array) for payoff_array in payoff_arrays]
)
return g | [
"def",
"ranking_game",
"(",
"n",
",",
"steps",
"=",
"10",
",",
"random_state",
"=",
"None",
")",
":",
"payoff_arrays",
"=",
"tuple",
"(",
"np",
".",
"empty",
"(",
"(",
"n",
",",
"n",
")",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
")",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"scores",
"=",
"random_state",
".",
"randint",
"(",
"1",
",",
"steps",
"+",
"1",
",",
"size",
"=",
"(",
"2",
",",
"n",
")",
")",
"scores",
".",
"cumsum",
"(",
"axis",
"=",
"1",
",",
"out",
"=",
"scores",
")",
"costs",
"=",
"np",
".",
"empty",
"(",
"(",
"2",
",",
"n",
"-",
"1",
")",
")",
"costs",
"[",
":",
"]",
"=",
"random_state",
".",
"randint",
"(",
"1",
",",
"steps",
"+",
"1",
",",
"size",
"=",
"(",
"2",
",",
"n",
"-",
"1",
")",
")",
"costs",
".",
"cumsum",
"(",
"axis",
"=",
"1",
",",
"out",
"=",
"costs",
")",
"costs",
"[",
":",
"]",
"/=",
"(",
"n",
"*",
"steps",
")",
"_populate_ranking_payoff_arrays",
"(",
"payoff_arrays",
",",
"scores",
",",
"costs",
")",
"g",
"=",
"NormalFormGame",
"(",
"[",
"Player",
"(",
"payoff_array",
")",
"for",
"payoff_array",
"in",
"payoff_arrays",
"]",
")",
"return",
"g"
] | 40.242424 | 0.000368 |
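The score construction above in isolation: random positive step sizes become strictly increasing scores via an in-place cumulative sum (the seed below is a throwaway):

```python
import numpy as np

rng = np.random.RandomState(1234)
steps = 10
scores = rng.randint(1, steps + 1, size=(2, 5))
scores.cumsum(axis=1, out=scores)
assert (np.diff(scores, axis=1) > 0).all()   # each row strictly increases
```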
def _handle_read_chunk(self):
"""Some data can be read"""
new_data = b''
buffer_length = len(self.read_buffer)
try:
while buffer_length < self.MAX_BUFFER_SIZE:
try:
piece = self.recv(4096)
except OSError as e:
if e.errno == errno.EAGAIN:
# End of the available data
break
elif e.errno == errno.EIO and new_data:
# Hopefully we could read an error message before the
# actual termination
break
else:
raise
if not piece:
# A closed connection is indicated by signaling a read
# condition, and having recv() return 0.
break
new_data += piece
buffer_length += len(piece)
finally:
new_data = new_data.replace(b'\r', b'\n')
self.read_buffer += new_data
return new_data | [
"def",
"_handle_read_chunk",
"(",
"self",
")",
":",
"new_data",
"=",
"b''",
"buffer_length",
"=",
"len",
"(",
"self",
".",
"read_buffer",
")",
"try",
":",
"while",
"buffer_length",
"<",
"self",
".",
"MAX_BUFFER_SIZE",
":",
"try",
":",
"piece",
"=",
"self",
".",
"recv",
"(",
"4096",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EAGAIN",
":",
"# End of the available data",
"break",
"elif",
"e",
".",
"errno",
"==",
"errno",
".",
"EIO",
"and",
"new_data",
":",
"# Hopefully we could read an error message before the",
"# actual termination",
"break",
"else",
":",
"raise",
"if",
"not",
"piece",
":",
"# A closed connection is indicated by signaling a read",
"# condition, and having recv() return 0.",
"break",
"new_data",
"+=",
"piece",
"buffer_length",
"+=",
"len",
"(",
"piece",
")",
"finally",
":",
"new_data",
"=",
"new_data",
".",
"replace",
"(",
"b'\\r'",
",",
"b'\\n'",
")",
"self",
".",
"read_buffer",
"+=",
"new_data",
"return",
"new_data"
] | 34.935484 | 0.001797 |
def guess_file_name_stream_type_header(args):
"""
Guess filename, file stream, file type, file header from args.
    :param args: may be a string (filepath), 2-tuple (filename, fileobj), 3-tuple (filename, fileobj,
        content type) or 4-tuple (filename, fileobj, content type, custom_headers).
:return: filename, file stream, file type, file header
"""
ftype = None
fheader = None
if isinstance(args, (tuple, list)):
if len(args) == 2:
fname, fstream = args
elif len(args) == 3:
fname, fstream, ftype = args
else:
fname, fstream, ftype, fheader = args
else:
fname, fstream = guess_filename_stream(args)
ftype = guess_content_type(fname)
if isinstance(fstream, (str, bytes, bytearray)):
fdata = fstream
else:
fdata = fstream.read()
return fname, fdata, ftype, fheader | [
"def",
"guess_file_name_stream_type_header",
"(",
"args",
")",
":",
"ftype",
"=",
"None",
"fheader",
"=",
"None",
"if",
"isinstance",
"(",
"args",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"2",
":",
"fname",
",",
"fstream",
"=",
"args",
"elif",
"len",
"(",
"args",
")",
"==",
"3",
":",
"fname",
",",
"fstream",
",",
"ftype",
"=",
"args",
"else",
":",
"fname",
",",
"fstream",
",",
"ftype",
",",
"fheader",
"=",
"args",
"else",
":",
"fname",
",",
"fstream",
"=",
"guess_filename_stream",
"(",
"args",
")",
"ftype",
"=",
"guess_content_type",
"(",
"fname",
")",
"if",
"isinstance",
"(",
"fstream",
",",
"(",
"str",
",",
"bytes",
",",
"bytearray",
")",
")",
":",
"fdata",
"=",
"fstream",
"else",
":",
"fdata",
"=",
"fstream",
".",
"read",
"(",
")",
"return",
"fname",
",",
"fdata",
",",
"ftype",
",",
"fheader"
] | 33.730769 | 0.002217 |
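Hypothetical usage of the helper above (assuming it is in scope), passing a 3-tuple with an in-memory stream:

```python
import io

name, data, ftype, fheader = guess_file_name_stream_type_header(
    ('report.csv', io.BytesIO(b'a,b\n1,2\n'), 'text/csv'))
assert (name, ftype, fheader) == ('report.csv', 'text/csv', None)
assert data == b'a,b\n1,2\n'
```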
def isNumber(self, value):
"""
Validate whether a value is a number or not
"""
try:
str(value)
float(value)
return True
except ValueError:
return False | [
"def",
"isNumber",
"(",
"self",
",",
"value",
")",
":",
"try",
":",
"str",
"(",
"value",
")",
"float",
"(",
"value",
")",
"return",
"True",
"except",
"ValueError",
":",
"return",
"False"
] | 20.909091 | 0.008333 |
def get_all_tasks(conf):
"""Returns a list with every task registred on Hamster.
"""
db = HamsterDB(conf)
fact_list = db.all_facts_id
security_days = int(conf.get_option('tasks.security_days'))
today = datetime.today()
tasks = {}
for fact_id in fact_list:
ht = HamsterTask(fact_id, conf, db)
if ht.end_time:
end_time = ht.get_object_dates()[1]
if today - timedelta(security_days) <= end_time:
rt = ht.get_remote_task()
tasks[rt.task_id] = rt
db.close_connection()
    print('Obtained %d tasks' % len(tasks))
return tasks | [
"def",
"get_all_tasks",
"(",
"conf",
")",
":",
"db",
"=",
"HamsterDB",
"(",
"conf",
")",
"fact_list",
"=",
"db",
".",
"all_facts_id",
"security_days",
"=",
"int",
"(",
"conf",
".",
"get_option",
"(",
"'tasks.security_days'",
")",
")",
"today",
"=",
"datetime",
".",
"today",
"(",
")",
"tasks",
"=",
"{",
"}",
"for",
"fact_id",
"in",
"fact_list",
":",
"ht",
"=",
"HamsterTask",
"(",
"fact_id",
",",
"conf",
",",
"db",
")",
"if",
"ht",
".",
"end_time",
":",
"end_time",
"=",
"ht",
".",
"get_object_dates",
"(",
")",
"[",
"1",
"]",
"if",
"today",
"-",
"timedelta",
"(",
"security_days",
")",
"<=",
"end_time",
":",
"rt",
"=",
"ht",
".",
"get_remote_task",
"(",
")",
"tasks",
"[",
"rt",
".",
"task_id",
"]",
"=",
"rt",
"db",
".",
"close_connection",
"(",
")",
"print",
"'Obtained %d tasks'",
"%",
"len",
"(",
"tasks",
")",
"return",
"tasks"
] | 25.583333 | 0.00157 |
def purge_key(surrogate_key, service_id, api_key):
"""Instant purge URLs with a given surrogate key from the Fastly caches.
Parameters
----------
surrogate_key : `str`
Surrogate key header (``x-amz-meta-surrogate-key``) value of objects
to purge from the Fastly cache.
service_id : `str`
Fastly service ID.
api_key : `str`
Fastly API key.
Raises
------
FastlyError
Error with the Fastly API usage.
Notes
-----
This function uses Fastly's ``/service/{service}/purge/{key}`` endpoint.
See the `Fastly Purge documentation <http://ls.st/jxg>`_ for more
information.
For other Fastly APIs, consider using `fastly-py
<https://github.com/fastly/fastly-py>`_.
"""
logger = logging.getLogger(__name__)
api_root = 'https://api.fastly.com'
path = '/service/{service}/purge/{surrogate_key}'.format(
service=service_id,
surrogate_key=surrogate_key)
logger.info('Fastly purge {0}'.format(path))
r = requests.post(api_root + path,
headers={'Fastly-Key': api_key,
'Accept': 'application/json'})
if r.status_code != 200:
        raise FastlyError(r.json())
"def",
"purge_key",
"(",
"surrogate_key",
",",
"service_id",
",",
"api_key",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"api_root",
"=",
"'https://api.fastly.com'",
"path",
"=",
"'/service/{service}/purge/{surrogate_key}'",
".",
"format",
"(",
"service",
"=",
"service_id",
",",
"surrogate_key",
"=",
"surrogate_key",
")",
"logger",
".",
"info",
"(",
"'Fastly purge {0}'",
".",
"format",
"(",
"path",
")",
")",
"r",
"=",
"requests",
".",
"post",
"(",
"api_root",
"+",
"path",
",",
"headers",
"=",
"{",
"'Fastly-Key'",
":",
"api_key",
",",
"'Accept'",
":",
"'application/json'",
"}",
")",
"if",
"r",
".",
"status_code",
"!=",
"200",
":",
"raise",
"FastlyError",
"(",
"r",
".",
"json",
")"
] | 30.871795 | 0.000805 |
def handler(self):
'Parametrized handler function'
return ft.partial(self.base.handler, parameter=self.parameter)\
if self.parameter else self.base.handler | [
"def",
"handler",
"(",
"self",
")",
":",
"return",
"ft",
".",
"partial",
"(",
"self",
".",
"base",
".",
"handler",
",",
"parameter",
"=",
"self",
".",
"parameter",
")",
"if",
"self",
".",
"parameter",
"else",
"self",
".",
"base",
".",
"handler"
] | 39.75 | 0.030864 |
def as_dictionary(self, is_proof=True):
"""
Return the DDO as a JSON dict.
    :param is_proof: if False then do not include the 'proof' element.
:return: dict
"""
if self._created is None:
self._created = DDO._get_timestamp()
data = {
'@context': DID_DDO_CONTEXT_URL,
'id': self._did,
'created': self._created,
}
if self._public_keys:
values = []
for public_key in self._public_keys:
values.append(public_key.as_dictionary())
data['publicKey'] = values
if self._authentications:
values = []
for authentication in self._authentications:
values.append(authentication)
data['authentication'] = values
if self._services:
values = []
for service in self._services:
values.append(service.as_dictionary())
data['service'] = values
if self._proof and is_proof:
data['proof'] = self._proof
return data | [
"def",
"as_dictionary",
"(",
"self",
",",
"is_proof",
"=",
"True",
")",
":",
"if",
"self",
".",
"_created",
"is",
"None",
":",
"self",
".",
"_created",
"=",
"DDO",
".",
"_get_timestamp",
"(",
")",
"data",
"=",
"{",
"'@context'",
":",
"DID_DDO_CONTEXT_URL",
",",
"'id'",
":",
"self",
".",
"_did",
",",
"'created'",
":",
"self",
".",
"_created",
",",
"}",
"if",
"self",
".",
"_public_keys",
":",
"values",
"=",
"[",
"]",
"for",
"public_key",
"in",
"self",
".",
"_public_keys",
":",
"values",
".",
"append",
"(",
"public_key",
".",
"as_dictionary",
"(",
")",
")",
"data",
"[",
"'publicKey'",
"]",
"=",
"values",
"if",
"self",
".",
"_authentications",
":",
"values",
"=",
"[",
"]",
"for",
"authentication",
"in",
"self",
".",
"_authentications",
":",
"values",
".",
"append",
"(",
"authentication",
")",
"data",
"[",
"'authentication'",
"]",
"=",
"values",
"if",
"self",
".",
"_services",
":",
"values",
"=",
"[",
"]",
"for",
"service",
"in",
"self",
".",
"_services",
":",
"values",
".",
"append",
"(",
"service",
".",
"as_dictionary",
"(",
")",
")",
"data",
"[",
"'service'",
"]",
"=",
"values",
"if",
"self",
".",
"_proof",
"and",
"is_proof",
":",
"data",
"[",
"'proof'",
"]",
"=",
"self",
".",
"_proof",
"return",
"data"
] | 31.823529 | 0.001794 |
def reduce_by(self, package_request):
"""Reduce this scope wrt a package request.
Returns:
A (_PackageScope, [Reduction]) tuple, where the scope is a new
scope copy with reductions applied, or self if there were no
reductions, or None if the scope was completely reduced.
"""
self.solver.reduction_broad_tests_count += 1
if self.package_request.conflict:
# conflict scopes don't reduce. Instead, other scopes will be
# reduced against a conflict scope.
return (self, [])
# perform the reduction
new_slice, reductions = self.variant_slice.reduce_by(package_request)
# there was total reduction
if new_slice is None:
self.solver.reductions_count += 1
if self.pr:
reqstr = _short_req_str(package_request)
self.pr("%s was reduced to nothing by %s", self, reqstr)
self.pr.br()
return (None, reductions)
# there was some reduction
if new_slice is not self.variant_slice:
self.solver.reductions_count += 1
scope = self._copy(new_slice)
if self.pr:
reqstr = _short_req_str(package_request)
self.pr("%s was reduced to %s by %s", self, scope, reqstr)
self.pr.br()
return (scope, reductions)
# there was no reduction
return (self, []) | [
"def",
"reduce_by",
"(",
"self",
",",
"package_request",
")",
":",
"self",
".",
"solver",
".",
"reduction_broad_tests_count",
"+=",
"1",
"if",
"self",
".",
"package_request",
".",
"conflict",
":",
"# conflict scopes don't reduce. Instead, other scopes will be",
"# reduced against a conflict scope.",
"return",
"(",
"self",
",",
"[",
"]",
")",
"# perform the reduction",
"new_slice",
",",
"reductions",
"=",
"self",
".",
"variant_slice",
".",
"reduce_by",
"(",
"package_request",
")",
"# there was total reduction",
"if",
"new_slice",
"is",
"None",
":",
"self",
".",
"solver",
".",
"reductions_count",
"+=",
"1",
"if",
"self",
".",
"pr",
":",
"reqstr",
"=",
"_short_req_str",
"(",
"package_request",
")",
"self",
".",
"pr",
"(",
"\"%s was reduced to nothing by %s\"",
",",
"self",
",",
"reqstr",
")",
"self",
".",
"pr",
".",
"br",
"(",
")",
"return",
"(",
"None",
",",
"reductions",
")",
"# there was some reduction",
"if",
"new_slice",
"is",
"not",
"self",
".",
"variant_slice",
":",
"self",
".",
"solver",
".",
"reductions_count",
"+=",
"1",
"scope",
"=",
"self",
".",
"_copy",
"(",
"new_slice",
")",
"if",
"self",
".",
"pr",
":",
"reqstr",
"=",
"_short_req_str",
"(",
"package_request",
")",
"self",
".",
"pr",
"(",
"\"%s was reduced to %s by %s\"",
",",
"self",
",",
"scope",
",",
"reqstr",
")",
"self",
".",
"pr",
".",
"br",
"(",
")",
"return",
"(",
"scope",
",",
"reductions",
")",
"# there was no reduction",
"return",
"(",
"self",
",",
"[",
"]",
")"
] | 34.452381 | 0.001344 |
def create_task_log(self, task_id, case_task_log):
"""
:param task_id: Task identifier
:param case_task_log: TheHive log
:type case_task_log: CaseTaskLog defined in models.py
:return: TheHive log
:rtype: json
"""
req = self.url + "/api/case/task/{}/log".format(task_id)
data = {'_json': json.dumps({"message":case_task_log.message})}
if case_task_log.file:
f = {'attachment': (os.path.basename(case_task_log.file), open(case_task_log.file, 'rb'), magic.Magic(mime=True).from_file(case_task_log.file))}
try:
return requests.post(req, data=data,files=f, proxies=self.proxies, auth=self.auth, verify=self.cert)
except requests.exceptions.RequestException as e:
raise CaseTaskException("Case task log create error: {}".format(e))
else:
try:
return requests.post(req, headers={'Content-Type': 'application/json'}, data=json.dumps({'message':case_task_log.message}), proxies=self.proxies, auth=self.auth, verify=self.cert)
except requests.exceptions.RequestException as e:
raise CaseTaskException("Case task log create error: {}".format(e)) | [
"def",
"create_task_log",
"(",
"self",
",",
"task_id",
",",
"case_task_log",
")",
":",
"req",
"=",
"self",
".",
"url",
"+",
"\"/api/case/task/{}/log\"",
".",
"format",
"(",
"task_id",
")",
"data",
"=",
"{",
"'_json'",
":",
"json",
".",
"dumps",
"(",
"{",
"\"message\"",
":",
"case_task_log",
".",
"message",
"}",
")",
"}",
"if",
"case_task_log",
".",
"file",
":",
"f",
"=",
"{",
"'attachment'",
":",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"case_task_log",
".",
"file",
")",
",",
"open",
"(",
"case_task_log",
".",
"file",
",",
"'rb'",
")",
",",
"magic",
".",
"Magic",
"(",
"mime",
"=",
"True",
")",
".",
"from_file",
"(",
"case_task_log",
".",
"file",
")",
")",
"}",
"try",
":",
"return",
"requests",
".",
"post",
"(",
"req",
",",
"data",
"=",
"data",
",",
"files",
"=",
"f",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"auth",
"=",
"self",
".",
"auth",
",",
"verify",
"=",
"self",
".",
"cert",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"e",
":",
"raise",
"CaseTaskException",
"(",
"\"Case task log create error: {}\"",
".",
"format",
"(",
"e",
")",
")",
"else",
":",
"try",
":",
"return",
"requests",
".",
"post",
"(",
"req",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"'message'",
":",
"case_task_log",
".",
"message",
"}",
")",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"auth",
"=",
"self",
".",
"auth",
",",
"verify",
"=",
"self",
".",
"cert",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"e",
":",
"raise",
"CaseTaskException",
"(",
"\"Case task log create error: {}\"",
".",
"format",
"(",
"e",
")",
")"
] | 51.083333 | 0.008006 |
def fbank(wav_path, flat=True):
""" Currently grabs log Mel filterbank, deltas and double deltas."""
(rate, sig) = wav.read(wav_path)
if len(sig) == 0:
logger.warning("Empty wav: {}".format(wav_path))
fbank_feat = python_speech_features.logfbank(sig, rate, nfilt=40)
energy = extract_energy(rate, sig)
feat = np.hstack([energy, fbank_feat])
delta_feat = python_speech_features.delta(feat, 2)
delta_delta_feat = python_speech_features.delta(delta_feat, 2)
all_feats = [feat, delta_feat, delta_delta_feat]
if not flat:
all_feats = np.array(all_feats)
# Make time the first dimension for easy length normalization padding
# later.
all_feats = np.swapaxes(all_feats, 0, 1)
all_feats = np.swapaxes(all_feats, 1, 2)
else:
all_feats = np.concatenate(all_feats, axis=1)
# Log Mel Filterbank, with delta, and double delta
feat_fn = wav_path[:-3] + "fbank.npy"
np.save(feat_fn, all_feats) | [
"def",
"fbank",
"(",
"wav_path",
",",
"flat",
"=",
"True",
")",
":",
"(",
"rate",
",",
"sig",
")",
"=",
"wav",
".",
"read",
"(",
"wav_path",
")",
"if",
"len",
"(",
"sig",
")",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"\"Empty wav: {}\"",
".",
"format",
"(",
"wav_path",
")",
")",
"fbank_feat",
"=",
"python_speech_features",
".",
"logfbank",
"(",
"sig",
",",
"rate",
",",
"nfilt",
"=",
"40",
")",
"energy",
"=",
"extract_energy",
"(",
"rate",
",",
"sig",
")",
"feat",
"=",
"np",
".",
"hstack",
"(",
"[",
"energy",
",",
"fbank_feat",
"]",
")",
"delta_feat",
"=",
"python_speech_features",
".",
"delta",
"(",
"feat",
",",
"2",
")",
"delta_delta_feat",
"=",
"python_speech_features",
".",
"delta",
"(",
"delta_feat",
",",
"2",
")",
"all_feats",
"=",
"[",
"feat",
",",
"delta_feat",
",",
"delta_delta_feat",
"]",
"if",
"not",
"flat",
":",
"all_feats",
"=",
"np",
".",
"array",
"(",
"all_feats",
")",
"# Make time the first dimension for easy length normalization padding",
"# later.",
"all_feats",
"=",
"np",
".",
"swapaxes",
"(",
"all_feats",
",",
"0",
",",
"1",
")",
"all_feats",
"=",
"np",
".",
"swapaxes",
"(",
"all_feats",
",",
"1",
",",
"2",
")",
"else",
":",
"all_feats",
"=",
"np",
".",
"concatenate",
"(",
"all_feats",
",",
"axis",
"=",
"1",
")",
"# Log Mel Filterbank, with delta, and double delta",
"feat_fn",
"=",
"wav_path",
"[",
":",
"-",
"3",
"]",
"+",
"\"fbank.npy\"",
"np",
".",
"save",
"(",
"feat_fn",
",",
"all_feats",
")"
] | 40.375 | 0.001008 |
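`extract_energy` is referenced above but not shown. One sketch consistent with the `hstack` call (per-frame log energy as a column vector; an assumption, not the project's actual helper):

```python
import numpy as np
import python_speech_features

def extract_energy(rate, sig):
    # per-frame total energy from the filterbank computation, as a column
    _, energy = python_speech_features.fbank(sig, rate)
    return np.log(energy).reshape(-1, 1)
```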
def delete_record(table, sys_id):
'''
Delete an existing record
:param table: The table name, e.g. sys_user
:type table: ``str``
:param sys_id: The unique ID of the record
:type sys_id: ``str``
CLI Example:
.. code-block:: bash
salt myminion servicenow.delete_record sys_computer 2134566
'''
client = _get_client()
client.table = table
response = client.delete(sys_id)
return response | [
"def",
"delete_record",
"(",
"table",
",",
"sys_id",
")",
":",
"client",
"=",
"_get_client",
"(",
")",
"client",
".",
"table",
"=",
"table",
"response",
"=",
"client",
".",
"delete",
"(",
"sys_id",
")",
"return",
"response"
] | 21.6 | 0.002217 |
def diagonalize_collision_matrix(collision_matrices,
i_sigma=None,
i_temp=None,
pinv_solver=0,
log_level=0):
"""Diagonalize collision matrices.
Note
----
    collision_matrices is overwritten by eigenvectors.
Parameters
----------
    collision_matrices : ndarray, optional
Collision matrix. This ndarray has to have the following size and
flags.
shapes:
(sigmas, temperatures, prod(mesh), num_band, prod(mesh), num_band)
(sigmas, temperatures, ir_grid_points, num_band, 3,
ir_grid_points, num_band, 3)
(size, size)
dtype='double', order='C'
i_sigma : int, optional
Index of BZ integration methods, tetrahedron method and smearing
method with widths. Default is None.
i_temp : int, optional
Index of temperature. Default is None.
pinv_solver : int, optional
Diagnalization solver choice.
log_level : int, optional
Verbosity level. Smaller is more quiet. Default is 0.
Returns
-------
w : ndarray, optional
Eigenvalues.
shape=(size_of_collision_matrix,), dtype='double'
"""
start = time.time()
# Matrix size of collision matrix to be diagonalized.
# The following value is expected:
# ir-colmat: num_ir_grid_points * num_band * 3
# red-colmat: num_mesh_points * num_band
shape = collision_matrices.shape
if len(shape) == 6:
size = shape[2] * shape[3]
assert size == shape[4] * shape[5]
elif len(shape) == 8:
size = np.prod(shape[2:5])
assert size == np.prod(shape[5:8])
elif len(shape) == 2:
size = shape[0]
assert size == shape[1]
solver = _select_solver(pinv_solver)
# [1] dsyev: safer and slower than dsyevd and smallest memory usage
# [2] dsyevd: faster than dsyev and largest memory usage
if solver in [1, 2]:
if log_level:
routine = ['dsyev', 'dsyevd'][solver - 1]
sys.stdout.write("Diagonalizing by lapacke %s... " % routine)
sys.stdout.flush()
import phono3py._phono3py as phono3c
w = np.zeros(size, dtype='double')
if i_sigma is None:
_i_sigma = 0
else:
_i_sigma = i_sigma
if i_temp is None:
_i_temp = 0
else:
_i_temp = i_temp
phono3c.diagonalize_collision_matrix(collision_matrices,
w,
_i_sigma,
_i_temp,
0.0,
(solver + 1) % 2,
0) # only diagonalization
elif solver == 3: # np.linalg.eigh depends on dsyevd.
if log_level:
sys.stdout.write("Diagonalizing by np.linalg.eigh... ")
sys.stdout.flush()
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, col_mat[:] = np.linalg.eigh(col_mat)
elif solver == 4: # fully scipy dsyev
if log_level:
sys.stdout.write("Diagonalizing by "
"scipy.linalg.lapack.dsyev... ")
sys.stdout.flush()
import scipy.linalg
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, _, info = scipy.linalg.lapack.dsyev(col_mat.T, overwrite_a=1)
elif solver == 5: # fully scipy dsyevd
if log_level:
sys.stdout.write("Diagonalizing by "
"scipy.linalg.lapack.dsyevd... ")
sys.stdout.flush()
import scipy.linalg
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, _, info = scipy.linalg.lapack.dsyevd(col_mat.T, overwrite_a=1)
if log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
return w | [
"def",
"diagonalize_collision_matrix",
"(",
"collision_matrices",
",",
"i_sigma",
"=",
"None",
",",
"i_temp",
"=",
"None",
",",
"pinv_solver",
"=",
"0",
",",
"log_level",
"=",
"0",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"# Matrix size of collision matrix to be diagonalized.",
"# The following value is expected:",
"# ir-colmat: num_ir_grid_points * num_band * 3",
"# red-colmat: num_mesh_points * num_band",
"shape",
"=",
"collision_matrices",
".",
"shape",
"if",
"len",
"(",
"shape",
")",
"==",
"6",
":",
"size",
"=",
"shape",
"[",
"2",
"]",
"*",
"shape",
"[",
"3",
"]",
"assert",
"size",
"==",
"shape",
"[",
"4",
"]",
"*",
"shape",
"[",
"5",
"]",
"elif",
"len",
"(",
"shape",
")",
"==",
"8",
":",
"size",
"=",
"np",
".",
"prod",
"(",
"shape",
"[",
"2",
":",
"5",
"]",
")",
"assert",
"size",
"==",
"np",
".",
"prod",
"(",
"shape",
"[",
"5",
":",
"8",
"]",
")",
"elif",
"len",
"(",
"shape",
")",
"==",
"2",
":",
"size",
"=",
"shape",
"[",
"0",
"]",
"assert",
"size",
"==",
"shape",
"[",
"1",
"]",
"solver",
"=",
"_select_solver",
"(",
"pinv_solver",
")",
"# [1] dsyev: safer and slower than dsyevd and smallest memory usage",
"# [2] dsyevd: faster than dsyev and largest memory usage",
"if",
"solver",
"in",
"[",
"1",
",",
"2",
"]",
":",
"if",
"log_level",
":",
"routine",
"=",
"[",
"'dsyev'",
",",
"'dsyevd'",
"]",
"[",
"solver",
"-",
"1",
"]",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Diagonalizing by lapacke %s... \"",
"%",
"routine",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"import",
"phono3py",
".",
"_phono3py",
"as",
"phono3c",
"w",
"=",
"np",
".",
"zeros",
"(",
"size",
",",
"dtype",
"=",
"'double'",
")",
"if",
"i_sigma",
"is",
"None",
":",
"_i_sigma",
"=",
"0",
"else",
":",
"_i_sigma",
"=",
"i_sigma",
"if",
"i_temp",
"is",
"None",
":",
"_i_temp",
"=",
"0",
"else",
":",
"_i_temp",
"=",
"i_temp",
"phono3c",
".",
"diagonalize_collision_matrix",
"(",
"collision_matrices",
",",
"w",
",",
"_i_sigma",
",",
"_i_temp",
",",
"0.0",
",",
"(",
"solver",
"+",
"1",
")",
"%",
"2",
",",
"0",
")",
"# only diagonalization",
"elif",
"solver",
"==",
"3",
":",
"# np.linalg.eigh depends on dsyevd.",
"if",
"log_level",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Diagonalizing by np.linalg.eigh... \"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"col_mat",
"=",
"collision_matrices",
"[",
"i_sigma",
",",
"i_temp",
"]",
".",
"reshape",
"(",
"size",
",",
"size",
")",
"w",
",",
"col_mat",
"[",
":",
"]",
"=",
"np",
".",
"linalg",
".",
"eigh",
"(",
"col_mat",
")",
"elif",
"solver",
"==",
"4",
":",
"# fully scipy dsyev",
"if",
"log_level",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Diagonalizing by \"",
"\"scipy.linalg.lapack.dsyev... \"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"import",
"scipy",
".",
"linalg",
"col_mat",
"=",
"collision_matrices",
"[",
"i_sigma",
",",
"i_temp",
"]",
".",
"reshape",
"(",
"size",
",",
"size",
")",
"w",
",",
"_",
",",
"info",
"=",
"scipy",
".",
"linalg",
".",
"lapack",
".",
"dsyev",
"(",
"col_mat",
".",
"T",
",",
"overwrite_a",
"=",
"1",
")",
"elif",
"solver",
"==",
"5",
":",
"# fully scipy dsyevd",
"if",
"log_level",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Diagonalizing by \"",
"\"scipy.linalg.lapack.dsyevd... \"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"import",
"scipy",
".",
"linalg",
"col_mat",
"=",
"collision_matrices",
"[",
"i_sigma",
",",
"i_temp",
"]",
".",
"reshape",
"(",
"size",
",",
"size",
")",
"w",
",",
"_",
",",
"info",
"=",
"scipy",
".",
"linalg",
".",
"lapack",
".",
"dsyevd",
"(",
"col_mat",
".",
"T",
",",
"overwrite_a",
"=",
"1",
")",
"if",
"log_level",
":",
"print",
"(",
"\"[%.3fs]\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"return",
"w"
] | 34.853448 | 0.000241 |
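The solver-3 branch above reduces to `np.linalg.eigh`; its contract in miniature (ascending eigenvalues and orthonormal eigenvectors of a symmetric matrix):

```python
import numpy as np

a = np.array([[2.0, 1.0],
              [1.0, 2.0]])
w, v = np.linalg.eigh(a)
assert np.allclose(w, [1.0, 3.0])          # eigenvalues in ascending order
assert np.allclose(a @ v, v @ np.diag(w))  # A V = V diag(w)
```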
def filter_intensity(df, label="", with_multiplicity=False):
"""
Filter to include only the Intensity values with optional specified label, excluding other
Intensity measurements, but retaining all other columns.
"""
label += ".*__\d" if with_multiplicity else ""
dft = df.filter(regex="^(?!Intensity).*$")
dfi = df.filter(regex='^(.*Intensity.*%s.*__\d)$' % label)
return pd.concat([dft,dfi], axis=1) | [
"def",
"filter_intensity",
"(",
"df",
",",
"label",
"=",
"\"\"",
",",
"with_multiplicity",
"=",
"False",
")",
":",
"label",
"+=",
"\".*__\\d\"",
"if",
"with_multiplicity",
"else",
"\"\"",
"dft",
"=",
"df",
".",
"filter",
"(",
"regex",
"=",
"\"^(?!Intensity).*$\"",
")",
"dfi",
"=",
"df",
".",
"filter",
"(",
"regex",
"=",
"'^(.*Intensity.*%s.*__\\d)$'",
"%",
"label",
")",
"return",
"pd",
".",
"concat",
"(",
"[",
"dft",
",",
"dfi",
"]",
",",
"axis",
"=",
"1",
")"
] | 38.727273 | 0.013761 |
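The two regex filters above in isolation, on made-up column names; the negative lookahead keeps every column that does not start with `Intensity`:

```python
import pandas as pd

df = pd.DataFrame(columns=['Protein', 'Intensity H__1', 'Intensity L__2'])
non_intensity = df.filter(regex='^(?!Intensity).*$')
assert list(non_intensity.columns) == ['Protein']
```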
def create_append(filename: str, layers: Union[np.ndarray, Dict[str, np.ndarray], loompy.LayerManager], row_attrs: Dict[str, np.ndarray], col_attrs: Dict[str, np.ndarray], *, file_attrs: Dict[str, str] = None, fill_values: Dict[str, np.ndarray] = None) -> None:
"""
**DEPRECATED** - Use `new` instead; see https://github.com/linnarsson-lab/loompy/issues/42
"""
deprecated("'create_append' is deprecated. See https://github.com/linnarsson-lab/loompy/issues/42")
if os.path.exists(filename):
with connect(filename) as ds:
ds.add_columns(layers, col_attrs, fill_values=fill_values)
else:
create(filename, layers, row_attrs, col_attrs, file_attrs=file_attrs) | [
"def",
"create_append",
"(",
"filename",
":",
"str",
",",
"layers",
":",
"Union",
"[",
"np",
".",
"ndarray",
",",
"Dict",
"[",
"str",
",",
"np",
".",
"ndarray",
"]",
",",
"loompy",
".",
"LayerManager",
"]",
",",
"row_attrs",
":",
"Dict",
"[",
"str",
",",
"np",
".",
"ndarray",
"]",
",",
"col_attrs",
":",
"Dict",
"[",
"str",
",",
"np",
".",
"ndarray",
"]",
",",
"*",
",",
"file_attrs",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"None",
",",
"fill_values",
":",
"Dict",
"[",
"str",
",",
"np",
".",
"ndarray",
"]",
"=",
"None",
")",
"->",
"None",
":",
"deprecated",
"(",
"\"'create_append' is deprecated. See https://github.com/linnarsson-lab/loompy/issues/42\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"with",
"connect",
"(",
"filename",
")",
"as",
"ds",
":",
"ds",
".",
"add_columns",
"(",
"layers",
",",
"col_attrs",
",",
"fill_values",
"=",
"fill_values",
")",
"else",
":",
"create",
"(",
"filename",
",",
"layers",
",",
"row_attrs",
",",
"col_attrs",
",",
"file_attrs",
"=",
"file_attrs",
")"
] | 65.8 | 0.01949 |
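A hypothetical usage sketch of the deprecated helper above (assumes loompy and numpy are installed; the file name and attribute names are illustrative):

import numpy as np

matrix = np.random.rand(100, 10)
row_attrs = {"Gene": np.array(["g%d" % i for i in range(100)])}
col_attrs = {"Cell": np.array(["c%d" % j for j in range(10)])}
# The first call creates out.loom; a later call with matching rows
# appends its columns to the existing file, as the branch above shows.
create_append("out.loom", matrix, row_attrs, col_attrs)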
def cbpdnmd_xstep(k):
"""Do the X step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
"""
YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
if mp_cri.Cd == 1:
b = np.conj(mp_Df) * sl.rfftn(YU0, None, mp_cri.axisN) + \
sl.rfftn(YU1, None, mp_cri.axisN)
Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
else:
b = sl.inner(np.conj(mp_Df), sl.rfftn(YU0, None, mp_cri.axisN),
axis=mp_cri.axisC) + \
sl.rfftn(YU1, None, mp_cri.axisN)
Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
mp_DX[k] = sl.irfftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN) | [
"def",
"cbpdnmd_xstep",
"(",
"k",
")",
":",
"YU0",
"=",
"mp_Z_Y0",
"[",
"k",
"]",
"+",
"mp_S",
"[",
"k",
"]",
"-",
"mp_Z_U0",
"[",
"k",
"]",
"YU1",
"=",
"mp_Z_Y1",
"[",
"k",
"]",
"-",
"mp_Z_U1",
"[",
"k",
"]",
"if",
"mp_cri",
".",
"Cd",
"==",
"1",
":",
"b",
"=",
"np",
".",
"conj",
"(",
"mp_Df",
")",
"*",
"sl",
".",
"rfftn",
"(",
"YU0",
",",
"None",
",",
"mp_cri",
".",
"axisN",
")",
"+",
"sl",
".",
"rfftn",
"(",
"YU1",
",",
"None",
",",
"mp_cri",
".",
"axisN",
")",
"Xf",
"=",
"sl",
".",
"solvedbi_sm",
"(",
"mp_Df",
",",
"1.0",
",",
"b",
",",
"axis",
"=",
"mp_cri",
".",
"axisM",
")",
"else",
":",
"b",
"=",
"sl",
".",
"inner",
"(",
"np",
".",
"conj",
"(",
"mp_Df",
")",
",",
"sl",
".",
"rfftn",
"(",
"YU0",
",",
"None",
",",
"mp_cri",
".",
"axisN",
")",
",",
"axis",
"=",
"mp_cri",
".",
"axisC",
")",
"+",
"sl",
".",
"rfftn",
"(",
"YU1",
",",
"None",
",",
"mp_cri",
".",
"axisN",
")",
"Xf",
"=",
"sl",
".",
"solvemdbi_ism",
"(",
"mp_Df",
",",
"1.0",
",",
"b",
",",
"mp_cri",
".",
"axisM",
",",
"mp_cri",
".",
"axisC",
")",
"mp_Z_X",
"[",
"k",
"]",
"=",
"sl",
".",
"irfftn",
"(",
"Xf",
",",
"mp_cri",
".",
"Nv",
",",
"mp_cri",
".",
"axisN",
")",
"mp_DX",
"[",
"k",
"]",
"=",
"sl",
".",
"irfftn",
"(",
"sl",
".",
"inner",
"(",
"mp_Df",
",",
"Xf",
")",
",",
"mp_cri",
".",
"Nv",
",",
"mp_cri",
".",
"axisN",
")"
] | 44.210526 | 0.002331 |
def _hash(self, iv, value):
"""
        Generate an HMAC signature for this encrypted data
:param iv:
:param value:
:return string:
"""
return hmac.new(self.key, msg=iv+value, digestmod=hashlib.sha256).hexdigest() | [
"def",
"_hash",
"(",
"self",
",",
"iv",
",",
"value",
")",
":",
"return",
"hmac",
".",
"new",
"(",
"self",
".",
"key",
",",
"msg",
"=",
"iv",
"+",
"value",
",",
"digestmod",
"=",
"hashlib",
".",
"sha256",
")",
".",
"hexdigest",
"(",
")"
] | 30.444444 | 0.010638 |
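A standalone sketch of the same signing scheme, assuming key, iv and value are bytes:

import hashlib
import hmac

key = b"secret-key"
iv = b"0123456789abcdef"
value = b"ciphertext"
# Sign iv+value with HMAC-SHA256, exactly as _hash does with self.key.
signature = hmac.new(key, msg=iv + value, digestmod=hashlib.sha256).hexdigest()
print(signature)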
def roles(self):
"""List[:class:`Role`]: A :class:`list` of roles that is allowed to use this emoji.
If roles is empty, the emoji is unrestricted.
"""
guild = self.guild
if guild is None:
return []
return [role for role in guild.roles if self._roles.has(role.id)] | [
"def",
"roles",
"(",
"self",
")",
":",
"guild",
"=",
"self",
".",
"guild",
"if",
"guild",
"is",
"None",
":",
"return",
"[",
"]",
"return",
"[",
"role",
"for",
"role",
"in",
"guild",
".",
"roles",
"if",
"self",
".",
"_roles",
".",
"has",
"(",
"role",
".",
"id",
")",
"]"
] | 31.6 | 0.009231 |
def qgis_composer_extractor(impact_report, component_metadata):
"""Extract composer context.
    This method extracts the necessary context for a given impact report and
    component metadata and saves the context so it can be used in the
    composer rendering phase.
    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
    # QGIS Composer needs certain context to generate the output
# - Map Settings
# - Substitution maps
# - Element settings, such as icon for picture file or image source
# Generate map settings
qgis_context = impact_report.qgis_composition_context
inasafe_context = impact_report.inasafe_context
provenance = impact_report.impact_function.provenance
extra_args = component_metadata.extra_args
context = QGISComposerContext()
# Set default image elements to replace
image_elements = [
{
'id': 'safe-logo',
'path': inasafe_context.inasafe_logo
},
{
'id': 'black-inasafe-logo',
'path': inasafe_context.black_inasafe_logo
},
{
'id': 'white-inasafe-logo',
'path': inasafe_context.white_inasafe_logo
},
{
'id': 'north-arrow',
'path': inasafe_context.north_arrow
},
{
'id': 'organisation-logo',
'path': inasafe_context.organisation_logo
},
{
'id': 'supporters_logo',
'path': inasafe_context.supporters_logo
}
]
context.image_elements = image_elements
# Set default HTML Frame elements to replace
html_frame_elements = [
{
'id': 'impact-report',
'mode': 'text', # another mode is url
'text': '', # TODO: get impact summary table
}
]
context.html_frame_elements = html_frame_elements
"""Define the layers for the impact map."""
project = QgsProject.instance()
layers = []
exposure_summary_layers = []
if impact_report.multi_exposure_impact_function:
for impact_function in (
impact_report.multi_exposure_impact_function.impact_functions):
impact_layer = impact_function.exposure_summary or (
impact_function.aggregate_hazard_impacted)
exposure_summary_layers.append(impact_layer)
# use custom ordered layer if any
if impact_report.ordered_layers:
for layer in impact_report.ordered_layers:
layers.append(layer)
# We are keeping this if we want to enable below behaviour again.
# Currently realtime might have layer order without impact layer in it.
# # make sure at least there is an impact layer
# if impact_report.multi_exposure_impact_function:
# additional_layers = [] # for exposure summary layers
# impact_layer_found = False
# impact_functions = (
# impact_report.multi_exposure_impact_function.impact_functions)
# # check for impact layer occurrences
# for analysis in impact_functions:
# impact_layer = analysis.exposure_summary or (
# analysis.aggregate_hazard_impacted)
# for index, layer in enumerate(layers):
# if impact_layer.source() == layer.source():
# add_impact_layers_to_canvas(analysis)
# layers[index] = impact_layer
# impact_layer_found = True
# if not impact_layer_found:
# for analysis in impact_functions:
# add_impact_layers_to_canvas(analysis)
# impact_layer = analysis.exposure_summary or (
# analysis.aggregate_hazard_impacted)
# layer_uri = full_layer_uri(impact_layer)
# layer = load_layer_from_registry(layer_uri)
# additional_layers.append(layer)
# layers = additional_layers + layers
# else:
# impact_layer = (
# impact_report.impact_function.exposure_summary or (
# impact_report.impact_function.aggregate_hazard_impacted))
# if impact_layer not in layers:
# layers.insert(0, impact_layer)
# use default layer order if no custom ordered layer found
else:
if not impact_report.multi_exposure_impact_function: # single IF
layers = [impact_report.impact] + impact_report.extra_layers
else: # multi-exposure IF
layers = [] + impact_report.extra_layers
add_supplementary_layers = (
not impact_report.multi_exposure_impact_function or not (
impact_report.multi_exposure_impact_function.
output_layers_ordered)
)
if add_supplementary_layers:
# Check show only impact.
show_only_impact = setting(
'set_show_only_impact_on_report', expected_type=bool)
if not show_only_impact:
hazard_layer = project.mapLayers().get(
provenance['hazard_layer_id'], None)
aggregation_layer_id = provenance['aggregation_layer_id']
if aggregation_layer_id:
aggregation_layer = project.mapLayers().get(
aggregation_layer_id, None)
layers.append(aggregation_layer)
layers.append(hazard_layer)
# check hide exposure settings
hide_exposure_flag = setting(
'setHideExposureFlag', expected_type=bool)
if not hide_exposure_flag:
exposure_layers_id = []
if provenance.get(
provenance_exposure_layer_id['provenance_key']):
exposure_layers_id.append(
provenance.get(
provenance_exposure_layer_id['provenance_key']))
elif provenance.get(
provenance_multi_exposure_layers_id['provenance_key']):
exposure_layers_id = provenance.get(
provenance_multi_exposure_layers_id['provenance_key'])
# place exposure at the bottom
for layer_id in exposure_layers_id:
exposure_layer = project.mapLayers().get(layer_id)
layers.append(exposure_layer)
# default extent is analysis extent
if not qgis_context.extent:
qgis_context.extent = impact_report.impact_function.analysis_extent
map_elements = [
{
'id': 'impact-map',
'extent': qgis_context.extent,
'grid_split_count': 5,
'layers': layers,
}
]
context.map_elements = map_elements
# calculate map_legends, only show the legend for impact layer
if impact_report.legend_layers: # use requested legend if any
layers = impact_report.legend_layers
elif impact_report.multi_exposure_impact_function: # multi-exposure IF
layers = exposure_summary_layers
else: # single IF
layers = [impact_report.impact]
symbol_count = 0
for l in layers:
layer = l
""":type: qgis.core.QgsMapLayer"""
try:
symbol_count += len(layer.legendSymbologyItems())
continue
except Exception: # pylint: disable=broad-except
pass
try:
symbol_count += len(layer.renderer().legendSymbolItems())
continue
except Exception: # pylint: disable=broad-except
pass
symbol_count += 1
legend_title = provenance.get('map_legend_title') or ''
map_legends = [
{
'id': 'impact-legend',
'title': legend_title,
'layers': layers,
'symbol_count': symbol_count,
            # 'column_count': 2, # the number of columns in the legend display
}
]
context.map_legends = map_legends
# process substitution map
start_datetime = provenance['start_datetime']
""":type: datetime.datetime"""
date_format = resolve_from_dictionary(extra_args, 'date-format')
time_format = resolve_from_dictionary(extra_args, 'time-format')
if isinstance(start_datetime, datetime.datetime):
date = start_datetime.strftime(date_format)
time = start_datetime.strftime(time_format)
else:
date = ''
time = ''
long_version = get_version()
tokens = long_version.split('.')
version = '%s.%s.%s' % (tokens[0], tokens[1], tokens[2])
# Get title of the layer
title = provenance.get('map_title') or ''
# Set source
unknown_source_text = resolve_from_dictionary(
extra_args, ['defaults', 'unknown_source'])
aggregation_not_used = resolve_from_dictionary(
extra_args, ['defaults', 'aggregation_not_used'])
hazard_source = (
provenance.get(
'hazard_keywords', {}).get('source') or unknown_source_text)
exposure_source = (
provenance.get(
'exposure_keywords', {}).get('source') or unknown_source_text)
if provenance['aggregation_layer']:
aggregation_source = (
provenance['aggregation_keywords'].get('source')
or unknown_source_text)
else:
aggregation_source = aggregation_not_used
spatial_reference_format = resolve_from_dictionary(
extra_args, 'spatial-reference-format')
reference_name = spatial_reference_format.format(
crs=impact_report.impact_function.crs.authid())
analysis_layer = impact_report.analysis
analysis_name = value_from_field_name(
analysis_name_field['field_name'], analysis_layer)
# Prepare the substitution map
version_title = resolve_from_dictionary(extra_args, 'version-title')
disclaimer_title = resolve_from_dictionary(extra_args, 'disclaimer-title')
date_title = resolve_from_dictionary(extra_args, 'date-title')
time_title = resolve_from_dictionary(extra_args, 'time-title')
caution_title = resolve_from_dictionary(extra_args, 'caution-title')
caution_text = resolve_from_dictionary(extra_args, 'caution-text')
version_text = resolve_from_dictionary(extra_args, 'version-text')
legend_section_title = resolve_from_dictionary(
extra_args, 'legend-title')
information_title = resolve_from_dictionary(
extra_args, 'information-title')
supporters_title = resolve_from_dictionary(
extra_args, 'supporters-title')
source_title = resolve_from_dictionary(extra_args, 'source-title')
analysis_title = resolve_from_dictionary(extra_args, 'analysis-title')
reference_title = resolve_from_dictionary(
extra_args, 'spatial-reference-title')
substitution_map = {
'impact-title': title,
'date': date,
'time': time,
'safe-version': version, # deprecated
'disclaimer': inasafe_context.disclaimer,
# These added in 3.2
'version-title': version_title,
'inasafe-version': version,
'disclaimer-title': disclaimer_title,
'date-title': date_title,
'time-title': time_title,
'caution-title': caution_title,
'caution-text': caution_text,
'version-text': version_text.format(version=version),
'legend-title': legend_section_title,
'information-title': information_title,
'supporters-title': supporters_title,
'source-title': source_title,
'analysis-title': analysis_title,
'analysis-name': analysis_name,
'reference-title': reference_title,
'reference-name': reference_name,
'hazard-source': hazard_source,
'exposure-source': exposure_source,
'aggregation-source': aggregation_source,
}
context.substitution_map = substitution_map
return context | [
"def",
"qgis_composer_extractor",
"(",
"impact_report",
",",
"component_metadata",
")",
":",
"# QGIS Composer needed certain context to generate the output",
"# - Map Settings",
"# - Substitution maps",
"# - Element settings, such as icon for picture file or image source",
"# Generate map settings",
"qgis_context",
"=",
"impact_report",
".",
"qgis_composition_context",
"inasafe_context",
"=",
"impact_report",
".",
"inasafe_context",
"provenance",
"=",
"impact_report",
".",
"impact_function",
".",
"provenance",
"extra_args",
"=",
"component_metadata",
".",
"extra_args",
"context",
"=",
"QGISComposerContext",
"(",
")",
"# Set default image elements to replace",
"image_elements",
"=",
"[",
"{",
"'id'",
":",
"'safe-logo'",
",",
"'path'",
":",
"inasafe_context",
".",
"inasafe_logo",
"}",
",",
"{",
"'id'",
":",
"'black-inasafe-logo'",
",",
"'path'",
":",
"inasafe_context",
".",
"black_inasafe_logo",
"}",
",",
"{",
"'id'",
":",
"'white-inasafe-logo'",
",",
"'path'",
":",
"inasafe_context",
".",
"white_inasafe_logo",
"}",
",",
"{",
"'id'",
":",
"'north-arrow'",
",",
"'path'",
":",
"inasafe_context",
".",
"north_arrow",
"}",
",",
"{",
"'id'",
":",
"'organisation-logo'",
",",
"'path'",
":",
"inasafe_context",
".",
"organisation_logo",
"}",
",",
"{",
"'id'",
":",
"'supporters_logo'",
",",
"'path'",
":",
"inasafe_context",
".",
"supporters_logo",
"}",
"]",
"context",
".",
"image_elements",
"=",
"image_elements",
"# Set default HTML Frame elements to replace",
"html_frame_elements",
"=",
"[",
"{",
"'id'",
":",
"'impact-report'",
",",
"'mode'",
":",
"'text'",
",",
"# another mode is url",
"'text'",
":",
"''",
",",
"# TODO: get impact summary table",
"}",
"]",
"context",
".",
"html_frame_elements",
"=",
"html_frame_elements",
"\"\"\"Define the layers for the impact map.\"\"\"",
"project",
"=",
"QgsProject",
".",
"instance",
"(",
")",
"layers",
"=",
"[",
"]",
"exposure_summary_layers",
"=",
"[",
"]",
"if",
"impact_report",
".",
"multi_exposure_impact_function",
":",
"for",
"impact_function",
"in",
"(",
"impact_report",
".",
"multi_exposure_impact_function",
".",
"impact_functions",
")",
":",
"impact_layer",
"=",
"impact_function",
".",
"exposure_summary",
"or",
"(",
"impact_function",
".",
"aggregate_hazard_impacted",
")",
"exposure_summary_layers",
".",
"append",
"(",
"impact_layer",
")",
"# use custom ordered layer if any",
"if",
"impact_report",
".",
"ordered_layers",
":",
"for",
"layer",
"in",
"impact_report",
".",
"ordered_layers",
":",
"layers",
".",
"append",
"(",
"layer",
")",
"# We are keeping this if we want to enable below behaviour again.",
"# Currently realtime might have layer order without impact layer in it.",
"# # make sure at least there is an impact layer",
"# if impact_report.multi_exposure_impact_function:",
"# additional_layers = [] # for exposure summary layers",
"# impact_layer_found = False",
"# impact_functions = (",
"# impact_report.multi_exposure_impact_function.impact_functions)",
"# # check for impact layer occurrences",
"# for analysis in impact_functions:",
"# impact_layer = analysis.exposure_summary or (",
"# analysis.aggregate_hazard_impacted)",
"# for index, layer in enumerate(layers):",
"# if impact_layer.source() == layer.source():",
"# add_impact_layers_to_canvas(analysis)",
"# layers[index] = impact_layer",
"# impact_layer_found = True",
"# if not impact_layer_found:",
"# for analysis in impact_functions:",
"# add_impact_layers_to_canvas(analysis)",
"# impact_layer = analysis.exposure_summary or (",
"# analysis.aggregate_hazard_impacted)",
"# layer_uri = full_layer_uri(impact_layer)",
"# layer = load_layer_from_registry(layer_uri)",
"# additional_layers.append(layer)",
"# layers = additional_layers + layers",
"# else:",
"# impact_layer = (",
"# impact_report.impact_function.exposure_summary or (",
"# impact_report.impact_function.aggregate_hazard_impacted))",
"# if impact_layer not in layers:",
"# layers.insert(0, impact_layer)",
"# use default layer order if no custom ordered layer found",
"else",
":",
"if",
"not",
"impact_report",
".",
"multi_exposure_impact_function",
":",
"# single IF",
"layers",
"=",
"[",
"impact_report",
".",
"impact",
"]",
"+",
"impact_report",
".",
"extra_layers",
"else",
":",
"# multi-exposure IF",
"layers",
"=",
"[",
"]",
"+",
"impact_report",
".",
"extra_layers",
"add_supplementary_layers",
"=",
"(",
"not",
"impact_report",
".",
"multi_exposure_impact_function",
"or",
"not",
"(",
"impact_report",
".",
"multi_exposure_impact_function",
".",
"output_layers_ordered",
")",
")",
"if",
"add_supplementary_layers",
":",
"# Check show only impact.",
"show_only_impact",
"=",
"setting",
"(",
"'set_show_only_impact_on_report'",
",",
"expected_type",
"=",
"bool",
")",
"if",
"not",
"show_only_impact",
":",
"hazard_layer",
"=",
"project",
".",
"mapLayers",
"(",
")",
".",
"get",
"(",
"provenance",
"[",
"'hazard_layer_id'",
"]",
",",
"None",
")",
"aggregation_layer_id",
"=",
"provenance",
"[",
"'aggregation_layer_id'",
"]",
"if",
"aggregation_layer_id",
":",
"aggregation_layer",
"=",
"project",
".",
"mapLayers",
"(",
")",
".",
"get",
"(",
"aggregation_layer_id",
",",
"None",
")",
"layers",
".",
"append",
"(",
"aggregation_layer",
")",
"layers",
".",
"append",
"(",
"hazard_layer",
")",
"# check hide exposure settings",
"hide_exposure_flag",
"=",
"setting",
"(",
"'setHideExposureFlag'",
",",
"expected_type",
"=",
"bool",
")",
"if",
"not",
"hide_exposure_flag",
":",
"exposure_layers_id",
"=",
"[",
"]",
"if",
"provenance",
".",
"get",
"(",
"provenance_exposure_layer_id",
"[",
"'provenance_key'",
"]",
")",
":",
"exposure_layers_id",
".",
"append",
"(",
"provenance",
".",
"get",
"(",
"provenance_exposure_layer_id",
"[",
"'provenance_key'",
"]",
")",
")",
"elif",
"provenance",
".",
"get",
"(",
"provenance_multi_exposure_layers_id",
"[",
"'provenance_key'",
"]",
")",
":",
"exposure_layers_id",
"=",
"provenance",
".",
"get",
"(",
"provenance_multi_exposure_layers_id",
"[",
"'provenance_key'",
"]",
")",
"# place exposure at the bottom",
"for",
"layer_id",
"in",
"exposure_layers_id",
":",
"exposure_layer",
"=",
"project",
".",
"mapLayers",
"(",
")",
".",
"get",
"(",
"layer_id",
")",
"layers",
".",
"append",
"(",
"exposure_layer",
")",
"# default extent is analysis extent",
"if",
"not",
"qgis_context",
".",
"extent",
":",
"qgis_context",
".",
"extent",
"=",
"impact_report",
".",
"impact_function",
".",
"analysis_extent",
"map_elements",
"=",
"[",
"{",
"'id'",
":",
"'impact-map'",
",",
"'extent'",
":",
"qgis_context",
".",
"extent",
",",
"'grid_split_count'",
":",
"5",
",",
"'layers'",
":",
"layers",
",",
"}",
"]",
"context",
".",
"map_elements",
"=",
"map_elements",
"# calculate map_legends, only show the legend for impact layer",
"if",
"impact_report",
".",
"legend_layers",
":",
"# use requested legend if any",
"layers",
"=",
"impact_report",
".",
"legend_layers",
"elif",
"impact_report",
".",
"multi_exposure_impact_function",
":",
"# multi-exposure IF",
"layers",
"=",
"exposure_summary_layers",
"else",
":",
"# single IF",
"layers",
"=",
"[",
"impact_report",
".",
"impact",
"]",
"symbol_count",
"=",
"0",
"for",
"l",
"in",
"layers",
":",
"layer",
"=",
"l",
"\"\"\":type: qgis.core.QgsMapLayer\"\"\"",
"try",
":",
"symbol_count",
"+=",
"len",
"(",
"layer",
".",
"legendSymbologyItems",
"(",
")",
")",
"continue",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"pass",
"try",
":",
"symbol_count",
"+=",
"len",
"(",
"layer",
".",
"renderer",
"(",
")",
".",
"legendSymbolItems",
"(",
")",
")",
"continue",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"pass",
"symbol_count",
"+=",
"1",
"legend_title",
"=",
"provenance",
".",
"get",
"(",
"'map_legend_title'",
")",
"or",
"''",
"map_legends",
"=",
"[",
"{",
"'id'",
":",
"'impact-legend'",
",",
"'title'",
":",
"legend_title",
",",
"'layers'",
":",
"layers",
",",
"'symbol_count'",
":",
"symbol_count",
",",
"# 'column_count': 2, # the number of column in legend display",
"}",
"]",
"context",
".",
"map_legends",
"=",
"map_legends",
"# process substitution map",
"start_datetime",
"=",
"provenance",
"[",
"'start_datetime'",
"]",
"\"\"\":type: datetime.datetime\"\"\"",
"date_format",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'date-format'",
")",
"time_format",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'time-format'",
")",
"if",
"isinstance",
"(",
"start_datetime",
",",
"datetime",
".",
"datetime",
")",
":",
"date",
"=",
"start_datetime",
".",
"strftime",
"(",
"date_format",
")",
"time",
"=",
"start_datetime",
".",
"strftime",
"(",
"time_format",
")",
"else",
":",
"date",
"=",
"''",
"time",
"=",
"''",
"long_version",
"=",
"get_version",
"(",
")",
"tokens",
"=",
"long_version",
".",
"split",
"(",
"'.'",
")",
"version",
"=",
"'%s.%s.%s'",
"%",
"(",
"tokens",
"[",
"0",
"]",
",",
"tokens",
"[",
"1",
"]",
",",
"tokens",
"[",
"2",
"]",
")",
"# Get title of the layer",
"title",
"=",
"provenance",
".",
"get",
"(",
"'map_title'",
")",
"or",
"''",
"# Set source",
"unknown_source_text",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"[",
"'defaults'",
",",
"'unknown_source'",
"]",
")",
"aggregation_not_used",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"[",
"'defaults'",
",",
"'aggregation_not_used'",
"]",
")",
"hazard_source",
"=",
"(",
"provenance",
".",
"get",
"(",
"'hazard_keywords'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'source'",
")",
"or",
"unknown_source_text",
")",
"exposure_source",
"=",
"(",
"provenance",
".",
"get",
"(",
"'exposure_keywords'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'source'",
")",
"or",
"unknown_source_text",
")",
"if",
"provenance",
"[",
"'aggregation_layer'",
"]",
":",
"aggregation_source",
"=",
"(",
"provenance",
"[",
"'aggregation_keywords'",
"]",
".",
"get",
"(",
"'source'",
")",
"or",
"unknown_source_text",
")",
"else",
":",
"aggregation_source",
"=",
"aggregation_not_used",
"spatial_reference_format",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'spatial-reference-format'",
")",
"reference_name",
"=",
"spatial_reference_format",
".",
"format",
"(",
"crs",
"=",
"impact_report",
".",
"impact_function",
".",
"crs",
".",
"authid",
"(",
")",
")",
"analysis_layer",
"=",
"impact_report",
".",
"analysis",
"analysis_name",
"=",
"value_from_field_name",
"(",
"analysis_name_field",
"[",
"'field_name'",
"]",
",",
"analysis_layer",
")",
"# Prepare the substitution map",
"version_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'version-title'",
")",
"disclaimer_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'disclaimer-title'",
")",
"date_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'date-title'",
")",
"time_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'time-title'",
")",
"caution_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'caution-title'",
")",
"caution_text",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'caution-text'",
")",
"version_text",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'version-text'",
")",
"legend_section_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'legend-title'",
")",
"information_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'information-title'",
")",
"supporters_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'supporters-title'",
")",
"source_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'source-title'",
")",
"analysis_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'analysis-title'",
")",
"reference_title",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"'spatial-reference-title'",
")",
"substitution_map",
"=",
"{",
"'impact-title'",
":",
"title",
",",
"'date'",
":",
"date",
",",
"'time'",
":",
"time",
",",
"'safe-version'",
":",
"version",
",",
"# deprecated",
"'disclaimer'",
":",
"inasafe_context",
".",
"disclaimer",
",",
"# These added in 3.2",
"'version-title'",
":",
"version_title",
",",
"'inasafe-version'",
":",
"version",
",",
"'disclaimer-title'",
":",
"disclaimer_title",
",",
"'date-title'",
":",
"date_title",
",",
"'time-title'",
":",
"time_title",
",",
"'caution-title'",
":",
"caution_title",
",",
"'caution-text'",
":",
"caution_text",
",",
"'version-text'",
":",
"version_text",
".",
"format",
"(",
"version",
"=",
"version",
")",
",",
"'legend-title'",
":",
"legend_section_title",
",",
"'information-title'",
":",
"information_title",
",",
"'supporters-title'",
":",
"supporters_title",
",",
"'source-title'",
":",
"source_title",
",",
"'analysis-title'",
":",
"analysis_title",
",",
"'analysis-name'",
":",
"analysis_name",
",",
"'reference-title'",
":",
"reference_title",
",",
"'reference-name'",
":",
"reference_name",
",",
"'hazard-source'",
":",
"hazard_source",
",",
"'exposure-source'",
":",
"exposure_source",
",",
"'aggregation-source'",
":",
"aggregation_source",
",",
"}",
"context",
".",
"substitution_map",
"=",
"substitution_map",
"return",
"context"
] | 38.330159 | 0.000161 |
def namespace(self, elem=None):
"""return the URL, if any, for the doc root or elem, if given."""
if elem is None:
elem = self.root
return XML.tag_namespace(elem.tag) | [
"def",
"namespace",
"(",
"self",
",",
"elem",
"=",
"None",
")",
":",
"if",
"elem",
"is",
"None",
":",
"elem",
"=",
"self",
".",
"root",
"return",
"XML",
".",
"tag_namespace",
"(",
"elem",
".",
"tag",
")"
] | 40.4 | 0.009709 |
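XML.tag_namespace is not shown here; a plausible sketch for Clark-notation tags ("{uri}localname") would be:

def tag_namespace(tag):
    # '{http://...}root' -> 'http://...'; tags without braces have no namespace.
    if isinstance(tag, str) and tag.startswith("{"):
        return tag[1:].split("}", 1)[0]
    return None

print(tag_namespace("{http://www.w3.org/1999/xhtml}html"))
# http://www.w3.org/1999/xhtml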
def add_child(self, name, child):
"""
Add a new child to the node.
:param name: Name of the child that must be used to access that child. Should not contain anything that could interfere with the operator `.` (dot).
:param child: The new child, an instance of :any:`Scale` or :any:`Parameter` or :any:`ParameterNode`.
"""
if name in self.children:
raise ValueError("{} has already a child named {}".format(self.name, name))
if not (isinstance(child, ParameterNode) or isinstance(child, Parameter) or isinstance(child, Scale)):
raise TypeError("child must be of type ParameterNode, Parameter, or Scale. Instead got {}".format(type(child)))
self.children[name] = child
setattr(self, name, child) | [
"def",
"add_child",
"(",
"self",
",",
"name",
",",
"child",
")",
":",
"if",
"name",
"in",
"self",
".",
"children",
":",
"raise",
"ValueError",
"(",
"\"{} has already a child named {}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"name",
")",
")",
"if",
"not",
"(",
"isinstance",
"(",
"child",
",",
"ParameterNode",
")",
"or",
"isinstance",
"(",
"child",
",",
"Parameter",
")",
"or",
"isinstance",
"(",
"child",
",",
"Scale",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"child must be of type ParameterNode, Parameter, or Scale. Instead got {}\"",
".",
"format",
"(",
"type",
"(",
"child",
")",
")",
")",
"self",
".",
"children",
"[",
"name",
"]",
"=",
"child",
"setattr",
"(",
"self",
",",
"name",
",",
"child",
")"
] | 59.846154 | 0.008861 |
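A self-contained sketch of the duplicate-name guard, using minimal stand-in classes (the real Parameter/ParameterNode/Scale come from the host library) and assuming the add_child function above is available at module level:

class Parameter:
    pass

class Scale:
    pass

class ParameterNode:
    def __init__(self, name):
        self.name = name
        self.children = {}
    add_child = add_child  # reuse the function above as a method

node = ParameterNode("taxes")
node.add_child("rate", Parameter())
node.add_child("rate", Parameter())  # raises ValueError: duplicate child name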
def binOp(op, indx, amap, bmap, fill_vec):
'''
    Combines the values from two map objects over the ids in indx
    using the op operator. Where a value is missing from either map,
    the fill_vec vector is used instead.
'''
def op_or_missing(id):
va = amap.get(id, None)
vb = bmap.get(id, None)
if va is None or vb is None:
# This should create as many elements as the number of columns!?
result = fill_vec
else:
try:
result = op(va, vb)
except Exception:
result = None
if result is None:
result = fill_vec
return result
seq_arys = map(op_or_missing, indx)
data = np.vstack(seq_arys)
return data | [
"def",
"binOp",
"(",
"op",
",",
"indx",
",",
"amap",
",",
"bmap",
",",
"fill_vec",
")",
":",
"def",
"op_or_missing",
"(",
"id",
")",
":",
"va",
"=",
"amap",
".",
"get",
"(",
"id",
",",
"None",
")",
"vb",
"=",
"bmap",
".",
"get",
"(",
"id",
",",
"None",
")",
"if",
"va",
"is",
"None",
"or",
"vb",
"is",
"None",
":",
"# This should create as many elements as the number of columns!?\r",
"result",
"=",
"fill_vec",
"else",
":",
"try",
":",
"result",
"=",
"op",
"(",
"va",
",",
"vb",
")",
"except",
"Exception",
":",
"result",
"=",
"None",
"if",
"result",
"is",
"None",
":",
"result",
"=",
"fill_vec",
"return",
"result",
"seq_arys",
"=",
"map",
"(",
"op_or_missing",
",",
"indx",
")",
"data",
"=",
"np",
".",
"vstack",
"(",
"seq_arys",
")",
"return",
"data"
] | 34.347826 | 0.001232 |
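A minimal usage sketch: combine two id-to-vector maps by addition, falling back to the fill vector wherever an id is missing from either map (assumes numpy and the function above are in scope):

import numpy as np

amap = {1: np.array([1.0, 2.0]), 2: np.array([3.0, 4.0])}
bmap = {1: np.array([10.0, 20.0]), 3: np.array([30.0, 40.0])}
fill = np.array([np.nan, np.nan])
# id 1 is present in both maps; ids 2 and 3 each miss one side.
print(binOp(np.add, [1, 2, 3], amap, bmap, fill))
# [[11. 22.]
#  [nan nan]
#  [nan nan]]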
def read_atoms(fn, cycfn=None, pos_only=False, conv=1.0):
"""
Read atom information from an atoms.dat file (i.e., tblmd, MDCORE input file)
"""
f = paropen(fn, "r")
l = f.readline().lstrip()
while len(l) > 0 and ( l[0] == '#' or l[0] == '<' ):
l = f.readline().lstrip()
n_atoms = int(l)
l = f.readline().lstrip()
while len(l) > 0 and ( l[0] == '#' or l[0] == '<' ):
l = f.readline().lstrip()
l = f.readline().lstrip()
while len(l) > 0 and ( l[0] == '#' or l[0] == '<' ):
l = f.readline().lstrip()
#
# Read positions
#
forces = np.zeros( [ n_atoms, 3 ] )
groups = np.zeros( [ n_atoms ] )
gamma = np.zeros( [ n_atoms ] )
T = np.zeros( [ n_atoms ] )
ats = [ ]
for i in range(n_atoms):
s = l.split()
# type x y z
sym = None
try:
Z = int(s[0])
sym = ase.data.chemical_symbols[Z]
except:
sym = s[0]
a = ase.Atom(sym, ( float(s[2])*conv, float(s[3])*conv, float(s[4])*conv ) )
groups[i] = int(s[5])
gamma[i] = float(s[6])
T[i] = float(s[7])
ats += [ a ]
l = f.readline()
this = ase.Atoms(ats, pbc=True)
if not pos_only:
while l and l == "":
l = f.readline().strip()
while l:
key = l.strip(" <-#\r\n")
if key.upper() == "VELOCITIES":
for i in range(n_atoms):
s = f.readline().split()
m = this[i].mass
if m is None:
m = ase.data.atomic_masses[ase.data.chemical_symbols.index(this[i].symbol)]
this[i].momentum = ( m*float(s[0]), m*float(s[1]), m*float(s[2]) )
l = None
elif key.upper() == "FORCES":
for i in range(n_atoms):
s = f.readline().split()
forces[i] = np.array( [ float(s[0]), float(s[1]), float(s[2]) ] )
l = None
elif key.upper() == "CHARGES":
for i in this:
l = f.readline()
if l and len(l.split()) == 1:
i.charge = float(l)
l = None
elif key.upper() == "CELL" or key.upper().split()[0:2] == ("BOX", "VECTORS" ):
l1 = f.readline()
l2 = f.readline()
l3 = f.readline()
this.set_cell( [ [float(x) for x in l1.split()],
[float(x) for x in l2.split()],
[float(x) for x in l3.split()] ] )
l = None
else:
aux = [ ]
l = f.readline().strip()
while l and l[0] not in [ '<', '#' ]:
s = l.split()
aux += [ [float(x) for x in s] ]
l = f.readline().strip()
if len(aux) == n_atoms:
this.set_array(key, np.asarray(aux))
else:
print("Warning: Encountered field '%s' which does not seem to be per-atom data." % key)
if l is None:
l = f.readline().strip()
while l and l == "":
l = f.readline().strip()
f.close()
this.set_array("forces", forces)
this.set_array("groups", groups)
this.set_array("gamma", gamma)
this.set_array("T", T)
if cycfn:
read_cyc(this, cycfn, conv=conv)
return this | [
"def",
"read_atoms",
"(",
"fn",
",",
"cycfn",
"=",
"None",
",",
"pos_only",
"=",
"False",
",",
"conv",
"=",
"1.0",
")",
":",
"f",
"=",
"paropen",
"(",
"fn",
",",
"\"r\"",
")",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"lstrip",
"(",
")",
"while",
"len",
"(",
"l",
")",
">",
"0",
"and",
"(",
"l",
"[",
"0",
"]",
"==",
"'#'",
"or",
"l",
"[",
"0",
"]",
"==",
"'<'",
")",
":",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"lstrip",
"(",
")",
"n_atoms",
"=",
"int",
"(",
"l",
")",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"lstrip",
"(",
")",
"while",
"len",
"(",
"l",
")",
">",
"0",
"and",
"(",
"l",
"[",
"0",
"]",
"==",
"'#'",
"or",
"l",
"[",
"0",
"]",
"==",
"'<'",
")",
":",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"lstrip",
"(",
")",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"lstrip",
"(",
")",
"while",
"len",
"(",
"l",
")",
">",
"0",
"and",
"(",
"l",
"[",
"0",
"]",
"==",
"'#'",
"or",
"l",
"[",
"0",
"]",
"==",
"'<'",
")",
":",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"lstrip",
"(",
")",
"#",
"# Read positions",
"#",
"forces",
"=",
"np",
".",
"zeros",
"(",
"[",
"n_atoms",
",",
"3",
"]",
")",
"groups",
"=",
"np",
".",
"zeros",
"(",
"[",
"n_atoms",
"]",
")",
"gamma",
"=",
"np",
".",
"zeros",
"(",
"[",
"n_atoms",
"]",
")",
"T",
"=",
"np",
".",
"zeros",
"(",
"[",
"n_atoms",
"]",
")",
"ats",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_atoms",
")",
":",
"s",
"=",
"l",
".",
"split",
"(",
")",
"# type x y z",
"sym",
"=",
"None",
"try",
":",
"Z",
"=",
"int",
"(",
"s",
"[",
"0",
"]",
")",
"sym",
"=",
"ase",
".",
"data",
".",
"chemical_symbols",
"[",
"Z",
"]",
"except",
":",
"sym",
"=",
"s",
"[",
"0",
"]",
"a",
"=",
"ase",
".",
"Atom",
"(",
"sym",
",",
"(",
"float",
"(",
"s",
"[",
"2",
"]",
")",
"*",
"conv",
",",
"float",
"(",
"s",
"[",
"3",
"]",
")",
"*",
"conv",
",",
"float",
"(",
"s",
"[",
"4",
"]",
")",
"*",
"conv",
")",
")",
"groups",
"[",
"i",
"]",
"=",
"int",
"(",
"s",
"[",
"5",
"]",
")",
"gamma",
"[",
"i",
"]",
"=",
"float",
"(",
"s",
"[",
"6",
"]",
")",
"T",
"[",
"i",
"]",
"=",
"float",
"(",
"s",
"[",
"7",
"]",
")",
"ats",
"+=",
"[",
"a",
"]",
"l",
"=",
"f",
".",
"readline",
"(",
")",
"this",
"=",
"ase",
".",
"Atoms",
"(",
"ats",
",",
"pbc",
"=",
"True",
")",
"if",
"not",
"pos_only",
":",
"while",
"l",
"and",
"l",
"==",
"\"\"",
":",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"while",
"l",
":",
"key",
"=",
"l",
".",
"strip",
"(",
"\" <-#\\r\\n\"",
")",
"if",
"key",
".",
"upper",
"(",
")",
"==",
"\"VELOCITIES\"",
":",
"for",
"i",
"in",
"range",
"(",
"n_atoms",
")",
":",
"s",
"=",
"f",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"m",
"=",
"this",
"[",
"i",
"]",
".",
"mass",
"if",
"m",
"is",
"None",
":",
"m",
"=",
"ase",
".",
"data",
".",
"atomic_masses",
"[",
"ase",
".",
"data",
".",
"chemical_symbols",
".",
"index",
"(",
"this",
"[",
"i",
"]",
".",
"symbol",
")",
"]",
"this",
"[",
"i",
"]",
".",
"momentum",
"=",
"(",
"m",
"*",
"float",
"(",
"s",
"[",
"0",
"]",
")",
",",
"m",
"*",
"float",
"(",
"s",
"[",
"1",
"]",
")",
",",
"m",
"*",
"float",
"(",
"s",
"[",
"2",
"]",
")",
")",
"l",
"=",
"None",
"elif",
"key",
".",
"upper",
"(",
")",
"==",
"\"FORCES\"",
":",
"for",
"i",
"in",
"range",
"(",
"n_atoms",
")",
":",
"s",
"=",
"f",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"forces",
"[",
"i",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"float",
"(",
"s",
"[",
"0",
"]",
")",
",",
"float",
"(",
"s",
"[",
"1",
"]",
")",
",",
"float",
"(",
"s",
"[",
"2",
"]",
")",
"]",
")",
"l",
"=",
"None",
"elif",
"key",
".",
"upper",
"(",
")",
"==",
"\"CHARGES\"",
":",
"for",
"i",
"in",
"this",
":",
"l",
"=",
"f",
".",
"readline",
"(",
")",
"if",
"l",
"and",
"len",
"(",
"l",
".",
"split",
"(",
")",
")",
"==",
"1",
":",
"i",
".",
"charge",
"=",
"float",
"(",
"l",
")",
"l",
"=",
"None",
"elif",
"key",
".",
"upper",
"(",
")",
"==",
"\"CELL\"",
"or",
"key",
".",
"upper",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
":",
"2",
"]",
"==",
"(",
"\"BOX\"",
",",
"\"VECTORS\"",
")",
":",
"l1",
"=",
"f",
".",
"readline",
"(",
")",
"l2",
"=",
"f",
".",
"readline",
"(",
")",
"l3",
"=",
"f",
".",
"readline",
"(",
")",
"this",
".",
"set_cell",
"(",
"[",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"l1",
".",
"split",
"(",
")",
"]",
",",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"l2",
".",
"split",
"(",
")",
"]",
",",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"l3",
".",
"split",
"(",
")",
"]",
"]",
")",
"l",
"=",
"None",
"else",
":",
"aux",
"=",
"[",
"]",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"while",
"l",
"and",
"l",
"[",
"0",
"]",
"not",
"in",
"[",
"'<'",
",",
"'#'",
"]",
":",
"s",
"=",
"l",
".",
"split",
"(",
")",
"aux",
"+=",
"[",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"s",
"]",
"]",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"aux",
")",
"==",
"n_atoms",
":",
"this",
".",
"set_array",
"(",
"key",
",",
"np",
".",
"asarray",
"(",
"aux",
")",
")",
"else",
":",
"print",
"(",
"\"Warning: Encountered field '%s' which does not seem to be per-atom data.\"",
"%",
"key",
")",
"if",
"l",
"is",
"None",
":",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"while",
"l",
"and",
"l",
"==",
"\"\"",
":",
"l",
"=",
"f",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"f",
".",
"close",
"(",
")",
"this",
".",
"set_array",
"(",
"\"forces\"",
",",
"forces",
")",
"this",
".",
"set_array",
"(",
"\"groups\"",
",",
"groups",
")",
"this",
".",
"set_array",
"(",
"\"gamma\"",
",",
"gamma",
")",
"this",
".",
"set_array",
"(",
"\"T\"",
",",
"T",
")",
"if",
"cycfn",
":",
"read_cyc",
"(",
"this",
",",
"cycfn",
",",
"conv",
"=",
"conv",
")",
"return",
"this"
] | 28.300813 | 0.022481 |
def paste_format(self):
"""Pastes cell formats
        Pasting starts at the cursor or at the top left bbox corner
"""
row, col, tab = self.grid.actions.cursor
selection = self.get_selection()
if selection:
# Use selection rather than cursor for top left cell if present
row, col = [tl if tl is not None else 0
for tl in selection.get_bbox()[0]]
cell_attributes = self.grid.code_array.cell_attributes
string_data = self.grid.main_window.clipboard.get_clipboard()
format_data = ast.literal_eval(string_data)
ca = format_data["cell_attributes"]
rh = format_data["row_heights"]
cw = format_data["col_widths"]
assert isinstance(ca, types.ListType)
assert isinstance(rh, types.DictType)
assert isinstance(cw, types.DictType)
# Cell attributes
for selection_params, tab, attrs in ca:
base_selection = Selection(*selection_params)
shifted_selection = base_selection.shifted(row, col)
if "merge_area" not in attrs:
# Do not paste merge areas because this may have
                # unintended consequences for existing merge areas
new_cell_attribute = shifted_selection, tab, attrs
cell_attributes.append(new_cell_attribute)
# Row heights
row_heights = self.grid.code_array.row_heights
for __row, __tab in rh:
row_heights[__row+row, tab] = rh[__row, __tab]
# Column widths
col_widths = self.grid.code_array.col_widths
for __col, __tab in cw:
col_widths[__col+col, tab] = cw[(__col, __tab)] | [
"def",
"paste_format",
"(",
"self",
")",
":",
"row",
",",
"col",
",",
"tab",
"=",
"self",
".",
"grid",
".",
"actions",
".",
"cursor",
"selection",
"=",
"self",
".",
"get_selection",
"(",
")",
"if",
"selection",
":",
"# Use selection rather than cursor for top left cell if present",
"row",
",",
"col",
"=",
"[",
"tl",
"if",
"tl",
"is",
"not",
"None",
"else",
"0",
"for",
"tl",
"in",
"selection",
".",
"get_bbox",
"(",
")",
"[",
"0",
"]",
"]",
"cell_attributes",
"=",
"self",
".",
"grid",
".",
"code_array",
".",
"cell_attributes",
"string_data",
"=",
"self",
".",
"grid",
".",
"main_window",
".",
"clipboard",
".",
"get_clipboard",
"(",
")",
"format_data",
"=",
"ast",
".",
"literal_eval",
"(",
"string_data",
")",
"ca",
"=",
"format_data",
"[",
"\"cell_attributes\"",
"]",
"rh",
"=",
"format_data",
"[",
"\"row_heights\"",
"]",
"cw",
"=",
"format_data",
"[",
"\"col_widths\"",
"]",
"assert",
"isinstance",
"(",
"ca",
",",
"types",
".",
"ListType",
")",
"assert",
"isinstance",
"(",
"rh",
",",
"types",
".",
"DictType",
")",
"assert",
"isinstance",
"(",
"cw",
",",
"types",
".",
"DictType",
")",
"# Cell attributes",
"for",
"selection_params",
",",
"tab",
",",
"attrs",
"in",
"ca",
":",
"base_selection",
"=",
"Selection",
"(",
"*",
"selection_params",
")",
"shifted_selection",
"=",
"base_selection",
".",
"shifted",
"(",
"row",
",",
"col",
")",
"if",
"\"merge_area\"",
"not",
"in",
"attrs",
":",
"# Do not paste merge areas because this may have",
"# inintended consequences for existing merge areas",
"new_cell_attribute",
"=",
"shifted_selection",
",",
"tab",
",",
"attrs",
"cell_attributes",
".",
"append",
"(",
"new_cell_attribute",
")",
"# Row heights",
"row_heights",
"=",
"self",
".",
"grid",
".",
"code_array",
".",
"row_heights",
"for",
"__row",
",",
"__tab",
"in",
"rh",
":",
"row_heights",
"[",
"__row",
"+",
"row",
",",
"tab",
"]",
"=",
"rh",
"[",
"__row",
",",
"__tab",
"]",
"# Column widths",
"col_widths",
"=",
"self",
".",
"grid",
".",
"code_array",
".",
"col_widths",
"for",
"__col",
",",
"__tab",
"in",
"cw",
":",
"col_widths",
"[",
"__col",
"+",
"col",
",",
"tab",
"]",
"=",
"cw",
"[",
"(",
"__col",
",",
"__tab",
")",
"]"
] | 34.791667 | 0.001165 |
def determine_master(port=4000):
"""Determine address of master so that workers
can connect to it. If the environment variable
SPARK_LOCAL_IP is set, that address will be used.
:param port: port on which the application runs
:return: Master address
Example usage:
SPARK_LOCAL_IP=127.0.0.1 spark-submit --master \
local[8] examples/mllib_mlp.py
"""
if os.environ.get('SPARK_LOCAL_IP'):
return os.environ['SPARK_LOCAL_IP'] + ":" + str(port)
else:
return gethostbyname(gethostname()) + ":" + str(port) | [
"def",
"determine_master",
"(",
"port",
"=",
"4000",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'SPARK_LOCAL_IP'",
")",
":",
"return",
"os",
".",
"environ",
"[",
"'SPARK_LOCAL_IP'",
"]",
"+",
"\":\"",
"+",
"str",
"(",
"port",
")",
"else",
":",
"return",
"gethostbyname",
"(",
"gethostname",
"(",
")",
")",
"+",
"\":\"",
"+",
"str",
"(",
"port",
")"
] | 34.8125 | 0.001748 |
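Usage sketch: with SPARK_LOCAL_IP set, the address comes from the environment; otherwise the local hostname is resolved (assumes the function above and its os/socket imports are in scope):

import os

os.environ['SPARK_LOCAL_IP'] = '127.0.0.1'
print(determine_master())           # 127.0.0.1:4000
print(determine_master(port=7077))  # 127.0.0.1:7077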
async def parse_response(self):
"""
:py:func:`asyncio.coroutine`
Parsing full server response (all lines).
:return: (code, lines)
:rtype: (:py:class:`aioftp.Code`, :py:class:`list` of :py:class:`str`)
    :raises aioftp.StatusCodeError: if the received code does not match the
        codes received so far
"""
code, rest = await self.parse_line()
info = [rest]
curr_code = code
while rest.startswith("-") or not curr_code.isdigit():
curr_code, rest = await self.parse_line()
if curr_code.isdigit():
info.append(rest)
if curr_code != code:
raise errors.StatusCodeError(code, curr_code, info)
else:
info.append(curr_code + rest)
return code, info | [
"async",
"def",
"parse_response",
"(",
"self",
")",
":",
"code",
",",
"rest",
"=",
"await",
"self",
".",
"parse_line",
"(",
")",
"info",
"=",
"[",
"rest",
"]",
"curr_code",
"=",
"code",
"while",
"rest",
".",
"startswith",
"(",
"\"-\"",
")",
"or",
"not",
"curr_code",
".",
"isdigit",
"(",
")",
":",
"curr_code",
",",
"rest",
"=",
"await",
"self",
".",
"parse_line",
"(",
")",
"if",
"curr_code",
".",
"isdigit",
"(",
")",
":",
"info",
".",
"append",
"(",
"rest",
")",
"if",
"curr_code",
"!=",
"code",
":",
"raise",
"errors",
".",
"StatusCodeError",
"(",
"code",
",",
"curr_code",
",",
"info",
")",
"else",
":",
"info",
".",
"append",
"(",
"curr_code",
"+",
"rest",
")",
"return",
"code",
",",
"info"
] | 34.333333 | 0.002361 |
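A simplified, synchronous sketch of the termination rule the coroutine above implements: a multi-line reply ends on a line whose leading digits repeat the opening code, and a different digit code is an error.

def parse_reply(lines):
    code, info = lines[0][:3], [lines[0][3:]]
    for line in lines[1:]:
        curr = line[:3]
        if curr.isdigit():
            if curr != code:
                raise ValueError("mismatched codes %s/%s" % (code, curr))
            info.append(line[3:])
            return code, info
        info.append(line)
    raise ValueError("unterminated reply")

print(parse_reply(["211-Features:", " MLST", "211 End"]))
# ('211', ['-Features:', ' MLST', ' End'])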
def add_requirement(
self,
install_req, # type: InstallRequirement
parent_req_name=None, # type: Optional[str]
extras_requested=None # type: Optional[Iterable[str]]
):
# type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]] # noqa: E501
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:param extras_requested: an iterable of extras used to evaluate the
environment markers.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
name = install_req.name
# If the markers do not match, ignore this requirement.
if not install_req.match_markers(extras_requested):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
name, install_req.markers,
)
return [], None
# If the wheel is not supported, raise an error.
# Should check this after filtering out based on environment markers to
# allow specifying different wheels based on the environment/OS, in a
# single requirements file.
if install_req.link and install_req.link.is_wheel:
wheel = Wheel(install_req.link.filename)
if self.check_supported_wheels and not wheel.supported():
raise InstallationError(
"%s is not a supported wheel on this platform." %
wheel.filename
)
# This next bit is really a sanity check.
assert install_req.is_direct == (parent_req_name is None), (
"a direct req shouldn't have a parent and also, "
"a non direct req should have a parent"
)
# Unnamed requirements are scanned again and the requirement won't be
# added as a dependency until after scanning.
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req], None
try:
existing_req = self.get_requirement(name)
except KeyError:
existing_req = None
has_conflicting_requirement = (
parent_req_name is None and
existing_req and
not existing_req.constraint and
existing_req.extras == install_req.extras and
existing_req.req.specifier != install_req.req.specifier
)
if has_conflicting_requirement:
raise InstallationError(
"Double requirement given: %s (already in %s, name=%r)"
% (install_req, existing_req, name)
)
# When no existing requirement exists, add the requirement as a
# dependency and it will be scanned again after.
if not existing_req:
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
# We'd want to rescan this requirements later
return [install_req], install_req
# Assume there's no need to scan, and that we've already
# encountered this for scanning.
if install_req.constraint or not existing_req.constraint:
return [], existing_req
does_not_satisfy_constraint = (
install_req.link and
not (
existing_req.link and
install_req.link.path == existing_req.link.path
)
)
if does_not_satisfy_constraint:
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % name,
)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
existing_req.extras = tuple(sorted(
set(existing_req.extras) | set(install_req.extras)
))
logger.debug(
"Setting %s extras to: %s",
existing_req, existing_req.extras,
)
# Return the existing requirement for addition to the parent and
# scanning again.
return [existing_req], existing_req | [
"def",
"add_requirement",
"(",
"self",
",",
"install_req",
",",
"# type: InstallRequirement",
"parent_req_name",
"=",
"None",
",",
"# type: Optional[str]",
"extras_requested",
"=",
"None",
"# type: Optional[Iterable[str]]",
")",
":",
"# type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]] # noqa: E501",
"name",
"=",
"install_req",
".",
"name",
"# If the markers do not match, ignore this requirement.",
"if",
"not",
"install_req",
".",
"match_markers",
"(",
"extras_requested",
")",
":",
"logger",
".",
"info",
"(",
"\"Ignoring %s: markers '%s' don't match your environment\"",
",",
"name",
",",
"install_req",
".",
"markers",
",",
")",
"return",
"[",
"]",
",",
"None",
"# If the wheel is not supported, raise an error.",
"# Should check this after filtering out based on environment markers to",
"# allow specifying different wheels based on the environment/OS, in a",
"# single requirements file.",
"if",
"install_req",
".",
"link",
"and",
"install_req",
".",
"link",
".",
"is_wheel",
":",
"wheel",
"=",
"Wheel",
"(",
"install_req",
".",
"link",
".",
"filename",
")",
"if",
"self",
".",
"check_supported_wheels",
"and",
"not",
"wheel",
".",
"supported",
"(",
")",
":",
"raise",
"InstallationError",
"(",
"\"%s is not a supported wheel on this platform.\"",
"%",
"wheel",
".",
"filename",
")",
"# This next bit is really a sanity check.",
"assert",
"install_req",
".",
"is_direct",
"==",
"(",
"parent_req_name",
"is",
"None",
")",
",",
"(",
"\"a direct req shouldn't have a parent and also, \"",
"\"a non direct req should have a parent\"",
")",
"# Unnamed requirements are scanned again and the requirement won't be",
"# added as a dependency until after scanning.",
"if",
"not",
"name",
":",
"# url or path requirement w/o an egg fragment",
"self",
".",
"unnamed_requirements",
".",
"append",
"(",
"install_req",
")",
"return",
"[",
"install_req",
"]",
",",
"None",
"try",
":",
"existing_req",
"=",
"self",
".",
"get_requirement",
"(",
"name",
")",
"except",
"KeyError",
":",
"existing_req",
"=",
"None",
"has_conflicting_requirement",
"=",
"(",
"parent_req_name",
"is",
"None",
"and",
"existing_req",
"and",
"not",
"existing_req",
".",
"constraint",
"and",
"existing_req",
".",
"extras",
"==",
"install_req",
".",
"extras",
"and",
"existing_req",
".",
"req",
".",
"specifier",
"!=",
"install_req",
".",
"req",
".",
"specifier",
")",
"if",
"has_conflicting_requirement",
":",
"raise",
"InstallationError",
"(",
"\"Double requirement given: %s (already in %s, name=%r)\"",
"%",
"(",
"install_req",
",",
"existing_req",
",",
"name",
")",
")",
"# When no existing requirement exists, add the requirement as a",
"# dependency and it will be scanned again after.",
"if",
"not",
"existing_req",
":",
"self",
".",
"requirements",
"[",
"name",
"]",
"=",
"install_req",
"# FIXME: what about other normalizations? E.g., _ vs. -?",
"if",
"name",
".",
"lower",
"(",
")",
"!=",
"name",
":",
"self",
".",
"requirement_aliases",
"[",
"name",
".",
"lower",
"(",
")",
"]",
"=",
"name",
"# We'd want to rescan this requirements later",
"return",
"[",
"install_req",
"]",
",",
"install_req",
"# Assume there's no need to scan, and that we've already",
"# encountered this for scanning.",
"if",
"install_req",
".",
"constraint",
"or",
"not",
"existing_req",
".",
"constraint",
":",
"return",
"[",
"]",
",",
"existing_req",
"does_not_satisfy_constraint",
"=",
"(",
"install_req",
".",
"link",
"and",
"not",
"(",
"existing_req",
".",
"link",
"and",
"install_req",
".",
"link",
".",
"path",
"==",
"existing_req",
".",
"link",
".",
"path",
")",
")",
"if",
"does_not_satisfy_constraint",
":",
"self",
".",
"reqs_to_cleanup",
".",
"append",
"(",
"install_req",
")",
"raise",
"InstallationError",
"(",
"\"Could not satisfy constraints for '%s': \"",
"\"installation from path or url cannot be \"",
"\"constrained to a version\"",
"%",
"name",
",",
")",
"# If we're now installing a constraint, mark the existing",
"# object for real installation.",
"existing_req",
".",
"constraint",
"=",
"False",
"existing_req",
".",
"extras",
"=",
"tuple",
"(",
"sorted",
"(",
"set",
"(",
"existing_req",
".",
"extras",
")",
"|",
"set",
"(",
"install_req",
".",
"extras",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"Setting %s extras to: %s\"",
",",
"existing_req",
",",
"existing_req",
".",
"extras",
",",
")",
"# Return the existing requirement for addition to the parent and",
"# scanning again.",
"return",
"[",
"existing_req",
"]",
",",
"existing_req"
] | 42.094828 | 0.0008 |
def find_item_in_path_by_type(self, si, path, obj_type):
"""
This function finds the first item of that type in path
:param ServiceInstance si: pyvmomi ServiceInstance
:param str path: the path to search in
:param type obj_type: the vim type of the object
:return: pyvmomi type instance object or None
"""
if obj_type is None:
return None
search_index = si.content.searchIndex
sub_folder = si.content.rootFolder
if path is None or not path:
return sub_folder
paths = path.split("/")
for currPath in paths:
if currPath is None or not currPath:
continue
manage = search_index.FindChild(sub_folder, currPath)
if isinstance(manage, obj_type):
return manage
return None | [
"def",
"find_item_in_path_by_type",
"(",
"self",
",",
"si",
",",
"path",
",",
"obj_type",
")",
":",
"if",
"obj_type",
"is",
"None",
":",
"return",
"None",
"search_index",
"=",
"si",
".",
"content",
".",
"searchIndex",
"sub_folder",
"=",
"si",
".",
"content",
".",
"rootFolder",
"if",
"path",
"is",
"None",
"or",
"not",
"path",
":",
"return",
"sub_folder",
"paths",
"=",
"path",
".",
"split",
"(",
"\"/\"",
")",
"for",
"currPath",
"in",
"paths",
":",
"if",
"currPath",
"is",
"None",
"or",
"not",
"currPath",
":",
"continue",
"manage",
"=",
"search_index",
".",
"FindChild",
"(",
"sub_folder",
",",
"currPath",
")",
"if",
"isinstance",
"(",
"manage",
",",
"obj_type",
")",
":",
"return",
"manage",
"return",
"None"
] | 31.37037 | 0.002291 |
def align_two_alignments(aln1, aln2, moltype, params=None):
"""Returns an Alignment object from two existing Alignments.
aln1, aln2: cogent.core.alignment.Alignment objects, or data that can be
used to build them.
params: dict of parameters to pass in to the Clustal app controller.
"""
    #Create Alignment object from aln1
aln1 = Alignment(aln1,MolType=moltype)
#Create mapping between abbreviated IDs and full IDs
aln1_int_map, aln1_int_keys = aln1.getIntMap()
    #Create Alignment from int_map.
aln1_int_map = Alignment(aln1_int_map,MolType=moltype)
#create Alignment object from aln
aln2 = Alignment(aln2,MolType=moltype)
#Create mapping between abbreviated IDs and full IDs
aln2_int_map, aln2_int_keys = aln2.getIntMap(prefix='seqn_')
    #Create Alignment from int_map.
aln2_int_map = Alignment(aln2_int_map,MolType=moltype)
#Update aln1_int_keys with aln2_int_keys
aln1_int_keys.update(aln2_int_keys)
    #Create Clustalw app.
app = Clustalw(InputHandler='_input_as_multiline_string',\
params=params,
SuppressStderr=True)
app.Parameters['-align'].off()
app.Parameters['-infile'].off()
app.Parameters['-profile'].on()
#Add aln_int_map as profile1
app.Parameters['-profile1'].on(\
app._tempfile_as_multiline_string(aln1_int_map.toFasta()))
#Add seq_int_map as profile2
app.Parameters['-profile2'].on(\
app._tempfile_as_multiline_string(aln2_int_map.toFasta()))
#Get results using int_map as input to app
res = app()
#Get alignment as dict out of results
alignment = dict(ClustalParser(res['Align'].readlines()))
#Make new dict mapping original IDs
new_alignment = {}
for k,v in alignment.items():
new_alignment[aln1_int_keys[k]]=v
#Create an Alignment object from alignment dict
new_alignment = Alignment(new_alignment,MolType=moltype)
#Clean up
res.cleanUp()
remove(app.Parameters['-profile1'].Value)
remove(app.Parameters['-profile2'].Value)
del(aln1,aln1_int_map,aln1_int_keys,\
aln2,aln2_int_map,aln2_int_keys,app,res,alignment)
return new_alignment | [
"def",
"align_two_alignments",
"(",
"aln1",
",",
"aln2",
",",
"moltype",
",",
"params",
"=",
"None",
")",
":",
"#create SequenceCollection object from seqs",
"aln1",
"=",
"Alignment",
"(",
"aln1",
",",
"MolType",
"=",
"moltype",
")",
"#Create mapping between abbreviated IDs and full IDs",
"aln1_int_map",
",",
"aln1_int_keys",
"=",
"aln1",
".",
"getIntMap",
"(",
")",
"#Create SequenceCollection from int_map.",
"aln1_int_map",
"=",
"Alignment",
"(",
"aln1_int_map",
",",
"MolType",
"=",
"moltype",
")",
"#create Alignment object from aln",
"aln2",
"=",
"Alignment",
"(",
"aln2",
",",
"MolType",
"=",
"moltype",
")",
"#Create mapping between abbreviated IDs and full IDs",
"aln2_int_map",
",",
"aln2_int_keys",
"=",
"aln2",
".",
"getIntMap",
"(",
"prefix",
"=",
"'seqn_'",
")",
"#Create SequenceCollection from int_map.",
"aln2_int_map",
"=",
"Alignment",
"(",
"aln2_int_map",
",",
"MolType",
"=",
"moltype",
")",
"#Update aln1_int_keys with aln2_int_keys",
"aln1_int_keys",
".",
"update",
"(",
"aln2_int_keys",
")",
"#Create Mafft app.",
"app",
"=",
"Clustalw",
"(",
"InputHandler",
"=",
"'_input_as_multiline_string'",
",",
"params",
"=",
"params",
",",
"SuppressStderr",
"=",
"True",
")",
"app",
".",
"Parameters",
"[",
"'-align'",
"]",
".",
"off",
"(",
")",
"app",
".",
"Parameters",
"[",
"'-infile'",
"]",
".",
"off",
"(",
")",
"app",
".",
"Parameters",
"[",
"'-profile'",
"]",
".",
"on",
"(",
")",
"#Add aln_int_map as profile1",
"app",
".",
"Parameters",
"[",
"'-profile1'",
"]",
".",
"on",
"(",
"app",
".",
"_tempfile_as_multiline_string",
"(",
"aln1_int_map",
".",
"toFasta",
"(",
")",
")",
")",
"#Add seq_int_map as profile2",
"app",
".",
"Parameters",
"[",
"'-profile2'",
"]",
".",
"on",
"(",
"app",
".",
"_tempfile_as_multiline_string",
"(",
"aln2_int_map",
".",
"toFasta",
"(",
")",
")",
")",
"#Get results using int_map as input to app",
"res",
"=",
"app",
"(",
")",
"#Get alignment as dict out of results",
"alignment",
"=",
"dict",
"(",
"ClustalParser",
"(",
"res",
"[",
"'Align'",
"]",
".",
"readlines",
"(",
")",
")",
")",
"#Make new dict mapping original IDs",
"new_alignment",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"alignment",
".",
"items",
"(",
")",
":",
"new_alignment",
"[",
"aln1_int_keys",
"[",
"k",
"]",
"]",
"=",
"v",
"#Create an Alignment object from alignment dict",
"new_alignment",
"=",
"Alignment",
"(",
"new_alignment",
",",
"MolType",
"=",
"moltype",
")",
"#Clean up",
"res",
".",
"cleanUp",
"(",
")",
"remove",
"(",
"app",
".",
"Parameters",
"[",
"'-profile1'",
"]",
".",
"Value",
")",
"remove",
"(",
"app",
".",
"Parameters",
"[",
"'-profile2'",
"]",
".",
"Value",
")",
"del",
"(",
"aln1",
",",
"aln1_int_map",
",",
"aln1_int_keys",
",",
"aln2",
",",
"aln2_int_map",
",",
"aln2_int_keys",
",",
"app",
",",
"res",
",",
"alignment",
")",
"return",
"new_alignment"
] | 35.666667 | 0.016371 |
def run(connection):
"""
Parse arguments and start upload/download
"""
parser = argparse.ArgumentParser(description="""
Process database dumps.
    Either download or upload a dump file to the objectstore.

    downloads the latest dump and uploads it with environment and date
    into the given container destination
""")
parser.add_argument(
'location',
nargs=1,
default=f'{DUMPFOLDER}/database.{ENV}.dump',
help="Dump file location")
parser.add_argument(
'objectstore',
nargs=1,
default=f'{DUMPFOLDER}/database.{ENV}.dump',
help="Dump file objectstore location")
parser.add_argument(
'--download-db',
action='store_true',
dest='download',
default=False,
help='Download db')
parser.add_argument(
'--upload-db',
action='store_true',
dest='upload',
default=False,
help='Upload db')
parser.add_argument(
'--container',
action='store_true',
dest='container',
default=False,
help='Upload db')
parser.add_argument(
'--days',
type=int,
nargs=1,
dest='days',
default=0,
help='Days to keep database dumps')
args = parser.parse_args()
if args.days:
LOG.debug('Cleanup old dumps')
remove_old_dumps(
connection, args.objectstore[0], args.days[0])
elif args.download:
download_database(
connection, args.objectstore[0], args.location[0])
elif args.upload:
upload_database(
connection, args.objectstore[0], args.location[0])
else:
parser.print_help() | [
"def",
"run",
"(",
"connection",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"\"\"\n Process database dumps.\n\n Either download of upload a dump file to the objectstore.\n\n downloads the latest dump and uploads with envronment and date\n into given container destination\n \"\"\"",
")",
"parser",
".",
"add_argument",
"(",
"'location'",
",",
"nargs",
"=",
"1",
",",
"default",
"=",
"f'{DUMPFOLDER}/database.{ENV}.dump'",
",",
"help",
"=",
"\"Dump file location\"",
")",
"parser",
".",
"add_argument",
"(",
"'objectstore'",
",",
"nargs",
"=",
"1",
",",
"default",
"=",
"f'{DUMPFOLDER}/database.{ENV}.dump'",
",",
"help",
"=",
"\"Dump file objectstore location\"",
")",
"parser",
".",
"add_argument",
"(",
"'--download-db'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'download'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Download db'",
")",
"parser",
".",
"add_argument",
"(",
"'--upload-db'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'upload'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Upload db'",
")",
"parser",
".",
"add_argument",
"(",
"'--container'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'container'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Upload db'",
")",
"parser",
".",
"add_argument",
"(",
"'--days'",
",",
"type",
"=",
"int",
",",
"nargs",
"=",
"1",
",",
"dest",
"=",
"'days'",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'Days to keep database dumps'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"days",
":",
"LOG",
".",
"debug",
"(",
"'Cleanup old dumps'",
")",
"remove_old_dumps",
"(",
"connection",
",",
"args",
".",
"objectstore",
"[",
"0",
"]",
",",
"args",
".",
"days",
"[",
"0",
"]",
")",
"elif",
"args",
".",
"download",
":",
"download_database",
"(",
"connection",
",",
"args",
".",
"objectstore",
"[",
"0",
"]",
",",
"args",
".",
"location",
"[",
"0",
"]",
")",
"elif",
"args",
".",
"upload",
":",
"upload_database",
"(",
"connection",
",",
"args",
".",
"objectstore",
"[",
"0",
"]",
",",
"args",
".",
"location",
"[",
"0",
"]",
")",
"else",
":",
"parser",
".",
"print_help",
"(",
")"
] | 23.898551 | 0.000582 |
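A short sketch of how the argparse setup in run() above behaves when parsed; the argument values here are made up:

```python
import argparse

# Mirrors the relevant arguments from run() above; values are hypothetical.
parser = argparse.ArgumentParser(description="Process database dumps.")
parser.add_argument('location', nargs=1, help="Dump file location")
parser.add_argument('objectstore', nargs=1, help="Dump file objectstore location")
parser.add_argument('--download-db', action='store_true', dest='download', default=False)

args = parser.parse_args(['db.dump', 'backups/db.dump', '--download-db'])
assert args.download is True
assert args.location == ['db.dump']            # nargs=1 always yields a list
assert args.objectstore == ['backups/db.dump']
```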
def url_for(context, __route_name, **parts):
"""Filter for generating urls.
Usage: {{ url('the-view-name') }} might become "/path/to/view" or
{{ url('item-details', id=123, query={'active': 'true'}) }}
might become "/items/1?active=true".
"""
app = context['app']
query = None
if 'query_' in parts:
query = parts.pop('query_')
for key in parts:
val = parts[key]
if isinstance(val, str):
            # if type is inherited from str an explicit cast to str makes sense
# if type is exactly str the operation is very fast
val = str(val)
elif type(val) is int:
# int inherited classes like bool are forbidden
val = str(val)
else:
raise TypeError("argument value should be str or int, "
"got {} -> [{}] {!r}".format(key, type(val), val))
parts[key] = val
url = app.router[__route_name].url_for(**parts)
if query:
url = url.with_query(query)
return url | [
"def",
"url_for",
"(",
"context",
",",
"__route_name",
",",
"*",
"*",
"parts",
")",
":",
"app",
"=",
"context",
"[",
"'app'",
"]",
"query",
"=",
"None",
"if",
"'query_'",
"in",
"parts",
":",
"query",
"=",
"parts",
".",
"pop",
"(",
"'query_'",
")",
"for",
"key",
"in",
"parts",
":",
"val",
"=",
"parts",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"val",
",",
"str",
")",
":",
"# if type is inherited from str expilict cast to str makes sense",
"# if type is exactly str the operation is very fast",
"val",
"=",
"str",
"(",
"val",
")",
"elif",
"type",
"(",
"val",
")",
"is",
"int",
":",
"# int inherited classes like bool are forbidden",
"val",
"=",
"str",
"(",
"val",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"argument value should be str or int, \"",
"\"got {} -> [{}] {!r}\"",
".",
"format",
"(",
"key",
",",
"type",
"(",
"val",
")",
",",
"val",
")",
")",
"parts",
"[",
"key",
"]",
"=",
"val",
"url",
"=",
"app",
".",
"router",
"[",
"__route_name",
"]",
".",
"url_for",
"(",
"*",
"*",
"parts",
")",
"if",
"query",
":",
"url",
"=",
"url",
".",
"with_query",
"(",
"query",
")",
"return",
"url"
] | 32.548387 | 0.000962 |
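The value-validation rule in url_for() above can be exercised standalone; this sketch reproduces just that check:

```python
# Plain str (or subclasses) and exact int pass; bool and other types fail.
def validate_part(key, val):
    if isinstance(val, str):
        return str(val)
    elif type(val) is int:   # bool is an int subclass, so it fails this check
        return str(val)
    raise TypeError("argument value should be str or int, "
                    "got {} -> [{}] {!r}".format(key, type(val), val))

print(validate_part('id', 123))    # '123'
try:
    validate_part('flag', True)    # bool is rejected on purpose
except TypeError as exc:
    print(exc)
```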
def complete(text, state):
"""On tab press, return the next possible completion"""
global completion_results
if state == 0:
line = readline.get_line_buffer()
if line.startswith(':'):
# Control command completion
completion_results = complete_control_command(line, text)
else:
if line.startswith('!') and text and line.startswith(text):
dropped_exclam = True
text = text[1:]
else:
dropped_exclam = False
completion_results = []
# Complete local paths
completion_results += complete_local_path(text)
# Complete from history
l = len(text)
completion_results += [w + ' ' for w in history_words if
len(w) > l and w.startswith(text)]
if readline.get_begidx() == 0:
# Completing first word from $PATH
completion_results += [w + ' ' for w in user_commands_in_path
if len(w) > l and w.startswith(text)]
completion_results = remove_dupes(completion_results)
if dropped_exclam:
completion_results = ['!' + r for r in completion_results]
if state < len(completion_results):
return completion_results[state]
completion_results = None
return None | [
"def",
"complete",
"(",
"text",
",",
"state",
")",
":",
"global",
"completion_results",
"if",
"state",
"==",
"0",
":",
"line",
"=",
"readline",
".",
"get_line_buffer",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"':'",
")",
":",
"# Control command completion",
"completion_results",
"=",
"complete_control_command",
"(",
"line",
",",
"text",
")",
"else",
":",
"if",
"line",
".",
"startswith",
"(",
"'!'",
")",
"and",
"text",
"and",
"line",
".",
"startswith",
"(",
"text",
")",
":",
"dropped_exclam",
"=",
"True",
"text",
"=",
"text",
"[",
"1",
":",
"]",
"else",
":",
"dropped_exclam",
"=",
"False",
"completion_results",
"=",
"[",
"]",
"# Complete local paths",
"completion_results",
"+=",
"complete_local_path",
"(",
"text",
")",
"# Complete from history",
"l",
"=",
"len",
"(",
"text",
")",
"completion_results",
"+=",
"[",
"w",
"+",
"' '",
"for",
"w",
"in",
"history_words",
"if",
"len",
"(",
"w",
")",
">",
"l",
"and",
"w",
".",
"startswith",
"(",
"text",
")",
"]",
"if",
"readline",
".",
"get_begidx",
"(",
")",
"==",
"0",
":",
"# Completing first word from $PATH",
"completion_results",
"+=",
"[",
"w",
"+",
"' '",
"for",
"w",
"in",
"user_commands_in_path",
"if",
"len",
"(",
"w",
")",
">",
"l",
"and",
"w",
".",
"startswith",
"(",
"text",
")",
"]",
"completion_results",
"=",
"remove_dupes",
"(",
"completion_results",
")",
"if",
"dropped_exclam",
":",
"completion_results",
"=",
"[",
"'!'",
"+",
"r",
"for",
"r",
"in",
"completion_results",
"]",
"if",
"state",
"<",
"len",
"(",
"completion_results",
")",
":",
"return",
"completion_results",
"[",
"state",
"]",
"completion_results",
"=",
"None",
"return",
"None"
] | 42.030303 | 0.002114 |
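A minimal sketch of wiring a completer like complete() above into readline (assumes GNU readline is available; the word list stands in for history_words and user_commands_in_path):

```python
import readline

WORDS = ['status', 'start', 'stop']

def demo_complete(text, state):
    # Return the state-th match, or None when the candidates run out.
    matches = [w + ' ' for w in WORDS if w.startswith(text)]
    return matches[state] if state < len(matches) else None

readline.set_completer(demo_complete)
readline.parse_and_bind('tab: complete')   # GNU readline binding syntax
```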
def _get_rev(self, fpath):
"""
        Get an SCM version number. Tries git first, then falls back to svn.
"""
rev = None
try:
cmd = ["git", "log", "-n1", "--pretty=format:\"%h\"", fpath]
rev = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()[0]
except:
pass
if not rev:
try:
cmd = ["svn", "info", fpath]
svninfo = Popen(cmd,
stdout=PIPE,
stderr=PIPE).stdout.readlines()
for info in svninfo:
tokens = info.split(":")
if tokens[0].strip() == "Last Changed Rev":
rev = tokens[1].strip()
except:
pass
return rev | [
"def",
"_get_rev",
"(",
"self",
",",
"fpath",
")",
":",
"rev",
"=",
"None",
"try",
":",
"cmd",
"=",
"[",
"\"git\"",
",",
"\"log\"",
",",
"\"-n1\"",
",",
"\"--pretty=format:\\\"%h\\\"\"",
",",
"fpath",
"]",
"rev",
"=",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"except",
":",
"pass",
"if",
"not",
"rev",
":",
"try",
":",
"cmd",
"=",
"[",
"\"svn\"",
",",
"\"info\"",
",",
"fpath",
"]",
"svninfo",
"=",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
".",
"stdout",
".",
"readlines",
"(",
")",
"for",
"info",
"in",
"svninfo",
":",
"tokens",
"=",
"info",
".",
"split",
"(",
"\":\"",
")",
"if",
"tokens",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"==",
"\"Last Changed Rev\"",
":",
"rev",
"=",
"tokens",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"except",
":",
"pass",
"return",
"rev"
] | 30.615385 | 0.008526 |
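The git branch of _get_rev() above, shown as a standalone sketch for Python 3, where Popen output is bytes:

```python
from subprocess import Popen, PIPE

def git_short_rev(fpath):
    # %h is git's abbreviated commit hash for the last commit touching fpath.
    try:
        cmd = ["git", "log", "-n1", "--pretty=format:%h", fpath]
        out, _ = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
        return out.decode().strip() or None
    except OSError:   # git not installed / not on PATH
        return None
```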
def newline(self, copy_margin=True):
"""
Insert a line ending at the current position.
"""
if copy_margin:
self.insert_text('\n' + self.document.leading_whitespace_in_current_line)
else:
self.insert_text('\n') | [
"def",
"newline",
"(",
"self",
",",
"copy_margin",
"=",
"True",
")",
":",
"if",
"copy_margin",
":",
"self",
".",
"insert_text",
"(",
"'\\n'",
"+",
"self",
".",
"document",
".",
"leading_whitespace_in_current_line",
")",
"else",
":",
"self",
".",
"insert_text",
"(",
"'\\n'",
")"
] | 33.25 | 0.010989 |
def Produce_Predictions(FileName,train,test):
"""
    Produces predictions for the testing set, based on the training set.
:param FileName: This is the csv file name we wish to have our predictions exported to.
:param train: This is the file name of a csv file that will be the training set.
:param test: This is the file name of the testing set that predictions will be made for.
:returns: Returns nothing, creates csv file containing predictions for testing set.
"""
TestFileName=test
TrainFileName=train
trainDF=pd.read_csv(train)
train=Feature_Engineering(train,trainDF)
test=Feature_Engineering(test,trainDF)
MLA=Create_Random_Forest(TrainFileName)
predictions = MLA.predict(test)
predictions = pd.DataFrame(predictions, columns=['Survived'])
test = pd.read_csv(TestFileName)
predictions = pd.concat((test.iloc[:, 0], predictions), axis = 1)
predictions.to_csv(FileName, sep=",", index = False) | [
"def",
"Produce_Predictions",
"(",
"FileName",
",",
"train",
",",
"test",
")",
":",
"TestFileName",
"=",
"test",
"TrainFileName",
"=",
"train",
"trainDF",
"=",
"pd",
".",
"read_csv",
"(",
"train",
")",
"train",
"=",
"Feature_Engineering",
"(",
"train",
",",
"trainDF",
")",
"test",
"=",
"Feature_Engineering",
"(",
"test",
",",
"trainDF",
")",
"MLA",
"=",
"Create_Random_Forest",
"(",
"TrainFileName",
")",
"predictions",
"=",
"MLA",
".",
"predict",
"(",
"test",
")",
"predictions",
"=",
"pd",
".",
"DataFrame",
"(",
"predictions",
",",
"columns",
"=",
"[",
"'Survived'",
"]",
")",
"test",
"=",
"pd",
".",
"read_csv",
"(",
"TestFileName",
")",
"predictions",
"=",
"pd",
".",
"concat",
"(",
"(",
"test",
".",
"iloc",
"[",
":",
",",
"0",
"]",
",",
"predictions",
")",
",",
"axis",
"=",
"1",
")",
"predictions",
".",
"to_csv",
"(",
"FileName",
",",
"sep",
"=",
"\",\"",
",",
"index",
"=",
"False",
")"
] | 47.45 | 0.020661 |
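The final assembly step of Produce_Predictions() above, on toy data — pairing the test file's first column with the model output:

```python
import pandas as pd

test = pd.DataFrame({'PassengerId': [892, 893, 894]})
predictions = pd.DataFrame({'Survived': [0, 1, 0]})

# Concatenate id column and predictions side by side (axis=1).
out = pd.concat((test.iloc[:, 0], predictions), axis=1)
print(out)
#    PassengerId  Survived
# 0          892         0
# 1          893         1
# 2          894         0
```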
def get_dir_walker(recursive, topdown=True, followlinks=False):
"""
Returns a recursive or a non-recursive directory walker.
:param recursive:
``True`` produces a recursive walker; ``False`` produces a non-recursive
walker.
:returns:
A walker function.
"""
if recursive:
walk = partial(os.walk, topdown=topdown, followlinks=followlinks)
else:
def walk(path, topdown=topdown, followlinks=followlinks):
try:
yield next(os.walk(path, topdown=topdown, followlinks=followlinks))
except NameError:
yield os.walk(path, topdown=topdown, followlinks=followlinks).next() #IGNORE:E1101
return walk | [
"def",
"get_dir_walker",
"(",
"recursive",
",",
"topdown",
"=",
"True",
",",
"followlinks",
"=",
"False",
")",
":",
"if",
"recursive",
":",
"walk",
"=",
"partial",
"(",
"os",
".",
"walk",
",",
"topdown",
"=",
"topdown",
",",
"followlinks",
"=",
"followlinks",
")",
"else",
":",
"def",
"walk",
"(",
"path",
",",
"topdown",
"=",
"topdown",
",",
"followlinks",
"=",
"followlinks",
")",
":",
"try",
":",
"yield",
"next",
"(",
"os",
".",
"walk",
"(",
"path",
",",
"topdown",
"=",
"topdown",
",",
"followlinks",
"=",
"followlinks",
")",
")",
"except",
"NameError",
":",
"yield",
"os",
".",
"walk",
"(",
"path",
",",
"topdown",
"=",
"topdown",
",",
"followlinks",
"=",
"followlinks",
")",
".",
"next",
"(",
")",
"#IGNORE:E1101",
"return",
"walk"
] | 36.684211 | 0.008392 |
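A usage sketch for get_dir_walker() above (assuming the function and its os/partial imports are in scope): the non-recursive walker yields exactly one directory entry:

```python
walk = get_dir_walker(recursive=False)
for dirpath, dirnames, filenames in walk('.'):
    # Only the top directory is visited; subdirectories are not descended.
    print(dirpath, len(filenames))
```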
def close(self):
"""
Finalize the GDSII stream library.
"""
self._outfile.write(struct.pack('>2h', 4, 0x0400))
if self._close:
self._outfile.close() | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"_outfile",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'>2h'",
",",
"4",
",",
"0x0400",
")",
")",
"if",
"self",
".",
"_close",
":",
"self",
".",
"_outfile",
".",
"close",
"(",
")"
] | 27.714286 | 0.01 |
def preferred_height(self, cli, width, max_available_height, wrap_lines):
"""
Preferred height: as much as needed in order to display all the completions.
"""
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
column_count = max(1, (width - self._required_margin) // column_width)
return int(math.ceil(len(complete_state.current_completions) / float(column_count))) | [
"def",
"preferred_height",
"(",
"self",
",",
"cli",
",",
"width",
",",
"max_available_height",
",",
"wrap_lines",
")",
":",
"complete_state",
"=",
"cli",
".",
"current_buffer",
".",
"complete_state",
"column_width",
"=",
"self",
".",
"_get_column_width",
"(",
"complete_state",
")",
"column_count",
"=",
"max",
"(",
"1",
",",
"(",
"width",
"-",
"self",
".",
"_required_margin",
")",
"//",
"column_width",
")",
"return",
"int",
"(",
"math",
".",
"ceil",
"(",
"len",
"(",
"complete_state",
".",
"current_completions",
")",
"/",
"float",
"(",
"column_count",
")",
")",
")"
] | 52 | 0.008403 |
def cli():
"""Run the command line interface."""
args = docopt.docopt(__doc__, version=__VERSION__)
secure = args['--secure']
numberofwords = int(args['<numberofwords>'])
dictpath = args['--dict']
if dictpath is not None:
dictfile = open(dictpath)
else:
dictfile = load_stream('words.txt')
with dictfile:
wordlist = read_wordlist(dictfile)
words = generate_words(numberofwords, wordlist, secure=secure)
print(' '.join(words)) | [
"def",
"cli",
"(",
")",
":",
"args",
"=",
"docopt",
".",
"docopt",
"(",
"__doc__",
",",
"version",
"=",
"__VERSION__",
")",
"secure",
"=",
"args",
"[",
"'--secure'",
"]",
"numberofwords",
"=",
"int",
"(",
"args",
"[",
"'<numberofwords>'",
"]",
")",
"dictpath",
"=",
"args",
"[",
"'--dict'",
"]",
"if",
"dictpath",
"is",
"not",
"None",
":",
"dictfile",
"=",
"open",
"(",
"dictpath",
")",
"else",
":",
"dictfile",
"=",
"load_stream",
"(",
"'words.txt'",
")",
"with",
"dictfile",
":",
"wordlist",
"=",
"read_wordlist",
"(",
"dictfile",
")",
"words",
"=",
"generate_words",
"(",
"numberofwords",
",",
"wordlist",
",",
"secure",
"=",
"secure",
")",
"print",
"(",
"' '",
".",
"join",
"(",
"words",
")",
")"
] | 29.75 | 0.002037 |
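A plausible sketch of the generate_words() helper that cli() above relies on — its real body is not part of this record, so treat this as an assumption: secure=True swaps the default PRNG for the secrets module:

```python
import random
import secrets

def generate_words(n, wordlist, secure=False):
    # secrets.choice uses the OS CSPRNG; random.choice is the default PRNG.
    choose = secrets.choice if secure else random.choice
    return [choose(wordlist) for _ in range(n)]

print(' '.join(generate_words(4, ['alpha', 'bravo', 'charlie'], secure=True)))
```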
def infer_returned_object(pyfunction, args):
"""Infer the `PyObject` this `PyFunction` returns after calling"""
object_info = pyfunction.pycore.object_info
result = object_info.get_exact_returned(pyfunction, args)
if result is not None:
return result
result = _infer_returned(pyfunction, args)
if result is not None:
if args and pyfunction.get_module().get_resource() is not None:
params = args.get_arguments(
pyfunction.get_param_names(special_args=False))
object_info.function_called(pyfunction, params, result)
return result
result = object_info.get_returned(pyfunction, args)
if result is not None:
return result
hint_return = get_type_hinting_factory(pyfunction.pycore.project).make_return_provider()
type_ = hint_return(pyfunction)
if type_ is not None:
return rope.base.pyobjects.PyObject(type_) | [
"def",
"infer_returned_object",
"(",
"pyfunction",
",",
"args",
")",
":",
"object_info",
"=",
"pyfunction",
".",
"pycore",
".",
"object_info",
"result",
"=",
"object_info",
".",
"get_exact_returned",
"(",
"pyfunction",
",",
"args",
")",
"if",
"result",
"is",
"not",
"None",
":",
"return",
"result",
"result",
"=",
"_infer_returned",
"(",
"pyfunction",
",",
"args",
")",
"if",
"result",
"is",
"not",
"None",
":",
"if",
"args",
"and",
"pyfunction",
".",
"get_module",
"(",
")",
".",
"get_resource",
"(",
")",
"is",
"not",
"None",
":",
"params",
"=",
"args",
".",
"get_arguments",
"(",
"pyfunction",
".",
"get_param_names",
"(",
"special_args",
"=",
"False",
")",
")",
"object_info",
".",
"function_called",
"(",
"pyfunction",
",",
"params",
",",
"result",
")",
"return",
"result",
"result",
"=",
"object_info",
".",
"get_returned",
"(",
"pyfunction",
",",
"args",
")",
"if",
"result",
"is",
"not",
"None",
":",
"return",
"result",
"hint_return",
"=",
"get_type_hinting_factory",
"(",
"pyfunction",
".",
"pycore",
".",
"project",
")",
".",
"make_return_provider",
"(",
")",
"type_",
"=",
"hint_return",
"(",
"pyfunction",
")",
"if",
"type_",
"is",
"not",
"None",
":",
"return",
"rope",
".",
"base",
".",
"pyobjects",
".",
"PyObject",
"(",
"type_",
")"
] | 45.35 | 0.00216 |
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
from email.parser import Parser
self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
return self._pkg_info | [
"def",
"_parsed_pkg_info",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_pkg_info",
"except",
"AttributeError",
":",
"from",
"email",
".",
"parser",
"import",
"Parser",
"self",
".",
"_pkg_info",
"=",
"Parser",
"(",
")",
".",
"parsestr",
"(",
"self",
".",
"get_metadata",
"(",
"self",
".",
"PKG_INFO",
")",
")",
"return",
"self",
".",
"_pkg_info"
] | 37 | 0.009901 |
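A standalone demo of the parsing step in _parsed_pkg_info() above: PKG-INFO metadata is RFC 822-style, so email.parser reads it directly:

```python
from email.parser import Parser

raw = "Metadata-Version: 1.0\nName: example\nVersion: 0.1\n"
msg = Parser().parsestr(raw)
print(msg['Name'], msg['Version'])   # example 0.1
```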
def delete(self, accountId):
"""Delete an account"""
acct = BaseAccount.get(accountId)
if not acct:
raise Exception('No such account found')
acct.delete()
auditlog(event='account.delete', actor=session['user'].username, data={'accountId': accountId})
return self.make_response('Account deleted') | [
"def",
"delete",
"(",
"self",
",",
"accountId",
")",
":",
"acct",
"=",
"BaseAccount",
".",
"get",
"(",
"accountId",
")",
"if",
"not",
"acct",
":",
"raise",
"Exception",
"(",
"'No such account found'",
")",
"acct",
".",
"delete",
"(",
")",
"auditlog",
"(",
"event",
"=",
"'account.delete'",
",",
"actor",
"=",
"session",
"[",
"'user'",
"]",
".",
"username",
",",
"data",
"=",
"{",
"'accountId'",
":",
"accountId",
"}",
")",
"return",
"self",
".",
"make_response",
"(",
"'Account deleted'",
")"
] | 34.8 | 0.008403 |
def recipe(recipe):
"""Apply the given recipe to a node
Sets the run_list to the given recipe
If no nodes/hostname.json file exists, it creates one
"""
env.host_string = lib.get_env_host_string()
lib.print_header(
"Applying recipe '{0}' on node {1}".format(recipe, env.host_string))
# Create configuration and sync node
data = lib.get_node(env.host_string)
data["run_list"] = ["recipe[{0}]".format(recipe)]
if not __testing__:
if env.autodeploy_chef and not chef.chef_test():
deploy_chef(ask="no")
chef.sync_node(data) | [
"def",
"recipe",
"(",
"recipe",
")",
":",
"env",
".",
"host_string",
"=",
"lib",
".",
"get_env_host_string",
"(",
")",
"lib",
".",
"print_header",
"(",
"\"Applying recipe '{0}' on node {1}\"",
".",
"format",
"(",
"recipe",
",",
"env",
".",
"host_string",
")",
")",
"# Create configuration and sync node",
"data",
"=",
"lib",
".",
"get_node",
"(",
"env",
".",
"host_string",
")",
"data",
"[",
"\"run_list\"",
"]",
"=",
"[",
"\"recipe[{0}]\"",
".",
"format",
"(",
"recipe",
")",
"]",
"if",
"not",
"__testing__",
":",
"if",
"env",
".",
"autodeploy_chef",
"and",
"not",
"chef",
".",
"chef_test",
"(",
")",
":",
"deploy_chef",
"(",
"ask",
"=",
"\"no\"",
")",
"chef",
".",
"sync_node",
"(",
"data",
")"
] | 34.117647 | 0.001678 |
def get_stakes(self):
"""List all your stakes.
Returns:
list of dicts: stakes
Each stake is a dict with the following fields:
* confidence (`decimal.Decimal`)
* roundNumber (`int`)
* tournamentId (`int`)
* soc (`decimal.Decimal`)
* insertedAt (`datetime`)
            * staker (`str`): NMR address used for staking
* status (`str`)
* txHash (`str`)
* value (`decimal.Decimal`)
Example:
>>> api = NumerAPI(secret_key="..", public_id="..")
>>> api.get_stakes()
[{'confidence': Decimal('0.053'),
'insertedAt': datetime.datetime(2017, 9, 26, 8, 18, 36, 709000, tzinfo=tzutc()),
'roundNumber': 74,
'soc': Decimal('56.60'),
'staker': '0x0000000000000000000000000000000000003f9e',
'status': 'confirmed',
'tournamentId': 1,
'txHash': '0x1cbb985629552a0f57b98a1e30a5e7f101a992121db318cef02e02aaf0e91c95',
'value': Decimal('3.00')},
..
]
"""
query = """
query {
user {
stakeTxs {
confidence
insertedAt
roundNumber
tournamentId
soc
staker
status
txHash
value
}
}
}
"""
data = self.raw_query(query, authorization=True)['data']
stakes = data['user']['stakeTxs']
# convert strings to python objects
for s in stakes:
utils.replace(s, "insertedAt", utils.parse_datetime_string)
utils.replace(s, "soc", utils.parse_float_string)
utils.replace(s, "confidence", utils.parse_float_string)
utils.replace(s, "value", utils.parse_float_string)
return stakes | [
"def",
"get_stakes",
"(",
"self",
")",
":",
"query",
"=",
"\"\"\"\n query {\n user {\n stakeTxs {\n confidence\n insertedAt\n roundNumber\n tournamentId\n soc\n staker\n status\n txHash\n value\n }\n }\n }\n \"\"\"",
"data",
"=",
"self",
".",
"raw_query",
"(",
"query",
",",
"authorization",
"=",
"True",
")",
"[",
"'data'",
"]",
"stakes",
"=",
"data",
"[",
"'user'",
"]",
"[",
"'stakeTxs'",
"]",
"# convert strings to python objects",
"for",
"s",
"in",
"stakes",
":",
"utils",
".",
"replace",
"(",
"s",
",",
"\"insertedAt\"",
",",
"utils",
".",
"parse_datetime_string",
")",
"utils",
".",
"replace",
"(",
"s",
",",
"\"soc\"",
",",
"utils",
".",
"parse_float_string",
")",
"utils",
".",
"replace",
"(",
"s",
",",
"\"confidence\"",
",",
"utils",
".",
"parse_float_string",
")",
"utils",
".",
"replace",
"(",
"s",
",",
"\"value\"",
",",
"utils",
".",
"parse_float_string",
")",
"return",
"stakes"
] | 32.7 | 0.001979 |
def time(self):
"""!
@brief (list) Returns sampling times when dynamic is measured during simulation.
"""
if ( (self._ccore_sync_dynamic_pointer is not None) and ( (self._time is None) or (len(self._time) == 0) ) ):
self._time = wrapper.sync_dynamic_get_time(self._ccore_sync_dynamic_pointer);
return self._time; | [
"def",
"time",
"(",
"self",
")",
":",
"if",
"(",
"(",
"self",
".",
"_ccore_sync_dynamic_pointer",
"is",
"not",
"None",
")",
"and",
"(",
"(",
"self",
".",
"_time",
"is",
"None",
")",
"or",
"(",
"len",
"(",
"self",
".",
"_time",
")",
"==",
"0",
")",
")",
")",
":",
"self",
".",
"_time",
"=",
"wrapper",
".",
"sync_dynamic_get_time",
"(",
"self",
".",
"_ccore_sync_dynamic_pointer",
")",
"return",
"self",
".",
"_time"
] | 42.444444 | 0.033333 |
def xyplot(points, title="", c="b", corner=1, lines=False):
"""
Return a ``vtkXYPlotActor`` that is a plot of `x` versus `y`,
where `points` is a list of `(x,y)` points.
:param int corner: assign position:
- 1, topleft,
- 2, topright,
- 3, bottomleft,
- 4, bottomright.
.. hint:: Example: |fitspheres1.py|_
"""
c = vc.getColor(c) # allow different codings
array_x = vtk.vtkFloatArray()
array_y = vtk.vtkFloatArray()
array_x.SetNumberOfTuples(len(points))
array_y.SetNumberOfTuples(len(points))
for i, p in enumerate(points):
array_x.InsertValue(i, p[0])
array_y.InsertValue(i, p[1])
field = vtk.vtkFieldData()
field.AddArray(array_x)
field.AddArray(array_y)
data = vtk.vtkDataObject()
data.SetFieldData(field)
plot = vtk.vtkXYPlotActor()
plot.AddDataObjectInput(data)
plot.SetDataObjectXComponent(0, 0)
plot.SetDataObjectYComponent(0, 1)
plot.SetXValuesToValue()
plot.SetXTitle(title)
plot.SetYTitle("")
plot.ExchangeAxesOff()
plot.PlotPointsOn()
if not lines:
plot.PlotLinesOff()
plot.GetProperty().SetPointSize(5)
plot.GetProperty().SetLineWidth(2)
plot.SetNumberOfXLabels(3) # not working
plot.GetProperty().SetColor(0, 0, 0)
plot.GetProperty().SetOpacity(0.7)
plot.SetPlotColor(0, c[0], c[1], c[2])
tprop = plot.GetAxisLabelTextProperty()
tprop.SetColor(0, 0, 0)
tprop.SetOpacity(0.7)
tprop.SetFontFamily(0)
tprop.BoldOff()
tprop.ItalicOff()
tprop.ShadowOff()
tprop.SetFontSize(3) # not working
plot.SetAxisTitleTextProperty(tprop)
plot.SetAxisLabelTextProperty(tprop)
plot.SetTitleTextProperty(tprop)
if corner == 1:
plot.GetPositionCoordinate().SetValue(0.0, 0.8, 0)
if corner == 2:
plot.GetPositionCoordinate().SetValue(0.7, 0.8, 0)
if corner == 3:
plot.GetPositionCoordinate().SetValue(0.0, 0.0, 0)
if corner == 4:
plot.GetPositionCoordinate().SetValue(0.7, 0.0, 0)
plot.GetPosition2Coordinate().SetValue(0.3, 0.2, 0)
return plot | [
"def",
"xyplot",
"(",
"points",
",",
"title",
"=",
"\"\"",
",",
"c",
"=",
"\"b\"",
",",
"corner",
"=",
"1",
",",
"lines",
"=",
"False",
")",
":",
"c",
"=",
"vc",
".",
"getColor",
"(",
"c",
")",
"# allow different codings",
"array_x",
"=",
"vtk",
".",
"vtkFloatArray",
"(",
")",
"array_y",
"=",
"vtk",
".",
"vtkFloatArray",
"(",
")",
"array_x",
".",
"SetNumberOfTuples",
"(",
"len",
"(",
"points",
")",
")",
"array_y",
".",
"SetNumberOfTuples",
"(",
"len",
"(",
"points",
")",
")",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"points",
")",
":",
"array_x",
".",
"InsertValue",
"(",
"i",
",",
"p",
"[",
"0",
"]",
")",
"array_y",
".",
"InsertValue",
"(",
"i",
",",
"p",
"[",
"1",
"]",
")",
"field",
"=",
"vtk",
".",
"vtkFieldData",
"(",
")",
"field",
".",
"AddArray",
"(",
"array_x",
")",
"field",
".",
"AddArray",
"(",
"array_y",
")",
"data",
"=",
"vtk",
".",
"vtkDataObject",
"(",
")",
"data",
".",
"SetFieldData",
"(",
"field",
")",
"plot",
"=",
"vtk",
".",
"vtkXYPlotActor",
"(",
")",
"plot",
".",
"AddDataObjectInput",
"(",
"data",
")",
"plot",
".",
"SetDataObjectXComponent",
"(",
"0",
",",
"0",
")",
"plot",
".",
"SetDataObjectYComponent",
"(",
"0",
",",
"1",
")",
"plot",
".",
"SetXValuesToValue",
"(",
")",
"plot",
".",
"SetXTitle",
"(",
"title",
")",
"plot",
".",
"SetYTitle",
"(",
"\"\"",
")",
"plot",
".",
"ExchangeAxesOff",
"(",
")",
"plot",
".",
"PlotPointsOn",
"(",
")",
"if",
"not",
"lines",
":",
"plot",
".",
"PlotLinesOff",
"(",
")",
"plot",
".",
"GetProperty",
"(",
")",
".",
"SetPointSize",
"(",
"5",
")",
"plot",
".",
"GetProperty",
"(",
")",
".",
"SetLineWidth",
"(",
"2",
")",
"plot",
".",
"SetNumberOfXLabels",
"(",
"3",
")",
"# not working",
"plot",
".",
"GetProperty",
"(",
")",
".",
"SetColor",
"(",
"0",
",",
"0",
",",
"0",
")",
"plot",
".",
"GetProperty",
"(",
")",
".",
"SetOpacity",
"(",
"0.7",
")",
"plot",
".",
"SetPlotColor",
"(",
"0",
",",
"c",
"[",
"0",
"]",
",",
"c",
"[",
"1",
"]",
",",
"c",
"[",
"2",
"]",
")",
"tprop",
"=",
"plot",
".",
"GetAxisLabelTextProperty",
"(",
")",
"tprop",
".",
"SetColor",
"(",
"0",
",",
"0",
",",
"0",
")",
"tprop",
".",
"SetOpacity",
"(",
"0.7",
")",
"tprop",
".",
"SetFontFamily",
"(",
"0",
")",
"tprop",
".",
"BoldOff",
"(",
")",
"tprop",
".",
"ItalicOff",
"(",
")",
"tprop",
".",
"ShadowOff",
"(",
")",
"tprop",
".",
"SetFontSize",
"(",
"3",
")",
"# not working",
"plot",
".",
"SetAxisTitleTextProperty",
"(",
"tprop",
")",
"plot",
".",
"SetAxisLabelTextProperty",
"(",
"tprop",
")",
"plot",
".",
"SetTitleTextProperty",
"(",
"tprop",
")",
"if",
"corner",
"==",
"1",
":",
"plot",
".",
"GetPositionCoordinate",
"(",
")",
".",
"SetValue",
"(",
"0.0",
",",
"0.8",
",",
"0",
")",
"if",
"corner",
"==",
"2",
":",
"plot",
".",
"GetPositionCoordinate",
"(",
")",
".",
"SetValue",
"(",
"0.7",
",",
"0.8",
",",
"0",
")",
"if",
"corner",
"==",
"3",
":",
"plot",
".",
"GetPositionCoordinate",
"(",
")",
".",
"SetValue",
"(",
"0.0",
",",
"0.0",
",",
"0",
")",
"if",
"corner",
"==",
"4",
":",
"plot",
".",
"GetPositionCoordinate",
"(",
")",
".",
"SetValue",
"(",
"0.7",
",",
"0.0",
",",
"0",
")",
"plot",
".",
"GetPosition2Coordinate",
"(",
")",
".",
"SetValue",
"(",
"0.3",
",",
"0.2",
",",
"0",
")",
"return",
"plot"
] | 30.397059 | 0.000469 |
def all_nonperiodic_features(times, mags, errs,
magsarefluxes=False,
stetson_weightbytimediff=True):
'''This rolls up the feature functions above and returns a single dict.
NOTE: this doesn't calculate the CDPP to save time since binning and
smoothing takes a while for dense light curves.
Parameters
----------
times,mags,errs : np.array
        The input mag/flux time-series to calculate features for.
magsarefluxes : bool
If True, indicates `mags` is actually an array of flux values.
stetson_weightbytimediff : bool
If this is True, the Stetson index for any pair of mags will be
reweighted by the difference in times between them using the scheme in
        Fruth+ 2012 and Zhang+ 2003 (as seen in Sokolovsky+ 2017)::
w_i = exp(- (t_i+1 - t_i)/ delta_t )
Returns
-------
dict
Returns a dict with all of the variability features.
'''
# remove nans first
finiteind = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
ftimes, fmags, ferrs = times[finiteind], mags[finiteind], errs[finiteind]
# remove zero errors
nzind = npnonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
xfeatures = nonperiodic_lightcurve_features(times, mags, errs,
magsarefluxes=magsarefluxes)
stetj = stetson_jindex(ftimes, fmags, ferrs,
weightbytimediff=stetson_weightbytimediff)
stetk = stetson_kindex(fmags, ferrs)
xfeatures.update({'stetsonj':stetj,
'stetsonk':stetk})
return xfeatures | [
"def",
"all_nonperiodic_features",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"magsarefluxes",
"=",
"False",
",",
"stetson_weightbytimediff",
"=",
"True",
")",
":",
"# remove nans first",
"finiteind",
"=",
"npisfinite",
"(",
"times",
")",
"&",
"npisfinite",
"(",
"mags",
")",
"&",
"npisfinite",
"(",
"errs",
")",
"ftimes",
",",
"fmags",
",",
"ferrs",
"=",
"times",
"[",
"finiteind",
"]",
",",
"mags",
"[",
"finiteind",
"]",
",",
"errs",
"[",
"finiteind",
"]",
"# remove zero errors",
"nzind",
"=",
"npnonzero",
"(",
"ferrs",
")",
"ftimes",
",",
"fmags",
",",
"ferrs",
"=",
"ftimes",
"[",
"nzind",
"]",
",",
"fmags",
"[",
"nzind",
"]",
",",
"ferrs",
"[",
"nzind",
"]",
"xfeatures",
"=",
"nonperiodic_lightcurve_features",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"magsarefluxes",
"=",
"magsarefluxes",
")",
"stetj",
"=",
"stetson_jindex",
"(",
"ftimes",
",",
"fmags",
",",
"ferrs",
",",
"weightbytimediff",
"=",
"stetson_weightbytimediff",
")",
"stetk",
"=",
"stetson_kindex",
"(",
"fmags",
",",
"ferrs",
")",
"xfeatures",
".",
"update",
"(",
"{",
"'stetsonj'",
":",
"stetj",
",",
"'stetsonk'",
":",
"stetk",
"}",
")",
"return",
"xfeatures"
] | 33.06 | 0.001763 |
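The input-cleaning steps from all_nonperiodic_features() above, shown on toy arrays — drop non-finite rows, then drop zero-error rows:

```python
import numpy as np

times = np.array([1.0, 2.0, 3.0, 4.0])
mags  = np.array([10.0, np.nan, 10.2, 10.1])
errs  = np.array([0.01, 0.01, 0.0, 0.02])

# Keep only rows finite in all three arrays.
finite = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[finite], mags[finite], errs[finite]

# Then drop rows whose error is exactly zero.
nz = np.nonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nz], fmags[nz], ferrs[nz]
print(ftimes)   # [1. 4.]
```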
def dumps(obj, protocol=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
file = StringIO()
try:
cp = CloudPickler(file, protocol=protocol)
cp.dump(obj)
return file.getvalue()
finally:
file.close() | [
"def",
"dumps",
"(",
"obj",
",",
"protocol",
"=",
"None",
")",
":",
"file",
"=",
"StringIO",
"(",
")",
"try",
":",
"cp",
"=",
"CloudPickler",
"(",
"file",
",",
"protocol",
"=",
"protocol",
")",
"cp",
".",
"dump",
"(",
"obj",
")",
"return",
"file",
".",
"getvalue",
"(",
")",
"finally",
":",
"file",
".",
"close",
"(",
")"
] | 34.176471 | 0.001675 |
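A round-trip sketch matching dumps() above; cloudpickle emits standard pickle data, so plain pickle stands in here (BytesIO replaces StringIO on Python 3):

```python
import io
import pickle

payload = {'answer': 42}
buf = io.BytesIO()
pickle.Pickler(buf, protocol=pickle.HIGHEST_PROTOCOL).dump(payload)

# Serialized bytes can be restored with pickle.loads.
restored = pickle.loads(buf.getvalue())
assert restored == payload
```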
def __process_node(self, node: yaml.Node,
expected_type: Type) -> yaml.Node:
"""Processes a node.
This is the main function that implements yatiml's \
functionality. It figures out how to interpret this node \
(recognition), then applies syntactic sugar, and finally \
recurses to the subnodes, if any.
Args:
node: The node to process.
expected_type: The type we expect this node to be.
Returns:
The transformed node, or a transformed copy.
"""
logger.info('Processing node {} expecting type {}'.format(
node, expected_type))
# figure out how to interpret this node
recognized_types, message = self.__recognizer.recognize(
node, expected_type)
if len(recognized_types) != 1:
raise RecognitionError(message)
recognized_type = recognized_types[0]
# remove syntactic sugar
logger.debug('Savorizing node {}'.format(node))
if recognized_type in self._registered_classes.values():
node = self.__savorize(node, recognized_type)
logger.debug('Savorized, now {}'.format(node))
# process subnodes
logger.debug('Recursing into subnodes')
if is_generic_list(recognized_type):
if node.tag != 'tag:yaml.org,2002:seq':
raise RecognitionError('{}{}Expected a {} here'.format(
node.start_mark, os.linesep,
type_to_desc(expected_type)))
for item in node.value:
self.__process_node(item,
generic_type_args(recognized_type)[0])
elif is_generic_dict(recognized_type):
if node.tag != 'tag:yaml.org,2002:map':
raise RecognitionError('{}{}Expected a {} here'.format(
node.start_mark, os.linesep,
type_to_desc(expected_type)))
for _, value_node in node.value:
self.__process_node(value_node,
generic_type_args(recognized_type)[1])
elif recognized_type in self._registered_classes.values():
if (not issubclass(recognized_type, enum.Enum)
and not issubclass(recognized_type, str)
and not issubclass(recognized_type, UserString)):
for attr_name, type_, _ in class_subobjects(recognized_type):
cnode = Node(node)
if cnode.has_attribute(attr_name):
subnode = cnode.get_attribute(attr_name)
new_subnode = self.__process_node(
subnode.yaml_node, type_)
cnode.set_attribute(attr_name, new_subnode)
else:
logger.debug('Not a generic class or a user-defined class, not'
' recursing')
node.tag = self.__type_to_tag(recognized_type)
logger.debug('Finished processing node {}'.format(node))
return node | [
"def",
"__process_node",
"(",
"self",
",",
"node",
":",
"yaml",
".",
"Node",
",",
"expected_type",
":",
"Type",
")",
"->",
"yaml",
".",
"Node",
":",
"logger",
".",
"info",
"(",
"'Processing node {} expecting type {}'",
".",
"format",
"(",
"node",
",",
"expected_type",
")",
")",
"# figure out how to interpret this node",
"recognized_types",
",",
"message",
"=",
"self",
".",
"__recognizer",
".",
"recognize",
"(",
"node",
",",
"expected_type",
")",
"if",
"len",
"(",
"recognized_types",
")",
"!=",
"1",
":",
"raise",
"RecognitionError",
"(",
"message",
")",
"recognized_type",
"=",
"recognized_types",
"[",
"0",
"]",
"# remove syntactic sugar",
"logger",
".",
"debug",
"(",
"'Savorizing node {}'",
".",
"format",
"(",
"node",
")",
")",
"if",
"recognized_type",
"in",
"self",
".",
"_registered_classes",
".",
"values",
"(",
")",
":",
"node",
"=",
"self",
".",
"__savorize",
"(",
"node",
",",
"recognized_type",
")",
"logger",
".",
"debug",
"(",
"'Savorized, now {}'",
".",
"format",
"(",
"node",
")",
")",
"# process subnodes",
"logger",
".",
"debug",
"(",
"'Recursing into subnodes'",
")",
"if",
"is_generic_list",
"(",
"recognized_type",
")",
":",
"if",
"node",
".",
"tag",
"!=",
"'tag:yaml.org,2002:seq'",
":",
"raise",
"RecognitionError",
"(",
"'{}{}Expected a {} here'",
".",
"format",
"(",
"node",
".",
"start_mark",
",",
"os",
".",
"linesep",
",",
"type_to_desc",
"(",
"expected_type",
")",
")",
")",
"for",
"item",
"in",
"node",
".",
"value",
":",
"self",
".",
"__process_node",
"(",
"item",
",",
"generic_type_args",
"(",
"recognized_type",
")",
"[",
"0",
"]",
")",
"elif",
"is_generic_dict",
"(",
"recognized_type",
")",
":",
"if",
"node",
".",
"tag",
"!=",
"'tag:yaml.org,2002:map'",
":",
"raise",
"RecognitionError",
"(",
"'{}{}Expected a {} here'",
".",
"format",
"(",
"node",
".",
"start_mark",
",",
"os",
".",
"linesep",
",",
"type_to_desc",
"(",
"expected_type",
")",
")",
")",
"for",
"_",
",",
"value_node",
"in",
"node",
".",
"value",
":",
"self",
".",
"__process_node",
"(",
"value_node",
",",
"generic_type_args",
"(",
"recognized_type",
")",
"[",
"1",
"]",
")",
"elif",
"recognized_type",
"in",
"self",
".",
"_registered_classes",
".",
"values",
"(",
")",
":",
"if",
"(",
"not",
"issubclass",
"(",
"recognized_type",
",",
"enum",
".",
"Enum",
")",
"and",
"not",
"issubclass",
"(",
"recognized_type",
",",
"str",
")",
"and",
"not",
"issubclass",
"(",
"recognized_type",
",",
"UserString",
")",
")",
":",
"for",
"attr_name",
",",
"type_",
",",
"_",
"in",
"class_subobjects",
"(",
"recognized_type",
")",
":",
"cnode",
"=",
"Node",
"(",
"node",
")",
"if",
"cnode",
".",
"has_attribute",
"(",
"attr_name",
")",
":",
"subnode",
"=",
"cnode",
".",
"get_attribute",
"(",
"attr_name",
")",
"new_subnode",
"=",
"self",
".",
"__process_node",
"(",
"subnode",
".",
"yaml_node",
",",
"type_",
")",
"cnode",
".",
"set_attribute",
"(",
"attr_name",
",",
"new_subnode",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'Not a generic class or a user-defined class, not'",
"' recursing'",
")",
"node",
".",
"tag",
"=",
"self",
".",
"__type_to_tag",
"(",
"recognized_type",
")",
"logger",
".",
"debug",
"(",
"'Finished processing node {}'",
".",
"format",
"(",
"node",
")",
")",
"return",
"node"
] | 42.647887 | 0.000968 |
def kallisto_table(kallisto_dir, index):
"""
convert kallisto output to a count table where the rows are
equivalence classes and the columns are cells
"""
quant_dir = os.path.join(kallisto_dir, "quant")
out_file = os.path.join(quant_dir, "matrix.csv")
if file_exists(out_file):
return out_file
tsvfile = os.path.join(quant_dir, "matrix.tsv")
ecfile = os.path.join(quant_dir, "matrix.ec")
cellsfile = os.path.join(quant_dir, "matrix.cells")
fastafile = os.path.splitext(index)[0] + ".fa"
fasta_names = fasta.sequence_names(fastafile)
ec_names = get_ec_names(ecfile, fasta_names)
df = pd.read_table(tsvfile, header=None, names=["ec", "cell", "count"])
df["ec"] = [ec_names[x] for x in df["ec"]]
df = df.pivot(index='ec', columns='cell', values='count')
cellnames = get_cell_names(cellsfile)
colnames = [cellnames[x] for x in df.columns]
df.columns = colnames
df.to_csv(out_file)
return out_file | [
"def",
"kallisto_table",
"(",
"kallisto_dir",
",",
"index",
")",
":",
"quant_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"kallisto_dir",
",",
"\"quant\"",
")",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"quant_dir",
",",
"\"matrix.csv\"",
")",
"if",
"file_exists",
"(",
"out_file",
")",
":",
"return",
"out_file",
"tsvfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"quant_dir",
",",
"\"matrix.tsv\"",
")",
"ecfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"quant_dir",
",",
"\"matrix.ec\"",
")",
"cellsfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"quant_dir",
",",
"\"matrix.cells\"",
")",
"fastafile",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"index",
")",
"[",
"0",
"]",
"+",
"\".fa\"",
"fasta_names",
"=",
"fasta",
".",
"sequence_names",
"(",
"fastafile",
")",
"ec_names",
"=",
"get_ec_names",
"(",
"ecfile",
",",
"fasta_names",
")",
"df",
"=",
"pd",
".",
"read_table",
"(",
"tsvfile",
",",
"header",
"=",
"None",
",",
"names",
"=",
"[",
"\"ec\"",
",",
"\"cell\"",
",",
"\"count\"",
"]",
")",
"df",
"[",
"\"ec\"",
"]",
"=",
"[",
"ec_names",
"[",
"x",
"]",
"for",
"x",
"in",
"df",
"[",
"\"ec\"",
"]",
"]",
"df",
"=",
"df",
".",
"pivot",
"(",
"index",
"=",
"'ec'",
",",
"columns",
"=",
"'cell'",
",",
"values",
"=",
"'count'",
")",
"cellnames",
"=",
"get_cell_names",
"(",
"cellsfile",
")",
"colnames",
"=",
"[",
"cellnames",
"[",
"x",
"]",
"for",
"x",
"in",
"df",
".",
"columns",
"]",
"df",
".",
"columns",
"=",
"colnames",
"df",
".",
"to_csv",
"(",
"out_file",
")",
"return",
"out_file"
] | 41.826087 | 0.001016 |
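The long-to-wide pivot at the heart of kallisto_table() above, on toy data — rows become equivalence classes, columns become cells:

```python
import pandas as pd

df = pd.DataFrame({'ec':    ['ec1', 'ec1', 'ec2'],
                   'cell':  ['A', 'B', 'A'],
                   'count': [5, 3, 7]})
wide = df.pivot(index='ec', columns='cell', values='count')
print(wide)
# cell    A    B
# ec
# ec1   5.0  3.0
# ec2   7.0  NaN
```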
def _parse_00(ofile):
"""
return 00 outfile as a pandas DataFrame
"""
with open(ofile) as infile:
## read in the results summary from the end of the outfile
arr = np.array(
[" "] + infile.read().split("Summary of MCMC results\n\n\n")[1:][0]\
.strip().split())
## reshape array
rows = 12
        cols = (arr.shape[0] + 1) // rows
arr = arr.reshape(rows, cols)
## make into labeled data frame
df = pd.DataFrame(
data=arr[1:, 1:],
columns=arr[0, 1:],
index=arr[1:, 0],
).T
return df | [
"def",
"_parse_00",
"(",
"ofile",
")",
":",
"with",
"open",
"(",
"ofile",
")",
"as",
"infile",
":",
"## read in the results summary from the end of the outfile",
"arr",
"=",
"np",
".",
"array",
"(",
"[",
"\" \"",
"]",
"+",
"infile",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"Summary of MCMC results\\n\\n\\n\"",
")",
"[",
"1",
":",
"]",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
")",
"## reshape array ",
"rows",
"=",
"12",
"cols",
"=",
"(",
"arr",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
")",
"/",
"rows",
"arr",
"=",
"arr",
".",
"reshape",
"(",
"rows",
",",
"cols",
")",
"## make into labeled data frame",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"arr",
"[",
"1",
":",
",",
"1",
":",
"]",
",",
"columns",
"=",
"arr",
"[",
"0",
",",
"1",
":",
"]",
",",
"index",
"=",
"arr",
"[",
"1",
":",
",",
"0",
"]",
",",
")",
".",
"T",
"return",
"df"
] | 28.272727 | 0.015552 |
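The reshape-then-label pattern of _parse_00() above on fabricated input; the token values are made up:

```python
import numpy as np
import pandas as pd

# A flat token stream folded into rows; first row/column become labels.
tokens = np.array([' ', 'theta0', 'theta1',
                   'mean', '0.01', '0.02',
                   'median', '0.01', '0.02'])
arr = tokens.reshape(3, 3)
df = pd.DataFrame(data=arr[1:, 1:], columns=arr[0, 1:], index=arr[1:, 0]).T
print(df)
```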
def query_cached_package_list(self):
"""Return list of pickled package names from PYPI"""
if self.debug:
self.logger.debug("DEBUG: reading pickled cache file")
        return cPickle.load(open(self.pkg_cache_file, "rb"))
"def",
"query_cached_package_list",
"(",
"self",
")",
":",
"if",
"self",
".",
"debug",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"DEBUG: reading pickled cache file\"",
")",
"return",
"cPickle",
".",
"load",
"(",
"open",
"(",
"self",
".",
"pkg_cache_file",
",",
"\"r\"",
")",
")"
] | 48.6 | 0.008097 |
def network_del_notif(self, tenant_id, tenant_name, net_id):
"""Network delete notification. """
if not self.fw_init:
return
self.network_delete_notif(tenant_id, tenant_name, net_id) | [
"def",
"network_del_notif",
"(",
"self",
",",
"tenant_id",
",",
"tenant_name",
",",
"net_id",
")",
":",
"if",
"not",
"self",
".",
"fw_init",
":",
"return",
"self",
".",
"network_delete_notif",
"(",
"tenant_id",
",",
"tenant_name",
",",
"net_id",
")"
] | 42.8 | 0.009174 |
def _printTraceback(self, test, err):
"""Print a nicely formatted traceback.
:arg err: exc_info()-style traceback triple
:arg test: the test that precipitated this call
"""
# Don't bind third item to a local var; that can create
# circular refs which are expensive to collect. See the
# sys.exc_info() docs.
exception_type, exception_value = err[:2]
# TODO: In Python 3, the traceback is attached to the exception
# instance through the __traceback__ attribute. If the instance
# is saved in a local variable that persists outside the except
# block, the traceback will create a reference cycle with the
# current frame and its dictionary of local variables. This will
# delay reclaiming dead resources until the next cyclic garbage
# collection pass.
extracted_tb = extract_relevant_tb(
err[2],
exception_type,
exception_type is test.failureException)
test_frame_index = index_of_test_frame(
extracted_tb,
exception_type,
exception_value,
test)
if test_frame_index:
# We have a good guess at which frame is the test, so
# trim everything until that. We don't care to see test
# framework frames.
extracted_tb = extracted_tb[test_frame_index:]
with self.bar.dodging():
self.stream.write(''.join(
format_traceback(
extracted_tb,
exception_type,
exception_value,
self._cwd,
self._term,
self._options.function_color,
self._options.dim_color,
self._options.editor,
self._options.editor_shortcut_template))) | [
"def",
"_printTraceback",
"(",
"self",
",",
"test",
",",
"err",
")",
":",
"# Don't bind third item to a local var; that can create",
"# circular refs which are expensive to collect. See the",
"# sys.exc_info() docs.",
"exception_type",
",",
"exception_value",
"=",
"err",
"[",
":",
"2",
"]",
"# TODO: In Python 3, the traceback is attached to the exception",
"# instance through the __traceback__ attribute. If the instance",
"# is saved in a local variable that persists outside the except",
"# block, the traceback will create a reference cycle with the",
"# current frame and its dictionary of local variables. This will",
"# delay reclaiming dead resources until the next cyclic garbage",
"# collection pass.",
"extracted_tb",
"=",
"extract_relevant_tb",
"(",
"err",
"[",
"2",
"]",
",",
"exception_type",
",",
"exception_type",
"is",
"test",
".",
"failureException",
")",
"test_frame_index",
"=",
"index_of_test_frame",
"(",
"extracted_tb",
",",
"exception_type",
",",
"exception_value",
",",
"test",
")",
"if",
"test_frame_index",
":",
"# We have a good guess at which frame is the test, so",
"# trim everything until that. We don't care to see test",
"# framework frames.",
"extracted_tb",
"=",
"extracted_tb",
"[",
"test_frame_index",
":",
"]",
"with",
"self",
".",
"bar",
".",
"dodging",
"(",
")",
":",
"self",
".",
"stream",
".",
"write",
"(",
"''",
".",
"join",
"(",
"format_traceback",
"(",
"extracted_tb",
",",
"exception_type",
",",
"exception_value",
",",
"self",
".",
"_cwd",
",",
"self",
".",
"_term",
",",
"self",
".",
"_options",
".",
"function_color",
",",
"self",
".",
"_options",
".",
"dim_color",
",",
"self",
".",
"_options",
".",
"editor",
",",
"self",
".",
"_options",
".",
"editor_shortcut_template",
")",
")",
")"
] | 40.282609 | 0.001054 |
def _advapi32_encrypt(cipher, key, data, iv, padding):
"""
Encrypts plaintext via CryptoAPI
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
context_handle = None
key_handle = None
try:
context_handle, key_handle = _advapi32_create_handles(cipher, key, iv)
out_len = new(advapi32, 'DWORD *', len(data))
res = advapi32.CryptEncrypt(
key_handle,
null(),
True,
0,
null(),
out_len,
0
)
handle_error(res)
buffer_len = deref(out_len)
buffer = buffer_from_bytes(buffer_len)
write_to_buffer(buffer, data)
pointer_set(out_len, len(data))
res = advapi32.CryptEncrypt(
key_handle,
null(),
True,
0,
buffer,
out_len,
buffer_len
)
handle_error(res)
output = bytes_from_buffer(buffer, deref(out_len))
# Remove padding when not required. CryptoAPI doesn't support this, so
# we just manually remove it.
if cipher == 'aes' and not padding:
if output[-16:] != (b'\x10' * 16):
raise ValueError('Invalid padding generated by OS crypto library')
output = output[:-16]
return output
finally:
if key_handle:
advapi32.CryptDestroyKey(key_handle)
if context_handle:
close_context_handle(context_handle) | [
"def",
"_advapi32_encrypt",
"(",
"cipher",
",",
"key",
",",
"data",
",",
"iv",
",",
"padding",
")",
":",
"context_handle",
"=",
"None",
"key_handle",
"=",
"None",
"try",
":",
"context_handle",
",",
"key_handle",
"=",
"_advapi32_create_handles",
"(",
"cipher",
",",
"key",
",",
"iv",
")",
"out_len",
"=",
"new",
"(",
"advapi32",
",",
"'DWORD *'",
",",
"len",
"(",
"data",
")",
")",
"res",
"=",
"advapi32",
".",
"CryptEncrypt",
"(",
"key_handle",
",",
"null",
"(",
")",
",",
"True",
",",
"0",
",",
"null",
"(",
")",
",",
"out_len",
",",
"0",
")",
"handle_error",
"(",
"res",
")",
"buffer_len",
"=",
"deref",
"(",
"out_len",
")",
"buffer",
"=",
"buffer_from_bytes",
"(",
"buffer_len",
")",
"write_to_buffer",
"(",
"buffer",
",",
"data",
")",
"pointer_set",
"(",
"out_len",
",",
"len",
"(",
"data",
")",
")",
"res",
"=",
"advapi32",
".",
"CryptEncrypt",
"(",
"key_handle",
",",
"null",
"(",
")",
",",
"True",
",",
"0",
",",
"buffer",
",",
"out_len",
",",
"buffer_len",
")",
"handle_error",
"(",
"res",
")",
"output",
"=",
"bytes_from_buffer",
"(",
"buffer",
",",
"deref",
"(",
"out_len",
")",
")",
"# Remove padding when not required. CryptoAPI doesn't support this, so",
"# we just manually remove it.",
"if",
"cipher",
"==",
"'aes'",
"and",
"not",
"padding",
":",
"if",
"output",
"[",
"-",
"16",
":",
"]",
"!=",
"(",
"b'\\x10'",
"*",
"16",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid padding generated by OS crypto library'",
")",
"output",
"=",
"output",
"[",
":",
"-",
"16",
"]",
"return",
"output",
"finally",
":",
"if",
"key_handle",
":",
"advapi32",
".",
"CryptDestroyKey",
"(",
"key_handle",
")",
"if",
"context_handle",
":",
"close_context_handle",
"(",
"context_handle",
")"
] | 26.189873 | 0.000932 |
def add(self, bounds1, label1, bounds2, label2, bin3, label3,
data_label):
"""
Combines signals from multiple instruments within
given bounds.
Parameters
----------
bounds1 : (min, max)
Bounds for selecting data on the axis of label1
Data points with label1 in [min, max) will be considered.
label1 : string
Data label for bounds1 to act on.
bounds2 : (min, max)
Bounds for selecting data on the axis of label2
            Data points with label2 in [min, max) will be considered.
label2 : string
Data label for bounds2 to act on.
bin3 : (min, max, #bins)
Min and max bounds and number of bins for third axis.
label3 : string
Data label for third axis.
data_label : array of strings
Data label(s) for data product(s) to be averaged.
Returns
-------
median : dictionary
Dictionary indexed by data label, each value of which is a
dictionary with keys 'median', 'count', 'avg_abs_dev', and
'bin' (the values of the bin edges.)
"""
        # TODO Update for 2.7 compatibility.
if isinstance(data_label, str):
data_label = [data_label, ]
elif not isinstance(data_label, collections.Sequence):
raise ValueError("Please pass data_label as a string or "
"collection of strings.")
# Modeled after pysat.ssnl.median2D
# Make bin boundaries.
# y: values at label3
# z: *data_labels
biny = np.linspace(bin3[0], bin3[1], bin3[2]+1)
numy = len(biny)-1
numz = len(data_label)
# Ranges
yarr, zarr = map(np.arange, (numy, numz))
# Store data here.
ans = [[[collections.deque()] for j in yarr] for k in zarr]
# Filter data by bounds and bin it.
# Idiom for loading all of the data in an instrument's bounds.
for inst in self:
for inst in inst:
if len(inst.data) != 0:
                    # Select indices for each piece of data we're interested in.
# Not all of this data is in bounds on label3 but we'll
# sort this later.
min1, max1 = bounds1
min2, max2 = bounds2
data1 = inst.data[label1]
data2 = inst.data[label2]
in_bounds, = np.where((min1 <= data1) & (data1 < max1) &
(min2 <= data2) & (data2 < max2))
# Grab the data in bounds on data1, data2.
data_considered = inst.data.iloc[in_bounds]
y_indexes = np.digitize(data_considered[label3], biny) - 1
# Iterate over the bins along y
for yj in yarr:
                        # Indices of data in this bin
yindex, = np.where(y_indexes == yj)
# If there's data in this bin
if len(yindex) > 0:
# For each data label, add the points.
for zk in zarr:
ans[zk][yj][0].extend(
data_considered.ix[yindex, data_label[zk]].tolist())
# Now for the averaging.
        # Let's try packing the answers for the 2d function.
numx = 1
xarr = np.arange(numx)
binx = None
# TODO modify output
out_2d = _calc_2d_median(ans, data_label, binx, biny, xarr, yarr, zarr, numx, numy, numz)
# Transform output
output = {}
for i, label in enumerate(data_label):
median = [r[0] for r in out_2d[label]['median']]
count = [r[0] for r in out_2d[label]['count']]
dev = [r[0] for r in out_2d[label]['avg_abs_dev']]
output[label] = {'median': median,
'count': count,
'avg_abs_dev': dev,
'bin': out_2d[label]['bin_y']}
return output | [
"def",
"add",
"(",
"self",
",",
"bounds1",
",",
"label1",
",",
"bounds2",
",",
"label2",
",",
"bin3",
",",
"label3",
",",
"data_label",
")",
":",
"# TODO Update for 2.7 compatability.",
"if",
"isinstance",
"(",
"data_label",
",",
"str",
")",
":",
"data_label",
"=",
"[",
"data_label",
",",
"]",
"elif",
"not",
"isinstance",
"(",
"data_label",
",",
"collections",
".",
"Sequence",
")",
":",
"raise",
"ValueError",
"(",
"\"Please pass data_label as a string or \"",
"\"collection of strings.\"",
")",
"# Modeled after pysat.ssnl.median2D",
"# Make bin boundaries.",
"# y: values at label3",
"# z: *data_labels",
"biny",
"=",
"np",
".",
"linspace",
"(",
"bin3",
"[",
"0",
"]",
",",
"bin3",
"[",
"1",
"]",
",",
"bin3",
"[",
"2",
"]",
"+",
"1",
")",
"numy",
"=",
"len",
"(",
"biny",
")",
"-",
"1",
"numz",
"=",
"len",
"(",
"data_label",
")",
"# Ranges",
"yarr",
",",
"zarr",
"=",
"map",
"(",
"np",
".",
"arange",
",",
"(",
"numy",
",",
"numz",
")",
")",
"# Store data here.",
"ans",
"=",
"[",
"[",
"[",
"collections",
".",
"deque",
"(",
")",
"]",
"for",
"j",
"in",
"yarr",
"]",
"for",
"k",
"in",
"zarr",
"]",
"# Filter data by bounds and bin it.",
"# Idiom for loading all of the data in an instrument's bounds.",
"for",
"inst",
"in",
"self",
":",
"for",
"inst",
"in",
"inst",
":",
"if",
"len",
"(",
"inst",
".",
"data",
")",
"!=",
"0",
":",
"# Select indicies for each piece of data we're interest in.",
"# Not all of this data is in bounds on label3 but we'll",
"# sort this later.",
"min1",
",",
"max1",
"=",
"bounds1",
"min2",
",",
"max2",
"=",
"bounds2",
"data1",
"=",
"inst",
".",
"data",
"[",
"label1",
"]",
"data2",
"=",
"inst",
".",
"data",
"[",
"label2",
"]",
"in_bounds",
",",
"=",
"np",
".",
"where",
"(",
"(",
"min1",
"<=",
"data1",
")",
"&",
"(",
"data1",
"<",
"max1",
")",
"&",
"(",
"min2",
"<=",
"data2",
")",
"&",
"(",
"data2",
"<",
"max2",
")",
")",
"# Grab the data in bounds on data1, data2.",
"data_considered",
"=",
"inst",
".",
"data",
".",
"iloc",
"[",
"in_bounds",
"]",
"y_indexes",
"=",
"np",
".",
"digitize",
"(",
"data_considered",
"[",
"label3",
"]",
",",
"biny",
")",
"-",
"1",
"# Iterate over the bins along y",
"for",
"yj",
"in",
"yarr",
":",
"# Indicies of data in this bin",
"yindex",
",",
"=",
"np",
".",
"where",
"(",
"y_indexes",
"==",
"yj",
")",
"# If there's data in this bin",
"if",
"len",
"(",
"yindex",
")",
">",
"0",
":",
"# For each data label, add the points.",
"for",
"zk",
"in",
"zarr",
":",
"ans",
"[",
"zk",
"]",
"[",
"yj",
"]",
"[",
"0",
"]",
".",
"extend",
"(",
"data_considered",
".",
"ix",
"[",
"yindex",
",",
"data_label",
"[",
"zk",
"]",
"]",
".",
"tolist",
"(",
")",
")",
"# Now for the averaging.",
"# Let's, try .. packing the answers for the 2d function.",
"numx",
"=",
"1",
"xarr",
"=",
"np",
".",
"arange",
"(",
"numx",
")",
"binx",
"=",
"None",
"# TODO modify output",
"out_2d",
"=",
"_calc_2d_median",
"(",
"ans",
",",
"data_label",
",",
"binx",
",",
"biny",
",",
"xarr",
",",
"yarr",
",",
"zarr",
",",
"numx",
",",
"numy",
",",
"numz",
")",
"# Transform output",
"output",
"=",
"{",
"}",
"for",
"i",
",",
"label",
"in",
"enumerate",
"(",
"data_label",
")",
":",
"median",
"=",
"[",
"r",
"[",
"0",
"]",
"for",
"r",
"in",
"out_2d",
"[",
"label",
"]",
"[",
"'median'",
"]",
"]",
"count",
"=",
"[",
"r",
"[",
"0",
"]",
"for",
"r",
"in",
"out_2d",
"[",
"label",
"]",
"[",
"'count'",
"]",
"]",
"dev",
"=",
"[",
"r",
"[",
"0",
"]",
"for",
"r",
"in",
"out_2d",
"[",
"label",
"]",
"[",
"'avg_abs_dev'",
"]",
"]",
"output",
"[",
"label",
"]",
"=",
"{",
"'median'",
":",
"median",
",",
"'count'",
":",
"count",
",",
"'avg_abs_dev'",
":",
"dev",
",",
"'bin'",
":",
"out_2d",
"[",
"label",
"]",
"[",
"'bin_y'",
"]",
"}",
"return",
"output"
] | 38.25 | 0.00236 |
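For intuition, a minimal standalone sketch of the same bin-and-median idea used in the `add` method above, with hypothetical names and numpy only:

import numpy as np

def median_by_bin(y, z, bin_edges):
    # np.digitize returns 1-based bin positions, so subtract 1
    # to index bins 0 .. len(bin_edges) - 2.
    idx = np.digitize(y, bin_edges) - 1
    medians = []
    for j in range(len(bin_edges) - 1):
        vals = z[idx == j]
        medians.append(np.median(vals) if len(vals) else np.nan)
    return np.array(medians)

# median_by_bin(np.array([0.1, 0.4, 0.6]), np.array([1.0, 2.0, 3.0]),
#               np.linspace(0, 1, 3)) -> array([1.5, 3.0])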
def _analyze(self):
'''Run-once function to generate analysis over all series, considering both full and partial data.
Initializes the self.analysis dict which maps:
(non-reference) column/series -> 'full' and/or 'partial' -> stats dict returned by get_xy_dataset_statistics
'''
if not self.analysis:
for dseries in self.data_series:
# Count number of non-NaN rows
dseries_count = self.df[dseries].count()
assert(len(self.df_pruned) <= dseries_count <= len(self.df) or dseries_count)
self.analysis[dseries] = dict(
partial = None,
full = None,
)
# Compute the statistics for the common records
stats = get_xy_dataset_statistics_pandas(self.df_pruned, self.reference_series, dseries,
fcorrect_x_cutoff = 1.0, fcorrect_y_cutoff = 1.0,
bootstrap_data = False,
x_fuzzy_range = 0.1,
y_scalar = 1.0, ignore_null_values = True)
if (len(self.df_pruned) == len(self.df)):
# There are no pruned records so these are actually the full stats
self.analysis[dseries]['full'] = dict(data = stats, description = format_stats(stats, floating_point_format = '%0.3f', sci_notation_format = '%.2E', return_string = True))
else:
# Store the results for the partial dataset
self.analysis[dseries]['partial'] = dict(data = stats, description = format_stats(stats, floating_point_format = '%0.3f', sci_notation_format = '%.2E', return_string = True))
if dseries_count > len(self.df_pruned):
# This dataset has records which are not in the pruned dataset
stats = get_xy_dataset_statistics_pandas(self.df, self.reference_series, dseries,
fcorrect_x_cutoff = 1.0, fcorrect_y_cutoff = 1.0,
bootstrap_data = False,
x_fuzzy_range = 0.1,
y_scalar = 1.0, ignore_null_values = True)
self.analysis[dseries]['full'] = dict(data = stats, description = format_stats(stats, floating_point_format = '%0.3f', sci_notation_format = '%.2E', return_string = True))
return self.analysis | [
"def",
"_analyze",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"analysis",
":",
"for",
"dseries",
"in",
"self",
".",
"data_series",
":",
"# Count number of non-NaN rows",
"dseries_count",
"=",
"self",
".",
"df",
"[",
"dseries",
"]",
".",
"count",
"(",
")",
"assert",
"(",
"len",
"(",
"self",
".",
"df_pruned",
")",
"<=",
"dseries_count",
"<=",
"len",
"(",
"self",
".",
"df",
")",
"or",
"dseries_count",
")",
"self",
".",
"analysis",
"[",
"dseries",
"]",
"=",
"dict",
"(",
"partial",
"=",
"None",
",",
"full",
"=",
"None",
",",
")",
"# Compute the statistics for the common records",
"stats",
"=",
"get_xy_dataset_statistics_pandas",
"(",
"self",
".",
"df_pruned",
",",
"self",
".",
"reference_series",
",",
"dseries",
",",
"fcorrect_x_cutoff",
"=",
"1.0",
",",
"fcorrect_y_cutoff",
"=",
"1.0",
",",
"bootstrap_data",
"=",
"False",
",",
"x_fuzzy_range",
"=",
"0.1",
",",
"y_scalar",
"=",
"1.0",
",",
"ignore_null_values",
"=",
"True",
")",
"if",
"(",
"len",
"(",
"self",
".",
"df_pruned",
")",
"==",
"len",
"(",
"self",
".",
"df",
")",
")",
":",
"# There are no pruned records so these are actually the full stats",
"self",
".",
"analysis",
"[",
"dseries",
"]",
"[",
"'full'",
"]",
"=",
"dict",
"(",
"data",
"=",
"stats",
",",
"description",
"=",
"format_stats",
"(",
"stats",
",",
"floating_point_format",
"=",
"'%0.3f'",
",",
"sci_notation_format",
"=",
"'%.2E'",
",",
"return_string",
"=",
"True",
")",
")",
"else",
":",
"# Store the results for the partial dataset",
"self",
".",
"analysis",
"[",
"dseries",
"]",
"[",
"'partial'",
"]",
"=",
"dict",
"(",
"data",
"=",
"stats",
",",
"description",
"=",
"format_stats",
"(",
"stats",
",",
"floating_point_format",
"=",
"'%0.3f'",
",",
"sci_notation_format",
"=",
"'%.2E'",
",",
"return_string",
"=",
"True",
")",
")",
"if",
"dseries_count",
">",
"len",
"(",
"self",
".",
"df_pruned",
")",
":",
"# This dataset has records which are not in the pruned dataset",
"stats",
"=",
"get_xy_dataset_statistics_pandas",
"(",
"self",
".",
"df",
",",
"self",
".",
"reference_series",
",",
"dseries",
",",
"fcorrect_x_cutoff",
"=",
"1.0",
",",
"fcorrect_y_cutoff",
"=",
"1.0",
",",
"bootstrap_data",
"=",
"False",
",",
"x_fuzzy_range",
"=",
"0.1",
",",
"y_scalar",
"=",
"1.0",
",",
"ignore_null_values",
"=",
"True",
")",
"self",
".",
"analysis",
"[",
"dseries",
"]",
"[",
"'full'",
"]",
"=",
"dict",
"(",
"data",
"=",
"stats",
",",
"description",
"=",
"format_stats",
"(",
"stats",
",",
"floating_point_format",
"=",
"'%0.3f'",
",",
"sci_notation_format",
"=",
"'%.2E'",
",",
"return_string",
"=",
"True",
")",
")",
"return",
"self",
".",
"analysis"
] | 68.05 | 0.027888 |
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value | [
"def",
"to_dict",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"_",
",",
"value",
"=",
"helpers",
".",
"etree_to_dict",
"(",
"self",
".",
"_xml",
",",
"*",
"*",
"kw",
")",
".",
"popitem",
"(",
")",
"return",
"value"
] | 28.375 | 0.008547 |
def networkBibCoupling(self, weighted = True, fullInfo = False, addCR = False):
"""Creates a bibliographic coupling network based on citations for the RecordCollection.
# Parameters
_weighted_ : `optional bool`
> Default `True`, if `True` the weight of the edges will be added to the network
_fullInfo_ : `optional bool`
> Default `False`, if `True` the full citation string will be added to each of the nodes of the network.
# Returns
`Networkx Graph`
> A graph of the bibliographic coupling
"""
progArgs = (0, "Make a citation network for coupling")
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
citeGrph = self.networkCitation(weighted = False, directed = True, detailedCore = True, fullInfo = fullInfo, count = False, nodeInfo = True, addCR = addCR, _quiet = True)
pcount = 0
pmax = len(citeGrph)
PBar.updateVal(.2, "Starting to classify nodes")
workingGrph = nx.Graph()
couplingSet = set()
for n, d in citeGrph.nodes(data = True):
pcount += 1
PBar.updateVal(.2 + .4 * (pcount / pmax), "Classifying: {}".format(n))
if d['inCore']:
workingGrph.add_node(n, **d)
if citeGrph.in_degree(n) > 0:
couplingSet.add(n)
pcount = 0
pmax = len(couplingSet)
for n in couplingSet:
PBar.updateVal(.6 + .4 * (pcount / pmax), "Coupling: {}".format(n))
citesLst = list(citeGrph.in_edges(n))
for i, edgeOuter in enumerate(citesLst):
outerNode = edgeOuter[0]
for edgeInner in citesLst[i + 1:]:
innerNode = edgeInner[0]
if weighted and workingGrph.has_edge(outerNode, innerNode):
workingGrph.edges[outerNode, innerNode]['weight'] += 1
elif weighted:
workingGrph.add_edge(outerNode, innerNode, weight = 1)
else:
workingGrph.add_edge(outerNode, innerNode)
PBar.finish("Done making a bib-coupling network from {}".format(self))
return workingGrph | [
"def",
"networkBibCoupling",
"(",
"self",
",",
"weighted",
"=",
"True",
",",
"fullInfo",
"=",
"False",
",",
"addCR",
"=",
"False",
")",
":",
"progArgs",
"=",
"(",
"0",
",",
"\"Make a citation network for coupling\"",
")",
"if",
"metaknowledge",
".",
"VERBOSE_MODE",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"False",
"}",
"else",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"True",
"}",
"with",
"_ProgressBar",
"(",
"*",
"progArgs",
",",
"*",
"*",
"progKwargs",
")",
"as",
"PBar",
":",
"citeGrph",
"=",
"self",
".",
"networkCitation",
"(",
"weighted",
"=",
"False",
",",
"directed",
"=",
"True",
",",
"detailedCore",
"=",
"True",
",",
"fullInfo",
"=",
"fullInfo",
",",
"count",
"=",
"False",
",",
"nodeInfo",
"=",
"True",
",",
"addCR",
"=",
"addCR",
",",
"_quiet",
"=",
"True",
")",
"pcount",
"=",
"0",
"pmax",
"=",
"len",
"(",
"citeGrph",
")",
"PBar",
".",
"updateVal",
"(",
".2",
",",
"\"Starting to classify nodes\"",
")",
"workingGrph",
"=",
"nx",
".",
"Graph",
"(",
")",
"couplingSet",
"=",
"set",
"(",
")",
"for",
"n",
",",
"d",
"in",
"citeGrph",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
":",
"pcount",
"+=",
"1",
"PBar",
".",
"updateVal",
"(",
".2",
"+",
".4",
"*",
"(",
"pcount",
"/",
"pmax",
")",
",",
"\"Classifying: {}\"",
".",
"format",
"(",
"n",
")",
")",
"if",
"d",
"[",
"'inCore'",
"]",
":",
"workingGrph",
".",
"add_node",
"(",
"n",
",",
"*",
"*",
"d",
")",
"if",
"citeGrph",
".",
"in_degree",
"(",
"n",
")",
">",
"0",
":",
"couplingSet",
".",
"add",
"(",
"n",
")",
"pcount",
"=",
"0",
"pmax",
"=",
"len",
"(",
"couplingSet",
")",
"for",
"n",
"in",
"couplingSet",
":",
"PBar",
".",
"updateVal",
"(",
".6",
"+",
".4",
"*",
"(",
"pcount",
"/",
"pmax",
")",
",",
"\"Coupling: {}\"",
".",
"format",
"(",
"n",
")",
")",
"citesLst",
"=",
"list",
"(",
"citeGrph",
".",
"in_edges",
"(",
"n",
")",
")",
"for",
"i",
",",
"edgeOuter",
"in",
"enumerate",
"(",
"citesLst",
")",
":",
"outerNode",
"=",
"edgeOuter",
"[",
"0",
"]",
"for",
"edgeInner",
"in",
"citesLst",
"[",
"i",
"+",
"1",
":",
"]",
":",
"innerNode",
"=",
"edgeInner",
"[",
"0",
"]",
"if",
"weighted",
"and",
"workingGrph",
".",
"has_edge",
"(",
"outerNode",
",",
"innerNode",
")",
":",
"workingGrph",
".",
"edges",
"[",
"outerNode",
",",
"innerNode",
"]",
"[",
"'weight'",
"]",
"+=",
"1",
"elif",
"weighted",
":",
"workingGrph",
".",
"add_edge",
"(",
"outerNode",
",",
"innerNode",
",",
"weight",
"=",
"1",
")",
"else",
":",
"workingGrph",
".",
"add_edge",
"(",
"outerNode",
",",
"innerNode",
")",
"PBar",
".",
"finish",
"(",
"\"Done making a bib-coupling network from {}\"",
".",
"format",
"(",
"self",
")",
")",
"return",
"workingGrph"
] | 44.218182 | 0.016492 |
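The coupling step in networkBibCoupling pairs every two papers that cite a common reference; a toy sketch with networkx (the graph and weights here are illustrative, not the library's own data):

import networkx as nx

# Edges point from citing paper to cited reference.
cite = nx.DiGraph([("A", "R1"), ("B", "R1"), ("B", "R2"), ("C", "R2")])
coupling = nx.Graph()
for ref in [n for n in cite if cite.in_degree(n) > 0]:
    citers = [u for u, _ in cite.in_edges(ref)]
    for i, u in enumerate(citers):
        for v in citers[i + 1:]:
            w = (coupling.get_edge_data(u, v) or {}).get("weight", 0)
            coupling.add_edge(u, v, weight=w + 1)
# coupling now has edges A-B (share R1) and B-C (share R2), each with weight 1.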
def put(self):
"""Update a credential by file path"""
cred_payload = utils.uni_to_str(json.loads(request.get_data()))
return self.manager.update_credential(cred_payload) | [
"def",
"put",
"(",
"self",
")",
":",
"cred_payload",
"=",
"utils",
".",
"uni_to_str",
"(",
"json",
".",
"loads",
"(",
"request",
".",
"get_data",
"(",
")",
")",
")",
"return",
"self",
".",
"manager",
".",
"update_credential",
"(",
"cred_payload",
")"
] | 47.5 | 0.010363 |
def publish_collated_document(cursor, model, parent_model):
"""Publish a given `module`'s collated content in the context of
the `parent_model`. Note, the model's content is expected to already
have the collated content. This will just persist that content to
the archive.
"""
html = bytes(cnxepub.DocumentContentFormatter(model))
sha1 = hashlib.new('sha1', html).hexdigest()
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s", (sha1,))
try:
fileid = cursor.fetchone()[0]
except TypeError:
file_args = {
'media_type': 'text/html',
'data': psycopg2.Binary(html),
}
cursor.execute("""\
INSERT INTO files (file, media_type)
VALUES (%(data)s, %(media_type)s)
RETURNING fileid""", file_args)
fileid = cursor.fetchone()[0]
args = {
'module_ident_hash': model.ident_hash,
'parent_ident_hash': parent_model.ident_hash,
'fileid': fileid,
}
stmt = """\
INSERT INTO collated_file_associations (context, item, fileid)
VALUES
((SELECT module_ident FROM modules
WHERE ident_hash(uuid, major_version, minor_version)
= %(parent_ident_hash)s),
(SELECT module_ident FROM modules
WHERE ident_hash(uuid, major_version, minor_version)
= %(module_ident_hash)s),
%(fileid)s)"""
cursor.execute(stmt, args) | [
"def",
"publish_collated_document",
"(",
"cursor",
",",
"model",
",",
"parent_model",
")",
":",
"html",
"=",
"bytes",
"(",
"cnxepub",
".",
"DocumentContentFormatter",
"(",
"model",
")",
")",
"sha1",
"=",
"hashlib",
".",
"new",
"(",
"'sha1'",
",",
"html",
")",
".",
"hexdigest",
"(",
")",
"cursor",
".",
"execute",
"(",
"\"SELECT fileid FROM files WHERE sha1 = %s\"",
",",
"(",
"sha1",
",",
")",
")",
"try",
":",
"fileid",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
"except",
"TypeError",
":",
"file_args",
"=",
"{",
"'media_type'",
":",
"'text/html'",
",",
"'data'",
":",
"psycopg2",
".",
"Binary",
"(",
"html",
")",
",",
"}",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\n INSERT INTO files (file, media_type)\n VALUES (%(data)s, %(media_type)s)\n RETURNING fileid\"\"\"",
",",
"file_args",
")",
"fileid",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
"args",
"=",
"{",
"'module_ident_hash'",
":",
"model",
".",
"ident_hash",
",",
"'parent_ident_hash'",
":",
"parent_model",
".",
"ident_hash",
",",
"'fileid'",
":",
"fileid",
",",
"}",
"stmt",
"=",
"\"\"\"\\\nINSERT INTO collated_file_associations (context, item, fileid)\nVALUES\n ((SELECT module_ident FROM modules\n WHERE ident_hash(uuid, major_version, minor_version)\n = %(parent_ident_hash)s),\n (SELECT module_ident FROM modules\n WHERE ident_hash(uuid, major_version, minor_version)\n = %(module_ident_hash)s),\n %(fileid)s)\"\"\"",
"cursor",
".",
"execute",
"(",
"stmt",
",",
"args",
")"
] | 35.315789 | 0.000725 |
def get_config(self, force=False):
"""
Returns a dictionary of all config.xml properties
If `force = True` then ignore any cached state and read config.xml
if possible
setup_omero_cli() must be called before this method to import the
correct omero module to minimise the possibility of version conflicts
"""
if not force and not self.has_config():
raise Exception('No config file')
configxml = os.path.join(self.dir, 'etc', 'grid', 'config.xml')
if not os.path.exists(configxml):
raise Exception('No config file')
try:
# Attempt to open config.xml read-only, though this flag is not
# present in early versions of OMERO 5.0
c = self._omero.config.ConfigXml(
configxml, exclusive=False, read_only=True)
except TypeError:
c = self._omero.config.ConfigXml(configxml, exclusive=False)
try:
return c.as_map()
finally:
c.close() | [
"def",
"get_config",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"if",
"not",
"force",
"and",
"not",
"self",
".",
"has_config",
"(",
")",
":",
"raise",
"Exception",
"(",
"'No config file'",
")",
"configxml",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dir",
",",
"'etc'",
",",
"'grid'",
",",
"'config.xml'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"configxml",
")",
":",
"raise",
"Exception",
"(",
"'No config file'",
")",
"try",
":",
"# Attempt to open config.xml read-only, though this flag is not",
"# present in early versions of OMERO 5.0",
"c",
"=",
"self",
".",
"_omero",
".",
"config",
".",
"ConfigXml",
"(",
"configxml",
",",
"exclusive",
"=",
"False",
",",
"read_only",
"=",
"True",
")",
"except",
"TypeError",
":",
"c",
"=",
"self",
".",
"_omero",
".",
"config",
".",
"ConfigXml",
"(",
"configxml",
",",
"exclusive",
"=",
"False",
")",
"try",
":",
"return",
"c",
".",
"as_map",
"(",
")",
"finally",
":",
"c",
".",
"close",
"(",
")"
] | 35.275862 | 0.001903 |
def chop(self, bits=1):
"""
Chops a BV into consecutive sub-slices. Obviously, the length of this BV must be a multiple of bits.
:returns: A list of smaller bitvectors, each ``bits`` in length. The first one will be the left-most (i.e.
most significant) bits.
"""
s = len(self)
if s % bits != 0:
raise ValueError("expression length (%d) should be a multiple of 'bits' (%d)" % (len(self), bits))
elif s == bits:
return [ self ]
else:
return list(reversed([ self[(n+1)*bits - 1:n*bits] for n in range(0, s // bits) ])) | [
"def",
"chop",
"(",
"self",
",",
"bits",
"=",
"1",
")",
":",
"s",
"=",
"len",
"(",
"self",
")",
"if",
"s",
"%",
"bits",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"expression length (%d) should be a multiple of 'bits' (%d)\"",
"%",
"(",
"len",
"(",
"self",
")",
",",
"bits",
")",
")",
"elif",
"s",
"==",
"bits",
":",
"return",
"[",
"self",
"]",
"else",
":",
"return",
"list",
"(",
"reversed",
"(",
"[",
"self",
"[",
"(",
"n",
"+",
"1",
")",
"*",
"bits",
"-",
"1",
":",
"n",
"*",
"bits",
"]",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"s",
"//",
"bits",
")",
"]",
")",
")"
] | 44.714286 | 0.015649 |
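chop slices a bitvector most-significant chunk first; the same arithmetic on a plain integer, as a sketch (chop_int is a hypothetical helper, not part of the library):

def chop_int(value, width, bits):
    # Split a width-bit integer into width // bits chunks, MSB chunk first.
    assert width % bits == 0
    mask = (1 << bits) - 1
    return [(value >> (width - (i + 1) * bits)) & mask
            for i in range(width // bits)]

# chop_int(0b10110100, 8, 2) -> [0b10, 0b11, 0b01, 0b00]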
def getCall(self, n): #pylint: disable=invalid-name
"""
Args:
n: integer (index of function call)
Return:
SpyCall object (or None if the index is not valid)
"""
call_list = super(SinonSpy, self)._get_wrapper().call_list
if n >= 0 and n < len(call_list):
call = call_list[n]
call.proxy = weakref.proxy(self)
return call
else:
return None | [
"def",
"getCall",
"(",
"self",
",",
"n",
")",
":",
"#pylint: disable=invalid-name",
"call_list",
"=",
"super",
"(",
"SinonSpy",
",",
"self",
")",
".",
"_get_wrapper",
"(",
")",
".",
"call_list",
"if",
"n",
">=",
"0",
"and",
"n",
"<",
"len",
"(",
"call_list",
")",
":",
"call",
"=",
"call_list",
"[",
"n",
"]",
"call",
".",
"proxy",
"=",
"weakref",
".",
"proxy",
"(",
"self",
")",
"return",
"call",
"else",
":",
"return",
"None"
] | 32.214286 | 0.008621 |
def get_access_token(self):
'''
Returns an access token for the specified subscription.
This method uses a cache to limit the number of requests to the token service.
A fresh token can be re-used during its lifetime of 10 minutes. After a successful
request to the token service, this method caches the access token. Subsequent
invocations of the method return the cached token for the next 5 minutes. After
5 minutes, a new token is fetched from the token service and the cache is updated.
'''
if (self.token is None) or (datetime.utcnow() > self.reuse_token_until):
headers = {'Ocp-Apim-Subscription-Key': self.client_secret}
response = requests.post(self.base_url, headers=headers)
response.raise_for_status()
self.token = response.content
self.reuse_token_until = datetime.utcnow() + timedelta(minutes=5)
return self.token.decode('utf-8') | [
"def",
"get_access_token",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"token",
"is",
"None",
")",
"or",
"(",
"datetime",
".",
"utcnow",
"(",
")",
">",
"self",
".",
"reuse_token_until",
")",
":",
"headers",
"=",
"{",
"'Ocp-Apim-Subscription-Key'",
":",
"self",
".",
"client_secret",
"}",
"response",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"base_url",
",",
"headers",
"=",
"headers",
")",
"response",
".",
"raise_for_status",
"(",
")",
"self",
".",
"token",
"=",
"response",
".",
"content",
"self",
".",
"reuse_token_until",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"+",
"timedelta",
"(",
"minutes",
"=",
"5",
")",
"return",
"self",
".",
"token",
".",
"decode",
"(",
"'utf-8'",
")"
] | 48.35 | 0.008114 |
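The caching pattern in get_access_token generalizes to any short-lived credential; a stdlib-only sketch where fetch is a stand-in for the real token request:

from datetime import datetime, timedelta

class TokenCache:
    def __init__(self, fetch, reuse_minutes=5):
        self._fetch = fetch                       # callable returning a fresh token
        self._reuse = timedelta(minutes=reuse_minutes)
        self._token, self._until = None, datetime.min
    def get(self):
        # Re-fetch only once the reuse window has elapsed.
        if self._token is None or datetime.utcnow() > self._until:
            self._token = self._fetch()
            self._until = datetime.utcnow() + self._reuse
        return self._token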
def _code2xls(self, worksheets):
"""Writes code to xls file
Format: <row>\t<col>\t<tab>\t<code>\n
"""
code_array = self.code_array
xls_max_shape = self.xls_max_rows, self.xls_max_cols, self.xls_max_tabs
for key in code_array:
if all(kele < mele for kele, mele in zip(key, xls_max_shape)):
# Cell lies within Excel boundaries
row, col, tab = key
code_str = code_array(key)
if code_str is not None:
style = self._get_xfstyle(worksheets, key)
worksheets[tab].write(row, col, label=code_str,
style=style)
# Handle cell formatting in cells without code
# Get bboxes for all cell_attributes
max_shape = [min(xls_max_shape[0], code_array.shape[0]),
min(xls_max_shape[1], code_array.shape[1])]
# Prevent systems from blocking
if max_shape[0] * max_shape[1] > 1024000:
# Ignore all cell attributes below row 3999
max_shape[0] = 4000
cell_attributes = code_array.dict_grid.cell_attributes
bboxes = []
for s, __tab, __ in cell_attributes:
if s:
bboxes.append((s.get_grid_bbox(code_array.shape), __tab))
# Get bbox_cell_set from bboxes
cells = []
for ((bb_top, bb_left), (bb_bottom, bb_right)), __tab in bboxes:
__bb_bottom = min(bb_bottom, max_shape[0])
__bb_right = min(bb_right, max_shape[1])
for __row, __col in product(xrange(bb_top, __bb_bottom + 1),
xrange(bb_left, __bb_right + 1)):
cells.append((__row, __col, __tab))
cell_set = set(cells)
# Loop over those with non-standard attributes
for key in cell_set:
if key not in code_array and all(ele >= 0 for ele in key):
row, col, tab = key
style = self._get_xfstyle(worksheets, key)
worksheets[tab].write(row, col, label="", style=style) | [
"def",
"_code2xls",
"(",
"self",
",",
"worksheets",
")",
":",
"code_array",
"=",
"self",
".",
"code_array",
"xls_max_shape",
"=",
"self",
".",
"xls_max_rows",
",",
"self",
".",
"xls_max_cols",
",",
"self",
".",
"xls_max_tabs",
"for",
"key",
"in",
"code_array",
":",
"if",
"all",
"(",
"kele",
"<",
"mele",
"for",
"kele",
",",
"mele",
"in",
"zip",
"(",
"key",
",",
"xls_max_shape",
")",
")",
":",
"# Cell lies within Excel boundaries",
"row",
",",
"col",
",",
"tab",
"=",
"key",
"code_str",
"=",
"code_array",
"(",
"key",
")",
"if",
"code_str",
"is",
"not",
"None",
":",
"style",
"=",
"self",
".",
"_get_xfstyle",
"(",
"worksheets",
",",
"key",
")",
"worksheets",
"[",
"tab",
"]",
".",
"write",
"(",
"row",
",",
"col",
",",
"label",
"=",
"code_str",
",",
"style",
"=",
"style",
")",
"# Handle cell formatting in cells without code",
"# Get bboxes for all cell_attributes",
"max_shape",
"=",
"[",
"min",
"(",
"xls_max_shape",
"[",
"0",
"]",
",",
"code_array",
".",
"shape",
"[",
"0",
"]",
")",
",",
"min",
"(",
"xls_max_shape",
"[",
"1",
"]",
",",
"code_array",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"# Prevent systems from blocking",
"if",
"max_shape",
"[",
"0",
"]",
"*",
"max_shape",
"[",
"1",
"]",
">",
"1024000",
":",
"# Ignore all cell attributes below row 3999",
"max_shape",
"[",
"0",
"]",
"=",
"4000",
"cell_attributes",
"=",
"code_array",
".",
"dict_grid",
".",
"cell_attributes",
"bboxes",
"=",
"[",
"]",
"for",
"s",
",",
"__tab",
",",
"__",
"in",
"cell_attributes",
":",
"if",
"s",
":",
"bboxes",
".",
"append",
"(",
"(",
"s",
".",
"get_grid_bbox",
"(",
"code_array",
".",
"shape",
")",
",",
"__tab",
")",
")",
"# Get bbox_cell_set from bboxes",
"cells",
"=",
"[",
"]",
"for",
"(",
"(",
"bb_top",
",",
"bb_left",
")",
",",
"(",
"bb_bottom",
",",
"bb_right",
")",
")",
",",
"__tab",
"in",
"bboxes",
":",
"__bb_bottom",
"=",
"min",
"(",
"bb_bottom",
",",
"max_shape",
"[",
"0",
"]",
")",
"__bb_right",
"=",
"min",
"(",
"bb_right",
",",
"max_shape",
"[",
"1",
"]",
")",
"for",
"__row",
",",
"__col",
"in",
"product",
"(",
"xrange",
"(",
"bb_top",
",",
"__bb_bottom",
"+",
"1",
")",
",",
"xrange",
"(",
"bb_left",
",",
"__bb_right",
"+",
"1",
")",
")",
":",
"cells",
".",
"append",
"(",
"(",
"__row",
",",
"__col",
",",
"__tab",
")",
")",
"cell_set",
"=",
"set",
"(",
"cells",
")",
"# Loop over those with non-standard attributes",
"for",
"key",
"in",
"cell_set",
":",
"if",
"key",
"not",
"in",
"code_array",
"and",
"all",
"(",
"ele",
">=",
"0",
"for",
"ele",
"in",
"key",
")",
":",
"row",
",",
"col",
",",
"tab",
"=",
"key",
"style",
"=",
"self",
".",
"_get_xfstyle",
"(",
"worksheets",
",",
"key",
")",
"worksheets",
"[",
"tab",
"]",
".",
"write",
"(",
"row",
",",
"col",
",",
"label",
"=",
"\"\"",
",",
"style",
"=",
"style",
")"
] | 38.555556 | 0.000937 |
def stop(self, signum=None, frame=None):
"""
Handles a termination signal.
"""
BackgroundProcess.objects.filter(pk=self.process_id
).update(pid=0, last_update=now(), message='stopping..')
# run the cleanup
self.cleanup()
BackgroundProcess.objects.filter(pk=self.process_id).update(pid=0,
last_update=now(),
message='stopped') | [
"def",
"stop",
"(",
"self",
",",
"signum",
"=",
"None",
",",
"frame",
"=",
"None",
")",
":",
"BackgroundProcess",
".",
"objects",
".",
"filter",
"(",
"pk",
"=",
"self",
".",
"process_id",
")",
".",
"update",
"(",
"pid",
"=",
"0",
",",
"last_update",
"=",
"now",
"(",
")",
",",
"message",
"=",
"'stopping..'",
")",
"# run the cleanup",
"self",
".",
"cleanup",
"(",
")",
"BackgroundProcess",
".",
"objects",
".",
"filter",
"(",
"pk",
"=",
"self",
".",
"process_id",
")",
".",
"update",
"(",
"pid",
"=",
"0",
",",
"last_update",
"=",
"now",
"(",
")",
",",
"message",
"=",
"'stopped'",
")"
] | 49.818182 | 0.008961 |
def on_draw(self, e):
"""Draw all visuals."""
gloo.clear()
for visual in self.visuals:
logger.log(5, "Draw visual `%s`.", visual)
visual.on_draw() | [
"def",
"on_draw",
"(",
"self",
",",
"e",
")",
":",
"gloo",
".",
"clear",
"(",
")",
"for",
"visual",
"in",
"self",
".",
"visuals",
":",
"logger",
".",
"log",
"(",
"5",
",",
"\"Draw visual `%s`.\"",
",",
"visual",
")",
"visual",
".",
"on_draw",
"(",
")"
] | 31.5 | 0.010309 |
def get_init_container(self,
init_command,
init_args,
env_vars,
context_mounts,
persistence_outputs,
persistence_data):
"""Pod init container for setting outputs path."""
env_vars = to_list(env_vars, check_none=True)
env_vars += [
get_env_var(name=constants.CONFIG_MAP_JOB_INFO_KEY_NAME,
value=json.dumps(self.labels)),
]
return client.V1Container(
name=self.init_container_name,
image=self.init_docker_image,
image_pull_policy=self.init_docker_image_pull_policy,
command=init_command,
env=env_vars,
args=init_args,
volume_mounts=context_mounts) | [
"def",
"get_init_container",
"(",
"self",
",",
"init_command",
",",
"init_args",
",",
"env_vars",
",",
"context_mounts",
",",
"persistence_outputs",
",",
"persistence_data",
")",
":",
"env_vars",
"=",
"to_list",
"(",
"env_vars",
",",
"check_none",
"=",
"True",
")",
"env_vars",
"+=",
"[",
"get_env_var",
"(",
"name",
"=",
"constants",
".",
"CONFIG_MAP_JOB_INFO_KEY_NAME",
",",
"value",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"labels",
")",
")",
",",
"]",
"return",
"client",
".",
"V1Container",
"(",
"name",
"=",
"self",
".",
"init_container_name",
",",
"image",
"=",
"self",
".",
"init_docker_image",
",",
"image_pull_policy",
"=",
"self",
".",
"init_docker_image_pull_policy",
",",
"command",
"=",
"init_command",
",",
"env",
"=",
"env_vars",
",",
"args",
"=",
"init_args",
",",
"volume_mounts",
"=",
"context_mounts",
")"
] | 40.333333 | 0.009227 |
def list(self, search_opts=None, limit=None,
marker=None, sort_by=None, reverse=None):
"""Get a list of Jobs."""
query = base.get_query_string(search_opts, limit=limit, marker=marker,
sort_by=sort_by, reverse=reverse)
url = "/jobs%s" % query
return self._page(url, 'jobs', limit) | [
"def",
"list",
"(",
"self",
",",
"search_opts",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"marker",
"=",
"None",
",",
"sort_by",
"=",
"None",
",",
"reverse",
"=",
"None",
")",
":",
"query",
"=",
"base",
".",
"get_query_string",
"(",
"search_opts",
",",
"limit",
"=",
"limit",
",",
"marker",
"=",
"marker",
",",
"sort_by",
"=",
"sort_by",
",",
"reverse",
"=",
"reverse",
")",
"url",
"=",
"\"/jobs%s\"",
"%",
"query",
"return",
"self",
".",
"_page",
"(",
"url",
",",
"'jobs'",
",",
"limit",
")"
] | 50.857143 | 0.008287 |
def next(self, start):
"""
Return a (marker_code, segment_offset) 2-tuple identifying and
locating the first marker in *stream* occuring after offset *start*.
The returned *segment_offset* points to the position immediately
following the 2-byte marker code, the start of the marker segment,
for those markers that have a segment.
"""
position = start
while True:
# skip over any non-\xFF bytes
position = self._offset_of_next_ff_byte(start=position)
# skip over any \xFF padding bytes
position, byte_ = self._next_non_ff_byte(start=position+1)
# 'FF 00' sequence is not a marker, start over if found
if byte_ == b'\x00':
continue
# this is a marker, gather return values and break out of scan
marker_code, segment_offset = byte_, position+1
break
return marker_code, segment_offset | [
"def",
"next",
"(",
"self",
",",
"start",
")",
":",
"position",
"=",
"start",
"while",
"True",
":",
"# skip over any non-\\xFF bytes",
"position",
"=",
"self",
".",
"_offset_of_next_ff_byte",
"(",
"start",
"=",
"position",
")",
"# skip over any \\xFF padding bytes",
"position",
",",
"byte_",
"=",
"self",
".",
"_next_non_ff_byte",
"(",
"start",
"=",
"position",
"+",
"1",
")",
"# 'FF 00' sequence is not a marker, start over if found",
"if",
"byte_",
"==",
"b'\\x00'",
":",
"continue",
"# this is a marker, gather return values and break out of scan",
"marker_code",
",",
"segment_offset",
"=",
"byte_",
",",
"position",
"+",
"1",
"break",
"return",
"marker_code",
",",
"segment_offset"
] | 45.952381 | 0.00203 |
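The scan in next follows the JPEG convention that 0xFF bytes may pad the stream and that the two-byte sequence FF 00 is a stuffed data byte, not a marker. A self-contained sketch of the same scan over a bytes buffer (find_markers is illustrative):

def find_markers(buf):
    i = 0
    while i < len(buf) - 1:
        if buf[i] != 0xFF:
            i += 1
            continue
        j = i + 1
        while j < len(buf) and buf[j] == 0xFF:   # skip 0xFF padding bytes
            j += 1
        if j < len(buf) and buf[j] != 0x00:      # FF 00 is not a marker
            yield buf[j], j + 1                  # (marker_code, segment_offset)
        i = j + 1

# list(find_markers(bytes.fromhex("ffd8ffe000104a464946")))
# -> [(0xD8, 2), (0xE0, 4)], i.e. SOI then APP0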
def minimal_residual(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
"""Minimal residual (MR) algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cg
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The residual in the preconditioner norm is both used for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov import minimal_residual
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = minimal_residual(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
7.26369350856
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 137--142, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always',
module='pyamg\.krylov\._minimal_residual')
# determine maxiter
if maxiter is None:
maxiter = int(len(b))
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# setup method
r = M*(b - A*x)
normr = norm(r)
# store initial residual
if residuals is not None:
residuals[:] = [normr]
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_M
if normr != 0.0:
tol = tol*normr
# How often should r be recomputed
recompute_r = 50
iter = 0
while True:
iter = iter+1
p = M*(A*r)
rMAr = np.inner(p.conjugate(), r) # check curvature of M^-1 A
if rMAr < 0.0:
warn("\nIndefinite matrix detected in minimal residual,\
aborting\n")
return (postprocess(x), -1)
alpha = rMAr / np.inner(p.conjugate(), p)
x = x + alpha*r
if np.mod(iter, recompute_r) and iter > 0:
r = M*(b - A*x)
else:
r = r - alpha*p
normr = norm(r)
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
if iter == maxiter:
return (postprocess(x), iter) | [
"def",
"minimal_residual",
"(",
"A",
",",
"b",
",",
"x0",
"=",
"None",
",",
"tol",
"=",
"1e-5",
",",
"maxiter",
"=",
"None",
",",
"xtype",
"=",
"None",
",",
"M",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"residuals",
"=",
"None",
")",
":",
"A",
",",
"M",
",",
"x",
",",
"b",
",",
"postprocess",
"=",
"make_system",
"(",
"A",
",",
"M",
",",
"x0",
",",
"b",
")",
"# Ensure that warnings are always reissued from this function",
"import",
"warnings",
"warnings",
".",
"filterwarnings",
"(",
"'always'",
",",
"module",
"=",
"'pyamg\\.krylov\\._minimal_residual'",
")",
"# determine maxiter",
"if",
"maxiter",
"is",
"None",
":",
"maxiter",
"=",
"int",
"(",
"len",
"(",
"b",
")",
")",
"elif",
"maxiter",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'Number of iterations must be positive'",
")",
"# setup method",
"r",
"=",
"M",
"*",
"(",
"b",
"-",
"A",
"*",
"x",
")",
"normr",
"=",
"norm",
"(",
"r",
")",
"# store initial residual",
"if",
"residuals",
"is",
"not",
"None",
":",
"residuals",
"[",
":",
"]",
"=",
"[",
"normr",
"]",
"# Check initial guess ( scaling by b, if b != 0,",
"# must account for case when norm(b) is very small)",
"normb",
"=",
"norm",
"(",
"b",
")",
"if",
"normb",
"==",
"0.0",
":",
"normb",
"=",
"1.0",
"if",
"normr",
"<",
"tol",
"*",
"normb",
":",
"return",
"(",
"postprocess",
"(",
"x",
")",
",",
"0",
")",
"# Scale tol by ||r_0||_M",
"if",
"normr",
"!=",
"0.0",
":",
"tol",
"=",
"tol",
"*",
"normr",
"# How often should r be recomputed",
"recompute_r",
"=",
"50",
"iter",
"=",
"0",
"while",
"True",
":",
"iter",
"=",
"iter",
"+",
"1",
"p",
"=",
"M",
"*",
"(",
"A",
"*",
"r",
")",
"rMAr",
"=",
"np",
".",
"inner",
"(",
"p",
".",
"conjugate",
"(",
")",
",",
"r",
")",
"# check curvature of M^-1 A",
"if",
"rMAr",
"<",
"0.0",
":",
"warn",
"(",
"\"\\nIndefinite matrix detected in minimal residual,\\\n aborting\\n\"",
")",
"return",
"(",
"postprocess",
"(",
"x",
")",
",",
"-",
"1",
")",
"alpha",
"=",
"rMAr",
"/",
"np",
".",
"inner",
"(",
"p",
".",
"conjugate",
"(",
")",
",",
"p",
")",
"x",
"=",
"x",
"+",
"alpha",
"*",
"r",
"if",
"np",
".",
"mod",
"(",
"iter",
",",
"recompute_r",
")",
"and",
"iter",
">",
"0",
":",
"r",
"=",
"M",
"*",
"(",
"b",
"-",
"A",
"*",
"x",
")",
"else",
":",
"r",
"=",
"r",
"-",
"alpha",
"*",
"p",
"normr",
"=",
"norm",
"(",
"r",
")",
"if",
"residuals",
"is",
"not",
"None",
":",
"residuals",
".",
"append",
"(",
"normr",
")",
"if",
"callback",
"is",
"not",
"None",
":",
"callback",
"(",
"x",
")",
"if",
"normr",
"<",
"tol",
":",
"return",
"(",
"postprocess",
"(",
"x",
")",
",",
"0",
")",
"if",
"iter",
"==",
"maxiter",
":",
"return",
"(",
"postprocess",
"(",
"x",
")",
",",
"iter",
")"
] | 29.676056 | 0.000689 |
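In symbols, the loop above performs the preconditioned minimal-residual update (a sketch matching the code, with M the preconditioner):

p_k = M A r_k, \qquad
\alpha_k = \frac{\langle p_k, r_k \rangle}{\langle p_k, p_k \rangle}, \qquad
x_{k+1} = x_k + \alpha_k r_k, \qquad
r_{k+1} = r_k - \alpha_k p_k

Each step minimizes \lVert r_k - \alpha p_k \rVert over \alpha, and a negative \langle p_k, r_k \rangle signals an indefinite M^{-1}A, which is why the code aborts there.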
def autodiscover(path=None, plugin_prefix='intake_'):
"""Scan for Intake plugin packages and return a dict of plugins.
This function searches path (or sys.path) for packages with names that
start with plugin_prefix. Those modules will be imported and scanned for
subclasses of intake.source.base.Plugin. Any subclasses found will be
instantiated and returned in a dictionary, with the plugin's name attribute
as the key.
"""
plugins = {}
for importer, name, ispkg in pkgutil.iter_modules(path=path):
if name.startswith(plugin_prefix):
t = time.time()
new_plugins = load_plugins_from_module(name)
for plugin_name, plugin in new_plugins.items():
if plugin_name in plugins:
orig_path = inspect.getfile(plugins[plugin_name])
new_path = inspect.getfile(plugin)
warnings.warn('Plugin name collision for "%s" from'
'\n %s'
'\nand'
'\n %s'
'\nKeeping plugin from first location.'
% (plugin_name, orig_path, new_path))
else:
plugins[plugin_name] = plugin
logger.debug("Import %s took: %7.2f s" % (name, time.time() - t))
return plugins | [
"def",
"autodiscover",
"(",
"path",
"=",
"None",
",",
"plugin_prefix",
"=",
"'intake_'",
")",
":",
"plugins",
"=",
"{",
"}",
"for",
"importer",
",",
"name",
",",
"ispkg",
"in",
"pkgutil",
".",
"iter_modules",
"(",
"path",
"=",
"path",
")",
":",
"if",
"name",
".",
"startswith",
"(",
"plugin_prefix",
")",
":",
"t",
"=",
"time",
".",
"time",
"(",
")",
"new_plugins",
"=",
"load_plugins_from_module",
"(",
"name",
")",
"for",
"plugin_name",
",",
"plugin",
"in",
"new_plugins",
".",
"items",
"(",
")",
":",
"if",
"plugin_name",
"in",
"plugins",
":",
"orig_path",
"=",
"inspect",
".",
"getfile",
"(",
"plugins",
"[",
"plugin_name",
"]",
")",
"new_path",
"=",
"inspect",
".",
"getfile",
"(",
"plugin",
")",
"warnings",
".",
"warn",
"(",
"'Plugin name collision for \"%s\" from'",
"'\\n %s'",
"'\\nand'",
"'\\n %s'",
"'\\nKeeping plugin from first location.'",
"%",
"(",
"plugin_name",
",",
"orig_path",
",",
"new_path",
")",
")",
"else",
":",
"plugins",
"[",
"plugin_name",
"]",
"=",
"plugin",
"logger",
".",
"debug",
"(",
"\"Import %s took: %7.2f s\"",
"%",
"(",
"name",
",",
"time",
".",
"time",
"(",
")",
"-",
"t",
")",
")",
"return",
"plugins"
] | 43.3125 | 0.000706 |
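The discovery mechanism above rests on pkgutil; the core name scan alone looks like this (stdlib only):

import pkgutil

# Importable top-level modules whose names carry the plugin prefix.
candidates = [name for _, name, _ in pkgutil.iter_modules()
              if name.startswith("intake_")]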
def _compile_control_flow_expression(self,
expr: Expression,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
'''Compile a control flow expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
'''
etype = expr.etype
args = expr.args
if etype[1] == 'if':
condition = self._compile_expression(args[0], scope, batch_size, noise)
true_case = self._compile_expression(args[1], scope, batch_size, noise)
false_case = self._compile_expression(args[2], scope, batch_size, noise)
fluent = TensorFluent.if_then_else(condition, true_case, false_case)
else:
raise ValueError('Invalid control flow expression:\n{}'.format(expr))
return fluent | [
"def",
"_compile_control_flow_expression",
"(",
"self",
",",
"expr",
":",
"Expression",
",",
"scope",
":",
"Dict",
"[",
"str",
",",
"TensorFluent",
"]",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"noise",
":",
"Optional",
"[",
"List",
"[",
"tf",
".",
"Tensor",
"]",
"]",
"=",
"None",
")",
"->",
"TensorFluent",
":",
"etype",
"=",
"expr",
".",
"etype",
"args",
"=",
"expr",
".",
"args",
"if",
"etype",
"[",
"1",
"]",
"==",
"'if'",
":",
"condition",
"=",
"self",
".",
"_compile_expression",
"(",
"args",
"[",
"0",
"]",
",",
"scope",
",",
"batch_size",
",",
"noise",
")",
"true_case",
"=",
"self",
".",
"_compile_expression",
"(",
"args",
"[",
"1",
"]",
",",
"scope",
",",
"batch_size",
",",
"noise",
")",
"false_case",
"=",
"self",
".",
"_compile_expression",
"(",
"args",
"[",
"2",
"]",
",",
"scope",
",",
"batch_size",
",",
"noise",
")",
"fluent",
"=",
"TensorFluent",
".",
"if_then_else",
"(",
"condition",
",",
"true_case",
",",
"false_case",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid control flow expression:\\n{}'",
".",
"format",
"(",
"expr",
")",
")",
"return",
"fluent"
] | 51.615385 | 0.010973 |
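The if-then-else branch above ultimately lowers to an elementwise select. A minimal standalone analogue in plain TensorFlow, under the assumption that TensorFluent.if_then_else behaves like tf.where:

import tensorflow as tf

cond = tf.constant([True, False, True])
true_case = tf.constant([1.0, 1.0, 1.0])
false_case = tf.constant([0.0, 0.0, 0.0])
result = tf.where(cond, true_case, false_case)   # -> [1.0, 0.0, 1.0]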
def objwalk(obj, path=(), memo=None):
"""
Walks an arbitrary Python object.
:param mixed obj: Any python object
:param tuple path: A tuple of the set attributes representing the path to the value
:param set memo: The list of attributes traversed thus far
:rtype <tuple<tuple>, <mixed>>: The path to the value on the object, the value.
"""
if len( path ) > MAX_DEPTH + 1:
yield path, obj # Truncate it!
if memo is None:
memo = set()
iterator = None
if isinstance(obj, Mapping):
iterator = iteritems
elif isinstance(obj, (Sequence, Set)) and not isinstance(obj, string_types):
iterator = enumerate
elif hasattr( obj, '__class__' ) and hasattr( obj, '__dict__' ) and type(obj) not in primitives: # If type(obj) == <instance>
iterator = class_iterator
elif hasattr(obj, '__iter__') or isinstance(obj, types.GeneratorType):
obj = [o for o in obj]
else:
pass
if iterator:
if id(obj) not in memo:
memo.add(id(obj))
for path_component, value in iterator(obj):
for result in objwalk(value, path + (path_component,), memo):
yield result
memo.remove(id(obj))
else:
yield path, obj | [
"def",
"objwalk",
"(",
"obj",
",",
"path",
"=",
"(",
")",
",",
"memo",
"=",
"None",
")",
":",
"if",
"len",
"(",
"path",
")",
">",
"MAX_DEPTH",
"+",
"1",
":",
"yield",
"path",
",",
"obj",
"# Truncate it!",
"if",
"memo",
"is",
"None",
":",
"memo",
"=",
"set",
"(",
")",
"iterator",
"=",
"None",
"if",
"isinstance",
"(",
"obj",
",",
"Mapping",
")",
":",
"iterator",
"=",
"iteritems",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"Sequence",
",",
"Set",
")",
")",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"string_types",
")",
":",
"iterator",
"=",
"enumerate",
"elif",
"hasattr",
"(",
"obj",
",",
"'__class__'",
")",
"and",
"hasattr",
"(",
"obj",
",",
"'__dict__'",
")",
"and",
"type",
"(",
"obj",
")",
"not",
"in",
"primitives",
":",
"# If type(obj) == <instance>",
"iterator",
"=",
"class_iterator",
"elif",
"hasattr",
"(",
"obj",
",",
"'__iter__'",
")",
"or",
"isinstance",
"(",
"obj",
",",
"types",
".",
"GeneratorType",
")",
":",
"obj",
"=",
"[",
"o",
"for",
"o",
"in",
"obj",
"]",
"else",
":",
"pass",
"if",
"iterator",
":",
"if",
"id",
"(",
"obj",
")",
"not",
"in",
"memo",
":",
"memo",
".",
"add",
"(",
"id",
"(",
"obj",
")",
")",
"for",
"path_component",
",",
"value",
"in",
"iterator",
"(",
"obj",
")",
":",
"for",
"result",
"in",
"objwalk",
"(",
"value",
",",
"path",
"+",
"(",
"path_component",
",",
")",
",",
"memo",
")",
":",
"yield",
"result",
"memo",
".",
"remove",
"(",
"id",
"(",
"obj",
")",
")",
"else",
":",
"yield",
"path",
",",
"obj"
] | 36.764706 | 0.010133 |
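Typical use of objwalk on a nested structure, with the expected output shown (note the implementation above is Python 2 flavored: iteritems, string_types):

nested = {"a": [1, {"b": 2}]}
for path, value in objwalk(nested):
    print(path, value)
# ('a', 0) 1
# ('a', 1, 'b') 2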
def open_file(orig_file_path):
"""
Given a file path, attempt to open mock data files with it.
"""
unquoted = unquote(orig_file_path)
paths = [
convert_to_platform_safe(orig_file_path),
"%s/index.html" % (convert_to_platform_safe(orig_file_path)),
orig_file_path,
"%s/index.html" % orig_file_path,
convert_to_platform_safe(unquoted),
"%s/index.html" % (convert_to_platform_safe(unquoted)),
unquoted,
"%s/index.html" % unquoted,
]
file_path = None
handle = None
for path in paths:
try:
file_path = path
handle = open(path, "rb")
break
except IOError:
pass
return handle | [
"def",
"open_file",
"(",
"orig_file_path",
")",
":",
"unquoted",
"=",
"unquote",
"(",
"orig_file_path",
")",
"paths",
"=",
"[",
"convert_to_platform_safe",
"(",
"orig_file_path",
")",
",",
"\"%s/index.html\"",
"%",
"(",
"convert_to_platform_safe",
"(",
"orig_file_path",
")",
")",
",",
"orig_file_path",
",",
"\"%s/index.html\"",
"%",
"orig_file_path",
",",
"convert_to_platform_safe",
"(",
"unquoted",
")",
",",
"\"%s/index.html\"",
"%",
"(",
"convert_to_platform_safe",
"(",
"unquoted",
")",
")",
",",
"unquoted",
",",
"\"%s/index.html\"",
"%",
"unquoted",
",",
"]",
"file_path",
"=",
"None",
"handle",
"=",
"None",
"for",
"path",
"in",
"paths",
":",
"try",
":",
"file_path",
"=",
"path",
"handle",
"=",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"break",
"except",
"IOError",
":",
"pass",
"return",
"handle"
] | 26.666667 | 0.00134 |
def connect_callbacks(self, callbacks_bag):
"""Connect callbacks specified in callbacks_bag with callbacks
defined in the ui definition.
Return a list with the names of the callbacks not connected.
"""
notconnected = []
for wname, builderobj in self.objects.items():
missing = builderobj.connect_commands(callbacks_bag)
if missing is not None:
notconnected.extend(missing)
missing = builderobj.connect_bindings(callbacks_bag)
if missing is not None:
notconnected.extend(missing)
if notconnected:
notconnected = list(set(notconnected))
msg = 'Missing callbacks for commands: {}'.format(notconnected)
logger.warning(msg)
return notconnected
else:
return None | [
"def",
"connect_callbacks",
"(",
"self",
",",
"callbacks_bag",
")",
":",
"notconnected",
"=",
"[",
"]",
"for",
"wname",
",",
"builderobj",
"in",
"self",
".",
"objects",
".",
"items",
"(",
")",
":",
"missing",
"=",
"builderobj",
".",
"connect_commands",
"(",
"callbacks_bag",
")",
"if",
"missing",
"is",
"not",
"None",
":",
"notconnected",
".",
"extend",
"(",
"missing",
")",
"missing",
"=",
"builderobj",
".",
"connect_bindings",
"(",
"callbacks_bag",
")",
"if",
"missing",
"is",
"not",
"None",
":",
"notconnected",
".",
"extend",
"(",
"missing",
")",
"if",
"notconnected",
":",
"notconnected",
"=",
"list",
"(",
"set",
"(",
"notconnected",
")",
")",
"msg",
"=",
"'Missing callbacks for commands: {}'",
".",
"format",
"(",
"notconnected",
")",
"logger",
".",
"warning",
"(",
"msg",
")",
"return",
"notconnected",
"else",
":",
"return",
"None"
] | 42 | 0.002328 |
def validate_intervals(intervals):
"""Checks that an (n, 2) interval ndarray is well-formed, and raises errors
if not.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
Array of interval start/end locations.
"""
# Validate interval shape
if intervals.ndim != 2 or intervals.shape[1] != 2:
raise ValueError('Intervals should be n-by-2 numpy ndarray, '
'but shape={}'.format(intervals.shape))
# Make sure no times are negative
if (intervals < 0).any():
raise ValueError('Negative interval times found')
# Make sure all intervals have strictly positive duration
if (intervals[:, 1] <= intervals[:, 0]).any():
raise ValueError('All interval durations must be strictly positive') | [
"def",
"validate_intervals",
"(",
"intervals",
")",
":",
"# Validate interval shape",
"if",
"intervals",
".",
"ndim",
"!=",
"2",
"or",
"intervals",
".",
"shape",
"[",
"1",
"]",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Intervals should be n-by-2 numpy ndarray, '",
"'but shape={}'",
".",
"format",
"(",
"intervals",
".",
"shape",
")",
")",
"# Make sure no times are negative",
"if",
"(",
"intervals",
"<",
"0",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Negative interval times found'",
")",
"# Make sure all intervals have strictly positive duration",
"if",
"(",
"intervals",
"[",
":",
",",
"1",
"]",
"<=",
"intervals",
"[",
":",
",",
"0",
"]",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'All interval durations must be strictly positive'",
")"
] | 33.521739 | 0.001261 |
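Usage is a pure side-effect check; the function returns None on well-formed input and raises on malformed input:

import numpy as np

validate_intervals(np.array([[0.0, 1.0], [1.0, 2.5]]))   # passes silently
validate_intervals(np.array([[1.0, 1.0]]))               # ValueError: duration not strictly positive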
def poke_array(self, store, name, elemtype, elements, container, visited, _stack):
"""abstract method"""
raise NotImplementedError | [
"def",
"poke_array",
"(",
"self",
",",
"store",
",",
"name",
",",
"elemtype",
",",
"elements",
",",
"container",
",",
"visited",
",",
"_stack",
")",
":",
"raise",
"NotImplementedError"
] | 48 | 0.020548 |
def controller_event(self, channel, contr_nr, contr_val):
"""Return the bytes for a MIDI controller event."""
return self.midi_event(CONTROLLER, channel, contr_nr, contr_val) | [
"def",
"controller_event",
"(",
"self",
",",
"channel",
",",
"contr_nr",
",",
"contr_val",
")",
":",
"return",
"self",
".",
"midi_event",
"(",
"CONTROLLER",
",",
"channel",
",",
"contr_nr",
",",
"contr_val",
")"
] | 62.666667 | 0.010526 |
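Assuming CONTROLLER is the standard MIDI Control Change status nibble 0xB0, the three-byte message this produces can be built by hand:

channel, contr_nr, contr_val = 0, 7, 100          # channel 1, CC7 (volume), value 100
message = bytes([0xB0 | channel, contr_nr, contr_val])
assert message == b"\xb0\x07\x64"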
def add(data, id, medium, credentials):
"""Adds the [medium] with the given id and data to the user's [medium]List.
:param data The data for the [medium] to add.
:param id The id of the data to add.
:param medium Anime or manga (tokens.Medium.ANIME or tokens.Medium.MANGA).
:raise ValueError For bad arguments.
"""
_op(data, id, medium, tokens.Operations.ADD, credentials) | [
"def",
"add",
"(",
"data",
",",
"id",
",",
"medium",
",",
"credentials",
")",
":",
"_op",
"(",
"data",
",",
"id",
",",
"medium",
",",
"tokens",
".",
"Operations",
".",
"ADD",
",",
"credentials",
")"
] | 49.875 | 0.002463 |
def _bind_key(self, key, func):
u"""setup the mapping from key to call the function."""
if not callable(func):
print u"Trying to bind non method to keystroke:%s,%s"%(key,func)
raise ReadlineError(u"Trying to bind non method to keystroke:%s,%s,%s,%s"%(key,func,type(func),type(self._bind_key)))
keyinfo = make_KeyPress_from_keydescr(key.lower()).tuple()
log(u">>>%s -> %s<<<"%(keyinfo,func.__name__))
self.key_dispatch[keyinfo] = func | [
"def",
"_bind_key",
"(",
"self",
",",
"key",
",",
"func",
")",
":",
"if",
"not",
"callable",
"(",
"func",
")",
":",
"print",
"u\"Trying to bind non method to keystroke:%s,%s\"",
"%",
"(",
"key",
",",
"func",
")",
"raise",
"ReadlineError",
"(",
"u\"Trying to bind non method to keystroke:%s,%s,%s,%s\"",
"%",
"(",
"key",
",",
"func",
",",
"type",
"(",
"func",
")",
",",
"type",
"(",
"self",
".",
"_bind_key",
")",
")",
")",
"keyinfo",
"=",
"make_KeyPress_from_keydescr",
"(",
"key",
".",
"lower",
"(",
")",
")",
".",
"tuple",
"(",
")",
"log",
"(",
"u\">>>%s -> %s<<<\"",
"%",
"(",
"keyinfo",
",",
"func",
".",
"__name__",
")",
")",
"self",
".",
"key_dispatch",
"[",
"keyinfo",
"]",
"=",
"func"
] | 62.125 | 0.021825 |
def mesh_other(mesh,
other,
samples=500,
scale=False,
icp_first=10,
icp_final=50):
"""
Align a mesh with another mesh or a PointCloud using
the principal axes of inertia as a starting point which
is refined by iterative closest point.
Parameters
------------
mesh : trimesh.Trimesh object
Mesh to align with other
other : trimesh.Trimesh or (n, 3) float
Mesh or points in space
samples : int
Number of samples from mesh surface to align
scale : bool
Allow scaling in transform
icp_first : int
How many ICP iterations for the 9 possible
combinations of sign flippage
icp_final : int
How many ICP iterations for the closest
candidate from the wider search
Returns
-----------
mesh_to_other : (4, 4) float
Transform to align mesh to the other object
cost : float
Average squared distance per point
"""
def key_points(m, count):
"""
Return a combination of mesh vertices and surface samples
with vertices chosen by likelihood to be important
to registration.
"""
if len(m.vertices) < (count / 2):
return np.vstack((
m.vertices,
m.sample(count - len(m.vertices))))
else:
return m.sample(count)
if not util.is_instance_named(mesh, 'Trimesh'):
raise ValueError('mesh must be Trimesh object!')
inverse = True
search = mesh
# if both are meshes use the smaller one for searching
if util.is_instance_named(other, 'Trimesh'):
if len(mesh.vertices) > len(other.vertices):
# do the expensive tree construction on the
# smaller mesh and query the others points
search = other
inverse = False
points = key_points(m=mesh, count=samples)
points_mesh = mesh
else:
points_mesh = other
points = key_points(m=other, count=samples)
if points_mesh.is_volume:
points_PIT = points_mesh.principal_inertia_transform
else:
points_PIT = points_mesh.bounding_box_oriented.principal_inertia_transform
elif util.is_shape(other, (-1, 3)):
# case where other is just points
points = other
points_PIT = bounds.oriented_bounds(points)[0]
else:
raise ValueError('other must be mesh or (n, 3) points!')
# get the transform that aligns the search mesh principal
# axes of inertia with the XYZ axis at the origin
if search.is_volume:
search_PIT = search.principal_inertia_transform
else:
search_PIT = search.bounding_box_oriented.principal_inertia_transform
# transform that moves the principal axes of inertia
# of the search mesh to be aligned with the best-guess
# principal axes of the points
search_to_points = np.dot(np.linalg.inv(points_PIT),
search_PIT)
# permutations of cube rotations
# the principal inertia transform has arbitrary sign
# along the 3 major axis so try all combinations of
# 180 degree rotations with a quick first ICP pass
cubes = np.array([np.eye(4) * np.append(diag, 1)
for diag in [[1, 1, 1],
[1, 1, -1],
[1, -1, 1],
[-1, 1, 1],
[-1, -1, 1],
[-1, 1, -1],
[1, -1, -1],
[-1, -1, -1]]])
# loop through permutations and run iterative closest point
costs = np.ones(len(cubes)) * np.inf
transforms = [None] * len(cubes)
centroid = search.centroid
for i, flip in enumerate(cubes):
# transform from points to search mesh
# flipped around the centroid of search
a_to_b = np.dot(
transformations.transform_around(flip, centroid),
np.linalg.inv(search_to_points))
# run first pass ICP
matrix, junk, cost = icp(a=points,
b=search,
initial=a_to_b,
max_iterations=int(icp_first),
scale=scale)
# save transform and costs from ICP
transforms[i] = matrix
costs[i] = cost
# run a final ICP refinement step
matrix, junk, cost = icp(a=points,
b=search,
initial=transforms[np.argmin(costs)],
max_iterations=int(icp_final),
scale=scale)
# convert to per-point distance average
cost /= len(points)
# we picked the smaller mesh to construct the tree
# on so we may have calculated a transform backwards
# to save computation, so just invert matrix here
if inverse:
mesh_to_other = np.linalg.inv(matrix)
else:
mesh_to_other = matrix
return mesh_to_other, cost | [
"def",
"mesh_other",
"(",
"mesh",
",",
"other",
",",
"samples",
"=",
"500",
",",
"scale",
"=",
"False",
",",
"icp_first",
"=",
"10",
",",
"icp_final",
"=",
"50",
")",
":",
"def",
"key_points",
"(",
"m",
",",
"count",
")",
":",
"\"\"\"\n Return a combination of mesh vertices and surface samples\n with vertices chosen by likelihood to be important\n to registation.\n \"\"\"",
"if",
"len",
"(",
"m",
".",
"vertices",
")",
"<",
"(",
"count",
"/",
"2",
")",
":",
"return",
"np",
".",
"vstack",
"(",
"(",
"m",
".",
"vertices",
",",
"m",
".",
"sample",
"(",
"count",
"-",
"len",
"(",
"m",
".",
"vertices",
")",
")",
")",
")",
"else",
":",
"return",
"m",
".",
"sample",
"(",
"count",
")",
"if",
"not",
"util",
".",
"is_instance_named",
"(",
"mesh",
",",
"'Trimesh'",
")",
":",
"raise",
"ValueError",
"(",
"'mesh must be Trimesh object!'",
")",
"inverse",
"=",
"True",
"search",
"=",
"mesh",
"# if both are meshes use the smaller one for searching",
"if",
"util",
".",
"is_instance_named",
"(",
"other",
",",
"'Trimesh'",
")",
":",
"if",
"len",
"(",
"mesh",
".",
"vertices",
")",
">",
"len",
"(",
"other",
".",
"vertices",
")",
":",
"# do the expensive tree construction on the",
"# smaller mesh and query the others points",
"search",
"=",
"other",
"inverse",
"=",
"False",
"points",
"=",
"key_points",
"(",
"m",
"=",
"mesh",
",",
"count",
"=",
"samples",
")",
"points_mesh",
"=",
"mesh",
"else",
":",
"points_mesh",
"=",
"other",
"points",
"=",
"key_points",
"(",
"m",
"=",
"other",
",",
"count",
"=",
"samples",
")",
"if",
"points_mesh",
".",
"is_volume",
":",
"points_PIT",
"=",
"points_mesh",
".",
"principal_inertia_transform",
"else",
":",
"points_PIT",
"=",
"points_mesh",
".",
"bounding_box_oriented",
".",
"principal_inertia_transform",
"elif",
"util",
".",
"is_shape",
"(",
"other",
",",
"(",
"-",
"1",
",",
"3",
")",
")",
":",
"# case where other is just points",
"points",
"=",
"other",
"points_PIT",
"=",
"bounds",
".",
"oriented_bounds",
"(",
"points",
")",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'other must be mesh or (n, 3) points!'",
")",
"# get the transform that aligns the search mesh principal",
"# axes of inertia with the XYZ axis at the origin",
"if",
"search",
".",
"is_volume",
":",
"search_PIT",
"=",
"search",
".",
"principal_inertia_transform",
"else",
":",
"search_PIT",
"=",
"search",
".",
"bounding_box_oriented",
".",
"principal_inertia_transform",
"# transform that moves the principal axes of inertia",
"# of the search mesh to be aligned with the best- guess",
"# principal axes of the points",
"search_to_points",
"=",
"np",
".",
"dot",
"(",
"np",
".",
"linalg",
".",
"inv",
"(",
"points_PIT",
")",
",",
"search_PIT",
")",
"# permutations of cube rotations",
"# the principal inertia transform has arbitrary sign",
"# along the 3 major axis so try all combinations of",
"# 180 degree rotations with a quick first ICP pass",
"cubes",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"eye",
"(",
"4",
")",
"*",
"np",
".",
"append",
"(",
"diag",
",",
"1",
")",
"for",
"diag",
"in",
"[",
"[",
"1",
",",
"1",
",",
"1",
"]",
",",
"[",
"1",
",",
"1",
",",
"-",
"1",
"]",
",",
"[",
"1",
",",
"-",
"1",
",",
"1",
"]",
",",
"[",
"-",
"1",
",",
"1",
",",
"1",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"1",
"]",
",",
"[",
"-",
"1",
",",
"1",
",",
"-",
"1",
"]",
",",
"[",
"1",
",",
"-",
"1",
",",
"-",
"1",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
"]",
"]",
"]",
")",
"# loop through permutations and run iterative closest point",
"costs",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"cubes",
")",
")",
"*",
"np",
".",
"inf",
"transforms",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"cubes",
")",
"centroid",
"=",
"search",
".",
"centroid",
"for",
"i",
",",
"flip",
"in",
"enumerate",
"(",
"cubes",
")",
":",
"# transform from points to search mesh",
"# flipped around the centroid of search",
"a_to_b",
"=",
"np",
".",
"dot",
"(",
"transformations",
".",
"transform_around",
"(",
"flip",
",",
"centroid",
")",
",",
"np",
".",
"linalg",
".",
"inv",
"(",
"search_to_points",
")",
")",
"# run first pass ICP",
"matrix",
",",
"junk",
",",
"cost",
"=",
"icp",
"(",
"a",
"=",
"points",
",",
"b",
"=",
"search",
",",
"initial",
"=",
"a_to_b",
",",
"max_iterations",
"=",
"int",
"(",
"icp_first",
")",
",",
"scale",
"=",
"scale",
")",
"# save transform and costs from ICP",
"transforms",
"[",
"i",
"]",
"=",
"matrix",
"costs",
"[",
"i",
"]",
"=",
"cost",
"# run a final ICP refinement step",
"matrix",
",",
"junk",
",",
"cost",
"=",
"icp",
"(",
"a",
"=",
"points",
",",
"b",
"=",
"search",
",",
"initial",
"=",
"transforms",
"[",
"np",
".",
"argmin",
"(",
"costs",
")",
"]",
",",
"max_iterations",
"=",
"int",
"(",
"icp_final",
")",
",",
"scale",
"=",
"scale",
")",
"# convert to per- point distance average",
"cost",
"/=",
"len",
"(",
"points",
")",
"# we picked the smaller mesh to construct the tree",
"# on so we may have calculated a transform backwards",
"# to save computation, so just invert matrix here",
"if",
"inverse",
":",
"mesh_to_other",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"matrix",
")",
"else",
":",
"mesh_to_other",
"=",
"matrix",
"return",
"mesh_to_other",
",",
"cost"
] | 33.871622 | 0.000388 |
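A hedged usage sketch (assumes trimesh is installed and that this function is reachable as trimesh.registration.mesh_other, which is where it lives in recent releases):

import trimesh

a = trimesh.creation.box()
b = a.copy()
b.apply_transform(trimesh.transformations.random_rotation_matrix())
matrix, cost = trimesh.registration.mesh_other(a, b, samples=200)
a.apply_transform(matrix)   # a is now aligned with b; cost is mean squared distance per point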
def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None,
timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None,
charToEscapeQuoteEscaping=None, encoding=None, emptyValue=None, lineSep=None):
r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
                         known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param sep: sets a single character as a separator for each field and value. If None is
set, it uses the default value, ``,``.
:param quote: sets a single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If an empty string is set, it uses ``u0000`` (null character).
:param escape: sets a single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``
:param escapeQuotes: a flag indicating whether values containing quotes should always
be enclosed in quotes. If None is set, it uses the default value
``true``, escaping all values containing a quote character.
:param quoteAll: a flag indicating whether all values should always be enclosed in
quotes. If None is set, it uses the default value ``false``,
only escaping values containing a quote character.
:param header: writes the names of columns as the first line. If None is set, it uses
the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.time.format.DateTimeFormatter``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at
``java.time.format.DateTimeFormatter``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for
the quote character. If None is set, the default value is
escape character when escape and quote characters are
                                          different, ``\0`` otherwise.
:param encoding: sets the encoding (charset) of saved csv files. If None is set,
the default UTF-8 charset will be used.
:param emptyValue: sets the string representation of an empty value. If None is set, it uses
the default value, ``""``.
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``. Maximum length is 1 character.
>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header,
nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll,
dateFormat=dateFormat, timestampFormat=timestampFormat,
ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace,
charToEscapeQuoteEscaping=charToEscapeQuoteEscaping,
encoding=encoding, emptyValue=emptyValue, lineSep=lineSep)
self._jwrite.csv(path) | [
"def",
"csv",
"(",
"self",
",",
"path",
",",
"mode",
"=",
"None",
",",
"compression",
"=",
"None",
",",
"sep",
"=",
"None",
",",
"quote",
"=",
"None",
",",
"escape",
"=",
"None",
",",
"header",
"=",
"None",
",",
"nullValue",
"=",
"None",
",",
"escapeQuotes",
"=",
"None",
",",
"quoteAll",
"=",
"None",
",",
"dateFormat",
"=",
"None",
",",
"timestampFormat",
"=",
"None",
",",
"ignoreLeadingWhiteSpace",
"=",
"None",
",",
"ignoreTrailingWhiteSpace",
"=",
"None",
",",
"charToEscapeQuoteEscaping",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"emptyValue",
"=",
"None",
",",
"lineSep",
"=",
"None",
")",
":",
"self",
".",
"mode",
"(",
"mode",
")",
"self",
".",
"_set_opts",
"(",
"compression",
"=",
"compression",
",",
"sep",
"=",
"sep",
",",
"quote",
"=",
"quote",
",",
"escape",
"=",
"escape",
",",
"header",
"=",
"header",
",",
"nullValue",
"=",
"nullValue",
",",
"escapeQuotes",
"=",
"escapeQuotes",
",",
"quoteAll",
"=",
"quoteAll",
",",
"dateFormat",
"=",
"dateFormat",
",",
"timestampFormat",
"=",
"timestampFormat",
",",
"ignoreLeadingWhiteSpace",
"=",
"ignoreLeadingWhiteSpace",
",",
"ignoreTrailingWhiteSpace",
"=",
"ignoreTrailingWhiteSpace",
",",
"charToEscapeQuoteEscaping",
"=",
"charToEscapeQuoteEscaping",
",",
"encoding",
"=",
"encoding",
",",
"emptyValue",
"=",
"emptyValue",
",",
"lineSep",
"=",
"lineSep",
")",
"self",
".",
"_jwrite",
".",
"csv",
"(",
"path",
")"
] | 75.236111 | 0.008746 |
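For a concrete sense of how these writer options combine, here is a hedged usage sketch; it assumes a local pyspark installation, and the DataFrame contents are invented for illustration.

import os
import tempfile
from pyspark.sql import SparkSession

spark = SparkSession.builder.master('local[1]').getOrCreate()
df = spark.createDataFrame([(1, 'a'), (2, None)], ['id', 'label'])

out = os.path.join(tempfile.mkdtemp(), 'data')
df.write.csv(out,
             mode='overwrite',       # replace any existing output
             header=True,            # write column names as the first line
             sep=';',                # use ';' instead of the default ','
             nullValue='NA',         # string written for null values
             compression='gzip')     # one of the shortened codec names

Each keyword lands in _set_opts exactly as in the body above; anything left as None falls back to the documented default.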
def _interval_to_seconds(interval, valid_units='smhdw'):
"""Convert the timeout duration to seconds.
The value must be of the form "<integer><unit>" where supported
units are s, m, h, d, w (seconds, minutes, hours, days, weeks).
Args:
interval: A "<integer><unit>" string.
valid_units: A list of supported units.
Returns:
A string of the form "<integer>s" or None if timeout is empty.
"""
if not interval:
return None
try:
last_char = interval[-1]
if last_char == 's' and 's' in valid_units:
return str(float(interval[:-1])) + 's'
elif last_char == 'm' and 'm' in valid_units:
return str(float(interval[:-1]) * 60) + 's'
elif last_char == 'h' and 'h' in valid_units:
return str(float(interval[:-1]) * 60 * 60) + 's'
elif last_char == 'd' and 'd' in valid_units:
return str(float(interval[:-1]) * 60 * 60 * 24) + 's'
elif last_char == 'w' and 'w' in valid_units:
return str(float(interval[:-1]) * 60 * 60 * 24 * 7) + 's'
else:
raise ValueError(
'Unsupported units in interval string %s: %s' % (interval, last_char))
except (ValueError, OverflowError) as e:
raise ValueError('Unable to parse interval string %s: %s' % (interval, e)) | [
"def",
"_interval_to_seconds",
"(",
"interval",
",",
"valid_units",
"=",
"'smhdw'",
")",
":",
"if",
"not",
"interval",
":",
"return",
"None",
"try",
":",
"last_char",
"=",
"interval",
"[",
"-",
"1",
"]",
"if",
"last_char",
"==",
"'s'",
"and",
"'s'",
"in",
"valid_units",
":",
"return",
"str",
"(",
"float",
"(",
"interval",
"[",
":",
"-",
"1",
"]",
")",
")",
"+",
"'s'",
"elif",
"last_char",
"==",
"'m'",
"and",
"'m'",
"in",
"valid_units",
":",
"return",
"str",
"(",
"float",
"(",
"interval",
"[",
":",
"-",
"1",
"]",
")",
"*",
"60",
")",
"+",
"'s'",
"elif",
"last_char",
"==",
"'h'",
"and",
"'h'",
"in",
"valid_units",
":",
"return",
"str",
"(",
"float",
"(",
"interval",
"[",
":",
"-",
"1",
"]",
")",
"*",
"60",
"*",
"60",
")",
"+",
"'s'",
"elif",
"last_char",
"==",
"'d'",
"and",
"'d'",
"in",
"valid_units",
":",
"return",
"str",
"(",
"float",
"(",
"interval",
"[",
":",
"-",
"1",
"]",
")",
"*",
"60",
"*",
"60",
"*",
"24",
")",
"+",
"'s'",
"elif",
"last_char",
"==",
"'w'",
"and",
"'w'",
"in",
"valid_units",
":",
"return",
"str",
"(",
"float",
"(",
"interval",
"[",
":",
"-",
"1",
"]",
")",
"*",
"60",
"*",
"60",
"*",
"24",
"*",
"7",
")",
"+",
"'s'",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported units in interval string %s: %s'",
"%",
"(",
"interval",
",",
"last_char",
")",
")",
"except",
"(",
"ValueError",
",",
"OverflowError",
")",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"'Unable to parse interval string %s: %s'",
"%",
"(",
"interval",
",",
"e",
")",
")"
] | 34.685714 | 0.009615 |
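A few hedged examples of what the parser accepts and rejects; note that values go through float(), so the result always carries a decimal point.

_interval_to_seconds('30s')    # -> '30.0s'
_interval_to_seconds('2m')     # -> '120.0s'
_interval_to_seconds('1.5h')   # -> '5400.0s'
_interval_to_seconds('1w')     # -> '604800.0s'
_interval_to_seconds('')       # -> None
_interval_to_seconds('10x')    # ValueError: the unsupported-unit error is
                               # caught and re-raised as 'Unable to parse ...'
_interval_to_seconds('1h', valid_units='sm')   # ValueError, same path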
def to_unicode(s):
"""Return the object as unicode (only matters for Python 2.x).
If s is already Unicode, return s as is.
Otherwise, assume that s is UTF-8 encoded, and convert to Unicode.
:param (basestring) s: a str, unicode or other basestring object
:return (unicode): the object as unicode
"""
if not isinstance(s, six.string_types):
raise ValueError("{} must be str or unicode.".format(s))
if not isinstance(s, six.text_type):
s = six.text_type(s, 'utf-8')
return s | [
"def",
"to_unicode",
"(",
"s",
")",
":",
"if",
"not",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"ValueError",
"(",
"\"{} must be str or unicode.\"",
".",
"format",
"(",
"s",
")",
")",
"if",
"not",
"isinstance",
"(",
"s",
",",
"six",
".",
"text_type",
")",
":",
"s",
"=",
"six",
".",
"text_type",
"(",
"s",
",",
"'utf-8'",
")",
"return",
"s"
] | 36.642857 | 0.001901 |
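Hedged behavior notes, since the outcome depends on the interpreter (six maps string_types and text_type differently on Python 2 and 3):

to_unicode('café')   # already text on Python 3 -> returned unchanged
to_unicode(42)       # raises ValueError: not a string type
# Python 2 only: byte strings are decoded as UTF-8, so
#   to_unicode(b'caf\xc3\xa9') == u'café'
# On Python 3 the same bytes raise ValueError, because bytes is not
# in six.string_types there.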
def list(region, profile):
"""
List all the CloudFormation stacks in the given region.
"""
ini_data = {}
environment = {}
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_list(ini_data):
sys.exit(0)
else:
sys.exit(1) | [
"def",
"list",
"(",
"region",
",",
"profile",
")",
":",
"ini_data",
"=",
"{",
"}",
"environment",
"=",
"{",
"}",
"if",
"region",
":",
"environment",
"[",
"'region'",
"]",
"=",
"region",
"else",
":",
"environment",
"[",
"'region'",
"]",
"=",
"find_myself",
"(",
")",
"if",
"profile",
":",
"environment",
"[",
"'profile'",
"]",
"=",
"profile",
"ini_data",
"[",
"'environment'",
"]",
"=",
"environment",
"if",
"start_list",
"(",
"ini_data",
")",
":",
"sys",
".",
"exit",
"(",
"0",
")",
"else",
":",
"sys",
".",
"exit",
"(",
"1",
")"
] | 20.65 | 0.002315 |
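The (region, profile) pair reads like a click-style subcommand; a hedged invocation sketch follows (the tool name is an assumption, and find_myself/start_list are external helpers not shown in this row).

# Assumed CLI shape (decorators not visible in the row above):
#
#   $ stacktool list --region us-east-1 --profile dev
#
# which, as a direct call, would be:
list(region='us-east-1', profile='dev')
# builds ini_data = {'environment': {'region': 'us-east-1', 'profile': 'dev'}},
# hands it to start_list(), then sys.exit(0) on success or sys.exit(1) on failure.

Worth flagging: the function name shadows the built-in list within its module, which is harmless for a CLI entry point but surprising if the module is ever imported.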