text (string, 75–104k chars) | code_tokens (list) | avg_line_len (float64, 7.91–980) | score (float64, 0–0.18)
---|---|---|---|
def key_value(minion_id,
pillar, # pylint: disable=W0613
pillar_key='redis_pillar'):
'''
Looks for key in redis matching minion_id, returns a structure based on the
data type of the redis key. String for string type, dict for hash type and
lists for lists, sets and sorted sets.
pillar_key
Pillar key to return data into
'''
# Identify key type and process as needed based on that type
key_type = __salt__['redis.key_type'](minion_id)
if key_type == 'string':
return {pillar_key: __salt__['redis.get_key'](minion_id)}
elif key_type == 'hash':
return {pillar_key: __salt__['redis.hgetall'](minion_id)}
elif key_type == 'list':
list_size = __salt__['redis.llen'](minion_id)
if not list_size:
return {}
return {pillar_key: __salt__['redis.lrange'](minion_id, 0,
list_size - 1)}
elif key_type == 'set':
return {pillar_key: __salt__['redis.smembers'](minion_id)}
elif key_type == 'zset':
set_size = __salt__['redis.zcard'](minion_id)
if not set_size:
return {}
return {pillar_key: __salt__['redis.zrange'](minion_id, 0,
set_size - 1)}
# Return nothing for unhandled types
return {} | [
"def",
"key_value",
"(",
"minion_id",
",",
"pillar",
",",
"# pylint: disable=W0613",
"pillar_key",
"=",
"'redis_pillar'",
")",
":",
"# Identify key type and process as needed based on that type",
"key_type",
"=",
"__salt__",
"[",
"'redis.key_type'",
"]",
"(",
"minion_id",
")",
"if",
"key_type",
"==",
"'string'",
":",
"return",
"{",
"pillar_key",
":",
"__salt__",
"[",
"'redis.get_key'",
"]",
"(",
"minion_id",
")",
"}",
"elif",
"key_type",
"==",
"'hash'",
":",
"return",
"{",
"pillar_key",
":",
"__salt__",
"[",
"'redis.hgetall'",
"]",
"(",
"minion_id",
")",
"}",
"elif",
"key_type",
"==",
"'list'",
":",
"list_size",
"=",
"__salt__",
"[",
"'redis.llen'",
"]",
"(",
"minion_id",
")",
"if",
"not",
"list_size",
":",
"return",
"{",
"}",
"return",
"{",
"pillar_key",
":",
"__salt__",
"[",
"'redis.lrange'",
"]",
"(",
"minion_id",
",",
"0",
",",
"list_size",
"-",
"1",
")",
"}",
"elif",
"key_type",
"==",
"'set'",
":",
"return",
"{",
"pillar_key",
":",
"__salt__",
"[",
"'redis.smembers'",
"]",
"(",
"minion_id",
")",
"}",
"elif",
"key_type",
"==",
"'zset'",
":",
"set_size",
"=",
"__salt__",
"[",
"'redis.zcard'",
"]",
"(",
"minion_id",
")",
"if",
"not",
"set_size",
":",
"return",
"{",
"}",
"return",
"{",
"pillar_key",
":",
"__salt__",
"[",
"'redis.zrange'",
"]",
"(",
"minion_id",
",",
"0",
",",
"set_size",
"-",
"1",
")",
"}",
"# Return nothing for unhandled types",
"return",
"{",
"}"
]
| 40.727273 | 0.000727 |
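The ext_pillar entry point above depends on Salt's injected `__salt__` loader, so it cannot run on its own. The sketch below stubs that loader with a hypothetical hash key for a made-up minion id, just to illustrate the dispatch on key type; it is not Salt itself.

```python
# Minimal sketch: __salt__ is stubbed so the hash branch of key_value() can be
# exercised in isolation (minion id and data are invented).
FAKE_REDIS = {'web01': {'role': 'webserver', 'env': 'prod'}}

__salt__ = {
    'redis.key_type': lambda key: 'hash' if key in FAKE_REDIS else 'none',
    'redis.hgetall': lambda key: FAKE_REDIS[key],
}

# With key_value() defined in the same module:
# key_value('web01', pillar={}) == {'redis_pillar': {'role': 'webserver', 'env': 'prod'}}
```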
def check_address(cls, *,
chat: typing.Union[str, int, None] = None,
user: typing.Union[str, int, None] = None) -> (typing.Union[str, int], typing.Union[str, int]):
"""
In all of the storage's methods, either chat or user is always required.
If one of them is not provided, the missing value is set based on the one that was provided.
This method performs that check.
:param chat:
:param user:
:return:
"""
if chat is None and user is None:
raise ValueError('`user` or `chat` parameter is required but no one is provided!')
if user is None and chat is not None:
user = chat
elif user is not None and chat is None:
chat = user
return chat, user | [
"def",
"check_address",
"(",
"cls",
",",
"*",
",",
"chat",
":",
"typing",
".",
"Union",
"[",
"str",
",",
"int",
",",
"None",
"]",
"=",
"None",
",",
"user",
":",
"typing",
".",
"Union",
"[",
"str",
",",
"int",
",",
"None",
"]",
"=",
"None",
")",
"->",
"(",
"typing",
".",
"Union",
"[",
"str",
",",
"int",
"]",
",",
"typing",
".",
"Union",
"[",
"str",
",",
"int",
"]",
")",
":",
"if",
"chat",
"is",
"None",
"and",
"user",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'`user` or `chat` parameter is required but no one is provided!'",
")",
"if",
"user",
"is",
"None",
"and",
"chat",
"is",
"not",
"None",
":",
"user",
"=",
"chat",
"elif",
"user",
"is",
"not",
"None",
"and",
"chat",
"is",
"None",
":",
"chat",
"=",
"user",
"return",
"chat",
",",
"user"
]
| 38 | 0.008557 |
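A short usage sketch of the address check above; `BaseStorage` stands in for whatever storage class exposes this classmethod, and the ids are invented.

```python
# Hypothetical calls against a storage class exposing check_address():
chat, user = BaseStorage.check_address(chat=-100123456)           # -> (-100123456, -100123456)
chat, user = BaseStorage.check_address(user=42)                   # -> (42, 42)
chat, user = BaseStorage.check_address(chat=-100123456, user=42)  # -> (-100123456, 42)
BaseStorage.check_address()                                       # -> raises ValueError
```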
def breakRankTies(self, oldsym, newsym):
"""break Ties to form a new list with the same integer ordering
from high to low
Example
old = [ 4, 2, 4, 7, 8] (Two ties, 4 and 4)
new = [60, 2, 61, 90, 99]
res = [ 4, 0, 3, 1, 2]
* * This tie is broken in this case
"""
stableSort = map(None, oldsym, newsym, range(len(oldsym)))
stableSort.sort()
lastOld, lastNew = None, None
x = -1
for old, new, index in stableSort:
if old != lastOld:
x += 1
# the last old value was changed, so update both
lastOld = old
lastNew = new
elif new != lastNew:
# break the tie based on the new info (update lastNew)
x += 1
lastNew = new
newsym[index] = x | [
"def",
"breakRankTies",
"(",
"self",
",",
"oldsym",
",",
"newsym",
")",
":",
"stableSort",
"=",
"map",
"(",
"None",
",",
"oldsym",
",",
"newsym",
",",
"range",
"(",
"len",
"(",
"oldsym",
")",
")",
")",
"stableSort",
".",
"sort",
"(",
")",
"lastOld",
",",
"lastNew",
"=",
"None",
",",
"None",
"x",
"=",
"-",
"1",
"for",
"old",
",",
"new",
",",
"index",
"in",
"stableSort",
":",
"if",
"old",
"!=",
"lastOld",
":",
"x",
"+=",
"1",
"# the last old value was changed, so update both",
"lastOld",
"=",
"old",
"lastNew",
"=",
"new",
"elif",
"new",
"!=",
"lastNew",
":",
"# break the tie based on the new info (update lastNew)",
"x",
"+=",
"1",
"lastNew",
"=",
"new",
"newsym",
"[",
"index",
"]",
"=",
"x"
]
| 33.615385 | 0.002225 |
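The row above is Python 2 only: `map(None, ...)` zip-pads with None and returns a list that can be sorted in place, neither of which works in Python 3. Below is a sketch of the same tie-breaking logic ported to Python 3, assuming the input sequences have equal length (which the original's use of `range(len(oldsym))` already implies).

```python
def break_rank_ties_py3(oldsym, newsym):
    """Python 3 sketch of breakRankTies: newsym is rewritten in place with
    ranks 0..n-1, breaking ties in oldsym using the values in newsym."""
    stable_sort = sorted(zip(oldsym, newsym, range(len(oldsym))))
    last_old, last_new = None, None
    x = -1
    for old, new, index in stable_sort:
        if old != last_old:
            # the old rank changed, so start a new rank and remember both values
            x += 1
            last_old = old
            last_new = new
        elif new != last_new:
            # same old rank but different new info: break the tie
            x += 1
            last_new = new
        newsym[index] = x
```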
def _copy(self):
"""Create and return a new copy of the Bits (always in memory)."""
s_copy = self.__class__()
s_copy._setbytes_unsafe(self._datastore.getbyteslice(0, self._datastore.bytelength),
self.len, self._offset)
return s_copy | [
"def",
"_copy",
"(",
"self",
")",
":",
"s_copy",
"=",
"self",
".",
"__class__",
"(",
")",
"s_copy",
".",
"_setbytes_unsafe",
"(",
"self",
".",
"_datastore",
".",
"getbyteslice",
"(",
"0",
",",
"self",
".",
"_datastore",
".",
"bytelength",
")",
",",
"self",
".",
"len",
",",
"self",
".",
"_offset",
")",
"return",
"s_copy"
]
| 48.5 | 0.010135 |
def as_dict(self):
"""
Serializes the object's necessary data into a dictionary.
:returns: Serialized data in a dictionary.
:rtype: dict
"""
result_dict = super(Benchmark, self).as_dict()
statuses = list()
titles = list()
descriptions = list()
front_matters = list()
rear_matters = list()
platforms = list()
version = None
profiles = list()
groups = list()
for child in self.children:
if isinstance(child, Version):
version = child.as_dict()
elif isinstance(child, Status):
statuses.append(child.as_dict())
elif isinstance(child, Title):
titles.append(child.as_dict())
elif isinstance(child, Description):
descriptions.append(child.as_dict())
elif isinstance(child, FrontMatter):
front_matters.append(child.as_dict())
elif isinstance(child, RearMatter):
rear_matters.append(child.as_dict())
elif isinstance(child, Platform):
platforms.append(child.as_dict())
elif isinstance(child, Profile):
profiles.append(child.as_dict())
elif isinstance(child, Group):
groups.append(child.as_dict())
if version is not None:
result_dict['version'] = version
if len(statuses) > 0:
result_dict['statuses'] = statuses
if len(titles) > 0:
result_dict['titles'] = titles
if len(descriptions) > 0:
result_dict['descriptions'] = descriptions
if len(front_matters) > 0:
result_dict['front_matters'] = front_matters
if len(rear_matters) > 0:
result_dict['rear_matters'] = rear_matters
if len(platforms) > 0:
result_dict['platforms'] = platforms
if len(profiles) > 0:
result_dict['profiles'] = profiles
if len(groups) > 0:
result_dict['groups'] = groups
return result_dict | [
"def",
"as_dict",
"(",
"self",
")",
":",
"result_dict",
"=",
"super",
"(",
"Benchmark",
",",
"self",
")",
".",
"as_dict",
"(",
")",
"statuses",
"=",
"list",
"(",
")",
"titles",
"=",
"list",
"(",
")",
"descriptions",
"=",
"list",
"(",
")",
"front_matters",
"=",
"list",
"(",
")",
"rear_matters",
"=",
"list",
"(",
")",
"platforms",
"=",
"list",
"(",
")",
"version",
"=",
"None",
"profiles",
"=",
"list",
"(",
")",
"groups",
"=",
"list",
"(",
")",
"for",
"child",
"in",
"self",
".",
"children",
":",
"if",
"isinstance",
"(",
"child",
",",
"Version",
")",
":",
"version",
"=",
"child",
".",
"as_dict",
"(",
")",
"elif",
"isinstance",
"(",
"child",
",",
"Status",
")",
":",
"statuses",
".",
"append",
"(",
"child",
".",
"as_dict",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"Title",
")",
":",
"titles",
".",
"append",
"(",
"child",
".",
"as_dict",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"Description",
")",
":",
"descriptions",
".",
"append",
"(",
"child",
".",
"as_dict",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"FrontMatter",
")",
":",
"front_matters",
".",
"append",
"(",
"child",
".",
"as_dict",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"RearMatter",
")",
":",
"rear_matters",
".",
"append",
"(",
"child",
".",
"as_dict",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"Platform",
")",
":",
"platforms",
".",
"append",
"(",
"child",
".",
"as_dict",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"Profile",
")",
":",
"profiles",
".",
"append",
"(",
"child",
".",
"as_dict",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"Group",
")",
":",
"groups",
".",
"append",
"(",
"child",
".",
"as_dict",
"(",
")",
")",
"if",
"version",
"is",
"not",
"None",
":",
"result_dict",
"[",
"'version'",
"]",
"=",
"version",
"if",
"len",
"(",
"statuses",
")",
">",
"0",
":",
"result_dict",
"[",
"'statuses'",
"]",
"=",
"statuses",
"if",
"len",
"(",
"titles",
")",
">",
"0",
":",
"result_dict",
"[",
"'titles'",
"]",
"=",
"titles",
"if",
"len",
"(",
"descriptions",
")",
">",
"0",
":",
"result_dict",
"[",
"'descriptions'",
"]",
"=",
"descriptions",
"if",
"len",
"(",
"front_matters",
")",
">",
"0",
":",
"result_dict",
"[",
"'front_matters'",
"]",
"=",
"front_matters",
"if",
"len",
"(",
"rear_matters",
")",
">",
"0",
":",
"result_dict",
"[",
"'rear_matters'",
"]",
"=",
"rear_matters",
"if",
"len",
"(",
"platforms",
")",
">",
"0",
":",
"result_dict",
"[",
"'platforms'",
"]",
"=",
"platforms",
"if",
"len",
"(",
"profiles",
")",
">",
"0",
":",
"result_dict",
"[",
"'profiles'",
"]",
"=",
"profiles",
"if",
"len",
"(",
"groups",
")",
">",
"0",
":",
"result_dict",
"[",
"'groups'",
"]",
"=",
"groups",
"return",
"result_dict"
]
| 34.316667 | 0.000944 |
def build_base_image_cmd(self, force):
"""
Build the glusterbase image
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
verbose = self.conf.verbose
if self.image_exists(tag=basetag):
if not force:
echo("Image with tag '{0}' already exists".format(basetag))
return self.image_by_tag(basetag)
else:
self.remove_image(basetag)
echo("Building base image")
stream = self.build(path=basedir, rm=True, tag=basetag)
err = self.__handle_build_stream(stream, verbose)
if err:
echo("Building base image failed with following error:")
echo(err)
return None
image = self.image_by_tag(basetag)
echo("Built base image {0} (Id: {1})".format(basetag, image['Id']))
return image | [
"def",
"build_base_image_cmd",
"(",
"self",
",",
"force",
")",
":",
"check_permissions",
"(",
")",
"basetag",
"=",
"self",
".",
"conf",
".",
"basetag",
"basedir",
"=",
"self",
".",
"conf",
".",
"basedir",
"verbose",
"=",
"self",
".",
"conf",
".",
"verbose",
"if",
"self",
".",
"image_exists",
"(",
"tag",
"=",
"basetag",
")",
":",
"if",
"not",
"force",
":",
"echo",
"(",
"\"Image with tag '{0}' already exists\"",
".",
"format",
"(",
"basetag",
")",
")",
"return",
"self",
".",
"image_by_tag",
"(",
"basetag",
")",
"else",
":",
"self",
".",
"remove_image",
"(",
"basetag",
")",
"echo",
"(",
"\"Building base image\"",
")",
"stream",
"=",
"self",
".",
"build",
"(",
"path",
"=",
"basedir",
",",
"rm",
"=",
"True",
",",
"tag",
"=",
"basetag",
")",
"err",
"=",
"self",
".",
"__handle_build_stream",
"(",
"stream",
",",
"verbose",
")",
"if",
"err",
":",
"echo",
"(",
"\"Building base image failed with following error:\"",
")",
"echo",
"(",
"err",
")",
"return",
"None",
"image",
"=",
"self",
".",
"image_by_tag",
"(",
"basetag",
")",
"echo",
"(",
"\"Built base image {0} (Id: {1})\"",
".",
"format",
"(",
"basetag",
",",
"image",
"[",
"'Id'",
"]",
")",
")",
"return",
"image"
]
| 33.185185 | 0.002169 |
def eval_unx(self, exp):
"unary expressions"
inner=self.eval(exp.val)
if exp.op.op=='+': return inner
elif exp.op.op=='-': return -inner
elif exp.op.op=='not': return threevl.ThreeVL.nein(inner)
else: raise NotImplementedError('unk_op',exp.op) | [
"def",
"eval_unx",
"(",
"self",
",",
"exp",
")",
":",
"inner",
"=",
"self",
".",
"eval",
"(",
"exp",
".",
"val",
")",
"if",
"exp",
".",
"op",
".",
"op",
"==",
"'+'",
":",
"return",
"inner",
"elif",
"exp",
".",
"op",
".",
"op",
"==",
"'-'",
":",
"return",
"-",
"inner",
"elif",
"exp",
".",
"op",
".",
"op",
"==",
"'not'",
":",
"return",
"threevl",
".",
"ThreeVL",
".",
"nein",
"(",
"inner",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'unk_op'",
",",
"exp",
".",
"op",
")"
]
| 37.285714 | 0.037453 |
def record_trace(self, selectors=None):
"""Record a trace of readings produced by this simulator.
This causes the property `self.trace` to be populated with a
SimulationTrace object that contains all of the readings that
are produced during the course of the simulation. Only readings
that respond to specific selectors are given.
You can pass whatever selectors you want in the optional selectors
argument. If you pass None, then the default behavior is to trace
all of the output streams of the sensor graph, which are defined
as the streams that are selected by a DataStreamer object in the
sensor graph. This is typically what is meant by sensor graph
output.
You can inspect the current trace by looking at the trace
property. It will initially be an empty list and will be updated
with each call to step() or run() that results in new readings
responsive to the selectors you pick (or the graph streamers if
you did not explicitly pass a list of DataStreamSelector objects).
Args:
selectors (list of DataStreamSelector): The selectors to add watch
statements on to produce this trace. This is optional.
If it is not specified, the streamers of the sensor
graph are used.
"""
if selectors is None:
selectors = [x.selector for x in self.sensor_graph.streamers]
self.trace = SimulationTrace(selectors=selectors)
for sel in selectors:
self.sensor_graph.sensor_log.watch(sel, self._on_trace_callback) | [
"def",
"record_trace",
"(",
"self",
",",
"selectors",
"=",
"None",
")",
":",
"if",
"selectors",
"is",
"None",
":",
"selectors",
"=",
"[",
"x",
".",
"selector",
"for",
"x",
"in",
"self",
".",
"sensor_graph",
".",
"streamers",
"]",
"self",
".",
"trace",
"=",
"SimulationTrace",
"(",
"selectors",
"=",
"selectors",
")",
"for",
"sel",
"in",
"selectors",
":",
"self",
".",
"sensor_graph",
".",
"sensor_log",
".",
"watch",
"(",
"sel",
",",
"self",
".",
"_on_trace_callback",
")"
]
| 46.542857 | 0.001203 |
def node_number(self, *, count_pnode=True) -> int:
"""Return the number of node"""
return (sum(1 for n in self.nodes())
+ (sum(1 for n in self.powernodes()) if count_pnode else 0)) | [
"def",
"node_number",
"(",
"self",
",",
"*",
",",
"count_pnode",
"=",
"True",
")",
"->",
"int",
":",
"return",
"(",
"sum",
"(",
"1",
"for",
"n",
"in",
"self",
".",
"nodes",
"(",
")",
")",
"+",
"(",
"sum",
"(",
"1",
"for",
"n",
"in",
"self",
".",
"powernodes",
"(",
")",
")",
"if",
"count_pnode",
"else",
"0",
")",
")"
]
| 52.25 | 0.009434 |
def _inet6_ntop(addr):
"""Convert an IPv6 address from binary form into text representation,
used when socket.inet_pton is not available.
"""
# IPv6 addresses have 128bits (16 bytes)
if len(addr) != 16:
raise ValueError("invalid length of packed IP address string")
# Decode to hex representation
address = ":".join(plain_str(bytes_hex(addr[idx:idx + 2])).lstrip('0') or '0' # noqa: E501
for idx in range(0, 16, 2))
try:
# Get the longest set of zero blocks. We need to take a look
# at group 1 regarding the length, as 0:0:1:0:0:2:3:4 would
# have two matches: 0:0: and :0:0: where the latter is longer,
# though the first one should be taken. Group 1 is in both
# cases 0:0.
match = max(_IP6_ZEROS.finditer(address),
key=lambda m: m.end(1) - m.start(1))
return '{}::{}'.format(address[:match.start()], address[match.end():])
except ValueError:
return address | [
"def",
"_inet6_ntop",
"(",
"addr",
")",
":",
"# IPv6 addresses have 128bits (16 bytes)",
"if",
"len",
"(",
"addr",
")",
"!=",
"16",
":",
"raise",
"ValueError",
"(",
"\"invalid length of packed IP address string\"",
")",
"# Decode to hex representation",
"address",
"=",
"\":\"",
".",
"join",
"(",
"plain_str",
"(",
"bytes_hex",
"(",
"addr",
"[",
"idx",
":",
"idx",
"+",
"2",
"]",
")",
")",
".",
"lstrip",
"(",
"'0'",
")",
"or",
"'0'",
"# noqa: E501",
"for",
"idx",
"in",
"range",
"(",
"0",
",",
"16",
",",
"2",
")",
")",
"try",
":",
"# Get the longest set of zero blocks. We need to take a look",
"# at group 1 regarding the length, as 0:0:1:0:0:2:3:4 would",
"# have two matches: 0:0: and :0:0: where the latter is longer,",
"# though the first one should be taken. Group 1 is in both",
"# cases 0:0.",
"match",
"=",
"max",
"(",
"_IP6_ZEROS",
".",
"finditer",
"(",
"address",
")",
",",
"key",
"=",
"lambda",
"m",
":",
"m",
".",
"end",
"(",
"1",
")",
"-",
"m",
".",
"start",
"(",
"1",
")",
")",
"return",
"'{}::{}'",
".",
"format",
"(",
"address",
"[",
":",
"match",
".",
"start",
"(",
")",
"]",
",",
"address",
"[",
"match",
".",
"end",
"(",
")",
":",
"]",
")",
"except",
"ValueError",
":",
"return",
"address"
]
| 41.166667 | 0.000989 |
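A worked example of the conversion above, assuming `_IP6_ZEROS` matches runs of zero groups as its use suggests; the steps are traced in comments rather than calling the scapy helpers directly.

```python
packed = bytes.fromhex("20010db8000000000000000000000001")   # 16 bytes
# per-pair hex groups:           2001 0db8 0000 0000 0000 0000 0000 0001
# after lstrip('0') (or '0'):    "2001:db8:0:0:0:0:0:1"
# longest run of zero groups collapsed by the regex: "2001:db8::1"
```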
def genericCameraMatrix(shape, angularField=60):
'''
Return a generic camera matrix
[[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]]
for a given image shape
'''
# http://nghiaho.com/?page_id=576
# assume that the optical centre is in the middle:
cy = int(shape[0] / 2)
cx = int(shape[1] / 2)
# assume that the FOV is 60 DEG (webcam)
fx = fy = cx / np.tan(angularField / 2 * np.pi /
180) # camera focal length
# see
# http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
return np.array([[fx, 0, cx],
[0, fy, cy],
[0, 0, 1]
], dtype=np.float32) | [
"def",
"genericCameraMatrix",
"(",
"shape",
",",
"angularField",
"=",
"60",
")",
":",
"# http://nghiaho.com/?page_id=576\r",
"# assume that the optical centre is in the middle:\r",
"cy",
"=",
"int",
"(",
"shape",
"[",
"0",
"]",
"/",
"2",
")",
"cx",
"=",
"int",
"(",
"shape",
"[",
"1",
"]",
"/",
"2",
")",
"# assume that the FOV is 60 DEG (webcam)\r",
"fx",
"=",
"fy",
"=",
"cx",
"/",
"np",
".",
"tan",
"(",
"angularField",
"/",
"2",
"*",
"np",
".",
"pi",
"/",
"180",
")",
"# camera focal length\r",
"# see\r",
"# http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html\r",
"return",
"np",
".",
"array",
"(",
"[",
"[",
"fx",
",",
"0",
",",
"cx",
"]",
",",
"[",
"0",
",",
"fy",
",",
"cy",
"]",
",",
"[",
"0",
",",
"0",
",",
"1",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")"
]
| 33 | 0.001339 |
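Plugging a typical 640x480 webcam frame into the same formula gives a feel for the numbers; this repeats the arithmetic of the function above rather than importing it.

```python
import numpy as np

shape = (480, 640)                              # (rows, cols) of the image
cy, cx = shape[0] // 2, shape[1] // 2           # optical centre: 240, 320
fx = fy = cx / np.tan(60 / 2 * np.pi / 180)     # ~554.3 px for a 60 degree FOV
K = np.array([[fx, 0, cx],
              [0, fy, cy],
              [0, 0, 1]], dtype=np.float32)
```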
def _choi_to_kraus(data, input_dim, output_dim, atol=ATOL_DEFAULT):
"""Transform Choi representation to Kraus representation."""
# Check if hermitian matrix
if is_hermitian_matrix(data, atol=atol):
# Get eigen-decomposition of Choi-matrix
w, v = la.eigh(data)
# Check eigenvalues are non-negative
if len(w[w < -atol]) == 0:
# CP-map Kraus representation
kraus = []
for val, vec in zip(w, v.T):
if abs(val) > atol:
k = np.sqrt(val) * vec.reshape(
(output_dim, input_dim), order='F')
kraus.append(k)
# If we are converting a zero matrix, we need to return a Kraus set
# with a single zero-element Kraus matrix
if not kraus:
kraus.append(np.zeros((output_dim, input_dim), dtype=complex))
return (kraus, None)
# Non-CP-map generalized Kraus representation
mat_u, svals, mat_vh = la.svd(data)
kraus_l = []
kraus_r = []
for val, vec_l, vec_r in zip(svals, mat_u.T, mat_vh.conj()):
kraus_l.append(
np.sqrt(val) * vec_l.reshape((output_dim, input_dim), order='F'))
kraus_r.append(
np.sqrt(val) * vec_r.reshape((output_dim, input_dim), order='F'))
return (kraus_l, kraus_r) | [
"def",
"_choi_to_kraus",
"(",
"data",
",",
"input_dim",
",",
"output_dim",
",",
"atol",
"=",
"ATOL_DEFAULT",
")",
":",
"# Check if hermitian matrix",
"if",
"is_hermitian_matrix",
"(",
"data",
",",
"atol",
"=",
"atol",
")",
":",
"# Get eigen-decomposition of Choi-matrix",
"w",
",",
"v",
"=",
"la",
".",
"eigh",
"(",
"data",
")",
"# Check eigenvaleus are non-negative",
"if",
"len",
"(",
"w",
"[",
"w",
"<",
"-",
"atol",
"]",
")",
"==",
"0",
":",
"# CP-map Kraus representation",
"kraus",
"=",
"[",
"]",
"for",
"val",
",",
"vec",
"in",
"zip",
"(",
"w",
",",
"v",
".",
"T",
")",
":",
"if",
"abs",
"(",
"val",
")",
">",
"atol",
":",
"k",
"=",
"np",
".",
"sqrt",
"(",
"val",
")",
"*",
"vec",
".",
"reshape",
"(",
"(",
"output_dim",
",",
"input_dim",
")",
",",
"order",
"=",
"'F'",
")",
"kraus",
".",
"append",
"(",
"k",
")",
"# If we are converting a zero matrix, we need to return a Kraus set",
"# with a single zero-element Kraus matrix",
"if",
"not",
"kraus",
":",
"kraus",
".",
"append",
"(",
"np",
".",
"zeros",
"(",
"(",
"output_dim",
",",
"input_dim",
")",
",",
"dtype",
"=",
"complex",
")",
")",
"return",
"(",
"kraus",
",",
"None",
")",
"# Non-CP-map generalized Kraus representation",
"mat_u",
",",
"svals",
",",
"mat_vh",
"=",
"la",
".",
"svd",
"(",
"data",
")",
"kraus_l",
"=",
"[",
"]",
"kraus_r",
"=",
"[",
"]",
"for",
"val",
",",
"vec_l",
",",
"vec_r",
"in",
"zip",
"(",
"svals",
",",
"mat_u",
".",
"T",
",",
"mat_vh",
".",
"conj",
"(",
")",
")",
":",
"kraus_l",
".",
"append",
"(",
"np",
".",
"sqrt",
"(",
"val",
")",
"*",
"vec_l",
".",
"reshape",
"(",
"(",
"output_dim",
",",
"input_dim",
")",
",",
"order",
"=",
"'F'",
")",
")",
"kraus_r",
".",
"append",
"(",
"np",
".",
"sqrt",
"(",
"val",
")",
"*",
"vec_r",
".",
"reshape",
"(",
"(",
"output_dim",
",",
"input_dim",
")",
",",
"order",
"=",
"'F'",
")",
")",
"return",
"(",
"kraus_l",
",",
"kraus_r",
")"
]
| 44.1 | 0.00074 |
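A small sanity check of the CP branch's math, assuming the column-stacking ('F' order) convention used above: the Choi matrix of the single-qubit identity channel has a single non-zero eigenvalue of 2, and the reconstruction recovers the identity as its only Kraus operator.

```python
import numpy as np
import scipy.linalg as la

# Choi matrix of the 1-qubit identity channel in the column-stacking convention.
choi = np.array([[1, 0, 0, 1],
                 [0, 0, 0, 0],
                 [0, 0, 0, 0],
                 [1, 0, 0, 1]], dtype=complex)
w, v = la.eigh(choi)                                 # eigenvalues ascending: 0, 0, 0, 2
val, vec = w[-1], v[:, -1]
kraus = np.sqrt(val) * vec.reshape((2, 2), order='F')
# kraus equals the 2x2 identity (up to the arbitrary sign of the eigenvector)
```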
def _contiguous_groups(
length: int,
comparator: Callable[[int, int], bool]
) -> List[Tuple[int, int]]:
"""Splits range(length) into approximate equivalence classes.
Args:
length: The length of the range to split.
comparator: Determines if two indices have approximately equal items.
Returns:
A list of (inclusive_start, exclusive_end) range endpoints. Each
corresponds to a run of approximately-equivalent items.
"""
result = []
start = 0
while start < length:
past = start + 1
while past < length and comparator(start, past):
past += 1
result.append((start, past))
start = past
return result | [
"def",
"_contiguous_groups",
"(",
"length",
":",
"int",
",",
"comparator",
":",
"Callable",
"[",
"[",
"int",
",",
"int",
"]",
",",
"bool",
"]",
")",
"->",
"List",
"[",
"Tuple",
"[",
"int",
",",
"int",
"]",
"]",
":",
"result",
"=",
"[",
"]",
"start",
"=",
"0",
"while",
"start",
"<",
"length",
":",
"past",
"=",
"start",
"+",
"1",
"while",
"past",
"<",
"length",
"and",
"comparator",
"(",
"start",
",",
"past",
")",
":",
"past",
"+=",
"1",
"result",
".",
"append",
"(",
"(",
"start",
",",
"past",
")",
")",
"start",
"=",
"past",
"return",
"result"
]
| 30.304348 | 0.001391 |
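A quick, hypothetical use of the helper above: grouping indices of floats that agree within a tolerance (the values and threshold are made up).

```python
values = [1.00, 1.01, 1.02, 5.0, 5.0, 9.7]
groups = _contiguous_groups(
    len(values),
    lambda i, j: abs(values[i] - values[j]) < 0.1)
# groups == [(0, 3), (3, 5), (5, 6)]
```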
def centers(self):
"""The centers for the KMeans model."""
o = self._model_json["output"]
cvals = o["centers"].cell_values
centers = [list(cval[1:]) for cval in cvals]
return centers | [
"def",
"centers",
"(",
"self",
")",
":",
"o",
"=",
"self",
".",
"_model_json",
"[",
"\"output\"",
"]",
"cvals",
"=",
"o",
"[",
"\"centers\"",
"]",
".",
"cell_values",
"centers",
"=",
"[",
"list",
"(",
"cval",
"[",
"1",
":",
"]",
")",
"for",
"cval",
"in",
"cvals",
"]",
"return",
"centers"
]
| 36.166667 | 0.009009 |
def rotate(matrix, angle):
r"""Rotate
This method rotates an input matrix about the input angle.
Parameters
----------
matrix : np.ndarray
Input matrix array
angle : float
Rotation angle in radians
Returns
-------
np.ndarray rotated matrix
Raises
------
ValueError
For invalid matrix shape
Examples
--------
>>> from modopt.math.matrix import rotate
>>> a = np.arange(9).reshape(3, 3)
>>> rotate(a, np.pi / 2)
array([[2, 5, 8],
[1, 4, 7],
[0, 3, 6]])
"""
shape = np.array(matrix.shape)
if shape[0] != shape[1]:
raise ValueError('Input matrix must be square.')
shift = (shape - 1) // 2
index = np.array(list(product(*np.array([np.arange(val) for val in
shape])))) - shift
new_index = np.array(np.dot(index, rot_matrix(angle)), dtype='int') + shift
new_index[new_index >= shape[0]] -= shape[0]
return matrix[tuple(zip(new_index.T))].reshape(shape.T) | [
"def",
"rotate",
"(",
"matrix",
",",
"angle",
")",
":",
"shape",
"=",
"np",
".",
"array",
"(",
"matrix",
".",
"shape",
")",
"if",
"shape",
"[",
"0",
"]",
"!=",
"shape",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'Input matrix must be square.'",
")",
"shift",
"=",
"(",
"shape",
"-",
"1",
")",
"//",
"2",
"index",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"product",
"(",
"*",
"np",
".",
"array",
"(",
"[",
"np",
".",
"arange",
"(",
"val",
")",
"for",
"val",
"in",
"shape",
"]",
")",
")",
")",
")",
"-",
"shift",
"new_index",
"=",
"np",
".",
"array",
"(",
"np",
".",
"dot",
"(",
"index",
",",
"rot_matrix",
"(",
"angle",
")",
")",
",",
"dtype",
"=",
"'int'",
")",
"+",
"shift",
"new_index",
"[",
"new_index",
">=",
"shape",
"[",
"0",
"]",
"]",
"-=",
"shape",
"[",
"0",
"]",
"return",
"matrix",
"[",
"tuple",
"(",
"zip",
"(",
"new_index",
".",
"T",
")",
")",
"]",
".",
"reshape",
"(",
"shape",
".",
"T",
")"
]
| 21.586957 | 0.000963 |
def confusion_matrix(self, data):
"""
Returns a confusion matrix based on H2O's default prediction threshold for a dataset.
:param data: metric for which the confusion matrix will be calculated.
"""
return {model.model_id: model.confusion_matrix(data) for model in self.models} | [
"def",
"confusion_matrix",
"(",
"self",
",",
"data",
")",
":",
"return",
"{",
"model",
".",
"model_id",
":",
"model",
".",
"confusion_matrix",
"(",
"data",
")",
"for",
"model",
"in",
"self",
".",
"models",
"}"
]
| 44.571429 | 0.012579 |
def curl_remote_name(cls, file_url):
"""Download file_url, and save as a file name of the URL.
It behaves like "curl -O or --remote-name".
It raises HTTPError if the file_url not found.
"""
tar_gz_file_name = file_url.split('/')[-1]
if sys.version_info >= (3, 2):
from urllib.request import urlopen
from urllib.error import HTTPError
else:
from urllib2 import urlopen
from urllib2 import HTTPError
response = None
try:
response = urlopen(file_url)
except HTTPError as e:
message = 'Download failed: URL: {0}, reason: {1}'.format(
file_url, e)
if 'HTTP Error 404' in str(e):
raise RemoteFileNotFoundError(message)
else:
raise InstallError(message)
tar_gz_file_obj = io.BytesIO(response.read())
with open(tar_gz_file_name, 'wb') as f_out:
f_out.write(tar_gz_file_obj.read())
return tar_gz_file_name | [
"def",
"curl_remote_name",
"(",
"cls",
",",
"file_url",
")",
":",
"tar_gz_file_name",
"=",
"file_url",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"2",
")",
":",
"from",
"urllib",
".",
"request",
"import",
"urlopen",
"from",
"urllib",
".",
"error",
"import",
"HTTPError",
"else",
":",
"from",
"urllib2",
"import",
"urlopen",
"from",
"urllib2",
"import",
"HTTPError",
"response",
"=",
"None",
"try",
":",
"response",
"=",
"urlopen",
"(",
"file_url",
")",
"except",
"HTTPError",
"as",
"e",
":",
"message",
"=",
"'Download failed: URL: {0}, reason: {1}'",
".",
"format",
"(",
"file_url",
",",
"e",
")",
"if",
"'HTTP Error 404'",
"in",
"str",
"(",
"e",
")",
":",
"raise",
"RemoteFileNotFoundError",
"(",
"message",
")",
"else",
":",
"raise",
"InstallError",
"(",
"message",
")",
"tar_gz_file_obj",
"=",
"io",
".",
"BytesIO",
"(",
"response",
".",
"read",
"(",
")",
")",
"with",
"open",
"(",
"tar_gz_file_name",
",",
"'wb'",
")",
"as",
"f_out",
":",
"f_out",
".",
"write",
"(",
"tar_gz_file_obj",
".",
"read",
"(",
")",
")",
"return",
"tar_gz_file_name"
]
| 34.566667 | 0.001876 |
def upgrade(**kwargs):
'''
Upgrade all software. Currently not implemented
Kwargs:
saltenv (str): The salt environment to use. Default ``base``.
refresh (bool): Refresh package metadata. Default ``True``.
.. note::
This feature is not yet implemented for Windows.
Returns:
dict: Empty dict, until implemented
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
'''
log.warning('pkg.upgrade not implemented on Windows yet')
refresh = salt.utils.data.is_true(kwargs.get('refresh', True))
saltenv = kwargs.get('saltenv', 'base')
log.warning('pkg.upgrade not implemented on Windows yet refresh:%s saltenv:%s', refresh, saltenv)
# Uncomment the below once pkg.upgrade has been implemented
# if salt.utils.data.is_true(refresh):
# refresh_db()
return {} | [
"def",
"upgrade",
"(",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"warning",
"(",
"'pkg.upgrade not implemented on Windows yet'",
")",
"refresh",
"=",
"salt",
".",
"utils",
".",
"data",
".",
"is_true",
"(",
"kwargs",
".",
"get",
"(",
"'refresh'",
",",
"True",
")",
")",
"saltenv",
"=",
"kwargs",
".",
"get",
"(",
"'saltenv'",
",",
"'base'",
")",
"log",
".",
"warning",
"(",
"'pkg.upgrade not implemented on Windows yet refresh:%s saltenv:%s'",
",",
"refresh",
",",
"saltenv",
")",
"# Uncomment the below once pkg.upgrade has been implemented",
"# if salt.utils.data.is_true(refresh):",
"# refresh_db()",
"return",
"{",
"}"
]
| 28.827586 | 0.002315 |
def create_bayesian_tear_sheet(returns, benchmark_rets=None,
live_start_date=None, samples=2000,
return_fig=False, stoch_vol=False,
progressbar=True):
"""
Generate a number of Bayesian distributions and a Bayesian
cone plot of returns.
Plots: Sharpe distribution, annual volatility distribution,
annual alpha distribution, beta distribution, predicted 1 and 5
day returns distributions, and a cumulative returns cone plot.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
benchmark_rets : pd.Series, optional
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
live_start_date : datetime, optional
The point in time when the strategy began live
trading, after its backtest period.
samples : int, optional
Number of posterior samples to draw.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
stoch_vol : boolean, optional
If True, run and plot the stochastic volatility model
progressbar : boolean, optional
If True, show a progress bar
"""
if not have_bayesian:
raise NotImplementedError(
"Bayesian tear sheet requirements not found.\n"
"Run 'pip install pyfolio[bayesian]' to install "
"bayesian requirements."
)
if live_start_date is None:
raise NotImplementedError(
'Bayesian tear sheet requires setting of live_start_date'
)
live_start_date = ep.utils.get_utc_timestamp(live_start_date)
df_train = returns.loc[returns.index < live_start_date]
df_test = returns.loc[returns.index >= live_start_date]
# Run T model with missing data
print("Running T model")
previous_time = time()
# track the total run time of the Bayesian tear sheet
start_time = previous_time
trace_t, ppc_t = bayesian.run_model('t', df_train,
returns_test=df_test,
samples=samples, ppc=True,
progressbar=progressbar)
previous_time = timer("T model", previous_time)
# Compute BEST model
print("\nRunning BEST model")
trace_best = bayesian.run_model('best', df_train,
returns_test=df_test,
samples=samples,
progressbar=progressbar)
previous_time = timer("BEST model", previous_time)
# Plot results
fig = plt.figure(figsize=(14, 10 * 2))
gs = gridspec.GridSpec(9, 2, wspace=0.3, hspace=0.3)
axs = []
row = 0
# Plot Bayesian cone
ax_cone = plt.subplot(gs[row, :])
bayesian.plot_bayes_cone(df_train, df_test, ppc_t, ax=ax_cone)
previous_time = timer("plotting Bayesian cone", previous_time)
# Plot BEST results
row += 1
axs.append(plt.subplot(gs[row, 0]))
axs.append(plt.subplot(gs[row, 1]))
row += 1
axs.append(plt.subplot(gs[row, 0]))
axs.append(plt.subplot(gs[row, 1]))
row += 1
axs.append(plt.subplot(gs[row, 0]))
axs.append(plt.subplot(gs[row, 1]))
row += 1
# Effect size across two
axs.append(plt.subplot(gs[row, :]))
bayesian.plot_best(trace=trace_best, axs=axs)
previous_time = timer("plotting BEST results", previous_time)
# Compute Bayesian predictions
row += 1
ax_ret_pred_day = plt.subplot(gs[row, 0])
ax_ret_pred_week = plt.subplot(gs[row, 1])
day_pred = ppc_t[:, 0]
p5 = scipy.stats.scoreatpercentile(day_pred, 5)
sns.distplot(day_pred,
ax=ax_ret_pred_day
)
ax_ret_pred_day.axvline(p5, linestyle='--', linewidth=3.)
ax_ret_pred_day.set_xlabel('Predicted returns 1 day')
ax_ret_pred_day.set_ylabel('Frequency')
ax_ret_pred_day.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,
verticalalignment='bottom',
horizontalalignment='right',
transform=ax_ret_pred_day.transAxes)
previous_time = timer("computing Bayesian predictions", previous_time)
# Plot Bayesian VaRs
week_pred = (
np.cumprod(ppc_t[:, :5] + 1, 1) - 1)[:, -1]
p5 = scipy.stats.scoreatpercentile(week_pred, 5)
sns.distplot(week_pred,
ax=ax_ret_pred_week
)
ax_ret_pred_week.axvline(p5, linestyle='--', linewidth=3.)
ax_ret_pred_week.set_xlabel('Predicted cum returns 5 days')
ax_ret_pred_week.set_ylabel('Frequency')
ax_ret_pred_week.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,
verticalalignment='bottom',
horizontalalignment='right',
transform=ax_ret_pred_week.transAxes)
previous_time = timer("plotting Bayesian VaRs estimate", previous_time)
# Run alpha beta model
if benchmark_rets is not None:
print("\nRunning alpha beta model")
benchmark_rets = benchmark_rets.loc[df_train.index]
trace_alpha_beta = bayesian.run_model('alpha_beta', df_train,
bmark=benchmark_rets,
samples=samples,
progressbar=progressbar)
previous_time = timer("running alpha beta model", previous_time)
# Plot alpha and beta
row += 1
ax_alpha = plt.subplot(gs[row, 0])
ax_beta = plt.subplot(gs[row, 1])
sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1,
ax=ax_alpha)
sns.distplot(trace_alpha_beta['beta'][100:], ax=ax_beta)
ax_alpha.set_xlabel('Annual Alpha')
ax_alpha.set_ylabel('Belief')
ax_beta.set_xlabel('Beta')
ax_beta.set_ylabel('Belief')
previous_time = timer("plotting alpha beta model", previous_time)
if stoch_vol:
# run stochastic volatility model
returns_cutoff = 400
print(
"\nRunning stochastic volatility model on "
"most recent {} days of returns.".format(returns_cutoff)
)
if df_train.size > returns_cutoff:
df_train_truncated = df_train[-returns_cutoff:]
_, trace_stoch_vol = bayesian.model_stoch_vol(df_train_truncated)
previous_time = timer(
"running stochastic volatility model", previous_time)
# plot latent volatility
row += 1
ax_volatility = plt.subplot(gs[row, :])
bayesian.plot_stoch_vol(
df_train_truncated, trace=trace_stoch_vol, ax=ax_volatility)
previous_time = timer(
"plotting stochastic volatility model", previous_time)
total_time = time() - start_time
print("\nTotal runtime was {:.2f} seconds.".format(total_time))
gs.tight_layout(fig)
if return_fig:
return fig | [
"def",
"create_bayesian_tear_sheet",
"(",
"returns",
",",
"benchmark_rets",
"=",
"None",
",",
"live_start_date",
"=",
"None",
",",
"samples",
"=",
"2000",
",",
"return_fig",
"=",
"False",
",",
"stoch_vol",
"=",
"False",
",",
"progressbar",
"=",
"True",
")",
":",
"if",
"not",
"have_bayesian",
":",
"raise",
"NotImplementedError",
"(",
"\"Bayesian tear sheet requirements not found.\\n\"",
"\"Run 'pip install pyfolio[bayesian]' to install \"",
"\"bayesian requirements.\"",
")",
"if",
"live_start_date",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"'Bayesian tear sheet requires setting of live_start_date'",
")",
"live_start_date",
"=",
"ep",
".",
"utils",
".",
"get_utc_timestamp",
"(",
"live_start_date",
")",
"df_train",
"=",
"returns",
".",
"loc",
"[",
"returns",
".",
"index",
"<",
"live_start_date",
"]",
"df_test",
"=",
"returns",
".",
"loc",
"[",
"returns",
".",
"index",
">=",
"live_start_date",
"]",
"# Run T model with missing data",
"print",
"(",
"\"Running T model\"",
")",
"previous_time",
"=",
"time",
"(",
")",
"# track the total run time of the Bayesian tear sheet",
"start_time",
"=",
"previous_time",
"trace_t",
",",
"ppc_t",
"=",
"bayesian",
".",
"run_model",
"(",
"'t'",
",",
"df_train",
",",
"returns_test",
"=",
"df_test",
",",
"samples",
"=",
"samples",
",",
"ppc",
"=",
"True",
",",
"progressbar",
"=",
"progressbar",
")",
"previous_time",
"=",
"timer",
"(",
"\"T model\"",
",",
"previous_time",
")",
"# Compute BEST model",
"print",
"(",
"\"\\nRunning BEST model\"",
")",
"trace_best",
"=",
"bayesian",
".",
"run_model",
"(",
"'best'",
",",
"df_train",
",",
"returns_test",
"=",
"df_test",
",",
"samples",
"=",
"samples",
",",
"progressbar",
"=",
"progressbar",
")",
"previous_time",
"=",
"timer",
"(",
"\"BEST model\"",
",",
"previous_time",
")",
"# Plot results",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"14",
",",
"10",
"*",
"2",
")",
")",
"gs",
"=",
"gridspec",
".",
"GridSpec",
"(",
"9",
",",
"2",
",",
"wspace",
"=",
"0.3",
",",
"hspace",
"=",
"0.3",
")",
"axs",
"=",
"[",
"]",
"row",
"=",
"0",
"# Plot Bayesian cone",
"ax_cone",
"=",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
":",
"]",
")",
"bayesian",
".",
"plot_bayes_cone",
"(",
"df_train",
",",
"df_test",
",",
"ppc_t",
",",
"ax",
"=",
"ax_cone",
")",
"previous_time",
"=",
"timer",
"(",
"\"plotting Bayesian cone\"",
",",
"previous_time",
")",
"# Plot BEST results",
"row",
"+=",
"1",
"axs",
".",
"append",
"(",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
"0",
"]",
")",
")",
"axs",
".",
"append",
"(",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
"1",
"]",
")",
")",
"row",
"+=",
"1",
"axs",
".",
"append",
"(",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
"0",
"]",
")",
")",
"axs",
".",
"append",
"(",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
"1",
"]",
")",
")",
"row",
"+=",
"1",
"axs",
".",
"append",
"(",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
"0",
"]",
")",
")",
"axs",
".",
"append",
"(",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
"1",
"]",
")",
")",
"row",
"+=",
"1",
"# Effect size across two",
"axs",
".",
"append",
"(",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
":",
"]",
")",
")",
"bayesian",
".",
"plot_best",
"(",
"trace",
"=",
"trace_best",
",",
"axs",
"=",
"axs",
")",
"previous_time",
"=",
"timer",
"(",
"\"plotting BEST results\"",
",",
"previous_time",
")",
"# Compute Bayesian predictions",
"row",
"+=",
"1",
"ax_ret_pred_day",
"=",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
"0",
"]",
")",
"ax_ret_pred_week",
"=",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
"1",
"]",
")",
"day_pred",
"=",
"ppc_t",
"[",
":",
",",
"0",
"]",
"p5",
"=",
"scipy",
".",
"stats",
".",
"scoreatpercentile",
"(",
"day_pred",
",",
"5",
")",
"sns",
".",
"distplot",
"(",
"day_pred",
",",
"ax",
"=",
"ax_ret_pred_day",
")",
"ax_ret_pred_day",
".",
"axvline",
"(",
"p5",
",",
"linestyle",
"=",
"'--'",
",",
"linewidth",
"=",
"3.",
")",
"ax_ret_pred_day",
".",
"set_xlabel",
"(",
"'Predicted returns 1 day'",
")",
"ax_ret_pred_day",
".",
"set_ylabel",
"(",
"'Frequency'",
")",
"ax_ret_pred_day",
".",
"text",
"(",
"0.4",
",",
"0.9",
",",
"'Bayesian VaR = %.2f'",
"%",
"p5",
",",
"verticalalignment",
"=",
"'bottom'",
",",
"horizontalalignment",
"=",
"'right'",
",",
"transform",
"=",
"ax_ret_pred_day",
".",
"transAxes",
")",
"previous_time",
"=",
"timer",
"(",
"\"computing Bayesian predictions\"",
",",
"previous_time",
")",
"# Plot Bayesian VaRs",
"week_pred",
"=",
"(",
"np",
".",
"cumprod",
"(",
"ppc_t",
"[",
":",
",",
":",
"5",
"]",
"+",
"1",
",",
"1",
")",
"-",
"1",
")",
"[",
":",
",",
"-",
"1",
"]",
"p5",
"=",
"scipy",
".",
"stats",
".",
"scoreatpercentile",
"(",
"week_pred",
",",
"5",
")",
"sns",
".",
"distplot",
"(",
"week_pred",
",",
"ax",
"=",
"ax_ret_pred_week",
")",
"ax_ret_pred_week",
".",
"axvline",
"(",
"p5",
",",
"linestyle",
"=",
"'--'",
",",
"linewidth",
"=",
"3.",
")",
"ax_ret_pred_week",
".",
"set_xlabel",
"(",
"'Predicted cum returns 5 days'",
")",
"ax_ret_pred_week",
".",
"set_ylabel",
"(",
"'Frequency'",
")",
"ax_ret_pred_week",
".",
"text",
"(",
"0.4",
",",
"0.9",
",",
"'Bayesian VaR = %.2f'",
"%",
"p5",
",",
"verticalalignment",
"=",
"'bottom'",
",",
"horizontalalignment",
"=",
"'right'",
",",
"transform",
"=",
"ax_ret_pred_week",
".",
"transAxes",
")",
"previous_time",
"=",
"timer",
"(",
"\"plotting Bayesian VaRs estimate\"",
",",
"previous_time",
")",
"# Run alpha beta model",
"if",
"benchmark_rets",
"is",
"not",
"None",
":",
"print",
"(",
"\"\\nRunning alpha beta model\"",
")",
"benchmark_rets",
"=",
"benchmark_rets",
".",
"loc",
"[",
"df_train",
".",
"index",
"]",
"trace_alpha_beta",
"=",
"bayesian",
".",
"run_model",
"(",
"'alpha_beta'",
",",
"df_train",
",",
"bmark",
"=",
"benchmark_rets",
",",
"samples",
"=",
"samples",
",",
"progressbar",
"=",
"progressbar",
")",
"previous_time",
"=",
"timer",
"(",
"\"running alpha beta model\"",
",",
"previous_time",
")",
"# Plot alpha and beta",
"row",
"+=",
"1",
"ax_alpha",
"=",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
"0",
"]",
")",
"ax_beta",
"=",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
"1",
"]",
")",
"sns",
".",
"distplot",
"(",
"(",
"1",
"+",
"trace_alpha_beta",
"[",
"'alpha'",
"]",
"[",
"100",
":",
"]",
")",
"**",
"252",
"-",
"1",
",",
"ax",
"=",
"ax_alpha",
")",
"sns",
".",
"distplot",
"(",
"trace_alpha_beta",
"[",
"'beta'",
"]",
"[",
"100",
":",
"]",
",",
"ax",
"=",
"ax_beta",
")",
"ax_alpha",
".",
"set_xlabel",
"(",
"'Annual Alpha'",
")",
"ax_alpha",
".",
"set_ylabel",
"(",
"'Belief'",
")",
"ax_beta",
".",
"set_xlabel",
"(",
"'Beta'",
")",
"ax_beta",
".",
"set_ylabel",
"(",
"'Belief'",
")",
"previous_time",
"=",
"timer",
"(",
"\"plotting alpha beta model\"",
",",
"previous_time",
")",
"if",
"stoch_vol",
":",
"# run stochastic volatility model",
"returns_cutoff",
"=",
"400",
"print",
"(",
"\"\\nRunning stochastic volatility model on \"",
"\"most recent {} days of returns.\"",
".",
"format",
"(",
"returns_cutoff",
")",
")",
"if",
"df_train",
".",
"size",
">",
"returns_cutoff",
":",
"df_train_truncated",
"=",
"df_train",
"[",
"-",
"returns_cutoff",
":",
"]",
"_",
",",
"trace_stoch_vol",
"=",
"bayesian",
".",
"model_stoch_vol",
"(",
"df_train_truncated",
")",
"previous_time",
"=",
"timer",
"(",
"\"running stochastic volatility model\"",
",",
"previous_time",
")",
"# plot latent volatility",
"row",
"+=",
"1",
"ax_volatility",
"=",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"row",
",",
":",
"]",
")",
"bayesian",
".",
"plot_stoch_vol",
"(",
"df_train_truncated",
",",
"trace",
"=",
"trace_stoch_vol",
",",
"ax",
"=",
"ax_volatility",
")",
"previous_time",
"=",
"timer",
"(",
"\"plotting stochastic volatility model\"",
",",
"previous_time",
")",
"total_time",
"=",
"time",
"(",
")",
"-",
"start_time",
"print",
"(",
"\"\\nTotal runtime was {:.2f} seconds.\"",
".",
"format",
"(",
"total_time",
")",
")",
"gs",
".",
"tight_layout",
"(",
"fig",
")",
"if",
"return_fig",
":",
"return",
"fig"
]
| 37.559783 | 0.000141 |
def _extract_response_xml(self, domain, response):
"""Extract XML content of an HTTP response into dictionary format.
Args:
domain: the domain name associated with the response
response: HTTP response object
Returns:
A dictionary: {alexa-ranking key : alexa-ranking value}.
"""
attributes = {}
alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'}
try:
xml_root = ET.fromstring(response._content)
for xml_child in xml_root.findall('SD//'):
if xml_child.tag in alexa_keys and \
alexa_keys[xml_child.tag] in xml_child.attrib:
attributes[xml_child.tag.lower(
)] = xml_child.attrib[alexa_keys[xml_child.tag]]
except ParseError:
# Skip ill-formatted XML and return no Alexa attributes
pass
attributes['domain'] = domain
return {'attributes': attributes} | [
"def",
"_extract_response_xml",
"(",
"self",
",",
"domain",
",",
"response",
")",
":",
"attributes",
"=",
"{",
"}",
"alexa_keys",
"=",
"{",
"'POPULARITY'",
":",
"'TEXT'",
",",
"'REACH'",
":",
"'RANK'",
",",
"'RANK'",
":",
"'DELTA'",
"}",
"try",
":",
"xml_root",
"=",
"ET",
".",
"fromstring",
"(",
"response",
".",
"_content",
")",
"for",
"xml_child",
"in",
"xml_root",
".",
"findall",
"(",
"'SD//'",
")",
":",
"if",
"xml_child",
".",
"tag",
"in",
"alexa_keys",
"and",
"alexa_keys",
"[",
"xml_child",
".",
"tag",
"]",
"in",
"xml_child",
".",
"attrib",
":",
"attributes",
"[",
"xml_child",
".",
"tag",
".",
"lower",
"(",
")",
"]",
"=",
"xml_child",
".",
"attrib",
"[",
"alexa_keys",
"[",
"xml_child",
".",
"tag",
"]",
"]",
"except",
"ParseError",
":",
"# Skip ill-formatted XML and return no Alexa attributes",
"pass",
"attributes",
"[",
"'domain'",
"]",
"=",
"domain",
"return",
"{",
"'attributes'",
":",
"attributes",
"}"
]
| 42 | 0.002116 |
def get_pip_requirement_set(self, arguments, use_remote_index, use_wheels=False):
"""
Get the unpacked requirement(s) specified by the caller by running pip.
:param arguments: The command line arguments to ``pip install ...`` (a
list of strings).
:param use_remote_index: A boolean indicating whether pip is allowed to
connect to the main package index
(http://pypi.python.org by default).
:param use_wheels: Whether pip and pip-accel are allowed to use wheels_
(:data:`False` by default for backwards compatibility
with callers that use pip-accel as a Python API).
:returns: A :class:`pip.req.RequirementSet` object created by pip.
:raises: Any exceptions raised by pip.
"""
# Compose the pip command line arguments. This is where a lot of the
# core logic of pip-accel is hidden and it uses some esoteric features
# of pip so this method is heavily commented.
command_line = []
# Use `--download' to instruct pip to download requirement(s) into
# pip-accel's local source distribution index directory. This has the
# following documented side effects (see `pip install --help'):
# 1. It disables the installation of requirements (without using the
# `--no-install' option which is deprecated and slated for removal
# in pip 7.x).
# 2. It ignores requirements that are already installed (because
# pip-accel doesn't actually need to re-install requirements that
# are already installed we will have to work around this later, but
# that seems fairly simple to do).
command_line.append('--download=%s' % self.config.source_index)
# Use `--find-links' to point pip at pip-accel's local source
# distribution index directory. This ensures that source distribution
# archives are never downloaded more than once (regardless of the HTTP
# cache that was introduced in pip 6.x).
command_line.append('--find-links=%s' % create_file_url(self.config.source_index))
# Use `--no-binary=:all:' to ignore wheel distributions by default in
# order to preserve backwards compatibility with callers that expect a
# requirement set consisting only of source distributions that can be
# converted to `dumb binary distributions'.
if not use_wheels and self.arguments_allow_wheels(arguments):
command_line.append('--no-binary=:all:')
# Use `--no-index' to force pip to only consider source distribution
# archives contained in pip-accel's local source distribution index
# directory. This enables pip-accel to ask pip "Can the local source
# distribution index satisfy all requirements in the given requirement
# set?" which enables pip-accel to keep pip off the internet unless
# absolutely necessary :-).
if not use_remote_index:
command_line.append('--no-index')
# Use `--no-clean' to instruct pip to unpack the source distribution
# archives and *not* clean up the unpacked source distributions
# afterwards. This enables pip-accel to replace pip's installation
# logic with cached binary distribution archives.
command_line.append('--no-clean')
# Use `--build-directory' to instruct pip to unpack the source
# distribution archives to a temporary directory managed by pip-accel.
# We will clean up the build directory when we're done using the
# unpacked source distributions.
command_line.append('--build-directory=%s' % self.build_directory)
# Append the user's `pip install ...' arguments to the command line
# that we just assembled.
command_line.extend(arguments)
logger.info("Executing command: pip install %s", ' '.join(command_line))
# Clear the build directory to prevent PreviousBuildDirError exceptions.
self.clear_build_directory()
# During the pip 6.x upgrade pip-accel switched to using `pip install
# --download' which can produce an interactive prompt as described in
# issue 51 [1]. The documented way [2] to get rid of this interactive
# prompt is pip's --exists-action option, but due to what is most
# likely a bug in pip this doesn't actually work. The environment
# variable $PIP_EXISTS_ACTION does work however, so if the user didn't
# set it we will set a reasonable default for them.
# [1] https://github.com/paylogic/pip-accel/issues/51
# [2] https://pip.pypa.io/en/latest/reference/pip.html#exists-action-option
os.environ.setdefault('PIP_EXISTS_ACTION', 'w')
# Initialize and run the `pip install' command.
command = InstallCommand()
opts, args = command.parse_args(command_line)
if not opts.ignore_installed:
# If the user didn't supply the -I, --ignore-installed option we
# will forcefully disable the option. Refer to the documentation of
# the AttributeOverrides class for further details.
opts = AttributeOverrides(opts, ignore_installed=False)
requirement_set = command.run(opts, args)
# Make sure the output of pip and pip-accel are not intermingled.
sys.stdout.flush()
if requirement_set is None:
raise NothingToDoError("""
pip didn't generate a requirement set, most likely you
specified an empty requirements file?
""")
else:
return self.transform_pip_requirement_set(requirement_set) | [
"def",
"get_pip_requirement_set",
"(",
"self",
",",
"arguments",
",",
"use_remote_index",
",",
"use_wheels",
"=",
"False",
")",
":",
"# Compose the pip command line arguments. This is where a lot of the",
"# core logic of pip-accel is hidden and it uses some esoteric features",
"# of pip so this method is heavily commented.",
"command_line",
"=",
"[",
"]",
"# Use `--download' to instruct pip to download requirement(s) into",
"# pip-accel's local source distribution index directory. This has the",
"# following documented side effects (see `pip install --help'):",
"# 1. It disables the installation of requirements (without using the",
"# `--no-install' option which is deprecated and slated for removal",
"# in pip 7.x).",
"# 2. It ignores requirements that are already installed (because",
"# pip-accel doesn't actually need to re-install requirements that",
"# are already installed we will have work around this later, but",
"# that seems fairly simple to do).",
"command_line",
".",
"append",
"(",
"'--download=%s'",
"%",
"self",
".",
"config",
".",
"source_index",
")",
"# Use `--find-links' to point pip at pip-accel's local source",
"# distribution index directory. This ensures that source distribution",
"# archives are never downloaded more than once (regardless of the HTTP",
"# cache that was introduced in pip 6.x).",
"command_line",
".",
"append",
"(",
"'--find-links=%s'",
"%",
"create_file_url",
"(",
"self",
".",
"config",
".",
"source_index",
")",
")",
"# Use `--no-binary=:all:' to ignore wheel distributions by default in",
"# order to preserve backwards compatibility with callers that expect a",
"# requirement set consisting only of source distributions that can be",
"# converted to `dumb binary distributions'.",
"if",
"not",
"use_wheels",
"and",
"self",
".",
"arguments_allow_wheels",
"(",
"arguments",
")",
":",
"command_line",
".",
"append",
"(",
"'--no-binary=:all:'",
")",
"# Use `--no-index' to force pip to only consider source distribution",
"# archives contained in pip-accel's local source distribution index",
"# directory. This enables pip-accel to ask pip \"Can the local source",
"# distribution index satisfy all requirements in the given requirement",
"# set?\" which enables pip-accel to keep pip off the internet unless",
"# absolutely necessary :-).",
"if",
"not",
"use_remote_index",
":",
"command_line",
".",
"append",
"(",
"'--no-index'",
")",
"# Use `--no-clean' to instruct pip to unpack the source distribution",
"# archives and *not* clean up the unpacked source distributions",
"# afterwards. This enables pip-accel to replace pip's installation",
"# logic with cached binary distribution archives.",
"command_line",
".",
"append",
"(",
"'--no-clean'",
")",
"# Use `--build-directory' to instruct pip to unpack the source",
"# distribution archives to a temporary directory managed by pip-accel.",
"# We will clean up the build directory when we're done using the",
"# unpacked source distributions.",
"command_line",
".",
"append",
"(",
"'--build-directory=%s'",
"%",
"self",
".",
"build_directory",
")",
"# Append the user's `pip install ...' arguments to the command line",
"# that we just assembled.",
"command_line",
".",
"extend",
"(",
"arguments",
")",
"logger",
".",
"info",
"(",
"\"Executing command: pip install %s\"",
",",
"' '",
".",
"join",
"(",
"command_line",
")",
")",
"# Clear the build directory to prevent PreviousBuildDirError exceptions.",
"self",
".",
"clear_build_directory",
"(",
")",
"# During the pip 6.x upgrade pip-accel switched to using `pip install",
"# --download' which can produce an interactive prompt as described in",
"# issue 51 [1]. The documented way [2] to get rid of this interactive",
"# prompt is pip's --exists-action option, but due to what is most",
"# likely a bug in pip this doesn't actually work. The environment",
"# variable $PIP_EXISTS_ACTION does work however, so if the user didn't",
"# set it we will set a reasonable default for them.",
"# [1] https://github.com/paylogic/pip-accel/issues/51",
"# [2] https://pip.pypa.io/en/latest/reference/pip.html#exists-action-option",
"os",
".",
"environ",
".",
"setdefault",
"(",
"'PIP_EXISTS_ACTION'",
",",
"'w'",
")",
"# Initialize and run the `pip install' command.",
"command",
"=",
"InstallCommand",
"(",
")",
"opts",
",",
"args",
"=",
"command",
".",
"parse_args",
"(",
"command_line",
")",
"if",
"not",
"opts",
".",
"ignore_installed",
":",
"# If the user didn't supply the -I, --ignore-installed option we",
"# will forcefully disable the option. Refer to the documentation of",
"# the AttributeOverrides class for further details.",
"opts",
"=",
"AttributeOverrides",
"(",
"opts",
",",
"ignore_installed",
"=",
"False",
")",
"requirement_set",
"=",
"command",
".",
"run",
"(",
"opts",
",",
"args",
")",
"# Make sure the output of pip and pip-accel are not intermingled.",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"if",
"requirement_set",
"is",
"None",
":",
"raise",
"NothingToDoError",
"(",
"\"\"\"\n pip didn't generate a requirement set, most likely you\n specified an empty requirements file?\n \"\"\"",
")",
"else",
":",
"return",
"self",
".",
"transform_pip_requirement_set",
"(",
"requirement_set",
")"
]
| 61.537634 | 0.001376 |
def get_object(self, request, object_id, from_field=None):
"""
Our implementation of get_object allows for cloning when updating an
object. It does not clone when the 'save but not clone' button is pushed,
and at no other time will clone be called.
"""
# from_field breaks in 1.7.8
obj = super(VersionedAdmin, self).get_object(request,
object_id)
# Only clone if update view as get_object() is also called for change,
# delete, and history views
if request.method == 'POST' and \
obj and \
obj.is_latest and \
'will_not_clone' not in request.path and \
'delete' not in request.path and \
'restore' not in request.path:
obj = obj.clone()
return obj | [
"def",
"get_object",
"(",
"self",
",",
"request",
",",
"object_id",
",",
"from_field",
"=",
"None",
")",
":",
"# from_field breaks in 1.7.8",
"obj",
"=",
"super",
"(",
"VersionedAdmin",
",",
"self",
")",
".",
"get_object",
"(",
"request",
",",
"object_id",
")",
"# Only clone if update view as get_object() is also called for change,",
"# delete, and history views",
"if",
"request",
".",
"method",
"==",
"'POST'",
"and",
"obj",
"and",
"obj",
".",
"is_latest",
"and",
"'will_not_clone'",
"not",
"in",
"request",
".",
"path",
"and",
"'delete'",
"not",
"in",
"request",
".",
"path",
"and",
"'restore'",
"not",
"in",
"request",
".",
"path",
":",
"obj",
"=",
"obj",
".",
"clone",
"(",
")",
"return",
"obj"
]
| 42.7 | 0.002291 |
def create_tables(self):
"""
Create tables in the database (if they don't already exist).
"""
cdir = os.path.dirname( os.path.realpath(__file__) )
# table schemas -------------------------------------
schema = os.path.join(cdir,"data","partsmaster.sql")
if self.debug:
print(self.hdr,"parts master schema is ",schema)
self.populate(schema)
schema = os.path.join(cdir,"data","approvedmfg.sql")
if self.debug:
print(self.hdr,"approved mfg list schema is ",schema)
self.populate(schema)
schema = os.path.join(cdir,"data","attachment.sql")
if self.debug:
print(self.hdr,"attachment schema is ",schema)
self.populate(schema)
schema = os.path.join(cdir,"data","bom.sql")
if self.debug:
print(self.hdr,"bill of materials schema is ",schema)
self.populate(schema)
return | [
"def",
"create_tables",
"(",
"self",
")",
":",
"cdir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"# table schemas -------------------------------------",
"schema",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cdir",
",",
"\"data\"",
",",
"\"partsmaster.sql\"",
")",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"self",
".",
"hdr",
",",
"\"parts master schema is \"",
",",
"schema",
")",
"self",
".",
"populate",
"(",
"schema",
")",
"schema",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cdir",
",",
"\"data\"",
",",
"\"approvedmfg.sql\"",
")",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"self",
".",
"hdr",
",",
"\"approved mfg list schema is \"",
",",
"schema",
")",
"self",
".",
"populate",
"(",
"schema",
")",
"schema",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cdir",
",",
"\"data\"",
",",
"\"attachment.sql\"",
")",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"self",
".",
"hdr",
",",
"\"attachment schema is \"",
",",
"schema",
")",
"self",
".",
"populate",
"(",
"schema",
")",
"schema",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cdir",
",",
"\"data\"",
",",
"\"bom.sql\"",
")",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"self",
".",
"hdr",
",",
"\"bill of materials schema is \"",
",",
"schema",
")",
"self",
".",
"populate",
"(",
"schema",
")",
"return"
]
| 30.96875 | 0.027397 |
def initialize_shade(self, shade_name, shade_color, alpha):
"""This method will create semi-transparent surfaces with a specified
color. The surface can be toggled on and off.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Inputs:
Shade_name - String of the name that you want to associate with the
surface
Shade_color - An rgb tuple of the color of the shade
Alpha - Level of transparency of the shade (0-255 with 150 being a
good middle value)
(doc string updated ver 0.1)
"""
# Create the pygame surface
self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())]
# Fill the surface with a solid color or an image
if type(shade_color) == str:
background = pygame.image.load(shade_color).convert()
background = pygame.transform.scale(background,
(self.image.get_width(),
self.image.get_height()))
self.shades[shade_name][1].blit(background, (0, 0))
# Otherwise the background should contain an rgb value
else:
self.shades[shade_name][1].fill(shade_color)
# Set the alpha value for the shade
self.shades[shade_name][1].set_alpha(alpha) | [
"def",
"initialize_shade",
"(",
"self",
",",
"shade_name",
",",
"shade_color",
",",
"alpha",
")",
":",
"# Create the pygame surface",
"self",
".",
"shades",
"[",
"shade_name",
"]",
"=",
"[",
"0",
",",
"pygame",
".",
"Surface",
"(",
"self",
".",
"image",
".",
"get_size",
"(",
")",
")",
"]",
"# Fill the surface with a solid color or an image",
"if",
"type",
"(",
"shade_color",
")",
"==",
"str",
":",
"background",
"=",
"pygame",
".",
"image",
".",
"load",
"(",
"shade_color",
")",
".",
"convert",
"(",
")",
"background",
"=",
"pygame",
".",
"transform",
".",
"scale",
"(",
"background",
",",
"(",
"self",
".",
"image",
".",
"get_width",
"(",
")",
",",
"self",
".",
"image",
".",
"get_height",
"(",
")",
")",
")",
"self",
".",
"shades",
"[",
"shade_name",
"]",
"[",
"1",
"]",
".",
"blit",
"(",
"background",
",",
"(",
"0",
",",
"0",
")",
")",
"# Otherwise the background should contain an rgb value",
"else",
":",
"self",
".",
"shades",
"[",
"shade_name",
"]",
"[",
"1",
"]",
".",
"fill",
"(",
"shade_color",
")",
"# Set the alpha value for the shade",
"self",
".",
"shades",
"[",
"shade_name",
"]",
"[",
"1",
"]",
".",
"set_alpha",
"(",
"alpha",
")"
]
| 42.84375 | 0.001427 |
def _fromJSON(cls, jsonobject):
"""Generates a new instance of :class:`maspy.core.Ci` from a decoded
JSON object (as generated by :func:`maspy.core.Ci._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`Ci`
"""
newInstance = cls(jsonobject[0], jsonobject[1])
attribDict = {}
attribDict['dataProcessingRef'] = jsonobject[2]
attribDict['precursor'] = jsonobject[3]
attribDict['product'] = jsonobject[4]
attribDict['params'] = [tuple(param) for param in jsonobject[5]]
attribDict['attrib'] = jsonobject[6]
attribDict['arrayInfo'] = dict()
for arrayType, jsonEntry in viewitems(jsonobject[7]):
arrayEntry = {'dataProcessingRef': jsonEntry['dataProcessingRef'],
'params': [tuple(_) for _ in jsonEntry['params']]
}
attribDict['arrayInfo'][arrayType] = arrayEntry
for key, value in viewitems(attribDict):
setattr(newInstance, key, value)
return newInstance | [
"def",
"_fromJSON",
"(",
"cls",
",",
"jsonobject",
")",
":",
"newInstance",
"=",
"cls",
"(",
"jsonobject",
"[",
"0",
"]",
",",
"jsonobject",
"[",
"1",
"]",
")",
"attribDict",
"=",
"{",
"}",
"attribDict",
"[",
"'dataProcessingRef'",
"]",
"=",
"jsonobject",
"[",
"2",
"]",
"attribDict",
"[",
"'precursor'",
"]",
"=",
"jsonobject",
"[",
"3",
"]",
"attribDict",
"[",
"'product'",
"]",
"=",
"jsonobject",
"[",
"4",
"]",
"attribDict",
"[",
"'params'",
"]",
"=",
"[",
"tuple",
"(",
"param",
")",
"for",
"param",
"in",
"jsonobject",
"[",
"5",
"]",
"]",
"attribDict",
"[",
"'attrib'",
"]",
"=",
"jsonobject",
"[",
"6",
"]",
"attribDict",
"[",
"'arrayInfo'",
"]",
"=",
"dict",
"(",
")",
"for",
"arrayType",
",",
"jsonEntry",
"in",
"viewitems",
"(",
"jsonobject",
"[",
"7",
"]",
")",
":",
"arrayEntry",
"=",
"{",
"'dataProcessingRef'",
":",
"jsonEntry",
"[",
"'dataProcessingRef'",
"]",
",",
"'params'",
":",
"[",
"tuple",
"(",
"_",
")",
"for",
"_",
"in",
"jsonEntry",
"[",
"'params'",
"]",
"]",
"}",
"attribDict",
"[",
"'arrayInfo'",
"]",
"[",
"arrayType",
"]",
"=",
"arrayEntry",
"for",
"key",
",",
"value",
"in",
"viewitems",
"(",
"attribDict",
")",
":",
"setattr",
"(",
"newInstance",
",",
"key",
",",
"value",
")",
"return",
"newInstance"
]
| 45.083333 | 0.00181 |
def glob_compile(pat):
"""Translate a shell glob PATTERN to a regular expression.
This is almost entirely based on `fnmatch.translate` source-code from the
python 3.5 standard-library.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i + 1
if c == '/' and len(pat) > (i + 2) and pat[i:(i + 3)] == '**/':
# Special-case for "any number of sub-directories" operator since
# may also expand to no entries:
# Otherwise `a/**/b` would expand to `a[/].*[/]b` which wouldn't
# match the immediate sub-directories of `a`, like `a/b`.
i = i + 3
res = res + '[/]([^/]*[/])*'
elif c == '*':
if len(pat) > i and pat[i] == '*':
i = i + 1
res = res + '.*'
else:
res = res + '[^/]*'
elif c == '?':
res = res + '[^/]'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j + 1
if j < n and pat[j] == ']':
j = j + 1
while j < n and pat[j] != ']':
j = j + 1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\', '\\\\')
i = j + 1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return re.compile('^' + res + '\Z(?ms)' + '$') | [
"def",
"glob_compile",
"(",
"pat",
")",
":",
"i",
",",
"n",
"=",
"0",
",",
"len",
"(",
"pat",
")",
"res",
"=",
"''",
"while",
"i",
"<",
"n",
":",
"c",
"=",
"pat",
"[",
"i",
"]",
"i",
"=",
"i",
"+",
"1",
"if",
"c",
"==",
"'/'",
"and",
"len",
"(",
"pat",
")",
">",
"(",
"i",
"+",
"2",
")",
"and",
"pat",
"[",
"i",
":",
"(",
"i",
"+",
"3",
")",
"]",
"==",
"'**/'",
":",
"# Special-case for \"any number of sub-directories\" operator since",
"# may also expand to no entries:",
"# Otherwise `a/**/b` would expand to `a[/].*[/]b` which wouldn't",
"# match the immediate sub-directories of `a`, like `a/b`.",
"i",
"=",
"i",
"+",
"3",
"res",
"=",
"res",
"+",
"'[/]([^/]*[/])*'",
"elif",
"c",
"==",
"'*'",
":",
"if",
"len",
"(",
"pat",
")",
">",
"i",
"and",
"pat",
"[",
"i",
"]",
"==",
"'*'",
":",
"i",
"=",
"i",
"+",
"1",
"res",
"=",
"res",
"+",
"'.*'",
"else",
":",
"res",
"=",
"res",
"+",
"'[^/]*'",
"elif",
"c",
"==",
"'?'",
":",
"res",
"=",
"res",
"+",
"'[^/]'",
"elif",
"c",
"==",
"'['",
":",
"j",
"=",
"i",
"if",
"j",
"<",
"n",
"and",
"pat",
"[",
"j",
"]",
"==",
"'!'",
":",
"j",
"=",
"j",
"+",
"1",
"if",
"j",
"<",
"n",
"and",
"pat",
"[",
"j",
"]",
"==",
"']'",
":",
"j",
"=",
"j",
"+",
"1",
"while",
"j",
"<",
"n",
"and",
"pat",
"[",
"j",
"]",
"!=",
"']'",
":",
"j",
"=",
"j",
"+",
"1",
"if",
"j",
">=",
"n",
":",
"res",
"=",
"res",
"+",
"'\\\\['",
"else",
":",
"stuff",
"=",
"pat",
"[",
"i",
":",
"j",
"]",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
"i",
"=",
"j",
"+",
"1",
"if",
"stuff",
"[",
"0",
"]",
"==",
"'!'",
":",
"stuff",
"=",
"'^'",
"+",
"stuff",
"[",
"1",
":",
"]",
"elif",
"stuff",
"[",
"0",
"]",
"==",
"'^'",
":",
"stuff",
"=",
"'\\\\'",
"+",
"stuff",
"res",
"=",
"'%s[%s]'",
"%",
"(",
"res",
",",
"stuff",
")",
"else",
":",
"res",
"=",
"res",
"+",
"re",
".",
"escape",
"(",
"c",
")",
"return",
"re",
".",
"compile",
"(",
"'^'",
"+",
"res",
"+",
"'\\Z(?ms)'",
"+",
"'$'",
")"
]
| 33.291667 | 0.001216 |
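
A brief usage sketch, assuming the glob_compile() defined above is in scope (e.g. pasted into the same module). One caveat: the pattern is assembled with the '(?ms)' global flags in the middle of the expression, which Python 3.11 and later reject, so on those versions the flags would have to move to the front of the pattern.

pattern = glob_compile('src/**/*.py')
assert pattern.match('src/mod.py')           # '**/' may also expand to zero directories
assert pattern.match('src/pkg/sub/mod.py')   # ... or to several of them
assert not pattern.match('src/pkg/mod.txt')
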
def parse_conll(self, texts: List[str], retry_count: int = 0) -> List[str]:
'''
Processes the texts using TweeboParse and returns them in CoNLL format.
:param texts: The List of Strings to be processed by TweeboParse.
:param retry_count: The number of times it has retried for. Default
0 does not require setting, main purpose is for
recursion.
:return: A list of CoNLL formated strings.
:raises ServerError: Caused when the server is not running.
:raises :py:class:`requests.exceptions.HTTPError`: Caused when the
input texts is not formated correctly e.g. When you give it a
String not a list of Strings.
:raises :py:class:`json.JSONDecodeError`: Caused if after self.retries
attempts to parse the data it cannot decode the data.
:Example:
'''
post_data = {'texts': texts, 'output_type': 'conll'}
try:
response = requests.post(f'http://{self.hostname}:{self.port}',
json=post_data,
headers={'Connection': 'close'})
response.raise_for_status()
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout) as server_error:
raise ServerError(server_error, self.hostname, self.port)
except requests.exceptions.HTTPError as http_error:
raise http_error
else:
try:
return response.json()
except json.JSONDecodeError as json_exception:
if retry_count == self.retries:
self.log_error(response.text)
raise Exception('Json Decoding error cannot parse this '
f':\n{response.text}')
return self.parse_conll(texts, retry_count + 1) | [
"def",
"parse_conll",
"(",
"self",
",",
"texts",
":",
"List",
"[",
"str",
"]",
",",
"retry_count",
":",
"int",
"=",
"0",
")",
"->",
"List",
"[",
"str",
"]",
":",
"post_data",
"=",
"{",
"'texts'",
":",
"texts",
",",
"'output_type'",
":",
"'conll'",
"}",
"try",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"f'http://{self.hostname}:{self.port}'",
",",
"json",
"=",
"post_data",
",",
"headers",
"=",
"{",
"'Connection'",
":",
"'close'",
"}",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"(",
"requests",
".",
"exceptions",
".",
"ConnectionError",
",",
"requests",
".",
"exceptions",
".",
"Timeout",
")",
"as",
"server_error",
":",
"raise",
"ServerError",
"(",
"server_error",
",",
"self",
".",
"hostname",
",",
"self",
".",
"port",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"http_error",
":",
"raise",
"http_error",
"else",
":",
"try",
":",
"return",
"response",
".",
"json",
"(",
")",
"except",
"json",
".",
"JSONDecodeError",
"as",
"json_exception",
":",
"if",
"retry_count",
"==",
"self",
".",
"retries",
":",
"self",
".",
"log_error",
"(",
"response",
".",
"text",
")",
"raise",
"Exception",
"(",
"'Json Decoding error cannot parse this '",
"f':\\n{response.text}'",
")",
"return",
"self",
".",
"parse_conll",
"(",
"texts",
",",
"retry_count",
"+",
"1",
")"
]
| 48.923077 | 0.001028 |
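
A hypothetical client-side call that mirrors the request this method builds; the host and port are placeholders and assume a TweeboParser API server is already listening there.

import requests

payload = {'texts': ['Hello world :)'], 'output_type': 'conll'}
response = requests.post('http://localhost:8000', json=payload,
                         headers={'Connection': 'close'})
response.raise_for_status()
conll_strings = response.json()   # one CoNLL-formatted string per input text
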
def copy_path_to_clipboard(self):
"""
Copies the file path to the clipboard
"""
path = self.get_current_path()
QtWidgets.QApplication.clipboard().setText(path)
debug('path copied: %s' % path) | [
"def",
"copy_path_to_clipboard",
"(",
"self",
")",
":",
"path",
"=",
"self",
".",
"get_current_path",
"(",
")",
"QtWidgets",
".",
"QApplication",
".",
"clipboard",
"(",
")",
".",
"setText",
"(",
"path",
")",
"debug",
"(",
"'path copied: %s'",
"%",
"path",
")"
]
| 33.285714 | 0.008368 |
def create_new_page (self, section_id, new_page_style=0):
"""
NewPageStyle
0 - Create a Page that has Default Page Style
1 - Create a blank page with no title
        2 - Create a blank page that has no title
"""
try:
self.process.CreateNewPage(section_id, "", new_page_style)
except Exception as e:
print(e)
print("Unable to create the page") | [
"def",
"create_new_page",
"(",
"self",
",",
"section_id",
",",
"new_page_style",
"=",
"0",
")",
":",
"try",
":",
"self",
".",
"process",
".",
"CreateNewPage",
"(",
"section_id",
",",
"\"\"",
",",
"new_page_style",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"print",
"(",
"\"Unable to create the page\"",
")"
]
| 36 | 0.009029 |
def p_statement_expr(self, t):
'''statement : node_expression PLUS node_expression
| node_expression MINUS node_expression'''
if len(t)<3 :
self.accu.add(Term('input', [t[1]]))
print('input', t[1])
else :
#print(t[1], t[2], t[3]
self.accu.add(Term('edge', ["gen(\""+t[1]+"\")","gen(\""+t[3]+"\")"]))
self.accu.add(Term('obs_elabel', ["gen(\""+t[1]+"\")","gen(\""+t[3]+"\")",t[2]])) | [
"def",
"p_statement_expr",
"(",
"self",
",",
"t",
")",
":",
"if",
"len",
"(",
"t",
")",
"<",
"3",
":",
"self",
".",
"accu",
".",
"add",
"(",
"Term",
"(",
"'input'",
",",
"[",
"t",
"[",
"1",
"]",
"]",
")",
")",
"print",
"(",
"'input'",
",",
"t",
"[",
"1",
"]",
")",
"else",
":",
"#print(t[1], t[2], t[3]",
"self",
".",
"accu",
".",
"add",
"(",
"Term",
"(",
"'edge'",
",",
"[",
"\"gen(\\\"\"",
"+",
"t",
"[",
"1",
"]",
"+",
"\"\\\")\"",
",",
"\"gen(\\\"\"",
"+",
"t",
"[",
"3",
"]",
"+",
"\"\\\")\"",
"]",
")",
")",
"self",
".",
"accu",
".",
"add",
"(",
"Term",
"(",
"'obs_elabel'",
",",
"[",
"\"gen(\\\"\"",
"+",
"t",
"[",
"1",
"]",
"+",
"\"\\\")\"",
",",
"\"gen(\\\"\"",
"+",
"t",
"[",
"3",
"]",
"+",
"\"\\\")\"",
",",
"t",
"[",
"2",
"]",
"]",
")",
")"
]
| 44.2 | 0.031042 |
def foldl1(f: Callable[[T, T], T], xs: Iterable[T]) -> T:
""" Returns the accumulated result of a binary function applied to elements
of an iterable.
.. math::
        foldl1(f, [x_0, x_1, x_2, x_3]) = f(f(f(x_0, x_1), x_2), x_3)
Examples
--------
>>> from delphi.utils.fp import foldl1
>>> foldl1(lambda x, y: x + y, range(5))
10
"""
return reduce(f, xs) | [
"def",
"foldl1",
"(",
"f",
":",
"Callable",
"[",
"[",
"T",
",",
"T",
"]",
",",
"T",
"]",
",",
"xs",
":",
"Iterable",
"[",
"T",
"]",
")",
"->",
"T",
":",
"return",
"reduce",
"(",
"f",
",",
"xs",
")"
]
| 24.125 | 0.002494 |
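
A standalone restatement of the same idea, handy for checking the left-to-right accumulation order; it assumes nothing beyond the standard library.

from functools import reduce

def foldl1_sketch(f, xs):
    # Left fold without an initial element: foldl1(f, [x0, x1, x2]) == f(f(x0, x1), x2)
    return reduce(f, xs)

assert foldl1_sketch(lambda x, y: x + y, range(5)) == 10    # ((((0+1)+2)+3)+4)
assert foldl1_sketch(lambda x, y: x - y, [10, 3, 2]) == 5   # (10-3)-2
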
def _build_block_context(template, context):
"""Populate the block context with BlockNodes from parent templates."""
# Ensure there's a BlockContext before rendering. This allows blocks in
# ExtendsNodes to be found by sub-templates (allowing {{ block.super }} and
# overriding sub-blocks to work).
if BLOCK_CONTEXT_KEY not in context.render_context:
context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
block_context = context.render_context[BLOCK_CONTEXT_KEY]
for node in template.nodelist:
if isinstance(node, ExtendsNode):
compiled_parent = node.get_parent(context)
# Add the parent node's blocks to the context. (This ends up being
# similar logic to ExtendsNode.render(), where we're adding the
# parent's blocks to the context so a child can find them.)
block_context.add_blocks(
{n.name: n for n in compiled_parent.nodelist.get_nodes_by_type(BlockNode)})
_build_block_context(compiled_parent, context)
return compiled_parent
# The ExtendsNode has to be the first non-text node.
if not isinstance(node, TextNode):
break | [
"def",
"_build_block_context",
"(",
"template",
",",
"context",
")",
":",
"# Ensure there's a BlockContext before rendering. This allows blocks in",
"# ExtendsNodes to be found by sub-templates (allowing {{ block.super }} and",
"# overriding sub-blocks to work).",
"if",
"BLOCK_CONTEXT_KEY",
"not",
"in",
"context",
".",
"render_context",
":",
"context",
".",
"render_context",
"[",
"BLOCK_CONTEXT_KEY",
"]",
"=",
"BlockContext",
"(",
")",
"block_context",
"=",
"context",
".",
"render_context",
"[",
"BLOCK_CONTEXT_KEY",
"]",
"for",
"node",
"in",
"template",
".",
"nodelist",
":",
"if",
"isinstance",
"(",
"node",
",",
"ExtendsNode",
")",
":",
"compiled_parent",
"=",
"node",
".",
"get_parent",
"(",
"context",
")",
"# Add the parent node's blocks to the context. (This ends up being",
"# similar logic to ExtendsNode.render(), where we're adding the",
"# parent's blocks to the context so a child can find them.)",
"block_context",
".",
"add_blocks",
"(",
"{",
"n",
".",
"name",
":",
"n",
"for",
"n",
"in",
"compiled_parent",
".",
"nodelist",
".",
"get_nodes_by_type",
"(",
"BlockNode",
")",
"}",
")",
"_build_block_context",
"(",
"compiled_parent",
",",
"context",
")",
"return",
"compiled_parent",
"# The ExtendsNode has to be the first non-text node.",
"if",
"not",
"isinstance",
"(",
"node",
",",
"TextNode",
")",
":",
"break"
]
| 45.538462 | 0.001654 |
def workflow_move_stage(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /workflow-xxxx/moveStage API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2FmoveStage
"""
return DXHTTPRequest('/%s/moveStage' % object_id, input_params, always_retry=always_retry, **kwargs) | [
"def",
"workflow_move_stage",
"(",
"object_id",
",",
"input_params",
"=",
"{",
"}",
",",
"always_retry",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"DXHTTPRequest",
"(",
"'/%s/moveStage'",
"%",
"object_id",
",",
"input_params",
",",
"always_retry",
"=",
"always_retry",
",",
"*",
"*",
"kwargs",
")"
]
| 55.857143 | 0.010076 |
def run(self, max_pressure=None):
r"""
Perform the algorithm
Parameters
----------
max_pressure : float
The maximum pressure applied to the invading cluster. Any pores and
throats with entry pressure above this value will not be invaded.
"""
if 'throat.entry_pressure' not in self.keys():
logger.error("Setup method must be run first")
if max_pressure is None:
self.max_pressure = sp.inf
else:
self.max_pressure = max_pressure
if len(self.queue) == 0:
logger.warn('queue is empty, this network is fully invaded')
return
# track whether each cluster has reached the maximum pressure
self.max_p_reached = [False]*len(self.queue)
# starting invasion sequence
self.count = 0
# highest pressure reached so far - used for porosimetry curve
self.high_Pc = np.ones(len(self.queue))*-np.inf
outlets = self['pore.outlets']
terminate_clusters = np.sum(outlets) > 0
if not hasattr(self, 'invasion_running'):
self.invasion_running = [True]*len(self.queue)
else:
# created by set_residual
pass
while np.any(self.invasion_running) and not np.all(self.max_p_reached):
# Loop over clusters
for c_num in np.argwhere(self.invasion_running).flatten():
self._invade_cluster(c_num)
queue = self.queue[c_num]
if len(queue) == 0 or self.max_p_reached[c_num]:
# If the cluster contains no more entries invasion has
# finished
self.invasion_running[c_num] = False
if self.settings['invade_isolated_Ts']:
self._invade_isolated_Ts()
if terminate_clusters:
# terminated clusters
tcs = np.unique(self['pore.cluster'][outlets]).astype(int)
tcs = tcs[tcs >= 0]
if len(tcs) > 0:
for tc in tcs:
if self.invasion_running[tc] is True:
self.invasion_running[tc] = False
logger.info("Cluster " + str(tc) + " reached " +
" outlet at sequence " + str(self.count)) | [
"def",
"run",
"(",
"self",
",",
"max_pressure",
"=",
"None",
")",
":",
"if",
"'throat.entry_pressure'",
"not",
"in",
"self",
".",
"keys",
"(",
")",
":",
"logger",
".",
"error",
"(",
"\"Setup method must be run first\"",
")",
"if",
"max_pressure",
"is",
"None",
":",
"self",
".",
"max_pressure",
"=",
"sp",
".",
"inf",
"else",
":",
"self",
".",
"max_pressure",
"=",
"max_pressure",
"if",
"len",
"(",
"self",
".",
"queue",
")",
"==",
"0",
":",
"logger",
".",
"warn",
"(",
"'queue is empty, this network is fully invaded'",
")",
"return",
"# track whether each cluster has reached the maximum pressure",
"self",
".",
"max_p_reached",
"=",
"[",
"False",
"]",
"*",
"len",
"(",
"self",
".",
"queue",
")",
"# starting invasion sequence",
"self",
".",
"count",
"=",
"0",
"# highest pressure reached so far - used for porosimetry curve",
"self",
".",
"high_Pc",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"self",
".",
"queue",
")",
")",
"*",
"-",
"np",
".",
"inf",
"outlets",
"=",
"self",
"[",
"'pore.outlets'",
"]",
"terminate_clusters",
"=",
"np",
".",
"sum",
"(",
"outlets",
")",
">",
"0",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'invasion_running'",
")",
":",
"self",
".",
"invasion_running",
"=",
"[",
"True",
"]",
"*",
"len",
"(",
"self",
".",
"queue",
")",
"else",
":",
"# created by set_residual",
"pass",
"while",
"np",
".",
"any",
"(",
"self",
".",
"invasion_running",
")",
"and",
"not",
"np",
".",
"all",
"(",
"self",
".",
"max_p_reached",
")",
":",
"# Loop over clusters",
"for",
"c_num",
"in",
"np",
".",
"argwhere",
"(",
"self",
".",
"invasion_running",
")",
".",
"flatten",
"(",
")",
":",
"self",
".",
"_invade_cluster",
"(",
"c_num",
")",
"queue",
"=",
"self",
".",
"queue",
"[",
"c_num",
"]",
"if",
"len",
"(",
"queue",
")",
"==",
"0",
"or",
"self",
".",
"max_p_reached",
"[",
"c_num",
"]",
":",
"# If the cluster contains no more entries invasion has",
"# finished",
"self",
".",
"invasion_running",
"[",
"c_num",
"]",
"=",
"False",
"if",
"self",
".",
"settings",
"[",
"'invade_isolated_Ts'",
"]",
":",
"self",
".",
"_invade_isolated_Ts",
"(",
")",
"if",
"terminate_clusters",
":",
"# terminated clusters",
"tcs",
"=",
"np",
".",
"unique",
"(",
"self",
"[",
"'pore.cluster'",
"]",
"[",
"outlets",
"]",
")",
".",
"astype",
"(",
"int",
")",
"tcs",
"=",
"tcs",
"[",
"tcs",
">=",
"0",
"]",
"if",
"len",
"(",
"tcs",
")",
">",
"0",
":",
"for",
"tc",
"in",
"tcs",
":",
"if",
"self",
".",
"invasion_running",
"[",
"tc",
"]",
"is",
"True",
":",
"self",
".",
"invasion_running",
"[",
"tc",
"]",
"=",
"False",
"logger",
".",
"info",
"(",
"\"Cluster \"",
"+",
"str",
"(",
"tc",
")",
"+",
"\" reached \"",
"+",
"\" outlet at sequence \"",
"+",
"str",
"(",
"self",
".",
"count",
")",
")"
]
| 42.472727 | 0.001255 |
def get_layer_nr(inp, depth):
r"""Get number of layer in which inp resides.
Note:
If zinp is on a layer interface, the layer above the interface is chosen.
This check-function is called from one of the modelling routines in
:mod:`model`. Consult these modelling routines for a detailed description
of the input parameters.
Parameters
----------
inp : list of floats or arrays
Dipole coordinates (m)
depth : array
Depths of layer interfaces.
Returns
-------
linp : int or array_like of int
Layer number(s) in which inp resides (plural only if bipole).
zinp : float or array
inp[2] (depths).
"""
zinp = inp[2]
# depth = [-infty : last interface]; create additional depth-array
    # pdepth = [first interface : +infty]
pdepth = np.concatenate((depth[1:], np.array([np.infty])))
# Broadcast arrays
b_zinp = np.atleast_1d(zinp)[:, None]
# Get layers
linp = np.where((depth[None, :] < b_zinp)*(pdepth[None, :] >= b_zinp))[1]
# Return; squeeze in case of only one inp-depth
return np.squeeze(linp), zinp | [
"def",
"get_layer_nr",
"(",
"inp",
",",
"depth",
")",
":",
"zinp",
"=",
"inp",
"[",
"2",
"]",
"# depth = [-infty : last interface]; create additional depth-array",
"# pdepth = [fist interface : +infty]",
"pdepth",
"=",
"np",
".",
"concatenate",
"(",
"(",
"depth",
"[",
"1",
":",
"]",
",",
"np",
".",
"array",
"(",
"[",
"np",
".",
"infty",
"]",
")",
")",
")",
"# Broadcast arrays",
"b_zinp",
"=",
"np",
".",
"atleast_1d",
"(",
"zinp",
")",
"[",
":",
",",
"None",
"]",
"# Get layers",
"linp",
"=",
"np",
".",
"where",
"(",
"(",
"depth",
"[",
"None",
",",
":",
"]",
"<",
"b_zinp",
")",
"*",
"(",
"pdepth",
"[",
"None",
",",
":",
"]",
">=",
"b_zinp",
")",
")",
"[",
"1",
"]",
"# Return; squeeze in case of only one inp-depth",
"return",
"np",
".",
"squeeze",
"(",
"linp",
")",
",",
"zinp"
]
| 26.119048 | 0.000879 |
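
A small worked example, assuming the get_layer_nr() above is importable. The depth array starts at -infinity, as the comments in the function require, so three interfaces define four layers (0 .. 3).

import numpy as np

depth = np.array([-np.inf, 0.0, 100.0, 300.0])       # interfaces at 0 m, 100 m, and 300 m
inp = (None, None, np.array([50.0, 150.0, 500.0]))   # only inp[2], the dipole depths, is used here
layers, zinp = get_layer_nr(inp, depth)
# layers -> array([1, 2, 3]): 50 m lies in layer 1, 150 m in layer 2, 500 m in layer 3
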
def decompress(compressed_data):
"""Decompress data that has been compressed by the filepack algorithm.
:param compressed_data: an array of compressed data bytes to decompress
:rtype: an array of decompressed bytes"""
raw_data = []
index = 0
while index < len(compressed_data):
current = compressed_data[index]
index += 1
if current == RLE_BYTE:
directive = compressed_data[index]
index += 1
if directive == RLE_BYTE:
raw_data.append(RLE_BYTE)
else:
count = compressed_data[index]
index += 1
raw_data.extend([directive] * count)
elif current == SPECIAL_BYTE:
directive = compressed_data[index]
index += 1
if directive == SPECIAL_BYTE:
raw_data.append(SPECIAL_BYTE)
elif directive == DEFAULT_WAVE_BYTE:
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_WAVE * count)
elif directive == DEFAULT_INSTR_BYTE:
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_INSTRUMENT_FILEPACK * count)
elif directive == EOF_BYTE:
assert False, ("Unexpected EOF command encountered while "
"decompressing")
else:
                assert False, "Encountered unexpected sequence 0x%02x 0x%02x" % (
current, directive)
else:
raw_data.append(current)
return raw_data | [
"def",
"decompress",
"(",
"compressed_data",
")",
":",
"raw_data",
"=",
"[",
"]",
"index",
"=",
"0",
"while",
"index",
"<",
"len",
"(",
"compressed_data",
")",
":",
"current",
"=",
"compressed_data",
"[",
"index",
"]",
"index",
"+=",
"1",
"if",
"current",
"==",
"RLE_BYTE",
":",
"directive",
"=",
"compressed_data",
"[",
"index",
"]",
"index",
"+=",
"1",
"if",
"directive",
"==",
"RLE_BYTE",
":",
"raw_data",
".",
"append",
"(",
"RLE_BYTE",
")",
"else",
":",
"count",
"=",
"compressed_data",
"[",
"index",
"]",
"index",
"+=",
"1",
"raw_data",
".",
"extend",
"(",
"[",
"directive",
"]",
"*",
"count",
")",
"elif",
"current",
"==",
"SPECIAL_BYTE",
":",
"directive",
"=",
"compressed_data",
"[",
"index",
"]",
"index",
"+=",
"1",
"if",
"directive",
"==",
"SPECIAL_BYTE",
":",
"raw_data",
".",
"append",
"(",
"SPECIAL_BYTE",
")",
"elif",
"directive",
"==",
"DEFAULT_WAVE_BYTE",
":",
"count",
"=",
"compressed_data",
"[",
"index",
"]",
"index",
"+=",
"1",
"raw_data",
".",
"extend",
"(",
"DEFAULT_WAVE",
"*",
"count",
")",
"elif",
"directive",
"==",
"DEFAULT_INSTR_BYTE",
":",
"count",
"=",
"compressed_data",
"[",
"index",
"]",
"index",
"+=",
"1",
"raw_data",
".",
"extend",
"(",
"DEFAULT_INSTRUMENT_FILEPACK",
"*",
"count",
")",
"elif",
"directive",
"==",
"EOF_BYTE",
":",
"assert",
"False",
",",
"(",
"\"Unexpected EOF command encountered while \"",
"\"decompressing\"",
")",
"else",
":",
"assert",
"False",
",",
"\"Countered unexpected sequence 0x%02x 0x%02x\"",
"%",
"(",
"current",
",",
"directive",
")",
"else",
":",
"raw_data",
".",
"append",
"(",
"current",
")",
"return",
"raw_data"
]
| 31.117647 | 0.000611 |
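
A self-contained illustration of just the run-length branch handled above; the marker value is made up for the example (the real module defines RLE_BYTE and the other constants of the LSDJ filepack format).

RLE_BYTE = 0xC0   # illustrative value only

def rle_expand(compressed):
    out, i = [], 0
    while i < len(compressed):
        byte = compressed[i]
        i += 1
        if byte == RLE_BYTE:
            directive = compressed[i]
            i += 1
            if directive == RLE_BYTE:      # escaped literal marker byte
                out.append(RLE_BYTE)
            else:                          # (value, count) pair
                count = compressed[i]
                i += 1
                out.extend([directive] * count)
        else:
            out.append(byte)
    return out

assert rle_expand([0x01, RLE_BYTE, 0x07, 3, 0x02]) == [0x01, 0x07, 0x07, 0x07, 0x02]
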
def render_as_json(func):
"""
Decorator to render as JSON
:param func:
:return:
"""
if inspect.isclass(func):
setattr(func, "_renderer", json_renderer)
return func
else:
@functools.wraps(func)
def decorated_view(*args, **kwargs):
data = func(*args, **kwargs)
return _build_response(data, jsonify)
return decorated_view | [
"def",
"render_as_json",
"(",
"func",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"func",
")",
":",
"setattr",
"(",
"func",
",",
"\"_renderer\"",
",",
"json_renderer",
")",
"return",
"func",
"else",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"decorated_view",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_build_response",
"(",
"data",
",",
"jsonify",
")",
"return",
"decorated_view"
]
| 26.4 | 0.002439 |
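
A self-contained sketch of the same class-or-function decorator pattern, with json.dumps standing in for the Flask-specific jsonify/_build_response helpers (that substitution is an assumption made purely for illustration).

import functools
import inspect
import json

def as_json(obj):
    if inspect.isclass(obj):
        # Class-based views are only tagged; a framework would look up _renderer later.
        setattr(obj, "_renderer", json.dumps)
        return obj

    @functools.wraps(obj)
    def decorated_view(*args, **kwargs):
        return json.dumps(obj(*args, **kwargs))
    return decorated_view

@as_json
def ping():
    return {"status": "ok"}

print(ping())   # {"status": "ok"}
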
def byLexer( lexer ):
"""
    Looks up the language plugin by the lexer class of the inputted lexer.
:param lexer | <QsciLexer>
:return <XLanguage> || None
"""
XLanguage.load()
lexerType = type(lexer)
for lang in XLanguage._plugins.values():
if ( lang.lexerType() == lexerType ):
return lang
return None | [
"def",
"byLexer",
"(",
"lexer",
")",
":",
"XLanguage",
".",
"load",
"(",
")",
"lexerType",
"=",
"type",
"(",
"lexer",
")",
"for",
"lang",
"in",
"XLanguage",
".",
"_plugins",
".",
"values",
"(",
")",
":",
"if",
"(",
"lang",
".",
"lexerType",
"(",
")",
"==",
"lexerType",
")",
":",
"return",
"lang",
"return",
"None"
]
| 28 | 0.020737 |
def setPotential(self, columnIndex, potential):
"""
Sets the potential mapping for a given column. ``potential`` size must match
the number of inputs, and must be greater than ``stimulusThreshold``.
:param columnIndex: (int) column index to set potential for.
:param potential: (list) value to set.
"""
assert(columnIndex < self._numColumns)
potentialSparse = numpy.where(potential > 0)[0]
if len(potentialSparse) < self._stimulusThreshold:
raise Exception("This is likely due to a " +
"value of stimulusThreshold that is too large relative " +
"to the input size.")
self._potentialPools.replace(columnIndex, potentialSparse) | [
"def",
"setPotential",
"(",
"self",
",",
"columnIndex",
",",
"potential",
")",
":",
"assert",
"(",
"columnIndex",
"<",
"self",
".",
"_numColumns",
")",
"potentialSparse",
"=",
"numpy",
".",
"where",
"(",
"potential",
">",
"0",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"potentialSparse",
")",
"<",
"self",
".",
"_stimulusThreshold",
":",
"raise",
"Exception",
"(",
"\"This is likely due to a \"",
"+",
"\"value of stimulusThreshold that is too large relative \"",
"+",
"\"to the input size.\"",
")",
"self",
".",
"_potentialPools",
".",
"replace",
"(",
"columnIndex",
",",
"potentialSparse",
")"
]
| 39.705882 | 0.01013 |
def plot_inputseries(
self, names: Optional[Iterable[str]] = None,
average: bool = False, **kwargs: Any) \
-> None:
"""Plot (the selected) |InputSequence| |IOSequence.series| values.
We demonstrate the functionalities of method |Element.plot_inputseries|
based on the `Lahn` example project:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, _, _ = prepare_full_example_2(lastdate='1997-01-01')
Without any arguments, |Element.plot_inputseries| prints the
time series of all input sequences handled by its |Model| object
directly to the screen (in the given example, |hland_inputs.P|,
|hland_inputs.T|, |hland_inputs.TN|, and |hland_inputs.EPN| of
application model |hland_v1|):
>>> land = hp.elements.land_dill
>>> land.plot_inputseries()
You can use the `pyplot` API of `matplotlib` to modify the figure
or to save it to disk (or print it to the screen, in case the
interactive mode of `matplotlib` is disabled):
>>> from matplotlib import pyplot
>>> from hydpy.docs import figs
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_inputseries.png')
>>> pyplot.close()
.. image:: Element_plot_inputseries.png
Methods |Element.plot_fluxseries| and |Element.plot_stateseries|
work in the same manner. Before applying them, one has at first
to calculate the time series of the |FluxSequence| and
|StateSequence| objects:
>>> hp.doit()
        All three methods allow selecting certain sequences by passing their
names (here, flux sequences |hland_fluxes.Q0| and |hland_fluxes.Q1|
of |hland_v1|). Additionally, you can pass the keyword arguments
supported by `matplotlib` for modifying the line style:
>>> land.plot_fluxseries(['q0', 'q1'], linewidth=2)
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_fluxseries.png')
>>> pyplot.close()
.. image:: Element_plot_fluxseries.png
For 1-dimensional |IOSequence| objects, all three methods plot the
individual time series in the same colour (here, from the state
sequences |hland_states.SP| and |hland_states.WC| of |hland_v1|):
>>> land.plot_stateseries(['sp', 'wc'])
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_stateseries1.png')
>>> pyplot.close()
.. image:: Element_plot_stateseries1.png
Alternatively, you can print the averaged time series through
passing |True| to the method `average` argument (demonstrated
for the state sequence |hland_states.SM|):
>>> land.plot_stateseries(['sm'], color='grey')
>>> land.plot_stateseries(
... ['sm'], average=True, color='black', linewidth=3)
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_stateseries2.png')
>>> pyplot.close()
.. image:: Element_plot_stateseries2.png
"""
self.__plot(self.model.sequences.inputs, names, average, kwargs) | [
"def",
"plot_inputseries",
"(",
"self",
",",
"names",
":",
"Optional",
"[",
"Iterable",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"average",
":",
"bool",
"=",
"False",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"None",
":",
"self",
".",
"__plot",
"(",
"self",
".",
"model",
".",
"sequences",
".",
"inputs",
",",
"names",
",",
"average",
",",
"kwargs",
")"
]
| 40.118421 | 0.00064 |
def clear_java_home():
"""Clear JAVA_HOME environment or reset to BCBIO_JAVA_HOME.
Avoids accidental java injection but respects custom BCBIO_JAVA_HOME
command.
"""
if os.environ.get("BCBIO_JAVA_HOME"):
test_cmd = os.path.join(os.environ["BCBIO_JAVA_HOME"], "bin", "java")
if os.path.exists(test_cmd):
return "export JAVA_HOME=%s" % os.environ["BCBIO_JAVA_HOME"]
return "unset JAVA_HOME" | [
"def",
"clear_java_home",
"(",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"BCBIO_JAVA_HOME\"",
")",
":",
"test_cmd",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"\"BCBIO_JAVA_HOME\"",
"]",
",",
"\"bin\"",
",",
"\"java\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"test_cmd",
")",
":",
"return",
"\"export JAVA_HOME=%s\"",
"%",
"os",
".",
"environ",
"[",
"\"BCBIO_JAVA_HOME\"",
"]",
"return",
"\"unset JAVA_HOME\""
]
| 39.090909 | 0.002273 |
def append_logs_to_result_object(result_obj, result):
"""
Append log files to cloud result object from Result.
:param result_obj: Target result object
:param result: Result
:return: Nothing, modifies result_obj in place.
"""
logs = result.has_logs()
result_obj["exec"]["logs"] = []
if logs and result.logfiles:
for log in logs:
typ = None
parts = log.split(os.sep)
if "bench" in parts[len(parts) - 1]:
typ = "framework"
# elif "Dut" in parts[len(parts)-1]:
# typ = "dut"
if typ is not None:
name = parts[len(parts) - 1]
try:
with open(log, "r") as file_name:
data = file_name.read()
dic = {"data": data, "name": name, "from": typ}
result_obj["exec"]["logs"].append(dic)
except OSError:
pass
else:
continue | [
"def",
"append_logs_to_result_object",
"(",
"result_obj",
",",
"result",
")",
":",
"logs",
"=",
"result",
".",
"has_logs",
"(",
")",
"result_obj",
"[",
"\"exec\"",
"]",
"[",
"\"logs\"",
"]",
"=",
"[",
"]",
"if",
"logs",
"and",
"result",
".",
"logfiles",
":",
"for",
"log",
"in",
"logs",
":",
"typ",
"=",
"None",
"parts",
"=",
"log",
".",
"split",
"(",
"os",
".",
"sep",
")",
"if",
"\"bench\"",
"in",
"parts",
"[",
"len",
"(",
"parts",
")",
"-",
"1",
"]",
":",
"typ",
"=",
"\"framework\"",
"# elif \"Dut\" in parts[len(parts)-1]:",
"# typ = \"dut\"",
"if",
"typ",
"is",
"not",
"None",
":",
"name",
"=",
"parts",
"[",
"len",
"(",
"parts",
")",
"-",
"1",
"]",
"try",
":",
"with",
"open",
"(",
"log",
",",
"\"r\"",
")",
"as",
"file_name",
":",
"data",
"=",
"file_name",
".",
"read",
"(",
")",
"dic",
"=",
"{",
"\"data\"",
":",
"data",
",",
"\"name\"",
":",
"name",
",",
"\"from\"",
":",
"typ",
"}",
"result_obj",
"[",
"\"exec\"",
"]",
"[",
"\"logs\"",
"]",
".",
"append",
"(",
"dic",
")",
"except",
"OSError",
":",
"pass",
"else",
":",
"continue"
]
| 33.1 | 0.000978 |
def main():
    """ Main entry point, expects docopt arg dict as argd. """
global DEBUG
argd = docopt(USAGESTR, version=VERSIONSTR, script=SCRIPT)
DEBUG = argd['--debug']
width = parse_int(argd['--width'] or DEFAULT_WIDTH) or 1
indent = parse_int(argd['--indent'] or (argd['--INDENT'] or 0))
prepend = ' ' * (indent * 4)
if prepend and argd['--indent']:
# Smart indent, change max width based on indention.
width -= len(prepend)
userprepend = argd['--prepend'] or (argd['--PREPEND'] or '')
prepend = ''.join((prepend, userprepend))
if argd['--prepend']:
# Smart indent, change max width based on prepended text.
width -= len(userprepend)
userappend = argd['--append'] or (argd['--APPEND'] or '')
if argd['--append']:
width -= len(userappend)
if argd['WORDS']:
# Try each argument as a file name.
argd['WORDS'] = (
(try_read_file(w) if len(w) < 256 else w)
for w in argd['WORDS']
)
words = ' '.join((w for w in argd['WORDS'] if w))
else:
# No text/filenames provided, use stdin for input.
words = read_stdin()
block = FormatBlock(words).iter_format_block(
chars=argd['--chars'],
fill=argd['--fill'],
prepend=prepend,
strip_first=argd['--stripfirst'],
append=userappend,
strip_last=argd['--striplast'],
width=width,
newlines=argd['--newlines'],
lstrip=argd['--lstrip'],
)
for i, line in enumerate(block):
if argd['--enumerate']:
# Current line number format supports up to 999 lines before
# messing up. Who would format 1000 lines like this anyway?
print('{: >3}: {}'.format(i + 1, line))
else:
print(line)
return 0 | [
"def",
"main",
"(",
")",
":",
"global",
"DEBUG",
"argd",
"=",
"docopt",
"(",
"USAGESTR",
",",
"version",
"=",
"VERSIONSTR",
",",
"script",
"=",
"SCRIPT",
")",
"DEBUG",
"=",
"argd",
"[",
"'--debug'",
"]",
"width",
"=",
"parse_int",
"(",
"argd",
"[",
"'--width'",
"]",
"or",
"DEFAULT_WIDTH",
")",
"or",
"1",
"indent",
"=",
"parse_int",
"(",
"argd",
"[",
"'--indent'",
"]",
"or",
"(",
"argd",
"[",
"'--INDENT'",
"]",
"or",
"0",
")",
")",
"prepend",
"=",
"' '",
"*",
"(",
"indent",
"*",
"4",
")",
"if",
"prepend",
"and",
"argd",
"[",
"'--indent'",
"]",
":",
"# Smart indent, change max width based on indention.",
"width",
"-=",
"len",
"(",
"prepend",
")",
"userprepend",
"=",
"argd",
"[",
"'--prepend'",
"]",
"or",
"(",
"argd",
"[",
"'--PREPEND'",
"]",
"or",
"''",
")",
"prepend",
"=",
"''",
".",
"join",
"(",
"(",
"prepend",
",",
"userprepend",
")",
")",
"if",
"argd",
"[",
"'--prepend'",
"]",
":",
"# Smart indent, change max width based on prepended text.",
"width",
"-=",
"len",
"(",
"userprepend",
")",
"userappend",
"=",
"argd",
"[",
"'--append'",
"]",
"or",
"(",
"argd",
"[",
"'--APPEND'",
"]",
"or",
"''",
")",
"if",
"argd",
"[",
"'--append'",
"]",
":",
"width",
"-=",
"len",
"(",
"userappend",
")",
"if",
"argd",
"[",
"'WORDS'",
"]",
":",
"# Try each argument as a file name.",
"argd",
"[",
"'WORDS'",
"]",
"=",
"(",
"(",
"try_read_file",
"(",
"w",
")",
"if",
"len",
"(",
"w",
")",
"<",
"256",
"else",
"w",
")",
"for",
"w",
"in",
"argd",
"[",
"'WORDS'",
"]",
")",
"words",
"=",
"' '",
".",
"join",
"(",
"(",
"w",
"for",
"w",
"in",
"argd",
"[",
"'WORDS'",
"]",
"if",
"w",
")",
")",
"else",
":",
"# No text/filenames provided, use stdin for input.",
"words",
"=",
"read_stdin",
"(",
")",
"block",
"=",
"FormatBlock",
"(",
"words",
")",
".",
"iter_format_block",
"(",
"chars",
"=",
"argd",
"[",
"'--chars'",
"]",
",",
"fill",
"=",
"argd",
"[",
"'--fill'",
"]",
",",
"prepend",
"=",
"prepend",
",",
"strip_first",
"=",
"argd",
"[",
"'--stripfirst'",
"]",
",",
"append",
"=",
"userappend",
",",
"strip_last",
"=",
"argd",
"[",
"'--striplast'",
"]",
",",
"width",
"=",
"width",
",",
"newlines",
"=",
"argd",
"[",
"'--newlines'",
"]",
",",
"lstrip",
"=",
"argd",
"[",
"'--lstrip'",
"]",
",",
")",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"block",
")",
":",
"if",
"argd",
"[",
"'--enumerate'",
"]",
":",
"# Current line number format supports up to 999 lines before",
"# messing up. Who would format 1000 lines like this anyway?",
"print",
"(",
"'{: >3}: {}'",
".",
"format",
"(",
"i",
"+",
"1",
",",
"line",
")",
")",
"else",
":",
"print",
"(",
"line",
")",
"return",
"0"
]
| 33.111111 | 0.000543 |
def show(self, commits=None, encoding='utf-8'):
"""Show the data of a set of commits.
The method returns the output of Git show command for a
set of commits using the following options:
git show --raw --numstat --pretty=fuller --decorate=full
--parents -M -C -c [<commit>...<commit>]
When the list of commits is empty, the command will return
data about the last commit, like the default behaviour of
`git show`.
:param commits: list of commits to show data
:param encoding: encode the output using this format
:returns: a generator where each item is a line from the show output
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs fetching the show output
"""
if self.is_empty():
logger.warning("Git %s repository is empty; unable to run show",
self.uri)
raise EmptyRepositoryError(repository=self.uri)
if commits is None:
commits = []
cmd_show = ['git', 'show']
cmd_show.extend(self.GIT_PRETTY_OUTPUT_OPTS)
cmd_show.extend(commits)
for line in self._exec_nb(cmd_show, cwd=self.dirpath, env=self.gitenv):
yield line
logger.debug("Git show fetched from %s repository (%s)",
self.uri, self.dirpath) | [
"def",
"show",
"(",
"self",
",",
"commits",
"=",
"None",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"if",
"self",
".",
"is_empty",
"(",
")",
":",
"logger",
".",
"warning",
"(",
"\"Git %s repository is empty; unable to run show\"",
",",
"self",
".",
"uri",
")",
"raise",
"EmptyRepositoryError",
"(",
"repository",
"=",
"self",
".",
"uri",
")",
"if",
"commits",
"is",
"None",
":",
"commits",
"=",
"[",
"]",
"cmd_show",
"=",
"[",
"'git'",
",",
"'show'",
"]",
"cmd_show",
".",
"extend",
"(",
"self",
".",
"GIT_PRETTY_OUTPUT_OPTS",
")",
"cmd_show",
".",
"extend",
"(",
"commits",
")",
"for",
"line",
"in",
"self",
".",
"_exec_nb",
"(",
"cmd_show",
",",
"cwd",
"=",
"self",
".",
"dirpath",
",",
"env",
"=",
"self",
".",
"gitenv",
")",
":",
"yield",
"line",
"logger",
".",
"debug",
"(",
"\"Git show fetched from %s repository (%s)\"",
",",
"self",
".",
"uri",
",",
"self",
".",
"dirpath",
")"
]
| 37.051282 | 0.001349 |
def to_python(self, value):
        """
        Validates that the value is in self.choices and can be coerced to the right type.
        """
        if value == self.emptyValue or value in EMPTY_VALUES:
            return self.emptyValue
        try:
            value = self.coerce(value)
        except (ValueError, TypeError, ValidationError):
            raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
        return value
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"==",
"self",
".",
"emptyValue",
"or",
"value",
"in",
"EMPTY_VALUES",
":",
"return",
"self",
".",
"emptyValue",
"try",
":",
"value",
"=",
"self",
".",
"coerce",
"(",
"value",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
",",
"ValidationError",
")",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid_choice'",
"]",
"%",
"{",
"'value'",
":",
"value",
"}",
")",
"return",
"value"
]
| 34.692308 | 0.023758 |
def parse(svg, cached=False, _copy=True):
""" Returns cached copies unless otherwise specified.
"""
if not cached:
dom = parser.parseString(svg)
paths = parse_node(dom, [])
else:
id = _cache.id(svg)
if not _cache.has_key(id):
dom = parser.parseString(svg)
_cache.save(id, parse_node(dom, []))
paths = _cache.load(id, _copy)
return paths | [
"def",
"parse",
"(",
"svg",
",",
"cached",
"=",
"False",
",",
"_copy",
"=",
"True",
")",
":",
"if",
"not",
"cached",
":",
"dom",
"=",
"parser",
".",
"parseString",
"(",
"svg",
")",
"paths",
"=",
"parse_node",
"(",
"dom",
",",
"[",
"]",
")",
"else",
":",
"id",
"=",
"_cache",
".",
"id",
"(",
"svg",
")",
"if",
"not",
"_cache",
".",
"has_key",
"(",
"id",
")",
":",
"dom",
"=",
"parser",
".",
"parseString",
"(",
"svg",
")",
"_cache",
".",
"save",
"(",
"id",
",",
"parse_node",
"(",
"dom",
",",
"[",
"]",
")",
")",
"paths",
"=",
"_cache",
".",
"load",
"(",
"id",
",",
"_copy",
")",
"return",
"paths"
]
| 26.1875 | 0.011521 |
def flat_list_to_polymer(atom_list, atom_group_s=4):
"""Takes a flat list of atomic coordinates and converts it to a `Polymer`.
Parameters
----------
atom_list : [Atom]
Flat list of coordinates.
atom_group_s : int, optional
Size of atom groups.
Returns
-------
polymer : Polypeptide
        `Polymer` object containing the atom coords converted to `Monomers`.
Raises
------
ValueError
Raised if `atom_group_s` != 4 or 5
"""
atom_labels = ['N', 'CA', 'C', 'O', 'CB']
atom_elements = ['N', 'C', 'C', 'O', 'C']
atoms_coords = [atom_list[x:x + atom_group_s]
for x in range(0, len(atom_list), atom_group_s)]
atoms = [[Atom(x[0], x[1]) for x in zip(y, atom_elements)]
for y in atoms_coords]
if atom_group_s == 5:
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'ALA')
for x in atoms]
elif atom_group_s == 4:
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'GLY')
for x in atoms]
else:
raise ValueError(
'Parameter atom_group_s must be 4 or 5 so atoms can be labeled correctly.')
polymer = Polypeptide(monomers=monomers)
return polymer | [
"def",
"flat_list_to_polymer",
"(",
"atom_list",
",",
"atom_group_s",
"=",
"4",
")",
":",
"atom_labels",
"=",
"[",
"'N'",
",",
"'CA'",
",",
"'C'",
",",
"'O'",
",",
"'CB'",
"]",
"atom_elements",
"=",
"[",
"'N'",
",",
"'C'",
",",
"'C'",
",",
"'O'",
",",
"'C'",
"]",
"atoms_coords",
"=",
"[",
"atom_list",
"[",
"x",
":",
"x",
"+",
"atom_group_s",
"]",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"atom_list",
")",
",",
"atom_group_s",
")",
"]",
"atoms",
"=",
"[",
"[",
"Atom",
"(",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"1",
"]",
")",
"for",
"x",
"in",
"zip",
"(",
"y",
",",
"atom_elements",
")",
"]",
"for",
"y",
"in",
"atoms_coords",
"]",
"if",
"atom_group_s",
"==",
"5",
":",
"monomers",
"=",
"[",
"Residue",
"(",
"OrderedDict",
"(",
"zip",
"(",
"atom_labels",
",",
"x",
")",
")",
",",
"'ALA'",
")",
"for",
"x",
"in",
"atoms",
"]",
"elif",
"atom_group_s",
"==",
"4",
":",
"monomers",
"=",
"[",
"Residue",
"(",
"OrderedDict",
"(",
"zip",
"(",
"atom_labels",
",",
"x",
")",
")",
",",
"'GLY'",
")",
"for",
"x",
"in",
"atoms",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Parameter atom_group_s must be 4 or 5 so atoms can be labeled correctly.'",
")",
"polymer",
"=",
"Polypeptide",
"(",
"monomers",
"=",
"monomers",
")",
"return",
"polymer"
]
| 32.891892 | 0.001596 |
def clean_zipfile(self):
'''remove existing zipfile'''
if os.path.isfile(self.zip_file):
os.remove(self.zip_file) | [
"def",
"clean_zipfile",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"zip_file",
")",
":",
"os",
".",
"remove",
"(",
"self",
".",
"zip_file",
")"
]
| 34.5 | 0.014184 |
def get_default_ENV(env):
"""
A fiddlin' little function that has an 'import SCons.Environment' which
can't be moved to the top level without creating an import loop. Since
this import creates a local variable named 'SCons', it blocks access to
the global variable, so we move it here to prevent complaints about local
variables being used uninitialized.
"""
global default_ENV
try:
return env['ENV']
except KeyError:
if not default_ENV:
import SCons.Environment
# This is a hideously expensive way to get a default shell
# environment. What it really should do is run the platform
# setup to get the default ENV. Fortunately, it's incredibly
# rare for an Environment not to have a shell environment, so
# we're not going to worry about it overmuch.
default_ENV = SCons.Environment.Environment()['ENV']
return default_ENV | [
"def",
"get_default_ENV",
"(",
"env",
")",
":",
"global",
"default_ENV",
"try",
":",
"return",
"env",
"[",
"'ENV'",
"]",
"except",
"KeyError",
":",
"if",
"not",
"default_ENV",
":",
"import",
"SCons",
".",
"Environment",
"# This is a hideously expensive way to get a default shell",
"# environment. What it really should do is run the platform",
"# setup to get the default ENV. Fortunately, it's incredibly",
"# rare for an Environment not to have a shell environment, so",
"# we're not going to worry about it overmuch.",
"default_ENV",
"=",
"SCons",
".",
"Environment",
".",
"Environment",
"(",
")",
"[",
"'ENV'",
"]",
"return",
"default_ENV"
]
| 45.380952 | 0.001028 |
def process_axes(self, obj, columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_NAMES.values():
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
# this might be the name of a file IN an axis
elif field in axis_values:
# we need to filter on this dimension
values = ensure_index(getattr(obj, field).values)
filt = ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
raise ValueError("cannot find the field [{field}] for "
"filtering!".format(field=field))
obj = process_filter(field, filt)
return obj | [
"def",
"process_axes",
"(",
"self",
",",
"obj",
",",
"columns",
"=",
"None",
")",
":",
"# make a copy to avoid side effects",
"if",
"columns",
"is",
"not",
"None",
":",
"columns",
"=",
"list",
"(",
"columns",
")",
"# make sure to include levels if we have them",
"if",
"columns",
"is",
"not",
"None",
"and",
"self",
".",
"is_multi_index",
":",
"for",
"n",
"in",
"self",
".",
"levels",
":",
"if",
"n",
"not",
"in",
"columns",
":",
"columns",
".",
"insert",
"(",
"0",
",",
"n",
")",
"# reorder by any non_index_axes & limit to the select columns",
"for",
"axis",
",",
"labels",
"in",
"self",
".",
"non_index_axes",
":",
"obj",
"=",
"_reindex_axis",
"(",
"obj",
",",
"axis",
",",
"labels",
",",
"columns",
")",
"# apply the selection filters (but keep in the same order)",
"if",
"self",
".",
"selection",
".",
"filter",
"is",
"not",
"None",
":",
"for",
"field",
",",
"op",
",",
"filt",
"in",
"self",
".",
"selection",
".",
"filter",
".",
"format",
"(",
")",
":",
"def",
"process_filter",
"(",
"field",
",",
"filt",
")",
":",
"for",
"axis_name",
"in",
"obj",
".",
"_AXIS_NAMES",
".",
"values",
"(",
")",
":",
"axis_number",
"=",
"obj",
".",
"_get_axis_number",
"(",
"axis_name",
")",
"axis_values",
"=",
"obj",
".",
"_get_axis",
"(",
"axis_name",
")",
"# see if the field is the name of an axis",
"if",
"field",
"==",
"axis_name",
":",
"# if we have a multi-index, then need to include",
"# the levels",
"if",
"self",
".",
"is_multi_index",
":",
"filt",
"=",
"filt",
".",
"union",
"(",
"Index",
"(",
"self",
".",
"levels",
")",
")",
"takers",
"=",
"op",
"(",
"axis_values",
",",
"filt",
")",
"return",
"obj",
".",
"loc",
".",
"_getitem_axis",
"(",
"takers",
",",
"axis",
"=",
"axis_number",
")",
"# this might be the name of a file IN an axis",
"elif",
"field",
"in",
"axis_values",
":",
"# we need to filter on this dimension",
"values",
"=",
"ensure_index",
"(",
"getattr",
"(",
"obj",
",",
"field",
")",
".",
"values",
")",
"filt",
"=",
"ensure_index",
"(",
"filt",
")",
"# hack until we support reversed dim flags",
"if",
"isinstance",
"(",
"obj",
",",
"DataFrame",
")",
":",
"axis_number",
"=",
"1",
"-",
"axis_number",
"takers",
"=",
"op",
"(",
"values",
",",
"filt",
")",
"return",
"obj",
".",
"loc",
".",
"_getitem_axis",
"(",
"takers",
",",
"axis",
"=",
"axis_number",
")",
"raise",
"ValueError",
"(",
"\"cannot find the field [{field}] for \"",
"\"filtering!\"",
".",
"format",
"(",
"field",
"=",
"field",
")",
")",
"obj",
"=",
"process_filter",
"(",
"field",
",",
"filt",
")",
"return",
"obj"
]
| 41.474576 | 0.000798 |
def encode_dict(data, encoding=None, errors='strict', keep=False,
preserve_dict_class=False, preserve_tuples=False):
'''
Encode all string values to bytes
'''
rv = data.__class__() if preserve_dict_class else {}
for key, value in six.iteritems(data):
if isinstance(key, tuple):
key = encode_tuple(key, encoding, errors, keep, preserve_dict_class) \
if preserve_tuples \
else encode_list(key, encoding, errors, keep,
preserve_dict_class, preserve_tuples)
else:
try:
key = salt.utils.stringutils.to_bytes(key, encoding, errors)
except TypeError:
# to_bytes raises a TypeError when input is not a
# string/bytestring/bytearray. This is expected and simply
# means we are going to leave the value as-is.
pass
except UnicodeEncodeError:
if not keep:
raise
if isinstance(value, list):
value = encode_list(value, encoding, errors, keep,
preserve_dict_class, preserve_tuples)
elif isinstance(value, tuple):
value = encode_tuple(value, encoding, errors, keep, preserve_dict_class) \
if preserve_tuples \
else encode_list(value, encoding, errors, keep,
preserve_dict_class, preserve_tuples)
elif isinstance(value, Mapping):
value = encode_dict(value, encoding, errors, keep,
preserve_dict_class, preserve_tuples)
else:
try:
value = salt.utils.stringutils.to_bytes(value, encoding, errors)
except TypeError:
# to_bytes raises a TypeError when input is not a
# string/bytestring/bytearray. This is expected and simply
# means we are going to leave the value as-is.
pass
except UnicodeEncodeError:
if not keep:
raise
rv[key] = value
return rv | [
"def",
"encode_dict",
"(",
"data",
",",
"encoding",
"=",
"None",
",",
"errors",
"=",
"'strict'",
",",
"keep",
"=",
"False",
",",
"preserve_dict_class",
"=",
"False",
",",
"preserve_tuples",
"=",
"False",
")",
":",
"rv",
"=",
"data",
".",
"__class__",
"(",
")",
"if",
"preserve_dict_class",
"else",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"tuple",
")",
":",
"key",
"=",
"encode_tuple",
"(",
"key",
",",
"encoding",
",",
"errors",
",",
"keep",
",",
"preserve_dict_class",
")",
"if",
"preserve_tuples",
"else",
"encode_list",
"(",
"key",
",",
"encoding",
",",
"errors",
",",
"keep",
",",
"preserve_dict_class",
",",
"preserve_tuples",
")",
"else",
":",
"try",
":",
"key",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_bytes",
"(",
"key",
",",
"encoding",
",",
"errors",
")",
"except",
"TypeError",
":",
"# to_bytes raises a TypeError when input is not a",
"# string/bytestring/bytearray. This is expected and simply",
"# means we are going to leave the value as-is.",
"pass",
"except",
"UnicodeEncodeError",
":",
"if",
"not",
"keep",
":",
"raise",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"encode_list",
"(",
"value",
",",
"encoding",
",",
"errors",
",",
"keep",
",",
"preserve_dict_class",
",",
"preserve_tuples",
")",
"elif",
"isinstance",
"(",
"value",
",",
"tuple",
")",
":",
"value",
"=",
"encode_tuple",
"(",
"value",
",",
"encoding",
",",
"errors",
",",
"keep",
",",
"preserve_dict_class",
")",
"if",
"preserve_tuples",
"else",
"encode_list",
"(",
"value",
",",
"encoding",
",",
"errors",
",",
"keep",
",",
"preserve_dict_class",
",",
"preserve_tuples",
")",
"elif",
"isinstance",
"(",
"value",
",",
"Mapping",
")",
":",
"value",
"=",
"encode_dict",
"(",
"value",
",",
"encoding",
",",
"errors",
",",
"keep",
",",
"preserve_dict_class",
",",
"preserve_tuples",
")",
"else",
":",
"try",
":",
"value",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_bytes",
"(",
"value",
",",
"encoding",
",",
"errors",
")",
"except",
"TypeError",
":",
"# to_bytes raises a TypeError when input is not a",
"# string/bytestring/bytearray. This is expected and simply",
"# means we are going to leave the value as-is.",
"pass",
"except",
"UnicodeEncodeError",
":",
"if",
"not",
"keep",
":",
"raise",
"rv",
"[",
"key",
"]",
"=",
"value",
"return",
"rv"
]
| 43.367347 | 0.001841 |
def getOffsetFromRva(self, rva):
"""
        Converts an RVA to an offset.
@type rva: int
@param rva: The RVA to be converted.
@rtype: int
@return: An integer value representing an offset in the PE file.
"""
offset = -1
s = self.getSectionByRva(rva)
if s != offset:
offset = (rva - self.sectionHeaders[s].virtualAddress.value) + self.sectionHeaders[s].pointerToRawData.value
else:
offset = rva
return offset | [
"def",
"getOffsetFromRva",
"(",
"self",
",",
"rva",
")",
":",
"offset",
"=",
"-",
"1",
"s",
"=",
"self",
".",
"getSectionByRva",
"(",
"rva",
")",
"if",
"s",
"!=",
"offset",
":",
"offset",
"=",
"(",
"rva",
"-",
"self",
".",
"sectionHeaders",
"[",
"s",
"]",
".",
"virtualAddress",
".",
"value",
")",
"+",
"self",
".",
"sectionHeaders",
"[",
"s",
"]",
".",
"pointerToRawData",
".",
"value",
"else",
":",
"offset",
"=",
"rva",
"return",
"offset"
]
| 28.263158 | 0.012613 |
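
A self-contained sketch of the RVA-to-file-offset arithmetic performed above, using a made-up two-section table in place of real PE section headers.

sections = [
    {"virtual_address": 0x1000, "virtual_size": 0x1000, "pointer_to_raw_data": 0x400},
    {"virtual_address": 0x2000, "virtual_size": 0x0800, "pointer_to_raw_data": 0x1400},
]

def offset_from_rva(rva):
    for s in sections:
        if s["virtual_address"] <= rva < s["virtual_address"] + s["virtual_size"]:
            return rva - s["virtual_address"] + s["pointer_to_raw_data"]
    return rva   # no containing section: fall back to treating the RVA as an offset

assert offset_from_rva(0x1010) == 0x410    # 0x1010 - 0x1000 + 0x400
assert offset_from_rva(0x2100) == 0x1500   # 0x2100 - 0x2000 + 0x1400
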
def solve(self):
"""
Run the solver and assign the solution's :class:`CoordSystem` instances
as the corresponding part's world coordinates.
"""
if self.world_coords is None:
log.warning("solving for Assembly without world coordinates set: %r", self)
for (component, world_coords) in solver(self.constraints, self.world_coords):
component.world_coords = world_coords | [
"def",
"solve",
"(",
"self",
")",
":",
"if",
"self",
".",
"world_coords",
"is",
"None",
":",
"log",
".",
"warning",
"(",
"\"solving for Assembly without world coordinates set: %r\"",
",",
"self",
")",
"for",
"(",
"component",
",",
"world_coords",
")",
"in",
"solver",
"(",
"self",
".",
"constraints",
",",
"self",
".",
"world_coords",
")",
":",
"component",
".",
"world_coords",
"=",
"world_coords"
]
| 42.9 | 0.009132 |
def url_for(endpoint, default="senaite.jsonapi.get", **values):
"""Looks up the API URL for the given endpoint
:param endpoint: The name of the registered route (aka endpoint)
:type endpoint: string
:returns: External URL for this endpoint
:rtype: string/None
"""
try:
return router.url_for(endpoint, force_external=True, values=values)
except Exception:
# XXX plone.jsonapi.core should catch the BuildError of Werkzeug and
# throw another error which can be handled here.
logger.debug("Could not build API URL for endpoint '%s'. "
"No route provider registered?" % endpoint)
# build generic API URL
return router.url_for(default, force_external=True, values=values) | [
"def",
"url_for",
"(",
"endpoint",
",",
"default",
"=",
"\"senaite.jsonapi.get\"",
",",
"*",
"*",
"values",
")",
":",
"try",
":",
"return",
"router",
".",
"url_for",
"(",
"endpoint",
",",
"force_external",
"=",
"True",
",",
"values",
"=",
"values",
")",
"except",
"Exception",
":",
"# XXX plone.jsonapi.core should catch the BuildError of Werkzeug and",
"# throw another error which can be handled here.",
"logger",
".",
"debug",
"(",
"\"Could not build API URL for endpoint '%s'. \"",
"\"No route provider registered?\"",
"%",
"endpoint",
")",
"# build generic API URL",
"return",
"router",
".",
"url_for",
"(",
"default",
",",
"force_external",
"=",
"True",
",",
"values",
"=",
"values",
")"
]
| 39.789474 | 0.001292 |
def is_program(self):
"""
A property which can be used to check if StatusObject uses program features or not.
"""
from automate.callables import Empty
return not (isinstance(self.on_activate, Empty)
and isinstance(self.on_deactivate, Empty)
and isinstance(self.on_update, Empty)) | [
"def",
"is_program",
"(",
"self",
")",
":",
"from",
"automate",
".",
"callables",
"import",
"Empty",
"return",
"not",
"(",
"isinstance",
"(",
"self",
".",
"on_activate",
",",
"Empty",
")",
"and",
"isinstance",
"(",
"self",
".",
"on_deactivate",
",",
"Empty",
")",
"and",
"isinstance",
"(",
"self",
".",
"on_update",
",",
"Empty",
")",
")"
]
| 44.5 | 0.008264 |
def __get_host(node, vm_):
'''
Return public IP, private IP, or hostname for the libcloud 'node' object
'''
if __get_ssh_interface(vm_) == 'private_ips' or vm_['external_ip'] is None:
ip_address = node.private_ips[0]
log.info('Salt node data. Private_ip: %s', ip_address)
else:
ip_address = node.public_ips[0]
log.info('Salt node data. Public_ip: %s', ip_address)
if ip_address:
return ip_address
return node.name | [
"def",
"__get_host",
"(",
"node",
",",
"vm_",
")",
":",
"if",
"__get_ssh_interface",
"(",
"vm_",
")",
"==",
"'private_ips'",
"or",
"vm_",
"[",
"'external_ip'",
"]",
"is",
"None",
":",
"ip_address",
"=",
"node",
".",
"private_ips",
"[",
"0",
"]",
"log",
".",
"info",
"(",
"'Salt node data. Private_ip: %s'",
",",
"ip_address",
")",
"else",
":",
"ip_address",
"=",
"node",
".",
"public_ips",
"[",
"0",
"]",
"log",
".",
"info",
"(",
"'Salt node data. Public_ip: %s'",
",",
"ip_address",
")",
"if",
"ip_address",
":",
"return",
"ip_address",
"return",
"node",
".",
"name"
]
| 31.266667 | 0.00207 |
def register_persistent_rest_pair(self, persistent_model_class, rest_model_class):
"""
:param persistent_model_class:
:param rest_model_class:
"""
self.register_adapter(ModelAdapter(
rest_model_class=rest_model_class,
persistent_model_class=persistent_model_class
)) | [
"def",
"register_persistent_rest_pair",
"(",
"self",
",",
"persistent_model_class",
",",
"rest_model_class",
")",
":",
"self",
".",
"register_adapter",
"(",
"ModelAdapter",
"(",
"rest_model_class",
"=",
"rest_model_class",
",",
"persistent_model_class",
"=",
"persistent_model_class",
")",
")"
]
| 36.666667 | 0.008876 |
def get_between_times(self, t1, t2, target=None):
"""
Query for OPUS data between times t1 and t2.
Parameters
----------
t1, t2 : datetime.datetime, strings
Start and end time for the query. If type is datetime, will be
converted to isoformat string. If type is string already, it needs
to be in an accepted international format for time strings.
target : str
Potential target for the observation query. Most likely will reduce
the amount of data matching the query a lot.
Returns
-------
        None, but sets the state of the object to have new query results stored
in self.obsids.
"""
try:
# checking if times have isoformat() method (datetimes have)
t1 = t1.isoformat()
t2 = t2.isoformat()
except AttributeError:
# if not, should already be a string, so do nothing.
pass
myquery = self._get_time_query(t1, t2)
if target is not None:
myquery["target"] = target
self.create_files_request(myquery, fmt="json")
self.unpack_json_response() | [
"def",
"get_between_times",
"(",
"self",
",",
"t1",
",",
"t2",
",",
"target",
"=",
"None",
")",
":",
"try",
":",
"# checking if times have isoformat() method (datetimes have)",
"t1",
"=",
"t1",
".",
"isoformat",
"(",
")",
"t2",
"=",
"t2",
".",
"isoformat",
"(",
")",
"except",
"AttributeError",
":",
"# if not, should already be a string, so do nothing.",
"pass",
"myquery",
"=",
"self",
".",
"_get_time_query",
"(",
"t1",
",",
"t2",
")",
"if",
"target",
"is",
"not",
"None",
":",
"myquery",
"[",
"\"target\"",
"]",
"=",
"target",
"self",
".",
"create_files_request",
"(",
"myquery",
",",
"fmt",
"=",
"\"json\"",
")",
"self",
".",
"unpack_json_response",
"(",
")"
]
| 37.677419 | 0.001669 |
def commit():
""" Commit changes and release the write lock """
session_token = request.headers['session_token']
repository = request.headers['repository']
#===
current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token)
if current_user is False: return fail(user_auth_fail_msg)
#===
repository_path = config['repositories'][repository]['path']
def with_exclusive_lock():
if not varify_user_lock(repository_path, session_token): return fail(lock_fail_msg)
#===
data_store = versioned_storage(repository_path)
if not data_store.have_active_commit(): return fail(no_active_commit_msg)
result = {}
if request.headers['mode'] == 'commit':
new_head = data_store.commit(request.headers['commit_message'], current_user['username'])
result = {'head' : new_head}
else:
data_store.rollback()
# Release the user lock
update_user_lock(repository_path, None)
return success(result)
return lock_access(repository_path, with_exclusive_lock) | [
"def",
"commit",
"(",
")",
":",
"session_token",
"=",
"request",
".",
"headers",
"[",
"'session_token'",
"]",
"repository",
"=",
"request",
".",
"headers",
"[",
"'repository'",
"]",
"#===",
"current_user",
"=",
"have_authenticated_user",
"(",
"request",
".",
"environ",
"[",
"'REMOTE_ADDR'",
"]",
",",
"repository",
",",
"session_token",
")",
"if",
"current_user",
"is",
"False",
":",
"return",
"fail",
"(",
"user_auth_fail_msg",
")",
"#===",
"repository_path",
"=",
"config",
"[",
"'repositories'",
"]",
"[",
"repository",
"]",
"[",
"'path'",
"]",
"def",
"with_exclusive_lock",
"(",
")",
":",
"if",
"not",
"varify_user_lock",
"(",
"repository_path",
",",
"session_token",
")",
":",
"return",
"fail",
"(",
"lock_fail_msg",
")",
"#===",
"data_store",
"=",
"versioned_storage",
"(",
"repository_path",
")",
"if",
"not",
"data_store",
".",
"have_active_commit",
"(",
")",
":",
"return",
"fail",
"(",
"no_active_commit_msg",
")",
"result",
"=",
"{",
"}",
"if",
"request",
".",
"headers",
"[",
"'mode'",
"]",
"==",
"'commit'",
":",
"new_head",
"=",
"data_store",
".",
"commit",
"(",
"request",
".",
"headers",
"[",
"'commit_message'",
"]",
",",
"current_user",
"[",
"'username'",
"]",
")",
"result",
"=",
"{",
"'head'",
":",
"new_head",
"}",
"else",
":",
"data_store",
".",
"rollback",
"(",
")",
"# Release the user lock",
"update_user_lock",
"(",
"repository_path",
",",
"None",
")",
"return",
"success",
"(",
"result",
")",
"return",
"lock_access",
"(",
"repository_path",
",",
"with_exclusive_lock",
")"
]
| 35.451613 | 0.011515 |
def add_geo(self, geo_location):
"""
        Saves a <geo-location> Element, to be incorporated into the Open511
geometry field.
"""
if not geo_location.xpath('latitude') and geo_location.xpath('longitude'):
raise Exception("Invalid geo-location %s" % etree.tostring(geo_location))
if _xpath_or_none(geo_location, 'horizontal-datum/text()') not in ('wgs84', None):
logger.warning("Unsupported horizontal-datum in %s" % etree.tostring(geo_location))
return
point = (
float(_xpath_or_none(geo_location, 'longitude/text()')) / 1000000,
float(_xpath_or_none(geo_location, 'latitude/text()')) / 1000000
)
self.points.add(point) | [
"def",
"add_geo",
"(",
"self",
",",
"geo_location",
")",
":",
"if",
"not",
"geo_location",
".",
"xpath",
"(",
"'latitude'",
")",
"and",
"geo_location",
".",
"xpath",
"(",
"'longitude'",
")",
":",
"raise",
"Exception",
"(",
"\"Invalid geo-location %s\"",
"%",
"etree",
".",
"tostring",
"(",
"geo_location",
")",
")",
"if",
"_xpath_or_none",
"(",
"geo_location",
",",
"'horizontal-datum/text()'",
")",
"not",
"in",
"(",
"'wgs84'",
",",
"None",
")",
":",
"logger",
".",
"warning",
"(",
"\"Unsupported horizontal-datum in %s\"",
"%",
"etree",
".",
"tostring",
"(",
"geo_location",
")",
")",
"return",
"point",
"=",
"(",
"float",
"(",
"_xpath_or_none",
"(",
"geo_location",
",",
"'longitude/text()'",
")",
")",
"/",
"1000000",
",",
"float",
"(",
"_xpath_or_none",
"(",
"geo_location",
",",
"'latitude/text()'",
")",
")",
"/",
"1000000",
")",
"self",
".",
"points",
".",
"add",
"(",
"point",
")"
]
| 48.733333 | 0.008054 |
def flexifunction_directory_ack_encode(self, target_system, target_component, directory_type, start_index, count, result):
'''
                Acknowledge success or failure of a flexifunction command
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
directory_type : 0=inputs, 1=outputs (uint8_t)
start_index : index of first directory entry to write (uint8_t)
count : count of directory entries to write (uint8_t)
result : result of acknowledge, 0=fail, 1=good (uint16_t)
'''
return MAVLink_flexifunction_directory_ack_message(target_system, target_component, directory_type, start_index, count, result) | [
"def",
"flexifunction_directory_ack_encode",
"(",
"self",
",",
"target_system",
",",
"target_component",
",",
"directory_type",
",",
"start_index",
",",
"count",
",",
"result",
")",
":",
"return",
"MAVLink_flexifunction_directory_ack_message",
"(",
"target_system",
",",
"target_component",
",",
"directory_type",
",",
"start_index",
",",
"count",
",",
"result",
")"
]
| 65.384615 | 0.008121 |
def check_error(res, error_enum):
"""Raise if the result has an error, otherwise return the result."""
if res.HasField("error"):
enum_name = error_enum.DESCRIPTOR.full_name
error_name = error_enum.Name(res.error)
details = getattr(res, "error_details", "<none>")
raise RequestError("%s.%s: '%s'" % (enum_name, error_name, details), res)
return res | [
"def",
"check_error",
"(",
"res",
",",
"error_enum",
")",
":",
"if",
"res",
".",
"HasField",
"(",
"\"error\"",
")",
":",
"enum_name",
"=",
"error_enum",
".",
"DESCRIPTOR",
".",
"full_name",
"error_name",
"=",
"error_enum",
".",
"Name",
"(",
"res",
".",
"error",
")",
"details",
"=",
"getattr",
"(",
"res",
",",
"\"error_details\"",
",",
"\"<none>\"",
")",
"raise",
"RequestError",
"(",
"\"%s.%s: '%s'\"",
"%",
"(",
"enum_name",
",",
"error_name",
",",
"details",
")",
",",
"res",
")",
"return",
"res"
]
| 45.25 | 0.01084 |
def reindex_similar(self, other, n_sphere=4):
"""Reindex ``other`` to be similarly indexed as ``self``.
Returns a reindexed copy of ``other`` that minimizes the
        distance for each atom to itself in the same chemical environment
from ``self`` to ``other``.
Read more about the definition of the chemical environment in
:func:`Cartesian.partition_chem_env`
.. note:: It is necessary to align ``self`` and other before
applying this method.
This can be done via :meth:`~Cartesian.align`.
.. note:: It is probably necessary to improve the result using
:meth:`~Cartesian.change_numbering()`.
Args:
other (Cartesian):
n_sphere (int): Wrapper around the argument for
:meth:`~Cartesian.partition_chem_env`.
Returns:
Cartesian: Reindexed version of other
"""
def make_subset_similar(m1, subset1, m2, subset2, index_dct):
"""Changes index_dct INPLACE"""
coords = ['x', 'y', 'z']
index1 = list(subset1)
for m1_i in index1:
dist_m2_to_m1_i = m2.get_distance_to(m1.loc[m1_i, coords],
subset2, sort=True)
m2_i = dist_m2_to_m1_i.index[0]
dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance']
m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords]
counter = itertools.count()
found = False
while not found:
if m2_i in index_dct.keys():
old_m1_pos = m1.loc[index_dct[m2_i], coords]
if dist_new < np.linalg.norm(m2_pos_i - old_m1_pos):
index1.append(index_dct[m2_i])
index_dct[m2_i] = m1_i
found = True
else:
m2_i = dist_m2_to_m1_i.index[next(counter)]
dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance']
m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords]
else:
index_dct[m2_i] = m1_i
found = True
return index_dct
molecule1 = self.copy()
molecule2 = other.copy()
partition1 = molecule1.partition_chem_env(n_sphere)
partition2 = molecule2.partition_chem_env(n_sphere)
index_dct = {}
for key in partition1:
message = ('You have chemically different molecules, regarding '
'the topology of their connectivity.')
assert len(partition1[key]) == len(partition2[key]), message
index_dct = make_subset_similar(molecule1, partition1[key],
molecule2, partition2[key],
index_dct)
molecule2.index = [index_dct[i] for i in molecule2.index]
return molecule2.loc[molecule1.index] | [
"def",
"reindex_similar",
"(",
"self",
",",
"other",
",",
"n_sphere",
"=",
"4",
")",
":",
"def",
"make_subset_similar",
"(",
"m1",
",",
"subset1",
",",
"m2",
",",
"subset2",
",",
"index_dct",
")",
":",
"\"\"\"Changes index_dct INPLACE\"\"\"",
"coords",
"=",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"index1",
"=",
"list",
"(",
"subset1",
")",
"for",
"m1_i",
"in",
"index1",
":",
"dist_m2_to_m1_i",
"=",
"m2",
".",
"get_distance_to",
"(",
"m1",
".",
"loc",
"[",
"m1_i",
",",
"coords",
"]",
",",
"subset2",
",",
"sort",
"=",
"True",
")",
"m2_i",
"=",
"dist_m2_to_m1_i",
".",
"index",
"[",
"0",
"]",
"dist_new",
"=",
"dist_m2_to_m1_i",
".",
"loc",
"[",
"m2_i",
",",
"'distance'",
"]",
"m2_pos_i",
"=",
"dist_m2_to_m1_i",
".",
"loc",
"[",
"m2_i",
",",
"coords",
"]",
"counter",
"=",
"itertools",
".",
"count",
"(",
")",
"found",
"=",
"False",
"while",
"not",
"found",
":",
"if",
"m2_i",
"in",
"index_dct",
".",
"keys",
"(",
")",
":",
"old_m1_pos",
"=",
"m1",
".",
"loc",
"[",
"index_dct",
"[",
"m2_i",
"]",
",",
"coords",
"]",
"if",
"dist_new",
"<",
"np",
".",
"linalg",
".",
"norm",
"(",
"m2_pos_i",
"-",
"old_m1_pos",
")",
":",
"index1",
".",
"append",
"(",
"index_dct",
"[",
"m2_i",
"]",
")",
"index_dct",
"[",
"m2_i",
"]",
"=",
"m1_i",
"found",
"=",
"True",
"else",
":",
"m2_i",
"=",
"dist_m2_to_m1_i",
".",
"index",
"[",
"next",
"(",
"counter",
")",
"]",
"dist_new",
"=",
"dist_m2_to_m1_i",
".",
"loc",
"[",
"m2_i",
",",
"'distance'",
"]",
"m2_pos_i",
"=",
"dist_m2_to_m1_i",
".",
"loc",
"[",
"m2_i",
",",
"coords",
"]",
"else",
":",
"index_dct",
"[",
"m2_i",
"]",
"=",
"m1_i",
"found",
"=",
"True",
"return",
"index_dct",
"molecule1",
"=",
"self",
".",
"copy",
"(",
")",
"molecule2",
"=",
"other",
".",
"copy",
"(",
")",
"partition1",
"=",
"molecule1",
".",
"partition_chem_env",
"(",
"n_sphere",
")",
"partition2",
"=",
"molecule2",
".",
"partition_chem_env",
"(",
"n_sphere",
")",
"index_dct",
"=",
"{",
"}",
"for",
"key",
"in",
"partition1",
":",
"message",
"=",
"(",
"'You have chemically different molecules, regarding '",
"'the topology of their connectivity.'",
")",
"assert",
"len",
"(",
"partition1",
"[",
"key",
"]",
")",
"==",
"len",
"(",
"partition2",
"[",
"key",
"]",
")",
",",
"message",
"index_dct",
"=",
"make_subset_similar",
"(",
"molecule1",
",",
"partition1",
"[",
"key",
"]",
",",
"molecule2",
",",
"partition2",
"[",
"key",
"]",
",",
"index_dct",
")",
"molecule2",
".",
"index",
"=",
"[",
"index_dct",
"[",
"i",
"]",
"for",
"i",
"in",
"molecule2",
".",
"index",
"]",
"return",
"molecule2",
".",
"loc",
"[",
"molecule1",
".",
"index",
"]"
]
| 43 | 0.00065 |
def query(cls, *criteria, **filters):
"""Wrap sqlalchemy query methods.
A wrapper for the filter and filter_by functions of sqlalchemy.
Define a dict with which columns should be filtered by which values.
        .. code-block:: python
            WorkflowObject.query(id=123)
            WorkflowObject.query(status=ObjectStatus.COMPLETED)
        The function also supports "hybrid" arguments using WorkflowObjectModel
        indirectly.
        .. code-block:: python
WorkflowObject.query(
WorkflowObject.dbmodel.status == ObjectStatus.COMPLETED,
user_id=user_id
)
See also SQLAlchemy BaseQuery's filter and filter_by documentation.
"""
query = cls.dbmodel.query.filter(
*criteria).filter_by(**filters)
return [cls(obj) for obj in query.all()] | [
"def",
"query",
"(",
"cls",
",",
"*",
"criteria",
",",
"*",
"*",
"filters",
")",
":",
"query",
"=",
"cls",
".",
"dbmodel",
".",
"query",
".",
"filter",
"(",
"*",
"criteria",
")",
".",
"filter_by",
"(",
"*",
"*",
"filters",
")",
"return",
"[",
"cls",
"(",
"obj",
")",
"for",
"obj",
"in",
"query",
".",
"all",
"(",
")",
"]"
]
| 32.730769 | 0.002283 |
def parse_abstract(xml_dict):
"""
Parse PubMed XML dictionary to retrieve abstract.
"""
key_path = ['PubmedArticleSet', 'PubmedArticle', 'MedlineCitation',
'Article', 'Abstract', 'AbstractText']
abstract_xml = reduce(dict.get, key_path, xml_dict)
abstract_paragraphs = []
if isinstance(abstract_xml, str):
abstract_paragraphs.append(abstract_xml)
elif isinstance(abstract_xml, dict):
abstract_text = abstract_xml.get('#text')
try:
abstract_label = abstract_xml['@Label']
except KeyError:
abstract_paragraphs.append(abstract_text)
else:
abstract_paragraphs.append(
"{}: {}".format(abstract_label, abstract_text))
elif isinstance(abstract_xml, list):
for abstract_section in abstract_xml:
try:
abstract_text = abstract_section['#text']
except KeyError:
abstract_text = abstract_section
try:
abstract_label = abstract_section['@Label']
except KeyError:
abstract_paragraphs.append(abstract_text)
else:
abstract_paragraphs.append(
"{}: {}".format(abstract_label, abstract_text))
else:
raise RuntimeError("Error parsing abstract.")
return "\n\n".join(abstract_paragraphs) | [
"def",
"parse_abstract",
"(",
"xml_dict",
")",
":",
"key_path",
"=",
"[",
"'PubmedArticleSet'",
",",
"'PubmedArticle'",
",",
"'MedlineCitation'",
",",
"'Article'",
",",
"'Abstract'",
",",
"'AbstractText'",
"]",
"abstract_xml",
"=",
"reduce",
"(",
"dict",
".",
"get",
",",
"key_path",
",",
"xml_dict",
")",
"abstract_paragraphs",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"abstract_xml",
",",
"str",
")",
":",
"abstract_paragraphs",
".",
"append",
"(",
"abstract_xml",
")",
"elif",
"isinstance",
"(",
"abstract_xml",
",",
"dict",
")",
":",
"abstract_text",
"=",
"abstract_xml",
".",
"get",
"(",
"'#text'",
")",
"try",
":",
"abstract_label",
"=",
"abstract_xml",
"[",
"'@Label'",
"]",
"except",
"KeyError",
":",
"abstract_paragraphs",
".",
"append",
"(",
"abstract_text",
")",
"else",
":",
"abstract_paragraphs",
".",
"append",
"(",
"\"{}: {}\"",
".",
"format",
"(",
"abstract_label",
",",
"abstract_text",
")",
")",
"elif",
"isinstance",
"(",
"abstract_xml",
",",
"list",
")",
":",
"for",
"abstract_section",
"in",
"abstract_xml",
":",
"try",
":",
"abstract_text",
"=",
"abstract_section",
"[",
"'#text'",
"]",
"except",
"KeyError",
":",
"abstract_text",
"=",
"abstract_section",
"try",
":",
"abstract_label",
"=",
"abstract_section",
"[",
"'@Label'",
"]",
"except",
"KeyError",
":",
"abstract_paragraphs",
".",
"append",
"(",
"abstract_text",
")",
"else",
":",
"abstract_paragraphs",
".",
"append",
"(",
"\"{}: {}\"",
".",
"format",
"(",
"abstract_label",
",",
"abstract_text",
")",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Error parsing abstract.\"",
")",
"return",
"\"\\n\\n\"",
".",
"join",
"(",
"abstract_paragraphs",
")"
]
| 35.595238 | 0.001302 |
def parse(filename_or_url, parser=None, base_url=None, **kw):
"""
Parse a filename, URL, or file-like object into an HTML document
tree. Note: this returns a tree, not an element. Use
``parse(...).getroot()`` to get the document root.
You can override the base URL with the ``base_url`` keyword. This
is most useful when parsing from a file-like object.
"""
if parser is None:
parser = html_parser
return etree.parse(filename_or_url, parser, base_url=base_url, **kw) | [
"def",
"parse",
"(",
"filename_or_url",
",",
"parser",
"=",
"None",
",",
"base_url",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"html_parser",
"return",
"etree",
".",
"parse",
"(",
"filename_or_url",
",",
"parser",
",",
"base_url",
"=",
"base_url",
",",
"*",
"*",
"kw",
")"
]
| 41.916667 | 0.001946 |
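This entry looks like lxml.html's module-level parse helper. A small usage sketch, assuming lxml is installed and that 'page.html' is a local file (both are assumptions, not part of the entry above)::

    from lxml import html

    tree = html.parse("page.html")   # returns an ElementTree, not an Element
    root = tree.getroot()            # per the docstring, getroot() yields the document root
    print(root.tag)                  # typically 'html'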
def get_conn(self):
"""
Retrieves connection to Cloud Text to Speech.
:return: Google Cloud Text to Speech client object.
:rtype: google.cloud.texttospeech_v1.TextToSpeechClient
"""
if not self._client:
self._client = TextToSpeechClient(credentials=self._get_credentials())
return self._client | [
"def",
"get_conn",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_client",
":",
"self",
".",
"_client",
"=",
"TextToSpeechClient",
"(",
"credentials",
"=",
"self",
".",
"_get_credentials",
"(",
")",
")",
"return",
"self",
".",
"_client"
]
| 35.3 | 0.008287 |
def extract_text_log_artifacts(job_log):
"""Generate a set of artifacts by parsing from the raw text log."""
# parse a log given its url
artifact_bc = ArtifactBuilderCollection(job_log.url)
artifact_bc.parse()
artifact_list = []
for name, artifact in artifact_bc.artifacts.items():
artifact_list.append({
"job_guid": job_log.job.guid,
"name": name,
"type": 'json',
"blob": json.dumps(artifact)
})
return artifact_list | [
"def",
"extract_text_log_artifacts",
"(",
"job_log",
")",
":",
"# parse a log given its url",
"artifact_bc",
"=",
"ArtifactBuilderCollection",
"(",
"job_log",
".",
"url",
")",
"artifact_bc",
".",
"parse",
"(",
")",
"artifact_list",
"=",
"[",
"]",
"for",
"name",
",",
"artifact",
"in",
"artifact_bc",
".",
"artifacts",
".",
"items",
"(",
")",
":",
"artifact_list",
".",
"append",
"(",
"{",
"\"job_guid\"",
":",
"job_log",
".",
"job",
".",
"guid",
",",
"\"name\"",
":",
"name",
",",
"\"type\"",
":",
"'json'",
",",
"\"blob\"",
":",
"json",
".",
"dumps",
"(",
"artifact",
")",
"}",
")",
"return",
"artifact_list"
]
| 29.176471 | 0.001953 |
def _add_goterms(self, go2obj_user, goid):
"""Add alt GO IDs to go2obj subset, if requested and relevant."""
goterm = self.go2obj_orig[goid]
if goid != goterm.id and goterm.id in go2obj_user and goid not in go2obj_user:
go2obj_user[goid] = goterm | [
"def",
"_add_goterms",
"(",
"self",
",",
"go2obj_user",
",",
"goid",
")",
":",
"goterm",
"=",
"self",
".",
"go2obj_orig",
"[",
"goid",
"]",
"if",
"goid",
"!=",
"goterm",
".",
"id",
"and",
"goterm",
".",
"id",
"in",
"go2obj_user",
"and",
"goid",
"not",
"in",
"go2obj_user",
":",
"go2obj_user",
"[",
"goid",
"]",
"=",
"goterm"
]
| 55.6 | 0.010638 |
def make_muc_admin_quey(self):
"""
Create <query xmlns="...muc#admin"/> element in the stanza.
:return: the element created.
:returntype: `MucAdminQuery`
"""
self.clear_muc_child()
self.muc_child=MucAdminQuery(parent=self.xmlnode)
return self.muc_child | [
"def",
"make_muc_admin_quey",
"(",
"self",
")",
":",
"self",
".",
"clear_muc_child",
"(",
")",
"self",
".",
"muc_child",
"=",
"MucAdminQuery",
"(",
"parent",
"=",
"self",
".",
"xmlnode",
")",
"return",
"self",
".",
"muc_child"
]
| 30.8 | 0.009464 |
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
host_cache_manager=None):
'''
    Configures the host cache of the specified host
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
datastore_ref
        The vim.Datastore object representing the datastore the host cache will
be configured on.
swap_size_MiB
        The size in mebibytes (MiB) of the swap.
host_cache_manager
The vim.HostCacheConfigurationManager object representing the cache
configuration manager on the specified host. Default is None. If None,
it will be retrieved in the method
'''
hostname = get_managed_object_name(host_ref)
if not host_cache_manager:
props = get_properties_of_managed_object(
host_ref, ['configManager.cacheConfigurationManager'])
if not props.get('configManager.cacheConfigurationManager'):
raise salt.exceptions.VMwareObjectRetrievalError(
'Host \'{0}\' has no host cache'.format(hostname))
host_cache_manager = props['configManager.cacheConfigurationManager']
log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
spec = vim.HostCacheConfigurationSpec(
datastore=datastore_ref,
swapSize=swap_size_MiB)
log.trace('host_cache_spec=%s', spec)
try:
task = host_cache_manager.ConfigureHostCache_Task(spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
wait_for_task(task, hostname, 'HostCacheConfigurationTask')
log.trace('Configured host cache on host \'%s\'', hostname)
return True | [
"def",
"configure_host_cache",
"(",
"host_ref",
",",
"datastore_ref",
",",
"swap_size_MiB",
",",
"host_cache_manager",
"=",
"None",
")",
":",
"hostname",
"=",
"get_managed_object_name",
"(",
"host_ref",
")",
"if",
"not",
"host_cache_manager",
":",
"props",
"=",
"get_properties_of_managed_object",
"(",
"host_ref",
",",
"[",
"'configManager.cacheConfigurationManager'",
"]",
")",
"if",
"not",
"props",
".",
"get",
"(",
"'configManager.cacheConfigurationManager'",
")",
":",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareObjectRetrievalError",
"(",
"'Host \\'{0}\\' has no host cache'",
".",
"format",
"(",
"hostname",
")",
")",
"host_cache_manager",
"=",
"props",
"[",
"'configManager.cacheConfigurationManager'",
"]",
"log",
".",
"trace",
"(",
"'Configuring the host cache on host \\'%s\\', datastore \\'%s\\', '",
"'swap size=%s MiB'",
",",
"hostname",
",",
"datastore_ref",
".",
"name",
",",
"swap_size_MiB",
")",
"spec",
"=",
"vim",
".",
"HostCacheConfigurationSpec",
"(",
"datastore",
"=",
"datastore_ref",
",",
"swapSize",
"=",
"swap_size_MiB",
")",
"log",
".",
"trace",
"(",
"'host_cache_spec=%s'",
",",
"spec",
")",
"try",
":",
"task",
"=",
"host_cache_manager",
".",
"ConfigureHostCache_Task",
"(",
"spec",
")",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"'Not enough permissions. Required privilege: '",
"'{0}'",
".",
"format",
"(",
"exc",
".",
"privilegeId",
")",
")",
"except",
"vim",
".",
"fault",
".",
"VimFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"exc",
".",
"msg",
")",
"except",
"vmodl",
".",
"RuntimeFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareRuntimeError",
"(",
"exc",
".",
"msg",
")",
"wait_for_task",
"(",
"task",
",",
"hostname",
",",
"'HostCacheConfigurationTask'",
")",
"log",
".",
"trace",
"(",
"'Configured host cache on host \\'%s\\''",
",",
"hostname",
")",
"return",
"True"
]
| 40.576923 | 0.000463 |
def set_dtype(self, opt, dtype):
"""Set the `dtype` attribute. If opt['DataType'] has a value
other than None, it overrides the `dtype` parameter of this
method. No changes are made if the `dtype` attribute already
exists and has a value other than 'None'.
Parameters
----------
opt : :class:`cdict.ConstrainedDict` object
Algorithm options
dtype : data-type
Data type for working variables (overridden by 'DataType' option)
"""
# Take no action of self.dtype exists and is not None
if not hasattr(self, 'dtype') or self.dtype is None:
# DataType option overrides explicitly specified data type
if opt['DataType'] is None:
self.dtype = dtype
else:
self.dtype = np.dtype(opt['DataType']) | [
"def",
"set_dtype",
"(",
"self",
",",
"opt",
",",
"dtype",
")",
":",
"# Take no action of self.dtype exists and is not None",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'dtype'",
")",
"or",
"self",
".",
"dtype",
"is",
"None",
":",
"# DataType option overrides explicitly specified data type",
"if",
"opt",
"[",
"'DataType'",
"]",
"is",
"None",
":",
"self",
".",
"dtype",
"=",
"dtype",
"else",
":",
"self",
".",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"opt",
"[",
"'DataType'",
"]",
")"
]
| 40.190476 | 0.002315 |
def expandScopes(self, *args, **kwargs):
"""
Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["expandScopes"], *args, **kwargs) | [
"def",
"expandScopes",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"expandScopes\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| 27.266667 | 0.009456 |
def generate_molecule_object_dict(source, format, values):
"""Generate a dictionary that represents a Squonk MoleculeObject when
written as JSON
:param source: Molecules in molfile or smiles format
:param format: The format of the molecule. Either 'mol' or 'smiles'
:param values: Optional dict of values (properties) for the MoleculeObject
"""
m = {"uuid": str(uuid.uuid4()), "source": source, "format": format}
if values:
m["values"] = values
return m | [
"def",
"generate_molecule_object_dict",
"(",
"source",
",",
"format",
",",
"values",
")",
":",
"m",
"=",
"{",
"\"uuid\"",
":",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
",",
"\"source\"",
":",
"source",
",",
"\"format\"",
":",
"format",
"}",
"if",
"values",
":",
"m",
"[",
"\"values\"",
"]",
"=",
"values",
"return",
"m"
]
| 40.583333 | 0.002008 |
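An illustrative call to the helper above; the mirror definition below simply restates the source so the snippet runs on its own, and the SMILES string and property dict are example values only::

    import uuid

    def generate_molecule_object_dict(source, format, values):
        # Mirrors the function in the entry above.
        m = {"uuid": str(uuid.uuid4()), "source": source, "format": format}
        if values:
            m["values"] = values
        return m

    mol = generate_molecule_object_dict("CCO", "smiles", {"name": "ethanol"})
    print(mol["format"])   # -> smiles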
def get_resourcegroupitems(group_id, scenario_id, **kwargs):
"""
Get all the items in a group, in a scenario. If group_id is None, return
all items across all groups in the scenario.
"""
rgi_qry = db.DBSession.query(ResourceGroupItem).\
filter(ResourceGroupItem.scenario_id==scenario_id)
if group_id is not None:
rgi_qry = rgi_qry.filter(ResourceGroupItem.group_id==group_id)
rgi = rgi_qry.all()
return rgi | [
"def",
"get_resourcegroupitems",
"(",
"group_id",
",",
"scenario_id",
",",
"*",
"*",
"kwargs",
")",
":",
"rgi_qry",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"ResourceGroupItem",
")",
".",
"filter",
"(",
"ResourceGroupItem",
".",
"scenario_id",
"==",
"scenario_id",
")",
"if",
"group_id",
"is",
"not",
"None",
":",
"rgi_qry",
"=",
"rgi_qry",
".",
"filter",
"(",
"ResourceGroupItem",
".",
"group_id",
"==",
"group_id",
")",
"rgi",
"=",
"rgi_qry",
".",
"all",
"(",
")",
"return",
"rgi"
]
| 28.75 | 0.010526 |
def contains_point( self, x, y ):
"""Is the point (x,y) on this curve?"""
return ( y * y - ( x * x * x + self.__a * x + self.__b ) ) % self.__p == 0 | [
"def",
"contains_point",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"return",
"(",
"y",
"*",
"y",
"-",
"(",
"x",
"*",
"x",
"*",
"x",
"+",
"self",
".",
"__a",
"*",
"x",
"+",
"self",
".",
"__b",
")",
")",
"%",
"self",
".",
"__p",
"==",
"0"
]
| 51.333333 | 0.044872 |
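The membership test above is a single congruence check. A self-contained numeric sketch on a toy curve (y**2 = x**3 + 7 over p = 17; the curve and points are illustrative only)::

    p, a, b = 17, 0, 7

    def contains_point(x, y):
        # Same congruence test as the method above, with module-level parameters.
        return (y * y - (x * x * x + a * x + b)) % p == 0

    print(contains_point(1, 5))   # True:  25 % 17 == 8 and (1**3 + 7) % 17 == 8
    print(contains_point(2, 5))   # False: (2**3 + 7) % 17 == 15, but 25 % 17 == 8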
def reversed(self):
'''
                Return a new FSM such that for every string that self accepts (e.g.
                "beer"), the new FSM accepts the reversed string ("reeb").
'''
alphabet = self.alphabet
# Start from a composite "state-set" consisting of all final states.
# If there are no final states, this set is empty and we'll find that
# no other states get generated.
initial = frozenset(self.finals)
# Find every possible way to reach the current state-set
# using this symbol.
def follow(current, symbol):
next = frozenset([
prev
for prev in self.map
for state in current
if symbol in self.map[prev] and self.map[prev][symbol] == state
])
if len(next) == 0:
raise OblivionError
return next
# A state-set is final if the initial state is in it.
def final(state):
return self.initial in state
# Man, crawl() is the best!
return crawl(alphabet, initial, final, follow) | [
"def",
"reversed",
"(",
"self",
")",
":",
"alphabet",
"=",
"self",
".",
"alphabet",
"# Start from a composite \"state-set\" consisting of all final states.",
"# If there are no final states, this set is empty and we'll find that",
"# no other states get generated.",
"initial",
"=",
"frozenset",
"(",
"self",
".",
"finals",
")",
"# Find every possible way to reach the current state-set",
"# using this symbol.",
"def",
"follow",
"(",
"current",
",",
"symbol",
")",
":",
"next",
"=",
"frozenset",
"(",
"[",
"prev",
"for",
"prev",
"in",
"self",
".",
"map",
"for",
"state",
"in",
"current",
"if",
"symbol",
"in",
"self",
".",
"map",
"[",
"prev",
"]",
"and",
"self",
".",
"map",
"[",
"prev",
"]",
"[",
"symbol",
"]",
"==",
"state",
"]",
")",
"if",
"len",
"(",
"next",
")",
"==",
"0",
":",
"raise",
"OblivionError",
"return",
"next",
"# A state-set is final if the initial state is in it.",
"def",
"final",
"(",
"state",
")",
":",
"return",
"self",
".",
"initial",
"in",
"state",
"# Man, crawl() is the best!",
"return",
"crawl",
"(",
"alphabet",
",",
"initial",
",",
"final",
",",
"follow",
")"
]
| 28.806452 | 0.030336 |
def move(self, drow, dcol=0):
"""Move the token by `drow` rows and `dcol` columns."""
self._start_row += drow
self._start_col += dcol
self._end_row += drow
self._end_col += dcol | [
"def",
"move",
"(",
"self",
",",
"drow",
",",
"dcol",
"=",
"0",
")",
":",
"self",
".",
"_start_row",
"+=",
"drow",
"self",
".",
"_start_col",
"+=",
"dcol",
"self",
".",
"_end_row",
"+=",
"drow",
"self",
".",
"_end_col",
"+=",
"dcol"
]
| 35.333333 | 0.009217 |
def copy(self, newtablename, deep=False, valuecopy=False, dminfo={},
endian='aipsrc', memorytable=False, copynorows=False):
"""Copy the table and return a table object for the copy.
It copies all data in the columns and keywords.
Besides the table, all its subtables are copied too.
By default a shallow copy is made (usually by copying files).
It means that the copy of a reference table is also a reference table.
Use `deep=True` to make a deep copy which turns a reference table
into a normal table.
`deep=True`
a deep copy of a reference table is made.
`valuecopy=True`
values are copied, which reorganizes normal tables and removes wasted
space. It implies `deep=True`. It is slower than a normal copy.
`dminfo`
gives the option to specify data managers to change the way columns
are stored. This is a dict as returned by method :func:`getdminfo`.
`endian`
specifies the endianness of the new table when a deep copy is made:
| 'little' = as little endian
| 'big' = as big endian
| 'local' = use the endianness of the machine being used
| 'aipsrc' = use as defined in an .aipsrc file (defaults to local)
`memorytable=True`
do not copy to disk, but to a table kept in memory.
`copynorows=True`
only copy the column layout and keywords, but no data.
For example::
t = table('3c343.MS')
t1 = t.query('ANTENNA1 != ANTENNA2') # do row selection
t2 = t1.copy ('3c343.sel', True) # make deep copy
t2 = t.copy ('new.tab', True, True) # reorganize storage
"""
t = self._copy(newtablename, memorytable, deep, valuecopy,
endian, dminfo, copynorows)
# copy returns a Table object, so turn that into table.
return table(t, _oper=3) | [
"def",
"copy",
"(",
"self",
",",
"newtablename",
",",
"deep",
"=",
"False",
",",
"valuecopy",
"=",
"False",
",",
"dminfo",
"=",
"{",
"}",
",",
"endian",
"=",
"'aipsrc'",
",",
"memorytable",
"=",
"False",
",",
"copynorows",
"=",
"False",
")",
":",
"t",
"=",
"self",
".",
"_copy",
"(",
"newtablename",
",",
"memorytable",
",",
"deep",
",",
"valuecopy",
",",
"endian",
",",
"dminfo",
",",
"copynorows",
")",
"# copy returns a Table object, so turn that into table.",
"return",
"table",
"(",
"t",
",",
"_oper",
"=",
"3",
")"
]
| 46.333333 | 0.00151 |
def write(context):
"""Starts a new article"""
config = context.obj
title = click.prompt('Title')
author = click.prompt('Author', default=config.get('DEFAULT_AUTHOR'))
slug = slugify(title)
creation_date = datetime.now()
basename = '{:%Y-%m-%d}_{}.md'.format(creation_date, slug)
meta = (
('Title', title),
('Date', '{:%Y-%m-%d %H:%M}:00'.format(creation_date)),
('Modified', '{:%Y-%m-%d %H:%M}:00'.format(creation_date)),
('Author', author),
)
file_content = ''
for key, value in meta:
file_content += '{}: {}\n'.format(key, value)
file_content += '\n\n'
file_content += 'Text...\n\n'
file_content += '\n\n'
file_content += 'Text...\n\n'
os.makedirs(config['CONTENT_DIR'], exist_ok=True)
path = os.path.join(config['CONTENT_DIR'], basename)
with click.open_file(path, 'w') as f:
f.write(file_content)
click.echo(path)
click.launch(path) | [
"def",
"write",
"(",
"context",
")",
":",
"config",
"=",
"context",
".",
"obj",
"title",
"=",
"click",
".",
"prompt",
"(",
"'Title'",
")",
"author",
"=",
"click",
".",
"prompt",
"(",
"'Author'",
",",
"default",
"=",
"config",
".",
"get",
"(",
"'DEFAULT_AUTHOR'",
")",
")",
"slug",
"=",
"slugify",
"(",
"title",
")",
"creation_date",
"=",
"datetime",
".",
"now",
"(",
")",
"basename",
"=",
"'{:%Y-%m-%d}_{}.md'",
".",
"format",
"(",
"creation_date",
",",
"slug",
")",
"meta",
"=",
"(",
"(",
"'Title'",
",",
"title",
")",
",",
"(",
"'Date'",
",",
"'{:%Y-%m-%d %H:%M}:00'",
".",
"format",
"(",
"creation_date",
")",
")",
",",
"(",
"'Modified'",
",",
"'{:%Y-%m-%d %H:%M}:00'",
".",
"format",
"(",
"creation_date",
")",
")",
",",
"(",
"'Author'",
",",
"author",
")",
",",
")",
"file_content",
"=",
"''",
"for",
"key",
",",
"value",
"in",
"meta",
":",
"file_content",
"+=",
"'{}: {}\\n'",
".",
"format",
"(",
"key",
",",
"value",
")",
"file_content",
"+=",
"'\\n\\n'",
"file_content",
"+=",
"'Text...\\n\\n'",
"file_content",
"+=",
"'\\n\\n'",
"file_content",
"+=",
"'Text...\\n\\n'",
"os",
".",
"makedirs",
"(",
"config",
"[",
"'CONTENT_DIR'",
"]",
",",
"exist_ok",
"=",
"True",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config",
"[",
"'CONTENT_DIR'",
"]",
",",
"basename",
")",
"with",
"click",
".",
"open_file",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"file_content",
")",
"click",
".",
"echo",
"(",
"path",
")",
"click",
".",
"launch",
"(",
"path",
")"
]
| 30.030303 | 0.000978 |
def assemble_oligos(dna_list, reference=None):
'''Given a list of DNA sequences, assemble into a single construct.
:param dna_list: List of DNA sequences - they must be single-stranded.
:type dna_list: coral.DNA list
:param reference: Expected sequence - once assembly completed, this will
be used to reorient the DNA (assembly could potentially occur from either
side of a linear DNA construct if oligos are in a random order). If this
fails, an AssemblyError is raised.
:type reference: coral.DNA
:raises: AssemblyError if it can't assemble for any reason.
:returns: A single assembled DNA sequence
:rtype: coral.DNA
'''
# FIXME: this protocol currently only supports 5' ends on the assembly
# Find all matches for every oligo. If more than 2 per side, error.
# Self-oligo is included in case the 3' end is self-complementary.
# 1) Find all unique 3' binders (and non-binders).
match_3 = [bind_unique(seq, dna_list, right=True) for i, seq in
enumerate(dna_list)]
# 2) Find all unique 5' binders (and non-binders).
match_5 = [bind_unique(seq, dna_list, right=False) for i, seq in
enumerate(dna_list)]
# Assemble into 2-tuple
zipped = zip(match_5, match_3)
# 3) If none found, error out with 'oligo n has no binders'
for i, oligo_match in enumerate(zipped):
if not any(oligo_match):
error = 'Oligo {} has no binding partners.'.format(i + 1)
raise AssemblyError(error)
# 4) There should be exactly 2 oligos that bind at 3' end but
# not 5'.
ends = []
for i, (five, three) in enumerate(zipped):
if five is None and three is not None:
ends.append(i)
# 5) If more than 2, error with 'too many ends'.
if len(ends) > 2:
raise AssemblyError('Too many (>2) end oligos found.')
# 6) If more than 2, error with 'not enough ends'.
if len(ends) < 2:
raise AssemblyError('Not enough (<2) end oligos found.')
# NOTE:If 1-4 are satisfied, unique linear assembly has been found (proof?)
# 8) Start with first end and build iteratively
last_index = ends[0]
assembly = dna_list[last_index]
flip = True
# This would be slightly less complicated if the sequences were tied to
# their match info in a tuple
# Append next region n - 1 times
for i in range(len(dna_list) - 1):
if flip:
# Next oligo needs to be flipped before concatenation
# Grab 3' match from last oligo's info
current_index, matchlen = zipped[last_index][1]
# Get new oligo sequence, make double-stranded for concatenation
next_oligo = dna_list[current_index].to_ds()
# Reverse complement for concatenation
next_oligo = next_oligo.reverse_complement()
# Don't reverse complement the next one
flip = False
else:
# Grab 5' match from last oligo's info
current_index, matchlen = zipped[last_index][0]
# Get new oligo sequence, make double-stranded for concatenation
next_oligo = dna_list[current_index].to_ds()
# Reverse complement the next one
flip = True
# Trim overlap from new sequence
next_oligo = next_oligo[(matchlen - 1):]
# Concatenate and update last oligo's information
assembly += next_oligo
last_index = current_index
if reference:
if assembly == reference or assembly == reference.reverse_complement():
return assembly
else:
raise AssemblyError('Assembly did not match reference')
else:
return assembly | [
"def",
"assemble_oligos",
"(",
"dna_list",
",",
"reference",
"=",
"None",
")",
":",
"# FIXME: this protocol currently only supports 5' ends on the assembly",
"# Find all matches for every oligo. If more than 2 per side, error.",
"# Self-oligo is included in case the 3' end is self-complementary.",
"# 1) Find all unique 3' binders (and non-binders).",
"match_3",
"=",
"[",
"bind_unique",
"(",
"seq",
",",
"dna_list",
",",
"right",
"=",
"True",
")",
"for",
"i",
",",
"seq",
"in",
"enumerate",
"(",
"dna_list",
")",
"]",
"# 2) Find all unique 5' binders (and non-binders).",
"match_5",
"=",
"[",
"bind_unique",
"(",
"seq",
",",
"dna_list",
",",
"right",
"=",
"False",
")",
"for",
"i",
",",
"seq",
"in",
"enumerate",
"(",
"dna_list",
")",
"]",
"# Assemble into 2-tuple",
"zipped",
"=",
"zip",
"(",
"match_5",
",",
"match_3",
")",
"# 3) If none found, error out with 'oligo n has no binders'",
"for",
"i",
",",
"oligo_match",
"in",
"enumerate",
"(",
"zipped",
")",
":",
"if",
"not",
"any",
"(",
"oligo_match",
")",
":",
"error",
"=",
"'Oligo {} has no binding partners.'",
".",
"format",
"(",
"i",
"+",
"1",
")",
"raise",
"AssemblyError",
"(",
"error",
")",
"# 4) There should be exactly 2 oligos that bind at 3' end but",
"# not 5'.",
"ends",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"five",
",",
"three",
")",
"in",
"enumerate",
"(",
"zipped",
")",
":",
"if",
"five",
"is",
"None",
"and",
"three",
"is",
"not",
"None",
":",
"ends",
".",
"append",
"(",
"i",
")",
"# 5) If more than 2, error with 'too many ends'.",
"if",
"len",
"(",
"ends",
")",
">",
"2",
":",
"raise",
"AssemblyError",
"(",
"'Too many (>2) end oligos found.'",
")",
"# 6) If more than 2, error with 'not enough ends'.",
"if",
"len",
"(",
"ends",
")",
"<",
"2",
":",
"raise",
"AssemblyError",
"(",
"'Not enough (<2) end oligos found.'",
")",
"# NOTE:If 1-4 are satisfied, unique linear assembly has been found (proof?)",
"# 8) Start with first end and build iteratively",
"last_index",
"=",
"ends",
"[",
"0",
"]",
"assembly",
"=",
"dna_list",
"[",
"last_index",
"]",
"flip",
"=",
"True",
"# This would be slightly less complicated if the sequences were tied to",
"# their match info in a tuple",
"# Append next region n - 1 times",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"dna_list",
")",
"-",
"1",
")",
":",
"if",
"flip",
":",
"# Next oligo needs to be flipped before concatenation",
"# Grab 3' match from last oligo's info",
"current_index",
",",
"matchlen",
"=",
"zipped",
"[",
"last_index",
"]",
"[",
"1",
"]",
"# Get new oligo sequence, make double-stranded for concatenation",
"next_oligo",
"=",
"dna_list",
"[",
"current_index",
"]",
".",
"to_ds",
"(",
")",
"# Reverse complement for concatenation",
"next_oligo",
"=",
"next_oligo",
".",
"reverse_complement",
"(",
")",
"# Don't reverse complement the next one",
"flip",
"=",
"False",
"else",
":",
"# Grab 5' match from last oligo's info",
"current_index",
",",
"matchlen",
"=",
"zipped",
"[",
"last_index",
"]",
"[",
"0",
"]",
"# Get new oligo sequence, make double-stranded for concatenation",
"next_oligo",
"=",
"dna_list",
"[",
"current_index",
"]",
".",
"to_ds",
"(",
")",
"# Reverse complement the next one",
"flip",
"=",
"True",
"# Trim overlap from new sequence",
"next_oligo",
"=",
"next_oligo",
"[",
"(",
"matchlen",
"-",
"1",
")",
":",
"]",
"# Concatenate and update last oligo's information",
"assembly",
"+=",
"next_oligo",
"last_index",
"=",
"current_index",
"if",
"reference",
":",
"if",
"assembly",
"==",
"reference",
"or",
"assembly",
"==",
"reference",
".",
"reverse_complement",
"(",
")",
":",
"return",
"assembly",
"else",
":",
"raise",
"AssemblyError",
"(",
"'Assembly did not match reference'",
")",
"else",
":",
"return",
"assembly"
]
| 44.82716 | 0.000269 |
def time_bins(header):
'''
Returns the time-axis lower bin edge values for the spectrogram.
'''
return np.arange(header['number_of_half_frames'], dtype=np.float64)*constants.bins_per_half_frame\
*(1.0 - header['over_sampling']) / header['subband_spacing_hz'] | [
"def",
"time_bins",
"(",
"header",
")",
":",
"return",
"np",
".",
"arange",
"(",
"header",
"[",
"'number_of_half_frames'",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"*",
"constants",
".",
"bins_per_half_frame",
"*",
"(",
"1.0",
"-",
"header",
"[",
"'over_sampling'",
"]",
")",
"/",
"header",
"[",
"'subband_spacing_hz'",
"]"
]
| 43.833333 | 0.022388 |
def get_parent_mode_constraints(self):
"""Return the category and subcategory keys to be set in the
subordinate mode.
:returns: (the category definition, the hazard/exposure definition)
:rtype: (dict, dict)
"""
h, e, _hc, _ec = self.selected_impact_function_constraints()
if self.parent_step in [self.step_fc_hazlayer_from_canvas,
self.step_fc_hazlayer_from_browser]:
category = layer_purpose_hazard
subcategory = h
elif self.parent_step in [self.step_fc_explayer_from_canvas,
self.step_fc_explayer_from_browser]:
category = layer_purpose_exposure
subcategory = e
elif self.parent_step:
category = layer_purpose_aggregation
subcategory = None
else:
category = None
subcategory = None
return category, subcategory | [
"def",
"get_parent_mode_constraints",
"(",
"self",
")",
":",
"h",
",",
"e",
",",
"_hc",
",",
"_ec",
"=",
"self",
".",
"selected_impact_function_constraints",
"(",
")",
"if",
"self",
".",
"parent_step",
"in",
"[",
"self",
".",
"step_fc_hazlayer_from_canvas",
",",
"self",
".",
"step_fc_hazlayer_from_browser",
"]",
":",
"category",
"=",
"layer_purpose_hazard",
"subcategory",
"=",
"h",
"elif",
"self",
".",
"parent_step",
"in",
"[",
"self",
".",
"step_fc_explayer_from_canvas",
",",
"self",
".",
"step_fc_explayer_from_browser",
"]",
":",
"category",
"=",
"layer_purpose_exposure",
"subcategory",
"=",
"e",
"elif",
"self",
".",
"parent_step",
":",
"category",
"=",
"layer_purpose_aggregation",
"subcategory",
"=",
"None",
"else",
":",
"category",
"=",
"None",
"subcategory",
"=",
"None",
"return",
"category",
",",
"subcategory"
]
| 40.913043 | 0.002077 |
def install(application, io_loop=None, **kwargs):
"""Call this to install AMQP for the Tornado application. Additional
keyword arguments are passed through to the constructor of the AMQP
object.
:param tornado.web.Application application: The tornado application
:param tornado.ioloop.IOLoop io_loop: The current IOLoop.
:rtype: bool
"""
if getattr(application, 'amqp', None) is not None:
LOGGER.warning('AMQP is already installed')
return False
kwargs.setdefault('io_loop', io_loop)
# Support AMQP_* and RABBITMQ_* variables
for prefix in {'AMQP', 'RABBITMQ'}:
key = '{}_URL'.format(prefix)
if os.environ.get(key) is not None:
LOGGER.debug('Setting URL to %s', os.environ[key])
kwargs.setdefault('url', os.environ[key])
key = '{}_CONFIRMATIONS'.format(prefix)
if os.environ.get(key) is not None:
value = os.environ[key].lower() in {'true', '1'}
LOGGER.debug('Setting enable_confirmations to %s', value)
kwargs.setdefault('enable_confirmations', value)
key = '{}_CONNECTION_ATTEMPTS'.format(prefix)
if os.environ.get(key) is not None:
value = int(os.environ[key])
LOGGER.debug('Setting connection_attempts to %s', value)
kwargs.setdefault('connection_attempts', value)
key = '{}_RECONNECT_DELAY'.format(prefix)
if os.environ.get(key) is not None:
value = float(os.environ[key])
LOGGER.debug('Setting reconnect_delay to %s', value)
kwargs.setdefault('reconnect_delay', value)
# Set the default AMQP app_id property
if application.settings.get('service') and \
application.settings.get('version'):
default_app_id = '{}/{}'.format(
application.settings['service'], application.settings['version'])
else:
default_app_id = 'sprockets.mixins.amqp/{}'.format(__version__)
kwargs.setdefault('default_app_id', default_app_id)
# Default the default URL value if not already set
kwargs.setdefault('url', 'amqp://guest:guest@localhost:5672/%2f')
LOGGER.debug('kwargs: %r', kwargs)
setattr(application, 'amqp', Client(**kwargs))
return True | [
"def",
"install",
"(",
"application",
",",
"io_loop",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"getattr",
"(",
"application",
",",
"'amqp'",
",",
"None",
")",
"is",
"not",
"None",
":",
"LOGGER",
".",
"warning",
"(",
"'AMQP is already installed'",
")",
"return",
"False",
"kwargs",
".",
"setdefault",
"(",
"'io_loop'",
",",
"io_loop",
")",
"# Support AMQP_* and RABBITMQ_* variables",
"for",
"prefix",
"in",
"{",
"'AMQP'",
",",
"'RABBITMQ'",
"}",
":",
"key",
"=",
"'{}_URL'",
".",
"format",
"(",
"prefix",
")",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"key",
")",
"is",
"not",
"None",
":",
"LOGGER",
".",
"debug",
"(",
"'Setting URL to %s'",
",",
"os",
".",
"environ",
"[",
"key",
"]",
")",
"kwargs",
".",
"setdefault",
"(",
"'url'",
",",
"os",
".",
"environ",
"[",
"key",
"]",
")",
"key",
"=",
"'{}_CONFIRMATIONS'",
".",
"format",
"(",
"prefix",
")",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"key",
")",
"is",
"not",
"None",
":",
"value",
"=",
"os",
".",
"environ",
"[",
"key",
"]",
".",
"lower",
"(",
")",
"in",
"{",
"'true'",
",",
"'1'",
"}",
"LOGGER",
".",
"debug",
"(",
"'Setting enable_confirmations to %s'",
",",
"value",
")",
"kwargs",
".",
"setdefault",
"(",
"'enable_confirmations'",
",",
"value",
")",
"key",
"=",
"'{}_CONNECTION_ATTEMPTS'",
".",
"format",
"(",
"prefix",
")",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"key",
")",
"is",
"not",
"None",
":",
"value",
"=",
"int",
"(",
"os",
".",
"environ",
"[",
"key",
"]",
")",
"LOGGER",
".",
"debug",
"(",
"'Setting connection_attempts to %s'",
",",
"value",
")",
"kwargs",
".",
"setdefault",
"(",
"'connection_attempts'",
",",
"value",
")",
"key",
"=",
"'{}_RECONNECT_DELAY'",
".",
"format",
"(",
"prefix",
")",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"key",
")",
"is",
"not",
"None",
":",
"value",
"=",
"float",
"(",
"os",
".",
"environ",
"[",
"key",
"]",
")",
"LOGGER",
".",
"debug",
"(",
"'Setting reconnect_delay to %s'",
",",
"value",
")",
"kwargs",
".",
"setdefault",
"(",
"'reconnect_delay'",
",",
"value",
")",
"# Set the default AMQP app_id property",
"if",
"application",
".",
"settings",
".",
"get",
"(",
"'service'",
")",
"and",
"application",
".",
"settings",
".",
"get",
"(",
"'version'",
")",
":",
"default_app_id",
"=",
"'{}/{}'",
".",
"format",
"(",
"application",
".",
"settings",
"[",
"'service'",
"]",
",",
"application",
".",
"settings",
"[",
"'version'",
"]",
")",
"else",
":",
"default_app_id",
"=",
"'sprockets.mixins.amqp/{}'",
".",
"format",
"(",
"__version__",
")",
"kwargs",
".",
"setdefault",
"(",
"'default_app_id'",
",",
"default_app_id",
")",
"# Default the default URL value if not already set",
"kwargs",
".",
"setdefault",
"(",
"'url'",
",",
"'amqp://guest:guest@localhost:5672/%2f'",
")",
"LOGGER",
".",
"debug",
"(",
"'kwargs: %r'",
",",
"kwargs",
")",
"setattr",
"(",
"application",
",",
"'amqp'",
",",
"Client",
"(",
"*",
"*",
"kwargs",
")",
")",
"return",
"True"
]
| 38.842105 | 0.000441 |
def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory]=None):
""" Vel factory function """
if input_block is None:
input_block = IdentityFactory()
return PolicyGradientRnnModelFactory(
input_block=input_block,
backbone=backbone
) | [
"def",
"create",
"(",
"backbone",
":",
"ModelFactory",
",",
"input_block",
":",
"typing",
".",
"Optional",
"[",
"ModelFactory",
"]",
"=",
"None",
")",
":",
"if",
"input_block",
"is",
"None",
":",
"input_block",
"=",
"IdentityFactory",
"(",
")",
"return",
"PolicyGradientRnnModelFactory",
"(",
"input_block",
"=",
"input_block",
",",
"backbone",
"=",
"backbone",
")"
]
| 31.666667 | 0.013652 |
def shutdown(opts):
'''
Closes connection with the device.
'''
try:
if not NETWORK_DEVICE.get('UP', False):
raise Exception('not connected!')
NETWORK_DEVICE.get('DRIVER').close()
except Exception as error:
port = NETWORK_DEVICE.get('OPTIONAL_ARGS', {}).get('port')
log.error(
'Cannot close connection with %s%s! Please check error: %s',
NETWORK_DEVICE.get('HOSTNAME', '[unknown hostname]'),
':{0}'.format(port) if port else '',
error
)
return True | [
"def",
"shutdown",
"(",
"opts",
")",
":",
"try",
":",
"if",
"not",
"NETWORK_DEVICE",
".",
"get",
"(",
"'UP'",
",",
"False",
")",
":",
"raise",
"Exception",
"(",
"'not connected!'",
")",
"NETWORK_DEVICE",
".",
"get",
"(",
"'DRIVER'",
")",
".",
"close",
"(",
")",
"except",
"Exception",
"as",
"error",
":",
"port",
"=",
"NETWORK_DEVICE",
".",
"get",
"(",
"'OPTIONAL_ARGS'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'port'",
")",
"log",
".",
"error",
"(",
"'Cannot close connection with %s%s! Please check error: %s'",
",",
"NETWORK_DEVICE",
".",
"get",
"(",
"'HOSTNAME'",
",",
"'[unknown hostname]'",
")",
",",
"':{0}'",
".",
"format",
"(",
"port",
")",
"if",
"port",
"else",
"''",
",",
"error",
")",
"return",
"True"
]
| 30.833333 | 0.001748 |
def clear(self):
"""Remove all queue entries."""
self.mutex.acquire()
self.queue.clear()
self.mutex.release() | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"mutex",
".",
"acquire",
"(",
")",
"self",
".",
"queue",
".",
"clear",
"(",
")",
"self",
".",
"mutex",
".",
"release",
"(",
")"
]
| 27.4 | 0.014184 |
def make_default(spec):
"""Create an empty document that follows spec. Any field with a default
will take that value, required or not. Required fields with no default
will get a value of None. If your default value does not match your
type or otherwise customized Field class, this can create a spec that
fails validation."""
doc = {}
for key, field in spec.iteritems():
if field.default is not no_default:
doc[key] = field.default
return doc | [
"def",
"make_default",
"(",
"spec",
")",
":",
"doc",
"=",
"{",
"}",
"for",
"key",
",",
"field",
"in",
"spec",
".",
"iteritems",
"(",
")",
":",
"if",
"field",
".",
"default",
"is",
"not",
"no_default",
":",
"doc",
"[",
"key",
"]",
"=",
"field",
".",
"default",
"return",
"doc"
]
| 44.272727 | 0.002012 |
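A hedged, self-contained sketch of how make_default behaves; the Field class and no_default sentinel below are simplified stand-ins for the real ones, and dict.items() replaces the Python 2 iteritems() call::

    no_default = object()   # stand-in sentinel

    class Field(object):
        def __init__(self, default=no_default):
            self.default = default

    def make_default(spec):
        doc = {}
        for key, field in spec.items():
            if field.default is not no_default:
                doc[key] = field.default
        return doc

    spec = {"name": Field(default="anon"), "age": Field()}
    print(make_default(spec))   # {'name': 'anon'} -- keys with no default are simply omitted here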
def _nest(self):
"""nests the roles (creates roles hierarchy)"""
self._flatten()
parent_roles = {}
for roleid in self.flatten:
role = copy.deepcopy(self.flatten[roleid])
# Display name is mandatory
if 'display_name' not in role:
raise MissingKey('display_name', role, self.role_file)
if 'description' not in role:
raise MissingKey('description', role, self.role_file)
# Backend is mandatory
if 'backends_groups' not in role:
raise MissingKey('backends_groups', role, self.role_file)
# Create the list of backends
for backend in role['backends_groups']:
self.backends.add(backend)
if roleid not in self.graph:
self.graph[roleid] = {
'parent_roles': set([]),
'sub_roles': set([])
}
# Create the nested groups
for roleid in self.flatten:
role = copy.deepcopy(self.flatten[roleid])
# create reverse groups 2 roles
for b in role['backends_groups']:
for g in role['backends_groups'][b]:
if b not in self.group2roles:
self.group2roles[b] = {}
if g not in self.group2roles[b]:
self.group2roles[b][g] = set([])
self.group2roles[b][g].add(roleid)
parent_roles[roleid] = []
for roleid2 in self.flatten:
role2 = copy.deepcopy(self.flatten[roleid2])
if self._is_parent(roleid, roleid2):
parent_roles[roleid].append(roleid2)
self.graph[roleid2]['parent_roles'].add(roleid)
self.graph[roleid]['sub_roles'].add(roleid2)
for r in parent_roles:
for p in parent_roles[r]:
for p2 in parent_roles[r]:
if p != p2 and p in parent_roles[p2]:
parent_roles[r].remove(p)
def nest(p):
ret = copy.deepcopy(self.flatten[p])
ret['subroles'] = {}
if len(parent_roles[p]) == 0:
return ret
else:
for i in parent_roles[p]:
sub = nest(i)
ret['subroles'][i] = sub
return ret
for p in parent_roles.keys():
if p in parent_roles:
self.roles[p] = nest(p)
for roleid in self.roles:
role = self.roles[roleid]
# Create the list of roles which are ldapcherry admins
if 'LC_admins' in role and role['LC_admins']:
self.admin_roles.append(roleid)
self._set_admin(role) | [
"def",
"_nest",
"(",
"self",
")",
":",
"self",
".",
"_flatten",
"(",
")",
"parent_roles",
"=",
"{",
"}",
"for",
"roleid",
"in",
"self",
".",
"flatten",
":",
"role",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"flatten",
"[",
"roleid",
"]",
")",
"# Display name is mandatory",
"if",
"'display_name'",
"not",
"in",
"role",
":",
"raise",
"MissingKey",
"(",
"'display_name'",
",",
"role",
",",
"self",
".",
"role_file",
")",
"if",
"'description'",
"not",
"in",
"role",
":",
"raise",
"MissingKey",
"(",
"'description'",
",",
"role",
",",
"self",
".",
"role_file",
")",
"# Backend is mandatory",
"if",
"'backends_groups'",
"not",
"in",
"role",
":",
"raise",
"MissingKey",
"(",
"'backends_groups'",
",",
"role",
",",
"self",
".",
"role_file",
")",
"# Create the list of backends",
"for",
"backend",
"in",
"role",
"[",
"'backends_groups'",
"]",
":",
"self",
".",
"backends",
".",
"add",
"(",
"backend",
")",
"if",
"roleid",
"not",
"in",
"self",
".",
"graph",
":",
"self",
".",
"graph",
"[",
"roleid",
"]",
"=",
"{",
"'parent_roles'",
":",
"set",
"(",
"[",
"]",
")",
",",
"'sub_roles'",
":",
"set",
"(",
"[",
"]",
")",
"}",
"# Create the nested groups",
"for",
"roleid",
"in",
"self",
".",
"flatten",
":",
"role",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"flatten",
"[",
"roleid",
"]",
")",
"# create reverse groups 2 roles",
"for",
"b",
"in",
"role",
"[",
"'backends_groups'",
"]",
":",
"for",
"g",
"in",
"role",
"[",
"'backends_groups'",
"]",
"[",
"b",
"]",
":",
"if",
"b",
"not",
"in",
"self",
".",
"group2roles",
":",
"self",
".",
"group2roles",
"[",
"b",
"]",
"=",
"{",
"}",
"if",
"g",
"not",
"in",
"self",
".",
"group2roles",
"[",
"b",
"]",
":",
"self",
".",
"group2roles",
"[",
"b",
"]",
"[",
"g",
"]",
"=",
"set",
"(",
"[",
"]",
")",
"self",
".",
"group2roles",
"[",
"b",
"]",
"[",
"g",
"]",
".",
"add",
"(",
"roleid",
")",
"parent_roles",
"[",
"roleid",
"]",
"=",
"[",
"]",
"for",
"roleid2",
"in",
"self",
".",
"flatten",
":",
"role2",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"flatten",
"[",
"roleid2",
"]",
")",
"if",
"self",
".",
"_is_parent",
"(",
"roleid",
",",
"roleid2",
")",
":",
"parent_roles",
"[",
"roleid",
"]",
".",
"append",
"(",
"roleid2",
")",
"self",
".",
"graph",
"[",
"roleid2",
"]",
"[",
"'parent_roles'",
"]",
".",
"add",
"(",
"roleid",
")",
"self",
".",
"graph",
"[",
"roleid",
"]",
"[",
"'sub_roles'",
"]",
".",
"add",
"(",
"roleid2",
")",
"for",
"r",
"in",
"parent_roles",
":",
"for",
"p",
"in",
"parent_roles",
"[",
"r",
"]",
":",
"for",
"p2",
"in",
"parent_roles",
"[",
"r",
"]",
":",
"if",
"p",
"!=",
"p2",
"and",
"p",
"in",
"parent_roles",
"[",
"p2",
"]",
":",
"parent_roles",
"[",
"r",
"]",
".",
"remove",
"(",
"p",
")",
"def",
"nest",
"(",
"p",
")",
":",
"ret",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"flatten",
"[",
"p",
"]",
")",
"ret",
"[",
"'subroles'",
"]",
"=",
"{",
"}",
"if",
"len",
"(",
"parent_roles",
"[",
"p",
"]",
")",
"==",
"0",
":",
"return",
"ret",
"else",
":",
"for",
"i",
"in",
"parent_roles",
"[",
"p",
"]",
":",
"sub",
"=",
"nest",
"(",
"i",
")",
"ret",
"[",
"'subroles'",
"]",
"[",
"i",
"]",
"=",
"sub",
"return",
"ret",
"for",
"p",
"in",
"parent_roles",
".",
"keys",
"(",
")",
":",
"if",
"p",
"in",
"parent_roles",
":",
"self",
".",
"roles",
"[",
"p",
"]",
"=",
"nest",
"(",
"p",
")",
"for",
"roleid",
"in",
"self",
".",
"roles",
":",
"role",
"=",
"self",
".",
"roles",
"[",
"roleid",
"]",
"# Create the list of roles which are ldapcherry admins",
"if",
"'LC_admins'",
"in",
"role",
"and",
"role",
"[",
"'LC_admins'",
"]",
":",
"self",
".",
"admin_roles",
".",
"append",
"(",
"roleid",
")",
"self",
".",
"_set_admin",
"(",
"role",
")"
]
| 36.866667 | 0.000704 |
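For orientation, `_nest` above insists that every role define `display_name`, `description`, and `backends_groups`, and treats `LC_admins` as the marker for ldapcherry admin roles. A purely illustrative role structure satisfying those checks (all values invented) could look like:

roles = {
    'admin': {
        'display_name': 'Administrators',
        'description': 'Full access to the directory',
        'backends_groups': {'ldap': ['cn=admins,ou=groups,dc=example,dc=org']},
        'LC_admins': True,
    },
    'users': {
        'display_name': 'Users',
        'description': 'Regular accounts',
        'backends_groups': {'ldap': ['cn=users,ou=groups,dc=example,dc=org']},
    },
}

The parent/child relationships between such roles are then derived by the `_is_parent` check, whose implementation is not part of this row.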
def process_column(self, idx, value):
"Process a single column."
if value is not None:
value = str(value).decode(self.encoding)
return value | [
"def",
"process_column",
"(",
"self",
",",
"idx",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"value",
"=",
"str",
"(",
"value",
")",
".",
"decode",
"(",
"self",
".",
"encoding",
")",
"return",
"value"
]
| 34.4 | 0.011364 |
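`process_column` above is Python 2 code: on Python 3, `str(value).decode(...)` raises `AttributeError` because `str` has no `decode` method. A hedged Python 3 rendering of the same intent, assuming the raw cell value may arrive as `bytes`:

def process_column(self, idx, value):
    "Process a single column (Python 3 sketch of the row above)."
    if value is not None:
        # Decode bytes with the configured encoding; pass anything else through str().
        value = value.decode(self.encoding) if isinstance(value, bytes) else str(value)
    return value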
def mtf_resnet_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.no_data_parallelism = True
hparams.use_fixed_batch_size = True
hparams.batch_size = 32
hparams.max_length = 3072
hparams.hidden_size = 256
hparams.label_smoothing = 0.0
# 8-way model-parallelism
hparams.add_hparam("mesh_shape", "batch:8")
hparams.add_hparam("layout", "batch:batch")
hparams.add_hparam("filter_size", 1024)
hparams.add_hparam("num_layers", 6)
# Share weights between input and target embeddings
hparams.shared_embedding = True
hparams.shared_embedding_and_softmax_weights = True
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.add_hparam("d_kv", 32)
# Image related hparams
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
hparams.add_hparam("row_blocks", 1)
hparams.add_hparam("col_blocks", 1)
hparams.add_hparam("rows_size", 32)
hparams.add_hparam("cols_size", 32)
# Model-specific parameters
hparams.add_hparam("layer_sizes", [3, 4, 6, 3])
hparams.add_hparam("filter_sizes", [64, 64, 128, 256, 512])
hparams.add_hparam("is_cifar", False)
# Variable init
hparams.initializer = "normal_unit_scaling"
hparams.initializer_gain = 2.
# TODO(nikip): Change optimization scheme?
hparams.learning_rate = 0.1
return hparams | [
"def",
"mtf_resnet_base",
"(",
")",
":",
"hparams",
"=",
"common_hparams",
".",
"basic_params1",
"(",
")",
"hparams",
".",
"no_data_parallelism",
"=",
"True",
"hparams",
".",
"use_fixed_batch_size",
"=",
"True",
"hparams",
".",
"batch_size",
"=",
"32",
"hparams",
".",
"max_length",
"=",
"3072",
"hparams",
".",
"hidden_size",
"=",
"256",
"hparams",
".",
"label_smoothing",
"=",
"0.0",
"# 8-way model-parallelism",
"hparams",
".",
"add_hparam",
"(",
"\"mesh_shape\"",
",",
"\"batch:8\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"layout\"",
",",
"\"batch:batch\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"filter_size\"",
",",
"1024",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_layers\"",
",",
"6",
")",
"# Share weights between input and target embeddings",
"hparams",
".",
"shared_embedding",
"=",
"True",
"hparams",
".",
"shared_embedding_and_softmax_weights",
"=",
"True",
"hparams",
".",
"optimizer",
"=",
"\"Adafactor\"",
"hparams",
".",
"learning_rate_schedule",
"=",
"\"rsqrt_decay\"",
"hparams",
".",
"learning_rate_warmup_steps",
"=",
"10000",
"hparams",
".",
"add_hparam",
"(",
"\"d_kv\"",
",",
"32",
")",
"# Image related hparams",
"hparams",
".",
"add_hparam",
"(",
"\"img_len\"",
",",
"32",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_channels\"",
",",
"3",
")",
"hparams",
".",
"add_hparam",
"(",
"\"row_blocks\"",
",",
"1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"col_blocks\"",
",",
"1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"rows_size\"",
",",
"32",
")",
"hparams",
".",
"add_hparam",
"(",
"\"cols_size\"",
",",
"32",
")",
"# Model-specific parameters",
"hparams",
".",
"add_hparam",
"(",
"\"layer_sizes\"",
",",
"[",
"3",
",",
"4",
",",
"6",
",",
"3",
"]",
")",
"hparams",
".",
"add_hparam",
"(",
"\"filter_sizes\"",
",",
"[",
"64",
",",
"64",
",",
"128",
",",
"256",
",",
"512",
"]",
")",
"hparams",
".",
"add_hparam",
"(",
"\"is_cifar\"",
",",
"False",
")",
"# Variable init",
"hparams",
".",
"initializer",
"=",
"\"normal_unit_scaling\"",
"hparams",
".",
"initializer_gain",
"=",
"2.",
"# TODO(nikip): Change optimization scheme?",
"hparams",
".",
"learning_rate",
"=",
"0.1",
"return",
"hparams"
]
| 31.272727 | 0.026779 |
def get_time(self, idx=0):
"""Time of the data
Returns nan if the time is not defined
"""
thetime = super(SingleData, self).get_time(idx=0)
return thetime | [
"def",
"get_time",
"(",
"self",
",",
"idx",
"=",
"0",
")",
":",
"thetime",
"=",
"super",
"(",
"SingleData",
",",
"self",
")",
".",
"get_time",
"(",
"idx",
"=",
"0",
")",
"return",
"thetime"
]
| 27 | 0.010256 |
def main():
"""Upgrades the firmware of the J-Links connected to a Windows device.
Returns:
None.
Raises:
OSError: if there are no J-Link software packages.
"""
windows_libraries = list(pylink.Library.find_library_windows())
latest_library = None
for lib in windows_libraries:
if os.path.dirname(lib).endswith('JLinkARM'):
# Always use the one pointed to by the 'JLinkARM' directory.
latest_library = lib
break
elif latest_library is None:
latest_library = lib
elif os.path.dirname(lib) > os.path.dirname(latest_library):
latest_library = lib
if latest_library is None:
raise OSError('No J-Link library found.')
library = pylink.Library(latest_library)
jlink = pylink.JLink(lib=library)
print('Found version: %s' % jlink.version)
for emu in jlink.connected_emulators():
jlink.disable_dialog_boxes()
jlink.open(serial_no=emu.SerialNumber)
jlink.sync_firmware()
print('Updated emulator with serial number %s' % emu.SerialNumber)
return None | [
"def",
"main",
"(",
")",
":",
"windows_libraries",
"=",
"list",
"(",
"pylink",
".",
"Library",
".",
"find_library_windows",
"(",
")",
")",
"latest_library",
"=",
"None",
"for",
"lib",
"in",
"windows_libraries",
":",
"if",
"os",
".",
"path",
".",
"dirname",
"(",
"lib",
")",
".",
"endswith",
"(",
"'JLinkARM'",
")",
":",
"# Always use the one pointed to by the 'JLinkARM' directory.",
"latest_library",
"=",
"lib",
"break",
"elif",
"latest_library",
"is",
"None",
":",
"latest_library",
"=",
"lib",
"elif",
"os",
".",
"path",
".",
"dirname",
"(",
"lib",
")",
">",
"os",
".",
"path",
".",
"dirname",
"(",
"latest_library",
")",
":",
"latest_library",
"=",
"lib",
"if",
"latest_library",
"is",
"None",
":",
"raise",
"OSError",
"(",
"'No J-Link library found.'",
")",
"library",
"=",
"pylink",
".",
"Library",
"(",
"latest_library",
")",
"jlink",
"=",
"pylink",
".",
"JLink",
"(",
"lib",
"=",
"library",
")",
"print",
"(",
"'Found version: %s'",
"%",
"jlink",
".",
"version",
")",
"for",
"emu",
"in",
"jlink",
".",
"connected_emulators",
"(",
")",
":",
"jlink",
".",
"disable_dialog_boxes",
"(",
")",
"jlink",
".",
"open",
"(",
"serial_no",
"=",
"emu",
".",
"SerialNumber",
")",
"jlink",
".",
"sync_firmware",
"(",
")",
"print",
"(",
"'Updated emulator with serial number %s'",
"%",
"emu",
".",
"SerialNumber",
")",
"return",
"None"
]
| 30.5 | 0.000883 |
def request_version(req_headers):
''' Determines the bakery protocol version from a client request.
If the protocol cannot be determined, or is invalid, the original version
of the protocol is used. If a later version is found, the latest known
version is used, which is OK because versions are backwardly compatible.
@param req_headers: the request headers as a dict.
@return: bakery protocol version (for example macaroonbakery.VERSION_1)
'''
vs = req_headers.get(BAKERY_PROTOCOL_HEADER)
if vs is None:
# No header - use backward compatibility mode.
return bakery.VERSION_1
try:
x = int(vs)
except ValueError:
# Badly formed header - use backward compatibility mode.
return bakery.VERSION_1
if x > bakery.LATEST_VERSION:
# Later version than we know about - use the
# latest version that we can.
return bakery.LATEST_VERSION
return x | [
"def",
"request_version",
"(",
"req_headers",
")",
":",
"vs",
"=",
"req_headers",
".",
"get",
"(",
"BAKERY_PROTOCOL_HEADER",
")",
"if",
"vs",
"is",
"None",
":",
"# No header - use backward compatibility mode.",
"return",
"bakery",
".",
"VERSION_1",
"try",
":",
"x",
"=",
"int",
"(",
"vs",
")",
"except",
"ValueError",
":",
"# Badly formed header - use backward compatibility mode.",
"return",
"bakery",
".",
"VERSION_1",
"if",
"x",
">",
"bakery",
".",
"LATEST_VERSION",
":",
"# Later version than we know about - use the",
"# latest version that we can.",
"return",
"bakery",
".",
"LATEST_VERSION",
"return",
"x"
]
| 40.434783 | 0.00105 |
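To make the negotiation in `request_version` concrete, here is a self-contained sketch with stand-in constants (the real header name and version numbers live in `macaroonbakery` and may differ):

BAKERY_PROTOCOL_HEADER = 'Bakery-Protocol-Version'   # stand-in header name
VERSION_1, LATEST_VERSION = 1, 3                     # stand-in version constants

def request_version(req_headers):
    vs = req_headers.get(BAKERY_PROTOCOL_HEADER)
    if vs is None:
        return VERSION_1           # no header: backward-compatibility mode
    try:
        x = int(vs)
    except ValueError:
        return VERSION_1           # malformed header: backward-compatibility mode
    return min(x, LATEST_VERSION)  # clamp future versions to the latest we know

assert request_version({}) == VERSION_1
assert request_version({BAKERY_PROTOCOL_HEADER: 'abc'}) == VERSION_1
assert request_version({BAKERY_PROTOCOL_HEADER: '99'}) == LATEST_VERSION

Clamping with `min` is equivalent to the explicit `if x > LATEST_VERSION` branch in the row above, and is safe because versions are backwardly compatible.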
def _process_delivery(self, pn_delivery):
"""Check if the delivery can be processed."""
if pn_delivery.tag in self._send_requests:
if pn_delivery.settled or pn_delivery.remote_state:
# remote has reached a 'terminal state'
outcome = pn_delivery.remote_state
state = SenderLink._DISPOSITION_STATE_MAP.get(outcome,
self.UNKNOWN)
pn_disposition = pn_delivery.remote
info = {}
if state == SenderLink.REJECTED:
if pn_disposition.condition:
info["condition"] = pn_disposition.condition
elif state == SenderLink.MODIFIED:
info["delivery-failed"] = pn_disposition.failed
info["undeliverable-here"] = pn_disposition.undeliverable
annotations = pn_disposition.annotations
if annotations:
info["message-annotations"] = annotations
send_req = self._send_requests.pop(pn_delivery.tag)
send_req.destroy(state, info)
pn_delivery.settle()
elif pn_delivery.writable:
# we can now send on this delivery
if self._pending_sends:
tag = self._pending_sends.popleft()
send_req = self._send_requests[tag]
self._write_msg(pn_delivery, send_req)
else:
# tag no longer valid, expired or canceled send?
LOG.debug("Delivery ignored, tag=%s", str(pn_delivery.tag))
pn_delivery.settle() | [
"def",
"_process_delivery",
"(",
"self",
",",
"pn_delivery",
")",
":",
"if",
"pn_delivery",
".",
"tag",
"in",
"self",
".",
"_send_requests",
":",
"if",
"pn_delivery",
".",
"settled",
"or",
"pn_delivery",
".",
"remote_state",
":",
"# remote has reached a 'terminal state'",
"outcome",
"=",
"pn_delivery",
".",
"remote_state",
"state",
"=",
"SenderLink",
".",
"_DISPOSITION_STATE_MAP",
".",
"get",
"(",
"outcome",
",",
"self",
".",
"UNKNOWN",
")",
"pn_disposition",
"=",
"pn_delivery",
".",
"remote",
"info",
"=",
"{",
"}",
"if",
"state",
"==",
"SenderLink",
".",
"REJECTED",
":",
"if",
"pn_disposition",
".",
"condition",
":",
"info",
"[",
"\"condition\"",
"]",
"=",
"pn_disposition",
".",
"condition",
"elif",
"state",
"==",
"SenderLink",
".",
"MODIFIED",
":",
"info",
"[",
"\"delivery-failed\"",
"]",
"=",
"pn_disposition",
".",
"failed",
"info",
"[",
"\"undeliverable-here\"",
"]",
"=",
"pn_disposition",
".",
"undeliverable",
"annotations",
"=",
"pn_disposition",
".",
"annotations",
"if",
"annotations",
":",
"info",
"[",
"\"message-annotations\"",
"]",
"=",
"annotations",
"send_req",
"=",
"self",
".",
"_send_requests",
".",
"pop",
"(",
"pn_delivery",
".",
"tag",
")",
"send_req",
".",
"destroy",
"(",
"state",
",",
"info",
")",
"pn_delivery",
".",
"settle",
"(",
")",
"elif",
"pn_delivery",
".",
"writable",
":",
"# we can now send on this delivery",
"if",
"self",
".",
"_pending_sends",
":",
"tag",
"=",
"self",
".",
"_pending_sends",
".",
"popleft",
"(",
")",
"send_req",
"=",
"self",
".",
"_send_requests",
"[",
"tag",
"]",
"self",
".",
"_write_msg",
"(",
"pn_delivery",
",",
"send_req",
")",
"else",
":",
"# tag no longer valid, expired or canceled send?",
"LOG",
".",
"debug",
"(",
"\"Delivery ignored, tag=%s\"",
",",
"str",
"(",
"pn_delivery",
".",
"tag",
")",
")",
"pn_delivery",
".",
"settle",
"(",
")"
]
| 52.1875 | 0.001176 |
def lines_of_content(content, width):
"""
    calculate the actual number of rows the content occupies at a specific terminal width
"""
result = 0
if isinstance(content, list):
for line in content:
_line = preprocess(line)
result += ceil(line_width(_line) / width)
elif isinstance(content, dict):
for k, v in content.items():
            # adding 2 for the colon and space ": "
_k, _v = map(preprocess, (k, v))
result += ceil((line_width(_k) + line_width(_v) + 2) / width)
return int(result) | [
"def",
"lines_of_content",
"(",
"content",
",",
"width",
")",
":",
"result",
"=",
"0",
"if",
"isinstance",
"(",
"content",
",",
"list",
")",
":",
"for",
"line",
"in",
"content",
":",
"_line",
"=",
"preprocess",
"(",
"line",
")",
"result",
"+=",
"ceil",
"(",
"line_width",
"(",
"_line",
")",
"/",
"width",
")",
"elif",
"isinstance",
"(",
"content",
",",
"dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"content",
".",
"items",
"(",
")",
":",
"# 加2是算上行内冒号和空格的宽度",
"# adding 2 for the for the colon and space \": \"",
"_k",
",",
"_v",
"=",
"map",
"(",
"preprocess",
",",
"(",
"k",
",",
"v",
")",
")",
"result",
"+=",
"ceil",
"(",
"(",
"line_width",
"(",
"_k",
")",
"+",
"line_width",
"(",
"_v",
")",
"+",
"2",
")",
"/",
"width",
")",
"return",
"int",
"(",
"result",
")"
]
| 34.941176 | 0.001639 |
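To isolate the arithmetic in `lines_of_content`, here is a simplified sketch that assumes every character occupies one terminal cell; the real `preprocess` and `line_width` helpers (not shown in this row) also handle double-width CJK characters:

from math import ceil

def rows_needed(content, width):
    # Simplified stand-in: len() replaces line_width(preprocess(...)).
    total = 0
    if isinstance(content, list):
        for line in content:
            total += ceil(len(line) / width)
    elif isinstance(content, dict):
        for k, v in content.items():
            total += ceil((len(k) + len(v) + 2) / width)  # +2 for ": "
    return int(total)

print(rows_needed(["hello world"], 5))    # 3: an 11-cell line wraps onto 3 rows of width 5
print(rows_needed({"key": "value"}, 80))  # 1: "key: value" fits on a single row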
def check_repo_ok(self):
"""
Make sure that the ns-3 repository's HEAD commit is the same as the one
saved in the campaign database, and that the ns-3 repository is clean
(i.e., no untracked or modified files exist).
"""
from git import Repo, exc
# Check that git is at the expected commit and that the repo is not
# dirty
if self.runner is not None:
path = self.runner.path
try:
repo = Repo(path)
except(exc.InvalidGitRepositoryError):
raise Exception("No git repository detected.\nIn order to "
"use SEM and its reproducibility enforcing "
"features, please create a git repository at "
"the root of your ns-3 project.")
current_commit = repo.head.commit.hexsha
campaign_commit = self.db.get_commit()
if repo.is_dirty(untracked_files=True):
raise Exception("ns-3 repository is not clean")
if current_commit != campaign_commit:
raise Exception("ns-3 repository is on a different commit "
"from the one specified in the campaign") | [
"def",
"check_repo_ok",
"(",
"self",
")",
":",
"from",
"git",
"import",
"Repo",
",",
"exc",
"# Check that git is at the expected commit and that the repo is not",
"# dirty",
"if",
"self",
".",
"runner",
"is",
"not",
"None",
":",
"path",
"=",
"self",
".",
"runner",
".",
"path",
"try",
":",
"repo",
"=",
"Repo",
"(",
"path",
")",
"except",
"(",
"exc",
".",
"InvalidGitRepositoryError",
")",
":",
"raise",
"Exception",
"(",
"\"No git repository detected.\\nIn order to \"",
"\"use SEM and its reproducibility enforcing \"",
"\"features, please create a git repository at \"",
"\"the root of your ns-3 project.\"",
")",
"current_commit",
"=",
"repo",
".",
"head",
".",
"commit",
".",
"hexsha",
"campaign_commit",
"=",
"self",
".",
"db",
".",
"get_commit",
"(",
")",
"if",
"repo",
".",
"is_dirty",
"(",
"untracked_files",
"=",
"True",
")",
":",
"raise",
"Exception",
"(",
"\"ns-3 repository is not clean\"",
")",
"if",
"current_commit",
"!=",
"campaign_commit",
":",
"raise",
"Exception",
"(",
"\"ns-3 repository is on a different commit \"",
"\"from the one specified in the campaign\"",
")"
]
| 46.444444 | 0.001563 |
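The commit-and-cleanliness check in `check_repo_ok` can be lifted out of the campaign/runner context into a small standalone helper; this sketch assumes GitPython is installed and uses only the calls already visible in the row above:

from git import Repo, exc

def repo_matches(path, expected_commit):
    """Return True only if `path` is a clean git repo whose HEAD is `expected_commit`."""
    try:
        repo = Repo(path)
    except exc.InvalidGitRepositoryError:
        return False                             # not a git repository at all
    if repo.is_dirty(untracked_files=True):
        return False                             # modified or untracked files present
    return repo.head.commit.hexsha == expected_commit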
def suites(self, request, pk=None):
"""
List of test suite names available in this project
"""
suites_names = self.get_object().suites.values_list('slug')
suites_metadata = SuiteMetadata.objects.filter(kind='suite', suite__in=suites_names)
page = self.paginate_queryset(suites_metadata)
serializer = SuiteMetadataSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(serializer.data) | [
"def",
"suites",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"suites_names",
"=",
"self",
".",
"get_object",
"(",
")",
".",
"suites",
".",
"values_list",
"(",
"'slug'",
")",
"suites_metadata",
"=",
"SuiteMetadata",
".",
"objects",
".",
"filter",
"(",
"kind",
"=",
"'suite'",
",",
"suite__in",
"=",
"suites_names",
")",
"page",
"=",
"self",
".",
"paginate_queryset",
"(",
"suites_metadata",
")",
"serializer",
"=",
"SuiteMetadataSerializer",
"(",
"page",
",",
"many",
"=",
"True",
",",
"context",
"=",
"{",
"'request'",
":",
"request",
"}",
")",
"return",
"self",
".",
"get_paginated_response",
"(",
"serializer",
".",
"data",
")"
]
| 53.111111 | 0.00823 |
def register():
"""Plugin registration."""
if not plim:
logger.warning('`slim` failed to load dependency `plim`. '
'`slim` plugin not loaded.')
return
if not mako:
logger.warning('`slim` failed to load dependency `mako`. '
'`slim` plugin not loaded.')
return
if not bs:
logger.warning('`slim` failed to load dependency `BeautifulSoup4`. '
'`slim` plugin not loaded.')
return
if not minify:
logger.warning('`slim` failed to load dependency `htmlmin`. '
'`slim` plugin not loaded.')
return
signals.get_writer.connect(get_writer) | [
"def",
"register",
"(",
")",
":",
"if",
"not",
"plim",
":",
"logger",
".",
"warning",
"(",
"'`slim` failed to load dependency `plim`. '",
"'`slim` plugin not loaded.'",
")",
"return",
"if",
"not",
"mako",
":",
"logger",
".",
"warning",
"(",
"'`slim` failed to load dependency `mako`. '",
"'`slim` plugin not loaded.'",
")",
"return",
"if",
"not",
"bs",
":",
"logger",
".",
"warning",
"(",
"'`slim` failed to load dependency `BeautifulSoup4`. '",
"'`slim` plugin not loaded.'",
")",
"return",
"if",
"not",
"minify",
":",
"logger",
".",
"warning",
"(",
"'`slim` failed to load dependency `htmlmin`. '",
"'`slim` plugin not loaded.'",
")",
"return",
"signals",
".",
"get_writer",
".",
"connect",
"(",
"get_writer",
")"
]
| 34.4 | 0.001414 |
def ticket_forms_reorder(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/ticket_forms#reorder-ticket-forms"
api_path = "/api/v2/ticket_forms/reorder.json"
return self.call(api_path, method="PUT", data=data, **kwargs) | [
"def",
"ticket_forms_reorder",
"(",
"self",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/ticket_forms/reorder.json\"",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"method",
"=",
"\"PUT\"",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
]
| 65.5 | 0.011321 |
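A hypothetical call through the wrapper in the last row; judging from the linked Zendesk endpoint the body carries the desired ordering of form ids, but the exact payload key used here (`ticket_form_ids`) is an assumption rather than something stated in the row:

# `client` is assumed to be an instance of the API wrapper class exposing ticket_forms_reorder.
client.ticket_forms_reorder({"ticket_form_ids": [47, 11, 3]})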