text (string, lengths 94 to 87.1k) | code_tokens (sequence) | avg_line_len (float64, 7.91 to 668) | score (sequence)
---|---|---|---|
def is_valid_mpls_label(label):
    """Validates `label` according to MPLS label rules.

    RFC 3032 says:
        The label is a 20-bit field.
        A value of 0 represents the "IPv4 Explicit NULL Label".
        A value of 1 represents the "Router Alert Label".
        A value of 2 represents the "IPv6 Explicit NULL Label".
        A value of 3 represents the "Implicit NULL Label".
        Values 4-15 are reserved.
    """
    # Requires `import numbers`; a 20-bit label ranges over 0..2**20 - 1.
    if (not isinstance(label, numbers.Integral) or
            (4 <= label <= 15) or
            (label < 0 or label > 2 ** 20 - 1)):
        return False
    return True | code_tokens: (tokenized duplicate of the code above) | 31.941176 | score: [
0.03225806451612903,
0.037037037037037035,
0,
0.15384615384615385,
0.09090909090909091,
0.03389830508474576,
0.03773584905660377,
0.03389830508474576,
0.037037037037037035,
0.06896551724137931,
0.2857142857142857,
0.06,
0.06060606060606061,
0.06818181818181818,
0.1,
0,
0.13333333333333333
] |
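A few doctest-style spot checks for the validator above — a sketch that assumes the function (and `import numbers`) is in scope:

    assert is_valid_mpls_label(0)            # IPv4 Explicit NULL
    assert is_valid_mpls_label(16)           # first unreserved ordinary label
    assert not is_valid_mpls_label(7)        # 4-15 are reserved
    assert not is_valid_mpls_label(2 ** 20)  # outside the 20-bit range
    assert not is_valid_mpls_label("16")     # non-integral input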
def create_dscp_marking_rule(self, policy, body=None):
    """Creates a new DSCP marking rule."""
    return self.post(self.qos_dscp_marking_rules_path % policy,
                     body=body) | code_tokens: (tokenized duplicate of the code above) | 50.5 | score: [
0.018518518518518517,
0.043478260869565216,
0.04477611940298507,
0.14285714285714285
] |
def md_jdbc_virtual_table(key, node):
    """Extract a JDBC virtual table's metadata from an XML node"""
    name = node.find("name")
    sql = node.find("sql")
    escapeSql = node.find("escapeSql")
    escapeSql = escapeSql.text if escapeSql is not None else None
    keyColumn = node.find("keyColumn")
    keyColumn = keyColumn.text if keyColumn is not None else None
    n_g = node.find("geometry")
    geometry = JDBCVirtualTableGeometry(n_g.find("name"), n_g.find("type"), n_g.find("srid"))
    parameters = []
    for n_p in node.findall("parameter"):
        p_name = n_p.find("name")
        p_defaultValue = n_p.find("defaultValue")
        p_defaultValue = p_defaultValue.text if p_defaultValue is not None else None
        p_regexpValidator = n_p.find("regexpValidator")
        p_regexpValidator = p_regexpValidator.text if p_regexpValidator is not None else None
        parameters.append(JDBCVirtualTableParam(p_name, p_defaultValue, p_regexpValidator))
    return JDBCVirtualTable(name, sql, escapeSql, geometry, keyColumn, parameters) | code_tokens: (tokenized duplicate of the code above) | 51.55 | score: [
0.02702702702702703,
0.031746031746031744,
0.07142857142857142,
0.07692307692307693,
0.05263157894736842,
0.03076923076923077,
0.05263157894736842,
0.03076923076923077,
0.06451612903225806,
0.03225806451612903,
0.10526315789473684,
0.04878048780487805,
0.06060606060606061,
0.04081632653061224,
0.03571428571428571,
0.03636363636363636,
0.03225806451612903,
0.03296703296703297,
0,
0.036585365853658534
] |
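The repeated `elem.text if elem is not None else None` guard above is the standard ElementTree idiom for optional children; a minimal standalone sketch (the XML snippet is invented for illustration):

    import xml.etree.ElementTree as ET

    node = ET.fromstring("<virtualTable><name>vt</name><sql>SELECT 1</sql></virtualTable>")
    escape_sql = node.find("escapeSql")  # element is absent in this sample
    print(node.find("name").text)                               # vt
    print(escape_sql.text if escape_sql is not None else None)  # None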
def abspath(self, path, project=None, root=None):
    """Returns the path from the current working directory

    We only store the paths relative to the root directory of the
    project. This method fixes those paths to be applicable from the
    working directory.

    Parameters
    ----------
    path: str
        The original path as it is stored in the configuration
    project: str
        The project to use. If None, the :attr:`projectname` attribute is
        used
    root: str
        If not None, the root directory of the project

    Returns
    -------
    str
        The path as it is accessible from the current working directory"""
    if root is None:
        root = self.config.projects[project or self.projectname]['root']
    return osp.join(root, path) | code_tokens: (tokenized duplicate of the code above) | 34.875 | score: [
0.02040816326530612,
0.03225806451612903,
0,
0.02564102564102564,
0.02857142857142857,
0.11764705882352941,
0,
0.1111111111111111,
0.1111111111111111,
0.11764705882352941,
0.030303030303030304,
0.1,
0.07792207792207792,
0.125,
0.11764705882352941,
0.034482758620689655,
0,
0.13333333333333333,
0.13333333333333333,
0.18181818181818182,
0.038461538461538464,
0.08333333333333333,
0.02631578947368421,
0.05714285714285714
] |
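A standalone sketch of the `os.path.join` behavior the return line relies on (paths invented):

    import os.path as osp

    root = "/home/user/project"
    print(osp.join(root, "data/input.csv"))  # /home/user/project/data/input.csv
    print(osp.join(root, "/etc/hosts"))      # /etc/hosts -- an absolute path discards root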
def setup(self):
    """Setup."""
    self.blocks = self.config['block_comments']
    self.lines = self.config['line_comments']
    self.group_comments = self.config['group_comments']
    self.prefix = self.config['prefix']
    self.generic_mode = self.config['generic_mode']
    self.strings = self.config['strings']
    self.trigraphs = self.config['trigraphs']
    self.decode_escapes = self.config['decode_escapes']
    self.charset_size = self.config['charset_size']
    self.wide_charset_size = self.config['wide_charset_size']
    self.exec_charset = self.get_encoding_name(self.config['exec_charset'])
    self.wide_exec_charset = self.get_encoding_name(self.config['wide_exec_charset'])
    self.string_types, self.wild_string_types = self.eval_string_type(self.config['string_types'])
    if not self.generic_mode:
        self.pattern = RE_CPP | code_tokens: (tokenized duplicate of the code above) | 50.111111 | score: [
0.0625,
0.1,
0,
0.0392156862745098,
0.04081632653061224,
0.03389830508474576,
0.046511627906976744,
0.03636363636363636,
0.044444444444444446,
0.04081632653061224,
0.03389830508474576,
0.03636363636363636,
0.03076923076923077,
0.02531645569620253,
0.033707865168539325,
0.029411764705882353,
0.06060606060606061,
0.06060606060606061
] |
def density_2d(self, x, y, Rs, rho0, r_core, center_x=0, center_y=0):
    """
    projected two-dimensional NFW profile (kappa*Sigma_crit)

    :param R: radius of interest
    :type R: float/numpy array
    :param Rs: scale radius
    :type Rs: float
    :param rho0: density normalization (characteristic density)
    :type rho0: float
    :param r_core: core radius of the (sub)halo
    :type r_core: float>0
    :return: Epsilon(R) projected density at radius R
    """
    x_ = x - center_x
    y_ = y - center_y
    R = np.sqrt(x_ ** 2 + y_ ** 2)
    b = r_core * Rs ** -1
    x = R * Rs ** -1
    Fx = self._F(x, b)
    return 2 * rho0 * Rs * Fx | code_tokens: (tokenized duplicate of the code above) | 31.636364 | score: [
0.014492753623188406,
0.18181818181818182,
0.046153846153846156,
0,
0.08333333333333333,
0.08823529411764706,
0.0967741935483871,
0.13043478260869565,
0.05970149253731343,
0.12,
0.1,
0.14814814814814814,
0.05263157894736842,
0.18181818181818182,
0.08,
0.08,
0.05263157894736842,
0.06896551724137931,
0.08333333333333333,
0.07692307692307693,
0,
0.06060606060606061
] |
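The body is a recentered radius computation feeding the profile function `self._F`; the coordinate part stands alone in numpy:

    import numpy as np

    x, y = np.array([1.0, 3.0]), np.array([2.0, 4.0])
    center_x, center_y = 0.0, 1.0
    R = np.sqrt((x - center_x) ** 2 + (y - center_y) ** 2)
    print(R)  # [1.41421356 4.24264069]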
def get_track_by_mbid(self, mbid):
    """Looks up a track by its MusicBrainz ID"""
    params = {"mbid": mbid}
    doc = _Request(self, "track.getInfo", params).execute(True)
    return Track(_extract(doc, "name", 1), _extract(doc, "name"), self) | code_tokens: (tokenized duplicate of the code above) | 32.375 | score: [
0.029411764705882353,
0.038461538461538464,
0,
0.06451612903225806,
0,
0.029850746268656716,
0,
0.02666666666666667
] |
def groups(self) -> typing.Iterator['Group']:
    """
    Returns: generator of all groups in this country
    """
    for group_category in Mission.valid_group_categories:
        if group_category in self._section_this_country.keys():
            for group_index in self._section_this_country[group_category]['group']:
                if group_index not in self.__groups[group_category]:
                    self.__groups[group_category][group_index] = Group(self.d, self.l10n, self.coa_color,
                                                                       self.country_index, group_category,
                                                                       group_index)
                yield self.__groups[group_category][group_index] | code_tokens: (tokenized duplicate of the code above) | 65.333333 | score: [
0.022222222222222223,
0.18181818181818182,
0.03571428571428571,
0.18181818181818182,
0.03278688524590164,
0.029850746268656716,
0.034482758620689655,
0.027777777777777776,
0.03669724770642202,
0.03636363636363636,
0.05747126436781609,
0.029411764705882353
] |
def load_config(settings):
    '''Load settings from configfile'''
    # Requires `from configparser import ConfigParser` and `import sys`.
    config = ConfigParser()
    section = 'pgdocgen'
    try:
        config.read(settings['configfile'])
    except Exception as e:
        sys.stderr.write('Failed to read config: ' + str(e))
        sys.exit(1)
    for option in config.options(section):
        settings[option] = config.get(section, option)
    return settings | code_tokens: (tokenized duplicate of the code above) | 32.25 | score: [
0.038461538461538464,
0.05128205128205128,
0.07407407407407407,
0.08333333333333333,
0.25,
0.046511627906976744,
0.07692307692307693,
0.03333333333333333,
0.10526315789473684,
0.047619047619047616,
0.037037037037037035,
0.10526315789473684
] |
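A self-contained sketch of the same ConfigParser pattern, reading from a string instead of a file:

    from configparser import ConfigParser

    config = ConfigParser()
    config.read_string("[pgdocgen]\nhost = localhost\nport = 5432\n")
    settings = {opt: config.get("pgdocgen", opt) for opt in config.options("pgdocgen")}
    print(settings)  # {'host': 'localhost', 'port': '5432'}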
def angle2xyz(azi, zen):
    """Convert azimuth and zenith to cartesian."""
    azi = xu.deg2rad(azi)
    zen = xu.deg2rad(zen)
    x = xu.sin(zen) * xu.sin(azi)
    y = xu.sin(zen) * xu.cos(azi)
    z = xu.cos(zen)
    return x, y, z | code_tokens: (tokenized duplicate of the code above) | 28.375 | score: [
0.041666666666666664,
0.04,
0.08,
0.08,
0.06060606060606061,
0.06060606060606061,
0.10526315789473684,
0.1111111111111111
] |
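The same conversion with plain numpy standing in for the module's `xu` ufunc alias; the outputs always land on the unit sphere:

    import numpy as np

    azi, zen = np.deg2rad(45.0), np.deg2rad(60.0)
    x = np.sin(zen) * np.sin(azi)
    y = np.sin(zen) * np.cos(azi)
    z = np.cos(zen)
    print(x**2 + y**2 + z**2)  # 1.0 (up to rounding)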
def get_level(self, level=2):
    """Get all nodes that are exactly this far away."""
    if level == 1:
        for child in self.children.values(): yield child
    else:
        for child in self.children.values():
            for node in child.get_level(level-1): yield node | code_tokens: (tokenized duplicate of the code above) | 42.142857 | score: [
0.034482758620689655,
0.03389830508474576,
0.09090909090909091,
0.05,
0.15384615384615385,
0.041666666666666664,
0.046875
] |
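A self-contained toy tree exercising the same depth-k traversal (`yield from` used where the original loops over the recursive call):

    class Node:
        def __init__(self):
            self.children = {}

        def get_level(self, level=2):
            if level == 1:
                for child in self.children.values():
                    yield child
            else:
                for child in self.children.values():
                    yield from child.get_level(level - 1)

    root, a, b = Node(), Node(), Node()
    root.children["a"] = a
    a.children["b"] = b
    print(len(list(root.get_level(2))))  # 1 -- only node b sits two levels down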
def to_file(self, destination, format='csv', csv_delimiter=',', csv_header=True):
    """Save the results to a local file in CSV format.

    Args:
      destination: path on the local filesystem for the saved results.
      format: the format to use for the exported data; currently only 'csv' is supported.
      csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','.
      csv_header: for CSV exports, whether to include an initial header line. Default true.
    Raises:
      An Exception if the operation failed.
    """
    f = codecs.open(destination, 'w', 'utf-8')
    fieldnames = []
    for column in self.schema:
        fieldnames.append(column.name)
    if sys.version_info[0] == 2:
        csv_delimiter = csv_delimiter.encode('unicode_escape')
    writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter=csv_delimiter)
    if csv_header:
        writer.writeheader()
    for row in self:
        writer.writerow(row)
    f.close() | code_tokens: (tokenized duplicate of the code above) | 40.869565 | score: [
0.024691358024691357,
0.037037037037037035,
0,
0.2222222222222222,
0.04285714285714286,
0.056179775280898875,
0.04938271604938271,
0.04395604395604396,
0.18181818181818182,
0.06976744186046512,
0.2857142857142857,
0.043478260869565216,
0.10526315789473684,
0.06666666666666667,
0.08333333333333333,
0.0625,
0.05,
0.02564102564102564,
0.1111111111111111,
0.11538461538461539,
0.1,
0.11538461538461539,
0.15384615384615385
] |
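A standalone sketch of the `csv.DictWriter` flow used above, writing to an in-memory buffer:

    import csv
    import io

    buf = io.StringIO()
    writer = csv.DictWriter(buf, fieldnames=["name", "score"], delimiter=",")
    writer.writeheader()
    writer.writerow({"name": "a", "score": 1})
    print(buf.getvalue())  # "name,score\r\na,1\r\n"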
def cell(self, w,h=0,txt='',border=0,ln=0,align='',fill=0,link=''):
    "Output a cell"
    txt = self.normalize_text(txt)
    k=self.k
    if(self.y+h>self.page_break_trigger and not self.in_footer and self.accept_page_break()):
        #Automatic page break
        x=self.x
        ws=self.ws
        if(ws>0):
            self.ws=0
            self._out('0 Tw')
        self.add_page(self.cur_orientation)
        self.x=x
        if(ws>0):
            self.ws=ws
            self._out(sprintf('%.3f Tw',ws*k))
    if(w==0):
        w=self.w-self.r_margin-self.x
    s=''
    if(fill==1 or border==1):
        if(fill==1):
            if border==1:
                op='B'
            else:
                op='f'
        else:
            op='S'
        s=sprintf('%.2f %.2f %.2f %.2f re %s ',self.x*k,(self.h-self.y)*k,w*k,-h*k,op)
    if(isinstance(border,basestring)):
        x=self.x
        y=self.y
        if('L' in border):
            s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,x*k,(self.h-(y+h))*k)
        if('T' in border):
            s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,(x+w)*k,(self.h-y)*k)
        if('R' in border):
            s+=sprintf('%.2f %.2f m %.2f %.2f l S ',(x+w)*k,(self.h-y)*k,(x+w)*k,(self.h-(y+h))*k)
        if('B' in border):
            s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-(y+h))*k,(x+w)*k,(self.h-(y+h))*k)
    if(txt!=''):
        if(align=='R'):
            dx=w-self.c_margin-self.get_string_width(txt)
        elif(align=='C'):
            dx=(w-self.get_string_width(txt))/2.0
        else:
            dx=self.c_margin
        if(self.color_flag):
            s+='q '+self.text_color+' '
        # If multibyte, Tw has no effect - do word spacing using an adjustment before each space
        if (self.ws and self.unifontsubset):
            for uni in UTF8StringToArray(txt):
                self.current_font['subset'].append(uni)
            space = self._escape(UTF8ToUTF16BE(' ', False))
            s += sprintf('BT 0 Tw %.2F %.2F Td [',(self.x + dx) * k,(self.h - (self.y + 0.5*h+ 0.3 * self.font_size)) * k)
            t = txt.split(' ')
            numt = len(t)
            for i in range(numt):
                tx = t[i]
                tx = '(' + self._escape(UTF8ToUTF16BE(tx, False)) + ')'
                s += sprintf('%s ', tx)
                if ((i+1)<numt):
                    adj = -(self.ws * self.k) * 1000 / self.font_size_pt
                    s += sprintf('%d(%s) ', adj, space)
            s += '] TJ'
            s += ' ET'
        else:
            if (self.unifontsubset):
                txt2 = self._escape(UTF8ToUTF16BE(txt, False))
                for uni in UTF8StringToArray(txt):
                    self.current_font['subset'].append(uni)
            else:
                txt2 = self._escape(txt)
            s += sprintf('BT %.2f %.2f Td (%s) Tj ET',(self.x+dx)*k,(self.h-(self.y+.5*h+.3*self.font_size))*k,txt2)
        if(self.underline):
            s+=' '+self._dounderline(self.x+dx,self.y+.5*h+.3*self.font_size,txt)
        if(self.color_flag):
            s+=' Q'
        if(link):
            self.link(self.x+dx,self.y+.5*h-.5*self.font_size,self.get_string_width(txt),self.font_size,link)
    if(s):
        self._out(s)
    self.lasth=h
    if(ln>0):
        #Go to next line
        self.y+=h
        if(ln==1):
            self.x=self.l_margin
    else:
        self.x+=w | code_tokens: (tokenized duplicate of the code above) | 40.538462 | score: [
0.11940298507462686,
0.08695652173913043,
0.05263157894736842,
0.1875,
0.041237113402061855,
0.09090909090909091,
0.15,
0.13636363636363635,
0.14285714285714285,
0.12,
0.06060606060606061,
0.0425531914893617,
0.15,
0.14285714285714285,
0.11538461538461539,
0.06,
0.17647058823529413,
0.07317073170731707,
0.25,
0.12121212121212122,
0.125,
0.10344827586206896,
0.11538461538461539,
0.09523809523809523,
0.11538461538461539,
0.11764705882352941,
0.13636363636363635,
0.1,
0.07142857142857142,
0.15,
0.15,
0.06666666666666667,
0.0851063829787234,
0.06666666666666667,
0.0851063829787234,
0.06666666666666667,
0.0784313725490196,
0.06666666666666667,
0.0784313725490196,
0.15,
0.1111111111111111,
0.04918032786885246,
0.10344827586206896,
0.05660377358490566,
0.11764705882352941,
0.09375,
0.0625,
0.06976744186046512,
0,
0.03,
0.041666666666666664,
0.04,
0.03389830508474576,
0.031746031746031744,
0.047619047619047616,
0.058823529411764705,
0.06896551724137931,
0.05405405405405406,
0.06896551724137931,
0.02666666666666667,
0.06818181818181818,
0.08333333333333333,
0.02631578947368421,
0.03389830508474576,
0.07407407407407407,
0.07692307692307693,
0.11764705882352941,
0.05,
0.030303030303030304,
0.037037037037037035,
0.031746031746031744,
0.09523809523809523,
0.045454545454545456,
0.05,
0,
0.06451612903225806,
0.07058823529411765,
0.0625,
0.13043478260869565,
0.09523809523809523,
0.061946902654867256,
0.14285714285714285,
0.08333333333333333,
0.15,
0.17647058823529413,
0.10714285714285714,
0.14285714285714285,
0.13636363636363635,
0.08333333333333333,
0.15384615384615385,
0.14285714285714285
] |
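The method accumulates raw PDF content-stream operators into `s`; a standalone sketch of the `re` (rectangle) operator string it builds, with `sprintf` reimplemented as the thin `%`-formatting helper fpdf uses, and the scale factor `k` invented for the example:

    def sprintf(fmt, *args):
        return fmt % args  # fpdf's sprintf is essentially %-formatting

    x, y, w, h = 10.0, 20.0, 50.0, 8.0   # cell origin and size, in user units
    k, page_h = 2.834645669, 297.0       # assumed: points per mm, A4 page height
    print(sprintf('%.2f %.2f %.2f %.2f re %s ', x*k, (page_h - y)*k, w*k, -h*k, 'S'))
    # -> "28.35 785.20 141.73 -22.68 re S "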
def three_hours_forecast(self, name):
    """
    Queries the OWM Weather API for three hours weather forecast for the
    specified location (eg: "London,uk"). A *Forecaster* object is
    returned, containing a *Forecast* instance covering a global streak of
    five days: this instance encapsulates *Weather* objects, with a time
    interval of three hours one from each other

    :param name: the location's toponym
    :type name: str or unicode
    :returns: a *Forecaster* instance or ``None`` if forecast data is not
        available for the specified location
    :raises: *ParseResponseException* when OWM Weather API responses' data
        cannot be parsed, *APICallException* when OWM Weather API can not be
        reached
    """
    assert isinstance(name, str), "Value must be a string"
    encoded_name = name
    params = {'q': encoded_name, 'lang': self._language}
    uri = http_client.HttpClient.to_url(THREE_HOURS_FORECAST_URL,
                                        self._API_key,
                                        self._subscription_type,
                                        self._use_ssl)
    _, json_data = self._wapi.cacheable_get_json(uri, params=params)
    forecast = self._parsers['forecast'].parse_JSON(json_data)
    if forecast is not None:
        forecast.set_interval("3h")
        return forecaster.Forecaster(forecast)
    else:
        return None | code_tokens: (tokenized duplicate of the code above) | 49.533333 | score: [
0.02702702702702703,
0.18181818181818182,
0.02631578947368421,
0.07142857142857142,
0.05128205128205128,
0.05263157894736842,
0.0392156862745098,
0,
0.06976744186046512,
0.08823529411764706,
0.07792207792207792,
0.041666666666666664,
0.05128205128205128,
0.05,
0.10526315789473684,
0.18181818181818182,
0.03225806451612903,
0.07407407407407407,
0.03333333333333333,
0.043478260869565216,
0.034482758620689655,
0.029411764705882353,
0.05172413793103448,
0.027777777777777776,
0.030303030303030304,
0.0625,
0.05128205128205128,
0.04,
0.15384615384615385,
0.08695652173913043
] |
def push(self, message):
    """
    Takes a SlackEvent, parses it for a command, and runs it against the registered plugins
    """
    if self._ignore_event(message):
        return None, None
    args = self._parse_message(message)
    self.log.debug("Searching for command using chunks: %s", args)
    cmd, msg_args = self._find_longest_prefix_command(args)
    if cmd is not None:
        if message.user is None:
            self.log.debug("Discarded message with no originating user: %s", message)
            return None, None
        sender = message.user.username
        if message.channel is not None:
            sender = "#%s/%s" % (message.channel.name, sender)
        self.log.info("Received from %s: %s, args %s", sender, cmd, msg_args)
        f = self._get_command(cmd, message.user)
        if f:
            if self._is_channel_ignored(f, message.channel):
                self.log.info("Channel %s is ignored, discarding command %s", message.channel, cmd)
                return '_ignored_', ""
            return cmd, f.execute(message, msg_args)
        return '_unauthorized_', "Sorry, you are not authorized to run %s" % cmd
    return None, None | code_tokens: (tokenized duplicate of the code above) | 49.48 | score: [
0.041666666666666664,
0.18181818181818182,
0.034482758620689655,
0.18181818181818182,
0.05128205128205128,
0.06896551724137931,
0.046511627906976744,
0.02857142857142857,
0.031746031746031744,
0.07407407407407407,
0.05555555555555555,
0.033707865168539325,
0.06060606060606061,
0.047619047619047616,
0.046511627906976744,
0.030303030303030304,
0.037037037037037035,
0.038461538461538464,
0.11764705882352941,
0.03125,
0.02912621359223301,
0.047619047619047616,
0.03571428571428571,
0.03571428571428571,
0.08
] |
def _compute_separations(inner, outer, angles):
    """Compute x and y positions for separation lines for given angles.
    """
    return [np.array([[a, inner], [a, outer]]) for a in angles] | code_tokens: (tokenized duplicate of the code above) | 40 | score: [
0.02127659574468085,
0.02666666666666667,
0,
0.18181818181818182,
0.029850746268656716
] |
def _box_points(values, mode='extremes'):
    """
    Default mode: (mode='extremes' or unset)
        Return a 7-tuple of 2x minimum, Q1, Median, Q3,
        and 2x maximum for a list of numeric values.
    1.5IQR mode: (mode='1.5IQR')
        Return a 7-tuple of min, Q1 - 1.5 * IQR, Q1, Median, Q3,
        Q3 + 1.5 * IQR and max for a list of numeric values.
    Tukey mode: (mode='tukey')
        Return a 7-tuple of min, q[0..4], max and a list of outliers
        Outliers are considered values x: x < q1 - IQR or x > q3 + IQR
    SD mode: (mode='stdev')
        Return a 7-tuple of min, q[0..4], max and a list of outliers
        Outliers are considered values x: x < q2 - SD or x > q2 + SD
    SDp mode: (mode='pstdev')
        Return a 7-tuple of min, q[0..4], max and a list of outliers
        Outliers are considered values x: x < q2 - SDp or x > q2 + SDp

    The iterator values may include None values.

    Uses quartile definition from Mendenhall, W. and
    Sincich, T. L. Statistics for Engineering and the
    Sciences, 4th ed. Prentice-Hall, 1995.
    """
    def median(seq):
        n = len(seq)
        if n % 2 == 0:  # seq has an even length
            return (seq[n // 2] + seq[n // 2 - 1]) / 2
        else:  # seq has an odd length
            return seq[n // 2]

    def mean(seq):
        return sum(seq) / len(seq)

    def stdev(seq):
        m = mean(seq)
        l = len(seq)
        v = sum((n - m)**2 for n in seq) / (l - 1)  # variance
        return v**0.5  # sqrt

    def pstdev(seq):
        m = mean(seq)
        l = len(seq)
        v = sum((n - m)**2 for n in seq) / l  # variance
        return v**0.5  # sqrt

    outliers = []
    # sort the copy in case the originals must stay in original order
    s = sorted([x for x in values if x is not None])
    n = len(s)
    if not n:
        return (0, 0, 0, 0, 0, 0, 0), []
    elif n == 1:
        return (s[0], s[0], s[0], s[0], s[0], s[0], s[0]), []
    else:
        q2 = median(s)
        # See 'Method 3' in http://en.wikipedia.org/wiki/Quartile
        if n % 2 == 0:  # even
            q1 = median(s[:n // 2])
            q3 = median(s[n // 2:])
        else:  # odd
            if n == 1:  # special case
                q1 = s[0]
                q3 = s[0]
            elif n % 4 == 1:  # n is of form 4n + 1 where n >= 1
                m = (n - 1) // 4
                q1 = 0.25 * s[m - 1] + 0.75 * s[m]
                q3 = 0.75 * s[3 * m] + 0.25 * s[3 * m + 1]
            else:  # n is of form 4n + 3 where n >= 1
                m = (n - 3) // 4
                q1 = 0.75 * s[m] + 0.25 * s[m + 1]
                q3 = 0.25 * s[3 * m + 1] + 0.75 * s[3 * m + 2]
        iqr = q3 - q1
        min_s = s[0]
        max_s = s[-1]
        if mode == 'extremes':
            q0 = min_s
            q4 = max_s
        elif mode == 'tukey':
            # the lowest datum still within 1.5 IQR of the lower quartile,
            # and the highest datum still within 1.5 IQR of the upper
            # quartile [Tukey box plot, Wikipedia]
            b0 = bisect_left(s, q1 - 1.5 * iqr)
            b4 = bisect_right(s, q3 + 1.5 * iqr)
            q0 = s[b0]
            q4 = s[b4 - 1]
            outliers = s[:b0] + s[b4:]
        elif mode == 'stdev':
            # one standard deviation above and below the mean of the data
            sd = stdev(s)
            b0 = bisect_left(s, q2 - sd)
            b4 = bisect_right(s, q2 + sd)
            q0 = s[b0]
            q4 = s[b4 - 1]
            outliers = s[:b0] + s[b4:]
        elif mode == 'pstdev':
            # one population standard deviation above and below
            # the mean of the data
            sdp = pstdev(s)
            b0 = bisect_left(s, q2 - sdp)
            b4 = bisect_right(s, q2 + sdp)
            q0 = s[b0]
            q4 = s[b4 - 1]
            outliers = s[:b0] + s[b4:]
        elif mode == '1.5IQR':
            # 1.5IQR mode
            q0 = q1 - 1.5 * iqr
            q4 = q3 + 1.5 * iqr
        return (min_s, q0, q1, q2, q3, q4, max_s), outliers | code_tokens: (tokenized duplicate of the code above) | 38.945946 | score: [
0.024390243902439025,
0.18181818181818182,
0.041666666666666664,
0.03389830508474576,
0.038461538461538464,
0.05555555555555555,
0.029411764705882353,
0.03333333333333333,
0.058823529411764705,
0.027777777777777776,
0.02857142857142857,
0.06451612903225806,
0.027777777777777776,
0.029411764705882353,
0.06060606060606061,
0.027777777777777776,
0.02857142857142857,
0,
0.038461538461538464,
0,
0.05263157894736842,
0.03508771929824561,
0.043478260869565216,
0.18181818181818182,
0,
0.08333333333333333,
0.08333333333333333,
0.038461538461538464,
0.034482758620689655,
0.047619047619047616,
0.058823529411764705,
0,
0.09090909090909091,
0.05263157894736842,
0,
0.08695652173913043,
0.08,
0.125,
0.030303030303030304,
0.06060606060606061,
0,
0.08333333333333333,
0.08,
0.125,
0.03333333333333333,
0.06060606060606061,
0,
0.09523809523809523,
0.0273972602739726,
0.03571428571428571,
0.1111111111111111,
0.11764705882352941,
0.045454545454545456,
0.1,
0.03076923076923077,
0.15384615384615385,
0.07692307692307693,
0.028985507246376812,
0.058823529411764705,
0.05128205128205128,
0.05128205128205128,
0.08333333333333333,
0.047619047619047616,
0.06896551724137931,
0.06896551724137931,
0.029411764705882353,
0.05555555555555555,
0.037037037037037035,
0.03225806451612903,
0.03508771929824561,
0.05555555555555555,
0.037037037037037035,
0.030303030303030304,
0,
0.08,
0.08333333333333333,
0.08,
0.058823529411764705,
0.07692307692307693,
0.07692307692307693,
0.06060606060606061,
0.02564102564102564,
0.0273972602739726,
0.03636363636363636,
0.0392156862745098,
0.038461538461538464,
0.07692307692307693,
0.06666666666666667,
0.047619047619047616,
0.06060606060606061,
0.025974025974025976,
0.06896551724137931,
0.045454545454545456,
0.044444444444444446,
0.07692307692307693,
0.06666666666666667,
0.047619047619047616,
0.058823529411764705,
0.029850746268656716,
0.05263157894736842,
0.06451612903225806,
0.044444444444444446,
0.043478260869565216,
0.07692307692307693,
0.06666666666666667,
0.047619047619047616,
0.058823529411764705,
0.06896551724137931,
0.05714285714285714,
0.05714285714285714,
0.031746031746031744
] |
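For comparison, the standard library computes box-plot statistics in a few lines; note that `statistics.quantiles` uses a different quartile rule than the Mendenhall & Sincich definition above, so the values can differ slightly:

    import statistics

    data = [1, 2, 3, 4, 5, 6, 7, 8, 100]
    q1, q2, q3 = statistics.quantiles(data, n=4)  # default 'exclusive' method
    iqr = q3 - q1
    outliers = [x for x in data if x < q1 - 1.5 * iqr or x > q3 + 1.5 * iqr]
    print(q1, q2, q3, outliers)  # 2.5 5.0 7.5 [100]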
def _validate_xtext(xtext):
    """If input token contains ASCII non-printables, register a defect."""
    non_printables = _non_printable_finder(xtext)
    if non_printables:
        xtext.defects.append(errors.NonPrintableDefect(non_printables))
    if utils._has_surrogates(xtext):
        xtext.defects.append(errors.UndecodableBytesDefect(
"Non-ASCII characters found in header token")) | [
"def",
"_validate_xtext",
"(",
"xtext",
")",
":",
"non_printables",
"=",
"_non_printable_finder",
"(",
"xtext",
")",
"if",
"non_printables",
":",
"xtext",
".",
"defects",
".",
"append",
"(",
"errors",
".",
"NonPrintableDefect",
"(",
"non_printables",
")",
")",
"if",
"utils",
".",
"_has_surrogates",
"(",
"xtext",
")",
":",
"xtext",
".",
"defects",
".",
"append",
"(",
"errors",
".",
"UndecodableBytesDefect",
"(",
"\"Non-ASCII characters found in header token\"",
")",
")"
] | 44 | [
0.037037037037037035,
0.02702702702702703,
0,
0.04081632653061224,
0.09090909090909091,
0.028169014084507043,
0.05555555555555555,
0.05084745762711865,
0.05172413793103448
] |
def qscan(self, cursor=0, count=None, busyloop=None, minlen=None,
          maxlen=None, importrate=None):
    """
    Iterate all the existing queues in the local node.

    :param count: A hint about how much work to do per iteration.
    :param busyloop: Block and return all the elements in a busy loop.
    :param minlen: Don't return elements with less than count jobs queued.
    :param maxlen: Don't return elements with more than count jobs queued.
    :param importrate: Only return elements with a job import rate
        (from other nodes) >= rate.
    """
    command = ["QSCAN", cursor]
    if count:
        command += ["COUNT", count]
    if busyloop:
        command += ["BUSYLOOP"]
    if minlen:
        command += ["MINLEN", minlen]
    if maxlen:
        command += ["MAXLEN", maxlen]
    if importrate:
        command += ["IMPORTRATE", importrate]
    return self.execute_command(*command) | code_tokens: (tokenized duplicate of the code above) | 39.64 | score: [
0.03076923076923077,
0.13636363636363635,
0.18181818181818182,
0.034482758620689655,
0,
0.04285714285714286,
0.04054054054054054,
0.038461538461538464,
0.038461538461538464,
0.04225352112676056,
0.0392156862745098,
0.18181818181818182,
0.05714285714285714,
0.11764705882352941,
0.05128205128205128,
0.1,
0.05714285714285714,
0.1111111111111111,
0.04878048780487805,
0.1111111111111111,
0.04878048780487805,
0.09090909090909091,
0.04081632653061224,
0,
0.044444444444444446
] |
def _atomicModification(func):
    """Decorator

    Make document modification atomic
    """
    def wrapper(*args, **kwargs):
        self = args[0]
        with self._qpart:
            func(*args, **kwargs)
    return wrapper | code_tokens: (tokenized duplicate of the code above) | 28.111111 | score: [
0.03333333333333333,
0.1,
0.04878048780487805,
0.18181818181818182,
0.05405405405405406,
0.07692307692307693,
0.06896551724137931,
0.05405405405405406,
0.09090909090909091
] |
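A self-contained sketch of the decorator pattern above, with a stand-in context manager playing the role of `self._qpart`:

    class FakeEditor:
        def __enter__(self):
            print("begin atomic edit")
        def __exit__(self, *exc):
            print("end atomic edit")

    def atomic_modification(func):
        def wrapper(*args, **kwargs):
            self = args[0]
            with self._qpart:
                func(*args, **kwargs)
        return wrapper

    class Doc:
        _qpart = FakeEditor()

        @atomic_modification
        def edit(self):
            print("mutating document")

    Doc().edit()  # begin atomic edit / mutating document / end atomic edit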
def _setint(self, int_, length=None):
    """Reset the bitstring to have given signed int interpretation."""
    # If no length given, and we've previously been given a length, use it.
    if length is None and hasattr(self, 'len') and self.len != 0:
        length = self.len
    if length is None or length == 0:
        raise CreationError("A non-zero length must be specified with an int initialiser.")
    if int_ >= (1 << (length - 1)) or int_ < -(1 << (length - 1)):
        raise CreationError("{0} is too large a signed integer for a bitstring of length {1}. "
                            "The allowed range is [{2}, {3}].", int_, length, -(1 << (length - 1)),
                            (1 << (length - 1)) - 1)
    if int_ >= 0:
        self._setuint(int_, length)
        return
    # TODO: We should decide whether to just use the _setuint, or to do the bit flipping,
    # based upon which will be quicker. If the -ive number is less than half the maximum
    # possible then it's probably quicker to do the bit flipping...
    # Do the 2's complement thing. Add one, set to minus number, then flip bits.
    int_ += 1
    self._setuint(-int_, length)
    self._invert_all() | code_tokens: (tokenized duplicate of the code above) | 56.772727 | score: [
0.02702702702702703,
0.02702702702702703,
0.02531645569620253,
0.028985507246376812,
0.06896551724137931,
0.04878048780487805,
0.031578947368421054,
0.02857142857142857,
0.04040404040404041,
0.02912621359223301,
0.05357142857142857,
0.09523809523809523,
0.05128205128205128,
0.1111111111111111,
0.03225806451612903,
0.03260869565217391,
0.028169014084507043,
0,
0.03571428571428571,
0.11764705882352941,
0.05555555555555555,
0.07692307692307693
] |
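The negative branch implements two's complement via add-one/negate/invert; a standalone sketch showing the equivalent mask form:

    def twos_complement(value, length):
        # Unsigned bit pattern of a signed value in `length` bits.
        assert -(1 << (length - 1)) <= value < (1 << (length - 1))
        return value & ((1 << length) - 1)

    print(format(twos_complement(-5, 8), '08b'))  # 11111011
    print(format(twos_complement(5, 8), '08b'))   # 00000101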
def reverse(
        self,
        query,
        exactly_one=True,
        timeout=DEFAULT_SENTINEL,
        language=None,
):
    """
    Return an address by location point.

    :param query: The coordinates for which you wish to obtain the
        closest human-readable addresses.
    :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
        longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.

    :param bool exactly_one: Return one result or a list of results, if
        available.

    :param int timeout: Time, in seconds, to wait for the geocoding service
        to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
        exception. Set this only if you wish to override, on this call
        only, the value set during the geocoder's initialization.

    :param str language: Language in which search results should be
        returned. When data in specified language is not
        available for a specific field, default language is used.
        List of supported languages (case-insensitive):
        https://developer.tomtom.com/online-search/online-search-documentation/supported-languages

        .. versionadded:: 1.18.0

    :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
        ``exactly_one=False``.
    """
    position = self._coerce_point_to_string(query)
    params = self._reverse_params(position)
    if language:
        params['language'] = language
    quoted_position = quote(position.encode('utf-8'))
    url = "?".join((self.api_reverse % dict(position=quoted_position),
                    urlencode(params)))
    logger.debug("%s.reverse: %s", self.__class__.__name__, url)
    return self._parse_reverse_json(
        self._call_geocoder(url, timeout=timeout), exactly_one
    ) | code_tokens: (tokenized duplicate of the code above) | 39.604167 | score: [
0.16666666666666666,
0.11764705882352941,
0.1111111111111111,
0.10344827586206896,
0.08108108108108109,
0.11538461538461539,
0.5,
0.18181818181818182,
0.045454545454545456,
0,
0.04285714285714286,
0.044444444444444446,
0.10256410256410256,
0.0547945205479452,
0,
0.04,
0.09090909090909091,
0,
0.0379746835443038,
0.08,
0.02702702702702703,
0.028985507246376812,
0,
0.04225352112676056,
0.03333333333333333,
0.028985507246376812,
0.05084745762711865,
0.0392156862745098,
0,
0.08333333333333333,
0,
0.0875,
0.11764705882352941,
0.18181818181818182,
0.037037037037037035,
0.0425531914893617,
0,
0.1,
0.04878048780487805,
0,
0.03508771929824561,
0.04054054054054054,
0.06976744186046512,
0.029411764705882353,
0,
0.075,
0.030303030303030304,
0.3333333333333333
] |
def finalize(self, result=None):
    """
    Clean up any created database and schema.
    """
    if not self.settings_path:
        # short circuit if no settings file can be found
        return
    from django.test.utils import teardown_test_environment
    from django.db import connection
    from django.conf import settings
    self.call_plugins_method('beforeDestroyTestDb', settings, connection)
    try:
        connection.creation.destroy_test_db(
            self.old_db,
            verbosity=self.verbosity,
        )
    except Exception:
        # If we can't tear down the test DB, don't worry about it.
        pass
    self.call_plugins_method('afterDestroyTestDb', settings, connection)
    self.call_plugins_method(
        'beforeTeardownTestEnv', settings, teardown_test_environment)
    teardown_test_environment()
    self.call_plugins_method('afterTeardownTestEnv', settings) | code_tokens: (tokenized duplicate of the code above) | 35.962963 | score: [
0.03125,
0.18181818181818182,
0.04081632653061224,
0.18181818181818182,
0.058823529411764705,
0.03333333333333333,
0.1111111111111111,
0,
0.031746031746031744,
0.05,
0.05,
0,
0.025974025974025976,
0.16666666666666666,
0.0625,
0.07142857142857142,
0.07317073170731707,
0.23076923076923078,
0.08,
0.02857142857142857,
0.125,
0.02631578947368421,
0,
0.09090909090909091,
0.0410958904109589,
0.05714285714285714,
0.030303030303030304
] |
def uninstall(cls, args):
    """Uninstall and delete NApps.

    For local installations, do not delete code outside install_path and
    enabled_path.
    """
    mgr = NAppsManager()
    for napp in args['<napp>']:
        mgr.set_napp(*napp)
        LOG.info('NApp %s:', mgr.napp_id)
        if mgr.is_installed():
            if mgr.is_enabled():
                cls.disable_napp(mgr)
            LOG.info(' Uninstalling...')
            mgr.uninstall()
            LOG.info(' Uninstalled.')
        else:
            LOG.error(" NApp isn't installed.") | code_tokens: (tokenized duplicate of the code above) | 33.777778 | score: [
0.04,
0.05263157894736842,
0,
0.02631578947368421,
0.09523809523809523,
0.18181818181818182,
0.07142857142857142,
0.05714285714285714,
0.06451612903225806,
0.044444444444444446,
0.058823529411764705,
0.05555555555555555,
0.04878048780487805,
0.044444444444444446,
0.06451612903225806,
0.047619047619047616,
0.11764705882352941,
0.038461538461538464
] |
def peek_pointers_in_data(self, data, peekSize = 16, peekStep = 1):
"""
Tries to guess which values in the given data are valid pointers,
and reads some data from them.
@see: L{peek}
@type data: str
@param data: Binary data to find pointers in.
@type peekSize: int
@param peekSize: Number of bytes to read from each pointer found.
@type peekStep: int
@param peekStep: Expected data alignment.
            Typically you specify 1 when data alignment is unknown,
or 4 when you expect data to be DWORD aligned.
Any other value may be specified.
@rtype: dict( str S{->} str )
@return: Dictionary mapping stack offsets to the data they point to.
"""
result = dict()
ptrSize = win32.sizeof(win32.LPVOID)
if ptrSize == 4:
ptrFmt = '<L'
else:
ptrFmt = '<Q'
if len(data) > 0:
for i in compat.xrange(0, len(data), peekStep):
packed = data[i:i+ptrSize]
if len(packed) == ptrSize:
address = struct.unpack(ptrFmt, packed)[0]
## if not address & (~0xFFFF): continue
peek_data = self.peek(address, peekSize)
if peek_data:
result[i] = peek_data
return result | [
"def",
"peek_pointers_in_data",
"(",
"self",
",",
"data",
",",
"peekSize",
"=",
"16",
",",
"peekStep",
"=",
"1",
")",
":",
"result",
"=",
"dict",
"(",
")",
"ptrSize",
"=",
"win32",
".",
"sizeof",
"(",
"win32",
".",
"LPVOID",
")",
"if",
"ptrSize",
"==",
"4",
":",
"ptrFmt",
"=",
"'<L'",
"else",
":",
"ptrFmt",
"=",
"'<Q'",
"if",
"len",
"(",
"data",
")",
">",
"0",
":",
"for",
"i",
"in",
"compat",
".",
"xrange",
"(",
"0",
",",
"len",
"(",
"data",
")",
",",
"peekStep",
")",
":",
"packed",
"=",
"data",
"[",
"i",
":",
"i",
"+",
"ptrSize",
"]",
"if",
"len",
"(",
"packed",
")",
"==",
"ptrSize",
":",
"address",
"=",
"struct",
".",
"unpack",
"(",
"ptrFmt",
",",
"packed",
")",
"[",
"0",
"]",
"## if not address & (~0xFFFF): continue",
"peek_data",
"=",
"self",
".",
"peek",
"(",
"address",
",",
"peekSize",
")",
"if",
"peek_data",
":",
"result",
"[",
"i",
"]",
"=",
"peek_data",
"return",
"result"
] | 36.210526 | [
0.07462686567164178,
0.18181818181818182,
0.0273972602739726,
0.05263157894736842,
0,
0.09523809523809523,
0,
0.08333333333333333,
0.05660377358490566,
0,
0.07142857142857142,
0.0273972602739726,
0,
0.07142857142857142,
0.04081632653061224,
0.029850746268656716,
0.034482758620689655,
0.044444444444444446,
0,
0.13157894736842105,
0.02631578947368421,
0.18181818181818182,
0.08695652173913043,
0.045454545454545456,
0.08333333333333333,
0.08,
0.15384615384615385,
0.08,
0.08,
0.03389830508474576,
0.058823529411764705,
0.047619047619047616,
0.045454545454545456,
0.034482758620689655,
0.04838709677419355,
0.06060606060606061,
0.044444444444444446,
0.09523809523809523
] |
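A minimal, dependency-free sketch of the pointer-scanning loop above, for readers without a live debugger: fake_peek is a hypothetical stand-in for the debugger's memory read (self.peek), and the pointer size is assumed rather than queried from win32.

import struct

def scan_for_pointers(data, ptr_size=8, step=1):
    # Same shape as peek_pointers_in_data: unpack each candidate pointer
    # and keep the offsets whose target memory is readable.
    fmt = '<L' if ptr_size == 4 else '<Q'
    result = {}
    for i in range(0, len(data), step):
        packed = data[i:i + ptr_size]
        if len(packed) == ptr_size:
            address = struct.unpack(fmt, packed)[0]
            peeked = fake_peek(address)   # stand-in for self.peek(address, peekSize)
            if peeked:
                result[i] = peeked
    return result

def fake_peek(address):
    # Pretend only one small address range is readable.
    return b'\x90' * 4 if 0x1000 <= address < 0x2000 else b''

buf = struct.pack('<QQ', 0x1234, 0xdeadbeef)
print(scan_for_pointers(buf, ptr_size=8, step=8))   # {0: b'\x90\x90\x90\x90'}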
def _train_and_eval_dataset_v1(problem_name, data_dir):
"""Return train and evaluation datasets, feature info and supervised keys."""
problem = problems.problem(problem_name)
train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir)
train_dataset = train_dataset.map(_select_features)
eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir)
eval_dataset = eval_dataset.map(_select_features)
supervised_keys = (["inputs"], ["targets"])
hparams = problem.get_hparams()
# We take a few training examples to guess the shapes.
input_shapes, target_shapes = [], []
for example in train_dataset.take(3):
input_shapes.append(example["inputs"].shape.as_list())
target_shapes.append(example["targets"].shape.as_list())
input_vocab_size = hparams.vocab_size["inputs"]
target_vocab_size = hparams.vocab_size["targets"]
input_info = _make_info(input_shapes, input_vocab_size)
target_info = _make_info(target_shapes, target_vocab_size)
info = {"inputs": input_info, "targets": target_info}
return train_dataset, eval_dataset, info, supervised_keys | [
"def",
"_train_and_eval_dataset_v1",
"(",
"problem_name",
",",
"data_dir",
")",
":",
"problem",
"=",
"problems",
".",
"problem",
"(",
"problem_name",
")",
"train_dataset",
"=",
"problem",
".",
"dataset",
"(",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
",",
"data_dir",
")",
"train_dataset",
"=",
"train_dataset",
".",
"map",
"(",
"_select_features",
")",
"eval_dataset",
"=",
"problem",
".",
"dataset",
"(",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
",",
"data_dir",
")",
"eval_dataset",
"=",
"eval_dataset",
".",
"map",
"(",
"_select_features",
")",
"supervised_keys",
"=",
"(",
"[",
"\"inputs\"",
"]",
",",
"[",
"\"targets\"",
"]",
")",
"hparams",
"=",
"problem",
".",
"get_hparams",
"(",
")",
"# We take a few training examples to guess the shapes.",
"input_shapes",
",",
"target_shapes",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"example",
"in",
"train_dataset",
".",
"take",
"(",
"3",
")",
":",
"input_shapes",
".",
"append",
"(",
"example",
"[",
"\"inputs\"",
"]",
".",
"shape",
".",
"as_list",
"(",
")",
")",
"target_shapes",
".",
"append",
"(",
"example",
"[",
"\"targets\"",
"]",
".",
"shape",
".",
"as_list",
"(",
")",
")",
"input_vocab_size",
"=",
"hparams",
".",
"vocab_size",
"[",
"\"inputs\"",
"]",
"target_vocab_size",
"=",
"hparams",
".",
"vocab_size",
"[",
"\"targets\"",
"]",
"input_info",
"=",
"_make_info",
"(",
"input_shapes",
",",
"input_vocab_size",
")",
"target_info",
"=",
"_make_info",
"(",
"target_shapes",
",",
"target_vocab_size",
")",
"info",
"=",
"{",
"\"inputs\"",
":",
"input_info",
",",
"\"targets\"",
":",
"target_info",
"}",
"return",
"train_dataset",
",",
"eval_dataset",
",",
"info",
",",
"supervised_keys"
] | 54.1 | [
0.01818181818181818,
0.0379746835443038,
0.07142857142857142,
0.041666666666666664,
0.05660377358490566,
0.04285714285714286,
0.058823529411764705,
0.06666666666666667,
0.09090909090909091,
0.05357142857142857,
0.07894736842105263,
0.07692307692307693,
0.034482758620689655,
0.03333333333333333,
0.061224489795918366,
0.058823529411764705,
0.05263157894736842,
0.05,
0.05454545454545454,
0.05084745762711865
] |
def add_body(self, body):
"""
Add a :class:`Body` to the system. This function also sets the
``system`` attribute of the body.
:param body:
The :class:`Body` to add.
"""
body.system = self
self.bodies.append(body)
self.unfrozen = np.concatenate((
self.unfrozen[:-2], np.zeros(7, dtype=bool), self.unfrozen[-2:]
)) | [
"def",
"add_body",
"(",
"self",
",",
"body",
")",
":",
"body",
".",
"system",
"=",
"self",
"self",
".",
"bodies",
".",
"append",
"(",
"body",
")",
"self",
".",
"unfrozen",
"=",
"np",
".",
"concatenate",
"(",
"(",
"self",
".",
"unfrozen",
"[",
":",
"-",
"2",
"]",
",",
"np",
".",
"zeros",
"(",
"7",
",",
"dtype",
"=",
"bool",
")",
",",
"self",
".",
"unfrozen",
"[",
"-",
"2",
":",
"]",
")",
")"
] | 28.428571 | [
0.04,
0.18181818181818182,
0.08571428571428572,
0.07317073170731707,
0,
0.15,
0.16216216216216217,
0,
0.18181818181818182,
0.07692307692307693,
0.0625,
0.075,
0.02666666666666667,
0.3
] |
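A runnable sketch of what the `unfrozen` bookkeeping above does: seven per-body parameter flags are spliced in just before the two trailing system-level flags. The initial mask contents here are illustrative assumptions, not values taken from the library.

import numpy as np

# Assumed starting mask: two existing flags plus two trailing system flags.
unfrozen = np.array([True, False, True, False])
unfrozen = np.concatenate((
    unfrozen[:-2], np.zeros(7, dtype=bool), unfrozen[-2:]
))
print(unfrozen)   # seven new False entries sit before the last two flags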
def gemset_present(name, ruby='default', user=None):
'''
Verify that the gemset is present.
name
The name of the gemset.
ruby: default
The ruby version this gemset belongs to.
user: None
The user to run rvm as.
.. versionadded:: 0.17.0
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
ret = _check_rvm(ret, user)
if ret['result'] is False:
return ret
if '@' in name:
ruby, name = name.split('@')
ret = _check_ruby(ret, ruby)
if not ret['result']:
ret['result'] = False
ret['comment'] = 'Requested ruby implementation was not found.'
return ret
if name in __salt__['rvm.gemset_list'](ruby, runas=user):
ret['result'] = True
ret['comment'] = 'Gemset already exists.'
else:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Set to install gemset {0}'.format(name)
return ret
if __salt__['rvm.gemset_create'](ruby, name, runas=user):
ret['result'] = True
ret['comment'] = 'Gemset successfully created.'
ret['changes'][name] = 'created'
else:
ret['result'] = False
ret['comment'] = 'Gemset could not be created.'
return ret | [
"def",
"gemset_present",
"(",
"name",
",",
"ruby",
"=",
"'default'",
",",
"user",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"ret",
"=",
"_check_rvm",
"(",
"ret",
",",
"user",
")",
"if",
"ret",
"[",
"'result'",
"]",
"is",
"False",
":",
"return",
"ret",
"if",
"'@'",
"in",
"name",
":",
"ruby",
",",
"name",
"=",
"name",
".",
"split",
"(",
"'@'",
")",
"ret",
"=",
"_check_ruby",
"(",
"ret",
",",
"ruby",
")",
"if",
"not",
"ret",
"[",
"'result'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Requested ruby implementation was not found.'",
"return",
"ret",
"if",
"name",
"in",
"__salt__",
"[",
"'rvm.gemset_list'",
"]",
"(",
"ruby",
",",
"runas",
"=",
"user",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Gemset already exists.'",
"else",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Set to install gemset {0}'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"__salt__",
"[",
"'rvm.gemset_create'",
"]",
"(",
"ruby",
",",
"name",
",",
"runas",
"=",
"user",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Gemset successfully created.'",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"'created'",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Gemset could not be created.'",
"return",
"ret"
] | 28.282609 | [
0.019230769230769232,
0.2857142857142857,
0.05263157894736842,
0,
0.25,
0.06451612903225806,
0,
0.11764705882352941,
0.041666666666666664,
0,
0.14285714285714285,
0.06451612903225806,
0,
0.09375,
0.2857142857142857,
0.02857142857142857,
0,
0.06451612903225806,
0.06666666666666667,
0.1111111111111111,
0,
0.10526315789473684,
0.05555555555555555,
0.05555555555555555,
0.06896551724137931,
0.06060606060606061,
0.02666666666666667,
0.09090909090909091,
0,
0.03278688524590164,
0.07142857142857142,
0.04081632653061224,
0.2222222222222222,
0.07142857142857142,
0.0625,
0.028985507246376812,
0.09090909090909091,
0.03076923076923077,
0.0625,
0.03389830508474576,
0.045454545454545456,
0.15384615384615385,
0.06060606060606061,
0.03389830508474576,
0,
0.14285714285714285
] |
def new(cls, seed: Optional[bytes]) -> 'SignKey':
"""
Creates and returns random (or seeded from seed) BLS sign key.
:param: seed - Optional seed.
:return: BLS sign key
"""
logger = logging.getLogger(__name__)
logger.debug("SignKey::new: >>>")
c_instance = c_void_p()
do_call(cls.new_handler, seed, len(seed) if seed is not None else 0, byref(c_instance))
res = cls(c_instance)
logger.debug("SignKey::new: <<< res: %r", res)
return res | [
"def",
"new",
"(",
"cls",
",",
"seed",
":",
"Optional",
"[",
"bytes",
"]",
")",
"->",
"'SignKey'",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"SignKey::new: >>>\"",
")",
"c_instance",
"=",
"c_void_p",
"(",
")",
"do_call",
"(",
"cls",
".",
"new_handler",
",",
"seed",
",",
"len",
"(",
"seed",
")",
"if",
"seed",
"is",
"not",
"None",
"else",
"0",
",",
"byref",
"(",
"c_instance",
")",
")",
"res",
"=",
"cls",
"(",
"c_instance",
")",
"logger",
".",
"debug",
"(",
"\"SignKey::new: <<< res: %r\"",
",",
"res",
")",
"return",
"res"
] | 32.4375 | [
0.02040816326530612,
0.18181818181818182,
0.05714285714285714,
0.08108108108108109,
0.10344827586206896,
0.18181818181818182,
0.045454545454545456,
0.04878048780487805,
0,
0.06451612903225806,
0.031578947368421054,
0,
0.06896551724137931,
0,
0.037037037037037035,
0.1111111111111111
] |
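A hedged usage sketch: passing None asks the native library for a random key, while a seed makes key generation reproducible. This assumes the indy-crypto native bindings behind this class are installed; the 32-byte seed length is an assumption for illustration only.

seed = b'0' * 32                  # assumed 32-byte seed
random_key = SignKey.new(None)    # fresh random BLS sign key
seeded_key = SignKey.new(seed)    # reproducible key derived from the seed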
def ajAssims(self):
""" Charge et définit les débuts de mots non-assimilés, associe à chacun sa forme assimilée.
"""
for lin in lignesFichier(self.path("assimilations.la")):
ass1, ass2 = tuple(lin.split(':'))
self.lemmatiseur._assims[ass1] = ass2
self.lemmatiseur._assimsq[atone(ass1)] = atone(ass2) | [
"def",
"ajAssims",
"(",
"self",
")",
":",
"for",
"lin",
"in",
"lignesFichier",
"(",
"self",
".",
"path",
"(",
"\"assimilations.la\"",
")",
")",
":",
"ass1",
",",
"ass2",
"=",
"tuple",
"(",
"lin",
".",
"split",
"(",
"':'",
")",
")",
"self",
".",
"lemmatiseur",
".",
"_assims",
"[",
"ass1",
"]",
"=",
"ass2",
"self",
".",
"lemmatiseur",
".",
"_assimsq",
"[",
"atone",
"(",
"ass1",
")",
"]",
"=",
"atone",
"(",
"ass2",
")"
] | 50.428571 | [
0.05263157894736842,
0.03,
0.18181818181818182,
0.03125,
0.043478260869565216,
0.04081632653061224,
0.03125
] |
def do_POST(self):
"""
Perform a POST request
"""
# Doesn't do anything with posted data
# print "uri: ", self.client_address, self.path
self.do_initial_operations()
payload = self.coap_uri.get_payload()
if payload is None:
logger.error("BAD POST REQUEST")
self.send_error(BAD_REQUEST)
return
coap_response = self.client.post(self.coap_uri.path, payload)
self.client.stop()
logger.info("Server response: %s", coap_response.pretty_print())
self.set_http_response(coap_response) | [
"def",
"do_POST",
"(",
"self",
")",
":",
"# Doesn't do anything with posted data",
"# print \"uri: \", self.client_address, self.path",
"self",
".",
"do_initial_operations",
"(",
")",
"payload",
"=",
"self",
".",
"coap_uri",
".",
"get_payload",
"(",
")",
"if",
"payload",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"BAD POST REQUEST\"",
")",
"self",
".",
"send_error",
"(",
"BAD_REQUEST",
")",
"return",
"coap_response",
"=",
"self",
".",
"client",
".",
"post",
"(",
"self",
".",
"coap_uri",
".",
"path",
",",
"payload",
")",
"self",
".",
"client",
".",
"stop",
"(",
")",
"logger",
".",
"info",
"(",
"\"Server response: %s\"",
",",
"coap_response",
".",
"pretty_print",
"(",
")",
")",
"self",
".",
"set_http_response",
"(",
"coap_response",
")"
] | 37.0625 | [
0.05555555555555555,
0.18181818181818182,
0.06666666666666667,
0.18181818181818182,
0.043478260869565216,
0.03636363636363636,
0.05555555555555555,
0.044444444444444446,
0.07407407407407407,
0.045454545454545456,
0.05,
0.1111111111111111,
0.028985507246376812,
0.07692307692307693,
0.027777777777777776,
0.044444444444444446
] |
def write_posterior(self, filename, **kwargs):
"""Write posterior only file
Parameters
----------
filename : str
Name of output file to store posterior
"""
f = h5py.File(filename, 'w')
# Preserve top-level metadata
for key in self.attrs:
f.attrs[key] = self.attrs[key]
f.attrs['filetype'] = PosteriorFile.name
s = f.create_group('samples')
fields = self[self.samples_group].keys()
# Copy and squash fields into one dimensional arrays
for field_name in fields:
fvalue = self[self.samples_group][field_name][:]
thin = fvalue[:,self.thin_start:self.thin_end:self.thin_interval]
s[field_name] = thin.flatten() | [
"def",
"write_posterior",
"(",
"self",
",",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"f",
"=",
"h5py",
".",
"File",
"(",
"filename",
",",
"'w'",
")",
"# Preserve top-level metadata",
"for",
"key",
"in",
"self",
".",
"attrs",
":",
"f",
".",
"attrs",
"[",
"key",
"]",
"=",
"self",
".",
"attrs",
"[",
"key",
"]",
"f",
".",
"attrs",
"[",
"'filetype'",
"]",
"=",
"PosteriorFile",
".",
"name",
"s",
"=",
"f",
".",
"create_group",
"(",
"'samples'",
")",
"fields",
"=",
"self",
"[",
"self",
".",
"samples_group",
"]",
".",
"keys",
"(",
")",
"# Copy and squash fields into one dimensional arrays",
"for",
"field_name",
"in",
"fields",
":",
"fvalue",
"=",
"self",
"[",
"self",
".",
"samples_group",
"]",
"[",
"field_name",
"]",
"[",
":",
"]",
"thin",
"=",
"fvalue",
"[",
":",
",",
"self",
".",
"thin_start",
":",
"self",
".",
"thin_end",
":",
"self",
".",
"thin_interval",
"]",
"s",
"[",
"field_name",
"]",
"=",
"thin",
".",
"flatten",
"(",
")"
] | 32.652174 | [
0.021739130434782608,
0.05555555555555555,
0,
0.1111111111111111,
0.1111111111111111,
0.13636363636363635,
0.04,
0.18181818181818182,
0.05555555555555555,
0,
0.05405405405405406,
0.06666666666666667,
0.047619047619047616,
0,
0.041666666666666664,
0.05405405405405406,
0.041666666666666664,
0,
0.03333333333333333,
0.06060606060606061,
0.03333333333333333,
0.03896103896103896,
0.047619047619047616
] |
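The copy-and-squash step above is just a strided slice followed by flatten. A self-contained NumPy illustration, with an assumed (walkers, iterations) layout and assumed thinning parameters:

import numpy as np

samples = np.arange(20).reshape(2, 10)   # assumed: 2 walkers x 10 iterations
thin = samples[:, 2:None:3]              # thin_start=2, thin_end=None, thin_interval=3
print(thin.flatten())                    # [ 2  5  8 12 15 18]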
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit) | [
"def",
"returnLendingHistory",
"(",
"self",
",",
"start",
"=",
"0",
",",
"end",
"=",
"2",
"**",
"32",
"-",
"1",
",",
"limit",
"=",
"None",
")",
":",
"return",
"self",
".",
"_private",
"(",
"'returnLendingHistory'",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"limit",
"=",
"limit",
")"
] | 66 | [
0.015384615384615385,
0.02631578947368421,
0.02564102564102564,
0.04838709677419355,
0.04054054054054054,
0.12195121951219512
] |
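A hedged usage sketch restricting the history to the last 24 hours; polo is a hypothetical authenticated client instance exposing the method above, and the timestamps are plain UNIX epochs as the docstring requires.

import time

end = int(time.time())
start = end - 24 * 60 * 60                 # one day back
history = polo.returnLendingHistory(start=start, end=end, limit=100)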
def _ls_print_summary(all_trainings: List[Tuple[str, dict, TrainingTrace]]) -> None:
"""
Print trainings summary.
In particular print tables summarizing the number of trainings with
- particular model names
- particular combinations of models and datasets
:param all_trainings: a list of training tuples (train_dir, configuration dict, trace)
"""
counts_by_name = defaultdict(int)
counts_by_classes = defaultdict(int)
for _, config, _ in all_trainings:
counts_by_name[get_model_name(config)] += 1
counts_by_classes[get_classes(config)] += 1
print_boxed('summary')
print()
counts_table = [[name, count] for name, count in counts_by_name.items()]
print(tabulate(counts_table, headers=['model.name', 'count'], tablefmt='grid'))
print()
counts_table = [[classes[0], classes[1], count] for classes, count in counts_by_classes.items()]
print(tabulate(counts_table, headers=['model.class', 'dataset.class', 'count'], tablefmt='grid'))
print() | [
"def",
"_ls_print_summary",
"(",
"all_trainings",
":",
"List",
"[",
"Tuple",
"[",
"str",
",",
"dict",
",",
"TrainingTrace",
"]",
"]",
")",
"->",
"None",
":",
"counts_by_name",
"=",
"defaultdict",
"(",
"int",
")",
"counts_by_classes",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"_",
",",
"config",
",",
"_",
"in",
"all_trainings",
":",
"counts_by_name",
"[",
"get_model_name",
"(",
"config",
")",
"]",
"+=",
"1",
"counts_by_classes",
"[",
"get_classes",
"(",
"config",
")",
"]",
"+=",
"1",
"print_boxed",
"(",
"'summary'",
")",
"print",
"(",
")",
"counts_table",
"=",
"[",
"[",
"name",
",",
"count",
"]",
"for",
"name",
",",
"count",
"in",
"counts_by_name",
".",
"items",
"(",
")",
"]",
"print",
"(",
"tabulate",
"(",
"counts_table",
",",
"headers",
"=",
"[",
"'model.name'",
",",
"'count'",
"]",
",",
"tablefmt",
"=",
"'grid'",
")",
")",
"print",
"(",
")",
"counts_table",
"=",
"[",
"[",
"classes",
"[",
"0",
"]",
",",
"classes",
"[",
"1",
"]",
",",
"count",
"]",
"for",
"classes",
",",
"count",
"in",
"counts_by_classes",
".",
"items",
"(",
")",
"]",
"print",
"(",
"tabulate",
"(",
"counts_table",
",",
"headers",
"=",
"[",
"'model.class'",
",",
"'dataset.class'",
",",
"'count'",
"]",
",",
"tablefmt",
"=",
"'grid'",
")",
")",
"print",
"(",
")"
] | 40.44 | [
0.023809523809523808,
0.2857142857142857,
0.07142857142857142,
0.028169014084507043,
0.0625,
0.03571428571428571,
0,
0.05555555555555555,
0.2857142857142857,
0.05405405405405406,
0.05,
0.05263157894736842,
0.0392156862745098,
0.0392156862745098,
0,
0.07692307692307693,
0.18181818181818182,
0,
0.02631578947368421,
0.03614457831325301,
0.18181818181818182,
0,
0.03,
0.0297029702970297,
0.18181818181818182
] |
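A self-contained demonstration of the counting-plus-tabulate pattern used above; the configurations and the 'model.name' header are made-up stand-ins for the real training configs.

from collections import defaultdict
from tabulate import tabulate

configs = [{'model': 'cnn'}, {'model': 'cnn'}, {'model': 'rnn'}]
counts_by_name = defaultdict(int)
for config in configs:
    counts_by_name[config['model']] += 1

table = [[name, count] for name, count in counts_by_name.items()]
print(tabulate(table, headers=['model.name', 'count'], tablefmt='grid'))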
def to_localized_time(self, date, **kw):
"""Converts the given date to a localized time string
"""
date = api.to_date(date, default=None)
if date is None:
return ""
# default options
options = {
"long_format": True,
"time_only": False,
"context": self.context,
"request": self.request,
"domain": "senaite.core",
}
options.update(kw)
return ulocalized_time(date, **options) | [
"def",
"to_localized_time",
"(",
"self",
",",
"date",
",",
"*",
"*",
"kw",
")",
":",
"date",
"=",
"api",
".",
"to_date",
"(",
"date",
",",
"default",
"=",
"None",
")",
"if",
"date",
"is",
"None",
":",
"return",
"\"\"",
"# default options",
"options",
"=",
"{",
"\"long_format\"",
":",
"True",
",",
"\"time_only\"",
":",
"False",
",",
"\"context\"",
":",
"self",
".",
"context",
",",
"\"request\"",
":",
"self",
".",
"request",
",",
"\"domain\"",
":",
"\"senaite.core\"",
",",
"}",
"options",
".",
"update",
"(",
"kw",
")",
"return",
"ulocalized_time",
"(",
"date",
",",
"*",
"*",
"options",
")"
] | 31.3125 | [
0.025,
0.03278688524590164,
0.18181818181818182,
0.043478260869565216,
0.08333333333333333,
0.09523809523809523,
0.08,
0.15789473684210525,
0.0625,
0.06451612903225806,
0.05555555555555555,
0.05555555555555555,
0.05405405405405406,
0.3333333333333333,
0.07692307692307693,
0.0425531914893617
] |
def update_checklist(self, name):
'''
Update the current checklist. Returns a new Checklist object.
'''
checklist_json = self.fetch_json(
uri_path=self.base_uri,
http_method='PUT',
query_params={'name': name}
)
return self.create_checklist(checklist_json) | [
"def",
"update_checklist",
"(",
"self",
",",
"name",
")",
":",
"checklist_json",
"=",
"self",
".",
"fetch_json",
"(",
"uri_path",
"=",
"self",
".",
"base_uri",
",",
"http_method",
"=",
"'PUT'",
",",
"query_params",
"=",
"{",
"'name'",
":",
"name",
"}",
")",
"return",
"self",
".",
"create_checklist",
"(",
"checklist_json",
")"
] | 30 | [
0.030303030303030304,
0.18181818181818182,
0.028985507246376812,
0.18181818181818182,
0.07317073170731707,
0.08571428571428572,
0.1,
0.07692307692307693,
0.3333333333333333,
0,
0.038461538461538464
] |
def load_all(cls, vr, params=None):
"""
Create instances of all objects found
"""
ob_docs = vr.query(cls.base, params)
return [cls(vr, ob) for ob in ob_docs] | [
"def",
"load_all",
"(",
"cls",
",",
"vr",
",",
"params",
"=",
"None",
")",
":",
"ob_docs",
"=",
"vr",
".",
"query",
"(",
"cls",
".",
"base",
",",
"params",
")",
"return",
"[",
"cls",
"(",
"vr",
",",
"ob",
")",
"for",
"ob",
"in",
"ob_docs",
"]"
] | 32 | [
0.02857142857142857,
0.18181818181818182,
0.044444444444444446,
0.18181818181818182,
0.045454545454545456,
0.043478260869565216
] |
def _from_dict(cls, _dict):
"""Initialize a SourceOptions object from a json dictionary."""
args = {}
if 'folders' in _dict:
args['folders'] = [
SourceOptionsFolder._from_dict(x)
for x in (_dict.get('folders'))
]
if 'objects' in _dict:
args['objects'] = [
SourceOptionsObject._from_dict(x)
for x in (_dict.get('objects'))
]
if 'site_collections' in _dict:
args['site_collections'] = [
SourceOptionsSiteColl._from_dict(x)
for x in (_dict.get('site_collections'))
]
if 'urls' in _dict:
args['urls'] = [
SourceOptionsWebCrawl._from_dict(x) for x in (_dict.get('urls'))
]
if 'buckets' in _dict:
args['buckets'] = [
SourceOptionsBuckets._from_dict(x)
for x in (_dict.get('buckets'))
]
if 'crawl_all_buckets' in _dict:
args['crawl_all_buckets'] = _dict.get('crawl_all_buckets')
return cls(**args) | [
"def",
"_from_dict",
"(",
"cls",
",",
"_dict",
")",
":",
"args",
"=",
"{",
"}",
"if",
"'folders'",
"in",
"_dict",
":",
"args",
"[",
"'folders'",
"]",
"=",
"[",
"SourceOptionsFolder",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'folders'",
")",
")",
"]",
"if",
"'objects'",
"in",
"_dict",
":",
"args",
"[",
"'objects'",
"]",
"=",
"[",
"SourceOptionsObject",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'objects'",
")",
")",
"]",
"if",
"'site_collections'",
"in",
"_dict",
":",
"args",
"[",
"'site_collections'",
"]",
"=",
"[",
"SourceOptionsSiteColl",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'site_collections'",
")",
")",
"]",
"if",
"'urls'",
"in",
"_dict",
":",
"args",
"[",
"'urls'",
"]",
"=",
"[",
"SourceOptionsWebCrawl",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'urls'",
")",
")",
"]",
"if",
"'buckets'",
"in",
"_dict",
":",
"args",
"[",
"'buckets'",
"]",
"=",
"[",
"SourceOptionsBuckets",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'buckets'",
")",
")",
"]",
"if",
"'crawl_all_buckets'",
"in",
"_dict",
":",
"args",
"[",
"'crawl_all_buckets'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'crawl_all_buckets'",
")",
"return",
"cls",
"(",
"*",
"*",
"args",
")"
] | 36.966667 | [
0.037037037037037035,
0.028169014084507043,
0.11764705882352941,
0.06666666666666667,
0.0967741935483871,
0.04081632653061224,
0.0425531914893617,
0.23076923076923078,
0.06666666666666667,
0.0967741935483871,
0.04081632653061224,
0.0425531914893617,
0.23076923076923078,
0.05128205128205128,
0.075,
0.0392156862745098,
0.03571428571428571,
0.23076923076923078,
0.07407407407407407,
0.10714285714285714,
0.0375,
0.23076923076923078,
0.06666666666666667,
0.0967741935483871,
0.04,
0.0425531914893617,
0.23076923076923078,
0.05,
0.02857142857142857,
0.07692307692307693
] |
def recognize_speech(self, config, audio, retry=None, timeout=None):
"""
Recognizes audio input
:param config: information to the recognizer that specifies how to process the request.
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig
:type config: dict or google.cloud.speech_v1.types.RecognitionConfig
:param audio: audio data to be recognized
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio
:type audio: dict or google.cloud.speech_v1.types.RecognitionAudio
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:type timeout: float
"""
client = self.get_conn()
response = client.recognize(config=config, audio=audio, retry=retry, timeout=timeout)
self.log.info("Recognised speech: %s" % response)
return response | [
"def",
"recognize_speech",
"(",
"self",
",",
"config",
",",
"audio",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"response",
"=",
"client",
".",
"recognize",
"(",
"config",
"=",
"config",
",",
"audio",
"=",
"audio",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Recognised speech: %s\"",
"%",
"response",
")",
"return",
"response"
] | 62.142857 | [
0.014705882352941176,
0.18181818181818182,
0.06666666666666667,
0,
0.042105263157894736,
0.0425531914893617,
0.039473684210526314,
0.061224489795918366,
0.04285714285714286,
0.04054054054054054,
0.043010752688172046,
0.04878048780487805,
0.0625,
0.038834951456310676,
0.03260869565217391,
0.10714285714285714,
0.18181818181818182,
0.0625,
0.03225806451612903,
0.03508771929824561,
0.08695652173913043
] |
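A hedged usage sketch: per the docstring, both arguments may be passed as plain dicts instead of protobuf types. Here hook is a hypothetical instance of the class defining this method, and the bucket path, encoding, and sample rate are illustrative assumptions.

config = {
    'encoding': 'LINEAR16',        # assumed 16-bit PCM audio
    'sample_rate_hertz': 16000,
    'language_code': 'en-US',
}
audio = {'uri': 'gs://my-bucket/sample.wav'}   # hypothetical bucket path
response = hook.recognize_speech(config=config, audio=audio)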
def relaxNGValidatePushElement(self, doc, elem):
"""Push a new element start on the RelaxNG validation stack. """
if doc is None: doc__o = None
else: doc__o = doc._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlRelaxNGValidatePushElement(self._o, doc__o, elem__o)
return ret | [
"def",
"relaxNGValidatePushElement",
"(",
"self",
",",
"doc",
",",
"elem",
")",
":",
"if",
"doc",
"is",
"None",
":",
"doc__o",
"=",
"None",
"else",
":",
"doc__o",
"=",
"doc",
".",
"_o",
"if",
"elem",
"is",
"None",
":",
"elem__o",
"=",
"None",
"else",
":",
"elem__o",
"=",
"elem",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlRelaxNGValidatePushElement",
"(",
"self",
".",
"_o",
",",
"doc__o",
",",
"elem__o",
")",
"return",
"ret"
] | 44.25 | [
0.020833333333333332,
0.027777777777777776,
0.08108108108108109,
0.10344827586206896,
0.07692307692307693,
0.0967741935483871,
0.0375,
0.1111111111111111
] |
def get_hardware_source_by_id(self, hardware_source_id: str, version: str):
"""Return the hardware source API matching the hardware_source_id and version.
.. versionadded:: 1.0
Scriptable: Yes
"""
actual_version = "1.0.0"
if Utility.compare_versions(version, actual_version) > 0:
raise NotImplementedError("Hardware API requested version %s is greater than %s." % (version, actual_version))
hardware_source = HardwareSourceModule.HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id)
return HardwareSource(hardware_source) if hardware_source else None | [
"def",
"get_hardware_source_by_id",
"(",
"self",
",",
"hardware_source_id",
":",
"str",
",",
"version",
":",
"str",
")",
":",
"actual_version",
"=",
"\"1.0.0\"",
"if",
"Utility",
".",
"compare_versions",
"(",
"version",
",",
"actual_version",
")",
">",
"0",
":",
"raise",
"NotImplementedError",
"(",
"\"Hardware API requested version %s is greater than %s.\"",
"%",
"(",
"version",
",",
"actual_version",
")",
")",
"hardware_source",
"=",
"HardwareSourceModule",
".",
"HardwareSourceManager",
"(",
")",
".",
"get_hardware_source_for_hardware_source_id",
"(",
"hardware_source_id",
")",
"return",
"HardwareSource",
"(",
"hardware_source",
")",
"if",
"hardware_source",
"else",
"None"
] | 54.25 | [
0.013333333333333334,
0.03488372093023256,
0,
0.10344827586206896,
0,
0.08695652173913043,
0.18181818181818182,
0.0625,
0.03076923076923077,
0.02459016393442623,
0.022556390977443608,
0.02666666666666667
] |
def render(self, display):
"""Render basicly the text."""
# to handle changing objects / callable
if self.text != self._last_text:
self._render()
display.blit(self._surface, (self.topleft, self.size)) | [
"def",
"render",
"(",
"self",
",",
"display",
")",
":",
"# to handle changing objects / callable",
"if",
"self",
".",
"text",
"!=",
"self",
".",
"_last_text",
":",
"self",
".",
"_render",
"(",
")",
"display",
".",
"blit",
"(",
"self",
".",
"_surface",
",",
"(",
"self",
".",
"topleft",
",",
"self",
".",
"size",
")",
")"
] | 34.142857 | [
0.038461538461538464,
0.05263157894736842,
0.0425531914893617,
0.05,
0.07692307692307693,
0,
0.03225806451612903
] |
def save_token(self, access_token):
"""
Stores the access token and additional data in redis.
See :class:`oauth2.store.AccessTokenStore`.
"""
self.write(access_token.token, access_token.__dict__)
unique_token_key = self._unique_token_key(access_token.client_id,
access_token.grant_type,
access_token.user_id)
self.write(unique_token_key, access_token.__dict__)
if access_token.refresh_token is not None:
self.write(access_token.refresh_token, access_token.__dict__) | [
"def",
"save_token",
"(",
"self",
",",
"access_token",
")",
":",
"self",
".",
"write",
"(",
"access_token",
".",
"token",
",",
"access_token",
".",
"__dict__",
")",
"unique_token_key",
"=",
"self",
".",
"_unique_token_key",
"(",
"access_token",
".",
"client_id",
",",
"access_token",
".",
"grant_type",
",",
"access_token",
".",
"user_id",
")",
"self",
".",
"write",
"(",
"unique_token_key",
",",
"access_token",
".",
"__dict__",
")",
"if",
"access_token",
".",
"refresh_token",
"is",
"not",
"None",
":",
"self",
".",
"write",
"(",
"access_token",
".",
"refresh_token",
",",
"access_token",
".",
"__dict__",
")"
] | 39.375 | [
0.02857142857142857,
0.18181818181818182,
0.03278688524590164,
0,
0.11764705882352941,
0,
0.18181818181818182,
0.03278688524590164,
0,
0.0410958904109589,
0.04054054054054054,
0.056338028169014086,
0.03389830508474576,
0,
0.04,
0.0273972602739726
] |
def load_trajectory(self, trajectory):
        """
        Loads the trajectory files e.g. XTC, DCD, TRJ on top of the topology
        already held by the MDAnalysis Universe. This will only be run if a
        trajectory has been submitted for analysis.
        Takes:
        * trajectory * - a trajectory file e.g. XTC, DCD, TRJ
        Output:
        * self.universe * - an MDAnalysis Universe consisting from the
        topology and trajectory file.
        """
try:
self.universe.load_new(trajectory)
        except (IOError, ValueError):
            print("Check your trajectory file " + trajectory + " - it might be missing or misspelled.") | [
"def",
"load_trajectory",
"(",
"self",
",",
"trajectory",
")",
":",
"try",
":",
"self",
".",
"universe",
".",
"load_new",
"(",
"trajectory",
")",
"except",
"(",
"IOError",
",",
"ValueError",
")",
":",
"print",
"(",
"\"Check your trajectory file \"",
"+",
"trajectory",
"+",
"\" - it might be missing or misspelled.\"",
")"
] | 44.411765 | [
0.05405405405405406,
0.18181818181818182,
0.0375,
0.0375,
0.05555555555555555,
0.1111111111111111,
0.02531645569620253,
0.028985507246376812,
0.10526315789473684,
0.02564102564102564,
0.044444444444444446,
0.18181818181818182,
0,
0.16666666666666666,
0.043478260869565216,
0.05714285714285714,
0.04040404040404041
] |
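For context, a hedged sketch of the underlying MDAnalysis call: Universe.load_new swaps a new trajectory into an existing Universe, which is what the wrapper above relies on. The file names are hypothetical and the MDAnalysis package is assumed to be installed.

import MDAnalysis as mda

universe = mda.Universe('topology.gro', 'part1.xtc')   # hypothetical files
universe.load_new('part2.xtc')                         # replace the trajectory in place
print(len(universe.trajectory))                        # frames of part2.xtc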
async def _loadNodeValu(self, full, valu):
'''
Load a node from storage into the tree.
        (used by initialization routines to build the tree)
'''
node = self.root
for path in iterpath(full):
name = path[-1]
step = node.kids.get(name)
if step is None:
step = await self._initNodePath(node, path, None)
node = step
node.valu = valu
return node | [
"async",
"def",
"_loadNodeValu",
"(",
"self",
",",
"full",
",",
"valu",
")",
":",
"node",
"=",
"self",
".",
"root",
"for",
"path",
"in",
"iterpath",
"(",
"full",
")",
":",
"name",
"=",
"path",
"[",
"-",
"1",
"]",
"step",
"=",
"node",
".",
"kids",
".",
"get",
"(",
"name",
")",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"await",
"self",
".",
"_initNodePath",
"(",
"node",
",",
"path",
",",
"None",
")",
"node",
"=",
"step",
"node",
".",
"valu",
"=",
"valu",
"return",
"node"
] | 25.222222 | [
0.023809523809523808,
0.18181818181818182,
0.0425531914893617,
0.05,
0.18181818181818182,
0.08333333333333333,
0.05714285714285714,
0,
0.07407407407407407,
0,
0.05263157894736842,
0.07142857142857142,
0.03076923076923077,
0,
0.08695652173913043,
0,
0.08333333333333333,
0.10526315789473684
] |
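A synchronous, dependency-free sketch of the same path-walk insertion performed by the coroutine above; the Node class and the use of plain path names (rather than iterpath's growing prefixes) are simplifying assumptions.

class Node:
    def __init__(self):
        self.kids = {}
        self.valu = None

def load_node_valu(root, names, valu):
    node = root
    for name in names:
        step = node.kids.get(name)
        if step is None:                   # create missing path segments on the way down
            step = node.kids[name] = Node()
        node = step
    node.valu = valu                       # only the leaf receives the value
    return node

root = Node()
load_node_valu(root, ('foo', 'bar'), 10)
print(root.kids['foo'].kids['bar'].valu)   # 10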
def setHint(self, text):
"""
        Sets the hint to the given text. The same hint will be used for
all editors in this widget.
:param text | <str>
"""
texts = nativestring(text).split(' ')
for i, text in enumerate(texts):
editor = self.editorAt(i)
if not editor:
break
editor.setHint(text) | [
"def",
"setHint",
"(",
"self",
",",
"text",
")",
":",
"texts",
"=",
"nativestring",
"(",
"text",
")",
".",
"split",
"(",
"' '",
")",
"for",
"i",
",",
"text",
"in",
"enumerate",
"(",
"texts",
")",
":",
"editor",
"=",
"self",
".",
"editorAt",
"(",
"i",
")",
"if",
"not",
"editor",
":",
"break",
"editor",
".",
"setHint",
"(",
"text",
")"
] | 28.733333 | [
0.04,
0.08333333333333333,
0.013157894736842105,
0.027777777777777776,
0.1111111111111111,
0.09090909090909091,
0.08333333333333333,
0.021739130434782608,
0.1111111111111111,
0.07317073170731707,
0.02631578947368421,
0.1111111111111111,
0.045454545454545456,
0.07692307692307693,
0.0625
] |
def _on_group_stream_changed(self, data):
"""Handle group stream change."""
self._groups.get(data.get('id')).update_stream(data) | [
"def",
"_on_group_stream_changed",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_groups",
".",
"get",
"(",
"data",
".",
"get",
"(",
"'id'",
")",
")",
".",
"update_stream",
"(",
"data",
")"
] | 47.333333 | [
0.024390243902439025,
0.04878048780487805,
0.03333333333333333
] |
def is_equal(self, other):
"""
The objects must be the same
- Same members (if enumerated)
- Or same structure (if not enumerated)
If the merge does not produce any new information (or contradiction)
then these are equal.
"""
print type(self.prototype), type(other.prototype)
if not self.prototype.is_equal(other.prototype):
print "Different prototypes"
return False
if self.exclude != other.exclude:
print "Different excludes"
return False
if self.include != other.include:
print "Different includes"
return False
return True | [
"def",
"is_equal",
"(",
"self",
",",
"other",
")",
":",
"print",
"type",
"(",
"self",
".",
"prototype",
")",
",",
"type",
"(",
"other",
".",
"prototype",
")",
"if",
"not",
"self",
".",
"prototype",
".",
"is_equal",
"(",
"other",
".",
"prototype",
")",
":",
"print",
"\"Different prototypes\"",
"return",
"False",
"if",
"self",
".",
"exclude",
"!=",
"other",
".",
"exclude",
":",
"print",
"\"Different excludes\"",
"return",
"False",
"if",
"self",
".",
"include",
"!=",
"other",
".",
"include",
":",
"print",
"\"Different includes\"",
"return",
"False",
"return",
"True"
] | 34 | [
0.038461538461538464,
0.18181818181818182,
0.05555555555555555,
0.1,
0.08163265306122448,
0,
0.05263157894736842,
0.06896551724137931,
0.18181818181818182,
0.03508771929824561,
0.03571428571428571,
0.05,
0.08333333333333333,
0.04878048780487805,
0.05263157894736842,
0.08333333333333333,
0.04878048780487805,
0.05263157894736842,
0.08333333333333333,
0.10526315789473684
] |
def calc_reward_fn(self):
"""Calculate the reward value"""
model = copy.copy(self.model)
model.train(self.dataset)
# reward function: Importance-Weighted-Accuracy (IW-ACC) (tau, f)
reward = 0.
for i in range(len(self.queried_hist_)):
reward += self.W[i] * (
model.predict(
self.dataset.data[
self.queried_hist_[i]][0].reshape(1, -1)
)[0] ==
self.dataset.data[self.queried_hist_[i]][1]
)
reward /= (self.dataset.len_labeled() + self.dataset.len_unlabeled())
reward /= self.T
return reward | [
"def",
"calc_reward_fn",
"(",
"self",
")",
":",
"model",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"model",
")",
"model",
".",
"train",
"(",
"self",
".",
"dataset",
")",
"# reward function: Importance-Weighted-Accuracy (IW-ACC) (tau, f)",
"reward",
"=",
"0.",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"queried_hist_",
")",
")",
":",
"reward",
"+=",
"self",
".",
"W",
"[",
"i",
"]",
"*",
"(",
"model",
".",
"predict",
"(",
"self",
".",
"dataset",
".",
"data",
"[",
"self",
".",
"queried_hist_",
"[",
"i",
"]",
"]",
"[",
"0",
"]",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
")",
"[",
"0",
"]",
"==",
"self",
".",
"dataset",
".",
"data",
"[",
"self",
".",
"queried_hist_",
"[",
"i",
"]",
"]",
"[",
"1",
"]",
")",
"reward",
"/=",
"(",
"self",
".",
"dataset",
".",
"len_labeled",
"(",
")",
"+",
"self",
".",
"dataset",
".",
"len_unlabeled",
"(",
")",
")",
"reward",
"/=",
"self",
".",
"T",
"return",
"reward"
] | 36.833333 | [
0.04,
0.05,
0.05405405405405406,
0.06060606060606061,
0,
0.0273972602739726,
0.10526315789473684,
0.041666666666666664,
0.08571428571428572,
0.1,
0.07894736842105263,
0.046875,
0.1111111111111111,
0.03389830508474576,
0.23076923076923078,
0.025974025974025976,
0.08333333333333333,
0.09523809523809523
] |
def add_node(self, node):
"""Link the agent to a random member of the previous generation."""
nodes = [n for n in self.nodes() if not isinstance(n, Source)]
num_agents = len(nodes)
curr_generation = int((num_agents - 1) / float(self.generation_size))
node.generation = curr_generation
if curr_generation == 0:
if self.initial_source:
source = min(
self.nodes(type=Source),
key=attrgetter('creation_time'))
source.connect(whom=node)
source.transmit(to_whom=node)
else:
prev_agents = Node.query\
.filter_by(failed=False,
network_id=self.id,
generation=(curr_generation - 1))\
.all()
prev_fits = [p.fitness for p in prev_agents]
prev_probs = [(f / (1.0 * sum(prev_fits))) for f in prev_fits]
rnd = random.random()
temp = 0.0
for i, probability in enumerate(prev_probs):
temp += probability
if temp > rnd:
parent = prev_agents[i]
break
parent.connect(whom=node)
parent.transmit(to_whom=node) | [
"def",
"add_node",
"(",
"self",
",",
"node",
")",
":",
"nodes",
"=",
"[",
"n",
"for",
"n",
"in",
"self",
".",
"nodes",
"(",
")",
"if",
"not",
"isinstance",
"(",
"n",
",",
"Source",
")",
"]",
"num_agents",
"=",
"len",
"(",
"nodes",
")",
"curr_generation",
"=",
"int",
"(",
"(",
"num_agents",
"-",
"1",
")",
"/",
"float",
"(",
"self",
".",
"generation_size",
")",
")",
"node",
".",
"generation",
"=",
"curr_generation",
"if",
"curr_generation",
"==",
"0",
":",
"if",
"self",
".",
"initial_source",
":",
"source",
"=",
"min",
"(",
"self",
".",
"nodes",
"(",
"type",
"=",
"Source",
")",
",",
"key",
"=",
"attrgetter",
"(",
"'creation_time'",
")",
")",
"source",
".",
"connect",
"(",
"whom",
"=",
"node",
")",
"source",
".",
"transmit",
"(",
"to_whom",
"=",
"node",
")",
"else",
":",
"prev_agents",
"=",
"Node",
".",
"query",
".",
"filter_by",
"(",
"failed",
"=",
"False",
",",
"network_id",
"=",
"self",
".",
"id",
",",
"generation",
"=",
"(",
"curr_generation",
"-",
"1",
")",
")",
".",
"all",
"(",
")",
"prev_fits",
"=",
"[",
"p",
".",
"fitness",
"for",
"p",
"in",
"prev_agents",
"]",
"prev_probs",
"=",
"[",
"(",
"f",
"/",
"(",
"1.0",
"*",
"sum",
"(",
"prev_fits",
")",
")",
")",
"for",
"f",
"in",
"prev_fits",
"]",
"rnd",
"=",
"random",
".",
"random",
"(",
")",
"temp",
"=",
"0.0",
"for",
"i",
",",
"probability",
"in",
"enumerate",
"(",
"prev_probs",
")",
":",
"temp",
"+=",
"probability",
"if",
"temp",
">",
"rnd",
":",
"parent",
"=",
"prev_agents",
"[",
"i",
"]",
"break",
"parent",
".",
"connect",
"(",
"whom",
"=",
"node",
")",
"parent",
".",
"transmit",
"(",
"to_whom",
"=",
"node",
")"
] | 38.424242 | [
0.04,
0.02666666666666667,
0.02857142857142857,
0.06451612903225806,
0.025974025974025976,
0.04878048780487805,
0,
0.0625,
0.05714285714285714,
0.10344827586206896,
0.045454545454545456,
0.07692307692307693,
0.04878048780487805,
0.044444444444444446,
0.15384615384615385,
0.05405405405405406,
0.075,
0.08695652173913043,
0.08196721311475409,
0.09090909090909091,
0.03571428571428571,
0.02702702702702703,
0,
0.06060606060606061,
0.09090909090909091,
0.03571428571428571,
0.05714285714285714,
0.06666666666666667,
0.046511627906976744,
0.08,
0,
0.05405405405405406,
0.04878048780487805
] |
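The parent choice above is fitness-proportional (roulette-wheel) selection. A self-contained sketch with plain numbers instead of Node objects; the trailing fallback return is an added guard against floating-point shortfall and is not part of the original.

import random

def pick_parent(fitnesses):
    # Normalise fitnesses to probabilities, then walk the cumulative sum.
    probs = [f / (1.0 * sum(fitnesses)) for f in fitnesses]
    rnd = random.random()
    temp = 0.0
    for i, probability in enumerate(probs):
        temp += probability
        if temp > rnd:
            return i
    return len(fitnesses) - 1   # guard: probabilities may sum to just under 1.0

print(pick_parent([0.5, 1.5, 2.0]))   # index 2 is chosen half the time on average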
def _remove_elements(self, elts_to_remove):
"""
Removes flagged elements from the ElementTree
"""
for e in elts_to_remove:
# Get the element parent
parent = e.getparent()
            # lxml also removes the element tail, so preserve it
            if e.tail and e.tail.strip():
                parent_text = parent.text or ''
                parent.text = parent_text + e.tail
            # Remove the element
            e.getparent().remove(e) | [
"def",
"_remove_elements",
"(",
"self",
",",
"elts_to_remove",
")",
":",
"for",
"e",
"in",
"elts_to_remove",
":",
"# Get the element parent",
"parent",
"=",
"e",
".",
"getparent",
"(",
")",
"# lxml also removes the element tail, so preserve it",
"if",
"e",
".",
"tail",
"and",
"e",
".",
"tail",
".",
"strip",
"(",
")",
":",
"parent_text",
"=",
"parent",
".",
"text",
"or",
"''",
"parent",
".",
"text",
"=",
"parent_text",
"+",
"e",
".",
"tail",
"# Remove the element",
"e",
".",
"getparent",
"(",
")",
".",
"remove",
"(",
"e",
")"
] | 30.3125 | [
0.023255813953488372,
0.18181818181818182,
0.03773584905660377,
0.18181818181818182,
0.0625,
0,
0.05555555555555555,
0.058823529411764705,
0,
0.03333333333333333,
0.04878048780487805,
0.0425531914893617,
0.04,
0,
0.0625,
0.05714285714285714
] |
def signature_matches(func, args=(), kwargs={}):
"""
Work out if a function is callable with some args or not.
"""
try:
sig = inspect.signature(func)
sig.bind(*args, **kwargs)
except TypeError:
return False
else:
return True | [
"def",
"signature_matches",
"(",
"func",
",",
"args",
"=",
"(",
")",
",",
"kwargs",
"=",
"{",
"}",
")",
":",
"try",
":",
"sig",
"=",
"inspect",
".",
"signature",
"(",
"func",
")",
"sig",
".",
"bind",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"TypeError",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | 24.545455 | [
0.020833333333333332,
0.2857142857142857,
0.03278688524590164,
0.2857142857142857,
0.25,
0.05405405405405406,
0.06060606060606061,
0.09523809523809523,
0.1,
0.2222222222222222,
0.10526315789473684
] |
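A runnable demonstration, assuming the helper above is in scope (its module imports inspect): binding succeeds only when the arguments satisfy the signature.

def greet(name, punctuation='!'):
    return 'Hello, ' + name + punctuation

print(signature_matches(greet, args=('world',)))         # True: binds cleanly
print(signature_matches(greet))                          # False: 'name' is required
print(signature_matches(greet, kwargs={'volume': 11}))   # False: unexpected keyword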
def init_ui(state):
"""Post initialization for UI application."""
app = state.app
init_common(app)
# Register blueprint for templates
app.register_blueprint(
blueprint, url_prefix=app.config['USERPROFILES_PROFILE_URL']) | [
"def",
"init_ui",
"(",
"state",
")",
":",
"app",
"=",
"state",
".",
"app",
"init_common",
"(",
"app",
")",
"# Register blueprint for templates",
"app",
".",
"register_blueprint",
"(",
"blueprint",
",",
"url_prefix",
"=",
"app",
".",
"config",
"[",
"'USERPROFILES_PROFILE_URL'",
"]",
")"
] | 30.125 | [
0.05263157894736842,
0.04081632653061224,
0.10526315789473684,
0.1,
0,
0.05263157894736842,
0.1111111111111111,
0.057971014492753624
] |
def cond_remove_some(ol,*some,**kwargs):
'''
from elist.elist import *
ol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]
id(ol)
def afterCH(ele,ch):
cond = (ord(str(ele)) > ord(ch))
return(cond)
new = cond_remove_some(ol,0,2,cond_func=afterCH,cond_func_args=['B'])
ol
new
id(ol)
id(new)
####
ol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]
id(ol)
rslt = cond_remove_some(ol,0,2,cond_func=afterCH,cond_func_args=['B'],mode='original')
ol
rslt
id(ol)
id(rslt)
'''
cond_func = kwargs['cond_func']
if('cond_func_args' in kwargs):
cond_func_args = kwargs['cond_func_args']
else:
cond_func_args = []
if('mode' in kwargs):
mode = kwargs["mode"]
else:
mode = "new"
seqs = list(some)
rslt = cond_remove_seqs(ol,seqs,cond_func=cond_func,cond_func_args=cond_func_args)
return(rslt) | [
"def",
"cond_remove_some",
"(",
"ol",
",",
"*",
"some",
",",
"*",
"*",
"kwargs",
")",
":",
"cond_func",
"=",
"kwargs",
"[",
"'cond_func'",
"]",
"if",
"(",
"'cond_func_args'",
"in",
"kwargs",
")",
":",
"cond_func_args",
"=",
"kwargs",
"[",
"'cond_func_args'",
"]",
"else",
":",
"cond_func_args",
"=",
"[",
"]",
"if",
"(",
"'mode'",
"in",
"kwargs",
")",
":",
"mode",
"=",
"kwargs",
"[",
"\"mode\"",
"]",
"else",
":",
"mode",
"=",
"\"new\"",
"seqs",
"=",
"list",
"(",
"some",
")",
"rslt",
"=",
"cond_remove_seqs",
"(",
"ol",
",",
"seqs",
",",
"cond_func",
"=",
"cond_func",
",",
"cond_func_args",
"=",
"cond_func_args",
")",
"return",
"(",
"rslt",
")"
] | 27.138889 | [
0.075,
0.2857142857142857,
0.06060606060606061,
0.2692307692307692,
0.14285714285714285,
0.25,
0.10714285714285714,
0.045454545454545456,
0.08333333333333333,
0.25,
0.07792207792207792,
0.2,
0.18181818181818182,
0.14285714285714285,
0.13333333333333333,
0.16666666666666666,
0.2692307692307692,
0.14285714285714285,
0.0851063829787234,
0.2,
0.16666666666666666,
0.14285714285714285,
0.125,
0.2857142857142857,
0.05714285714285714,
0.05714285714285714,
0.04081632653061224,
0.2222222222222222,
0.07407407407407407,
0.08,
0.06896551724137931,
0.2222222222222222,
0.1,
0.09523809523809523,
0.06976744186046512,
0.125
] |
def decrement(self, key, value=1):
"""
Decrement the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The decrement value
:type value: int
:rtype: int or bool
"""
return self._redis.decr(self._prefix + key, value) | [
"def",
"decrement",
"(",
"self",
",",
"key",
",",
"value",
"=",
"1",
")",
":",
"return",
"self",
".",
"_redis",
".",
"decr",
"(",
"self",
".",
"_prefix",
"+",
"key",
",",
"value",
")"
] | 24.076923 | [
0.029411764705882353,
0.18181818181818182,
0.038461538461538464,
0,
0.09090909090909091,
0.13636363636363635,
0,
0.07317073170731707,
0.125,
0,
0.1111111111111111,
0.18181818181818182,
0.034482758620689655
] |
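A hedged sketch of the underlying Redis semantics: DECRBY returns the new value, which is what the wrapper above passes through. It assumes the redis-py package and a reachable local server; the key prefix is illustrative.

import redis

r = redis.Redis()
r.set('prefix:visits', 10)
print(r.decr('prefix:visits', 3))   # 7: the decremented value is returned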
def extract_key_values(array_value, separators=(';', ',', ':'), **kwargs):
"""Serialize array of objects with simple key-values
"""
items_sep, fields_sep, keys_sep = separators
return items_sep.join(fields_sep.join(keys_sep.join(x) for x in sorted(it.items()))
for it in array_value) | [
"def",
"extract_key_values",
"(",
"array_value",
",",
"separators",
"=",
"(",
"';'",
",",
"','",
",",
"':'",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"items_sep",
",",
"fields_sep",
",",
"keys_sep",
"=",
"separators",
"return",
"items_sep",
".",
"join",
"(",
"fields_sep",
".",
"join",
"(",
"keys_sep",
".",
"join",
"(",
"x",
")",
"for",
"x",
"in",
"sorted",
"(",
"it",
".",
"items",
"(",
")",
")",
")",
"for",
"it",
"in",
"array_value",
")"
] | 53.333333 | [
0.013513513513513514,
0.03571428571428571,
0.2857142857142857,
0.041666666666666664,
0.04597701149425287,
0.08333333333333333
] |
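A runnable demonstration of the serializer above. Note that keys and values must already be strings, since they are passed straight to str.join; the records themselves are made up.

records = [{'b': '2', 'a': '1'}, {'c': '3'}]
print(extract_key_values(records))   # a:1,b:2;c:3  (fields sorted within each object)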
async def _process_polling_updates(self, updates, fast: typing.Optional[bool] = True):
"""
Process updates received from long-polling.
:param updates: list of updates.
:param fast:
"""
need_to_call = []
for responses in itertools.chain.from_iterable(await self.process_updates(updates, fast)):
for response in responses:
if not isinstance(response, BaseResponse):
continue
need_to_call.append(response.execute_response(self.bot))
if need_to_call:
try:
asyncio.gather(*need_to_call)
except TelegramAPIError:
log.exception('Cause exception while processing updates.') | [
"async",
"def",
"_process_polling_updates",
"(",
"self",
",",
"updates",
",",
"fast",
":",
"typing",
".",
"Optional",
"[",
"bool",
"]",
"=",
"True",
")",
":",
"need_to_call",
"=",
"[",
"]",
"for",
"responses",
"in",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"await",
"self",
".",
"process_updates",
"(",
"updates",
",",
"fast",
")",
")",
":",
"for",
"response",
"in",
"responses",
":",
"if",
"not",
"isinstance",
"(",
"response",
",",
"BaseResponse",
")",
":",
"continue",
"need_to_call",
".",
"append",
"(",
"response",
".",
"execute_response",
"(",
"self",
".",
"bot",
")",
")",
"if",
"need_to_call",
":",
"try",
":",
"asyncio",
".",
"gather",
"(",
"*",
"need_to_call",
")",
"except",
"TelegramAPIError",
":",
"log",
".",
"exception",
"(",
"'Cause exception while processing updates.'",
")"
] | 40.722222 | [
0.023255813953488372,
0.18181818181818182,
0.0392156862745098,
0,
0.075,
0.15,
0.18181818181818182,
0.08,
0.030612244897959183,
0.05263157894736842,
0.034482758620689655,
0.07142857142857142,
0.027777777777777776,
0.08333333333333333,
0.125,
0.044444444444444446,
0.05555555555555555,
0.02702702702702703
] |
def _process_qtls_genomic_location(
self, raw, txid, build_id, build_label, common_name, limit=None):
"""
        This method maps QTL entries from the raw file to genomic locations on the given genome build.
Triples created:
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno = Genotype(graph)
# assume that chrs get added to the genome elsewhere
taxon_curie = 'NCBITaxon:' + txid
eco_id = self.globaltt['quantitative trait analysis evidence']
LOG.info("Processing QTL locations for %s from %s", taxon_curie, raw)
with gzip.open(raw, 'rt', encoding='ISO-8859-1') as tsvfile:
reader = csv.reader(tsvfile, delimiter="\t")
for row in reader:
line_counter += 1
if re.match(r'^#', ' '.join(row)):
continue
(chromosome, qtl_source, qtl_type, start_bp, stop_bp, frame, strand,
score, attr) = row
example = '''
Chr.Z Animal QTLdb Production_QTL 33954873 34023581...
QTL_ID=2242;Name="Spleen percentage";Abbrev="SPLP";PUBMED_ID=17012160;trait_ID=2234;
trait="Spleen percentage";breed="leghorn";"FlankMarkers=ADL0022";VTO_name="spleen mass";
MO_name="spleen weight to body weight ratio";Map_Type="Linkage";Model="Mendelian";
Test_Base="Chromosome-wise";Significance="Significant";P-value="<0.05";F-Stat="5.52";
Variance="2.94";Dominance_Effect="-0.002";Additive_Effect="0.01
'''
str(example)
# make dictionary of attributes
# keys are:
# QTL_ID,Name,Abbrev,PUBMED_ID,trait_ID,trait,FlankMarkers,
# VTO_name,Map_Type,Significance,P-value,Model,
# Test_Base,Variance, Bayes-value,PTO_name,gene_IDsrc,peak_cM,
# CMO_name,gene_ID,F-Stat,LOD-score,Additive_Effect,
# Dominance_Effect,Likelihood_Ratio,LS-means,Breed,
# trait (duplicate with Name),Variance,Bayes-value,
# F-Stat,LOD-score,Additive_Effect,Dominance_Effect,
# Likelihood_Ratio,LS-means
# deal with poorly formed attributes
if re.search(r'"FlankMarkers";', attr):
attr = re.sub(r'FlankMarkers;', '', attr)
attr_items = re.sub(r'"', '', attr).split(";")
bad_attrs = set()
for attributes in attr_items:
if not re.search(r'=', attributes):
# remove this attribute from the list
bad_attrs.add(attributes)
attr_set = set(attr_items) - bad_attrs
attribute_dict = dict(item.split("=") for item in attr_set)
qtl_num = attribute_dict.get('QTL_ID')
if self.test_mode and int(qtl_num) not in self.test_ids:
continue
# make association between QTL and trait based on taxon
qtl_id = common_name + 'QTL:' + str(qtl_num)
model.addIndividualToGraph(qtl_id, None, self.globaltt['QTL'])
geno.addTaxon(taxon_curie, qtl_id)
#
trait_id = 'AQTLTrait:' + attribute_dict.get('trait_ID')
# if pub is in attributes, add it to the association
pub_id = None
if 'PUBMED_ID' in attribute_dict.keys():
pub_id = attribute_dict.get('PUBMED_ID')
if re.match(r'ISU.*', pub_id):
pub_id = 'AQTLPub:' + pub_id.strip()
reference = Reference(graph, pub_id)
else:
pub_id = 'PMID:' + pub_id.strip()
reference = Reference(
graph, pub_id, self.globaltt['journal article'])
reference.addRefToGraph()
# Add QTL to graph
assoc = G2PAssoc(
graph, self.name, qtl_id, trait_id,
self.globaltt['is marker for'])
assoc.add_evidence(eco_id)
assoc.add_source(pub_id)
if 'P-value' in attribute_dict.keys():
scr = re.sub(r'<', '', attribute_dict.get('P-value'))
if ',' in scr:
scr = re.sub(r',', '.', scr)
if scr.isnumeric():
score = float(scr)
assoc.set_score(score)
assoc.add_association_to_graph()
# TODO make association to breed
# (which means making QTL feature in Breed background)
# get location of QTL
chromosome = re.sub(r'Chr\.', '', chromosome)
chrom_id = makeChromID(chromosome, taxon_curie, 'CHR')
chrom_in_build_id = makeChromID(chromosome, build_id, 'MONARCH')
geno.addChromosomeInstance(
chromosome, build_id, build_label, chrom_id)
qtl_feature = Feature(graph, qtl_id, None, self.globaltt['QTL'])
if start_bp == '':
start_bp = None
qtl_feature.addFeatureStartLocation(
start_bp, chrom_in_build_id, strand,
[self.globaltt['FuzzyPosition']])
if stop_bp == '':
stop_bp = None
qtl_feature.addFeatureEndLocation(
stop_bp, chrom_in_build_id, strand,
[self.globaltt['FuzzyPosition']])
qtl_feature.addTaxonToFeature(taxon_curie)
qtl_feature.addFeatureToGraph()
if not self.test_mode and limit is not None and line_counter > limit:
break
# LOG.warning("Bad attribute flags in this file") # what does this even mean??
LOG.info("Done with QTL genomic mappings for %s", taxon_curie)
return | [
"def",
"_process_qtls_genomic_location",
"(",
"self",
",",
"raw",
",",
"txid",
",",
"build_id",
",",
"build_label",
",",
"common_name",
",",
"limit",
"=",
"None",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"# assume that chrs get added to the genome elsewhere",
"taxon_curie",
"=",
"'NCBITaxon:'",
"+",
"txid",
"eco_id",
"=",
"self",
".",
"globaltt",
"[",
"'quantitative trait analysis evidence'",
"]",
"LOG",
".",
"info",
"(",
"\"Processing QTL locations for %s from %s\"",
",",
"taxon_curie",
",",
"raw",
")",
"with",
"gzip",
".",
"open",
"(",
"raw",
",",
"'rt'",
",",
"encoding",
"=",
"'ISO-8859-1'",
")",
"as",
"tsvfile",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"tsvfile",
",",
"delimiter",
"=",
"\"\\t\"",
")",
"for",
"row",
"in",
"reader",
":",
"line_counter",
"+=",
"1",
"if",
"re",
".",
"match",
"(",
"r'^#'",
",",
"' '",
".",
"join",
"(",
"row",
")",
")",
":",
"continue",
"(",
"chromosome",
",",
"qtl_source",
",",
"qtl_type",
",",
"start_bp",
",",
"stop_bp",
",",
"frame",
",",
"strand",
",",
"score",
",",
"attr",
")",
"=",
"row",
"example",
"=",
"'''\nChr.Z Animal QTLdb Production_QTL 33954873 34023581...\nQTL_ID=2242;Name=\"Spleen percentage\";Abbrev=\"SPLP\";PUBMED_ID=17012160;trait_ID=2234;\ntrait=\"Spleen percentage\";breed=\"leghorn\";\"FlankMarkers=ADL0022\";VTO_name=\"spleen mass\";\nMO_name=\"spleen weight to body weight ratio\";Map_Type=\"Linkage\";Model=\"Mendelian\";\nTest_Base=\"Chromosome-wise\";Significance=\"Significant\";P-value=\"<0.05\";F-Stat=\"5.52\";\nVariance=\"2.94\";Dominance_Effect=\"-0.002\";Additive_Effect=\"0.01\n '''",
"str",
"(",
"example",
")",
"# make dictionary of attributes",
"# keys are:",
"# QTL_ID,Name,Abbrev,PUBMED_ID,trait_ID,trait,FlankMarkers,",
"# VTO_name,Map_Type,Significance,P-value,Model,",
"# Test_Base,Variance, Bayes-value,PTO_name,gene_IDsrc,peak_cM,",
"# CMO_name,gene_ID,F-Stat,LOD-score,Additive_Effect,",
"# Dominance_Effect,Likelihood_Ratio,LS-means,Breed,",
"# trait (duplicate with Name),Variance,Bayes-value,",
"# F-Stat,LOD-score,Additive_Effect,Dominance_Effect,",
"# Likelihood_Ratio,LS-means",
"# deal with poorly formed attributes",
"if",
"re",
".",
"search",
"(",
"r'\"FlankMarkers\";'",
",",
"attr",
")",
":",
"attr",
"=",
"re",
".",
"sub",
"(",
"r'FlankMarkers;'",
",",
"''",
",",
"attr",
")",
"attr_items",
"=",
"re",
".",
"sub",
"(",
"r'\"'",
",",
"''",
",",
"attr",
")",
".",
"split",
"(",
"\";\"",
")",
"bad_attrs",
"=",
"set",
"(",
")",
"for",
"attributes",
"in",
"attr_items",
":",
"if",
"not",
"re",
".",
"search",
"(",
"r'='",
",",
"attributes",
")",
":",
"# remove this attribute from the list",
"bad_attrs",
".",
"add",
"(",
"attributes",
")",
"attr_set",
"=",
"set",
"(",
"attr_items",
")",
"-",
"bad_attrs",
"attribute_dict",
"=",
"dict",
"(",
"item",
".",
"split",
"(",
"\"=\"",
")",
"for",
"item",
"in",
"attr_set",
")",
"qtl_num",
"=",
"attribute_dict",
".",
"get",
"(",
"'QTL_ID'",
")",
"if",
"self",
".",
"test_mode",
"and",
"int",
"(",
"qtl_num",
")",
"not",
"in",
"self",
".",
"test_ids",
":",
"continue",
"# make association between QTL and trait based on taxon",
"qtl_id",
"=",
"common_name",
"+",
"'QTL:'",
"+",
"str",
"(",
"qtl_num",
")",
"model",
".",
"addIndividualToGraph",
"(",
"qtl_id",
",",
"None",
",",
"self",
".",
"globaltt",
"[",
"'QTL'",
"]",
")",
"geno",
".",
"addTaxon",
"(",
"taxon_curie",
",",
"qtl_id",
")",
"#",
"trait_id",
"=",
"'AQTLTrait:'",
"+",
"attribute_dict",
".",
"get",
"(",
"'trait_ID'",
")",
"# if pub is in attributes, add it to the association",
"pub_id",
"=",
"None",
"if",
"'PUBMED_ID'",
"in",
"attribute_dict",
".",
"keys",
"(",
")",
":",
"pub_id",
"=",
"attribute_dict",
".",
"get",
"(",
"'PUBMED_ID'",
")",
"if",
"re",
".",
"match",
"(",
"r'ISU.*'",
",",
"pub_id",
")",
":",
"pub_id",
"=",
"'AQTLPub:'",
"+",
"pub_id",
".",
"strip",
"(",
")",
"reference",
"=",
"Reference",
"(",
"graph",
",",
"pub_id",
")",
"else",
":",
"pub_id",
"=",
"'PMID:'",
"+",
"pub_id",
".",
"strip",
"(",
")",
"reference",
"=",
"Reference",
"(",
"graph",
",",
"pub_id",
",",
"self",
".",
"globaltt",
"[",
"'journal article'",
"]",
")",
"reference",
".",
"addRefToGraph",
"(",
")",
"# Add QTL to graph",
"assoc",
"=",
"G2PAssoc",
"(",
"graph",
",",
"self",
".",
"name",
",",
"qtl_id",
",",
"trait_id",
",",
"self",
".",
"globaltt",
"[",
"'is marker for'",
"]",
")",
"assoc",
".",
"add_evidence",
"(",
"eco_id",
")",
"assoc",
".",
"add_source",
"(",
"pub_id",
")",
"if",
"'P-value'",
"in",
"attribute_dict",
".",
"keys",
"(",
")",
":",
"scr",
"=",
"re",
".",
"sub",
"(",
"r'<'",
",",
"''",
",",
"attribute_dict",
".",
"get",
"(",
"'P-value'",
")",
")",
"if",
"','",
"in",
"scr",
":",
"scr",
"=",
"re",
".",
"sub",
"(",
"r','",
",",
"'.'",
",",
"scr",
")",
"if",
"scr",
".",
"isnumeric",
"(",
")",
":",
"score",
"=",
"float",
"(",
"scr",
")",
"assoc",
".",
"set_score",
"(",
"score",
")",
"assoc",
".",
"add_association_to_graph",
"(",
")",
"# TODO make association to breed",
"# (which means making QTL feature in Breed background)",
"# get location of QTL",
"chromosome",
"=",
"re",
".",
"sub",
"(",
"r'Chr\\.'",
",",
"''",
",",
"chromosome",
")",
"chrom_id",
"=",
"makeChromID",
"(",
"chromosome",
",",
"taxon_curie",
",",
"'CHR'",
")",
"chrom_in_build_id",
"=",
"makeChromID",
"(",
"chromosome",
",",
"build_id",
",",
"'MONARCH'",
")",
"geno",
".",
"addChromosomeInstance",
"(",
"chromosome",
",",
"build_id",
",",
"build_label",
",",
"chrom_id",
")",
"qtl_feature",
"=",
"Feature",
"(",
"graph",
",",
"qtl_id",
",",
"None",
",",
"self",
".",
"globaltt",
"[",
"'QTL'",
"]",
")",
"if",
"start_bp",
"==",
"''",
":",
"start_bp",
"=",
"None",
"qtl_feature",
".",
"addFeatureStartLocation",
"(",
"start_bp",
",",
"chrom_in_build_id",
",",
"strand",
",",
"[",
"self",
".",
"globaltt",
"[",
"'FuzzyPosition'",
"]",
"]",
")",
"if",
"stop_bp",
"==",
"''",
":",
"stop_bp",
"=",
"None",
"qtl_feature",
".",
"addFeatureEndLocation",
"(",
"stop_bp",
",",
"chrom_in_build_id",
",",
"strand",
",",
"[",
"self",
".",
"globaltt",
"[",
"'FuzzyPosition'",
"]",
"]",
")",
"qtl_feature",
".",
"addTaxonToFeature",
"(",
"taxon_curie",
")",
"qtl_feature",
".",
"addFeatureToGraph",
"(",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"# LOG.warning(\"Bad attribute flags in this file\") # what does this even mean??",
"LOG",
".",
"info",
"(",
"\"Done with QTL genomic mappings for %s\"",
",",
"taxon_curie",
")",
"return"
] | 44.402985 | [
0.05714285714285714,
0.05194805194805195,
0.18181818181818182,
0.10526315789473684,
0,
0.08333333333333333,
0,
0.14285714285714285,
0.1875,
0.18181818181818182,
0.07692307692307693,
0.058823529411764705,
0.15384615384615385,
0.06666666666666667,
0.07142857142857142,
0.08333333333333333,
0.06666666666666667,
0.03333333333333333,
0,
0.04878048780487805,
0.02857142857142857,
0.025974025974025976,
0.029411764705882353,
0.03571428571428571,
0.06666666666666667,
0.06060606060606061,
0.04,
0.07142857142857142,
0,
0.047619047619047616,
0.17142857142857143,
0.10344827586206896,
0.015384615384615385,
0.19047619047619047,
0.13636363636363635,
0.12195121951219512,
0.15294117647058825,
0.12698412698412698,
0.10526315789473684,
0.07142857142857142,
0.0425531914893617,
0.07407407407407407,
0.02666666666666667,
0.031746031746031744,
0.02564102564102564,
0.029411764705882353,
0.029850746268656716,
0.029850746268656716,
0.029411764705882353,
0.046511627906976744,
0,
0.038461538461538464,
0.03636363636363636,
0.03278688524590164,
0.03225806451612903,
0.06060606060606061,
0.044444444444444446,
0.03636363636363636,
0.03278688524590164,
0.04081632653061224,
0,
0.037037037037037035,
0.02666666666666667,
0,
0.037037037037037035,
0.027777777777777776,
0.07142857142857142,
0.028169014084507043,
0,
0.03333333333333333,
0.02564102564102564,
0.04,
0,
0.11764705882352941,
0.027777777777777776,
0,
0.029411764705882353,
0.06896551724137931,
0.03571428571428571,
0.03333333333333333,
0.04,
0.03333333333333333,
0.03333333333333333,
0.08,
0.03508771929824561,
0.06521739130434782,
0.039473684210526314,
0.044444444444444446,
0,
0.058823529411764705,
0.09090909090909091,
0.03636363636363636,
0.058823529411764705,
0.047619047619047616,
0.05,
0.037037037037037035,
0.0273972602739726,
0.058823529411764705,
0.038461538461538464,
0.05128205128205128,
0.047619047619047616,
0.043478260869565216,
0,
0.041666666666666664,
0.041666666666666664,
0.02857142857142857,
0,
0.05405405405405406,
0.03278688524590164,
0.02857142857142857,
0,
0.0375,
0.06976744186046512,
0.046875,
0.0375,
0.058823529411764705,
0.05714285714285714,
0.057692307692307696,
0.03571428571428571,
0.05660377358490566,
0.06060606060606061,
0.058823529411764705,
0.06,
0.03636363636363636,
0.05660377358490566,
0.034482758620689655,
0.0425531914893617,
0,
0.03529411764705882,
0.08,
0,
0.034482758620689655,
0.02857142857142857,
0.14285714285714285
] |
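
The attribute-parsing steps in `_process_qtls_genomic_location` are easier to follow in isolation; a minimal sketch of the same quote-stripping and key=value splitting (the sample attr string is illustrative, not taken from the source data):

import re

attr = 'QTL_ID=2242;Name="Spleen percentage";"FlankMarkers=ADL0022";P-value="<0.05"'
attr_items = re.sub(r'"', '', attr).split(";")
# drop malformed entries that carry no '=' separator, as the record does
bad_attrs = {a for a in attr_items if not re.search(r'=', a)}
attribute_dict = dict(item.split("=") for item in set(attr_items) - bad_attrs)
print(attribute_dict["QTL_ID"])   # 2242
print(attribute_dict["P-value"])  # <0.05
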
def UploadOperations(self, operations, is_last=False):
"""Uploads operations to the given uploadUrl in incremental steps.
Note: Each list of operations is expected to contain operations of the
same type, similar to how one would normally send operations in an
AdWords API Service request.
Args:
operations: one or more lists of operations as would be sent to the
AdWords API for the associated service.
is_last: a boolean indicating whether this is the final increment to be
added to the batch job.
"""
if self._is_last:
raise googleads.errors.AdWordsBatchJobServiceInvalidOperationError(
'Can\'t add new operations to a completed incremental upload.')
# Build the request
req = self._request_builder.BuildUploadRequest(
self._upload_url, operations,
current_content_length=self._current_content_length, is_last=is_last)
# Make the request, ignoring the urllib2.HTTPError raised due to HTTP status
# code 308 (for resumable uploads).
try:
_batch_job_logger.debug('Outgoing request: %s %s %s',
req.get_full_url(), req.headers, req.data)
self._url_opener.open(req)
if _batch_job_logger.isEnabledFor(logging.INFO):
_batch_job_logger.info('Request summary: %s',
self._ExtractRequestSummaryFields(req))
except urllib2.HTTPError as e:
if e.code != 308:
if _batch_job_logger.isEnabledFor(logging.WARNING):
_batch_job_logger.warning(
'Request summary: %s',
self._ExtractRequestSummaryFields(req, error=e))
raise
# Update upload status.
self._current_content_length += len(req.data)
self._is_last = is_last | [
"def",
"UploadOperations",
"(",
"self",
",",
"operations",
",",
"is_last",
"=",
"False",
")",
":",
"if",
"self",
".",
"_is_last",
":",
"raise",
"googleads",
".",
"errors",
".",
"AdWordsBatchJobServiceInvalidOperationError",
"(",
"'Can\\'t add new operations to a completed incremental upload.'",
")",
"# Build the request",
"req",
"=",
"self",
".",
"_request_builder",
".",
"BuildUploadRequest",
"(",
"self",
".",
"_upload_url",
",",
"operations",
",",
"current_content_length",
"=",
"self",
".",
"_current_content_length",
",",
"is_last",
"=",
"is_last",
")",
"# Make the request, ignoring the urllib2.HTTPError raised due to HTTP status",
"# code 308 (for resumable uploads).",
"try",
":",
"_batch_job_logger",
".",
"debug",
"(",
"'Outgoing request: %s %s %s'",
",",
"req",
".",
"get_full_url",
"(",
")",
",",
"req",
".",
"headers",
",",
"req",
".",
"data",
")",
"self",
".",
"_url_opener",
".",
"open",
"(",
"req",
")",
"if",
"_batch_job_logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"INFO",
")",
":",
"_batch_job_logger",
".",
"info",
"(",
"'Request summary: %s'",
",",
"self",
".",
"_ExtractRequestSummaryFields",
"(",
"req",
")",
")",
"except",
"urllib2",
".",
"HTTPError",
"as",
"e",
":",
"if",
"e",
".",
"code",
"!=",
"308",
":",
"if",
"_batch_job_logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"WARNING",
")",
":",
"_batch_job_logger",
".",
"warning",
"(",
"'Request summary: %s'",
",",
"self",
".",
"_ExtractRequestSummaryFields",
"(",
"req",
",",
"error",
"=",
"e",
")",
")",
"raise",
"# Update upload status.",
"self",
".",
"_current_content_length",
"+=",
"len",
"(",
"req",
".",
"data",
")",
"self",
".",
"_is_last",
"=",
"is_last"
] | 42.243902 | [
0.018518518518518517,
0.02857142857142857,
0,
0.02702702702702703,
0.02857142857142857,
0.0625,
0,
0.2222222222222222,
0.0410958904109589,
0.0425531914893617,
0.03896103896103896,
0.06451612903225806,
0.2857142857142857,
0.09523809523809523,
0.0547945205479452,
0.0547945205479452,
0.08695652173913043,
0.058823529411764705,
0.05405405405405406,
0.06493506493506493,
0.0375,
0.05128205128205128,
0.25,
0.06779661016949153,
0.05555555555555555,
0,
0.09375,
0,
0.05555555555555555,
0.05660377358490566,
0.05714285714285714,
0.058823529411764705,
0.13043478260869565,
0.03389830508474576,
0.1111111111111111,
0.08333333333333333,
0.06451612903225806,
0.15384615384615385,
0.07407407407407407,
0.04081632653061224,
0.07407407407407407
] |
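
A hypothetical driver loop for the incremental upload above; the FakeUploader stub and the operation batches are assumptions for illustration, not part of the googleads source:

class FakeUploader(object):
    """Stand-in with the same UploadOperations signature, for illustration."""
    def UploadOperations(self, operations, is_last=False):
        print("uploading %d list(s), is_last=%s" % (len(operations), is_last))

uploader = FakeUploader()
batches = [["op1"], ["op2"], ["op3"]]  # placeholder operation lists
for i, ops in enumerate(batches):
    # mark the final increment so the upload is completed
    uploader.UploadOperations([ops], is_last=(i == len(batches) - 1))
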
def calculate_trip_shape_breakpoints(conn):
"""Pre-compute the shape points corresponding to each trip's stop.
Depends: shapes"""
from gtfspy import shapes
cur = conn.cursor()
breakpoints_cache = {}
# Counters for problems - don't print every problem.
count_bad_shape_ordering = 0
count_bad_shape_fit = 0
count_no_shape_fit = 0
trip_Is = [x[0] for x in
cur.execute('SELECT DISTINCT trip_I FROM stop_times').fetchall()]
for trip_I in trip_Is:
# Get the shape points
row = cur.execute('''SELECT shape_id
FROM trips WHERE trip_I=?''', (trip_I,)).fetchone()
if row is None:
continue
shape_id = row[0]
if shape_id is None or shape_id == '':
continue
# Get the stop points
cur.execute('''SELECT seq, lat, lon, stop_id
FROM stop_times LEFT JOIN stops USING (stop_I)
WHERE trip_I=?
ORDER BY seq''',
(trip_I,))
#print '%20s, %s'%(run_code, datetime.fromtimestamp(run_sch_starttime))
stop_points = [dict(seq=row[0],
lat=row[1],
lon=row[2],
stop_I=row[3])
for row in cur if row[1] and row[2]]
# Calculate a cache key for this sequence.
# If both shape_id, and all stop_Is are same, then we can re-use existing breakpoints:
cache_key = (shape_id, tuple(x['stop_I'] for x in stop_points))
if cache_key in breakpoints_cache:
breakpoints = breakpoints_cache[cache_key]
else:
# Must re-calculate breakpoints:
shape_points = shapes.get_shape_points(cur, shape_id)
breakpoints, badness \
= shapes.find_segments(stop_points, shape_points)
if breakpoints != sorted(breakpoints):
# route_name, route_id, route_I, trip_id, trip_I = \
# cur.execute('''SELECT name, route_id, route_I, trip_id, trip_I
# FROM trips LEFT JOIN routes USING (route_I)
# WHERE trip_I=? LIMIT 1''', (trip_I,)).fetchone()
# print "Ignoring: Route with bad shape ordering:", route_name, route_id, route_I, trip_id, trip_I
count_bad_shape_ordering += 1
# select * from stop_times where trip_I=NNNN order by shape_break;
breakpoints_cache[cache_key] = None
continue # Do not set shape_break for this trip.
# Add it to cache
breakpoints_cache[cache_key] = breakpoints
if badness > 30 * len(breakpoints):
#print "bad shape fit: %s (%s, %s, %s)" % (badness, trip_I, shape_id, len(breakpoints))
count_bad_shape_fit += 1
if breakpoints is None:
continue
if len(breakpoints) == 0:
# No valid route could be identified.
#print "Ignoring: No shape identified for trip_I=%s, shape_id=%s" % (trip_I, shape_id)
count_no_shape_fit += 1
continue
# breakpoints is the corresponding points for each stop
assert len(breakpoints) == len(stop_points)
cur.executemany('UPDATE stop_times SET shape_break=? '
'WHERE trip_I=? AND seq=? ',
((int(bkpt), int(trip_I), int(stpt['seq']))
for bkpt, stpt in zip(breakpoints, stop_points)))
if count_bad_shape_fit > 0:
print(" Shape trip breakpoints: %s bad fits" % count_bad_shape_fit)
if count_bad_shape_ordering > 0:
print(" Shape trip breakpoints: %s bad shape orderings" % count_bad_shape_ordering)
if count_no_shape_fit > 0:
print(" Shape trip breakpoints: %s no shape fits" % count_no_shape_fit)
conn.commit() | [
"def",
"calculate_trip_shape_breakpoints",
"(",
"conn",
")",
":",
"from",
"gtfspy",
"import",
"shapes",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"breakpoints_cache",
"=",
"{",
"}",
"# Counters for problems - don't print every problem.",
"count_bad_shape_ordering",
"=",
"0",
"count_bad_shape_fit",
"=",
"0",
"count_no_shape_fit",
"=",
"0",
"trip_Is",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"cur",
".",
"execute",
"(",
"'SELECT DISTINCT trip_I FROM stop_times'",
")",
".",
"fetchall",
"(",
")",
"]",
"for",
"trip_I",
"in",
"trip_Is",
":",
"# Get the shape points",
"row",
"=",
"cur",
".",
"execute",
"(",
"'''SELECT shape_id\n FROM trips WHERE trip_I=?'''",
",",
"(",
"trip_I",
",",
")",
")",
".",
"fetchone",
"(",
")",
"if",
"row",
"is",
"None",
":",
"continue",
"shape_id",
"=",
"row",
"[",
"0",
"]",
"if",
"shape_id",
"is",
"None",
"or",
"shape_id",
"==",
"''",
":",
"continue",
"# Get the stop points",
"cur",
".",
"execute",
"(",
"'''SELECT seq, lat, lon, stop_id\n FROM stop_times LEFT JOIN stops USING (stop_I)\n WHERE trip_I=?\n ORDER BY seq'''",
",",
"(",
"trip_I",
",",
")",
")",
"#print '%20s, %s'%(run_code, datetime.fromtimestamp(run_sch_starttime))",
"stop_points",
"=",
"[",
"dict",
"(",
"seq",
"=",
"row",
"[",
"0",
"]",
",",
"lat",
"=",
"row",
"[",
"1",
"]",
",",
"lon",
"=",
"row",
"[",
"2",
"]",
",",
"stop_I",
"=",
"row",
"[",
"3",
"]",
")",
"for",
"row",
"in",
"cur",
"if",
"row",
"[",
"1",
"]",
"and",
"row",
"[",
"2",
"]",
"]",
"# Calculate a cache key for this sequence.",
"# If both shape_id, and all stop_Is are same, then we can re-use existing breakpoints:",
"cache_key",
"=",
"(",
"shape_id",
",",
"tuple",
"(",
"x",
"[",
"'stop_I'",
"]",
"for",
"x",
"in",
"stop_points",
")",
")",
"if",
"cache_key",
"in",
"breakpoints_cache",
":",
"breakpoints",
"=",
"breakpoints_cache",
"[",
"cache_key",
"]",
"else",
":",
"# Must re-calculate breakpoints:",
"shape_points",
"=",
"shapes",
".",
"get_shape_points",
"(",
"cur",
",",
"shape_id",
")",
"breakpoints",
",",
"badness",
"=",
"shapes",
".",
"find_segments",
"(",
"stop_points",
",",
"shape_points",
")",
"if",
"breakpoints",
"!=",
"sorted",
"(",
"breakpoints",
")",
":",
"# route_name, route_id, route_I, trip_id, trip_I = \\",
"# cur.execute('''SELECT name, route_id, route_I, trip_id, trip_I",
"# FROM trips LEFT JOIN routes USING (route_I)",
"# WHERE trip_I=? LIMIT 1''', (trip_I,)).fetchone()",
"# print \"Ignoring: Route with bad shape ordering:\", route_name, route_id, route_I, trip_id, trip_I",
"count_bad_shape_ordering",
"+=",
"1",
"# select * from stop_times where trip_I=NNNN order by shape_break;",
"breakpoints_cache",
"[",
"cache_key",
"]",
"=",
"None",
"continue",
"# Do not set shape_break for this trip.",
"# Add it to cache",
"breakpoints_cache",
"[",
"cache_key",
"]",
"=",
"breakpoints",
"if",
"badness",
">",
"30",
"*",
"len",
"(",
"breakpoints",
")",
":",
"#print \"bad shape fit: %s (%s, %s, %s)\" % (badness, trip_I, shape_id, len(breakpoints))",
"count_bad_shape_fit",
"+=",
"1",
"if",
"breakpoints",
"is",
"None",
":",
"continue",
"if",
"len",
"(",
"breakpoints",
")",
"==",
"0",
":",
"# No valid route could be identified.",
"#print \"Ignoring: No shape identified for trip_I=%s, shape_id=%s\" % (trip_I, shape_id)",
"count_no_shape_fit",
"+=",
"1",
"continue",
"# breakpoints is the corresponding points for each stop",
"assert",
"len",
"(",
"breakpoints",
")",
"==",
"len",
"(",
"stop_points",
")",
"cur",
".",
"executemany",
"(",
"'UPDATE stop_times SET shape_break=? '",
"'WHERE trip_I=? AND seq=? '",
",",
"(",
"(",
"int",
"(",
"bkpt",
")",
",",
"int",
"(",
"trip_I",
")",
",",
"int",
"(",
"stpt",
"[",
"'seq'",
"]",
")",
")",
"for",
"bkpt",
",",
"stpt",
"in",
"zip",
"(",
"breakpoints",
",",
"stop_points",
")",
")",
")",
"if",
"count_bad_shape_fit",
">",
"0",
":",
"print",
"(",
"\" Shape trip breakpoints: %s bad fits\"",
"%",
"count_bad_shape_fit",
")",
"if",
"count_bad_shape_ordering",
">",
"0",
":",
"print",
"(",
"\" Shape trip breakpoints: %s bad shape orderings\"",
"%",
"count_bad_shape_ordering",
")",
"if",
"count_no_shape_fit",
">",
"0",
":",
"print",
"(",
"\" Shape trip breakpoints: %s no shape fits\"",
"%",
"count_no_shape_fit",
")",
"conn",
".",
"commit",
"(",
")"
] | 43.988636 | [
0.023255813953488372,
0.02857142857142857,
0,
0.13636363636363635,
0.06896551724137931,
0,
0.08695652173913043,
0.07692307692307693,
0,
0.03571428571428571,
0.0625,
0.07407407407407407,
0.07692307692307693,
0,
0.10714285714285714,
0.0625,
0.07692307692307693,
0.06666666666666667,
0.06818181818181818,
0.07058823529411765,
0.08695652173913043,
0.1,
0.08,
0.043478260869565216,
0.1,
0,
0.06896551724137931,
0.057692307692307696,
0.057971014492753624,
0.10810810810810811,
0.10256410256410256,
0.1,
0.0379746835443038,
0.07692307692307693,
0.07692307692307693,
0.07692307692307693,
0.09523809523809523,
0.06779661016949153,
0.04,
0.031914893617021274,
0.028169014084507043,
0.047619047619047616,
0.037037037037037035,
0.15384615384615385,
0.045454545454545456,
0,
0.03076923076923077,
0.058823529411764705,
0.03076923076923077,
0.04,
0.029411764705882353,
0.03614457831325301,
0.025974025974025976,
0.036585365853658534,
0.02631578947368421,
0.044444444444444446,
0.036585365853658534,
0.0392156862745098,
0.03076923076923077,
0.06896551724137931,
0.037037037037037035,
0,
0.0425531914893617,
0.038834951456310676,
0.05,
0,
0.06451612903225806,
0.1,
0,
0.06060606060606061,
0.04,
0.04081632653061224,
0.05714285714285714,
0.1,
0,
0.031746031746031744,
0.0392156862745098,
0.04838709677419355,
0.038461538461538464,
0.04477611940298507,
0.05405405405405406,
0.06451612903225806,
0.02666666666666667,
0.05555555555555555,
0.03296703296703297,
0.06666666666666667,
0.02531645569620253,
0.11764705882352941
] |
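
The caching strategy in calculate_trip_shape_breakpoints, reduced to its core: breakpoints depend only on the shape and the exact stop sequence, so that pair serves as the cache key. A sketch under assumed names (breakpoints_for and the toy compute function are not from the source):

_breakpoint_cache = {}

def breakpoints_for(shape_id, stop_points, compute):
    # reuse breakpoints when (shape_id, stop sequence) repeats across trips
    key = (shape_id, tuple(p['stop_I'] for p in stop_points))
    if key not in _breakpoint_cache:
        _breakpoint_cache[key] = compute(shape_id, stop_points)
    return _breakpoint_cache[key]

stops = [{'stop_I': 1}, {'stop_I': 2}]
calls = []
find = lambda s, p: calls.append(s) or [0, 5]  # records each real computation
print(breakpoints_for('shapeA', stops, find), calls)  # [0, 5] ['shapeA']
print(breakpoints_for('shapeA', stops, find), calls)  # [0, 5] ['shapeA'] -- cache hit
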
def record(self, *args, **kwargs):
"""Track and record values each day.
Parameters
----------
**kwargs
The names and values to record.
Notes
-----
These values will appear in the performance packets and the performance
dataframe passed to ``analyze`` and returned from
:func:`~zipline.run_algorithm`.
"""
# Make 2 objects both referencing the same iterator
args = [iter(args)] * 2
# Zip generates list entries by calling `next` on each iterator it
# receives. In this case the two iterators are the same object, so the
# call to next on args[0] will also advance args[1], resulting in zip
# returning (a,b) (c,d) (e,f) rather than (a,a) (b,b) (c,c) etc.
positionals = zip(*args)
for name, value in chain(positionals, kwargs.items()):
self._recorded_vars[name] = value | [
"def",
"record",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Make 2 objects both referencing the same iterator",
"args",
"=",
"[",
"iter",
"(",
"args",
")",
"]",
"*",
"2",
"# Zip generates list entries by calling `next` on each iterator it",
"# receives. In this case the two iterators are the same object, so the",
"# call to next on args[0] will also advance args[1], resulting in zip",
"# returning (a,b) (c,d) (e,f) rather than (a,a) (b,b) (c,c) etc.",
"positionals",
"=",
"zip",
"(",
"*",
"args",
")",
"for",
"name",
",",
"value",
"in",
"chain",
"(",
"positionals",
",",
"kwargs",
".",
"items",
"(",
")",
")",
":",
"self",
".",
"_recorded_vars",
"[",
"name",
"]",
"=",
"value"
] | 38.166667 | [
0.029411764705882353,
0.045454545454545456,
0,
0.1111111111111111,
0.1111111111111111,
0.125,
0.046511627906976744,
0,
0.15384615384615385,
0.15384615384615385,
0.02531645569620253,
0.05263157894736842,
0.1282051282051282,
0.18181818181818182,
0.03389830508474576,
0.06451612903225806,
0,
0.02702702702702703,
0.02531645569620253,
0.025974025974025976,
0.027777777777777776,
0.0625,
0.03225806451612903,
0.044444444444444446
] |
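
The shared-iterator trick in `record` is a standard idiom for grouping a flat argument sequence into (name, value) pairs; seen in isolation:

args = ('price', 101.5, 'volume', 300)
# both zip arguments are the same iterator, so each tuple consumes two items
pairs = zip(*[iter(args)] * 2)
print(list(pairs))  # [('price', 101.5), ('volume', 300)]
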
def __recv_cb(self, msg):
"""Calls user-provided callback and marks message for Ack regardless of success
"""
try:
self.__msg_callback(msg)
except:
logger.exception("AmqpLink.__recv_cb exception calling msg_callback")
finally:
# only works if all messages handled in series
self.__last_id = msg.delivery_tag
self.__unacked += 1 | [
"def",
"__recv_cb",
"(",
"self",
",",
"msg",
")",
":",
"try",
":",
"self",
".",
"__msg_callback",
"(",
"msg",
")",
"except",
":",
"logger",
".",
"exception",
"(",
"\"AmqpLink.__recv_cb exception calling msg_callback\"",
")",
"finally",
":",
"# only works if all messages handled in series",
"self",
".",
"__last_id",
"=",
"msg",
".",
"delivery_tag",
"self",
".",
"__unacked",
"+=",
"1"
] | 37.909091 | [
0.04,
0.034482758620689655,
0.18181818181818182,
0.16666666666666666,
0.05555555555555555,
0.2,
0.037037037037037035,
0.125,
0.034482758620689655,
0.044444444444444446,
0.06451612903225806
] |
def _eat_name_line(self, line):
"""Parses one line of data file"""
if line[0] not in "#=":
parts = line.split()
country_values = line[30:-1]
name = map_name(parts[1])
if not self.case_sensitive:
name = name.lower()
if parts[0] == "M":
self._set(name, u"male", country_values)
elif parts[0] == "1M" or parts[0] == "?M":
self._set(name, u"mostly_male", country_values)
elif parts[0] == "F":
self._set(name, u"female", country_values)
elif parts[0] == "1F" or parts[0] == "?F":
self._set(name, u"mostly_female", country_values)
elif parts[0] == "?":
self._set(name, self.unknown_value, country_values)
else:
raise "Not sure what to do with a sex of %s" % parts[0] | [
"def",
"_eat_name_line",
"(",
"self",
",",
"line",
")",
":",
"if",
"line",
"[",
"0",
"]",
"not",
"in",
"\"#=\"",
":",
"parts",
"=",
"line",
".",
"split",
"(",
")",
"country_values",
"=",
"line",
"[",
"30",
":",
"-",
"1",
"]",
"name",
"=",
"map_name",
"(",
"parts",
"[",
"1",
"]",
")",
"if",
"not",
"self",
".",
"case_sensitive",
":",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"if",
"parts",
"[",
"0",
"]",
"==",
"\"M\"",
":",
"self",
".",
"_set",
"(",
"name",
",",
"u\"male\"",
",",
"country_values",
")",
"elif",
"parts",
"[",
"0",
"]",
"==",
"\"1M\"",
"or",
"parts",
"[",
"0",
"]",
"==",
"\"?M\"",
":",
"self",
".",
"_set",
"(",
"name",
",",
"u\"mostly_male\"",
",",
"country_values",
")",
"elif",
"parts",
"[",
"0",
"]",
"==",
"\"F\"",
":",
"self",
".",
"_set",
"(",
"name",
",",
"u\"female\"",
",",
"country_values",
")",
"elif",
"parts",
"[",
"0",
"]",
"==",
"\"1F\"",
"or",
"parts",
"[",
"0",
"]",
"==",
"\"?F\"",
":",
"self",
".",
"_set",
"(",
"name",
",",
"u\"mostly_female\"",
",",
"country_values",
")",
"elif",
"parts",
"[",
"0",
"]",
"==",
"\"?\"",
":",
"self",
".",
"_set",
"(",
"name",
",",
"self",
".",
"unknown_value",
",",
"country_values",
")",
"else",
":",
"raise",
"\"Not sure what to do with a sex of %s\"",
"%",
"parts",
"[",
"0",
"]"
] | 42.333333 | [
0.03225806451612903,
0.047619047619047616,
0.06451612903225806,
0.0625,
0.05,
0.05405405405405406,
0.05128205128205128,
0.05714285714285714,
0,
0.06451612903225806,
0.03571428571428571,
0.037037037037037035,
0.031746031746031744,
0.06060606060606061,
0.034482758620689655,
0.037037037037037035,
0.03076923076923077,
0.06060606060606061,
0.029850746268656716,
0.11764705882352941,
0.028169014084507043
] |
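
A sketch of how one line flows through _eat_name_line; the sample line is fabricated to match the layout the code assumes (sex marker, then the name, then per-country frequency columns from offset 30):

line = "M  Aad                        1   13             4  6             $"
parts = line.split()
print(parts[0])           # 'M'  -> routed to the male branch
print(parts[1])           # 'Aad' -> the name passed through map_name()
print(repr(line[30:-1]))  # frequency columns handed to self._set(...)
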
def getmessage(self) -> str:
""" parse self into unicode string as message content """
image = {}
for key, default in vars(self.__class__).items():
        if not key.startswith('_') and key != '' and (key not in vars(QueueMessage)):
            if isinstance(default, datetime.datetime):
                image[key] = safe_cast(getattr(self, key, default), str, dformat=self._datetimeformat)
            elif isinstance(default, datetime.date):
                image[key] = safe_cast(getattr(self, key, default), str, dformat=self._dateformat)
            else:
                image[key] = getattr(self, key, default)
return str(image) | [
"def",
"getmessage",
"(",
"self",
")",
"->",
"str",
":",
"image",
"=",
"{",
"}",
"for",
"key",
",",
"default",
"in",
"vars",
"(",
"self",
".",
"__class__",
")",
".",
"items",
"(",
")",
":",
"if",
"not",
"key",
".",
"startswith",
"(",
"'_'",
")",
"and",
"key",
"!=",
"''",
"and",
"(",
"not",
"key",
"in",
"vars",
"(",
"QueueMessage",
")",
".",
"items",
"(",
")",
")",
":",
"if",
"isinstance",
"(",
"default",
",",
"datetime",
".",
"date",
")",
":",
"image",
"[",
"key",
"]",
"=",
"safe_cast",
"(",
"getattr",
"(",
"self",
",",
"key",
",",
"default",
")",
",",
"str",
",",
"dformat",
"=",
"self",
".",
"_dateformat",
")",
"if",
"isinstance",
"(",
"default",
",",
"datetime",
".",
"datetime",
")",
":",
"image",
"[",
"key",
"]",
"=",
"safe_cast",
"(",
"getattr",
"(",
"self",
",",
"key",
",",
"default",
")",
",",
"str",
",",
"dformat",
"=",
"self",
".",
"_datetimeformat",
")",
"else",
":",
"image",
"[",
"key",
"]",
"=",
"getattr",
"(",
"self",
",",
"key",
",",
"default",
")",
"return",
"str",
"(",
"image",
")"
] | 63.416667 | [
0.03571428571428571,
0.04411764705882353,
0.1111111111111111,
0.03508771929824561,
0.04477611940298507,
0.037037037037037035,
0.029411764705882353,
0.034482758620689655,
0.02830188679245283,
0.09523809523809523,
0.03333333333333333,
0.08
] |
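
One subtlety the branch order in getmessage depends on: datetime.datetime is a subclass of datetime.date, so the subclass must be tested first or every datetime value would take the date branch:

import datetime

print(isinstance(datetime.datetime.now(), datetime.date))    # True
print(isinstance(datetime.date.today(), datetime.datetime))  # False
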
def filter(filter, data, sensitive=True, regexp=False):
"""
    Return True if the provided filter matches the given dictionary of values
Filter supports disjunctive normal form with '|' used for OR, '&'
for AND and '-' for negation. Individual values are prefixed with
'value:', leading/trailing white-space is stripped. For example::
tag: Tier1 | tag: Tier2 | tag: Tier3
category: Sanity, Security & tag: -destructive
    Note that multiple comma-separated values can be used as syntactic
sugar to shorten the filter notation::
tag: A, B, C ---> tag: A | tag: B | tag: C
Values should be provided as a dictionary of lists each describing
the values against which the filter is to be matched. For example::
data = {tag: ["Tier1", "TIPpass"], category: ["Sanity"]}
Other types of dictionary values are converted into a string.
A FilterError exception is raised when a dimension parsed from the
filter is not found in the data dictionary. Set option 'sensitive'
to False to enable case-insensitive matching. If 'regexp' option is
True, regular expressions can be used in the filter values as well.
"""
def match_value(pattern, text):
""" Match value against data (simple or regexp) """
if regexp:
return re.match("^{0}$".format(pattern), text)
else:
return pattern == text
def check_value(dimension, value):
""" Check whether the value matches data """
# E.g. value = 'A, B' or value = "C" or value = "-D"
# If there are multiple values, at least one must match
for atom in re.split("\s*,\s*", value):
# Handle negative values (check the whole data for non-presence)
if atom.startswith("-"):
atom = atom[1:]
# Check each value for given dimension
for dato in data[dimension]:
if match_value(atom, dato):
break
# Pattern not found ---> good
else:
return True
# Handle positive values (return True upon first successful match)
else:
# Check each value for given dimension
for dato in data[dimension]:
if match_value(atom, dato):
# Pattern found ---> good
return True
# No value matched the data
return False
def check_dimension(dimension, values):
""" Check whether all values for given dimension match data """
# E.g. dimension = 'tag', values = ['A, B', 'C', '-D']
# Raise exception upon unknown dimension
if dimension not in data:
raise FilterError("Invalid filter '{0}'".format(dimension))
# Every value must match at least one value for data
return all([check_value(dimension, value) for value in values])
def check_clause(clause):
""" Split into literals and check whether all match """
# E.g. clause = 'tag: A, B & tag: C & tag: -D'
# Split into individual literals by dimension
literals = dict()
for literal in re.split("\s*&\s*", clause):
# E.g. literal = 'tag: A, B'
# Make sure the literal matches dimension:value format
matched = re.match("^(.*)\s*:\s*(.*)$", literal)
if not matched:
raise FilterError("Invalid filter '{0}'".format(literal))
dimension, value = matched.groups()
values = [value]
# Append the literal value(s) to corresponding dimension list
literals.setdefault(dimension, []).extend(values)
# For each dimension all literals must match given data
return all([check_dimension(dimension, values)
for dimension, values in literals.items()])
# Default to True if no filter given, bail out if weird data given
if filter is None or filter == "": return True
if not isinstance(data, dict):
raise FilterError("Invalid data type '{0}'".format(type(data)))
# Make sure that data dictionary contains lists of strings
data = copy.deepcopy(data)
try: # pragma: no cover
for key in data:
if isinstance(data[key], list):
data[key] = [unicode(item) for item in data[key]]
else:
data[key] = [unicode(data[key])]
except NameError: # pragma: no cover
for key in data:
if isinstance(data[key], list):
data[key] = [str(item) for item in data[key]]
else:
data[key] = [str(data[key])]
# Turn all data into lowercase if sensitivity is off
if not sensitive:
filter = filter.lower()
lowered = dict()
for key, values in data.items():
lowered[key.lower()] = [value.lower() for value in values]
data = lowered
# At least one clause must be true
return any([check_clause(clause)
for clause in re.split("\s*\|\s*", filter)]) | [
"def",
"filter",
"(",
"filter",
",",
"data",
",",
"sensitive",
"=",
"True",
",",
"regexp",
"=",
"False",
")",
":",
"def",
"match_value",
"(",
"pattern",
",",
"text",
")",
":",
"\"\"\" Match value against data (simple or regexp) \"\"\"",
"if",
"regexp",
":",
"return",
"re",
".",
"match",
"(",
"\"^{0}$\"",
".",
"format",
"(",
"pattern",
")",
",",
"text",
")",
"else",
":",
"return",
"pattern",
"==",
"text",
"def",
"check_value",
"(",
"dimension",
",",
"value",
")",
":",
"\"\"\" Check whether the value matches data \"\"\"",
"# E.g. value = 'A, B' or value = \"C\" or value = \"-D\"",
"# If there are multiple values, at least one must match",
"for",
"atom",
"in",
"re",
".",
"split",
"(",
"\"\\s*,\\s*\"",
",",
"value",
")",
":",
"# Handle negative values (check the whole data for non-presence)",
"if",
"atom",
".",
"startswith",
"(",
"\"-\"",
")",
":",
"atom",
"=",
"atom",
"[",
"1",
":",
"]",
"# Check each value for given dimension",
"for",
"dato",
"in",
"data",
"[",
"dimension",
"]",
":",
"if",
"match_value",
"(",
"atom",
",",
"dato",
")",
":",
"break",
"# Pattern not found ---> good",
"else",
":",
"return",
"True",
"# Handle positive values (return True upon first successful match)",
"else",
":",
"# Check each value for given dimension",
"for",
"dato",
"in",
"data",
"[",
"dimension",
"]",
":",
"if",
"match_value",
"(",
"atom",
",",
"dato",
")",
":",
"# Pattern found ---> good",
"return",
"True",
"# No value matched the data",
"return",
"False",
"def",
"check_dimension",
"(",
"dimension",
",",
"values",
")",
":",
"\"\"\" Check whether all values for given dimension match data \"\"\"",
"# E.g. dimension = 'tag', values = ['A, B', 'C', '-D']",
"# Raise exception upon unknown dimension",
"if",
"dimension",
"not",
"in",
"data",
":",
"raise",
"FilterError",
"(",
"\"Invalid filter '{0}'\"",
".",
"format",
"(",
"dimension",
")",
")",
"# Every value must match at least one value for data",
"return",
"all",
"(",
"[",
"check_value",
"(",
"dimension",
",",
"value",
")",
"for",
"value",
"in",
"values",
"]",
")",
"def",
"check_clause",
"(",
"clause",
")",
":",
"\"\"\" Split into literals and check whether all match \"\"\"",
"# E.g. clause = 'tag: A, B & tag: C & tag: -D'",
"# Split into individual literals by dimension",
"literals",
"=",
"dict",
"(",
")",
"for",
"literal",
"in",
"re",
".",
"split",
"(",
"\"\\s*&\\s*\"",
",",
"clause",
")",
":",
"# E.g. literal = 'tag: A, B'",
"# Make sure the literal matches dimension:value format",
"matched",
"=",
"re",
".",
"match",
"(",
"\"^(.*)\\s*:\\s*(.*)$\"",
",",
"literal",
")",
"if",
"not",
"matched",
":",
"raise",
"FilterError",
"(",
"\"Invalid filter '{0}'\"",
".",
"format",
"(",
"literal",
")",
")",
"dimension",
",",
"value",
"=",
"matched",
".",
"groups",
"(",
")",
"values",
"=",
"[",
"value",
"]",
"# Append the literal value(s) to corresponding dimension list",
"literals",
".",
"setdefault",
"(",
"dimension",
",",
"[",
"]",
")",
".",
"extend",
"(",
"values",
")",
"# For each dimension all literals must match given data",
"return",
"all",
"(",
"[",
"check_dimension",
"(",
"dimension",
",",
"values",
")",
"for",
"dimension",
",",
"values",
"in",
"literals",
".",
"items",
"(",
")",
"]",
")",
"# Default to True if no filter given, bail out if weird data given",
"if",
"filter",
"is",
"None",
"or",
"filter",
"==",
"\"\"",
":",
"return",
"True",
"if",
"not",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"raise",
"FilterError",
"(",
"\"Invalid data type '{0}'\"",
".",
"format",
"(",
"type",
"(",
"data",
")",
")",
")",
"# Make sure that data dictionary contains lists of strings",
"data",
"=",
"copy",
".",
"deepcopy",
"(",
"data",
")",
"try",
":",
"# pragma: no cover",
"for",
"key",
"in",
"data",
":",
"if",
"isinstance",
"(",
"data",
"[",
"key",
"]",
",",
"list",
")",
":",
"data",
"[",
"key",
"]",
"=",
"[",
"unicode",
"(",
"item",
")",
"for",
"item",
"in",
"data",
"[",
"key",
"]",
"]",
"else",
":",
"data",
"[",
"key",
"]",
"=",
"[",
"unicode",
"(",
"data",
"[",
"key",
"]",
")",
"]",
"except",
"NameError",
":",
"# pragma: no cover",
"for",
"key",
"in",
"data",
":",
"if",
"isinstance",
"(",
"data",
"[",
"key",
"]",
",",
"list",
")",
":",
"data",
"[",
"key",
"]",
"=",
"[",
"str",
"(",
"item",
")",
"for",
"item",
"in",
"data",
"[",
"key",
"]",
"]",
"else",
":",
"data",
"[",
"key",
"]",
"=",
"[",
"str",
"(",
"data",
"[",
"key",
"]",
")",
"]",
"# Turn all data into lowercase if sensitivity is off",
"if",
"not",
"sensitive",
":",
"filter",
"=",
"filter",
".",
"lower",
"(",
")",
"lowered",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"values",
"in",
"data",
".",
"items",
"(",
")",
":",
"lowered",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"[",
"value",
".",
"lower",
"(",
")",
"for",
"value",
"in",
"values",
"]",
"data",
"=",
"lowered",
"# At least one clause must be true",
"return",
"any",
"(",
"[",
"check_clause",
"(",
"clause",
")",
"for",
"clause",
"in",
"re",
".",
"split",
"(",
"\"\\s*\\|\\s*\"",
",",
"filter",
")",
"]",
")"
] | 42.152542 | [
0.01818181818181818,
0.2857142857142857,
0.028985507246376812,
0,
0.028985507246376812,
0.028985507246376812,
0.043478260869565216,
0,
0.045454545454545456,
0.037037037037037035,
0,
0.027777777777777776,
0.07142857142857142,
0,
0.08,
0,
0.02857142857142857,
0.04225352112676056,
0,
0.03125,
0,
0.03076923076923077,
0.02857142857142857,
0.02857142857142857,
0.028169014084507043,
0.028169014084507043,
0.2857142857142857,
0,
0.05714285714285714,
0.03389830508474576,
0.1111111111111111,
0.034482758620689655,
0.15384615384615385,
0.058823529411764705,
0,
0.05263157894736842,
0.038461538461538464,
0.03333333333333333,
0.031746031746031744,
0.0851063829787234,
0.02631578947368421,
0.05555555555555555,
0.06451612903225806,
0.037037037037037035,
0.045454545454545456,
0.0425531914893617,
0.06896551724137931,
0.044444444444444446,
0.09523809523809523,
0.06451612903225806,
0.02564102564102564,
0.11764705882352941,
0.037037037037037035,
0.045454545454545456,
0.0425531914893617,
0.04081632653061224,
0.05714285714285714,
0.05714285714285714,
0.1,
0,
0.046511627906976744,
0.028169014084507043,
0.03225806451612903,
0.041666666666666664,
0.06060606060606061,
0.028169014084507043,
0.03333333333333333,
0.028169014084507043,
0,
0.06896551724137931,
0.031746031746031744,
0.037037037037037035,
0.03773584905660377,
0.08,
0.0784313725490196,
0.05,
0.030303030303030304,
0.06666666666666667,
0.07407407407407407,
0.0273972602739726,
0.0425531914893617,
0.07142857142857142,
0.0273972602739726,
0.03278688524590164,
0.031746031746031744,
0.05555555555555555,
0.05454545454545454,
0,
0.02857142857142857,
0.06,
0.058823529411764705,
0.028169014084507043,
0,
0.03225806451612903,
0.06666666666666667,
0.1111111111111111,
0.08333333333333333,
0.046511627906976744,
0.03076923076923077,
0.11764705882352941,
0.041666666666666664,
0.075,
0.08333333333333333,
0.046511627906976744,
0.03278688524590164,
0.11764705882352941,
0.045454545454545456,
0.03571428571428571,
0.09523809523809523,
0.06451612903225806,
0.08333333333333333,
0.05,
0.02857142857142857,
0.09090909090909091,
0,
0.05263157894736842,
0.08333333333333333,
0.10714285714285714
] |
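
A usage sketch exercising the semantics documented above (disjunction, conjunction, negation), assuming the filter function defined in this record is in scope:

data = {"tag": ["Tier1", "TIPpass"], "category": ["Sanity"]}
print(filter("tag: Tier1 | tag: Tier2", data))               # True  (OR)
print(filter("category: Sanity & tag: -destructive", data))  # True  (AND + negation)
print(filter("tag: Tier2, Tier3", data))                     # False (no value matches)
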
def cmpname(name1, name2):
"""
Compare two CIM names for equality and ordering.
The comparison is performed case-insensitively.
One or both of the items may be `None`, and `None` is considered the lowest
possible value.
The implementation delegates to the '==' and '<' operators of the
name datatypes.
If name1 == name2, 0 is returned.
If name1 < name2, -1 is returned.
Otherwise, +1 is returned.
"""
if name1 is None and name2 is None:
return 0
if name1 is None:
return -1
if name2 is None:
return 1
lower_name1 = name1.lower()
lower_name2 = name2.lower()
if lower_name1 == lower_name2:
return 0
return -1 if lower_name1 < lower_name2 else 1 | [
"def",
"cmpname",
"(",
"name1",
",",
"name2",
")",
":",
"if",
"name1",
"is",
"None",
"and",
"name2",
"is",
"None",
":",
"return",
"0",
"if",
"name1",
"is",
"None",
":",
"return",
"-",
"1",
"if",
"name2",
"is",
"None",
":",
"return",
"1",
"lower_name1",
"=",
"name1",
".",
"lower",
"(",
")",
"lower_name2",
"=",
"name2",
".",
"lower",
"(",
")",
"if",
"lower_name1",
"==",
"lower_name2",
":",
"return",
"0",
"return",
"-",
"1",
"if",
"lower_name1",
"<",
"lower_name2",
"else",
"1"
] | 26.814815 | [
0.038461538461538464,
0.2857142857142857,
0.038461538461538464,
0,
0.0392156862745098,
0,
0.0379746835443038,
0.10526315789473684,
0,
0.028985507246376812,
0.10526315789473684,
0,
0.05405405405405406,
0.05405405405405406,
0.06666666666666667,
0.2857142857142857,
0.05128205128205128,
0.125,
0.09523809523809523,
0.11764705882352941,
0.09523809523809523,
0.125,
0.06451612903225806,
0.06451612903225806,
0.058823529411764705,
0.125,
0.04081632653061224
] |
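
Quick checks of the ordering cmpname documents:

print(cmpname(None, "CIM_Foo"))       # -1 (None is the lowest possible value)
print(cmpname("cim_foo", "CIM_Foo"))  # 0  (case-insensitive equality)
print(cmpname("CIM_B", "CIM_A"))      # 1
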
def get(remote_path,
local_path='',
recursive=False,
preserve_times=False,
**kwargs):
'''
Transfer files and directories from remote host to the localhost of the
Minion.
remote_path
Path to retrieve from remote host. Since this is evaluated by scp on the
remote host, shell wildcards and environment variables may be used.
recursive: ``False``
Transfer files and directories recursively.
preserve_times: ``False``
Preserve ``mtime`` and ``atime`` of transferred files and directories.
hostname
The hostname of the remote device.
port: ``22``
The port of the remote device.
username
The username required for SSH authentication on the device.
password
Used for password authentication. It is also used for private key
decryption if ``passphrase`` is not given.
passphrase
Used for decrypting private keys.
pkey
An optional private key to use for authentication.
key_filename
The filename, or list of filenames, of optional private key(s) and/or
certificates to try for authentication.
timeout
An optional timeout (in seconds) for the TCP connect.
socket_timeout: ``10``
The channel socket timeout in seconds.
buff_size: ``16384``
The size of the SCP send buffer.
allow_agent: ``True``
Set to ``False`` to disable connecting to the SSH agent.
look_for_keys: ``True``
Set to ``False`` to disable searching for discoverable private key
files in ``~/.ssh/``
banner_timeout
An optional timeout (in seconds) to wait for the SSH banner to be
presented.
auth_timeout
An optional timeout (in seconds) to wait for an authentication
response.
auto_add_policy: ``False``
Automatically add the host to the ``known_hosts``.
CLI Example:
.. code-block:: bash
salt '*' scp.get /var/tmp/file /tmp/file hostname=10.10.10.1 auto_add_policy=True
'''
scp_client = _prepare_connection(**kwargs)
get_kwargs = {
'recursive': recursive,
'preserve_times': preserve_times
}
if local_path:
get_kwargs['local_path'] = local_path
return scp_client.get(remote_path, **get_kwargs) | [
"def",
"get",
"(",
"remote_path",
",",
"local_path",
"=",
"''",
",",
"recursive",
"=",
"False",
",",
"preserve_times",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"scp_client",
"=",
"_prepare_connection",
"(",
"*",
"*",
"kwargs",
")",
"get_kwargs",
"=",
"{",
"'recursive'",
":",
"recursive",
",",
"'preserve_times'",
":",
"preserve_times",
"}",
"if",
"local_path",
":",
"get_kwargs",
"[",
"'local_path'",
"]",
"=",
"local_path",
"return",
"scp_client",
".",
"get",
"(",
"remote_path",
",",
"*",
"*",
"get_kwargs",
")"
] | 27.204819 | [
0.1,
0.13636363636363635,
0.125,
0.10344827586206896,
0.16666666666666666,
0.2857142857142857,
0.02666666666666667,
0.18181818181818182,
0,
0.13333333333333333,
0.0375,
0.02666666666666667,
0,
0.125,
0.0392156862745098,
0,
0.10344827586206896,
0.038461538461538464,
0,
0.16666666666666666,
0.047619047619047616,
0,
0.1875,
0.05263157894736842,
0,
0.16666666666666666,
0.029850746268656716,
0,
0.16666666666666666,
0.0273972602739726,
0.06,
0,
0.14285714285714285,
0.04878048780487805,
0,
0.25,
0.034482758620689655,
0,
0.125,
0.05194805194805195,
0.0425531914893617,
0,
0.18181818181818182,
0.06557377049180328,
0,
0.11538461538461539,
0.043478260869565216,
0,
0.125,
0.05,
0,
0.12,
0.046875,
0,
0.1111111111111111,
0.04054054054054054,
0.10714285714285714,
0,
0.1111111111111111,
0.0547945205479452,
0.1111111111111111,
0,
0.125,
0.05714285714285714,
0.11764705882352941,
0,
0.1,
0.05172413793103448,
0,
0.125,
0,
0.125,
0,
0.07865168539325842,
0.2857142857142857,
0.043478260869565216,
0.16666666666666666,
0.06451612903225806,
0.05,
0.6,
0.1111111111111111,
0.044444444444444446,
0.038461538461538464
] |
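
The same transfer as the CLI example in the docstring, issued from Python; the keyword names follow the docstring and the values are illustrative:

get('/var/tmp/file', local_path='/tmp/file',
    hostname='10.10.10.1', username='admin', password='pass',
    auto_add_policy=True)
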
def get_record(self, msg_id):
"""Get a specific Task Record, by msg_id."""
r = self._records.find_one({'msg_id': msg_id})
if not r:
# r will be '' if nothing is found
raise KeyError(msg_id)
return r | [
"def",
"get_record",
"(",
"self",
",",
"msg_id",
")",
":",
"r",
"=",
"self",
".",
"_records",
".",
"find_one",
"(",
"{",
"'msg_id'",
":",
"msg_id",
"}",
")",
"if",
"not",
"r",
":",
"# r will be '' if nothing is found",
"raise",
"KeyError",
"(",
"msg_id",
")",
"return",
"r"
] | 35.428571 | [
0.034482758620689655,
0.038461538461538464,
0.037037037037037035,
0.11764705882352941,
0.043478260869565216,
0.058823529411764705,
0.125
] |
def get_mysql_credentials(cfg_file):
"""Get the credentials and database name from options in config file."""
try:
parser = ConfigParser.ConfigParser()
cfg_fp = open(cfg_file)
parser.readfp(cfg_fp)
cfg_fp.close()
except ConfigParser.NoOptionError:
cfg_fp.close()
print('Failed to find mysql connections credentials.')
sys.exit(1)
except IOError:
print('ERROR: Cannot open %s.', cfg_file)
sys.exit(1)
value = parser.get('dfa_mysql', 'connection')
try:
# Find location of pattern in connection parameter as shown below:
# http://username:password@host/databasename?characterset=encoding'
sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
# The list parameter contains:
# indices[0], is the index of '://'
# indices[1], is the index of '@'
# indices[2], is the index of '/'
# indices[3], is the index of '?'
indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]
# Get the credentials
cred = value[indices[0] + 3:indices[1]].split(':')
# Get the host name
host = value[indices[1] + 1:indices[2]]
# Get the database name
db_name = value[indices[2] + 1:indices[3]]
# Get the character encoding
charset = value[indices[3] + 1:].split('=')[1]
return cred[0], cred[1], host, db_name, charset
except (ValueError, IndexError, AttributeError):
print('Failed to find mysql connections credentials.')
sys.exit(1) | [
"def",
"get_mysql_credentials",
"(",
"cfg_file",
")",
":",
"try",
":",
"parser",
"=",
"ConfigParser",
".",
"ConfigParser",
"(",
")",
"cfg_fp",
"=",
"open",
"(",
"cfg_file",
")",
"parser",
".",
"readfp",
"(",
"cfg_fp",
")",
"cfg_fp",
".",
"close",
"(",
")",
"except",
"ConfigParser",
".",
"NoOptionError",
":",
"cfg_fp",
".",
"close",
"(",
")",
"print",
"(",
"'Failed to find mysql connections credentials.'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"except",
"IOError",
":",
"print",
"(",
"'ERROR: Cannot open %s.'",
",",
"cfg_file",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"value",
"=",
"parser",
".",
"get",
"(",
"'dfa_mysql'",
",",
"'connection'",
")",
"try",
":",
"# Find location of pattern in connection parameter as shown below:",
"# http://username:password@host/databasename?characterset=encoding'",
"sobj",
"=",
"re",
".",
"search",
"(",
"r\"(://).*(@).*(/).*(\\?)\"",
",",
"value",
")",
"# The list parameter contains:",
"# indices[0], is the index of '://'",
"# indices[1], is the index of '@'",
"# indices[2], is the index of '/'",
"# indices[3], is the index of '?'",
"indices",
"=",
"[",
"sobj",
".",
"start",
"(",
"1",
")",
",",
"sobj",
".",
"start",
"(",
"2",
")",
",",
"sobj",
".",
"start",
"(",
"3",
")",
",",
"sobj",
".",
"start",
"(",
"4",
")",
"]",
"# Get the credentials",
"cred",
"=",
"value",
"[",
"indices",
"[",
"0",
"]",
"+",
"3",
":",
"indices",
"[",
"1",
"]",
"]",
".",
"split",
"(",
"':'",
")",
"# Get the host name",
"host",
"=",
"value",
"[",
"indices",
"[",
"1",
"]",
"+",
"1",
":",
"indices",
"[",
"2",
"]",
"]",
"# Get the database name",
"db_name",
"=",
"value",
"[",
"indices",
"[",
"2",
"]",
"+",
"1",
":",
"indices",
"[",
"3",
"]",
"]",
"# Get the character encoding",
"charset",
"=",
"value",
"[",
"indices",
"[",
"3",
"]",
"+",
"1",
":",
"]",
".",
"split",
"(",
"'='",
")",
"[",
"1",
"]",
"return",
"cred",
"[",
"0",
"]",
",",
"cred",
"[",
"1",
"]",
",",
"host",
",",
"db_name",
",",
"charset",
"except",
"(",
"ValueError",
",",
"IndexError",
",",
"AttributeError",
")",
":",
"print",
"(",
"'Failed to find mysql connections credentials.'",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | 33.456522 | [
0.027777777777777776,
0.02631578947368421,
0,
0.25,
0.045454545454545456,
0.06451612903225806,
0.06896551724137931,
0.09090909090909091,
0.05263157894736842,
0.09090909090909091,
0.03225806451612903,
0.10526315789473684,
0.10526315789473684,
0.04081632653061224,
0.10526315789473684,
0,
0.04081632653061224,
0,
0.25,
0.02702702702702703,
0.02666666666666667,
0.03508771929824561,
0,
0.05263157894736842,
0.046511627906976744,
0.04878048780487805,
0.04878048780487805,
0.04878048780487805,
0.02564102564102564,
0,
0.06896551724137931,
0.034482758620689655,
0,
0.07407407407407407,
0.0425531914893617,
0,
0.06451612903225806,
0.04,
0,
0.05555555555555555,
0.037037037037037035,
0,
0.03636363636363636,
0.038461538461538464,
0.03225806451612903,
0.10526315789473684
] |
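
The index arithmetic in get_mysql_credentials is easier to verify against a concrete (made-up) connection string:

import re

value = "mysql://dfa:dfapass@10.0.0.5/dfadb?charset=utf8"
sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
i = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]
print(value[i[0] + 3:i[1]].split(':'))  # ['dfa', 'dfapass']
print(value[i[1] + 1:i[2]])             # 10.0.0.5
print(value[i[2] + 1:i[3]])             # dfadb
print(value[i[3] + 1:].split('=')[1])   # utf8
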
def _next_header_line(self):
"""
Non-destructive buffer processor returning all lines (defined
by self.EOL_TOKEN) in self._buffer. If end of buffer is reached,
None is yielded and it is expected that more bytes will be
appended to the buffer by the caller.
Upon finding an empty header, this method trims the buffer to
the start of the body
"""
eol = self.EOL_TOKEN
eol_length = len(eol)
start = 0
end = self._buffer.find(eol)
# if start == end, found empty header - stop iterating
while start != end:
# end of line was found
if end != -1:
yield self._buffer[start:end]
start = end + eol_length
# end of line was not found - request more buffer data
else:
yield None
# find next end of line
end = self._buffer.find(eol, start)
# trim buffer
del self._buffer[:end + eol_length] | [
"def",
"_next_header_line",
"(",
"self",
")",
":",
"eol",
"=",
"self",
".",
"EOL_TOKEN",
"eol_length",
"=",
"len",
"(",
"eol",
")",
"start",
"=",
"0",
"end",
"=",
"self",
".",
"_buffer",
".",
"find",
"(",
"eol",
")",
"# if start == end, found empty header - stop iterating",
"while",
"start",
"!=",
"end",
":",
"# end of line was found",
"if",
"end",
"!=",
"-",
"1",
":",
"yield",
"self",
".",
"_buffer",
"[",
"start",
":",
"end",
"]",
"start",
"=",
"end",
"+",
"eol_length",
"# end of line was not found - request more buffer data",
"else",
":",
"yield",
"None",
"# find next end of line",
"end",
"=",
"self",
".",
"_buffer",
".",
"find",
"(",
"eol",
",",
"start",
")",
"# trim buffer",
"del",
"self",
".",
"_buffer",
"[",
":",
"end",
"+",
"eol_length",
"]"
] | 32.225806 | [
0.03571428571428571,
0.18181818181818182,
0.057971014492753624,
0.041666666666666664,
0.030303030303030304,
0.044444444444444446,
0,
0.028985507246376812,
0.06896551724137931,
0.18181818181818182,
0.07142857142857142,
0.06896551724137931,
0.11764705882352941,
0.05555555555555555,
0,
0.03225806451612903,
0.07407407407407407,
0,
0.05714285714285714,
0.08,
0.044444444444444446,
0.05,
0.030303030303030304,
0.11764705882352941,
0.07692307692307693,
0,
0.05714285714285714,
0.0425531914893617,
0,
0.09523809523809523,
0.046511627906976744
] |
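
A standalone harness showing the feed-and-resume protocol of _next_header_line; the HeaderBuffer host class is a stand-in (only EOL_TOKEN and _buffer are required by the generator):

class HeaderBuffer(object):
    EOL_TOKEN = b"\r\n"
    def __init__(self):
        self._buffer = bytearray()

HeaderBuffer._next_header_line = _next_header_line  # attach the generator above

hb = HeaderBuffer()
lines = hb._next_header_line()
hb._buffer += b"Content-Type: text/plain\r\n"
print(next(lines))   # bytearray(b'Content-Type: text/plain')
print(next(lines))   # None: end of buffer reached, caller must append more bytes
hb._buffer += b"\r\nbody bytes"
try:
    next(lines)
except StopIteration:  # empty header line found: headers complete
    pass
print(hb._buffer)      # bytearray(b'body bytes') -- header bytes trimmed away
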
def depth(args):
"""
%prog depth reads.bed features.bed
    Calculate depth per feature using coverageBed.
"""
p = OptionParser(depth.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
readsbed, featsbed = args
fp = open(featsbed)
nargs = len(fp.readline().split("\t"))
keepcols = ",".join(str(x) for x in range(1, nargs + 1))
cmd = "coverageBed -a {0} -b {1} -d".format(readsbed, featsbed)
cmd += " | groupBy -g {0} -c {1} -o mean".format(keepcols, nargs + 2)
sh(cmd, outfile=opts.outfile) | [
"def",
"depth",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"depth",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"readsbed",
",",
"featsbed",
"=",
"args",
"fp",
"=",
"open",
"(",
"featsbed",
")",
"nargs",
"=",
"len",
"(",
"fp",
".",
"readline",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
")",
"keepcols",
"=",
"\",\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"nargs",
"+",
"1",
")",
")",
"cmd",
"=",
"\"coverageBed -a {0} -b {1} -d\"",
".",
"format",
"(",
"readsbed",
",",
"featsbed",
")",
"cmd",
"+=",
"\" | groupBy -g {0} -c {1} -o mean\"",
".",
"format",
"(",
"keepcols",
",",
"nargs",
"+",
"2",
")",
"sh",
"(",
"cmd",
",",
"outfile",
"=",
"opts",
".",
"outfile",
")"
] | 29.9 | [
0.0625,
0.2857142857142857,
0.05263157894736842,
0,
0.03571428571428571,
0.2857142857142857,
0.05714285714285714,
0.10526315789473684,
0.05714285714285714,
0,
0.09090909090909091,
0.05555555555555555,
0,
0.06896551724137931,
0.08695652173913043,
0.047619047619047616,
0.03333333333333333,
0.029850746268656716,
0.0273972602739726,
0.06060606060606061
] |
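
For concreteness, with reads.bed and a six-column features.bed (nargs = 6), the two format calls in depth compose to the following pipeline (file names illustrative):

nargs = 6  # column count of features.bed
keepcols = ",".join(str(x) for x in range(1, nargs + 1))
cmd = "coverageBed -a {0} -b {1} -d".format("reads.bed", "features.bed")
cmd += " | groupBy -g {0} -c {1} -o mean".format(keepcols, nargs + 2)
print(cmd)
# coverageBed -a reads.bed -b features.bed -d | groupBy -g 1,2,3,4,5,6 -c 8 -o mean
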
def gen_ca_cert(filename, dirname, days, silent=False):
"""
    generate a CA key and certificate pair.
:param filename: prefix for the key and cert file
:param dirname: name of the directory
:param days: days of the certificate being valid
:param silent: whether to suppress output
"""
keyfile = os.path.join(dirname, '{}.key'.format(filename))
ca_crt = os.path.join(dirname, '{}.crt'.format(filename))
gen_private_key(keyfile, silent)
gen_self_signed_cert(ca_crt, keyfile, days, silent) | [
"def",
"gen_ca_cert",
"(",
"filename",
",",
"dirname",
",",
"days",
",",
"silent",
"=",
"False",
")",
":",
"keyfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"'{}.key'",
".",
"format",
"(",
"filename",
")",
")",
"ca_crt",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"'{}.crt'",
".",
"format",
"(",
"filename",
")",
")",
"gen_private_key",
"(",
"keyfile",
",",
"silent",
")",
"gen_self_signed_cert",
"(",
"ca_crt",
",",
"keyfile",
",",
"days",
",",
"silent",
")"
] | 43.416667 | [
0.01818181818181818,
0.2857142857142857,
0.0425531914893617,
0.05660377358490566,
0.07317073170731707,
0.057692307692307696,
0.06666666666666667,
0.2857142857142857,
0.03225806451612903,
0.03278688524590164,
0.05555555555555555,
0.03636363636363636
] |
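
A hypothetical invocation of gen_ca_cert (paths and lifetime are illustrative):

# writes ./pki/ca.key and ./pki/ca.crt, valid for 365 days
gen_ca_cert('ca', './pki', 365)
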
def fix_e224(self, result):
"""Remove extraneous whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + target[offset:].replace('\t', ' ')
self.source[result['line'] - 1] = fixed | [
"def",
"fix_e224",
"(",
"self",
",",
"result",
")",
":",
"target",
"=",
"self",
".",
"source",
"[",
"result",
"[",
"'line'",
"]",
"-",
"1",
"]",
"offset",
"=",
"result",
"[",
"'column'",
"]",
"-",
"1",
"fixed",
"=",
"target",
"[",
":",
"offset",
"]",
"+",
"target",
"[",
"offset",
":",
"]",
".",
"replace",
"(",
"'\\t'",
",",
"' '",
")",
"self",
".",
"source",
"[",
"result",
"[",
"'line'",
"]",
"-",
"1",
"]",
"=",
"fixed"
] | 47.666667 | [
0.037037037037037035,
0.03389830508474576,
0.041666666666666664,
0.05405405405405406,
0.029411764705882353,
0.0425531914893617
] |
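
Why fix_e224 slices at the reported column instead of replacing tabs globally: whitespace before the offset, such as indentation tabs, must survive. A toy check:

target = "\tx = 1 +\t2"  # E224: tab after the '+' operator
offset = 8               # zero-based column reported for the bad whitespace
fixed = target[:offset] + target[offset:].replace('\t', ' ')
print(repr(fixed))       # '\tx = 1 + 2' -- the indentation tab is untouched
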
def area4info():
"""
Get some info about the package.
:return: Package info
:rtype: str
"""
# Info variables:
name = "area4"
author = "https://github.com/RDIL"
author_email = rdillib.get_email()
description = "Dividers in Python, the easy way!"
return "{0}: {1}\n{2}: {3}\n{4}: {5}\n{6}: {7}".format(
"Name:", name,
"Author:", author,
"Author Email:", author_email,
"Description:", description
) | [
"def",
"area4info",
"(",
")",
":",
"# Info variables:",
"name",
"=",
"\"area4\"",
"author",
"=",
"\"https://github.com/RDIL\"",
"author_email",
"=",
"rdillib",
".",
"get_email",
"(",
")",
"description",
"=",
"\"Dividers in Python, the easy way!\"",
"return",
"\"{0}: {1}\\n{2}: {3}\\n{4}: {5}\\n{6}: {7}\"",
".",
"format",
"(",
"\"Name:\"",
",",
"name",
",",
"\"Author:\"",
",",
"author",
",",
"\"Author Email:\"",
",",
"author_email",
",",
"\"Description:\"",
",",
"description",
")"
] | 25.5 | [
0.0625,
0.2857142857142857,
0.05555555555555555,
0,
0.12,
0.2,
0.2857142857142857,
0.09523809523809523,
0.1111111111111111,
0.05263157894736842,
0.05263157894736842,
0.03773584905660377,
0.05084745762711865,
0.09090909090909091,
0.07692307692307693,
0.05263157894736842,
0.05714285714285714,
0.6
] |
def _build(lhs, rhs):
"""Stores the wire assignment details until finalize is called."""
_check_under_condition()
final_predicate, pred_set = _current_select()
_check_and_add_pred_set(lhs, pred_set)
_predicate_map.setdefault(lhs, []).append((final_predicate, rhs)) | [
"def",
"_build",
"(",
"lhs",
",",
"rhs",
")",
":",
"_check_under_condition",
"(",
")",
"final_predicate",
",",
"pred_set",
"=",
"_current_select",
"(",
")",
"_check_and_add_pred_set",
"(",
"lhs",
",",
"pred_set",
")",
"_predicate_map",
".",
"setdefault",
"(",
"lhs",
",",
"[",
"]",
")",
".",
"append",
"(",
"(",
"final_predicate",
",",
"rhs",
")",
")"
] | 46.5 | [
0.047619047619047616,
0.02857142857142857,
0.07142857142857142,
0.04081632653061224,
0.047619047619047616,
0.028985507246376812
] |
def _set_config(c):
"""Set gl configuration"""
gl_attribs = [glcanvas.WX_GL_RGBA,
glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'],
glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'],
glcanvas.WX_GL_MIN_RED, c['red_size'],
glcanvas.WX_GL_MIN_GREEN, c['green_size'],
glcanvas.WX_GL_MIN_BLUE, c['blue_size'],
glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']]
gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else []
gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else []
return gl_attribs | [
"def",
"_set_config",
"(",
"c",
")",
":",
"gl_attribs",
"=",
"[",
"glcanvas",
".",
"WX_GL_RGBA",
",",
"glcanvas",
".",
"WX_GL_DEPTH_SIZE",
",",
"c",
"[",
"'depth_size'",
"]",
",",
"glcanvas",
".",
"WX_GL_STENCIL_SIZE",
",",
"c",
"[",
"'stencil_size'",
"]",
",",
"glcanvas",
".",
"WX_GL_MIN_RED",
",",
"c",
"[",
"'red_size'",
"]",
",",
"glcanvas",
".",
"WX_GL_MIN_GREEN",
",",
"c",
"[",
"'green_size'",
"]",
",",
"glcanvas",
".",
"WX_GL_MIN_BLUE",
",",
"c",
"[",
"'blue_size'",
"]",
",",
"glcanvas",
".",
"WX_GL_MIN_ALPHA",
",",
"c",
"[",
"'alpha_size'",
"]",
"]",
"gl_attribs",
"+=",
"[",
"glcanvas",
".",
"WX_GL_DOUBLEBUFFER",
"]",
"if",
"c",
"[",
"'double_buffer'",
"]",
"else",
"[",
"]",
"gl_attribs",
"+=",
"[",
"glcanvas",
".",
"WX_GL_STEREO",
"]",
"if",
"c",
"[",
"'stereo'",
"]",
"else",
"[",
"]",
"return",
"gl_attribs"
] | 50.75 | [
0.05263157894736842,
0.06666666666666667,
0.07894736842105263,
0.04918032786885246,
0.046153846153846156,
0.05357142857142857,
0.05,
0.05172413793103448,
0.06666666666666667,
0.025974025974025976,
0.03125,
0.09523809523809523
] |
def _get_cfg_node(cfg, state):
"""
Get the CFGNode object on the control flow graph given an angr state.
:param angr.analyses.CFGEmulated cfg: An instance of CFGEmulated.
:param angr.SimState state: The current state.
:return: A CFGNode instance if the node exists, or None if the node cannot be found.
:rtype: CFGNode or None
"""
call_stack_suffix = state.callstack.stack_suffix(cfg.context_sensitivity_level)
is_syscall = state.history.jumpkind is not None and state.history.jumpkind.startswith('Ijk_Sys')
block_id = cfg._generate_block_id(call_stack_suffix, state.addr, is_syscall)
return cfg.get_node(block_id) | [
"def",
"_get_cfg_node",
"(",
"cfg",
",",
"state",
")",
":",
"call_stack_suffix",
"=",
"state",
".",
"callstack",
".",
"stack_suffix",
"(",
"cfg",
".",
"context_sensitivity_level",
")",
"is_syscall",
"=",
"state",
".",
"history",
".",
"jumpkind",
"is",
"not",
"None",
"and",
"state",
".",
"history",
".",
"jumpkind",
".",
"startswith",
"(",
"'Ijk_Sys'",
")",
"block_id",
"=",
"cfg",
".",
"_generate_block_id",
"(",
"call_stack_suffix",
",",
"state",
".",
"addr",
",",
"is_syscall",
")",
"return",
"cfg",
".",
"get_node",
"(",
"block_id",
")"
] | 44.0625 | [
0.03333333333333333,
0.18181818181818182,
0.025974025974025976,
0,
0.04,
0.045454545454545456,
0.043478260869565216,
0.0967741935483871,
0.18181818181818182,
0,
0.034482758620689655,
0.028846153846153848,
0,
0.03571428571428571,
0,
0.05405405405405406
] |
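A dependency-free sketch of the same lookup pattern: the block id is a tuple of (call-stack suffix, address, syscall flag), and a missing node simply yields None. The dict of nodes and every value below are stand-ins for the real angr objects.

# Stand-in for the CFG lookup above; real angr block ids carry more state.
def get_node_sketch(nodes, call_stack_suffix, addr, is_syscall):
    block_id = (call_stack_suffix, addr, is_syscall)
    return nodes.get(block_id)  # None when the node is not in the graph

nodes = {(("caller",), 0x400000, False): "node@0x400000"}
print(get_node_sketch(nodes, ("caller",), 0x400000, False))  # node@0x400000
print(get_node_sketch(nodes, ("caller",), 0x400004, False))  # None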
def _resample_nnresample2(s, up, down, beta=5.0, L=16001, axis=0):
# type: (np.ndarray, float, float, float, int, int) -> np.ndarray
"""
Taken from https://github.com/jthiem/nnresample
    Resample a signal by the rational factor "up" / "down".
Parameters
----------
    s : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
beta : float
Beta factor for Kaiser window. Determines tradeoff between
stopband attenuation and transition band width
L : int
FIR filter order. Determines stopband attenuation. The higher
        the better, at the cost of complexity.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
Returns
-------
resampled_x : array
The resampled array.
Notes
-----
The function keeps a global cache of filters, since they are
determined entirely by up, down, beta, and L. If a filter
has previously been used it is looked up instead of being
recomputed.
"""
# check if a resampling filter with the chosen parameters already exists
params = (up, down, beta, L)
if params in _precomputed_filters.keys():
# if so, use it.
filt = _precomputed_filters[params]
else:
# if not, generate filter, store it, use it
filt = _nnresample_compute_filt(up, down, beta, L)
_precomputed_filters[params] = filt
return sig.resample_poly(s, up, down, window=np.array(filt), axis=axis) | [
"def",
"_resample_nnresample2",
"(",
"s",
",",
"up",
",",
"down",
",",
"beta",
"=",
"5.0",
",",
"L",
"=",
"16001",
",",
"axis",
"=",
"0",
")",
":",
"# type: (np.ndarray, float, float, float, int, int) -> np.ndarray",
"# check if a resampling filter with the chosen parameters already exists",
"params",
"=",
"(",
"up",
",",
"down",
",",
"beta",
",",
"L",
")",
"if",
"params",
"in",
"_precomputed_filters",
".",
"keys",
"(",
")",
":",
"# if so, use it.",
"filt",
"=",
"_precomputed_filters",
"[",
"params",
"]",
"else",
":",
"# if not, generate filter, store it, use it",
"filt",
"=",
"_nnresample_compute_filt",
"(",
"up",
",",
"down",
",",
"beta",
",",
"L",
")",
"_precomputed_filters",
"[",
"params",
"]",
"=",
"filt",
"return",
"sig",
".",
"resample_poly",
"(",
"s",
",",
"up",
",",
"down",
",",
"window",
"=",
"np",
".",
"array",
"(",
"filt",
")",
",",
"axis",
"=",
"axis",
")"
] | 33.478261 | [
0.015151515151515152,
0.028985507246376812,
0.2857142857142857,
0.058823529411764705,
0,
0.0392156862745098,
0.5,
0.14285714285714285,
0.14285714285714285,
0.16666666666666666,
0.06060606060606061,
0.25,
0.06666666666666667,
0.21428571428571427,
0.0625,
0.1875,
0.029850746268656716,
0.037037037037037035,
0.2727272727272727,
0.028169014084507043,
0.0425531914893617,
0.125,
0.05357142857142857,
0.25,
0.18181818181818182,
0.18181818181818182,
0.13043478260869565,
0.07142857142857142,
0.25,
0.2222222222222222,
0.2222222222222222,
0.03125,
0.03225806451612903,
0.03278688524590164,
0.13333333333333333,
0.21428571428571427,
0.02631578947368421,
0.0625,
0.044444444444444446,
0.08333333333333333,
0.046511627906976744,
0.2222222222222222,
0.0392156862745098,
0.034482758620689655,
0.046511627906976744,
0.02666666666666667
] |
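The caching idea above works with plain scipy as well: key a filter cache on (up, down, beta, L) and hand the cached taps to resample_poly. The firwin cutoff below is a simplification of nnresample's actual filter design, so treat the sketch as illustrative.

import numpy as np
from scipy import signal as sig

_filters = {}  # (up, down, beta, L) -> FIR taps, mirroring the row above

def resample_sketch(x, up, down, beta=5.0, L=16001):
    key = (up, down, beta, L)
    if key not in _filters:
        # Simplified design; nnresample adjusts the cutoff more carefully.
        _filters[key] = sig.firwin(L, 1.0 / max(up, down),
                                   window=('kaiser', beta))
    return sig.resample_poly(x, up, down, window=np.array(_filters[key]))

y = resample_sketch(np.random.randn(1000), 3, 2)  # 1000 -> 1500 samples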
def on_window_losefocus(self, window, event):
"""Hides terminal main window when it loses the focus and if
the window_losefocus gconf variable is True.
"""
if not HidePrevention(self.window).may_hide():
return
value = self.settings.general.get_boolean('window-losefocus')
visible = window.get_property('visible')
self.losefocus_time = get_server_time(self.window)
if visible and value:
log.info("Hiding on focus lose")
self.hide() | [
"def",
"on_window_losefocus",
"(",
"self",
",",
"window",
",",
"event",
")",
":",
"if",
"not",
"HidePrevention",
"(",
"self",
".",
"window",
")",
".",
"may_hide",
"(",
")",
":",
"return",
"value",
"=",
"self",
".",
"settings",
".",
"general",
".",
"get_boolean",
"(",
"'window-losefocus'",
")",
"visible",
"=",
"window",
".",
"get_property",
"(",
"'visible'",
")",
"self",
".",
"losefocus_time",
"=",
"get_server_time",
"(",
"self",
".",
"window",
")",
"if",
"visible",
"and",
"value",
":",
"log",
".",
"info",
"(",
"\"Hiding on focus lose\"",
")",
"self",
".",
"hide",
"(",
")"
] | 39.923077 | [
0.022222222222222223,
0.029411764705882353,
0.038461538461538464,
0.18181818181818182,
0.037037037037037035,
0.1111111111111111,
0,
0.028985507246376812,
0.041666666666666664,
0.034482758620689655,
0.06896551724137931,
0.045454545454545456,
0.08695652173913043
] |
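Stripped of the GTK and settings machinery, the handler above reduces to three boolean checks; the sketch below mirrors that decision so the control flow can be exercised in isolation, with every name a stand-in.

# Stand-in for the hide-on-focus-loss decision above.
def should_hide(may_hide, losefocus_enabled, window_visible):
    return may_hide and losefocus_enabled and window_visible

print(should_hide(True, True, True))   # True  -> the window would hide
print(should_hide(True, False, True))  # False -> setting is disabled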
def get_content(self, obj):
"""
Obtain the QuerySet of content items.
:param obj: Page object.
:return: List of rendered content items.
"""
serializer = ContentSerializer(
instance=obj.contentitem_set.all(),
many=True,
context=self.context,
)
return serializer.data | [
"def",
"get_content",
"(",
"self",
",",
"obj",
")",
":",
"serializer",
"=",
"ContentSerializer",
"(",
"instance",
"=",
"obj",
".",
"contentitem_set",
".",
"all",
"(",
")",
",",
"many",
"=",
"True",
",",
"context",
"=",
"self",
".",
"context",
",",
")",
"return",
"serializer",
".",
"data"
] | 29.5 | [
0.037037037037037035,
0.18181818181818182,
0.044444444444444446,
0.09375,
0.0625,
0.18181818181818182,
0.07692307692307693,
0.06382978723404255,
0.13636363636363635,
0.09090909090909091,
0.3333333333333333,
0.06666666666666667
] |
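In Django REST Framework a method like the one above usually backs a SerializerMethodField; the pure-Python stand-in below reproduces the pattern (serialize a related set with the parent's context) without requiring Django, and every name in it is an assumption.

# Pure-Python stand-in for the serializer-method-field pattern above.
class ContentSerializerStub:
    def __init__(self, instance, many=False, context=None):
        # Rendering is faked: pair each item with the inherited context.
        self.data = [{"item": i, "ctx": context} for i in instance]

class PageSerializerStub:
    def __init__(self, context):
        self.context = context

    def get_content(self, obj):
        # Same shape as the row: serialize the related set, pass context on.
        return ContentSerializerStub(obj["contentitem_set"], many=True,
                                     context=self.context).data

page = {"contentitem_set": ["intro", "body"]}
print(PageSerializerStub({"request": None}).get_content(page))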
def interpolate_data(self, propname):
r"""
    Determines a pore (or throat) property as the average of its
neighboring throats (or pores)
Parameters
----------
propname: string
The dictionary key to the values to be interpolated.
Returns
-------
An array containing interpolated pore (or throat) data
Notes
-----
This uses an unweighted average, without attempting to account for
distances or sizes of pores and throats.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[3, 1, 1])
>>> pn['pore.value'] = [1, 2, 3]
>>> pn.interpolate_data('pore.value')
array([1.5, 2.5])
"""
mro = self._mro()
if 'GenericNetwork' in mro:
net = self
Ts = net.throats()
Ps = net.pores()
label = 'all'
elif ('GenericPhase' in mro) or ('GenericAlgorithm' in mro):
net = self.project.network
Ts = net.throats()
Ps = net.pores()
label = 'all'
elif ('GenericGeometry' in mro) or ('GenericPhysics' in mro):
net = self.project.network
Ts = net.throats(self.name)
Ps = net.pores(self.name)
label = self.name
if propname.startswith('throat'):
# Upcast data to full network size
temp = sp.ones((net.Nt,))*sp.nan
temp[Ts] = self[propname]
data = temp
temp = sp.ones((net.Np,))*sp.nan
for pore in Ps:
neighborTs = net.find_neighbor_throats(pore)
neighborTs = net.filter_by_label(throats=neighborTs,
labels=label)
temp[pore] = sp.mean(data[neighborTs])
values = temp[Ps]
elif propname.startswith('pore'):
# Upcast data to full network size
data = sp.ones((net.Np, ))*sp.nan
data[Ps] = self[propname]
Ps12 = net['throat.conns'][Ts]
values = sp.mean(data[Ps12], axis=1)
return values | [
"def",
"interpolate_data",
"(",
"self",
",",
"propname",
")",
":",
"mro",
"=",
"self",
".",
"_mro",
"(",
")",
"if",
"'GenericNetwork'",
"in",
"mro",
":",
"net",
"=",
"self",
"Ts",
"=",
"net",
".",
"throats",
"(",
")",
"Ps",
"=",
"net",
".",
"pores",
"(",
")",
"label",
"=",
"'all'",
"elif",
"(",
"'GenericPhase'",
"in",
"mro",
")",
"or",
"(",
"'GenericAlgorithm'",
"in",
"mro",
")",
":",
"net",
"=",
"self",
".",
"project",
".",
"network",
"Ts",
"=",
"net",
".",
"throats",
"(",
")",
"Ps",
"=",
"net",
".",
"pores",
"(",
")",
"label",
"=",
"'all'",
"elif",
"(",
"'GenericGeometry'",
"in",
"mro",
")",
"or",
"(",
"'GenericPhysics'",
"in",
"mro",
")",
":",
"net",
"=",
"self",
".",
"project",
".",
"network",
"Ts",
"=",
"net",
".",
"throats",
"(",
"self",
".",
"name",
")",
"Ps",
"=",
"net",
".",
"pores",
"(",
"self",
".",
"name",
")",
"label",
"=",
"self",
".",
"name",
"if",
"propname",
".",
"startswith",
"(",
"'throat'",
")",
":",
"# Upcast data to full network size",
"temp",
"=",
"sp",
".",
"ones",
"(",
"(",
"net",
".",
"Nt",
",",
")",
")",
"*",
"sp",
".",
"nan",
"temp",
"[",
"Ts",
"]",
"=",
"self",
"[",
"propname",
"]",
"data",
"=",
"temp",
"temp",
"=",
"sp",
".",
"ones",
"(",
"(",
"net",
".",
"Np",
",",
")",
")",
"*",
"sp",
".",
"nan",
"for",
"pore",
"in",
"Ps",
":",
"neighborTs",
"=",
"net",
".",
"find_neighbor_throats",
"(",
"pore",
")",
"neighborTs",
"=",
"net",
".",
"filter_by_label",
"(",
"throats",
"=",
"neighborTs",
",",
"labels",
"=",
"label",
")",
"temp",
"[",
"pore",
"]",
"=",
"sp",
".",
"mean",
"(",
"data",
"[",
"neighborTs",
"]",
")",
"values",
"=",
"temp",
"[",
"Ps",
"]",
"elif",
"propname",
".",
"startswith",
"(",
"'pore'",
")",
":",
"# Upcast data to full network size",
"data",
"=",
"sp",
".",
"ones",
"(",
"(",
"net",
".",
"Np",
",",
")",
")",
"*",
"sp",
".",
"nan",
"data",
"[",
"Ps",
"]",
"=",
"self",
"[",
"propname",
"]",
"Ps12",
"=",
"net",
"[",
"'throat.conns'",
"]",
"[",
"Ts",
"]",
"values",
"=",
"sp",
".",
"mean",
"(",
"data",
"[",
"Ps12",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"values"
] | 33.904762 | [
0.02702702702702703,
0.16666666666666666,
0.057971014492753624,
0.10526315789473684,
0,
0.1111111111111111,
0.1111111111111111,
0.08333333333333333,
0.03125,
0,
0.13333333333333333,
0.13333333333333333,
0.06451612903225806,
0,
0.15384615384615385,
0.15384615384615385,
0.02702702702702703,
0.041666666666666664,
0,
0.125,
0.125,
0.09375,
0.06,
0.075,
0.06666666666666667,
0.08,
0,
0.18181818181818182,
0.08,
0.05714285714285714,
0.09090909090909091,
0.06666666666666667,
0.07142857142857142,
0.08,
0.029411764705882353,
0.05263157894736842,
0.06666666666666667,
0.07142857142857142,
0.08,
0.028985507246376812,
0.05263157894736842,
0.05128205128205128,
0.05405405405405406,
0.06896551724137931,
0.04878048780487805,
0.043478260869565216,
0.045454545454545456,
0.05405405405405406,
0.08695652173913043,
0.045454545454545456,
0.07407407407407407,
0.03333333333333333,
0.04411764705882353,
0.08064516129032258,
0.037037037037037035,
0.06896551724137931,
0.04878048780487805,
0.043478260869565216,
0.044444444444444446,
0.05405405405405406,
0.047619047619047616,
0.041666666666666664,
0.09523809523809523
] |
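The pore-to-throat branch of the method above boils down to one vectorized mean over 'throat.conns'; the numpy-only sketch below reproduces the docstring example, where pore values [1, 2, 3] on a 3-pore chain give throat values [1.5, 2.5].

import numpy as np

pore_vals = np.array([1.0, 2.0, 3.0])
conns = np.array([[0, 1], [1, 2]])  # throat.conns: the two pores per throat
throat_vals = np.mean(pore_vals[conns], axis=1)
print(throat_vals)  # [1.5 2.5], matching the docstring example above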