| column | dtype | stats |
|---|---|---|
| repo | string | lengths 7–55 |
| path | string | lengths 4–127 |
| func_name | string | lengths 1–88 |
| original_string | string | lengths 75–19.8k |
| language | string | 1 class (`python`) |
| code | string | lengths 75–19.8k |
| code_tokens | sequence | lengths 20–707 |
| docstring | string | lengths 3–17.3k |
| docstring_tokens | sequence | lengths 3–222 |
| sha | string | length 40 |
| url | string | lengths 87–242 |
| partition | string | 1 class (`train`) |
| idx | int64 | 0–252k |

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/nexson_syntax/helper.py | **func_name:** `_add_uniq_value_to_dict_bf` | **language:** python

```python
def _add_uniq_value_to_dict_bf(d, k, v):
    """Like _add_value_to_dict_bf but will not add v if another
    element under key `k` has the same value.
    """
    prev = d.get(k)
    if prev is None:
        d[k] = v
    elif isinstance(prev, list):
        if not isinstance(v, list):
            v = [v]
        for sel in v:
            found = False
            for el in prev:
                if el == sel:
                    found = True
                    break
            if not found:
                prev.append(sel)
    else:
        if isinstance(v, list):
            prev = [prev]
            for sel in v:
                found = False
                for el in prev:
                    if el == sel:
                        found = True
                        break
                if not found:
                    prev.append(sel)
            if len(prev) > 1:
                d[k] = prev
        elif prev != v:
            d[k] = [prev, v]
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/helper.py#L136-L168 | **partition:** train | **idx:** 400
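
A quick illustration of the merge behavior the docstring describes (a hypothetical snippet, assuming the function above is in scope): a scalar is promoted to a list only when a genuinely new value arrives under the same key, and duplicates are dropped.

```python
d = {}
_add_uniq_value_to_dict_bf(d, 'tag', 'a')         # first value stays a scalar
assert d == {'tag': 'a'}
_add_uniq_value_to_dict_bf(d, 'tag', 'a')         # duplicate: nothing changes
assert d == {'tag': 'a'}
_add_uniq_value_to_dict_bf(d, 'tag', 'b')         # new value: promoted to a list
assert d == {'tag': ['a', 'b']}
_add_uniq_value_to_dict_bf(d, 'tag', ['b', 'c'])  # list input: only 'c' is new
assert d == {'tag': ['a', 'b', 'c']}
```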

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/nexson_syntax/helper.py | **func_name:** `_debug_dump_dom` | **language:** python

```python
def _debug_dump_dom(el):
    """Debugging helper. Prints out `el` contents."""
    import xml.dom.minidom
    s = [el.nodeName]
    att_container = el.attributes
    for i in range(att_container.length):
        attr = att_container.item(i)
        s.append(' @{a}="{v}"'.format(a=attr.name, v=attr.value))
    for c in el.childNodes:
        if c.nodeType == xml.dom.minidom.Node.TEXT_NODE:
            s.append(' {a} type="TEXT" data="{d}"'.format(a=c.nodeName, d=c.data))
        else:
            s.append(' {a} child'.format(a=c.nodeName))
    return '\n'.join(s)
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/helper.py#L177-L190 | **partition:** train | **idx:** 405
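
For example (a hypothetical snippet, assuming the function above is in scope), dumping a small `minidom` element produces one line per attribute and child node:

```python
import xml.dom.minidom

doc = xml.dom.minidom.parseString('<meta property="ot:tag">ingroup</meta>')
print(_debug_dump_dom(doc.documentElement))
# meta
#  @property="ot:tag"
#  #text type="TEXT" data="ingroup"
```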

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/nexson_syntax/helper.py | **func_name:** `_convert_hbf_meta_val_for_xml` | **language:** python

```python
def _convert_hbf_meta_val_for_xml(key, val):
    """Convert to a BadgerFish-style dict for addition to a dict suitable for
    addition to XML tree or for v1.0 to v0.0 conversion."""
    if isinstance(val, list):
        return [_convert_hbf_meta_val_for_xml(key, i) for i in val]
    is_literal = True
    content = None
    if isinstance(val, dict):
        ret = val
        if '@href' in val:
            is_literal = False
        else:
            content = val.get('$')
            if isinstance(content, dict) and _contains_hbf_meta_keys(val):
                is_literal = False
    else:
        ret = {}
        content = val
    if is_literal:
        ret.setdefault('@xsi:type', 'nex:LiteralMeta')
        ret.setdefault('@property', key)
        if content is not None:
            ret.setdefault('@datatype', _python_instance_to_nexml_meta_datatype(content))
        if ret is not val:
            ret['$'] = content
    else:
        ret.setdefault('@xsi:type', 'nex:ResourceMeta')
        ret.setdefault('@rel', key)
    return ret
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/helper.py#L249-L277 | **partition:** train | **idx:** 402
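
The `@href` branch is the easiest to trace without the helpers this record does not include (`_contains_hbf_meta_keys`, `_python_instance_to_nexml_meta_datatype`). A hypothetical call with a resource-style value, assuming the function above is in scope:

```python
val = {'@href': 'https://example.org/study/123'}  # made-up URL for illustration
out = _convert_hbf_meta_val_for_xml('ot:studyPublication', val)
# '@href' forces the ResourceMeta branch, so no '@datatype' or '$' is attached:
assert out == {'@href': 'https://example.org/study/123',
               '@xsi:type': 'nex:ResourceMeta',
               '@rel': 'ot:studyPublication'}
```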

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/nexson_syntax/helper.py | **func_name:** `find_nested_meta_first` | **language:** python

```python
def find_nested_meta_first(d, prop_name, version):
    """Returns obj. for badgerfish and val for hbf. Appropriate for nested literals"""
    if _is_badgerfish_version(version):
        return find_nested_meta_first_bf(d, prop_name)
    p = '^' + prop_name
    return d.get(p)
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/helper.py#L361-L366 | **partition:** train | **idx:** 403

---

**repo:** PSPC-SPAC-buyandsell/von_agent | **path:** von_agent/codec.py | **func_name:** `decode` | **language:** python

```python
def decode(value: str) -> Union[str, None, bool, int, float]:
    """
    Decode encoded credential attribute value.

    :param value: numeric string to decode
    :return: decoded value, stringified if original was neither str, bool, int, nor float
    """
    assert value.isdigit() or value[0] == '-' and value[1:].isdigit()

    if -I32_BOUND <= int(value) < I32_BOUND:  # it's an i32: it is its own encoding
        return int(value)
    elif int(value) == I32_BOUND:
        return None

    (prefix, value) = (int(value[0]), int(value[1:]))
    ival = int(value) - I32_BOUND
    if ival == 0:
        return ''  # special case: empty string encodes as 2**31
    elif ival == 1:
        return False  # sentinel for bool False
    elif ival == 2:
        return True  # sentinel for bool True

    blen = ceil(log(ival, 16) / 2)
    ibytes = unhexlify(ival.to_bytes(blen, 'big'))
    return DECODE_PREFIX.get(prefix, str)(ibytes.decode())
```

**sha:** 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | **url:** https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/codec.py#L70-L96 | **partition:** train | **idx:** 404
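
The sentinel branches can be exercised directly from what is shown here; for the string branch, the payload can be reconstructed by inverting `decode` (the integer is the hex of the UTF-8 bytes, offset by `I32_BOUND`, behind a one-digit type prefix). A sketch assuming the function above is in scope, `I32_BOUND == 2**31`, and that the unshown `DECODE_PREFIX` map falls back to `str` for the made-up prefix digit used below:

```python
from binascii import hexlify

I32_BOUND = 2 ** 31

assert decode('42') == 42                         # i32 values are their own encoding
assert decode(str(I32_BOUND)) is None             # 2**31 is the None sentinel
assert decode('1' + str(I32_BOUND)) == ''         # ival == 0: empty string
assert decode('1' + str(I32_BOUND + 1)) is False  # ival == 1: bool False sentinel

# string branch: payload is int('<hex of UTF-8 bytes>') shifted past I32_BOUND;
# '9' is a hypothetical prefix assumed absent from DECODE_PREFIX (str fallback)
ival = int.from_bytes(hexlify('hi'.encode()), 'big')
assert decode('9' + str(I32_BOUND + ival)) == 'hi'
```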

---

**repo:** palantir/typedjsonrpc | **path:** typedjsonrpc/parameter_checker.py | **func_name:** `validate_params_match` | **language:** python

```python
def validate_params_match(method, parameters):
    """Validates that the given parameters are exactly the method's declared parameters.

    :param method: The method to be called
    :type method: function
    :param parameters: The parameters to use in the call
    :type parameters: dict[str, object] | list[object]
    """
    argspec = inspect.getargspec(method)  # pylint: disable=deprecated-method
    default_length = len(argspec.defaults) if argspec.defaults is not None else 0
    if isinstance(parameters, list):
        if len(parameters) > len(argspec.args) and argspec.varargs is None:
            raise InvalidParamsError("Too many parameters")
        remaining_parameters = len(argspec.args) - len(parameters)
        if remaining_parameters > default_length:
            raise InvalidParamsError("Not enough parameters")
    elif isinstance(parameters, dict):
        missing_parameters = [key for key in argspec.args if key not in parameters]
        default_parameters = set(argspec.args[len(argspec.args) - default_length:])
        for key in missing_parameters:
            if key not in default_parameters:
                raise InvalidParamsError("Parameter {} has not been satisfied".format(key))
        extra_params = [key for key in parameters if key not in argspec.args]
        if len(extra_params) > 0 and argspec.keywords is None:
            raise InvalidParamsError("Too many parameters")
```

**sha:** 274218fcd236ff9643506caa629029c9ba25a0fb | **url:** https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/parameter_checker.py#L27-L55 | **partition:** train | **idx:** 405
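
A hypothetical example of what passes and what raises, assuming the function and `InvalidParamsError` above are in scope:

```python
import inspect

def sample(a, b, c=3):
    return a + b + c

validate_params_match(sample, [1, 2])             # OK: the default covers `c`
validate_params_match(sample, {'a': 1, 'b': 2})   # OK: same call by name

try:
    # extra key 'd' with no **kwargs in the signature
    validate_params_match(sample, {'a': 1, 'b': 2, 'd': 4})
except InvalidParamsError as e:
    print(e)  # "Too many parameters"
```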

---

**repo:** palantir/typedjsonrpc | **path:** typedjsonrpc/parameter_checker.py | **func_name:** `check_types` | **language:** python

```python
def check_types(parameters, parameter_types, strict_floats):
    """Checks that the given parameters have the correct types.

    :param parameters: List of (name, value) pairs of the given parameters
    :type parameters: dict[str, object]
    :param parameter_types: Parameter type by name.
    :type parameter_types: dict[str, type]
    :param strict_floats: If False, treat integers as floats
    :type strict_floats: bool
    """
    for name, parameter_type in parameter_types.items():
        if name not in parameters:
            raise InvalidParamsError("Parameter '{}' is missing.".format(name))
        if not _is_instance(parameters[name], parameter_type, strict_floats):
            raise InvalidParamsError("Value '{}' for parameter '{}' is not of expected type {}."
                                     .format(parameters[name], name, parameter_type))
```

**sha:** 274218fcd236ff9643506caa629029c9ba25a0fb | **url:** https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/parameter_checker.py#L58-L73 | **partition:** train | **idx:** 406

---

**repo:** palantir/typedjsonrpc | **path:** typedjsonrpc/parameter_checker.py | **func_name:** `check_type_declaration` | **language:** python

```python
def check_type_declaration(parameter_names, parameter_types):
    """Checks that exactly the given parameter names have declared types.

    :param parameter_names: The names of the parameters in the method declaration
    :type parameter_names: list[str]
    :param parameter_types: Parameter type by name
    :type parameter_types: dict[str, type]
    """
    if len(parameter_names) != len(parameter_types):
        raise Exception("Number of method parameters ({}) does not match number of "
                        "declared types ({})"
                        .format(len(parameter_names), len(parameter_types)))
    for parameter_name in parameter_names:
        if parameter_name not in parameter_types:
            raise Exception("Parameter '{}' does not have a declared type".format(parameter_name))
```

**sha:** 274218fcd236ff9643506caa629029c9ba25a0fb | **url:** https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/parameter_checker.py#L76-L90 | **partition:** train | **idx:** 407

---

**repo:** palantir/typedjsonrpc | **path:** typedjsonrpc/parameter_checker.py | **func_name:** `check_return_type` | **language:** python

```python
def check_return_type(value, expected_type, strict_floats):
    """Checks that the given return value has the correct type.

    :param value: Value returned by the method
    :type value: object
    :param expected_type: Expected return type
    :type expected_type: type
    :param strict_floats: If False, treat integers as floats
    :type strict_floats: bool
    """
    if expected_type is None:
        if value is not None:
            raise InvalidReturnTypeError("Returned value is '{}' but None was expected"
                                         .format(value))
    elif not _is_instance(value, expected_type, strict_floats):
        raise InvalidReturnTypeError("Type of return value '{}' does not match expected type {}"
                                     .format(value, expected_type))
```

**sha:** 274218fcd236ff9643506caa629029c9ba25a0fb | **url:** https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/parameter_checker.py#L93-L109 | **partition:** train | **idx:** 408

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/phylesystem/helper.py | **func_name:** `_make_phylesystem_cache_region` | **language:** python

```python
def _make_phylesystem_cache_region(**kwargs):
    """Only intended to be called by the Phylesystem singleton.
    """
    global _CACHE_REGION_CONFIGURED, _REGION
    if _CACHE_REGION_CONFIGURED:
        return _REGION
    _CACHE_REGION_CONFIGURED = True
    try:
        # noinspection PyPackageRequirements
        from dogpile.cache import make_region
    except:
        _LOG.debug('dogpile.cache not available')
        return
    region = None
    trial_key = 'test_key'
    trial_val = {'test_val': [4, 3]}
    trying_redis = True
    if trying_redis:
        try:
            a = {
                'host': 'localhost',
                'port': 6379,
                'db': 0,  # default is 0
                'redis_expiration_time': 60 * 60 * 24 * 2,  # 2 days
                'distributed_lock': False  # True if multiple processes will use redis
            }
            region = make_region().configure('dogpile.cache.redis', arguments=a)
            _LOG.debug('cache region set up with cache.redis.')
            _LOG.debug('testing redis caching...')
            region.set(trial_key, trial_val)
            assert trial_val == region.get(trial_key)
            _LOG.debug('redis caching works')
            region.delete(trial_key)
            _REGION = region
            return region
        except:
            _LOG.debug('redis cache set up failed.')
            region = None
    trying_file_dbm = False
    if trying_file_dbm:
        _LOG.debug('Going to try dogpile.cache.dbm ...')
        first_par = _get_phylesystem_parent(**kwargs)[0]
        cache_db_dir = os.path.split(first_par)[0]
        cache_db = os.path.join(cache_db_dir, 'phylesystem-cachefile.dbm')
        _LOG.debug('dogpile.cache region using "{}"'.format(cache_db))
        try:
            a = {'filename': cache_db}
            region = make_region().configure('dogpile.cache.dbm',
                                             expiration_time=36000,
                                             arguments=a)
            _LOG.debug('cache region set up with cache.dbm.')
            _LOG.debug('testing anydbm caching...')
            region.set(trial_key, trial_val)
            assert trial_val == region.get(trial_key)
            _LOG.debug('anydbm caching works')
            region.delete(trial_key)
            _REGION = region
            return region
        except:
            _LOG.debug('anydbm cache set up failed')
    _LOG.debug('exception in the configuration of the cache.')
    _LOG.debug('Phylesystem will not use caching')
    return None
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/helper.py#L125-L187 | **partition:** train | **idx:** 409
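
The same set/get/delete self-test can be run against dogpile's in-process memory backend, which needs no redis server. A minimal sketch using the public `dogpile.cache` API:

```python
from dogpile.cache import make_region

# in-process backend: handy for testing the caching logic without redis
region = make_region().configure('dogpile.cache.memory')
region.set('test_key', {'test_val': [4, 3]})
assert region.get('test_key') == {'test_val': [4, 3]}
region.delete('test_key')
```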

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/git_storage/git_action.py | **func_name:** `GitActionBase.path_for_doc` | **language:** python

```python
def path_for_doc(self, doc_id):
    """Returns doc_dir and doc_filepath for doc_id.
    """
    full_path = self.path_for_doc_fn(self.repo, doc_id)
    # _LOG.debug('>>>>>>>>>> GitActionBase.path_for_doc_fn: {}'.format(self.path_for_doc_fn))
    # _LOG.debug('>>>>>>>>>> GitActionBase.path_for_doc returning: [{}]'.format(full_path))
    return full_path
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_action.py#L143-L149 | **partition:** train | **idx:** 410

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/git_storage/git_action.py | **func_name:** `GitActionBase.current_branch` | **language:** python

```python
def current_branch(self):
    """Return the current branch name"""
    branch_name = git(self.gitdir, self.gitwd, "symbolic-ref", "HEAD")
    return branch_name.replace('refs/heads/', '').strip()
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_action.py#L169-L172 | **partition:** train | **idx:** 411

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/git_storage/git_action.py | **func_name:** `GitActionBase.branch_exists` | **language:** python

```python
def branch_exists(self, branch):
    """Returns true or false depending on if a branch exists"""
    try:
        git(self.gitdir, self.gitwd, "rev-parse", branch)
    except sh.ErrorReturnCode:
        return False
    return True
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_action.py#L174-L180 | **partition:** train | **idx:** 412

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/git_storage/git_action.py | **func_name:** `GitActionBase.fetch` | **language:** python

```python
def fetch(self, remote='origin'):
    """fetch from a remote"""
    git(self.gitdir, "fetch", remote, _env=self.env())
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_action.py#L204-L206 | **partition:** train | **idx:** 413

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/git_storage/git_action.py | **func_name:** `GitActionBase.get_version_history_for_file` | **language:** python

```python
def get_version_history_for_file(self, filepath):
    """Return a dict representation of this file's commit history

    This uses specially formatted git-log output for easy parsing, as described here:
        http://blog.lost-theory.org/post/how-to-parse-git-log-output/
    For a full list of available fields, see:
        http://linux.die.net/man/1/git-log
    """
    # define the desired fields for log output, matching the order in these lists!
    GIT_COMMIT_FIELDS = ['id',
                         'author_name',
                         'author_email',
                         'date',
                         'date_ISO_8601',
                         'relative_date',
                         'message_subject',
                         'message_body']
    GIT_LOG_FORMAT = ['%H', '%an', '%ae', '%aD', '%ai', '%ar', '%s', '%b']
    # make the final format string, using standard ASCII field/record delimiters
    GIT_LOG_FORMAT = '%x1f'.join(GIT_LOG_FORMAT) + '%x1e'
    try:
        log = git(self.gitdir,
                  self.gitwd,
                  '--no-pager',
                  'log',
                  '--format=%s' % GIT_LOG_FORMAT,
                  '--follow',             # Track file's history when moved/renamed...
                  '--find-renames=100%',  # ... but only if the contents are identical!
                  '--',
                  filepath)
        # _LOG.debug('log said "{}"'.format(log))
        log = log.strip('\n\x1e').split("\x1e")
        log = [row.strip().split("\x1f") for row in log]
        log = [dict(zip(GIT_COMMIT_FIELDS, row)) for row in log]
    except:
        _LOG.exception('git log failed')
        raise
    return log
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_action.py#L235-L273 | **partition:** train | **idx:** 414
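
The `%x1f`/`%x1e` delimiters make the parsing pure string work. Here is the same strip/split/zip pipeline applied to a made-up two-field record (the commit data below is fabricated for illustration):

```python
GIT_COMMIT_FIELDS = ['id', 'author_name']
raw = 'abc123\x1fJane Doe\x1e\ndef456\x1fJohn Doe\x1e\n'  # fabricated git output

rows = raw.strip('\n\x1e').split('\x1e')            # one chunk per commit record
rows = [row.strip().split('\x1f') for row in rows]  # one item per field
rows = [dict(zip(GIT_COMMIT_FIELDS, row)) for row in rows]

assert rows == [{'id': 'abc123', 'author_name': 'Jane Doe'},
                {'id': 'def456', 'author_name': 'John Doe'}]
```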

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/git_storage/git_action.py | **func_name:** `GitActionBase._add_and_commit` | **language:** python

```python
def _add_and_commit(self, doc_filepath, author, commit_msg):
    """Low level function used internally when you have an absolute filepath to add and commit"""
    try:
        git(self.gitdir, self.gitwd, "add", doc_filepath)
        git(self.gitdir, self.gitwd, "commit", author=author, message=commit_msg)
    except Exception as e:
        # We can ignore this if no changes are new,
        # otherwise raise a 400
        if "nothing to commit" in e.message:  # @EJM is this dangerous?
            _LOG.debug('"nothing to commit" found in error response')
        else:
            _LOG.exception('"git commit" failed')
            self.reset_hard()
            raise
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_action.py#L275-L288 | **partition:** train | **idx:** 415

---

**repo:** OpenTreeOfLife/peyotl | **path:** peyotl/git_storage/git_action.py | **func_name:** `GitActionBase._remove_document` | **language:** python

```python
def _remove_document(self, gh_user, doc_id, parent_sha, author, commit_msg=None):
    """Remove a document

    Remove a document on the given branch and attribute the commit to author.
    Returns the SHA of the commit on branch.
    """
    # _LOG.debug("@@@@@@@@ GitActionBase._remove_document, doc_id={}".format(doc_id))
    doc_filepath = self.path_for_doc(doc_id)
    # _LOG.debug("@@@@@@@@ GitActionBase._remove_document, doc_filepath={}".format(doc_filepath))
    branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha)
    prev_file_sha = None
    if commit_msg is None:
        msg = "Delete document '%s' via OpenTree API" % doc_id
    else:
        msg = commit_msg
    if os.path.exists(doc_filepath):
        prev_file_sha = self.get_blob_sha_for_file(doc_filepath)
        if self.doc_type == 'nexson':
            # delete the parent directory entirely
            doc_dir = os.path.split(doc_filepath)[0]
            # _LOG.debug("@@@@@@@@ GitActionBase._remove_document, doc_dir={}".format(doc_dir))
            git(self.gitdir, self.gitwd, "rm", "-rf", doc_dir)
        elif self.doc_type in ('collection', 'favorites', 'amendment'):
            # delete just the target file
            git(self.gitdir, self.gitwd, "rm", doc_filepath)
        else:
            raise NotImplementedError("No deletion rules for doc_type '{}'".format(self.doc_type))
        git(self.gitdir,
            self.gitwd,
            "commit",
            author=author,
            message=msg)
    new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD").strip()
    return {'commit_sha': new_sha,
            'branch': branch,
            'prev_file_sha': prev_file_sha,
            }
```

**sha:** 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | **url:** https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_action.py#L429-L465 | **partition:** train | **idx:** 416
OpenTreeOfLife/peyotl | peyotl/git_storage/git_action.py | GitActionBase.write_document | def write_document(self, gh_user, doc_id, file_content, branch, author, commit_msg=None):
"""Given a document id, temporary filename of content, branch and auth_info
Deprecated but needed until we merge api local-dep to master...
"""
parent_sha = None
fc = tempfile.NamedTemporaryFile()
# N.B. we currently assume file_content is text/JSON, or should be serialized from a dict
if is_str_type(file_content):
fc.write(file_content)
else:
write_as_json(file_content, fc)
fc.flush()
try:
doc_filepath = self.path_for_doc(doc_id)
doc_dir = os.path.split(doc_filepath)[0]
if parent_sha is None:
self.checkout_master()
parent_sha = self.get_master_sha()
branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha, force_branch_name=True)
# create a document directory if this is a new doc EJM- what if it isn't?
if not os.path.isdir(doc_dir):
os.makedirs(doc_dir)
shutil.copy(fc.name, doc_filepath)
git(self.gitdir, self.gitwd, "add", doc_filepath)
if commit_msg is None:
commit_msg = "Update document '%s' via OpenTree API" % doc_id
try:
git(self.gitdir,
self.gitwd,
"commit",
author=author,
message=commit_msg)
except Exception as e:
# We can ignore this if no changes are new,
# otherwise raise a 400
if "nothing to commit" in e.message: # @EJM is this dangerous?
pass
else:
_LOG.exception('"git commit" failed')
self.reset_hard()
raise
new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD")
except Exception as e:
_LOG.exception('write_document exception')
raise GitWorkflowError("Could not write to document #%s ! Details: \n%s" % (doc_id, e.message))
finally:
fc.close()
return new_sha | python | def write_document(self, gh_user, doc_id, file_content, branch, author, commit_msg=None):
"""Given a document id, temporary filename of content, branch and auth_info
Deprecated but needed until we merge api local-dep to master...
"""
parent_sha = None
fc = tempfile.NamedTemporaryFile()
# N.B. we currently assume file_content is text/JSON, or should be serialized from a dict
if is_str_type(file_content):
fc.write(file_content)
else:
write_as_json(file_content, fc)
fc.flush()
try:
doc_filepath = self.path_for_doc(doc_id)
doc_dir = os.path.split(doc_filepath)[0]
if parent_sha is None:
self.checkout_master()
parent_sha = self.get_master_sha()
branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha, force_branch_name=True)
# create a document directory if this is a new doc EJM- what if it isn't?
if not os.path.isdir(doc_dir):
os.makedirs(doc_dir)
shutil.copy(fc.name, doc_filepath)
git(self.gitdir, self.gitwd, "add", doc_filepath)
if commit_msg is None:
commit_msg = "Update document '%s' via OpenTree API" % doc_id
try:
git(self.gitdir,
self.gitwd,
"commit",
author=author,
message=commit_msg)
except Exception as e:
# We can ignore this if no changes are new,
# otherwise raise a 400
if "nothing to commit" in e.message: # @EJM is this dangerous?
pass
else:
_LOG.exception('"git commit" failed')
self.reset_hard()
raise
new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD")
except Exception as e:
_LOG.exception('write_document exception')
raise GitWorkflowError("Could not write to document #%s ! Details: \n%s" % (doc_id, e.message))
finally:
fc.close()
return new_sha | [
"def",
"write_document",
"(",
"self",
",",
"gh_user",
",",
"doc_id",
",",
"file_content",
",",
"branch",
",",
"author",
",",
"commit_msg",
"=",
"None",
")",
":",
"parent_sha",
"=",
"None",
"fc",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"# N.B. we currently assume file_content is text/JSON, or should be serialized from a dict",
"if",
"is_str_type",
"(",
"file_content",
")",
":",
"fc",
".",
"write",
"(",
"file_content",
")",
"else",
":",
"write_as_json",
"(",
"file_content",
",",
"fc",
")",
"fc",
".",
"flush",
"(",
")",
"try",
":",
"doc_filepath",
"=",
"self",
".",
"path_for_doc",
"(",
"doc_id",
")",
"doc_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"doc_filepath",
")",
"[",
"0",
"]",
"if",
"parent_sha",
"is",
"None",
":",
"self",
".",
"checkout_master",
"(",
")",
"parent_sha",
"=",
"self",
".",
"get_master_sha",
"(",
")",
"branch",
"=",
"self",
".",
"create_or_checkout_branch",
"(",
"gh_user",
",",
"doc_id",
",",
"parent_sha",
",",
"force_branch_name",
"=",
"True",
")",
"# create a document directory if this is a new doc EJM- what if it isn't?",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"doc_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"doc_dir",
")",
"shutil",
".",
"copy",
"(",
"fc",
".",
"name",
",",
"doc_filepath",
")",
"git",
"(",
"self",
".",
"gitdir",
",",
"self",
".",
"gitwd",
",",
"\"add\"",
",",
"doc_filepath",
")",
"if",
"commit_msg",
"is",
"None",
":",
"commit_msg",
"=",
"\"Update document '%s' via OpenTree API\"",
"%",
"doc_id",
"try",
":",
"git",
"(",
"self",
".",
"gitdir",
",",
"self",
".",
"gitwd",
",",
"\"commit\"",
",",
"author",
"=",
"author",
",",
"message",
"=",
"commit_msg",
")",
"except",
"Exception",
"as",
"e",
":",
"# We can ignore this if no changes are new,",
"# otherwise raise a 400",
"if",
"\"nothing to commit\"",
"in",
"e",
".",
"message",
":",
"# @EJM is this dangerous?",
"pass",
"else",
":",
"_LOG",
".",
"exception",
"(",
"'\"git commit\" failed'",
")",
"self",
".",
"reset_hard",
"(",
")",
"raise",
"new_sha",
"=",
"git",
"(",
"self",
".",
"gitdir",
",",
"self",
".",
"gitwd",
",",
"\"rev-parse\"",
",",
"\"HEAD\"",
")",
"except",
"Exception",
"as",
"e",
":",
"_LOG",
".",
"exception",
"(",
"'write_document exception'",
")",
"raise",
"GitWorkflowError",
"(",
"\"Could not write to document #%s ! Details: \\n%s\"",
"%",
"(",
"doc_id",
",",
"e",
".",
"message",
")",
")",
"finally",
":",
"fc",
".",
"close",
"(",
")",
"return",
"new_sha"
] | Given a document id, temporary filename of content, branch and auth_info
Deprecated but needed until we merge api local-dep to master... | [
"Given",
"a",
"document",
"id",
"temporary",
"filename",
"of",
"content",
"branch",
"and",
"auth_info"
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_action.py#L467-L516 | train | 417 |
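A portability note on the record above: the check `"nothing to commit" in e.message` only works on Python 2, because `Exception.message` was removed in Python 3. A minimal, self-contained sketch of an equivalent check that runs on either version; the helper name is invented for illustration and is not part of peyotl:

def _is_nothing_to_commit(exc):
    # Exception.message is gone in Python 3; fall back to str(exc).
    text = getattr(exc, 'message', None) or str(exc)
    return 'nothing to commit' in text

try:
    raise RuntimeError('nothing to commit, working tree clean')
except Exception as e:
    print(_is_nothing_to_commit(e))  # -> True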
OpenTreeOfLife/peyotl | peyotl/git_storage/git_action.py | GitActionBase.write_doc_from_tmpfile | def write_doc_from_tmpfile(self,
doc_id,
tmpfi,
parent_sha,
auth_info,
commit_msg='',
doctype_display_name="document"):
"""Given a doc_id, temporary filename of content, branch and auth_info
"""
gh_user, author = get_user_author(auth_info)
doc_filepath = self.path_for_doc(doc_id)
doc_dir = os.path.split(doc_filepath)[0]
if parent_sha is None:
self.checkout_master()
parent_sha = self.get_master_sha()
branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha)
# build complete (probably type-specific) commit message
default_commit_msg = "Update %s '%s' via OpenTree API" % (doctype_display_name, doc_id)
if commit_msg:
commit_msg = "%s\n\n(%s)" % (commit_msg, default_commit_msg)
else:
commit_msg = default_commit_msg
# create a doc directory if this is a new document EJM- what if it isn't?
if not os.path.isdir(doc_dir):
os.makedirs(doc_dir)
if os.path.exists(doc_filepath):
prev_file_sha = self.get_blob_sha_for_file(doc_filepath)
else:
prev_file_sha = None
shutil.copy(tmpfi.name, doc_filepath)
self._add_and_commit(doc_filepath, author, commit_msg)
new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD")
_LOG.debug('Committed document "{i}" to branch "{b}" commit SHA: "{s}"'.format(i=doc_id,
b=branch,
s=new_sha.strip()))
return {'commit_sha': new_sha.strip(),
'branch': branch,
'prev_file_sha': prev_file_sha,
} | python | def write_doc_from_tmpfile(self,
doc_id,
tmpfi,
parent_sha,
auth_info,
commit_msg='',
doctype_display_name="document"):
"""Given a doc_id, temporary filename of content, branch and auth_info
"""
gh_user, author = get_user_author(auth_info)
doc_filepath = self.path_for_doc(doc_id)
doc_dir = os.path.split(doc_filepath)[0]
if parent_sha is None:
self.checkout_master()
parent_sha = self.get_master_sha()
branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha)
# build complete (probably type-specific) commit message
default_commit_msg = "Update %s '%s' via OpenTree API" % (doctype_display_name, doc_id)
if commit_msg:
commit_msg = "%s\n\n(%s)" % (commit_msg, default_commit_msg)
else:
commit_msg = default_commit_msg
# create a doc directory if this is a new document EJM- what if it isn't?
if not os.path.isdir(doc_dir):
os.makedirs(doc_dir)
if os.path.exists(doc_filepath):
prev_file_sha = self.get_blob_sha_for_file(doc_filepath)
else:
prev_file_sha = None
shutil.copy(tmpfi.name, doc_filepath)
self._add_and_commit(doc_filepath, author, commit_msg)
new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD")
_LOG.debug('Committed document "{i}" to branch "{b}" commit SHA: "{s}"'.format(i=doc_id,
b=branch,
s=new_sha.strip()))
return {'commit_sha': new_sha.strip(),
'branch': branch,
'prev_file_sha': prev_file_sha,
} | [
"def",
"write_doc_from_tmpfile",
"(",
"self",
",",
"doc_id",
",",
"tmpfi",
",",
"parent_sha",
",",
"auth_info",
",",
"commit_msg",
"=",
"''",
",",
"doctype_display_name",
"=",
"\"document\"",
")",
":",
"gh_user",
",",
"author",
"=",
"get_user_author",
"(",
"auth_info",
")",
"doc_filepath",
"=",
"self",
".",
"path_for_doc",
"(",
"doc_id",
")",
"doc_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"doc_filepath",
")",
"[",
"0",
"]",
"if",
"parent_sha",
"is",
"None",
":",
"self",
".",
"checkout_master",
"(",
")",
"parent_sha",
"=",
"self",
".",
"get_master_sha",
"(",
")",
"branch",
"=",
"self",
".",
"create_or_checkout_branch",
"(",
"gh_user",
",",
"doc_id",
",",
"parent_sha",
")",
"# build complete (probably type-specific) commit message",
"default_commit_msg",
"=",
"\"Update %s '%s' via OpenTree API\"",
"%",
"(",
"doctype_display_name",
",",
"doc_id",
")",
"if",
"commit_msg",
":",
"commit_msg",
"=",
"\"%s\\n\\n(%s)\"",
"%",
"(",
"commit_msg",
",",
"default_commit_msg",
")",
"else",
":",
"commit_msg",
"=",
"default_commit_msg",
"# create a doc directory if this is a new document EJM- what if it isn't?",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"doc_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"doc_dir",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"doc_filepath",
")",
":",
"prev_file_sha",
"=",
"self",
".",
"get_blob_sha_for_file",
"(",
"doc_filepath",
")",
"else",
":",
"prev_file_sha",
"=",
"None",
"shutil",
".",
"copy",
"(",
"tmpfi",
".",
"name",
",",
"doc_filepath",
")",
"self",
".",
"_add_and_commit",
"(",
"doc_filepath",
",",
"author",
",",
"commit_msg",
")",
"new_sha",
"=",
"git",
"(",
"self",
".",
"gitdir",
",",
"self",
".",
"gitwd",
",",
"\"rev-parse\"",
",",
"\"HEAD\"",
")",
"_LOG",
".",
"debug",
"(",
"'Committed document \"{i}\" to branch \"{b}\" commit SHA: \"{s}\"'",
".",
"format",
"(",
"i",
"=",
"doc_id",
",",
"b",
"=",
"branch",
",",
"s",
"=",
"new_sha",
".",
"strip",
"(",
")",
")",
")",
"return",
"{",
"'commit_sha'",
":",
"new_sha",
".",
"strip",
"(",
")",
",",
"'branch'",
":",
"branch",
",",
"'prev_file_sha'",
":",
"prev_file_sha",
",",
"}"
] | Given a doc_id, temporary filename of content, branch and auth_info | [
"Given",
"a",
"doc_id",
"temporary",
"filename",
"of",
"content",
"branch",
"and",
"auth_info"
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_action.py#L518-L559 | train | 418 |
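The commit-message handling in write_doc_from_tmpfile is easy to exercise in isolation. A stand-alone rerun of just that logic (the helper name is made up for illustration); the method itself returns a dict with the keys 'commit_sha', 'branch', and 'prev_file_sha':

def _compose_commit_msg(commit_msg, doc_id, doctype_display_name='document'):
    # Mirrors the message building in write_doc_from_tmpfile above.
    default = "Update %s '%s' via OpenTree API" % (doctype_display_name, doc_id)
    return "%s\n\n(%s)" % (commit_msg, default) if commit_msg else default

print(_compose_commit_msg('', 'xy_10'))                   # default message only
print(_compose_commit_msg('Fix typo', 'xy_10', 'study'))  # custom + default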
OpenTreeOfLife/peyotl | peyotl/amendments/git_actions.py | TaxonomicAmendmentsGitAction.remove_amendment | def remove_amendment(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
"""Remove an amendment
Given an amendment_id, branch and optionally an
author, remove an amendment on the given branch
and attribute the commit to author.
Returns the SHA of the commit on branch.
"""
if fourth_arg is None:
amendment_id, branch_name, author = first_arg, sec_arg, third_arg
gh_user = branch_name.split('_amendment_')[0]
parent_sha = self.get_master_sha()
else:
gh_user, amendment_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
if commit_msg is None:
commit_msg = "Delete Amendment '%s' via OpenTree API" % amendment_id
return self._remove_document(gh_user, amendment_id, parent_sha, author, commit_msg) | python | def remove_amendment(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
"""Remove an amendment
Given an amendment_id, branch and optionally an
author, remove an amendment on the given branch
and attribute the commit to author.
Returns the SHA of the commit on branch.
"""
if fourth_arg is None:
amendment_id, branch_name, author = first_arg, sec_arg, third_arg
gh_user = branch_name.split('_amendment_')[0]
parent_sha = self.get_master_sha()
else:
gh_user, amendment_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
if commit_msg is None:
commit_msg = "Delete Amendment '%s' via OpenTree API" % amendment_id
return self._remove_document(gh_user, amendment_id, parent_sha, author, commit_msg) | [
"def",
"remove_amendment",
"(",
"self",
",",
"first_arg",
",",
"sec_arg",
",",
"third_arg",
",",
"fourth_arg",
"=",
"None",
",",
"commit_msg",
"=",
"None",
")",
":",
"if",
"fourth_arg",
"is",
"None",
":",
"amendment_id",
",",
"branch_name",
",",
"author",
"=",
"first_arg",
",",
"sec_arg",
",",
"third_arg",
"gh_user",
"=",
"branch_name",
".",
"split",
"(",
"'_amendment_'",
")",
"[",
"0",
"]",
"parent_sha",
"=",
"self",
".",
"get_master_sha",
"(",
")",
"else",
":",
"gh_user",
",",
"amendment_id",
",",
"parent_sha",
",",
"author",
"=",
"first_arg",
",",
"sec_arg",
",",
"third_arg",
",",
"fourth_arg",
"if",
"commit_msg",
"is",
"None",
":",
"commit_msg",
"=",
"\"Delete Amendment '%s' via OpenTree API\"",
"%",
"amendment_id",
"return",
"self",
".",
"_remove_document",
"(",
"gh_user",
",",
"amendment_id",
",",
"parent_sha",
",",
"author",
",",
"commit_msg",
")"
] | Remove an amendment
Given an amendment_id, branch and optionally an
author, remove an amendment on the given branch
and attribute the commit to author.
Returns the SHA of the commit on branch. | [
"Remove",
"an",
"amendment",
"Given",
"a",
"amendment_id",
"branch",
"and",
"optionally",
"an",
"author",
"remove",
"an",
"amendment",
"on",
"the",
"given",
"branch",
"and",
"attribute",
"the",
"commit",
"to",
"author",
".",
"Returns",
"the",
"SHA",
"of",
"the",
"commit",
"on",
"branch",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/amendments/git_actions.py#L92-L107 | train | 419 |
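remove_amendment supports two calling conventions, which the positional names first_arg..fourth_arg obscure. A runnable sketch of just the argument dispatch; the helper and the sample values below are illustrative only:

def _resolve_remove_args(first_arg, sec_arg, third_arg, fourth_arg=None):
    if fourth_arg is None:
        # 3-arg form: (amendment_id, branch_name, author); the GitHub user
        # is recovered from the branch name and the parent SHA defaults to
        # the master SHA.
        amendment_id, branch_name, author = first_arg, sec_arg, third_arg
        gh_user = branch_name.split('_amendment_')[0]
        parent_sha = 'master-sha'  # stand-in for self.get_master_sha()
    else:
        # 4-arg form: (gh_user, amendment_id, parent_sha, author)
        gh_user, amendment_id, parent_sha, author = (
            first_arg, sec_arg, third_arg, fourth_arg)
    return gh_user, amendment_id, parent_sha, author

print(_resolve_remove_args('add-amd-1', 'jimallman_amendment_add-amd-1', 'Jim'))
# -> ('jimallman', 'add-amd-1', 'master-sha', 'Jim')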
inveniosoftware/invenio-communities | invenio_communities/models.py | InclusionRequest.create | def create(cls, community, record, user=None, expires_at=None,
notify=True):
"""Create a record inclusion request to a community.
:param community: Community object.
:param record: Record API object.
:param expires_at: Time after which the request expires and shouldn't
be resolved anymore.
"""
if expires_at and expires_at < datetime.utcnow():
raise InclusionRequestExpiryTimeError(
community=community, record=record)
if community.has_record(record):
raise InclusionRequestObsoleteError(
community=community, record=record)
try:
# Create inclusion request
with db.session.begin_nested():
obj = cls(
id_community=community.id,
id_record=record.id,
user=user,
expires_at=expires_at
)
db.session.add(obj)
except (IntegrityError, FlushError):
raise InclusionRequestExistsError(
community=community, record=record)
# Send signal
inclusion_request_created.send(
current_app._get_current_object(),
request=obj,
notify=notify
)
return obj | python | def create(cls, community, record, user=None, expires_at=None,
notify=True):
"""Create a record inclusion request to a community.
:param community: Community object.
:param record: Record API object.
:param expires_at: Time after which the request expires and shouldn't
be resolved anymore.
"""
if expires_at and expires_at < datetime.utcnow():
raise InclusionRequestExpiryTimeError(
community=community, record=record)
if community.has_record(record):
raise InclusionRequestObsoleteError(
community=community, record=record)
try:
# Create inclusion request
with db.session.begin_nested():
obj = cls(
id_community=community.id,
id_record=record.id,
user=user,
expires_at=expires_at
)
db.session.add(obj)
except (IntegrityError, FlushError):
raise InclusionRequestExistsError(
community=community, record=record)
# Send signal
inclusion_request_created.send(
current_app._get_current_object(),
request=obj,
notify=notify
)
return obj | [
"def",
"create",
"(",
"cls",
",",
"community",
",",
"record",
",",
"user",
"=",
"None",
",",
"expires_at",
"=",
"None",
",",
"notify",
"=",
"True",
")",
":",
"if",
"expires_at",
"and",
"expires_at",
"<",
"datetime",
".",
"utcnow",
"(",
")",
":",
"raise",
"InclusionRequestExpiryTimeError",
"(",
"community",
"=",
"community",
",",
"record",
"=",
"record",
")",
"if",
"community",
".",
"has_record",
"(",
"record",
")",
":",
"raise",
"InclusionRequestObsoleteError",
"(",
"community",
"=",
"community",
",",
"record",
"=",
"record",
")",
"try",
":",
"# Create inclusion request",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"obj",
"=",
"cls",
"(",
"id_community",
"=",
"community",
".",
"id",
",",
"id_record",
"=",
"record",
".",
"id",
",",
"user",
"=",
"user",
",",
"expires_at",
"=",
"expires_at",
")",
"db",
".",
"session",
".",
"add",
"(",
"obj",
")",
"except",
"(",
"IntegrityError",
",",
"FlushError",
")",
":",
"raise",
"InclusionRequestExistsError",
"(",
"community",
"=",
"community",
",",
"record",
"=",
"record",
")",
"# Send signal",
"inclusion_request_created",
".",
"send",
"(",
"current_app",
".",
"_get_current_object",
"(",
")",
",",
"request",
"=",
"obj",
",",
"notify",
"=",
"notify",
")",
"return",
"obj"
] | Create a record inclusion request to a community.
:param community: Community object.
:param record: Record API object.
:param expires_at: Time after which the request expires and shouldn't
be resolved anymore. | [
"Create",
"a",
"record",
"inclusion",
"request",
"to",
"a",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L114-L152 | train | 420 |
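The only precondition create() enforces on expires_at is that it lies in the future; a past value raises InclusionRequestExpiryTimeError before anything touches the database. A self-contained rerun of that guard, with a plain ValueError standing in for the invenio exception:

from datetime import datetime, timedelta

def _validate_expiry(expires_at):
    # Same comparison as the guard at the top of InclusionRequest.create.
    if expires_at and expires_at < datetime.utcnow():
        raise ValueError('expiry time is already in the past')

_validate_expiry(None)                                   # fine: no expiry
_validate_expiry(datetime.utcnow() + timedelta(days=7))  # fine: in the future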
inveniosoftware/invenio-communities | invenio_communities/models.py | InclusionRequest.get | def get(cls, community_id, record_uuid):
"""Get an inclusion request."""
return cls.query.filter_by(
id_record=record_uuid, id_community=community_id
).one_or_none() | python | def get(cls, community_id, record_uuid):
"""Get an inclusion request."""
return cls.query.filter_by(
id_record=record_uuid, id_community=community_id
).one_or_none() | [
"def",
"get",
"(",
"cls",
",",
"community_id",
",",
"record_uuid",
")",
":",
"return",
"cls",
".",
"query",
".",
"filter_by",
"(",
"id_record",
"=",
"record_uuid",
",",
"id_community",
"=",
"community_id",
")",
".",
"one_or_none",
"(",
")"
] | Get an inclusion request. | [
"Get",
"an",
"inclusion",
"request",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L155-L159 | train | 421 |
inveniosoftware/invenio-communities | invenio_communities/models.py | Community.filter_communities | def filter_communities(cls, p, so, with_deleted=False):
"""Search for communities.
Helper function which fetches from the database only those communities
that match the search criteria. Uses parameter 'so' to set communities in
the correct order.
Parameter 'page' is introduced to restrict results and return only a
slice of them for the current page. If page == 0 the function will return
all communities that match the pattern.
"""
query = cls.query if with_deleted else \
cls.query.filter(cls.deleted_at.is_(None))
if p:
p = p.replace(' ', '%')
query = query.filter(db.or_(
cls.id.ilike('%' + p + '%'),
cls.title.ilike('%' + p + '%'),
cls.description.ilike('%' + p + '%'),
))
if so in current_app.config['COMMUNITIES_SORTING_OPTIONS']:
order = so == 'title' and db.asc or db.desc
query = query.order_by(order(getattr(cls, so)))
else:
query = query.order_by(db.desc(cls.ranking))
return query | python | def filter_communities(cls, p, so, with_deleted=False):
"""Search for communities.
Helper function which fetches from the database only those communities
that match the search criteria. Uses parameter 'so' to set communities in
the correct order.
Parameter 'page' is introduced to restrict results and return only a
slice of them for the current page. If page == 0 the function will return
all communities that match the pattern.
"""
query = cls.query if with_deleted else \
cls.query.filter(cls.deleted_at.is_(None))
if p:
p = p.replace(' ', '%')
query = query.filter(db.or_(
cls.id.ilike('%' + p + '%'),
cls.title.ilike('%' + p + '%'),
cls.description.ilike('%' + p + '%'),
))
if so in current_app.config['COMMUNITIES_SORTING_OPTIONS']:
order = so == 'title' and db.asc or db.desc
query = query.order_by(order(getattr(cls, so)))
else:
query = query.order_by(db.desc(cls.ranking))
return query | [
"def",
"filter_communities",
"(",
"cls",
",",
"p",
",",
"so",
",",
"with_deleted",
"=",
"False",
")",
":",
"query",
"=",
"cls",
".",
"query",
"if",
"with_deleted",
"else",
"cls",
".",
"query",
".",
"filter",
"(",
"cls",
".",
"deleted_at",
".",
"is_",
"(",
"None",
")",
")",
"if",
"p",
":",
"p",
"=",
"p",
".",
"replace",
"(",
"' '",
",",
"'%'",
")",
"query",
"=",
"query",
".",
"filter",
"(",
"db",
".",
"or_",
"(",
"cls",
".",
"id",
".",
"ilike",
"(",
"'%'",
"+",
"p",
"+",
"'%'",
")",
",",
"cls",
".",
"title",
".",
"ilike",
"(",
"'%'",
"+",
"p",
"+",
"'%'",
")",
",",
"cls",
".",
"description",
".",
"ilike",
"(",
"'%'",
"+",
"p",
"+",
"'%'",
")",
",",
")",
")",
"if",
"so",
"in",
"current_app",
".",
"config",
"[",
"'COMMUNITIES_SORTING_OPTIONS'",
"]",
":",
"order",
"=",
"so",
"==",
"'title'",
"and",
"db",
".",
"asc",
"or",
"db",
".",
"desc",
"query",
"=",
"query",
".",
"order_by",
"(",
"order",
"(",
"getattr",
"(",
"cls",
",",
"so",
")",
")",
")",
"else",
":",
"query",
"=",
"query",
".",
"order_by",
"(",
"db",
".",
"desc",
"(",
"cls",
".",
"ranking",
")",
")",
"return",
"query"
] | Search for communities.
Helper function which fetches from the database only those communities
that match the search criteria. Uses parameter 'so' to set communities in
the correct order.
Parameter 'page' is introduced to restrict results and return only a
slice of them for the current page. If page == 0 the function will return
all communities that match the pattern. | [
"Search",
"for",
"communities",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L257-L284 | train | 422 |
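The pattern handling is the interesting part of filter_communities: spaces in the search phrase become SQL wildcards, so 'open data' matches any id, title, or description containing both words in that order. A stand-alone rerun of the transformation:

def _ilike_pattern(p):
    # filter_communities wraps the phrase in '%' and turns spaces into '%'.
    return '%' + p.replace(' ', '%') + '%'

print(_ilike_pattern('open data'))  # -> %open%data%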
inveniosoftware/invenio-communities | invenio_communities/models.py | Community.add_record | def add_record(self, record):
"""Add a record to the community.
:param record: Record object.
:type record: `invenio_records.api.Record`
"""
key = current_app.config['COMMUNITIES_RECORD_KEY']
record.setdefault(key, [])
if self.has_record(record):
current_app.logger.warning(
'Community addition: record {uuid} is already in community '
'"{comm}"'.format(uuid=record.id, comm=self.id))
else:
record[key].append(self.id)
record[key] = sorted(record[key])
if current_app.config['COMMUNITIES_OAI_ENABLED']:
if not self.oaiset.has_record(record):
self.oaiset.add_record(record) | python | def add_record(self, record):
"""Add a record to the community.
:param record: Record object.
:type record: `invenio_records.api.Record`
"""
key = current_app.config['COMMUNITIES_RECORD_KEY']
record.setdefault(key, [])
if self.has_record(record):
current_app.logger.warning(
'Community addition: record {uuid} is already in community '
'"{comm}"'.format(uuid=record.id, comm=self.id))
else:
record[key].append(self.id)
record[key] = sorted(record[key])
if current_app.config['COMMUNITIES_OAI_ENABLED']:
if not self.oaiset.has_record(record):
self.oaiset.add_record(record) | [
"def",
"add_record",
"(",
"self",
",",
"record",
")",
":",
"key",
"=",
"current_app",
".",
"config",
"[",
"'COMMUNITIES_RECORD_KEY'",
"]",
"record",
".",
"setdefault",
"(",
"key",
",",
"[",
"]",
")",
"if",
"self",
".",
"has_record",
"(",
"record",
")",
":",
"current_app",
".",
"logger",
".",
"warning",
"(",
"'Community addition: record {uuid} is already in community '",
"'\"{comm}\"'",
".",
"format",
"(",
"uuid",
"=",
"record",
".",
"id",
",",
"comm",
"=",
"self",
".",
"id",
")",
")",
"else",
":",
"record",
"[",
"key",
"]",
".",
"append",
"(",
"self",
".",
"id",
")",
"record",
"[",
"key",
"]",
"=",
"sorted",
"(",
"record",
"[",
"key",
"]",
")",
"if",
"current_app",
".",
"config",
"[",
"'COMMUNITIES_OAI_ENABLED'",
"]",
":",
"if",
"not",
"self",
".",
"oaiset",
".",
"has_record",
"(",
"record",
")",
":",
"self",
".",
"oaiset",
".",
"add_record",
"(",
"record",
")"
] | Add a record to the community.
:param record: Record object.
:type record: `invenio_records.api.Record` | [
"Add",
"a",
"record",
"to",
"the",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L286-L304 | train | 423 |
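Because the record behaves like a dict, the bookkeeping in add_record can be replayed without invenio: the community id is appended under the configured key and the list re-sorted. The key name 'communities' below is only a stand-in for whatever COMMUNITIES_RECORD_KEY is configured to:

key = 'communities'  # stand-in for current_app.config['COMMUNITIES_RECORD_KEY']
record = {key: ['zenodo']}
for comm_id in ('astro', 'biology'):
    if comm_id not in record[key]:          # the has_record() check, in effect
        record[key].append(comm_id)
        record[key] = sorted(record[key])   # kept sorted, as in add_record
print(record)  # {'communities': ['astro', 'biology', 'zenodo']}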
inveniosoftware/invenio-communities | invenio_communities/models.py | Community.remove_record | def remove_record(self, record):
"""Remove an already accepted record from the community.
:param record: Record object.
:type record: `invenio_records.api.Record`
"""
if not self.has_record(record):
current_app.logger.warning(
'Community removal: record {uuid} was not in community '
'"{comm}"'.format(uuid=record.id, comm=self.id))
else:
key = current_app.config['COMMUNITIES_RECORD_KEY']
record[key] = [c for c in record[key] if c != self.id]
if current_app.config['COMMUNITIES_OAI_ENABLED']:
if self.oaiset.has_record(record):
self.oaiset.remove_record(record) | python | def remove_record(self, record):
"""Remove an already accepted record from the community.
:param record: Record object.
:type record: `invenio_records.api.Record`
"""
if not self.has_record(record):
current_app.logger.warning(
'Community removal: record {uuid} was not in community '
'"{comm}"'.format(uuid=record.id, comm=self.id))
else:
key = current_app.config['COMMUNITIES_RECORD_KEY']
record[key] = [c for c in record[key] if c != self.id]
if current_app.config['COMMUNITIES_OAI_ENABLED']:
if self.oaiset.has_record(record):
self.oaiset.remove_record(record) | [
"def",
"remove_record",
"(",
"self",
",",
"record",
")",
":",
"if",
"not",
"self",
".",
"has_record",
"(",
"record",
")",
":",
"current_app",
".",
"logger",
".",
"warning",
"(",
"'Community removal: record {uuid} was not in community '",
"'\"{comm}\"'",
".",
"format",
"(",
"uuid",
"=",
"record",
".",
"id",
",",
"comm",
"=",
"self",
".",
"id",
")",
")",
"else",
":",
"key",
"=",
"current_app",
".",
"config",
"[",
"'COMMUNITIES_RECORD_KEY'",
"]",
"record",
"[",
"key",
"]",
"=",
"[",
"c",
"for",
"c",
"in",
"record",
"[",
"key",
"]",
"if",
"c",
"!=",
"self",
".",
"id",
"]",
"if",
"current_app",
".",
"config",
"[",
"'COMMUNITIES_OAI_ENABLED'",
"]",
":",
"if",
"self",
".",
"oaiset",
".",
"has_record",
"(",
"record",
")",
":",
"self",
".",
"oaiset",
".",
"remove_record",
"(",
"record",
")"
] | Remove an already accepted record from the community.
:param record: Record object.
:type record: `invenio_records.api.Record` | [
"Remove",
"an",
"already",
"accepted",
"record",
"from",
"the",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L306-L322 | train | 424 |
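Removal is the mirror image: a list comprehension drops the community id from the same field. A stand-alone rerun with the same invented key name:

key = 'communities'
record = {key: ['astro', 'biology', 'zenodo']}
record[key] = [c for c in record[key] if c != 'biology']  # same filter as remove_record
print(record)  # {'communities': ['astro', 'zenodo']}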
inveniosoftware/invenio-communities | invenio_communities/models.py | Community.accept_record | def accept_record(self, record):
"""Accept a record for inclusion in the community.
:param record: Record object.
"""
with db.session.begin_nested():
req = InclusionRequest.get(self.id, record.id)
if req is None:
raise InclusionRequestMissingError(community=self,
record=record)
req.delete()
self.add_record(record)
self.last_record_accepted = datetime.utcnow() | python | def accept_record(self, record):
"""Accept a record for inclusion in the community.
:param record: Record object.
"""
with db.session.begin_nested():
req = InclusionRequest.get(self.id, record.id)
if req is None:
raise InclusionRequestMissingError(community=self,
record=record)
req.delete()
self.add_record(record)
self.last_record_accepted = datetime.utcnow() | [
"def",
"accept_record",
"(",
"self",
",",
"record",
")",
":",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"req",
"=",
"InclusionRequest",
".",
"get",
"(",
"self",
".",
"id",
",",
"record",
".",
"id",
")",
"if",
"req",
"is",
"None",
":",
"raise",
"InclusionRequestMissingError",
"(",
"community",
"=",
"self",
",",
"record",
"=",
"record",
")",
"req",
".",
"delete",
"(",
")",
"self",
".",
"add_record",
"(",
"record",
")",
"self",
".",
"last_record_accepted",
"=",
"datetime",
".",
"utcnow",
"(",
")"
] | Accept a record for inclusion in the community.
:param record: Record object. | [
"Accept",
"a",
"record",
"for",
"inclusion",
"in",
"the",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L329-L341 | train | 425 |
inveniosoftware/invenio-communities | invenio_communities/models.py | Community.reject_record | def reject_record(self, record):
"""Reject a record for inclusion in the community.
:param record: Record object.
"""
with db.session.begin_nested():
req = InclusionRequest.get(self.id, record.id)
if req is None:
raise InclusionRequestMissingError(community=self,
record=record)
req.delete() | python | def reject_record(self, record):
"""Reject a record for inclusion in the community.
:param record: Record object.
"""
with db.session.begin_nested():
req = InclusionRequest.get(self.id, record.id)
if req is None:
raise InclusionRequestMissingError(community=self,
record=record)
req.delete() | [
"def",
"reject_record",
"(",
"self",
",",
"record",
")",
":",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"req",
"=",
"InclusionRequest",
".",
"get",
"(",
"self",
".",
"id",
",",
"record",
".",
"id",
")",
"if",
"req",
"is",
"None",
":",
"raise",
"InclusionRequestMissingError",
"(",
"community",
"=",
"self",
",",
"record",
"=",
"record",
")",
"req",
".",
"delete",
"(",
")"
] | Reject a record for inclusion in the community.
:param record: Record object. | [
"Reject",
"a",
"record",
"for",
"inclusion",
"in",
"the",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L343-L353 | train | 426 |
inveniosoftware/invenio-communities | invenio_communities/models.py | Community.delete | def delete(self):
"""Mark the community for deletion.
:param delete_time: DateTime after which to delete the community.
:type delete_time: datetime.datetime
:raises: CommunitiesError
"""
if self.deleted_at is not None:
raise CommunitiesError(community=self)
else:
self.deleted_at = datetime.utcnow() | python | def delete(self):
"""Mark the community for deletion.
:param delete_time: DateTime after which to delete the community.
:type delete_time: datetime.datetime
:raises: CommunitiesError
"""
if self.deleted_at is not None:
raise CommunitiesError(community=self)
else:
self.deleted_at = datetime.utcnow() | [
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"deleted_at",
"is",
"not",
"None",
":",
"raise",
"CommunitiesError",
"(",
"community",
"=",
"self",
")",
"else",
":",
"self",
".",
"deleted_at",
"=",
"datetime",
".",
"utcnow",
"(",
")"
] | Mark the community for deletion.
:param delete_time: DateTime after which to delete the community.
:type delete_time: datetime.datetime
:raises: CommunitiesError | [
"Mark",
"the",
"community",
"for",
"deletion",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L355-L365 | train | 427 |
inveniosoftware/invenio-communities | invenio_communities/models.py | Community.logo_url | def logo_url(self):
"""Get URL to collection logo.
:returns: Path to community logo.
:rtype: str
"""
if self.logo_ext:
return '/api/files/{bucket}/{key}'.format(
bucket=current_app.config['COMMUNITIES_BUCKET_UUID'],
key='{0}/logo.{1}'.format(self.id, self.logo_ext),
)
return None | python | def logo_url(self):
"""Get URL to collection logo.
:returns: Path to community logo.
:rtype: str
"""
if self.logo_ext:
return '/api/files/{bucket}/{key}'.format(
bucket=current_app.config['COMMUNITIES_BUCKET_UUID'],
key='{0}/logo.{1}'.format(self.id, self.logo_ext),
)
return None | [
"def",
"logo_url",
"(",
"self",
")",
":",
"if",
"self",
".",
"logo_ext",
":",
"return",
"'/api/files/{bucket}/{key}'",
".",
"format",
"(",
"bucket",
"=",
"current_app",
".",
"config",
"[",
"'COMMUNITIES_BUCKET_UUID'",
"]",
",",
"key",
"=",
"'{0}/logo.{1}'",
".",
"format",
"(",
"self",
".",
"id",
",",
"self",
".",
"logo_ext",
")",
",",
")",
"return",
"None"
] | Get URL to collection logo.
:returns: Path to community logo.
:rtype: str | [
"Get",
"URL",
"to",
"collection",
"logo",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L380-L391 | train | 428 |
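The URL construction in logo_url is pure string formatting. A stand-alone rerun with an invented bucket UUID (normally taken from COMMUNITIES_BUCKET_UUID):

def _logo_url(bucket, community_id, logo_ext):
    if not logo_ext:
        return None  # community has no logo
    return '/api/files/{bucket}/{key}'.format(
        bucket=bucket, key='{0}/logo.{1}'.format(community_id, logo_ext))

print(_logo_url('00000000-1111', 'astro', 'png'))
# -> /api/files/00000000-1111/astro/logo.png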
inveniosoftware/invenio-communities | invenio_communities/models.py | Community.oaiset | def oaiset(self):
"""Return the corresponding OAISet for given community.
If OAIServer is not installed this property will return None.
:returns: returns OAISet object corresponding to this community.
:rtype: `invenio_oaiserver.models.OAISet` or None
"""
if current_app.config['COMMUNITIES_OAI_ENABLED']:
from invenio_oaiserver.models import OAISet
return OAISet.query.filter_by(spec=self.oaiset_spec).one()
else:
return None | python | def oaiset(self):
"""Return the corresponding OAISet for given community.
If OAIServer is not installed this property will return None.
:returns: returns OAISet object corresponding to this community.
:rtype: `invenio_oaiserver.models.OAISet` or None
"""
if current_app.config['COMMUNITIES_OAI_ENABLED']:
from invenio_oaiserver.models import OAISet
return OAISet.query.filter_by(spec=self.oaiset_spec).one()
else:
return None | [
"def",
"oaiset",
"(",
"self",
")",
":",
"if",
"current_app",
".",
"config",
"[",
"'COMMUNITIES_OAI_ENABLED'",
"]",
":",
"from",
"invenio_oaiserver",
".",
"models",
"import",
"OAISet",
"return",
"OAISet",
".",
"query",
".",
"filter_by",
"(",
"spec",
"=",
"self",
".",
"oaiset_spec",
")",
".",
"one",
"(",
")",
"else",
":",
"return",
"None"
] | Return the corresponding OAISet for the given community.
If OAIServer is not installed this property will return None.
:returns: the OAISet object corresponding to this community.
:rtype: `invenio_oaiserver.models.OAISet` or None | [
"Return",
"the",
"corresponding",
"OAISet",
"for",
"given",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L421-L433 | train | 429 |
inveniosoftware/invenio-communities | invenio_communities/models.py | Community.oaiset_url | def oaiset_url(self):
"""Return the OAISet URL for given community.
:returns: URL of corresponding OAISet.
:rtype: str
"""
return url_for(
'invenio_oaiserver.response',
verb='ListRecords',
metadataPrefix='oai_dc', set=self.oaiset_spec, _external=True) | python | def oaiset_url(self):
"""Return the OAISet URL for given community.
:returns: URL of corresponding OAISet.
:rtype: str
"""
return url_for(
'invenio_oaiserver.response',
verb='ListRecords',
metadataPrefix='oai_dc', set=self.oaiset_spec, _external=True) | [
"def",
"oaiset_url",
"(",
"self",
")",
":",
"return",
"url_for",
"(",
"'invenio_oaiserver.response'",
",",
"verb",
"=",
"'ListRecords'",
",",
"metadataPrefix",
"=",
"'oai_dc'",
",",
"set",
"=",
"self",
".",
"oaiset_spec",
",",
"_external",
"=",
"True",
")"
] | Return the OAISet URL for the given community.
:returns: URL of the corresponding OAISet.
:rtype: str | [
"Return",
"the",
"OAISet",
"URL",
"for",
"given",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L436-L445 | train | 430 |
inveniosoftware/invenio-communities | invenio_communities/models.py | Community.version_id | def version_id(self):
"""Return the version of the community.
:returns: hash which encodes the community id and its last update.
:rtype: str
"""
return hashlib.sha1('{0}__{1}'.format(
self.id, self.updated).encode('utf-8')).hexdigest() | python | def version_id(self):
"""Return the version of the community.
:returns: hash which encodes the community id and its last update.
:rtype: str
"""
return hashlib.sha1('{0}__{1}'.format(
self.id, self.updated).encode('utf-8')).hexdigest() | [
"def",
"version_id",
"(",
"self",
")",
":",
"return",
"hashlib",
".",
"sha1",
"(",
"'{0}__{1}'",
".",
"format",
"(",
"self",
".",
"id",
",",
"self",
".",
"updated",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")"
] | Return the version of the community.
:returns: hash which encodes the community id and its last update.
:rtype: str | [
"Return",
"the",
"version",
"of",
"the",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L448-L455 | train | 431 |
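The version id is simply a SHA-1 over '<id>__<updated>', so any change to the update timestamp changes the hash. A self-contained rerun with fixed sample values:

import hashlib
from datetime import datetime

community_id, updated = 'astro', datetime(2019, 1, 1, 12, 0)
version = hashlib.sha1(
    '{0}__{1}'.format(community_id, updated).encode('utf-8')).hexdigest()
print(version)  # 40-char hex digest, stable for a given (id, updated) pair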
inveniosoftware/invenio-communities | invenio_communities/models.py | FeaturedCommunity.get_featured_or_none | def get_featured_or_none(cls, start_date=None):
"""Get the latest featured community.
:param start_date: Date after which the featuring starts
:returns: Community object or None
:rtype: `invenio_communities.models.Community` or None
"""
start_date = start_date or datetime.utcnow()
comm = cls.query.filter(
FeaturedCommunity.start_date <= start_date
).order_by(
cls.start_date.desc()
).first()
return comm if comm is None else comm.community | python | def get_featured_or_none(cls, start_date=None):
"""Get the latest featured community.
:param start_date: Date after which the featuring starts
:returns: Community object or None
:rtype: `invenio_communities.models.Community` or None
"""
start_date = start_date or datetime.utcnow()
comm = cls.query.filter(
FeaturedCommunity.start_date <= start_date
).order_by(
cls.start_date.desc()
).first()
return comm if comm is None else comm.community | [
"def",
"get_featured_or_none",
"(",
"cls",
",",
"start_date",
"=",
"None",
")",
":",
"start_date",
"=",
"start_date",
"or",
"datetime",
".",
"utcnow",
"(",
")",
"comm",
"=",
"cls",
".",
"query",
".",
"filter",
"(",
"FeaturedCommunity",
".",
"start_date",
"<=",
"start_date",
")",
".",
"order_by",
"(",
"cls",
".",
"start_date",
".",
"desc",
"(",
")",
")",
".",
"first",
"(",
")",
"return",
"comm",
"if",
"comm",
"is",
"None",
"else",
"comm",
".",
"community"
] | Get the latest featured community.
:param start_date: Date after which the featuring starts
:returns: Community object or None
:rtype: `invenio_communities.models.Community` or None | [
"Get",
"the",
"latest",
"featured",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L486-L500 | train | 432 |
ARMmbed/mbed-connector-api-python | mbed_connector_api/mbed_connector_api.py | connector.getConnectorVersion | def getConnectorVersion(self):
"""
GET the current Connector version.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_mdc_version",data.status_code)
result.is_done = True
return result | python | def getConnectorVersion(self):
"""
GET the current Connector version.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_mdc_version",data.status_code)
result.is_done = True
return result | [
"def",
"getConnectorVersion",
"(",
"self",
")",
":",
"result",
"=",
"asyncResult",
"(",
")",
"data",
"=",
"self",
".",
"_getURL",
"(",
"\"/\"",
",",
"versioned",
"=",
"False",
")",
"result",
".",
"fill",
"(",
"data",
")",
"if",
"data",
".",
"status_code",
"==",
"200",
":",
"result",
".",
"error",
"=",
"False",
"else",
":",
"result",
".",
"error",
"=",
"response_codes",
"(",
"\"get_mdc_version\"",
",",
"data",
".",
"status_code",
")",
"result",
".",
"is_done",
"=",
"True",
"return",
"result"
] | GET the current Connector version.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult | [
"GET",
"the",
"current",
"Connector",
"version",
"."
] | a5024a01dc67cc192c8bf7a70b251fcf0a3f279b | https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L72-L87 | train | 433 |
ARMmbed/mbed-connector-api-python | mbed_connector_api/mbed_connector_api.py | connector.setHandler | def setHandler(self,handler,cbfn):
'''
Register a handler for a particular notification type.
These are the types of notifications that are acceptable.
| 'async-responses'
| 'registrations-expired'
| 'de-registrations'
| 'reg-updates'
| 'registrations'
| 'notifications'
:param str handler: name of the notification type
:param fnptr cbfn: function to pass the notification channel messages to.
:return: Nothing.
'''
if handler == "async-responses":
self.async_responses_callback = cbfn
elif handler == "registrations-expired":
self.registrations_expired_callback = cbfn
elif handler == "de-registrations":
self.de_registrations_callback = cbfn
elif handler == "reg-updates":
self.reg_updates_callback = cbfn
elif handler == "registrations":
self.registrations_callback = cbfn
elif handler == "notifications":
self.notifications_callback = cbfn
else:
self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.",handler) | python | def setHandler(self,handler,cbfn):
'''
Register a handler for a particular notification type.
These are the types of notifications that are acceptable.
| 'async-responses'
| 'registrations-expired'
| 'de-registrations'
| 'reg-updates'
| 'registrations'
| 'notifications'
:param str handler: name of the notification type
:param fnptr cbfn: function to pass the notification channel messages to.
:return: Nothing.
'''
if handler == "async-responses":
self.async_responses_callback = cbfn
elif handler == "registrations-expired":
self.registrations_expired_callback = cbfn
elif handler == "de-registrations":
self.de_registrations_callback = cbfn
elif handler == "reg-updates":
self.reg_updates_callback = cbfn
elif handler == "registrations":
self.registrations_callback = cbfn
elif handler == "notifications":
self.notifications_callback = cbfn
else:
self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.",handler) | [
"def",
"setHandler",
"(",
"self",
",",
"handler",
",",
"cbfn",
")",
":",
"if",
"handler",
"==",
"\"async-responses\"",
":",
"self",
".",
"async_responses_callback",
"=",
"cbfn",
"elif",
"handler",
"==",
"\"registrations-expired\"",
":",
"self",
".",
"registrations_expired_callback",
"=",
"cbfn",
"elif",
"handler",
"==",
"\"de-registrations\"",
":",
"self",
".",
"de_registrations_callback",
"=",
"cbfn",
"elif",
"handler",
"==",
"\"reg-updates\"",
":",
"self",
".",
"reg_updates_callback",
"=",
"cbfn",
"elif",
"handler",
"==",
"\"registrations\"",
":",
"self",
".",
"registrations_callback",
"=",
"cbfn",
"elif",
"handler",
"==",
"\"notifications\"",
":",
"self",
".",
"notifications_callback",
"=",
"cbfn",
"else",
":",
"self",
".",
"log",
".",
"warn",
"(",
"\"'%s' is not a legitimate notification channel option. Please check your spelling.\"",
",",
"handler",
")"
] | Register a handler for a particular notification type.
These are the types of notifications that are acceptable.
| 'async-responses'
| 'registrations-expired'
| 'de-registrations'
| 'reg-updates'
| 'registrations'
| 'notifications'
:param str handler: name of the notification type
:param fnptr cbfn: function to pass the notification channel messages to.
:return: Nothing. | [
"Register",
"a",
"handler",
"for",
"a",
"particular",
"notification",
"type",
".",
"These",
"are",
"the",
"types",
"of",
"notifications",
"that",
"are",
"acceptable",
".",
"|",
"async",
"-",
"responses",
"|",
"registrations",
"-",
"expired",
"|",
"de",
"-",
"registrations",
"|",
"reg",
"-",
"updates",
"|",
"registrations",
"|",
"notifications"
] | a5024a01dc67cc192c8bf7a70b251fcf0a3f279b | https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L554-L583 | train | 434 |
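The if/elif ladder in setHandler maps six channel names onto callback attributes whose names are the channel with '-' replaced by '_' plus '_callback'. A runnable sketch of the same dispatch that derives the attribute name instead; the demo class is invented here and is not part of mbed_connector_api:

class _HandlerDemo(object):
    _channels = ('async-responses', 'registrations-expired', 'de-registrations',
                 'reg-updates', 'registrations', 'notifications')

    def set_handler(self, handler, cbfn):
        if handler not in self._channels:
            print("'%s' is not a legitimate notification channel option." % handler)
            return
        # e.g. 'reg-updates' -> self.reg_updates_callback
        setattr(self, handler.replace('-', '_') + '_callback', cbfn)

d = _HandlerDemo()
d.set_handler('notifications', lambda msg: print('got', msg))
d.notifications_callback({'payload': 'aGk='})  # -> got {'payload': 'aGk='}

Deriving the name avoids the six-way elif chain, at the cost of the explicit assignments the original keeps greppable.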
hsolbrig/pyjsg | pyjsg/parser_impl/jsg_doc_parser.py | JSGDocParser.as_python | def as_python(self, infile, include_original_shex: bool=False):
""" Return the python representation of the document """
self._context.resolve_circular_references() # add forwards for any circular entries
body = ''
for k in self._context.ordered_elements():
v = self._context.grammarelts[k]
if isinstance(v, (JSGLexerRuleBlock, JSGObjectExpr)):
body += v.as_python(k)
if isinstance(v, JSGObjectExpr) and not self._context.has_typeid:
self._context.directives.append(f'_CONTEXT.TYPE_EXCEPTIONS.append("{k}")')
elif isinstance(v, JSGForwardRef):
pass
elif isinstance(v, (JSGValueType, JSGArrayExpr)):
body += f"\n\n\n{k} = {v.signature_type()}"
else:
raise NotImplementedError("Unknown grammar elt for {}".format(k))
self._context.forward_refs.pop(k, None)
body = '\n' + '\n'.join(self._context.directives) + body
return _jsg_python_template.format(infile=infile,
original_shex='# ' + self.text if include_original_shex else "",
version=__version__,
gendate=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
body=body) | python | def as_python(self, infile, include_original_shex: bool=False):
""" Return the python representation of the document """
self._context.resolve_circular_references() # add forwards for any circular entries
body = ''
for k in self._context.ordered_elements():
v = self._context.grammarelts[k]
if isinstance(v, (JSGLexerRuleBlock, JSGObjectExpr)):
body += v.as_python(k)
if isinstance(v, JSGObjectExpr) and not self._context.has_typeid:
self._context.directives.append(f'_CONTEXT.TYPE_EXCEPTIONS.append("{k}")')
elif isinstance(v, JSGForwardRef):
pass
elif isinstance(v, (JSGValueType, JSGArrayExpr)):
body += f"\n\n\n{k} = {v.signature_type()}"
else:
raise NotImplementedError("Unknown grammar elt for {}".format(k))
self._context.forward_refs.pop(k, None)
body = '\n' + '\n'.join(self._context.directives) + body
return _jsg_python_template.format(infile=infile,
original_shex='# ' + self.text if include_original_shex else "",
version=__version__,
gendate=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
body=body) | [
"def",
"as_python",
"(",
"self",
",",
"infile",
",",
"include_original_shex",
":",
"bool",
"=",
"False",
")",
":",
"self",
".",
"_context",
".",
"resolve_circular_references",
"(",
")",
"# add forwards for any circular entries",
"body",
"=",
"''",
"for",
"k",
"in",
"self",
".",
"_context",
".",
"ordered_elements",
"(",
")",
":",
"v",
"=",
"self",
".",
"_context",
".",
"grammarelts",
"[",
"k",
"]",
"if",
"isinstance",
"(",
"v",
",",
"(",
"JSGLexerRuleBlock",
",",
"JSGObjectExpr",
")",
")",
":",
"body",
"+=",
"v",
".",
"as_python",
"(",
"k",
")",
"if",
"isinstance",
"(",
"v",
",",
"JSGObjectExpr",
")",
"and",
"not",
"self",
".",
"_context",
".",
"has_typeid",
":",
"self",
".",
"_context",
".",
"directives",
".",
"append",
"(",
"f'_CONTEXT.TYPE_EXCEPTIONS.append(\"{k}\")'",
")",
"elif",
"isinstance",
"(",
"v",
",",
"JSGForwardRef",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"v",
",",
"(",
"JSGValueType",
",",
"JSGArrayExpr",
")",
")",
":",
"body",
"+=",
"f\"\\n\\n\\n{k} = {v.signature_type()}\"",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"Unknown grammar elt for {}\"",
".",
"format",
"(",
"k",
")",
")",
"self",
".",
"_context",
".",
"forward_refs",
".",
"pop",
"(",
"k",
",",
"None",
")",
"body",
"=",
"'\\n'",
"+",
"'\\n'",
".",
"join",
"(",
"self",
".",
"_context",
".",
"directives",
")",
"+",
"body",
"return",
"_jsg_python_template",
".",
"format",
"(",
"infile",
"=",
"infile",
",",
"original_shex",
"=",
"'# '",
"+",
"self",
".",
"text",
"if",
"include_original_shex",
"else",
"\"\"",
",",
"version",
"=",
"__version__",
",",
"gendate",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M\"",
")",
",",
"body",
"=",
"body",
")"
] | Return the python representation of the document | [
"Return",
"the",
"python",
"representation",
"of",
"the",
"document"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_doc_parser.py#L36-L59 | train | 435 |
emirozer/bowshock | bowshock/modis.py | __getDummyDateList | def __getDummyDateList():
"""
Generate a dummy date list for testing without
hitting the server
"""
D = []
for y in xrange(2001, 2010):
for d in xrange(1, 365, 1):
D.append('A%04d%03d' % (y, d))
return D | python | def __getDummyDateList():
"""
Generate a dummy date list for testing without
hitting the server
"""
D = []
for y in xrange(2001, 2010):
for d in xrange(1, 365, 1):
D.append('A%04d%03d' % (y, d))
return D | [
"def",
"__getDummyDateList",
"(",
")",
":",
"D",
"=",
"[",
"]",
"for",
"y",
"in",
"xrange",
"(",
"2001",
",",
"2010",
")",
":",
"for",
"d",
"in",
"xrange",
"(",
"1",
",",
"365",
",",
"1",
")",
":",
"D",
".",
"append",
"(",
"'A%04d%03d'",
"%",
"(",
"y",
",",
"d",
")",
")",
"return",
"D"
] | Generate a dummy date list for testing without
hitting the server | [
"Generate",
"a",
"dummy",
"date",
"list",
"for",
"testing",
"without",
"hitting",
"the",
"server"
] | 9f5e053f1d54995b833b83616f37c67178c3e840 | https://github.com/emirozer/bowshock/blob/9f5e053f1d54995b833b83616f37c67178c3e840/bowshock/modis.py#L92-L103 | train | 436 |
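Two small caveats in the record above: xrange exists only on Python 2, and xrange(1, 365, 1) stops at day 364, so day 365 (and 366 in leap years) is never generated. A Python 3 equivalent of the same 'A' + 4-digit year + 3-digit day-of-year format:

dates = ['A%04d%03d' % (y, d) for y in range(2001, 2010) for d in range(1, 365)]
print(dates[0], dates[-1])  # A2001001 A2009364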
emirozer/bowshock | bowshock/modis.py | mkIntDate | def mkIntDate(s):
"""
Convert the webserver formatted dates
to an integer format by stripping the
leading char and casting
"""
n = s.__len__()
d = int(s[-(n - 1):n])
return d | python | def mkIntDate(s):
"""
Convert the webserver formatted dates
to an integer format by stripping the
leading char and casting
"""
n = s.__len__()
d = int(s[-(n - 1):n])
return d | [
"def",
"mkIntDate",
"(",
"s",
")",
":",
"n",
"=",
"s",
".",
"__len__",
"(",
")",
"d",
"=",
"int",
"(",
"s",
"[",
"-",
"(",
"n",
"-",
"1",
")",
":",
"n",
"]",
")",
"return",
"d"
] | Convert the webserver formatted dates
to an integer format by stripping the
leading char and casting | [
"Convert",
"the",
"webserver",
"formatted",
"dates",
"to",
"an",
"integer",
"format",
"by",
"stripping",
"the",
"leading",
"char",
"and",
"casting"
] | 9f5e053f1d54995b833b83616f37c67178c3e840 | https://github.com/emirozer/bowshock/blob/9f5e053f1d54995b833b83616f37c67178c3e840/bowshock/modis.py#L118-L127 | train | 437 |
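The slice arithmetic s[-(n - 1):n] amounts to 'drop the leading char', i.e. s[1:]. A one-line rerun on one of the dummy dates:

s = 'A2009001'
print(int(s[1:]))  # -> 2009001, the same result as mkIntDate(s)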
CybOXProject/mixbox | mixbox/idgen.py | IDGenerator.create_id | def create_id(self, prefix="guid"):
"""Create an ID.
Note that if `prefix` is not provided, it will be `guid`, even if the
`method` is `METHOD_INT`.
"""
if self.method == IDGenerator.METHOD_UUID:
id_ = str(uuid.uuid4())
elif self.method == IDGenerator.METHOD_INT:
id_ = self.next_int
self.next_int += 1
else:
raise InvalidMethodError(self.method)
return "%s:%s-%s" % (self.namespace.prefix, prefix, id_) | python | def create_id(self, prefix="guid"):
"""Create an ID.
Note that if `prefix` is not provided, it will be `guid`, even if the
`method` is `METHOD_INT`.
"""
if self.method == IDGenerator.METHOD_UUID:
id_ = str(uuid.uuid4())
elif self.method == IDGenerator.METHOD_INT:
id_ = self.next_int
self.next_int += 1
else:
raise InvalidMethodError(self.method)
return "%s:%s-%s" % (self.namespace.prefix, prefix, id_) | [
"def",
"create_id",
"(",
"self",
",",
"prefix",
"=",
"\"guid\"",
")",
":",
"if",
"self",
".",
"method",
"==",
"IDGenerator",
".",
"METHOD_UUID",
":",
"id_",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"elif",
"self",
".",
"method",
"==",
"IDGenerator",
".",
"METHOD_INT",
":",
"id_",
"=",
"self",
".",
"next_int",
"self",
".",
"next_int",
"+=",
"1",
"else",
":",
"raise",
"InvalidMethodError",
"(",
"self",
".",
"method",
")",
"return",
"\"%s:%s-%s\"",
"%",
"(",
"self",
".",
"namespace",
".",
"prefix",
",",
"prefix",
",",
"id_",
")"
] | Create an ID.
Note that if `prefix` is not provided, it will be `guid`, even if the
`method` is `METHOD_INT`. | [
"Create",
"an",
"ID",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/idgen.py#L61-L75 | train | 438 |
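A stand-alone rerun of the two id-generation branches; 'example' stands in for the generator's namespace prefix, and the integer method would count up from whatever next_int currently is:

import uuid

prefix, namespace = 'guid', 'example'
print('%s:%s-%s' % (namespace, prefix, uuid.uuid4()))  # METHOD_UUID branch
print('%s:%s-%s' % (namespace, prefix, 1))             # METHOD_INT branch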
yamins81/tabular | tabular/spreadsheet.py | grayspec | def grayspec(k):
"""
List of gray-scale colors in HSV space as web hex triplets.
For integer argument k, returns list of `k` gray-scale colors, increasingly
light, linearly in the HSV color space, as web hex triplets.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
**Parameters**
**k** : positive integer
Number of gray-scale colors to return.
**Returns**
**glist** : list of strings
List of `k` gray-scale colors.
"""
ll = .5
ul = .8
delta = (ul - ll) / k
return [GrayScale(t) for t in np.arange(ll, ul, delta)] | python | def grayspec(k):
"""
List of gray-scale colors in HSV space as web hex triplets.
For integer argument k, returns list of `k` gray-scale colors, increasingly
light, linearly in the HSV color space, as web hex triplets.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
**Parameters**
**k** : positive integer
Number of gray-scale colors to return.
**Returns**
**glist** : list of strings
List of `k` gray-scale colors.
"""
ll = .5
ul = .8
delta = (ul - ll) / k
return [GrayScale(t) for t in np.arange(ll, ul, delta)] | [
"def",
"grayspec",
"(",
"k",
")",
":",
"ll",
"=",
".5",
"ul",
"=",
".8",
"delta",
"=",
"(",
"ul",
"-",
"ll",
")",
"/",
"k",
"return",
"[",
"GrayScale",
"(",
"t",
")",
"for",
"t",
"in",
"np",
".",
"arange",
"(",
"ll",
",",
"ul",
",",
"delta",
")",
"]"
] | List of gray-scale colors in HSV space as web hex triplets.
For integer argument k, returns list of `k` gray-scale colors, increasingly
light, linearly in the HSV color space, as web hex triplets.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
**Parameters**
**k** : positive integer
Number of gray-scale colors to return.
**Returns**
**glist** : list of strings
List of `k` gray-scale colors. | [
"List",
"of",
"gray",
"-",
"scale",
"colors",
"in",
"HSV",
"space",
"as",
"web",
"hex",
"triplets",
"."
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/spreadsheet.py#L477-L502 | train | 439 |
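The lightness values are spaced linearly between 0.5 (inclusive) and 0.8 (exclusive); GrayScale then renders each as a web hex triplet. A rerun of just the spacing for k = 3:

import numpy as np

k, ll, ul = 3, .5, .8
print(list(np.arange(ll, ul, (ul - ll) / k)))  # values near [0.5, 0.6, 0.7]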
yamins81/tabular | tabular/spreadsheet.py | addrecords | def addrecords(X, new):
"""
Append one or more records to the end of a numpy recarray or ndarray.
Can take a single record, void or tuple, or a list of records, voids or
tuples.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addrecords`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The array to add records to.
**new** : record, void or tuple, or list of them
Record(s) to add to `X`.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new records.
**See also:** :func:`tabular.spreadsheet.rowstack`
"""
if isinstance(new, np.record) or isinstance(new, np.void) or \
isinstance(new, tuple):
new = [new]
return np.append(X, utils.fromrecords(new, type=np.ndarray,
dtype=X.dtype), axis=0) | python | def addrecords(X, new):
"""
Append one or more records to the end of a numpy recarray or ndarray.
Can take a single record, void or tuple, or a list of records, voids or
tuples.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addrecords`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The array to add records to.
**new** : record, void or tuple, or list of them
Record(s) to add to `X`.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new records.
**See also:** :func:`tabular.spreadsheet.rowstack`
"""
if isinstance(new, np.record) or isinstance(new, np.void) or \
isinstance(new, tuple):
new = [new]
return np.append(X, utils.fromrecords(new, type=np.ndarray,
dtype=X.dtype), axis=0) | [
"def",
"addrecords",
"(",
"X",
",",
"new",
")",
":",
"if",
"isinstance",
"(",
"new",
",",
"np",
".",
"record",
")",
"or",
"isinstance",
"(",
"new",
",",
"np",
".",
"void",
")",
"or",
"isinstance",
"(",
"new",
",",
"tuple",
")",
":",
"new",
"=",
"[",
"new",
"]",
"return",
"np",
".",
"append",
"(",
"X",
",",
"utils",
".",
"fromrecords",
"(",
"new",
",",
"type",
"=",
"np",
".",
"ndarray",
",",
"dtype",
"=",
"X",
".",
"dtype",
")",
",",
"axis",
"=",
"0",
")"
] | Append one or more records to the end of a numpy recarray or ndarray.
Can take a single record, void or tuple, or a list of records, voids or
tuples.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addrecords`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The array to add records to.
**new** : record, void or tuple, or list of them
Record(s) to add to `X`.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new records.
**See also:** :func:`tabular.spreadsheet.rowstack` | [
"Append",
"one",
"or",
"more",
"records",
"to",
"the",
"end",
"of",
"a",
"numpy",
"recarray",
"or",
"ndarray",
"."
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/spreadsheet.py#L704-L737 | train | 440 |
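A minimal numpy rerun of the append path, using a two-column structured array; a single tuple would first be wrapped in a list, exactly as addrecords does:

import numpy as np

X = np.array([(1, 'a'), (2, 'b')], dtype=[('n', int), ('s', 'U1')])
new = np.array([(3, 'c')], dtype=X.dtype)   # the record (3, 'c'), wrapped
print(np.append(X, new, axis=0))            # [(1, 'a') (2, 'b') (3, 'c')]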
yamins81/tabular | tabular/spreadsheet.py | addcols | def addcols(X, cols, names=None):
"""
Add one or more columns to a numpy ndarray.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addcols`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The recarray to add columns to.
**cols** : numpy ndarray, or list of arrays of columns
Column(s) to add.
**names**: list of strings, optional
Names of the new columns. Only applicable when `cols` is a
list of arrays.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new columns.
**See also:** :func:`tabular.spreadsheet.colstack`
"""
if isinstance(names,str):
names = [n.strip() for n in names.split(',')]
if isinstance(cols, list):
if any([isinstance(x,np.ndarray) or isinstance(x,list) or \
isinstance(x,tuple) for x in cols]):
assert all([len(x) == len(X) for x in cols]), \
'Trying to add columns of wrong length.'
assert names != None and len(cols) == len(names), \
'Number of columns to add must equal number of new names.'
cols = utils.fromarrays(cols,type=np.ndarray,names = names)
else:
assert len(cols) == len(X), 'Trying to add column of wrong length.'
cols = utils.fromarrays([cols], type=np.ndarray,names=names)
else:
assert isinstance(cols, np.ndarray)
if cols.dtype.names == None:
cols = utils.fromarrays([cols],type=np.ndarray, names=names)
Replacements = [a for a in cols.dtype.names if a in X.dtype.names]
if len(Replacements) > 0:
print('Replacing columns',
[a for a in cols.dtype.names if a in X.dtype.names])
return utils.fromarrays(
[X[a] if a not in cols.dtype.names else cols[a] for a in X.dtype.names] +
[cols[a] for a in cols.dtype.names if a not in X.dtype.names],
type=np.ndarray,
names=list(X.dtype.names) + [a for a in cols.dtype.names
if a not in X.dtype.names]) | python | def addcols(X, cols, names=None):
"""
Add one or more columns to a numpy ndarray.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addcols`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The recarray to add columns to.
**cols** : numpy ndarray, or list of arrays of columns
Column(s) to add.
**names**: list of strings, optional
Names of the new columns. Only applicable when `cols` is a
list of arrays.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new columns.
**See also:** :func:`tabular.spreadsheet.colstack`
"""
if isinstance(names,str):
names = [n.strip() for n in names.split(',')]
if isinstance(cols, list):
if any([isinstance(x,np.ndarray) or isinstance(x,list) or \
isinstance(x,tuple) for x in cols]):
assert all([len(x) == len(X) for x in cols]), \
'Trying to add columns of wrong length.'
assert names != None and len(cols) == len(names), \
'Number of columns to add must equal number of new names.'
cols = utils.fromarrays(cols,type=np.ndarray,names = names)
else:
assert len(cols) == len(X), 'Trying to add column of wrong length.'
cols = utils.fromarrays([cols], type=np.ndarray,names=names)
else:
assert isinstance(cols, np.ndarray)
if cols.dtype.names == None:
cols = utils.fromarrays([cols],type=np.ndarray, names=names)
Replacements = [a for a in cols.dtype.names if a in X.dtype.names]
if len(Replacements) > 0:
print('Replacing columns',
[a for a in cols.dtype.names if a in X.dtype.names])
return utils.fromarrays(
[X[a] if a not in cols.dtype.names else cols[a] for a in X.dtype.names] +
[cols[a] for a in cols.dtype.names if a not in X.dtype.names],
type=np.ndarray,
names=list(X.dtype.names) + [a for a in cols.dtype.names
if a not in X.dtype.names]) | [
"def",
"addcols",
"(",
"X",
",",
"cols",
",",
"names",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"names",
",",
"str",
")",
":",
"names",
"=",
"[",
"n",
".",
"strip",
"(",
")",
"for",
"n",
"in",
"names",
".",
"split",
"(",
"','",
")",
"]",
"if",
"isinstance",
"(",
"cols",
",",
"list",
")",
":",
"if",
"any",
"(",
"[",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"x",
",",
"list",
")",
"or",
"isinstance",
"(",
"x",
",",
"tuple",
")",
"for",
"x",
"in",
"cols",
"]",
")",
":",
"assert",
"all",
"(",
"[",
"len",
"(",
"x",
")",
"==",
"len",
"(",
"X",
")",
"for",
"x",
"in",
"cols",
"]",
")",
",",
"'Trying to add columns of wrong length.'",
"assert",
"names",
"!=",
"None",
"and",
"len",
"(",
"cols",
")",
"==",
"len",
"(",
"names",
")",
",",
"'Number of columns to add must equal number of new names.'",
"cols",
"=",
"utils",
".",
"fromarrays",
"(",
"cols",
",",
"type",
"=",
"np",
".",
"ndarray",
",",
"names",
"=",
"names",
")",
"else",
":",
"assert",
"len",
"(",
"cols",
")",
"==",
"len",
"(",
"X",
")",
",",
"'Trying to add column of wrong length.'",
"cols",
"=",
"utils",
".",
"fromarrays",
"(",
"[",
"cols",
"]",
",",
"type",
"=",
"np",
".",
"ndarray",
",",
"names",
"=",
"names",
")",
"else",
":",
"assert",
"isinstance",
"(",
"cols",
",",
"np",
".",
"ndarray",
")",
"if",
"cols",
".",
"dtype",
".",
"names",
"==",
"None",
":",
"cols",
"=",
"utils",
".",
"fromarrays",
"(",
"[",
"cols",
"]",
",",
"type",
"=",
"np",
".",
"ndarray",
",",
"names",
"=",
"names",
")",
"Replacements",
"=",
"[",
"a",
"for",
"a",
"in",
"cols",
".",
"dtype",
".",
"names",
"if",
"a",
"in",
"X",
".",
"dtype",
".",
"names",
"]",
"if",
"len",
"(",
"Replacements",
")",
">",
"0",
":",
"print",
"(",
"'Replacing columns'",
",",
"[",
"a",
"for",
"a",
"in",
"cols",
".",
"dtype",
".",
"names",
"if",
"a",
"in",
"X",
".",
"dtype",
".",
"names",
"]",
")",
"return",
"utils",
".",
"fromarrays",
"(",
"[",
"X",
"[",
"a",
"]",
"if",
"a",
"not",
"in",
"cols",
".",
"dtype",
".",
"names",
"else",
"cols",
"[",
"a",
"]",
"for",
"a",
"in",
"X",
".",
"dtype",
".",
"names",
"]",
"+",
"[",
"cols",
"[",
"a",
"]",
"for",
"a",
"in",
"cols",
".",
"dtype",
".",
"names",
"if",
"a",
"not",
"in",
"X",
".",
"dtype",
".",
"names",
"]",
",",
"type",
"=",
"np",
".",
"ndarray",
",",
"names",
"=",
"list",
"(",
"X",
".",
"dtype",
".",
"names",
")",
"+",
"[",
"a",
"for",
"a",
"in",
"cols",
".",
"dtype",
".",
"names",
"if",
"a",
"not",
"in",
"X",
".",
"dtype",
".",
"names",
"]",
")"
] | Add one or more columns to a numpy ndarray.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addcols`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The recarray to add columns to.
**cols** : numpy ndarray, or list of arrays of columns
Column(s) to add.
**names**: list of strings, optional
Names of the new columns. Only applicable when `cols` is a
list of arrays.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new columns.
**See also:** :func:`tabular.spreadsheet.colstack` | [
"Add",
"one",
"or",
"more",
"columns",
"to",
"a",
"numpy",
"ndarray",
"."
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/spreadsheet.py#L740-L803 | train | 441 |
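A short usage sketch for addcols follows; the sample data are illustrative assumptions, and running it requires a Python 3-compatible install of the tabular package:

import numpy as np
from tabular import spreadsheet

X = np.rec.fromrecords([('a', 1), ('b', 2)], names=['name', 'count'])
# One genuinely new column ('score') plus one that replaces 'count'.
Y = spreadsheet.addcols(X, [[0.5, 0.9], [3, 4]], names=['score', 'count'])
print(Y.dtype.names)  # expected: ('name', 'count', 'score')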
yamins81/tabular | tabular/spreadsheet.py | deletecols | def deletecols(X, cols):
"""
Delete columns from a numpy ndarray or recarray.
Can take a string giving a column name or comma-separated list of column
names, or a list of string column names.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.deletecols`.
**Parameters**
**X** : numpy recarray or ndarray with structured dtype
The numpy array from which to delete columns.
**cols** : string or list of strings
Name or list of names of columns in `X`. This can be
a string giving a column name or comma-separated list of
column names, or a list of string column names.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy ndarray with structured dtype
given by `X`, excluding the columns named in `cols`.
"""
if isinstance(cols, str):
cols = cols.split(',')
retain = [n for n in X.dtype.names if n not in cols]
if len(retain) > 0:
return X[retain]
else:
return None | python | def deletecols(X, cols):
"""
Delete columns from a numpy ndarray or recarray.
Can take a string giving a column name or comma-separated list of column
names, or a list of string column names.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.deletecols`.
**Parameters**
**X** : numpy recarray or ndarray with structured dtype
The numpy array from which to delete columns.
**cols** : string or list of strings
Name or list of names of columns in `X`. This can be
a string giving a column name or comma-separated list of
column names, or a list of string column names.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy ndarray with structured dtype
given by `X`, excluding the columns named in `cols`.
"""
if isinstance(cols, str):
cols = cols.split(',')
retain = [n for n in X.dtype.names if n not in cols]
if len(retain) > 0:
return X[retain]
else:
return None | [
"def",
"deletecols",
"(",
"X",
",",
"cols",
")",
":",
"if",
"isinstance",
"(",
"cols",
",",
"str",
")",
":",
"cols",
"=",
"cols",
".",
"split",
"(",
"','",
")",
"retain",
"=",
"[",
"n",
"for",
"n",
"in",
"X",
".",
"dtype",
".",
"names",
"if",
"n",
"not",
"in",
"cols",
"]",
"if",
"len",
"(",
"retain",
")",
">",
"0",
":",
"return",
"X",
"[",
"retain",
"]",
"else",
":",
"return",
"None"
] | Delete columns from a numpy ndarray or recarray.
Can take a string giving a column name or comma-separated list of column
names, or a list of string column names.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.deletecols`.
**Parameters**
**X** : numpy recarray or ndarray with structured dtype
The numpy array from which to delete columns.
**cols** : string or list of strings
Name or list of names of columns in `X`. This can be
a string giving a column name or comma-separated list of
column names, or a list of string column names.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy ndarray with structured dtype
given by `X`, excluding the columns named in `cols`. | [
"Delete",
"columns",
"from",
"a",
"numpy",
"ndarry",
"or",
"recarray",
"."
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/spreadsheet.py#L806-L842 | train | 442 |
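The core of deletecols is plain numpy field selection, so its effect can be sketched without tabular at all:

import numpy as np

X = np.array([(1, 2.0, 'x')], dtype=[('a', int), ('b', float), ('c', 'U1')])
cols = 'b'.split(',')                        # deletecols(X, 'b') does this
retain = [n for n in X.dtype.names if n not in cols]
print(X[retain].dtype.names)                 # ('a', 'c')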
yamins81/tabular | tabular/spreadsheet.py | renamecol | def renamecol(X, old, new):
"""
Rename column of a numpy ndarray with structured dtype, in-place.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.renamecol`.
**Parameters**
**X** : numpy ndarray with structured dtype
The numpy array for which a column is to be renamed.
**old** : string
Old column name, e.g. a name in `X.dtype.names`.
**new** : string
New column name to replace `old`.
"""
NewNames = tuple([n if n != old else new for n in X.dtype.names])
X.dtype.names = NewNames | python | def renamecol(X, old, new):
"""
Rename column of a numpy ndarray with structured dtype, in-place.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.renamecol`.
**Parameters**
**X** : numpy ndarray with structured dtype
The numpy array for which a column is to be renamed.
**old** : string
Old column name, e.g. a name in `X.dtype.names`.
**new** : string
New column name to replace `old`.
"""
NewNames = tuple([n if n != old else new for n in X.dtype.names])
X.dtype.names = NewNames | [
"def",
"renamecol",
"(",
"X",
",",
"old",
",",
"new",
")",
":",
"NewNames",
"=",
"tuple",
"(",
"[",
"n",
"if",
"n",
"!=",
"old",
"else",
"new",
"for",
"n",
"in",
"X",
".",
"dtype",
".",
"names",
"]",
")",
"X",
".",
"dtype",
".",
"names",
"=",
"NewNames"
] | Rename column of a numpy ndarray with structured dtype, in-place.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.renamecol`.
**Parameters**
**X** : numpy ndarray with structured dtype
The numpy array for which a column is to be renamed.
**old** : string
Old column name, e.g. a name in `X.dtype.names`.
**new** : string
New column name to replace `old`. | [
"Rename",
"column",
"of",
"a",
"numpy",
"ndarray",
"with",
"structured",
"dtype",
"in",
"-",
"place",
"."
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/spreadsheet.py#L845-L868 | train | 443 |
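Since renamecol only rebuilds the dtype's name tuple, the same in-place effect is visible with plain numpy:

import numpy as np

X = np.array([(1, 2)], dtype=[('old', int), ('other', int)])
# Exactly what renamecol(X, 'old', 'new') does:
X.dtype.names = tuple(n if n != 'old' else 'new' for n in X.dtype.names)
print(X.dtype.names)   # ('new', 'other')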
yamins81/tabular | tabular/spreadsheet.py | replace | def replace(X, old, new, strict=True, cols=None, rows=None):
"""
Replace value `old` with `new` everywhere it appears in-place.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.replace`.
**Parameters**
**X** : numpy ndarray with structured dtype
Numpy array for which in-place replacement of `old` with
`new` is to be done.
**old** : string
**new** : string
**strict** : boolean, optional
* If `strict` = `True`, replace only exact occurrences of `old`.
* If `strict` = `False`, assume `old` and `new` are strings and
replace all occurrences of substrings (e.g. like
:func:`str.replace`)
**cols** : list of strings, optional
Names of columns to make replacements in; if `None`, make
replacements everywhere.
**rows** : list of booleans or integers, optional
Rows to make replacements in; if `None`, make replacements
everywhere.
Note: This function does in-place replacements. Thus there are issues
handling data types here when replacement dtype is larger than original
dtype. This can be resolved later by making a new array when necessary ...
"""
if cols is None:
cols = X.dtype.names
elif isinstance(cols, str):
cols = cols.split(',')
if rows is None:
rows = np.ones((len(X),), bool)
if strict:
new = np.array(new)
for a in cols:
if X.dtype[a] < new.dtype:
print('WARNING: dtype of column', a,
'is inferior to dtype of ', new,
'which may cause problems.')
try:
X[a][(X[a] == old)[rows]] = new
except Exception:
print('Replacement not made on column', a, '.')
else:
for a in cols:
QuickRep = True
try:
colstr = ''.join(X[a][rows])
except TypeError:
print('Not replacing in column', a, 'due to type mismatch.')
else:
avoid = [ord(o) for o in utils.uniqify(old + new + colstr)]
ok = set(range(256)).difference(avoid)
if len(ok) > 0:
sep = chr(list(ok)[0])
else:
ok = set(range(65536)).difference(avoid)
if len(ok) > 0:
sep = chr(list(ok)[0])
else:
print('All unicode characters represented in column',
a, ', can\'t replace quickly.')
QuickRep = False
if QuickRep:
newrows = np.array(sep.join(X[a][rows])
.replace(old, new).split(sep))
else:
newrows = np.array([aa.replace(old,new) for aa in
X[a][rows]])
X[a][rows] = np.cast[X.dtype[a]](newrows)
if newrows.dtype > X.dtype[a]:
print('WARNING: dtype of column', a, 'is inferior to the '
'dtype of its replacement which may cause problems '
'(ends of strings might get chopped off).') | python | def replace(X, old, new, strict=True, cols=None, rows=None):
"""
Replace value `old` with `new` everywhere it appears in-place.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.replace`.
**Parameters**
**X** : numpy ndarray with structured dtype
Numpy array for which in-place replacement of `old` with
`new` is to be done.
**old** : string
**new** : string
**strict** : boolean, optional
* If `strict` = `True`, replace only exact occurrences of `old`.
* If `strict` = `False`, assume `old` and `new` are strings and
replace all occurrences of substrings (e.g. like
:func:`str.replace`)
**cols** : list of strings, optional
Names of columns to make replacements in; if `None`, make
replacements everywhere.
**rows** : list of booleans or integers, optional
Rows to make replacements in; if `None`, make replacements
everywhere.
Note: This function does in-place replacements. Thus there are issues
handling data types here when replacement dtype is larger than original
dtype. This can be resolved later by making a new array when necessary ...
"""
if cols is None:
cols = X.dtype.names
elif isinstance(cols, str):
cols = cols.split(',')
if rows is None:
rows = np.ones((len(X),), bool)
if strict:
new = np.array(new)
for a in cols:
if X.dtype[a] < new.dtype:
print('WARNING: dtype of column', a,
'is inferior to dtype of ', new,
'which may cause problems.')
try:
X[a][(X[a] == old)[rows]] = new
except Exception:
print('Replacement not made on column', a, '.')
else:
for a in cols:
QuickRep = True
try:
colstr = ''.join(X[a][rows])
except TypeError:
print('Not replacing in column', a, 'due to type mismatch.')
else:
avoid = [ord(o) for o in utils.uniqify(old + new + colstr)]
ok = set(range(256)).difference(avoid)
if len(ok) > 0:
sep = chr(list(ok)[0])
else:
ok = set(range(65536)).difference(avoid)
if len(ok) > 0:
sep = chr(list(ok)[0])
else:
print('All unicode characters represented in column',
a, ', can\'t replace quickly.')
QuickRep = False
if QuickRep:
newrows = np.array(sep.join(X[a][rows])
.replace(old, new).split(sep))
else:
newrows = np.array([aa.replace(old,new) for aa in
X[a][rows]])
X[a][rows] = np.cast[X.dtype[a]](newrows)
if newrows.dtype > X.dtype[a]:
print('WARNING: dtype of column', a, 'is inferior to the '
'dtype of its replacement which may cause problems '
'(ends of strings might get chopped off).') | [
"def",
"replace",
"(",
"X",
",",
"old",
",",
"new",
",",
"strict",
"=",
"True",
",",
"cols",
"=",
"None",
",",
"rows",
"=",
"None",
")",
":",
"if",
"cols",
"==",
"None",
":",
"cols",
"=",
"X",
".",
"dtype",
".",
"names",
"elif",
"isinstance",
"(",
"cols",
",",
"str",
")",
":",
"cols",
"=",
"cols",
".",
"split",
"(",
"','",
")",
"if",
"rows",
"==",
"None",
":",
"rows",
"=",
"np",
".",
"ones",
"(",
"(",
"len",
"(",
"X",
")",
",",
")",
",",
"bool",
")",
"if",
"strict",
":",
"new",
"=",
"np",
".",
"array",
"(",
"new",
")",
"for",
"a",
"in",
"cols",
":",
"if",
"X",
".",
"dtype",
"[",
"a",
"]",
"<",
"new",
".",
"dtype",
":",
"print",
"(",
"'WARNING: dtype of column'",
",",
"a",
",",
"'is inferior to dtype of '",
",",
"new",
",",
"'which may cause problems.'",
")",
"try",
":",
"X",
"[",
"a",
"]",
"[",
"(",
"X",
"[",
"a",
"]",
"==",
"old",
")",
"[",
"rows",
"]",
"]",
"=",
"new",
"except",
":",
"print",
"(",
"'Replacement not made on column'",
",",
"a",
",",
"'.'",
")",
"else",
":",
"for",
"a",
"in",
"cols",
":",
"QuickRep",
"=",
"True",
"try",
":",
"colstr",
"=",
"''",
".",
"join",
"(",
"X",
"[",
"a",
"]",
"[",
"rows",
"]",
")",
"except",
"TypeError",
":",
"print",
"(",
"'Not replacing in column'",
",",
"a",
",",
"'due to type mismatch.'",
")",
"else",
":",
"avoid",
"=",
"[",
"ord",
"(",
"o",
")",
"for",
"o",
"in",
"utils",
".",
"uniqify",
"(",
"old",
"+",
"new",
"+",
"colstr",
")",
"]",
"ok",
"=",
"set",
"(",
"range",
"(",
"256",
")",
")",
".",
"difference",
"(",
"avoid",
")",
"if",
"len",
"(",
"ok",
")",
">",
"0",
":",
"sep",
"=",
"chr",
"(",
"list",
"(",
"ok",
")",
"[",
"0",
"]",
")",
"else",
":",
"ok",
"=",
"set",
"(",
"range",
"(",
"65536",
")",
")",
".",
"difference",
"(",
"avoid",
")",
"if",
"len",
"(",
"ok",
")",
">",
"0",
":",
"sep",
"=",
"unichr",
"(",
"list",
"(",
"ok",
")",
"[",
"0",
"]",
")",
"else",
":",
"print",
"(",
"'All unicode characters represented in column'",
",",
"a",
",",
"', can\\t replace quickly.'",
")",
"QuickRep",
"=",
"False",
"if",
"QuickRep",
":",
"newrows",
"=",
"np",
".",
"array",
"(",
"sep",
".",
"join",
"(",
"X",
"[",
"a",
"]",
"[",
"rows",
"]",
")",
".",
"replace",
"(",
"old",
",",
"new",
")",
".",
"split",
"(",
"sep",
")",
")",
"else",
":",
"newrows",
"=",
"np",
".",
"array",
"(",
"[",
"aa",
".",
"replace",
"(",
"old",
",",
"new",
")",
"for",
"aa",
"in",
"X",
"[",
"a",
"]",
"[",
"rows",
"]",
"]",
")",
"X",
"[",
"a",
"]",
"[",
"rows",
"]",
"=",
"np",
".",
"cast",
"[",
"X",
".",
"dtype",
"[",
"a",
"]",
"]",
"(",
"newrows",
")",
"if",
"newrows",
".",
"dtype",
">",
"X",
".",
"dtype",
"[",
"a",
"]",
":",
"print",
"(",
"'WARNING: dtype of column'",
",",
"a",
",",
"'is inferior to the '",
"'dtype of its replacement which may cause problems '",
"'(ends of strings might get chopped off).'",
")"
] | Replace value `old` with `new` everywhere it appears in-place.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.replace`.
**Parameters**
**X** : numpy ndarray with structured dtype
Numpy array for which in-place replacement of `old` with
`new` is to be done.
**old** : string
**new** : string
**strict** : boolean, optional
* If `strict` = `True`, replace only exact occurrences of `old`.
* If `strict` = `False`, assume `old` and `new` are strings and
replace all occurrences of substrings (e.g. like
:func:`str.replace`)
**cols** : list of strings, optional
Names of columns to make replacements in; if `None`, make
replacements everywhere.
**rows** : list of booleans or integers, optional
Rows to make replacements in; if `None`, make replacements
everywhere.
Note: This function does in-place replacements. Thus there are issues
handling data types here when replacement dtype is larger than original
dtype. This can be resolved later by making a new array when necessary ... | [
"Replace",
"value",
"old",
"with",
"new",
"everywhere",
"it",
"appears",
"in",
"-",
"place",
"."
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/spreadsheet.py#L871-L964 | train | 444 |
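The strict=False branch relies on a join/replace/split trick; this standalone sketch shows why it works (the separator below is an assumption, whereas the function searches for a character that is absent from the data):

import numpy as np

col = np.array(['foo-bar', 'bar-baz'])
sep = '\x00'   # assumed not to occur in the data
newcol = np.array(sep.join(col).replace('bar', 'qux').split(sep))
print(newcol)  # ['foo-qux' 'qux-baz']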
yamins81/tabular | tabular/spreadsheet.py | rowstack | def rowstack(seq, mode='nulls', nullvals=None):
'''
Vertically stack a sequence of numpy ndarrays with structured dtype
Analog of numpy.vstack
Implemented by the tabarray method
:func:`tabular.tab.tabarray.rowstack` which uses
:func:`tabular.tabarray.tab_rowstack`.
**Parameters**
**seq** : sequence of numpy recarrays
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['nulls', 'commons', 'abort']
Denotes how to proceed if the recarrays have different
dtypes, e.g. different sets of named columns.
* if `mode` == ``nulls``, the resulting set of columns is
determined by the union of the dtypes of all recarrays
to be stacked, and missing data is filled with null
values as defined by
:func:`tabular.spreadsheet.nullvalue`; this is the
default mode.
* elif `mode` == ``commons``, the resulting set of
columns is determined by the intersection of the dtypes
of all recarrays to be stacked, e.g. common columns.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack have different dtypes.
**Returns**
**out** : numpy ndarray with structured dtype
Result of vertically stacking the arrays in `seq`.
**See also:** `numpy.vstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.vstack.html>`_.
'''
if nullvals is None:
nullvals = utils.DEFAULT_NULLVALUEFORMAT
#newseq = [ss for ss in seq if len(ss) > 0]
if len(seq) > 1:
assert mode in ['commons','nulls','abort'], \
('"mode" argument must either by "commons", "abort", or "nulls".')
if mode == 'abort':
if not all([set(l.dtype.names) == set(seq[0].dtype.names)
for l in seq]):
raise ValueError('Some column names are different.')
else:
mode = 'commons'
if mode == 'nulls':
names = utils.uniqify(utils.listunion([list(s.dtype.names)
for s in seq if s.dtype.names is not None]))
formats = [max([s.dtype[att] for s in seq if s.dtype.names is not None
and att in s.dtype.names]).str for att in names]
dtype = np.dtype(list(zip(names, formats)))
return utils.fromarrays([utils.listunion([s[att].tolist()
if (s.dtype.names is not None and att in s.dtype.names)
else [nullvals(format)] * len(s) for s in seq])
for (att, format) in zip(names, formats)], type=np.ndarray,
dtype=dtype)
elif mode == 'commons':
names = [x for x in seq[0].dtype.names
if all([x in l.dtype.names for l in seq[1:]])]
formats = [max([a.dtype[att] for a in seq]).str for att in names]
return utils.fromrecords(utils.listunion(
[ar.tolist() for ar in seq]), type=np.ndarray,
names=names, formats=formats)
else:
return seq[0] | python | def rowstack(seq, mode='nulls', nullvals=None):
'''
Vertically stack a sequence of numpy ndarrays with structured dtype
Analog of numpy.vstack
Implemented by the tabarray method
:func:`tabular.tab.tabarray.rowstack` which uses
:func:`tabular.tabarray.tab_rowstack`.
**Parameters**
**seq** : sequence of numpy recarrays
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['nulls', 'commons', 'abort']
Denotes how to proceed if the recarrays have different
dtypes, e.g. different sets of named columns.
* if `mode` == ``nulls``, the resulting set of columns is
determined by the union of the dtypes of all recarrays
to be stacked, and missing data is filled with null
values as defined by
:func:`tabular.spreadsheet.nullvalue`; this is the
default mode.
* elif `mode` == ``commons``, the resulting set of
columns is determined by the intersection of the dtypes
of all recarrays to be stacked, e.g. common columns.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack have different dtypes.
**Returns**
**out** : numpy ndarray with structured dtype
Result of vertically stacking the arrays in `seq`.
**See also:** `numpy.vstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.vstack.html>`_.
'''
if nullvals is None:
nullvals = utils.DEFAULT_NULLVALUEFORMAT
#newseq = [ss for ss in seq if len(ss) > 0]
if len(seq) > 1:
assert mode in ['commons','nulls','abort'], \
('"mode" argument must either by "commons", "abort", or "nulls".')
if mode == 'abort':
if not all([set(l.dtype.names) == set(seq[0].dtype.names)
for l in seq]):
raise ValueError('Some column names are different.')
else:
mode = 'commons'
if mode == 'nulls':
names = utils.uniqify(utils.listunion([list(s.dtype.names)
for s in seq if s.dtype.names is not None]))
formats = [max([s.dtype[att] for s in seq if s.dtype.names is not None
and att in s.dtype.names]).str for att in names]
dtype = np.dtype(list(zip(names, formats)))
return utils.fromarrays([utils.listunion([s[att].tolist()
if (s.dtype.names is not None and att in s.dtype.names)
else [nullvals(format)] * len(s) for s in seq])
for (att, format) in zip(names, formats)], type=np.ndarray,
dtype=dtype)
elif mode == 'commons':
names = [x for x in seq[0].dtype.names
if all([x in l.dtype.names for l in seq[1:]])]
formats = [max([a.dtype[att] for a in seq]).str for att in names]
return utils.fromrecords(utils.listunion(
[ar.tolist() for ar in seq]), type=np.ndarray,
names=names, formats=formats)
else:
return seq[0] | [
"def",
"rowstack",
"(",
"seq",
",",
"mode",
"=",
"'nulls'",
",",
"nullvals",
"=",
"None",
")",
":",
"if",
"nullvals",
"==",
"None",
":",
"nullvals",
"=",
"utils",
".",
"DEFAULT_NULLVALUEFORMAT",
"#newseq = [ss for ss in seq if len(ss) > 0]",
"if",
"len",
"(",
"seq",
")",
">",
"1",
":",
"assert",
"mode",
"in",
"[",
"'commons'",
",",
"'nulls'",
",",
"'abort'",
"]",
",",
"(",
"'\"mode\" argument must either by \"commons\", \"abort\", or \"nulls\".'",
")",
"if",
"mode",
"==",
"'abort'",
":",
"if",
"not",
"all",
"(",
"[",
"set",
"(",
"l",
".",
"dtype",
".",
"names",
")",
"==",
"set",
"(",
"seq",
"[",
"0",
"]",
".",
"dtype",
".",
"names",
")",
"for",
"l",
"in",
"seq",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'Some column names are different.'",
")",
"else",
":",
"mode",
"=",
"'commons'",
"if",
"mode",
"==",
"'nulls'",
":",
"names",
"=",
"utils",
".",
"uniqify",
"(",
"utils",
".",
"listunion",
"(",
"[",
"list",
"(",
"s",
".",
"dtype",
".",
"names",
")",
"for",
"s",
"in",
"seq",
"if",
"s",
".",
"dtype",
".",
"names",
"!=",
"None",
"]",
")",
")",
"formats",
"=",
"[",
"max",
"(",
"[",
"s",
".",
"dtype",
"[",
"att",
"]",
"for",
"s",
"in",
"seq",
"if",
"s",
".",
"dtype",
".",
"names",
"!=",
"None",
"and",
"att",
"in",
"s",
".",
"dtype",
".",
"names",
"]",
")",
".",
"str",
"for",
"att",
"in",
"names",
"]",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"zip",
"(",
"names",
",",
"formats",
")",
")",
"return",
"utils",
".",
"fromarrays",
"(",
"[",
"utils",
".",
"listunion",
"(",
"[",
"s",
"[",
"att",
"]",
".",
"tolist",
"(",
")",
"if",
"(",
"s",
".",
"dtype",
".",
"names",
"!=",
"None",
"and",
"att",
"in",
"s",
".",
"dtype",
".",
"names",
")",
"else",
"[",
"nullvals",
"(",
"format",
")",
"]",
"*",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"seq",
"]",
")",
"for",
"(",
"att",
",",
"format",
")",
"in",
"zip",
"(",
"names",
",",
"formats",
")",
"]",
",",
"type",
"=",
"np",
".",
"ndarray",
",",
"dtype",
"=",
"dtype",
")",
"elif",
"mode",
"==",
"'commons'",
":",
"names",
"=",
"[",
"x",
"for",
"x",
"in",
"seq",
"[",
"0",
"]",
".",
"dtype",
".",
"names",
"if",
"all",
"(",
"[",
"x",
"in",
"l",
".",
"dtype",
".",
"names",
"for",
"l",
"in",
"seq",
"[",
"1",
":",
"]",
"]",
")",
"]",
"formats",
"=",
"[",
"max",
"(",
"[",
"a",
".",
"dtype",
"[",
"att",
"]",
"for",
"a",
"in",
"seq",
"]",
")",
".",
"str",
"for",
"att",
"in",
"names",
"]",
"return",
"utils",
".",
"fromrecords",
"(",
"utils",
".",
"listunion",
"(",
"[",
"ar",
".",
"tolist",
"(",
")",
"for",
"ar",
"in",
"seq",
"]",
")",
",",
"type",
"=",
"np",
".",
"ndarray",
",",
"names",
"=",
"names",
",",
"formats",
"=",
"formats",
")",
"else",
":",
"return",
"seq",
"[",
"0",
"]"
] | Vertically stack a sequence of numpy ndarrays with structured dtype
Analog of numpy.vstack
Implemented by the tabarray method
:func:`tabular.tab.tabarray.rowstack` which uses
:func:`tabular.tabarray.tab_rowstack`.
**Parameters**
**seq** : sequence of numpy recarrays
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['nulls', 'commons', 'abort']
Denotes how to proceed if the recarrays have different
dtypes, e.g. different sets of named columns.
* if `mode` == ``nulls``, the resulting set of columns is
determined by the union of the dtypes of all recarrays
to be stacked, and missing data is filled with null
values as defined by
:func:`tabular.spreadsheet.nullvalue`; this is the
default mode.
* elif `mode` == ``commons``, the resulting set of
columns is determined by the intersection of the dtypes
of all recarrays to be stacked, e.g. common columns.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack have different dtypes.
**Returns**
**out** : numpy ndarray with structured dtype
Result of vertically stacking the arrays in `seq`.
**See also:** `numpy.vstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.vstack.html>`_. | [
"Vertically",
"stack",
"a",
"sequence",
"of",
"numpy",
"ndarrays",
"with",
"structured",
"dtype"
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/spreadsheet.py#L967-L1044 | train | 445 |
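A hypothetical rowstack call in 'nulls' mode (again assuming a working tabular install; the arrays are illustrative):

import numpy as np
from tabular import spreadsheet

A = np.rec.fromrecords([(1, 'x')], names=['id', 'tag'])
B = np.rec.fromrecords([(2, 0.5)], names=['id', 'score'])
C = spreadsheet.rowstack([A, B], mode='nulls')
# C should have columns id, tag, score; cells missing from A or B are
# filled with the per-dtype null value.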
yamins81/tabular | tabular/spreadsheet.py | colstack | def colstack(seq, mode='abort',returnnaming=False):
"""
Horizontally stack a sequence of numpy ndarrays with structured dtypes
Analog of numpy.hstack for recarrays.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.colstack` which uses
:func:`tabular.tabarray.tab_colstack`.
**Parameters**
**seq** : sequence of numpy ndarray with structured dtype
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['first','drop','abort','rename']
Denotes how to proceed when multiple recarrays share the
same column name:
* if `mode` == ``first``, take the column from the first
recarray in `seq` containing the shared column name.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack share column names; this is the
default mode.
* elif `mode` == ``drop``, drop any column that shares
its name with any other column among the sequence of
recarrays.
* elif `mode` == ``rename``, for any set of all columns
sharing the same name, rename all columns by appending
an underscore, '_', followed by an integer, starting
with '0' and incrementing by 1 for each subsequent
column.
**Returns**
**out** : numpy ndarray with structured dtype
Result of horizontally stacking the arrays in `seq`.
**See also:** `numpy.hstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.hstack.html>`_.
"""
assert mode in ['first','drop','abort','rename'], \
'mode argument must be one of "first", "drop", "rename", or "abort".'
AllNames = utils.uniqify(utils.listunion(
[list(l.dtype.names) for l in seq]))
NameList = [(x, [i for i in range(len(seq)) if x in seq[i].dtype.names])
for x in AllNames]
Commons = [x[0] for x in NameList if len(x[1]) > 1]
if len(Commons) > 0 or mode == 'first':
if mode == 'abort':
raise ValueError('There are common column names with differing ' +
'values in the columns')
elif mode == 'drop':
Names = [(L[0], x,x) for (x, L) in NameList if x not in Commons]
elif mode == 'rename':
NameDict = dict(NameList)
Names = utils.listunion([[(i,n,n) if len(NameDict[n]) == 1 else \
(i,n,n + '_' + str(i)) for n in s.dtype.names] \
for (i,s) in enumerate(seq)])
else:
Names = [(L[0], x,x) for (x, L) in NameList]
if returnnaming:
return utils.fromarrays([seq[i][x] for (i, x,y) in Names],
type=np.ndarray, names=list(zip(*Names))[2]), Names
else:
return utils.fromarrays([seq[i][x] for (i, x,y) in Names],
type=np.ndarray, names=list(zip(*Names))[2])
"""
Horizontally stack a sequence of numpy ndarrays with structured dtypes
Analog of numpy.hstack for recarrays.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.colstack` which uses
:func:`tabular.tabarray.tab_colstack`.
**Parameters**
**seq** : sequence of numpy ndarray with structured dtype
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['first','drop','abort','rename']
Denotes how to proceed when multiple recarrays share the
same column name:
* if `mode` == ``first``, take the column from the first
recarray in `seq` containing the shared column name.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack share column names; this is the
default mode.
* elif `mode` == ``drop``, drop any column that shares
its name with any other column among the sequence of
recarrays.
* elif `mode` == ``rename``, for any set of all columns
sharing the same name, rename all columns by appending
an underscore, '_', followed by an integer, starting
with '0' and incrementing by 1 for each subsequent
column.
**Returns**
**out** : numpy ndarray with structured dtype
Result of horizontally stacking the arrays in `seq`.
**See also:** `numpy.hstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.hstack.html>`_.
"""
assert mode in ['first','drop','abort','rename'], \
'mode argument must be one of "first", "drop", "rename", or "abort".'
AllNames = utils.uniqify(utils.listunion(
[list(l.dtype.names) for l in seq]))
NameList = [(x, [i for i in range(len(seq)) if x in seq[i].dtype.names])
for x in AllNames]
Commons = [x[0] for x in NameList if len(x[1]) > 1]
if len(Commons) > 0 or mode == 'first':
if mode == 'abort':
raise ValueError('There are common column names with differing ' +
'values in the columns')
elif mode == 'drop':
Names = [(L[0], x,x) for (x, L) in NameList if x not in Commons]
elif mode == 'rename':
NameDict = dict(NameList)
Names = utils.listunion([[(i,n,n) if len(NameDict[n]) == 1 else \
(i,n,n + '_' + str(i)) for n in s.dtype.names] \
for (i,s) in enumerate(seq)])
else:
Names = [(L[0], x,x) for (x, L) in NameList]
if returnnaming:
return utils.fromarrays([seq[i][x] for (i, x,y) in Names],
type=np.ndarray, names=list(zip(*Names))[2]), Names
else:
return utils.fromarrays([seq[i][x] for (i, x,y) in Names],
type=np.ndarray, names=list(zip(*Names))[2])
"def",
"colstack",
"(",
"seq",
",",
"mode",
"=",
"'abort'",
",",
"returnnaming",
"=",
"False",
")",
":",
"assert",
"mode",
"in",
"[",
"'first'",
",",
"'drop'",
",",
"'abort'",
",",
"'rename'",
"]",
",",
"'mode argument must take on value \"first\",\"drop\", \"rename\", or \"abort\".'",
"AllNames",
"=",
"utils",
".",
"uniqify",
"(",
"utils",
".",
"listunion",
"(",
"[",
"list",
"(",
"l",
".",
"dtype",
".",
"names",
")",
"for",
"l",
"in",
"seq",
"]",
")",
")",
"NameList",
"=",
"[",
"(",
"x",
",",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"seq",
")",
")",
"if",
"x",
"in",
"seq",
"[",
"i",
"]",
".",
"dtype",
".",
"names",
"]",
")",
"for",
"x",
"in",
"AllNames",
"]",
"Commons",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"NameList",
"if",
"len",
"(",
"x",
"[",
"1",
"]",
")",
">",
"1",
"]",
"if",
"len",
"(",
"Commons",
")",
">",
"0",
"or",
"mode",
"==",
"'first'",
":",
"if",
"mode",
"==",
"'abort'",
":",
"raise",
"ValueError",
"(",
"'There are common column names with differing '",
"+",
"'values in the columns'",
")",
"elif",
"mode",
"==",
"'drop'",
":",
"Names",
"=",
"[",
"(",
"L",
"[",
"0",
"]",
",",
"x",
",",
"x",
")",
"for",
"(",
"x",
",",
"L",
")",
"in",
"NameList",
"if",
"x",
"not",
"in",
"Commons",
"]",
"elif",
"mode",
"==",
"'rename'",
":",
"NameDict",
"=",
"dict",
"(",
"NameList",
")",
"Names",
"=",
"utils",
".",
"listunion",
"(",
"[",
"[",
"(",
"i",
",",
"n",
",",
"n",
")",
"if",
"len",
"(",
"NameDict",
"[",
"n",
"]",
")",
"==",
"1",
"else",
"(",
"i",
",",
"n",
",",
"n",
"+",
"'_'",
"+",
"str",
"(",
"i",
")",
")",
"for",
"n",
"in",
"s",
".",
"dtype",
".",
"names",
"]",
"for",
"(",
"i",
",",
"s",
")",
"in",
"enumerate",
"(",
"seq",
")",
"]",
")",
"else",
":",
"Names",
"=",
"[",
"(",
"L",
"[",
"0",
"]",
",",
"x",
",",
"x",
")",
"for",
"(",
"x",
",",
"L",
")",
"in",
"NameList",
"]",
"if",
"returnnaming",
":",
"return",
"utils",
".",
"fromarrays",
"(",
"[",
"seq",
"[",
"i",
"]",
"[",
"x",
"]",
"for",
"(",
"i",
",",
"x",
",",
"y",
")",
"in",
"Names",
"]",
",",
"type",
"=",
"np",
".",
"ndarray",
",",
"names",
"=",
"zip",
"(",
"*",
"Names",
")",
"[",
"2",
"]",
")",
",",
"Names",
"else",
":",
"return",
"utils",
".",
"fromarrays",
"(",
"[",
"seq",
"[",
"i",
"]",
"[",
"x",
"]",
"for",
"(",
"i",
",",
"x",
",",
"y",
")",
"in",
"Names",
"]",
",",
"type",
"=",
"np",
".",
"ndarray",
",",
"names",
"=",
"zip",
"(",
"*",
"Names",
")",
"[",
"2",
"]",
")"
] | Horizontally stack a sequence of numpy ndarrays with structured dtypes
Analog of numpy.hstack for recarrays.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.colstack` which uses
:func:`tabular.tabarray.tab_colstack`.
**Parameters**
**seq** : sequence of numpy ndarray with structured dtype
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['first','drop','abort','rename']
Denotes how to proceed when multiple recarrays share the
same column name:
* if `mode` == ``first``, take the column from the first
recarray in `seq` containing the shared column name.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack share column names; this is the
default mode.
* elif `mode` == ``drop``, drop any column that shares
its name with any other column among the sequence of
recarrays.
* elif `mode` == ``rename``, for any set of all columns
sharing the same name, rename all columns by appending
an underscore, '_', followed by an integer, starting
with '0' and incrementing by 1 for each subsequent
column.
**Returns**
**out** : numpy ndarray with structured dtype
Result of horizontally stacking the arrays in `seq`.
**See also:** `numpy.hstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.hstack.html>`_. | [
"Horizontally",
"stack",
"a",
"sequence",
"of",
"numpy",
"ndarrays",
"with",
"structured",
"dtypes"
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/spreadsheet.py#L1047-L1123 | train | 446 |
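A hypothetical colstack call showing the 'rename' policy; the suffix is the array's position in the input sequence:

import numpy as np
from tabular import spreadsheet

A = np.rec.fromrecords([(1, 'x')], names=['id', 'tag'])
B = np.rec.fromrecords([(9, 2.5)], names=['id', 'score'])
C = spreadsheet.colstack([A, B], mode='rename')
print(C.dtype.names)   # expected: ('id_0', 'tag', 'id_1', 'score')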
yamins81/tabular | tabular/spreadsheet.py | DEFAULT_RENAMER | def DEFAULT_RENAMER(L, Names=None):
"""
Renames overlapping column names of numpy ndarrays with structured dtypes
Rename the columns by using a simple convention:
* If `L` is a list, it will append the number in the list to the key
associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the overlapping
columns from that array.
Default renamer function used by :func:`tabular.spreadsheet.join`
**Parameters**
**L** : list or dictionary
Numpy recarrays with columns to be renamed.
**Returns**
**D** : dictionary of dictionaries
Dictionary mapping each input numpy recarray to a
dictionary mapping each original column name to its new
name following the convention above.
"""
if isinstance(L,dict):
Names = L.keys()
LL = L.values()
else:
if Names is None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
commons = Commons([l.dtype.names for l in LL])
D = {}
for (i,l) in zip(Names, LL):
d = {}
for c in commons:
if c in l.dtype.names:
d[c] = c + '_' + str(i)
if d:
D[i] = d
return D | python | def DEFAULT_RENAMER(L, Names=None):
"""
Renames overlapping column names of numpy ndarrays with structured dtypes
Rename the columns by using a simple convention:
* If `L` is a list, it will append the number in the list to the key
associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the overlapping
columns from that array.
Default renamer function used by :func:`tabular.spreadsheet.join`
**Parameters**
**L** : list or dictionary
Numpy recarrays with columns to be renamed.
**Returns**
**D** : dictionary of dictionaries
Dictionary mapping each input numpy recarray to a
dictionary mapping each original column name to its new
name following the convention above.
"""
if isinstance(L,dict):
Names = L.keys()
LL = L.values()
else:
if Names is None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
commons = Commons([l.dtype.names for l in LL])
D = {}
for (i,l) in zip(Names, LL):
d = {}
for c in commons:
if c in l.dtype.names:
d[c] = c + '_' + str(i)
if d:
D[i] = d
return D | [
"def",
"DEFAULT_RENAMER",
"(",
"L",
",",
"Names",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"L",
",",
"dict",
")",
":",
"Names",
"=",
"L",
".",
"keys",
"(",
")",
"LL",
"=",
"L",
".",
"values",
"(",
")",
"else",
":",
"if",
"Names",
"==",
"None",
":",
"Names",
"=",
"range",
"(",
"len",
"(",
"L",
")",
")",
"else",
":",
"assert",
"len",
"(",
"Names",
")",
"==",
"len",
"(",
"L",
")",
"LL",
"=",
"L",
"commons",
"=",
"Commons",
"(",
"[",
"l",
".",
"dtype",
".",
"names",
"for",
"l",
"in",
"LL",
"]",
")",
"D",
"=",
"{",
"}",
"for",
"(",
"i",
",",
"l",
")",
"in",
"zip",
"(",
"Names",
",",
"LL",
")",
":",
"d",
"=",
"{",
"}",
"for",
"c",
"in",
"commons",
":",
"if",
"c",
"in",
"l",
".",
"dtype",
".",
"names",
":",
"d",
"[",
"c",
"]",
"=",
"c",
"+",
"'_'",
"+",
"str",
"(",
"i",
")",
"if",
"d",
":",
"D",
"[",
"i",
"]",
"=",
"d",
"return",
"D"
] | Renames overlapping column names of numpy ndarrays with structured dtypes
Rename the columns by using a simple convention:
* If `L` is a list, it will append the number in the list to the key
associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the overlapping
columns from that array.
Default renamer function used by :func:`tabular.spreadsheet.join`
**Parameters**
**L** : list or dictionary
Numpy recarrays with columns to be renamed.
**Returns**
**D** : dictionary of dictionaries
Dictionary mapping each input numpy recarray to a
dictionary mapping each original column name to its new
name following the convention above. | [
"Renames",
"overlapping",
"column",
"names",
"of",
"numpy",
"ndarrays",
"with",
"structured",
"dtypes"
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/spreadsheet.py#L1520-L1571 | train | 447 |
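A sketch of what DEFAULT_RENAMER returns for two arrays that share one column (assuming tabular is importable):

import numpy as np
from tabular.spreadsheet import DEFAULT_RENAMER

A = np.rec.fromrecords([(1, 'x')], names=['id', 'tag'])
B = np.rec.fromrecords([(2, 0.5)], names=['id', 'score'])
print(DEFAULT_RENAMER([A, B]))
# expected: {0: {'id': 'id_0'}, 1: {'id': 'id_1'}}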
emirozer/bowshock | bowshock/helioviewer.py | getjp2image | def getjp2image(date,
sourceId=None,
observatory=None,
instrument=None,
detector=None,
measurement=None):
'''
Helioviewer.org and JHelioviewer operate off of JPEG2000 formatted image data generated from science-quality FITS files. Use the APIs below to interact directly with these intermediary JPEG2000 files.
Download a JP2 image for the specified datasource that is the closest match in time to the `date` requested.
Either `sourceId` must be specified, or the combination of `observatory`, `instrument`, `detector`, and `measurement`.
Request Parameters:
Parameter Required Type Example Description
date Required string 2014-01-01T23:59:59Z Desired date/time of the JP2 image. ISO 8601 combined date and time format in UTC.
sourceId Optional number 14 Unique image datasource identifier.
observatory Optional string SDO Observatory name.
instrument Optional string AIA Instrument name.
detector Optional string AIA Detector name.
measurement Optional string 335 Measurement name.
jpip Optional boolean false Optionally return a JPIP URI instead of the binary data of the image itself.
json Optional boolean false Optionally return a JSON object.
EXAMPLE: http://helioviewer.org/api/v1/getJP2Image/?date=2014-01-01T23:59:59Z&sourceId=14&jpip=true
'''
base_url = 'http://helioviewer.org/api/v1/getJP2Image/?'
req_url = ''
try:
validate_iso8601(date)
if not date[-1:] == 'Z':
date += 'Z'
base_url += 'date=' + date
except Exception:
raise ValueError(
"Your date input is not in iso8601 format. ex: 2014-01-01T23:59:59")
if sourceId:
if not isinstance(sourceId, int):
logger.error("The sourceId argument should be an int, ignoring it")
else:
base_url += "sourceId=" + str(sourceId) + "&"
if observatory:
if not isinstance(observatory, str):
logger.error(
"The observatory argument should be a str, ignoring it")
else:
base_url += "observatory=" + observatory + "&"
if instrument:
if not isinstance(instrument, str):
logger.error(
"The instrument argument should be a str, ignoring it")
else:
base_url += "instrument=" + instrument + "&"
if detector:
if not isinstance(detector, str):
logger.error("The detector argument should be a str, ignoring it")
else:
base_url += "detector=" + detector + "&"
if measurement:
if not isinstance(measurement, str):
logger.error(
"The measurement argument should be a str, ignoring it")
else:
base_url += "measurement=" + detector + "&"
req_url += base_url + "jpip=true"
return dispatch_http_get(req_url) | python | def getjp2image(date,
sourceId=None,
observatory=None,
instrument=None,
detector=None,
measurement=None):
'''
Helioviewer.org and JHelioviewer operate off of JPEG2000 formatted image data generated from science-quality FITS files. Use the APIs below to interact directly with these intermediary JPEG2000 files.
Download a JP2 image for the specified datasource that is the closest match in time to the `date` requested.
Either `sourceId` must be specified, or the combination of `observatory`, `instrument`, `detector`, and `measurement`.
Request Parameters:
Parameter Required Type Example Description
date Required string 2014-01-01T23:59:59Z Desired date/time of the JP2 image. ISO 8601 combined date and time format in UTC.
sourceId Optional number 14 Unique image datasource identifier.
observatory Optional string SDO Observatory name.
instrument Optional string AIA Instrument name.
detector Optional string AIA Detector name.
measurement Optional string 335 Measurement name.
jpip Optional boolean false Optionally return a JPIP URI instead of the binary data of the image itself.
json Optional boolean false Optionally return a JSON object.
EXAMPLE: http://helioviewer.org/api/v1/getJP2Image/?date=2014-01-01T23:59:59Z&sourceId=14&jpip=true
'''
base_url = 'http://helioviewer.org/api/v1/getJP2Image/?'
req_url = ''
try:
validate_iso8601(date)
if not date[-1:] == 'Z':
date += 'Z'
base_url += 'date=' + date
except Exception:
raise ValueError(
"Your date input is not in iso8601 format. ex: 2014-01-01T23:59:59")
if sourceId:
if not isinstance(sourceId, int):
logger.error("The sourceId argument should be an int, ignoring it")
else:
base_url += "sourceId=" + str(sourceId) + "&"
if observatory:
if not isinstance(observatory, str):
logger.error(
"The observatory argument should be a str, ignoring it")
else:
base_url += "observatory=" + observatory + "&"
if instrument:
if not isinstance(instrument, str):
logger.error(
"The instrument argument should be a str, ignoring it")
else:
base_url += "instrument=" + instrument + "&"
if detector:
if not isinstance(detector, str):
logger.error("The detector argument should be a str, ignoring it")
else:
base_url += "detector=" + detector + "&"
if measurement:
if not isinstance(measurement, str):
logger.error(
"The measurement argument should be a str, ignoring it")
else:
base_url += "measurement=" + detector + "&"
req_url += base_url + "jpip=true"
return dispatch_http_get(req_url) | [
"def",
"getjp2image",
"(",
"date",
",",
"sourceId",
"=",
"None",
",",
"observatory",
"=",
"None",
",",
"instrument",
"=",
"None",
",",
"detector",
"=",
"None",
",",
"measurement",
"=",
"None",
")",
":",
"base_url",
"=",
"'http://helioviewer.org/api/v1/getJP2Image/?'",
"req_url",
"=",
"''",
"try",
":",
"validate_iso8601",
"(",
"date",
")",
"if",
"not",
"date",
"[",
"-",
"1",
":",
"]",
"==",
"'Z'",
":",
"date",
"+=",
"'Z'",
"base_url",
"+=",
"'date='",
"+",
"date",
"except",
":",
"raise",
"ValueError",
"(",
"\"Your date input is not in iso8601 format. ex: 2014-01-01T23:59:59\"",
")",
"if",
"sourceId",
":",
"if",
"not",
"isinstance",
"(",
"sourceId",
",",
"int",
")",
":",
"logger",
".",
"error",
"(",
"\"The sourceId argument should be an int, ignoring it\"",
")",
"else",
":",
"base_url",
"+=",
"\"sourceId=\"",
"+",
"str",
"(",
"sourceId",
")",
"+",
"\"&\"",
"if",
"observatory",
":",
"if",
"not",
"isinstance",
"(",
"observatory",
",",
"str",
")",
":",
"logger",
".",
"error",
"(",
"\"The observatory argument should be a str, ignoring it\"",
")",
"else",
":",
"base_url",
"+=",
"\"observatory=\"",
"+",
"observatory",
"+",
"\"&\"",
"if",
"instrument",
":",
"if",
"not",
"isinstance",
"(",
"instrument",
",",
"str",
")",
":",
"logger",
".",
"error",
"(",
"\"The instrument argument should be a str, ignoring it\"",
")",
"else",
":",
"base_url",
"+=",
"\"instrument=\"",
"+",
"instrument",
"+",
"\"&\"",
"if",
"detector",
":",
"if",
"not",
"isinstance",
"(",
"detector",
",",
"str",
")",
":",
"logger",
".",
"error",
"(",
"\"The detector argument should be a str, ignoring it\"",
")",
"else",
":",
"base_url",
"+=",
"\"detector=\"",
"+",
"detector",
"+",
"\"&\"",
"if",
"measurement",
":",
"if",
"not",
"isinstance",
"(",
"measurement",
",",
"str",
")",
":",
"logger",
".",
"error",
"(",
"\"The measurement argument should be a str, ignoring it\"",
")",
"else",
":",
"base_url",
"+=",
"\"measurement=\"",
"+",
"detector",
"+",
"\"&\"",
"req_url",
"+=",
"base_url",
"+",
"\"jpip=true\"",
"return",
"dispatch_http_get",
"(",
"req_url",
")"
] | Helioviewer.org and JHelioviewer operate off of JPEG2000 formatted image data generated from science-quality FITS files. Use the APIs below to interact directly with these intermediary JPEG2000 files.
Download a JP2 image for the specified datasource that is the closest match in time to the `date` requested.
Either `sourceId` must be specified, or the combination of `observatory`, `instrument`, `detector`, and `measurement`.
Request Parameters:
Parameter Required Type Example Description
date Required string 2014-01-01T23:59:59Z Desired date/time of the JP2 image. ISO 8601 combined date and time format in UTC.
sourceId Optional number 14 Unique image datasource identifier.
observatory Optional string SDO Observatory name.
instrument Optional string AIA Instrument name.
detector Optional string AIA Detector name.
measurement Optional string 335 Measurement name.
jpip Optional boolean false Optionally return a JPIP URI instead of the binary data of the image itself.
json Optional boolean false Optionally return a JSON object.
EXAMPLE: http://helioviewer.org/api/v1/getJP2Image/?date=2014-01-01T23:59:59Z&sourceId=14&jpip=true | [
"Helioviewer",
".",
"org",
"and",
"JHelioviewer",
"operate",
"off",
"of",
"JPEG2000",
"formatted",
"image",
"data",
"generated",
"from",
"science",
"-",
"quality",
"FITS",
"files",
".",
"Use",
"the",
"APIs",
"below",
"to",
"interact",
"directly",
"with",
"these",
"intermediary",
"JPEG2000",
"files",
".",
"Download",
"a",
"JP2",
"image",
"for",
"the",
"specified",
"datasource",
"that",
"is",
"the",
"closest",
"match",
"in",
"time",
"to",
"the",
"date",
"requested",
"."
] | 9f5e053f1d54995b833b83616f37c67178c3e840 | https://github.com/emirozer/bowshock/blob/9f5e053f1d54995b833b83616f37c67178c3e840/bowshock/helioviewer.py#L10-L85 | train | 448 |
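A minimal hypothetical call; it builds .../getJP2Image/?date=2014-01-01T23:59:59Z&sourceId=14&jpip=true and returns whatever dispatch_http_get yields (a JPIP URI here):

from bowshock import helioviewer

uri = helioviewer.getjp2image('2014-01-01T23:59:59', sourceId=14)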
hsolbrig/pyjsg | pyjsg/jsglib/loader.py | loads_loader | def loads_loader(load_module: types.ModuleType, pairs: Dict[str, str]) -> Optional[JSGValidateable]:
"""json loader objecthook
:param load_module: Module that contains the various types
:param pairs: key/value tuples (In our case, they are str/str)
:return: Instantiated JSGValidateable matching the supplied pairs
"""
cntxt = load_module._CONTEXT
# If the type element is a member of the JSON, load it
possible_type = pairs[cntxt.TYPE] if cntxt.TYPE in pairs else None
target_class = getattr(load_module, possible_type, None) if isinstance(possible_type, str) else None
if target_class:
return target_class(**pairs)
# See whether there are any exception types that are valid for the incoming data
for type_exception in cntxt.TYPE_EXCEPTIONS:
if not hasattr(load_module, type_exception):
raise ValueError(UNKNOWN_TYPE_EXCEPTION.format(type_exception))
target_class = getattr(load_module, type_exception)
target_strict = target_class._strict
target_class._strict = False
try:
rval = target_class(**pairs)
finally:
target_class._strict = target_strict
if is_valid(rval):
return rval
# If there is not a type variable and nothing fits, just load up the first (and perhaps only) exception
# It will later fail any is_valid tests
if not cntxt.TYPE and cntxt.TYPE_EXCEPTIONS:
return getattr(load_module, cntxt.TYPE_EXCEPTIONS[0])(**pairs)
if cntxt.TYPE in pairs:
raise ValueError(f'Unknown reference type: "{cntxt.TYPE}": "{pairs[cntxt.TYPE]}"')
else:
raise ValueError(f'Missing "{cntxt.TYPE}" element') | python | def loads_loader(load_module: types.ModuleType, pairs: Dict[str, str]) -> Optional[JSGValidateable]:
"""json loader objecthook
:param load_module: Module that contains the various types
:param pairs: key/value tuples (In our case, they are str/str)
:return: Instantiated JSGValidateable matching the supplied pairs
"""
cntxt = load_module._CONTEXT
# If the type element is a member of the JSON, load it
possible_type = pairs[cntxt.TYPE] if cntxt.TYPE in pairs else None
target_class = getattr(load_module, possible_type, None) if isinstance(possible_type, str) else None
if target_class:
return target_class(**pairs)
# See whether there are any exception types that are valid for the incoming data
for type_exception in cntxt.TYPE_EXCEPTIONS:
if not hasattr(load_module, type_exception):
raise ValueError(UNKNOWN_TYPE_EXCEPTION.format(type_exception))
target_class = getattr(load_module, type_exception)
target_strict = target_class._strict
target_class._strict = False
try:
rval = target_class(**pairs)
finally:
target_class._strict = target_strict
if is_valid(rval):
return rval
# If there is not a type variable and nothing fits, just load up the first (and perhaps only) exception
# It will later fail any is_valid tests
if not cntxt.TYPE and cntxt.TYPE_EXCEPTIONS:
return getattr(load_module, cntxt.TYPE_EXCEPTIONS[0])(**pairs)
if cntxt.TYPE in pairs:
raise ValueError(f'Unknown reference type: "{cntxt.TYPE}": "{pairs[cntxt.TYPE]}"')
else:
raise ValueError(f'Missing "{cntxt.TYPE}" element') | [
"def",
"loads_loader",
"(",
"load_module",
":",
"types",
".",
"ModuleType",
",",
"pairs",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
")",
"->",
"Optional",
"[",
"JSGValidateable",
"]",
":",
"cntxt",
"=",
"load_module",
".",
"_CONTEXT",
"# If the type element is a member of the JSON, load it",
"possible_type",
"=",
"pairs",
"[",
"cntxt",
".",
"TYPE",
"]",
"if",
"cntxt",
".",
"TYPE",
"in",
"pairs",
"else",
"None",
"target_class",
"=",
"getattr",
"(",
"load_module",
",",
"possible_type",
",",
"None",
")",
"if",
"isinstance",
"(",
"possible_type",
",",
"str",
")",
"else",
"None",
"if",
"target_class",
":",
"return",
"target_class",
"(",
"*",
"*",
"pairs",
")",
"# See whether there are any exception types that are valid for the incoming data",
"for",
"type_exception",
"in",
"cntxt",
".",
"TYPE_EXCEPTIONS",
":",
"if",
"not",
"hasattr",
"(",
"load_module",
",",
"type_exception",
")",
":",
"raise",
"ValueError",
"(",
"UNKNOWN_TYPE_EXCEPTION",
".",
"format",
"(",
"type_exception",
")",
")",
"target_class",
"=",
"getattr",
"(",
"load_module",
",",
"type_exception",
")",
"target_strict",
"=",
"target_class",
".",
"_strict",
"target_class",
".",
"_strict",
"=",
"False",
"try",
":",
"rval",
"=",
"target_class",
"(",
"*",
"*",
"pairs",
")",
"finally",
":",
"target_class",
".",
"_strict",
"=",
"target_strict",
"if",
"is_valid",
"(",
"rval",
")",
":",
"return",
"rval",
"# If there is not a type variable and nothing fits, just load up the first (and perhaps only) exception",
"# It will later fail any is_valid tests",
"if",
"not",
"cntxt",
".",
"TYPE",
"and",
"cntxt",
".",
"TYPE_EXCEPTIONS",
":",
"return",
"getattr",
"(",
"load_module",
",",
"cntxt",
".",
"TYPE_EXCEPTIONS",
"[",
"0",
"]",
")",
"(",
"*",
"*",
"pairs",
")",
"if",
"cntxt",
".",
"TYPE",
"in",
"pairs",
":",
"raise",
"ValueError",
"(",
"f'Unknown reference type: \"{cntxt.TYPE}\": \"{pairs[cntxt.TYPE]}\"'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"f'Missing \"{cntxt.TYPE}\" element'",
")"
] | json loader objecthook
:param load_module: Module that contains the various types
:param pairs: key/value tuples (In our case, they are str/str)
:return: Instantiated JSGValidateable matching the supplied pairs | [
"json",
"loader",
"objecthook"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/jsglib/loader.py#L17-L54 | train | 449 |
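loads_loader is plugged into json.loads as an object_hook; the mechanism itself can be seen with the standard library alone (the hook below is a toy stand-in, not pyjsg's logic):

import json

def hook(obj_dict):
    # json calls this for every decoded JSON object; whatever it returns
    # replaces the plain dict -- the same way loads_loader builds JSG objects.
    return type('Obj', (object,), obj_dict)()

obj = json.loads('{"type": "T", "name": "n"}', object_hook=hook)
print(obj.name)   # n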
hsolbrig/pyjsg | pyjsg/jsglib/loader.py | loads | def loads(s: str, load_module: types.ModuleType, **kwargs):
""" Convert a JSON string into a JSGObject
:param s: string representation of JSON document
:param load_module: module that contains declarations for types
:param kwargs: keyword arguments; see json.loads for details
:return: JSGObject representing the json string
"""
return json.loads(s, object_hook=lambda pairs: loads_loader(load_module, pairs), **kwargs) | python | def loads(s: str, load_module: types.ModuleType, **kwargs):
""" Convert a JSON string into a JSGObject
:param s: string representation of JSON document
:param load_module: module that contains declarations for types
:param kwargs: keyword arguments; see json.loads for details
:return: JSGObject representing the json string
"""
return json.loads(s, object_hook=lambda pairs: loads_loader(load_module, pairs), **kwargs) | [
"def",
"loads",
"(",
"s",
":",
"str",
",",
"load_module",
":",
"types",
".",
"ModuleType",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"json",
".",
"loads",
"(",
"s",
",",
"object_hook",
"=",
"lambda",
"pairs",
":",
"loads_loader",
"(",
"load_module",
",",
"pairs",
")",
",",
"*",
"*",
"kwargs",
")"
] | Convert a JSON string into a JSGObject
:param s: string representation of JSON document
:param load_module: module that contains declarations for types
:param kwargs: keyword arguments; see json.loads for details
:return: JSGObject representing the json string | [
"Convert",
"a",
"JSON",
"string",
"into",
"a",
"JSGObject"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/jsglib/loader.py#L57-L65 | train | 450 |
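A hypothetical round trip; 'myschema' stands in for any module produced by the pyjsg compiler (it must expose _CONTEXT and the declared classes):

from pyjsg.jsglib.loader import loads
import myschema   # assumed jsg-compiled module

person = loads('{"type": "Person", "name": "Ann"}', myschema)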
hsolbrig/pyjsg | pyjsg/jsglib/loader.py | load | def load(fp: Union[TextIO, str], load_module: types.ModuleType, **kwargs):
""" Convert a file name or file-like object containing stringified JSON into a JSGObject
:param fp: file name or file-like object to deserialize
:param load_module: module that contains declarations for types
:param kwargs: keyword arguments; see json.loads for details
:return: JSGObject representing the json string
"""
if isinstance(fp, str):
with open(fp) as f:
return loads(f.read(), load_module, **kwargs)
else:
return loads(fp.read(), load_module, **kwargs) | python | def load(fp: Union[TextIO, str], load_module: types.ModuleType, **kwargs):
""" Convert a file name or file-like object containing stringified JSON into a JSGObject
:param fp: file name or file-like object to deserialize
:param load_module: module that contains declarations for types
:param kwargs: keyword arguments; see json.loads for details
:return: JSGObject representing the json string
"""
if isinstance(fp, str):
with open(fp) as f:
return loads(f.read(), load_module, **kwargs)
else:
return loads(fp.read(), load_module, **kwargs) | [
"def",
"load",
"(",
"fp",
":",
"Union",
"[",
"TextIO",
",",
"str",
"]",
",",
"load_module",
":",
"types",
".",
"ModuleType",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"fp",
",",
"str",
")",
":",
"with",
"open",
"(",
"fp",
")",
"as",
"f",
":",
"return",
"loads",
"(",
"f",
".",
"read",
"(",
")",
",",
"load_module",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"loads",
"(",
"fp",
".",
"read",
"(",
")",
",",
"load_module",
",",
"*",
"*",
"kwargs",
")"
] | Convert a file name or file-like object containing stringified JSON into a JSGObject
:param fp: file name or file-like object to deserialize
:param load_module: module that contains declarations for types
:param kwargs: keyword arguments; see json.loads for details
:return: JSGObject representing the json string | [
"Convert",
"a",
"file",
"name",
"or",
"file",
"-",
"like",
"object",
"containing",
"stringified",
"JSON",
"into",
"a",
"JSGObject"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/jsglib/loader.py#L68-L80 | train | 451 |
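load accepts either a path or an open file object; both forms below are equivalent (same hypothetical 'myschema' module):

from pyjsg.jsglib.loader import load
import myschema   # assumed jsg-compiled module

person = load('person.json', myschema)    # by file name
with open('person.json') as f:
    person = load(f, myschema)            # or by open file object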
hsolbrig/pyjsg | pyjsg/jsglib/loader.py | isinstance_ | def isinstance_(x, A_tuple):
""" native isinstance_ with the test for typing.Union overridden """
if is_union(A_tuple):
return any(isinstance_(x, t) for t in A_tuple.__args__)
elif getattr(A_tuple, '__origin__', None) is not None:
return isinstance(x, A_tuple.__origin__)
else:
return isinstance(x, A_tuple) | python | def isinstance_(x, A_tuple):
""" native isinstance_ with the test for typing.Union overridden """
if is_union(A_tuple):
return any(isinstance_(x, t) for t in A_tuple.__args__)
elif getattr(A_tuple, '__origin__', None) is not None:
return isinstance(x, A_tuple.__origin__)
else:
return isinstance(x, A_tuple) | [
"def",
"isinstance_",
"(",
"x",
",",
"A_tuple",
")",
":",
"if",
"is_union",
"(",
"A_tuple",
")",
":",
"return",
"any",
"(",
"isinstance_",
"(",
"x",
",",
"t",
")",
"for",
"t",
"in",
"A_tuple",
".",
"__args__",
")",
"elif",
"getattr",
"(",
"A_tuple",
",",
"'__origin__'",
",",
"None",
")",
"is",
"not",
"None",
":",
"return",
"isinstance",
"(",
"x",
",",
"A_tuple",
".",
"__origin__",
")",
"else",
":",
"return",
"isinstance",
"(",
"x",
",",
"A_tuple",
")"
] | native isinstance_ with the test for typing.Union overridden | [
"native",
"isinstance_",
"with",
"the",
"test",
"for",
"typing",
".",
"Union",
"overridden"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/jsglib/loader.py#L83-L90 | train | 452 |
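isinstance_ can be exercised directly with typing constructs; a small self-contained check, assuming Python 3.7+ so that the is_union helper recorded later in this file behaves as shown:

from typing import List, Union

assert isinstance_(3, Union[int, str])        # each Union member is tested in turn
assert not isinstance_(3.5, Union[int, str])
assert isinstance_([1, 2], List[int])         # parameterized generic falls back to its origin, list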
hsolbrig/pyjsg | pyjsg/jsglib/loader.py | is_valid | def is_valid(obj: JSGValidateable, log: Optional[Union[TextIO, Logger]] = None) -> bool:
""" Determine whether obj is valid
:param obj: Object to validate
:param log: Logger to record validation failures. If absent, no information is recorded
"""
return obj._is_valid(log) | python | def is_valid(obj: JSGValidateable, log: Optional[Union[TextIO, Logger]] = None) -> bool:
""" Determine whether obj is valid
:param obj: Object to validate
:param log: Logger to record validation failures. If absent, no information is recorded
"""
return obj._is_valid(log) | [
"def",
"is_valid",
"(",
"obj",
":",
"JSGValidateable",
",",
"log",
":",
"Optional",
"[",
"Union",
"[",
"TextIO",
",",
"Logger",
"]",
"]",
"=",
"None",
")",
"->",
"bool",
":",
"return",
"obj",
".",
"_is_valid",
"(",
"log",
")"
] | Determine whether obj is valid
:param obj: Object to validate
:param log: Logger to record validation failures. If absent, no information is recorded | [
"Determine",
"whether",
"obj",
"is",
"valid"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/jsglib/loader.py#L93-L99 | train | 453 |
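is_valid simply delegates to the object's _is_valid method; a hedged sketch with an in-memory log, where obj stands for any JSGValidateable instance:

# from io import StringIO
# log = StringIO()
# if not is_valid(obj, log):                  # obj: any JSGValidateable instance
#     print("validation failed:", log.getvalue())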
cydrobolt/pifx | pifx/util.py | arg_tup_to_dict | def arg_tup_to_dict(argument_tuples):
"""Given a set of argument tuples, set their value in a data dictionary if not blank"""
data = dict()
for arg_name, arg_val in argument_tuples:
if arg_val is not None:
if arg_val is True:
arg_val = 'true'
elif arg_val is False:
arg_val = 'false'
data[arg_name] = arg_val
return data | python | def arg_tup_to_dict(argument_tuples):
"""Given a set of argument tuples, set their value in a data dictionary if not blank"""
data = dict()
for arg_name, arg_val in argument_tuples:
if arg_val is not None:
if arg_val is True:
arg_val = 'true'
elif arg_val is False:
arg_val = 'false'
data[arg_name] = arg_val
return data | [
"def",
"arg_tup_to_dict",
"(",
"argument_tuples",
")",
":",
"data",
"=",
"dict",
"(",
")",
"for",
"arg_name",
",",
"arg_val",
"in",
"argument_tuples",
":",
"if",
"arg_val",
"is",
"not",
"None",
":",
"if",
"arg_val",
"is",
"True",
":",
"arg_val",
"=",
"'true'",
"elif",
"arg_val",
"is",
"False",
":",
"arg_val",
"=",
"'false'",
"data",
"[",
"arg_name",
"]",
"=",
"arg_val",
"return",
"data"
] | Given a set of argument tuples, set their value in a data dictionary if not blank | [
"Given",
"a",
"set",
"of",
"argument",
"tuples",
"set",
"their",
"value",
"in",
"a",
"data",
"dictionary",
"if",
"not",
"blank"
] | c9de9c2695c3e6e72de4aa0de47b78fc13c457c3 | https://github.com/cydrobolt/pifx/blob/c9de9c2695c3e6e72de4aa0de47b78fc13c457c3/pifx/util.py#L32-L43 | train | 454 |
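A self-contained check of the documented behavior: None values are dropped, and booleans are lowercased to the strings the LIFX HTTP API expects.

args = [("power", True), ("color", "blue"), ("duration", None)]
assert arg_tup_to_dict(args) == {"power": "true", "color": "blue"}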
cydrobolt/pifx | pifx/util.py | handle_error | def handle_error(response):
"""Raise appropriate exceptions if necessary."""
status_code = response.status_code
if status_code not in A_OK_HTTP_CODES:
error_explanation = A_ERROR_HTTP_CODES.get(status_code)
raise_error = "{}: {}".format(status_code, error_explanation)
raise Exception(raise_error)
else:
return True | python | def handle_error(response):
"""Raise appropriate exceptions if necessary."""
status_code = response.status_code
if status_code not in A_OK_HTTP_CODES:
error_explanation = A_ERROR_HTTP_CODES.get(status_code)
raise_error = "{}: {}".format(status_code, error_explanation)
raise Exception(raise_error)
else:
return True | [
"def",
"handle_error",
"(",
"response",
")",
":",
"status_code",
"=",
"response",
".",
"status_code",
"if",
"status_code",
"not",
"in",
"A_OK_HTTP_CODES",
":",
"error_explanation",
"=",
"A_ERROR_HTTP_CODES",
".",
"get",
"(",
"status_code",
")",
"raise_error",
"=",
"\"{}: {}\"",
".",
"format",
"(",
"status_code",
",",
"error_explanation",
")",
"raise",
"Exception",
"(",
"raise_error",
")",
"else",
":",
"return",
"True"
] | Raise appropriate exceptions if necessary. | [
"Raise",
"appropriate",
"exceptions",
"if",
"necessary",
"."
] | c9de9c2695c3e6e72de4aa0de47b78fc13c457c3 | https://github.com/cydrobolt/pifx/blob/c9de9c2695c3e6e72de4aa0de47b78fc13c457c3/pifx/util.py#L54-L63 | train | 455 |
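handle_error only reads response.status_code, so any object with that attribute works for illustration; whether 200 and 404 appear in A_OK_HTTP_CODES and A_ERROR_HTTP_CODES is an assumption here:

class FakeResponse:                            # stand-in; real callers pass a requests.Response
    def __init__(self, status_code):
        self.status_code = status_code

assert handle_error(FakeResponse(200)) is True  # assuming 200 is an OK code
try:
    handle_error(FakeResponse(404))             # assuming 404 is an error code
except Exception as exc:
    print(exc)                                  # "404: <explanation from A_ERROR_HTTP_CODES>"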
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/base.py | _BaseAgent.open | async def open(self) -> '_BaseAgent':
"""
Context manager entry; open wallet.
For use when keeping agent open across multiple calls.
:return: current object
"""
LOGGER.debug('_BaseAgent.open >>>')
# Do not open pool independently: let relying party decide when to go on-line and off-line
await self.wallet.open()
LOGGER.debug('_BaseAgent.open <<<')
return self | python | async def open(self) -> '_BaseAgent':
"""
Context manager entry; open wallet.
For use when keeping agent open across multiple calls.
:return: current object
"""
LOGGER.debug('_BaseAgent.open >>>')
# Do not open pool independently: let relying party decide when to go on-line and off-line
await self.wallet.open()
LOGGER.debug('_BaseAgent.open <<<')
return self | [
"async",
"def",
"open",
"(",
"self",
")",
"->",
"'_BaseAgent'",
":",
"LOGGER",
".",
"debug",
"(",
"'_BaseAgent.open >>>'",
")",
"# Do not open pool independently: let relying party decide when to go on-line and off-line",
"await",
"self",
".",
"wallet",
".",
"open",
"(",
")",
"LOGGER",
".",
"debug",
"(",
"'_BaseAgent.open <<<'",
")",
"return",
"self"
] | Context manager entry; open wallet.
For use when keeping agent open across multiple calls.
:return: current object | [
"Context",
"manager",
"entry",
";",
"open",
"wallet",
".",
"For",
"use",
"when",
"keeping",
"agent",
"open",
"across",
"multiple",
"calls",
"."
] | 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/base.py#L129-L143 | train | 456 |
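A hedged sketch of the documented usage pattern; the agent construction and the close() counterpart are assumptions, not shown in this record:

# agent = await SomeAgent(wallet).open()       # wallet opens; open() returns self, so it chains
# ... several ledger calls on the open agent ...
# await agent.close()                          # assumed counterpart of the context-manager pair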
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/base.py | _BaseAgent._get_rev_reg_def | async def _get_rev_reg_def(self, rr_id: str) -> str:
"""
Get revocation registry definition from ledger by its identifier. Raise AbsentRevReg
for no such revocation registry, logging any error condition and raising BadLedgerTxn
on bad request.
Retrieve the revocation registry definition from the agent's revocation cache if it has it;
cache it en passant if it does not (and such a revocation registry definition exists on the ledger).
:param rr_id: (revocation registry) identifier string, of the format
'<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:<tag>:CL_ACCUM:<tag>'
:return: revocation registry definition json as retrieved from ledger
"""
LOGGER.debug('_BaseAgent._get_rev_reg_def >>> rr_id: %s', rr_id)
rv_json = json.dumps({})
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
rr_def = revo_cache_entry.rev_reg_def if revo_cache_entry else None
if rr_def:
LOGGER.info('_BaseAgent._get_rev_reg_def: rev reg def for %s from cache', rr_id)
rv_json = json.dumps(rr_def)
else:
get_rrd_req_json = await ledger.build_get_revoc_reg_def_request(self.did, rr_id)
resp_json = await self._submit(get_rrd_req_json)
try:
(_, rv_json) = await ledger.parse_get_revoc_reg_def_response(resp_json)
rr_def = json.loads(rv_json)
except IndyError: # ledger replied, but there is no such rev reg
LOGGER.debug('_BaseAgent._get_rev_reg_def: <!< no rev reg exists on %s', rr_id)
raise AbsentRevReg('No rev reg exists on {}'.format(rr_id))
if revo_cache_entry is None:
REVO_CACHE[rr_id] = RevoCacheEntry(rr_def, None)
else:
REVO_CACHE[rr_id].rev_reg_def = rr_def
LOGGER.debug('_BaseAgent._get_rev_reg_def <<< %s', rv_json)
return rv_json | python | async def _get_rev_reg_def(self, rr_id: str) -> str:
"""
Get revocation registry definition from ledger by its identifier. Raise AbsentRevReg
for no such revocation registry, logging any error condition and raising BadLedgerTxn
on bad request.
Retrieve the revocation registry definition from the agent's revocation cache if it has it;
cache it en passant if it does not (and such a revocation registry definition exists on the ledger).
:param rr_id: (revocation registry) identifier string, of the format
'<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:<tag>:CL_ACCUM:<tag>'
:return: revocation registry definition json as retrieved from ledger
"""
LOGGER.debug('_BaseAgent._get_rev_reg_def >>> rr_id: %s', rr_id)
rv_json = json.dumps({})
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
rr_def = revo_cache_entry.rev_reg_def if revo_cache_entry else None
if rr_def:
LOGGER.info('_BaseAgent._get_rev_reg_def: rev reg def for %s from cache', rr_id)
rv_json = json.dumps(rr_def)
else:
get_rrd_req_json = await ledger.build_get_revoc_reg_def_request(self.did, rr_id)
resp_json = await self._submit(get_rrd_req_json)
try:
(_, rv_json) = await ledger.parse_get_revoc_reg_def_response(resp_json)
rr_def = json.loads(rv_json)
except IndyError: # ledger replied, but there is no such rev reg
LOGGER.debug('_BaseAgent._get_rev_reg_def: <!< no rev reg exists on %s', rr_id)
raise AbsentRevReg('No rev reg exists on {}'.format(rr_id))
if revo_cache_entry is None:
REVO_CACHE[rr_id] = RevoCacheEntry(rr_def, None)
else:
REVO_CACHE[rr_id].rev_reg_def = rr_def
LOGGER.debug('_BaseAgent._get_rev_reg_def <<< %s', rv_json)
return rv_json | [
"async",
"def",
"_get_rev_reg_def",
"(",
"self",
",",
"rr_id",
":",
"str",
")",
"->",
"str",
":",
"LOGGER",
".",
"debug",
"(",
"'_BaseAgent._get_rev_reg_def >>> rr_id: %s'",
",",
"rr_id",
")",
"rv_json",
"=",
"json",
".",
"dumps",
"(",
"{",
"}",
")",
"with",
"REVO_CACHE",
".",
"lock",
":",
"revo_cache_entry",
"=",
"REVO_CACHE",
".",
"get",
"(",
"rr_id",
",",
"None",
")",
"rr_def",
"=",
"revo_cache_entry",
".",
"rev_reg_def",
"if",
"revo_cache_entry",
"else",
"None",
"if",
"rr_def",
":",
"LOGGER",
".",
"info",
"(",
"'_BaseAgent._get_rev_reg_def: rev reg def for %s from cache'",
",",
"rr_id",
")",
"rv_json",
"=",
"json",
".",
"dumps",
"(",
"rr_def",
")",
"else",
":",
"get_rrd_req_json",
"=",
"await",
"ledger",
".",
"build_get_revoc_reg_def_request",
"(",
"self",
".",
"did",
",",
"rr_id",
")",
"resp_json",
"=",
"await",
"self",
".",
"_submit",
"(",
"get_rrd_req_json",
")",
"try",
":",
"(",
"_",
",",
"rv_json",
")",
"=",
"await",
"ledger",
".",
"parse_get_revoc_reg_def_response",
"(",
"resp_json",
")",
"rr_def",
"=",
"json",
".",
"loads",
"(",
"rv_json",
")",
"except",
"IndyError",
":",
"# ledger replied, but there is no such rev reg",
"LOGGER",
".",
"debug",
"(",
"'_BaseAgent._get_rev_reg_def: <!< no rev reg exists on %s'",
",",
"rr_id",
")",
"raise",
"AbsentRevReg",
"(",
"'No rev reg exists on {}'",
".",
"format",
"(",
"rr_id",
")",
")",
"if",
"revo_cache_entry",
"is",
"None",
":",
"REVO_CACHE",
"[",
"rr_id",
"]",
"=",
"RevoCacheEntry",
"(",
"rr_def",
",",
"None",
")",
"else",
":",
"REVO_CACHE",
"[",
"rr_id",
"]",
".",
"rev_reg_def",
"=",
"rr_def",
"LOGGER",
".",
"debug",
"(",
"'_BaseAgent._get_rev_reg_def <<< %s'",
",",
"rv_json",
")",
"return",
"rv_json"
] | Get revocation registry definition from ledger by its identifier. Raise AbsentRevReg
for no such revocation registry, logging any error condition and raising BadLedgerTxn
on bad request.
Retrieve the revocation registry definition from the agent's revocation cache if it has it;
cache it en passant if it does not (and such a revocation registry definition exists on the ledger).
:param rr_id: (revocation registry) identifier string, of the format
'<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:<tag>:CL_ACCUM:<tag>'
:return: revocation registry definition json as retrieved from ledger | [
"Get",
"revocation",
"registry",
"definition",
"from",
"ledger",
"by",
"its",
"identifier",
".",
"Raise",
"AbsentRevReg",
"for",
"no",
"such",
"revocation",
"registry",
"logging",
"any",
"error",
"condition",
"and",
"raising",
"BadLedgerTxn",
"on",
"bad",
"request",
"."
] | 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/base.py#L289-L329 | train | 457 |
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/base.py | _BaseAgent.get_cred_def | async def get_cred_def(self, cd_id: str) -> str:
"""
Get credential definition from ledger by its identifier.
Raise AbsentCredDef for no such credential definition, logging any error condition and raising
BadLedgerTxn on bad request. Raise ClosedPool if cred def not in cache and pool is closed.
Retrieve the credential definition from the agent's credential definition cache if it has it; cache it
en passant if it does not (and if there is a corresponding credential definition on the ledger).
:param cd_id: (credential definition) identifier string ('<issuer-did>:3:CL:<schema-seq-no>:<tag>')
:return: credential definition json as retrieved from ledger, empty production for no such cred def
"""
LOGGER.debug('_BaseAgent.get_cred_def >>> cd_id: %s', cd_id)
rv_json = json.dumps({})
with CRED_DEF_CACHE.lock:
if cd_id in CRED_DEF_CACHE:
LOGGER.info('_BaseAgent.get_cred_def: got cred def for %s from cache', cd_id)
rv_json = json.dumps(CRED_DEF_CACHE[cd_id])
LOGGER.debug('_BaseAgent.get_cred_def <<< %s', rv_json)
return rv_json
req_json = await ledger.build_get_cred_def_request(self.did, cd_id)
resp_json = await self._submit(req_json)
resp = json.loads(resp_json)
if not ('result' in resp and resp['result'].get('data', None)):
LOGGER.debug('_BaseAgent.get_cred_def: <!< no cred def exists on %s', cd_id)
raise AbsentCredDef('No cred def exists on {}'.format(cd_id))
try:
(_, rv_json) = await ledger.parse_get_cred_def_response(resp_json)
except IndyError: # ledger replied, but there is no such cred def
LOGGER.debug('_BaseAgent.get_cred_def: <!< no cred def exists on %s', cd_id)
raise AbsentCredDef('No cred def exists on {}'.format(cd_id))
CRED_DEF_CACHE[cd_id] = json.loads(rv_json)
LOGGER.info('_BaseAgent.get_cred_def: got cred def %s from ledger', cd_id)
LOGGER.debug('_BaseAgent.get_cred_def <<< %s', rv_json)
return rv_json | python | async def get_cred_def(self, cd_id: str) -> str:
"""
Get credential definition from ledger by its identifier.
Raise AbsentCredDef for no such credential definition, logging any error condition and raising
BadLedgerTxn on bad request. Raise ClosedPool if cred def not in cache and pool is closed.
Retrieve the credential definition from the agent's credential definition cache if it has it; cache it
en passant if it does not (and if there is a corresponding credential definition on the ledger).
:param cd_id: (credential definition) identifier string ('<issuer-did>:3:CL:<schema-seq-no>:<tag>')
:return: credential definition json as retrieved from ledger, empty production for no such cred def
"""
LOGGER.debug('_BaseAgent.get_cred_def >>> cd_id: %s', cd_id)
rv_json = json.dumps({})
with CRED_DEF_CACHE.lock:
if cd_id in CRED_DEF_CACHE:
LOGGER.info('_BaseAgent.get_cred_def: got cred def for %s from cache', cd_id)
rv_json = json.dumps(CRED_DEF_CACHE[cd_id])
LOGGER.debug('_BaseAgent.get_cred_def <<< %s', rv_json)
return rv_json
req_json = await ledger.build_get_cred_def_request(self.did, cd_id)
resp_json = await self._submit(req_json)
resp = json.loads(resp_json)
if not ('result' in resp and resp['result'].get('data', None)):
LOGGER.debug('_BaseAgent.get_cred_def: <!< no cred def exists on %s', cd_id)
raise AbsentCredDef('No cred def exists on {}'.format(cd_id))
try:
(_, rv_json) = await ledger.parse_get_cred_def_response(resp_json)
except IndyError: # ledger replied, but there is no such cred def
LOGGER.debug('_BaseAgent.get_cred_def: <!< no cred def exists on %s', cd_id)
raise AbsentCredDef('No cred def exists on {}'.format(cd_id))
CRED_DEF_CACHE[cd_id] = json.loads(rv_json)
LOGGER.info('_BaseAgent.get_cred_def: got cred def %s from ledger', cd_id)
LOGGER.debug('_BaseAgent.get_cred_def <<< %s', rv_json)
return rv_json | [
"async",
"def",
"get_cred_def",
"(",
"self",
",",
"cd_id",
":",
"str",
")",
"->",
"str",
":",
"LOGGER",
".",
"debug",
"(",
"'_BaseAgent.get_cred_def >>> cd_id: %s'",
",",
"cd_id",
")",
"rv_json",
"=",
"json",
".",
"dumps",
"(",
"{",
"}",
")",
"with",
"CRED_DEF_CACHE",
".",
"lock",
":",
"if",
"cd_id",
"in",
"CRED_DEF_CACHE",
":",
"LOGGER",
".",
"info",
"(",
"'_BaseAgent.get_cred_def: got cred def for %s from cache'",
",",
"cd_id",
")",
"rv_json",
"=",
"json",
".",
"dumps",
"(",
"CRED_DEF_CACHE",
"[",
"cd_id",
"]",
")",
"LOGGER",
".",
"debug",
"(",
"'_BaseAgent.get_cred_def <<< %s'",
",",
"rv_json",
")",
"return",
"rv_json",
"req_json",
"=",
"await",
"ledger",
".",
"build_get_cred_def_request",
"(",
"self",
".",
"did",
",",
"cd_id",
")",
"resp_json",
"=",
"await",
"self",
".",
"_submit",
"(",
"req_json",
")",
"resp",
"=",
"json",
".",
"loads",
"(",
"resp_json",
")",
"if",
"not",
"(",
"'result'",
"in",
"resp",
"and",
"resp",
"[",
"'result'",
"]",
".",
"get",
"(",
"'data'",
",",
"None",
")",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'_BaseAgent.get_cred_def: <!< no cred def exists on %s'",
",",
"cd_id",
")",
"raise",
"AbsentCredDef",
"(",
"'No cred def exists on {}'",
".",
"format",
"(",
"cd_id",
")",
")",
"try",
":",
"(",
"_",
",",
"rv_json",
")",
"=",
"await",
"ledger",
".",
"parse_get_cred_def_response",
"(",
"resp_json",
")",
"except",
"IndyError",
":",
"# ledger replied, but there is no such cred def",
"LOGGER",
".",
"debug",
"(",
"'_BaseAgent.get_cred_def: <!< no cred def exists on %s'",
",",
"cd_id",
")",
"raise",
"AbsentCredDef",
"(",
"'No cred def exists on {}'",
".",
"format",
"(",
"cd_id",
")",
")",
"CRED_DEF_CACHE",
"[",
"cd_id",
"]",
"=",
"json",
".",
"loads",
"(",
"rv_json",
")",
"LOGGER",
".",
"info",
"(",
"'_BaseAgent.get_cred_def: got cred def %s from ledger'",
",",
"cd_id",
")",
"LOGGER",
".",
"debug",
"(",
"'_BaseAgent.get_cred_def <<< %s'",
",",
"rv_json",
")",
"return",
"rv_json"
] | Get credential definition from ledger by its identifier.
Raise AbsentCredDef for no such credential definition, logging any error condition and raising
BadLedgerTxn on bad request. Raise ClosedPool if cred def not in cache and pool is closed.
Retrieve the credential definition from the agent's credential definition cache if it has it; cache it
en passant if it does not (and if there is a corresponding credential definition on the ledger).
:param cd_id: (credential definition) identifier string ('<issuer-did>:3:CL:<schema-seq-no>:<tag>')
:return: credential definition json as retrieved from ledger, empty production for no such cred def | [
"Get",
"credential",
"definition",
"from",
"ledger",
"by",
"its",
"identifier",
"."
] | 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/base.py#L331-L371 | train | 458 |
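A hedged sketch of calling get_cred_def; the agent instance and the credential definition id are placeholders (the id format comes from the docstring), and the coroutine must run inside an event loop:

# import asyncio
# cd_id = "<issuer-did>:3:CL:<schema-seq-no>:<tag>"   # placeholder, format per docstring
# cred_def_json = asyncio.get_event_loop().run_until_complete(agent.get_cred_def(cd_id))
# # a second call with the same cd_id is served from CRED_DEF_CACHE, with no ledger round trip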
hsolbrig/pyjsg | pyjsg/jsglib/typing_patch_37.py | is_union | def is_union(etype) -> bool:
""" Determine whether etype is a Union """
return getattr(etype, '__origin__', None) is not None and \
getattr(etype.__origin__, '_name', None) and\
etype.__origin__._name == 'Union' | python | def is_union(etype) -> bool:
""" Determine whether etype is a Union """
return getattr(etype, '__origin__', None) is not None and \
getattr(etype.__origin__, '_name', None) and\
etype.__origin__._name == 'Union' | [
"def",
"is_union",
"(",
"etype",
")",
"->",
"bool",
":",
"return",
"getattr",
"(",
"etype",
",",
"'__origin__'",
",",
"None",
")",
"is",
"not",
"None",
"and",
"getattr",
"(",
"etype",
".",
"__origin__",
",",
"'_name'",
",",
"None",
")",
"and",
"etype",
".",
"__origin__",
".",
"_name",
"==",
"'Union'"
] | Determine whether etype is a Union | [
"Determine",
"whether",
"etype",
"is",
"a",
"Union"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/jsglib/typing_patch_37.py#L13-L17 | train | 459 |
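A self-contained check of is_union on Python 3.7+; note that Optional[X] is Union[X, None], so it is detected as a union too:

from typing import List, Optional, Union

assert is_union(Union[int, str])
assert is_union(Optional[int])                 # Optional[int] is Union[int, None]
assert not is_union(List[int])                 # has __origin__, but its _name is not 'Union'
assert not is_union(int)                       # plain classes have no __origin__ at all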
CybOXProject/mixbox | mixbox/fields.py | unset | def unset(entity, *types):
"""Unset the TypedFields on the input `entity`.
Args:
entity: A mixbox.Entity object.
*types: A variable-length list of TypedField subclasses. If not
provided, defaults to TypedField.
"""
if not types:
types = (TypedField,)
fields = list(entity._fields.keys())
remove = (x for x in fields if isinstance(x, types))
for field in remove:
del entity._fields[field] | python | def unset(entity, *types):
"""Unset the TypedFields on the input `entity`.
Args:
entity: A mixbox.Entity object.
*types: A variable-length list of TypedField subclasses. If not
provided, defaults to TypedField.
"""
if not types:
types = (TypedField,)
fields = list(entity._fields.keys())
remove = (x for x in fields if isinstance(x, types))
for field in remove:
del entity._fields[field] | [
"def",
"unset",
"(",
"entity",
",",
"*",
"types",
")",
":",
"if",
"not",
"types",
":",
"types",
"=",
"(",
"TypedField",
",",
")",
"fields",
"=",
"list",
"(",
"entity",
".",
"_fields",
".",
"keys",
"(",
")",
")",
"remove",
"=",
"(",
"x",
"for",
"x",
"in",
"fields",
"if",
"isinstance",
"(",
"x",
",",
"types",
")",
")",
"for",
"field",
"in",
"remove",
":",
"del",
"entity",
".",
"_fields",
"[",
"field",
"]"
] | Unset the TypedFields on the input `entity`.
Args:
entity: A mixbox.Entity object.
*types: A variable-length list of TypedField subclasses. If not
provided, defaults to TypedField. | [
"Unset",
"the",
"TypedFields",
"on",
"the",
"input",
"entity",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/fields.py#L17-L32 | train | 460 |
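A sketch of unset against a minimal Entity subclass; the Point class and its fields are illustrative assumptions, not from the source:

# from mixbox.entities import Entity
#
# class Point(Entity):                         # hypothetical Entity subclass
#     x = TypedField("x")
#     y = TypedField("y")
#
# p = Point()
# p.x, p.y = 1, 2
# unset(p)                  # no types given: defaults to (TypedField,), clearing both fields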
CybOXProject/mixbox | mixbox/fields.py | _matches | def _matches(field, params):
"""Return True if the input TypedField `field` contains instance attributes
that match the input parameters.
Args:
field: A TypedField instance.
params: A dictionary of TypedField instance attribute-to-value mappings.
Returns:
True if the input TypedField matches the input parameters.
"""
fieldattrs = six.iteritems(params)
return all(getattr(field, attr) == val for attr, val in fieldattrs) | python | def _matches(field, params):
"""Return True if the input TypedField `field` contains instance attributes
that match the input parameters.
Args:
field: A TypedField instance.
params: A dictionary of TypedField instance attribute-to-value mappings.
Returns:
True if the input TypedField matches the input parameters.
"""
fieldattrs = six.iteritems(params)
return all(getattr(field, attr) == val for attr, val in fieldattrs) | [
"def",
"_matches",
"(",
"field",
",",
"params",
")",
":",
"fieldattrs",
"=",
"six",
".",
"iteritems",
"(",
"params",
")",
"return",
"all",
"(",
"getattr",
"(",
"field",
",",
"attr",
")",
"==",
"val",
"for",
"attr",
",",
"val",
"in",
"fieldattrs",
")"
] | Return True if the input TypedField `field` contains instance attributes
that match the input parameters.
Args:
field: A TypedField instance.
params: A dictionary of TypedField instance attribute-to-value mappings.
Returns:
True if the input TypedField matches the input parameters. | [
"Return",
"True",
"if",
"the",
"input",
"TypedField",
"field",
"contains",
"instance",
"attributes",
"that",
"match",
"the",
"input",
"parameters",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/fields.py#L35-L47 | train | 461 |
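_matches requires every (attribute, value) pair in params to match the field; a hedged sketch, assuming a TypedField constructed with a name and a multiple flag:

# field = TypedField("Name", multiple=False)            # assumed constructor arguments
# _matches(field, {"name": "Name", "multiple": False})  # -> True
# _matches(field, {"name": "Other"})                    # -> False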
CybOXProject/mixbox | mixbox/fields.py | iterfields | def iterfields(klass):
"""Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
(class attribute name, TypedField instance) tuples.
"""
is_field = lambda x: isinstance(x, TypedField)
for name, field in inspect.getmembers(klass, predicate=is_field):
yield name, field | python | def iterfields(klass):
"""Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
(class attribute name, TypedField instance) tuples.
"""
is_field = lambda x: isinstance(x, TypedField)
for name, field in inspect.getmembers(klass, predicate=is_field):
yield name, field | [
"def",
"iterfields",
"(",
"klass",
")",
":",
"is_field",
"=",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"TypedField",
")",
"for",
"name",
",",
"field",
"in",
"inspect",
".",
"getmembers",
"(",
"klass",
",",
"predicate",
"=",
"is_field",
")",
":",
"yield",
"name",
",",
"field"
] | Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
(class attribute name, TypedField instance) tuples. | [
"Iterate",
"over",
"the",
"input",
"class",
"members",
"and",
"yield",
"its",
"TypedFields",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/fields.py#L50-L62 | train | 462 |
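Because iterfields uses inspect.getmembers on the class, results come back sorted by attribute name and include inherited TypedFields; a hypothetical sketch:

# class Address(Entity):                       # hypothetical Entity subclass
#     street = TypedField("Street")
#     city = TypedField("City")
#
# for attr_name, field in iterfields(Address):
#     print(attr_name, field.name)             # ("city", "City") then ("street", "Street")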
CybOXProject/mixbox | mixbox/fields.py | TypedField._clean | def _clean(self, value):
"""Validate and clean a candidate value for this field."""
if value is None:
return None
elif self.type_ is None:
return value
elif self.check_type(value):
return value
elif self.is_type_castable: # noqa
return self.type_(value)
error_fmt = "%s must be a %s, not a %s"
error = error_fmt % (self.name, self.type_, type(value))
raise TypeError(error) | python | def _clean(self, value):
"""Validate and clean a candidate value for this field."""
if value is None:
return None
elif self.type_ is None:
return value
elif self.check_type(value):
return value
elif self.is_type_castable: # noqa
return self.type_(value)
error_fmt = "%s must be a %s, not a %s"
error = error_fmt % (self.name, self.type_, type(value))
raise TypeError(error) | [
"def",
"_clean",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"elif",
"self",
".",
"type_",
"is",
"None",
":",
"return",
"value",
"elif",
"self",
".",
"check_type",
"(",
"value",
")",
":",
"return",
"value",
"elif",
"self",
".",
"is_type_castable",
":",
"# noqa",
"return",
"self",
".",
"type_",
"(",
"value",
")",
"error_fmt",
"=",
"\"%s must be a %s, not a %s\"",
"error",
"=",
"error_fmt",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"type_",
",",
"type",
"(",
"value",
")",
")",
"raise",
"TypeError",
"(",
"error",
")"
] | Validate and clean a candidate value for this field. | [
"Validate",
"and",
"clean",
"a",
"candidate",
"value",
"for",
"this",
"field",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/fields.py#L177-L190 | train | 463 |
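A sketch of the _clean decision ladder, assuming a field typed as int; the constructor keywords are assumptions:

# f = TypedField("age", type_=int)             # assumed signature
# f._clean(None)                               # -> None: null passes through
# f._clean(5)                                  # -> 5: already the right type
# f._clean("5")                                # -> int("5") == 5, but only when f.is_type_castable
# f._clean(object())                           # -> TypeError otherwise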
OpenTreeOfLife/peyotl | peyotl/collections_store/git_actions.py | TreeCollectionsGitAction.remove_collection | def remove_collection(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
"""Remove a collection
Given a collection_id, branch and optionally an
author, remove a collection on the given branch
and attribute the commit to author.
Returns the SHA of the commit on branch.
"""
if fourth_arg is None:
collection_id, branch_name, author = first_arg, sec_arg, third_arg
gh_user = branch_name.split('_collection_')[0]
parent_sha = self.get_master_sha()
else:
gh_user, collection_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
if commit_msg is None:
commit_msg = "Delete Collection '%s' via OpenTree API" % collection_id
return self._remove_document(gh_user, collection_id, parent_sha, author, commit_msg) | python | def remove_collection(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
"""Remove a collection
Given a collection_id, branch and optionally an
author, remove a collection on the given branch
and attribute the commit to author.
Returns the SHA of the commit on branch.
"""
if fourth_arg is None:
collection_id, branch_name, author = first_arg, sec_arg, third_arg
gh_user = branch_name.split('_collection_')[0]
parent_sha = self.get_master_sha()
else:
gh_user, collection_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
if commit_msg is None:
commit_msg = "Delete Collection '%s' via OpenTree API" % collection_id
return self._remove_document(gh_user, collection_id, parent_sha, author, commit_msg) | [
"def",
"remove_collection",
"(",
"self",
",",
"first_arg",
",",
"sec_arg",
",",
"third_arg",
",",
"fourth_arg",
"=",
"None",
",",
"commit_msg",
"=",
"None",
")",
":",
"if",
"fourth_arg",
"is",
"None",
":",
"collection_id",
",",
"branch_name",
",",
"author",
"=",
"first_arg",
",",
"sec_arg",
",",
"third_arg",
"gh_user",
"=",
"branch_name",
".",
"split",
"(",
"'_collection_'",
")",
"[",
"0",
"]",
"parent_sha",
"=",
"self",
".",
"get_master_sha",
"(",
")",
"else",
":",
"gh_user",
",",
"collection_id",
",",
"parent_sha",
",",
"author",
"=",
"first_arg",
",",
"sec_arg",
",",
"third_arg",
",",
"fourth_arg",
"if",
"commit_msg",
"is",
"None",
":",
"commit_msg",
"=",
"\"Delete Collection '%s' via OpenTree API\"",
"%",
"collection_id",
"return",
"self",
".",
"_remove_document",
"(",
"gh_user",
",",
"collection_id",
",",
"parent_sha",
",",
"author",
",",
"commit_msg",
")"
] | Remove a collection
Given a collection_id, branch and optionally an
author, remove a collection on the given branch
and attribute the commit to author.
Returns the SHA of the commit on branch. | [
"Remove",
"a",
"collection",
"Given",
"a",
"collection_id",
"branch",
"and",
"optionally",
"an",
"author",
"remove",
"a",
"collection",
"on",
"the",
"given",
"branch",
"and",
"attribute",
"the",
"commit",
"to",
"author",
".",
"Returns",
"the",
"SHA",
"of",
"the",
"commit",
"on",
"branch",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/collections_store/git_actions.py#L92-L107 | train | 464 |
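The two calling conventions described in the docstring, sketched with placeholder values; ga, the ids, the SHA and author are all illustrative:

# Three-positional legacy form: the branch name encodes the GitHub user and
# the parent SHA defaults to current master.
# ga.remove_collection("jdoe/my-trees", "jdoe_collection_my-trees", author)
#
# Four-positional form: the caller supplies user and parent SHA explicitly.
# ga.remove_collection("jdoe", "jdoe/my-trees", "<parent-sha>", author)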
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/verifier.py | Verifier.load_cache | async def load_cache(self, archive: bool = False) -> int:
"""
Load caches and archive enough to go offline and be able to verify proof
on content marked of interest in configuration.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:param archive: whether to archive caches to disk
:return: cache load event timestamp (epoch seconds)
"""
LOGGER.debug('Verifier.load_cache >>> archive: %s', archive)
rv = int(time())
for s_id in self.cfg.get('archive-on-close', {}).get('schema_id', {}):
with SCHEMA_CACHE.lock:
await self.get_schema(s_id)
for cd_id in self.cfg.get('archive-on-close', {}).get('cred_def_id', {}):
with CRED_DEF_CACHE.lock:
await self.get_cred_def(cd_id)
for rr_id in self.cfg.get('archive-on-close', {}).get('rev_reg_id', {}):
await self._get_rev_reg_def(rr_id)
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
if revo_cache_entry:
try:
await revo_cache_entry.get_state_json(self._build_rr_state_json, rv, rv)
except ClosedPool:
LOGGER.warning(
'Verifier %s is offline from pool %s, cannot update revo cache reg state for %s to %s',
self.wallet.name,
self.pool.name,
rr_id,
rv)
if archive:
Caches.archive(self.dir_cache)
LOGGER.debug('Verifier.load_cache <<< %s', rv)
return rv | python | async def load_cache(self, archive: bool = False) -> int:
"""
Load caches and archive enough to go offline and be able to verify proof
on content marked of interest in configuration.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:param archive: whether to archive caches to disk
:return: cache load event timestamp (epoch seconds)
"""
LOGGER.debug('Verifier.load_cache >>> archive: %s', archive)
rv = int(time())
for s_id in self.cfg.get('archive-on-close', {}).get('schema_id', {}):
with SCHEMA_CACHE.lock:
await self.get_schema(s_id)
for cd_id in self.cfg.get('archive-on-close', {}).get('cred_def_id', {}):
with CRED_DEF_CACHE.lock:
await self.get_cred_def(cd_id)
for rr_id in self.cfg.get('archive-on-close', {}).get('rev_reg_id', {}):
await self._get_rev_reg_def(rr_id)
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
if revo_cache_entry:
try:
await revo_cache_entry.get_state_json(self._build_rr_state_json, rv, rv)
except ClosedPool:
LOGGER.warning(
'Verifier %s is offline from pool %s, cannot update revo cache reg state for %s to %s',
self.wallet.name,
self.pool.name,
rr_id,
rv)
if archive:
Caches.archive(self.dir_cache)
LOGGER.debug('Verifier.load_cache <<< %s', rv)
return rv | [
"async",
"def",
"load_cache",
"(",
"self",
",",
"archive",
":",
"bool",
"=",
"False",
")",
"->",
"int",
":",
"LOGGER",
".",
"debug",
"(",
"'Verifier.load_cache >>> archive: %s'",
",",
"archive",
")",
"rv",
"=",
"int",
"(",
"time",
"(",
")",
")",
"for",
"s_id",
"in",
"self",
".",
"cfg",
".",
"get",
"(",
"'archive-on-close'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'schema_id'",
",",
"{",
"}",
")",
":",
"with",
"SCHEMA_CACHE",
".",
"lock",
":",
"await",
"self",
".",
"get_schema",
"(",
"s_id",
")",
"for",
"cd_id",
"in",
"self",
".",
"cfg",
".",
"get",
"(",
"'archive-on-close'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'cred_def_id'",
",",
"{",
"}",
")",
":",
"with",
"CRED_DEF_CACHE",
".",
"lock",
":",
"await",
"self",
".",
"get_cred_def",
"(",
"cd_id",
")",
"for",
"rr_id",
"in",
"self",
".",
"cfg",
".",
"get",
"(",
"'archive-on-close'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'rev_reg_id'",
",",
"{",
"}",
")",
":",
"await",
"self",
".",
"_get_rev_reg_def",
"(",
"rr_id",
")",
"with",
"REVO_CACHE",
".",
"lock",
":",
"revo_cache_entry",
"=",
"REVO_CACHE",
".",
"get",
"(",
"rr_id",
",",
"None",
")",
"if",
"revo_cache_entry",
":",
"try",
":",
"await",
"revo_cache_entry",
".",
"get_state_json",
"(",
"self",
".",
"_build_rr_state_json",
",",
"rv",
",",
"rv",
")",
"except",
"ClosedPool",
":",
"LOGGER",
".",
"warning",
"(",
"'Verifier %s is offline from pool %s, cannot update revo cache reg state for %s to %s'",
",",
"self",
".",
"wallet",
".",
"name",
",",
"self",
".",
"pool",
".",
"name",
",",
"rr_id",
",",
"rv",
")",
"if",
"archive",
":",
"Caches",
".",
"archive",
"(",
"self",
".",
"dir_cache",
")",
"LOGGER",
".",
"debug",
"(",
"'Verifier.load_cache <<< %s'",
",",
"rv",
")",
"return",
"rv"
] | Load caches and archive enough to go offline and be able to verify proof
on content marked of interest in configuration.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:param archive: whether to archive caches to disk
:return: cache load event timestamp (epoch seconds) | [
"Load",
"caches",
"and",
"archive",
"enough",
"to",
"go",
"offline",
"and",
"be",
"able",
"to",
"verify",
"proof",
"on",
"content",
"marked",
"of",
"interest",
"in",
"configuration",
"."
] | 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/verifier.py#L165-L204 | train | 465 |
inveniosoftware/invenio-communities | invenio_communities/permissions.py | _Permission.can | def can(self):
"""Grant permission if owner or admin."""
return str(current_user.get_id()) == str(self.community.id_user) or \
DynamicPermission(ActionNeed('admin-access')).can() | python | def can(self):
"""Grant permission if owner or admin."""
return str(current_user.get_id()) == str(self.community.id_user) or \
DynamicPermission(ActionNeed('admin-access')).can() | [
"def",
"can",
"(",
"self",
")",
":",
"return",
"str",
"(",
"current_user",
".",
"get_id",
"(",
")",
")",
"==",
"str",
"(",
"self",
".",
"community",
".",
"id_user",
")",
"or",
"DynamicPermission",
"(",
"ActionNeed",
"(",
"'admin-access'",
")",
")",
".",
"can",
"(",
")"
] | Grant permission if owner or admin. | [
"Grant",
"permission",
"if",
"owner",
"or",
"admin",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/permissions.py#L46-L49 | train | 466 |
yamins81/tabular | tabular/utils.py | listunion | def listunion(ListOfLists):
"""
Take the union of a list of lists.
Take a Python list of Python lists::
[[l11,l12, ...], [l21,l22, ...], ... , [ln1, ln2, ...]]
and return the aggregated list::
[l11,l12, ..., l21, l22 , ...]
For a list of two lists, e.g. `[a, b]`, this is like::
a.extend(b)
**Parameters**
**ListOfLists** : Python list
Python list of Python lists.
**Returns**
**u** : Python list
Python list created by taking the union of the
lists in `ListOfLists`.
"""
u = []
for s in ListOfLists:
if s != None:
u.extend(s)
return u | python | def listunion(ListOfLists):
"""
Take the union of a list of lists.
Take a Python list of Python lists::
[[l11,l12, ...], [l21,l22, ...], ... , [ln1, ln2, ...]]
and return the aggregated list::
[l11,l12, ..., l21, l22 , ...]
For a list of two lists, e.g. `[a, b]`, this is like::
a.extend(b)
**Parameters**
**ListOfLists** : Python list
Python list of Python lists.
**Returns**
**u** : Python list
Python list created by taking the union of the
lists in `ListOfLists`.
"""
u = []
for s in ListOfLists:
if s != None:
u.extend(s)
return u | [
"def",
"listunion",
"(",
"ListOfLists",
")",
":",
"u",
"=",
"[",
"]",
"for",
"s",
"in",
"ListOfLists",
":",
"if",
"s",
"!=",
"None",
":",
"u",
".",
"extend",
"(",
"s",
")",
"return",
"u"
] | Take the union of a list of lists.
Take a Python list of Python lists::
[[l11,l12, ...], [l21,l22, ...], ... , [ln1, ln2, ...]]
and return the aggregated list::
[l11,l12, ..., l21, l22 , ...]
For a list of two lists, e.g. `[a, b]`, this is like::
a.extend(b)
**Parameters**
**ListOfLists** : Python list
Python list of Python lists.
**Returns**
**u** : Python list
Python list created by taking the union of the
lists in `ListOfLists`. | [
"Take",
"the",
"union",
"of",
"a",
"list",
"of",
"lists",
"."
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/utils.py#L49-L83 | train | 467 |
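A runnable check of the documented aggregation; None sublists are skipped and duplicates are kept:

assert listunion([[1, 2], None, [3], [2]]) == [1, 2, 3, 2]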
yamins81/tabular | tabular/utils.py | DEFAULT_NULLVALUE | def DEFAULT_NULLVALUE(test):
"""
Returns a null value for each of various kinds of test values.
**Parameters**
**test** : bool, int, float or string
Value to test.
**Returns**
**null** : element in `[False, 0, 0.0, '']`
Null value corresponding to the given test value:
* if `test` is a `bool`, return `False`
* else if `test` is an `int`, return `0`
* else if `test` is a `float`, return `0.0`
* else `test` is a `str`, return `''`
"""
return False if isinstance(test,bool) \
else 0 if isinstance(test,int) \
else 0.0 if isinstance(test,float) \
else '' | python | def DEFAULT_NULLVALUE(test):
"""
Returns a null value for each of various kinds of test values.
**Parameters**
**test** : bool, int, float or string
Value to test.
**Returns**
**null** : element in `[False, 0, 0.0, '']`
Null value corresponding to the given test value:
* if `test` is a `bool`, return `False`
* else if `test` is an `int`, return `0`
* else if `test` is a `float`, return `0.0`
* else `test` is a `str`, return `''`
"""
return False if isinstance(test,bool) \
else 0 if isinstance(test,int) \
else 0.0 if isinstance(test,float) \
else '' | [
"def",
"DEFAULT_NULLVALUE",
"(",
"test",
")",
":",
"return",
"False",
"if",
"isinstance",
"(",
"test",
",",
"bool",
")",
"else",
"0",
"if",
"isinstance",
"(",
"test",
",",
"int",
")",
"else",
"0.0",
"if",
"isinstance",
"(",
"test",
",",
"float",
")",
"else",
"''"
] | Returns a null value for each of various kinds of test values.
**Parameters**
**test** : bool, int, float or string
Value to test.
**Returns**
**null** : element in `[False, 0, 0.0, '']`
Null value corresponding to the given test value:
* if `test` is a `bool`, return `False`
* else if `test` is an `int`, return `0`
* else if `test` is a `float`, return `0.0`
* else `test` is a `str`, return `''` | [
"Returns",
"a",
"null",
"value",
"for",
"each",
"of",
"various",
"kinds",
"of",
"test",
"values",
"."
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/utils.py#L369-L394 | train | 468 |
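A runnable check of each branch; the bool test must come first because bool is a subclass of int:

assert DEFAULT_NULLVALUE(True) is False
assert DEFAULT_NULLVALUE(7) == 0
assert DEFAULT_NULLVALUE(1.5) == 0.0
assert DEFAULT_NULLVALUE("abc") == ''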
hsolbrig/pyjsg | pyjsg/parser_impl/jsg_objectexpr_parser.py | JSGObjectExpr.as_python | def as_python(self, name: str) -> str:
""" Return the python representation of the class represented by this object """
if self._map_valuetype:
return self.map_as_python(name)
else:
return self.obj_as_python(name) | python | def as_python(self, name: str) -> str:
""" Return the python representation of the class represented by this object """
if self._map_valuetype:
return self.map_as_python(name)
else:
return self.obj_as_python(name) | [
"def",
"as_python",
"(",
"self",
",",
"name",
":",
"str",
")",
"->",
"str",
":",
"if",
"self",
".",
"_map_valuetype",
":",
"return",
"self",
".",
"map_as_python",
"(",
"name",
")",
"else",
":",
"return",
"self",
".",
"obj_as_python",
"(",
"name",
")"
] | Return the python representation of the class represented by this object | [
"Return",
"the",
"python",
"representation",
"of",
"the",
"class",
"represented",
"by",
"this",
"object"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_objectexpr_parser.py#L87-L92 | train | 469 |
hsolbrig/pyjsg | pyjsg/parser_impl/jsg_objectexpr_parser.py | JSGObjectExpr.members_entries | def members_entries(self, all_are_optional: bool=False) -> List[Tuple[str, str]]:
""" Return an ordered list of elements for the _members section
:param all_are_optional: True means we're in a choice situation so everything is optional
:return:
"""
rval = []
if self._members:
for member in self._members:
rval += member.members_entries(all_are_optional)
elif self._choices:
for choice in self._choices:
rval += self._context.reference(choice).members_entries(True)
else:
return []
return rval | python | def members_entries(self, all_are_optional: bool=False) -> List[Tuple[str, str]]:
""" Return an ordered list of elements for the _members section
:param all_are_optional: True means we're in a choice situation so everything is optional
:return:
"""
rval = []
if self._members:
for member in self._members:
rval += member.members_entries(all_are_optional)
elif self._choices:
for choice in self._choices:
rval += self._context.reference(choice).members_entries(True)
else:
return []
return rval | [
"def",
"members_entries",
"(",
"self",
",",
"all_are_optional",
":",
"bool",
"=",
"False",
")",
"->",
"List",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
":",
"rval",
"=",
"[",
"]",
"if",
"self",
".",
"_members",
":",
"for",
"member",
"in",
"self",
".",
"_members",
":",
"rval",
"+=",
"member",
".",
"members_entries",
"(",
"all_are_optional",
")",
"elif",
"self",
".",
"_choices",
":",
"for",
"choice",
"in",
"self",
".",
"_choices",
":",
"rval",
"+=",
"self",
".",
"_context",
".",
"reference",
"(",
"choice",
")",
".",
"members_entries",
"(",
"True",
")",
"else",
":",
"return",
"[",
"]",
"return",
"rval"
] | Return an ordered list of elements for the _members section
:param all_are_optional: True means we're in a choice situation so everything is optional
:return: | [
"Return",
"an",
"ordered",
"list",
"of",
"elements",
"for",
"the",
"_members",
"section"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_objectexpr_parser.py#L196-L211 | train | 470 |
OpenTreeOfLife/peyotl | peyotl/phylesystem/phylesystem_shard.py | _get_filtered_study_ids | def _get_filtered_study_ids(shard, include_aliases=False):
"""Optionally filters out aliases from standard doc-id list"""
from peyotl.phylesystem.helper import DIGIT_PATTERN
k = shard.get_doc_ids()
if shard.has_aliases and (not include_aliases):
x = []
for i in k:
if DIGIT_PATTERN.match(i) or ((len(i) > 1) and (i[-2] == '_')):
pass
else:
x.append(i)
return x | python | def _get_filtered_study_ids(shard, include_aliases=False):
"""Optionally filters out aliases from standard doc-id list"""
from peyotl.phylesystem.helper import DIGIT_PATTERN
k = shard.get_doc_ids()
if shard.has_aliases and (not include_aliases):
x = []
for i in k:
if DIGIT_PATTERN.match(i) or ((len(i) > 1) and (i[-2] == '_')):
pass
else:
x.append(i)
return x | [
"def",
"_get_filtered_study_ids",
"(",
"shard",
",",
"include_aliases",
"=",
"False",
")",
":",
"from",
"peyotl",
".",
"phylesystem",
".",
"helper",
"import",
"DIGIT_PATTERN",
"k",
"=",
"shard",
".",
"get_doc_ids",
"(",
")",
"if",
"shard",
".",
"has_aliases",
"and",
"(",
"not",
"include_aliases",
")",
":",
"x",
"=",
"[",
"]",
"for",
"i",
"in",
"k",
":",
"if",
"DIGIT_PATTERN",
".",
"match",
"(",
"i",
")",
"or",
"(",
"(",
"len",
"(",
"i",
")",
">",
"1",
")",
"and",
"(",
"i",
"[",
"-",
"2",
"]",
"==",
"'_'",
")",
")",
":",
"pass",
"else",
":",
"x",
".",
"append",
"(",
"i",
")",
"return",
"x"
] | Optionally filters out aliases from standard doc-id list | [
"Optionally",
"filters",
"out",
"aliases",
"from",
"standard",
"doc",
"-",
"id",
"list"
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/phylesystem_shard.py#L18-L29 | train | 471 |
OpenTreeOfLife/peyotl | peyotl/phylesystem/phylesystem_shard.py | PhylesystemShard._determine_next_study_id | def _determine_next_study_id(self):
"""Return the numeric part of the newest study_id
Checks out master branch as a side effect!
"""
if self._doc_counter_lock is None:
self._doc_counter_lock = Lock()
prefix = self._new_study_prefix
lp = len(prefix)
n = 0
# this function holds the lock for quite a while,
# but it is only called on the first instance
# of creating a new study
with self._doc_counter_lock:
with self._index_lock:
for k in self.study_index.keys():
if k.startswith(prefix):
try:
pn = int(k[lp:])
if pn > n:
n = pn
except:
pass
nsi_contents = self._read_master_branch_resource(self._id_minting_file, is_json=True)
if nsi_contents:
self._next_study_id = nsi_contents['next_study_id']
if self._next_study_id <= n:
m = 'next_study_id in {} is set lower than the ID of an existing study!'
m = m.format(self._id_minting_file)
raise RuntimeError(m)
else:
# legacy support for repo with no next_study_id.json file
self._next_study_id = n
self._advance_new_study_id() | python | def _determine_next_study_id(self):
"""Return the numeric part of the newest study_id
Checks out master branch as a side effect!
"""
if self._doc_counter_lock is None:
self._doc_counter_lock = Lock()
prefix = self._new_study_prefix
lp = len(prefix)
n = 0
# this function holds the lock for quite a while,
# but it is only called on the first instance
# of creating a new study
with self._doc_counter_lock:
with self._index_lock:
for k in self.study_index.keys():
if k.startswith(prefix):
try:
pn = int(k[lp:])
if pn > n:
n = pn
except:
pass
nsi_contents = self._read_master_branch_resource(self._id_minting_file, is_json=True)
if nsi_contents:
self._next_study_id = nsi_contents['next_study_id']
if self._next_study_id <= n:
m = 'next_study_id in {} is set lower than the ID of an existing study!'
m = m.format(self._id_minting_file)
raise RuntimeError(m)
else:
# legacy support for repo with no next_study_id.json file
self._next_study_id = n
self._advance_new_study_id() | [
"def",
"_determine_next_study_id",
"(",
"self",
")",
":",
"if",
"self",
".",
"_doc_counter_lock",
"is",
"None",
":",
"self",
".",
"_doc_counter_lock",
"=",
"Lock",
"(",
")",
"prefix",
"=",
"self",
".",
"_new_study_prefix",
"lp",
"=",
"len",
"(",
"prefix",
")",
"n",
"=",
"0",
"# this function holds the lock for quite awhile,",
"# but it only called on the first instance of",
"# of creating a new study",
"with",
"self",
".",
"_doc_counter_lock",
":",
"with",
"self",
".",
"_index_lock",
":",
"for",
"k",
"in",
"self",
".",
"study_index",
".",
"keys",
"(",
")",
":",
"if",
"k",
".",
"startswith",
"(",
"prefix",
")",
":",
"try",
":",
"pn",
"=",
"int",
"(",
"k",
"[",
"lp",
":",
"]",
")",
"if",
"pn",
">",
"n",
":",
"n",
"=",
"pn",
"except",
":",
"pass",
"nsi_contents",
"=",
"self",
".",
"_read_master_branch_resource",
"(",
"self",
".",
"_id_minting_file",
",",
"is_json",
"=",
"True",
")",
"if",
"nsi_contents",
":",
"self",
".",
"_next_study_id",
"=",
"nsi_contents",
"[",
"'next_study_id'",
"]",
"if",
"self",
".",
"_next_study_id",
"<=",
"n",
":",
"m",
"=",
"'next_study_id in {} is set lower than the ID of an existing study!'",
"m",
"=",
"m",
".",
"format",
"(",
"self",
".",
"_id_minting_file",
")",
"raise",
"RuntimeError",
"(",
"m",
")",
"else",
":",
"# legacy support for repo with no next_study_id.json file",
"self",
".",
"_next_study_id",
"=",
"n",
"self",
".",
"_advance_new_study_id",
"(",
")"
] | Return the numeric part of the newest study_id
Checks out master branch as a side effect! | [
"Return",
"the",
"numeric",
"part",
"of",
"the",
"newest",
"study_id"
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/phylesystem_shard.py#L246-L279 | train | 472 |
OpenTreeOfLife/peyotl | peyotl/phylesystem/phylesystem_shard.py | PhylesystemShard._advance_new_study_id | def _advance_new_study_id(self):
""" ASSUMES the caller holds the _doc_counter_lock !
Returns the current numeric part of the next study ID, advances
the counter to the next value, and stores that value in the
file in case the server is restarted.
"""
c = self._next_study_id
self._next_study_id = 1 + c
content = u'{"next_study_id": %d}\n' % self._next_study_id
# The content is JSON, but we hand-rolled the string above
# so that we can use it as a commit_msg
self._write_master_branch_resource(content,
self._id_minting_file,
commit_msg=content,
is_json=False)
return c | python | def _advance_new_study_id(self):
""" ASSUMES the caller holds the _doc_counter_lock !
Returns the current numeric part of the next study ID, advances
the counter to the next value, and stores that value in the
file in case the server is restarted.
"""
c = self._next_study_id
self._next_study_id = 1 + c
content = u'{"next_study_id": %d}\n' % self._next_study_id
# The content is JSON, but we hand-rolled the string above
# so that we can use it as a commit_msg
self._write_master_branch_resource(content,
self._id_minting_file,
commit_msg=content,
is_json=False)
return c | [
"def",
"_advance_new_study_id",
"(",
"self",
")",
":",
"c",
"=",
"self",
".",
"_next_study_id",
"self",
".",
"_next_study_id",
"=",
"1",
"+",
"c",
"content",
"=",
"u'{\"next_study_id\": %d}\\n'",
"%",
"self",
".",
"_next_study_id",
"# The content is JSON, but we hand-rolled the string above",
"# so that we can use it as a commit_msg",
"self",
".",
"_write_master_branch_resource",
"(",
"content",
",",
"self",
".",
"_id_minting_file",
",",
"commit_msg",
"=",
"content",
",",
"is_json",
"=",
"False",
")",
"return",
"c"
] | ASSUMES the caller holds the _doc_counter_lock !
Returns the current numeric part of the next study ID, advances
the counter to the next value, and stores that value in the
file in case the server is restarted. | [
"ASSUMES",
"the",
"caller",
"holds",
"the",
"_doc_counter_lock",
"!",
"Returns",
"the",
"current",
"numeric",
"part",
"of",
"the",
"next",
"study",
"ID",
"advances",
"the",
"counter",
"to",
"the",
"next",
"value",
"and",
"stores",
"that",
"value",
"in",
"the",
"file",
"in",
"case",
"the",
"server",
"is",
"restarted",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/phylesystem_shard.py#L281-L296 | train | 473 |
hsolbrig/pyjsg | pyjsg/parser_impl/parser_utils.py | flatten | def flatten(l: Iterable) -> List:
"""Return a list of all non-list items in l
:param l: list to be flattened
:return:
"""
rval = []
for e in l:
if not isinstance(e, str) and isinstance(e, Iterable):
if len(list(e)):
rval += flatten(e)
else:
rval.append(e)
return rval | python | def flatten(l: Iterable) -> List:
"""Return a list of all non-list items in l
:param l: list to be flattened
:return:
"""
rval = []
for e in l:
if not isinstance(e, str) and isinstance(e, Iterable):
if len(list(e)):
rval += flatten(e)
else:
rval.append(e)
return rval | [
"def",
"flatten",
"(",
"l",
":",
"Iterable",
")",
"->",
"List",
":",
"rval",
"=",
"[",
"]",
"for",
"e",
"in",
"l",
":",
"if",
"not",
"isinstance",
"(",
"e",
",",
"str",
")",
"and",
"isinstance",
"(",
"e",
",",
"Iterable",
")",
":",
"if",
"len",
"(",
"list",
"(",
"e",
")",
")",
":",
"rval",
"+=",
"flatten",
"(",
"e",
")",
"else",
":",
"rval",
".",
"append",
"(",
"e",
")",
"return",
"rval"
] | Return a list of all non-list items in l
:param l: list to be flattened
:return: | [
"Return",
"a",
"list",
"of",
"all",
"non",
"-",
"list",
"items",
"in",
"l"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/parser_utils.py#L14-L27 | train | 474 |
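A runnable check: strings are treated as atoms rather than iterables, and empty sublists simply disappear:

assert flatten([1, [2, [3, "ab"]], []]) == [1, 2, 3, "ab"]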
hsolbrig/pyjsg | pyjsg/parser_impl/parser_utils.py | flatten_unique | def flatten_unique(l: Iterable) -> List:
""" Return a list of UNIQUE non-list items in l """
rval = OrderedDict()
for e in l:
if not isinstance(e, str) and isinstance(e, Iterable):
for ev in flatten_unique(e):
rval[ev] = None
else:
rval[e] = None
return list(rval.keys()) | python | def flatten_unique(l: Iterable) -> List:
""" Return a list of UNIQUE non-list items in l """
rval = OrderedDict()
for e in l:
if not isinstance(e, str) and isinstance(e, Iterable):
for ev in flatten_unique(e):
rval[ev] = None
else:
rval[e] = None
return list(rval.keys()) | [
"def",
"flatten_unique",
"(",
"l",
":",
"Iterable",
")",
"->",
"List",
":",
"rval",
"=",
"OrderedDict",
"(",
")",
"for",
"e",
"in",
"l",
":",
"if",
"not",
"isinstance",
"(",
"e",
",",
"str",
")",
"and",
"isinstance",
"(",
"e",
",",
"Iterable",
")",
":",
"for",
"ev",
"in",
"flatten_unique",
"(",
"e",
")",
":",
"rval",
"[",
"ev",
"]",
"=",
"None",
"else",
":",
"rval",
"[",
"e",
"]",
"=",
"None",
"return",
"list",
"(",
"rval",
".",
"keys",
"(",
")",
")"
] | Return a list of UNIQUE non-list items in l | [
"Return",
"a",
"list",
"of",
"UNIQUE",
"non",
"-",
"list",
"items",
"in",
"l"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/parser_utils.py#L30-L39 | train | 475 |
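Same flattening, but duplicates collapse while first-seen order is kept (elements must be hashable):

assert flatten_unique([1, [2, 1], [[3], 2]]) == [1, 2, 3]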
hsolbrig/pyjsg | pyjsg/parser_impl/parser_utils.py | as_tokens | def as_tokens(ctx: List[ParserRuleContext]) -> List[str]:
"""Return a stringified list of identifiers in ctx
:param ctx: JSG parser item with a set of identifiers
:return:
"""
return [as_token(e) for e in ctx] | python | def as_tokens(ctx: List[ParserRuleContext]) -> List[str]:
"""Return a stringified list of identifiers in ctx
:param ctx: JSG parser item with a set of identifiers
:return:
"""
return [as_token(e) for e in ctx] | [
"def",
"as_tokens",
"(",
"ctx",
":",
"List",
"[",
"ParserRuleContext",
"]",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"[",
"as_token",
"(",
"e",
")",
"for",
"e",
"in",
"ctx",
"]"
] | Return a stringified list of identifiers in ctx
:param ctx: JSG parser item with a set of identifiers
:return: | [
"Return",
"a",
"stringified",
"list",
"of",
"identifiers",
"in",
"ctx"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/parser_utils.py#L85-L91 | train | 476 |
hsolbrig/pyjsg | pyjsg/parser_impl/parser_utils.py | is_valid_python | def is_valid_python(tkn: str) -> bool:
"""Determine whether tkn is a valid python identifier
:param tkn:
:return:
"""
try:
root = ast.parse(tkn)
except SyntaxError:
return False
return len(root.body) == 1 and isinstance(root.body[0], ast.Expr) and isinstance(root.body[0].value, ast.Name) | python | def is_valid_python(tkn: str) -> bool:
"""Determine whether tkn is a valid python identifier
:param tkn:
:return:
"""
try:
root = ast.parse(tkn)
except SyntaxError:
return False
return len(root.body) == 1 and isinstance(root.body[0], ast.Expr) and isinstance(root.body[0].value, ast.Name) | [
"def",
"is_valid_python",
"(",
"tkn",
":",
"str",
")",
"->",
"bool",
":",
"try",
":",
"root",
"=",
"ast",
".",
"parse",
"(",
"tkn",
")",
"except",
"SyntaxError",
":",
"return",
"False",
"return",
"len",
"(",
"root",
".",
"body",
")",
"==",
"1",
"and",
"isinstance",
"(",
"root",
".",
"body",
"[",
"0",
"]",
",",
"ast",
".",
"Expr",
")",
"and",
"isinstance",
"(",
"root",
".",
"body",
"[",
"0",
"]",
".",
"value",
",",
"ast",
".",
"Name",
")"
] | Determine whether tkn is a valid python identifier
:param tkn:
:return: | [
"Determine",
"whether",
"tkn",
"is",
"a",
"valid",
"python",
"identifier"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/parser_utils.py#L94-L104 | train | 477 |
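
A few illustrative calls (assuming `pyjsg` is installed); only a single bare name counts as a valid identifier:

    from pyjsg.parser_impl.parser_utils import is_valid_python

    assert is_valid_python("foo")        # bare identifier -> True
    assert not is_valid_python("class")  # keyword: SyntaxError -> False
    assert not is_valid_python("a.b")    # parses, but the value is an Attribute, not a Name
    assert not is_valid_python("f()")    # parses, but the value is a Call, not a Name
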
OpenTreeOfLife/peyotl | peyotl/phylesystem/git_actions.py | PhylesystemGitAction.remove_study | def remove_study(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
"""Remove a study
Given a study_id, branch and optionally an
author, remove a study on the given branch
and attribute the commit to author.
Returns the SHA of the commit on branch.
"""
if fourth_arg is None:
study_id, branch_name, author = first_arg, sec_arg, third_arg
gh_user = branch_name.split('_study_')[0]
parent_sha = self.get_master_sha()
else:
gh_user, study_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
if commit_msg is None:
commit_msg = "Delete Study #%s via OpenTree API" % study_id
return self._remove_document(gh_user, study_id, parent_sha, author, commit_msg) | python | def remove_study(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
"""Remove a study
Given a study_id, branch and optionally an
author, remove a study on the given branch
and attribute the commit to author.
Returns the SHA of the commit on branch.
"""
if fourth_arg is None:
study_id, branch_name, author = first_arg, sec_arg, third_arg
gh_user = branch_name.split('_study_')[0]
parent_sha = self.get_master_sha()
else:
gh_user, study_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
if commit_msg is None:
commit_msg = "Delete Study #%s via OpenTree API" % study_id
return self._remove_document(gh_user, study_id, parent_sha, author, commit_msg) | [
"def",
"remove_study",
"(",
"self",
",",
"first_arg",
",",
"sec_arg",
",",
"third_arg",
",",
"fourth_arg",
"=",
"None",
",",
"commit_msg",
"=",
"None",
")",
":",
"if",
"fourth_arg",
"is",
"None",
":",
"study_id",
",",
"branch_name",
",",
"author",
"=",
"first_arg",
",",
"sec_arg",
",",
"third_arg",
"gh_user",
"=",
"branch_name",
".",
"split",
"(",
"'_study_'",
")",
"[",
"0",
"]",
"parent_sha",
"=",
"self",
".",
"get_master_sha",
"(",
")",
"else",
":",
"gh_user",
",",
"study_id",
",",
"parent_sha",
",",
"author",
"=",
"first_arg",
",",
"sec_arg",
",",
"third_arg",
",",
"fourth_arg",
"if",
"commit_msg",
"is",
"None",
":",
"commit_msg",
"=",
"\"Delete Study #%s via OpenTree API\"",
"%",
"study_id",
"return",
"self",
".",
"_remove_document",
"(",
"gh_user",
",",
"study_id",
",",
"parent_sha",
",",
"author",
",",
"commit_msg",
")"
] | Remove a study
Given a study_id, branch and optionally an
author, remove a study on the given branch
and attribute the commit to author.
Returns the SHA of the commit on branch. | [
"Remove",
"a",
"study",
"Given",
"a",
"study_id",
"branch",
"and",
"optionally",
"an",
"author",
"remove",
"a",
"study",
"on",
"the",
"given",
"branch",
"and",
"attribute",
"the",
"commit",
"to",
"author",
".",
"Returns",
"the",
"SHA",
"of",
"the",
"commit",
"on",
"branch",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/git_actions.py#L108-L123 | train | 478 |
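
A hypothetical call sketch for `remove_study` (repo path, ids, author string, and the constructor arguments are all made up; a real phylesystem checkout is required). Both calling conventions from the branching above are shown:

    from peyotl.phylesystem.git_actions import PhylesystemGitAction

    ga = PhylesystemGitAction(repo="/path/to/phylesystem-1")   # constructor signature assumed
    # 3-arg form: the GitHub user is parsed out of the branch name
    sha = ga.remove_study("ot_123", "jdoe_study_ot_123", "Jane Doe <jane@example.org>")
    # 4-arg form: user and parent commit SHA are passed explicitly
    sha = ga.remove_study("jdoe", "ot_123", sha, "Jane Doe <jane@example.org>")
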
inveniosoftware/invenio-communities | invenio_communities/cli.py | init | def init():
"""Initialize the communities file storage."""
try:
initialize_communities_bucket()
click.secho('Community init successful.', fg='green')
except FilesException as e:
click.secho(e.message, fg='red') | python | def init():
"""Initialize the communities file storage."""
try:
initialize_communities_bucket()
click.secho('Community init successful.', fg='green')
except FilesException as e:
click.secho(e.message, fg='red') | [
"def",
"init",
"(",
")",
":",
"try",
":",
"initialize_communities_bucket",
"(",
")",
"click",
".",
"secho",
"(",
"'Community init successful.'",
",",
"fg",
"=",
"'green'",
")",
"except",
"FilesException",
"as",
"e",
":",
"click",
".",
"secho",
"(",
"e",
".",
"message",
",",
"fg",
"=",
"'red'",
")"
] | Initialize the communities file storage. | [
"Initialize",
"the",
"communities",
"file",
"storage",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/cli.py#L50-L56 | train | 479 |
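
Invoking the command through Click's test runner (a sketch; a configured Invenio application context is required for the bucket creation to actually succeed):

    from click.testing import CliRunner
    from invenio_communities.cli import init

    runner = CliRunner()
    result = runner.invoke(init)   # prints 'Community init successful.' on success
    print(result.output)
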
inveniosoftware/invenio-communities | invenio_communities/cli.py | addlogo | def addlogo(community_id, logo):
"""Add logo to the community."""
    # Fetch the target community
c = Community.get(community_id)
if not c:
click.secho('Community {0} does not exist.'.format(community_id),
fg='red')
return
ext = save_and_validate_logo(logo, logo.name, c.id)
c.logo_ext = ext
db.session.commit() | python | def addlogo(community_id, logo):
"""Add logo to the community."""
    # Fetch the target community
c = Community.get(community_id)
if not c:
click.secho('Community {0} does not exist.'.format(community_id),
fg='red')
return
ext = save_and_validate_logo(logo, logo.name, c.id)
c.logo_ext = ext
db.session.commit() | [
"def",
"addlogo",
"(",
"community_id",
",",
"logo",
")",
":",
"# Create the bucket",
"c",
"=",
"Community",
".",
"get",
"(",
"community_id",
")",
"if",
"not",
"c",
":",
"click",
".",
"secho",
"(",
"'Community {0} does not exist.'",
".",
"format",
"(",
"community_id",
")",
",",
"fg",
"=",
"'red'",
")",
"return",
"ext",
"=",
"save_and_validate_logo",
"(",
"logo",
",",
"logo",
".",
"name",
",",
"c",
".",
"id",
")",
"c",
".",
"logo_ext",
"=",
"ext",
"db",
".",
"session",
".",
"commit",
"(",
")"
] | Add logo to the community. | [
"Add",
"logo",
"to",
"the",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/cli.py#L63-L73 | train | 480 |
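
Sketch of the logo command (argument order follows the function signature; `logo` arrives as an open file object, hence the `logo.name` access above):

    from click.testing import CliRunner
    from invenio_communities.cli import addlogo

    runner = CliRunner()
    with runner.isolated_filesystem():
        with open("logo.png", "wb") as f:
            f.write(b"\x89PNG")   # placeholder bytes, illustrative only
        result = runner.invoke(addlogo, ["my-community", "logo.png"])
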
inveniosoftware/invenio-communities | invenio_communities/cli.py | request | def request(community_id, record_id, accept):
"""Request a record acceptance to a community."""
c = Community.get(community_id)
assert c is not None
record = Record.get_record(record_id)
if accept:
c.add_record(record)
record.commit()
else:
InclusionRequest.create(community=c, record=record,
notify=False)
db.session.commit()
RecordIndexer().index_by_id(record.id) | python | def request(community_id, record_id, accept):
"""Request a record acceptance to a community."""
c = Community.get(community_id)
assert c is not None
record = Record.get_record(record_id)
if accept:
c.add_record(record)
record.commit()
else:
InclusionRequest.create(community=c, record=record,
notify=False)
db.session.commit()
RecordIndexer().index_by_id(record.id) | [
"def",
"request",
"(",
"community_id",
",",
"record_id",
",",
"accept",
")",
":",
"c",
"=",
"Community",
".",
"get",
"(",
"community_id",
")",
"assert",
"c",
"is",
"not",
"None",
"record",
"=",
"Record",
".",
"get_record",
"(",
"record_id",
")",
"if",
"accept",
":",
"c",
".",
"add_record",
"(",
"record",
")",
"record",
".",
"commit",
"(",
")",
"else",
":",
"InclusionRequest",
".",
"create",
"(",
"community",
"=",
"c",
",",
"record",
"=",
"record",
",",
"notify",
"=",
"False",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"RecordIndexer",
"(",
")",
".",
"index_by_id",
"(",
"record",
".",
"id",
")"
] | Request a record acceptance to a community. | [
"Request",
"a",
"record",
"acceptance",
"to",
"a",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/cli.py#L81-L93 | train | 481 |
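
A hypothetical invocation (the record UUID and the exact spelling of the accept flag are assumptions): with the flag set, the record is attached immediately; without it an `InclusionRequest` is opened instead:

    from click.testing import CliRunner
    from invenio_communities.cli import request

    record_uuid = "00000000-0000-0000-0000-000000000000"  # placeholder
    runner = CliRunner()
    runner.invoke(request, ["my-community", record_uuid, "--accept"])  # flag name assumed
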
inveniosoftware/invenio-communities | invenio_communities/cli.py | remove | def remove(community_id, record_id):
"""Remove a record from community."""
c = Community.get(community_id)
assert c is not None
c.remove_record(record_id)
db.session.commit()
RecordIndexer().index_by_id(record_id) | python | def remove(community_id, record_id):
"""Remove a record from community."""
c = Community.get(community_id)
assert c is not None
c.remove_record(record_id)
db.session.commit()
RecordIndexer().index_by_id(record_id) | [
"def",
"remove",
"(",
"community_id",
",",
"record_id",
")",
":",
"c",
"=",
"Community",
".",
"get",
"(",
"community_id",
")",
"assert",
"c",
"is",
"not",
"None",
"c",
".",
"remove_record",
"(",
"record_id",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"RecordIndexer",
"(",
")",
".",
"index_by_id",
"(",
"record_id",
")"
] | Remove a record from community. | [
"Remove",
"a",
"record",
"from",
"community",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/cli.py#L100-L106 | train | 482 |
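
The companion removal command, same pattern (sketch; UUID is a placeholder):

    from invenio_communities.cli import remove

    # Detaches the record from the community and reindexes it
    runner.invoke(remove, ["my-community", record_uuid])
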
OpenTreeOfLife/peyotl | peyotl/__init__.py | gen_otu_dict | def gen_otu_dict(nex_obj, nexson_version=None):
"""Takes a NexSON object and returns a dict of
otu_id -> otu_obj
"""
if nexson_version is None:
nexson_version = detect_nexson_version(nex_obj)
if _is_by_id_hbf(nexson_version):
otus = nex_obj['nexml']['otusById']
if len(otus) > 1:
d = {}
for v in otus.values():
d.update(v['otuById'])
return d
else:
return otus.values()[0]['otuById']
o_dict = {}
for ob in nex_obj.get('otus', []):
for o in ob.get('otu', []):
oid = o['@id']
o_dict[oid] = o
return o_dict | python | def gen_otu_dict(nex_obj, nexson_version=None):
"""Takes a NexSON object and returns a dict of
otu_id -> otu_obj
"""
if nexson_version is None:
nexson_version = detect_nexson_version(nex_obj)
if _is_by_id_hbf(nexson_version):
otus = nex_obj['nexml']['otusById']
if len(otus) > 1:
d = {}
for v in otus.values():
d.update(v['otuById'])
return d
else:
return otus.values()[0]['otuById']
o_dict = {}
for ob in nex_obj.get('otus', []):
for o in ob.get('otu', []):
oid = o['@id']
o_dict[oid] = o
return o_dict | [
"def",
"gen_otu_dict",
"(",
"nex_obj",
",",
"nexson_version",
"=",
"None",
")",
":",
"if",
"nexson_version",
"is",
"None",
":",
"nexson_version",
"=",
"detect_nexson_version",
"(",
"nex_obj",
")",
"if",
"_is_by_id_hbf",
"(",
"nexson_version",
")",
":",
"otus",
"=",
"nex_obj",
"[",
"'nexml'",
"]",
"[",
"'otusById'",
"]",
"if",
"len",
"(",
"otus",
")",
">",
"1",
":",
"d",
"=",
"{",
"}",
"for",
"v",
"in",
"otus",
".",
"values",
"(",
")",
":",
"d",
".",
"update",
"(",
"v",
"[",
"'otuById'",
"]",
")",
"return",
"d",
"else",
":",
"return",
"otus",
".",
"values",
"(",
")",
"[",
"0",
"]",
"[",
"'otuById'",
"]",
"o_dict",
"=",
"{",
"}",
"for",
"ob",
"in",
"nex_obj",
".",
"get",
"(",
"'otus'",
",",
"[",
"]",
")",
":",
"for",
"o",
"in",
"ob",
".",
"get",
"(",
"'otu'",
",",
"[",
"]",
")",
":",
"oid",
"=",
"o",
"[",
"'@id'",
"]",
"o_dict",
"[",
"oid",
"]",
"=",
"o",
"return",
"o_dict"
] | Takes a NexSON object and returns a dict of
otu_id -> otu_obj | [
"Takes",
"a",
"NexSON",
"object",
"and",
"returns",
"a",
"dict",
"of",
"otu_id",
"-",
">",
"otu_obj"
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/__init__.py#L33-L53 | train | 483 |
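
A minimal by-id HoneyBadgerFish blob, hand-rolled for illustration; the version string is passed explicitly (assuming '1.2.1' is recognized as a by-id version) so no version sniffing is needed. Two otus groups are used so the dict-merging branch runs; note the single-group branch (`otus.values()[0]`) relies on Python 2 list-like dict views:

    from peyotl import gen_otu_dict

    blob = {"nexml": {"otusById": {
        "otus1": {"otuById": {"otu1": {"^ot:originalLabel": "A"}}},
        "otus2": {"otuById": {"otu2": {"^ot:originalLabel": "B"}}}}}}
    print(gen_otu_dict(blob, nexson_version="1.2.1"))
    # {'otu1': {'^ot:originalLabel': 'A'}, 'otu2': {'^ot:originalLabel': 'B'}}
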
color/django-country | django_country/views.py | set_country | def set_country(request):
"""
Sets the chosen country in the session or cookie.
If `next` query param is present, it redirects to a given url.
"""
if request.method == 'POST':
next = request.POST.get('next', request.GET.get('next'))
if is_safe_url(url=next, host=request.get_host()):
response = http.HttpResponseRedirect(next)
else:
response = http.HttpResponse()
country_code = request.POST.get('country', '').upper()
if country_code != geo.get_supported_country(country_code):
return http.HttpResponseBadRequest()
if hasattr(request, 'session'):
request.session[geo.COUNTRY_SESSION_KEY] = country_code
else:
response.set_cookie(geo.COUNTRY_COOKIE_NAME,
country_code,
max_age=geo.COUNTRY_COOKIE_AGE,
path=geo.COUNTRY_COOKIE_PATH)
return response
else:
return http.HttpResponseNotAllowed(['POST']) | python | def set_country(request):
"""
Sets the chosen country in the session or cookie.
If `next` query param is present, it redirects to a given url.
"""
if request.method == 'POST':
next = request.POST.get('next', request.GET.get('next'))
if is_safe_url(url=next, host=request.get_host()):
response = http.HttpResponseRedirect(next)
else:
response = http.HttpResponse()
country_code = request.POST.get('country', '').upper()
if country_code != geo.get_supported_country(country_code):
return http.HttpResponseBadRequest()
if hasattr(request, 'session'):
request.session[geo.COUNTRY_SESSION_KEY] = country_code
else:
response.set_cookie(geo.COUNTRY_COOKIE_NAME,
country_code,
max_age=geo.COUNTRY_COOKIE_AGE,
path=geo.COUNTRY_COOKIE_PATH)
return response
else:
return http.HttpResponseNotAllowed(['POST']) | [
"def",
"set_country",
"(",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"next",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"'next'",
",",
"request",
".",
"GET",
".",
"get",
"(",
"'next'",
")",
")",
"if",
"is_safe_url",
"(",
"url",
"=",
"next",
",",
"host",
"=",
"request",
".",
"get_host",
"(",
")",
")",
":",
"response",
"=",
"http",
".",
"HttpResponseRedirect",
"(",
"next",
")",
"else",
":",
"response",
"=",
"http",
".",
"HttpResponse",
"(",
")",
"country_code",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"'country'",
",",
"''",
")",
".",
"upper",
"(",
")",
"if",
"country_code",
"!=",
"geo",
".",
"get_supported_country",
"(",
"country_code",
")",
":",
"return",
"http",
".",
"HttpResponseBadRequest",
"(",
")",
"if",
"hasattr",
"(",
"request",
",",
"'session'",
")",
":",
"request",
".",
"session",
"[",
"geo",
".",
"COUNTRY_SESSION_KEY",
"]",
"=",
"country_code",
"else",
":",
"response",
".",
"set_cookie",
"(",
"geo",
".",
"COUNTRY_COOKIE_NAME",
",",
"country_code",
",",
"max_age",
"=",
"geo",
".",
"COUNTRY_COOKIE_AGE",
",",
"path",
"=",
"geo",
".",
"COUNTRY_COOKIE_PATH",
")",
"return",
"response",
"else",
":",
"return",
"http",
".",
"HttpResponseNotAllowed",
"(",
"[",
"'POST'",
"]",
")"
] | Sets the chosen country in the session or cookie.
If `next' query param is present, it redirects to a given url. | [
"Sets",
"the",
"chosen",
"country",
"in",
"the",
"session",
"or",
"cookie",
"."
] | 1d272a196d998e21bb8d407e2657b88211f35232 | https://github.com/color/django-country/blob/1d272a196d998e21bb8d407e2657b88211f35232/django_country/views.py#L8-L34 | train | 484 |
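
Exercising the view with Django's test client (the URL is whatever you routed to `set_country`; '/set-country/' is an assumption). The posted code is upper-cased and validated before being stored:

    from django.test import Client

    client = Client()
    resp = client.post("/set-country/", {"country": "de", "next": "/home/"})
    assert resp.status_code in (200, 302, 400)
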
hsolbrig/pyjsg | pyjsg/parser_impl/jsg_doc_context.py | JSGDocContext.reference | def reference(self, tkn: str):
""" Return the element that tkn represents"""
return self.grammarelts[tkn] if tkn in self.grammarelts else UndefinedElement(tkn) | python | def reference(self, tkn: str):
""" Return the element that tkn represents"""
return self.grammarelts[tkn] if tkn in self.grammarelts else UndefinedElement(tkn) | [
"def",
"reference",
"(",
"self",
",",
"tkn",
":",
"str",
")",
":",
"return",
"self",
".",
"grammarelts",
"[",
"tkn",
"]",
"if",
"tkn",
"in",
"self",
".",
"grammarelts",
"else",
"UndefinedElement",
"(",
"tkn",
")"
] | Return the element that tkn represents | [
"Return",
"the",
"element",
"that",
"tkn",
"represents"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_doc_context.py#L111-L113 | train | 485 |
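
`reference` is a dictionary lookup with an `UndefinedElement` fallback; an equivalent spelling, given a `JSGDocContext` instance `ctx`:

    # Same result as ctx.reference(tkn); note dict.get builds the default eagerly
    elt = ctx.grammarelts.get(tkn, UndefinedElement(tkn))
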
hsolbrig/pyjsg | pyjsg/parser_impl/jsg_doc_context.py | JSGDocContext.dependency_list | def dependency_list(self, tkn: str) -> List[str]:
"""Return a list all of the grammarelts that depend on tkn
:param tkn:
:return:
"""
if tkn not in self.dependency_map:
self.dependency_map[tkn] = [tkn] # Force a circular reference
self.dependency_map[tkn] = self.reference(tkn).dependency_list()
return self.dependency_map[tkn] | python | def dependency_list(self, tkn: str) -> List[str]:
"""Return a list all of the grammarelts that depend on tkn
:param tkn:
:return:
"""
if tkn not in self.dependency_map:
self.dependency_map[tkn] = [tkn] # Force a circular reference
self.dependency_map[tkn] = self.reference(tkn).dependency_list()
return self.dependency_map[tkn] | [
"def",
"dependency_list",
"(",
"self",
",",
"tkn",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"if",
"tkn",
"not",
"in",
"self",
".",
"dependency_map",
":",
"self",
".",
"dependency_map",
"[",
"tkn",
"]",
"=",
"[",
"tkn",
"]",
"# Force a circular reference",
"self",
".",
"dependency_map",
"[",
"tkn",
"]",
"=",
"self",
".",
"reference",
"(",
"tkn",
")",
".",
"dependency_list",
"(",
")",
"return",
"self",
".",
"dependency_map",
"[",
"tkn",
"]"
] | Return a list all of the grammarelts that depend on tkn
:param tkn:
:return: | [
"Return",
"a",
"list",
"all",
"of",
"the",
"grammarelts",
"that",
"depend",
"on",
"tkn"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_doc_context.py#L139-L148 | train | 486 |
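
The provisional `[tkn]` entry is what breaks cycles: a re-entrant lookup sees the placeholder instead of recursing forever. A self-contained sketch of the same guard on a toy graph (not the JSG classes):

    graph = {"A": ["B"], "B": ["A", "C"], "C": []}
    cache = {}

    def deps(tkn):
        if tkn not in cache:
            cache[tkn] = [tkn]           # provisional entry: breaks the A <-> B cycle
            out = []
            for n in graph[tkn]:
                out.append(n)
                out.extend(deps(n))
            cache[tkn] = out
        return cache[tkn]

    print(deps("A"))   # ['B', 'A', 'A', 'C'] -- terminates despite the cycle
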
hsolbrig/pyjsg | pyjsg/parser_impl/jsg_doc_context.py | JSGDocContext.dependencies | def dependencies(self, tkn: str) -> Set[str]:
"""Return all the items that tkn depends on as a set
:param tkn:
:return:
"""
return set(self.dependency_list(tkn)) | python | def dependencies(self, tkn: str) -> Set[str]:
"""Return all the items that tkn depends on as a set
:param tkn:
:return:
"""
return set(self.dependency_list(tkn)) | [
"def",
"dependencies",
"(",
"self",
",",
"tkn",
":",
"str",
")",
"->",
"Set",
"[",
"str",
"]",
":",
"return",
"set",
"(",
"self",
".",
"dependency_list",
"(",
"tkn",
")",
")"
] | Return all the items that tkn depends on as a set
:param tkn:
:return: | [
"Return",
"all",
"the",
"items",
"that",
"tkn",
"depends",
"on",
"as",
"a",
"set"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_doc_context.py#L150-L156 | train | 487 |
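
Sugar over the list form; the set view simply deduplicates (sketch, given a populated `ctx`):

    assert ctx.dependencies("A") == set(ctx.dependency_list("A"))
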
hsolbrig/pyjsg | pyjsg/parser_impl/jsg_doc_context.py | JSGDocContext.undefined_entries | def undefined_entries(self) -> Set[str]:
""" Return the set of tokens that are referenced but not defined. """
return as_set([[d for d in self.dependencies(k) if d not in self.grammarelts]
for k in self.grammarelts.keys()]) | python | def undefined_entries(self) -> Set[str]:
""" Return the set of tokens that are referenced but not defined. """
return as_set([[d for d in self.dependencies(k) if d not in self.grammarelts]
for k in self.grammarelts.keys()]) | [
"def",
"undefined_entries",
"(",
"self",
")",
"->",
"Set",
"[",
"str",
"]",
":",
"return",
"as_set",
"(",
"[",
"[",
"d",
"for",
"d",
"in",
"self",
".",
"dependencies",
"(",
"k",
")",
"if",
"d",
"not",
"in",
"self",
".",
"grammarelts",
"]",
"for",
"k",
"in",
"self",
".",
"grammarelts",
".",
"keys",
"(",
")",
"]",
")"
] | Return the set of tokens that are referenced but not defined. | [
"Return",
"the",
"set",
"of",
"tokens",
"that",
"are",
"referenced",
"but",
"not",
"defined",
"."
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_doc_context.py#L158-L161 | train | 488 |
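
An equivalent set-comprehension spelling of the same scan (sketch, given a populated `ctx`):

    undefined = {d
                 for k in ctx.grammarelts
                 for d in ctx.dependencies(k)
                 if d not in ctx.grammarelts}
    assert undefined == ctx.undefined_entries()
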
inveniosoftware/invenio-communities | invenio_communities/receivers.py | new_request | def new_request(sender, request=None, notify=True, **kwargs):
"""New request for inclusion."""
if current_app.config['COMMUNITIES_MAIL_ENABLED'] and notify:
send_community_request_email(request) | python | def new_request(sender, request=None, notify=True, **kwargs):
"""New request for inclusion."""
if current_app.config['COMMUNITIES_MAIL_ENABLED'] and notify:
send_community_request_email(request) | [
"def",
"new_request",
"(",
"sender",
",",
"request",
"=",
"None",
",",
"notify",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"current_app",
".",
"config",
"[",
"'COMMUNITIES_MAIL_ENABLED'",
"]",
"and",
"notify",
":",
"send_community_request_email",
"(",
"request",
")"
] | New request for inclusion. | [
"New",
"request",
"for",
"inclusion",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/receivers.py#L36-L39 | train | 489 |
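
The receiver is normally wired to a signal; calling it directly shows the gating. Note that an application context is needed in both cases, since the config flag is read before `notify` is checked (sketch; `incl_req` is an `InclusionRequest` obtained elsewhere):

    from invenio_communities.receivers import new_request

    new_request(sender=None, request=incl_req, notify=False)  # suppressed: no mail sent
    new_request(sender=None, request=incl_req)                # mails only if the config flag is True
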
inveniosoftware/invenio-communities | invenio_communities/receivers.py | inject_provisional_community | def inject_provisional_community(sender, json=None, record=None, index=None,
**kwargs):
"""Inject 'provisional_communities' key to ES index."""
if index and not index.startswith(
current_app.config['COMMUNITIES_INDEX_PREFIX']):
return
json['provisional_communities'] = list(sorted([
r.id_community for r in InclusionRequest.get_by_record(record.id)
])) | python | def inject_provisional_community(sender, json=None, record=None, index=None,
**kwargs):
"""Inject 'provisional_communities' key to ES index."""
if index and not index.startswith(
current_app.config['COMMUNITIES_INDEX_PREFIX']):
return
json['provisional_communities'] = list(sorted([
r.id_community for r in InclusionRequest.get_by_record(record.id)
])) | [
"def",
"inject_provisional_community",
"(",
"sender",
",",
"json",
"=",
"None",
",",
"record",
"=",
"None",
",",
"index",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"index",
"and",
"not",
"index",
".",
"startswith",
"(",
"current_app",
".",
"config",
"[",
"'COMMUNITIES_INDEX_PREFIX'",
"]",
")",
":",
"return",
"json",
"[",
"'provisional_communities'",
"]",
"=",
"list",
"(",
"sorted",
"(",
"[",
"r",
".",
"id_community",
"for",
"r",
"in",
"InclusionRequest",
".",
"get_by_record",
"(",
"record",
".",
"id",
")",
"]",
")",
")"
] | Inject 'provisional_communities' key to ES index. | [
"Inject",
"provisional_communities",
"key",
"to",
"ES",
"index",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/receivers.py#L42-L51 | train | 490 |
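
Sketch of what the indexer receiver adds (the index name and config prefix are illustrative; an application context and a record with pending requests are required):

    json_doc = {}
    inject_provisional_community(None, json=json_doc, record=record,
                                 index="communities-community-v1.0.0")  # name assumed
    # json_doc now carries e.g. {'provisional_communities': ['comm-a', 'comm-b']}
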
OpenTreeOfLife/peyotl | peyotl/api/oti.py | _OTIWrapper.find_nodes | def find_nodes(self, query_dict=None, exact=False, verbose=False, **kwargs):
"""Query on node properties. See documentation for _OTIWrapper class."""
assert self.use_v1
return self._do_query('{p}/singlePropertySearchForTreeNodes'.format(p=self.query_prefix),
query_dict=query_dict,
exact=exact,
verbose=verbose,
valid_keys=self.node_search_term_set,
kwargs=kwargs) | python | def find_nodes(self, query_dict=None, exact=False, verbose=False, **kwargs):
"""Query on node properties. See documentation for _OTIWrapper class."""
assert self.use_v1
return self._do_query('{p}/singlePropertySearchForTreeNodes'.format(p=self.query_prefix),
query_dict=query_dict,
exact=exact,
verbose=verbose,
valid_keys=self.node_search_term_set,
kwargs=kwargs) | [
"def",
"find_nodes",
"(",
"self",
",",
"query_dict",
"=",
"None",
",",
"exact",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"self",
".",
"use_v1",
"return",
"self",
".",
"_do_query",
"(",
"'{p}/singlePropertySearchForTreeNodes'",
".",
"format",
"(",
"p",
"=",
"self",
".",
"query_prefix",
")",
",",
"query_dict",
"=",
"query_dict",
",",
"exact",
"=",
"exact",
",",
"verbose",
"=",
"verbose",
",",
"valid_keys",
"=",
"self",
".",
"node_search_term_set",
",",
"kwargs",
"=",
"kwargs",
")"
] | Query on node properties. See documentation for _OTIWrapper class. | [
"Query",
"on",
"node",
"properties",
".",
"See",
"documentation",
"for",
"_OTIWrapper",
"class",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/api/oti.py#L119-L127 | train | 491 |
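
A hypothetical node query (the wrapper entry point and the search property are assumptions; per the assert, `find_nodes` is only available against the v1 API):

    from peyotl.api import APIWrapper   # entry point assumed

    oti = APIWrapper().oti
    hits = oti.find_nodes({"ot:ottId": 770315}, verbose=True)  # property/value illustrative
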
OpenTreeOfLife/peyotl | peyotl/api/oti.py | _OTIWrapper.find_trees | def find_trees(self, query_dict=None, exact=False, verbose=False, wrap_response=False, **kwargs):
"""Query on tree properties. See documentation for _OTIWrapper class."""
if self.use_v1:
uri = '{p}/singlePropertySearchForTrees'.format(p=self.query_prefix)
else:
uri = '{p}/find_trees'.format(p=self.query_prefix)
resp = self._do_query(uri,
query_dict=query_dict,
exact=exact,
verbose=verbose,
valid_keys=self.tree_search_term_set,
kwargs=kwargs)
if wrap_response:
return TreeRefList(resp)
return resp | python | def find_trees(self, query_dict=None, exact=False, verbose=False, wrap_response=False, **kwargs):
"""Query on tree properties. See documentation for _OTIWrapper class."""
if self.use_v1:
uri = '{p}/singlePropertySearchForTrees'.format(p=self.query_prefix)
else:
uri = '{p}/find_trees'.format(p=self.query_prefix)
resp = self._do_query(uri,
query_dict=query_dict,
exact=exact,
verbose=verbose,
valid_keys=self.tree_search_term_set,
kwargs=kwargs)
if wrap_response:
return TreeRefList(resp)
return resp | [
"def",
"find_trees",
"(",
"self",
",",
"query_dict",
"=",
"None",
",",
"exact",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
"wrap_response",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"use_v1",
":",
"uri",
"=",
"'{p}/singlePropertySearchForTrees'",
".",
"format",
"(",
"p",
"=",
"self",
".",
"query_prefix",
")",
"else",
":",
"uri",
"=",
"'{p}/find_trees'",
".",
"format",
"(",
"p",
"=",
"self",
".",
"query_prefix",
")",
"resp",
"=",
"self",
".",
"_do_query",
"(",
"uri",
",",
"query_dict",
"=",
"query_dict",
",",
"exact",
"=",
"exact",
",",
"verbose",
"=",
"verbose",
",",
"valid_keys",
"=",
"self",
".",
"tree_search_term_set",
",",
"kwargs",
"=",
"kwargs",
")",
"if",
"wrap_response",
":",
"return",
"TreeRefList",
"(",
"resp",
")",
"return",
"resp"
] | Query on tree properties. See documentation for _OTIWrapper class. | [
"Query",
"on",
"tree",
"properties",
".",
"See",
"documentation",
"for",
"_OTIWrapper",
"class",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/api/oti.py#L129-L143 | train | 492 |
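
The same pattern for trees, reusing `oti` from the previous sketch; `wrap_response=True` returns a `TreeRefList` instead of the raw JSON (property name illustrative):

    trees = oti.find_trees({"ot:ottTaxonName": "Aves"}, wrap_response=True)
    for ref in trees:   # TreeRefList is assumed iterable over matching tree references
        print(ref)
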
OpenTreeOfLife/peyotl | peyotl/api/oti.py | _OTIWrapper.find_studies | def find_studies(self, query_dict=None, exact=False, verbose=False, **kwargs):
"""Query on study properties. See documentation for _OTIWrapper class."""
if self.use_v1:
uri = '{p}/singlePropertySearchForStudies'.format(p=self.query_prefix)
else:
uri = '{p}/find_studies'.format(p=self.query_prefix)
return self._do_query(uri,
query_dict=query_dict,
exact=exact,
verbose=verbose,
valid_keys=self.study_search_term_set,
kwargs=kwargs) | python | def find_studies(self, query_dict=None, exact=False, verbose=False, **kwargs):
"""Query on study properties. See documentation for _OTIWrapper class."""
if self.use_v1:
uri = '{p}/singlePropertySearchForStudies'.format(p=self.query_prefix)
else:
uri = '{p}/find_studies'.format(p=self.query_prefix)
return self._do_query(uri,
query_dict=query_dict,
exact=exact,
verbose=verbose,
valid_keys=self.study_search_term_set,
kwargs=kwargs) | [
"def",
"find_studies",
"(",
"self",
",",
"query_dict",
"=",
"None",
",",
"exact",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"use_v1",
":",
"uri",
"=",
"'{p}/singlePropertySearchForStudies'",
".",
"format",
"(",
"p",
"=",
"self",
".",
"query_prefix",
")",
"else",
":",
"uri",
"=",
"'{p}/find_studies'",
".",
"format",
"(",
"p",
"=",
"self",
".",
"query_prefix",
")",
"return",
"self",
".",
"_do_query",
"(",
"uri",
",",
"query_dict",
"=",
"query_dict",
",",
"exact",
"=",
"exact",
",",
"verbose",
"=",
"verbose",
",",
"valid_keys",
"=",
"self",
".",
"study_search_term_set",
",",
"kwargs",
"=",
"kwargs",
")"
] | Query on study properties. See documentation for _OTIWrapper class. | [
"Query",
"on",
"study",
"properties",
".",
"See",
"documentation",
"for",
"_OTIWrapper",
"class",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/api/oti.py#L145-L156 | train | 493 |
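
And for studies (sketch; search property and value illustrative):

    studies = oti.find_studies({"ot:studyId": "ot_123"}, exact=True, verbose=True)
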
SpotlightData/preprocessing | setup.py | get_requirements | def get_requirements():
'''returns requirements array for package'''
packages = []
with open("requirements.txt", "r") as req_doc:
for package in req_doc:
packages.append(package.replace("\n", ""))
return packages | python | def get_requirements():
'''returns requirements array for package'''
packages = []
with open("requirements.txt", "r") as req_doc:
for package in req_doc:
packages.append(package.replace("\n", ""))
return packages | [
"def",
"get_requirements",
"(",
")",
":",
"packages",
"=",
"[",
"]",
"with",
"open",
"(",
"\"requirements.txt\"",
",",
"\"r\"",
")",
"as",
"req_doc",
":",
"for",
"package",
"in",
"req_doc",
":",
"packages",
".",
"append",
"(",
"package",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
")",
"return",
"packages"
] | returns requirements array for package | [
"returns",
"requirements",
"array",
"for",
"package"
] | 180c6472bc2642afbd7a1ece08d0b0d14968a708 | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/setup.py#L5-L11 | train | 494 |
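
A standalone demo of the parsing (the helper is inlined here, since a `setup.py` is not cleanly importable as a module):

    def get_requirements():
        packages = []
        with open("requirements.txt", "r") as req_doc:
            for package in req_doc:
                packages.append(package.replace("\n", ""))
        return packages

    with open("requirements.txt", "w") as f:
        f.write("requests==2.31.0\nclick\n")
    print(get_requirements())   # ['requests==2.31.0', 'click']
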
OpenTreeOfLife/peyotl | peyotl/amendments/amendments_umbrella.py | TaxonomicAmendmentStore | def TaxonomicAmendmentStore(repos_dict=None,
repos_par=None,
with_caching=True,
assumed_doc_version=None,
git_ssh=None,
pkey=None,
git_action_class=TaxonomicAmendmentsGitAction,
mirror_info=None,
infrastructure_commit_author='OpenTree API <[email protected]>'):
"""Factory function for a _TaxonomicAmendmentStore object.
A wrapper around the _TaxonomicAmendmentStore class instantiation for
the most common use case: a singleton _TaxonomicAmendmentStore.
If you need distinct _TaxonomicAmendmentStore objects, you'll need to
call that class directly.
"""
global _THE_TAXONOMIC_AMENDMENT_STORE
if _THE_TAXONOMIC_AMENDMENT_STORE is None:
_THE_TAXONOMIC_AMENDMENT_STORE = _TaxonomicAmendmentStore(repos_dict=repos_dict,
repos_par=repos_par,
with_caching=with_caching,
assumed_doc_version=assumed_doc_version,
git_ssh=git_ssh,
pkey=pkey,
git_action_class=git_action_class,
mirror_info=mirror_info,
infrastructure_commit_author=infrastructure_commit_author)
return _THE_TAXONOMIC_AMENDMENT_STORE | python | def TaxonomicAmendmentStore(repos_dict=None,
repos_par=None,
with_caching=True,
assumed_doc_version=None,
git_ssh=None,
pkey=None,
git_action_class=TaxonomicAmendmentsGitAction,
mirror_info=None,
infrastructure_commit_author='OpenTree API <[email protected]>'):
"""Factory function for a _TaxonomicAmendmentStore object.
A wrapper around the _TaxonomicAmendmentStore class instantiation for
the most common use case: a singleton _TaxonomicAmendmentStore.
If you need distinct _TaxonomicAmendmentStore objects, you'll need to
call that class directly.
"""
global _THE_TAXONOMIC_AMENDMENT_STORE
if _THE_TAXONOMIC_AMENDMENT_STORE is None:
_THE_TAXONOMIC_AMENDMENT_STORE = _TaxonomicAmendmentStore(repos_dict=repos_dict,
repos_par=repos_par,
with_caching=with_caching,
assumed_doc_version=assumed_doc_version,
git_ssh=git_ssh,
pkey=pkey,
git_action_class=git_action_class,
mirror_info=mirror_info,
infrastructure_commit_author=infrastructure_commit_author)
return _THE_TAXONOMIC_AMENDMENT_STORE | [
"def",
"TaxonomicAmendmentStore",
"(",
"repos_dict",
"=",
"None",
",",
"repos_par",
"=",
"None",
",",
"with_caching",
"=",
"True",
",",
"assumed_doc_version",
"=",
"None",
",",
"git_ssh",
"=",
"None",
",",
"pkey",
"=",
"None",
",",
"git_action_class",
"=",
"TaxonomicAmendmentsGitAction",
",",
"mirror_info",
"=",
"None",
",",
"infrastructure_commit_author",
"=",
"'OpenTree API <[email protected]>'",
")",
":",
"global",
"_THE_TAXONOMIC_AMENDMENT_STORE",
"if",
"_THE_TAXONOMIC_AMENDMENT_STORE",
"is",
"None",
":",
"_THE_TAXONOMIC_AMENDMENT_STORE",
"=",
"_TaxonomicAmendmentStore",
"(",
"repos_dict",
"=",
"repos_dict",
",",
"repos_par",
"=",
"repos_par",
",",
"with_caching",
"=",
"with_caching",
",",
"assumed_doc_version",
"=",
"assumed_doc_version",
",",
"git_ssh",
"=",
"git_ssh",
",",
"pkey",
"=",
"pkey",
",",
"git_action_class",
"=",
"git_action_class",
",",
"mirror_info",
"=",
"mirror_info",
",",
"infrastructure_commit_author",
"=",
"infrastructure_commit_author",
")",
"return",
"_THE_TAXONOMIC_AMENDMENT_STORE"
] | Factory function for a _TaxonomicAmendmentStore object.
A wrapper around the _TaxonomicAmendmentStore class instantiation for
the most common use case: a singleton _TaxonomicAmendmentStore.
If you need distinct _TaxonomicAmendmentStore objects, you'll need to
call that class directly. | [
"Factory",
"function",
"for",
"a",
"_TaxonomicAmendmentStore",
"object",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/amendments/amendments_umbrella.py#L352-L379 | train | 495 |
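
The singleton behavior in use (sketch; a configured amendments shard is required, and the `repos_par` path is made up). Arguments matter only on the first call; later calls return the existing object:

    from peyotl.amendments.amendments_umbrella import TaxonomicAmendmentStore

    s1 = TaxonomicAmendmentStore(repos_par="/path/to/shards")  # first call builds the store
    s2 = TaxonomicAmendmentStore()                             # later calls ignore args
    assert s1 is s2
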
inveniosoftware/invenio-communities | invenio_communities/tasks.py | delete_marked_communities | def delete_marked_communities():
"""Delete communities after holdout time."""
# TODO: Delete the community ID from all records metadata first
raise NotImplementedError()
Community.query.filter_by(
Community.delete_time > datetime.utcnow()).delete()
db.session.commit() | python | def delete_marked_communities():
"""Delete communities after holdout time."""
# TODO: Delete the community ID from all records metadata first
raise NotImplementedError()
Community.query.filter_by(
Community.delete_time > datetime.utcnow()).delete()
db.session.commit() | [
"def",
"delete_marked_communities",
"(",
")",
":",
"# TODO: Delete the community ID from all records metadata first",
"raise",
"NotImplementedError",
"(",
")",
"Community",
".",
"query",
".",
"filter_by",
"(",
"Community",
".",
"delete_time",
">",
"datetime",
".",
"utcnow",
"(",
")",
")",
".",
"delete",
"(",
")",
"db",
".",
"session",
".",
"commit",
"(",
")"
] | Delete communities after holdout time. | [
"Delete",
"communities",
"after",
"holdout",
"time",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/tasks.py#L38-L44 | train | 496 |
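
Everything after the `raise` above is currently dead code (and the `filter_by` call would need to be `filter` to accept a comparison expression), so invoking the task simply raises:

    from invenio_communities.tasks import delete_marked_communities

    try:
        delete_marked_communities()   # direct call; .delay() if running under Celery
    except NotImplementedError:
        pass   # record-metadata cleanup is still a TODO upstream
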
inveniosoftware/invenio-communities | invenio_communities/tasks.py | delete_expired_requests | def delete_expired_requests():
"""Delete expired inclusion requests."""
InclusionRequest.query.filter_by(
InclusionRequest.expiry_date > datetime.utcnow()).delete()
db.session.commit() | python | def delete_expired_requests():
"""Delete expired inclusion requests."""
InclusionRequest.query.filter_by(
InclusionRequest.expiry_date > datetime.utcnow()).delete()
db.session.commit() | [
"def",
"delete_expired_requests",
"(",
")",
":",
"InclusionRequest",
".",
"query",
".",
"filter_by",
"(",
"InclusionRequest",
".",
"expiry_date",
">",
"datetime",
".",
"utcnow",
"(",
")",
")",
".",
"delete",
"(",
")",
"db",
".",
"session",
".",
"commit",
"(",
")"
] | Delete expired inclusion requests. | [
"Delete",
"expired",
"inclusion",
"requests",
"."
] | 5c4de6783724d276ae1b6dd13a399a9e22fadc7a | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/tasks.py#L48-L52 | train | 497 |
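
This task is typically run on a schedule; a hypothetical Celery beat entry (entry name and interval are illustrative):

    from datetime import timedelta

    CELERY_BEAT_SCHEDULE = {
        "prune-inclusion-requests": {
            "task": "invenio_communities.tasks.delete_expired_requests",
            "schedule": timedelta(hours=1),
        },
    }
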
OpenTreeOfLife/peyotl | peyotl/nexson_syntax/__init__.py | create_content_spec | def create_content_spec(**kwargs):
"""Sugar. factory for a PhyloSchema object.
Repackages the kwargs to kwargs for PhyloSchema so that our
PhyloSchema.__init__ does not have to be so rich
"""
format_str = kwargs.get('format', 'nexson')
nexson_version = kwargs.get('nexson_version', 'native')
otu_label = kwargs.get('otu_label')
if otu_label is None:
otu_label = kwargs.get('tip_label')
content = kwargs.get('content')
if content is not None:
content_id = kwargs.get('content_id')
if content_id is None:
content_id = _get_content_id_from(**kwargs)
else:
content, content_id = _sniff_content_from_kwargs(**kwargs)
if content is None:
content = 'study'
return PhyloSchema(content=content,
content_id=content_id,
format_str=format_str,
version=nexson_version,
otu_label=otu_label,
repo_nexml2json=kwargs.get('repo_nexml2json'),
bracket_ingroup=bool(kwargs.get('bracket_ingroup', False)),
cull_nonmatching=kwargs.get('cull_nonmatching')) | python | def create_content_spec(**kwargs):
"""Sugar. factory for a PhyloSchema object.
Repackages the kwargs to kwargs for PhyloSchema so that our
PhyloSchema.__init__ does not have to be so rich
"""
format_str = kwargs.get('format', 'nexson')
nexson_version = kwargs.get('nexson_version', 'native')
otu_label = kwargs.get('otu_label')
if otu_label is None:
otu_label = kwargs.get('tip_label')
content = kwargs.get('content')
if content is not None:
content_id = kwargs.get('content_id')
if content_id is None:
content_id = _get_content_id_from(**kwargs)
else:
content, content_id = _sniff_content_from_kwargs(**kwargs)
if content is None:
content = 'study'
return PhyloSchema(content=content,
content_id=content_id,
format_str=format_str,
version=nexson_version,
otu_label=otu_label,
repo_nexml2json=kwargs.get('repo_nexml2json'),
bracket_ingroup=bool(kwargs.get('bracket_ingroup', False)),
cull_nonmatching=kwargs.get('cull_nonmatching')) | [
"def",
"create_content_spec",
"(",
"*",
"*",
"kwargs",
")",
":",
"format_str",
"=",
"kwargs",
".",
"get",
"(",
"'format'",
",",
"'nexson'",
")",
"nexson_version",
"=",
"kwargs",
".",
"get",
"(",
"'nexson_version'",
",",
"'native'",
")",
"otu_label",
"=",
"kwargs",
".",
"get",
"(",
"'otu_label'",
")",
"if",
"otu_label",
"is",
"None",
":",
"otu_label",
"=",
"kwargs",
".",
"get",
"(",
"'tip_label'",
")",
"content",
"=",
"kwargs",
".",
"get",
"(",
"'content'",
")",
"if",
"content",
"is",
"not",
"None",
":",
"content_id",
"=",
"kwargs",
".",
"get",
"(",
"'content_id'",
")",
"if",
"content_id",
"is",
"None",
":",
"content_id",
"=",
"_get_content_id_from",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"content",
",",
"content_id",
"=",
"_sniff_content_from_kwargs",
"(",
"*",
"*",
"kwargs",
")",
"if",
"content",
"is",
"None",
":",
"content",
"=",
"'study'",
"return",
"PhyloSchema",
"(",
"content",
"=",
"content",
",",
"content_id",
"=",
"content_id",
",",
"format_str",
"=",
"format_str",
",",
"version",
"=",
"nexson_version",
",",
"otu_label",
"=",
"otu_label",
",",
"repo_nexml2json",
"=",
"kwargs",
".",
"get",
"(",
"'repo_nexml2json'",
")",
",",
"bracket_ingroup",
"=",
"bool",
"(",
"kwargs",
".",
"get",
"(",
"'bracket_ingroup'",
",",
"False",
")",
")",
",",
"cull_nonmatching",
"=",
"kwargs",
".",
"get",
"(",
"'cull_nonmatching'",
")",
")"
] | Sugar. factory for a PhyloSchema object.
Repackages the kwargs to kwargs for PhyloSchema so that our
PhyloSchema.__init__ does not have to be so rich | [
"Sugar",
".",
"factory",
"for",
"a",
"PhyloSchema",
"object",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/__init__.py#L178-L205 | train | 498 |
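
A hypothetical call (argument values are illustrative); note that `tip_label` is accepted as an alias for `otu_label`:

    from peyotl.nexson_syntax import create_content_spec

    spec = create_content_spec(format="nexus",
                               content="tree",
                               content_id="tree3",
                               tip_label="ot:ottTaxonName")
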
OpenTreeOfLife/peyotl | peyotl/nexson_syntax/__init__.py | convert_nexson_format | def convert_nexson_format(blob,
out_nexson_format,
current_format=None,
remove_old_structs=True,
pristine_if_invalid=False,
sort_arbitrary=False):
"""Take a dict form of NexSON and converts its datastructures to
those needed to serialize as out_nexson_format.
If current_format is not specified, it will be inferred.
If `remove_old_structs` is False and different honeybadgerfish varieties
are selected, the `blob` will be 'fat" containing both types
of lookup structures.
If pristine_if_invalid is False, then the object may be corrupted if it
is an invalid nexson struct. Setting this to False can result in
faster translation, but if an exception is raised the object may
be polluted with partially constructed fields for the out_nexson_format.
"""
if not current_format:
current_format = detect_nexson_version(blob)
out_nexson_format = resolve_nexson_format(out_nexson_format)
if current_format == out_nexson_format:
if sort_arbitrary:
sort_arbitrarily_ordered_nexson(blob)
return blob
two2zero = _is_by_id_hbf(out_nexson_format) and _is_badgerfish_version(current_format)
zero2two = _is_by_id_hbf(current_format) and _is_badgerfish_version(out_nexson_format)
if two2zero or zero2two:
# go from 0.0 -> 1.0 then the 1.0->1.2 should succeed without nexml...
blob = convert_nexson_format(blob,
DIRECT_HONEY_BADGERFISH,
current_format=current_format,
remove_old_structs=remove_old_structs,
pristine_if_invalid=pristine_if_invalid)
current_format = DIRECT_HONEY_BADGERFISH
ccdict = {'output_format': out_nexson_format,
'input_format': current_format,
'remove_old_structs': remove_old_structs,
'pristine_if_invalid': pristine_if_invalid}
ccfg = ConversionConfig(ccdict)
if _is_badgerfish_version(current_format):
converter = Badgerfish2DirectNexson(ccfg)
elif _is_badgerfish_version(out_nexson_format):
assert _is_direct_hbf(current_format)
converter = Direct2BadgerfishNexson(ccfg)
elif _is_direct_hbf(current_format) and (out_nexson_format == BY_ID_HONEY_BADGERFISH):
converter = Direct2OptimalNexson(ccfg)
elif _is_direct_hbf(out_nexson_format) and (current_format == BY_ID_HONEY_BADGERFISH):
converter = Optimal2DirectNexson(ccfg)
else:
raise NotImplementedError('Conversion from {i} to {o}'.format(i=current_format, o=out_nexson_format))
blob = converter.convert(blob)
if sort_arbitrary:
sort_arbitrarily_ordered_nexson(blob)
return blob | python | def convert_nexson_format(blob,
out_nexson_format,
current_format=None,
remove_old_structs=True,
pristine_if_invalid=False,
sort_arbitrary=False):
"""Take a dict form of NexSON and converts its datastructures to
those needed to serialize as out_nexson_format.
If current_format is not specified, it will be inferred.
If `remove_old_structs` is False and different honeybadgerfish varieties
are selected, the `blob` will be 'fat' containing both types
of lookup structures.
If pristine_if_invalid is False, then the object may be corrupted if it
is an invalid nexson struct. Setting this to False can result in
faster translation, but if an exception is raised the object may
be polluted with partially constructed fields for the out_nexson_format.
"""
if not current_format:
current_format = detect_nexson_version(blob)
out_nexson_format = resolve_nexson_format(out_nexson_format)
if current_format == out_nexson_format:
if sort_arbitrary:
sort_arbitrarily_ordered_nexson(blob)
return blob
two2zero = _is_by_id_hbf(out_nexson_format) and _is_badgerfish_version(current_format)
zero2two = _is_by_id_hbf(current_format) and _is_badgerfish_version(out_nexson_format)
if two2zero or zero2two:
# go from 0.0 -> 1.0 then the 1.0->1.2 should succeed without nexml...
blob = convert_nexson_format(blob,
DIRECT_HONEY_BADGERFISH,
current_format=current_format,
remove_old_structs=remove_old_structs,
pristine_if_invalid=pristine_if_invalid)
current_format = DIRECT_HONEY_BADGERFISH
ccdict = {'output_format': out_nexson_format,
'input_format': current_format,
'remove_old_structs': remove_old_structs,
'pristine_if_invalid': pristine_if_invalid}
ccfg = ConversionConfig(ccdict)
if _is_badgerfish_version(current_format):
converter = Badgerfish2DirectNexson(ccfg)
elif _is_badgerfish_version(out_nexson_format):
assert _is_direct_hbf(current_format)
converter = Direct2BadgerfishNexson(ccfg)
elif _is_direct_hbf(current_format) and (out_nexson_format == BY_ID_HONEY_BADGERFISH):
converter = Direct2OptimalNexson(ccfg)
elif _is_direct_hbf(out_nexson_format) and (current_format == BY_ID_HONEY_BADGERFISH):
converter = Optimal2DirectNexson(ccfg)
else:
raise NotImplementedError('Conversion from {i} to {o}'.format(i=current_format, o=out_nexson_format))
blob = converter.convert(blob)
if sort_arbitrary:
sort_arbitrarily_ordered_nexson(blob)
return blob | [
"def",
"convert_nexson_format",
"(",
"blob",
",",
"out_nexson_format",
",",
"current_format",
"=",
"None",
",",
"remove_old_structs",
"=",
"True",
",",
"pristine_if_invalid",
"=",
"False",
",",
"sort_arbitrary",
"=",
"False",
")",
":",
"if",
"not",
"current_format",
":",
"current_format",
"=",
"detect_nexson_version",
"(",
"blob",
")",
"out_nexson_format",
"=",
"resolve_nexson_format",
"(",
"out_nexson_format",
")",
"if",
"current_format",
"==",
"out_nexson_format",
":",
"if",
"sort_arbitrary",
":",
"sort_arbitrarily_ordered_nexson",
"(",
"blob",
")",
"return",
"blob",
"two2zero",
"=",
"_is_by_id_hbf",
"(",
"out_nexson_format",
")",
"and",
"_is_badgerfish_version",
"(",
"current_format",
")",
"zero2two",
"=",
"_is_by_id_hbf",
"(",
"current_format",
")",
"and",
"_is_badgerfish_version",
"(",
"out_nexson_format",
")",
"if",
"two2zero",
"or",
"zero2two",
":",
"# go from 0.0 -> 1.0 then the 1.0->1.2 should succeed without nexml...",
"blob",
"=",
"convert_nexson_format",
"(",
"blob",
",",
"DIRECT_HONEY_BADGERFISH",
",",
"current_format",
"=",
"current_format",
",",
"remove_old_structs",
"=",
"remove_old_structs",
",",
"pristine_if_invalid",
"=",
"pristine_if_invalid",
")",
"current_format",
"=",
"DIRECT_HONEY_BADGERFISH",
"ccdict",
"=",
"{",
"'output_format'",
":",
"out_nexson_format",
",",
"'input_format'",
":",
"current_format",
",",
"'remove_old_structs'",
":",
"remove_old_structs",
",",
"'pristine_if_invalid'",
":",
"pristine_if_invalid",
"}",
"ccfg",
"=",
"ConversionConfig",
"(",
"ccdict",
")",
"if",
"_is_badgerfish_version",
"(",
"current_format",
")",
":",
"converter",
"=",
"Badgerfish2DirectNexson",
"(",
"ccfg",
")",
"elif",
"_is_badgerfish_version",
"(",
"out_nexson_format",
")",
":",
"assert",
"_is_direct_hbf",
"(",
"current_format",
")",
"converter",
"=",
"Direct2BadgerfishNexson",
"(",
"ccfg",
")",
"elif",
"_is_direct_hbf",
"(",
"current_format",
")",
"and",
"(",
"out_nexson_format",
"==",
"BY_ID_HONEY_BADGERFISH",
")",
":",
"converter",
"=",
"Direct2OptimalNexson",
"(",
"ccfg",
")",
"elif",
"_is_direct_hbf",
"(",
"out_nexson_format",
")",
"and",
"(",
"current_format",
"==",
"BY_ID_HONEY_BADGERFISH",
")",
":",
"converter",
"=",
"Optimal2DirectNexson",
"(",
"ccfg",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Conversion from {i} to {o}'",
".",
"format",
"(",
"i",
"=",
"current_format",
",",
"o",
"=",
"out_nexson_format",
")",
")",
"blob",
"=",
"converter",
".",
"convert",
"(",
"blob",
")",
"if",
"sort_arbitrary",
":",
"sort_arbitrarily_ordered_nexson",
"(",
"blob",
")",
"return",
"blob"
] | Takes a dict form of NexSON and converts its datastructures to
those needed to serialize as out_nexson_format.
If current_format is not specified, it will be inferred.
If `remove_old_structs` is False and different honeybadgerfish varieties
are selected, the `blob` will be 'fat' containing both types
of lookup structures.
If pristine_if_invalid is False, then the object may be corrupted if it
is an invalid nexson struct. Setting this to False can result in
faster translation, but if an exception is raised the object may
be polluted with partially constructed fields for the out_nexson_format. | [
"Take",
"a",
"dict",
"form",
"of",
"NexSON",
"and",
"converts",
"its",
"datastructures",
"to",
"those",
"needed",
"to",
"serialize",
"as",
"out_nexson_format",
".",
"If",
"current_format",
"is",
"not",
"specified",
"it",
"will",
"be",
"inferred",
".",
"If",
"remove_old_structs",
"is",
"False",
"and",
"different",
"honeybadgerfish",
"varieties",
"are",
"selected",
"the",
"blob",
"will",
"be",
"fat",
"containing",
"both",
"types",
"of",
"lookup",
"structures",
".",
"If",
"pristine_if_invalid",
"is",
"False",
"then",
"the",
"object",
"may",
"be",
"corrupted",
"if",
"it",
"is",
"an",
"invalid",
"nexson",
"struct",
".",
"Setting",
"this",
"to",
"False",
"can",
"result",
"in",
"faster",
"translation",
"but",
"if",
"an",
"exception",
"is",
"raised",
"the",
"object",
"may",
"be",
"polluted",
"with",
"partially",
"constructed",
"fields",
"for",
"the",
"out_nexson_format",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/__init__.py#L646-L699 | train | 499 |
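
Round-tripping a study blob (sketch; the version strings, such as '0.0' for badgerfish and '1.2.1' for the by-id form, are assumptions based on the constants referenced above):

    import json
    from peyotl.nexson_syntax import convert_nexson_format

    with open("study.json") as f:
        blob = json.load(f)
    # badgerfish ('0.0') -> by-id is routed through the direct form internally
    converted = convert_nexson_format(blob, "1.2.1", sort_arbitrary=True)
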